Each row of the table is one source file; the cells appear in the column order listed below.

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 245 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 4 to 245 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 4 to 245 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | lengths 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
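When a table with this schema is consumed programmatically, a row-level filter on the metadata columns is usually applied before reading the large `content` cell. The snippet below is a minimal sketch assuming the table is published as a Hugging Face dataset; the dataset id `user/python-files-no-comments` is a placeholder, not the real repository name for this data.

```python
# Minimal sketch, assuming the table is available as a Hugging Face dataset.
# "user/python-files-no-comments" is a placeholder id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-files-no-comments", split="train", streaming=True)

def keep(row):
    # Keep small, MIT-licensed Python files whose '#' comments were stripped.
    return (
        row["ext"] == "py"
        and row["size"] < 10_000
        and "MIT" in row["max_stars_repo_licenses"]
        and row["is_sharp_comment_removed"]
    )

for row in filter(keep, ds):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["avg_line_length"])
    break
```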
7909dccee6cfa7b82cb045faf4538d6ccb83e2a7 | 630 | py | Python | cannes_accomodation/gen_docker_compose.py | Xogiga/CPOA_INEC_SAVIGNY_VALADE | f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a | ["MIT"] | null | null | null | cannes_accomodation/gen_docker_compose.py | Xogiga/CPOA_INEC_SAVIGNY_VALADE | f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a | ["MIT"] | null | null | null | cannes_accomodation/gen_docker_compose.py | Xogiga/CPOA_INEC_SAVIGNY_VALADE | f33a9e9448f011bcc56abc0c2270bf0c3d9ae43a | ["MIT"] | null | null | null |
from accomodation_website.secrets import DB_PWD
docker_compose = """---
version: '3'
services:
web:
build: .
publish:
- 80
links:
- db:db
db:
image: mariadb
environment:
MYSQL_DATABASE: cannes_db
MYSQL_ROOT_PASSWORD: """ + DB_PWD
with open('docker-compose.yml', 'w') as f:
f.write(docker_compose)
| 31.5 | 60 | 0.349206 |
from accomodation_website.secrets import DB_PWD
docker_compose = """---
version: '3'
services:
web:
build: .
publish:
- 80
links:
- db:db
db:
image: mariadb
environment:
MYSQL_DATABASE: cannes_db
MYSQL_ROOT_PASSWORD: """ + DB_PWD
with open('docker-compose.yml', 'w') as f:
f.write(docker_compose)
| true | true |
7909ddc1c9c24250060fbf7335eb390653c183b7 | 9,837 | py | Python | TimeWrapper_JE/venv/Lib/site-packages/setuptools/_vendor/packaging/markers.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | 2 | 2020-09-22T14:38:24.000Z | 2020-10-30T03:11:36.000Z | TimeWrapper_JE/venv/Lib/site-packages/setuptools/_vendor/packaging/markers.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | TimeWrapper_JE/venv/Lib/site-packages/setuptools/_vendor/packaging/markers.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from ._typing import TYPE_CHECKING
from .specifiers import Specifier, InvalidSpecifier
if TYPE_CHECKING: # pragma: no cover
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
Operator = Callable[[str, str], bool]
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
# type: (Any) -> None
self.value = value
def __str__(self):
# type: () -> str
return str(self.value)
def __repr__(self):
# type: () -> str
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
# type: () -> str
raise NotImplementedError
class Variable(Node):
def serialize(self):
# type: () -> str
return str(self)
class Value(Node):
def serialize(self):
# type: () -> str
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
# type: () -> str
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name") # PEP-345
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # undocumented setuptools legacy
| L("extra") # PEP-508
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
# type: (Union[ParseResults, List[Any]]) -> List[Any]
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
# type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
} # type: Dict[str, Operator]
def _eval_op(lhs, op, rhs):
# type: (str, Op, str) -> bool
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize()) # type: Optional[Operator]
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
class Undefined(object):
pass
_undefined = Undefined()
def _get_env(environment, name):
# type: (Dict[str, str], str) -> str
value = environment.get(name, _undefined) # type: Union[str, Undefined]
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
# type: (List[Any], Dict[str, str]) -> bool
groups = [[]] # type: List[List[bool]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
# type: (sys._version_info) -> str
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment():
# type: () -> Dict[str, str]
if hasattr(sys, "implementation"):
# Ignoring the `sys.implementation` reference for type checking due to
# mypy not liking that the attribute doesn't exist in Python 2.7 when
# run with the `--py27` flag.
iver = format_full_version(sys.implementation.version) # type: ignore
implementation_name = sys.implementation.name # type: ignore
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
# type: (str) -> None
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
# type: () -> str
return _format_marker(self._markers)
def __repr__(self):
# type: () -> str
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
# type: (Optional[Dict[str, str]]) -> bool
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
| 29.899696 | 93 | 0.603741 |
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L
from ._compat import string_types
from ._typing import TYPE_CHECKING
from .specifiers import Specifier, InvalidSpecifier
if TYPE_CHECKING:
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
Operator = Callable[[str, str], bool]
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
class UndefinedComparison(ValueError):
class UndefinedEnvironmentName(ValueError):
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name")
| L("sys.platform")
| L("platform.version")
| L("platform.machine")
| L("platform.python_implementation")
| L("python_implementation")
| L("extra")
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
# type: (Union[ParseResults, List[Any]]) -> List[Any]
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
# type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
} # type: Dict[str, Operator]
def _eval_op(lhs, op, rhs):
# type: (str, Op, str) -> bool
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize()) # type: Optional[Operator]
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
class Undefined(object):
pass
_undefined = Undefined()
def _get_env(environment, name):
# type: (Dict[str, str], str) -> str
value = environment.get(name, _undefined) # type: Union[str, Undefined]
if isinstance(value, Undefined):
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
# type: (List[Any], Dict[str, str]) -> bool
groups = [[]] # type: List[List[bool]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
# type: (sys._version_info) -> str
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment():
# type: () -> Dict[str, str]
if hasattr(sys, "implementation"):
# Ignoring the `sys.implementation` reference for type checking due to
# mypy not liking that the attribute doesn't exist in Python 2.7 when
# run with the `--py27` flag.
iver = format_full_version(sys.implementation.version) # type: ignore
implementation_name = sys.implementation.name # type: ignore
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
# type: (str) -> None
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
# type: () -> str
return _format_marker(self._markers)
def __repr__(self):
# type: () -> str
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
# type: (Optional[Dict[str, str]]) -> bool
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
| true | true |
7909df315f60a520d86b5dda95a110d334a5c6f3 | 2,432 | py | Python | masakarimonitors/version.py | iorchard/masakari-monitors | bcf6129798a821975ab22cff56c791c81883f5da | ["Apache-2.0"] | 11 | 2016-11-20T08:00:52.000Z | 2020-08-31T10:25:42.000Z | masakarimonitors/version.py | iorchard/masakari-monitors | bcf6129798a821975ab22cff56c791c81883f5da | ["Apache-2.0"] | null | null | null | masakarimonitors/version.py | iorchard/masakari-monitors | bcf6129798a821975ab22cff56c791c81883f5da | ["Apache-2.0"] | 10 | 2016-11-17T05:58:44.000Z | 2019-07-01T09:10:44.000Z |
# Copyright(c) 2016 Nippon Telegraph and Telephone Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pbr import version as pbr_version
MONITORS_VENDOR = "OpenStack Foundation"
MONITORS_PRODUCT = "OpenStack Masakari Monitors"
MONITORS_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr_version.VersionInfo('masakari-monitors')
version_string = version_info.version_string
def _load_config():
# Don't load in global context, since we can't assume
# these modules are accessible when distutils uses
# this module
import configparser
from oslo_config import cfg
from oslo_log import log as logging
global loaded, MONITORS_VENDOR, MONITORS_PRODUCT, MONITORS_PACKAGE
if loaded:
return
loaded = True
cfgfile = cfg.CONF.find_file("release")
if cfgfile is None:
return
try:
cfg = configparser.RawConfigParser()
cfg.read(cfgfile)
if cfg.has_option("Masakarimonitors", "vendor"):
MONITORS_VENDOR = cfg.get("Masakarimonitors", "vendor")
if cfg.has_option("Masakarimonitors", "product"):
MONITORS_PRODUCT = cfg.get("Masakarimonitors", "product")
if cfg.has_option("Masakarimonitors", "package"):
MONITORS_PACKAGE = cfg.get("Masakarimonitors", "package")
except Exception as ex:
LOG = logging.getLogger(__name__)
LOG.error("Failed to load %(cfgfile)s: %(ex)s",
{'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
_load_config()
return MONITORS_VENDOR
def product_string():
_load_config()
return MONITORS_PRODUCT
def package_string():
_load_config()
return MONITORS_PACKAGE
def version_string_with_package():
if package_string() is None:
return version_info.version_string()
else:
return "%s-%s" % (version_info.version_string(), package_string())
| 27.954023 | 74 | 0.704359 |
from pbr import version as pbr_version
MONITORS_VENDOR = "OpenStack Foundation"
MONITORS_PRODUCT = "OpenStack Masakari Monitors"
MONITORS_PACKAGE = None
loaded = False
version_info = pbr_version.VersionInfo('masakari-monitors')
version_string = version_info.version_string
def _load_config():
import configparser
from oslo_config import cfg
from oslo_log import log as logging
global loaded, MONITORS_VENDOR, MONITORS_PRODUCT, MONITORS_PACKAGE
if loaded:
return
loaded = True
cfgfile = cfg.CONF.find_file("release")
if cfgfile is None:
return
try:
cfg = configparser.RawConfigParser()
cfg.read(cfgfile)
if cfg.has_option("Masakarimonitors", "vendor"):
MONITORS_VENDOR = cfg.get("Masakarimonitors", "vendor")
if cfg.has_option("Masakarimonitors", "product"):
MONITORS_PRODUCT = cfg.get("Masakarimonitors", "product")
if cfg.has_option("Masakarimonitors", "package"):
MONITORS_PACKAGE = cfg.get("Masakarimonitors", "package")
except Exception as ex:
LOG = logging.getLogger(__name__)
LOG.error("Failed to load %(cfgfile)s: %(ex)s",
{'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
_load_config()
return MONITORS_VENDOR
def product_string():
_load_config()
return MONITORS_PRODUCT
def package_string():
_load_config()
return MONITORS_PACKAGE
def version_string_with_package():
if package_string() is None:
return version_info.version_string()
else:
return "%s-%s" % (version_info.version_string(), package_string())
| true | true |
7909df6bb968934f0cd637987860c8a1104ed839 | 4,663 | py | Python | ernie/classification/service/client.py | lerry-lee/similarity-model | 74ea7f4fc97382d87e6ab71531e66182ca1ba3f4 | ["MIT"] | 4 | 2021-05-24T10:34:12.000Z | 2021-12-17T06:28:30.000Z | ernie/classification/service/client.py | lerry-lee/similarity-model | 74ea7f4fc97382d87e6ab71531e66182ca1ba3f4 | ["MIT"] | null | null | null | ernie/classification/service/client.py | lerry-lee/similarity-model | 74ea7f4fc97382d87e6ab71531e66182ca1ba3f4 | ["MIT"] | 2 | 2021-03-01T16:06:49.000Z | 2021-08-08T16:09:14.000Z |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from propeller.service.client import InferenceClient
from propeller import log
import six
from tmp import util
from time import time
import numpy as np
class ErnieClient(InferenceClient):
def __init__(self,
vocab_file,
host='localhost',
port=8888,
batch_size=32,
num_coroutine=1,
timeout=10.,
max_seqlen=128):
host_port = 'tcp://%s:%d' % (host, port)
client = super(ErnieClient, self).__init__(host_port, batch_size=batch_size, num_coroutine=num_coroutine, timeout=timeout)
self.vocab = {j.strip().split(b'\t')[0].decode('utf8'): i for i, j in enumerate(open(vocab_file, 'rb'))}
self.tokenizer = util.data.CharTokenizer(self.vocab.keys())
self.max_seqlen = max_seqlen
self.cls_id = self.vocab['[CLS]']
self.sep_id = self.vocab['[SEP]']
def txt_2_id(self, text):
ids = np.array([self.vocab[i] for i in self.tokenizer(text)])
return ids
def pad_and_batch(self, ids):
max_len = max(map(len, ids))
padded = np.stack([np.pad(i, [[0, max_len - len(i)]], mode='constant')for i in ids])
padded = np.expand_dims(padded, axis=-1)
return padded
def __call__(self, text_a, text_b=None):
if text_b is not None and len(text_a) != len(text_b):
raise ValueError('text_b %d has different size than text_a %d' % (text_b, text_a))
text_a = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_a]
if text_b is not None:
text_b = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_b]
ids_a = map(self.txt_2_id, text_a)
if text_b is not None:
ids_b = map(self.txt_2_id, text_b)
ret = [util.data.build_2_pair(a, b, self.max_seqlen, self.cls_id, self.sep_id) for a, b in zip(ids_a, ids_b)]
else:
ret = [util.data.build_1_pair(a, self.max_seqlen, self.cls_id, self.sep_id) for a in ids_a]
sen_ids, token_type_ids = zip(*ret)
sen_ids = self.pad_and_batch(sen_ids)
token_type_ids = self.pad_and_batch(token_type_ids)
ret, = super(ErnieClient, self).__call__(sen_ids, token_type_ids)
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ernie_encoder_client')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('-i', '--input', type=str, required=True)
parser.add_argument('-o', '--output', type=str, required=True)
parser.add_argument('-p', '--port', type=int, default=8888)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_coroutine', type=int, default=1)
parser.add_argument('--vocab', type=str, required=True)
args = parser.parse_args()
client = ErnieClient(args.vocab, args.host, args.port, batch_size=args.batch_size, num_coroutine=args.num_coroutine)
inputs = [i.strip().split(b'\t') for i in open(args.input, 'rb').readlines()]
if len(inputs) == 0:
raise ValueError('empty input')
send_batch = args.num_coroutine * args.batch_size
send_num = len(inputs) // send_batch + 1
rets = []
start = time()
for i in range(send_num):
slice = inputs[i * send_batch: (i + 1) * send_batch]
if len(slice) == 0:
continue
columns = list(zip(*slice))
if len(columns) > 2:
raise ValueError('inputs file has more than 2 columns')
ret = client(*columns)
if len(ret.shape) == 3:
ret = ret[:, 0, :] # take cls
rets.append(ret)
end = time()
with open(args.output, 'wb') as outf:
arr = np.concatenate(rets, 0)
np.save(outf, arr)
log.info('query num: %d average latency %.5f' % (len(inputs), (end - start)/len(inputs)))
| 42.009009 | 130 | 0.650011 |
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from propeller.service.client import InferenceClient
from propeller import log
import six
from tmp import util
from time import time
import numpy as np
class ErnieClient(InferenceClient):
def __init__(self,
vocab_file,
host='localhost',
port=8888,
batch_size=32,
num_coroutine=1,
timeout=10.,
max_seqlen=128):
host_port = 'tcp://%s:%d' % (host, port)
client = super(ErnieClient, self).__init__(host_port, batch_size=batch_size, num_coroutine=num_coroutine, timeout=timeout)
self.vocab = {j.strip().split(b'\t')[0].decode('utf8'): i for i, j in enumerate(open(vocab_file, 'rb'))}
self.tokenizer = util.data.CharTokenizer(self.vocab.keys())
self.max_seqlen = max_seqlen
self.cls_id = self.vocab['[CLS]']
self.sep_id = self.vocab['[SEP]']
def txt_2_id(self, text):
ids = np.array([self.vocab[i] for i in self.tokenizer(text)])
return ids
def pad_and_batch(self, ids):
max_len = max(map(len, ids))
padded = np.stack([np.pad(i, [[0, max_len - len(i)]], mode='constant')for i in ids])
padded = np.expand_dims(padded, axis=-1)
return padded
def __call__(self, text_a, text_b=None):
if text_b is not None and len(text_a) != len(text_b):
raise ValueError('text_b %d has different size than text_a %d' % (text_b, text_a))
text_a = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_a]
if text_b is not None:
text_b = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_b]
ids_a = map(self.txt_2_id, text_a)
if text_b is not None:
ids_b = map(self.txt_2_id, text_b)
ret = [util.data.build_2_pair(a, b, self.max_seqlen, self.cls_id, self.sep_id) for a, b in zip(ids_a, ids_b)]
else:
ret = [util.data.build_1_pair(a, self.max_seqlen, self.cls_id, self.sep_id) for a in ids_a]
sen_ids, token_type_ids = zip(*ret)
sen_ids = self.pad_and_batch(sen_ids)
token_type_ids = self.pad_and_batch(token_type_ids)
ret, = super(ErnieClient, self).__call__(sen_ids, token_type_ids)
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ernie_encoder_client')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('-i', '--input', type=str, required=True)
parser.add_argument('-o', '--output', type=str, required=True)
parser.add_argument('-p', '--port', type=int, default=8888)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_coroutine', type=int, default=1)
parser.add_argument('--vocab', type=str, required=True)
args = parser.parse_args()
client = ErnieClient(args.vocab, args.host, args.port, batch_size=args.batch_size, num_coroutine=args.num_coroutine)
inputs = [i.strip().split(b'\t') for i in open(args.input, 'rb').readlines()]
if len(inputs) == 0:
raise ValueError('empty input')
send_batch = args.num_coroutine * args.batch_size
send_num = len(inputs) // send_batch + 1
rets = []
start = time()
for i in range(send_num):
slice = inputs[i * send_batch: (i + 1) * send_batch]
if len(slice) == 0:
continue
columns = list(zip(*slice))
if len(columns) > 2:
raise ValueError('inputs file has more than 2 columns')
ret = client(*columns)
if len(ret.shape) == 3:
ret = ret[:, 0, :]
rets.append(ret)
end = time()
with open(args.output, 'wb') as outf:
arr = np.concatenate(rets, 0)
np.save(outf, arr)
log.info('query num: %d average latency %.5f' % (len(inputs), (end - start)/len(inputs)))
| true | true |
7909e030920127315f407061227ef507ac37bca0 | 493 | py | Python | dynamicProgramming/303_range_sum_query_immutable.py | weilincheng/LeetCode-practice | 6e105bf381661d9f737d724c2dd42aa9cf9a5588 | ["MIT"] | null | null | null | dynamicProgramming/303_range_sum_query_immutable.py | weilincheng/LeetCode-practice | 6e105bf381661d9f737d724c2dd42aa9cf9a5588 | ["MIT"] | null | null | null | dynamicProgramming/303_range_sum_query_immutable.py | weilincheng/LeetCode-practice | 6e105bf381661d9f737d724c2dd42aa9cf9a5588 | ["MIT"] | null | null | null |
class NumArray:
# O(n) time | O(n) space - where n is the length of the input list
def __init__(self, nums: List[int]):
self.nums = []
currentSum = 0
for num in nums:
currentSum += num
self.nums.append(currentSum)
# O(1) time to look up the nums list
def sumRange(self, left: int, right: int) -> int:
if left > 0:
return self.nums[right] - self.nums[left - 1]
else:
return self.nums[right]
| 35.214286 | 70 | 0.549696 |
class NumArray:
def __init__(self, nums: List[int]):
self.nums = []
currentSum = 0
for num in nums:
currentSum += num
self.nums.append(currentSum)
def sumRange(self, left: int, right: int) -> int:
if left > 0:
return self.nums[right] - self.nums[left - 1]
else:
return self.nums[right]
| true | true |
7909e0e8a61bb7cf1b1a37b957ce9515fb66e3c4 | 1,584 | py | Python | src/olympia/pages/urls.py | anik31/addons-server | cecb61da98d6e830fb45a2b1d61b41e72812137e | ["BSD-3-Clause"] | 2 | 2021-07-19T03:26:43.000Z | 2021-07-24T03:12:52.000Z | src/olympia/pages/urls.py | anik31/addons-server | cecb61da98d6e830fb45a2b1d61b41e72812137e | ["BSD-3-Clause"] | 760 | 2021-05-17T07:59:30.000Z | 2022-03-31T11:14:15.000Z | src/olympia/pages/urls.py | championshuttler/addons-server | 5d4c1bfbed2fc509ecc1f3f5065955996e057eeb | ["BSD-3-Clause"] | 1 | 2021-07-19T03:26:52.000Z | 2021-07-19T03:26:52.000Z |
from django.conf import settings
from django.urls import re_path
from django.http import HttpResponsePermanentRedirect as perma_redirect
from django.urls import reverse
from django.views.generic.base import TemplateView
urlpatterns = [
re_path(
r'^about$',
TemplateView.as_view(template_name='pages/about.lhtml'),
name='pages.about',
),
re_path(
r'^google1f3e37b7351799a5\.html$',
TemplateView.as_view(template_name='pages/google_webmaster_verification.html'),
),
re_path(
r'^google231a41e803e464e9\.html$',
TemplateView.as_view(template_name='pages/google_search_console.html'),
),
re_path(
r'^review_guide$',
TemplateView.as_view(template_name='pages/review_guide.html'),
name='pages.review_guide',
),
re_path(
r'^shield-study-2/',
lambda req: perma_redirect(settings.SHIELD_STUDIES_SUPPORT_URL),
),
re_path(
r'^shield_study_\d{1,2}$',
lambda req: perma_redirect(settings.SHIELD_STUDIES_SUPPORT_URL),
),
re_path(
r'^pages/review_guide$',
lambda req: perma_redirect(reverse('pages.review_guide')),
),
re_path(
r'^pages/developer_agreement$',
lambda req: perma_redirect(reverse('devhub.docs', args=['policies/agreement'])),
),
re_path(
r'^pages/validation$', lambda req: perma_redirect(settings.VALIDATION_FAQ_URL)
),
re_path(
r'^pioneer$',
TemplateView.as_view(template_name='pages/pioneer.html'),
name='pages.pioneer',
),
]
| 30.461538 | 88 | 0.659091 |
from django.conf import settings
from django.urls import re_path
from django.http import HttpResponsePermanentRedirect as perma_redirect
from django.urls import reverse
from django.views.generic.base import TemplateView
urlpatterns = [
re_path(
r'^about$',
TemplateView.as_view(template_name='pages/about.lhtml'),
name='pages.about',
),
re_path(
r'^google1f3e37b7351799a5\.html$',
TemplateView.as_view(template_name='pages/google_webmaster_verification.html'),
),
re_path(
r'^google231a41e803e464e9\.html$',
TemplateView.as_view(template_name='pages/google_search_console.html'),
),
re_path(
r'^review_guide$',
TemplateView.as_view(template_name='pages/review_guide.html'),
name='pages.review_guide',
),
re_path(
r'^shield-study-2/',
lambda req: perma_redirect(settings.SHIELD_STUDIES_SUPPORT_URL),
),
re_path(
r'^shield_study_\d{1,2}$',
lambda req: perma_redirect(settings.SHIELD_STUDIES_SUPPORT_URL),
),
re_path(
r'^pages/review_guide$',
lambda req: perma_redirect(reverse('pages.review_guide')),
),
re_path(
r'^pages/developer_agreement$',
lambda req: perma_redirect(reverse('devhub.docs', args=['policies/agreement'])),
),
re_path(
r'^pages/validation$', lambda req: perma_redirect(settings.VALIDATION_FAQ_URL)
),
re_path(
r'^pioneer$',
TemplateView.as_view(template_name='pages/pioneer.html'),
name='pages.pioneer',
),
]
| true | true |
7909e0ffec49b0bf73dd169d12190aee676388b8 | 1,047 | py | Python | augur/routes/user.py | guowenbin90/augur | bccaf3139663ff4ea6a0ac6f45fe6d39d164c2cd | ["MIT"] | 5 | 2019-04-14T14:05:05.000Z | 2019-12-05T15:57:50.000Z | augur/routes/user.py | guowenbin90/augur | bccaf3139663ff4ea6a0ac6f45fe6d39d164c2cd | ["MIT"] | 16 | 2019-04-25T00:06:57.000Z | 2022-01-22T04:26:28.000Z | augur/routes/user.py | guowenbin90/augur | bccaf3139663ff4ea6a0ac6f45fe6d39d164c2cd | ["MIT"] | 3 | 2019-04-12T20:34:35.000Z | 2019-04-14T19:06:25.000Z |
import json
from flask import Flask, request, Response, send_from_directory, redirect, flash
from flask_login import LoginManager, current_user, login_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
from ..models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
def create_user_routes(server):
@server.login.user_loader
def load_user(id):
return User.query.get(int(id))
@server.app.route(f'/{server.api_version}/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
nxt = request.args.get('next')
if form.validate_on_submit():
user = User()
login_user(user)
flash('Logged in successfully.')
return redirect(nxt or '/')
| 31.727273 | 80 | 0.698185 |
import json
from flask import Flask, request, Response, send_from_directory, redirect, flash
from flask_login import LoginManager, current_user, login_user
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
from ..models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
def create_user_routes(server):
@server.login.user_loader
def load_user(id):
return User.query.get(int(id))
@server.app.route(f'/{server.api_version}/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
nxt = request.args.get('next')
if form.validate_on_submit():
user = User()
login_user(user)
flash('Logged in successfully.')
return redirect(nxt or '/')
| true | true |
7909e130e348c46558e8dd0081a32a0a4c2fc740 | 31,540 | py | Python | mesonbuild/cmake/traceparser.py | andriyor/meson | f9bfeb2add70973113ab4a98454a5c5d7e3a26ae | ["Apache-2.0"] | 1 | 2022-02-25T00:07:10.000Z | 2022-02-25T00:07:10.000Z | mesonbuild/cmake/traceparser.py | andriyor/meson | f9bfeb2add70973113ab4a98454a5c5d7e3a26ae | ["Apache-2.0"] | null | null | null | mesonbuild/cmake/traceparser.py | andriyor/meson | f9bfeb2add70973113ab4a98454a5c5d7e3a26ae | ["Apache-2.0"] | null | null | null |
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException
from .generator import parse_generator_expressions
from .. import mlog
from ..mesonlib import version_compare
import typing as T
from pathlib import Path
from functools import lru_cache
import re
import json
import textwrap
class CMakeTraceLine:
def __init__(self, file_str: str, line: int, func: str, args: T.List[str]) -> None:
self.file = CMakeTraceLine._to_path(file_str)
self.line = line
self.func = func.lower()
self.args = args
@staticmethod
@lru_cache(maxsize=None)
def _to_path(file_str: str) -> Path:
return Path(file_str)
def __repr__(self) -> str:
s = 'CMake TRACE: {0}:{1} {2}({3})'
return s.format(self.file, self.line, self.func, self.args)
class CMakeCacheEntry(T.NamedTuple):
value: T.List[str]
type: str
class CMakeTarget:
def __init__(
self,
name: str,
target_type: str,
properties: T.Optional[T.Dict[str, T.List[str]]] = None,
imported: bool = False,
tline: T.Optional[CMakeTraceLine] = None
):
if properties is None:
properties = {}
self.name = name
self.type = target_type
self.properties = properties
self.imported = imported
self.tline = tline
self.depends = [] # type: T.List[str]
self.current_bin_dir = None # type: T.Optional[Path]
self.current_src_dir = None # type: T.Optional[Path]
def __repr__(self) -> str:
s = 'CMake TARGET:\n -- name: {}\n -- type: {}\n -- imported: {}\n -- properties: {{\n{} }}\n -- tline: {}'
propSTR = ''
for i in self.properties:
propSTR += " '{}': {}\n".format(i, self.properties[i])
return s.format(self.name, self.type, self.imported, propSTR, self.tline)
def strip_properties(self) -> None:
# Strip the strings in the properties
if not self.properties:
return
for key, val in self.properties.items():
self.properties[key] = [x.strip() for x in val]
assert all([';' not in x for x in self.properties[key]])
class CMakeGeneratorTarget(CMakeTarget):
def __init__(self, name: str) -> None:
super().__init__(name, 'CUSTOM', {})
self.outputs = [] # type: T.List[Path]
self.command = [] # type: T.List[T.List[str]]
self.working_dir = None # type: T.Optional[Path]
class CMakeTraceParser:
def __init__(self, cmake_version: str, build_dir: Path, permissive: bool = True) -> None:
self.vars: T.Dict[str, T.List[str]] = {}
self.vars_by_file: T.Dict[Path, T.Dict[str, T.List[str]]] = {}
self.targets: T.Dict[str, CMakeTarget] = {}
self.cache: T.Dict[str, CMakeCacheEntry] = {}
self.explicit_headers = set() # type: T.Set[Path]
# T.List of targes that were added with add_custom_command to generate files
self.custom_targets = [] # type: T.List[CMakeGeneratorTarget]
self.permissive = permissive # type: bool
self.cmake_version = cmake_version # type: str
self.trace_file = 'cmake_trace.txt'
self.trace_file_path = build_dir / self.trace_file
self.trace_format = 'json-v1' if version_compare(cmake_version, '>=3.17') else 'human'
# State for delayed command execution. Delayed command execution is realised
# with a custom CMake file that overrides some functions and adds some
# introspection information to the trace.
self.delayed_commands = [] # type: T.List[str]
self.stored_commands = [] # type: T.List[CMakeTraceLine]
# All supported functions
self.functions = {
'set': self._cmake_set,
'unset': self._cmake_unset,
'add_executable': self._cmake_add_executable,
'add_library': self._cmake_add_library,
'add_custom_command': self._cmake_add_custom_command,
'add_custom_target': self._cmake_add_custom_target,
'set_property': self._cmake_set_property,
'set_target_properties': self._cmake_set_target_properties,
'target_compile_definitions': self._cmake_target_compile_definitions,
'target_compile_options': self._cmake_target_compile_options,
'target_include_directories': self._cmake_target_include_directories,
'target_link_libraries': self._cmake_target_link_libraries,
'target_link_options': self._cmake_target_link_options,
'add_dependencies': self._cmake_add_dependencies,
# Special functions defined in the preload script.
# These functions do nothing in the CMake code, but have special
# meaning here in the trace parser.
'meson_ps_execute_delayed_calls': self._meson_ps_execute_delayed_calls,
'meson_ps_reload_vars': self._meson_ps_reload_vars,
'meson_ps_disabled_function': self._meson_ps_disabled_function,
} # type: T.Dict[str, T.Callable[[CMakeTraceLine], None]]
if version_compare(self.cmake_version, '<3.17.0'):
mlog.deprecation(textwrap.dedent(f'''\
CMake support for versions <3.17 is deprecated since Meson 0.62.0.
|
| However, Meson was only able to find CMake {self.cmake_version}.
|
| Support for all CMake versions below 3.17.0 will be removed once
| newer CMake versions are more widely adopted. If you encounter
| any errors please try upgrading CMake to a newer version first.
'''), once=True)
def trace_args(self) -> T.List[str]:
arg_map = {
'human': ['--trace', '--trace-expand'],
'json-v1': ['--trace-expand', '--trace-format=json-v1'],
}
base_args = ['--no-warn-unused-cli']
if not self.requires_stderr():
base_args += [f'--trace-redirect={self.trace_file}']
return arg_map[self.trace_format] + base_args
def requires_stderr(self) -> bool:
return version_compare(self.cmake_version, '<3.16')
def parse(self, trace: T.Optional[str] = None) -> None:
# First load the trace (if required)
if not self.requires_stderr():
if not self.trace_file_path.exists and not self.trace_file_path.is_file():
raise CMakeException(f'CMake: Trace file "{self.trace_file_path!s}" not found')
trace = self.trace_file_path.read_text(errors='ignore', encoding='utf-8')
if not trace:
raise CMakeException('CMake: The CMake trace was not provided or is empty')
# Second parse the trace
lexer1 = None
if self.trace_format == 'human':
lexer1 = self._lex_trace_human(trace)
elif self.trace_format == 'json-v1':
lexer1 = self._lex_trace_json(trace)
else:
raise CMakeException(f'CMake: Internal error: Invalid trace format {self.trace_format}. Expected [human, json-v1]')
# Primary pass -- parse everything
for l in lexer1:
# store the function if its execution should be delayed
if l.func in self.delayed_commands:
self.stored_commands += [l]
continue
# "Execute" the CMake function if supported
fn = self.functions.get(l.func, None)
if fn:
fn(l)
# Postprocess
for tgt in self.targets.values():
tgt.strip_properties()
def get_first_cmake_var_of(self, var_list: T.List[str]) -> T.List[str]:
# Return the first found CMake variable in list var_list
for i in var_list:
if i in self.vars:
return self.vars[i]
return []
def get_cmake_var(self, var: str) -> T.List[str]:
# Return the value of the CMake variable var or an empty list if var does not exist
if var in self.vars:
return self.vars[var]
return []
def var_to_str(self, var: str) -> T.Optional[str]:
if var in self.vars and self.vars[var]:
return self.vars[var][0]
return None
def _str_to_bool(self, expr: T.Union[str, T.List[str]]) -> bool:
if not expr:
return False
if isinstance(expr, list):
expr_str = expr[0]
else:
expr_str = expr
expr_str = expr_str.upper()
return expr_str not in ['0', 'OFF', 'NO', 'FALSE', 'N', 'IGNORE'] and not expr_str.endswith('NOTFOUND')
def var_to_bool(self, var: str) -> bool:
return self._str_to_bool(self.vars.get(var, []))
def _gen_exception(self, function: str, error: str, tline: CMakeTraceLine) -> None:
# Generate an exception if the parser is not in permissive mode
if self.permissive:
mlog.debug(f'CMake trace warning: {function}() {error}\n{tline}')
return None
raise CMakeException(f'CMake: {function}() {error}\n{tline}')
def _cmake_set(self, tline: CMakeTraceLine) -> None:
"""Handler for the CMake set() function in all variaties.
comes in three flavors:
set(<var> <value> [PARENT_SCOPE])
set(<var> <value> CACHE <type> <docstring> [FORCE])
set(ENV{<var>} <value>)
We don't support the ENV variant, and any uses of it will be ignored
silently. the other two variates are supported, with some caveats:
- we don't properly handle scoping, so calls to set() inside a
function without PARENT_SCOPE set could incorrectly shadow the
outer scope.
- We don't honor the type of CACHE arguments
"""
# DOC: https://cmake.org/cmake/help/latest/command/set.html
cache_type = None
cache_force = 'FORCE' in tline.args
try:
cache_idx = tline.args.index('CACHE')
cache_type = tline.args[cache_idx + 1]
except (ValueError, IndexError):
pass
# 1st remove PARENT_SCOPE and CACHE from args
args = []
for i in tline.args:
if not i or i == 'PARENT_SCOPE':
continue
# Discard everything after the CACHE keyword
if i == 'CACHE':
break
args.append(i)
if len(args) < 1:
return self._gen_exception('set', 'requires at least one argument', tline)
# Now that we've removed extra arguments all that should be left is the
# variable identifier and the value, join the value back together to
# ensure spaces in the value are correctly handled. This assumes that
# variable names don't have spaces. Please don't do that...
identifier = args.pop(0)
value = ' '.join(args)
# Write to the CMake cache instead
if cache_type:
# Honor how the CMake FORCE parameter works
if identifier not in self.cache or cache_force:
self.cache[identifier] = CMakeCacheEntry(value.split(';'), cache_type)
if not value:
# Same as unset
if identifier in self.vars:
del self.vars[identifier]
else:
self.vars[identifier] = value.split(';')
self.vars_by_file.setdefault(tline.file, {})[identifier] = value.split(';')
def _cmake_unset(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/unset.html
if len(tline.args) < 1:
return self._gen_exception('unset', 'requires at least one argument', tline)
if tline.args[0] in self.vars:
del self.vars[tline.args[0]]
def _cmake_add_executable(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_executable.html
args = list(tline.args) # Make a working copy
# Make sure the exe is imported
is_imported = True
if 'IMPORTED' not in args:
return self._gen_exception('add_executable', 'non imported executables are not supported', tline)
args.remove('IMPORTED')
if len(args) < 1:
return self._gen_exception('add_executable', 'requires at least 1 argument', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'EXECUTABLE', {}, tline=tline, imported=is_imported)
def _cmake_add_library(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_library.html
args = list(tline.args) # Make a working copy
# Make sure the lib is imported
if 'INTERFACE' in args:
args.remove('INTERFACE')
if len(args) < 1:
return self._gen_exception('add_library', 'interface library name not specified', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'INTERFACE', {}, tline=tline, imported='IMPORTED' in args)
elif 'IMPORTED' in args:
args.remove('IMPORTED')
# Now, only look at the first two arguments (target_name and target_type) and ignore the rest
if len(args) < 2:
return self._gen_exception('add_library', 'requires at least 2 arguments', tline)
self.targets[args[0]] = CMakeTarget(args[0], args[1], {}, tline=tline, imported=True)
elif 'ALIAS' in args:
args.remove('ALIAS')
# Now, only look at the first two arguments (target_name and target_ref) and ignore the rest
if len(args) < 2:
return self._gen_exception('add_library', 'requires at least 2 arguments', tline)
# Simulate the ALIAS with INTERFACE_LINK_LIBRARIES
self.targets[args[0]] = CMakeTarget(args[0], 'ALIAS', {'INTERFACE_LINK_LIBRARIES': [args[1]]}, tline=tline)
elif 'OBJECT' in args:
return self._gen_exception('add_library', 'OBJECT libraries are not supported', tline)
else:
self.targets[args[0]] = CMakeTarget(args[0], 'NORMAL', {}, tline=tline)
def _cmake_add_custom_command(self, tline: CMakeTraceLine, name: T.Optional[str] = None) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_custom_command.html
args = self._flatten_args(list(tline.args)) # Commands can be passed as ';' separated lists
if not args:
return self._gen_exception('add_custom_command', 'requires at least 1 argument', tline)
# Skip the second function signature
if args[0] == 'TARGET':
return self._gen_exception('add_custom_command', 'TARGET syntax is currently not supported', tline)
magic_keys = ['OUTPUT', 'COMMAND', 'MAIN_DEPENDENCY', 'DEPENDS', 'BYPRODUCTS',
'IMPLICIT_DEPENDS', 'WORKING_DIRECTORY', 'COMMENT', 'DEPFILE',
'JOB_POOL', 'VERBATIM', 'APPEND', 'USES_TERMINAL', 'COMMAND_EXPAND_LISTS']
target = CMakeGeneratorTarget(name)
def handle_output(key: str, target: CMakeGeneratorTarget) -> None:
target.outputs += [Path(key)]
def handle_command(key: str, target: CMakeGeneratorTarget) -> None:
if key == 'ARGS':
return
target.command[-1] += [key]
def handle_depends(key: str, target: CMakeGeneratorTarget) -> None:
target.depends += [key]
working_dir = None
def handle_working_dir(key: str, target: CMakeGeneratorTarget) -> None:
nonlocal working_dir
if working_dir is None:
working_dir = key
else:
working_dir += ' '
working_dir += key
fn = None
for i in args:
if i in magic_keys:
if i == 'OUTPUT':
fn = handle_output
elif i == 'DEPENDS':
fn = handle_depends
elif i == 'WORKING_DIRECTORY':
fn = handle_working_dir
elif i == 'COMMAND':
fn = handle_command
target.command += [[]]
else:
fn = None
continue
if fn is not None:
fn(i, target)
cbinary_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_BINARY_DIR')
csource_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
target.working_dir = Path(working_dir) if working_dir else None
target.current_bin_dir = Path(cbinary_dir) if cbinary_dir else None
target.current_src_dir = Path(csource_dir) if csource_dir else None
target.outputs = [Path(x) for x in self._guess_files([str(y) for y in target.outputs])]
target.depends = self._guess_files(target.depends)
target.command = [self._guess_files(x) for x in target.command]
self.custom_targets += [target]
if name:
self.targets[name] = target
def _cmake_add_custom_target(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_custom_target.html
# We only the first parameter (the target name) is interesting
if len(tline.args) < 1:
return self._gen_exception('add_custom_target', 'requires at least one argument', tline)
# It's pretty much the same as a custom command
self._cmake_add_custom_command(tline, tline.args[0])
def _cmake_set_property(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/set_property.html
args = list(tline.args)
scope = args.pop(0)
append = False
targets = []
while args:
curr = args.pop(0)
# XXX: APPEND_STRING is specifically *not* supposed to create a
# list, is treating them as aliases really okay?
if curr == 'APPEND' or curr == 'APPEND_STRING':
append = True
continue
if curr == 'PROPERTY':
break
targets += curr.split(';')
if not args:
return self._gen_exception('set_property', 'faild to parse argument list', tline)
if len(args) == 1:
# Tries to set property to nothing so nothing has to be done
return
identifier = args.pop(0)
if self.trace_format == 'human':
value = ' '.join(args).split(';')
else:
value = [y for x in args for y in x.split(';')]
if not value:
return
def do_target(t: str) -> None:
if t not in self.targets:
return self._gen_exception('set_property', f'TARGET {t} not found', tline)
tgt = self.targets[t]
if identifier not in tgt.properties:
tgt.properties[identifier] = []
if append:
tgt.properties[identifier] += value
else:
tgt.properties[identifier] = value
def do_source(src: str) -> None:
if identifier != 'HEADER_FILE_ONLY' or not self._str_to_bool(value):
return
current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
if not current_src_dir:
mlog.warning(textwrap.dedent('''\
CMake trace: set_property(SOURCE) called before the preload script was loaded.
Unable to determine CMAKE_CURRENT_SOURCE_DIR. This can lead to build errors.
'''))
current_src_dir = '.'
cur_p = Path(current_src_dir)
src_p = Path(src)
if not src_p.is_absolute():
src_p = cur_p / src_p
self.explicit_headers.add(src_p)
if scope == 'TARGET':
for i in targets:
do_target(i)
elif scope == 'SOURCE':
files = self._guess_files(targets)
for i in files:
do_source(i)
def _cmake_set_target_properties(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/set_target_properties.html
args = list(tline.args)
targets = []
while args:
curr = args.pop(0)
if curr == 'PROPERTIES':
break
targets.append(curr)
# Now we need to try to reconsitute the original quoted format of the
# arguments, as a property value could have spaces in it. Unlike
# set_property() this is not context free. There are two approaches I
# can think of, both have drawbacks:
#
# 1. Assume that the property will be capitalized ([A-Z_]), this is
# convention but cmake doesn't require it.
# 2. Maintain a copy of the list here: https://cmake.org/cmake/help/latest/manual/cmake-properties.7.html#target-properties
#
# Neither of these is awesome for obvious reasons. I'm going to try
# option 1 first and fall back to 2, as 1 requires less code and less
# synchroniztion for cmake changes.
#
# With the JSON output format, introduced in CMake 3.17, spaces are
# handled properly and we don't have to do either options
arglist = [] # type: T.List[T.Tuple[str, T.List[str]]]
if self.trace_format == 'human':
name = args.pop(0)
values = [] # type: T.List[str]
prop_regex = re.compile(r'^[A-Z_]+$')
for a in args:
if prop_regex.match(a):
if values:
arglist.append((name, ' '.join(values).split(';')))
name = a
values = []
else:
values.append(a)
if values:
arglist.append((name, ' '.join(values).split(';')))
else:
arglist = [(x[0], x[1].split(';')) for x in zip(args[::2], args[1::2])]
for name, value in arglist:
for i in targets:
if i not in self.targets:
return self._gen_exception('set_target_properties', f'TARGET {i} not found', tline)
self.targets[i].properties[name] = value
def _cmake_add_dependencies(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_dependencies.html
args = list(tline.args)
if len(args) < 2:
return self._gen_exception('add_dependencies', 'takes at least 2 arguments', tline)
target = self.targets.get(args[0])
if not target:
return self._gen_exception('add_dependencies', 'target not found', tline)
for i in args[1:]:
target.depends += i.split(';')
def _cmake_target_compile_definitions(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_compile_definitions.html
self._parse_common_target_options('target_compile_definitions', 'COMPILE_DEFINITIONS', 'INTERFACE_COMPILE_DEFINITIONS', tline)
def _cmake_target_compile_options(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_compile_options.html
self._parse_common_target_options('target_compile_options', 'COMPILE_OPTIONS', 'INTERFACE_COMPILE_OPTIONS', tline)
def _cmake_target_include_directories(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_include_directories.html
self._parse_common_target_options('target_include_directories', 'INCLUDE_DIRECTORIES', 'INTERFACE_INCLUDE_DIRECTORIES', tline, ignore=['SYSTEM', 'BEFORE'], paths=True)
def _cmake_target_link_options(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_options.html
self._parse_common_target_options('target_link_options', 'LINK_OPTIONS', 'INTERFACE_LINK_OPTIONS', tline)
def _cmake_target_link_libraries(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_libraries.html
self._parse_common_target_options('target_link_options', 'LINK_LIBRARIES', 'INTERFACE_LINK_LIBRARIES', tline)
def _parse_common_target_options(self, func: str, private_prop: str, interface_prop: str, tline: CMakeTraceLine, ignore: T.Optional[T.List[str]] = None, paths: bool = False) -> None:
if ignore is None:
ignore = ['BEFORE']
args = list(tline.args)
if len(args) < 1:
return self._gen_exception(func, 'requires at least one argument', tline)
target = args[0]
if target not in self.targets:
return self._gen_exception(func, f'TARGET {target} not found', tline)
interface = []
private = []
mode = 'PUBLIC'
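        # Sketch (hypothetical args): for ['tgt', 'PUBLIC', 'a', 'PRIVATE', 'b'] the
        # loop below yields interface == ['a'] and private == ['a', 'b'].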
for i in args[1:]:
if i in ignore:
continue
if i in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'PRIVATE', 'LINK_PUBLIC', 'LINK_PRIVATE']:
mode = i
continue
if mode in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'LINK_PUBLIC']:
interface += i.split(';')
if mode in ['PUBLIC', 'PRIVATE', 'LINK_PRIVATE']:
private += i.split(';')
if paths:
interface = self._guess_files(interface)
private = self._guess_files(private)
interface = [x for x in interface if x]
private = [x for x in private if x]
for j in [(private_prop, private), (interface_prop, interface)]:
if not j[0] in self.targets[target].properties:
self.targets[target].properties[j[0]] = []
self.targets[target].properties[j[0]] += j[1]
def _meson_ps_execute_delayed_calls(self, tline: CMakeTraceLine) -> None:
for l in self.stored_commands:
fn = self.functions.get(l.func, None)
if fn:
fn(l)
# clear the stored commands
self.stored_commands = []
def _meson_ps_reload_vars(self, tline: CMakeTraceLine) -> None:
self.delayed_commands = self.get_cmake_var('MESON_PS_DELAYED_CALLS')
def _meson_ps_disabled_function(self, tline: CMakeTraceLine) -> None:
args = list(tline.args)
if not args:
mlog.error('Invalid preload.cmake script! At least one argument to `meson_ps_disabled_function` is expected')
return
mlog.warning(f'The CMake function "{args[0]}" was disabled to avoid compatibility issues with Meson.')
def _lex_trace_human(self, trace: str) -> T.Generator[CMakeTraceLine, None, None]:
# The trace format is: '<file>(<line>): <func>(<args -- can contain \n> )\n'
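        # e.g. a (hypothetical) line "/src/CMakeLists.txt(42):  set(FOO bar )\n" parses
        # to file='/src/CMakeLists.txt', line=42, func='set', args=['FOO', 'bar'].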
reg_tline = re.compile(r'\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(([\s\S]*?) ?\)\s*\n', re.MULTILINE)
reg_other = re.compile(r'[^\n]*\n')
loc = 0
while loc < len(trace):
mo_file_line = reg_tline.match(trace, loc)
if not mo_file_line:
skip_match = reg_other.match(trace, loc)
if not skip_match:
print(trace[loc:])
raise CMakeException('Failed to parse CMake trace')
loc = skip_match.end()
continue
loc = mo_file_line.end()
file = mo_file_line.group(1)
line = mo_file_line.group(3)
func = mo_file_line.group(4)
args = mo_file_line.group(5)
args = parse_generator_expressions(args)
argl = args.split(' ')
argl = list(map(lambda x: x.strip(), argl))
yield CMakeTraceLine(file, int(line), func, argl)
def _lex_trace_json(self, trace: str) -> T.Generator[CMakeTraceLine, None, None]:
lines = trace.splitlines(keepends=False)
lines.pop(0) # The first line is the version
for i in lines:
data = json.loads(i)
assert isinstance(data['file'], str)
assert isinstance(data['line'], int)
assert isinstance(data['cmd'], str)
assert isinstance(data['args'], list)
args = data['args']
for j in args:
assert isinstance(j, str)
args = [parse_generator_expressions(x) for x in args]
yield CMakeTraceLine(data['file'], data['line'], data['cmd'], args)
def _flatten_args(self, args: T.List[str]) -> T.List[str]:
# Split lists in arguments
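        # e.g. (hypothetical) ['a;b', 'c'] -> ['a', 'b', 'c']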
res = [] # type: T.List[str]
for i in args:
res += i.split(';')
return res
def _guess_files(self, broken_list: T.List[str]) -> T.List[str]:
# Nothing has to be done for newer formats
if self.trace_format != 'human':
return broken_list
# Try joining file paths that contain spaces
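        # e.g. a (hypothetical) broken_list ['/usr/lib/my', 'lib.so'] is rejoined
        # into ['/usr/lib/my lib.so'].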
reg_start = re.compile(r'^([A-Za-z]:)?/(.*/)*[^./]+$')
reg_end = re.compile(r'^.*\.[a-zA-Z]+$')
fixed_list = [] # type: T.List[str]
curr_str = None # type: T.Optional[str]
path_found = False # type: bool
for i in broken_list:
if curr_str is None:
curr_str = i
path_found = False
elif Path(curr_str).is_file():
# Abort concatenation if curr_str is an existing file
fixed_list += [curr_str]
curr_str = i
path_found = False
elif not reg_start.match(curr_str):
# Abort concatenation if curr_str no longer matches the regex
fixed_list += [curr_str]
curr_str = i
path_found = False
elif reg_end.match(i):
# File detected
curr_str = f'{curr_str} {i}'
fixed_list += [curr_str]
curr_str = None
path_found = False
elif Path(f'{curr_str} {i}').exists():
# Path detected
curr_str = f'{curr_str} {i}'
path_found = True
elif path_found:
# Add path to fixed_list after ensuring the whole path is in curr_str
fixed_list += [curr_str]
curr_str = i
path_found = False
else:
curr_str = f'{curr_str} {i}'
path_found = False
if curr_str:
fixed_list += [curr_str]
return fixed_list
| 41.014304
| 186
| 0.590298
|
from .common import CMakeException
from .generator import parse_generator_expressions
from .. import mlog
from ..mesonlib import version_compare
import typing as T
from pathlib import Path
from functools import lru_cache
import re
import json
import textwrap
class CMakeTraceLine:
def __init__(self, file_str: str, line: int, func: str, args: T.List[str]) -> None:
self.file = CMakeTraceLine._to_path(file_str)
self.line = line
self.func = func.lower()
self.args = args
@staticmethod
@lru_cache(maxsize=None)
def _to_path(file_str: str) -> Path:
return Path(file_str)
def __repr__(self) -> str:
s = 'CMake TRACE: {0}:{1} {2}({3})'
return s.format(self.file, self.line, self.func, self.args)
class CMakeCacheEntry(T.NamedTuple):
value: T.List[str]
type: str
class CMakeTarget:
def __init__(
self,
name: str,
target_type: str,
properties: T.Optional[T.Dict[str, T.List[str]]] = None,
imported: bool = False,
tline: T.Optional[CMakeTraceLine] = None
):
if properties is None:
properties = {}
self.name = name
self.type = target_type
self.properties = properties
self.imported = imported
self.tline = tline
self.depends = []
self.current_bin_dir = None
self.current_src_dir = None
def __repr__(self) -> str:
s = 'CMake TARGET:\n -- name: {}\n -- type: {}\n -- imported: {}\n -- properties: {{\n{} }}\n -- tline: {}'
propSTR = ''
for i in self.properties:
propSTR += " '{}': {}\n".format(i, self.properties[i])
return s.format(self.name, self.type, self.imported, propSTR, self.tline)
def strip_properties(self) -> None:
if not self.properties:
return
for key, val in self.properties.items():
self.properties[key] = [x.strip() for x in val]
assert all([';' not in x for x in self.properties[key]])
class CMakeGeneratorTarget(CMakeTarget):
def __init__(self, name: str) -> None:
super().__init__(name, 'CUSTOM', {})
self.outputs = []
self.command = []
self.working_dir = None
class CMakeTraceParser:
def __init__(self, cmake_version: str, build_dir: Path, permissive: bool = True) -> None:
self.vars: T.Dict[str, T.List[str]] = {}
self.vars_by_file: T.Dict[Path, T.Dict[str, T.List[str]]] = {}
self.targets: T.Dict[str, CMakeTarget] = {}
self.cache: T.Dict[str, CMakeCacheEntry] = {}
self.explicit_headers = set()
self.custom_targets = []
self.permissive = permissive
self.cmake_version = cmake_version
self.trace_file = 'cmake_trace.txt'
self.trace_file_path = build_dir / self.trace_file
self.trace_format = 'json-v1' if version_compare(cmake_version, '>=3.17') else 'human'
self.delayed_commands = []
self.stored_commands = []
self.functions = {
'set': self._cmake_set,
'unset': self._cmake_unset,
'add_executable': self._cmake_add_executable,
'add_library': self._cmake_add_library,
'add_custom_command': self._cmake_add_custom_command,
'add_custom_target': self._cmake_add_custom_target,
'set_property': self._cmake_set_property,
'set_target_properties': self._cmake_set_target_properties,
'target_compile_definitions': self._cmake_target_compile_definitions,
'target_compile_options': self._cmake_target_compile_options,
'target_include_directories': self._cmake_target_include_directories,
'target_link_libraries': self._cmake_target_link_libraries,
'target_link_options': self._cmake_target_link_options,
'add_dependencies': self._cmake_add_dependencies,
'meson_ps_execute_delayed_calls': self._meson_ps_execute_delayed_calls,
'meson_ps_reload_vars': self._meson_ps_reload_vars,
'meson_ps_disabled_function': self._meson_ps_disabled_function,
}
if version_compare(self.cmake_version, '<3.17.0'):
mlog.deprecation(textwrap.dedent(f'''\
CMake support for versions <3.17 is deprecated since Meson 0.62.0.
|
| However, Meson was only able to find CMake {self.cmake_version}.
|
| Support for all CMake versions below 3.17.0 will be removed once
| newer CMake versions are more widely adopted. If you encounter
| any errors please try upgrading CMake to a newer version first.
'''), once=True)
def trace_args(self) -> T.List[str]:
arg_map = {
'human': ['--trace', '--trace-expand'],
'json-v1': ['--trace-expand', '--trace-format=json-v1'],
}
base_args = ['--no-warn-unused-cli']
if not self.requires_stderr():
base_args += [f'--trace-redirect={self.trace_file}']
return arg_map[self.trace_format] + base_args
def requires_stderr(self) -> bool:
return version_compare(self.cmake_version, '<3.16')
def parse(self, trace: T.Optional[str] = None) -> None:
if not self.requires_stderr():
            if not self.trace_file_path.exists() and not self.trace_file_path.is_file():
raise CMakeException(f'CMake: Trace file "{self.trace_file_path!s}" not found')
trace = self.trace_file_path.read_text(errors='ignore', encoding='utf-8')
if not trace:
raise CMakeException('CMake: The CMake trace was not provided or is empty')
lexer1 = None
if self.trace_format == 'human':
lexer1 = self._lex_trace_human(trace)
elif self.trace_format == 'json-v1':
lexer1 = self._lex_trace_json(trace)
else:
raise CMakeException(f'CMake: Internal error: Invalid trace format {self.trace_format}. Expected [human, json-v1]')
for l in lexer1:
if l.func in self.delayed_commands:
self.stored_commands += [l]
continue
fn = self.functions.get(l.func, None)
if fn:
fn(l)
for tgt in self.targets.values():
tgt.strip_properties()
def get_first_cmake_var_of(self, var_list: T.List[str]) -> T.List[str]:
for i in var_list:
if i in self.vars:
return self.vars[i]
return []
def get_cmake_var(self, var: str) -> T.List[str]:
if var in self.vars:
return self.vars[var]
return []
def var_to_str(self, var: str) -> T.Optional[str]:
if var in self.vars and self.vars[var]:
return self.vars[var][0]
return None
def _str_to_bool(self, expr: T.Union[str, T.List[str]]) -> bool:
if not expr:
return False
if isinstance(expr, list):
expr_str = expr[0]
else:
expr_str = expr
expr_str = expr_str.upper()
return expr_str not in ['0', 'OFF', 'NO', 'FALSE', 'N', 'IGNORE'] and not expr_str.endswith('NOTFOUND')
def var_to_bool(self, var: str) -> bool:
return self._str_to_bool(self.vars.get(var, []))
def _gen_exception(self, function: str, error: str, tline: CMakeTraceLine) -> None:
if self.permissive:
mlog.debug(f'CMake trace warning: {function}() {error}\n{tline}')
return None
raise CMakeException(f'CMake: {function}() {error}\n{tline}')
def _cmake_set(self, tline: CMakeTraceLine) -> None:
cache_type = None
cache_force = 'FORCE' in tline.args
try:
cache_idx = tline.args.index('CACHE')
cache_type = tline.args[cache_idx + 1]
except (ValueError, IndexError):
pass
args = []
for i in tline.args:
if not i or i == 'PARENT_SCOPE':
continue
if i == 'CACHE':
break
args.append(i)
if len(args) < 1:
return self._gen_exception('set', 'requires at least one argument', tline)
# variable identifier and the value, join the value back together to
# ensure spaces in the value are correctly handled. This assumes that
# variable names don't have spaces. Please don't do that...
identifier = args.pop(0)
value = ' '.join(args)
# Write to the CMake cache instead
if cache_type:
# Honor how the CMake FORCE parameter works
if identifier not in self.cache or cache_force:
self.cache[identifier] = CMakeCacheEntry(value.split(';'), cache_type)
if not value:
# Same as unset
if identifier in self.vars:
del self.vars[identifier]
else:
self.vars[identifier] = value.split(';')
self.vars_by_file.setdefault(tline.file, {})[identifier] = value.split(';')
def _cmake_unset(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/unset.html
if len(tline.args) < 1:
return self._gen_exception('unset', 'requires at least one argument', tline)
if tline.args[0] in self.vars:
del self.vars[tline.args[0]]
def _cmake_add_executable(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_executable.html
args = list(tline.args) # Make a working copy
# Make sure the exe is imported
is_imported = True
if 'IMPORTED' not in args:
return self._gen_exception('add_executable', 'non imported executables are not supported', tline)
args.remove('IMPORTED')
if len(args) < 1:
return self._gen_exception('add_executable', 'requires at least 1 argument', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'EXECUTABLE', {}, tline=tline, imported=is_imported)
def _cmake_add_library(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_library.html
args = list(tline.args) # Make a working copy
# Make sure the lib is imported
if 'INTERFACE' in args:
args.remove('INTERFACE')
if len(args) < 1:
return self._gen_exception('add_library', 'interface library name not specified', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'INTERFACE', {}, tline=tline, imported='IMPORTED' in args)
elif 'IMPORTED' in args:
args.remove('IMPORTED')
# Now, only look at the first two arguments (target_name and target_type) and ignore the rest
if len(args) < 2:
return self._gen_exception('add_library', 'requires at least 2 arguments', tline)
self.targets[args[0]] = CMakeTarget(args[0], args[1], {}, tline=tline, imported=True)
elif 'ALIAS' in args:
args.remove('ALIAS')
# Now, only look at the first two arguments (target_name and target_ref) and ignore the rest
if len(args) < 2:
return self._gen_exception('add_library', 'requires at least 2 arguments', tline)
# Simulate the ALIAS with INTERFACE_LINK_LIBRARIES
self.targets[args[0]] = CMakeTarget(args[0], 'ALIAS', {'INTERFACE_LINK_LIBRARIES': [args[1]]}, tline=tline)
elif 'OBJECT' in args:
return self._gen_exception('add_library', 'OBJECT libraries are not supported', tline)
else:
self.targets[args[0]] = CMakeTarget(args[0], 'NORMAL', {}, tline=tline)
def _cmake_add_custom_command(self, tline: CMakeTraceLine, name: T.Optional[str] = None) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_custom_command.html
args = self._flatten_args(list(tline.args)) # Commands can be passed as ';' separated lists
if not args:
return self._gen_exception('add_custom_command', 'requires at least 1 argument', tline)
# Skip the second function signature
if args[0] == 'TARGET':
return self._gen_exception('add_custom_command', 'TARGET syntax is currently not supported', tline)
magic_keys = ['OUTPUT', 'COMMAND', 'MAIN_DEPENDENCY', 'DEPENDS', 'BYPRODUCTS',
'IMPLICIT_DEPENDS', 'WORKING_DIRECTORY', 'COMMENT', 'DEPFILE',
'JOB_POOL', 'VERBATIM', 'APPEND', 'USES_TERMINAL', 'COMMAND_EXPAND_LISTS']
target = CMakeGeneratorTarget(name)
def handle_output(key: str, target: CMakeGeneratorTarget) -> None:
target.outputs += [Path(key)]
def handle_command(key: str, target: CMakeGeneratorTarget) -> None:
if key == 'ARGS':
return
target.command[-1] += [key]
def handle_depends(key: str, target: CMakeGeneratorTarget) -> None:
target.depends += [key]
working_dir = None
def handle_working_dir(key: str, target: CMakeGeneratorTarget) -> None:
nonlocal working_dir
if working_dir is None:
working_dir = key
else:
working_dir += ' '
working_dir += key
fn = None
for i in args:
if i in magic_keys:
if i == 'OUTPUT':
fn = handle_output
elif i == 'DEPENDS':
fn = handle_depends
elif i == 'WORKING_DIRECTORY':
fn = handle_working_dir
elif i == 'COMMAND':
fn = handle_command
target.command += [[]]
else:
fn = None
continue
if fn is not None:
fn(i, target)
cbinary_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_BINARY_DIR')
csource_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
target.working_dir = Path(working_dir) if working_dir else None
target.current_bin_dir = Path(cbinary_dir) if cbinary_dir else None
target.current_src_dir = Path(csource_dir) if csource_dir else None
target.outputs = [Path(x) for x in self._guess_files([str(y) for y in target.outputs])]
target.depends = self._guess_files(target.depends)
target.command = [self._guess_files(x) for x in target.command]
self.custom_targets += [target]
if name:
self.targets[name] = target
def _cmake_add_custom_target(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_custom_target.html
        # Only the first parameter (the target name) is interesting
if len(tline.args) < 1:
return self._gen_exception('add_custom_target', 'requires at least one argument', tline)
# It's pretty much the same as a custom command
self._cmake_add_custom_command(tline, tline.args[0])
def _cmake_set_property(self, tline: CMakeTraceLine) -> None:
args = list(tline.args)
scope = args.pop(0)
append = False
targets = []
while args:
curr = args.pop(0)
if curr == 'APPEND' or curr == 'APPEND_STRING':
append = True
continue
if curr == 'PROPERTY':
break
targets += curr.split(';')
if not args:
            return self._gen_exception('set_property', 'failed to parse argument list', tline)
if len(args) == 1:
return
identifier = args.pop(0)
if self.trace_format == 'human':
value = ' '.join(args).split(';')
else:
value = [y for x in args for y in x.split(';')]
if not value:
return
def do_target(t: str) -> None:
if t not in self.targets:
return self._gen_exception('set_property', f'TARGET {t} not found', tline)
tgt = self.targets[t]
if identifier not in tgt.properties:
tgt.properties[identifier] = []
if append:
tgt.properties[identifier] += value
else:
tgt.properties[identifier] = value
def do_source(src: str) -> None:
if identifier != 'HEADER_FILE_ONLY' or not self._str_to_bool(value):
return
current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
if not current_src_dir:
mlog.warning(textwrap.dedent('''\
CMake trace: set_property(SOURCE) called before the preload script was loaded.
Unable to determine CMAKE_CURRENT_SOURCE_DIR. This can lead to build errors.
'''))
current_src_dir = '.'
cur_p = Path(current_src_dir)
src_p = Path(src)
if not src_p.is_absolute():
src_p = cur_p / src_p
self.explicit_headers.add(src_p)
if scope == 'TARGET':
for i in targets:
do_target(i)
elif scope == 'SOURCE':
files = self._guess_files(targets)
for i in files:
do_source(i)
def _cmake_set_target_properties(self, tline: CMakeTraceLine) -> None:
args = list(tline.args)
targets = []
while args:
curr = args.pop(0)
if curr == 'PROPERTIES':
break
targets.append(curr)
# 2. Maintain a copy of the list here: https://cmake.org/cmake/help/latest/manual/cmake-properties.7.html#target-properties
#
# Neither of these is awesome for obvious reasons. I'm going to try
arglist = [] # type: T.List[T.Tuple[str, T.List[str]]]
if self.trace_format == 'human':
name = args.pop(0)
values = [] # type: T.List[str]
prop_regex = re.compile(r'^[A-Z_]+$')
for a in args:
if prop_regex.match(a):
if values:
arglist.append((name, ' '.join(values).split(';')))
name = a
values = []
else:
values.append(a)
if values:
arglist.append((name, ' '.join(values).split(';')))
else:
arglist = [(x[0], x[1].split(';')) for x in zip(args[::2], args[1::2])]
for name, value in arglist:
for i in targets:
if i not in self.targets:
return self._gen_exception('set_target_properties', f'TARGET {i} not found', tline)
self.targets[i].properties[name] = value
def _cmake_add_dependencies(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_dependencies.html
args = list(tline.args)
if len(args) < 2:
return self._gen_exception('add_dependencies', 'takes at least 2 arguments', tline)
target = self.targets.get(args[0])
if not target:
return self._gen_exception('add_dependencies', 'target not found', tline)
for i in args[1:]:
target.depends += i.split(';')
def _cmake_target_compile_definitions(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_compile_definitions.html
self._parse_common_target_options('target_compile_definitions', 'COMPILE_DEFINITIONS', 'INTERFACE_COMPILE_DEFINITIONS', tline)
def _cmake_target_compile_options(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_compile_options.html
self._parse_common_target_options('target_compile_options', 'COMPILE_OPTIONS', 'INTERFACE_COMPILE_OPTIONS', tline)
def _cmake_target_include_directories(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_include_directories.html
self._parse_common_target_options('target_include_directories', 'INCLUDE_DIRECTORIES', 'INTERFACE_INCLUDE_DIRECTORIES', tline, ignore=['SYSTEM', 'BEFORE'], paths=True)
def _cmake_target_link_options(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_options.html
self._parse_common_target_options('target_link_options', 'LINK_OPTIONS', 'INTERFACE_LINK_OPTIONS', tline)
def _cmake_target_link_libraries(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_libraries.html
self._parse_common_target_options('target_link_options', 'LINK_LIBRARIES', 'INTERFACE_LINK_LIBRARIES', tline)
def _parse_common_target_options(self, func: str, private_prop: str, interface_prop: str, tline: CMakeTraceLine, ignore: T.Optional[T.List[str]] = None, paths: bool = False) -> None:
if ignore is None:
ignore = ['BEFORE']
args = list(tline.args)
if len(args) < 1:
return self._gen_exception(func, 'requires at least one argument', tline)
target = args[0]
if target not in self.targets:
return self._gen_exception(func, f'TARGET {target} not found', tline)
interface = []
private = []
mode = 'PUBLIC'
for i in args[1:]:
if i in ignore:
continue
if i in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'PRIVATE', 'LINK_PUBLIC', 'LINK_PRIVATE']:
mode = i
continue
if mode in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'LINK_PUBLIC']:
interface += i.split(';')
if mode in ['PUBLIC', 'PRIVATE', 'LINK_PRIVATE']:
private += i.split(';')
if paths:
interface = self._guess_files(interface)
private = self._guess_files(private)
interface = [x for x in interface if x]
private = [x for x in private if x]
for j in [(private_prop, private), (interface_prop, interface)]:
if not j[0] in self.targets[target].properties:
self.targets[target].properties[j[0]] = []
self.targets[target].properties[j[0]] += j[1]
def _meson_ps_execute_delayed_calls(self, tline: CMakeTraceLine) -> None:
for l in self.stored_commands:
fn = self.functions.get(l.func, None)
if fn:
fn(l)
# clear the stored commands
self.stored_commands = []
def _meson_ps_reload_vars(self, tline: CMakeTraceLine) -> None:
self.delayed_commands = self.get_cmake_var('MESON_PS_DELAYED_CALLS')
def _meson_ps_disabled_function(self, tline: CMakeTraceLine) -> None:
args = list(tline.args)
if not args:
mlog.error('Invalid preload.cmake script! At least one argument to `meson_ps_disabled_function` is expected')
return
mlog.warning(f'The CMake function "{args[0]}" was disabled to avoid compatibility issues with Meson.')
def _lex_trace_human(self, trace: str) -> T.Generator[CMakeTraceLine, None, None]:
# The trace format is: '<file>(<line>): <func>(<args -- can contain \n> )\n'
reg_tline = re.compile(r'\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(([\s\S]*?) ?\)\s*\n', re.MULTILINE)
reg_other = re.compile(r'[^\n]*\n')
loc = 0
while loc < len(trace):
mo_file_line = reg_tline.match(trace, loc)
if not mo_file_line:
skip_match = reg_other.match(trace, loc)
if not skip_match:
print(trace[loc:])
raise CMakeException('Failed to parse CMake trace')
loc = skip_match.end()
continue
loc = mo_file_line.end()
file = mo_file_line.group(1)
line = mo_file_line.group(3)
func = mo_file_line.group(4)
args = mo_file_line.group(5)
args = parse_generator_expressions(args)
argl = args.split(' ')
argl = list(map(lambda x: x.strip(), argl))
yield CMakeTraceLine(file, int(line), func, argl)
def _lex_trace_json(self, trace: str) -> T.Generator[CMakeTraceLine, None, None]:
lines = trace.splitlines(keepends=False)
lines.pop(0) # The first line is the version
for i in lines:
data = json.loads(i)
assert isinstance(data['file'], str)
assert isinstance(data['line'], int)
assert isinstance(data['cmd'], str)
assert isinstance(data['args'], list)
args = data['args']
for j in args:
assert isinstance(j, str)
args = [parse_generator_expressions(x) for x in args]
yield CMakeTraceLine(data['file'], data['line'], data['cmd'], args)
def _flatten_args(self, args: T.List[str]) -> T.List[str]:
# Split lists in arguments
res = [] # type: T.List[str]
for i in args:
res += i.split(';')
return res
def _guess_files(self, broken_list: T.List[str]) -> T.List[str]:
# Nothing has to be done for newer formats
if self.trace_format != 'human':
return broken_list
# Try joining file paths that contain spaces
reg_start = re.compile(r'^([A-Za-z]:)?/(.*/)*[^./]+$')
reg_end = re.compile(r'^.*\.[a-zA-Z]+$')
fixed_list = [] # type: T.List[str]
curr_str = None # type: T.Optional[str]
path_found = False # type: bool
for i in broken_list:
if curr_str is None:
curr_str = i
path_found = False
elif Path(curr_str).is_file():
# Abort concatenation if curr_str is an existing file
fixed_list += [curr_str]
curr_str = i
path_found = False
elif not reg_start.match(curr_str):
# Abort concatenation if curr_str no longer matches the regex
fixed_list += [curr_str]
curr_str = i
path_found = False
elif reg_end.match(i):
# File detected
curr_str = f'{curr_str} {i}'
fixed_list += [curr_str]
curr_str = None
path_found = False
elif Path(f'{curr_str} {i}').exists():
# Path detected
curr_str = f'{curr_str} {i}'
path_found = True
elif path_found:
# Add path to fixed_list after ensuring the whole path is in curr_str
fixed_list += [curr_str]
curr_str = i
path_found = False
else:
curr_str = f'{curr_str} {i}'
path_found = False
if curr_str:
fixed_list += [curr_str]
return fixed_list
| true
| true
|
7909e1c565267d6d7277ce4981ac6e27c41bf18b
| 676
|
py
|
Python
|
build/scripts-3.9/django-admin.py
|
HelloAny/nwalgo
|
e02493da60da4f2c33b2bfa77e0441ef66b7b5ad
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null |
build/scripts-3.9/django-admin.py
|
HelloAny/nwalgo
|
e02493da60da4f2c33b2bfa77e0441ef66b7b5ad
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null |
build/scripts-3.9/django-admin.py
|
HelloAny/nwalgo
|
e02493da60da4f2c33b2bfa77e0441ef66b7b5ad
|
[
"CNRI-Python-GPL-Compatible",
"BSD-3-Clause"
] | null | null | null |
#!/usr/local/opt/python@3.9/bin/python3.9
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| 30.727273
| 80
| 0.720414
|
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| true
| true
|
7909e27cfe0db281cd4aefe7fb96cde4b0521937
| 623
|
py
|
Python
|
Lambda functions/LF0.py
|
nikhilkhaneja/Dining-Concierge-using-AWS-1
|
1b60a462cfb470a58abd0c2eeaa6c1b5f47a8b26
|
[
"MIT"
] | null | null | null |
Lambda functions/LF0.py
|
nikhilkhaneja/Dining-Concierge-using-AWS-1
|
1b60a462cfb470a58abd0c2eeaa6c1b5f47a8b26
|
[
"MIT"
] | null | null | null |
Lambda functions/LF0.py
|
nikhilkhaneja/Dining-Concierge-using-AWS-1
|
1b60a462cfb470a58abd0c2eeaa6c1b5f47a8b26
|
[
"MIT"
] | 1
|
2021-10-09T20:08:50.000Z
|
2021-10-09T20:08:50.000Z
|
import json
import boto3
def lambda_handler(event, context):
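    # Expected event shape (inferred from the accesses below; values are hypothetical):
    #   {"messages": [{"type": "unstructured", "unstructured": {"text": "Hello"}}]}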
print(event)
lex = boto3.client('lex-runtime')
lex_resp = lex.post_text(
botName = 'dining_concierge_bot',
botAlias = 'Test',
userId = 'user01',
inputText = event['messages'][0]['unstructured']['text'],
activeContexts=[]
)
response = {
"messages":
[
{"type": "unstructured",
"unstructured":
{
"text": lex_resp['message']
}
}
]
}
return response
| 23.961538
| 65
| 0.457464
|
import json
import boto3
def lambda_handler(event, context):
print(event)
lex = boto3.client('lex-runtime')
lex_resp = lex.post_text(
botName = 'dining_concierge_bot',
botAlias = 'Test',
userId = 'user01',
inputText = event['messages'][0]['unstructured']['text'],
activeContexts=[]
)
response = {
"messages":
[
{"type": "unstructured",
"unstructured":
{
"text": lex_resp['message']
}
}
]
}
return response
| true
| true
|
7909e384bcde82f9031aaba69932e02ed3690af6
| 5,331
|
py
|
Python
|
Lab07/lab07/lab07_stack.py
|
stevensu1977/aws-cdk-handson
|
39aecfb516b0c6e65e36cf794ab85c3da2ac6276
|
[
"Apache-2.0"
] | null | null | null |
Lab07/lab07/lab07_stack.py
|
stevensu1977/aws-cdk-handson
|
39aecfb516b0c6e65e36cf794ab85c3da2ac6276
|
[
"Apache-2.0"
] | 2
|
2021-05-11T23:51:20.000Z
|
2021-05-11T23:52:57.000Z
|
Lab07/lab07/lab07_stack.py
|
stevensu1977/aws-cdk-handson
|
39aecfb516b0c6e65e36cf794ab85c3da2ac6276
|
[
"Apache-2.0"
] | 1
|
2019-08-03T12:56:31.000Z
|
2019-08-03T12:56:31.000Z
|
from aws_cdk import (
core,
aws_iam as iam,
aws_kinesis as kinesis,
aws_kinesisfirehose as kinesisfirehose
)
class Lab07Stack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# The code that defines your stack goes here
role01 = iam.CfnRole(self,id="firehose01_role",assume_role_policy_document= {
"Statement": [{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}],
"Version": "2012-10-17"
},managed_policy_arns=[
"arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole"
])
policy01=iam.CfnPolicy(self,id="firehose01_policy",policy_name="firehose01_policy",policy_document={
'Version': "2012-10-17",
'Statement': [
{
"Action": [
's3:AbortMultipartUpload',
's3:GetBucketLocation',
's3:GetObject',
's3:ListBucket',
's3:ListBucketMultipartUploads',
's3:PutObject'
],
"Resource": ['*'],
"Effect": "Allow"
}
]
},roles=[role01.ref])
delivery_stream = kinesisfirehose.CfnDeliveryStream(self, id = "firehose01",
delivery_stream_name = "firehose01",
extended_s3_destination_configuration = {
                # S3 bucket information
'bucketArn': 'arn:aws:s3:::fluent-bit-s3',
                # Compression setting; old approach: gzip, new approach TBD
'compressionFormat': 'GZIP',
                # Format conversion: whether to convert to ORC/Parquet; default: none
'DataFormatConversionConfiguration':"Disabled",
                # Encryption: default none
'EncryptionConfiguration':"NoEncryption",
                # Error output prefix
'bufferingHints': {
'intervalInSeconds': 600,
'sizeInMBs': 128
},
'ProcessingConfiguration': {
"Enabled": True,
"Processor": {
"Type": "Lambda",
"Parameters": [
{
"ParameterName": "BufferIntervalInSeconds",
"ParameterValue": "60"
},
{
"ParameterName": "BufferSizeInMBs",
"ParameterValue": "3"
},
{
"ParameterName": "LambdaArn",
"ParameterValue": "arn:aws:lambda:ap-southeast-1:596030579944:function:firehose-test"
}
]
}
},
'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role',
'S3BackupConfiguration': {
"BucketARN": 'arn:aws:s3:::fluent-bit-s3',
'bufferingHints': {
'intervalInSeconds': 600,
'sizeInMBs': 128
},
'compressionFormat': 'GZIP',
'EncryptionConfiguration':"NoEncryption",
'Prefix': "/backup",
'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role'
}
},
)
| 49.82243
| 149
| 0.298068
|
from aws_cdk import (
core,
aws_iam as iam,
aws_kinesis as kinesis,
aws_kinesisfirehose as kinesisfirehose
)
class Lab07Stack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
role01 = iam.CfnRole(self,id="firehose01_role",assume_role_policy_document= {
"Statement": [{
"Action": "sts:AssumeRole",
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
}
}],
"Version": "2012-10-17"
},managed_policy_arns=[
"arn:aws:iam::aws:policy/service-role/AWSLambdaKinesisExecutionRole"
])
policy01=iam.CfnPolicy(self,id="firehose01_policy",policy_name="firehose01_policy",policy_document={
'Version': "2012-10-17",
'Statement': [
{
"Action": [
's3:AbortMultipartUpload',
's3:GetBucketLocation',
's3:GetObject',
's3:ListBucket',
's3:ListBucketMultipartUploads',
's3:PutObject'
],
"Resource": ['*'],
"Effect": "Allow"
}
]
},roles=[role01.ref])
delivery_stream = kinesisfirehose.CfnDeliveryStream(self, id = "firehose01",
delivery_stream_name = "firehose01",
extended_s3_destination_configuration = {
'bucketArn': 'arn:aws:s3:::fluent-bit-s3',
'compressionFormat': 'GZIP',
'DataFormatConversionConfiguration':"Disabled",
'EncryptionConfiguration':"NoEncryption",
'bufferingHints': {
'intervalInSeconds': 600,
'sizeInMBs': 128
},
'ProcessingConfiguration': {
"Enabled": True,
"Processor": {
"Type": "Lambda",
"Parameters": [
{
"ParameterName": "BufferIntervalInSeconds",
"ParameterValue": "60"
},
{
"ParameterName": "BufferSizeInMBs",
"ParameterValue": "3"
},
{
"ParameterName": "LambdaArn",
"ParameterValue": "arn:aws:lambda:ap-southeast-1:596030579944:function:firehose-test"
}
]
}
},
'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role',
'S3BackupConfiguration': {
"BucketARN": 'arn:aws:s3:::fluent-bit-s3',
'bufferingHints': {
'intervalInSeconds': 600,
'sizeInMBs': 128
},
'compressionFormat': 'GZIP',
'EncryptionConfiguration':"NoEncryption",
'Prefix': "/backup",
'roleArn': 'arn:aws:iam::596030579944:role/avalon_lambda_kinesis_role'
}
},
)
| true
| true
|
7909e39c0d39b1347ed8fb0fa30c3d2324d0f0a2
| 496
|
py
|
Python
|
officialWebsite/members/migrations/0002_member_team.py
|
paras55/officialWebsite
|
12210e995a5d236023d082a123cfa71e2f2855ce
|
[
"MIT"
] | null | null | null |
officialWebsite/members/migrations/0002_member_team.py
|
paras55/officialWebsite
|
12210e995a5d236023d082a123cfa71e2f2855ce
|
[
"MIT"
] | null | null | null |
officialWebsite/members/migrations/0002_member_team.py
|
paras55/officialWebsite
|
12210e995a5d236023d082a123cfa71e2f2855ce
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0 on 2019-12-12 08:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0002_auto_20191210_2330'),
('members', '0001_initial'),
]
operations = [
migrations.AddField(model_name='member', name='team', field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='team.Team'), preserve_default=False,),
]
| 29.176471
| 197
| 0.693548
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('team', '0002_auto_20191210_2330'),
('members', '0001_initial'),
]
operations = [
migrations.AddField(model_name='member', name='team', field=models.ForeignKey(blank=True, default='', on_delete=django.db.models.deletion.CASCADE, to='team.Team'), preserve_default=False,),
]
| true
| true
|
7909e3d677e3ada13ac205864112f6a60c7ba440
| 11,474
|
py
|
Python
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
wileeam/airflow
|
f46be8152a4d89c57db4ca46f5b3339e4876b723
|
[
"Apache-2.0"
] | 1
|
2021-11-08T12:26:34.000Z
|
2021-11-08T12:26:34.000Z
|
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
devlocalca/airflow
|
58c3542ed25061320ce61dbe0adf451a44c738dd
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/google/cloud/operators/sql_to_gcs.py
|
devlocalca/airflow
|
58c3542ed25061320ce61dbe0adf451a44c738dd
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Base operator for SQL to GCS operators.
"""
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator, metaclass=abc.ABCMeta):
"""
:param sql: The SQL to execute.
:type sql: str
:param bucket: The bucket to upload to.
:type bucket: str
:param filename: The filename to use as the object name when uploading
to Google Cloud Storage. A {} should be specified in the filename
to allow the operator to inject file numbers in cases where the
file is split due to size.
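        For example, a (hypothetical) filename of 'data/export_{}.json' would
        yield 'data/export_0.json', 'data/export_1.json', and so on.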
:type filename: str
:param schema_filename: If set, the filename to use as the object name
when uploading a .json file containing the BigQuery schema fields
for the table that was dumped from the database.
:type schema_filename: str
:param approx_max_file_size_bytes: This operator supports the ability
to split large table dumps into multiple files (see notes in the
filename param docs above). This param allows developers to specify the
file size of the splits. Check https://cloud.google.com/storage/quotas
to see the maximum allowed file size for a single object.
:type approx_max_file_size_bytes: long
:param export_format: Desired format of files to be exported.
:type export_format: str
:param field_delimiter: The delimiter to be used for CSV files.
:type field_delimiter: str
:param gzip: Option to compress file for upload (does not apply to schemas).
:type gzip: bool
:param schema: The schema to use, if any. Should be a list of dict or
a str. Pass a string if using Jinja template, otherwise, pass a list of
dict. Examples could be seen: https://cloud.google.com/bigquery/docs
/schemas#specifying_a_json_schema_file
:type schema: str or list
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any. For this to
work, the service account making the request must have domain-wide
delegation enabled.
:param parameters: a parameters dict that is substituted at query runtime.
:type parameters: dict
"""
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self, # pylint: disable=too-many-arguments
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
export_format='json',
field_delimiter=',',
gzip=False,
schema=None,
parameters=None,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.field_delimiter = field_delimiter
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.parameters = parameters
def execute(self, context):
cursor = self.query()
files_to_upload = self._write_local_data_files(cursor)
# If a schema is set, create a BQ schema JSON file.
if self.schema_filename:
files_to_upload.append(self._write_local_schema_file(cursor))
# Flush all files before uploading
for tmp_file in files_to_upload:
tmp_file['file_handle'].flush()
self._upload_to_gcs(files_to_upload)
# Close all temp file handles.
for tmp_file in files_to_upload:
tmp_file['file_handle'].close()
def convert_types(self, schema, col_type_dict, row):
"""Convert values from DBAPI to output-friendly formats."""
return [
self.convert_type(value, col_type_dict.get(name))
for name, value in zip(schema, row)
]
def _write_local_data_files(self, cursor):
"""
Takes a cursor, and writes results to a local file.
:return: A dictionary where keys are filenames to be used as object
names in GCS, and values are file handles to local files that
contain the data for the GCS objects.
"""
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
# Convert datetime objects to utc seconds, and decimals to floats.
# Convert binary type object to string encoded with base64.
row = self.convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
# TODO validate that row isn't > 2MB. BQ enforces a hard row size of 2MB.
tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
def _configure_csv_file(self, file_handle, schema):
"""Configure a csv writer with the file_handle and write schema
as headers for the new file.
"""
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
@abc.abstractmethod
def query(self):
"""Execute DBAPI query."""
@abc.abstractmethod
def field_to_bigquery(self, field):
"""Convert a DBAPI field to BigQuery schema format."""
@abc.abstractmethod
def convert_type(self, value, schema_type):
"""Convert a value from DBAPI to output-friendly formats."""
def _get_col_type_dict(self):
"""
Return a dict of column name and column type based on self.schema if not None.
"""
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
self.log.warning('Using default schema due to unexpected type.'
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
'#specifying_a_json_schema_file')
return col_type_dict
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': 'application/json',
}
return schema_file_to_upload
def _upload_to_gcs(self, files_to_upload):
"""
Upload all of the file splits (and optionally the schema .json file) to
Google Cloud Storage.
"""
hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'),
gzip=self.gzip if tmp_file.get('file_name') == self.schema_filename else False)
| 41.273381
| 104
| 0.643891
|
import abc
import json
import warnings
from tempfile import NamedTemporaryFile
import unicodecsv as csv
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class BaseSQLToGCSOperator(BaseOperator, metaclass=abc.ABCMeta):
template_fields = ('sql', 'bucket', 'filename', 'schema_filename', 'schema', 'parameters')
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(self,
sql,
bucket,
filename,
schema_filename=None,
approx_max_file_size_bytes=1900000000,
export_format='json',
field_delimiter=',',
gzip=False,
schema=None,
parameters=None,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
delegate_to=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.sql = sql
self.bucket = bucket
self.filename = filename
self.schema_filename = schema_filename
self.approx_max_file_size_bytes = approx_max_file_size_bytes
self.export_format = export_format.lower()
self.field_delimiter = field_delimiter
self.gzip = gzip
self.schema = schema
self.parameters = parameters
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.parameters = parameters
def execute(self, context):
cursor = self.query()
files_to_upload = self._write_local_data_files(cursor)
if self.schema_filename:
files_to_upload.append(self._write_local_schema_file(cursor))
for tmp_file in files_to_upload:
tmp_file['file_handle'].flush()
self._upload_to_gcs(files_to_upload)
for tmp_file in files_to_upload:
tmp_file['file_handle'].close()
def convert_types(self, schema, col_type_dict, row):
return [
self.convert_type(value, col_type_dict.get(name))
for name, value in zip(schema, row)
]
def _write_local_data_files(self, cursor):
schema = list(map(lambda schema_tuple: schema_tuple[0], cursor.description))
col_type_dict = self._get_col_type_dict()
file_no = 0
tmp_file_handle = NamedTemporaryFile(delete=True)
if self.export_format == 'csv':
file_mime_type = 'text/csv'
else:
file_mime_type = 'application/json'
files_to_upload = [{
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
}]
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
for row in cursor:
row = self.convert_types(schema, col_type_dict, row)
if self.export_format == 'csv':
csv_writer.writerow(row)
else:
row_dict = dict(zip(schema, row))
tmp_file_handle.write(json.dumps(row_dict, sort_keys=True).encode('utf-8'))
# Append newline to make dumps BigQuery compatible.
tmp_file_handle.write(b'\n')
# Stop if the file exceeds the file size limit.
if tmp_file_handle.tell() >= self.approx_max_file_size_bytes:
file_no += 1
tmp_file_handle = NamedTemporaryFile(delete=True)
files_to_upload.append({
'file_name': self.filename.format(file_no),
'file_handle': tmp_file_handle,
'file_mime_type': file_mime_type
})
if self.export_format == 'csv':
csv_writer = self._configure_csv_file(tmp_file_handle, schema)
return files_to_upload
def _configure_csv_file(self, file_handle, schema):
csv_writer = csv.writer(file_handle, encoding='utf-8',
delimiter=self.field_delimiter)
csv_writer.writerow(schema)
return csv_writer
@abc.abstractmethod
    def query(self):
        ...
@abc.abstractmethod
    def field_to_bigquery(self, field):
        ...
@abc.abstractmethod
    def convert_type(self, value, schema_type):
        ...
def _get_col_type_dict(self):
schema = []
if isinstance(self.schema, str):
schema = json.loads(self.schema)
elif isinstance(self.schema, list):
schema = self.schema
elif self.schema is not None:
self.log.warning('Using default schema due to unexpected type.'
'Should be a string or list.')
col_type_dict = {}
try:
col_type_dict = {col['name']: col['type'] for col in schema}
except KeyError:
self.log.warning('Using default schema due to missing name or type. Please '
'refer to: https://cloud.google.com/bigquery/docs/schemas'
                             '#specifying_a_json_schema_file')
return col_type_dict
def _write_local_schema_file(self, cursor):
schema = [self.field_to_bigquery(field) for field in cursor.description]
self.log.info('Using schema for %s: %s', self.schema_filename, schema)
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
tmp_schema_file_handle.write(json.dumps(schema, sort_keys=True).encode('utf-8'))
schema_file_to_upload = {
'file_name': self.schema_filename,
'file_handle': tmp_schema_file_handle,
'file_mime_type': 'application/json',
}
return schema_file_to_upload
def _upload_to_gcs(self, files_to_upload):
hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
for tmp_file in files_to_upload:
hook.upload(self.bucket, tmp_file.get('file_name'),
tmp_file.get('file_handle').name,
mime_type=tmp_file.get('file_mime_type'),
gzip=self.gzip if tmp_file.get('file_name') == self.schema_filename else False)
| true
| true
|
7909e3fef16b591896700bd1fcfde7a65f495191
| 7,274
|
py
|
Python
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
zipated/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
third_party/chromite/cbuildbot/archive_lib_unittest.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the archive_lib module."""
from __future__ import print_function
import mock
from chromite.cbuildbot import archive_lib
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import config_lib
from chromite.lib import config_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import parallel_unittest
DEFAULT_ARCHIVE_PREFIX = 'bogus_bucket/TheArchiveBase'
DEFAULT_ARCHIVE_BASE = 'gs://%s' % DEFAULT_ARCHIVE_PREFIX
DEFAULT_BUILDROOT = '/tmp/foo/bar/buildroot'
DEFAULT_BUILDNUMBER = 12345
DEFAULT_BRANCH = 'TheBranch'
DEFAULT_CHROME_BRANCH = 'TheChromeBranch'
DEFAULT_VERSION_STRING = 'TheVersionString'
DEFAULT_BOARD = 'TheBoard'
DEFAULT_BOT_NAME = 'TheCoolBot'
# Access to protected member.
# pylint: disable=W0212
DEFAULT_OPTIONS = cros_test_lib.EasyAttr(
archive_base=DEFAULT_ARCHIVE_BASE,
buildroot=DEFAULT_BUILDROOT,
buildnumber=DEFAULT_BUILDNUMBER,
buildbot=True,
branch=DEFAULT_BRANCH,
remote_trybot=False,
debug=False,
)
DEFAULT_CONFIG = config_lib.BuildConfig(
name=DEFAULT_BOT_NAME,
master=True,
boards=[DEFAULT_BOARD],
child_configs=[config_lib.BuildConfig(name='foo'),
config_lib.BuildConfig(name='bar'),
],
gs_path=config_lib.GS_PATH_DEFAULT
)
def _ExtendDefaultOptions(**kwargs):
"""Extend DEFAULT_OPTIONS with keys/values in kwargs."""
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
def _ExtendDefaultConfig(**kwargs):
"""Extend DEFAULT_CONFIG with keys/values in kwargs."""
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return config_lib.BuildConfig(**config_kwargs)
def _NewBuilderRun(options=None, config=None):
"""Create a BuilderRun objection from options and config values.
Args:
options: Specify options or default to DEFAULT_OPTIONS.
config: Specify build config or default to DEFAULT_CONFIG.
Returns:
BuilderRun object.
"""
manager = parallel_unittest.FakeMultiprocessManager()
options = options or DEFAULT_OPTIONS
config = config or DEFAULT_CONFIG
site_config = config_lib_unittest.MockSiteConfig()
site_config[config.name] = config
return cbuildbot_run.BuilderRun(options, site_config, config, manager)
class GetBaseUploadURITest(cros_test_lib.TestCase):
"""Test the GetBaseUploadURI function."""
ARCHIVE_BASE = '/tmp/the/archive/base'
BOT_ID = 'TheNewBotId'
def setUp(self):
self.cfg = DEFAULT_CONFIG
def _GetBaseUploadURI(self, *args, **kwargs):
"""Test GetBaseUploadURI with archive_base and no bot_id."""
return archive_lib.GetBaseUploadURI(self.cfg, *args, **kwargs)
def testArchiveBase(self):
expected_result = '%s/%s' % (self.ARCHIVE_BASE, DEFAULT_BOT_NAME)
result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE)
self.assertEqual(expected_result, result)
def testArchiveBaseBotId(self):
expected_result = '%s/%s' % (self.ARCHIVE_BASE, self.BOT_ID)
result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE,
bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
def testBotId(self):
expected_result = ('%s/%s' %
(config_lib.GetConfig().params.ARCHIVE_URL,
self.BOT_ID))
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
def testDefaultGSPath(self):
"""Test GetBaseUploadURI with default gs_path value in config."""
self.cfg = _ExtendDefaultConfig(gs_path=config_lib.GS_PATH_DEFAULT)
# Test without bot_id.
expected_result = ('%s/%s' %
(config_lib.GetConfig().params.ARCHIVE_URL,
DEFAULT_BOT_NAME))
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
# Test with bot_id.
expected_result = ('%s/%s' %
(config_lib.GetConfig().params.ARCHIVE_URL,
self.BOT_ID))
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
def testOverrideGSPath(self):
"""Test GetBaseUploadURI with default gs_path value in config."""
self.cfg = _ExtendDefaultConfig(gs_path='gs://funkytown/foo/bar')
# Test without bot_id.
expected_result = 'gs://funkytown/foo/bar/TheCoolBot'
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
# Test with bot_id.
expected_result = 'gs://funkytown/foo/bar/TheNewBotId'
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
class ArchiveTest(cros_test_lib.TestCase):
"""Test the Archive class."""
_VERSION = '6543.2.1'
def _GetAttributeValue(self, attr, options=None, config=None):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = self._VERSION
run = _NewBuilderRun(options, config)
return getattr(run.GetArchive(), attr)
def testVersion(self):
value = self._GetAttributeValue('version')
self.assertEqual(self._VERSION, value)
def testVersionNotReady(self):
run = _NewBuilderRun()
self.assertRaises(AttributeError, getattr, run, 'version')
def testArchivePathTrybot(self):
options = _ExtendDefaultOptions(buildbot=False)
value = self._GetAttributeValue('archive_path', options=options)
expected_value = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
archive_lib.Archive._TRYBOT_ARCHIVE,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
def testArchivePathBuildbot(self):
value = self._GetAttributeValue('archive_path')
expected_value = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
archive_lib.Archive._BUILDBOT_ARCHIVE,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
def testUploadUri(self):
value = self._GetAttributeValue('upload_url')
expected_value = '%s/%s/%s' % (DEFAULT_ARCHIVE_BASE,
DEFAULT_BOT_NAME,
self._VERSION)
self.assertEqual(expected_value, value)
def testDownloadURLBuildbot(self):
value = self._GetAttributeValue('download_url')
expected_value = ('%s%s/%s/%s' %
(archive_lib.gs.PRIVATE_BASE_HTTPS_DOWNLOAD_URL,
DEFAULT_ARCHIVE_PREFIX,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
def testDownloadURLFileBuildbot(self):
value = self._GetAttributeValue('download_url_file')
expected_value = ('%s%s/%s/%s' %
(archive_lib.gs.PRIVATE_BASE_HTTPS_URL,
DEFAULT_ARCHIVE_PREFIX,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
| 34.311321
| 77
| 0.688617
|
from __future__ import print_function
import mock
from chromite.cbuildbot import archive_lib
from chromite.cbuildbot import cbuildbot_run
from chromite.lib import config_lib
from chromite.lib import config_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import parallel_unittest
DEFAULT_ARCHIVE_PREFIX = 'bogus_bucket/TheArchiveBase'
DEFAULT_ARCHIVE_BASE = 'gs://%s' % DEFAULT_ARCHIVE_PREFIX
DEFAULT_BUILDROOT = '/tmp/foo/bar/buildroot'
DEFAULT_BUILDNUMBER = 12345
DEFAULT_BRANCH = 'TheBranch'
DEFAULT_CHROME_BRANCH = 'TheChromeBranch'
DEFAULT_VERSION_STRING = 'TheVersionString'
DEFAULT_BOARD = 'TheBoard'
DEFAULT_BOT_NAME = 'TheCoolBot'
DEFAULT_OPTIONS = cros_test_lib.EasyAttr(
archive_base=DEFAULT_ARCHIVE_BASE,
buildroot=DEFAULT_BUILDROOT,
buildnumber=DEFAULT_BUILDNUMBER,
buildbot=True,
branch=DEFAULT_BRANCH,
remote_trybot=False,
debug=False,
)
DEFAULT_CONFIG = config_lib.BuildConfig(
name=DEFAULT_BOT_NAME,
master=True,
boards=[DEFAULT_BOARD],
child_configs=[config_lib.BuildConfig(name='foo'),
config_lib.BuildConfig(name='bar'),
],
gs_path=config_lib.GS_PATH_DEFAULT
)
def _ExtendDefaultOptions(**kwargs):
options_kwargs = DEFAULT_OPTIONS.copy()
options_kwargs.update(kwargs)
return cros_test_lib.EasyAttr(**options_kwargs)
def _ExtendDefaultConfig(**kwargs):
config_kwargs = DEFAULT_CONFIG.copy()
config_kwargs.update(kwargs)
return config_lib.BuildConfig(**config_kwargs)
def _NewBuilderRun(options=None, config=None):
manager = parallel_unittest.FakeMultiprocessManager()
options = options or DEFAULT_OPTIONS
config = config or DEFAULT_CONFIG
site_config = config_lib_unittest.MockSiteConfig()
site_config[config.name] = config
return cbuildbot_run.BuilderRun(options, site_config, config, manager)
class GetBaseUploadURITest(cros_test_lib.TestCase):
ARCHIVE_BASE = '/tmp/the/archive/base'
BOT_ID = 'TheNewBotId'
def setUp(self):
self.cfg = DEFAULT_CONFIG
def _GetBaseUploadURI(self, *args, **kwargs):
return archive_lib.GetBaseUploadURI(self.cfg, *args, **kwargs)
def testArchiveBase(self):
expected_result = '%s/%s' % (self.ARCHIVE_BASE, DEFAULT_BOT_NAME)
result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE)
self.assertEqual(expected_result, result)
def testArchiveBaseBotId(self):
expected_result = '%s/%s' % (self.ARCHIVE_BASE, self.BOT_ID)
result = self._GetBaseUploadURI(archive_base=self.ARCHIVE_BASE,
bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
def testBotId(self):
expected_result = ('%s/%s' %
(config_lib.GetConfig().params.ARCHIVE_URL,
self.BOT_ID))
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
def testDefaultGSPath(self):
self.cfg = _ExtendDefaultConfig(gs_path=config_lib.GS_PATH_DEFAULT)
expected_result = ('%s/%s' %
(config_lib.GetConfig().params.ARCHIVE_URL,
DEFAULT_BOT_NAME))
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
expected_result = ('%s/%s' %
(config_lib.GetConfig().params.ARCHIVE_URL,
self.BOT_ID))
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
def testOverrideGSPath(self):
self.cfg = _ExtendDefaultConfig(gs_path='gs://funkytown/foo/bar')
expected_result = 'gs://funkytown/foo/bar/TheCoolBot'
result = self._GetBaseUploadURI()
self.assertEqual(expected_result, result)
expected_result = 'gs://funkytown/foo/bar/TheNewBotId'
result = self._GetBaseUploadURI(bot_id=self.BOT_ID)
self.assertEqual(expected_result, result)
class ArchiveTest(cros_test_lib.TestCase):
_VERSION = '6543.2.1'
def _GetAttributeValue(self, attr, options=None, config=None):
with mock.patch.object(cbuildbot_run._BuilderRunBase, 'GetVersion') as m:
m.return_value = self._VERSION
run = _NewBuilderRun(options, config)
return getattr(run.GetArchive(), attr)
def testVersion(self):
value = self._GetAttributeValue('version')
self.assertEqual(self._VERSION, value)
def testVersionNotReady(self):
run = _NewBuilderRun()
self.assertRaises(AttributeError, getattr, run, 'version')
def testArchivePathTrybot(self):
options = _ExtendDefaultOptions(buildbot=False)
value = self._GetAttributeValue('archive_path', options=options)
expected_value = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
archive_lib.Archive._TRYBOT_ARCHIVE,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
def testArchivePathBuildbot(self):
value = self._GetAttributeValue('archive_path')
expected_value = ('%s/%s/%s/%s' %
(DEFAULT_BUILDROOT,
archive_lib.Archive._BUILDBOT_ARCHIVE,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
def testUploadUri(self):
value = self._GetAttributeValue('upload_url')
expected_value = '%s/%s/%s' % (DEFAULT_ARCHIVE_BASE,
DEFAULT_BOT_NAME,
self._VERSION)
self.assertEqual(expected_value, value)
def testDownloadURLBuildbot(self):
value = self._GetAttributeValue('download_url')
expected_value = ('%s%s/%s/%s' %
(archive_lib.gs.PRIVATE_BASE_HTTPS_DOWNLOAD_URL,
DEFAULT_ARCHIVE_PREFIX,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
def testDownloadURLFileBuildbot(self):
value = self._GetAttributeValue('download_url_file')
expected_value = ('%s%s/%s/%s' %
(archive_lib.gs.PRIVATE_BASE_HTTPS_URL,
DEFAULT_ARCHIVE_PREFIX,
DEFAULT_BOT_NAME,
self._VERSION))
self.assertEqual(expected_value, value)
| true
| true
|
7909e501d609b7a93a7368f9cef1f69decd64be6
| 7,898
|
py
|
Python
|
cadee/prep/genseqs.py
|
kamerlinlab/cadee
|
8fa34fc4f7fc496c8843e9380075ae11fca7aaa7
|
[
"MIT"
] | 10
|
2017-01-11T09:21:27.000Z
|
2021-06-27T03:56:15.000Z
|
cadee/prep/genseqs.py
|
kamerlinlab/cadee
|
8fa34fc4f7fc496c8843e9380075ae11fca7aaa7
|
[
"MIT"
] | 2
|
2017-07-18T06:54:17.000Z
|
2020-08-25T14:03:14.000Z
|
cadee/prep/genseqs.py
|
kamerlinlab/cadee
|
8fa34fc4f7fc496c8843e9380075ae11fca7aaa7
|
[
"MIT"
] | 3
|
2017-03-15T12:18:13.000Z
|
2021-02-28T05:09:36.000Z
|
#!/usr/bin/env python
"""
Generate sequences from a pdbfile and modify the sequences.
Author: {0} ({1})
This module is part of CADEE, the framework for
Computer-Aided Directed Evolution of Enzymes.
"""
from __future__ import print_function
import logging
import os
import sys
import time
import config
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
logger = logging.getLogger('prep.genseqs')
# ERROR/EXIT CODES
ERR_USAGE = 1
ERR_OUTPUTFOLDER_EXISTS = 2
ERR_TOPO_GENERATION_WT = 3
ERR_QPREP5_INEXISTENT = 4
ERR_MKTOP_INEXISTENT = 5
ERR_NO_BABEL = 6
# CONSTANTS
NLC = '\n'
def genseq2(wtseq, mutations, keepdupes=False):
""" generate a sequences library based of wtseq
@param: list of tupel, [ (resid, library), (resid, library), ...]
@returns: list of sequences
"""
def estimator(mutations):
est = 1
for mut in mutations:
lib = mut[1]
est *= (len(lib)+1)
return est
logger.info('will mutate wtseq %s and create about %s mutations',
wtseq, estimator(mutations))
seqo = list(wtseq)
sequences = [seqo]
while len(mutations) > 0:
newseqs = sequences[:]
res, lib = mutations.pop()
for seqo in sequences:
res = int(res)
if res < 1:
raise ValueError('Impossible: resid < 1!', res)
pos = res - 1
for aa in lib:
if len(aa) != 1:
raise ValueError('Impossible 1-letter aminoacid',
aa, 'in lib', lib)
seqn = seqo[:]
seqn[pos] = aa
if keepdupes or seqn not in newseqs:
newseqs.append(seqn)
sequences = newseqs
return sequences
def combine(lib, pos):
"""generate combinations of up to 7.
@param lib: library
@param pos: positions to mutate
# TODO: implement in readable (recursively)
"""
numseqs = 1
for each in lib:
numseqs *= len(each)
    logger.info('Generating %s %s', numseqs, 'sequences. Please wait.')
seqlib = []
logger.info('Library %s, Positions %s', lib, pos)
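    # The nested loops below walk the cartesian product of the per-position
    # libraries, one level per mutated position (up to 7 positions).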
for every in lib[0]:
if len(pos) > 1:
for every2, in lib[1]:
if len(pos) > 2:
for every3, in lib[2]:
if len(pos) > 3:
for every4, in lib[3]:
if len(pos) > 4:
for every5, in lib[4]:
if len(pos) > 5:
for every6, in lib[5]:
if len(pos) > 6:
for every7 in lib[6]:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6,
every7])
else:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6])
else:
seqlib.append([every,
every2,
every3,
every4,
every5])
else:
                        seqlib.append([every, every2, every3,
                                       every4])
else:
seqlib.append([every, every2, every3])
else:
seqlib.append([every, every2])
else:
seqlib.append([every])
return seqlib
def gen_seqlib(sequence, pos, lib):
"""
Generates sequences, mutating at pos[x] to all as in lib[x]
Generates sequences, mutating at pos[x] if len(lib)==1,
the same lib will be used for all
Return sequences
"""
# is lib a string?
if isinstance(lib, str):
lib = [lib]
# when only 1 library is given, reuse it
if len(lib) == 1:
        for _ in range(1, len(pos)):
            lib.append(lib[0])
if len(pos) != len(lib):
msg = 'Bad Input: Dimensions of pos and lib must be equal: '
msg += 'found: #pos: {0}, #lib {1}'.format(len(pos), len(lib))
        raise Exception(msg)
seqlib = combine(lib, pos)
# insert combinations into sequence
sequences_1d = {}
for i in range(0, len(seqlib)):
nfa = list(sequence)
        for j, posj in enumerate(pos):
if nfa[posj].upper() != seqlib[i][j].upper():
nfa[posj] = seqlib[i][j]
modseq = ''.join(nfa)
sequences_1d[modseq] = 1
return sequences_1d
def get_fasta(wtpdb):
"""Return fasta code of wtpdb"""
# preparations
from pyscwrl import babel_pdb_for_scwrl
babel_pdb_for_scwrl(wtpdb)
# read fasta
fasta = ''
for line in open('proper.fasta'):
line = line[:-1]
if line[0] == '>':
# fasta-comment, ignore line
continue
for char in line:
fasta += char.lower()
return fasta
def get_sequences(wtpdb, resids, library):
"""Return list of sequences for resids, created with library"""
print(wtpdb, resids)
# Get the fasta sequence from pdbfile
fasta = get_fasta(wtpdb)
posids = []
# position - ids start from 0 (not 1), so we have to convert
for resid in resids:
posids.append(int(resid)-1)
# generate sequences:
sequences = gen_seqlib(fasta, posids, [library])
return sequences
if __name__ == "__main__":
# Parse Command Line
LIB = config.SatLibs.ALL
def usage():
"""Print Usage and exit"""
print('')
print('Usage:')
print(' ' + sys.argv[0] + ' qprep-wt.pdb res1 [ res2 ...] ]')
print('')
sys.exit(ERR_USAGE)
def get_resnumbers(args):
"""Return residue-numbers as list-of-integers"""
resids = []
for resid in args:
try:
resids.append(int(resid))
except ValueError:
print('ValueError with ', resid, ' expected: Integer')
usage()
if len(resids) > 7:
print('FATAL:')
print('You ask me to mutate more than 7 residues at one time.')
print('This is NOT IMPLEMENTED... ...probably a BAD IDEA :')
print('This is a bad idea, because we grow with LIBRARY^{#RES}!')
            print('In your case ', len(LIB), '^', len(resids), '=',
                  len(LIB)**len(resids), '!')
usage()
return resids
START = time.time()
if len(sys.argv) < 3:
usage()
    if len(sys.argv[2:]) > 7:
usage()
get_sequences(os.path.abspath(sys.argv[1]),
get_resnumbers(sys.argv[2:]), LIB)
print('time', round(time.time()-START, 2), 's')
| 30.851563
| 79
| 0.443024
|
from __future__ import print_function
import logging
import os
import sys
import time
import config
__author__ = "Beat Amrein"
__email__ = "beat.amrein@gmail.com"
logger = logging.getLogger('prep.genseqs')
ERR_USAGE = 1
ERR_OUTPUTFOLDER_EXISTS = 2
ERR_TOPO_GENERATION_WT = 3
ERR_QPREP5_INEXISTENT = 4
ERR_MKTOP_INEXISTENT = 5
ERR_NO_BABEL = 6
NLC = '\n'
def genseq2(wtseq, mutations, keepdupes=False):
def estimator(mutations):
est = 1
for mut in mutations:
lib = mut[1]
est *= (len(lib)+1)
return est
logger.info('will mutate wtseq %s and create about %s mutations',
wtseq, estimator(mutations))
seqo = list(wtseq)
sequences = [seqo]
while len(mutations) > 0:
newseqs = sequences[:]
res, lib = mutations.pop()
for seqo in sequences:
res = int(res)
if res < 1:
raise ValueError('Impossible: resid < 1!', res)
pos = res - 1
for aa in lib:
if len(aa) != 1:
raise ValueError('Impossible 1-letter aminoacid',
aa, 'in lib', lib)
seqn = seqo[:]
seqn[pos] = aa
if keepdupes or seqn not in newseqs:
newseqs.append(seqn)
sequences = newseqs
return sequences
def combine(lib, pos):
numseqs = 1
for each in lib:
numseqs *= len(each)
    logger.info('Generating %s %s', numseqs, 'sequences. Please wait.')
seqlib = []
logger.info('Library %s, Positions %s', lib, pos)
for every in lib[0]:
if len(pos) > 1:
for every2, in lib[1]:
if len(pos) > 2:
for every3, in lib[2]:
if len(pos) > 3:
for every4, in lib[3]:
if len(pos) > 4:
for every5, in lib[4]:
if len(pos) > 5:
for every6, in lib[5]:
if len(pos) > 6:
for every7 in lib[6]:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6,
every7])
else:
seqlib.append([every,
every2,
every3,
every4,
every5,
every6])
else:
seqlib.append([every,
every2,
every3,
every4,
every5])
else:
                            seqlib.append([every, every2, every3,
                                           every4])
else:
seqlib.append([every, every2, every3])
else:
seqlib.append([every, every2])
else:
seqlib.append([every])
return seqlib
def gen_seqlib(sequence, pos, lib):
if isinstance(lib, str):
lib = [lib]
if len(lib) == 1:
        for _ in range(1, len(pos)):
            lib.append(lib[0])
if len(pos) != len(lib):
msg = 'Bad Input: Dimensions of pos and lib must be equal: '
msg += 'found: #pos: {0}, #lib {1}'.format(len(pos), len(lib))
        raise Exception(msg)
seqlib = combine(lib, pos)
sequences_1d = {}
for i in range(0, len(seqlib)):
nfa = list(sequence)
        for j, posj in enumerate(pos):
if nfa[posj].upper() != seqlib[i][j].upper():
nfa[posj] = seqlib[i][j]
modseq = ''.join(nfa)
sequences_1d[modseq] = 1
return sequences_1d
def get_fasta(wtpdb):
from pyscwrl import babel_pdb_for_scwrl
babel_pdb_for_scwrl(wtpdb)
fasta = ''
for line in open('proper.fasta'):
line = line[:-1]
if line[0] == '>':
continue
for char in line:
fasta += char.lower()
return fasta
def get_sequences(wtpdb, resids, library):
print(wtpdb, resids)
fasta = get_fasta(wtpdb)
posids = []
for resid in resids:
posids.append(int(resid)-1)
sequences = gen_seqlib(fasta, posids, [library])
return sequences
if __name__ == "__main__":
LIB = config.SatLibs.ALL
def usage():
print('')
print('Usage:')
print(' ' + sys.argv[0] + ' qprep-wt.pdb res1 [ res2 ...] ]')
print('')
sys.exit(ERR_USAGE)
def get_resnumbers(args):
resids = []
for resid in args:
try:
resids.append(int(resid))
except ValueError:
print('ValueError with ', resid, ' expected: Integer')
usage()
if len(resids) > 7:
print('FATAL:')
print('You ask me to mutate more than 7 residues at one time.')
print('This is NOT IMPLEMENTED... ...probably a BAD IDEA :')
print('This is a bad idea, because we grow with LIBRARY^{#RES}!')
            print('In your case ', len(LIB), '^', len(resids), '=',
                  len(LIB)**len(resids), '!')
usage()
return resids
START = time.time()
if len(sys.argv) < 3:
usage()
    if len(sys.argv[2:]) > 7:
usage()
get_sequences(os.path.abspath(sys.argv[1]),
get_resnumbers(sys.argv[2:]), LIB)
print('time', round(time.time()-START, 2), 's')
| true
| true
|
7909e681569f02e29cbbb0f973c01f46b7ffd0bb
| 3,746
|
py
|
Python
|
easytransfer/losses/kd_loss.py
|
mczhuge/Kaleido-BERT
|
50579660fb8dc1e250c7cc40e0f10294c54532e3
|
[
"MIT"
] | 109
|
2021-04-14T04:15:53.000Z
|
2022-03-24T05:24:43.000Z
|
easytransfer/losses/kd_loss.py
|
NoLoPhe/Kaleido-BERT
|
1b14073e3ad3490c50bbd1e7e94846830671b332
|
[
"MIT"
] | 12
|
2021-04-18T13:21:07.000Z
|
2022-01-27T09:42:51.000Z
|
easytransfer/losses/kd_loss.py
|
NoLoPhe/Kaleido-BERT
|
1b14073e3ad3490c50bbd1e7e94846830671b332
|
[
"MIT"
] | 12
|
2021-04-25T08:40:09.000Z
|
2022-03-24T08:56:29.000Z
|
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
def build_kd_loss(teacher_logits,
student_logits,
task_balance=0.3,
distill_tempreture=2.0,
labels=None,
loss_type='mse'):
if loss_type == 'mse':
# mean square error
return mse_loss(teacher_logits, student_logits)
elif loss_type == 'xent':
# cross entropy
return xent_loss(teacher_logits, student_logits, labels,
distill_tempreture, task_balance)
else:
# kl divergence
return kld_loss(teacher_logits, student_logits, labels,
distill_tempreture, task_balance)
def mse_loss(teacher_logits, student_logits):
loss = tf.reduce_mean(tf.nn.l2_loss(teacher_logits - student_logits))
return loss
def xent_loss(teacher_logits, student_logits, labels, distill_tempreture,
task_balance):
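    # Mix the hard-label cross entropy with cross entropy against the teacher's
    # temperature-softened distribution, weighted by task_balance.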
student_task_xent = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(labels),
logits=student_logits))
teacher_targets = tf.nn.softmax(teacher_logits / distill_tempreture)
student_distill_xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.stop_gradient(teacher_targets), logits=student_logits))
losses = task_balance * student_task_xent
losses += (1 - task_balance) * student_distill_xent
return losses
def kld_loss(teacher_logits, student_logits, labels, distill_temperature,
task_balance):
student_task_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.squeeze(labels), logits=student_logits)
    student_distill = tf.reduce_sum(tf.nn.softmax(student_logits / distill_temperature) * (
        tf.log(tf.nn.softmax(student_logits / distill_temperature) + 1e-5) -
        tf.log(tf.nn.softmax(teacher_logits / distill_temperature) + 1e-5)))
losses = task_balance * tf.reduce_mean(student_task_xent)
losses += (1 - task_balance) * tf.reduce_mean(student_distill)
return losses
def build_kd_probes_loss(teacher_logits,
student_logits,
task_balance=0.3,
distill_tempreture=2.0,
labels=None,
loss_type='mse'):
teacher_n_layers = len(teacher_logits) - 1
student_n_layers = len(student_logits) - 1
probes_kd_loss = 0.0
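    # Pair each student probe layer with the teacher layer at the same relative depth.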
for i in range(student_n_layers):
proportional_layer_idx = int(math.ceil(i * teacher_n_layers / student_n_layers))
student_layer_logits = student_logits[i]
teacher_layer_logits = teacher_logits[proportional_layer_idx]
probes_kd_loss += build_kd_loss(teacher_logits=teacher_layer_logits,
student_logits=student_layer_logits,
task_balance=task_balance,
distill_tempreture=distill_tempreture,
labels=labels,
loss_type=loss_type)
return probes_kd_loss
| 40.717391
| 91
| 0.657501
|
import math
import tensorflow as tf
def build_kd_loss(teacher_logits,
student_logits,
task_balance=0.3,
distill_tempreture=2.0,
labels=None,
loss_type='mse'):
if loss_type == 'mse':
return mse_loss(teacher_logits, student_logits)
elif loss_type == 'xent':
return xent_loss(teacher_logits, student_logits, labels,
distill_tempreture, task_balance)
else:
return kld_loss(teacher_logits, student_logits, labels,
distill_tempreture, task_balance)
def mse_loss(teacher_logits, student_logits):
loss = tf.reduce_mean(tf.nn.l2_loss(teacher_logits - student_logits))
return loss
def xent_loss(teacher_logits, student_logits, labels, distill_tempreture,
task_balance):
student_task_xent = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(labels),
logits=student_logits))
teacher_targets = tf.nn.softmax(teacher_logits / distill_tempreture)
student_distill_xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.stop_gradient(teacher_targets), logits=student_logits))
losses = task_balance * student_task_xent
losses += (1 - task_balance) * student_distill_xent
return losses
def kld_loss(teacher_logits, student_logits, labels, distill_temperature,
task_balance):
student_task_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.squeeze(labels), logits=student_logits)
    student_distill = tf.reduce_sum(tf.nn.softmax(student_logits / distill_temperature) * (
        tf.log(tf.nn.softmax(student_logits / distill_temperature) + 1e-5) -
        tf.log(tf.nn.softmax(teacher_logits / distill_temperature) + 1e-5)))
losses = task_balance * tf.reduce_mean(student_task_xent)
losses += (1 - task_balance) * tf.reduce_mean(student_distill)
return losses
def build_kd_probes_loss(teacher_logits,
student_logits,
task_balance=0.3,
distill_tempreture=2.0,
labels=None,
loss_type='mse'):
teacher_n_layers = len(teacher_logits) - 1
student_n_layers = len(student_logits) - 1
probes_kd_loss = 0.0
for i in range(student_n_layers):
proportional_layer_idx = int(math.ceil(i * teacher_n_layers / student_n_layers))
student_layer_logits = student_logits[i]
teacher_layer_logits = teacher_logits[proportional_layer_idx]
probes_kd_loss += build_kd_loss(teacher_logits=teacher_layer_logits,
student_logits=student_layer_logits,
task_balance=task_balance,
distill_tempreture=distill_tempreture,
labels=labels,
loss_type=loss_type)
return probes_kd_loss
| true
| true
|
7909e6d83be248a0c4c1f62ad15eda46b6b81739
| 548
|
py
|
Python
|
client.py
|
simenvg/projetBeehive
|
ace9e105985647454ea850a0b9e3fd22905115b0
|
[
"MIT"
] | null | null | null |
client.py
|
simenvg/projetBeehive
|
ace9e105985647454ea850a0b9e3fd22905115b0
|
[
"MIT"
] | null | null | null |
client.py
|
simenvg/projetBeehive
|
ace9e105985647454ea850a0b9e3fd22905115b0
|
[
"MIT"
] | null | null | null |
from socket import *
from datetime import datetime
import json
#rew
def send(temperature, humidity, socket):
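    # Serialise one timestamped reading as JSON and push it over the already-open socket.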
dict = {'timestamp':datetime.now().strftime("%X"), 'temperature':temperature, 'humidity':humidity}
message = json.dumps(dict)
try :
socket.send(message)
except :
socket.close()
s = socket(AF_INET, SOCK_STREAM)
PORT = 40017
s.connect(('', PORT))
while(1):
temperature = raw_input('Temperature: ')
humidity = raw_input('Humidity: ')
send(temperature, humidity, s)
s.close()
| 21.076923
| 103
| 0.644161
|
from socket import *
from datetime import datetime
import json
def send(temperature, humidity, socket):
dict = {'timestamp':datetime.now().strftime("%X"), 'temperature':temperature, 'humidity':humidity}
message = json.dumps(dict)
try :
socket.send(message)
except :
socket.close()
s = socket(AF_INET, SOCK_STREAM)
PORT = 40017
s.connect(('', PORT))
while(1):
temperature = raw_input('Temperature: ')
humidity = raw_input('Humidity: ')
send(temperature, humidity, s)
s.close()
| true
| true
|
7909e7a1e55e682a2b4294c7b151519f39f02483
| 9,111
|
py
|
Python
|
packandroid.py
|
skela/r
|
4c5b8574a7868c3a95ab1ccbcb07d5ba2dc5d5d0
|
[
"MIT"
] | null | null | null |
packandroid.py
|
skela/r
|
4c5b8574a7868c3a95ab1ccbcb07d5ba2dc5d5d0
|
[
"MIT"
] | null | null | null |
packandroid.py
|
skela/r
|
4c5b8574a7868c3a95ab1ccbcb07d5ba2dc5d5d0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import xmltodict # sudo easy_install xmltodict
import subprocess
import zipfile
class PackAndroid(object):
def __init__(self, root, project_folder, project, input_apk, destination, keystore, keystore_alias, apk_name=None, zipalign=None, jarsigner=None, configuration='Release', keystore_password=None):
self.name = project_folder
self.proj_folder = project_folder
self.project = project
self.input_apk = input_apk
self.destination = os.path.expanduser(destination)
self.configuration = configuration
self.keystore = keystore
self.keystore_alias = keystore_alias
self.keystore_password = keystore_password
# Name of the final apk
self.apk_name = apk_name
if self.apk_name is None and self.keystore_alias is not None:
self.apk_name = self.keystore_alias.lower()
if self.apk_name is None:
projf = os.path.basename(project)
self.apk_name = projf.replace('.csproj', '')
self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
self.signed_apk = os.path.join(self.destination, "%s-signed.apk" % self.apk_name)
self.zipalign = zipalign
if self.zipalign is None:
self.zipalign = '/usr/bin/zipalign'
self.jarsigner = jarsigner
if self.jarsigner is None:
self.jarsigner = "/usr/bin/jarsigner"
self.keystore = os.path.join(root, self.keystore)
self.project = os.path.join(root, self.project)
self.proj_folder = os.path.join(root, self.proj_folder)
self.input_apk = os.path.join(self.proj_folder, self.input_apk)
if not os.path.exists(self.keystore):
exit("Failed to locate keystore - " + self.keystore)
if not os.path.exists(self.zipalign):
exit("Failed to locate zipalign - " + self.zipalign)
if not os.path.exists(self.jarsigner):
exit("Failed to locate jarsigner - " + self.jarsigner)
def clean(self):
bin_folder = os.path.join(self.proj_folder, 'bin')
obj_folder = os.path.join(self.proj_folder, 'obj')
if os.path.exists(bin_folder):
print 'Clearing away ' + bin_folder
os.system('rm -fdr ' + bin_folder)
if os.path.exists(obj_folder):
print 'Clearing away ' + obj_folder
os.system('rm -fdr ' + obj_folder)
def get_manifest_dictionary(self):
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest)
xml = f.read()
f.close()
doc = xmltodict.parse(xml)
return doc
def get_build_number(self):
doc = self.get_manifest_dictionary()
return doc['manifest']['@android:versionCode']
def get_version_number(self):
doc = self.get_manifest_dictionary()
return doc['manifest']['@android:versionName']
def set_build_number(self, build_num):
doc = self.get_manifest_dictionary()
doc['manifest']['@android:versionCode'] = build_num
xml = xmltodict.unparse(doc, pretty=True)
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest, 'w')
f.write(xml)
f.close()
def increment_build_number(self):
build_number = self.get_build_number()
if build_number is None:
build_number = "1"
else:
build_number = str(int(build_number)+1)
self.set_build_number(build_number)
def decrement_build_number(self):
build_number = self.get_build_number()
if build_number is None:
build_number = "1"
else:
build_number = str(int(build_number)-1)
self.set_build_number(build_number)
def set_version_number(self, version):
doc = self.get_manifest_dictionary()
doc['manifest']['@android:versionName'] = version
xml = xmltodict.unparse(doc, pretty=True)
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest, 'w')
f.write(xml)
f.close()
def build(self):
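        # Refresh the generated Android resource IDs, then build via msbuild's
        # SignAndroidPackage target; the raw apk is re-signed with the release key in sign().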
cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (self.project, self.configuration)
os.system(cmd_update)
cmd = "msbuild %s /t:SignAndroidPackage /p:Configuration=%s" % (self.project, self.configuration)
os.system(cmd)
if not os.path.exists(self.input_apk):
exit("Failed to build raw apk, i.e. its missing - " + self.input_apk)
@staticmethod
def convert_windows_path(any_path):
chars = []
for i in range(len(any_path)):
char = any_path[i]
if char == '\\':
chars.append('/')
else:
chars.append(char)
return ''.join(chars)
@staticmethod
def update_solution_resources(solution,configuration):
if not os.path.exists(solution):
exit("Failed to locate %s - " % os.path.basename(solution))
f = file(solution)
sln = f.read()
f.close()
projects = []
lines = sln.split('\n')
for line in lines:
if line.startswith("Project("):
start = line.find(",")
rest = line[start+3:len(line)]
end = rest.find(",")
projects.append(os.path.abspath(os.path.join(os.path.dirname(solution),PackAndroid.convert_windows_path(rest[0:end-1]))))
# print projects
for project in projects:
cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (project, configuration)
os.system(cmd_update)
def sign(self):
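        # Sign the raw apk with jarsigner using the release keystore, then zipalign it
        # into the final apk and remove the intermediate signed file.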
sign_cmd = [self.jarsigner, "-verbose", "-sigalg", "MD5withRSA", "-digestalg", "SHA1", "-keystore", self.keystore]
if not self.keystore_password is None:
sign_cmd.extend(["-storepass",self.keystore_password])
sign_cmd.extend(["-signedjar", self.signed_apk, self.input_apk, self.keystore_alias])
subprocess.call(sign_cmd)
subprocess.call([self.zipalign, "-f", "-v", "4", self.signed_apk, self.final_apk])
if os.path.exists(self.final_apk):
if os.path.exists(self.signed_apk):
os.system('rm ' + self.signed_apk)
def update_version(self):
build_number = self.get_build_number()
print build_number
q = raw_input("Would you like to increment the build number for %s? y/n\n> " % self.apk_name)
if q == "y":
build_number = str(int(build_number)+1)
self.set_build_number(build_number)
version_number = self.get_version_number()
print version_number
q = raw_input("Would you like to change the version number for %s? y/n\n> " % self.apk_name)
if q == "y":
version_number = raw_input("What to?> ")
self.set_version_number(version_number)
def copy_symbols(self):
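        # Zip the generated .mSYM symbol folder (if any) onto the Desktop, named after
        # the final apk, presumably for later crash symbolication.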
artifacts_folder = os.path.join(self.proj_folder, 'bin', 'Release')
stuff = os.listdir(artifacts_folder)
msym_folder = None
for name in stuff:
if name.endswith(".mSYM"):
msym_folder = os.path.join(artifacts_folder, name)
break
if msym_folder is not None:
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
msym_destination = os.path.join(os.path.expanduser("~/Desktop/"), os.path.basename(self.final_apk)) + ".mSYM.zip"
zipf = zipfile.ZipFile(msym_destination, 'w', zipfile.ZIP_DEFLATED)
zipdir(msym_folder, zipf)
zipf.close()
def run(self, update_versions=True, confirm_build=True):
self.clean()
self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
if update_versions:
self.update_version()
build_number = self.get_build_number()
version_number = self.get_version_number()
if confirm_build:
print 'So thats version ' + version_number + " build " + build_number
q = raw_input("Would you like to continue? y/n\n> ")
if q != "y":
print "Ok, not doing the build, suit yourself..."
return None
self.final_apk = self.final_apk + build_number + '-' + version_number + '.apk'
print self.final_apk
self.build()
self.sign()
self.copy_symbols()
return self.final_apk
| 38.443038
| 199
| 0.609044
|
import os
import xmltodict
import subprocess
import zipfile
class PackAndroid(object):
def __init__(self, root, project_folder, project, input_apk, destination, keystore, keystore_alias, apk_name=None, zipalign=None, jarsigner=None, configuration='Release', keystore_password=None):
self.name = project_folder
self.proj_folder = project_folder
self.project = project
self.input_apk = input_apk
self.destination = os.path.expanduser(destination)
self.configuration = configuration
self.keystore = keystore
self.keystore_alias = keystore_alias
self.keystore_password = keystore_password
self.apk_name = apk_name
if self.apk_name is None and self.keystore_alias is not None:
self.apk_name = self.keystore_alias.lower()
if self.apk_name is None:
projf = os.path.basename(project)
self.apk_name = projf.replace('.csproj', '')
self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
self.signed_apk = os.path.join(self.destination, "%s-signed.apk" % self.apk_name)
self.zipalign = zipalign
if self.zipalign is None:
self.zipalign = '/usr/bin/zipalign'
self.jarsigner = jarsigner
if self.jarsigner is None:
self.jarsigner = "/usr/bin/jarsigner"
self.keystore = os.path.join(root, self.keystore)
self.project = os.path.join(root, self.project)
self.proj_folder = os.path.join(root, self.proj_folder)
self.input_apk = os.path.join(self.proj_folder, self.input_apk)
if not os.path.exists(self.keystore):
exit("Failed to locate keystore - " + self.keystore)
if not os.path.exists(self.zipalign):
exit("Failed to locate zipalign - " + self.zipalign)
if not os.path.exists(self.jarsigner):
exit("Failed to locate jarsigner - " + self.jarsigner)
def clean(self):
bin_folder = os.path.join(self.proj_folder, 'bin')
obj_folder = os.path.join(self.proj_folder, 'obj')
if os.path.exists(bin_folder):
print 'Clearing away ' + bin_folder
os.system('rm -fdr ' + bin_folder)
if os.path.exists(obj_folder):
print 'Clearing away ' + obj_folder
os.system('rm -fdr ' + obj_folder)
def get_manifest_dictionary(self):
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest)
xml = f.read()
f.close()
doc = xmltodict.parse(xml)
return doc
def get_build_number(self):
doc = self.get_manifest_dictionary()
return doc['manifest']['@android:versionCode']
def get_version_number(self):
doc = self.get_manifest_dictionary()
return doc['manifest']['@android:versionName']
def set_build_number(self, build_num):
doc = self.get_manifest_dictionary()
doc['manifest']['@android:versionCode'] = build_num
xml = xmltodict.unparse(doc, pretty=True)
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest, 'w')
f.write(xml)
f.close()
def increment_build_number(self):
build_number = self.get_build_number()
if build_number is None:
build_number = "1"
else:
build_number = str(int(build_number)+1)
self.set_build_number(build_number)
def decrement_build_number(self):
build_number = self.get_build_number()
if build_number is None:
build_number = "1"
else:
build_number = str(int(build_number)-1)
self.set_build_number(build_number)
def set_version_number(self, version):
doc = self.get_manifest_dictionary()
doc['manifest']['@android:versionName'] = version
xml = xmltodict.unparse(doc, pretty=True)
manifest = os.path.join(self.proj_folder, 'Properties/AndroidManifest.xml')
if not os.path.exists(manifest):
exit("Failed to locate AndroidManifest.xml - " + manifest)
f = file(manifest, 'w')
f.write(xml)
f.close()
def build(self):
cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (self.project, self.configuration)
os.system(cmd_update)
cmd = "msbuild %s /t:SignAndroidPackage /p:Configuration=%s" % (self.project, self.configuration)
os.system(cmd)
if not os.path.exists(self.input_apk):
exit("Failed to build raw apk, i.e. its missing - " + self.input_apk)
@staticmethod
def convert_windows_path(any_path):
chars = []
for i in range(len(any_path)):
char = any_path[i]
if char == '\\':
chars.append('/')
else:
chars.append(char)
return ''.join(chars)
@staticmethod
def update_solution_resources(solution,configuration):
if not os.path.exists(solution):
exit("Failed to locate %s - " % os.path.basename(solution))
f = file(solution)
sln = f.read()
f.close()
projects = []
lines = sln.split('\n')
for line in lines:
if line.startswith("Project("):
start = line.find(",")
rest = line[start+3:len(line)]
end = rest.find(",")
projects.append(os.path.abspath(os.path.join(os.path.dirname(solution),PackAndroid.convert_windows_path(rest[0:end-1]))))
for project in projects:
cmd_update = "msbuild %s /t:UpdateAndroidResources /p:Configuration=%s" % (project, configuration)
os.system(cmd_update)
def sign(self):
sign_cmd = [self.jarsigner, "-verbose", "-sigalg", "MD5withRSA", "-digestalg", "SHA1", "-keystore", self.keystore]
if not self.keystore_password is None:
sign_cmd.extend(["-storepass",self.keystore_password])
sign_cmd.extend(["-signedjar", self.signed_apk, self.input_apk, self.keystore_alias])
subprocess.call(sign_cmd)
subprocess.call([self.zipalign, "-f", "-v", "4", self.signed_apk, self.final_apk])
if os.path.exists(self.final_apk):
if os.path.exists(self.signed_apk):
os.system('rm ' + self.signed_apk)
def update_version(self):
build_number = self.get_build_number()
print build_number
q = raw_input("Would you like to increment the build number for %s? y/n\n> " % self.apk_name)
if q == "y":
build_number = str(int(build_number)+1)
self.set_build_number(build_number)
version_number = self.get_version_number()
print version_number
q = raw_input("Would you like to change the version number for %s? y/n\n> " % self.apk_name)
if q == "y":
version_number = raw_input("What to?> ")
self.set_version_number(version_number)
def copy_symbols(self):
artifacts_folder = os.path.join(self.proj_folder, 'bin', 'Release')
stuff = os.listdir(artifacts_folder)
msym_folder = None
for name in stuff:
if name.endswith(".mSYM"):
msym_folder = os.path.join(artifacts_folder, name)
break
if msym_folder is not None:
def zipdir(path, ziph):
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file),os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
msym_destination = os.path.join(os.path.expanduser("~/Desktop/"), os.path.basename(self.final_apk)) + ".mSYM.zip"
zipf = zipfile.ZipFile(msym_destination, 'w', zipfile.ZIP_DEFLATED)
zipdir(msym_folder, zipf)
zipf.close()
def run(self, update_versions=True, confirm_build=True):
self.clean()
self.final_apk = os.path.join(self.destination, "%s-" % self.apk_name)
if update_versions:
self.update_version()
build_number = self.get_build_number()
version_number = self.get_version_number()
if confirm_build:
print 'So thats version ' + version_number + " build " + build_number
q = raw_input("Would you like to continue? y/n\n> ")
if q != "y":
print "Ok, not doing the build, suit yourself..."
return None
self.final_apk = self.final_apk + build_number + '-' + version_number + '.apk'
print self.final_apk
self.build()
self.sign()
self.copy_symbols()
return self.final_apk
| false
| true
|
7909e7bf0a5e8cfd96d919d244dabfd1d2677ca1
| 781
|
py
|
Python
|
artinvestor_server/users/urls.py
|
jondelmil/artinvestor-server
|
93b07ae030761321dca3e8ffa87c08f01e0c9996
|
[
"MIT"
] | null | null | null |
artinvestor_server/users/urls.py
|
jondelmil/artinvestor-server
|
93b07ae030761321dca3e8ffa87c08f01e0c9996
|
[
"MIT"
] | null | null | null |
artinvestor_server/users/urls.py
|
jondelmil/artinvestor-server
|
93b07ae030761321dca3e8ffa87c08f01e0c9996
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
# URL pattern for the UserListView
url(
regex=r'^$',
view=views.UserListView.as_view(),
name='list'
),
# URL pattern for the UserRedirectView
url(
regex=r'^~redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
# URL pattern for the UserDetailView
url(
regex=r'^(?P<username>[\w.@+-]+)/$',
view=views.UserDetailView.as_view(),
name='detail'
),
# URL pattern for the UserUpdateView
url(
regex=r'^~update/$',
view=views.UserUpdateView.as_view(),
name='update'
),
]
| 20.552632
| 56
| 0.581306
|
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.UserListView.as_view(),
name='list'
),
url(
regex=r'^~redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
url(
regex=r'^(?P<username>[\w.@+-]+)/$',
view=views.UserDetailView.as_view(),
name='detail'
),
url(
regex=r'^~update/$',
view=views.UserUpdateView.as_view(),
name='update'
),
]
| true
| true
|
7909e7ef4a0d522985fecaad88a319e4653f1382
| 16,510
|
py
|
Python
|
.vscode-server/data/User/History/-1f47d17c/IWlp.py
|
UNIZAR-30226-2022-09/back-end
|
7f20e141e34bf0ae7cce70515a1e4bb0cd85b173
|
[
"MIT"
] | null | null | null |
.vscode-server/data/User/History/-1f47d17c/IWlp.py
|
UNIZAR-30226-2022-09/back-end
|
7f20e141e34bf0ae7cce70515a1e4bb0cd85b173
|
[
"MIT"
] | 1
|
2022-02-16T12:12:43.000Z
|
2022-02-16T12:15:03.000Z
|
.vscode-server/data/User/History/-1f47d17c/IWlp.py
|
UNIZAR-30226-2022-09/back-end
|
7f20e141e34bf0ae7cce70515a1e4bb0cd85b173
|
[
"MIT"
] | null | null | null |
# from flask import Flask, Blueprint
# from flask_sqlalchemy import SQLAlchemy
# from flask_login import LoginManager
# import os
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
import re
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from numpy import array
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Models
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
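    # Follow relation: Usuario_Nicka follows Usuario_Nickb.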
#id = db.Column(db.Integer, primary_key=True )
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
#id = db.Sequence('id', start=1, increment=1)
descripcion = db.Column(db.String(1000))
#Column('timestamp', TIMESTAMP(timezone=False), nullable=False, default=datetime.now())
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
link = db.Column(db.String(200),nullable=False)
titulo = db.Column(db.String(200),nullable=False)
autor = db.Column(db.String(200),nullable=False)
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
id = db.Column(db.Integer, primary_key=True )
fecha = db.Column(db.Date)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
comentario = db.Column(db.String(1000))
class Guarda(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
def token_required(f):
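    # Decorator: reads the JWT from the 'token' request header, validates it and
    # passes the decoded nick to the wrapped view as current_user.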
@wraps(f)
def decorated(*args, **kwargs):
#token = request.args.get('token') #http://127.0.0.1:5000/route?token=djsnvidnoffofn
#data = request.get_json()
token = request.headers['token']
#token = data['token']
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,*args, **kwargs)
return decorated
@app.route('/unprotected')
def unprotected():
return jsonify({'message': 'Puede entrar tol mundo'})
@app.route('/protected')
@token_required
def protected(current_user):
print(current_user)
return jsonify({'message': 'Puedes entrar si puedes'})
# Ruta para el login
@app.route('/register', methods=['POST'])
def add_data():
data= request.get_json()
#nick = request.form.get("nick")
#password = request.form.get("password")
#e_mail = request.form.get("e_mail")
user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
nick = Usuario.query.filter_by(nick=data['nick']).first()
if user: # si esto devuelve algo entonces el email existe
return jsonify({'error': 'Existe correo'}) #json diciendo error existe email
if nick:
return jsonify({'error': 'Existe nick'})
#if (check_email(e_mail) == True and check_password(data['password']) == True ):
register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
db.session.add(register)
db.session.commit()
token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
# auth = request.authorization #new ESTO SI LO HACES CON AUTH
data= request.get_json()
if '@' in data['nickOcorreo']:
user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
else:
user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
if not user:
return jsonify({'error': 'No existe ese usuario'})#error mal user
if not check_password_hash(user.password, data['password']):
return jsonify({'error': 'Mal contraseña'}) #error mala contraseña
token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
result = db.session.execute(s)
seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
temas = db.session.execute(tema)
vector = []
for row in temas:
vector += row
for row in result:
fila = {
"nick": current_user,
"nombre_de_usuario":row[0],
"descripcion":row[1],
"link":row[2],
"foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
"nsiguiendo": seguidos,
"nseguidores": seguidores,
"nposts": nposts,
"tematicas": vector
#"foto_de_perfil" :url_for('static', filename='fotosPerfil/' + row[3])
}
return fila
@app.route('/display/<filename>')
def foto(filename):
return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
data= request.get_json()
user = Usuario.query.filter_by(nick=current_user).first()
user.Nombre_de_usuario = data['nombre_de_usuario']
print(data['nombre_de_usuario'])
print(data['descripcion'])
print(data['link'])
print(data['tematicas'])
user.descripcion = data['descripcion']
user.link = data['link']
tematicas = data['tematicas']
for temas in tematicas:
        tema = Prefiere.query.filter_by(Usuario_Nicka=current_user, tema=temas).first()
if not tema:
tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
db.session.add(tema)
#db.session.commit()
#cambia_foto
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
user = Usuario.query.filter_by(nick=current_user).first()
if request.files['nueva_foto'] is not None: #data['cambia_foto']:
file = request.files['nueva_foto']
print(request.files['nueva_foto'])
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
user.foto_de_perfil = filename
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
data= request.get_json()
publicacion = Publicacion(descripcion=data['descripcion'],Usuario_Nicka=current_user) #coger id
db.session.add(publicacion)
db.session.commit()
tematicas = data['tematicas']
for temas in tematicas:
temita = Tematica.query.filter_by(tema=temas).first()
if temita:
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
db.session.add(nuevo)
db.session.commit()
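    # tipo "1": own article with an attached PDF; tipo "2": recommendation of an external link.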
if (data['tipo']=="1"): # articulo
print("xd")
guardarPDF(request.files['pdf'], publicacion.id)
elif(data['tipo']=="2"): # recomendacion
recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
db.session.add(recomendacion)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
def guardarPDF(pdf,_id):
    if pdf is not None:
        file = pdf
        print(pdf)
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
        # The new publication has no Propia row yet, so create one for the uploaded PDF.
        propia = Propia(id=_id, pdf=filename)
        db.session.add(propia)
        db.session.commit()
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
    data= request.get_json()
    a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
    resulta = db.session.execute(a)
    nombre_de_usuario = ""
    for b in resulta:
        nombre_de_usuario = b.Nombre_de_usuario
    #s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and Publicacion.id<=data['id'])).order_by(Publicacion.id)
    s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
    results = db.session.execute(s)
    fila = {}
    for r in results:
        propia = Propia.query.filter_by(id=r.id).first()
        Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
        Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
        Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
        fila = {
            "id": r.id,
            "nick": current_user,
            "descripcion": r.descripcion,
            "timestamp": r.timestamp,
            "pdf": 'http://51.255.50.207:5000/display2/' + propia.pdf if propia else None,
            "nlikes": Gustas,
            "ncomentarios": Comentarios,
            "nguardados": Guardados,
            "usuario": nombre_de_usuario
        }
    return fila
@app.route('/display2/<filename>')
def pdf(filename):
return redirect(url_for('static', filename='pdf/' + filename),code = 301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
#data= request.get_json()
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resultb = db.session.execute(a)
Nombre_de_usuario = ""
for b in resultb:
Nombre_de_usuario=b.Nombre_de_usuario
#s = select([Publicacion.Usuario_Nicka, Publicacion.descripcion,Publicacion.timestamp]).where((Publicacion.Usuario_Nicka == current_user and Publicacion.id>data['id']-8 and and Publicacion.id<=data['id'])).order_by(Publicacion.id)
s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
# for record in results:
# print("\n", record)
    vector0 = []
    vector1 = []
    vector2 = []
    for r in results:
        print(str(r.id))
        vector0.append(r.id)
        vector1.append(str(r.descripcion))
        vector2.append(str(r.timestamp))
# for r in results:
# for b in resultb:
# a = select([Recomendacion.id, Recomendacion.link,Recomendacion.titulo,Recomendacion.autor]).where((Recomendacion.id == r.id))
# resulta = db.session.execute(a)
# for a in resultaa:
# Gustas= db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id ).count()
# Comentarios= db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id ).count()
# Guardados= db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id ).count()
print(vector0)
fila = {
"id": vector0,
#"link": a.link,
#"titulo": a.titulo,
#"autor": a.autor,
"nick": current_user,
"descripcion": vector1,
"timestamp": vector2,
#"nlikes": Gustas,
#"ncomentarios": Comentarios,
#"nguardados": Guardados,
"usuario": Nombre_de_usuario
}
return fila
def check_email(email):
regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if(re.search(regex,email)):
return True
else:
return False
# Contraseñas de entre 8 y 32 carácteres.
def check_password(password):
regex = '^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}[]:;<>,.?/~_+-=|\]).{8,32}$'
if(re.search(regex,password)):
return True
else:
return False
if __name__ == '__main__':
app.run(debug=True)
| 33.831967
| 235
| 0.656208
|
from flask import Flask, jsonify, request, make_response, redirect, url_for
import jwt
import datetime
import os
from functools import wraps
from flask_sqlalchemy import SQLAlchemy
import uuid
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
from sqlalchemy import select
from flask_migrate import Migrate, migrate
from flask_cors import CORS
from sqlalchemy import inspect
from sqlalchemy import Table, Column, MetaData, Integer, Computed
from numpy import array
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secretollave'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///todo.db'
ABSOLUTE_PATH_TO_YOUR_FOLDER ='/home/dani/flask/static/fotosPerfil'
ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER ='/home/dani/flask/static/pdf'
CORS(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class Usuario(db.Model):
nick = db.Column(db.String(20), primary_key=True)
Nombre_de_usuario = db.Column(db.String(50))
password = db.Column(db.String(50))
e_mail = db.Column(db.String(50), unique=True, nullable=False)
descripcion = db.Column(db.String(1000))
link = db.Column(db.String(200))
foto_de_perfil = db.Column(db.String(400))
class Sigue(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Chat(db.Model):
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
mensaje = db.Column(db.String(1000))
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
Usuario_Nickb = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Publicacion(db.Model):
id = db.Column(Integer,primary_key=True)
descripcion = db.Column(db.String(1000))
timestamp = db.Column(db.TIMESTAMP, nullable=False,
server_default=db.func.now(),
onupdate=db.func.now())
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'))
class Propia(db.Model):
pdf = db.Column(db.String(400))
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Recomendacion(db.Model):
link = db.Column(db.String(200),nullable=False)
titulo = db.Column(db.String(200),nullable=False)
autor = db.Column(db.String(200),nullable=False)
id = db.Column(db.String(20), db.ForeignKey('publicacion.id'),primary_key=True)
class Tematica(db.Model):
tema = db.Column(db.String(50), primary_key=True )
class Notificaciones(db.Model):
id = db.Column(db.Integer, primary_key=True )
fecha = db.Column(db.Date)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Prefiere(db.Model):
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Trata_pub_del_tema(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
tema = db.Column(db.String(50), db.ForeignKey('tematica.tema'),primary_key=True)
class Gusta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Comenta(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
comentario = db.Column(db.String(1000))
class Guarda(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
class Trata(db.Model):
id_publi = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
id_notif = db.Column(db.String(20), db.ForeignKey('notificaciones.id'),primary_key=True)
class Genera(db.Model):
id = db.Column(db.Integer, db.ForeignKey('publicacion.id'),primary_key=True)
Usuario_Nicka = db.Column(db.String(20), db.ForeignKey('usuario.nick'),primary_key=True)
def token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
# The original token-retrieval line was garbled here; the header name
# 'x-access-tokens' is an assumption based on the common Flask JWT pattern.
token = request.headers.get('x-access-tokens')
if not token:
return jsonify({'error': 'Token no existe'}), 403
try:
data = jwt.decode(token, app.config['SECRET_KEY'])
current_user = Usuario.query.filter_by(nick=data['nick']).first()
current_user = data['nick']
except:
return jsonify({'error': 'Token no valido'}), 403
return f(current_user,*args, **kwargs)
return decorated
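# Hedged usage note (not from the original code): given the gap-filled header
# lookup above, a client would send its JWT in an 'x-access-tokens' header, e.g.
#   curl -H "x-access-tokens: <jwt>" http://<host>:5000/protected
# The header name is an assumption, not something the original file specifies.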
@app.route('/unprotected')
def unprotected():
return jsonify({'message': 'Puede entrar tol mundo'})
@app.route('/protected')
@token_required
def protected(current_user):
print(current_user)
return jsonify({'message': 'Puedes entrar si puedes'})
@app.route('/register', methods=['POST'])
def add_data():
data= request.get_json()
user = Usuario.query.filter_by(e_mail=data['e_mail']).first()
nick = Usuario.query.filter_by(nick=data['nick']).first()
if user:
return jsonify({'error': 'Existe correo'})
if nick:
return jsonify({'error': 'Existe nick'})
register = Usuario(nick=data['nick'],password=generate_password_hash(data['password']), e_mail=data['e_mail'],foto_de_perfil="platon.jpg")
db.session.add(register)
db.session.commit()
token = jwt.encode({'nick' : data['nick'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/login', methods=['POST'])
def login():
data = request.get_json()
if '@' in data['nickOcorreo']:
user = Usuario.query.filter_by(e_mail=data['nickOcorreo']).first()
else:
user = Usuario.query.filter_by(nick=data['nickOcorreo']).first()
if not user:
return jsonify({'error': 'No existe ese usuario'})
if not check_password_hash(user.password, data['password']):
return jsonify({'error': 'Mal contraseña'})
token = jwt.encode({'nick' : data['nickOcorreo'], 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=9999999)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/editarPerfil', methods=['GET'])
@token_required
def editarPerfilget(current_user):
s = select([Usuario.Nombre_de_usuario, Usuario.descripcion,Usuario.link, Usuario.foto_de_perfil]).where((Usuario.nick == current_user))
result = db.session.execute(s)
seguidos= db.session.query(Sigue).filter(Sigue.Usuario_Nicka == current_user ).count()
seguidores= db.session.query(Sigue).filter(Sigue.Usuario_Nickb == current_user ).count()
nposts= db.session.query(Publicacion).filter(Publicacion.Usuario_Nicka == current_user ).count()
tema = select([Prefiere.tema]).where((Prefiere.Usuario_Nicka == current_user))
temas = db.session.execute(tema)
vector = []
for row in temas:
vector += row
for row in result:
fila = {
"nick": current_user,
"nombre_de_usuario":row[0],
"descripcion":row[1],
"link":row[2],
"foto_de_perfil": 'http://51.255.50.207:5000/display/' + row[3],
"nsiguiendo": seguidos,
"nseguidores": seguidores,
"nposts": nposts,
"tematicas": vector
}
return fila
@app.route('/display/<filename>')
def foto(filename):
return redirect(url_for('static', filename='fotosPerfil/' + filename),code = 301)
@app.route('/editarPerfil', methods=['POST'])
@token_required
def editarPerfilpost(current_user):
data= request.get_json()
user = Usuario.query.filter_by(nick=current_user).first()
user.Nombre_de_usuario = data['nombre_de_usuario']
print(data['nombre_de_usuario'])
print(data['descripcion'])
print(data['link'])
print(data['tematicas'])
user.descripcion = data['descripcion']
user.link = data['link']
tematicas = data['tematicas']
for temas in tematicas:
tema = Prefiere.query.filter_by(tema=temas).first()
if not tema:
tema = Prefiere(Usuario_Nicka=current_user, tema = temas)
db.session.add(tema)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/actualizarImagen', methods=['POST'])
@token_required
def actualizarImagen(current_user):
user = Usuario.query.filter_by(nick=current_user).first()
if request.files['nueva_foto'] is not None:
file = request.files['nueva_foto']
print(request.files['nueva_foto'])
filename = secure_filename(file.filename)
file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_FOLDER, filename))
user.foto_de_perfil = filename
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
@app.route('/subirPost', methods=['POST'])
@token_required
def subirPost(current_user):
data= request.get_json()
publicacion = Publicacion(descripcion=data['descripcion'],Usuario_Nicka=current_user)
db.session.add(publicacion)
db.session.commit()
tematicas = data['tematicas']
for temas in tematicas:
temita = Tematica.query.filter_by(tema=temas).first()
if temita:
nuevo = Trata_pub_del_tema(id=publicacion.id, tema = temita.tema)
db.session.add(nuevo)
db.session.commit()
if (data['tipo']=="1"):
print("xd")
guardarPDF(request.files['pdf'], publicacion.id)
elif(data['tipo']=="2"):
recomendacion = Recomendacion(link=data['link'],titulo=data['titulo'], autor = data['autor'], id = publicacion.id)
db.session.add(recomendacion)
db.session.commit()
token = jwt.encode({'nick' : current_user, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)}, app.config['SECRET_KEY'])
return jsonify({'token' : token.decode('UTF-8')})
def guardarPDF(pdf, _id):
    propia = Propia.query.filter_by(id=_id).first()
    if propia is None:
        # the Propia row does not exist yet for a freshly created post, so create it
        propia = Propia(id=_id)
    if pdf is not None:
        file = pdf
        print(pdf)
        filename = secure_filename(file.filename)
        file.save(os.path.join(ABSOLUTE_PATH_TO_YOUR_PDF_FOLDER, filename))
        propia.pdf = filename
        db.session.add(propia)
        # persist the pdf filename (the Recomendacion branch in subirPost commits as well)
        db.session.commit()
@app.route('/getPostsPropios', methods=['GET'])
@token_required
def getPostsPropios(current_user):
data= request.get_json()
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resulta = db.session.execute(a)
s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
for r in results:
for i in range(data['id']-8,data['id']):
a = select([Propia.id, Propia.pdf]).where((Propia.id == r.id))
resulta = db.session.execute(a)
Gustas = db.session.query(Gusta).filter(Gusta.Usuario_Nicka == current_user, Gusta.id == r.id).count()
Comentarios = db.session.query(Comenta).filter(Comenta.Usuario_Nicka == current_user, Comenta.id == r.id).count()
Guardados = db.session.query(Guarda).filter(Guarda.Usuario_Nicka == current_user, Guarda.id == r.id).count()
fila = {
"id": r.id,
"nick": current_user,
"descripcion":r.descripcion,
"timestamp":r.timestamp,
"pdf": 'http://51.255.50.207:5000/display2/' + a.pdf,
"nlikes": Gustas,
"ncomentarios": Comentarios,
"nguardados": Guardados,
"usuario": resulta.nombre_de_usuario
}
return fila
@app.route('/display2/<filename>')
def pdf(filename):
return redirect(url_for('static', filename='pdf/' + filename),code = 301)
@app.route('/getPostsRecomendados', methods=['GET'])
@token_required
def getPostsRecomendados(current_user):
a = select([Usuario.Nombre_de_usuario]).where((Usuario.nick == current_user))
resultb = db.session.execute(a)
Nombre_de_usuario = ""
for b in resultb:
Nombre_de_usuario=b.Nombre_de_usuario
s = select([Publicacion]).where(Publicacion.Usuario_Nicka == current_user).order_by(Publicacion.id.desc())
results = db.session.execute(s)
vector0 = []
vector1 = []
vector2 = []
for r in results:
    print(str(r.id))
    vector0.append(r.id)
    vector1.append(str(r.descripcion))
    vector2.append(str(r.timestamp))
print(vector0)
fila = {
"id": vector0,
"nick": current_user,
"descripcion": vector1,
"timestamp": vector2,
"usuario": Nombre_de_usuario
}
return fila
def check_email(email):
regex = '^[a-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w{2,3}$'
if(re.search(regex,email)):
return True
else:
return False
def check_password(password):
regex = r'^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z])(?=.*[*.!@$%^&(){}\[\]:;<>,.?/~_+\-=|\\]).{8,32}$'
if(re.search(regex,password)):
return True
else:
return False
if __name__ == '__main__':
app.run(debug=True)
| true
| true
|
7909e81118783589e22c3f8571d4f2cbf4e44bdc
| 422
|
py
|
Python
|
site-code/blog/views.py
|
notmehul/Blogie
|
f4972565d8a042f3984c4c9f8cee901adebc0d44
|
[
"WTFPL"
] | null | null | null |
site-code/blog/views.py
|
notmehul/Blogie
|
f4972565d8a042f3984c4c9f8cee901adebc0d44
|
[
"WTFPL"
] | null | null | null |
site-code/blog/views.py
|
notmehul/Blogie
|
f4972565d8a042f3984c4c9f8cee901adebc0d44
|
[
"WTFPL"
] | null | null | null |
from django.views import generic
from .models import Post
# Create your views here.
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
# only published posts (status=1) are shown
# order_by('-created_on') puts the newest posts at the top
template_name = 'index.html'
class PostDetail(generic.DetailView):
model = Post
template_name = 'post_detail.html'
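# Hedged sketch (not part of the original repo): how these class-based views
# would typically be wired up in the app's urls.py. The route patterns and the
# assumption that Post has a 'slug' field are illustrative only.
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('', views.PostList.as_view(), name='home'),
#       path('<slug:slug>/', views.PostDetail.as_view(), name='post_detail'),
#   ]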
| 32.461538
| 73
| 0.736967
|
from django.views import generic
from .models import Post
class PostList(generic.ListView):
queryset = Post.objects.filter(status=1).order_by('-created_on')
template_name = 'index.html'
class PostDetail(generic.DetailView):
model = Post
template_name = 'post_detail.html'
| true
| true
|
7909e906ab74d7282ae70509be6ade65b9a6e4ef
| 1,912
|
py
|
Python
|
menus/nodes/hermite.py
|
fsanges/neMenuManager
|
733a281b1e0217ff24bc2fe9adf74c97a4715a2b
|
[
"Apache-2.0"
] | 1
|
2021-01-28T05:11:55.000Z
|
2021-01-28T05:11:55.000Z
|
menus/nodes/hermite.py
|
fsanges/neMenuManager
|
733a281b1e0217ff24bc2fe9adf74c97a4715a2b
|
[
"Apache-2.0"
] | null | null | null |
menus/nodes/hermite.py
|
fsanges/neMenuManager
|
733a281b1e0217ff24bc2fe9adf74c97a4715a2b
|
[
"Apache-2.0"
] | null | null | null |
try:
from maya import cmds
except ImportError:
pass
from menus import typeIDs as nem_typeids, base as nem_base
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
def createOutputJnts(*args):
## Create outputs for the selected hermite nodes
exitB = "Exit"
doitB = "doIt"
if not cmds.ls(sl=True):
logger.warning("You must have a {} selected!".format(nem_typeids.HA_NODENAME))
return
confirm = cmds.confirmDialog(title="Create?", message="Ok?", button=doitB, db=doitB, b=exitB, cb=exitB)
if confirm == doitB:
for e in cmds.ls(sl=True):
outCount = cmds.getAttr("{}.outputCount".format(e))
for x in range(outCount):
loc = cmds.joint(n='{}_out{}'.format(e, x))
cmds.select(clear=True)
cmds.connectAttr("{}.outputs[{}].translate".format(e, x), "{}.translate".format(loc), f=True)
cmds.connectAttr("{}.outputs[{}].rotate".format(e, x), "{}.rotate".format(loc), f=True)
cmds.connectAttr("{}.outputs[{}].scale".format(e, x), "{}.scale".format(loc), f=True)
class HermiteArraySOUTH(nem_base.MenuBase):
ID = nem_typeids.HASOUTH
MENUNAME = nem_typeids.HASOUTH_MENUNAME
NODENAME = nem_typeids.HA_NODENAME
FUNCTION = createOutputJnts
def __init__(self):
nem_base.MenuBase.__init__(self,
isRadial=nem_typeids.HASOUTH_ISRADIAL,
radialPos=nem_typeids.HASOUTH_RADIALPOS)
class HermiteArrayNORTH(nem_base.MenuBase):
ID = nem_typeids.HANORTH
MENUNAME = nem_typeids.HANORTH_MENUNAME
NODENAME = nem_typeids.HA_NODENAME
def __init__(self):
nem_base.MenuBase.__init__(self,
isRadial=nem_typeids.HANORTH_ISRADIAL,
radialPos=nem_typeids.HANORTH_RADIALPOS)
| 37.490196
| 109
| 0.625
|
try:
from maya import cmds
except ImportError:
pass
from menus import typeIDs as nem_typeids, base as nem_base
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
def createOutputJnts(*args):
exitB = "Exit"
doitB = "doIt"
if not cmds.ls(sl=True):
logger.warning("You must have a {} selected!".format(nem_typeids.HA_NODENAME))
return
confirm = cmds.confirmDialog(title="Create?", message="Ok?", button=doitB, db=doitB, b=exitB, cb=exitB)
if confirm == doitB:
for e in cmds.ls(sl=True):
outCount = cmds.getAttr("{}.outputCount".format(e))
for x in range(outCount):
loc = cmds.joint(n='{}_out{}'.format(e, x))
cmds.select(clear=True)
cmds.connectAttr("{}.outputs[{}].translate".format(e, x), "{}.translate".format(loc), f=True)
cmds.connectAttr("{}.outputs[{}].rotate".format(e, x), "{}.rotate".format(loc), f=True)
cmds.connectAttr("{}.outputs[{}].scale".format(e, x), "{}.scale".format(loc), f=True)
class HermiteArraySOUTH(nem_base.MenuBase):
ID = nem_typeids.HASOUTH
MENUNAME = nem_typeids.HASOUTH_MENUNAME
NODENAME = nem_typeids.HA_NODENAME
FUNCTION = createOutputJnts
def __init__(self):
nem_base.MenuBase.__init__(self,
isRadial=nem_typeids.HASOUTH_ISRADIAL,
radialPos=nem_typeids.HASOUTH_RADIALPOS)
class HermiteArrayNORTH(nem_base.MenuBase):
ID = nem_typeids.HANORTH
MENUNAME = nem_typeids.HANORTH_MENUNAME
NODENAME = nem_typeids.HA_NODENAME
def __init__(self):
nem_base.MenuBase.__init__(self,
isRadial=nem_typeids.HANORTH_ISRADIAL,
radialPos=nem_typeids.HANORTH_RADIALPOS)
| true
| true
|
7909e90e316520511b46e1e4501ecb55ca8cb104
| 640
|
py
|
Python
|
contrib/codeanalysis/src/python/pants/contrib/codeanalysis/register.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | 1
|
2021-05-05T18:58:28.000Z
|
2021-05-05T18:58:28.000Z
|
contrib/codeanalysis/src/python/pants/contrib/codeanalysis/register.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | null | null | null |
contrib/codeanalysis/src/python/pants/contrib/codeanalysis/register.py
|
revl/pants
|
8ad83e4ca80c095d44efceafd8b41e575da39c65
|
[
"Apache-2.0"
] | 3
|
2020-06-30T08:28:13.000Z
|
2021-07-28T09:35:57.000Z
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.codeanalysis.tasks.bundle_entries import BundleEntries
from pants.contrib.codeanalysis.tasks.extract_java import ExtractJava
from pants.contrib.codeanalysis.tasks.index_java import IndexJava
def register_goals():
task(name="kythe-java-extract", action=ExtractJava).install("index")
task(name="kythe-java-index", action=IndexJava).install("index")
task(name="bundle-entries", action=BundleEntries).install("index")
| 42.666667
| 73
| 0.798438
|
from pants.goal.task_registrar import TaskRegistrar as task
from pants.contrib.codeanalysis.tasks.bundle_entries import BundleEntries
from pants.contrib.codeanalysis.tasks.extract_java import ExtractJava
from pants.contrib.codeanalysis.tasks.index_java import IndexJava
def register_goals():
task(name="kythe-java-extract", action=ExtractJava).install("index")
task(name="kythe-java-index", action=IndexJava).install("index")
task(name="bundle-entries", action=BundleEntries).install("index")
| true
| true
|
7909e9bbc3387e387d1010e11dc2be833c74a2f4
| 2,164
|
py
|
Python
|
Arrays/python/compareStringByFrequencyOfSmallestCharacter.py
|
kiruba-r11/DSA-guide
|
0687bddf81a14955fa0740610ade3b67bcdf97fb
|
[
"MIT"
] | 60
|
2020-10-04T13:19:26.000Z
|
2022-01-23T09:09:27.000Z
|
Arrays/python/compareStringByFrequencyOfSmallestCharacter.py
|
kiruba-r11/DSA-guide
|
0687bddf81a14955fa0740610ade3b67bcdf97fb
|
[
"MIT"
] | 202
|
2020-10-04T13:03:46.000Z
|
2021-07-29T07:39:15.000Z
|
Arrays/python/compareStringByFrequencyOfSmallestCharacter.py
|
kiruba-r11/DSA-guide
|
0687bddf81a14955fa0740610ade3b67bcdf97fb
|
[
"MIT"
] | 169
|
2020-10-04T13:21:09.000Z
|
2022-03-20T16:59:35.000Z
|
"""
Problem Statement:
Let the function f(s) be the frequency of the lexicographically smallest character in a non-empty string s. For example, if s = "dcce" then f(s) = 2 because the lexicographically smallest character is 'c', which has a frequency of 2.
You are given an array of strings words and another array of query strings queries. For each query queries[i], count the number of words in words such that f(queries[i]) < f(W) for each W in words.
Return an integer array answer, where each answer[i] is the answer to the ith query.
Example 1:
Input: queries = ["cbd"], words = ["zaaaz"]
Output: [1]
"""
from collections import Counter
def numSmallerByFrequency(queries, words):
# Calculate the frequency of smallest character for each word of query array
fre_queries = fre(queries)
# Calculate the frequency of smallest character for each word of words array & sort it in reverse order.
fre_words = sorted(fre(words))[::-1]
res = []
# compare reach frequency in fre_queries with each element of fre_words & increase count accordingly
for q in fre_queries:
count = 0
for w in fre_words:
if w <= q:
break
else:
count += 1
res.append(count)
return res
# A function to find the frequency of smallest character.
def fre(arrs):
# Sort the array
sorted_arrs = [sorted(arr) for arr in arrs]
fre = []
for arr in sorted_arrs:
fre.append(list(Counter(arr).items())[0][1])
return fre
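# Worked example (added for illustration, using only the functions defined above;
# the values follow the problem statement: f("dcce") == 2, and for
# queries=["cbd"], words=["zaaaz"] the expected answer is [1]):
assert fre(["dcce"]) == [2]
assert numSmallerByFrequency(["cbd"], ["zaaaz"]) == [1]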
# Main begins here
input_queries = input('Enter elements of a queries separated by space: ')
print("\n")
# This would split the input string separated by spaces into string array
queries_list = input_queries.split()
input_words = input('Enter elements of a words separated by space: ')
print("\n")
# This would split the input string separated by spaces into string array
words_list = input_words.split()
# print(queries_list)
# print(words_list)
ans = numSmallerByFrequency(queries_list,words_list)
print("Output: ",ans)
| 36.677966
| 233
| 0.66451
|
from collections import Counter
def numSmallerByFrequency(queries, words):
fre_queries = fre(queries)
fre_words = sorted(fre(words))[::-1]
res = []
for q in fre_queries:
count = 0
for w in fre_words:
if w <= q:
break
else:
count += 1
res.append(count)
return res
def fre(arrs):
sorted_arrs = [sorted(arr) for arr in arrs]
fre = []
for arr in sorted_arrs:
fre.append(list(Counter(arr).items())[0][1])
return fre
input_queries = input('Enter elements of a queries separated by space: ')
print("\n")
queries_list = input_queries.split()
input_words = input('Enter elements of a words separated by space: ')
print("\n")
words_list = input_words.split()
ans = numSmallerByFrequency(queries_list,words_list)
print("Output: ",ans)
| true
| true
|
7909ea27095366a05767c1ea8be765cbd9e262f0
| 1,411
|
py
|
Python
|
aiida_abinit/utils/resources.py
|
azadoks/aiida-abinit
|
82c9f54e3d77152bd270dbfeb756f9cce4d327c7
|
[
"MIT"
] | null | null | null |
aiida_abinit/utils/resources.py
|
azadoks/aiida-abinit
|
82c9f54e3d77152bd270dbfeb756f9cce4d327c7
|
[
"MIT"
] | 5
|
2021-04-13T13:30:20.000Z
|
2021-12-07T16:56:35.000Z
|
aiida_abinit/utils/resources.py
|
azadoks/aiida-abinit
|
82c9f54e3d77152bd270dbfeb756f9cce4d327c7
|
[
"MIT"
] | 2
|
2020-10-21T16:10:04.000Z
|
2021-06-18T12:13:43.000Z
|
# -*- coding: utf-8 -*-
"""Utilities for calculation job resources."""
__all__ = (
'get_default_options',
'seconds_to_timelimit',
)
def get_default_options(max_num_machines: int = 1, max_wallclock_seconds: int = 1800, with_mpi: bool = False) -> dict:
"""Return an instance of the options dictionary with the minimally required parameters for a `CalcJob`.
:param max_num_machines: set the number of nodes, default=1
:param max_wallclock_seconds: set the maximum number of wallclock seconds, default=1800
:param with_mpi: whether to run the calculation with MPI enabled
"""
return {
'resources': {
'num_machines': int(max_num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': with_mpi,
}
def seconds_to_timelimit(seconds: int) -> str:
"""Convert seconds into a Slum-notation time limit for the ABINIT flag `--timelimit`.
:param seconds: time limit in seconds
:returns: Slurm-notation time limit (hours:minutes:seconds)
"""
days = seconds // 86400
seconds -= days * 86400
hours = seconds // 3600
seconds -= hours * 3600
minutes = seconds // 60
seconds -= minutes * 60
timelimit = ''
if days > 0:
timelimit += f'{days}-'
if hours > 0:
timelimit += f'{hours:02d}:'
timelimit += f'{minutes:02d}:{seconds:02d}'
return timelimit
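# Illustrative usage, not part of the original module (values follow directly
# from the arithmetic above): 90061 s is 1 day, 1 hour, 1 minute and 1 second.
if __name__ == '__main__':
    assert seconds_to_timelimit(90061) == '1-01:01:01'
    assert get_default_options(max_num_machines=2)['resources']['num_machines'] == 2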
| 31.355556
| 118
| 0.653437
|
__all__ = (
'get_default_options',
'seconds_to_timelimit',
)
def get_default_options(max_num_machines: int = 1, max_wallclock_seconds: int = 1800, with_mpi: bool = False) -> dict:
return {
'resources': {
'num_machines': int(max_num_machines)
},
'max_wallclock_seconds': int(max_wallclock_seconds),
'withmpi': with_mpi,
}
def seconds_to_timelimit(seconds: int) -> str:
days = seconds // 86400
seconds -= days * 86400
hours = seconds // 3600
seconds -= hours * 3600
minutes = seconds // 60
seconds -= minutes * 60
timelimit = ''
if days > 0:
timelimit += f'{days}-'
if hours > 0:
timelimit += f'{hours:02d}:'
timelimit += f'{minutes:02d}:{seconds:02d}'
return timelimit
| true
| true
|
7909ea75221ff41707047de51661627abe857855
| 734
|
py
|
Python
|
ch08/half_float_network.py
|
gangigammo/deep-learning-1
|
3fe803514c3733d8715cf1211a82ffd8ea660af2
|
[
"MIT"
] | null | null | null |
ch08/half_float_network.py
|
gangigammo/deep-learning-1
|
3fe803514c3733d8715cf1211a82ffd8ea660af2
|
[
"MIT"
] | null | null | null |
ch08/half_float_network.py
|
gangigammo/deep-learning-1
|
3fe803514c3733d8715cf1211a82ffd8ea660af2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys, os
sys.path.append(os.pardir)  # setting so that files in the parent directory can be imported
import numpy as np
import matplotlib.pyplot as plt
from ch08.deep_convnet import DeepConvNet
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
sampled = 10000  # limit the sample size to speed things up
x_test = x_test[:sampled]
t_test = t_test[:sampled]
print("caluculate accuracy (float64) ... ")
print(network.accuracy(x_test, t_test))
# cast to float16
x_test = x_test.astype(np.float16)
for param in network.params.values():
param[...] = param.astype(np.float16)
print("caluculate accuracy (float16) ... ")
print(network.accuracy(x_test, t_test))
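# Hedged illustration (not part of the original script): float16 keeps only a
# 10-bit mantissa, so values lose precision when cast, e.g. np.float16(0.1) is
# about 0.09998 rather than exactly 0.1. That is why the accuracy printed above
# may differ slightly between the float64 and float16 runs.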
| 25.310345
| 64
| 0.757493
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
import matplotlib.pyplot as plt
from ch08.deep_convnet import DeepConvNet
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
sampled = 10000
x_test = x_test[:sampled]
t_test = t_test[:sampled]
print("caluculate accuracy (float64) ... ")
print(network.accuracy(x_test, t_test))
x_test = x_test.astype(np.float16)
for param in network.params.values():
param[...] = param.astype(np.float16)
print("caluculate accuracy (float16) ... ")
print(network.accuracy(x_test, t_test))
| true
| true
|
7909eb2dc8c35601cf7bcd36512e95f0ddda4dd6
| 948
|
py
|
Python
|
python/python_function/func.py
|
technonac/studycode
|
3f952859735d27435a79e8aae674eac9fd277bd8
|
[
"Apache-2.0"
] | null | null | null |
python/python_function/func.py
|
technonac/studycode
|
3f952859735d27435a79e8aae674eac9fd277bd8
|
[
"Apache-2.0"
] | null | null | null |
python/python_function/func.py
|
technonac/studycode
|
3f952859735d27435a79e8aae674eac9fd277bd8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
函数
在python中函数默认的返回对象是None
"""
# the default return value is None
def hello():
print("Hello World!")
print(type(hello()))
# multiple objects can be returned; by default they come back as a tuple
def foo():
return ['xyz', 1000, -98.6]
x, y, z = foo()
print(x, y, z)
# keyword arguments
def foo1(x):
print(x)
foo1(x='abc')
"""
创建函数
def function_name(arguments):
"function documentation string"
function body suite
"""
def helloSomeOne(who):
"""hello to someone"""
print("hello" + who)
print(helloSomeOne.__doc__)
"""
内部/内嵌函数
如果内部函数的定义包含了在外部函数里定义的对象的引用,内部函数被称为闭包
"""
def fo():
def ba():
print("ba called")
print("fo called")
ba()
fo()
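# The fo/ba pair above only nests one function inside another; it does not
# capture anything from the enclosing scope, so it is not yet a closure.
# A minimal closure sketch (the names are illustrative, not from the original):
def make_counter():
    count = [0]  # captured by the inner function
    def increment():
        count[0] += 1
        return count[0]
    return increment

counter = make_counter()
print(counter())  # 1
print(counter())  # 2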
"""
传递函数
函数是可以被引用的(访问或者以其他变量作为别名)
对对象是函数,这个对象的所有别名都是可以调用的
"""
def foo():
print("in foo()")
bar = foo
bar()
def convert(func, seq):
return [func(eachNum) for eachNum in seq]
myseq = (123, 45.67, -6.2e8, 999999L)
print(convert(int, myseq))
print(convert(float, myseq))
| 10.896552
| 45
| 0.613924
|
"""
函数
在python中函数默认的返回对象是None
"""
def hello():
print("Hello World!")
print(type(hello()))
def foo():
return ['xyz', 1000, -98.6]
x, y, z = foo()
print(x, y, z)
def foo1(x):
print(x)
foo1(x='abc')
"""
创建函数
def function_name(arguments):
"function documentation string"
function body suite
"""
def helloSomeOne(who):
"""hello to someone"""
print("hello" + who)
print(helloSomeOne.__doc__)
"""
内部/内嵌函数
如果内部函数的定义包含了在外部函数里定义的对象的引用,内部函数被称为闭包
"""
def fo():
def ba():
print("ba called")
print("fo called")
ba()
fo()
"""
传递函数
函数是可以被引用的(访问或者以其他变量作为别名)
对对象是函数,这个对象的所有别名都是可以调用的
"""
def foo():
print("in foo()")
bar = foo
bar()
def convert(func, seq):
return [func(eachNum) for eachNum in seq]
myseq = (123, 45.67, -6.2e8, 999999L)
print(convert(int, myseq))
print(convert(float, myseq))
| false
| true
|
7909ed88d7ea2b91ef3e231d82cd4d1536b93029
| 1,264
|
py
|
Python
|
models/hr_job_task.py
|
aroodooteam/aro_hr
|
bf26f026593493486b4b5c13b26b9b47b9fe3825
|
[
"BSD-2-Clause"
] | null | null | null |
models/hr_job_task.py
|
aroodooteam/aro_hr
|
bf26f026593493486b4b5c13b26b9b47b9fe3825
|
[
"BSD-2-Clause"
] | null | null | null |
models/hr_job_task.py
|
aroodooteam/aro_hr
|
bf26f026593493486b4b5c13b26b9b47b9fe3825
|
[
"BSD-2-Clause"
] | 3
|
2017-09-11T08:02:11.000Z
|
2020-04-04T08:13:23.000Z
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models
class HrJobTask(models.Model):
_name = 'hr.job.task'
name = fields.Char(string='Description')
job_id = fields.Many2one(comodel_name='hr.job', string='Job')
categ_id = fields.Many2one(comodel_name='hr.task.categ', string='Category')
| 42.133333
| 79
| 0.624209
| true
| true
|
|
7909ee93aef6c55de364b6c3a846268c3d44a1c1
| 2,664
|
py
|
Python
|
Homework/forwarding/grade.py
|
DaixuanLi/Router-Lab
|
4d677d38ae02c24af2fef1a0528612c6b86d2758
|
[
"Linux-OpenIB"
] | null | null | null |
Homework/forwarding/grade.py
|
DaixuanLi/Router-Lab
|
4d677d38ae02c24af2fef1a0528612c6b86d2758
|
[
"Linux-OpenIB"
] | null | null | null |
Homework/forwarding/grade.py
|
DaixuanLi/Router-Lab
|
4d677d38ae02c24af2fef1a0528612c6b86d2758
|
[
"Linux-OpenIB"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import sys
import os
import json
import subprocess
import time
from os.path import isfile, join
import random
import string
import signal
import glob
import traceback
prefix = 'forwarding'
exe = prefix
if len(sys.argv) > 1:
exe = sys.argv[1]
#try:
import pyshark
#except Exception:
#print('Install pyshark (pip3 install pyshark) first!')
#sys.exit(1)
def write_grade(grade, total):
data = {}
data['grade'] = grade
if os.isatty(1):
print('Passed: {}/{}'.format(grade, total))
else:
print(json.dumps(data))
sys.exit(0)
if __name__ == '__main__':
if sys.version_info[0] != 3:
print("Plz use python3")
sys.exit()
if os.isatty(1):
print('Removing all output files')
os.system('rm -f data/{}_user*.out'.format(prefix))
total = len(glob.glob("data/{}_input*.pcap".format(prefix)))
grade = 0
for i in range(1, total+1):
in_file = "data/{}_input{}.pcap".format(prefix, i)
out_file = "data/{}_user{}.out".format(prefix, i)
ans_file = "data/{}_output{}.out".format(prefix, i)
if os.isatty(1):
print('Running \'./{} < {} > {}\''.format(exe, in_file, out_file))
p = subprocess.Popen(['./{}'.format(exe)], stdout=open(out_file, 'w'), stdin=open(in_file, 'r'))
start_time = time.time()
while p.poll() is None:
if time.time() - start_time > 1:
p.kill()
try:
out = [line.strip() for line in open(out_file, 'r').readlines() if line.strip()]
ans = [line.strip() for line in open(ans_file, 'r').readlines() if line.strip()]
if out == ans:
grade += 1
elif os.isatty(1):
limit = 1
count = 0
reader = pyshark.FileCapture(in_file)
packets = list(reader)
print('Wrong Answer (showing only first {} packets):'.format(limit))
for i in range(len(ans)):
if i >= len(out) or out[i] != ans[i]:
count += 1
print('Answer is wrong for packet #{}: {}'.format(i, packets[i]['ip']))
if count == limit:
break
print('Diff: ')
os.system('diff -u {} {} | head -n 10'.format(out_file, ans_file))
reader.close()
except Exception:
if os.isatty(1):
print('Unexpected exception caught:')
traceback.print_exc()
write_grade(grade, total)
| 28.042105
| 104
| 0.522523
|
import re
import sys
import os
import json
import subprocess
import time
from os.path import isfile, join
import random
import string
import signal
import glob
import traceback
prefix = 'forwarding'
exe = prefix
if len(sys.argv) > 1:
exe = sys.argv[1]
import pyshark
def write_grade(grade, total):
data = {}
data['grade'] = grade
if os.isatty(1):
print('Passed: {}/{}'.format(grade, total))
else:
print(json.dumps(data))
sys.exit(0)
if __name__ == '__main__':
if sys.version_info[0] != 3:
print("Plz use python3")
sys.exit()
if os.isatty(1):
print('Removing all output files')
os.system('rm -f data/{}_user*.out'.format(prefix))
total = len(glob.glob("data/{}_input*.pcap".format(prefix)))
grade = 0
for i in range(1, total+1):
in_file = "data/{}_input{}.pcap".format(prefix, i)
out_file = "data/{}_user{}.out".format(prefix, i)
ans_file = "data/{}_output{}.out".format(prefix, i)
if os.isatty(1):
print('Running \'./{} < {} > {}\''.format(exe, in_file, out_file))
p = subprocess.Popen(['./{}'.format(exe)], stdout=open(out_file, 'w'), stdin=open(in_file, 'r'))
start_time = time.time()
while p.poll() is None:
if time.time() - start_time > 1:
p.kill()
try:
out = [line.strip() for line in open(out_file, 'r').readlines() if line.strip()]
ans = [line.strip() for line in open(ans_file, 'r').readlines() if line.strip()]
if out == ans:
grade += 1
elif os.isatty(1):
limit = 1
count = 0
reader = pyshark.FileCapture(in_file)
packets = list(reader)
print('Wrong Answer (showing only first {} packets):'.format(limit))
for i in range(len(ans)):
if i >= len(out) or out[i] != ans[i]:
count += 1
print('Answer is wrong for packet #{}: {}'.format(i, packets[i]['ip']))
if count == limit:
break
print('Diff: ')
os.system('diff -u {} {} | head -n 10'.format(out_file, ans_file))
reader.close()
except Exception:
if os.isatty(1):
print('Unexpected exception caught:')
traceback.print_exc()
write_grade(grade, total)
| true
| true
|
7909eeb85a032f99de28e9730df6b8357736e2e0
| 234
|
py
|
Python
|
tpd_pete/template/template.py
|
totalpunch/TPD-Pete
|
e14f47d3fa05f628a5f4422917d1ff4ce4e56e88
|
[
"MIT"
] | 1
|
2020-04-24T11:55:31.000Z
|
2020-04-24T11:55:31.000Z
|
tpd_pete/template/template.py
|
totalpunch/TPD-Pete
|
e14f47d3fa05f628a5f4422917d1ff4ce4e56e88
|
[
"MIT"
] | 34
|
2020-04-24T10:55:29.000Z
|
2021-07-25T09:29:32.000Z
|
tpd_pete/template/template.py
|
totalpunch/TPD-Pete
|
e14f47d3fa05f628a5f4422917d1ff4ce4e56e88
|
[
"MIT"
] | 1
|
2020-04-24T11:03:28.000Z
|
2020-04-24T11:03:28.000Z
|
CLOUDFORMATION_TEMPLATE = """
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
  s3FileName:
    Type: String
  environment:
    Type: String
  deploymentBucket:
    Type: String
Resources:
  # Place your AWS resources here
"""
| 13.764706
| 38
| 0.709402
|
CLOUDFORMATION_TEMPLATE = """
AWSTemplateFormatVersion: '2010-09-09'
Parameters:
  s3FileName:
    Type: String
  environment:
    Type: String
  deploymentBucket:
    Type: String
Resources:
  # Place your AWS resources here
"""
| true
| true
|
7909eff5f75c95d6d3185c95679c3ff3910616dc
| 203
|
py
|
Python
|
src/__init__.py
|
Farhad-Shabani/TSETMC_Dashboard
|
8279bd9579f02447b9ab70dfe491d56713810f51
|
[
"MIT"
] | 5
|
2021-04-10T17:04:48.000Z
|
2021-09-17T11:49:53.000Z
|
src/__init__.py
|
Farhad-Shabani/TSETMC_Dashboard
|
8279bd9579f02447b9ab70dfe491d56713810f51
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
Farhad-Shabani/TSETMC_Dashboard
|
8279bd9579f02447b9ab70dfe491d56713810f51
|
[
"MIT"
] | 3
|
2021-07-04T16:52:10.000Z
|
2021-08-18T15:17:10.000Z
|
from .Essential_Functions import URL_Maker, Negative_Detector
from .Scrape_Index import Scrape_Index
from .Scrape_StockInfo import StockInfo
from .Scrape_StockData_Realtime import Realtime_StockData
| 40.6
| 62
| 0.871921
|
from .Essential_Functions import URL_Maker, Negative_Detector
from .Scrape_Index import Scrape_Index
from .Scrape_StockInfo import StockInfo
from .Scrape_StockData_Realtime import Realtime_StockData
| true
| true
|
7909f1a0ee647d79a43a6c909ebad14af1caea03
| 3,228
|
py
|
Python
|
SchemaCollaboration/datapackage_to_documentation/main.py
|
Swiss-Polar-Institute/schema-collaboration-arctic-century
|
e02b598c7b11b26815be6e9811ff355757263995
|
[
"MIT"
] | 15
|
2020-07-09T01:15:59.000Z
|
2021-09-24T14:47:12.000Z
|
SchemaCollaboration/datapackage_to_documentation/main.py
|
Swiss-Polar-Institute/schema-collaboration-arctic-century
|
e02b598c7b11b26815be6e9811ff355757263995
|
[
"MIT"
] | 21
|
2020-10-27T11:33:57.000Z
|
2022-02-27T11:03:38.000Z
|
SchemaCollaboration/datapackage_to_documentation/main.py
|
Swiss-Polar-Institute/schema-collaboration-arctic-century
|
e02b598c7b11b26815be6e9811ff355757263995
|
[
"MIT"
] | 3
|
2020-08-12T09:33:17.000Z
|
2021-07-12T09:31:33.000Z
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
# This file designed in a way that is independent of Django
# in order to be easy (but changes are required) to be used
# outside Django in the future
# That's why is using jinja2 as a template language instead of
# Django's template language.
#
# Example of use:
# Make sure to have jinja2 template language:
# python3 -m venv venv
# pip3 install jinja2
#
# In a Python file:
# import json
# import main # or the name that this file is saved as...
#
# datapackage = json.load(open("datapackage.json"))
# main.datapackage_to_markdown(datapackage)
def datapackage_to_markdown(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: str with the Markdown documentation
"""
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
def datapackage_to_pdf(datapackage):
"""
datapackage: datapackage schema as a dictionary
returns: binary content with the PDF or None if the conversion failed.
"""
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
raise RuntimeError(f'Command {command_line} failed with exit code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
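# Hedged usage sketch (mirrors the markdown example in the comment block above;
# the file names are illustrative and pandoc must be installed on PATH):
#   import json
#   pdf_bytes = datapackage_to_pdf(json.load(open("datapackage.json")))
#   with open("datapackage.pdf", "wb") as out:
#       out.write(pdf_bytes)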
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset license
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Desription: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
| 27.355932
| 108
| 0.656753
|
import os
import subprocess
from tempfile import NamedTemporaryFile
from jinja2 import Template
def datapackage_to_markdown(datapackage):
template = Template(template_to_md)
rendered = template.render(datapackage)
return rendered.encode('utf-8')
def datapackage_to_pdf(datapackage):
markdown = datapackage_to_markdown(datapackage)
f = NamedTemporaryFile(suffix='.pdf', delete=False)
f.close()
command_line = ['pandoc', '--to=latex', f'--output={f.name}']
try:
pandoc_process = subprocess.run(command_line,
input=markdown)
except FileNotFoundError:
os.unlink(f.name)
raise OSError(f'FileNotFoundError trying to execute: {command_line}')
except subprocess.CalledProcessError:
os.unlink(f.name)
raise RuntimeError(f'CalledProcessError trying to execute: {command_line}')
if pandoc_process.returncode != 0:
os.unlink(f.name)
raise RuntimeError(f'Command {command_line} failed with exit code {pandoc_process.returncode}')
pdf_file = open(f.name, 'rb')
pdf_content = pdf_file.read()
os.unlink(f.name)
return pdf_content
template_to_md = '''# {{ title }}
## Dataset description
{{ description }}
{% if contributors|length == 1 %}
## Contributor
{% else %}
## Contributors
{% endif %}{% for contributor in contributors %} * {{ contributor.title }} ({{ contributor.role }})
{% endfor %}
{% if keywords|length == 1 %}
## Keyword
{% else %}## Keywords
{% endif %}{% for keyword in keywords %} * {{ keyword }}
{% endfor %}
## Version
{{ version }}
## Homepage
[{{ homepage }}]({{ homepage }})
{% if licenses|length == 1 %}
## Dataset license
{% else %}
## Dataset license
{% endif %}{% for license in licenses %} * {{ license.title }} ([{{ license.name }}]({{ license.path }}))
{% endfor %}
## Resources
{% for resource in resources %}
### {{ resource.title }}
* Name: {{ resource.name }}
* Profile: {{ resource.profile }}
* Path: {{ resource.path }}
{% if resource.format %} * Format: {{ resource.format }}{% endif %}
{% if resource.encoding %} * Encoding: {{ resource.encoding }}{% endif %}
{% if resource.description %} * Desription: {{ resource.description }}{% endif %}
{% if resource.schema.fields %}
#### Fields
{% for field in resource.schema.fields %} * **{{ field.name }}** ({{ field.type }}): {{ field.description }}
{% endfor %}
{% endif %}
{% endfor %}
'''
| true
| true
|
7909f1e0dda631692135da74c065a76d22cccec5
| 93
|
py
|
Python
|
doc/src/site/sphinx/extensions/contentui/__init__.py
|
krmartin/sparkling-water
|
494f23f9b8891288b77a78fe3620fd8cee8e89b2
|
[
"Apache-2.0"
] | 990
|
2015-01-06T09:33:30.000Z
|
2022-03-13T04:34:13.000Z
|
doc/src/site/sphinx/extensions/contentui/__init__.py
|
krmartin/sparkling-water
|
494f23f9b8891288b77a78fe3620fd8cee8e89b2
|
[
"Apache-2.0"
] | 993
|
2015-01-08T19:40:12.000Z
|
2022-03-31T12:09:29.000Z
|
doc/src/site/sphinx/extensions/contentui/__init__.py
|
krmartin/sparkling-water
|
494f23f9b8891288b77a78fe3620fd8cee8e89b2
|
[
"Apache-2.0"
] | 439
|
2015-01-13T06:59:47.000Z
|
2022-03-31T06:02:32.000Z
|
# The code for this extension is based on https://github.com/ulrobix/sphinxcontrib-contentui
| 46.5
| 92
| 0.806452
| true
| true
|
|
7909f23d483986233ec2317982a20122cfda9c23
| 6,795
|
py
|
Python
|
WFSOverlayServer.py
|
relet/kivyMaps
|
8b1fb0d4403ce230b0b4082ccca79aaf883207fa
|
[
"MIT"
] | 10
|
2015-05-09T12:12:44.000Z
|
2018-11-30T19:38:55.000Z
|
WFSOverlayServer.py
|
relet/kivyMaps
|
8b1fb0d4403ce230b0b4082ccca79aaf883207fa
|
[
"MIT"
] | null | null | null |
WFSOverlayServer.py
|
relet/kivyMaps
|
8b1fb0d4403ce230b0b4082ccca79aaf883207fa
|
[
"MIT"
] | 5
|
2015-03-20T22:46:48.000Z
|
2020-06-16T04:06:31.000Z
|
from projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
GMLNS = "http://www.opengis.net/gml"
try:
from pyproj import Proj
from lxml.etree import ElementTree as ET
except:
# try:
from xml.etree import ElementTree as ET
# except:
# pass
class WFSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap = 'Roadmap') # default
type = "wfs" # TODO: replace handling in mapviewer with action handlers in the overlay class
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def load(self, url):
# read from internet
blocksize = 4096
self.progress_callback(0)
fd = urlopen(url)
idata = fd.read(blocksize)
loaded = blocksize
while True:
bdata = fd.read(blocksize)
if not bdata: break
loaded += blocksize
if self.progress_callback:
self.progress_callback(loaded)
idata += bdata
fd.close()
self.progress_callback(-1)
return idata
def findGeometry(self, elem):
geoms = elem.find("{%s}Point" % GMLNS)
if geoms is not None:
return geoms
geoms = elem.find("{%s}LinearRing" % GMLNS)
if geoms is not None:
return geoms
for c in elem.getchildren():
geom = self.findGeometry(c)
if geom is not None:
return geom
def findGeometries(self, members):
geoms = []
for m in members:
geom = self.findGeometry(m)
if geom is not None:
geoms.append(geom)
return geoms
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1])
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
try:
xml = self.load('http://' + self.provider_host + url)
tree = ET.fromstring(xml)
members = tree.findall("{%s}featureMember" % GMLNS)
self.geometries = self.findGeometries(members)
self.cache[key] = self.geometries
return self.geometries
except Exception,e:
Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
image = None
def getInfoText(self, member):
fields = member.getchildren()[0].getchildren()
info = ""
for field in fields:
if field.text is not None and field.text.strip() != "":
info += "%s: %s\n" % (field.tag[field.tag.index("}")+1:], field.text)
return info
def getInfo(self, lat, lon, epsilon):
try:
url = self.geturl(lat-epsilon, lon-epsilon, lat+epsilon, lon+epsilon)
except:
return None
try:
xml = self.load('http://' + self.provider_host + url)
tree = ET.fromstring(xml)
member = tree.find("{%s}featureMember" % GMLNS)
if member is not None:
infotext = self.getInfoText(member)
return infotext
except Exception,e:
Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
return None
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
x, y = lon, lat
elif self.isPGoogle: # patch for android - does not require pyproj library
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x,y
def co_to_ll(self,x,y):
if self.customBounds:
l, m = custom_to_latlon(x, y, self.bounds)
elif self.isPLatLon: # patch for android - does not require pyproj library
l, m = y, x
elif self.isPGoogle: # patch for android - does not require pyproj library
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&bbox=%f,%f,%f,%f" % (x1, y1, x2, y2)
except RuntimeError, e:
return None
def parseFeature(self, feature, data):
try:
name = feature.find("Name").text
title = feature.find("Title").text
except:
name = None
title = None
srss = feature.findall("DefaultSRS")
if name:# and srss:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides feature %s in projections %s" % (self.provider_host, name, data[name])
def initFromGetCapabilities(self, host, baseurl, feature = None, index = 0, srs = None):
self.debug = (feature == None) and (index == 0)
# GetCapabilities (Features + SRS)
capabilities = urlopen(host + baseurl + "?SERVICE=WFS&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
features = tree.findall("FeatureType") #TODO: proper parsing of cascading layers and their SRS
data = {}
for f in features:
self.parseFeature(f, data)
# Choose Feature and SRS by (alphabetical) index
if feature is None:
feature = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[feature])[0]
except:
pass
print "Displaying from %s/%s: feature %s in SRS %s." % (host, baseurl, feature, srs)
# generate tile URL and init projection by EPSG code
self.feature = feature
self.url = baseurl + "?typeName=namespace:%s&SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&maxFeatures=50" % (feature)
self.isPGoogle = False
self.isPLatLon = False
if srs=="EPSG:4326":
self.isPLatLon = True
elif srs=="EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass
| 33.308824
| 123
| 0.583959
|
from projections import *
from urllib2 import urlopen
from httplib import HTTPConnection
from threading import Thread
from kivy.logger import Logger
from kivy.loader import Loader
from os.path import join, dirname
import time, os
import hashlib
GMLNS = "http://www.opengis.net/gml"
try:
from pyproj import Proj
from lxml.etree import ElementTree as ET
except:
from xml.etree import ElementTree as ET
class WFSOverlayServer(object):
cache = {}
available_maptype = dict(roadmap = 'Roadmap')
type = "wfs"
def __init__(self, progress_callback=None):
self.progress_callback = progress_callback
def setProgressCallback(self, progress_callback):
self.progress_callback = progress_callback
def load(self, url):
blocksize = 4096
self.progress_callback(0)
fd = urlopen(url)
idata = fd.read(blocksize)
loaded = blocksize
while True:
bdata = fd.read(blocksize)
if not bdata: break
loaded += blocksize
if self.progress_callback:
self.progress_callback(loaded)
idata += bdata
fd.close()
self.progress_callback(-1)
return idata
def findGeometry(self, elem):
geoms = elem.find("{%s}Point" % GMLNS)
if geoms is not None:
return geoms
geoms = elem.find("{%s}LinearRing" % GMLNS)
if geoms is not None:
return geoms
for c in elem.getchildren():
geom = self.findGeometry(c)
if geom is not None:
return geom
def findGeometries(self, members):
geoms = []
for m in members:
geom = self.findGeometry(m)
if geom is not None:
geoms.append(geom)
return geoms
def get(self, parent, width, height):
self.bl = parent.bottom_left
self.tr = parent.top_right
self.zoom = parent.zoom
url = self.geturl(self.bl[0], self.bl[1], self.tr[0], self.tr[1])
if not url:
return None
key = hashlib.md5(url).hexdigest()
if key in self.cache:
return self.cache[key]
try:
xml = self.load('http://' + self.provider_host + url)
tree = ET.fromstring(xml)
members = tree.findall("{%s}featureMember" % GMLNS)
self.geometries = self.findGeometries(members)
self.cache[key] = self.geometries
return self.geometries
except Exception,e:
Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
image = None
def getInfoText(self, member):
fields = member.getchildren()[0].getchildren()
info = ""
for field in fields:
if field.text is not None and field.text.strip() != "":
info += "%s: %s\n" % (field.tag[field.tag.index("}")+1:], field.text)
return info
def getInfo(self, lat, lon, epsilon):
try:
url = self.geturl(lat-epsilon, lon-epsilon, lat+epsilon, lon+epsilon)
except:
return None
try:
xml = self.load('http://' + self.provider_host + url)
tree = ET.fromstring(xml)
member = tree.find("{%s}featureMember" % GMLNS)
if member is not None:
infotext = self.getInfoText(member)
return infotext
except Exception,e:
Logger.error('OverlayServer could not find (or read) WFS from %s [%s]' % (url, e))
return None
def xy_to_co(self, lat, lon):
if self.customBounds:
x, y = latlon_to_custom(lat, lon, self.bounds)
elif self.isPLatLon:
x, y = lon, lat
elif self.isPGoogle:
x, y = latlon_to_google (lat, lon)
else:
x, y = transform(pLatlon, self.projection, lon, lat)
return x,y
def co_to_ll(self,x,y):
if self.customBounds:
l, m = custom_to_latlon(x, y, self.bounds)
elif self.isPLatLon:
l, m = y, x
elif self.isPGoogle:
l, m = google_to_latlon (y, x)
else:
l, m = transform(self.projection, pLatlon, y, x)
return l, m
def geturl(self, lat1, lon1, lat2, lon2):
try:
x1, y1 = self.xy_to_co(lat1, lon1)
x2, y2 = self.xy_to_co(lat2, lon2)
return self.url + "&bbox=%f,%f,%f,%f" % (x1, y1, x2, y2)
except RuntimeError, e:
return None
def parseFeature(self, feature, data):
try:
name = feature.find("Name").text
title = feature.find("Title").text
except:
name = None
title = None
srss = feature.findall("DefaultSRS")
if name:
data[name] = map(lambda x:x.text, srss)
if self.debug:
print "Provider %s provides feature %s in projections %s" % (self.provider_host, name, data[name])
def initFromGetCapabilities(self, host, baseurl, feature = None, index = 0, srs = None):
self.debug = (feature == None) and (index == 0)
capabilities = urlopen(host + baseurl + "?SERVICE=WFS&Request=GetCapabilities").read().strip()
try:
tree = ET.fromstring(capabilities)
if self.debug:
ET.dump(tree)
features = tree.findall("FeatureType")
data = {}
for f in features:
self.parseFeature(f, data)
if feature is None:
feature = sorted(data.keys())[index]
if srs is None:
srs = sorted(data[feature])[0]
except:
pass
print "Displaying from %s/%s: feature %s in SRS %s." % (host, baseurl, feature, srs)
self.feature = feature
self.url = baseurl + "?typeName=namespace:%s&SERVICE=WFS&VERSION=1.1.0&REQUEST=GetFeature&maxFeatures=50" % (feature)
self.isPGoogle = False
self.isPLatLon = False
if srs=="EPSG:4326":
self.isPLatLon = True
elif srs=="EPSG:900913" or srs == "EPSG:3857":
self.isPGoogle = True
try:
self.projection = pGoogle
except:
pass
else:
try:
self.projection = Proj(init=srs)
except:
pass
| false
| true
|
7909f3df663de7147c3dbf424eb18b31b5d6a585
| 3,285
|
py
|
Python
|
services/discovery/jobs/box/cpe.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/discovery/jobs/box/cpe.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/discovery/jobs/box/cpe.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# CPE check
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
# NOC modules
from noc.services.discovery.jobs.base import DiscoveryCheck
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.profile import Profile
class CPECheck(DiscoveryCheck):
"""
CPE check
@todo: Remove stale CPE
"""
name = "cpe"
required_script = "get_cpe"
required_capabilities = ["CPE | Controller"]
def handler(self):
self.logger.info("Checking CPEs")
now = datetime.datetime.now()
result = self.object.scripts.get_cpe()
for cpe in result:
if cpe["status"] != "active":
self.logger.debug(
"[%s|%s] CPE status is '%s'. Skipping",
cpe["id"],
cpe["global_id"],
cpe["status"],
)
continue
mo = self.find_cpe(cpe["global_id"])
if mo:
changes = self.update_if_changed(
mo,
{
"controller": self.object,
"local_cpe_id": cpe["id"],
"global_cpe_id": cpe["global_id"],
"address": cpe["ip"],
"last_seen": now,
},
)
if changes:
self.logger.info(
"[%s|%s] Changed: %s",
cpe["id"],
cpe["global_id"],
", ".join("%s='%s'" % c for c in changes),
)
else:
name = cpe.get("name") or "cpe-%s" % cpe["global_id"]
if ManagedObject.objects.filter(name=name).exists():
name = "cpe-%s" % cpe["global_id"]
self.logger.info("[%s|%s] Created CPE %s", cpe["id"], cpe["global_id"], name)
mo = ManagedObject(
name=name,
pool=self.object.pool,
profile=Profile.get_by_id(Profile.get_generic_profile_id()),
object_profile=self.object.object_profile.cpe_profile
or self.object.object_profile,
administrative_domain=self.object.administrative_domain,
scheme=self.object.scheme,
segment=self.object.segment,
auth_profile=self.object.object_profile.cpe_auth_profile
or self.object.auth_profile,
address=cpe.get("ip") or "0.0.0.0",
controller=self.object,
last_seen=now,
local_cpe_id=cpe["id"],
global_cpe_id=cpe["global_id"],
)
mo.save()
@classmethod
def find_cpe(cls, global_id):
try:
return ManagedObject.objects.get(global_cpe_id=global_id)
except ManagedObject.DoesNotExist:
return None
| 36.910112
| 93
| 0.448706
|
import datetime
from noc.services.discovery.jobs.base import DiscoveryCheck
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.profile import Profile
class CPECheck(DiscoveryCheck):
name = "cpe"
required_script = "get_cpe"
required_capabilities = ["CPE | Controller"]
def handler(self):
self.logger.info("Checking CPEs")
now = datetime.datetime.now()
result = self.object.scripts.get_cpe()
for cpe in result:
if cpe["status"] != "active":
self.logger.debug(
"[%s|%s] CPE status is '%s'. Skipping",
cpe["id"],
cpe["global_id"],
cpe["status"],
)
continue
mo = self.find_cpe(cpe["global_id"])
if mo:
changes = self.update_if_changed(
mo,
{
"controller": self.object,
"local_cpe_id": cpe["id"],
"global_cpe_id": cpe["global_id"],
"address": cpe["ip"],
"last_seen": now,
},
)
if changes:
self.logger.info(
"[%s|%s] Changed: %s",
cpe["id"],
cpe["global_id"],
", ".join("%s='%s'" % c for c in changes),
)
else:
name = cpe.get("name") or "cpe-%s" % cpe["global_id"]
if ManagedObject.objects.filter(name=name).exists():
name = "cpe-%s" % cpe["global_id"]
self.logger.info("[%s|%s] Created CPE %s", cpe["id"], cpe["global_id"], name)
mo = ManagedObject(
name=name,
pool=self.object.pool,
profile=Profile.get_by_id(Profile.get_generic_profile_id()),
object_profile=self.object.object_profile.cpe_profile
or self.object.object_profile,
administrative_domain=self.object.administrative_domain,
scheme=self.object.scheme,
segment=self.object.segment,
auth_profile=self.object.object_profile.cpe_auth_profile
or self.object.auth_profile,
address=cpe.get("ip") or "0.0.0.0",
controller=self.object,
last_seen=now,
local_cpe_id=cpe["id"],
global_cpe_id=cpe["global_id"],
)
mo.save()
@classmethod
def find_cpe(cls, global_id):
try:
return ManagedObject.objects.get(global_cpe_id=global_id)
except ManagedObject.DoesNotExist:
return None
| true
| true
|
7909f404cefc5656b0fd6cbdce47cadfbbb8a8ab
| 4,794
|
py
|
Python
|
cactus/consensus/get_block_challenge.py
|
grayfallstown/cactus-blockchain
|
680d68d0bb7694bd4b99e4906b356e014bca7734
|
[
"Apache-2.0"
] | 20
|
2021-07-16T18:08:13.000Z
|
2022-03-20T02:38:39.000Z
|
cactus/consensus/get_block_challenge.py
|
grayfallstown/cactus-blockchain
|
680d68d0bb7694bd4b99e4906b356e014bca7734
|
[
"Apache-2.0"
] | 29
|
2021-07-17T00:38:18.000Z
|
2022-03-29T19:11:48.000Z
|
cactus/consensus/get_block_challenge.py
|
grayfallstown/cactus-blockchain
|
680d68d0bb7694bd4b99e4906b356e014bca7734
|
[
"Apache-2.0"
] | 21
|
2021-07-17T02:18:57.000Z
|
2022-03-15T08:26:56.000Z
|
import logging
from typing import List, Union
from cactus.consensus.block_record import BlockRecord
from cactus.consensus.blockchain_interface import BlockchainInterface
from cactus.consensus.constants import ConsensusConstants
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.full_block import FullBlock
from cactus.types.header_block import HeaderBlock
from cactus.types.unfinished_block import UnfinishedBlock
from cactus.types.unfinished_header_block import UnfinishedHeaderBlock
from cactus.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
sub_slot_iters: uint64,
) -> bool:
"""
Args:
header_block: An overflow block, with potentially missing information about the new sub slot
blocks: all blocks that have been included before header_block
sub_slot_iters: sub_slot_iters at the header_block
Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub
slot was not included yet, and therefore it is the responsibility of this block to include it
"""
if len(header_block.finished_sub_slots) > 0:
# We already have an included empty sub slot, which means the prev block is 2 sub slots behind.
return False
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
# We also check if curr is close to header_block, which means it's in the same sub slot
seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
while not curr.first_in_sub_slot and not curr.height == 0:
if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
seen_overflow_block = True
curr = blocks.block_record(curr.prev_hash)
if curr.first_in_sub_slot and seen_overflow_block:
# We have seen another overflow block in this slot (same as header_block), therefore there are no
# missing sub slots
return True
# We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
# the future
return False
def get_block_challenge(
constants: ConsensusConstants,
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
genesis_block: bool,
overflow: bool,
skip_overflow_last_ss_validation: bool,
):
if len(header_block.finished_sub_slots) > 0:
if overflow:
# New sub-slot with overflow block
if skip_overflow_last_ss_validation:
# In this case, we are missing the final sub-slot bundle (it's not finished yet), however
# There is a whole empty slot before this block is infused
challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
challenge = header_block.finished_sub_slots[
-1
].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
# No overflow, new slot with a new challenge
challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
if genesis_block:
challenge = constants.GENESIS_CHALLENGE
else:
if overflow:
if skip_overflow_last_ss_validation:
# Overflow infusion without the new slot, so get the last challenge
challenges_to_look_for = 1
else:
# Overflow infusion, so get the second to last challenge. skip_overflow_last_ss_validation is False,
# Which means no sub slots are omitted
challenges_to_look_for = 2
else:
challenges_to_look_for = 1
reversed_challenge_hashes: List[bytes32] = []
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
while len(reversed_challenge_hashes) < challenges_to_look_for:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
if curr.height == 0:
assert curr.finished_challenge_slot_hashes is not None
assert len(curr.finished_challenge_slot_hashes) > 0
break
curr = blocks.block_record(curr.prev_hash)
challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
return challenge
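# Summary sketch (added for clarity; derived from the branches above, not part of the
# original module) of how the challenge is chosen:
#
#   finished_sub_slots non-empty, overflow, skip_overflow_last_ss_validation:
#       hash of the last finished sub slot's challenge chain
#   finished_sub_slots non-empty, overflow, no skip:
#       challenge of the last sub slot's challenge-chain end-of-slot VDF
#   finished_sub_slots non-empty, not overflow:
#       hash of the last finished sub slot's challenge chain
#   no finished sub slots, genesis block:
#       constants.GENESIS_CHALLENGE
#   no finished sub slots, otherwise:
#       walk back through previous blocks collecting finished challenge slot hashes
#       and take the most recent one (or the second most recent, for overflow
#       without skip_overflow_last_ss_validation)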
| 46.543689
| 120
| 0.695244
|
import logging
from typing import List, Union
from cactus.consensus.block_record import BlockRecord
from cactus.consensus.blockchain_interface import BlockchainInterface
from cactus.consensus.constants import ConsensusConstants
from cactus.types.blockchain_format.sized_bytes import bytes32
from cactus.types.full_block import FullBlock
from cactus.types.header_block import HeaderBlock
from cactus.types.unfinished_block import UnfinishedBlock
from cactus.types.unfinished_header_block import UnfinishedHeaderBlock
from cactus.util.ints import uint64
log = logging.getLogger(__name__)
def final_eos_is_already_included(
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
sub_slot_iters: uint64,
) -> bool:
if len(header_block.finished_sub_slots) > 0:
return False
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
seen_overflow_block = curr.overflow and (header_block.total_iters - curr.total_iters < sub_slot_iters // 2)
while not curr.first_in_sub_slot and not curr.height == 0:
if curr.overflow and header_block.total_iters - curr.total_iters < sub_slot_iters // 2:
seen_overflow_block = True
curr = blocks.block_record(curr.prev_hash)
if curr.first_in_sub_slot and seen_overflow_block:
# We have seen another overflow block in this slot (same as header_block), therefore there are no
# missing sub slots
return True
# We have not seen any overflow blocks, therefore header_block will have to include the missing sub slot in
# the future
return False
def get_block_challenge(
constants: ConsensusConstants,
header_block: Union[UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock],
blocks: BlockchainInterface,
genesis_block: bool,
overflow: bool,
skip_overflow_last_ss_validation: bool,
):
if len(header_block.finished_sub_slots) > 0:
if overflow:
# New sub-slot with overflow block
if skip_overflow_last_ss_validation:
# In this case, we are missing the final sub-slot bundle (it's not finished yet), however
challenge: bytes32 = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
challenge = header_block.finished_sub_slots[
-1
].challenge_chain.challenge_chain_end_of_slot_vdf.challenge
else:
challenge = header_block.finished_sub_slots[-1].challenge_chain.get_hash()
else:
if genesis_block:
challenge = constants.GENESIS_CHALLENGE
else:
if overflow:
if skip_overflow_last_ss_validation:
challenges_to_look_for = 1
else:
challenges_to_look_for = 2
else:
challenges_to_look_for = 1
reversed_challenge_hashes: List[bytes32] = []
curr: BlockRecord = blocks.block_record(header_block.prev_header_hash)
while len(reversed_challenge_hashes) < challenges_to_look_for:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
reversed_challenge_hashes += reversed(curr.finished_challenge_slot_hashes)
if curr.height == 0:
assert curr.finished_challenge_slot_hashes is not None
assert len(curr.finished_challenge_slot_hashes) > 0
break
curr = blocks.block_record(curr.prev_hash)
challenge = reversed_challenge_hashes[challenges_to_look_for - 1]
return challenge
| true
| true
|
7909f65fa203f3ee3263cda6c548cd60e9a11dc9
| 2,093
|
py
|
Python
|
alipay/aop/api/response/SsdataDataserviceDtevalIdentitycheckQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/SsdataDataserviceDtevalIdentitycheckQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/SsdataDataserviceDtevalIdentitycheckQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class SsdataDataserviceDtevalIdentitycheckQueryResponse(AlipayResponse):
def __init__(self):
super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).__init__()
self._evidence = None
self._ext_map = None
self._id_card_no_match_flag = None
self._name_match_flag = None
self._push_ant_data_flag = None
@property
def evidence(self):
return self._evidence
@evidence.setter
def evidence(self, value):
self._evidence = value
@property
def ext_map(self):
return self._ext_map
@ext_map.setter
def ext_map(self, value):
self._ext_map = value
@property
def id_card_no_match_flag(self):
return self._id_card_no_match_flag
@id_card_no_match_flag.setter
def id_card_no_match_flag(self, value):
self._id_card_no_match_flag = value
@property
def name_match_flag(self):
return self._name_match_flag
@name_match_flag.setter
def name_match_flag(self, value):
self._name_match_flag = value
@property
def push_ant_data_flag(self):
return self._push_ant_data_flag
@push_ant_data_flag.setter
def push_ant_data_flag(self, value):
self._push_ant_data_flag = value
def parse_response_content(self, response_content):
response = super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).parse_response_content(response_content)
if 'evidence' in response:
self.evidence = response['evidence']
if 'ext_map' in response:
self.ext_map = response['ext_map']
if 'id_card_no_match_flag' in response:
self.id_card_no_match_flag = response['id_card_no_match_flag']
if 'name_match_flag' in response:
self.name_match_flag = response['name_match_flag']
if 'push_ant_data_flag' in response:
self.push_ant_data_flag = response['push_ant_data_flag']
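# Usage sketch (illustrative only; `raw_body` is a hypothetical variable holding the
# gateway's JSON payload for this API):
#
#   resp = SsdataDataserviceDtevalIdentitycheckQueryResponse()
#   resp.parse_response_content(raw_body)
#   print(resp.name_match_flag, resp.id_card_no_match_flag)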
| 31.712121
| 122
| 0.697086
|
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class SsdataDataserviceDtevalIdentitycheckQueryResponse(AlipayResponse):
def __init__(self):
super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).__init__()
self._evidence = None
self._ext_map = None
self._id_card_no_match_flag = None
self._name_match_flag = None
self._push_ant_data_flag = None
@property
def evidence(self):
return self._evidence
@evidence.setter
def evidence(self, value):
self._evidence = value
@property
def ext_map(self):
return self._ext_map
@ext_map.setter
def ext_map(self, value):
self._ext_map = value
@property
def id_card_no_match_flag(self):
return self._id_card_no_match_flag
@id_card_no_match_flag.setter
def id_card_no_match_flag(self, value):
self._id_card_no_match_flag = value
@property
def name_match_flag(self):
return self._name_match_flag
@name_match_flag.setter
def name_match_flag(self, value):
self._name_match_flag = value
@property
def push_ant_data_flag(self):
return self._push_ant_data_flag
@push_ant_data_flag.setter
def push_ant_data_flag(self, value):
self._push_ant_data_flag = value
def parse_response_content(self, response_content):
response = super(SsdataDataserviceDtevalIdentitycheckQueryResponse, self).parse_response_content(response_content)
if 'evidence' in response:
self.evidence = response['evidence']
if 'ext_map' in response:
self.ext_map = response['ext_map']
if 'id_card_no_match_flag' in response:
self.id_card_no_match_flag = response['id_card_no_match_flag']
if 'name_match_flag' in response:
self.name_match_flag = response['name_match_flag']
if 'push_ant_data_flag' in response:
self.push_ant_data_flag = response['push_ant_data_flag']
| true
| true
|
7909f6d6dcf0d7fea954dcd9105c3e41eca265c5
| 2,109
|
py
|
Python
|
bibliopixel/util/deprecated.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 253
|
2015-01-03T23:17:57.000Z
|
2021-12-14T02:31:08.000Z
|
bibliopixel/util/deprecated.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 879
|
2015-01-11T16:07:25.000Z
|
2021-12-10T16:24:31.000Z
|
bibliopixel/util/deprecated.py
|
rec/leds
|
ed5fd11ed155e7008d4ef6d5b3d82cd7f8b3ed6a
|
[
"MIT"
] | 71
|
2015-01-04T01:02:47.000Z
|
2022-03-25T18:30:10.000Z
|
import os, sys
CHOICES = 'ignore', 'fail', 'warn', 'warn_once'
DEFAULT = 'warn_once'
ACTION = None
HELP = """
Specify what to do when a project uses deprecated features:
ignore: do nothing
warn: print warning messages for each feature
warn_once: print a warning message, but only once for each type of feature
fail: throw an exception
"""
DEPRECATED = set()
FLAG = '--deprecated'
V4_FLAG = '--v4'
ENVIRONMENT_VARIABLE = 'BP_DEPRECATED'
V4_HELP = """\
Run BiblioPixel in v4 compatibility mode, to see if it will work with
future releases v4.x
"""
def add_arguments(parser):
parser.add_argument(V4_FLAG, action='store_true', help=V4_HELP)
def allowed():
_compute_action()
return ACTION != 'fail'
def deprecated(msg, *args, **kwds):
_compute_action()
if ACTION == 'ignore':
return
if ACTION == 'warn_once' and msg in DEPRECATED:
return
formatted = msg.format(*args, **kwds)
if ACTION == 'fail':
raise ValueError(formatted)
DEPRECATED.add(msg)
from . import log
log.warning(formatted)
def _compute_action():
global ACTION
if ACTION:
return
if FLAG in sys.argv:
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
if V4_FLAG in sys.argv:
ACTION = 'fail'
d = [i for i, v in enumerate(sys.argv) if v.startswith(FLAG + '=')]
if len(d) > 1:
raise ValueError('Only one %s argument can be used' % FLAG)
if not d:
ACTION = os.getenv(ENVIRONMENT_VARIABLE, ACTION or DEFAULT)
else:
arg = sys.argv.pop(d[0])
_, *rest = arg.split('=')
if len(rest) > 1:
raise ValueError('Extra = in flag %s' % arg)
if not (rest and rest[0].strip()):
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
ACTION = rest[0]
if ACTION not in CHOICES:
ACTION = None
raise ValueError('Unknown deprecation value (must be one of %s)' %
', '.join(CHOICES))
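# Usage sketch (illustrative, not part of the original module). Library code reports a
# deprecated feature like this; the message is any str.format() string:
#
#   from bibliopixel.util import deprecated
#   deprecated.deprecated('{} is deprecated, use {} instead', 'old_name', 'new_name')
#
# The action taken is controlled by --deprecated=<ignore|warn|warn_once|fail> on the
# command line, by the BP_DEPRECATED environment variable, or by the 'warn_once' default.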
| 23.696629
| 76
| 0.598388
|
import os, sys
CHOICES = 'ignore', 'fail', 'warn', 'warn_once'
DEFAULT = 'warn_once'
ACTION = None
HELP = """
Specify what to do when a project uses deprecated features:
ignore: do nothing
warn: print warning messages for each feature
warn_once: print a warning message, but only once for each type of feature
fail: throw an exception
"""
DEPRECATED = set()
FLAG = '--deprecated'
V4_FLAG = '--v4'
ENVIRONMENT_VARIABLE = 'BP_DEPRECATED'
V4_HELP = """\
Run BiblioPixel in v4 compatibility mode, to see if it will work with
future releases v4.x
"""
def add_arguments(parser):
parser.add_argument(V4_FLAG, action='store_true', help=V4_HELP)
def allowed():
_compute_action()
return ACTION != 'fail'
def deprecated(msg, *args, **kwds):
_compute_action()
if ACTION == 'ignore':
return
if ACTION == 'warn_once' and msg in DEPRECATED:
return
formatted = msg.format(*args, **kwds)
if ACTION == 'fail':
raise ValueError(formatted)
DEPRECATED.add(msg)
from . import log
log.warning(formatted)
def _compute_action():
global ACTION
if ACTION:
return
if FLAG in sys.argv:
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
if V4_FLAG in sys.argv:
ACTION = 'fail'
d = [i for i, v in enumerate(sys.argv) if v.startswith(FLAG + '=')]
if len(d) > 1:
raise ValueError('Only one %s argument can be used' % FLAG)
if not d:
ACTION = os.getenv(ENVIRONMENT_VARIABLE, ACTION or DEFAULT)
else:
arg = sys.argv.pop(d[0])
_, *rest = arg.split('=')
if len(rest) > 1:
raise ValueError('Extra = in flag %s' % arg)
if not (rest and rest[0].strip()):
raise ValueError('%s needs an argument (one of %s)' %
(FLAG, ', '.join(CHOICES)))
ACTION = rest[0]
if ACTION not in CHOICES:
ACTION = None
raise ValueError('Unknown deprecation value (must be one of %s)' %
', '.join(CHOICES))
| true
| true
|
7909f86339678f4ff6b4300bc9b4937f8e52b4e1
| 2,298
|
py
|
Python
|
src/equation_parser/equations.py
|
sushmaakoju/parser
|
e40e3f818921141044b499e231ae75e6bf4141c2
|
[
"MIT"
] | null | null | null |
src/equation_parser/equations.py
|
sushmaakoju/parser
|
e40e3f818921141044b499e231ae75e6bf4141c2
|
[
"MIT"
] | null | null | null |
src/equation_parser/equations.py
|
sushmaakoju/parser
|
e40e3f818921141044b499e231ae75e6bf4141c2
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
#SymPy is a non-commercial alternative to Mathematica and Maple
# SymPy can map variable to a value or a matrix.
# SymPy's Symbolic Statistical Modelling uses scintific computing.
import sys
import numpy as np
import sympy as sp
import pandas as pd
from pathlib import Path
from .tokens import *
from .equation import *
class Equations(Equation):
def __init__(self):
path = Path(__file__).parent
self.filepath = path.joinpath("fixtures","equations.xlsx")
self.equations_sheet = "equations"
self.column_mapping_sheet = "col_var_mapping"
self.data_sheet = "values"
self.mappings = None
self.df = None
self.equations_df = pd.DataFrame()
self.equations = dict()
self.lhs = None
self.values = dict()
def upload_data_equations(self, filepath, equations_sheet, data_sheet, column_mapping_sheet=""):
if not self.validate_file_inputs(filepath, equations_sheet, data_sheet):
return False
self.filepath = filepath
self.equations_df = pd.read_excel(self.filepath, sheet_name=equations_sheet, mangle_dupe_cols=True)
self.df = pd.read_excel(self.filepath, sheet_name=data_sheet, mangle_dupe_cols=True)
if column_mapping_sheet:
self.mappings = pd.read_excel(self.filepath, sheet_name=column_mapping_sheet, mangle_dupe_cols=True)
    def validate_file_inputs(self, filepath, equations_sheet, data_sheet):
        if not filepath or not equations_sheet or not data_sheet:
            raise Exception("Empty upload data inputs. Please provide valid inputs to file upload.")
        return True
def process_equations(self):
self.lhs = self.equations_df['name']
eq_list = self.equations_df['equation']
self.equations = dict()
for variable, equation in zip(self.lhs, eq_list):
self.equations[variable] = Equation(equation, self.df)
self.equations[variable].set_symbols(self.mappings)
self.values[variable] = self.equations[variable].evaluate(self.values)
result_df = pd.DataFrame.from_dict(self.values)
result_df.to_csv("results.csv", index=False)
return self.values
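# Usage sketch (illustrative; assumes a workbook laid out like the bundled
# fixtures/equations.xlsx, with 'equations' and 'values' sheets and an optional
# column-to-variable mapping sheet):
#
#   eqs = Equations()
#   eqs.upload_data_equations("equations.xlsx", "equations", "values", "col_var_mapping")
#   results = eqs.process_equations()   # evaluates each equation and writes results.csv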
| 41.781818
| 112
| 0.691036
|
from __future__ import absolute_import
import sys
import numpy as np
import sympy as sp
import pandas as pd
from pathlib import Path
from .tokens import *
from .equation import *
class Equations(Equation):
def __init__(self):
path = Path(__file__).parent
self.filepath = path.joinpath("fixtures","equations.xlsx")
self.equations_sheet = "equations"
self.column_mapping_sheet = "col_var_mapping"
self.data_sheet = "values"
self.mappings = None
self.df = None
self.equations_df = pd.DataFrame()
self.equations = dict()
self.lhs = None
self.values = dict()
def upload_data_equations(self, filepath, equations_sheet, data_sheet, column_mapping_sheet=""):
if not self.validate_file_inputs(filepath, equations_sheet, data_sheet):
return False
self.filepath = filepath
self.equations_df = pd.read_excel(self.filepath, sheet_name=equations_sheet, mangle_dupe_cols=True)
self.df = pd.read_excel(self.filepath, sheet_name=data_sheet, mangle_dupe_cols=True)
if column_mapping_sheet:
self.mappings = pd.read_excel(self.filepath, sheet_name=column_mapping_sheet, mangle_dupe_cols=True)
def validate_file_inputs(self, filepath, equations_sheet, data_sheet):
if not filepath or not equations_sheet or not data_sheet:
raise Exception("Empty upload data inputs. Please provide valid inputs to file upload.")
else:
return True
return False
def process_equations(self):
self.lhs = self.equations_df['name']
eq_list = self.equations_df['equation']
self.equations = dict()
for variable, equation in zip(self.lhs, eq_list):
self.equations[variable] = Equation(equation, self.df)
self.equations[variable].set_symbols(self.mappings)
self.values[variable] = self.equations[variable].evaluate(self.values)
result_df = pd.DataFrame.from_dict(self.values)
result_df.to_csv("results.csv", index=False)
return self.values
| true
| true
|
7909f880ff9597791c6b835ba92450100aaff982
| 1,514
|
py
|
Python
|
djangocms_oembed/cms_plugins.py
|
MatthewWilkes/djangocms-oembed
|
168436cb2496def1bf64605dde5ce253769d3822
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_oembed/cms_plugins.py
|
MatthewWilkes/djangocms-oembed
|
168436cb2496def1bf64605dde5ce253769d3822
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_oembed/cms_plugins.py
|
MatthewWilkes/djangocms-oembed
|
168436cb2496def1bf64605dde5ce253769d3822
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import OembedVideoPlugin, OembedRichPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class CMSOembedVideoPlugin(CMSPluginBase):
name = _('Video (embedded)')
model = OembedVideoPlugin
render_template = 'djangocms_oembed/plugins/video.html'
admin_preview = False
text_enabled = True
fieldsets = (
(None, {'fields': ('oembed_url', ('width', 'height',), 'autoplay', 'loop', 'show_related',)}),
('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
)
readonly_fields = ('type', 'provider', 'html', 'data',)
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/snippet.png"
plugin_pool.register_plugin(CMSOembedVideoPlugin)
class CMSOembedRichPlugin(CMSPluginBase):
name = _('Rich Content (embedded)')
model = OembedRichPlugin
render_template = 'djangocms_oembed/plugins/rich.html'
admin_preview = False
text_enabled = True
fieldsets = (
(None, {'fields': ('oembed_url',)}),
('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
)
readonly_fields = ('type', 'provider', 'html', 'data',)
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/snippet.png"
plugin_pool.register_plugin(CMSOembedRichPlugin)
| 35.209302
| 102
| 0.678996
|
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .models import OembedVideoPlugin, OembedRichPlugin
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class CMSOembedVideoPlugin(CMSPluginBase):
name = _('Video (embedded)')
model = OembedVideoPlugin
render_template = 'djangocms_oembed/plugins/video.html'
admin_preview = False
text_enabled = True
fieldsets = (
(None, {'fields': ('oembed_url', ('width', 'height',), 'autoplay', 'loop', 'show_related',)}),
('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
)
readonly_fields = ('type', 'provider', 'html', 'data',)
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/snippet.png"
plugin_pool.register_plugin(CMSOembedVideoPlugin)
class CMSOembedRichPlugin(CMSPluginBase):
name = _('Rich Content (embedded)')
model = OembedRichPlugin
render_template = 'djangocms_oembed/plugins/rich.html'
admin_preview = False
text_enabled = True
fieldsets = (
(None, {'fields': ('oembed_url',)}),
('advanced', {'fields': ('type', 'provider', 'html', 'data'), 'classes': ['collapse']}),
)
readonly_fields = ('type', 'provider', 'html', 'data',)
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/snippet.png"
plugin_pool.register_plugin(CMSOembedRichPlugin)
| true
| true
|
7909f8f0919a2f941f14c7d197539ae57e894d4e
| 4,483
|
py
|
Python
|
telemetry/telemetry/page/shared_page_state_unittest.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 4
|
2017-06-04T05:37:39.000Z
|
2021-06-26T05:30:15.000Z
|
telemetry/telemetry/page/shared_page_state_unittest.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 9
|
2017-09-10T19:49:04.000Z
|
2018-04-14T04:39:55.000Z
|
telemetry/telemetry/page/shared_page_state_unittest.py
|
ravitejavalluri/catapult
|
246a39a82c2213d913a96fff020a263838dc76e6
|
[
"BSD-3-Clause"
] | 2
|
2017-09-10T20:30:38.000Z
|
2017-09-12T19:50:03.000Z
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.testing import fakes
from telemetry.util import wpr_modes
def SetUpPageRunnerArguments(options):
parser = options.CreateParser()
story_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
story_runner.ProcessCommandLineArgs(parser, options)
class DummyTest(legacy_page_test.LegacyPageTest):
def ValidateAndMeasurePage(self, *_):
pass
class SharedPageStateTests(unittest.TestCase):
def setUp(self):
self.options = fakes.CreateBrowserFinderOptions()
self.options.use_live_sites = False
self.options.output_formats = ['none']
self.options.suppress_gtest_report = True
def testUseLiveSitesFlagSet(self):
self.options.use_live_sites = True
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_OFF)
self.assertTrue(run_state.platform.network_controller.use_live_traffic)
def testUseLiveSitesFlagUnset(self):
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_REPLAY)
self.assertFalse(run_state.platform.network_controller.use_live_traffic)
def testWPRRecordEnable(self):
self.options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_RECORD)
self.assertFalse(run_state.platform.network_controller.use_live_traffic)
def testConstructorCallsSetOptions(self):
test = DummyTest()
shared_page_state.SharedPageState(
test, self.options, story_module.StorySet())
self.assertEqual(test.options, self.options)
def assertUserAgentSetCorrectly(
self, shared_page_state_class, expected_user_agent):
story = page.Page(
'http://www.google.com',
shared_page_state_class=shared_page_state_class)
test = DummyTest()
story_set = story_module.StorySet()
story_set.AddStory(story)
story.shared_state_class(test, self.options, story_set)
browser_options = self.options.browser_options
actual_user_agent = browser_options.browser_user_agent_type
self.assertEqual(expected_user_agent, actual_user_agent)
def testPageStatesUserAgentType(self):
self.assertUserAgentSetCorrectly(
shared_page_state.SharedMobilePageState, 'mobile')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedDesktopPageState, 'desktop')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedTabletPageState, 'tablet')
self.assertUserAgentSetCorrectly(
shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedPageState, None)
def testBrowserStartupURLSetCorrectly(self):
story_set = story_module.StorySet()
google_page = page.Page(
'http://www.google.com',
startup_url='http://www.google.com', page_set=story_set)
example_page = page.Page(
'https://www.example.com',
startup_url='https://www.example.com', page_set=story_set)
gmail_page = page.Page(
'https://www.gmail.com',
startup_url='https://www.gmail.com', page_set=story_set)
for p in (google_page, example_page, gmail_page):
story_set.AddStory(p)
shared_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_set)
for p in (google_page, example_page, gmail_page):
shared_state.WillRunStory(p)
self.assertEquals(
p.startup_url, self.options.browser_options.startup_url)
| 38.646552
| 76
| 0.758867
|
import unittest
from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.testing import fakes
from telemetry.util import wpr_modes
def SetUpPageRunnerArguments(options):
parser = options.CreateParser()
story_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
story_runner.ProcessCommandLineArgs(parser, options)
class DummyTest(legacy_page_test.LegacyPageTest):
def ValidateAndMeasurePage(self, *_):
pass
class SharedPageStateTests(unittest.TestCase):
def setUp(self):
self.options = fakes.CreateBrowserFinderOptions()
self.options.use_live_sites = False
self.options.output_formats = ['none']
self.options.suppress_gtest_report = True
def testUseLiveSitesFlagSet(self):
self.options.use_live_sites = True
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_OFF)
self.assertTrue(run_state.platform.network_controller.use_live_traffic)
def testUseLiveSitesFlagUnset(self):
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_REPLAY)
self.assertFalse(run_state.platform.network_controller.use_live_traffic)
def testWPRRecordEnable(self):
self.options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_RECORD)
self.assertFalse(run_state.platform.network_controller.use_live_traffic)
def testConstructorCallsSetOptions(self):
test = DummyTest()
shared_page_state.SharedPageState(
test, self.options, story_module.StorySet())
self.assertEqual(test.options, self.options)
def assertUserAgentSetCorrectly(
self, shared_page_state_class, expected_user_agent):
story = page.Page(
'http://www.google.com',
shared_page_state_class=shared_page_state_class)
test = DummyTest()
story_set = story_module.StorySet()
story_set.AddStory(story)
story.shared_state_class(test, self.options, story_set)
browser_options = self.options.browser_options
actual_user_agent = browser_options.browser_user_agent_type
self.assertEqual(expected_user_agent, actual_user_agent)
def testPageStatesUserAgentType(self):
self.assertUserAgentSetCorrectly(
shared_page_state.SharedMobilePageState, 'mobile')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedDesktopPageState, 'desktop')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedTabletPageState, 'tablet')
self.assertUserAgentSetCorrectly(
shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedPageState, None)
def testBrowserStartupURLSetCorrectly(self):
story_set = story_module.StorySet()
google_page = page.Page(
'http://www.google.com',
startup_url='http://www.google.com', page_set=story_set)
example_page = page.Page(
'https://www.example.com',
startup_url='https://www.example.com', page_set=story_set)
gmail_page = page.Page(
'https://www.gmail.com',
startup_url='https://www.gmail.com', page_set=story_set)
for p in (google_page, example_page, gmail_page):
story_set.AddStory(p)
shared_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_set)
for p in (google_page, example_page, gmail_page):
shared_state.WillRunStory(p)
self.assertEquals(
p.startup_url, self.options.browser_options.startup_url)
| true
| true
|
7909f92ef6bcd853749f31c3c2ed423ef8aa9401
| 2,581
|
py
|
Python
|
benchmarks/software_nontermination/f3_hints/C_Integer/Stroeder_15/Urban-WST2013-Fig1_false-termination.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/software_nontermination/f3_hints/C_Integer/Stroeder_15/Urban-WST2013-Fig1_false-termination.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/software_nontermination/f3_hints/C_Integer/Stroeder_15/Urban-WST2013-Fig1_false-termination.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
symbols = frozenset([pc, x])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = 11
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x <= 10) -> pc' = 1
cond = mgr.LE(x, ints[10])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x <= 10) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 & (x > 6) -> pc' = 2
cond = mgr.GT(x, ints[6])
cfg.append(mgr.Implies(mgr.And(pcs[1], cond), x_pcs[0]))
# pc = 1 & !(x > 6) -> pc' = 0
cfg.append(mgr.Implies(mgr.And(pcs[1], mgr.Not(cond)), x_pcs[0]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same = mgr.Equals(x_x, x)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> same
trans.append(mgr.Implies(pcs[1], same))
# pc = 2 -> x' = x + 2
trans.append(mgr.Implies(pcs[2], mgr.Equals(x_x, mgr.Plus(x, ints[2]))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
symbs = frozenset([pc, x])
i_5 = mgr.Int(5)
x_x = symb_to_next(mgr, x)
loc = Location(env, mgr.Equals(x, i_5))
loc.set_progress(0, mgr.Equals(x_x, x))
h_x = Hint("h_x", env, frozenset([x]), symbs)
h_x.set_locs([loc])
return frozenset([h_x])
| 26.885417
| 77
| 0.573809
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
symbols = frozenset([pc, x])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = 11
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
cond = mgr.LE(x, ints[10])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x <= 10) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
cond = mgr.GT(x, ints[6])
cfg.append(mgr.Implies(mgr.And(pcs[1], cond), x_pcs[0]))
# pc = 1 & !(x > 6) -> pc' = 0
cfg.append(mgr.Implies(mgr.And(pcs[1], mgr.Not(cond)), x_pcs[0]))
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same = mgr.Equals(x_x, x)
trans.append(mgr.Implies(pcs[0], same))
trans.append(mgr.Implies(pcs[1], same))
trans.append(mgr.Implies(pcs[2], mgr.Equals(x_x, mgr.Plus(x, ints[2]))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
symbs = frozenset([pc, x])
i_5 = mgr.Int(5)
x_x = symb_to_next(mgr, x)
loc = Location(env, mgr.Equals(x, i_5))
loc.set_progress(0, mgr.Equals(x_x, x))
h_x = Hint("h_x", env, frozenset([x]), symbs)
h_x.set_locs([loc])
return frozenset([h_x])
| true
| true
|
7909f996dfdfb5eb58bed16ac63b28b8783a5e7c
| 3,481
|
py
|
Python
|
openhtf/output/callbacks/__init__.py
|
kunaljdoshi/openhtf
|
bd6f93708a84e6516ab1a1cb8f308b3108818744
|
[
"Apache-2.0"
] | 1
|
2019-06-16T09:27:27.000Z
|
2019-06-16T09:27:27.000Z
|
openhtf/output/callbacks/__init__.py
|
airdeng/openhtf
|
cb544023e0d9acf758add54026d43668cc37e091
|
[
"Apache-2.0"
] | null | null | null |
openhtf/output/callbacks/__init__.py
|
airdeng/openhtf
|
cb544023e0d9acf758add54026d43668cc37e091
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains support for various built-in output mechanisms.
Here, a base OutputToFile class is implemented to provide simple output to
a file via the pickle serialization mechanism. It can be subclassed to implement
alternative serialization schemes, see json_factory.py and mfg_inspector.py for
examples.
"""
import contextlib
try:
import cPickle as pickle
except ImportError:
import pickle
import shutil
import tempfile
from openhtf import util
from openhtf.util import data
import six
# TODO(wallacbe): Switch to util
class Atomic(object):
"""Class that does atomic write in a contextual manner."""
def __init__(self, filename):
self.filename = filename
self.temp = tempfile.NamedTemporaryFile(delete=False)
def write(self, write_data):
if hasattr(write_data, 'decode'):
return self.temp.write(write_data)
return self.temp.write(write_data.encode())
def close(self):
self.temp.close()
shutil.move(self.temp.name, self.filename)
class OutputToFile(object):
"""Output the given TestRecord to a file.
Instances of this class are intended to be used as an output callback
(see Test.add_output_callbacks) to output TestRecord results to a file.
This base implementation outputs the TestRecord by serializing it via
the pickle module. Subclasses may change this by overriding the
serialize_test_record() method. Additionally, subclasses may implement
more complex file naming mechanisms by overriding the open_file() method.
  Args:
    filename_pattern: A string, callable, or file-like object used to determine
      where each TestRecord is written when the instance is called with one.
  """
def __init__(self, filename_pattern):
self.filename_pattern = filename_pattern
@staticmethod
def serialize_test_record(test_record):
"""Override method to alter how test records are serialized to file data."""
return pickle.dumps(test_record, -1)
@staticmethod
def open_file(filename):
"""Override method to alter file open behavior or file types."""
return Atomic(filename)
@contextlib.contextmanager
def open_output_file(self, test_record):
"""Open file based on pattern."""
# Ignore keys for the log filename to not convert larger data structures.
record_dict = data.convert_to_base_types(
test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if isinstance(pattern, six.string_types) or callable(pattern):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
yield output_file
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
yield self.filename_pattern
else:
raise ValueError(
'filename_pattern must be string, callable, or File-like object')
def __call__(self, test_record):
with self.open_output_file(test_record) as outfile:
outfile.write(self.serialize_test_record(test_record))
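# Illustrative sketch (not part of the original module): a minimal subclass that swaps
# the pickle serialization for JSON, in the spirit of the json_factory.py referenced in
# the module docstring. The class name and the '{dut_id}' pattern below are hypothetical.
class ExampleJsonOutputToFile(OutputToFile):
  """Example only: serialize each TestRecord as a JSON document."""

  @staticmethod
  def serialize_test_record(test_record):
    import json
    # Convert to plain Python types first so the record is JSON-serializable.
    return json.dumps(data.convert_to_base_types(test_record))

# e.g. (hypothetical pattern): test.add_output_callbacks(ExampleJsonOutputToFile('./{dut_id}.json'))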
| 33.796117
| 80
| 0.747774
|
import contextlib
try:
import cPickle as pickle
except:
import pickle
import shutil
import tempfile
from openhtf import util
from openhtf.util import data
import six
class Atomic(object):
def __init__(self, filename):
self.filename = filename
self.temp = tempfile.NamedTemporaryFile(delete=False)
def write(self, write_data):
if hasattr(write_data, 'decode'):
return self.temp.write(write_data)
return self.temp.write(write_data.encode())
def close(self):
self.temp.close()
shutil.move(self.temp.name, self.filename)
class OutputToFile(object):
def __init__(self, filename_pattern):
self.filename_pattern = filename_pattern
@staticmethod
def serialize_test_record(test_record):
return pickle.dumps(test_record, -1)
@staticmethod
def open_file(filename):
return Atomic(filename)
@contextlib.contextmanager
def open_output_file(self, test_record):
record_dict = data.convert_to_base_types(
test_record, ignore_keys=('code_info', 'phases', 'log_records'))
pattern = self.filename_pattern
if isinstance(pattern, six.string_types) or callable(pattern):
output_file = self.open_file(util.format_string(pattern, record_dict))
try:
yield output_file
finally:
output_file.close()
elif hasattr(self.filename_pattern, 'write'):
yield self.filename_pattern
else:
raise ValueError(
'filename_pattern must be string, callable, or File-like object')
def __call__(self, test_record):
with self.open_output_file(test_record) as outfile:
outfile.write(self.serialize_test_record(test_record))
| true
| true
|
7909fa2fdea672a13711a6d9641ac0903034d64d
| 43,055
|
py
|
Python
|
bin/storm.py
|
xulunfan/storm
|
8cb7b5a59b4a0556a52cb3d08c6b6c02e1b9f4a7
|
[
"Apache-2.0"
] | 1
|
2018-08-30T12:46:52.000Z
|
2018-08-30T12:46:52.000Z
|
bin/storm.py
|
xulunfan/storm
|
8cb7b5a59b4a0556a52cb3d08c6b6c02e1b9f4a7
|
[
"Apache-2.0"
] | 5
|
2020-04-23T21:30:46.000Z
|
2020-04-23T21:32:32.000Z
|
bin/storm.py
|
xulunfan/storm
|
8cb7b5a59b4a0556a52cb3d08c6b6c02e1b9f4a7
|
[
"Apache-2.0"
] | 1
|
2020-03-12T20:26:34.000Z
|
2020-03-12T20:26:34.000Z
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
# python 3
from urllib.parse import quote_plus
except ImportError:
# python 2
from urllib import quote_plus
try:
# python 3
import configparser
except ImportError:
# python 2
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
    command = ["cygpath", "-wp", x]
    p = sub.Popen(command, stdout=sub.PIPE)
    output, errors = p.communicate()
    # python 3 returns bytes from communicate(); decode so the split below works on both versions
    if not isinstance(output, str):
        output = output.decode('utf-8')
    lines = output.split(os.linesep)
    return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    ret = []  # default when the path does not exist: no classpath entries
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
# For debug purpose, uncomment when you need to debug DependencyResolver
# print("Resolved dependencies: %s" % output)
try:
out_dict = json.loads(output)
return out_dict
    except ValueError:
        raise RuntimeError("dependency handler returns non-json response: sysout<%s>" % output)
def print_localconfvalue(name):
"""Syntax: [storm localconfvalue conf-name]
Prints out the value for conf-name in the local Storm configs.
The local Storm configs are the ones in ~/.storm/storm.yaml merged
in with the configs in defaults.yaml.
"""
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
"""Syntax: [storm remoteconfvalue conf-name]
Prints out the value for conf-name in the cluster's Storm configs.
The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml
merged in with the configs in defaults.yaml.
This command must be run on a cluster machine.
"""
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
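# Example (the conf-name below is only illustrative):
#   storm localconfvalue storm.log.dir
#   storm remoteconfvalue storm.log.dir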
def parse_args(string):
"""Takes a string of whitespace-separated tokens and parses it into a list.
Whitespace inside tokens may be quoted with single quotes, double quotes or
backslash (similar to command-line arguments in bash).
>>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f\'f' "g\"g" "i""i" 'j''j' k" "k l' l' mm n\\n''')
['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n\n']
"""
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
"""Syntax: [storm local topology-jar-path class ...]
Runs the main method of class with the specified arguments but pointing to a local cluster
The storm jars and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
and others will interact with a local cluster instead of the one configured by default.
Most options should work just like with the storm jar command.
local also adds in the option --local-ttl which sets the number of seconds the
local cluster will run for before it shuts down.
--java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK
--java-debug transport=dt_socket,address=localhost:8000
will open up a debugging server on port 8000.
"""
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
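# Example invocation (jar path and topology class are illustrative; the flags are the
# ones documented in the docstring above):
#   storm local example/storm-starter/storm-starter-topologies-*.jar \
#       org.apache.storm.starter.RollingTopWords --local-ttl 30 \
#       --java-debug transport=dt_socket,address=localhost:8000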
def jar(jarfile, klass, *args):
"""Syntax: [storm jar topology-jar-path class ...]
Runs the main method of class with the specified arguments.
The storm worker dependencies and configs in ~/.storm are put on the classpath.
The process is configured so that StormSubmitter
(http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)
will upload the jar at topology-jar-path when the topology is submitted.
    When you want to ship other jars which are not included in the application jar, you can pass them to the --jars option as a comma-separated string.
    For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.
    And when you want to ship maven artifacts and their transitive dependencies, you can pass them to --artifacts as a comma-separated string.
    You can also exclude some dependencies, just as you would in a maven pom.
    Please add exclusion artifacts as a '^'-separated string after the artifact.
    For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka-clients.
    When you need to pull artifacts from somewhere other than Maven Central, you can pass remote repositories to the --artifactRepositories option as a comma-separated string.
    The repository format is "<name>^<url>". '^' is used as the separator because a URL may contain various characters.
    For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories to the dependency resolver.
    You can provide a local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. It can help when you don't have a '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile).
    You can also provide proxy information to let the dependency resolver use a proxy if needed. There are three parameters for the proxy:
--proxyUrl: URL representation of proxy ('http://host:port')
--proxyUsername: username of proxy if it requires basic auth
--proxyPassword: password of proxy if it requires basic auth
Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`
    When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process which runs the class and the workers for that topology.
    If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Please be careful, because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.
"""
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
"""Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode
Compiles the SQL statements into a Trident topology and submits it to Storm.
    If the user activates explain mode, SQL Runner analyzes each query statement and shows the query plan instead of submitting the topology.
    The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername and --proxyPassword options available for jar also apply to the sql command.
    Please refer to "help jar" to see how to use these options.
    You normally want to pass these options, since in many cases your SQL reads from a data source that is an external storage system.
"""
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
"""Syntax: [storm kill topology-name [-w wait-time-secs]]
Kills the topology with the name topology-name. Storm will
first deactivate the topology's spouts for the duration of
the topology's message timeout to allow all messages currently
being processed to finish processing. Storm will then shutdown
the workers and clean up their state. You can override the length
of time Storm waits between deactivation and shutdown with the -w flag.
"""
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
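# Hypothetical example for the kill command above (name and wait time are placeholders):
#
#   storm kill my-topology -w 30
#
# deactivates my-topology, waits 30 seconds instead of the message timeout, and then
# shuts down its workers and cleans up their state.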
def upload_credentials(*args):
"""Syntax: [storm upload-credentials topology-name [credkey credvalue]*]
Uploads a new set of credentials to a running topology
"""
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
"""Syntax: [storm blobstore cmd]
list [KEY...] - lists blobs currently in the blob store
cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).
    create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE
        or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma separated list.
    update [-f FILE] KEY - update the contents of a blob. Contents come from
        a FILE or STDIN (requires write access).
    delete KEY - delete an entry from the blob store (requires write access).
    set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma
        separated list (requires admin access).
replication --read KEY - Used to read the replication factor of the blob.
replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the
replication factor of a blob.
For example, the following would create a mytopo:data.tgz key using the data
stored in data.tgz. User alice would have full access, bob would have
read/write access and everyone else would have read access.
storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
"""
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
"""Syntax: [storm heartbeats [cmd]]
list PATH - lists heartbeats nodes under PATH currently in the ClusterState.
get PATH - Get the heartbeat data at PATH
"""
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
"""Syntax: [storm activate topology-name]
Activates the specified topology's spouts.
"""
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
"""
Dynamically change topology log levels
Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]
where log level is one of:
ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF
and timeout is integer seconds.
e.g.
./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name
Set the root logger's level to DEBUG for 30 seconds
./bin/storm set_log_level -l com.myapp=WARN topology-name
        Set the com.myapp logger's level to WARN indefinitely
    ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name
        Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger
to ERROR for 123 seconds
./bin/storm set_log_level -r com.myOtherLogger topology-name
Clears settings, resetting back to the original level
"""
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
"""Syntax: [storm list]
List the running topologies and their statuses.
"""
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
"""Syntax: [storm deactivate topology-name]
Deactivates the specified topology's spouts.
"""
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
"""Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']]
Sometimes you may wish to spread out the workers for a running topology.
For example, let's say you have a 10 node cluster running
4 workers per node, and then let's say you add another 10 nodes to
the cluster. You may wish to have Storm spread out the workers for the
running topology so that each node runs 2 workers. One way to do this
is to kill the topology and resubmit it, but Storm provides a "rebalance"
command that provides an easier way to do this.
Rebalance will first deactivate the topology for the duration of the
    message timeout (overridable with the -w flag), make the requested adjustments to the topology,
    and let the scheduler try to find a better scheduling based on the
    new situation. The topology will then return to its previous state of activation
    (so a deactivated topology will still be deactivated and an activated
    topology will go back to being activated).
    Some of what you can change about a topology includes the number of requested workers (-n flag),
    the number of executors for a given component (-e flag), the resources each component is
    requesting as used by the resource aware scheduler (-r flag), and configs (-t flag).
"""
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
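# A hypothetical rebalance call combining the flags described above (all names and
# numbers are placeholders):
#
#   storm rebalance my-topology -w 30 -n 6 -e my-spout=4 -e my-bolt=8
#
# waits 30 seconds, then asks the scheduler to spread my-topology over 6 workers with
# 4 executors for my-spout and 8 for my-bolt.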
def get_errors(*args):
"""Syntax: [storm get-errors topology-name]
    Get the latest error from the running topology. The returned result contains
    the key-value pairs of component-name and component-error for the components in error.
    The result is returned in JSON format.
"""
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
"""Syntax: [storm node-health-check]
Run health checks on the local supervisor.
"""
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
"""Syntax: [storm kill_workers]
Kill the workers running on this supervisor. This command should be run
    on a supervisor node. If the cluster is running in secure mode, then the user needs
to have admin rights on the node to be able to successfully kill all workers.
"""
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
"""Syntax: [storm admin cmd [options]]
The storm admin command provides access to several operations that can help
an administrator debug or fix a cluster.
remove_corrupt_topologies - This command should be run on a nimbus node as
the same user nimbus runs as. It will go directly to zookeeper + blobstore
and find topologies that appear to be corrupted because of missing blobs.
It will kill those topologies.
zk_cli [options] - This command will launch a zookeeper cli pointing to the
storm zookeeper instance logged in as the nimbus user. It should be run on
a nimbus server as the user nimbus runs as.
-s --server <connection string>: Set the connection string to use,
defaults to storm connection string.
-t --time-out <timeout>: Set the timeout to use, defaults to storm
zookeeper timeout.
-w --write: Allow for writes, defaults to read only, we don't want to
cause problems.
-n --no-root: Don't include the storm root on the default connection string.
-j --jaas <jaas_file>: Include a jaas file that should be used when
authenticating with ZK defaults to the
java.security.auth.login.config conf.
creds topology_id - Print the credential keys for a topology.
"""
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
"""Syntax: [storm shell resourcesdir command args]
    Archives the resources directory into a jar, uploads that jar to Nimbus, and executes the following arguments locally. Useful for non-JVM languages.
    e.g.: `storm shell resources/ python topology.py arg1 arg2`
"""
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
"""Syntax: [storm repl]
Opens up a Clojure REPL with the storm jars and configuration
on the classpath. Useful for debugging.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
"""Syntax: [storm nimbus]
Launches the nimbus daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
"""Syntax: [storm pacemaker]
Launches the Pacemaker daemon. This command should be run under
supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
"""Syntax: [storm supervisor]
Launches the supervisor daemon. This command should be run
under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
"""Syntax: [storm ui]
Launches the UI daemon. The UI provides a web interface for a Storm
cluster and shows detailed stats about running topologies. This command
should be run under supervision with a tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
"""Syntax: [storm logviewer]
Launches the log viewer daemon. It provides a web interface for viewing
storm log files. This command should be run under supervision with a
tool like daemontools or monit.
See Setting up a Storm cluster for more information.
(http://storm.apache.org/documentation/Setting-up-a-Storm-cluster)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
"""Syntax: [storm drpc-client [options] ([function argument]*)|(argument*)]
Provides a very simple way to send DRPC requests.
    If a -f argument is supplied to set the function name, all of the arguments are treated
    as arguments to the function. If no function is given, the arguments must
    be pairs of function and argument.
The server and port are picked from the configs.
"""
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
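# Two hypothetical ways to call the DRPC client above (function and argument values are
# placeholders):
#
#   storm drpc-client -f reach http://a.example http://b.example       # one function, many arguments
#   storm drpc-client reach http://a.example count http://b.example    # function/argument pairs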
def drpc():
"""Syntax: [storm drpc]
Launches a DRPC daemon. This command should be run under supervision
with a tool like daemontools or monit.
See Distributed RPC for more information.
(http://storm.apache.org/documentation/Distributed-RPC)
"""
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
"""Syntax: [storm dev-zookeeper]
Launches a fresh Zookeeper server using "dev.zookeeper.path" as its local dir and
"storm.zookeeper.port" as its port. This is only intended for development/testing, the
Zookeeper instance launched is not configured to be used in production.
"""
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
"""Syntax: [storm version]
Prints the version number of this Storm release.
"""
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
"""Syntax: [storm classpath]
Prints the classpath used by the storm client when running commands.
"""
print(get_classpath([], client=True))
def print_server_classpath():
"""Syntax: [storm server_classpath]
Prints the classpath used by the storm servers when running commands.
"""
print(get_classpath([], daemon=True))
def monitor(*args):
"""Syntax: [storm monitor topology-name [-i interval-secs] [-m component-id] [-s stream-id] [-w [emitted | transferred]]]
    Monitor the given topology's throughput interactively.
    One can specify the poll-interval, component-id, stream-id, and watch-item [emitted | transferred].
    By default,
    poll-interval is 4 seconds;
    all component-ids will be listed;
stream-id is 'default';
watch-item is 'emitted';
"""
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
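# Hypothetical example for the monitor command above (names are placeholders):
#
#   storm monitor my-topology -i 10 -m my-bolt -s default -w emitted
#
# polls the 'emitted' counter of component my-bolt on stream 'default' every 10 seconds.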
def print_commands():
"""Print all client commands and link to documentation"""
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
"""Print one help message or list of available commands"""
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
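# Worked example (hypothetical command line) for the option parsers above:
#
#   storm -c nimbus.seeds=nimbus1 jar topo.jar com.example.Main --jars extra.jar arg1
#
# parse_config_opts() receives sys.argv[1:], pulls out config_list=["nimbus.seeds=nimbus1"]
# and jars_list=["extra.jar"], and leaves args=["jar", "topo.jar", "com.example.Main", "arg1"]
# for main() to dispatch through COMMANDS.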
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
| 40.200747
| 523
| 0.68373
|
from __future__ import print_function
import os
import random
import re
import shlex
import tempfile
import uuid
import subprocess as sub
import json
import sys
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
try:
import configparser
except ImportError:
import ConfigParser as configparser
def is_windows():
return sys.platform.startswith('win')
def identity(x):
return x
def cygpath(x):
command = ["cygpath", "-wp", x]
p = sub.Popen(command,stdout=sub.PIPE)
output, errors = p.communicate()
lines = output.split(os.linesep)
return lines[0]
def init_storm_env():
global CLUSTER_CONF_DIR
ini_file = os.path.join(CLUSTER_CONF_DIR, 'storm_env.ini')
if not os.path.isfile(ini_file):
return
config = configparser.ConfigParser()
config.optionxform = str
config.read(ini_file)
options = config.options('environment')
for option in options:
value = config.get('environment', option)
os.environ[option] = value
def get_java_cmd():
cmd = 'java' if not is_windows() else 'java.exe'
if JAVA_HOME:
cmd = os.path.join(JAVA_HOME, 'bin', cmd)
return cmd
normclasspath = cygpath if sys.platform == 'cygwin' else identity
STORM_DIR = os.sep.join(os.path.realpath( __file__ ).split(os.sep)[:-2])
USER_CONF_DIR = os.path.expanduser("~" + os.sep + ".storm")
STORM_CONF_DIR = os.getenv('STORM_CONF_DIR', None)
if STORM_CONF_DIR == None:
CLUSTER_CONF_DIR = os.path.join(STORM_DIR, "conf")
else:
CLUSTER_CONF_DIR = STORM_CONF_DIR
if (not os.path.isfile(os.path.join(USER_CONF_DIR, "storm.yaml"))):
USER_CONF_DIR = CLUSTER_CONF_DIR
STORM_WORKER_LIB_DIR = os.path.join(STORM_DIR, "lib-worker")
STORM_LIB_DIR = os.path.join(STORM_DIR, "lib")
STORM_TOOLS_LIB_DIR = os.path.join(STORM_DIR, "lib-tools")
STORM_WEBAPP_LIB_DIR = os.path.join(STORM_DIR, "lib-webapp")
STORM_BIN_DIR = os.path.join(STORM_DIR, "bin")
STORM_LOG4J2_CONF_DIR = os.path.join(STORM_DIR, "log4j2")
STORM_SUPERVISOR_LOG_FILE = os.getenv('STORM_SUPERVISOR_LOG_FILE', "supervisor.log")
init_storm_env()
CONFIG_OPTS = []
CONFFILE = ""
JAR_JVM_OPTS = shlex.split(os.getenv('STORM_JAR_JVM_OPTS', ''))
JAVA_HOME = os.getenv('JAVA_HOME', None)
JAVA_CMD = get_java_cmd()
if JAVA_HOME and not os.path.exists(JAVA_CMD):
print("ERROR: JAVA_HOME is invalid. Could not find bin/java at %s." % JAVA_HOME)
sys.exit(1)
STORM_EXT_CLASSPATH = os.getenv('STORM_EXT_CLASSPATH', None)
STORM_EXT_CLASSPATH_DAEMON = os.getenv('STORM_EXT_CLASSPATH_DAEMON', None)
DEP_JARS_OPTS = []
DEP_ARTIFACTS_OPTS = []
DEP_ARTIFACTS_REPOSITORIES_OPTS = []
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = None
DEP_PROXY_URL = None
DEP_PROXY_USERNAME = None
DEP_PROXY_PASSWORD = None
def get_config_opts():
global CONFIG_OPTS
return "-Dstorm.options=" + ','.join(map(quote_plus,CONFIG_OPTS))
if not os.path.exists(STORM_LIB_DIR):
print("******************************************")
print("The storm client can only be run from within a release. You appear to be trying to run the client from a checkout of Storm's source code.")
print("\nYou can download a Storm release at http://storm.apache.org/downloads.html")
print("******************************************")
sys.exit(1)
def get_jars_full(adir):
files = []
if os.path.isdir(adir):
files = os.listdir(adir)
elif os.path.exists(adir):
files = [adir]
ret = []
for f in files:
if f.endswith(".jar"):
ret.append(os.path.join(adir, f))
return ret
# If given path is a dir, make it a wildcard so the JVM will include all JARs in the directory.
def get_wildcard_dir(path):
    # Default to an empty list so a missing path does not raise UnboundLocalError.
    ret = []
    if os.path.isdir(path):
        ret = [(os.path.join(path, "*"))]
    elif os.path.exists(path):
        ret = [path]
    return ret
def get_classpath(extrajars, daemon=True, client=False):
ret = get_wildcard_dir(STORM_DIR)
if client:
ret.extend(get_wildcard_dir(STORM_WORKER_LIB_DIR))
else :
ret.extend(get_wildcard_dir(STORM_LIB_DIR))
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib")))
if daemon:
ret.extend(get_wildcard_dir(os.path.join(STORM_DIR, "extlib-daemon")))
if STORM_EXT_CLASSPATH != None:
ret.append(STORM_EXT_CLASSPATH)
if daemon and STORM_EXT_CLASSPATH_DAEMON != None:
ret.append(STORM_EXT_CLASSPATH_DAEMON)
ret.extend(extrajars)
return normclasspath(os.pathsep.join(ret))
def confvalue(name, extrapaths, daemon=True):
global CONFFILE
command = [
JAVA_CMD, "-client", get_config_opts(), "-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrapaths, daemon), "org.apache.storm.command.ConfigValue", name
]
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
# python 3
if not isinstance(output, str):
output = output.decode('utf-8')
lines = output.split(os.linesep)
for line in lines:
tokens = line.split(" ")
if tokens[0] == "VALUE:":
return " ".join(tokens[1:])
return ""
def resolve_dependencies(artifacts, artifact_repositories, maven_local_repos_dir, proxy_url, proxy_username, proxy_password):
if len(artifacts) == 0:
return {}
print("Resolving dependencies on demand: artifacts (%s) with repositories (%s)" % (artifacts, artifact_repositories))
if maven_local_repos_dir is not None:
print("Local repository directory: %s" % maven_local_repos_dir)
if proxy_url is not None:
print("Proxy information: url (%s) username (%s)" % (proxy_url, proxy_username))
sys.stdout.flush()
# storm-submit module doesn't rely on storm-core and relevant libs
extrajars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "submit-tools"))
classpath = normclasspath(os.pathsep.join(extrajars))
command = [
JAVA_CMD, "-client", "-cp", classpath, "org.apache.storm.submit.command.DependencyResolverMain"
]
command.extend(["--artifacts", ",".join(artifacts)])
command.extend(["--artifactRepositories", ",".join(artifact_repositories)])
if maven_local_repos_dir is not None:
command.extend(["--mavenLocalRepositoryDirectory", maven_local_repos_dir])
if proxy_url is not None:
command.extend(["--proxyUrl", proxy_url])
if proxy_username is not None:
command.extend(["--proxyUsername", proxy_username])
command.extend(["--proxyPassword", proxy_password])
p = sub.Popen(command, stdout=sub.PIPE)
output, errors = p.communicate()
if p.returncode != 0:
raise RuntimeError("dependency handler returns non-zero code: code<%s> syserr<%s>" % (p.returncode, errors))
if not isinstance(output, str):
output = output.decode('utf-8')
try:
out_dict = json.loads(output)
return out_dict
except:
raise RuntimeError("dependency handler returns non-json response: sysout<%s>", output)
def print_localconfvalue(name):
print(name + ": " + confvalue(name, [USER_CONF_DIR]))
def print_remoteconfvalue(name):
print(name + ": " + confvalue(name, [CLUSTER_CONF_DIR]))
def parse_args(string):
re_split = re.compile(r'''((?:
[^\s"'\\] |
"(?: [^"\\] | \\.)*" |
'(?: [^'\\] | \\.)*' |
\\.
)+)''', re.VERBOSE)
args = re_split.split(string)[1::2]
args = [re.compile(r'"((?:[^"\\]|\\.)*)"').sub('\\1', x) for x in args]
args = [re.compile(r"'((?:[^'\\]|\\.)*)'").sub('\\1', x) for x in args]
return [re.compile(r'\\(.)').sub('\\1', x) for x in args]
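# parse_args() splits a childopts string the way a shell would, keeping quoted substrings
# together and stripping the quotes afterwards. Illustrative example (not from any config):
#
#   parse_args('-Xmx1024m -Dfoo="a b"')  ->  ['-Xmx1024m', '-Dfoo=a b']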
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[], args=[], fork=False, daemon=True, client=False, daemonName=""):
global CONFFILE
storm_log_dir = confvalue("storm.log.dir",[CLUSTER_CONF_DIR])
if(storm_log_dir == None or storm_log_dir == "null"):
storm_log_dir = os.path.join(STORM_DIR, "logs")
all_args = [
JAVA_CMD, jvmtype,
"-Ddaemon.name=" + daemonName,
get_config_opts(),
"-Dstorm.home=" + STORM_DIR,
"-Dstorm.log.dir=" + storm_log_dir,
"-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
"-Dstorm.conf.file=" + CONFFILE,
"-cp", get_classpath(extrajars, daemon, client=client),
] + jvmopts + [klass] + list(args)
print("Running: " + " ".join(all_args))
sys.stdout.flush()
exit_code = 0
if fork:
exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
elif is_windows():
# handling whitespaces in JAVA_CMD
try:
ret = sub.check_output(all_args, stderr=sub.STDOUT)
print(ret)
except sub.CalledProcessError as e:
print(e.output)
sys.exit(e.returncode)
else:
os.execvp(JAVA_CMD, all_args)
return exit_code
def run_client_jar(jarfile, klass, args, daemon=False, client=True, extrajvmopts=[]):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
extra_jars=[jarfile, USER_CONF_DIR, STORM_BIN_DIR]
extra_jars.extend(local_jars)
extra_jars.extend(artifact_to_file_jars.values())
exec_storm_class(
klass,
jvmtype="-client",
extrajars=extra_jars,
args=args,
daemon=False,
jvmopts=JAR_JVM_OPTS + extrajvmopts + ["-Dstorm.jar=" + jarfile] +
["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def local(jarfile, klass, *args):
[ttl, debug_args, args] = parse_local_opts(args)
extrajvmopts = ["-Dstorm.local.sleeptime=" + ttl]
if debug_args != None:
extrajvmopts = extrajvmopts + ["-agentlib:jdwp=" + debug_args]
run_client_jar(jarfile, "org.apache.storm.LocalCluster", [klass] + list(args), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args):
[server_class_path, args] = parse_jar_opts(args)
run_client_jar(jarfile, klass, list(args), client=not server_class_path, daemon=False)
def sql(sql_file, topology_name):
global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
local_jars = DEP_JARS_OPTS
artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD)
# include storm-sql-runtime jar(s) to local jar list
# --jars doesn't support wildcard so it should call get_jars_full
sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "runtime"))
local_jars.extend(sql_runtime_jars)
extrajars=[USER_CONF_DIR, STORM_BIN_DIR]
extrajars.extend(local_jars)
extrajars.extend(artifact_to_file_jars.values())
# include this for running StormSqlRunner, but not for generated topology
sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, "sql", "core"))
extrajars.extend(sql_core_jars)
if topology_name == "--explain":
args = ["--file", sql_file, "--explain"]
else:
args = ["--file", sql_file, "--topology", topology_name]
exec_storm_class(
"org.apache.storm.sql.StormSqlRunner",
jvmtype="-client",
extrajars=extrajars,
args=args,
daemon=False,
jvmopts=["-Dstorm.dependency.jars=" + ",".join(local_jars)] +
["-Dstorm.dependency.artifacts=" + json.dumps(artifact_to_file_jars)])
def kill(*args):
if not args:
print_usage(command="kill")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.KillTopology",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def upload_credentials(*args):
if not args:
print_usage(command="upload-credentials")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.UploadCredentials",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def blobstore(*args):
exec_storm_class(
"org.apache.storm.command.Blobstore",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def heartbeats(*args):
exec_storm_class(
"org.apache.storm.command.Heartbeats",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def activate(*args):
if not args:
print_usage(command="activate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Activate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args):
exec_storm_class(
"org.apache.storm.command.SetLogLevel",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def listtopos(*args):
exec_storm_class(
"org.apache.storm.command.ListTopologies",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args):
if not args:
print_usage(command="deactivate")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Deactivate",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def rebalance(*args):
if not args:
print_usage(command="rebalance")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.Rebalance",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def get_errors(*args):
if not args:
print_usage(command="get-errors")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.GetErrors",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def healthcheck(*args):
exec_storm_class(
"org.apache.storm.command.HealthCheck",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def kill_workers(*args):
exec_storm_class(
"org.apache.storm.command.KillWorkers",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def admin(*args):
exec_storm_class(
"org.apache.storm.command.AdminCommands",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, "bin")])
def shell(resourcesdir, command, *args):
tmpjarpath = "stormshell" + str(random.randint(0, 10000000)) + ".jar"
os.system("jar cf %s %s" % (tmpjarpath, resourcesdir))
runnerargs = [tmpjarpath, command]
runnerargs.extend(args)
exec_storm_class(
"org.apache.storm.command.shell_submission",
args=runnerargs,
jvmtype="-client",
extrajars=[USER_CONF_DIR],
fork=True)
os.system("rm " + tmpjarpath)
def repl():
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class("clojure.main", jvmtype="-client", extrajars=cppaths)
def get_log4j2_conf_dir():
cppaths = [CLUSTER_CONF_DIR]
storm_log4j2_conf_dir = confvalue("storm.log4j2.conf.dir", cppaths)
if(storm_log4j2_conf_dir == None or storm_log4j2_conf_dir == "null"):
storm_log4j2_conf_dir = STORM_LOG4J2_CONF_DIR
elif(not os.path.isabs(storm_log4j2_conf_dir)):
storm_log4j2_conf_dir = os.path.join(STORM_DIR, storm_log4j2_conf_dir)
return storm_log4j2_conf_dir
def nimbus(klass="org.apache.storm.daemon.nimbus.Nimbus"):
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("nimbus.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=nimbus.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="nimbus",
extrajars=cppaths,
jvmopts=jvmopts)
def pacemaker(klass="org.apache.storm.pacemaker.Pacemaker"):
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("pacemaker.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=pacemaker.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="pacemaker",
extrajars=cppaths,
jvmopts=jvmopts)
def supervisor(klass="org.apache.storm.daemon.supervisor.Supervisor"):
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("supervisor.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=" + STORM_SUPERVISOR_LOG_FILE,
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml"),
]
exec_storm_class(
klass,
jvmtype="-server",
daemonName="supervisor",
extrajars=cppaths,
jvmopts=jvmopts)
def ui():
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("ui.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=ui.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.ui.UIServer",
jvmtype="-server",
daemonName="ui",
jvmopts=jvmopts,
extrajars=allextrajars)
def logviewer():
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("logviewer.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=logviewer.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.logviewer.LogviewerServer",
jvmtype="-server",
daemonName="logviewer",
jvmopts=jvmopts,
extrajars=allextrajars)
def drpcclient(*args):
if not args:
print_usage(command="drpc-client")
sys.exit(2)
exec_storm_class(
"org.apache.storm.command.BasicDrpcClient",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def drpc():
cppaths = [CLUSTER_CONF_DIR]
jvmopts = parse_args(confvalue("drpc.childopts", cppaths)) + [
"-Djava.deserialization.disabled=true",
"-Dlogfile.name=drpc.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
allextrajars = get_wildcard_dir(STORM_WEBAPP_LIB_DIR)
allextrajars.append(CLUSTER_CONF_DIR)
exec_storm_class(
"org.apache.storm.daemon.drpc.DRPCServer",
jvmtype="-server",
daemonName="drpc",
jvmopts=jvmopts,
extrajars=allextrajars)
def dev_zookeeper():
jvmopts = [
"-Dlogfile.name=dev-zookeeper.log",
"-Dlog4j.configurationFile=" + os.path.join(get_log4j2_conf_dir(), "cluster.xml")
]
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.command.DevZookeeper",
jvmtype="-server",
daemonName="dev_zookeeper",
jvmopts=jvmopts,
extrajars=[CLUSTER_CONF_DIR])
def version():
cppaths = [CLUSTER_CONF_DIR]
exec_storm_class(
"org.apache.storm.utils.VersionInfo",
jvmtype="-client",
extrajars=[CLUSTER_CONF_DIR])
def print_classpath():
print(get_classpath([], client=True))
def print_server_classpath():
print(get_classpath([], daemon=True))
def monitor(*args):
exec_storm_class(
"org.apache.storm.command.Monitor",
args=args,
jvmtype="-client",
extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def print_commands():
print("Commands:\n\t" + "\n\t".join(sorted(COMMANDS.keys())))
print("\nHelp: \n\thelp \n\thelp <command>")
print("\nDocumentation for the storm client can be found at http://storm.apache.org/documentation/Command-line-client.html\n")
print("Configs can be overridden using one or more -c flags, e.g. \"storm list -c nimbus.host=nimbus.mycompany.com\"\n")
def print_usage(command=None):
if command != None:
if command in COMMANDS:
print(COMMANDS[command].__doc__ or
"No documentation provided for <%s>" % command)
else:
print("<%s> is not a valid command" % command)
else:
print_commands()
def unknown_command(*args):
print("Unknown command: [storm %s]" % ' '.join(sys.argv[1:]))
print_usage()
sys.exit(254)
COMMANDS = {"local": local, "jar": jar, "kill": kill, "shell": shell, "nimbus": nimbus, "ui": ui, "logviewer": logviewer,
"drpc": drpc, "drpc-client": drpcclient, "supervisor": supervisor, "localconfvalue": print_localconfvalue,
"remoteconfvalue": print_remoteconfvalue, "repl": repl, "classpath": print_classpath, "server_classpath": print_server_classpath,
"activate": activate, "deactivate": deactivate, "rebalance": rebalance, "help": print_usage,
"list": listtopos, "dev-zookeeper": dev_zookeeper, "version": version, "monitor": monitor,
"upload-credentials": upload_credentials, "pacemaker": pacemaker, "heartbeats": heartbeats, "blobstore": blobstore,
"get-errors": get_errors, "set_log_level": set_log_level, "kill_workers": kill_workers,
"node-health-check": healthcheck, "sql": sql, "admin": admin}
def parse_config(config_list):
global CONFIG_OPTS
if len(config_list) > 0:
for config in config_list:
CONFIG_OPTS.append(config)
def parse_local_opts(args):
curr = list(args[:])
curr.reverse()
ttl = "20"
debug_args = None
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--local-ttl":
ttl = curr.pop()
elif token == "--java-debug":
debug_args = curr.pop()
else:
args_list.append(token)
return ttl, debug_args, args_list
def parse_jar_opts(args):
curr = list(args[:])
curr.reverse()
server_class_path = False
args_list = []
while len(curr) > 0:
token = curr.pop()
if token == "--storm-server-classpath":
server_class_path = True
else:
args_list.append(token)
return server_class_path, args_list
def parse_config_opts(args):
curr = args[:]
curr.reverse()
config_list = []
args_list = []
jars_list = []
artifacts_list = []
artifact_repositories_list = []
maven_local_repository_dir = None
proxy_url = None
proxy_username = None
proxy_password = None
while len(curr) > 0:
token = curr.pop()
if token == "-c":
config_list.append(curr.pop())
elif token == "--config":
global CONFFILE
CONFFILE = curr.pop()
elif token == "--jars":
jars_list.extend(curr.pop().split(','))
elif token == "--artifacts":
artifacts_list.extend(curr.pop().split(','))
elif token == "--artifactRepositories":
artifact_repositories_list.extend(curr.pop().split(','))
elif token == "--mavenLocalRepositoryDirectory":
maven_local_repository_dir = curr.pop()
elif token == "--proxyUrl":
proxy_url = curr.pop()
elif token == "--proxyUsername":
proxy_username = curr.pop()
elif token == "--proxyPassword":
proxy_password = curr.pop()
else:
args_list.append(token)
return config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_repository_dir, \
proxy_url, proxy_username, proxy_password, args_list
def main():
if len(sys.argv) <= 1:
print_usage()
sys.exit(-1)
global CONFIG_OPTS, DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, \
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, \
DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD
config_list, jars_list, artifacts_list, artifact_repositories_list, maven_local_directory, proxy_url, \
proxy_username, proxy_password, args = parse_config_opts(sys.argv[1:])
parse_config(config_list)
DEP_JARS_OPTS = jars_list
DEP_ARTIFACTS_OPTS = artifacts_list
DEP_ARTIFACTS_REPOSITORIES_OPTS = artifact_repositories_list
DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY = maven_local_directory
DEP_PROXY_URL = proxy_url
DEP_PROXY_USERNAME = proxy_username
DEP_PROXY_PASSWORD = proxy_password
COMMAND = args[0]
ARGS = args[1:]
(COMMANDS.get(COMMAND, unknown_command))(*ARGS)
if __name__ == "__main__":
main()
| true
| true
|
7909fb560d815d77567795084d83d2173d6d5ab1
| 1,907
|
py
|
Python
|
scripts/features/structure_extractor.py
|
chakki-works/elephant-sense
|
ba7c95e557d8b5a2bdce699fb473de3183a7ca6f
|
[
"Apache-2.0"
] | 14
|
2017-04-07T10:46:49.000Z
|
2019-08-07T09:58:54.000Z
|
scripts/features/structure_extractor.py
|
chakki-works/elephant-sense
|
ba7c95e557d8b5a2bdce699fb473de3183a7ca6f
|
[
"Apache-2.0"
] | null | null | null |
scripts/features/structure_extractor.py
|
chakki-works/elephant-sense
|
ba7c95e557d8b5a2bdce699fb473de3183a7ca6f
|
[
"Apache-2.0"
] | null | null | null |
import re
from scripts.features.feature_extractor import FeatureExtractor
from bs4 import BeautifulSoup
class ItemizationCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
count = len(soup.find_all("ul"))
return count
class ImageCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
count = len(soup.find_all("img"))
return count
class FormulaCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
count = len(re.findall(r'\$.*?\$+', post.rendered_body))
return count
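# The *RatioExtractor classes below normalize the raw counts by the number of text
# segments obtained by splitting on the Japanese full stop "。" (the posts are assumed
# to be Japanese). Hypothetical example: 2 <ul> blocks in a body whose text splits into
# 4 segments give a ratio of 2 / 4 = 0.5.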
class ItemizationRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
target_count = len(soup.find_all("ul"))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
class ImageRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
target_count = len(soup.find_all("img"))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
class FormulaRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
target_count = len(re.findall(r'\$.*?\$+', post.rendered_body))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
| 27.242857
| 71
| 0.668589
|
import re
from scripts.features.feature_extractor import FeatureExtractor
from bs4 import BeautifulSoup
class ItemizationCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
count = len(soup.find_all("ul"))
return count
class ImageCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
count = len(soup.find_all("img"))
return count
class FormulaCountExtractor(FeatureExtractor):
def extract(self, post, extracted=None):
count = len(re.findall(r'\$.*?\$+', post.rendered_body))
return count
class ItemizationRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
target_count = len(soup.find_all("ul"))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
class ImageRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
soup = BeautifulSoup(post.rendered_body, "html.parser")
target_count = len(soup.find_all("img"))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
class FormulaRatioExtractor(FeatureExtractor):
def __init__(self, text):
self.text = text
def extract(self, post, extracted=None):
target_count = len(re.findall(r'\$.*?\$+', post.rendered_body))
lines_count = len(self.text.split("。"))
ratio = target_count / lines_count if target_count != 0 else 0
return ratio
| true
| true
|
7909fc0c5d8095ff6a62e02ac890a57d40931f40
| 1,352
|
py
|
Python
|
qa/rpc-tests/staticr-tx-send.py
|
diyathrajapakshe/bethel-core
|
9f272d635da18b91582dbbb2ba47cfce1a1fc9ca
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/staticr-tx-send.py
|
diyathrajapakshe/bethel-core
|
9f272d635da18b91582dbbb2ba47cfce1a1fc9ca
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/staticr-tx-send.py
|
diyathrajapakshe/bethel-core
|
9f272d635da18b91582dbbb2ba47cfce1a1fc9ca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bethel Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BethelTestFramework
from test_framework.staticr_util import *
import time
class StaticRTxSend(BethelTestFramework):
"""Tests the tx sending after softfork activation."""
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = self.setup_nodes()
self.is_network_split = split
def run_test(self):
        # check that a transaction can be sent after the reward changes to static
activate_staticr(self.nodes[0])
blockcount = self.nodes[0].getblockcount()
address = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(address, 100)
# wait for a new block to be mined
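        # (Assumption: block production continues on the regtest node, e.g. via staking
        # set up elsewhere in the test framework; this loop only polls every 5 seconds
        # until the chain height advances so the send above can confirm.)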
while self.nodes[0].getblockcount() == blockcount:
print("waiting for a new block...")
time.sleep(5)
transaction = self.nodes[0].gettransaction(txid)
# check the transaction confirmed
assert(transaction["confirmations"] > 0)
if __name__ == '__main__':
StaticRTxSend().main()
| 30.044444
| 80
| 0.678994
|
from test_framework.test_framework import BethelTestFramework
from test_framework.staticr_util import *
import time
class StaticRTxSend(BethelTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self, split=False):
self.nodes = self.setup_nodes()
self.is_network_split = split
def run_test(self):
activate_staticr(self.nodes[0])
blockcount = self.nodes[0].getblockcount()
address = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(address, 100)
while self.nodes[0].getblockcount() == blockcount:
print("waiting for a new block...")
time.sleep(5)
transaction = self.nodes[0].gettransaction(txid)
assert(transaction["confirmations"] > 0)
if __name__ == '__main__':
StaticRTxSend().main()
| true
| true
|
7909fc6dac6c123e255fc08303846d478c8de9e3
| 508
|
py
|
Python
|
test/aqua/operators/__init__.py
|
hushaohan/aqua
|
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
|
[
"Apache-2.0"
] | 14
|
2019-05-06T13:51:06.000Z
|
2022-02-28T05:48:16.000Z
|
test/aqua/operators/__init__.py
|
hushaohan/aqua
|
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
|
[
"Apache-2.0"
] | 2
|
2019-10-12T02:55:27.000Z
|
2019-10-13T00:14:07.000Z
|
test/aqua/operators/__init__.py
|
hushaohan/aqua
|
8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d
|
[
"Apache-2.0"
] | 17
|
2019-06-04T08:53:44.000Z
|
2022-01-02T22:22:38.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
| 36.285714
| 77
| 0.740157
| true
| true
|
|
7909fc76cf288ff1860027a65567cc79b4653cf3
| 5,623
|
py
|
Python
|
rklearn/tests/it/cifar10_cnn.py
|
rejux/rklearn-lib
|
56bc4f087a8c971cb545d65b0c1f9bafaaec3d67
|
[
"MIT"
] | null | null | null |
rklearn/tests/it/cifar10_cnn.py
|
rejux/rklearn-lib
|
56bc4f087a8c971cb545d65b0c1f9bafaaec3d67
|
[
"MIT"
] | null | null | null |
rklearn/tests/it/cifar10_cnn.py
|
rejux/rklearn-lib
|
56bc4f087a8c971cb545d65b0c1f9bafaaec3d67
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############
## Imports ##
#############
import os
import sys ; sys.path.append("/home/developer/workspace/rklearn-lib")
import tensorflow as tf
from rklearn.tfoo_v1 import BaseModel
#################
## CIFAR10CNN ##
#################
class CIFAR10CNN(BaseModel):
################
## __init__() ##
################
def __init__(self, config, logger = None):
super().__init__(config, logger)
try:
# these parameters are sent to the trainer through the model because it is easier
self.num_epochs = self.config.cifar10_cnn["num_epochs"]
self.learning_rate = self.config.cifar10_cnn["learning_rate"]
self.max_to_keep = self.config.cifar10_cnn["max_to_keep"]
self.checkpoint_dir = self.config.cifar10_cnn["checkpoint_dir"]
self.model_dir = self.config.cifar10_cnn["model_dir"]
os.makedirs(self.checkpoint_dir, exist_ok = True)
os.makedirs(self.model_dir, exist_ok = True)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logger.error("error msg = {}, error type = {}, error file = {}, error line = {}".format(e, exc_type, fname, exc_tb.tb_lineno))
raise RuntimeError("Error in CIFAR10CNN construction regarding the checkpoints and model directories!")
###################
## build_model() ##
###################
def build_model(self):
"""
Build the custom CNN for the CIFAR-10 dataset.
"""
# The input data holders (cf. shapes after prepa)
self.X = tf.compat.v1.placeholder(tf.float32, shape = (None,
self.config.data["image_size"],
self.config.data["image_size"],
self.config.data["num_channels"]), name="X") # ex. (50000, 32, 32, 3)
self.y = tf.compat.v1.placeholder(tf.int32, shape = (None, self.config.data["num_categories"]), name="y") # ex. (50000, 10)
self.train = tf.compat.v1.placeholder(tf.bool)
# The CNN architecture = conv/poo layers + flatten layer + connected layers
with tf.name_scope("cnn"):
# a. Create convolution/pooling layers = conv + drop + pool + conv + drop + pool + conv + pool + conv + drop
self.conv1 = tf.layers.conv2d(self.X,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
self.conv2 = tf.layers.conv2d(self.pool1,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
self.conv3 = tf.layers.conv2d(self.pool2,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
self.conv4 = tf.layers.conv2d(self.pool3,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)
# b. Flatten input data
self.flatten = tf.reshape(self.drop3, [-1, self.config.cifar10_cnn["fc1_nb_units"]])
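            # NOTE: for this reshape to keep the batch dimension, fc1_nb_units must equal
            # the flattened per-sample size of drop3 (feature-map height * width * num_filters)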
            # c. Create fully connected layers: fc1, fc2
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={"is_training": self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)
# Compute loss
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
# Optimizer
with tf.name_scope("training_op"):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
# Perf metrics
with tf.name_scope("accuracy"):
prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
| 47.652542
| 138
| 0.538858
|
nt_dir, exist_ok = True)
os.makedirs(self.model_dir, exist_ok = True)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
logger.error("error msg = {}, error type = {}, error file = {}, error line = {}".format(e, exc_type, fname, exc_tb.tb_lineno))
raise RuntimeError("Error in CIFAR10CNN construction regarding the checkpoints and model directories!")
um_channels"]), name="X")
self.y = tf.compat.v1.placeholder(tf.int32, shape = (None, self.config.data["num_categories"]), name="y")
self.train = tf.compat.v1.placeholder(tf.bool)
with tf.name_scope("cnn"):
self.conv1 = tf.layers.conv2d(self.X,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop1 = tf.layers.dropout(self.conv1, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool1 = tf.layers.max_pooling2d(self.drop1, 2, 2)
self.conv2 = tf.layers.conv2d(self.pool1,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop2 = tf.layers.dropout(self.conv2, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.pool2 = tf.layers.max_pooling2d(self.drop2, 2, 2)
self.conv3 = tf.layers.conv2d(self.pool2,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.pool3 = tf.layers.max_pooling2d(self.conv3, 2, 2)
self.conv4 = tf.layers.conv2d(self.pool3,
self.config.cifar10_cnn["num_filters"],
self.config.cifar10_cnn["filter_size"],
padding='same', activation=tf.nn.relu)
self.drop3 = tf.layers.dropout(self.conv4, self.config.cifar10_cnn["keep_prob"], training=self.train)
self.flatten = tf.reshape(self.drop3, [-1, self.config.cifar10_cnn["fc1_nb_units"]])
with tf.contrib.framework.arg_scope([tf.contrib.layers.fully_connected],
normalizer_fn=tf.contrib.layers.batch_norm,
normalizer_params={"is_training": self.train}):
self.fc1 = tf.contrib.layers.fully_connected(self.flatten, self.config.cifar10_cnn["fc1_nb_units"])
self.fc2 = tf.contrib.layers.fully_connected(self.fc1, self.config.data["num_categories"], activation_fn=None)
with tf.name_scope("loss"):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.fc2, labels=self.y))
with tf.name_scope("training_op"):
self.training_op = tf.compat.v1.train.AdamOptimizer(self.learning_rate).minimize(self.loss)
with tf.name_scope("accuracy"):
prediction = tf.equal(tf.argmax(self.fc2, 1), tf.argmax(self.y, 1))
self.accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))
| true
| true
|
7909fcd7f87bea28649dafa37c3a9dd8cefc7e39
| 6,812
|
py
|
Python
|
colorpy/colorpy-0.1.0/blackbody.py
|
gmweir/QuasiOptics
|
0974178984f845597c5209217613c26edf931ed0
|
[
"MIT"
] | 1
|
2020-11-06T18:16:00.000Z
|
2020-11-06T18:16:00.000Z
|
colorpy/colorpy-0.1.1/blackbody.py
|
gmweir/QuasiOptics
|
0974178984f845597c5209217613c26edf931ed0
|
[
"MIT"
] | null | null | null |
colorpy/colorpy-0.1.1/blackbody.py
|
gmweir/QuasiOptics
|
0974178984f845597c5209217613c26edf931ed0
|
[
"MIT"
] | null | null | null |
'''
blackbody.py - Color of thermal blackbodies.
Description:
Calculate the spectrum of a thermal blackbody at an arbitrary temperature.
Constants:
PLANCK_CONSTANT - Planck's constant, in J-sec
SPEED_OF_LIGHT - Speed of light, in m/sec
BOLTZMAN_CONSTANT - Boltzman's constant, in J/K
SUN_TEMPERATURE - Surface temperature of the Sun, in K
Functions:
blackbody_specific_intensity (wl_nm, T_K) -
Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.
blackbody_spectrum (T_K) -
Get the spectrum of a blackbody, as a numpy array.
blackbody_color (T_K) -
Given a temperature (K), return the xyz color of a thermal blackbody.
Plots:
blackbody_patch_plot (T_list, title, filename) -
Draw a patch plot of blackbody colors for the given temperature range.
blackbody_color_vs_temperature_plot (T_list, title, filename) -
Draw a color vs temperature plot for the given temperature range.
blackbody_spectrum_plot (T_K) -
Draw the spectrum of a blackbody at the given temperature.
References:
Frank H. Shu, The Physical Universe. An Introduction to Astronomy,
University Science Books, Mill Valley, California. 1982. ISBN 0-935702-05-9.
Charles Kittel and Herbert Kroemer, Thermal Physics, 2nd edition,
W. H. Freeman, New York, 1980. ISBN 0-7167-1088-9.
License:
Copyright (C) 2008 Mark Kness
Author - Mark Kness - mkness@alumni.utexas.net
This file is part of ColorPy.
ColorPy is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ColorPy is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with ColorPy. If not, see <http://www.gnu.org/licenses/>.
'''
import math, numpy, pylab
import colormodels
import ciexyz
import plots
# Physical constants in mks units
PLANCK_CONSTANT = 6.6237e-34 # J-sec
SPEED_OF_LIGHT = 2.997925e+08 # m/sec
BOLTZMAN_CONSTANT = 1.3802e-23 # J/K
SUN_TEMPERATURE = 5778.0 # K
def blackbody_specific_intensity (wl_nm, T_K):
'''Get the monochromatic specific intensity for a blackbody -
wl_nm = wavelength [nm]
T_K = temperature [K]
This is the energy radiated per second per unit wavelength per unit solid angle.
Reference - Shu, eq. 4.6, p. 78.'''
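    # Planck's law: B_lambda(T) = 2*h*c^2 / (lambda^5 * (exp(h*c/(lambda*k*T)) - 1));
    # below, a = h*c/k and b = 2*h*c^2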
# precalculations that could be made global
a = (PLANCK_CONSTANT * SPEED_OF_LIGHT) / (BOLTZMAN_CONSTANT)
b = (2.0 * PLANCK_CONSTANT * SPEED_OF_LIGHT * SPEED_OF_LIGHT)
wl_m = wl_nm * 1.0e-9
try:
exponent = a / (wl_m * T_K)
except ZeroDivisionError:
# treat same as large exponent
return 0.0
if exponent > 500.0:
# so large that the final result is nearly zero - avoid the giant intermediate
return 0.0
specific_intensity = b / (math.pow (wl_m, 5) * (math.exp (exponent) - 1.0))
return specific_intensity
def blackbody_spectrum (T_K):
'''Get the spectrum of a blackbody, as a numpy array.'''
spectrum = ciexyz.empty_spectrum()
(num_rows, num_cols) = spectrum.shape
for i in xrange (0, num_rows):
specific_intensity = blackbody_specific_intensity (spectrum [i][0], T_K)
# scale by size of wavelength interval
spectrum [i][1] = specific_intensity * ciexyz.delta_wl_nm * 1.0e-9
return spectrum
def blackbody_color (T_K):
'''Given a temperature (K), return the xyz color of a thermal blackbody.'''
spectrum = blackbody_spectrum (T_K)
xyz = ciexyz.xyz_from_spectrum (spectrum)
return xyz
#
# Figures
#
def blackbody_patch_plot (T_list, title, filename):
'''Draw a patch plot of blackbody colors for the given temperature range.'''
xyz_colors = []
color_names = []
for Ti in T_list:
xyz = blackbody_color (Ti)
xyz_colors.append (xyz)
name = '%g K' % (Ti)
color_names.append (name)
plots.xyz_patch_plot (xyz_colors, color_names, title, filename)
def blackbody_color_vs_temperature_plot (T_list, title, filename):
'''Draw a color vs temperature plot for the given temperature range.'''
num_T = len (T_list)
rgb_list = numpy.empty ((num_T, 3))
for i in xrange (0, num_T):
T_i = T_list [i]
xyz = blackbody_color (T_i)
rgb_list [i] = colormodels.rgb_from_xyz (xyz)
# note that b and g become negative for low T - MatPlotLib skips those on the semilog plot.
plots.color_vs_param_plot (
T_list,
rgb_list,
title,
filename,
plotfunc = pylab.semilogy,
tight = True,
xlabel = r'Temperature (K)',
ylabel = r'RGB Color')
def blackbody_spectrum_plot (T_K):
'''Draw the spectrum of a blackbody at the given temperature.'''
spectrum = blackbody_spectrum (T_K)
title = 'Blackbody Spectrum - T %d K' % (int (T_K))
filename = 'BlackbodySpectrum-%dK' % (int (T_K))
plots.spectrum_plot (
spectrum,
title,
filename,
xlabel = 'Wavelength (nm)',
ylabel = 'Specific Intensity')
        #ylabel = 'Intensity ($W/m^2$)') # with LaTeX symbols, the axis text gets too big...
# Create sample figures
def figures ():
'''Create some blackbody plots.'''
# patch plots
T_list_0 = plots.log_interpolate ( 1200.0, 20000.0, 48)
T_list_hot = plots.log_interpolate (10000.0, 40000.0, 24)
T_list_cool = plots.log_interpolate ( 950.0, 1200.0, 24)
blackbody_patch_plot (T_list_0, 'Blackbody Colors', 'Blackbody-Patch')
blackbody_patch_plot (T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch')
blackbody_patch_plot (T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch')
# color vs temperature
blackbody_color_vs_temperature_plot (range (1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors')
blackbody_color_vs_temperature_plot (range (10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors')
blackbody_color_vs_temperature_plot (range (950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors')
# spectrum of specific temperatures
blackbody_spectrum_plot (2000.0)
blackbody_spectrum_plot (3000.0) # Proxima Centauri
blackbody_spectrum_plot (SUN_TEMPERATURE) # Sun
blackbody_spectrum_plot (11000.0) # Rigel
blackbody_spectrum_plot (15000.0)
| 35.852632
| 116
| 0.695831
|
import math, numpy, pylab
import colormodels
import ciexyz
import plots
PLANCK_CONSTANT = 6.6237e-34
SPEED_OF_LIGHT = 2.997925e+08
BOLTZMAN_CONSTANT = 1.3802e-23
SUN_TEMPERATURE = 5778.0
def blackbody_specific_intensity (wl_nm, T_K):
a = (PLANCK_CONSTANT * SPEED_OF_LIGHT) / (BOLTZMAN_CONSTANT)
b = (2.0 * PLANCK_CONSTANT * SPEED_OF_LIGHT * SPEED_OF_LIGHT)
wl_m = wl_nm * 1.0e-9
try:
exponent = a / (wl_m * T_K)
except ZeroDivisionError:
return 0.0
if exponent > 500.0:
return 0.0
specific_intensity = b / (math.pow (wl_m, 5) * (math.exp (exponent) - 1.0))
return specific_intensity
def blackbody_spectrum (T_K):
spectrum = ciexyz.empty_spectrum()
(num_rows, num_cols) = spectrum.shape
for i in xrange (0, num_rows):
specific_intensity = blackbody_specific_intensity (spectrum [i][0], T_K)
spectrum [i][1] = specific_intensity * ciexyz.delta_wl_nm * 1.0e-9
return spectrum
def blackbody_color (T_K):
spectrum = blackbody_spectrum (T_K)
xyz = ciexyz.xyz_from_spectrum (spectrum)
return xyz
def blackbody_patch_plot (T_list, title, filename):
xyz_colors = []
color_names = []
for Ti in T_list:
xyz = blackbody_color (Ti)
xyz_colors.append (xyz)
name = '%g K' % (Ti)
color_names.append (name)
plots.xyz_patch_plot (xyz_colors, color_names, title, filename)
def blackbody_color_vs_temperature_plot (T_list, title, filename):
num_T = len (T_list)
rgb_list = numpy.empty ((num_T, 3))
for i in xrange (0, num_T):
T_i = T_list [i]
xyz = blackbody_color (T_i)
rgb_list [i] = colormodels.rgb_from_xyz (xyz)
plots.color_vs_param_plot (
T_list,
rgb_list,
title,
filename,
plotfunc = pylab.semilogy,
tight = True,
xlabel = r'Temperature (K)',
ylabel = r'RGB Color')
def blackbody_spectrum_plot (T_K):
spectrum = blackbody_spectrum (T_K)
title = 'Blackbody Spectrum - T %d K' % (int (T_K))
filename = 'BlackbodySpectrum-%dK' % (int (T_K))
plots.spectrum_plot (
spectrum,
title,
filename,
xlabel = 'Wavelength (nm)',
ylabel = 'Specific Intensity')
g_interpolate ( 1200.0, 20000.0, 48)
T_list_hot = plots.log_interpolate (10000.0, 40000.0, 24)
T_list_cool = plots.log_interpolate ( 950.0, 1200.0, 24)
blackbody_patch_plot (T_list_0, 'Blackbody Colors', 'Blackbody-Patch')
blackbody_patch_plot (T_list_hot, 'Hot Blackbody Colors', 'Blackbody-HotPatch')
blackbody_patch_plot (T_list_cool, 'Cool Blackbody Colors', 'Blackbody-CoolPatch')
blackbody_color_vs_temperature_plot (range (1200, 16000, 50), 'Blackbody Colors', 'Blackbody-Colors')
blackbody_color_vs_temperature_plot (range (10000, 40000, 100), 'Hot Blackbody Colors', 'Blackbody-HotColors')
blackbody_color_vs_temperature_plot (range (950, 1200, 1), 'Cool Blackbody Colors', 'Blackbody-CoolColors')
blackbody_spectrum_plot (2000.0)
blackbody_spectrum_plot (3000.0)
blackbody_spectrum_plot (SUN_TEMPERATURE)
blackbody_spectrum_plot (11000.0)
blackbody_spectrum_plot (15000.0)
| true
| true
|
7909feaa1363b66d123d84e63511a339ac928fb6
| 228
|
py
|
Python
|
shei/shei/doctype/price_configurator/test_price_configurator.py
|
CloudGround/shei
|
ab09f3c3cfd9a2c235f5f2d4ef64e029aff0436a
|
[
"MIT"
] | null | null | null |
shei/shei/doctype/price_configurator/test_price_configurator.py
|
CloudGround/shei
|
ab09f3c3cfd9a2c235f5f2d4ef64e029aff0436a
|
[
"MIT"
] | 2
|
2018-07-27T18:52:01.000Z
|
2018-12-18T16:37:59.000Z
|
shei/shei/doctype/price_configurator/test_price_configurator.py
|
CloudGround/shei
|
ab09f3c3cfd9a2c235f5f2d4ef64e029aff0436a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Aptitude technologie and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestPriceConfigurator(unittest.TestCase):
pass
| 20.727273
| 59
| 0.785088
|
from __future__ import unicode_literals
import frappe
import unittest
class TestPriceConfigurator(unittest.TestCase):
pass
| true
| true
|
7909feed5104e59222e59bddc53aaa2b982d1ff6
| 8,720
|
py
|
Python
|
yt/frontends/owls_subfind/data_structures.py
|
neutrinoceros2/yt
|
8cabf6091414e4d9a5037c4ff49199adf0ae64d6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/owls_subfind/data_structures.py
|
neutrinoceros2/yt
|
8cabf6091414e4d9a5037c4ff49199adf0ae64d6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/frontends/owls_subfind/data_structures.py
|
neutrinoceros2/yt
|
8cabf6091414e4d9a5037c4ff49199adf0ae64d6
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import glob
import os
from collections import defaultdict
import numpy as np
from yt.data_objects.static_output import ParticleDataset, ParticleFile
from yt.frontends.gadget.data_structures import _fix_unit_ordering
from yt.funcs import only_on_root, setdefaultattr
from yt.geometry.particle_geometry_handler import ParticleIndex
from yt.utilities.exceptions import YTException
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5py
from .fields import OWLSSubfindFieldInfo
class OWLSSubfindParticleIndex(ParticleIndex):
def __init__(self, ds, dataset_type):
super(OWLSSubfindParticleIndex, self).__init__(ds, dataset_type)
def _calculate_particle_index_starts(self):
# Halo indices are not saved in the file, so we must count by hand.
# File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc.
particle_count = defaultdict(int)
offset_count = 0
for data_file in self.data_files:
data_file.index_start = dict(
[(ptype, particle_count[ptype]) for ptype in data_file.total_particles]
)
data_file.offset_start = offset_count
for ptype in data_file.total_particles:
particle_count[ptype] += data_file.total_particles[ptype]
offset_count += data_file.total_offset
def _calculate_file_offset_map(self):
# After the FOF is performed, a load-balancing step redistributes halos
# and then writes more fields. Here, for each file, we create a list of
# files which contain the rest of the redistributed particles.
ifof = np.array(
[data_file.total_particles["FOF"] for data_file in self.data_files]
)
isub = np.array([data_file.total_offset for data_file in self.data_files])
subend = isub.cumsum()
fofend = ifof.cumsum()
istart = np.digitize(fofend - ifof, subend - isub) - 1
iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
for i, data_file in enumerate(self.data_files):
data_file.offset_files = self.data_files[istart[i] : iend[i] + 1]
def _detect_output_fields(self):
# TODO: Add additional fields
self._setup_filenames()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
dsl = []
units = {}
for dom in self.data_files:
fl, _units = self.io._identify_fields(dom)
units.update(_units)
dom._calculate_offsets(fl)
for f in fl:
if f not in dsl:
dsl.append(f)
self.field_list = dsl
ds = self.dataset
ds.particle_types = tuple(set(pt for pt, ds in dsl))
# This is an attribute that means these particle types *actually*
# exist. As in, they are real, in the dataset.
ds.field_units.update(units)
ds.particle_types_raw = ds.particle_types
class OWLSSubfindHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id, bounds):
super(OWLSSubfindHDF5File, self).__init__(ds, io, filename, file_id, bounds)
with h5py.File(filename, mode="r") as f:
self.header = dict((field, f.attrs[field]) for field in f.attrs.keys())
class OWLSSubfindDataset(ParticleDataset):
_index_class = OWLSSubfindParticleIndex
_file_class = OWLSSubfindHDF5File
_field_info_class = OWLSSubfindFieldInfo
_suffix = ".hdf5"
def __init__(
self,
filename,
dataset_type="subfind_hdf5",
index_order=None,
index_filename=None,
units_override=None,
unit_system="cgs",
):
super(OWLSSubfindDataset, self).__init__(
filename,
dataset_type,
index_order=index_order,
index_filename=index_filename,
units_override=units_override,
unit_system=unit_system,
)
def _parse_parameter_file(self):
handle = h5py.File(self.parameter_filename, mode="r")
hvals = {}
hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
hvals["Massarr"] = hvals["MassTable"]
self.dimensionality = 3
self.refine_by = 2
# Set standard values
self.current_time = self.quan(hvals["Time_GYR"], "Gyr")
self.domain_left_edge = np.zeros(3, "float64")
self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
self.domain_dimensions = np.ones(3, "int32")
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
self.current_redshift = hvals["Redshift"]
self.omega_lambda = hvals["OmegaLambda"]
self.omega_matter = hvals["Omega0"]
self.hubble_constant = hvals["HubbleParam"]
self.parameters = hvals
prefix = os.path.abspath(
os.path.join(
os.path.dirname(self.parameter_filename),
os.path.basename(self.parameter_filename).split(".", 1)[0],
)
)
suffix = self.parameter_filename.rsplit(".", 1)[-1]
self.filename_template = f"{prefix}.%(num)i.{suffix}"
self.file_count = len(glob.glob(prefix + "*" + self._suffix))
if self.file_count == 0:
raise YTException(message="No data files found.", ds=self)
self.particle_types = ("FOF", "SUBFIND")
self.particle_types_raw = ("FOF", "SUBFIND")
# To avoid having to open files twice
self._unit_base = {}
self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
handle.close()
def _set_code_unit_attributes(self):
# Set a sane default for cosmological simulations.
if self._unit_base is None and self.cosmological_simulation == 1:
only_on_root(mylog.info, "Assuming length units are in Mpc/h (comoving)")
self._unit_base = dict(length=(1.0, "Mpccm/h"))
# The other same defaults we will use from the standard Gadget
# defaults.
unit_base = self._unit_base or {}
if "length" in unit_base:
length_unit = unit_base["length"]
elif "UnitLength_in_cm" in unit_base:
if self.cosmological_simulation == 0:
length_unit = (unit_base["UnitLength_in_cm"], "cm")
else:
length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
else:
raise RuntimeError
length_unit = _fix_unit_ordering(length_unit)
setdefaultattr(self, "length_unit", self.quan(length_unit[0], length_unit[1]))
if "velocity" in unit_base:
velocity_unit = unit_base["velocity"]
elif "UnitVelocity_in_cm_per_s" in unit_base:
velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
else:
velocity_unit = (1e5, "cm/s * sqrt(a)")
velocity_unit = _fix_unit_ordering(velocity_unit)
setdefaultattr(
self, "velocity_unit", self.quan(velocity_unit[0], velocity_unit[1])
)
# We set hubble_constant = 1.0 for non-cosmology, so this is safe.
# Default to 1e10 Msun/h if mass is not specified.
if "mass" in unit_base:
mass_unit = unit_base["mass"]
elif "UnitMass_in_g" in unit_base:
if self.cosmological_simulation == 0:
mass_unit = (unit_base["UnitMass_in_g"], "g")
else:
mass_unit = (unit_base["UnitMass_in_g"], "g/h")
else:
# Sane default
mass_unit = (1.0, "1e10*Msun/h")
mass_unit = _fix_unit_ordering(mass_unit)
setdefaultattr(self, "mass_unit", self.quan(mass_unit[0], mass_unit[1]))
if "time" in unit_base:
time_unit = unit_base["time"]
elif "UnitTime_in_s" in unit_base:
time_unit = (unit_base["UnitTime_in_s"], "s")
else:
tu = (self.length_unit / self.velocity_unit).to("yr/h")
time_unit = (tu.d, tu.units)
setdefaultattr(self, "time_unit", self.quan(time_unit[0], time_unit[1]))
@classmethod
def _is_valid(self, *args, **kwargs):
need_groups = ["Constants", "Header", "Parameters", "Units", "FOF"]
veto_groups = []
valid = True
try:
fh = h5py.File(args[0], mode="r")
valid = all(ng in fh["/"] for ng in need_groups) and not any(
vg in fh["/"] for vg in veto_groups
)
fh.close()
except Exception:
valid = False
pass
return valid
| 40
| 87
| 0.620642
|
import glob
import os
from collections import defaultdict
import numpy as np
from yt.data_objects.static_output import ParticleDataset, ParticleFile
from yt.frontends.gadget.data_structures import _fix_unit_ordering
from yt.funcs import only_on_root, setdefaultattr
from yt.geometry.particle_geometry_handler import ParticleIndex
from yt.utilities.exceptions import YTException
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5py
from .fields import OWLSSubfindFieldInfo
class OWLSSubfindParticleIndex(ParticleIndex):
def __init__(self, ds, dataset_type):
super(OWLSSubfindParticleIndex, self).__init__(ds, dataset_type)
def _calculate_particle_index_starts(self):
particle_count = defaultdict(int)
offset_count = 0
for data_file in self.data_files:
data_file.index_start = dict(
[(ptype, particle_count[ptype]) for ptype in data_file.total_particles]
)
data_file.offset_start = offset_count
for ptype in data_file.total_particles:
particle_count[ptype] += data_file.total_particles[ptype]
offset_count += data_file.total_offset
def _calculate_file_offset_map(self):
ifof = np.array(
[data_file.total_particles["FOF"] for data_file in self.data_files]
)
isub = np.array([data_file.total_offset for data_file in self.data_files])
subend = isub.cumsum()
fofend = ifof.cumsum()
istart = np.digitize(fofend - ifof, subend - isub) - 1
iend = np.clip(np.digitize(fofend, subend), 0, ifof.size - 2)
for i, data_file in enumerate(self.data_files):
data_file.offset_files = self.data_files[istart[i] : iend[i] + 1]
def _detect_output_fields(self):
self._setup_filenames()
self._calculate_particle_index_starts()
self._calculate_file_offset_map()
dsl = []
units = {}
for dom in self.data_files:
fl, _units = self.io._identify_fields(dom)
units.update(_units)
dom._calculate_offsets(fl)
for f in fl:
if f not in dsl:
dsl.append(f)
self.field_list = dsl
ds = self.dataset
ds.particle_types = tuple(set(pt for pt, ds in dsl))
ds.field_units.update(units)
ds.particle_types_raw = ds.particle_types
class OWLSSubfindHDF5File(ParticleFile):
def __init__(self, ds, io, filename, file_id, bounds):
super(OWLSSubfindHDF5File, self).__init__(ds, io, filename, file_id, bounds)
with h5py.File(filename, mode="r") as f:
self.header = dict((field, f.attrs[field]) for field in f.attrs.keys())
class OWLSSubfindDataset(ParticleDataset):
_index_class = OWLSSubfindParticleIndex
_file_class = OWLSSubfindHDF5File
_field_info_class = OWLSSubfindFieldInfo
_suffix = ".hdf5"
def __init__(
self,
filename,
dataset_type="subfind_hdf5",
index_order=None,
index_filename=None,
units_override=None,
unit_system="cgs",
):
super(OWLSSubfindDataset, self).__init__(
filename,
dataset_type,
index_order=index_order,
index_filename=index_filename,
units_override=units_override,
unit_system=unit_system,
)
def _parse_parameter_file(self):
handle = h5py.File(self.parameter_filename, mode="r")
hvals = {}
hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
hvals["Massarr"] = hvals["MassTable"]
self.dimensionality = 3
self.refine_by = 2
self.current_time = self.quan(hvals["Time_GYR"], "Gyr")
self.domain_left_edge = np.zeros(3, "float64")
self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
self.domain_dimensions = np.ones(3, "int32")
self.cosmological_simulation = 1
self.periodicity = (True, True, True)
self.current_redshift = hvals["Redshift"]
self.omega_lambda = hvals["OmegaLambda"]
self.omega_matter = hvals["Omega0"]
self.hubble_constant = hvals["HubbleParam"]
self.parameters = hvals
prefix = os.path.abspath(
os.path.join(
os.path.dirname(self.parameter_filename),
os.path.basename(self.parameter_filename).split(".", 1)[0],
)
)
suffix = self.parameter_filename.rsplit(".", 1)[-1]
self.filename_template = f"{prefix}.%(num)i.{suffix}"
self.file_count = len(glob.glob(prefix + "*" + self._suffix))
if self.file_count == 0:
raise YTException(message="No data files found.", ds=self)
self.particle_types = ("FOF", "SUBFIND")
self.particle_types_raw = ("FOF", "SUBFIND")
self._unit_base = {}
self._unit_base.update((str(k), v) for k, v in handle["/Units"].attrs.items())
handle.close()
def _set_code_unit_attributes(self):
if self._unit_base is None and self.cosmological_simulation == 1:
only_on_root(mylog.info, "Assuming length units are in Mpc/h (comoving)")
self._unit_base = dict(length=(1.0, "Mpccm/h"))
unit_base = self._unit_base or {}
if "length" in unit_base:
length_unit = unit_base["length"]
elif "UnitLength_in_cm" in unit_base:
if self.cosmological_simulation == 0:
length_unit = (unit_base["UnitLength_in_cm"], "cm")
else:
length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
else:
raise RuntimeError
length_unit = _fix_unit_ordering(length_unit)
setdefaultattr(self, "length_unit", self.quan(length_unit[0], length_unit[1]))
if "velocity" in unit_base:
velocity_unit = unit_base["velocity"]
elif "UnitVelocity_in_cm_per_s" in unit_base:
velocity_unit = (unit_base["UnitVelocity_in_cm_per_s"], "cm/s")
else:
velocity_unit = (1e5, "cm/s * sqrt(a)")
velocity_unit = _fix_unit_ordering(velocity_unit)
setdefaultattr(
self, "velocity_unit", self.quan(velocity_unit[0], velocity_unit[1])
)
if "mass" in unit_base:
mass_unit = unit_base["mass"]
elif "UnitMass_in_g" in unit_base:
if self.cosmological_simulation == 0:
mass_unit = (unit_base["UnitMass_in_g"], "g")
else:
mass_unit = (unit_base["UnitMass_in_g"], "g/h")
else:
mass_unit = (1.0, "1e10*Msun/h")
mass_unit = _fix_unit_ordering(mass_unit)
setdefaultattr(self, "mass_unit", self.quan(mass_unit[0], mass_unit[1]))
if "time" in unit_base:
time_unit = unit_base["time"]
elif "UnitTime_in_s" in unit_base:
time_unit = (unit_base["UnitTime_in_s"], "s")
else:
tu = (self.length_unit / self.velocity_unit).to("yr/h")
time_unit = (tu.d, tu.units)
setdefaultattr(self, "time_unit", self.quan(time_unit[0], time_unit[1]))
@classmethod
def _is_valid(self, *args, **kwargs):
need_groups = ["Constants", "Header", "Parameters", "Units", "FOF"]
veto_groups = []
valid = True
try:
fh = h5py.File(args[0], mode="r")
valid = all(ng in fh["/"] for ng in need_groups) and not any(
vg in fh["/"] for vg in veto_groups
)
fh.close()
except Exception:
valid = False
pass
return valid
| true
| true
|
7909ff0d02864e4f0f1fb9e916578214988b459e
| 2,591
|
py
|
Python
|
spectral_clustering_fd/laplacian_sketch.py
|
AtsushiHashimoto/SpectralClusteringFD
|
dd150a08898ce354a1b59457dea2f5185d145ed7
|
[
"BSD-2-Clause"
] | null | null | null |
spectral_clustering_fd/laplacian_sketch.py
|
AtsushiHashimoto/SpectralClusteringFD
|
dd150a08898ce354a1b59457dea2f5185d145ed7
|
[
"BSD-2-Clause"
] | null | null | null |
spectral_clustering_fd/laplacian_sketch.py
|
AtsushiHashimoto/SpectralClusteringFD
|
dd150a08898ce354a1b59457dea2f5185d145ed7
|
[
"BSD-2-Clause"
] | null | null | null |
# coding: utf-8
import numpy as np
from frequent_direction import FrequentDirection
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_kernels
def laplacian_sketch(X,ell,k,do_normalize_feature,normed,callback,**args):
fd = FrequentDirection(ell,k)
D = np.array([np.sum(callback(X,i,**args)) for i in range(len(X))])
if normed:
D = np.sqrt(D)
isolation_mask = D==0
if do_normalize_feature:
        # normalize the original features (for cosine distance)
X[-isolation_mask] = normalize(X[-isolation_mask],norm='l2', axis=1, copy=False)
        D[:] = 1 # set D to 1 even for isolated (D==0) samples to avoid division by zero
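    # Build the Laplacian one row at a time and stream it into the sketch:
    # with normed=True, A_i is row i of the symmetric normalized Laplacian
    # I - D^(-1/2) W D^(-1/2); with normed=False it is row i of D - W.
    # fd.add_sample(-A_i) then feeds the negated row into the frequent-directions sketch.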
for i,isolation in enumerate(isolation_mask):
A_i = -1 * callback(X,i,**args)
if normed:
A_i /= D[i]
A_i /= D
            A_i[i] = 1 - isolation # diagonal entry is 1, or 0 for an isolated node
else:
A_i[i] = D[i]
fd.add_sample(-A_i)
return fd.get_result().T, D
def laplacian_sketch_rbf_kernel(X,ell,k,normed=True,gamma=None):
    return laplacian_sketch(X,ell,k,False,normed,one_row_rbf_kernel,gamma=gamma)
def laplacian_sketch_cosine_similarity(X,ell,k,normed=True):
return laplacian_sketch(X,ell,k,True,normed,one_row_cosine_similarity)
def one_row_rbf_kernel(X,i,gamma=None):
"""
X : array of shape (n_samples_X, n_features)
i : target sample in X (X[i])
gamma : float, default None
If None, defaults to 1.0 / n_samples_X
K(x, y) = exp(-gamma ||x-xi||^2)
Returns
-------
    kernel_row : array of shape (n_samples_X,), the i-th row of the kernel matrix
"""
if gamma is None:
gamma = 1.0 / X.shape[0]
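    # squared Euclidean distances from X[i] to every row of X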
d = np.sum(np.power(X-X[i],2),axis=1)
return np.array(np.exp(-gamma * d))
def one_row_cosine_similarity(X,i):
"""
X : normalized matrix
i : target sample in X
"""
a = (np.dot(X,X[i].T)+1)/2
a[a<0]=0
return a
def debug_one_row_rbf_kernel(X,gamma=None):
W = np.zeros((X.shape[0],X.shape[0]))
W_gt = pairwise_kernels(X, metric='rbf',
filter_params=True,
gamma=gamma)
for i,row in enumerate(X):
W[i] = one_row_rbf_kernel(X,i,gamma=gamma)
#print(W)
#print(W_gt)
#print(np.sum(W-W_gt))
def debug_one_row_cosine_similarity(X):
W = np.zeros((X.shape[0],X.shape[0]))
W_gt = pairwise_kernels(X, metric='cosine',
filter_params=True)
for i,row in enumerate(X):
W[i] = one_row_cosine_similarity(X,i)
print(W)
print(W_gt)
print(np.sum(W-W_gt))
| 30.482353
| 88
| 0.618294
|
import numpy as np
from frequent_direction import FrequentDirection
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import pairwise_kernels
def laplacian_sketch(X,ell,k,do_normalize_feature,normed,callback,**args):
fd = FrequentDirection(ell,k)
D = np.array([np.sum(callback(X,i,**args)) for i in range(len(X))])
if normed:
D = np.sqrt(D)
isolation_mask = D==0
if do_normalize_feature:
X[-isolation_mask] = normalize(X[-isolation_mask],norm='l2', axis=1, copy=False)
D[:] = 1
for i,isolation in enumerate(isolation_mask):
A_i = -1 * callback(X,i,**args)
if normed:
A_i /= D[i]
A_i /= D
A_i[i] = 1 - isolation
else:
A_i[i] = D[i]
fd.add_sample(-A_i)
return fd.get_result().T, D
def laplacian_sketch_rbf_kernel(X,ell,k,normed=True,gamma=None):
    return laplacian_sketch(X,ell,k,False,normed,one_row_rbf_kernel,gamma=gamma)
def laplacian_sketch_cosine_similarity(X,ell,k,normed=True):
return laplacian_sketch(X,ell,k,True,normed,one_row_cosine_similarity)
def one_row_rbf_kernel(X,i,gamma=None):
if gamma is None:
gamma = 1.0 / X.shape[0]
d = np.sum(np.power(X-X[i],2),axis=1)
return np.array(np.exp(-gamma * d))
def one_row_cosine_similarity(X,i):
a = (np.dot(X,X[i].T)+1)/2
a[a<0]=0
return a
def debug_one_row_rbf_kernel(X,gamma=None):
W = np.zeros((X.shape[0],X.shape[0]))
W_gt = pairwise_kernels(X, metric='rbf',
filter_params=True,
gamma=gamma)
for i,row in enumerate(X):
W[i] = one_row_rbf_kernel(X,i,gamma=gamma)
def debug_one_row_cosine_similarity(X):
W = np.zeros((X.shape[0],X.shape[0]))
W_gt = pairwise_kernels(X, metric='cosine',
filter_params=True)
for i,row in enumerate(X):
W[i] = one_row_cosine_similarity(X,i)
print(W)
print(W_gt)
print(np.sum(W-W_gt))
| true
| true
|
7909ffb3942cd78c3b55d4f2b80409a1a536304f
| 328
|
py
|
Python
|
plugin.video.mrknow/mylib/tests_pydevd_runfiles/samples/nested_dir/nested2/deep_nest_test.py
|
mrknow/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
plugin.video.mrknow/mylib/tests_pydevd_runfiles/samples/nested_dir/nested2/deep_nest_test.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
plugin.video.mrknow/mylib/tests_pydevd_runfiles/samples/nested_dir/nested2/deep_nest_test.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
import unittest
class SampleTest(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def test_non_unique_name(self):
pass
def test_asdf2(self):
pass
def test_i_am_a_unique_test_name(self):
pass
if __name__ == '__main__':
unittest.main()
| 14.26087
| 43
| 0.625
|
import unittest
class SampleTest(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
def test_non_unique_name(self):
pass
def test_asdf2(self):
pass
def test_i_am_a_unique_test_name(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
790a005d27683894422367f06a5fced6f2025836
| 1,115
|
py
|
Python
|
share/rpcuser/rpcuser.py
|
jazetjaz/monalisa
|
594b026ac5557d054f91e640e0a6315fdb1176e3
|
[
"MIT"
] | null | null | null |
share/rpcuser/rpcuser.py
|
jazetjaz/monalisa
|
594b026ac5557d054f91e640e0a6315fdb1176e3
|
[
"MIT"
] | null | null | null |
share/rpcuser/rpcuser.py
|
jazetjaz/monalisa
|
594b026ac5557d054f91e640e0a6315fdb1176e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
# This uses os.urandom() underneath
cryptogen = SystemRandom()
# Create a 16-byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
# Create a 32-byte base64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
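# The rpcauth line pairs the salt with HMAC-SHA256(key=salt, msg=password);
# the clear-text password itself is only printed once below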
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to monalisa.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| 26.547619
| 79
| 0.728251
|
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
cryptogen = SystemRandom()
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to monalisa.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| true
| true
|
790a011e5ef52727309bffc443a2624d07dfcd11
| 1,612
|
py
|
Python
|
conbench/api/commits.py
|
jonkeane/conbench
|
f096cc2f8b7a85d8e9aea32d8310127cf1923212
|
[
"MIT"
] | 48
|
2020-03-02T16:55:46.000Z
|
2022-02-26T00:35:57.000Z
|
conbench/api/commits.py
|
jonkeane/conbench
|
f096cc2f8b7a85d8e9aea32d8310127cf1923212
|
[
"MIT"
] | 103
|
2020-03-23T00:22:46.000Z
|
2022-03-31T22:34:40.000Z
|
conbench/api/commits.py
|
jonkeane/conbench
|
f096cc2f8b7a85d8e9aea32d8310127cf1923212
|
[
"MIT"
] | 6
|
2020-03-04T17:52:35.000Z
|
2022-03-30T11:53:40.000Z
|
from ..api import rule
from ..api._endpoint import ApiEndpoint, maybe_login_required
from ..entities._entity import NotFound
from ..entities.commit import Commit, CommitSerializer
class CommitListAPI(ApiEndpoint):
serializer = CommitSerializer()
@maybe_login_required
def get(self):
"""
---
description: Get a list of commits.
responses:
"200": "CommitList"
"401": "401"
tags:
- Commits
"""
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits)
class CommitEntityAPI(ApiEndpoint):
serializer = CommitSerializer()
def _get(self, commit_id):
try:
commit = Commit.one(id=commit_id)
except NotFound:
self.abort_404_not_found()
return commit
@maybe_login_required
def get(self, commit_id):
"""
---
description: Get a commit.
responses:
"200": "CommitEntity"
"401": "401"
"404": "404"
parameters:
- name: commit_id
in: path
schema:
type: string
tags:
- Commits
"""
commit = self._get(commit_id)
return self.serializer.one.dump(commit)
commit_entity_view = CommitEntityAPI.as_view("commit")
commit_list_view = CommitListAPI.as_view("commits")
rule(
"/commits/<commit_id>/",
view_func=commit_entity_view,
methods=["GET"],
)
rule(
"/commits/",
view_func=commit_list_view,
methods=["GET"],
)
| 23.362319
| 73
| 0.584367
|
from ..api import rule
from ..api._endpoint import ApiEndpoint, maybe_login_required
from ..entities._entity import NotFound
from ..entities.commit import Commit, CommitSerializer
class CommitListAPI(ApiEndpoint):
serializer = CommitSerializer()
@maybe_login_required
def get(self):
commits = Commit.all(order_by=Commit.timestamp.desc(), limit=500)
return self.serializer.many.dump(commits)
class CommitEntityAPI(ApiEndpoint):
serializer = CommitSerializer()
def _get(self, commit_id):
try:
commit = Commit.one(id=commit_id)
except NotFound:
self.abort_404_not_found()
return commit
@maybe_login_required
def get(self, commit_id):
commit = self._get(commit_id)
return self.serializer.one.dump(commit)
commit_entity_view = CommitEntityAPI.as_view("commit")
commit_list_view = CommitListAPI.as_view("commits")
rule(
"/commits/<commit_id>/",
view_func=commit_entity_view,
methods=["GET"],
)
rule(
"/commits/",
view_func=commit_list_view,
methods=["GET"],
)
| true
| true
|
790a0148bbbaea06ad3b9ef02df2dc9923e4a543
| 1,427
|
py
|
Python
|
src/helloworld/app.py
|
The-Heyman/helloworld
|
6b69f72c87b82c972fc91744157752525fbe95a5
|
[
"BSD-3-Clause"
] | null | null | null |
src/helloworld/app.py
|
The-Heyman/helloworld
|
6b69f72c87b82c972fc91744157752525fbe95a5
|
[
"BSD-3-Clause"
] | null | null | null |
src/helloworld/app.py
|
The-Heyman/helloworld
|
6b69f72c87b82c972fc91744157752525fbe95a5
|
[
"BSD-3-Clause"
] | null | null | null |
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
main_box = toga.Box(style=Pack(direction=COLUMN))
name_label = toga.Label(
'Your name: ',
            style=Pack(padding=(0, 5))
)
self.name_input = toga.TextInput(style=Pack(flex=1))
name_box = toga.Box(style=Pack(direction=ROW, padding=5))
name_box.add(name_label)
name_box.add(self.name_input)
button = toga.Button(
'Say Hello!',
on_press=self.say_hello,
style=Pack(padding=5)
)
main_box.add(name_box)
main_box.add(button)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def say_hello(self, widget):
if self.name_input.value:
name = self.name_input.value
else:
name = 'stranger'
self.main_window.info_dialog(
'Hi there!',
f"Hello, {name}"
)
def main():
return HelloWorld()
| 24.603448
| 72
| 0.580238
|
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
def startup(self):
main_box = toga.Box(style=Pack(direction=COLUMN))
name_label = toga.Label(
'Your name: ',
style=Pack(padding=(0 ,5))
)
self.name_input = toga.TextInput(style=Pack(flex=1))
name_box = toga.Box(style=Pack(direction=ROW, padding=5))
name_box.add(name_label)
name_box.add(self.name_input)
button = toga.Button(
'Say Hello!',
on_press=self.say_hello,
style=Pack(padding=5)
)
main_box.add(name_box)
main_box.add(button)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def say_hello(self, widget):
if self.name_input.value:
name = self.name_input.value
else:
name = 'stranger'
self.main_window.info_dialog(
'Hi there!',
f"Hello, {name}"
)
def main():
return HelloWorld()
| true
| true
|
790a02c63f400be4c967f95f858a0f8b4d61be54
| 720
|
py
|
Python
|
priv/farmware/quickscripts/download_and_detect_coordinates.py
|
bahanni/custom_rpi4
|
ddefa85d30bacaae40151a63a9a0ebbf4ad30ed5
|
[
"MIT"
] | null | null | null |
priv/farmware/quickscripts/download_and_detect_coordinates.py
|
bahanni/custom_rpi4
|
ddefa85d30bacaae40151a63a9a0ebbf4ad30ed5
|
[
"MIT"
] | null | null | null |
priv/farmware/quickscripts/download_and_detect_coordinates.py
|
bahanni/custom_rpi4
|
ddefa85d30bacaae40151a63a9a0ebbf4ad30ed5
|
[
"MIT"
] | null | null | null |
"""Download an image from the Web App and detect coordinates.
Download the image corresponding to the provided image ID, then run plant
detection and coordinate conversion.
"""
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from plant_detection.PlantDetection import PlantDetection
from plant_detection import ENV
from plant_detection.Log import log
if __name__ == "__main__":
IMAGE_ID = ENV.load('PLANT_DETECTION_selected_image', get_json=False)
if IMAGE_ID is None:
log('No image selected.',
message_type='error', title='historical-plant-detection')
sys.exit(0)
PD = PlantDetection(coordinates=True, app=True, app_image_id=IMAGE_ID)
PD.detect_plants()
| 30
| 75
| 0.740278
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from plant_detection.PlantDetection import PlantDetection
from plant_detection import ENV
from plant_detection.Log import log
if __name__ == "__main__":
IMAGE_ID = ENV.load('PLANT_DETECTION_selected_image', get_json=False)
if IMAGE_ID is None:
log('No image selected.',
message_type='error', title='historical-plant-detection')
sys.exit(0)
PD = PlantDetection(coordinates=True, app=True, app_image_id=IMAGE_ID)
PD.detect_plants()
| true
| true
|
790a03d0316972ef58c05cfb186e214b701a7d0e
| 2,821
|
py
|
Python
|
NAACL/ensemble.py
|
acproject/GNNs
|
953d175f672f0bb1b7cd25f371878728f3d27f09
|
[
"Apache-2.0"
] | 1
|
2021-01-10T02:57:13.000Z
|
2021-01-10T02:57:13.000Z
|
NAACL/ensemble.py
|
acproject/GNNs
|
953d175f672f0bb1b7cd25f371878728f3d27f09
|
[
"Apache-2.0"
] | null | null | null |
NAACL/ensemble.py
|
acproject/GNNs
|
953d175f672f0bb1b7cd25f371878728f3d27f09
|
[
"Apache-2.0"
] | null | null | null |
'''Ensemble some predictions. '''
import argparse
import collections
import math
from scipy.special import logsumexp
import sys
MODES = ['mean', 'max', 'logsumexp', 'noisy_or', 'log_noisy_or', 'odds_ratio']
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=MODES)
parser.add_argument('files', nargs='+')
parser.add_argument('--weights', '-w', type=lambda x:[float(t) for t in x.split(',')],
                        help='Comma-separated list of multipliers, one per file')
parser.add_argument('--out-file', '-o', default=None, help='Where to write all output')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args(args)
def read_preds(fn):
preds = []
with open(fn) as f:
for line in f:
idx, pmid, drug, gene, variant, prob = line.strip().split('\t')
prob = float(prob)
preds.append((pmid, drug, gene, variant, prob))
return preds
def main(OPTS):
preds_all = [read_preds(fn) for fn in OPTS.files]
groups = collections.defaultdict(list)
for i, preds in enumerate(preds_all):
if OPTS.weights:
weight = OPTS.weights[i]
else:
weight = 1.0
for pmid, drug, gene, variant, prob in preds:
groups[(pmid, drug, gene, variant)].append(weight * prob)
results = []
    for i, ((pmid, drug, gene, variant), prob_list) in enumerate(groups.items()):
if OPTS.mode == 'mean':
prob = sum(prob_list) / len(prob_list)
elif OPTS.mode == 'max':
prob = max(prob_list)
elif OPTS.mode == 'logsumexp':
prob = logsumexp(prob_list)
elif OPTS.mode == 'noisy_or':
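            # noisy-OR combination: P = 1 - prod_i(1 - p_i)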
prob_no_rel = 1.0
for p in prob_list:
prob_no_rel *= 1.0 - p
            prob = 1.0 - prob_no_rel
elif OPTS.mode == 'log_noisy_or':
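            # log-domain noisy-OR score: -sum_i log(1 - p_i), large when any p_i is near 1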
log_prob_no_rel = 0.0
for p in prob_list:
if p < 1.0:
log_prob_no_rel += math.log(1.0 - p)
else:
log_prob_no_rel -= 1000000
prob = -log_prob_no_rel
elif OPTS.mode == 'odds_ratio':
cur_log_odds = 0.0
for p in prob_list:
cur_log_odds += 10 + 0.001 * p #math.log(p / (1.0 - p) * 100000000)
prob = cur_log_odds
else:
raise ValueError(OPTS.mode)
results.append((i, pmid, drug, gene, variant, prob))
with open(OPTS.out_file, 'w') as f:
for item in results:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(*item))
if __name__ == '__main__':
OPTS = parse_args(sys.argv[1:])
main(OPTS)
| 32.425287
| 92
| 0.538107
|
import argparse
import collections
import math
from scipy.special import logsumexp
import sys
MODES = ['mean', 'max', 'logsumexp', 'noisy_or', 'log_noisy_or', 'odds_ratio']
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=MODES)
parser.add_argument('files', nargs='+')
parser.add_argument('--weights', '-w', type=lambda x:[float(t) for t in x.split(',')],
                        help='Comma-separated list of multipliers, one per file')
parser.add_argument('--out-file', '-o', default=None, help='Where to write all output')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args(args)
def read_preds(fn):
preds = []
with open(fn) as f:
for line in f:
idx, pmid, drug, gene, variant, prob = line.strip().split('\t')
prob = float(prob)
preds.append((pmid, drug, gene, variant, prob))
return preds
def main(OPTS):
preds_all = [read_preds(fn) for fn in OPTS.files]
groups = collections.defaultdict(list)
for i, preds in enumerate(preds_all):
if OPTS.weights:
weight = OPTS.weights[i]
else:
weight = 1.0
for pmid, drug, gene, variant, prob in preds:
groups[(pmid, drug, gene, variant)].append(weight * prob)
results = []
    for i, ((pmid, drug, gene, variant), prob_list) in enumerate(groups.items()):
if OPTS.mode == 'mean':
prob = sum(prob_list) / len(prob_list)
elif OPTS.mode == 'max':
prob = max(prob_list)
elif OPTS.mode == 'logsumexp':
prob = logsumexp(prob_list)
elif OPTS.mode == 'noisy_or':
prob_no_rel = 1.0
for p in prob_list:
prob_no_rel *= 1.0 - p
            prob = 1.0 - prob_no_rel
elif OPTS.mode == 'log_noisy_or':
log_prob_no_rel = 0.0
for p in prob_list:
if p < 1.0:
log_prob_no_rel += math.log(1.0 - p)
else:
log_prob_no_rel -= 1000000
prob = -log_prob_no_rel
elif OPTS.mode == 'odds_ratio':
cur_log_odds = 0.0
for p in prob_list:
cur_log_odds += 10 + 0.001 * p
prob = cur_log_odds
else:
raise ValueError(OPTS.mode)
results.append((i, pmid, drug, gene, variant, prob))
with open(OPTS.out_file, 'w') as f:
for item in results:
f.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(*item))
if __name__ == '__main__':
OPTS = parse_args(sys.argv[1:])
main(OPTS)
| true
| true
|
790a040994f325960ff2ab17368dc7f580f3ab56
| 29,985
|
py
|
Python
|
O365/utils/windows_tz.py
|
berezovskyi/python-o365
|
aaad538b729e9e30c43300768d99e5ec0dec5e45
|
[
"Apache-2.0"
] | 900
|
2018-04-12T13:57:37.000Z
|
2022-03-30T08:39:04.000Z
|
O365/utils/windows_tz.py
|
berezovskyi/python-o365
|
aaad538b729e9e30c43300768d99e5ec0dec5e45
|
[
"Apache-2.0"
] | 603
|
2018-04-13T13:39:34.000Z
|
2022-03-31T11:26:53.000Z
|
O365/utils/windows_tz.py
|
berezovskyi/python-o365
|
aaad538b729e9e30c43300768d99e5ec0dec5e45
|
[
"Apache-2.0"
] | 289
|
2018-04-10T16:43:09.000Z
|
2022-03-16T15:03:51.000Z
|
"""
Mapping from iana timezones to windows timezones and vice versa
"""
from datetime import tzinfo
import pytz
# noinspection SpellCheckingInspection
IANA_TO_WIN = {
"Africa/Abidjan": "Greenwich Standard Time",
"Africa/Accra": "Greenwich Standard Time",
"Africa/Addis_Ababa": "E. Africa Standard Time",
"Africa/Algiers": "W. Central Africa Standard Time",
"Africa/Asmara": "E. Africa Standard Time",
"Africa/Asmera": "E. Africa Standard Time",
"Africa/Bamako": "Greenwich Standard Time",
"Africa/Bangui": "W. Central Africa Standard Time",
"Africa/Banjul": "Greenwich Standard Time",
"Africa/Bissau": "Greenwich Standard Time",
"Africa/Blantyre": "South Africa Standard Time",
"Africa/Brazzaville": "W. Central Africa Standard Time",
"Africa/Bujumbura": "South Africa Standard Time",
"Africa/Cairo": "Egypt Standard Time",
"Africa/Casablanca": "Morocco Standard Time",
"Africa/Ceuta": "Romance Standard Time",
"Africa/Conakry": "Greenwich Standard Time",
"Africa/Dakar": "Greenwich Standard Time",
"Africa/Dar_es_Salaam": "E. Africa Standard Time",
"Africa/Djibouti": "E. Africa Standard Time",
"Africa/Douala": "W. Central Africa Standard Time",
"Africa/El_Aaiun": "Morocco Standard Time",
"Africa/Freetown": "Greenwich Standard Time",
"Africa/Gaborone": "South Africa Standard Time",
"Africa/Harare": "South Africa Standard Time",
"Africa/Johannesburg": "South Africa Standard Time",
"Africa/Juba": "E. Africa Standard Time",
"Africa/Kampala": "E. Africa Standard Time",
"Africa/Khartoum": "Sudan Standard Time",
"Africa/Kigali": "South Africa Standard Time",
"Africa/Kinshasa": "W. Central Africa Standard Time",
"Africa/Lagos": "W. Central Africa Standard Time",
"Africa/Libreville": "W. Central Africa Standard Time",
"Africa/Lome": "Greenwich Standard Time",
"Africa/Luanda": "W. Central Africa Standard Time",
"Africa/Lubumbashi": "South Africa Standard Time",
"Africa/Lusaka": "South Africa Standard Time",
"Africa/Malabo": "W. Central Africa Standard Time",
"Africa/Maputo": "South Africa Standard Time",
"Africa/Maseru": "South Africa Standard Time",
"Africa/Mbabane": "South Africa Standard Time",
"Africa/Mogadishu": "E. Africa Standard Time",
"Africa/Monrovia": "Greenwich Standard Time",
"Africa/Nairobi": "E. Africa Standard Time",
"Africa/Ndjamena": "W. Central Africa Standard Time",
"Africa/Niamey": "W. Central Africa Standard Time",
"Africa/Nouakchott": "Greenwich Standard Time",
"Africa/Ouagadougou": "Greenwich Standard Time",
"Africa/Porto-Novo": "W. Central Africa Standard Time",
"Africa/Sao_Tome": "Sao Tome Standard Time",
"Africa/Timbuktu": "Greenwich Standard Time",
"Africa/Tripoli": "Libya Standard Time",
"Africa/Tunis": "W. Central Africa Standard Time",
"Africa/Windhoek": "Namibia Standard Time",
"America/Adak": "Aleutian Standard Time",
"America/Anchorage": "Alaskan Standard Time",
"America/Anguilla": "SA Western Standard Time",
"America/Antigua": "SA Western Standard Time",
"America/Araguaina": "Tocantins Standard Time",
"America/Argentina/Buenos_Aires": "Argentina Standard Time",
"America/Argentina/Catamarca": "Argentina Standard Time",
"America/Argentina/ComodRivadavia": "Argentina Standard Time",
"America/Argentina/Cordoba": "Argentina Standard Time",
"America/Argentina/Jujuy": "Argentina Standard Time",
"America/Argentina/La_Rioja": "Argentina Standard Time",
"America/Argentina/Mendoza": "Argentina Standard Time",
"America/Argentina/Rio_Gallegos": "Argentina Standard Time",
"America/Argentina/Salta": "Argentina Standard Time",
"America/Argentina/San_Juan": "Argentina Standard Time",
"America/Argentina/San_Luis": "Argentina Standard Time",
"America/Argentina/Tucuman": "Argentina Standard Time",
"America/Argentina/Ushuaia": "Argentina Standard Time",
"America/Aruba": "SA Western Standard Time",
"America/Asuncion": "Paraguay Standard Time",
"America/Atikokan": "SA Pacific Standard Time",
"America/Atka": "Aleutian Standard Time",
"America/Bahia": "Bahia Standard Time",
"America/Bahia_Banderas": "Central Standard Time (Mexico)",
"America/Barbados": "SA Western Standard Time",
"America/Belem": "SA Eastern Standard Time",
"America/Belize": "Central America Standard Time",
"America/Blanc-Sablon": "SA Western Standard Time",
"America/Boa_Vista": "SA Western Standard Time",
"America/Bogota": "SA Pacific Standard Time",
"America/Boise": "Mountain Standard Time",
"America/Buenos_Aires": "Argentina Standard Time",
"America/Cambridge_Bay": "Mountain Standard Time",
"America/Campo_Grande": "Central Brazilian Standard Time",
"America/Cancun": "Eastern Standard Time (Mexico)",
"America/Caracas": "Venezuela Standard Time",
"America/Catamarca": "Argentina Standard Time",
"America/Cayenne": "SA Eastern Standard Time",
"America/Cayman": "SA Pacific Standard Time",
"America/Chicago": "Central Standard Time",
"America/Chihuahua": "Mountain Standard Time (Mexico)",
"America/Coral_Harbour": "SA Pacific Standard Time",
"America/Cordoba": "Argentina Standard Time",
"America/Costa_Rica": "Central America Standard Time",
"America/Creston": "US Mountain Standard Time",
"America/Cuiaba": "Central Brazilian Standard Time",
"America/Curacao": "SA Western Standard Time",
"America/Danmarkshavn": "UTC",
"America/Dawson": "Pacific Standard Time",
"America/Dawson_Creek": "US Mountain Standard Time",
"America/Denver": "Mountain Standard Time",
"America/Detroit": "Eastern Standard Time",
"America/Dominica": "SA Western Standard Time",
"America/Edmonton": "Mountain Standard Time",
"America/Eirunepe": "SA Pacific Standard Time",
"America/El_Salvador": "Central America Standard Time",
"America/Ensenada": "Pacific Standard Time (Mexico)",
"America/Fort_Nelson": "US Mountain Standard Time",
"America/Fort_Wayne": "US Eastern Standard Time",
"America/Fortaleza": "SA Eastern Standard Time",
"America/Glace_Bay": "Atlantic Standard Time",
"America/Godthab": "Greenland Standard Time",
"America/Goose_Bay": "Atlantic Standard Time",
"America/Grand_Turk": "Turks And Caicos Standard Time",
"America/Grenada": "SA Western Standard Time",
"America/Guadeloupe": "SA Western Standard Time",
"America/Guatemala": "Central America Standard Time",
"America/Guayaquil": "SA Pacific Standard Time",
"America/Guyana": "SA Western Standard Time",
"America/Halifax": "Atlantic Standard Time",
"America/Havana": "Cuba Standard Time",
"America/Hermosillo": "US Mountain Standard Time",
"America/Indiana/Indianapolis": "US Eastern Standard Time",
"America/Indiana/Knox": "Central Standard Time",
"America/Indiana/Marengo": "US Eastern Standard Time",
"America/Indiana/Petersburg": "Eastern Standard Time",
"America/Indiana/Tell_City": "Central Standard Time",
"America/Indiana/Vevay": "US Eastern Standard Time",
"America/Indiana/Vincennes": "Eastern Standard Time",
"America/Indiana/Winamac": "Eastern Standard Time",
"America/Indianapolis": "US Eastern Standard Time",
"America/Inuvik": "Mountain Standard Time",
"America/Iqaluit": "Eastern Standard Time",
"America/Jamaica": "SA Pacific Standard Time",
"America/Jujuy": "Argentina Standard Time",
"America/Juneau": "Alaskan Standard Time",
"America/Kentucky/Louisville": "Eastern Standard Time",
"America/Kentucky/Monticello": "Eastern Standard Time",
"America/Knox_IN": "Central Standard Time",
"America/Kralendijk": "SA Western Standard Time",
"America/La_Paz": "SA Western Standard Time",
"America/Lima": "SA Pacific Standard Time",
"America/Los_Angeles": "Pacific Standard Time",
"America/Louisville": "Eastern Standard Time",
"America/Lower_Princes": "SA Western Standard Time",
"America/Maceio": "SA Eastern Standard Time",
"America/Managua": "Central America Standard Time",
"America/Manaus": "SA Western Standard Time",
"America/Marigot": "SA Western Standard Time",
"America/Martinique": "SA Western Standard Time",
"America/Matamoros": "Central Standard Time",
"America/Mazatlan": "Mountain Standard Time (Mexico)",
"America/Mendoza": "Argentina Standard Time",
"America/Menominee": "Central Standard Time",
"America/Merida": "Central Standard Time (Mexico)",
"America/Metlakatla": "Alaskan Standard Time",
"America/Mexico_City": "Central Standard Time (Mexico)",
"America/Miquelon": "Saint Pierre Standard Time",
"America/Moncton": "Atlantic Standard Time",
"America/Monterrey": "Central Standard Time (Mexico)",
"America/Montevideo": "Montevideo Standard Time",
"America/Montreal": "Eastern Standard Time",
"America/Montserrat": "SA Western Standard Time",
"America/Nassau": "Eastern Standard Time",
"America/New_York": "Eastern Standard Time",
"America/Nipigon": "Eastern Standard Time",
"America/Nome": "Alaskan Standard Time",
"America/Noronha": "UTC-02",
"America/North_Dakota/Beulah": "Central Standard Time",
"America/North_Dakota/Center": "Central Standard Time",
"America/North_Dakota/New_Salem": "Central Standard Time",
"America/Ojinaga": "Mountain Standard Time",
"America/Panama": "SA Pacific Standard Time",
"America/Pangnirtung": "Eastern Standard Time",
"America/Paramaribo": "SA Eastern Standard Time",
"America/Phoenix": "US Mountain Standard Time",
"America/Port-au-Prince": "Haiti Standard Time",
"America/Port_of_Spain": "SA Western Standard Time",
"America/Porto_Acre": "SA Pacific Standard Time",
"America/Porto_Velho": "SA Western Standard Time",
"America/Puerto_Rico": "SA Western Standard Time",
"America/Punta_Arenas": "Magallanes Standard Time",
"America/Rainy_River": "Central Standard Time",
"America/Rankin_Inlet": "Central Standard Time",
"America/Recife": "SA Eastern Standard Time",
"America/Regina": "Canada Central Standard Time",
"America/Resolute": "Central Standard Time",
"America/Rio_Branco": "SA Pacific Standard Time",
"America/Rosario": "Argentina Standard Time",
"America/Santa_Isabel": "Pacific Standard Time (Mexico)",
"America/Santarem": "SA Eastern Standard Time",
"America/Santiago": "Pacific SA Standard Time",
"America/Santo_Domingo": "SA Western Standard Time",
"America/Sao_Paulo": "E. South America Standard Time",
"America/Scoresbysund": "Azores Standard Time",
"America/Shiprock": "Mountain Standard Time",
"America/Sitka": "Alaskan Standard Time",
"America/St_Barthelemy": "SA Western Standard Time",
"America/St_Johns": "Newfoundland Standard Time",
"America/St_Kitts": "SA Western Standard Time",
"America/St_Lucia": "SA Western Standard Time",
"America/St_Thomas": "SA Western Standard Time",
"America/St_Vincent": "SA Western Standard Time",
"America/Swift_Current": "Canada Central Standard Time",
"America/Tegucigalpa": "Central America Standard Time",
"America/Thule": "Atlantic Standard Time",
"America/Thunder_Bay": "Eastern Standard Time",
"America/Tijuana": "Pacific Standard Time (Mexico)",
"America/Toronto": "Eastern Standard Time",
"America/Tortola": "SA Western Standard Time",
"America/Vancouver": "Pacific Standard Time",
"America/Virgin": "SA Western Standard Time",
"America/Whitehorse": "Pacific Standard Time",
"America/Winnipeg": "Central Standard Time",
"America/Yakutat": "Alaskan Standard Time",
"America/Yellowknife": "Mountain Standard Time",
"Antarctica/Casey": "W. Australia Standard Time",
"Antarctica/Davis": "SE Asia Standard Time",
"Antarctica/DumontDUrville": "West Pacific Standard Time",
"Antarctica/Macquarie": "Central Pacific Standard Time",
"Antarctica/Mawson": "West Asia Standard Time",
"Antarctica/McMurdo": "New Zealand Standard Time",
"Antarctica/Palmer": "Magallanes Standard Time",
"Antarctica/Rothera": "SA Eastern Standard Time",
"Antarctica/South_Pole": "New Zealand Standard Time",
"Antarctica/Syowa": "E. Africa Standard Time",
"Antarctica/Vostok": "Central Asia Standard Time",
"Arctic/Longyearbyen": "W. Europe Standard Time",
"Asia/Aden": "Arab Standard Time",
"Asia/Almaty": "Central Asia Standard Time",
"Asia/Amman": "Jordan Standard Time",
"Asia/Anadyr": "Russia Time Zone 11",
"Asia/Aqtau": "West Asia Standard Time",
"Asia/Aqtobe": "West Asia Standard Time",
"Asia/Ashgabat": "West Asia Standard Time",
"Asia/Ashkhabad": "West Asia Standard Time",
"Asia/Atyrau": "West Asia Standard Time",
"Asia/Baghdad": "Arabic Standard Time",
"Asia/Bahrain": "Arab Standard Time",
"Asia/Baku": "Azerbaijan Standard Time",
"Asia/Bangkok": "SE Asia Standard Time",
"Asia/Barnaul": "Altai Standard Time",
"Asia/Beirut": "Middle East Standard Time",
"Asia/Bishkek": "Central Asia Standard Time",
"Asia/Brunei": "Singapore Standard Time",
"Asia/Calcutta": "India Standard Time",
"Asia/Chita": "Transbaikal Standard Time",
"Asia/Choibalsan": "Ulaanbaatar Standard Time",
"Asia/Chongqing": "China Standard Time",
"Asia/Chungking": "China Standard Time",
"Asia/Colombo": "Sri Lanka Standard Time",
"Asia/Dacca": "Bangladesh Standard Time",
"Asia/Damascus": "Syria Standard Time",
"Asia/Dhaka": "Bangladesh Standard Time",
"Asia/Dili": "Tokyo Standard Time",
"Asia/Dubai": "Arabian Standard Time",
"Asia/Dushanbe": "West Asia Standard Time",
"Asia/Famagusta": "GTB Standard Time",
"Asia/Gaza": "West Bank Standard Time",
"Asia/Harbin": "China Standard Time",
"Asia/Hebron": "West Bank Standard Time",
"Asia/Ho_Chi_Minh": "SE Asia Standard Time",
"Asia/Hong_Kong": "China Standard Time",
"Asia/Hovd": "W. Mongolia Standard Time",
"Asia/Irkutsk": "North Asia East Standard Time",
"Asia/Istanbul": "Turkey Standard Time",
"Asia/Jakarta": "SE Asia Standard Time",
"Asia/Jayapura": "Tokyo Standard Time",
"Asia/Jerusalem": "Israel Standard Time",
"Asia/Kabul": "Afghanistan Standard Time",
"Asia/Kamchatka": "Kamchatka Standard Time",
"Asia/Karachi": "Pakistan Standard Time",
"Asia/Kashgar": "Central Asia Standard Time",
"Asia/Kathmandu": "Nepal Standard Time",
"Asia/Katmandu": "Nepal Standard Time",
"Asia/Khandyga": "Yakutsk Standard Time",
"Asia/Kolkata": "India Standard Time",
"Asia/Krasnoyarsk": "North Asia Standard Time",
"Asia/Kuala_Lumpur": "Singapore Standard Time",
"Asia/Kuching": "Singapore Standard Time",
"Asia/Kuwait": "Arab Standard Time",
"Asia/Macao": "China Standard Time",
"Asia/Macau": "China Standard Time",
"Asia/Magadan": "Magadan Standard Time",
"Asia/Makassar": "Singapore Standard Time",
"Asia/Manila": "Singapore Standard Time",
"Asia/Muscat": "Arabian Standard Time",
"Asia/Nicosia": "GTB Standard Time",
"Asia/Novokuznetsk": "North Asia Standard Time",
"Asia/Novosibirsk": "N. Central Asia Standard Time",
"Asia/Omsk": "Omsk Standard Time",
"Asia/Oral": "West Asia Standard Time",
"Asia/Phnom_Penh": "SE Asia Standard Time",
"Asia/Pontianak": "SE Asia Standard Time",
"Asia/Pyongyang": "North Korea Standard Time",
"Asia/Qatar": "Arab Standard Time",
"Asia/Qostanay": "Central Asia Standard Time",
"Asia/Qyzylorda": "Qyzylorda Standard Time",
"Asia/Rangoon": "Myanmar Standard Time",
"Asia/Riyadh": "Arab Standard Time",
"Asia/Saigon": "SE Asia Standard Time",
"Asia/Sakhalin": "Sakhalin Standard Time",
"Asia/Samarkand": "West Asia Standard Time",
"Asia/Seoul": "Korea Standard Time",
"Asia/Shanghai": "China Standard Time",
"Asia/Singapore": "Singapore Standard Time",
"Asia/Srednekolymsk": "Russia Time Zone 10",
"Asia/Taipei": "Taipei Standard Time",
"Asia/Tashkent": "West Asia Standard Time",
"Asia/Tbilisi": "Georgian Standard Time",
"Asia/Tehran": "Iran Standard Time",
"Asia/Tel_Aviv": "Israel Standard Time",
"Asia/Thimbu": "Bangladesh Standard Time",
"Asia/Thimphu": "Bangladesh Standard Time",
"Asia/Tokyo": "Tokyo Standard Time",
"Asia/Tomsk": "Tomsk Standard Time",
"Asia/Ujung_Pandang": "Singapore Standard Time",
"Asia/Ulaanbaatar": "Ulaanbaatar Standard Time",
"Asia/Ulan_Bator": "Ulaanbaatar Standard Time",
"Asia/Urumqi": "Central Asia Standard Time",
"Asia/Ust-Nera": "Vladivostok Standard Time",
"Asia/Vientiane": "SE Asia Standard Time",
"Asia/Vladivostok": "Vladivostok Standard Time",
"Asia/Yakutsk": "Yakutsk Standard Time",
"Asia/Yangon": "Myanmar Standard Time",
"Asia/Yekaterinburg": "Ekaterinburg Standard Time",
"Asia/Yerevan": "Caucasus Standard Time",
"Atlantic/Azores": "Azores Standard Time",
"Atlantic/Bermuda": "Atlantic Standard Time",
"Atlantic/Canary": "GMT Standard Time",
"Atlantic/Cape_Verde": "Cape Verde Standard Time",
"Atlantic/Faeroe": "GMT Standard Time",
"Atlantic/Faroe": "GMT Standard Time",
"Atlantic/Jan_Mayen": "W. Europe Standard Time",
"Atlantic/Madeira": "GMT Standard Time",
"Atlantic/Reykjavik": "Greenwich Standard Time",
"Atlantic/South_Georgia": "UTC-02",
"Atlantic/St_Helena": "Greenwich Standard Time",
"Atlantic/Stanley": "SA Eastern Standard Time",
"Australia/ACT": "AUS Eastern Standard Time",
"Australia/Adelaide": "Cen. Australia Standard Time",
"Australia/Brisbane": "E. Australia Standard Time",
"Australia/Broken_Hill": "Cen. Australia Standard Time",
"Australia/Canberra": "AUS Eastern Standard Time",
"Australia/Currie": "Tasmania Standard Time",
"Australia/Darwin": "AUS Central Standard Time",
"Australia/Eucla": "Aus Central W. Standard Time",
"Australia/Hobart": "Tasmania Standard Time",
"Australia/LHI": "Lord Howe Standard Time",
"Australia/Lindeman": "E. Australia Standard Time",
"Australia/Lord_Howe": "Lord Howe Standard Time",
"Australia/Melbourne": "AUS Eastern Standard Time",
"Australia/NSW": "AUS Eastern Standard Time",
"Australia/North": "AUS Central Standard Time",
"Australia/Perth": "W. Australia Standard Time",
"Australia/Queensland": "E. Australia Standard Time",
"Australia/South": "Cen. Australia Standard Time",
"Australia/Sydney": "AUS Eastern Standard Time",
"Australia/Tasmania": "Tasmania Standard Time",
"Australia/Victoria": "AUS Eastern Standard Time",
"Australia/West": "W. Australia Standard Time",
"Australia/Yancowinna": "Cen. Australia Standard Time",
"Brazil/Acre": "SA Pacific Standard Time",
"Brazil/DeNoronha": "UTC-02",
"Brazil/East": "E. South America Standard Time",
"Brazil/West": "SA Western Standard Time",
"CET": "Romance Standard Time",
"CST6CDT": "Central Standard Time",
"Canada/Atlantic": "Atlantic Standard Time",
"Canada/Central": "Central Standard Time",
"Canada/East-Saskatchewan": "Canada Central Standard Time",
"Canada/Eastern": "Eastern Standard Time",
"Canada/Mountain": "Mountain Standard Time",
"Canada/Newfoundland": "Newfoundland Standard Time",
"Canada/Pacific": "Pacific Standard Time",
"Canada/Saskatchewan": "Canada Central Standard Time",
"Canada/Yukon": "Pacific Standard Time",
"Chile/Continental": "Pacific SA Standard Time",
"Chile/EasterIsland": "Easter Island Standard Time",
"Cuba": "Cuba Standard Time",
"EET": "GTB Standard Time",
"EST": "SA Pacific Standard Time",
"EST5EDT": "Eastern Standard Time",
"Egypt": "Egypt Standard Time",
"Eire": "GMT Standard Time",
"Etc/GMT": "UTC",
"Etc/GMT+0": "UTC",
"Etc/GMT+1": "Cape Verde Standard Time",
"Etc/GMT+10": "Hawaiian Standard Time",
"Etc/GMT+11": "UTC-11",
"Etc/GMT+12": "Dateline Standard Time",
"Etc/GMT+2": "Mid-Atlantic Standard Time",
"Etc/GMT+3": "SA Eastern Standard Time",
"Etc/GMT+4": "SA Western Standard Time",
"Etc/GMT+5": "SA Pacific Standard Time",
"Etc/GMT+6": "Central America Standard Time",
"Etc/GMT+7": "US Mountain Standard Time",
"Etc/GMT+8": "UTC-08",
"Etc/GMT+9": "UTC-09",
"Etc/GMT-0": "UTC",
"Etc/GMT-1": "W. Central Africa Standard Time",
"Etc/GMT-10": "West Pacific Standard Time",
"Etc/GMT-11": "Central Pacific Standard Time",
"Etc/GMT-12": "UTC+12",
"Etc/GMT-13": "UTC+13",
"Etc/GMT-14": "Line Islands Standard Time",
"Etc/GMT-2": "South Africa Standard Time",
"Etc/GMT-3": "E. Africa Standard Time",
"Etc/GMT-4": "Arabian Standard Time",
"Etc/GMT-5": "West Asia Standard Time",
"Etc/GMT-6": "Central Asia Standard Time",
"Etc/GMT-7": "SE Asia Standard Time",
"Etc/GMT-8": "Singapore Standard Time",
"Etc/GMT-9": "Tokyo Standard Time",
"Etc/GMT0": "UTC",
"Etc/Greenwich": "UTC",
"Etc/UCT": "UTC",
"Etc/UTC": "UTC",
"Etc/Universal": "UTC",
"Etc/Zulu": "UTC",
"Europe/Amsterdam": "W. Europe Standard Time",
"Europe/Andorra": "W. Europe Standard Time",
"Europe/Astrakhan": "Astrakhan Standard Time",
"Europe/Athens": "GTB Standard Time",
"Europe/Belfast": "GMT Standard Time",
"Europe/Belgrade": "Central European Standard Time",
"Europe/Berlin": "W. Europe Standard Time",
"Europe/Bratislava": "Central Europe Standard Time",
"Europe/Brussels": "Romance Standard Time",
"Europe/Bucharest": "GTB Standard Time",
"Europe/Budapest": "Central Europe Standard Time",
"Europe/Busingen": "W. Europe Standard Time",
"Europe/Chisinau": "E. Europe Standard Time",
"Europe/Copenhagen": "Romance Standard Time",
"Europe/Dublin": "GMT Standard Time",
"Europe/Gibraltar": "W. Europe Standard Time",
"Europe/Guernsey": "GMT Standard Time",
"Europe/Helsinki": "FLE Standard Time",
"Europe/Isle_of_Man": "GMT Standard Time",
"Europe/Istanbul": "Turkey Standard Time",
"Europe/Jersey": "GMT Standard Time",
"Europe/Kaliningrad": "Kaliningrad Standard Time",
"Europe/Kiev": "FLE Standard Time",
"Europe/Kirov": "Russian Standard Time",
"Europe/Lisbon": "GMT Standard Time",
"Europe/Ljubljana": "Central European Standard Time",
"Europe/London": "GMT Standard Time",
"Europe/Luxembourg": "W. Europe Standard Time",
"Europe/Madrid": "Romance Standard Time",
"Europe/Malta": "W. Europe Standard Time",
"Europe/Mariehamn": "FLE Standard Time",
"Europe/Minsk": "Belarus Standard Time",
"Europe/Monaco": "W. Europe Standard Time",
"Europe/Moscow": "Russian Standard Time",
"Europe/Nicosia": "GTB Standard Time",
"Europe/Oslo": "W. Europe Standard Time",
"Europe/Paris": "Romance Standard Time",
"Europe/Podgorica": "Central European Standard Time",
"Europe/Prague": "Central Europe Standard Time",
"Europe/Riga": "FLE Standard Time",
"Europe/Rome": "W. Europe Standard Time",
"Europe/Samara": "Russia Time Zone 3",
"Europe/San_Marino": "W. Europe Standard Time",
"Europe/Sarajevo": "Central European Standard Time",
"Europe/Saratov": "Saratov Standard Time",
"Europe/Simferopol": "Russian Standard Time",
"Europe/Skopje": "Central European Standard Time",
"Europe/Sofia": "FLE Standard Time",
"Europe/Stockholm": "W. Europe Standard Time",
"Europe/Tallinn": "FLE Standard Time",
"Europe/Tirane": "Central Europe Standard Time",
"Europe/Tiraspol": "E. Europe Standard Time",
"Europe/Ulyanovsk": "Astrakhan Standard Time",
"Europe/Uzhgorod": "FLE Standard Time",
"Europe/Vaduz": "W. Europe Standard Time",
"Europe/Vatican": "W. Europe Standard Time",
"Europe/Vienna": "W. Europe Standard Time",
"Europe/Vilnius": "FLE Standard Time",
"Europe/Volgograd": "Volgograd Standard Time",
"Europe/Warsaw": "Central European Standard Time",
"Europe/Zagreb": "Central European Standard Time",
"Europe/Zaporozhye": "FLE Standard Time",
"Europe/Zurich": "W. Europe Standard Time",
"GB": "GMT Standard Time",
"GB-Eire": "GMT Standard Time",
"GMT": "UTC",
"GMT+0": "UTC",
"GMT-0": "UTC",
"GMT0": "UTC",
"Greenwich": "UTC",
"HST": "Hawaiian Standard Time",
"Hongkong": "China Standard Time",
"Iceland": "Greenwich Standard Time",
"Indian/Antananarivo": "E. Africa Standard Time",
"Indian/Chagos": "Central Asia Standard Time",
"Indian/Christmas": "SE Asia Standard Time",
"Indian/Cocos": "Myanmar Standard Time",
"Indian/Comoro": "E. Africa Standard Time",
"Indian/Kerguelen": "West Asia Standard Time",
"Indian/Mahe": "Mauritius Standard Time",
"Indian/Maldives": "West Asia Standard Time",
"Indian/Mauritius": "Mauritius Standard Time",
"Indian/Mayotte": "E. Africa Standard Time",
"Indian/Reunion": "Mauritius Standard Time",
"Iran": "Iran Standard Time",
"Israel": "Israel Standard Time",
"Jamaica": "SA Pacific Standard Time",
"Japan": "Tokyo Standard Time",
"Kwajalein": "UTC+12",
"Libya": "Libya Standard Time",
"MET": "W. Europe Standard Time",
"MST": "US Mountain Standard Time",
"MST7MDT": "Mountain Standard Time",
"Mexico/BajaNorte": "Pacific Standard Time (Mexico)",
"Mexico/BajaSur": "Mountain Standard Time (Mexico)",
"Mexico/General": "Central Standard Time (Mexico)",
"NZ": "New Zealand Standard Time",
"NZ-CHAT": "Chatham Islands Standard Time",
"Navajo": "Mountain Standard Time",
"PRC": "China Standard Time",
"PST8PDT": "Pacific Standard Time",
"Pacific/Apia": "Samoa Standard Time",
"Pacific/Auckland": "New Zealand Standard Time",
"Pacific/Bougainville": "Bougainville Standard Time",
"Pacific/Chatham": "Chatham Islands Standard Time",
"Pacific/Chuuk": "West Pacific Standard Time",
"Pacific/Easter": "Easter Island Standard Time",
"Pacific/Efate": "Central Pacific Standard Time",
"Pacific/Enderbury": "UTC+13",
"Pacific/Fakaofo": "UTC+13",
"Pacific/Fiji": "Fiji Standard Time",
"Pacific/Funafuti": "UTC+12",
"Pacific/Galapagos": "Central America Standard Time",
"Pacific/Gambier": "UTC-09",
"Pacific/Guadalcanal": "Central Pacific Standard Time",
"Pacific/Guam": "West Pacific Standard Time",
"Pacific/Honolulu": "Hawaiian Standard Time",
"Pacific/Johnston": "Hawaiian Standard Time",
"Pacific/Kiritimati": "Line Islands Standard Time",
"Pacific/Kosrae": "Central Pacific Standard Time",
"Pacific/Kwajalein": "UTC+12",
"Pacific/Majuro": "UTC+12",
"Pacific/Marquesas": "Marquesas Standard Time",
"Pacific/Midway": "UTC-11",
"Pacific/Nauru": "UTC+12",
"Pacific/Niue": "UTC-11",
"Pacific/Norfolk": "Norfolk Standard Time",
"Pacific/Noumea": "Central Pacific Standard Time",
"Pacific/Pago_Pago": "UTC-11",
"Pacific/Palau": "Tokyo Standard Time",
"Pacific/Pitcairn": "UTC-08",
"Pacific/Pohnpei": "Central Pacific Standard Time",
"Pacific/Ponape": "Central Pacific Standard Time",
"Pacific/Port_Moresby": "West Pacific Standard Time",
"Pacific/Rarotonga": "Hawaiian Standard Time",
"Pacific/Saipan": "West Pacific Standard Time",
"Pacific/Samoa": "UTC-11",
"Pacific/Tahiti": "Hawaiian Standard Time",
"Pacific/Tarawa": "UTC+12",
"Pacific/Tongatapu": "Tonga Standard Time",
"Pacific/Truk": "West Pacific Standard Time",
"Pacific/Wake": "UTC+12",
"Pacific/Wallis": "UTC+12",
"Pacific/Yap": "West Pacific Standard Time",
"Poland": "Central European Standard Time",
"Portugal": "GMT Standard Time",
"ROC": "Taipei Standard Time",
"ROK": "Korea Standard Time",
"Singapore": "Singapore Standard Time",
"Turkey": "Turkey Standard Time",
"UCT": "UTC",
"US/Alaska": "Alaskan Standard Time",
"US/Aleutian": "Aleutian Standard Time",
"US/Arizona": "US Mountain Standard Time",
"US/Central": "Central Standard Time",
"US/East-Indiana": "US Eastern Standard Time",
"US/Eastern": "Eastern Standard Time",
"US/Hawaii": "Hawaiian Standard Time",
"US/Indiana-Starke": "Central Standard Time",
"US/Michigan": "Eastern Standard Time",
"US/Mountain": "Mountain Standard Time",
"US/Pacific": "Pacific Standard Time",
"US/Pacific-New": "Pacific Standard Time",
"US/Samoa": "UTC-11",
"UTC": "UTC",
"Universal": "UTC",
"W-SU": "Russian Standard Time",
"WET": "GMT Standard Time",
"Zulu": "UTC"
}
# When building the reverse (Windows -> IANA) map below, only map the Windows 'UTC'
# zone back to the IANA 'UTC' entry, since many IANA zones share that Windows value.
WIN_TO_IANA = {v: k for k, v in IANA_TO_WIN.items() if v != 'UTC' or (v == 'UTC' and k == 'UTC')}
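# For example, the IANA zones 'GMT', 'Etc/UTC', 'Greenwich' and 'Zulu' all map to the
# Windows zone 'UTC'; the filter above pins the reverse lookup so that
# WIN_TO_IANA['UTC'] == 'UTC' rather than an arbitrary one of those aliases.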
def get_iana_tz(windows_tz):
""" Returns a valid pytz TimeZone (Iana/Olson Timezones) from a given
windows TimeZone
:param windows_tz: windows format timezone usually returned by
microsoft api response
:return:
:rtype:
"""
timezone = WIN_TO_IANA.get(windows_tz)
if timezone is None:
# No direct match; retry with ' Standard Time' appended, which
# resolves many of the short Windows display names:
timezone = WIN_TO_IANA.get(windows_tz + ' Standard Time')
# If there is still no match, give up and raise.
if timezone is None:
raise pytz.UnknownTimeZoneError(
"Can't find Windows TimeZone " + windows_tz)
return timezone
def get_windows_tz(iana_tz):
""" Returns a valid windows TimeZone from a given pytz TimeZone
(Iana/Olson Timezones)
Note: Windows Timezones are SHIT!... no ... really THEY ARE
HOLY FUCKING SHIT!.
"""
timezone = IANA_TO_WIN.get(
iana_tz.zone if isinstance(iana_tz, tzinfo) else iana_tz)
if timezone is None:
raise pytz.UnknownTimeZoneError(
"Can't find Iana TimeZone " + iana_tz.zone)
return timezone
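# A minimal usage sketch of the two helpers above, assuming pytz is installed;
# the zone names are illustrative examples only.
if __name__ == '__main__':
    iana_name = get_iana_tz('Romance Standard Time')    # one of the IANA zones mapped to it, e.g. 'Europe/Paris'
    tz = pytz.timezone(iana_name)                        # a usable pytz timezone object
    print(get_windows_tz('Europe/Paris'))                # -> 'Romance Standard Time'
    print(get_windows_tz(pytz.timezone('Asia/Tokyo')))   # tzinfo instances are accepted too -> 'Tokyo Standard Time'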
| 46.488372
| 97
| 0.675971
|
from datetime import tzinfo
import pytz
IANA_TO_WIN = {
"Africa/Abidjan": "Greenwich Standard Time",
"Africa/Accra": "Greenwich Standard Time",
"Africa/Addis_Ababa": "E. Africa Standard Time",
"Africa/Algiers": "W. Central Africa Standard Time",
"Africa/Asmara": "E. Africa Standard Time",
"Africa/Asmera": "E. Africa Standard Time",
"Africa/Bamako": "Greenwich Standard Time",
"Africa/Bangui": "W. Central Africa Standard Time",
"Africa/Banjul": "Greenwich Standard Time",
"Africa/Bissau": "Greenwich Standard Time",
"Africa/Blantyre": "South Africa Standard Time",
"Africa/Brazzaville": "W. Central Africa Standard Time",
"Africa/Bujumbura": "South Africa Standard Time",
"Africa/Cairo": "Egypt Standard Time",
"Africa/Casablanca": "Morocco Standard Time",
"Africa/Ceuta": "Romance Standard Time",
"Africa/Conakry": "Greenwich Standard Time",
"Africa/Dakar": "Greenwich Standard Time",
"Africa/Dar_es_Salaam": "E. Africa Standard Time",
"Africa/Djibouti": "E. Africa Standard Time",
"Africa/Douala": "W. Central Africa Standard Time",
"Africa/El_Aaiun": "Morocco Standard Time",
"Africa/Freetown": "Greenwich Standard Time",
"Africa/Gaborone": "South Africa Standard Time",
"Africa/Harare": "South Africa Standard Time",
"Africa/Johannesburg": "South Africa Standard Time",
"Africa/Juba": "E. Africa Standard Time",
"Africa/Kampala": "E. Africa Standard Time",
"Africa/Khartoum": "Sudan Standard Time",
"Africa/Kigali": "South Africa Standard Time",
"Africa/Kinshasa": "W. Central Africa Standard Time",
"Africa/Lagos": "W. Central Africa Standard Time",
"Africa/Libreville": "W. Central Africa Standard Time",
"Africa/Lome": "Greenwich Standard Time",
"Africa/Luanda": "W. Central Africa Standard Time",
"Africa/Lubumbashi": "South Africa Standard Time",
"Africa/Lusaka": "South Africa Standard Time",
"Africa/Malabo": "W. Central Africa Standard Time",
"Africa/Maputo": "South Africa Standard Time",
"Africa/Maseru": "South Africa Standard Time",
"Africa/Mbabane": "South Africa Standard Time",
"Africa/Mogadishu": "E. Africa Standard Time",
"Africa/Monrovia": "Greenwich Standard Time",
"Africa/Nairobi": "E. Africa Standard Time",
"Africa/Ndjamena": "W. Central Africa Standard Time",
"Africa/Niamey": "W. Central Africa Standard Time",
"Africa/Nouakchott": "Greenwich Standard Time",
"Africa/Ouagadougou": "Greenwich Standard Time",
"Africa/Porto-Novo": "W. Central Africa Standard Time",
"Africa/Sao_Tome": "Sao Tome Standard Time",
"Africa/Timbuktu": "Greenwich Standard Time",
"Africa/Tripoli": "Libya Standard Time",
"Africa/Tunis": "W. Central Africa Standard Time",
"Africa/Windhoek": "Namibia Standard Time",
"America/Adak": "Aleutian Standard Time",
"America/Anchorage": "Alaskan Standard Time",
"America/Anguilla": "SA Western Standard Time",
"America/Antigua": "SA Western Standard Time",
"America/Araguaina": "Tocantins Standard Time",
"America/Argentina/Buenos_Aires": "Argentina Standard Time",
"America/Argentina/Catamarca": "Argentina Standard Time",
"America/Argentina/ComodRivadavia": "Argentina Standard Time",
"America/Argentina/Cordoba": "Argentina Standard Time",
"America/Argentina/Jujuy": "Argentina Standard Time",
"America/Argentina/La_Rioja": "Argentina Standard Time",
"America/Argentina/Mendoza": "Argentina Standard Time",
"America/Argentina/Rio_Gallegos": "Argentina Standard Time",
"America/Argentina/Salta": "Argentina Standard Time",
"America/Argentina/San_Juan": "Argentina Standard Time",
"America/Argentina/San_Luis": "Argentina Standard Time",
"America/Argentina/Tucuman": "Argentina Standard Time",
"America/Argentina/Ushuaia": "Argentina Standard Time",
"America/Aruba": "SA Western Standard Time",
"America/Asuncion": "Paraguay Standard Time",
"America/Atikokan": "SA Pacific Standard Time",
"America/Atka": "Aleutian Standard Time",
"America/Bahia": "Bahia Standard Time",
"America/Bahia_Banderas": "Central Standard Time (Mexico)",
"America/Barbados": "SA Western Standard Time",
"America/Belem": "SA Eastern Standard Time",
"America/Belize": "Central America Standard Time",
"America/Blanc-Sablon": "SA Western Standard Time",
"America/Boa_Vista": "SA Western Standard Time",
"America/Bogota": "SA Pacific Standard Time",
"America/Boise": "Mountain Standard Time",
"America/Buenos_Aires": "Argentina Standard Time",
"America/Cambridge_Bay": "Mountain Standard Time",
"America/Campo_Grande": "Central Brazilian Standard Time",
"America/Cancun": "Eastern Standard Time (Mexico)",
"America/Caracas": "Venezuela Standard Time",
"America/Catamarca": "Argentina Standard Time",
"America/Cayenne": "SA Eastern Standard Time",
"America/Cayman": "SA Pacific Standard Time",
"America/Chicago": "Central Standard Time",
"America/Chihuahua": "Mountain Standard Time (Mexico)",
"America/Coral_Harbour": "SA Pacific Standard Time",
"America/Cordoba": "Argentina Standard Time",
"America/Costa_Rica": "Central America Standard Time",
"America/Creston": "US Mountain Standard Time",
"America/Cuiaba": "Central Brazilian Standard Time",
"America/Curacao": "SA Western Standard Time",
"America/Danmarkshavn": "UTC",
"America/Dawson": "Pacific Standard Time",
"America/Dawson_Creek": "US Mountain Standard Time",
"America/Denver": "Mountain Standard Time",
"America/Detroit": "Eastern Standard Time",
"America/Dominica": "SA Western Standard Time",
"America/Edmonton": "Mountain Standard Time",
"America/Eirunepe": "SA Pacific Standard Time",
"America/El_Salvador": "Central America Standard Time",
"America/Ensenada": "Pacific Standard Time (Mexico)",
"America/Fort_Nelson": "US Mountain Standard Time",
"America/Fort_Wayne": "US Eastern Standard Time",
"America/Fortaleza": "SA Eastern Standard Time",
"America/Glace_Bay": "Atlantic Standard Time",
"America/Godthab": "Greenland Standard Time",
"America/Goose_Bay": "Atlantic Standard Time",
"America/Grand_Turk": "Turks And Caicos Standard Time",
"America/Grenada": "SA Western Standard Time",
"America/Guadeloupe": "SA Western Standard Time",
"America/Guatemala": "Central America Standard Time",
"America/Guayaquil": "SA Pacific Standard Time",
"America/Guyana": "SA Western Standard Time",
"America/Halifax": "Atlantic Standard Time",
"America/Havana": "Cuba Standard Time",
"America/Hermosillo": "US Mountain Standard Time",
"America/Indiana/Indianapolis": "US Eastern Standard Time",
"America/Indiana/Knox": "Central Standard Time",
"America/Indiana/Marengo": "US Eastern Standard Time",
"America/Indiana/Petersburg": "Eastern Standard Time",
"America/Indiana/Tell_City": "Central Standard Time",
"America/Indiana/Vevay": "US Eastern Standard Time",
"America/Indiana/Vincennes": "Eastern Standard Time",
"America/Indiana/Winamac": "Eastern Standard Time",
"America/Indianapolis": "US Eastern Standard Time",
"America/Inuvik": "Mountain Standard Time",
"America/Iqaluit": "Eastern Standard Time",
"America/Jamaica": "SA Pacific Standard Time",
"America/Jujuy": "Argentina Standard Time",
"America/Juneau": "Alaskan Standard Time",
"America/Kentucky/Louisville": "Eastern Standard Time",
"America/Kentucky/Monticello": "Eastern Standard Time",
"America/Knox_IN": "Central Standard Time",
"America/Kralendijk": "SA Western Standard Time",
"America/La_Paz": "SA Western Standard Time",
"America/Lima": "SA Pacific Standard Time",
"America/Los_Angeles": "Pacific Standard Time",
"America/Louisville": "Eastern Standard Time",
"America/Lower_Princes": "SA Western Standard Time",
"America/Maceio": "SA Eastern Standard Time",
"America/Managua": "Central America Standard Time",
"America/Manaus": "SA Western Standard Time",
"America/Marigot": "SA Western Standard Time",
"America/Martinique": "SA Western Standard Time",
"America/Matamoros": "Central Standard Time",
"America/Mazatlan": "Mountain Standard Time (Mexico)",
"America/Mendoza": "Argentina Standard Time",
"America/Menominee": "Central Standard Time",
"America/Merida": "Central Standard Time (Mexico)",
"America/Metlakatla": "Alaskan Standard Time",
"America/Mexico_City": "Central Standard Time (Mexico)",
"America/Miquelon": "Saint Pierre Standard Time",
"America/Moncton": "Atlantic Standard Time",
"America/Monterrey": "Central Standard Time (Mexico)",
"America/Montevideo": "Montevideo Standard Time",
"America/Montreal": "Eastern Standard Time",
"America/Montserrat": "SA Western Standard Time",
"America/Nassau": "Eastern Standard Time",
"America/New_York": "Eastern Standard Time",
"America/Nipigon": "Eastern Standard Time",
"America/Nome": "Alaskan Standard Time",
"America/Noronha": "UTC-02",
"America/North_Dakota/Beulah": "Central Standard Time",
"America/North_Dakota/Center": "Central Standard Time",
"America/North_Dakota/New_Salem": "Central Standard Time",
"America/Ojinaga": "Mountain Standard Time",
"America/Panama": "SA Pacific Standard Time",
"America/Pangnirtung": "Eastern Standard Time",
"America/Paramaribo": "SA Eastern Standard Time",
"America/Phoenix": "US Mountain Standard Time",
"America/Port-au-Prince": "Haiti Standard Time",
"America/Port_of_Spain": "SA Western Standard Time",
"America/Porto_Acre": "SA Pacific Standard Time",
"America/Porto_Velho": "SA Western Standard Time",
"America/Puerto_Rico": "SA Western Standard Time",
"America/Punta_Arenas": "Magallanes Standard Time",
"America/Rainy_River": "Central Standard Time",
"America/Rankin_Inlet": "Central Standard Time",
"America/Recife": "SA Eastern Standard Time",
"America/Regina": "Canada Central Standard Time",
"America/Resolute": "Central Standard Time",
"America/Rio_Branco": "SA Pacific Standard Time",
"America/Rosario": "Argentina Standard Time",
"America/Santa_Isabel": "Pacific Standard Time (Mexico)",
"America/Santarem": "SA Eastern Standard Time",
"America/Santiago": "Pacific SA Standard Time",
"America/Santo_Domingo": "SA Western Standard Time",
"America/Sao_Paulo": "E. South America Standard Time",
"America/Scoresbysund": "Azores Standard Time",
"America/Shiprock": "Mountain Standard Time",
"America/Sitka": "Alaskan Standard Time",
"America/St_Barthelemy": "SA Western Standard Time",
"America/St_Johns": "Newfoundland Standard Time",
"America/St_Kitts": "SA Western Standard Time",
"America/St_Lucia": "SA Western Standard Time",
"America/St_Thomas": "SA Western Standard Time",
"America/St_Vincent": "SA Western Standard Time",
"America/Swift_Current": "Canada Central Standard Time",
"America/Tegucigalpa": "Central America Standard Time",
"America/Thule": "Atlantic Standard Time",
"America/Thunder_Bay": "Eastern Standard Time",
"America/Tijuana": "Pacific Standard Time (Mexico)",
"America/Toronto": "Eastern Standard Time",
"America/Tortola": "SA Western Standard Time",
"America/Vancouver": "Pacific Standard Time",
"America/Virgin": "SA Western Standard Time",
"America/Whitehorse": "Pacific Standard Time",
"America/Winnipeg": "Central Standard Time",
"America/Yakutat": "Alaskan Standard Time",
"America/Yellowknife": "Mountain Standard Time",
"Antarctica/Casey": "W. Australia Standard Time",
"Antarctica/Davis": "SE Asia Standard Time",
"Antarctica/DumontDUrville": "West Pacific Standard Time",
"Antarctica/Macquarie": "Central Pacific Standard Time",
"Antarctica/Mawson": "West Asia Standard Time",
"Antarctica/McMurdo": "New Zealand Standard Time",
"Antarctica/Palmer": "Magallanes Standard Time",
"Antarctica/Rothera": "SA Eastern Standard Time",
"Antarctica/South_Pole": "New Zealand Standard Time",
"Antarctica/Syowa": "E. Africa Standard Time",
"Antarctica/Vostok": "Central Asia Standard Time",
"Arctic/Longyearbyen": "W. Europe Standard Time",
"Asia/Aden": "Arab Standard Time",
"Asia/Almaty": "Central Asia Standard Time",
"Asia/Amman": "Jordan Standard Time",
"Asia/Anadyr": "Russia Time Zone 11",
"Asia/Aqtau": "West Asia Standard Time",
"Asia/Aqtobe": "West Asia Standard Time",
"Asia/Ashgabat": "West Asia Standard Time",
"Asia/Ashkhabad": "West Asia Standard Time",
"Asia/Atyrau": "West Asia Standard Time",
"Asia/Baghdad": "Arabic Standard Time",
"Asia/Bahrain": "Arab Standard Time",
"Asia/Baku": "Azerbaijan Standard Time",
"Asia/Bangkok": "SE Asia Standard Time",
"Asia/Barnaul": "Altai Standard Time",
"Asia/Beirut": "Middle East Standard Time",
"Asia/Bishkek": "Central Asia Standard Time",
"Asia/Brunei": "Singapore Standard Time",
"Asia/Calcutta": "India Standard Time",
"Asia/Chita": "Transbaikal Standard Time",
"Asia/Choibalsan": "Ulaanbaatar Standard Time",
"Asia/Chongqing": "China Standard Time",
"Asia/Chungking": "China Standard Time",
"Asia/Colombo": "Sri Lanka Standard Time",
"Asia/Dacca": "Bangladesh Standard Time",
"Asia/Damascus": "Syria Standard Time",
"Asia/Dhaka": "Bangladesh Standard Time",
"Asia/Dili": "Tokyo Standard Time",
"Asia/Dubai": "Arabian Standard Time",
"Asia/Dushanbe": "West Asia Standard Time",
"Asia/Famagusta": "GTB Standard Time",
"Asia/Gaza": "West Bank Standard Time",
"Asia/Harbin": "China Standard Time",
"Asia/Hebron": "West Bank Standard Time",
"Asia/Ho_Chi_Minh": "SE Asia Standard Time",
"Asia/Hong_Kong": "China Standard Time",
"Asia/Hovd": "W. Mongolia Standard Time",
"Asia/Irkutsk": "North Asia East Standard Time",
"Asia/Istanbul": "Turkey Standard Time",
"Asia/Jakarta": "SE Asia Standard Time",
"Asia/Jayapura": "Tokyo Standard Time",
"Asia/Jerusalem": "Israel Standard Time",
"Asia/Kabul": "Afghanistan Standard Time",
"Asia/Kamchatka": "Kamchatka Standard Time",
"Asia/Karachi": "Pakistan Standard Time",
"Asia/Kashgar": "Central Asia Standard Time",
"Asia/Kathmandu": "Nepal Standard Time",
"Asia/Katmandu": "Nepal Standard Time",
"Asia/Khandyga": "Yakutsk Standard Time",
"Asia/Kolkata": "India Standard Time",
"Asia/Krasnoyarsk": "North Asia Standard Time",
"Asia/Kuala_Lumpur": "Singapore Standard Time",
"Asia/Kuching": "Singapore Standard Time",
"Asia/Kuwait": "Arab Standard Time",
"Asia/Macao": "China Standard Time",
"Asia/Macau": "China Standard Time",
"Asia/Magadan": "Magadan Standard Time",
"Asia/Makassar": "Singapore Standard Time",
"Asia/Manila": "Singapore Standard Time",
"Asia/Muscat": "Arabian Standard Time",
"Asia/Nicosia": "GTB Standard Time",
"Asia/Novokuznetsk": "North Asia Standard Time",
"Asia/Novosibirsk": "N. Central Asia Standard Time",
"Asia/Omsk": "Omsk Standard Time",
"Asia/Oral": "West Asia Standard Time",
"Asia/Phnom_Penh": "SE Asia Standard Time",
"Asia/Pontianak": "SE Asia Standard Time",
"Asia/Pyongyang": "North Korea Standard Time",
"Asia/Qatar": "Arab Standard Time",
"Asia/Qostanay": "Central Asia Standard Time",
"Asia/Qyzylorda": "Qyzylorda Standard Time",
"Asia/Rangoon": "Myanmar Standard Time",
"Asia/Riyadh": "Arab Standard Time",
"Asia/Saigon": "SE Asia Standard Time",
"Asia/Sakhalin": "Sakhalin Standard Time",
"Asia/Samarkand": "West Asia Standard Time",
"Asia/Seoul": "Korea Standard Time",
"Asia/Shanghai": "China Standard Time",
"Asia/Singapore": "Singapore Standard Time",
"Asia/Srednekolymsk": "Russia Time Zone 10",
"Asia/Taipei": "Taipei Standard Time",
"Asia/Tashkent": "West Asia Standard Time",
"Asia/Tbilisi": "Georgian Standard Time",
"Asia/Tehran": "Iran Standard Time",
"Asia/Tel_Aviv": "Israel Standard Time",
"Asia/Thimbu": "Bangladesh Standard Time",
"Asia/Thimphu": "Bangladesh Standard Time",
"Asia/Tokyo": "Tokyo Standard Time",
"Asia/Tomsk": "Tomsk Standard Time",
"Asia/Ujung_Pandang": "Singapore Standard Time",
"Asia/Ulaanbaatar": "Ulaanbaatar Standard Time",
"Asia/Ulan_Bator": "Ulaanbaatar Standard Time",
"Asia/Urumqi": "Central Asia Standard Time",
"Asia/Ust-Nera": "Vladivostok Standard Time",
"Asia/Vientiane": "SE Asia Standard Time",
"Asia/Vladivostok": "Vladivostok Standard Time",
"Asia/Yakutsk": "Yakutsk Standard Time",
"Asia/Yangon": "Myanmar Standard Time",
"Asia/Yekaterinburg": "Ekaterinburg Standard Time",
"Asia/Yerevan": "Caucasus Standard Time",
"Atlantic/Azores": "Azores Standard Time",
"Atlantic/Bermuda": "Atlantic Standard Time",
"Atlantic/Canary": "GMT Standard Time",
"Atlantic/Cape_Verde": "Cape Verde Standard Time",
"Atlantic/Faeroe": "GMT Standard Time",
"Atlantic/Faroe": "GMT Standard Time",
"Atlantic/Jan_Mayen": "W. Europe Standard Time",
"Atlantic/Madeira": "GMT Standard Time",
"Atlantic/Reykjavik": "Greenwich Standard Time",
"Atlantic/South_Georgia": "UTC-02",
"Atlantic/St_Helena": "Greenwich Standard Time",
"Atlantic/Stanley": "SA Eastern Standard Time",
"Australia/ACT": "AUS Eastern Standard Time",
"Australia/Adelaide": "Cen. Australia Standard Time",
"Australia/Brisbane": "E. Australia Standard Time",
"Australia/Broken_Hill": "Cen. Australia Standard Time",
"Australia/Canberra": "AUS Eastern Standard Time",
"Australia/Currie": "Tasmania Standard Time",
"Australia/Darwin": "AUS Central Standard Time",
"Australia/Eucla": "Aus Central W. Standard Time",
"Australia/Hobart": "Tasmania Standard Time",
"Australia/LHI": "Lord Howe Standard Time",
"Australia/Lindeman": "E. Australia Standard Time",
"Australia/Lord_Howe": "Lord Howe Standard Time",
"Australia/Melbourne": "AUS Eastern Standard Time",
"Australia/NSW": "AUS Eastern Standard Time",
"Australia/North": "AUS Central Standard Time",
"Australia/Perth": "W. Australia Standard Time",
"Australia/Queensland": "E. Australia Standard Time",
"Australia/South": "Cen. Australia Standard Time",
"Australia/Sydney": "AUS Eastern Standard Time",
"Australia/Tasmania": "Tasmania Standard Time",
"Australia/Victoria": "AUS Eastern Standard Time",
"Australia/West": "W. Australia Standard Time",
"Australia/Yancowinna": "Cen. Australia Standard Time",
"Brazil/Acre": "SA Pacific Standard Time",
"Brazil/DeNoronha": "UTC-02",
"Brazil/East": "E. South America Standard Time",
"Brazil/West": "SA Western Standard Time",
"CET": "Romance Standard Time",
"CST6CDT": "Central Standard Time",
"Canada/Atlantic": "Atlantic Standard Time",
"Canada/Central": "Central Standard Time",
"Canada/East-Saskatchewan": "Canada Central Standard Time",
"Canada/Eastern": "Eastern Standard Time",
"Canada/Mountain": "Mountain Standard Time",
"Canada/Newfoundland": "Newfoundland Standard Time",
"Canada/Pacific": "Pacific Standard Time",
"Canada/Saskatchewan": "Canada Central Standard Time",
"Canada/Yukon": "Pacific Standard Time",
"Chile/Continental": "Pacific SA Standard Time",
"Chile/EasterIsland": "Easter Island Standard Time",
"Cuba": "Cuba Standard Time",
"EET": "GTB Standard Time",
"EST": "SA Pacific Standard Time",
"EST5EDT": "Eastern Standard Time",
"Egypt": "Egypt Standard Time",
"Eire": "GMT Standard Time",
"Etc/GMT": "UTC",
"Etc/GMT+0": "UTC",
"Etc/GMT+1": "Cape Verde Standard Time",
"Etc/GMT+10": "Hawaiian Standard Time",
"Etc/GMT+11": "UTC-11",
"Etc/GMT+12": "Dateline Standard Time",
"Etc/GMT+2": "Mid-Atlantic Standard Time",
"Etc/GMT+3": "SA Eastern Standard Time",
"Etc/GMT+4": "SA Western Standard Time",
"Etc/GMT+5": "SA Pacific Standard Time",
"Etc/GMT+6": "Central America Standard Time",
"Etc/GMT+7": "US Mountain Standard Time",
"Etc/GMT+8": "UTC-08",
"Etc/GMT+9": "UTC-09",
"Etc/GMT-0": "UTC",
"Etc/GMT-1": "W. Central Africa Standard Time",
"Etc/GMT-10": "West Pacific Standard Time",
"Etc/GMT-11": "Central Pacific Standard Time",
"Etc/GMT-12": "UTC+12",
"Etc/GMT-13": "UTC+13",
"Etc/GMT-14": "Line Islands Standard Time",
"Etc/GMT-2": "South Africa Standard Time",
"Etc/GMT-3": "E. Africa Standard Time",
"Etc/GMT-4": "Arabian Standard Time",
"Etc/GMT-5": "West Asia Standard Time",
"Etc/GMT-6": "Central Asia Standard Time",
"Etc/GMT-7": "SE Asia Standard Time",
"Etc/GMT-8": "Singapore Standard Time",
"Etc/GMT-9": "Tokyo Standard Time",
"Etc/GMT0": "UTC",
"Etc/Greenwich": "UTC",
"Etc/UCT": "UTC",
"Etc/UTC": "UTC",
"Etc/Universal": "UTC",
"Etc/Zulu": "UTC",
"Europe/Amsterdam": "W. Europe Standard Time",
"Europe/Andorra": "W. Europe Standard Time",
"Europe/Astrakhan": "Astrakhan Standard Time",
"Europe/Athens": "GTB Standard Time",
"Europe/Belfast": "GMT Standard Time",
"Europe/Belgrade": "Central European Standard Time",
"Europe/Berlin": "W. Europe Standard Time",
"Europe/Bratislava": "Central Europe Standard Time",
"Europe/Brussels": "Romance Standard Time",
"Europe/Bucharest": "GTB Standard Time",
"Europe/Budapest": "Central Europe Standard Time",
"Europe/Busingen": "W. Europe Standard Time",
"Europe/Chisinau": "E. Europe Standard Time",
"Europe/Copenhagen": "Romance Standard Time",
"Europe/Dublin": "GMT Standard Time",
"Europe/Gibraltar": "W. Europe Standard Time",
"Europe/Guernsey": "GMT Standard Time",
"Europe/Helsinki": "FLE Standard Time",
"Europe/Isle_of_Man": "GMT Standard Time",
"Europe/Istanbul": "Turkey Standard Time",
"Europe/Jersey": "GMT Standard Time",
"Europe/Kaliningrad": "Kaliningrad Standard Time",
"Europe/Kiev": "FLE Standard Time",
"Europe/Kirov": "Russian Standard Time",
"Europe/Lisbon": "GMT Standard Time",
"Europe/Ljubljana": "Central European Standard Time",
"Europe/London": "GMT Standard Time",
"Europe/Luxembourg": "W. Europe Standard Time",
"Europe/Madrid": "Romance Standard Time",
"Europe/Malta": "W. Europe Standard Time",
"Europe/Mariehamn": "FLE Standard Time",
"Europe/Minsk": "Belarus Standard Time",
"Europe/Monaco": "W. Europe Standard Time",
"Europe/Moscow": "Russian Standard Time",
"Europe/Nicosia": "GTB Standard Time",
"Europe/Oslo": "W. Europe Standard Time",
"Europe/Paris": "Romance Standard Time",
"Europe/Podgorica": "Central European Standard Time",
"Europe/Prague": "Central Europe Standard Time",
"Europe/Riga": "FLE Standard Time",
"Europe/Rome": "W. Europe Standard Time",
"Europe/Samara": "Russia Time Zone 3",
"Europe/San_Marino": "W. Europe Standard Time",
"Europe/Sarajevo": "Central European Standard Time",
"Europe/Saratov": "Saratov Standard Time",
"Europe/Simferopol": "Russian Standard Time",
"Europe/Skopje": "Central European Standard Time",
"Europe/Sofia": "FLE Standard Time",
"Europe/Stockholm": "W. Europe Standard Time",
"Europe/Tallinn": "FLE Standard Time",
"Europe/Tirane": "Central Europe Standard Time",
"Europe/Tiraspol": "E. Europe Standard Time",
"Europe/Ulyanovsk": "Astrakhan Standard Time",
"Europe/Uzhgorod": "FLE Standard Time",
"Europe/Vaduz": "W. Europe Standard Time",
"Europe/Vatican": "W. Europe Standard Time",
"Europe/Vienna": "W. Europe Standard Time",
"Europe/Vilnius": "FLE Standard Time",
"Europe/Volgograd": "Volgograd Standard Time",
"Europe/Warsaw": "Central European Standard Time",
"Europe/Zagreb": "Central European Standard Time",
"Europe/Zaporozhye": "FLE Standard Time",
"Europe/Zurich": "W. Europe Standard Time",
"GB": "GMT Standard Time",
"GB-Eire": "GMT Standard Time",
"GMT": "UTC",
"GMT+0": "UTC",
"GMT-0": "UTC",
"GMT0": "UTC",
"Greenwich": "UTC",
"HST": "Hawaiian Standard Time",
"Hongkong": "China Standard Time",
"Iceland": "Greenwich Standard Time",
"Indian/Antananarivo": "E. Africa Standard Time",
"Indian/Chagos": "Central Asia Standard Time",
"Indian/Christmas": "SE Asia Standard Time",
"Indian/Cocos": "Myanmar Standard Time",
"Indian/Comoro": "E. Africa Standard Time",
"Indian/Kerguelen": "West Asia Standard Time",
"Indian/Mahe": "Mauritius Standard Time",
"Indian/Maldives": "West Asia Standard Time",
"Indian/Mauritius": "Mauritius Standard Time",
"Indian/Mayotte": "E. Africa Standard Time",
"Indian/Reunion": "Mauritius Standard Time",
"Iran": "Iran Standard Time",
"Israel": "Israel Standard Time",
"Jamaica": "SA Pacific Standard Time",
"Japan": "Tokyo Standard Time",
"Kwajalein": "UTC+12",
"Libya": "Libya Standard Time",
"MET": "W. Europe Standard Time",
"MST": "US Mountain Standard Time",
"MST7MDT": "Mountain Standard Time",
"Mexico/BajaNorte": "Pacific Standard Time (Mexico)",
"Mexico/BajaSur": "Mountain Standard Time (Mexico)",
"Mexico/General": "Central Standard Time (Mexico)",
"NZ": "New Zealand Standard Time",
"NZ-CHAT": "Chatham Islands Standard Time",
"Navajo": "Mountain Standard Time",
"PRC": "China Standard Time",
"PST8PDT": "Pacific Standard Time",
"Pacific/Apia": "Samoa Standard Time",
"Pacific/Auckland": "New Zealand Standard Time",
"Pacific/Bougainville": "Bougainville Standard Time",
"Pacific/Chatham": "Chatham Islands Standard Time",
"Pacific/Chuuk": "West Pacific Standard Time",
"Pacific/Easter": "Easter Island Standard Time",
"Pacific/Efate": "Central Pacific Standard Time",
"Pacific/Enderbury": "UTC+13",
"Pacific/Fakaofo": "UTC+13",
"Pacific/Fiji": "Fiji Standard Time",
"Pacific/Funafuti": "UTC+12",
"Pacific/Galapagos": "Central America Standard Time",
"Pacific/Gambier": "UTC-09",
"Pacific/Guadalcanal": "Central Pacific Standard Time",
"Pacific/Guam": "West Pacific Standard Time",
"Pacific/Honolulu": "Hawaiian Standard Time",
"Pacific/Johnston": "Hawaiian Standard Time",
"Pacific/Kiritimati": "Line Islands Standard Time",
"Pacific/Kosrae": "Central Pacific Standard Time",
"Pacific/Kwajalein": "UTC+12",
"Pacific/Majuro": "UTC+12",
"Pacific/Marquesas": "Marquesas Standard Time",
"Pacific/Midway": "UTC-11",
"Pacific/Nauru": "UTC+12",
"Pacific/Niue": "UTC-11",
"Pacific/Norfolk": "Norfolk Standard Time",
"Pacific/Noumea": "Central Pacific Standard Time",
"Pacific/Pago_Pago": "UTC-11",
"Pacific/Palau": "Tokyo Standard Time",
"Pacific/Pitcairn": "UTC-08",
"Pacific/Pohnpei": "Central Pacific Standard Time",
"Pacific/Ponape": "Central Pacific Standard Time",
"Pacific/Port_Moresby": "West Pacific Standard Time",
"Pacific/Rarotonga": "Hawaiian Standard Time",
"Pacific/Saipan": "West Pacific Standard Time",
"Pacific/Samoa": "UTC-11",
"Pacific/Tahiti": "Hawaiian Standard Time",
"Pacific/Tarawa": "UTC+12",
"Pacific/Tongatapu": "Tonga Standard Time",
"Pacific/Truk": "West Pacific Standard Time",
"Pacific/Wake": "UTC+12",
"Pacific/Wallis": "UTC+12",
"Pacific/Yap": "West Pacific Standard Time",
"Poland": "Central European Standard Time",
"Portugal": "GMT Standard Time",
"ROC": "Taipei Standard Time",
"ROK": "Korea Standard Time",
"Singapore": "Singapore Standard Time",
"Turkey": "Turkey Standard Time",
"UCT": "UTC",
"US/Alaska": "Alaskan Standard Time",
"US/Aleutian": "Aleutian Standard Time",
"US/Arizona": "US Mountain Standard Time",
"US/Central": "Central Standard Time",
"US/East-Indiana": "US Eastern Standard Time",
"US/Eastern": "Eastern Standard Time",
"US/Hawaii": "Hawaiian Standard Time",
"US/Indiana-Starke": "Central Standard Time",
"US/Michigan": "Eastern Standard Time",
"US/Mountain": "Mountain Standard Time",
"US/Pacific": "Pacific Standard Time",
"US/Pacific-New": "Pacific Standard Time",
"US/Samoa": "UTC-11",
"UTC": "UTC",
"Universal": "UTC",
"W-SU": "Russian Standard Time",
"WET": "GMT Standard Time",
"Zulu": "UTC"
}
WIN_TO_IANA = {v: k for k, v in IANA_TO_WIN.items() if v != 'UTC' or (v == 'UTC' and k == 'UTC')}
def get_iana_tz(windows_tz):
timezone = WIN_TO_IANA.get(windows_tz)
if timezone is None:
# it seems to work a lot of times:
timezone = WIN_TO_IANA.get(windows_tz + ' Standard Time')
# Return what we have.
if timezone is None:
raise pytz.UnknownTimeZoneError(
"Can't find Windows TimeZone " + windows_tz)
return timezone
def get_windows_tz(iana_tz):
timezone = IANA_TO_WIN.get(
iana_tz.zone if isinstance(iana_tz, tzinfo) else iana_tz)
if timezone is None:
raise pytz.UnknownTimeZoneError(
"Can't find Iana TimeZone " + iana_tz.zone)
return timezone
| true
| true
|
790a06859cb9b44945e24cea00eb5e089b3fac70
| 5,955
|
py
|
Python
|
djangoFiles/theJekyllProject/models.py
|
silvrwolfboy/theJekyllProject
|
e36aa6605e762d8b14277e636322096d19455aa9
|
[
"MIT"
] | 20
|
2017-09-29T20:00:33.000Z
|
2021-11-08T15:01:40.000Z
|
djangoFiles/theJekyllProject/models.py
|
silvrwolfboy/theJekyllProject
|
e36aa6605e762d8b14277e636322096d19455aa9
|
[
"MIT"
] | 75
|
2017-10-03T12:32:05.000Z
|
2022-01-13T00:44:15.000Z
|
djangoFiles/theJekyllProject/models.py
|
singh1114/theJekyllProject
|
e36aa6605e762d8b14277e636322096d19455aa9
|
[
"MIT"
] | 14
|
2017-12-29T12:32:08.000Z
|
2020-03-06T12:42:19.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from ckeditor.fields import RichTextField
from theJekyllProject.choices import BlogTemplates
class Contact(models.Model):
first_name = models.CharField(
max_length=200
)
last_name = models.CharField(
max_length=200,
null=True,
blank=True
)
email = models.EmailField(
max_length=200,
null=True,
blank=True
)
message = models.CharField(
max_length=5000
)
def __str__(self):
return '%s sent message %s' % (self.email, self.message)
class Repo(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE
)
repo = models.CharField(
max_length=200,
)
main = models.BooleanField(
default=False
)
template = models.CharField(
max_length=2,
choices=BlogTemplates.choices,
default=BlogTemplates.TEMPLATE_NOT_SET
)
def __str__(self):
return '%s has made %s and is %s' % (self.user, self.repo, self.main)
class CName(models.Model):
"""
Stores the CNAME (custom domain) information for a repo.
"""
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
)
c_name = models.CharField(max_length=200)
class Post(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
author = models.CharField(max_length=100, null=True, blank=True)
comments = models.BooleanField(default=True)
date = models.DateField(auto_now_add=True,)
time = models.TimeField(auto_now_add=True,)
layouts = (
('post', 'post'),
)
layout = models.CharField(
max_length=100,
choices=layouts,
null=True,
blank=True
)
title = models.CharField(max_length=2000)
slug = models.CharField(max_length=2000, null=True, blank=True)
content = RichTextField()
background = models.ImageField(upload_to='pictures/', null=True,
blank=True)
def __str__(self):
return '%s on %s' % (self.title, self.date)
class Page(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE
)
title = models.CharField(max_length=2000)
permalink = models.CharField(max_length=2000)
layout = models.CharField(max_length=2000)
description = models.CharField(
max_length=2000,
default='Description of the Page',
)
background = models.ImageField(upload_to='pictures/', null=True,
blank=True)
content = RichTextField()
class PostCategory(models.Model):
post = models.ForeignKey(Post)
category = models.CharField(max_length=200, null=True, blank=True)
class SiteData(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
name = models.CharField(
max_length=200,
default='Your site title',
)
description = models.CharField(
max_length=2000,
default='Description of the site',
)
avatar = models.URLField(
null=True,
blank=True
)
author = models.CharField(
max_length=2000,
default='Author of the site',
null=True,
blank=True
)
baseurl = models.CharField(
max_length=200,
default='/jekyllblog',
null=True,
blank=True
)
url = models.CharField(
max_length=200,
default='http://blog.jeklog.com',
null=True,
blank=True
)
class SiteSocialProfile(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
dribbble = models.CharField(
max_length=200,
null=True,
blank=True
)
email = models.EmailField(
max_length=200,
null=True,
blank=True
)
facebook = models.CharField(
max_length=200,
null=True,
blank=True
)
flickr = models.CharField(
max_length=200,
null=True,
blank=True
)
github = models.CharField(
max_length=200,
null=True,
blank=True
)
instagram = models.CharField(
max_length=200,
null=True,
blank=True
)
linkedin = models.CharField(
max_length=200,
null=True,
blank=True
)
pinterest = models.CharField(
max_length=200,
null=True,
blank=True
)
rss = models.CharField(
max_length=200,
null=True,
blank=True
)
twitter = models.CharField(
max_length=200,
null=True,
blank=True
)
stackoverflow = models.CharField(
max_length=200,
null=True,
blank=True
)
youtube = models.CharField(
max_length=200,
null=True,
blank=True
)
googleplus = models.CharField(
max_length=200,
null=True,
blank=True
)
disqus = models.CharField(
max_length=200,
null=True,
blank=True
)
google_analytics = models.CharField(
max_length=200,
null=True,
blank=True
)
class SitePlugin(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
plugin = models.CharField(
max_length=200,
)
class SiteExclude(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
exclude = models.CharField(
max_length=200,
)
class SiteTheme(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
theme = models.CharField(
max_length=200,
)
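# A hypothetical sketch of how these models fit together, assuming migrations have been
# applied; it would run inside a Django shell, and every field value below is made up
# purely for illustration.
#
#   from django.contrib.auth.models import User
#   user = User.objects.create_user(username='alice')
#   repo = Repo.objects.create(user=user, repo='alice.github.io', main=True)
#   SiteData.objects.create(repo=repo, name='My blog', description='Notes and posts')
#   post = Post.objects.create(repo=repo, title='Hello world', content='<p>First post</p>')
#   PostCategory.objects.create(post=post, category='general')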
| 22.471698
| 77
| 0.589253
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
from ckeditor.fields import RichTextField
from theJekyllProject.choices import BlogTemplates
class Contact(models.Model):
first_name = models.CharField(
max_length=200
)
last_name = models.CharField(
max_length=200,
null=True,
blank=True
)
email = models.EmailField(
max_length=200,
null=True,
blank=True
)
message = models.CharField(
max_length=5000
)
def __str__(self):
return '%s sent message %s' % (self.email, self.message)
class Repo(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE
)
repo = models.CharField(
max_length=200,
)
main = models.BooleanField(
default=False
)
template = models.CharField(
max_length=2,
choices=BlogTemplates.choices,
default=BlogTemplates.TEMPLATE_NOT_SET
)
def __str__(self):
return '%s has made %s and is %s' % (self.user, self.repo, self.main)
class CName(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
)
c_name = models.CharField(max_length=200)
class Post(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
author = models.CharField(max_length=100, null=True, blank=True)
comments = models.BooleanField(default=True)
date = models.DateField(auto_now_add=True,)
time = models.TimeField(auto_now_add=True,)
layouts = (
('post', 'post'),
)
layout = models.CharField(
max_length=100,
choices=layouts,
null=True,
blank=True
)
title = models.CharField(max_length=2000)
slug = models.CharField(max_length=2000, null=True, blank=True)
content = RichTextField()
background = models.ImageField(upload_to='pictures/', null=True,
blank=True)
def __str__(self):
return '%s on %s' % (self.title, self.date)
class Page(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE
)
title = models.CharField(max_length=2000)
permalink = models.CharField(max_length=2000)
layout = models.CharField(max_length=2000)
description = models.CharField(
max_length=2000,
default='Description of the Page',
)
background = models.ImageField(upload_to='pictures/', null=True,
blank=True)
content = RichTextField()
class PostCategory(models.Model):
post = models.ForeignKey(Post)
category = models.CharField(max_length=200, null=True, blank=True)
class SiteData(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
name = models.CharField(
max_length=200,
default='Your site title',
)
description = models.CharField(
max_length=2000,
default='Description of the site',
)
avatar = models.URLField(
null=True,
blank=True
)
author = models.CharField(
max_length=2000,
default='Author of the site',
null=True,
blank=True
)
baseurl = models.CharField(
max_length=200,
default='/jekyllblog',
null=True,
blank=True
)
url = models.CharField(
max_length=200,
default='http://blog.jeklog.com',
null=True,
blank=True
)
class SiteSocialProfile(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
dribbble = models.CharField(
max_length=200,
null=True,
blank=True
)
email = models.EmailField(
max_length=200,
null=True,
blank=True
)
facebook = models.CharField(
max_length=200,
null=True,
blank=True
)
flickr = models.CharField(
max_length=200,
null=True,
blank=True
)
github = models.CharField(
max_length=200,
null=True,
blank=True
)
instagram = models.CharField(
max_length=200,
null=True,
blank=True
)
linkedin = models.CharField(
max_length=200,
null=True,
blank=True
)
pinterest = models.CharField(
max_length=200,
null=True,
blank=True
)
rss = models.CharField(
max_length=200,
null=True,
blank=True
)
twitter = models.CharField(
max_length=200,
null=True,
blank=True
)
stackoverflow = models.CharField(
max_length=200,
null=True,
blank=True
)
youtube = models.CharField(
max_length=200,
null=True,
blank=True
)
googleplus = models.CharField(
max_length=200,
null=True,
blank=True
)
disqus = models.CharField(
max_length=200,
null=True,
blank=True
)
google_analytics = models.CharField(
max_length=200,
null=True,
blank=True
)
class SitePlugin(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
plugin = models.CharField(
max_length=200,
)
class SiteExclude(models.Model):
repo = models.ForeignKey(
Repo,
on_delete=models.CASCADE,
)
exclude = models.CharField(
max_length=200,
)
class SiteTheme(models.Model):
repo = models.OneToOneField(
Repo,
on_delete=models.CASCADE,
primary_key=True,
)
theme = models.CharField(
max_length=200,
)
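# A hedged usage sketch (assumes a configured Django project and an existing
# auth User; the object names below are illustrative, not part of the app):
#
#     repo = Repo.objects.create(user=some_user, repo='myblog', main=True)
#     SiteData.objects.create(repo=repo, name='My Blog')
#     Post.objects.create(repo=repo, title='Hello', layout='post',
#                         content='<p>First post</p>')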
| true
| true
|
790a06e6ecf4d0361e902f90e38add8657bb3375
| 363
|
py
|
Python
|
functions/register_module/main.py
|
mondele/tx-manager
|
ddbbeeae5990a327ffc14b42c478d3ea435c0533
|
[
"MIT"
] | 3
|
2017-03-17T02:25:21.000Z
|
2017-05-18T22:18:20.000Z
|
functions/register_module/main.py
|
mondele/tx-manager
|
ddbbeeae5990a327ffc14b42c478d3ea435c0533
|
[
"MIT"
] | 184
|
2016-10-13T02:56:16.000Z
|
2021-03-25T21:27:20.000Z
|
functions/register_module/main.py
|
mondele/tx-manager
|
ddbbeeae5990a327ffc14b42c478d3ea435c0533
|
[
"MIT"
] | 16
|
2016-09-15T23:34:19.000Z
|
2019-07-25T07:06:32.000Z
|
from __future__ import unicode_literals, print_function
from libraries.lambda_handlers.register_module_handler import RegisterModuleHandler
def handle(event, context):
"""
Called by a module when it is deployed to register it
:param dict event:
:param context:
:return dict:
"""
return RegisterModuleHandler().handle(event, context)
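# A hedged sketch of what a registration event might look like when AWS Lambda
# invokes handle(); the field names below are illustrative assumptions, not the
# documented tx-manager payload.
_EXAMPLE_EVENT = {
    'data': {
        'name': 'example_module',        # hypothetical module name
        'resource_types': ['example'],   # hypothetical list of supported types
    }
}
# handle(_EXAMPLE_EVENT, None) simply delegates to RegisterModuleHandler.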
| 27.923077
| 83
| 0.752066
|
from __future__ import unicode_literals, print_function
from libraries.lambda_handlers.register_module_handler import RegisterModuleHandler
def handle(event, context):
return RegisterModuleHandler().handle(event, context)
| true
| true
|
790a07139748007d8b569917c15f6b0f85c2a59c
| 1,249
|
py
|
Python
|
errorCheckTool/py23.py
|
peerke88/error-check-tool
|
c5b701561a3f17c966352d3f5195b04f04ae7fc9
|
[
"MIT"
] | 2
|
2021-12-11T22:46:33.000Z
|
2022-01-02T02:29:03.000Z
|
errorCheckTool/py23.py
|
peerke88/error-check-tool
|
c5b701561a3f17c966352d3f5195b04f04ae7fc9
|
[
"MIT"
] | null | null | null |
errorCheckTool/py23.py
|
peerke88/error-check-tool
|
c5b701561a3f17c966352d3f5195b04f04ae7fc9
|
[
"MIT"
] | null | null | null |
"""
Makes python 2 behave more like python 3.
Ideally we import this globally so all our python 2 interpreters will assist in spotting errors early.
"""
# future imports are harmless if they implement behaviour that already exists in the current interpreter version
from __future__ import absolute_import, division, print_function
import sys
from collections import OrderedDict
if sys.version_info.major == 2:
# Override dict and make items() behave like iteritems() to retain performance
class dict(dict):
def items(self):
return super(dict, self).iteritems()
def keys(self):
return super(dict, self).iterkeys()
def values(self):
return super(dict, self).itervalues()
class OrderedDict(OrderedDict):
def items(self):
return super(OrderedDict, self).iteritems()
def keys(self):
return super(OrderedDict, self).iterkeys()
def values(self):
return super(OrderedDict, self).itervalues()
# Override range with xrange to mimic python3's range
range = xrange
import cStringIO as io
else:
unicode = str
long = int
import io
try:
from typing import *
T = TypeVar('T')
except:
pass
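# A minimal, hedged usage sketch: with the overrides above, the same calling
# code is lazy on Python 2 (iteritems()/xrange underneath) and unchanged on
# Python 3, which is the point of importing this module everywhere.
if __name__ == '__main__':
    d = dict(a=1, b=2)
    for key, value in d.items():   # iterator on py2, dict view on py3
        print(key, value)
    print(list(range(3)))          # xrange-backed on py2, plain range on py3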
| 27.755556
| 112
| 0.670136
|
from __future__ import absolute_import, division, print_function
import sys
from collections import OrderedDict
if sys.version_info.major == 2:
class dict(dict):
def items(self):
return super(dict, self).iteritems()
def keys(self):
return super(dict, self).iterkeys()
def values(self):
return super(dict, self).itervalues()
class OrderedDict(OrderedDict):
def items(self):
return super(OrderedDict, self).iteritems()
def keys(self):
return super(OrderedDict, self).iterkeys()
def values(self):
return super(OrderedDict, self).itervalues()
range = xrange
import cStringIO as io
else:
unicode = str
long = int
import io
try:
from typing import *
T = TypeVar('T')
except:
pass
| true
| true
|
790a072b24f91a57ca39a637043e1157ec49e433
| 1,174
|
py
|
Python
|
term06 (permutation and combination).py
|
ahammadshawki8/Proggraming-Terms
|
264156b6cfb347fc1b3aaa966c44aeab8dca26c2
|
[
"MIT"
] | 1
|
2021-06-07T00:22:28.000Z
|
2021-06-07T00:22:28.000Z
|
term06 (permutation and combination).py
|
ahammadshawki8/Proggraming-Terms
|
264156b6cfb347fc1b3aaa966c44aeab8dca26c2
|
[
"MIT"
] | 2
|
2021-03-03T02:22:42.000Z
|
2021-04-24T03:26:42.000Z
|
term06 (permutation and combination).py
|
ahammadshawki8/Proggraming-Terms
|
264156b6cfb347fc1b3aaa966c44aeab8dca26c2
|
[
"MIT"
] | null | null | null |
# COMBINATION:
# combination is all the different ways that we can group something where the order does not matter.
# PERMUTATION:
# permutation is all the different ways that we can group something where the order does matter.
import itertools
my_list=[1,2,3]
my_combinations=itertools.combinations(my_list,2)# here the first argument is a list and the second argument is how many items we want in a group. it is the r of nCr.
for c in my_combinations:
print(c)
my_permutations=itertools.permutations(my_list,2)
for c in my_permutations:
print(c)
# When should we use combinations and permutations?
# if the order doesn't matter, we should use combinations.
import itertools
my_list=[1,2,3,4,5,6]
my_combinations=itertools.combinations(my_list,3)
answer=[results for results in my_combinations if sum(results)==10]
for i in answer:
print(i)
# if the order does matter we should use permutations.
# word matching game.
import itertools
word="sample"
my_letters="pslame"
my_permutations=itertools.permutations(my_letters,len(my_letters))
for p in my_permutations:
if "".join(p) == word:
print("Match!")
break
else:
print("No match")
| 29.35
| 156
| 0.748722
|
import itertools
my_list=[1,2,3]
my_combinations=itertools.combinations(my_list,2)
for c in my_combinations:
print(c)
my_permutations=itertools.permutations(my_list,2)
for c in my_permutations:
print(c)
import itertools
my_list=[1,2,3,4,5,6]
my_combinations=itertools.combinations(my_list,3)
answer=[results for results in my_combinations if sum(results)==10]
for i in answer:
print(i)
import itertools
word="sample"
my_letters="pslame"
my_permutations=itertools.permutations(my_letters,len(my_letters))
for p in my_permutations:
if "".join(p) == word:
print("Match!")
break
else:
print("No match")
| true
| true
|
790a07ec3f26b37d4f73c9edf3deb5cd4d6acc16
| 9,386
|
py
|
Python
|
spectral_analysis/unsupervised_learning/autoencoder/autoencoder_bestmodel.py
|
csepreghy/spectral_analysis
|
1cbd9770347a71721164a7daf7b133ad0eeba8e4
|
[
"MIT"
] | 5
|
2019-05-29T07:16:27.000Z
|
2019-08-20T07:15:54.000Z
|
spectral_analysis/unsupervised_learning/autoencoder/autoencoder_bestmodel.py
|
csepreghy/spectral-analysis
|
1cbd9770347a71721164a7daf7b133ad0eeba8e4
|
[
"MIT"
] | 19
|
2020-02-20T09:48:46.000Z
|
2020-02-24T11:42:54.000Z
|
spectral_analysis/unsupervised_learning/autoencoder/autoencoder_bestmodel.py
|
csepreghy/spectral_analysis
|
1cbd9770347a71721164a7daf7b133ad0eeba8e4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import time
from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import TensorBoard, History, EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam, Nadam, RMSprop
from tensorflow.keras.callbacks import EarlyStopping
from kerastuner.engine.hyperparameters import HyperParameters
from kerastuner.tuners import RandomSearch
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import seaborn as sns
from spectral_analysis.classifiers.neural_network.helper_functions import train_test_split
from spectral_analysis.plotify import Plotify
class AutoEncoder():
def __init__(self, df_source_info, df_fluxes, df_wavelengths, load_model, weights_path=''):
self.load_model = load_model
self.weights_path = weights_path
X = self._prepare_data(df_source_info, df_fluxes, df_wavelengths)
indeces = list(range(len(X)))
X_train, X_test, self.i_train, self.i_test = train_test_split(X, 0.2, indeces=indeces)
X_train, X_val, self.i_train, self.i_val = train_test_split(X_train, 0.2, indeces=indeces)
self.scaler = StandardScaler()
X_train = self.scaler.fit_transform(X_train)
X_test = self.scaler.transform(X_test)
X_val = self.scaler.transform(X_val)
self.X_train = np.expand_dims(X_train, axis=2)
self.X_test = np.expand_dims(X_test, axis=2)
self.X_val = np.expand_dims(X_val, axis=2)
def _prepare_data(self, df_source_info, df_fluxes, df_wavelengths):
# self.df_source_info = df_source_info.loc[df_source_info['class'] == 'QSO']
self.df_source_info = df_source_info
self.objids = self.df_source_info['objid'].to_numpy()
fluxes = df_fluxes.loc[df_fluxes['objid'].isin(self.objids)]
X = np.delete(fluxes.values, 0, axis=1)
X = X[:, 0::2]
print(f'X.shape = {X.shape}')
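        # The next two lines (presumably) drop every 25th sample and trim to
        # 1792 values so the encoder's pooling chain (4*4*2*2*2*2 = 256x
        # downsampling) leaves an integer-length bottleneck of 1792/256 = 7.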
X = X[:, np.mod(np.arange(X[0].size),25)!=0]
X = X[:,:1792]
print(f'X.shape = {X.shape}')
wavelengths = df_wavelengths.to_numpy()
wavelengths = wavelengths[::2]
self.wavelengths = wavelengths[0:1792]
# plot_spectrum(X[0], wavelengths)
return X
def build_model(self):
# ================================================================================== #
# ==================================== ENCODER ===================================== #
# ================================================================================== #
input_layer = Input(shape=(self.X_train.shape[1], 1))
# encoder
x = Conv1D(filters=256,
kernel_size=7,
activation='relu',
padding='same')(input_layer)
x = MaxPooling1D(4)(x)
x = Conv1D(filters=128,
kernel_size=5,
activation='relu',
padding='same')(x)
x = MaxPooling1D(4)(x)
x = Conv1D(filters=64,
kernel_size=5,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=1,
kernel_size=3,
activation='relu',
padding='same')(x)
encoded = MaxPooling1D(2, padding='same')(x)
# ================================================================================== #
# ==================================== DECODER ===================================== #
# ================================================================================== #
x = Conv1D(filters=1,
kernel_size=3,
activation='relu',
padding='same')(encoded)
x = UpSampling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=64,
kernel_size=5,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=128,
kernel_size=5,
activation='relu',
padding='same')(x)
x = UpSampling1D(4)(x)
x = Conv1D(filters=256,
kernel_size=7,
activation='relu',
padding='same')(x)
x = UpSampling1D(4)(x)
decoded = Conv1D(1, 1, activation='tanh', padding='same')(x)
self.autoencoder = Model(input_layer, decoded)
self.autoencoder.summary()
self.autoencoder.compile(loss='mse', optimizer='adam')
return self.autoencoder
def train_model(self, epochs, batch_size=32):
model = self.build_model()
if self.load_model == False:
modelcheckpoint = ModelCheckpoint(filepath='logs/1-14_autoencoder.epoch{epoch:02d}.h5',
monitor='val_loss',
save_best_only=True)
history = model.fit(x=self.X_train,
y=self.X_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(self.X_val, self.X_val),
callbacks=[EarlyStopping('val_loss', patience=8), modelcheckpoint])
self.evaluate_model(model)
else:
model.load_weights(self.weights_path)
print(f'model = {model}')
# self.evaluate_model(model)
self.get_bottleneck_values(model)
return model
def get_bottleneck_values(self, model):
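        # 'conv1d_5' is Keras's auto-generated name for the sixth Conv1D, i.e.
        # the 1-filter convolution feeding the encoded pooling layer (this
        # assumes no other model was built in the same session, which would
        # shift the numbering).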
bottleneck = model.get_layer('conv1d_5')
extractor = Model(inputs=model.inputs, outputs=[bottleneck.output])
features = extractor(self.X_test)
features = np.squeeze(features, axis=2)
df_source_info_test = pd.DataFrame({'class': self.df_source_info.iloc[self.i_test]['class'].values})
print(f'df_source_info_test = {df_source_info_test}')
df = pd.DataFrame(features)
df = df.join(df_source_info_test)
print(f'df = {df}')
sns.set(style="ticks", color_codes=True)
sns.pairplot(df, hue='class')
plt.savefig('plots/autoencoder_pairplot', dpi=100)
def evaluate_model(self, model):
preds = model.predict(self.X_test)
print(self.X_test.shape)
self.X_test = np.squeeze(self.X_test, axis=2)
preds = np.squeeze(preds, axis=2)
print(self.X_test.shape)
self.X_test = self.scaler.inverse_transform(self.X_test)
preds = self.scaler.inverse_transform(preds)
for i in range(100):
qso_ra = self.df_source_info.iloc[self.i_test[i]]['ra']
qso_dec = self.df_source_info.iloc[self.i_test[i]]['dec']
qso_plate = self.df_source_info.iloc[self.i_test[i]]['plate']
qso_z = self.df_source_info.iloc[self.i_test[i]]['z']
qso_class = self.df_source_info.iloc[self.i_test[i]]['class']
plotify = Plotify(theme='ugly')
_, axs = plotify.get_figax(nrows=2, figsize=(5.8, 8))
axs[0].plot(self.wavelengths, self.X_test[i], color=plotify.c_orange)
axs[1].plot(self.wavelengths, preds[i], color=plotify.c_orange)
axs[0].set_title(f'ra = {qso_ra}, dec = {qso_dec}, \n z = {qso_z}, plate = {qso_plate}, class = {qso_class} \n', fontsize=14)
axs[1].set_title(f'Autoencoder recreation \n')
axs[0].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
axs[1].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
axs[1].set_xlabel('Wavelength (Å)')
plt.subplots_adjust(hspace=0.4)
plt.savefig(f'plots/autoencoder/__all_sources/_autoencoder_{i}', dpi=160)
return preds
def main():
df_fluxes = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='fluxes').head(5000)
df_source_info = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='source_info').head(5000)
df_wavelengths = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='wavelengths')
ae = AutoEncoder(df_source_info, df_fluxes, df_wavelengths, load_model=False, weights_path='logs/colab-logs/_all_sources1-14_autoencoder.epoch30.h5')
ae.train_model(epochs=12, batch_size=64)
if __name__ == "__main__":
main()
| 38
| 153
| 0.544002
|
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import time
from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import TensorBoard, History, EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam, Nadam, RMSprop
from tensorflow.keras.callbacks import EarlyStopping
from kerastuner.engine.hyperparameters import HyperParameters
from kerastuner.tuners import RandomSearch
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import seaborn as sns
from spectral_analysis.classifiers.neural_network.helper_functions import train_test_split
from spectral_analysis.plotify import Plotify
class AutoEncoder():
def __init__(self, df_source_info, df_fluxes, df_wavelengths, load_model, weights_path=''):
self.load_model = load_model
self.weights_path = weights_path
X = self._prepare_data(df_source_info, df_fluxes, df_wavelengths)
indeces = list(range(len(X)))
X_train, X_test, self.i_train, self.i_test = train_test_split(X, 0.2, indeces=indeces)
X_train, X_val, self.i_train, self.i_val = train_test_split(X_train, 0.2, indeces=indeces)
self.scaler = StandardScaler()
X_train = self.scaler.fit_transform(X_train)
X_test = self.scaler.transform(X_test)
X_val = self.scaler.transform(X_val)
self.X_train = np.expand_dims(X_train, axis=2)
self.X_test = np.expand_dims(X_test, axis=2)
self.X_val = np.expand_dims(X_val, axis=2)
def _prepare_data(self, df_source_info, df_fluxes, df_wavelengths):
self.df_source_info = df_source_info
self.objids = self.df_source_info['objid'].to_numpy()
fluxes = df_fluxes.loc[df_fluxes['objid'].isin(self.objids)]
X = np.delete(fluxes.values, 0, axis=1)
X = X[:, 0::2]
print(f'X.shape = {X.shape}')
X = X[:, np.mod(np.arange(X[0].size),25)!=0]
X = X[:,:1792]
print(f'X.shape = {X.shape}')
wavelengths = df_wavelengths.to_numpy()
wavelengths = wavelengths[::2]
self.wavelengths = wavelengths[0:1792]
return X
def build_model(self):
input_layer = Input(shape=(self.X_train.shape[1], 1))
x = Conv1D(filters=256,
kernel_size=7,
activation='relu',
padding='same')(input_layer)
x = MaxPooling1D(4)(x)
x = Conv1D(filters=128,
kernel_size=5,
activation='relu',
padding='same')(x)
x = MaxPooling1D(4)(x)
x = Conv1D(filters=64,
kernel_size=5,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=1,
kernel_size=3,
activation='relu',
padding='same')(x)
encoded = MaxPooling1D(2, padding='same')(x)
x = Conv1D(filters=1,
kernel_size=3,
activation='relu',
padding='same')(encoded)
x = UpSampling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=64,
kernel_size=5,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=128,
kernel_size=5,
activation='relu',
padding='same')(x)
x = UpSampling1D(4)(x)
x = Conv1D(filters=256,
kernel_size=7,
activation='relu',
padding='same')(x)
x = UpSampling1D(4)(x)
decoded = Conv1D(1, 1, activation='tanh', padding='same')(x)
self.autoencoder = Model(input_layer, decoded)
self.autoencoder.summary()
self.autoencoder.compile(loss='mse', optimizer='adam')
return self.autoencoder
def train_model(self, epochs, batch_size=32):
model = self.build_model()
if self.load_model == False:
modelcheckpoint = ModelCheckpoint(filepath='logs/1-14_autoencoder.epoch{epoch:02d}.h5',
monitor='val_loss',
save_best_only=True)
history = model.fit(x=self.X_train,
y=self.X_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(self.X_val, self.X_val),
callbacks=[EarlyStopping('val_loss', patience=8), modelcheckpoint])
self.evaluate_model(model)
else:
model.load_weights(self.weights_path)
print(f'model = {model}')
self.get_bottleneck_values(model)
return model
def get_bottleneck_values(self, model):
bottleneck = model.get_layer('conv1d_5')
extractor = Model(inputs=model.inputs, outputs=[bottleneck.output])
features = extractor(self.X_test)
features = np.squeeze(features, axis=2)
df_source_info_test = pd.DataFrame({'class': self.df_source_info.iloc[self.i_test]['class'].values})
print(f'df_source_info_test = {df_source_info_test}')
df = pd.DataFrame(features)
df = df.join(df_source_info_test)
print(f'df = {df}')
sns.set(style="ticks", color_codes=True)
sns.pairplot(df, hue='class')
plt.savefig('plots/autoencoder_pairplot', dpi=100)
def evaluate_model(self, model):
preds = model.predict(self.X_test)
print(self.X_test.shape)
self.X_test = np.squeeze(self.X_test, axis=2)
preds = np.squeeze(preds, axis=2)
print(self.X_test.shape)
self.X_test = self.scaler.inverse_transform(self.X_test)
preds = self.scaler.inverse_transform(preds)
for i in range(100):
qso_ra = self.df_source_info.iloc[self.i_test[i]]['ra']
qso_dec = self.df_source_info.iloc[self.i_test[i]]['dec']
qso_plate = self.df_source_info.iloc[self.i_test[i]]['plate']
qso_z = self.df_source_info.iloc[self.i_test[i]]['z']
qso_class = self.df_source_info.iloc[self.i_test[i]]['class']
plotify = Plotify(theme='ugly')
_, axs = plotify.get_figax(nrows=2, figsize=(5.8, 8))
axs[0].plot(self.wavelengths, self.X_test[i], color=plotify.c_orange)
axs[1].plot(self.wavelengths, preds[i], color=plotify.c_orange)
axs[0].set_title(f'ra = {qso_ra}, dec = {qso_dec}, \n z = {qso_z}, plate = {qso_plate}, class = {qso_class} \n', fontsize=14)
axs[1].set_title(f'Autoencoder recreation \n')
axs[0].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
axs[1].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
axs[1].set_xlabel('Wavelength (Å)')
plt.subplots_adjust(hspace=0.4)
plt.savefig(f'plots/autoencoder/__all_sources/_autoencoder_{i}', dpi=160)
return preds
def main():
df_fluxes = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='fluxes').head(5000)
df_source_info = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='source_info').head(5000)
df_wavelengths = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='wavelengths')
ae = AutoEncoder(df_source_info, df_fluxes, df_wavelengths, load_model=False, weights_path='logs/colab-logs/_all_sources1-14_autoencoder.epoch30.h5')
ae.train_model(epochs=12, batch_size=64)
if __name__ == "__main__":
main()
| true
| true
|
790a07f146d0f02d2a2bf08d785a46f783433494
| 452
|
py
|
Python
|
data/scripts/templates/object/mobile/shared_space_comm_station_talus.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/mobile/shared_space_comm_station_talus.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/mobile/shared_space_comm_station_talus.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_space_comm_station_talus.iff"
result.attribute_template_id = 9
result.stfName("npc_name","selonian_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.588235
| 70
| 0.734513
| true
| true
|
|
790a0835018bac4f5687de4538b36440ca7223cf
| 1,719
|
py
|
Python
|
nncf/layer_utils.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | null | null | null |
nncf/layer_utils.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | null | null | null |
nncf/layer_utils.py
|
krodyush/nncf
|
476a274a90a3f2f1ace7a4cb0c9d90d1ddeb7f6a
|
[
"Apache-2.0"
] | 1
|
2021-04-05T09:33:51.000Z
|
2021-04-05T09:33:51.000Z
|
import torch.nn as nn
from .registry import Registry
COMPRESSION_MODULES = Registry('compression modules')
class ProxyModule:
def __init__(self, module):
self._module = module
def __getattr__(self, name):
return getattr(self._module, name)
class _NNCFModuleMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
_NNCFModuleMixin.add_mixin_fields(self)
@staticmethod
def add_mixin_fields(obj):
obj.pre_ops = nn.ModuleDict()
obj.post_ops = nn.ModuleDict()
def get_pre_op(self, key):
return self.pre_ops[key]
def get_post_op(self, key):
return self.post_ops[key]
def register_pre_forward_operation(self, op):
key = str(len(self.pre_ops))
self.pre_ops[key] = op
return key
def remove_pre_forward_operation(self, key):
return self.pre_ops.pop(key)
def register_post_forward_operation(self, op):
key = str(len(self.post_ops))
self.post_ops[key] = op
return key
def remove_post_forward_operation(self, key):
return self.post_ops.pop(key)
def forward(self, *args):
proxy_module = ProxyModule(self)
for op in self.pre_ops.values():
op_args = op(proxy_module, args)
if op_args is not None:
if not isinstance(op_args, tuple):
op_args = tuple([op_args])
args = op_args
results = super().forward.__func__(proxy_module, *args)
for op in self.post_ops.values():
op_results = op(proxy_module, results)
if op_results is not None:
results = op_results
return results
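# A minimal usage sketch (not NNCF API): mix _NNCFModuleMixin into a stock
# torch layer and register a pre-op. Pre-ops must be nn.Module instances
# because pre_ops/post_ops are nn.ModuleDict containers; the names below
# (NNCFLinear, HalveInputs) are illustrative assumptions.
class HalveInputs(nn.Module):
    def forward(self, module, args):
        # Returning a tuple replaces the wrapped forward's positional args;
        # returning None would keep them unchanged.
        return tuple(a * 0.5 for a in args)


class NNCFLinear(_NNCFModuleMixin, nn.Linear):
    pass


if __name__ == '__main__':
    import torch
    layer = NNCFLinear(4, 2)
    key = layer.register_pre_forward_operation(HalveInputs())
    out = layer(torch.ones(1, 4))      # inputs are halved before nn.Linear runs
    layer.remove_pre_forward_operation(key)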
| 27.725806
| 63
| 0.623618
|
import torch.nn as nn
from .registry import Registry
COMPRESSION_MODULES = Registry('compression modules')
class ProxyModule:
def __init__(self, module):
self._module = module
def __getattr__(self, name):
return getattr(self._module, name)
class _NNCFModuleMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
_NNCFModuleMixin.add_mixin_fields(self)
@staticmethod
def add_mixin_fields(obj):
obj.pre_ops = nn.ModuleDict()
obj.post_ops = nn.ModuleDict()
def get_pre_op(self, key):
return self.pre_ops[key]
def get_post_op(self, key):
return self.post_ops[key]
def register_pre_forward_operation(self, op):
key = str(len(self.pre_ops))
self.pre_ops[key] = op
return key
def remove_pre_forward_operation(self, key):
return self.pre_ops.pop(key)
def register_post_forward_operation(self, op):
key = str(len(self.post_ops))
self.post_ops[key] = op
return key
def remove_post_forward_operation(self, key):
return self.post_ops.pop(key)
def forward(self, *args):
proxy_module = ProxyModule(self)
for op in self.pre_ops.values():
op_args = op(proxy_module, args)
if op_args is not None:
if not isinstance(op_args, tuple):
op_args = tuple([op_args])
args = op_args
results = super().forward.__func__(proxy_module, *args)
for op in self.post_ops.values():
op_results = op(proxy_module, results)
if op_results is not None:
results = op_results
return results
| true
| true
|
790a08f366bde280be8d0312fc96104bd4d44c5a
| 3,753
|
py
|
Python
|
simmbse/structure_item.py
|
tsherburne/ma_sim
|
4082da1c80401dec4293415bc9e9239a6bb8185d
|
[
"BSD-3-Clause"
] | null | null | null |
simmbse/structure_item.py
|
tsherburne/ma_sim
|
4082da1c80401dec4293415bc9e9239a6bb8185d
|
[
"BSD-3-Clause"
] | null | null | null |
simmbse/structure_item.py
|
tsherburne/ma_sim
|
4082da1c80401dec4293415bc9e9239a6bb8185d
|
[
"BSD-3-Clause"
] | null | null | null |
import simpy
import logging
"""
Recursive GraphQL schema for JSON StructureItem - passed as Python dictionary
type StructureItem {
id: ID!
type: StructureType
# optional annotation for a Branch
annotation: String
# reference UUID / Name / Num for: Function, Exit / ExitCondition (Exit), Replicate (DomainSet) types
referenceID: String
referenceName: String
referenceNum: String
structure: [StructureItem]
}
"""
class StructureItem:
"""
The base class for all call StructureItems (Branch, Parallel, Select, Loop, Function, etc.)
"""
def __init__(self, env: simpy.Environment, logger: logging.Logger,
construct_id: str, systemModel: dict, structureItem: dict):
from .branch import Branch
from .function import Function
#from .exit import Exit, ExitCondition
from .loop import Loop # , LoopExit
from .parallel import Parallel
#from .replicate import Replicate
from .select import Select
import simapp
self.env = env
self.logger = logger
self.construct_id = construct_id
self.systemModel = systemModel
self.structureItem = structureItem
self.structureItems = list()
self.structureType = "" # overridden by subclass
self.name = "" # overridden by subclass
for num, struct in enumerate(self.structureItem['structure'], start=1):
next_construct_id = self.construct_id + "." + str(num)
if struct['type'] == "Branch":
self.structureItems.append(Branch(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Function":
try:
# Check for override function
override_class = getattr(simapp, struct['referenceName'].capitalize())
self.structureItems.append(override_class(self.env, self.logger,
next_construct_id, self.systemModel, struct))
except AttributeError:
# No function override exists
self.structureItems.append(Function(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Loop":
self.structureItems.append(Loop(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Parallel":
self.structureItems.append(Parallel(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Select":
self.structureItems.append(Select(self.env, self.logger,
next_construct_id, self.systemModel, struct))
def __str__(self):
"""
Recursively print CallStructure
"""
# indent by construct_id depth (number of dots)
stmt = ("\n" + "." * self.construct_id.count(".") +
"Struct: %s: %s" % (self.construct_id, self.structureType))
for struct in self.structureItems:
stmt += struct.__str__()
return (stmt)
def log_start(self):
self.logger.info('SIM Time: %08.2f : %-20s:Start:%10s:%-s' %
(self.env.now, self.construct_id, self.structureType, self.name))
def log_end(self):
self.logger.info('SIM Time: %08.2f : %-20s: End:%10s:%-s' %
(self.env.now, self.construct_id, self.structureType, self.name))
| 39.925532
| 103
| 0.572342
|
import simpy
import logging
class StructureItem:
def __init__(self, env: simpy.Environment, logger: logging.Logger,
construct_id: str, systemModel: dict, structureItem: dict):
from .branch import Branch
from .function import Function
from .loop import Loop
from .parallel import Parallel
from .select import Select
import simapp
self.env = env
self.logger = logger
self.construct_id = construct_id
self.systemModel = systemModel
self.structureItem = structureItem
self.structureItems = list()
self.structureType = ""
self.name = ""
for num, struct in enumerate(self.structureItem['structure'], start=1):
next_construct_id = self.construct_id + "." + str(num)
if struct['type'] == "Branch":
self.structureItems.append(Branch(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Function":
try:
override_class = getattr(simapp, struct['referenceName'].capitalize())
self.structureItems.append(override_class(self.env, self.logger,
next_construct_id, self.systemModel, struct))
except AttributeError:
self.structureItems.append(Function(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Loop":
self.structureItems.append(Loop(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Parallel":
self.structureItems.append(Parallel(self.env, self.logger,
next_construct_id, self.systemModel, struct))
elif struct['type'] == "Select":
self.structureItems.append(Select(self.env, self.logger,
next_construct_id, self.systemModel, struct))
def __str__(self):
stmt = ("\n" + "." * self.construct_id.count(".") +
"Struct: %s: %s" % (self.construct_id, self.structureType))
for struct in self.structureItems:
stmt += struct.__str__()
return (stmt)
def log_start(self):
self.logger.info('SIM Time: %08.2f : %-20s:Start:%10s:%-s' %
(self.env.now, self.construct_id, self.structureType, self.name))
def log_end(self):
self.logger.info('SIM Time: %08.2f : %-20s: End:%10s:%-s' %
(self.env.now, self.construct_id, self.structureType, self.name))
| true
| true
|
790a08ff134289c294b118a1f3c876565ca12c25
| 35,642
|
py
|
Python
|
senlin-7.0.0/senlin/tests/unit/profiles/test_nova_server_validate.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
senlin-7.0.0/senlin/tests/unit/profiles/test_nova_server_validate.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
senlin-7.0.0/senlin/tests/unit/profiles/test_nova_server_validate.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from senlin.common import exception as exc
from senlin.profiles.os.nova import server
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
spec = {
'type': 'os.nova.server',
'version': '1.0',
'properties': {
'context': {},
'auto_disk_config': True,
'availability_zone': 'FAKE_AZ',
'block_device_mapping': [{
'device_name': 'FAKE_NAME',
'volume_size': 1000,
}],
'flavor': 'FLAV',
'image': 'FAKE_IMAGE',
'key_name': 'FAKE_KEYNAME',
"metadata": {"meta var": "meta val"},
'name': 'FAKE_SERVER_NAME',
'networks': [{
'floating_ip': 'FAKE_FLOATING_IP',
'floating_network': 'FAKE_FLOATING_NET',
'security_groups': ['FAKE_SECURITY_GROUP'],
'port': 'FAKE_PORT',
'fixed_ip': 'FAKE_IP',
'network': 'FAKE_NET',
}],
'scheduler_hints': {
'same_host': 'HOST_ID',
},
}
}
class TestAvailabilityZoneValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[['FAKE_AZ']],
result='FAKE_AZ',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FAKE_AZ',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=[[]],
result='FAKE_AZ',
exception=exc.InvalidSpec,
message=("The specified availability_zone 'FAKE_AZ' could "
"not be found"))),
('create:success', dict(
reason='create',
success=True,
validate_result=[['FAKE_AZ']],
result='FAKE_AZ',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FAKE_AZ',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=[[]],
result='FAKE_AZ',
exception=exc.EResourceCreation,
message=("Failed in creating server: The specified "
"availability_zone 'FAKE_AZ' could not be found.")))
]
def setUp(self):
super(TestAvailabilityZoneValidation, self).setUp()
self.cc = mock.Mock()
prof = server.ServerProfile('t', spec)
prof._computeclient = self.cc
self.profile = prof
def test_validation(self):
self.cc.validate_azs.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID')
if self.success:
res = self.profile._validate_az(node, 'FAKE_AZ', self.reason)
self.assertEqual(self.result, res)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_az,
node, 'FAKE_AZ', self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.validate_azs.assert_called_once_with(['FAKE_AZ'])
class TestFlavorValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified flavor 'FLAVOR' could not be found.")),
('validate:disabled', dict(
reason=None,
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.InvalidSpec,
message="The specified flavor 'FLAVOR' is disabled")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('create:disabled', dict(
reason='create',
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.EResourceCreation,
message=("Failed in creating server: The specified flavor "
"'FLAVOR' is disabled."))),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:disabled', dict(
reason='update',
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The specified "
"flavor 'FLAVOR' is disabled.")))
]
def setUp(self):
super(TestFlavorValidation, self).setUp()
self.cc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
def test_validation(self):
self.cc.flavor_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
flavor = 'FLAVOR'
if self.success:
res = self.profile._validate_flavor(node, flavor, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_flavor,
node, flavor, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.flavor_find.assert_called_once_with(flavor, False)
class TestImageValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified image 'IMAGE' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
]
def setUp(self):
super(TestImageValidation, self).setUp()
self.cc = mock.Mock()
self.gc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
self.profile._glanceclient = self.gc
def test_validation(self):
self.gc.image_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
image = 'IMAGE'
if self.success:
res = self.profile._validate_image(node, image, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_image,
node, image, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.gc.image_find.assert_called_once_with(image, False)
class TestVolumeValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
result='VOLUME_ID',
exception=None,
message='')),
('validate:failure', dict(
reason=None,
success=False,
validate_result=[mock.Mock(id='VOLUME_ID', status='in-use')],
result='VOLUME_ID',
exception=exc.InvalidSpec,
message="The volume VOLUME should be in 'available' "
"status but is in 'in-use' status.")),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified volume 'VOLUME' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
result='VOLUME_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
]
def setUp(self):
super(TestVolumeValidation, self).setUp()
bdm_v2 = [
{
'volume_size': 1,
'uuid': '6ce0be68',
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
},
]
volume_spec = {
'type': 'os.nova.server',
'version': '1.0',
'properties': {
'flavor': 'FLAV',
'name': 'FAKE_SERVER_NAME',
'security_groups': ['HIGH_SECURITY_GROUP'],
'block_device_mapping_v2': bdm_v2,
}
}
self.vc = mock.Mock()
self.profile = server.ServerProfile('t', volume_spec)
self.profile._block_storageclient = self.vc
def test_validation(self):
self.vc.volume_get.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
volume = 'VOLUME'
if self.success:
res = self.profile._validate_volume(node, volume, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_volume,
node, volume, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.vc.volume_get.assert_called_once_with(volume)
class TestKeypairValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='KEY_ID')],
result='KEY_ID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified key_name 'KEY' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='KEY_ID')],
result='KEY_ID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
]
def setUp(self):
super(TestKeypairValidation, self).setUp()
self.cc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
def test_validation(self):
self.cc.keypair_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
key = 'KEY'
if self.success:
res = self.profile._validate_keypair(node, key, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_keypair,
node, key, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.keypair_find.assert_called_once_with(key, False)
class TestNetworkValidation(base.SenlinTestCase):
scenarios = [
('validate:net-n:port-n:fixed_ip-n:sgroups-n', dict(
reason=None,
success=True,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'port': 'PORT_ID'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:floating_net-y:floating_ip-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET',
'floating_ip': 'FLOATINGIP'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
result={'network': 'NET_ID', 'floating_network': 'NET_ID',
'floating_ip_id': 'FLOATINGIP_ID',
'floating_ip': 'FLOATINGIP'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-y:sgroups-n', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('validate:net-f:port-y:fixed_ip-n:sgroups-n', dict(
reason=None,
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='NET Failure')),
('validate:net-n:port-f:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='PORT Failure')),
('validate:net-n:port-active:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='The status of the port PORT must be DOWN')),
('validate:net-n:port-y:fixed_ip-n:floating_net-n:floating_ip-y', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'floating_ip': 'FLOATINGIP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
result={},
exception=exc.InvalidSpec,
message='Must specify a network to create floating IP')),
('validate:net-n:port-y:fixed_ip-n:floating_ip-active', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'floating_network': 'NET',
'floating_ip': 'FLOATINGIP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='ACTIVE')],
result={},
exception=exc.InvalidSpec,
message='the floating IP FLOATINGIP has been used.')),
('validate:net-n:port-n:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message="One of 'port' and 'network' must be provided")),
('validate:net-n:port-y:fixed_ip-y', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message=("The 'port' property and the 'fixed_ip' property cannot "
"be specified at the same time"))),
('create:net-y:port-y:fixed_ip-n', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'port': 'PORT_ID'},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('create:net-f:port-y:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message='Failed in creating server: NET Failure.')),
('create:net-n:port-f:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message='Failed in creating server: PORT Failure.')),
('create:net-n:port-active:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=('Failed in creating server: The status of the port PORT '
'must be DOWN.'))),
('create:net-n:port-n:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=("Failed in creating server: One of 'port' "
"and 'network' must be provided."))),
('create:net-n:port-y:fixed_ip-y', dict(
reason='create',
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=("Failed in creating server: The 'port' property and the "
"'fixed_ip' property cannot be specified at the same "
"time."))),
('update:net-y:port-y:fixed_ip-n', dict(
reason='update',
success=True,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'port': 'PORT_ID'},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-y', dict(
reason='update',
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID',
'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('update:net-f:port-y:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': NET Failure.")),
('update:net-n:port-f:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': PORT Failure.")),
('update:net-n:port-active:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The status of the "
"port PORT must be DOWN."))),
('update:net-n:port-n:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': One of 'port' "
"and 'network' must be provided."))),
('update:net-n:port-y:fixed_ip-y', dict(
reason='update',
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The 'port' "
"property and the 'fixed_ip' property cannot be "
"specified at the same time."))),
]
def setUp(self):
super(TestNetworkValidation, self).setUp()
self.nc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._networkclient = self.nc
def test_validation(self):
self.nc.network_get.side_effect = self.net_result
self.nc.port_find.side_effect = self.port_result
self.nc.security_group_find.side_effect = self.sg_result
self.nc.floatingip_find.side_effect = self.floating_result
obj = mock.Mock(physical_id='NOVA_ID')
if self.success:
res = self.profile._validate_network(obj, self.inputs, self.reason)
self.assertEqual(self.result, res)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_network,
obj, self.inputs, self.reason)
self.assertEqual(self.message, six.text_type(ex))
if self.net_result:
self.nc.network_get.assert_called_with('NET')
if self.port_result:
self.nc.port_find.assert_called_once_with('PORT')
if self.sg_result:
self.nc.security_group_find.assert_called_once_with('default')
if self.floating_result:
self.nc.floatingip_find.assert_called_once_with('FLOATINGIP')
class TestNovaServerValidate(base.SenlinTestCase):
def setUp(self):
super(TestNovaServerValidate, self).setUp()
self.context = utils.dummy_context()
def test_do_validate_all_passed(self):
profile = server.ServerProfile('t', spec)
mock_az = self.patchobject(profile, '_validate_az')
mock_flavor = self.patchobject(profile, '_validate_flavor')
mock_image = self.patchobject(profile, '_validate_image')
mock_keypair = self.patchobject(profile, '_validate_keypair')
mock_network = self.patchobject(profile, '_validate_network')
obj = mock.Mock()
res = profile.do_validate(obj)
properties = spec['properties']
self.assertTrue(res)
mock_az.assert_called_once_with(obj, properties['availability_zone'])
mock_flavor.assert_called_once_with(obj, properties['flavor'])
mock_image.assert_called_once_with(obj, properties['image'])
mock_keypair.assert_called_once_with(obj, properties['key_name'])
mock_network.assert_called_once_with(obj, properties['networks'][0])
| 38.07906
| 79
| 0.537287
|
import mock
import six
from senlin.common import exception as exc
from senlin.profiles.os.nova import server
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
spec = {
'type': 'os.nova.server',
'version': '1.0',
'properties': {
'context': {},
'auto_disk_config': True,
'availability_zone': 'FAKE_AZ',
'block_device_mapping': [{
'device_name': 'FAKE_NAME',
'volume_size': 1000,
}],
'flavor': 'FLAV',
'image': 'FAKE_IMAGE',
'key_name': 'FAKE_KEYNAME',
"metadata": {"meta var": "meta val"},
'name': 'FAKE_SERVER_NAME',
'networks': [{
'floating_ip': 'FAKE_FLOATING_IP',
'floating_network': 'FAKE_FLOATING_NET',
'security_groups': ['FAKE_SECURITY_GROUP'],
'port': 'FAKE_PORT',
'fixed_ip': 'FAKE_IP',
'network': 'FAKE_NET',
}],
'scheduler_hints': {
'same_host': 'HOST_ID',
},
}
}
class TestAvailabilityZoneValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[['FAKE_AZ']],
result='FAKE_AZ',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FAKE_AZ',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=[[]],
result='FAKE_AZ',
exception=exc.InvalidSpec,
message=("The specified availability_zone 'FAKE_AZ' could "
"not be found"))),
('create:success', dict(
reason='create',
success=True,
validate_result=[['FAKE_AZ']],
result='FAKE_AZ',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FAKE_AZ',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=[[]],
result='FAKE_AZ',
exception=exc.EResourceCreation,
message=("Failed in creating server: The specified "
"availability_zone 'FAKE_AZ' could not be found.")))
]
def setUp(self):
super(TestAvailabilityZoneValidation, self).setUp()
self.cc = mock.Mock()
prof = server.ServerProfile('t', spec)
prof._computeclient = self.cc
self.profile = prof
def test_validation(self):
self.cc.validate_azs.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID')
if self.success:
res = self.profile._validate_az(node, 'FAKE_AZ', self.reason)
self.assertEqual(self.result, res)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_az,
node, 'FAKE_AZ', self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.validate_azs.assert_called_once_with(['FAKE_AZ'])
class TestFlavorValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified flavor 'FLAVOR' could not be found.")),
('validate:disabled', dict(
reason=None,
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.InvalidSpec,
message="The specified flavor 'FLAVOR' is disabled")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('create:disabled', dict(
reason='create',
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.EResourceCreation,
message=("Failed in creating server: The specified flavor "
"'FLAVOR' is disabled."))),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='FID', is_disabled=False)],
result='FID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:disabled', dict(
reason='update',
success=False,
validate_result=[mock.Mock(id='FID', is_disabled=True)],
result='FID',
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The specified "
"flavor 'FLAVOR' is disabled.")))
]
def setUp(self):
super(TestFlavorValidation, self).setUp()
self.cc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
def test_validation(self):
self.cc.flavor_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
flavor = 'FLAVOR'
if self.success:
res = self.profile._validate_flavor(node, flavor, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_flavor,
node, flavor, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.flavor_find.assert_called_once_with(flavor, False)
class TestImageValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified image 'IMAGE' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
]
def setUp(self):
super(TestImageValidation, self).setUp()
self.cc = mock.Mock()
self.gc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
self.profile._glanceclient = self.gc
def test_validation(self):
self.gc.image_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
image = 'IMAGE'
if self.success:
res = self.profile._validate_image(node, image, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_image,
node, image, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.gc.image_find.assert_called_once_with(image, False)
class TestVolumeValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
result='VOLUME_ID',
exception=None,
message='')),
('validate:failure', dict(
reason=None,
success=False,
validate_result=[mock.Mock(id='VOLUME_ID', status='in-use')],
result='VOLUME_ID',
exception=exc.InvalidSpec,
message="The volume VOLUME should be in 'available' "
"status but is in 'in-use' status.")),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified volume 'VOLUME' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='VOLUME_ID', status='available')],
result='VOLUME_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
]
def setUp(self):
super(TestVolumeValidation, self).setUp()
bdm_v2 = [
{
'volume_size': 1,
'uuid': '6ce0be68',
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0,
},
]
volume_spec = {
'type': 'os.nova.server',
'version': '1.0',
'properties': {
'flavor': 'FLAV',
'name': 'FAKE_SERVER_NAME',
'security_groups': ['HIGH_SECURITY_GROUP'],
'block_device_mapping_v2': bdm_v2,
}
}
self.vc = mock.Mock()
self.profile = server.ServerProfile('t', volume_spec)
self.profile._block_storageclient = self.vc
def test_validation(self):
self.vc.volume_get.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
volume = 'VOLUME'
if self.success:
res = self.profile._validate_volume(node, volume, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_volume,
node, volume, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.vc.volume_get.assert_called_once_with(volume)
class TestKeypairValidation(base.SenlinTestCase):
scenarios = [
('validate:success', dict(
reason=None,
success=True,
validate_result=[mock.Mock(id='KEY_ID')],
result='KEY_ID',
exception=None,
message='')),
('validate:driver_failure', dict(
reason=None,
success=False,
validate_result=exc.InternalError(message='BANG.'),
result='FID',
exception=exc.InternalError,
message='BANG.')),
('validate:not_found', dict(
reason=None,
success=False,
validate_result=exc.InternalError(code=404, message='BANG.'),
result='FID',
exception=exc.InvalidSpec,
message="The specified key_name 'KEY' could not be found.")),
('create:success', dict(
reason='create',
success=True,
validate_result=[mock.Mock(id='IMAGE_ID')],
result='IMAGE_ID',
exception=None,
message='')),
('create:driver_failure', dict(
reason='create',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message='Failed in creating server: BANG.')),
('create:not_found', dict(
reason='create',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceCreation,
message="Failed in creating server: BANG.")),
('update:success', dict(
reason='update',
success=True,
validate_result=[mock.Mock(id='KEY_ID')],
result='KEY_ID',
exception=None,
message='')),
('update:driver_failure', dict(
reason='update',
success=False,
validate_result=exc.InternalError(message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
('update:not_found', dict(
reason='update',
success=False,
validate_result=exc.InternalError(code=404, message='BANG'),
result='FID',
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': BANG.")),
]
def setUp(self):
super(TestKeypairValidation, self).setUp()
self.cc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._computeclient = self.cc
def test_validation(self):
self.cc.keypair_find.side_effect = self.validate_result
node = mock.Mock(id='NODE_ID', physical_id='NOVA_ID')
key = 'KEY'
if self.success:
res = self.profile._validate_keypair(node, key, self.reason)
self.assertIsNotNone(res)
self.assertEqual(self.result, res.id)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_keypair,
node, key, self.reason)
self.assertEqual(self.message, six.text_type(ex))
self.cc.keypair_find.assert_called_once_with(key, False)
class TestNetworkValidation(base.SenlinTestCase):
scenarios = [
('validate:net-n:port-n:fixed_ip-n:sgroups-n', dict(
reason=None,
success=True,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'port': 'PORT_ID'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-n:floating_net-y:floating_ip-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET',
'floating_ip': 'FLOATINGIP'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
result={'network': 'NET_ID', 'floating_network': 'NET_ID',
'floating_ip_id': 'FLOATINGIP_ID',
'floating_ip': 'FLOATINGIP'},
exception=None,
message='')),
('validate:net-y:port-n:fixed_ip-y:sgroups-n', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('validate:net-f:port-y:fixed_ip-n:sgroups-n', dict(
reason=None,
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='NET Failure')),
('validate:net-n:port-f:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='PORT Failure')),
('validate:net-n:port-active:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message='The status of the port PORT must be DOWN')),
('validate:net-n:port-y:fixed_ip-n:floating_net-n:floating_ip-y', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'floating_ip': 'FLOATINGIP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='INACTIVE')],
result={},
exception=exc.InvalidSpec,
message='Must specify a network to create floating IP')),
('validate:net-n:port-y:fixed_ip-n:floating_ip-active', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'floating_network': 'NET',
'floating_ip': 'FLOATINGIP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[mock.Mock(id='FLOATINGIP_ID', status='ACTIVE')],
result={},
exception=exc.InvalidSpec,
message='the floating IP FLOATINGIP has been used.')),
('validate:net-n:port-n:fixed_ip-n', dict(
reason=None,
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message="One of 'port' and 'network' must be provided")),
('validate:net-n:port-y:fixed_ip-y', dict(
reason=None,
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.InvalidSpec,
message=("The 'port' property and the 'fixed_ip' property cannot "
"be specified at the same time"))),
('create:net-y:port-y:fixed_ip-n', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'port': 'PORT_ID'},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('create:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('create:net-f:port-y:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message='Failed in creating server: NET Failure.')),
('create:net-n:port-f:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message='Failed in creating server: PORT Failure.')),
('create:net-n:port-active:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=('Failed in creating server: The status of the port PORT '
'must be DOWN.'))),
('create:net-n:port-n:fixed_ip-n', dict(
reason='create',
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=("Failed in creating server: One of 'port' "
"and 'network' must be provided."))),
('create:net-n:port-y:fixed_ip-y', dict(
reason='create',
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceCreation,
message=("Failed in creating server: The 'port' property and the "
"'fixed_ip' property cannot be specified at the same "
"time."))),
('update:net-y:port-y:fixed_ip-n', dict(
reason='update',
success=True,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'port': 'PORT_ID'},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-y', dict(
reason='update',
success=True,
inputs={'network': 'NET', 'fixed_ip': 'FIXED_IP'},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID',
'fixed_ip': 'FIXED_IP'},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-n:sgroups-y', dict(
reason='create',
success=True,
inputs={'network': 'NET', 'security_groups': ['default']},
net_result=[mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[mock.Mock(id='SG_ID')],
floating_result=[],
result={'network': 'NET_ID', 'security_groups': ['SG_ID']},
exception=None,
message='')),
('update:net-y:port-n:fixed_ip-n:sgroups-n:floating_net-y', dict(
reason=None,
success=True,
inputs={'network': 'NET', 'floating_network': 'NET'},
net_result=[mock.Mock(id='NET_ID'), mock.Mock(id='NET_ID')],
port_result=[],
sg_result=[],
floating_result=[],
result={'network': 'NET_ID', 'floating_network': 'NET_ID'},
exception=None,
message='')),
('update:net-f:port-y:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'network': 'NET', 'port': 'PORT'},
net_result=[exc.InternalError(message='NET Failure')],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': NET Failure.")),
('update:net-n:port-f:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[exc.InternalError(message='PORT Failure')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message="Failed in updating server 'NOVA_ID': PORT Failure.")),
('update:net-n:port-active:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'port': 'PORT'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='ACTIVE')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The status of the "
"port PORT must be DOWN."))),
('update:net-n:port-n:fixed_ip-n', dict(
reason='update',
success=False,
inputs={'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': One of 'port' "
"and 'network' must be provided."))),
('update:net-n:port-y:fixed_ip-y', dict(
reason='update',
success=False,
inputs={'port': 'PORT', 'fixed_ip': 'FIXED_IP'},
net_result=[],
port_result=[mock.Mock(id='PORT_ID', status='DOWN')],
sg_result=[],
floating_result=[],
result={},
exception=exc.EResourceUpdate,
message=("Failed in updating server 'NOVA_ID': The 'port' "
"property and the 'fixed_ip' property cannot be "
"specified at the same time."))),
]
def setUp(self):
super(TestNetworkValidation, self).setUp()
self.nc = mock.Mock()
self.profile = server.ServerProfile('t', spec)
self.profile._networkclient = self.nc
def test_validation(self):
self.nc.network_get.side_effect = self.net_result
self.nc.port_find.side_effect = self.port_result
self.nc.security_group_find.side_effect = self.sg_result
self.nc.floatingip_find.side_effect = self.floating_result
obj = mock.Mock(physical_id='NOVA_ID')
if self.success:
res = self.profile._validate_network(obj, self.inputs, self.reason)
self.assertEqual(self.result, res)
else:
ex = self.assertRaises(self.exception,
self.profile._validate_network,
obj, self.inputs, self.reason)
self.assertEqual(self.message, six.text_type(ex))
if self.net_result:
self.nc.network_get.assert_called_with('NET')
if self.port_result:
self.nc.port_find.assert_called_once_with('PORT')
if self.sg_result:
self.nc.security_group_find.assert_called_once_with('default')
if self.floating_result:
self.nc.floatingip_find.assert_called_once_with('FLOATINGIP')
class TestNovaServerValidate(base.SenlinTestCase):
def setUp(self):
super(TestNovaServerValidate, self).setUp()
self.context = utils.dummy_context()
def test_do_validate_all_passed(self):
profile = server.ServerProfile('t', spec)
mock_az = self.patchobject(profile, '_validate_az')
mock_flavor = self.patchobject(profile, '_validate_flavor')
mock_image = self.patchobject(profile, '_validate_image')
mock_keypair = self.patchobject(profile, '_validate_keypair')
mock_network = self.patchobject(profile, '_validate_network')
obj = mock.Mock()
res = profile.do_validate(obj)
properties = spec['properties']
self.assertTrue(res)
mock_az.assert_called_once_with(obj, properties['availability_zone'])
mock_flavor.assert_called_once_with(obj, properties['flavor'])
mock_image.assert_called_once_with(obj, properties['image'])
mock_keypair.assert_called_once_with(obj, properties['key_name'])
mock_network.assert_called_once_with(obj, properties['networks'][0])
| true
| true
|
790a09b72053e70e52a971a73a45006398139d92
| 217
|
py
|
Python
|
ex007.1.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | 2
|
2020-07-27T06:33:59.000Z
|
2021-02-02T15:17:56.000Z
|
ex007.1.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | null | null | null |
ex007.1.py
|
ErosMLima/python-server-connection
|
a15706a007a95eff64597fa02e64d95b6b2da6a5
|
[
"MIT"
] | null | null | null |
#Nota Média do aluno
n1 = float(input('Primeira nota do aluno: '))
n2 = float(input('Segunda nota do aluno: '))
média = (n1 + n2) / 2
print('A média entre {:.1f} e {:.1f} é igual a {:.1f}'.format(n1, n2, média))
| 36.166667
| 78
| 0.608295
|
n1 = float(input('Primeira nota do aluno: '))
n2 = float(input('Segunda nota do aluno: '))
média = (n1 + n2) / 2
print('A média entre {:.1f} e {:.1f} é igual a {:.1f}'.format(n1, n2, média))
| true
| true
|
790a0b583b5fe66bc0445bd3f72b01d0fd82bd56
| 1,877
|
py
|
Python
|
cte_forest/__init__.py
|
kordian-kowalski/django-cte-forest
|
9bd3a7fb76d256cfb4d8050381b3ddf4f6d1c4f5
|
[
"BSD-3-Clause"
] | 22
|
2016-12-16T14:34:33.000Z
|
2021-06-14T15:16:43.000Z
|
cte_forest/__init__.py
|
kordian-kowalski/django-cte-forest
|
9bd3a7fb76d256cfb4d8050381b3ddf4f6d1c4f5
|
[
"BSD-3-Clause"
] | 1
|
2020-08-13T15:06:24.000Z
|
2020-08-13T15:06:24.000Z
|
cte_forest/__init__.py
|
kordian-kowalski/django-cte-forest
|
9bd3a7fb76d256cfb4d8050381b3ddf4f6d1c4f5
|
[
"BSD-3-Clause"
] | 4
|
2016-12-22T09:54:00.000Z
|
2020-08-25T07:23:43.000Z
|
# -*- coding: utf-8 -*-
#
# This document is free and open-source software, subject to the OSI-approved
# BSD license below.
#
# Copyright (c) 2011 - 2013 Alexis Petrounias <www.petrounias.org>,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the author nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Django CTE Trees - an experimental PostgreSQL Common Table Expressions (CTE)
implementation of Adjacency-Linked trees.
"""
VERSION = (0, 2, 2)
__version__ = ".".join(map(str, VERSION))
| 46.925
| 80
| 0.773042
|
VERSION = (0, 2, 2)
__version__ = ".".join(map(str, VERSION))
| true
| true
|
790a0b64ee5a29731b953f684419f0b0bc2a2ae6
| 6,647
|
py
|
Python
|
DBP/models/instance.py
|
Pusnow/DB-Project
|
2a9a485bacb4e513c6fbc159faf9855c919c657c
|
[
"MIT"
] | null | null | null |
DBP/models/instance.py
|
Pusnow/DB-Project
|
2a9a485bacb4e513c6fbc159faf9855c919c657c
|
[
"MIT"
] | null | null | null |
DBP/models/instance.py
|
Pusnow/DB-Project
|
2a9a485bacb4e513c6fbc159faf9855c919c657c
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
from DBP.models import Base, session
from DBP.models.user import User
from sqlalchemy.orm import class_mapper
from sqlalchemy.inspection import inspect
from sqlalchemy.sql import func
from sqlalchemy.dialects.mysql import INTEGER,VARCHAR, DATETIME
from datetime import datetime
import csv
import io
from openpyxl import Workbook
from openpyxl import load_workbook
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
class OriginalData (object):
def __init__(self, length, name, mappinginfo):
self.length = length
self.name = name
cols = inspect(self.__class__).columns
if len(mappinginfo) != len(cols) -3:
raise TypeError
for col in mappinginfo:
setattr(self,str( u"sch_"+col["label"]["name"]),int(col["col"]))
def dict(self):
data = {
"id" : self.id,
"length" : self.length,
"name" : self.name,
"mapinfo" : self.mapList()
}
return data
def getInfo(self):
data = self.dict()
data["parsednum"] = len(self.parseds)
data["tasknum"] = sum(map(lambda x: len(x.tasks),self.parseds))
return data
def mapList(self):
maplist = list()
for col in filter(lambda x: x.name[:3] == u"sch", inspect(self.__class__).columns ):
maplist.append(getattr(self,col.name))
return maplist
def getSchema(self):
return filter(lambda x: x.name[:3] == u"sch", inspect(self.__class__).columns )
def loadcsv(self,submitter,csvread,nth,duration_start,duration_end):
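        # Remap each CSV row into schema column order while counting empty cells per
        # column and duplicate rows, then persist a parsed submission assigned to a
        # random evaluator.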
reader = csv.reader(csvread, delimiter=',', quotechar="'")
csvwrite = io.BytesIO()
writer = csv.writer(csvwrite, delimiter=',', quotechar="'")
maplist = self.mapList()
counter = 0
dupset = set()
dupcounter = 0
nullcount = dict()
schema = self.getSchema()
for col in schema:
nullcount[col.name] = 0
for rrow in reader:
crow = list()
for mapnum, col in zip(maplist, schema):
crow.append(rrow[mapnum])
if rrow[mapnum] == "":
nullcount[col.name] +=1
dupset.add(unicode(crow))
writer.writerow(crow)
counter += 1
evaluator = User.randomEvaluator()
parsedmodel = self.parsedclass(nth,duration_start,duration_end,csvwrite,counter, counter - len(dupset))
parsedmodel.submitterid = submitter.id
parsedmodel.evaluatorid = evaluator.id
self.taskrow.addUser(evaluator)
for col in schema :
setattr(parsedmodel,"null_" + col.name[4:] , nullcount[col.name] / (counter*1.0) )
self.parseds.append(parsedmodel)
session.commit()
return parsedmodel
def loadxlsx(self,submitter,xlsxread,nth,duration_start,duration_end):
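        # Same flow as loadcsv, but reads an .xlsx workbook and serialises datetime
        # cells to 'YYYY-MM-DD HH:MM' before writing the intermediate CSV.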
wb = load_workbook(xlsxread)
ws = wb.active
csvwrite = io.BytesIO()
writer = csv.writer(csvwrite, delimiter=',', quotechar="'")
maplist = self.mapList()
counter = 0
dupset = set()
dupcounter = 0
nullcount = dict()
schema = self.getSchema()
for col in schema:
nullcount[col.name] = 0
for rrow in ws.rows:
crow = list()
for mapnum, col in zip(maplist, schema):
if type(rrow[mapnum].value) == datetime:
crow.append(rrow[mapnum].value.strftime("%Y-%m-%d %H:%M"))
else :
crow.append(rrow[mapnum].value)
if rrow[mapnum].value == "":
nullcount[col.name] +=1
dupset.add(unicode(crow))
utfrow = list ()
for x in crow:
if type(x) == unicode :
utfrow.append(x.encode("utf8"))
else :
utfrow.append(x)
writer.writerow(utfrow)
counter += 1
evaluator = User.randomEvaluator()
parsedmodel = self.parsedclass(nth,duration_start,duration_end,csvwrite,counter, counter - len(dupset))
parsedmodel.submitterid = submitter.id
parsedmodel.evaluatorid = evaluator.id
self.taskrow.addUser(evaluator)
for col in schema :
setattr(parsedmodel,"null_" + col.name[4:] , nullcount[col.name] / (counter*1.0) )
self.parseds.append(parsedmodel)
session.commit()
return parsedmodel
def getInfoByUser(self,user):
data = self.dict()
data["nth"] = self.getNextnth (user)
return data
def getNextnth(self,user):
nth = session.query( func.max(self.parsedclass.nth)).filter(self.parsedclass.originalid == self.id).filter(self.parsedclass.submitterid == user.id).first()
if nth[0]:
return nth[0] +1
else :
return 1
class ParsedData (object):
def __init__(self,nth,duration_start,duration_end, csvfile, tuplenum,duplicatetuplenum):
self.nth = nth
self.duration_start = duration_start
self.duration_end = duration_end
self.file = csvfile.getvalue()
self.tuplenum = tuplenum
self.duplicatetuplenum = duplicatetuplenum
def parsecsv(self):
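        # Rebuild task rows from the stored CSV blob, coercing each column to its
        # declared INTEGER/DATETIME type and falling back to None on parse errors.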
csvread = io.StringIO(self.file.decode("utf8"))
reader = csv.reader(utf_8_encoder(csvread), delimiter=',', quotechar="'")
parsedlist = list()
for row in reader:
tsmodel = self.taskclass(User.getUser(self.submitterid).name, self.id)
for (column, data) in zip(filter(lambda x: x.name[:3] == u"sch", inspect(self.taskclass).columns ), row):
if type(column.type) == INTEGER:
try :
setattr(tsmodel,column.name, int(data))
except :
setattr(tsmodel,column.name, None)
elif type(column.type) == DATETIME:
try :
setattr(tsmodel,column.name, datetime.strptime( data, "%Y-%m-%d %H:%M"))
except :
setattr(tsmodel,column.name, None)
else :
setattr(tsmodel,column.name, data)
parsedlist.append(tsmodel)
return parsedlist
def insertcsv(self):
if self.pnp != "Pass":
return False
session.bulk_save_objects(self.parsecsv())
session.commit()
return True
def dict(self):
return {
"id" : self.id,
"nth" : self.nth,
"tuplenum" : self.tuplenum,
"duplicatetuplenum" : self.duplicatetuplenum,
"duration_start" : self.duration_start.isoformat(),
"duration_end" : self.duration_end.isoformat(),
"status" : self.status,
"score" : self.score,
"pnp" : self.pnp,
"submitter" : User.getUser(self.submitterid).name,
"original" : self.original.name,
"evaluator": User.getUser(self.evaluatorid).name,
"nullratio" : self.nullInfo()
}
def evaluate(self, score,pnp):
self.status = "Evaluated"
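        # Weighted score: 5x the evaluator's mark, plus up to 25 points for tuple
        # uniqueness and 25 points for column completeness (non-null ratio).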
self.score = 5 * score + 25 *( 1.0 - self.duplicatetuplenum/(self.tuplenum * 1.0) ) + 25 * (1.0 - sum(map(lambda x : x['ratio'] ,self.nullInfo()))/(len(self.nullInfo())*1.0))
self.pnp = pnp
session.commit()
def nullInfo(self):
nulllist = list()
for col in filter(lambda x: x.name[:4] == u"null", inspect(self.__class__).columns ):
nulllist.append(dict(ratio=getattr(self,col.name) ,name = col.name[5:] ))
return nulllist
class TaskData (object):
def __init__ (self,submittername, parsedid):
self.submittername = submittername
self.parsedid = parsedid
| 24.083333
| 176
| 0.68136
|
from DBP.models import Base, session
from DBP.models.user import User
from sqlalchemy.orm import class_mapper
from sqlalchemy.inspection import inspect
from sqlalchemy.sql import func
from sqlalchemy.dialects.mysql import INTEGER,VARCHAR, DATETIME
from datetime import datetime
import csv
import io
from openpyxl import Workbook
from openpyxl import load_workbook
def utf_8_encoder(unicode_csv_data):
for line in unicode_csv_data:
yield line.encode('utf-8')
class OriginalData (object):
def __init__(self, length, name, mappinginfo):
self.length = length
self.name = name
cols = inspect(self.__class__).columns
if len(mappinginfo) != len(cols) -3:
raise TypeError
for col in mappinginfo:
setattr(self,str( u"sch_"+col["label"]["name"]),int(col["col"]))
def dict(self):
data = {
"id" : self.id,
"length" : self.length,
"name" : self.name,
"mapinfo" : self.mapList()
}
return data
def getInfo(self):
data = self.dict()
data["parsednum"] = len(self.parseds)
data["tasknum"] = sum(map(lambda x: len(x.tasks),self.parseds))
return data
def mapList(self):
maplist = list()
for col in filter(lambda x: x.name[:3] == u"sch", inspect(self.__class__).columns ):
maplist.append(getattr(self,col.name))
return maplist
def getSchema(self):
return filter(lambda x: x.name[:3] == u"sch", inspect(self.__class__).columns )
def loadcsv(self,submitter,csvread,nth,duration_start,duration_end):
reader = csv.reader(csvread, delimiter=',', quotechar="'")
csvwrite = io.BytesIO()
writer = csv.writer(csvwrite, delimiter=',', quotechar="'")
maplist = self.mapList()
counter = 0
dupset = set()
dupcounter = 0
nullcount = dict()
schema = self.getSchema()
for col in schema:
nullcount[col.name] = 0
for rrow in reader:
crow = list()
for mapnum, col in zip(maplist, schema):
crow.append(rrow[mapnum])
if rrow[mapnum] == "":
nullcount[col.name] +=1
dupset.add(unicode(crow))
writer.writerow(crow)
counter += 1
evaluator = User.randomEvaluator()
parsedmodel = self.parsedclass(nth,duration_start,duration_end,csvwrite,counter, counter - len(dupset))
parsedmodel.submitterid = submitter.id
parsedmodel.evaluatorid = evaluator.id
self.taskrow.addUser(evaluator)
for col in schema :
setattr(parsedmodel,"null_" + col.name[4:] , nullcount[col.name] / (counter*1.0) )
self.parseds.append(parsedmodel)
session.commit()
return parsedmodel
def loadxlsx(self,submitter,xlsxread,nth,duration_start,duration_end):
wb = load_workbook(xlsxread)
ws = wb.active
csvwrite = io.BytesIO()
writer = csv.writer(csvwrite, delimiter=',', quotechar="'")
maplist = self.mapList()
counter = 0
dupset = set()
dupcounter = 0
nullcount = dict()
schema = self.getSchema()
for col in schema:
nullcount[col.name] = 0
for rrow in ws.rows:
crow = list()
for mapnum, col in zip(maplist, schema):
if type(rrow[mapnum].value) == datetime:
crow.append(rrow[mapnum].value.strftime("%Y-%m-%d %H:%M"))
else :
crow.append(rrow[mapnum].value)
if rrow[mapnum].value == "":
nullcount[col.name] +=1
dupset.add(unicode(crow))
utfrow = list ()
for x in crow:
if type(x) == unicode :
utfrow.append(x.encode("utf8"))
else :
utfrow.append(x)
writer.writerow(utfrow)
counter += 1
evaluator = User.randomEvaluator()
parsedmodel = self.parsedclass(nth,duration_start,duration_end,csvwrite,counter, counter - len(dupset))
parsedmodel.submitterid = submitter.id
parsedmodel.evaluatorid = evaluator.id
self.taskrow.addUser(evaluator)
for col in schema :
setattr(parsedmodel,"null_" + col.name[4:] , nullcount[col.name] / (counter*1.0) )
self.parseds.append(parsedmodel)
session.commit()
return parsedmodel
def getInfoByUser(self,user):
data = self.dict()
data["nth"] = self.getNextnth (user)
return data
def getNextnth(self,user):
nth = session.query( func.max(self.parsedclass.nth)).filter(self.parsedclass.originalid == self.id).filter(self.parsedclass.submitterid == user.id).first()
if nth[0]:
return nth[0] +1
else :
return 1
class ParsedData (object):
def __init__(self,nth,duration_start,duration_end, csvfile, tuplenum,duplicatetuplenum):
self.nth = nth
self.duration_start = duration_start
self.duration_end = duration_end
self.file = csvfile.getvalue()
self.tuplenum = tuplenum
self.duplicatetuplenum = duplicatetuplenum
def parsecsv(self):
csvread = io.StringIO(self.file.decode("utf8"))
reader = csv.reader(utf_8_encoder(csvread), delimiter=',', quotechar="'")
parsedlist = list()
for row in reader:
tsmodel = self.taskclass(User.getUser(self.submitterid).name, self.id)
for (column, data) in zip(filter(lambda x: x.name[:3] == u"sch", inspect(self.taskclass).columns ), row):
if type(column.type) == INTEGER:
try :
setattr(tsmodel,column.name, int(data))
except :
setattr(tsmodel,column.name, None)
elif type(column.type) == DATETIME:
try :
setattr(tsmodel,column.name, datetime.strptime( data, "%Y-%m-%d %H:%M"))
except :
setattr(tsmodel,column.name, None)
else :
setattr(tsmodel,column.name, data)
parsedlist.append(tsmodel)
return parsedlist
def insertcsv(self):
if self.pnp != "Pass":
return False
session.bulk_save_objects(self.parsecsv())
session.commit()
return True
def dict(self):
return {
"id" : self.id,
"nth" : self.nth,
"tuplenum" : self.tuplenum,
"duplicatetuplenum" : self.duplicatetuplenum,
"duration_start" : self.duration_start.isoformat(),
"duration_end" : self.duration_end.isoformat(),
"status" : self.status,
"score" : self.score,
"pnp" : self.pnp,
"submitter" : User.getUser(self.submitterid).name,
"original" : self.original.name,
"evaluator": User.getUser(self.evaluatorid).name,
"nullratio" : self.nullInfo()
}
def evaluate(self, score,pnp):
self.status = "Evaluated"
self.score = 5 * score + 25 *( 1.0 - self.duplicatetuplenum/(self.tuplenum * 1.0) ) + 25 * (1.0 - sum(map(lambda x : x['ratio'] ,self.nullInfo()))/(len(self.nullInfo())*1.0))
self.pnp = pnp
session.commit()
def nullInfo(self):
nulllist = list()
for col in filter(lambda x: x.name[:4] == u"null", inspect(self.__class__).columns ):
nulllist.append(dict(ratio=getattr(self,col.name) ,name = col.name[5:] ))
return nulllist
class TaskData (object):
def __init__ (self,submittername, parsedid):
self.submittername = submittername
self.parsedid = parsedid
| true
| true
|
790a0ba86f5faf7a11b5b2fd062683b3364e94a1
| 1,175
|
py
|
Python
|
examples/example1.py
|
rob-blackbourn/bareasgi-jinja2
|
f2478b7287c4f16b4f1ed3b4e0aa2daa34cfd634
|
[
"Apache-2.0"
] | null | null | null |
examples/example1.py
|
rob-blackbourn/bareasgi-jinja2
|
f2478b7287c4f16b4f1ed3b4e0aa2daa34cfd634
|
[
"Apache-2.0"
] | null | null | null |
examples/example1.py
|
rob-blackbourn/bareasgi-jinja2
|
f2478b7287c4f16b4f1ed3b4e0aa2daa34cfd634
|
[
"Apache-2.0"
] | null | null | null |
"""An example of jinja2 templating"""
from bareasgi import Application, HttpRequest, HttpResponse
import jinja2
import pkg_resources
import uvicorn
from bareasgi_jinja2 import Jinja2TemplateProvider, add_jinja2
async def http_request_handler(request: HttpRequest) -> HttpResponse:
"""Handle the request"""
return await Jinja2TemplateProvider.apply(
request,
'example1.html',
{'name': 'rob'}
)
async def handle_no_template(request: HttpRequest) -> HttpResponse:
"""This is what happens if there is no template"""
return await Jinja2TemplateProvider.apply(
request,
'notemplate.html',
{'name': 'rob'}
)
if __name__ == '__main__':
TEMPLATES = pkg_resources.resource_filename(__name__, "templates")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(TEMPLATES),
autoescape=jinja2.select_autoescape(['html', 'xml']),
enable_async=True
)
app = Application()
add_jinja2(app, env)
app.http_router.add({'GET'}, '/example1', http_request_handler)
app.http_router.add({'GET'}, '/notemplate', handle_no_template)
uvicorn.run(app, port=9010)
| 26.111111
| 70
| 0.689362
|
from bareasgi import Application, HttpRequest, HttpResponse
import jinja2
import pkg_resources
import uvicorn
from bareasgi_jinja2 import Jinja2TemplateProvider, add_jinja2
async def http_request_handler(request: HttpRequest) -> HttpResponse:
return await Jinja2TemplateProvider.apply(
request,
'example1.html',
{'name': 'rob'}
)
async def handle_no_template(request: HttpRequest) -> HttpResponse:
return await Jinja2TemplateProvider.apply(
request,
'notemplate.html',
{'name': 'rob'}
)
if __name__ == '__main__':
TEMPLATES = pkg_resources.resource_filename(__name__, "templates")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(TEMPLATES),
autoescape=jinja2.select_autoescape(['html', 'xml']),
enable_async=True
)
app = Application()
add_jinja2(app, env)
app.http_router.add({'GET'}, '/example1', http_request_handler)
app.http_router.add({'GET'}, '/notemplate', handle_no_template)
uvicorn.run(app, port=9010)
| true
| true
|
790a0e96a9ca47a34174432cb6ccc7fcf67991ce
| 1,795
|
py
|
Python
|
st_package_reviewer/check/file/check_redundant_files.py
|
Thom1729/st_package_reviewer
|
71e4eaad60fe3391b0a4d39405b784ec84ea58bc
|
[
"MIT"
] | 8
|
2017-06-07T07:52:32.000Z
|
2021-04-26T23:46:36.000Z
|
st_package_reviewer/check/file/check_redundant_files.py
|
Thom1729/st_package_reviewer
|
71e4eaad60fe3391b0a4d39405b784ec84ea58bc
|
[
"MIT"
] | 26
|
2017-05-29T21:11:10.000Z
|
2021-05-16T20:58:23.000Z
|
st_package_reviewer/check/file/check_redundant_files.py
|
Thom1729/st_package_reviewer
|
71e4eaad60fe3391b0a4d39405b784ec84ea58bc
|
[
"MIT"
] | 8
|
2017-05-31T21:16:49.000Z
|
2021-03-20T16:43:26.000Z
|
import logging
from . import FileChecker
l = logging.getLogger(__name__)
class CheckPackageMetadata(FileChecker):
def check(self):
if self.sub_path("package-metadata.json").is_file():
self.fail("'package-metadata.json' is supposed to be automatically generated "
"by Package Control during installation")
class CheckPycFiles(FileChecker):
def check(self):
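        # Only flag a .pyc file when the matching .py source is also present.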
pyc_files = self.glob("**/*.pyc")
if not pyc_files:
return
for path in pyc_files:
if path.with_suffix(".py").is_file():
with self.file_context(path):
self.fail("'.pyc' file is redundant because its corresponding .py file exists")
class CheckCacheFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.cache")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.cache' file is redundant and created by ST automatically")
class CheckSublimePackageFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-package")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-package' files have no business being inside a package")
class CheckSublimeWorkspaceFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-workspace")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-workspace' files contain session data and should never be "
"submitted to version control")
| 28.046875
| 99
| 0.610585
|
import logging
from . import FileChecker
l = logging.getLogger(__name__)
class CheckPackageMetadata(FileChecker):
def check(self):
if self.sub_path("package-metadata.json").is_file():
self.fail("'package-metadata.json' is supposed to be automatically generated "
"by Package Control during installation")
class CheckPycFiles(FileChecker):
def check(self):
pyc_files = self.glob("**/*.pyc")
if not pyc_files:
return
for path in pyc_files:
if path.with_suffix(".py").is_file():
with self.file_context(path):
self.fail("'.pyc' file is redundant because its corresponding .py file exists")
class CheckCacheFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.cache")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.cache' file is redundant and created by ST automatically")
class CheckSublimePackageFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-package")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-package' files have no business being inside a package")
class CheckSublimeWorkspaceFiles(FileChecker):
def check(self):
cache_files = self.glob("**/*.sublime-workspace")
if not cache_files:
return
for path in cache_files:
with self.file_context(path):
self.fail("'.sublime-workspace' files contain session data and should never be "
"submitted to version control")
| true
| true
|
790a0ef41e807ca17f216256fafd05fc723b18aa
| 2,222
|
py
|
Python
|
_pycharm_skeletons/renderdoc/BlendStats.py
|
Lex-DRL/renderdoc-py-stubs
|
75d280e4f500ded506f3315a49fc432b37ab4fa6
|
[
"MIT"
] | null | null | null |
_pycharm_skeletons/renderdoc/BlendStats.py
|
Lex-DRL/renderdoc-py-stubs
|
75d280e4f500ded506f3315a49fc432b37ab4fa6
|
[
"MIT"
] | null | null | null |
_pycharm_skeletons/renderdoc/BlendStats.py
|
Lex-DRL/renderdoc-py-stubs
|
75d280e4f500ded506f3315a49fc432b37ab4fa6
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc
# imports
import enum as __enum
from .SwigPyObject import SwigPyObject
class BlendStats(SwigPyObject):
""" Contains the statistics for blend state binds in a frame. """
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
calls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many function calls were made."""
nulls = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many objects were unbound."""
redundants = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many calls made no change due to the existing bind being identical."""
sets = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""How many objects were bound."""
this = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is ''
| 31.295775
| 100
| 0.626463
|
import enum as __enum
from .SwigPyObject import SwigPyObject
class BlendStats(SwigPyObject):
def __eq__(self, *args, **kwargs):
pass
def __ge__(self, *args, **kwargs):
pass
def __gt__(self, *args, **kwargs):
pass
def __hash__(self, *args, **kwargs):
pass
def __init__(self, *args, **kwargs):
pass
def __le__(self, *args, **kwargs):
pass
def __lt__(self, *args, **kwargs):
pass
@staticmethod
def __new__(*args, **kwargs):
pass
def __ne__(self, *args, **kwargs):
pass
calls = property(lambda self: object(), lambda self, v: None, lambda self: None)
nulls = property(lambda self: object(), lambda self, v: None, lambda self: None)
redundants = property(lambda self: object(), lambda self, v: None, lambda self: None)
sets = property(lambda self: object(), lambda self, v: None, lambda self: None)
this = property(lambda self: object(), lambda self, v: None, lambda self: None)
thisown = property(lambda self: object(), lambda self, v: None, lambda self: None)
__dict__ = None
| true
| true
|
790a0fb418cc8a4ea09dd8a3d1a2ef22aeede881
| 3,800
|
py
|
Python
|
chapters/chapter2.py
|
amastis/Dead-End
|
ef25c1a50d1710ea678f628f42951bbf2c2c4a5f
|
[
"MIT"
] | 10
|
2021-08-14T06:39:43.000Z
|
2021-12-12T11:53:10.000Z
|
chapters/chapter2.py
|
amastis/Dead-End
|
ef25c1a50d1710ea678f628f42951bbf2c2c4a5f
|
[
"MIT"
] | 2
|
2021-12-30T05:46:21.000Z
|
2022-01-04T00:17:19.000Z
|
chapters/chapter2.py
|
amastis/Dead-End
|
ef25c1a50d1710ea678f628f42951bbf2c2c4a5f
|
[
"MIT"
] | 10
|
2021-05-14T00:59:32.000Z
|
2021-11-10T01:56:33.000Z
|
""" This file holds all the chapter 2 areas of the game. """
from time import sleep
# from classes import Player, Difficulty
from chapters.chapter import Chapter
from chapters.chapter3 import Chapter3
from other.sounds_effects import GameSounds
from game import player1, sounds, Difficulty
from choices import _player_choice, error_message
from other.colors import print_green, print_yellow, print_red, print_sleep, print_blue
class Chapter2(Chapter):
"""Contains all the main chapter 2 areas of the game."""
chapter_num = 2
def checkpoints(self):
"""runs movement to levels -- checkpoint when leaving area"""
return {'0': self.game,
'1': self.good_ending_and_continue,
'bad': self.bad_ending,
'3': self.woods_area,
}
def good_ending_and_continue(self):
"""Simply plays the good ending scene and then drops the player into chapter 2."""
self.good_ending()
Chapter3().game()
def game(self):
"""start of ch2"""
self.start()
print_sleep(
            'Upon driving the car through the broken roads area, the sun is certainly dwindling and time in the car '
'says 2:35 AM.\nYou continue to grow yourself tired and restless from everything that had led to this '
'point\n', 2.5)
choices = [str(x) for x in range(1, 3)]
choice_options = [
'Due to the car getting low on gas, you must make a tough decision. (1) Drive back to the local gas '
'station in town (2) Turn off the car and set up a camp fire in the woods: ']
choice = _player_choice(choices, choice_options)
if choice == '1':
sounds.zombie_attack_inside()
print_sleep(
'While attempting to put the car in reverse and head backwards to the local gas station in town, '
'a swarm of zombies arise on the car while the car gets stuck into gear!\n', 2.5)
if not player1.user_attack():
return
player1.total_kills += 5
print_green('You have successfully killed off the heaping swarm of zombies surrounding the car!\n', 1)
self.continue_message()
elif choice == '2':
print_sleep(
            'You have parked the car near the closest woods area and now need to gather up some supplies for a camp '
'fire.\n', 2)
self.woods_area()
def woods_area(self):
"""Checkpoint save 3"""
player1.checkpoint_save('3')
print_sleep(
'You have successfully gathered up some sticks and still need a source of flame to begin the campfire.\n',
2)
choices = [str(x) for x in range(1, 3)]
choice_options = [
'You can either test your luck in creating a fire by (1) Creating friction: Use sticks and rub against '
'nearby wood chips (2) Search for other useful resources: ']
choice = _player_choice(choices, choice_options)
if choice == '1':
sounds.flame_ignite()
print_sleep('Whoosh! after a few minutes of trying to create friction, the birth of a small ash turns into '
'a flame!\n', 2.5)
self.continue_message()
elif choice == '2':
sounds.zombie_attack_outside()
print_red(
'Whilst looking around for more resources, you begin hearing a group of 3 zombies running towards '
'you!\n', 2)
if not player1.user_attack():
return
player1.total_kills += 3
print_green('You have successfully killed off the group of 3 zombies running towards you!\n', 1)
self.continue_message()
| 44.186047
| 120
| 0.613421
|
from time import sleep
from chapters.chapter import Chapter
from chapters.chapter3 import Chapter3
from other.sounds_effects import GameSounds
from game import player1, sounds, Difficulty
from choices import _player_choice, error_message
from other.colors import print_green, print_yellow, print_red, print_sleep, print_blue
class Chapter2(Chapter):
chapter_num = 2
def checkpoints(self):
return {'0': self.game,
'1': self.good_ending_and_continue,
'bad': self.bad_ending,
'3': self.woods_area,
}
def good_ending_and_continue(self):
self.good_ending()
Chapter3().game()
def game(self):
self.start()
print_sleep(
            'Upon driving the car through the broken roads area, the sun is certainly dwindling and time in the car '
'says 2:35 AM.\nYou continue to grow yourself tired and restless from everything that had led to this '
'point\n', 2.5)
choices = [str(x) for x in range(1, 3)]
choice_options = [
'Due to the car getting low on gas, you must make a tough decision. (1) Drive back to the local gas '
'station in town (2) Turn off the car and set up a camp fire in the woods: ']
choice = _player_choice(choices, choice_options)
if choice == '1':
sounds.zombie_attack_inside()
print_sleep(
'While attempting to put the car in reverse and head backwards to the local gas station in town, '
'a swarm of zombies arise on the car while the car gets stuck into gear!\n', 2.5)
if not player1.user_attack():
return
player1.total_kills += 5
print_green('You have successfully killed off the heaping swarm of zombies surrounding the car!\n', 1)
self.continue_message()
elif choice == '2':
print_sleep(
            'You have parked the car near the closest woods area and now need to gather up some supplies for a camp '
'fire.\n', 2)
self.woods_area()
def woods_area(self):
player1.checkpoint_save('3')
print_sleep(
'You have successfully gathered up some sticks and still need a source of flame to begin the campfire.\n',
2)
choices = [str(x) for x in range(1, 3)]
choice_options = [
'You can either test your luck in creating a fire by (1) Creating friction: Use sticks and rub against '
'nearby wood chips (2) Search for other useful resources: ']
choice = _player_choice(choices, choice_options)
if choice == '1':
sounds.flame_ignite()
print_sleep('Whoosh! after a few minutes of trying to create friction, the birth of a small ash turns into '
'a flame!\n', 2.5)
self.continue_message()
elif choice == '2':
sounds.zombie_attack_outside()
print_red(
'Whilst looking around for more resources, you begin hearing a group of 3 zombies running towards '
'you!\n', 2)
if not player1.user_attack():
return
player1.total_kills += 3
print_green('You have successfully killed off the group of 3 zombies running towards you!\n', 1)
self.continue_message()
| true
| true
|
790a103f0a4a4ceeaae6372fb5b9b4c773807c63
| 1,875
|
py
|
Python
|
tests/test_MonkeyTest.py
|
Groops78/asn1editor
|
1c386b3a34bcfccd97b80aa29c0ce4cfcb81853f
|
[
"MIT"
] | 1
|
2021-01-21T14:32:43.000Z
|
2021-01-21T14:32:43.000Z
|
tests/test_MonkeyTest.py
|
Groops78/asn1editor
|
1c386b3a34bcfccd97b80aa29c0ce4cfcb81853f
|
[
"MIT"
] | null | null | null |
tests/test_MonkeyTest.py
|
Groops78/asn1editor
|
1c386b3a34bcfccd97b80aa29c0ce4cfcb81853f
|
[
"MIT"
] | null | null | null |
import os
import random
import threading
from time import sleep
from unittest import TestCase
import asn1tools
import wx
import asn1editor
from asn1editor.wxPython.ViewSelect import ViewType
from tests import testHelper
def actions(main_window: asn1editor.wxPython.MainWindow):
def get_children(window: wx.Window):
my_children = window.GetChildren()
if my_children is not None:
their_children = []
for my_child in my_children:
their_children += get_children(my_child)
return list(my_children) + their_children
else:
return []
sleep(1)
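    # Send 1000 random key presses to the main window to exercise the editor UI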
key_codes = [wx.WXK_TAB, wx.WXK_DOWN, wx.WXK_UP, wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_SPACE] + [c for c in range(ord('1'), ord('9'))]
ui_sim = wx.UIActionSimulator()
for _ in range(1000):
main_window.SetFocus()
key_code = random.choice(key_codes)
ui_sim.KeyDown(key_code)
ui_sim.KeyUp(key_code)
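    # After the random input, try to persist the edited data; constraint violations are tolerated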
try:
main_window.save_data_to_file('test.json')
except asn1tools.ConstraintsError:
pass
main_window.Close(True)
wx.GetApp().ExitMainLoop()
class MonkeyTest(TestCase):
@staticmethod
def test_monkey():
if os.getenv('TRAVIS') is not None or os.getenv('GITHUB_ACTIONS') is not None:
return
# noinspection PyUnusedLocal
app = testHelper.get_wx_app()
main_window = asn1editor.wxPython.MainWindow()
main_window.select_view(ViewType.GROUPS)
test_types = [('example/example.asn', 'EXAMPLE.Sequence')]
for spec, type_ in test_types:
main_window.load_spec(spec, type_)
action_thread = threading.Thread(target=actions, args=[main_window])
action_thread.start()
main_window.Show()
app.MainLoop()
action_thread.join(timeout=0.0)
| 28.846154
| 134
| 0.654933
|
import os
import random
import threading
from time import sleep
from unittest import TestCase
import asn1tools
import wx
import asn1editor
from asn1editor.wxPython.ViewSelect import ViewType
from tests import testHelper
def actions(main_window: asn1editor.wxPython.MainWindow):
def get_children(window: wx.Window):
my_children = window.GetChildren()
if my_children is not None:
their_children = []
for my_child in my_children:
their_children += get_children(my_child)
return list(my_children) + their_children
else:
return []
sleep(1)
key_codes = [wx.WXK_TAB, wx.WXK_DOWN, wx.WXK_UP, wx.WXK_LEFT, wx.WXK_RIGHT, wx.WXK_SPACE] + [c for c in range(ord('1'), ord('9'))]
ui_sim = wx.UIActionSimulator()
for _ in range(1000):
main_window.SetFocus()
key_code = random.choice(key_codes)
ui_sim.KeyDown(key_code)
ui_sim.KeyUp(key_code)
try:
main_window.save_data_to_file('test.json')
except asn1tools.ConstraintsError:
pass
main_window.Close(True)
wx.GetApp().ExitMainLoop()
class MonkeyTest(TestCase):
@staticmethod
def test_monkey():
if os.getenv('TRAVIS') is not None or os.getenv('GITHUB_ACTIONS') is not None:
return
app = testHelper.get_wx_app()
main_window = asn1editor.wxPython.MainWindow()
main_window.select_view(ViewType.GROUPS)
test_types = [('example/example.asn', 'EXAMPLE.Sequence')]
for spec, type_ in test_types:
main_window.load_spec(spec, type_)
action_thread = threading.Thread(target=actions, args=[main_window])
action_thread.start()
main_window.Show()
app.MainLoop()
action_thread.join(timeout=0.0)
| true
| true
|
790a1063bb88b75c4647565565ce2175ff64010d
| 1,669
|
py
|
Python
|
壁球/壁球游戏2.0/main.py
|
LZY2006/pygame-small-games
|
41c97a67f3781d4691b822b4fbdafa310f785938
|
[
"MIT"
] | null | null | null |
壁球/壁球游戏2.0/main.py
|
LZY2006/pygame-small-games
|
41c97a67f3781d4691b822b4fbdafa310f785938
|
[
"MIT"
] | null | null | null |
壁球/壁球游戏2.0/main.py
|
LZY2006/pygame-small-games
|
41c97a67f3781d4691b822b4fbdafa310f785938
|
[
"MIT"
] | null | null | null |
# Unit PYG02: Pygame Wall Ball Game version 3 (keyboard-controlled)
import pygame,sys
pygame.init()
vINFO=pygame.display.Info()
print(vINFO)
size = width, height = vINFO.current_w,vINFO.current_h
speed = [1,1]
BLACK = 0, 0, 0
screen = pygame.display.set_mode(size,pygame.FULLSCREEN)
icon=pygame.image.load("1.png")
pygame.display.set_icon(icon)
pygame.display.set_caption("Pygame壁球")
ball = pygame.image.load("PYG02-ball.gif")
ballrect = ball.get_rect()
fps = 300
fclock = pygame.time.Clock()
while True:
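    # Arrow keys adjust the ball speed (right/up speed it up, left/down slow it down); E quits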
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
speed[0] = speed[0] if speed[0] == 0 else (abs(speed[0]) - 1)*int(speed[0]/abs(speed[0]))
elif event.key == pygame.K_RIGHT:
speed[0] = speed[0] + 1 if speed[0] > 0 else speed[0] - 1
elif event.key == pygame.K_UP:
speed[1] = speed[1] + 1 if speed[1] > 0 else speed[1] - 1
elif event.key == pygame.K_DOWN:
speed[1] = speed[1] if speed[1] == 0 else (abs(speed[1]) - 1)*int(speed[1]/abs(speed[1]))
elif event.key==pygame.K_e:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
print(repr(event))
ballrect = ballrect.move(speed)
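    # Bounce off the window edges by reversing the corresponding velocity component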
if ballrect.left < 0 or ballrect.right > width:
speed[0] = - speed[0]
if ballrect.top < 0 or ballrect.bottom > height:
speed[1] = - speed[1]
screen.fill(BLACK)
screen.blit(ball, ballrect)
pygame.display.update()
fclock.tick(fps)
| 32.096154
| 105
| 0.59257
|
import pygame,sys
pygame.init()
vINFO=pygame.display.Info()
print(vINFO)
size = width, height = vINFO.current_w,vINFO.current_h
speed = [1,1]
BLACK = 0, 0, 0
screen = pygame.display.set_mode(size,pygame.FULLSCREEN)
icon=pygame.image.load("1.png")
pygame.display.set_icon(icon)
pygame.display.set_caption("Pygame壁球")
ball = pygame.image.load("PYG02-ball.gif")
ballrect = ball.get_rect()
fps = 300
fclock = pygame.time.Clock()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
speed[0] = speed[0] if speed[0] == 0 else (abs(speed[0]) - 1)*int(speed[0]/abs(speed[0]))
elif event.key == pygame.K_RIGHT:
speed[0] = speed[0] + 1 if speed[0] > 0 else speed[0] - 1
elif event.key == pygame.K_UP:
speed[1] = speed[1] + 1 if speed[1] > 0 else speed[1] - 1
elif event.key == pygame.K_DOWN:
speed[1] = speed[1] if speed[1] == 0 else (abs(speed[1]) - 1)*int(speed[1]/abs(speed[1]))
elif event.key==pygame.K_e:
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
print(repr(event))
ballrect = ballrect.move(speed)
if ballrect.left < 0 or ballrect.right > width:
speed[0] = - speed[0]
if ballrect.top < 0 or ballrect.bottom > height:
speed[1] = - speed[1]
screen.fill(BLACK)
screen.blit(ball, ballrect)
pygame.display.update()
fclock.tick(fps)
| true
| true
|
790a1071003702b23b70375e479230ceb5617489
| 12,846
|
py
|
Python
|
utils/heatmap-coverage.py
|
ved432/test
|
779914d1bc876414d1149161ec0d838d7bb16601
|
[
"Apache-2.0"
] | null | null | null |
utils/heatmap-coverage.py
|
ved432/test
|
779914d1bc876414d1149161ec0d838d7bb16601
|
[
"Apache-2.0"
] | null | null | null |
utils/heatmap-coverage.py
|
ved432/test
|
779914d1bc876414d1149161ec0d838d7bb16601
|
[
"Apache-2.0"
] | null | null | null |
from pandas import DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
import csv,sys
ExperimentName=sys.argv[1]
with open(ExperimentName+'.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)
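# Pick the coverage checklist (row labels) that matches the requested experiment name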
if ExperimentName == "pod-delete":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Stress: 3600/1s','Memory/CPU footprint for the stress run','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','With Force','Without Force','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod Delete Experiment", fontsize =20)
elif ExperimentName == "container-kill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Large Duration and Interval']
Cols = ['Is the test added?']
plt.title("Container Kill Experiment", fontsize =20)
elif ExperimentName == "disk-fill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Application image(nginx/centos/alpine)']
Cols = ['Is the test added?']
plt.title("Disk Fill Experiment", fontsize =20)
elif ExperimentName == "pod-cpu-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod CPU Hog Experiment", fontsize =20)
elif ExperimentName == "pod-memory-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod Memory Hog Experiment", fontsize =20)
elif ExperimentName == "pod-network-corruption":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']
Cols = ['Is the test added?']
plt.title("Pod Network Corruption Experiment", fontsize =20)
elif ExperimentName == "pod-network-duplication":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']
Cols = ['Is the test added?']
plt.title("Pod Network Duplication Experiment", fontsize =20)
elif ExperimentName == "pod-network-latency":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host']
Cols = ['Is the test added?']
plt.title("Pod Network Latency Experiment", fontsize =20)
elif ExperimentName == "pod-network-loss":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host']
Cols = ['Is the test added?']
plt.title("Pod Network Loss Experiment", fontsize =20)
elif ExperimentName == "pod-autoscaler":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With less replicas(say 5)','with more replicas(say 20)']
Cols = ['Is the test added?']
plt.title("Pod Autoscaler Experiment", fontsize =20)
elif ExperimentName == "kubelet-service-kill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Target Node Specified','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different lib image(ubuntu/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Kubelet Service Kill", fontsize =20)
elif ExperimentName == "node-cpu-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node CPU Hog", fontsize =20)
elif ExperimentName == "node-memory-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Memory Hog", fontsize =20)
elif ExperimentName == "node-drain":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Drain Experiment", fontsize =20)
elif ExperimentName == "node-taint":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Taint Experiment", fontsize =20)
elif ExperimentName == "node-io-stress":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified']
Cols = ['Is the test added?']
plt.title("Node IO Stress", fontsize =20)
elif ExperimentName == "pod-io-stress":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodAffectedPercentage is 0','PodAffectedPercentage is 100','PodAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified','w/ Volume mouth path specified']
Cols = ['Is the test added?']
plt.title("Pod IO Stress", fontsize =20)
else:
    print("Experiment %s not supported" % ExperimentName)
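# Render the coverage values as a single-column heatmap and save it as <ExperimentName>-heatmap.png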
df = DataFrame(data, index=Index, columns=Cols)
df = df[df.columns].astype(float)
print(df)
svm = sns.heatmap(df, cmap="Reds")
figure = svm.get_figure()
plt.subplots_adjust(left=0.218,bottom=0.095,right=0.9,top=0.88,wspace=0.2,hspace=0.2)
figure.set_figheight(10)
figure.set_figwidth(15)
plt.savefig(ExperimentName+'-heatmap.png', dpi=250)
| 136.659574
| 675
| 0.763973
|
from pandas import DataFrame
import matplotlib.pyplot as plt
import seaborn as sns
import csv,sys
ExperimentName=sys.argv[1]
with open(ExperimentName+'.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)
if ExperimentName == "pod-delete":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Stress: 3600/1s','Memory/CPU footprint for the stress run','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','With Force','Without Force','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod Delete Experiment", fontsize =20)
elif ExperimentName == "container-kill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Large Duration and Interval']
Cols = ['Is the test added?']
plt.title("Container Kill Experiment", fontsize =20)
elif ExperimentName == "disk-fill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Application image(nginx/centos/alpine)']
Cols = ['Is the test added?']
plt.title("Disk Fill Experiment", fontsize =20)
elif ExperimentName == "pod-cpu-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod CPU Hog Experiment", fontsize =20)
elif ExperimentName == "pod-memory-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)']
Cols = ['Is the test added?']
plt.title("Pod Memory Hog Experiment", fontsize =20)
elif ExperimentName == "pod-network-corruption":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']
Cols = ['Is the test added?']
plt.title("Pod Network Corruption Experiment", fontsize =20)
elif ExperimentName == "pod-network-duplication":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host','With Target Container']
Cols = ['Is the test added?']
plt.title("Pod Network Duplication Experiment", fontsize =20)
elif ExperimentName == "pod-network-latency":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host']
Cols = ['Is the test added?']
plt.title("Pod Network Latency Experiment", fontsize =20)
elif ExperimentName == "pod-network-loss":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodsAffectedPercentage is 0','PodsAffectedPercentage is 100','PodsAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With Target IP','With Target Host']
Cols = ['Is the test added?']
plt.title("Pod Network Loss Experiment", fontsize =20)
elif ExperimentName == "pod-autoscaler":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','With less replicas(say 5)','with more replicas(say 20)']
Cols = ['Is the test added?']
plt.title("Pod Autoscaler Experiment", fontsize =20)
elif ExperimentName == "kubelet-service-kill":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Target Node Specified','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Time the execution period','Apps w/ liveness/readiness probes','ARM Cluster','Different lib image(ubuntu/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Kubelet Service Kill", fontsize =20)
elif ExperimentName == "node-cpu-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node CPU Hog", fontsize =20)
elif ExperimentName == "node-memory-hog":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Memory Hog", fontsize =20)
elif ExperimentName == "node-drain":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Drain Experiment", fontsize =20)
elif ExperimentName == "node-taint":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','With Annotation Specified','W/o Annotation Specified','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Containerd Cluster Runtime','Docker Cluster Runtime','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Target node specified','Without appinfo']
Cols = ['Is the test added?']
plt.title("Node Taint Experiment", fontsize =20)
elif ExperimentName == "node-io-stress":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','NodeAffectedPercentage is 0','NodeAffectedPercentage is 100','NodeAffectedPercentage 0-100','Target Node Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, node_affected_perc=100','Sequence=parallel, node_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified']
Cols = ['Is the test added?']
plt.title("Node IO Stress", fontsize =20)
elif ExperimentName == "pod-io-stress":
Index = ['Validation','For deployment application','For statefulset application','For deployment config application','PodAffectedPercentage is 0','PodAffectedPercentage is 100','PodAffectedPercentage 0-100','Target Pod Specification','With Annotation Specified','W/o Annotation Specified', 'Sequence=serial, pod_affected_perc=100','Sequence=parallel, pod_affected_perc=100','Abort w/o probes','AuxiliaryAppChecks','Along w/ Probes','Apps w/ liveness/readiness probes','ARM Cluster','Different base image(alpine/nginx/centos)','Without appinfo','W/ filesystem utilisation bytes specified','w/ filesystem utilisation percentage specified','w/ Volume mouth path specified']
Cols = ['Is the test added?']
plt.title("Pod IO Stress", fontsize =20)
else:
    print("Experiment %s not supported" % ExperimentName)
df = DataFrame(data, index=Index, columns=Cols)
df = df[df.columns].astype(float)
print(df)
svm = sns.heatmap(df, cmap="Reds")
figure = svm.get_figure()
plt.subplots_adjust(left=0.218,bottom=0.095,right=0.9,top=0.88,wspace=0.2,hspace=0.2)
figure.set_figheight(10)
figure.set_figwidth(15)
plt.savefig(ExperimentName+'-heatmap.png', dpi=250)
| true
| true
|
790a1094a20697f80ff8a238c317a81ce6746d16
| 3,588
|
py
|
Python
|
walle/service/git/repo.py
|
lgq9220/walle-web
|
06d90ee3b3577985f04567ef176c7c8e60e242bb
|
[
"Apache-2.0"
] | null | null | null |
walle/service/git/repo.py
|
lgq9220/walle-web
|
06d90ee3b3577985f04567ef176c7c8e60e242bb
|
[
"Apache-2.0"
] | 1
|
2021-03-20T05:32:23.000Z
|
2021-03-20T05:32:23.000Z
|
walle/service/git/repo.py
|
lgq9220/walle-web
|
06d90ee3b3577985f04567ef176c7c8e60e242bb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2019-02-24 10:47:53
:author: wushuiyong@walle-web.io
"""
import os
import re
import os.path as osp
import git as PyGit
from git import Repo as PyRepo
class Repo:
path = None
def __init__(self, path=None):
self.path = path
def is_git_dir(self):
'''
        Check whether this path is a git directory
@param path:
@return:
'''
d = self.path + '/.git'
if osp.isdir(d):
if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
headref = osp.join(d, 'HEAD')
return osp.isfile(headref) or \
(osp.islink(headref) and
os.readlink(headref).startswith('refs'))
elif (osp.isfile(osp.join(d, 'gitdir')) and
osp.isfile(osp.join(d, 'commondir')) and
osp.isfile(osp.join(d, 'gitfile'))):
return False
return False
def init(self, url):
        # Create the directory if it does not exist yet
if not os.path.exists(self.path):
os.makedirs(self.path)
        # Pull if this is already a git repository, otherwise clone it
if self.is_git_dir():
return self.pull()
else:
return self.clone(url)
def clone(self, url):
'''
        Clone (check out) the project
@param branch:
@param kwargs:
@return:
'''
return PyRepo.clone_from(url, self.path)
def pull(self):
'''
        Pull the latest changes for the project
@param branch:
@param kwargs:
@return:
'''
repo = PyRepo(self.path)
return repo.remote().pull()
def checkout_2_branch(self, branch):
PyRepo(self.path).git.checkout(branch)
def checkout_2_commit(self, branch, commit):
'''
        @todo not finished yet
@param branch:
@param commit:
@return:
'''
PyRepo(self.path).git.checkout(branch)
# PyRepo(self.path).head.set_reference(branch)
        # This approach is flawed: it only resets HEAD, it does not perform a checkout
PyRepo(self.path).head.set_commit(commit)
def checkout_2_tag(self, tag):
PyRepo(self.path).git.checkout(tag)
def branches(self):
'''
        Get all branches
@param branch:
@param kwargs:
@return:
'''
        # Drop the "origin/HEAD ->" entry that points at the default branch
        # Strip the remote name prefix
branches = PyRepo(self.path).remote().refs
# fixbug https://github.com/meolu/walle-web/issues/705
return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if
not str(branch).strip().startswith('origin/HEAD')]
def tags(self):
'''
        Get all tags
@param branch:
@param kwargs:
@return:
'''
return [str(tag) for tag in PyRepo(self.path).tags]
def commits(self, branch):
'''
        Get the commits of the given branch
@param branch:
@param kwargs:
@return:
'''
self.checkout_2_branch(branch)
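        # Read the latest 50 commits as "hash #@_@# author #@_@# subject" records and parse them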
commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50)
commit_list = commit_log.split('\n')
commits = []
for commit in commit_list:
if not re.search('^.+ #@_@# .+ #@_@# .*$', commit):
continue
commit_dict = commit.split(' #@_@# ')
from flask import current_app
current_app.logger.info(commit_dict)
commits.append({
'id': commit_dict[0],
'name': commit_dict[1],
'message': commit_dict[2],
})
return commits
| 24.744828
| 93
| 0.510033
|
import os
import re
import os.path as osp
import git as PyGit
from git import Repo as PyRepo
class Repo:
path = None
def __init__(self, path=None):
self.path = path
def is_git_dir(self):
d = self.path + '/.git'
if osp.isdir(d):
if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
headref = osp.join(d, 'HEAD')
return osp.isfile(headref) or \
(osp.islink(headref) and
os.readlink(headref).startswith('refs'))
elif (osp.isfile(osp.join(d, 'gitdir')) and
osp.isfile(osp.join(d, 'commondir')) and
osp.isfile(osp.join(d, 'gitfile'))):
return False
return False
def init(self, url):
if not os.path.exists(self.path):
os.makedirs(self.path)
if self.is_git_dir():
return self.pull()
else:
return self.clone(url)
def clone(self, url):
return PyRepo.clone_from(url, self.path)
def pull(self):
repo = PyRepo(self.path)
return repo.remote().pull()
def checkout_2_branch(self, branch):
PyRepo(self.path).git.checkout(branch)
def checkout_2_commit(self, branch, commit):
PyRepo(self.path).git.checkout(branch)
PyRepo(self.path).head.set_commit(commit)
def checkout_2_tag(self, tag):
PyRepo(self.path).git.checkout(tag)
def branches(self):
branches = PyRepo(self.path).remote().refs
return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if
not str(branch).strip().startswith('origin/HEAD')]
def tags(self):
return [str(tag) for tag in PyRepo(self.path).tags]
def commits(self, branch):
self.checkout_2_branch(branch)
commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50)
commit_list = commit_log.split('\n')
commits = []
for commit in commit_list:
if not re.search('^.+ #@_@# .+ #@_@# .*$', commit):
continue
commit_dict = commit.split(' #@_@# ')
from flask import current_app
current_app.logger.info(commit_dict)
commits.append({
'id': commit_dict[0],
'name': commit_dict[1],
'message': commit_dict[2],
})
return commits
| true
| true
|
790a11b01276f7d332c9d88a8dde58030a7f58a9
| 1,599
|
py
|
Python
|
py/py_0564_maximal_polygons.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0564_maximal_polygons.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0564_maximal_polygons.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
# Solution of;
# Project Euler Problem 564: Maximal polygons
# https://projecteuler.net/problem=564
#
# A line segment of length $2n-3$ is randomly split into $n$ segments of
# integer length ($n \ge 3$). In the sequence given by this split, the
# segments are then used as consecutive sides of a convex $n$-polygon, formed
# in such a way that its area is maximal. All of the $\binom{2n-4}{n-1}$
# possibilities for splitting up the initial line segment occur with the same
# probability. Let $E(n)$ be the expected value of the area that is obtained
# by this procedure. For example, for $n=3$ the only possible split of the
# line segment of length $3$ results in three line segments with length $1$,
# that form an equilateral triangle with an area of $\frac 1 4 \sqrt{3}$.
# Therefore $E(3)=0. 433013$, rounded to $6$ decimal places. For $n=4$ you can
# find $4$ different possible splits, each of which is composed of three line
# segments with length $1$ and one line segment with length $2$. All of these
# splits lead to the same maximal quadrilateral with an area of $\frac 3 4
# \sqrt{3}$, thus $E(4)=1. 299038$, rounded to $6$ decimal places. Let
# $S(k)=\displaystyle \sum_{n=3}^k E(n)$. For example, $S(3)=0. 433013$,
# $S(4)=1. 732051$, $S(5)=4. 604767$ and $S(10)=66. 955511$, rounded to $6$
# decimal places each. Find $S(50)$, rounded to $6$ decimal places.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
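    # Placeholder: only the timing harness is wired up, the solution itself is not implemented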
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 564
timed.caller(dummy, n, i, prob_id)
| 44.416667
| 79
| 0.68793
|
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 564
timed.caller(dummy, n, i, prob_id)
| true
| true
|
790a12c98d343c9d305668dfe266339f5ab4440a
| 14,445
|
py
|
Python
|
awswrangler/neptune/neptune.py
|
minwook-shin/aws-data-wrangler
|
304e734db5e96cc5e11ff54b4f3a1cf7c4e5736b
|
[
"Apache-2.0"
] | null | null | null |
awswrangler/neptune/neptune.py
|
minwook-shin/aws-data-wrangler
|
304e734db5e96cc5e11ff54b4f3a1cf7c4e5736b
|
[
"Apache-2.0"
] | null | null | null |
awswrangler/neptune/neptune.py
|
minwook-shin/aws-data-wrangler
|
304e734db5e96cc5e11ff54b4f3a1cf7c4e5736b
|
[
"Apache-2.0"
] | null | null | null |
"""Amazon Neptune Module."""
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a Gremlin traversal as pandas dataframe.
Parameters
----------
client : neptune.Client
instance of the neptune client to use
traversal : str
The gremlin traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a Gremlin Query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_gremlin(client, "g.V().limit(1)")
"""
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a openCypher traversal as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The openCypher query to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run an openCypher query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> resp = wr.neptune.execute_opencypher(client, "MATCH (n) RETURN n LIMIT 1")
"""
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
"""Return results of a SPARQL query as pandas dataframe.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
query : str
The SPARQL traversal to execute
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Results as Pandas DataFrame
Examples
--------
Run a SPARQL query
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> df = wr.neptune.execute_sparql(client, "PREFIX foaf: <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE {
        ?person foaf:name ?name .
    }")
    """
data = client.read_sparql(query)
df = None
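    # SPARQL SELECT responses expose rows under results/bindings; other response shapes are loaded as-is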
if "results" in data and "bindings" in data["results"]:
df = pd.DataFrame(data["results"]["bindings"])
        df = df.applymap(lambda x: x["value"])
else:
df = pd.DataFrame(data)
return df
def to_property_graph(
client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
If writing to a property graph then DataFrames for vertices and edges must be written separately.
DataFrames for vertices must have a ~label column with the label and a ~id column for the vertex id.
    If the ~id column does not exist, the specified id does not exist, or is empty, then a new vertex will be added.
If no ~label column exists an exception will be thrown.
    DataFrames for edges must have a ~id, ~label, ~to, and ~from column. If the ~id column does not exist,
    the specified id does not exist, or is empty, then a new edge will be added. If no ~label, ~to, or ~from column
exists an exception will be thrown.
If you would like to save data using `single` cardinality then you can postfix (single) to the column header and
set use_header_cardinality=True (default). e.g. A column named `name(single)` will save the `name` property
    as single cardinality. You can disable this by setting `use_header_cardinality=False`.
Parameters
----------
client : NeptuneClient
instance of the neptune client to use
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
batch_size: int
The number of rows to save at a time. Default 50
use_header_cardinality: bool
If True, then the header cardinality will be used to save the data. Default True
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_property_graph(
... df=df
... )
"""
# check if ~id and ~label column exist and if not throw error
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if "~id" in df.columns:
if "~label" in df.columns:
is_update_df = False
if "~to" in df.columns and "~from" in df.columns:
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue(
"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
)
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
# run the query
if index > 0 and index % batch_size == 0:
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g)
def to_rdf_graph(
client: NeptuneClient,
df: pd.DataFrame,
batch_size: int = 50,
subject_column: str = "s",
predicate_column: str = "p",
object_column: str = "o",
graph_column: str = "g",
) -> bool:
"""Write records stored in a DataFrame into Amazon Neptune.
The DataFrame must consist of triples with column names for the subject, predicate, and object specified.
If you want to add data into a named graph then you will also need the graph column.
Parameters
----------
client (NeptuneClient) :
instance of the neptune client to use
df (pandas.DataFrame) :
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
subject_column (str, optional) :
The column name in the dataframe for the subject. Defaults to 's'
predicate_column (str, optional) :
The column name in the dataframe for the predicate. Defaults to 'p'
object_column (str, optional) :
The column name in the dataframe for the object. Defaults to 'o'
graph_column (str, optional) :
The column name in the dataframe for the graph if sending across quads. Defaults to 'g'
Returns
-------
bool
True if records were written
Examples
--------
Writing to Amazon Neptune
>>> import awswrangler as wr
>>> client = wr.neptune.connect(neptune_endpoint, neptune_port, iam_enabled=False)
>>> wr.neptune.gremlin.to_rdf_graph(
... df=df
... )
"""
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if graph_column in df.columns:
is_quads = True
else:
raise exceptions.InvalidArgumentValue(
"""Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
(s, p, o) to be saved to Amazon Neptune"""
)
query = ""
# Loop through items in the DF
for (index, row) in df.iterrows():
# build up a query
if is_quads:
insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
query = query + insert
else:
insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; """
query = query + insert
# run the query
if index > 0 and index % batch_size == 0:
res = client.write_sparql(query)
if res:
query = ""
return client.write_sparql(query)
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
"""Create a connection to a Neptune cluster.
Parameters
----------
host : str
The host endpoint to connect to
port : int
The port endpoint to connect to
iam_enabled : bool, optional
True if IAM is enabled on the cluster. Defaults to False.
Returns
-------
NeptuneClient
[description]
"""
return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
for (column, value) in row.items():
if column not in ["~id", "~label", "~to", "~from"]:
# If the column header is specifying the cardinality then use it
if use_header_cardinality:
if column.lower().find("(single)") > 0 and pd.notna(value):
g = g.property(Cardinality.single, _get_column_name(column), value)
else:
g = _expand_properties(g, _get_column_name(column), value)
else:
# If not using header cardinality then use the default of set
g = _expand_properties(g, column, value)
return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
# If this is a list then expand it out into multiple property calls
if isinstance(value, list) and len(value) > 0:
for item in value:
g = g.property(Cardinality.set_, column, item)
elif pd.notna(value):
g = g.property(Cardinality.set_, column, value)
return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
g = g.V(str(row["~id"]))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_vertices(
g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_edges(
g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
g = (
g.V(str(row["~from"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
.addE(row["~label"])
.property(T.id, str(row["~id"]))
.to(
__.V(str(row["~to"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
)
)
g = _set_properties(g, use_header_cardinality, row)
return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
translator = Translator("g")
s = translator.translate(g.bytecode)
s = s.replace("Cardinality.", "") # hack to fix parser error for set cardinality
_logger.debug(s)
res = client.write_gremlin(s)
return res
def flatten_nested_df(
df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
"""Flatten the lists and dictionaries of the input data frame.
Parameters
----------
df : pd.DataFrame
The input data frame
include_prefix : bool, optional
If True, then it will prefix the new column name with the original column name.
Defaults to True.
seperator : str, optional
The seperator to use between field names when a dictionary is exploded.
Defaults to "_".
recursive : bool, optional
If True, then this will recurse the fields in the data frame. Defaults to True.
Returns
-------
pd.DataFrame: The flattened data frame
"""
if seperator is None:
seperator = "_"
df = df.reset_index()
# search for list and map
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if len(list_columns) > 0 or len(dict_columns) > 0:
new_columns = []
for col in dict_columns:
# expand dictionaries horizontally
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{col}{seperator}")
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{seperator}")
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
        # check if there are still dict or list fields to flatten
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df
| 34.640288
| 116
| 0.638975
|
import logging
import re
from typing import Any
import pandas as pd
from gremlin_python.process.graph_traversal import GraphTraversalSource, __
from gremlin_python.process.translator import Translator
from gremlin_python.process.traversal import Cardinality, T
from gremlin_python.structure.graph import Graph
from awswrangler import exceptions
from awswrangler.neptune.client import NeptuneClient
_logger: logging.Logger = logging.getLogger(__name__)
def execute_gremlin(client: NeptuneClient, query: str) -> pd.DataFrame:
results = client.read_gremlin(query)
df = pd.DataFrame.from_records(results)
return df
def execute_opencypher(client: NeptuneClient, query: str) -> pd.DataFrame:
resp = client.read_opencypher(query)
df = pd.DataFrame.from_dict(resp)
return df
def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:
data = client.read_sparql(query)
df = None
if "results" in data and "bindings" in data["results"]:
df = pd.DataFrame(data["results"]["bindings"])
        df = df.applymap(lambda x: x["value"])
else:
df = pd.DataFrame(data)
return df
def to_property_graph(
client: NeptuneClient, df: pd.DataFrame, batch_size: int = 50, use_header_cardinality: bool = True
) -> bool:
g = Graph().traversal()
is_edge_df = False
is_update_df = True
if "~id" in df.columns:
if "~label" in df.columns:
is_update_df = False
if "~to" in df.columns and "~from" in df.columns:
is_edge_df = True
else:
raise exceptions.InvalidArgumentValue(
"Dataframe must contain at least a ~id and a ~label column to be saved to Amazon Neptune"
)
for (index, row) in df.iterrows():
if is_update_df:
g = _build_gremlin_update(g, row, use_header_cardinality)
elif is_edge_df:
g = _build_gremlin_insert_edges(g, row.to_dict(), use_header_cardinality)
else:
g = _build_gremlin_insert_vertices(g, row.to_dict(), use_header_cardinality)
if index > 0 and index % batch_size == 0:
res = _run_gremlin_insert(client, g)
if res:
g = Graph().traversal()
return _run_gremlin_insert(client, g)
def to_rdf_graph(
client: NeptuneClient,
df: pd.DataFrame,
batch_size: int = 50,
subject_column: str = "s",
predicate_column: str = "p",
object_column: str = "o",
graph_column: str = "g",
) -> bool:
is_quads = False
if pd.Series([subject_column, object_column, predicate_column]).isin(df.columns).all():
if graph_column in df.columns:
is_quads = True
else:
raise exceptions.InvalidArgumentValue(
"""Dataframe must contain at least the subject, predicate, and object columns defined or the defaults
(s, p, o) to be saved to Amazon Neptune"""
)
query = ""
for (index, row) in df.iterrows():
if is_quads:
insert = f"""INSERT DATA {{ GRAPH <{row[graph_column]}> {{<{row[subject_column]}>
<{str(row[predicate_column])}> <{row[object_column]}> . }} }}; """
query = query + insert
else:
insert = f"""INSERT DATA {{ <{row[subject_column]}> <{str(row[predicate_column])}>
<{row[object_column]}> . }}; """
query = query + insert
if index > 0 and index % batch_size == 0:
res = client.write_sparql(query)
if res:
query = ""
return client.write_sparql(query)
def connect(host: str, port: int, iam_enabled: bool = False, **kwargs: Any) -> NeptuneClient:
return NeptuneClient(host, port, iam_enabled, **kwargs)
def _get_column_name(column: str) -> str:
if "(single)" in column.lower():
return re.compile(r"\(single\)", re.IGNORECASE).sub("", column)
return column
def _set_properties(g: GraphTraversalSource, use_header_cardinality: bool, row: Any) -> GraphTraversalSource:
for (column, value) in row.items():
if column not in ["~id", "~label", "~to", "~from"]:
if use_header_cardinality:
if column.lower().find("(single)") > 0 and pd.notna(value):
g = g.property(Cardinality.single, _get_column_name(column), value)
else:
g = _expand_properties(g, _get_column_name(column), value)
else:
g = _expand_properties(g, column, value)
return g
def _expand_properties(g: GraphTraversalSource, column: str, value: Any) -> GraphTraversalSource:
if isinstance(value, list) and len(value) > 0:
for item in value:
g = g.property(Cardinality.set_, column, item)
elif pd.notna(value):
g = g.property(Cardinality.set_, column, value)
return g
def _build_gremlin_update(g: GraphTraversalSource, row: Any, use_header_cardinality: bool) -> GraphTraversalSource:
g = g.V(str(row["~id"]))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_vertices(
g: GraphTraversalSource, row: Any, use_header_cardinality: bool = False
) -> GraphTraversalSource:
g = g.V(str(row["~id"])).fold().coalesce(__.unfold(), __.addV(row["~label"]).property(T.id, str(row["~id"])))
g = _set_properties(g, use_header_cardinality, row)
return g
def _build_gremlin_insert_edges(
g: GraphTraversalSource, row: pd.Series, use_header_cardinality: bool
) -> GraphTraversalSource:
g = (
g.V(str(row["~from"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~from"], "~label": "Vertex"}))
.addE(row["~label"])
.property(T.id, str(row["~id"]))
.to(
__.V(str(row["~to"]))
.fold()
.coalesce(__.unfold(), _build_gremlin_insert_vertices(__, {"~id": row["~to"], "~label": "Vertex"}))
)
)
g = _set_properties(g, use_header_cardinality, row)
return g
def _run_gremlin_insert(client: NeptuneClient, g: GraphTraversalSource) -> bool:
translator = Translator("g")
s = translator.translate(g.bytecode)
s = s.replace("Cardinality.", "")
_logger.debug(s)
res = client.write_gremlin(s)
return res
def flatten_nested_df(
df: pd.DataFrame, include_prefix: bool = True, seperator: str = "_", recursive: bool = True
) -> pd.DataFrame:
if seperator is None:
seperator = "_"
df = df.reset_index()
s = (df.applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df.applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if len(list_columns) > 0 or len(dict_columns) > 0:
new_columns = []
for col in dict_columns:
expanded = None
if include_prefix:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{col}{seperator}")
else:
expanded = pd.json_normalize(df[col], sep=seperator).add_prefix(f"{seperator}")
expanded.index = df.index
df = pd.concat([df, expanded], axis=1).drop(columns=[col])
new_columns.extend(expanded.columns)
for col in list_columns:
df = df.drop(columns=[col]).join(df[col].explode().to_frame())
new_columns.append(col)
s = (df[new_columns].applymap(type) == list).all()
list_columns = s[s].index.tolist()
s = (df[new_columns].applymap(type) == dict).all()
dict_columns = s[s].index.tolist()
if recursive and (len(list_columns) > 0 or len(dict_columns) > 0):
df = flatten_nested_df(df, include_prefix=include_prefix, seperator=seperator, recursive=recursive)
return df
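A usage sketch for the module above, assuming a reachable Amazon Neptune cluster; the endpoint, port and sample rows are placeholders, not values from the source.

import pandas as pd

# Placeholder endpoint/port -- substitute a real Neptune cluster to run this.
client = connect("my-cluster.cluster-xxxx.us-east-1.neptune.amazonaws.com", 8182, iam_enabled=False)

# Vertices for to_property_graph() need at least "~id" and "~label" columns;
# the "(single)" suffix opts a column into single cardinality when
# use_header_cardinality is True.
vertices = pd.DataFrame(
    {
        "~id": ["p-1", "p-2"],
        "~label": ["person", "person"],
        "name": ["ann", "bob"],
        "age (single)": [34, 41],
    }
)
to_property_graph(client, vertices, batch_size=50, use_header_cardinality=True)

# Read the rows back with a Gremlin traversal.
people = execute_gremlin(client, "g.V().hasLabel('person').valueMap(true).limit(10)")
print(people)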
| true
| true
|
790a145bf13e03b8947b4cd75d739377fdb2db45
| 441
|
py
|
Python
|
djeddit/migrations/0002_thread_locked.py
|
EatEmAll/django-djedd
|
d5b988cc94d185320c933f77494f0b1f4680b178
|
[
"Apache-2.0"
] | 43
|
2017-04-22T11:28:21.000Z
|
2022-03-27T15:42:11.000Z
|
djeddit/migrations/0002_thread_locked.py
|
EatEmAll/django-djedd
|
d5b988cc94d185320c933f77494f0b1f4680b178
|
[
"Apache-2.0"
] | 32
|
2017-10-14T13:09:25.000Z
|
2020-05-19T14:18:33.000Z
|
djeddit/migrations/0002_thread_locked.py
|
physics-is-beautiful/django-react-djeddit
|
db7a392c14bbdef25d56a6124b2ed9449c7de337
|
[
"Apache-2.0"
] | 21
|
2017-08-05T12:07:05.000Z
|
2021-12-09T03:37:15.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-29 06:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djeddit', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='thread',
name='locked',
field=models.BooleanField(default=False),
),
]
| 21
| 53
| 0.60771
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djeddit', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='thread',
name='locked',
field=models.BooleanField(default=False),
),
]
| true
| true
|
790a146620e209d719aca4a5990aeca3b589696a
| 951
|
py
|
Python
|
catkin_ws/src/learning_communication/scripts/person_publisher.py
|
Colin1245/ROS-Theory-Application-Shenlan
|
49986c83a2c73c7ab4310fd3f010e1b6bc0de786
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws/src/learning_communication/scripts/person_publisher.py
|
Colin1245/ROS-Theory-Application-Shenlan
|
49986c83a2c73c7ab4310fd3f010e1b6bc0de786
|
[
"Apache-2.0"
] | null | null | null |
catkin_ws/src/learning_communication/scripts/person_publisher.py
|
Colin1245/ROS-Theory-Application-Shenlan
|
49986c83a2c73c7ab4310fd3f010e1b6bc0de786
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This example publishes the /person_info topic with the custom message type learning_communication::PersonMsg
import rospy
from learning_communication.msg import PersonMsg
def velocity_publisher():
    # Initialize the ROS node
rospy.init_node('person_publisher', anonymous=True)
    # Create a Publisher on the /person_info topic, message type PersonMsg, queue size 10
person_info_pub = rospy.Publisher('/person_info', PersonMsg, queue_size=10)
    # Set the loop rate
rate = rospy.Rate(10)
while not rospy.is_shutdown():
        # Build a PersonMsg message
person_msg = PersonMsg()
        person_msg.name = "Tom"
        person_msg.age = 18
        person_msg.sex = PersonMsg.male
        # Publish the message
person_info_pub.publish(person_msg)
rospy.loginfo("Publsh person message[%s, %d, %d]",
person_msg.name, person_msg.age, person_msg.sex)
        # Sleep to keep the configured publishing rate
rate.sleep()
if __name__ == '__main__':
try:
velocity_publisher()
except rospy.ROSInterruptException:
pass
| 25.026316
| 79
| 0.700315
|
import rospy
from learning_communication.msg import PersonMsg
def velocity_publisher():
rospy.init_node('person_publisher', anonymous=True)
person_info_pub = rospy.Publisher('/person_info', PersonMsg, queue_size=10)
rate = rospy.Rate(10)
while not rospy.is_shutdown():
person_msg = PersonMsg()
        person_msg.name = "Tom"
        person_msg.age = 18
        person_msg.sex = PersonMsg.male
person_info_pub.publish(person_msg)
rospy.loginfo("Publsh person message[%s, %d, %d]",
person_msg.name, person_msg.age, person_msg.sex)
rate.sleep()
if __name__ == '__main__':
try:
velocity_publisher()
except rospy.ROSInterruptException:
pass
| false
| true
|
790a14779ac8fa7859adb2b9cf4a8951ef9b3abe
| 234
|
py
|
Python
|
wizer/admin.py
|
lucasace/workoutizer
|
7b12cfa08ffee63c10e53de8fc227f142ecfdc42
|
[
"MIT"
] | null | null | null |
wizer/admin.py
|
lucasace/workoutizer
|
7b12cfa08ffee63c10e53de8fc227f142ecfdc42
|
[
"MIT"
] | null | null | null |
wizer/admin.py
|
lucasace/workoutizer
|
7b12cfa08ffee63c10e53de8fc227f142ecfdc42
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Sport, Activity, Settings, Traces, Lap
admin.site.register(Sport)
admin.site.register(Activity)
admin.site.register(Settings)
admin.site.register(Traces)
admin.site.register(Lap)
| 23.4
| 58
| 0.807692
|
from django.contrib import admin
from .models import Sport, Activity, Settings, Traces, Lap
admin.site.register(Sport)
admin.site.register(Activity)
admin.site.register(Settings)
admin.site.register(Traces)
admin.site.register(Lap)
| true
| true
|
790a150445cdd43759ac5b9dd88e407a7abdeb40
| 676
|
py
|
Python
|
examples/run_msde.py
|
lukapecnik/NiaPy
|
a40ac08a4c06a13019ec5e39cc137461884928b0
|
[
"MIT"
] | null | null | null |
examples/run_msde.py
|
lukapecnik/NiaPy
|
a40ac08a4c06a13019ec5e39cc137461884928b0
|
[
"MIT"
] | null | null | null |
examples/run_msde.py
|
lukapecnik/NiaPy
|
a40ac08a4c06a13019ec5e39cc137461884928b0
|
[
"MIT"
] | 1
|
2020-03-25T16:20:36.000Z
|
2020-03-25T16:20:36.000Z
|
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import random
from NiaPy.algorithms.basic import MultiStrategyDifferentialEvolution
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere
#we will run Differential Evolution for 5 independent runs
for i in range(5):
task = StoppingTask(D=10, nFES=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
algo = MultiStrategyDifferentialEvolution(NP=50, F=0.5, CR=0.9)
best = algo.run(task=task)
print('%s -> %s' % (best[0].x, best[1]))
| 35.578947
| 99
| 0.755917
|
import sys
sys.path.append('../')
import random
from NiaPy.algorithms.basic import MultiStrategyDifferentialEvolution
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere
for i in range(5):
task = StoppingTask(D=10, nFES=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
algo = MultiStrategyDifferentialEvolution(NP=50, F=0.5, CR=0.9)
best = algo.run(task=task)
print('%s -> %s' % (best[0].x, best[1]))
| true
| true
|
790a15edb91b3a98a8ec8ff95da69e0148aa2c7c
| 371
|
py
|
Python
|
problems/007/run.v1.py
|
birdchan/project_euler
|
ec36d3a9e47cbca8c84bdbf41bd93cd49d320364
|
[
"MIT"
] | null | null | null |
problems/007/run.v1.py
|
birdchan/project_euler
|
ec36d3a9e47cbca8c84bdbf41bd93cd49d320364
|
[
"MIT"
] | null | null | null |
problems/007/run.v1.py
|
birdchan/project_euler
|
ec36d3a9e47cbca8c84bdbf41bd93cd49d320364
|
[
"MIT"
] | null | null | null |
import math
def is_prime(num):
if num < 2:
return False
for i in range(num):
if i < 2:
continue
if num % i == 0:
return False
return True
def get_nth_prime(n):
cnt = 0
i = 0
while cnt < n:
i += 1
if is_prime(i):
cnt += 1
return i
if __name__ == '__main__':
    # print(get_nth_prime(6))
    print(get_nth_prime(10001))
| 13.740741
| 28
| 0.568733
|
import math
def is_prime(num):
if num < 2:
return False
for i in range(num):
if i < 2:
continue
if num % i == 0:
return False
return True
def get_nth_prime(n):
cnt = 0
i = 0
while cnt < n:
i += 1
if is_prime(i):
cnt += 1
return i
if __name__ == '__main__':
    print(get_nth_prime(10001))
| false
| true
|
790a17b5e28a6c3768e3529ce2c7f86524db778b
| 899
|
py
|
Python
|
datasets/nba/Game.py
|
MediaBrain-SJTU/GroupNet
|
607541c8843f8b6206b1ffefd2d27fb07dcca073
|
[
"MIT"
] | 1
|
2022-03-28T12:52:40.000Z
|
2022-03-28T12:52:40.000Z
|
datasets/nba/Game.py
|
MediaBrain-SJTU/GroupNet
|
607541c8843f8b6206b1ffefd2d27fb07dcca073
|
[
"MIT"
] | null | null | null |
datasets/nba/Game.py
|
MediaBrain-SJTU/GroupNet
|
607541c8843f8b6206b1ffefd2d27fb07dcca073
|
[
"MIT"
] | null | null | null |
import pandas as pd
from Event import Event
from Team import Team
from Constant import Constant
import numpy as np
class Game:
"""A class for keeping info about the games"""
def __init__(self, path_to_json):
# self.events = None
self.home_team = None
self.guest_team = None
self.event = None
self.path_to_json = path_to_json
def read_json(self):
data_frame = pd.read_json(self.path_to_json)
last_default_index = len(data_frame) - 1
all_trajs = []
for i in range(last_default_index):
event = data_frame['events'][i]
self.event = Event(event)
trajs = self.event.get_traj() # (N,15,11,2)
if len(trajs) > 0:
all_trajs.append(trajs)
# print(i,len(trajs))
all_trajs = np.concatenate(all_trajs,axis=0)
return all_trajs
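A short usage sketch, assuming a local SportVU-style events JSON file; the file name below is a placeholder.

# Hypothetical file name -- any NBA SportVU events JSON dump should work here.
game = Game(path_to_json="0021500001.json")
trajs = game.read_json()
# Per the comment above, each event contributes arrays of shape (N, 15, 11, 2):
# N samples x 15 time steps x 11 agents (presumably ball plus ten players) x (x, y).
print(trajs.shape)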
| 27.242424
| 56
| 0.602892
|
import pandas as pd
from Event import Event
from Team import Team
from Constant import Constant
import numpy as np
class Game:
def __init__(self, path_to_json):
self.home_team = None
self.guest_team = None
self.event = None
self.path_to_json = path_to_json
def read_json(self):
data_frame = pd.read_json(self.path_to_json)
last_default_index = len(data_frame) - 1
all_trajs = []
for i in range(last_default_index):
event = data_frame['events'][i]
self.event = Event(event)
trajs = self.event.get_traj()
if len(trajs) > 0:
all_trajs.append(trajs)
all_trajs = np.concatenate(all_trajs,axis=0)
return all_trajs
| true
| true
|
790a186c945a81d9aa3609e883b00c2fc8ba84d0
| 985
|
py
|
Python
|
config.py
|
kwahome/delivery-bot
|
70b4c6c77d1fe6743b2c5d776315452c5ee85d53
|
[
"MIT"
] | null | null | null |
config.py
|
kwahome/delivery-bot
|
70b4c6c77d1fe6743b2c5d776315452c5ee85d53
|
[
"MIT"
] | null | null | null |
config.py
|
kwahome/delivery-bot
|
70b4c6c77d1fe6743b2c5d776315452c5ee85d53
|
[
"MIT"
] | null | null | null |
import os
class DefaultConfig:
""" Bot Configuration """
HOST = "0.0.0.0"
PORT = 3978
CONNECTION_NAME = os.environ.get("CONNECTION_NAME", "echo-bot")
APP_ID = os.environ.get("MICROSOFT_APP_ID", "")
APP_PASSWORD = os.environ.get("MICROSOFT_APP_PASSWORD", "")
LUIS_APP_ID = os.environ.get("LUIS_APP_ID", "")
LUIS_API_KEY = os.environ.get("LUIS_API_KEY", "")
# LUIS endpoint host name, ie "westus.api.cognitive.microsoft.com"
LUIS_API_HOST_NAME = os.environ.get(
"LUIS_API_HOST_NAME", "westeurope.api.cognitive.microsoft.com"
)
LUIS_IS_DISABLED = True if os.environ.get("LUIS_IS_DISABLED", "False") == "True" else False
# cosmos storage
COSMOS_DB_SERVICE_ENDPOINT = os.environ.get("COSMOS_DB_SERVICE_ENDPOINT", "")
COSMOS_DB_KEY = os.environ.get("COSMOS_DB_KEY", "")
COSMOS_DB_DATABASE_ID = os.environ.get("COSMOS_DB_DATABASE_ID", "")
COSMOS_DB_CONTAINER_ID = os.environ.get("COSMOS_DB_CONTAINER_ID", "")
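A small sketch of how the settings above might be consumed; the class attributes read the environment at import time, so variables must be set before the module is imported. The module name and values below are assumptions.

import os

# Placeholder values -- real credentials belong in the deployment environment.
os.environ["MICROSOFT_APP_ID"] = "app-id-placeholder"
os.environ["LUIS_IS_DISABLED"] = "True"

from config import DefaultConfig  # assumes this file is saved as config.py

cfg = DefaultConfig()
print(cfg.HOST, cfg.PORT)
print("LUIS disabled:", cfg.LUIS_IS_DISABLED)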
| 35.178571
| 95
| 0.694416
|
import os
class DefaultConfig:
HOST = "0.0.0.0"
PORT = 3978
CONNECTION_NAME = os.environ.get("CONNECTION_NAME", "echo-bot")
APP_ID = os.environ.get("MICROSOFT_APP_ID", "")
APP_PASSWORD = os.environ.get("MICROSOFT_APP_PASSWORD", "")
LUIS_APP_ID = os.environ.get("LUIS_APP_ID", "")
LUIS_API_KEY = os.environ.get("LUIS_API_KEY", "")
LUIS_API_HOST_NAME = os.environ.get(
"LUIS_API_HOST_NAME", "westeurope.api.cognitive.microsoft.com"
)
LUIS_IS_DISABLED = True if os.environ.get("LUIS_IS_DISABLED", "False") == "True" else False
COSMOS_DB_SERVICE_ENDPOINT = os.environ.get("COSMOS_DB_SERVICE_ENDPOINT", "")
COSMOS_DB_KEY = os.environ.get("COSMOS_DB_KEY", "")
COSMOS_DB_DATABASE_ID = os.environ.get("COSMOS_DB_DATABASE_ID", "")
COSMOS_DB_CONTAINER_ID = os.environ.get("COSMOS_DB_CONTAINER_ID", "")
| true
| true
|
790a18a5d898b6d8101ac1abd069d71bf03ec044
| 616
|
py
|
Python
|
kmux/tmux.py
|
kiemlicz/kmux
|
a1cf523cb02fc49b768780645c6287f30d2b7b83
|
[
"MIT"
] | null | null | null |
kmux/tmux.py
|
kiemlicz/kmux
|
a1cf523cb02fc49b768780645c6287f30d2b7b83
|
[
"MIT"
] | null | null | null |
kmux/tmux.py
|
kiemlicz/kmux
|
a1cf523cb02fc49b768780645c6287f30d2b7b83
|
[
"MIT"
] | null | null | null |
import libtmux
def ensure_server() -> libtmux.Server:
'''
Either create new or return existing server
'''
return libtmux.Server()
def spawn_session(name: str, kubeconfig_location: str, server: libtmux.Server):
if server.has_session(name):
return
else:
session = server.new_session(name)
session.set_environment("KUBECONFIG", kubeconfig_location)
# the new_session will create default window and pane which will not contain KUBECONFIG, add manually
session.attached_window.attached_pane.send_keys("export KUBECONFIG={}".format(kubeconfig_location))
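A usage sketch, assuming tmux and libtmux are installed; the session name and kubeconfig path are placeholders.

# Placeholder session name and kubeconfig path.
server = ensure_server()
spawn_session("k8s-dev", "/home/user/.kube/config", server)

# Calling it again is a no-op: spawn_session() returns early when a session
# with the same name already exists on the server.
spawn_session("k8s-dev", "/home/user/.kube/config", server)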
| 32.421053
| 109
| 0.719156
|
import libtmux
def ensure_server() -> libtmux.Server:
return libtmux.Server()
def spawn_session(name: str, kubeconfig_location: str, server: libtmux.Server):
if server.has_session(name):
return
else:
session = server.new_session(name)
session.set_environment("KUBECONFIG", kubeconfig_location)
session.attached_window.attached_pane.send_keys("export KUBECONFIG={}".format(kubeconfig_location))
| true
| true
|
790a18adbfcf2eef41bdbdb8f2ba70a560921c94
| 2,591
|
py
|
Python
|
test/test_b_plot.py
|
cperales/Fourier-classifying-songs
|
54d13e2ce2d7d05fe7126bbbd884917758188d6d
|
[
"MIT"
] | 15
|
2018-10-07T07:48:23.000Z
|
2019-03-16T15:50:54.000Z
|
test/test_b_plot.py
|
cperales/Fourier-classifying-songs
|
54d13e2ce2d7d05fe7126bbbd884917758188d6d
|
[
"MIT"
] | 7
|
2018-10-17T18:13:29.000Z
|
2018-10-24T12:19:40.000Z
|
test/test_b_plot.py
|
cperales/Fourier-classifying-songs
|
54d13e2ce2d7d05fe7126bbbd884917758188d6d
|
[
"MIT"
] | 1
|
2020-10-22T22:28:28.000Z
|
2020-10-22T22:28:28.000Z
|
import unittest
from foucluster.plot import song_plot, diff_plot, heatmap_song
import configparser
import os
import json
from scipy.io.wavfile import read
import numpy as np
import pandas as pd
class TestPlot(unittest.TestCase):
@staticmethod
def _get_series(i=0):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
fourier_folder = config['Folder']['Output']
first_file = os.path.join(fourier_folder,
os.listdir(fourier_folder)[i])
with open(first_file, 'r') as b:
j = json.load(b)
name = list(j.keys())[0]
song = j[name]
return song, name
@staticmethod
def _get_song(i=0):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
song_folder = config['Folder']['Temp']
first_song = os.listdir(song_folder)[i]
rate, aud_data = read(os.path.join(song_folder,
first_song))
# Should be mono
if len(aud_data) != len(aud_data.ravel()):
aud_data = np.mean(aud_data, axis=1)
return aud_data,first_song
def test_diff(self):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
song_1, name_1 = self._get_series(i=0)
song_2, name_2 = self._get_series(i=1)
diff_plot(song_1, song_2,
filename=name_1.split()[2].split('.')[0] + name_2.split()[2].split('.')[0],
folder=image_folder)
def test_song(self):
"""
:return:
"""
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
aud_data, name = self._get_song()
song_plot(aud_data,
filename=name.split('.')[0],
folder=image_folder)
def test_heatmap(self):
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
distance_folder = config['Folder']['Distance']
df = pd.read_csv(os.path.join(distance_folder, 'positive.csv'),
sep=';',
index_col=[0, 1])
heatmap_song(df,
image_name='heatmap_positive',
image_folder=image_folder)
if __name__ == '__main__':
unittest.main()
| 29.11236
| 93
| 0.547665
|
import unittest
from foucluster.plot import song_plot, diff_plot, heatmap_song
import configparser
import os
import json
from scipy.io.wavfile import read
import numpy as np
import pandas as pd
class TestPlot(unittest.TestCase):
@staticmethod
def _get_series(i=0):
config = configparser.ConfigParser()
config.read('config.ini')
fourier_folder = config['Folder']['Output']
first_file = os.path.join(fourier_folder,
os.listdir(fourier_folder)[i])
with open(first_file, 'r') as b:
j = json.load(b)
name = list(j.keys())[0]
song = j[name]
return song, name
@staticmethod
def _get_song(i=0):
config = configparser.ConfigParser()
config.read('config.ini')
song_folder = config['Folder']['Temp']
first_song = os.listdir(song_folder)[i]
rate, aud_data = read(os.path.join(song_folder,
first_song))
if len(aud_data) != len(aud_data.ravel()):
aud_data = np.mean(aud_data, axis=1)
return aud_data,first_song
def test_diff(self):
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
song_1, name_1 = self._get_series(i=0)
song_2, name_2 = self._get_series(i=1)
diff_plot(song_1, song_2,
filename=name_1.split()[2].split('.')[0] + name_2.split()[2].split('.')[0],
folder=image_folder)
def test_song(self):
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
aud_data, name = self._get_song()
song_plot(aud_data,
filename=name.split('.')[0],
folder=image_folder)
def test_heatmap(self):
config = configparser.ConfigParser()
config.read('config.ini')
image_folder = config['Folder']['Image']
distance_folder = config['Folder']['Distance']
df = pd.read_csv(os.path.join(distance_folder, 'positive.csv'),
sep=';',
index_col=[0, 1])
heatmap_song(df,
image_name='heatmap_positive',
image_folder=image_folder)
if __name__ == '__main__':
unittest.main()
| true
| true
|
790a18dffd0d0a5f2094714c60d754f3b4807b8e
| 2,745
|
py
|
Python
|
samples/client/petstore/python/tests/test_api_exception.py
|
deerghayu/swagger-codegen
|
1e33403e381be1db91b1427d3f70f7476cb4da67
|
[
"Apache-2.0"
] | 1
|
2016-12-01T21:19:51.000Z
|
2016-12-01T21:19:51.000Z
|
samples/client/petstore/python/tests/test_api_exception.py
|
deerghayu/swagger-codegen
|
1e33403e381be1db91b1427d3f70f7476cb4da67
|
[
"Apache-2.0"
] | 5
|
2017-01-05T09:41:49.000Z
|
2018-03-04T19:38:14.000Z
|
samples/client/petstore/python/tests/test_api_exception.py
|
deerghayu/swagger-codegen
|
1e33403e381be1db91b1427d3f70f7476cb4da67
|
[
"Apache-2.0"
] | 5
|
2020-04-01T02:37:35.000Z
|
2021-04-07T08:04:07.000Z
|
# coding: utf-8
"""
Run the tests.
$ pip install nose (optional)
$ cd swagger_client-python
$ nosetests -v
"""
import os
import sys
import time
import unittest
import swagger_client
from swagger_client.rest import ApiException
class ApiExceptionTests(unittest.TestCase):
def setUp(self):
self.api_client = swagger_client.ApiClient()
self.pet_api = swagger_client.PetApi(self.api_client)
self.setUpModels()
def setUpModels(self):
self.category = swagger_client.Category()
self.category.id = int(time.time())
self.category.name = "dog"
self.tag = swagger_client.Tag()
self.tag.id = int(time.time())
self.tag.name = "blank"
self.pet = swagger_client.Pet()
self.pet.id = int(time.time())
self.pet.name = "hello kity"
self.pet.photo_urls = ["http://foo.bar.com/1", "http://foo.bar.com/2"]
self.pet.status = "sold"
self.pet.category = self.category
self.pet.tags = [self.tag]
def tearDown(self):
time.sleep(1)
def test_404_error(self):
self.pet_api.add_pet(body=self.pet)
self.pet_api.delete_pet(pet_id=self.pet.id)
with self.checkRaiseRegex(ApiException, "Pet not found"):
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
try:
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
except ApiException as e:
self.assertEqual(e.status, 404)
self.assertEqual(e.reason, "Not Found")
self.checkRegex(e.body, "Pet not found")
def test_500_error(self):
self.pet_api.add_pet(body=self.pet)
with self.checkRaiseRegex(ApiException, "Internal Server Error"):
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata="special",
file=None
)
try:
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata="special",
file=None
)
except ApiException as e:
self.assertEqual(e.status, 500)
self.assertEqual(e.reason, "Internal Server Error")
self.checkRegex(e.body, "Error 500 Internal Server Error")
def checkRaiseRegex(self, expected_exception, expected_regex):
if sys.version_info < (3, 0):
return self.assertRaisesRegexp(expected_exception, expected_regex)
return self.assertRaisesRegex(expected_exception, expected_regex)
def checkRegex(self, text, expected_regex):
if sys.version_info < (3, 0):
return self.assertRegexpMatches(text, expected_regex)
return self.assertRegex(text, expected_regex)
| 30.164835
| 78
| 0.622222
|
import os
import sys
import time
import unittest
import swagger_client
from swagger_client.rest import ApiException
class ApiExceptionTests(unittest.TestCase):
def setUp(self):
self.api_client = swagger_client.ApiClient()
self.pet_api = swagger_client.PetApi(self.api_client)
self.setUpModels()
def setUpModels(self):
self.category = swagger_client.Category()
self.category.id = int(time.time())
self.category.name = "dog"
self.tag = swagger_client.Tag()
self.tag.id = int(time.time())
self.tag.name = "blank"
self.pet = swagger_client.Pet()
self.pet.id = int(time.time())
self.pet.name = "hello kity"
self.pet.photo_urls = ["http://foo.bar.com/1", "http://foo.bar.com/2"]
self.pet.status = "sold"
self.pet.category = self.category
self.pet.tags = [self.tag]
def tearDown(self):
time.sleep(1)
def test_404_error(self):
self.pet_api.add_pet(body=self.pet)
self.pet_api.delete_pet(pet_id=self.pet.id)
with self.checkRaiseRegex(ApiException, "Pet not found"):
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
try:
self.pet_api.get_pet_by_id(pet_id=self.pet.id)
except ApiException as e:
self.assertEqual(e.status, 404)
self.assertEqual(e.reason, "Not Found")
self.checkRegex(e.body, "Pet not found")
def test_500_error(self):
self.pet_api.add_pet(body=self.pet)
with self.checkRaiseRegex(ApiException, "Internal Server Error"):
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata="special",
file=None
)
try:
self.pet_api.upload_file(
pet_id=self.pet.id,
additional_metadata="special",
file=None
)
except ApiException as e:
self.assertEqual(e.status, 500)
self.assertEqual(e.reason, "Internal Server Error")
self.checkRegex(e.body, "Error 500 Internal Server Error")
def checkRaiseRegex(self, expected_exception, expected_regex):
if sys.version_info < (3, 0):
return self.assertRaisesRegexp(expected_exception, expected_regex)
return self.assertRaisesRegex(expected_exception, expected_regex)
def checkRegex(self, text, expected_regex):
if sys.version_info < (3, 0):
return self.assertRegexpMatches(text, expected_regex)
return self.assertRegex(text, expected_regex)
| true
| true
|
790a1953e87b99b07c5ccf273009f041d8f7eaba
| 1,075
|
py
|
Python
|
kubernetes/test/test_v2beta1_horizontal_pod_autoscaler.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v2beta1_horizontal_pod_autoscaler.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v2beta1_horizontal_pod_autoscaler.py
|
scele/kubernetes-client-python
|
9e982cbdb5f19dc1a3935a75bdd92288f3b807fb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2beta1_horizontal_pod_autoscaler import V2beta1HorizontalPodAutoscaler
class TestV2beta1HorizontalPodAutoscaler(unittest.TestCase):
""" V2beta1HorizontalPodAutoscaler unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta1HorizontalPodAutoscaler(self):
"""
Test V2beta1HorizontalPodAutoscaler
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v2beta1_horizontal_pod_autoscaler.V2beta1HorizontalPodAutoscaler()
pass
if __name__ == '__main__':
unittest.main()
| 23.888889
| 108
| 0.739535
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2beta1_horizontal_pod_autoscaler import V2beta1HorizontalPodAutoscaler
class TestV2beta1HorizontalPodAutoscaler(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV2beta1HorizontalPodAutoscaler(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
790a1c76526cf01bdf87d418e5e2a9bd0bc77e5d
| 6,614
|
py
|
Python
|
vispy/visuals/tests/test_image.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 1
|
2021-08-04T06:31:02.000Z
|
2021-08-04T06:31:02.000Z
|
vispy/visuals/tests/test_image.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 1
|
2021-06-04T13:48:46.000Z
|
2021-06-05T10:57:33.000Z
|
vispy/visuals/tests/test_image.py
|
3DAlgoLab/vispy
|
91972307cf336674aad58198fb26b9e46f8f9ca1
|
[
"BSD-3-Clause"
] | 1
|
2018-09-17T07:00:38.000Z
|
2018-09-17T07:00:38.000Z
|
# -*- coding: utf-8 -*-
from unittest import mock
from vispy.scene.visuals import Image
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved, downsample
import numpy as np
import pytest
@requires_application()
@pytest.mark.parametrize('is_3d', [True, False])
def test_image(is_3d):
"""Test image visual"""
size = (100, 50)
with TestingCanvas(size=size, bgcolor='w') as c:
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
shape = (size[1]-10, size[0]-10) + ((3,) if is_3d else ())
np.random.seed(379823)
data = np.random.rand(*shape)
image.set_data(data)
assert_image_approved(c.render(), "visuals/image%s.png" %
("_rgb" if is_3d else "_mono"))
def _make_test_data(shape, input_dtype):
data = np.random.random_sample(shape)
if data.ndim == 3 and data.shape[-1] == 4:
# RGBA - make alpha fully opaque
data[..., -1] = 1.0
max_val = _max_for_dtype(input_dtype)
if max_val != 1:
data *= max_val
data = data.astype(input_dtype)
return data
def _compare_render(orig_data, rendered_data, previous_render=None, atol=1):
predicted = _make_rgba(orig_data)
np.testing.assert_allclose(rendered_data.astype(float), predicted.astype(float), atol=atol)
if previous_render is not None:
# assert not allclose
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_data, previous_render, atol=10)
def _set_image_data(image, data, should_fail):
if should_fail:
pytest.raises(ValueError, image.set_data, data)
return
image.set_data(data)
def _max_for_dtype(input_dtype):
if np.issubdtype(input_dtype, np.integer):
max_val = np.iinfo(input_dtype).max
else:
max_val = 1.0
return max_val
def _get_orig_and_new_clims(input_dtype):
new_clim = (0.3, 0.8)
max_val = _max_for_dtype(input_dtype)
if np.issubdtype(input_dtype, np.integer):
new_clim = (int(new_clim[0] * max_val), int(new_clim[1] * max_val))
return (0, max_val), new_clim
@requires_application()
@pytest.mark.parametrize('data_on_init', [False, True])
@pytest.mark.parametrize('clim_on_init', [False, True])
@pytest.mark.parametrize('num_channels', [0, 1, 3, 4])
@pytest.mark.parametrize('texture_format', [None, '__dtype__', 'auto'])
@pytest.mark.parametrize('input_dtype', [np.uint8, np.uint16, np.float32, np.float64])
def test_image_clims_and_gamma(input_dtype, texture_format, num_channels,
clim_on_init, data_on_init):
"""Test image visual with clims and gamma on shader."""
size = (40, 40)
if texture_format == '__dtype__':
texture_format = input_dtype
shape = size + (num_channels,) if num_channels > 0 else size
np.random.seed(0)
data = _make_test_data(shape, input_dtype)
orig_clim, new_clim = _get_orig_and_new_clims(input_dtype)
# 16-bit integers and above seem to have precision loss when scaled on the CPU
is_16int_cpu_scaled = (np.dtype(input_dtype).itemsize >= 2 and
np.issubdtype(input_dtype, np.integer) and
texture_format is None)
clim_atol = 2 if is_16int_cpu_scaled else 1
gamma_atol = 3 if is_16int_cpu_scaled else 2
kwargs = {}
if clim_on_init:
kwargs['clim'] = orig_clim
if data_on_init:
kwargs['data'] = data
# default is RGBA, anything except auto requires reformat
set_data_fails = (num_channels != 4 and
texture_format is not None and
texture_format != 'auto')
with TestingCanvas(size=size[::-1], bgcolor="w") as c:
image = Image(cmap='grays', texture_format=texture_format,
parent=c.scene, **kwargs)
if not data_on_init:
_set_image_data(image, data, set_data_fails)
if set_data_fails:
return
rendered = c.render()
_dtype = rendered.dtype
shape_ratio = rendered.shape[0] // data.shape[0]
rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(data, rendered1)
# adjust color limits
image.clim = new_clim
rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
scaled_data = (np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) / (new_clim[1] - new_clim[0])
_compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)
# adjust gamma
image.gamma = 2
rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(scaled_data ** 2, rendered3, rendered2, atol=gamma_atol)
@requires_application()
def test_image_vertex_updates():
"""Test image visual coordinates are only built when needed."""
size = (40, 40)
with TestingCanvas(size=size, bgcolor="w") as c:
shape = size + (3,)
np.random.seed(0)
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
with mock.patch.object(
image, '_build_vertex_data',
wraps=image._build_vertex_data) as build_vertex_mock:
data = np.random.rand(*shape)
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
build_vertex_mock.reset_mock() # reset the count to 0
# rendering again shouldn't cause vertex coordinates to be built
c.render()
build_vertex_mock.assert_not_called()
# changing to data of the same shape shouldn't cause it
data = np.zeros_like(data)
image.set_data(data)
c.render()
build_vertex_mock.assert_not_called()
# changing to another shape should
data = data[:-5, :-5]
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
def _make_rgba(data_in):
max_val = _max_for_dtype(data_in.dtype)
if data_in.ndim == 3 and data_in.shape[-1] == 1:
data_in = data_in.squeeze()
if data_in.ndim == 2:
out = np.stack([data_in] * 4, axis=2)
out[:, :, 3] = max_val
elif data_in.shape[-1] == 3:
out = np.concatenate((data_in, np.ones((*data_in.shape[:2], 1)) * max_val), axis=2)
else:
out = data_in
return np.round((out.astype(np.float) * 255 / max_val)).astype(np.uint8)
run_tests_if_main()
| 36.744444
| 107
| 0.633958
|
from unittest import mock
from vispy.scene.visuals import Image
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved, downsample
import numpy as np
import pytest
@requires_application()
@pytest.mark.parametrize('is_3d', [True, False])
def test_image(is_3d):
size = (100, 50)
with TestingCanvas(size=size, bgcolor='w') as c:
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
shape = (size[1]-10, size[0]-10) + ((3,) if is_3d else ())
np.random.seed(379823)
data = np.random.rand(*shape)
image.set_data(data)
assert_image_approved(c.render(), "visuals/image%s.png" %
("_rgb" if is_3d else "_mono"))
def _make_test_data(shape, input_dtype):
data = np.random.random_sample(shape)
if data.ndim == 3 and data.shape[-1] == 4:
data[..., -1] = 1.0
max_val = _max_for_dtype(input_dtype)
if max_val != 1:
data *= max_val
data = data.astype(input_dtype)
return data
def _compare_render(orig_data, rendered_data, previous_render=None, atol=1):
predicted = _make_rgba(orig_data)
np.testing.assert_allclose(rendered_data.astype(float), predicted.astype(float), atol=atol)
if previous_render is not None:
pytest.raises(AssertionError, np.testing.assert_allclose,
rendered_data, previous_render, atol=10)
def _set_image_data(image, data, should_fail):
if should_fail:
pytest.raises(ValueError, image.set_data, data)
return
image.set_data(data)
def _max_for_dtype(input_dtype):
if np.issubdtype(input_dtype, np.integer):
max_val = np.iinfo(input_dtype).max
else:
max_val = 1.0
return max_val
def _get_orig_and_new_clims(input_dtype):
new_clim = (0.3, 0.8)
max_val = _max_for_dtype(input_dtype)
if np.issubdtype(input_dtype, np.integer):
new_clim = (int(new_clim[0] * max_val), int(new_clim[1] * max_val))
return (0, max_val), new_clim
@requires_application()
@pytest.mark.parametrize('data_on_init', [False, True])
@pytest.mark.parametrize('clim_on_init', [False, True])
@pytest.mark.parametrize('num_channels', [0, 1, 3, 4])
@pytest.mark.parametrize('texture_format', [None, '__dtype__', 'auto'])
@pytest.mark.parametrize('input_dtype', [np.uint8, np.uint16, np.float32, np.float64])
def test_image_clims_and_gamma(input_dtype, texture_format, num_channels,
clim_on_init, data_on_init):
size = (40, 40)
if texture_format == '__dtype__':
texture_format = input_dtype
shape = size + (num_channels,) if num_channels > 0 else size
np.random.seed(0)
data = _make_test_data(shape, input_dtype)
orig_clim, new_clim = _get_orig_and_new_clims(input_dtype)
is_16int_cpu_scaled = (np.dtype(input_dtype).itemsize >= 2 and
np.issubdtype(input_dtype, np.integer) and
texture_format is None)
clim_atol = 2 if is_16int_cpu_scaled else 1
gamma_atol = 3 if is_16int_cpu_scaled else 2
kwargs = {}
if clim_on_init:
kwargs['clim'] = orig_clim
if data_on_init:
kwargs['data'] = data
set_data_fails = (num_channels != 4 and
texture_format is not None and
texture_format != 'auto')
with TestingCanvas(size=size[::-1], bgcolor="w") as c:
image = Image(cmap='grays', texture_format=texture_format,
parent=c.scene, **kwargs)
if not data_on_init:
_set_image_data(image, data, set_data_fails)
if set_data_fails:
return
rendered = c.render()
_dtype = rendered.dtype
shape_ratio = rendered.shape[0] // data.shape[0]
rendered1 = downsample(rendered, shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(data, rendered1)
image.clim = new_clim
rendered2 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
scaled_data = (np.clip(data, new_clim[0], new_clim[1]) - new_clim[0]) / (new_clim[1] - new_clim[0])
_compare_render(scaled_data, rendered2, rendered1, atol=clim_atol)
image.gamma = 2
rendered3 = downsample(c.render(), shape_ratio, axis=(0, 1)).astype(_dtype)
_compare_render(scaled_data ** 2, rendered3, rendered2, atol=gamma_atol)
@requires_application()
def test_image_vertex_updates():
size = (40, 40)
with TestingCanvas(size=size, bgcolor="w") as c:
shape = size + (3,)
np.random.seed(0)
image = Image(cmap='grays', clim=[0, 1], parent=c.scene)
with mock.patch.object(
image, '_build_vertex_data',
wraps=image._build_vertex_data) as build_vertex_mock:
data = np.random.rand(*shape)
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
build_vertex_mock.reset_mock()
c.render()
build_vertex_mock.assert_not_called()
# changing to data of the same shape shouldn't cause it
data = np.zeros_like(data)
image.set_data(data)
c.render()
build_vertex_mock.assert_not_called()
data = data[:-5, :-5]
image.set_data(data)
c.render()
build_vertex_mock.assert_called_once()
def _make_rgba(data_in):
max_val = _max_for_dtype(data_in.dtype)
if data_in.ndim == 3 and data_in.shape[-1] == 1:
data_in = data_in.squeeze()
if data_in.ndim == 2:
out = np.stack([data_in] * 4, axis=2)
out[:, :, 3] = max_val
elif data_in.shape[-1] == 3:
out = np.concatenate((data_in, np.ones((*data_in.shape[:2], 1)) * max_val), axis=2)
else:
out = data_in
return np.round((out.astype(np.float) * 255 / max_val)).astype(np.uint8)
run_tests_if_main()
| true
| true
|
790a1ca85391110f418f3a086954422214318656
| 6,901
|
py
|
Python
|
Robotics/src/otonomgorev.py
|
ahmetakif/Voice-Controlled-Raspberry-Pi-Robot
|
00dcc15dfbb7441d6403fb0467b2144e8750cc0c
|
[
"Apache-2.0"
] | 5
|
2019-08-21T08:08:27.000Z
|
2021-06-14T06:56:50.000Z
|
Robotics/src/otonomgorev.py
|
ahmetakif/Voice-Controlled-Raspberry-Pi-Robot
|
00dcc15dfbb7441d6403fb0467b2144e8750cc0c
|
[
"Apache-2.0"
] | null | null | null |
Robotics/src/otonomgorev.py
|
ahmetakif/Voice-Controlled-Raspberry-Pi-Robot
|
00dcc15dfbb7441d6403fb0467b2144e8750cc0c
|
[
"Apache-2.0"
] | 2
|
2019-08-21T08:16:58.000Z
|
2021-04-07T11:56:11.000Z
|
import os
import RPi.GPIO as gpio
import time
import random
from mesafe import distance
motorhizi = 2.5
hiz = 100
aci2 = aci3 = aci4 = 6
aci = 5.5
in4 = 26
in3 = 4
in2 = 12
in1 = 8
solled = 9
sagled = 11
gpio.setwarnings(False)
def init():
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(22,gpio.OUT)
gpio.setup(27,gpio.OUT)
gpio.setup(17,gpio.OUT)
gpio.setup(18,gpio.OUT)
gpio.setup(in4,gpio.OUT)
gpio.setup(in3,gpio.OUT)
gpio.setup(in2,gpio.OUT)
gpio.setup(in1,gpio.OUT)
gpio.setup(21,gpio.OUT)
gpio.setup(solled,gpio.OUT)
gpio.setup(sagled,gpio.OUT)
gpio.setup(23,gpio.IN)
gpio.setup(24,gpio.IN)
gpio.output(22,0)
gpio.output(18,0)
gpio.output(17,0)
gpio.output(27,0)
gpio.output(in4,0)
gpio.output(in3,0)
gpio.output(in2,0)
gpio.output(in1,0)
gpio.output(21,0)
gpio.output(solled,0)
gpio.output(sagled,0)
def ileri(tf,ff):
init()
gpio.output(17,0)
gpio.output(22,0)
ip = gpio.PWM(27,50)
ip2 = gpio.PWM(18,50)
ip.start(ff)
ip2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def geri(tf,ff):
init()
gpio.output(18,0)
gpio.output(27,0)
gp = gpio.PWM(22,50)
gp2 = gpio.PWM(17,50)
gp.start(ff)
gp2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def sol(tf,ff):
init()
gpio.output(17,0)
gpio.output(27,0)
sp = gpio.PWM(22,50)
sp2 = gpio.PWM(18,50)
sp.start(ff)
sp2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def sag(tf,ff):
init()
gpio.output(18,0)
gpio.output(22,0)
sap = gpio.PWM(27,50)
sap2 = gpio.PWM(17,50)
sap.start(ff)
sap2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def dur():
init()
gpio.output(22,0)
gpio.output(17,0)
gpio.output(18,0)
gpio.output(27,0)
gpio.cleanup()
def adim1(tf,y):
init()
    if (y == 1): # left
gpio.output(in1,1)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,0)
    if (y == 0): # right
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,1)
time.sleep(tf)
gpio.cleanup()
def adim2(tf,y):
init()
    if (y == 1): # left
gpio.output(in1,0)
gpio.output(in2,1)
gpio.output(in3,0)
gpio.output(in4,0)
    if (y == 0): # right
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,1)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def adim3(tf,y):
init()
    if (y == 1): # left
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,1)
gpio.output(in4,0)
    if (y == 0): # right
gpio.output(in1,0)
gpio.output(in2,1)
gpio.output(in3,0)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def adim4(tf,y):
init()
    if (y == 1): # left
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,1)
    if (y == 0): # right
gpio.output(in1,1)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def stepper(tf,ff,yf):
ff = float(ff)
ff = ff / 1000
    if (yf == 0): # right
for i in range(0,tf):
adim1(ff,0)
adim2(ff,0)
adim3(ff,0)
adim4(ff,0)
    if (yf == 1): # left
for i in range(0,tf):
adim1(ff,1)
adim2(ff,1)
adim3(ff,1)
adim4(ff,1)
def servo(tf):
gpio.setmode(gpio.BCM)
gpio.setup(5,gpio.OUT)
p = gpio.PWM(5,50)
p.start(5.5)
p.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo2(tf):
gpio.setmode(gpio.BCM)
gpio.setup(6,gpio.OUT)
p2 = gpio.PWM(6,50)
p2.start(6)
p2.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo3(tf):
gpio.setmode(gpio.BCM)
gpio.setup(20,gpio.OUT)
p3 = gpio.PWM(20,50)
p3.start(6)
p3.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo4(tf):
gpio.setmode(gpio.BCM)
gpio.setup(16,gpio.OUT)
p3 = gpio.PWM(16,50)
p3.start(6)
p3.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def ses(tf,ff):
init()
sp = gpio.PWM(21,ff)
sp.start(70)
time.sleep(tf)
gpio.cleanup()
def led(ff,tf,sf):
init()
sp = gpio.PWM(solled,500)
sap = gpio.PWM(sagled,500)
if (sf == 0):
sp.start(ff)
time.sleep(tf)
gpio.cleanup()
elif (sf == 1):
sap.start(ff)
time.sleep(tf)
gpio.cleanup()
elif (sf == 2):
sp.start(ff)
sap.start(ff)
time.sleep(tf)
gpio.cleanup()
def kapat():
os.system("pkill -9 -f main.py")
def kontrol():
x = random.randrange(1,3)
if (x == 1):
print ("sagabak")
servo(3)
time.sleep(0.05)
dis = distance('cm')
print (dis)
if dis < 15:
print ("solabak")
servo(9)
dis = distance('cm')
if dis < 15:
print ("cik")
servo(5.5)
geri(2,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sol(0.7,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sag(0.7,hiz)
if (x == 2):
print ("solabak")
servo(9)
time.sleep(0.05)
dis = distance('cm')
print (dis)
if dis < 15:
print ("sagabak")
servo(3)
dis = distance('cm')
if dis < 15:
print ("cik")
servo(5.5)
geri(2,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sag(0.7,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sol(0.7,hiz)
print (" ")
print ("otonomgorev yazilimi google speech api sesli komutlari ile robotun otonom hareket etmesi icin yazilmistir")
print (" ")
time.sleep(1)
def cizgi(rf):
for i in range(0,rf):
dis = distance('cm')
init()
if (gpio.input(23) == 0 and gpio.input(24) == 0):
ileri(0.1,hiz)
elif (gpio.input(23) == 1 and gpio.input(24) == 0):
sol(0.1,hiz)
elif (gpio.input(23) == 0 and gpio.input(24) == 1):
sag(0.1,hiz)
else:
pass
if dis < 15:
print ("cok dar",dis)
geri(0.5,hiz)
servo(5.5)
kontrol()
elif dis < 25:
print ("dar",dis)
else:
print ("temiz",dis)
dur()
aci2 = aci3 = aci4 = 6
aci = 5.5
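A drive-sequence sketch, only meaningful on a Raspberry Pi wired the way this module expects (motor driver on the pins configured in init(), ultrasonic sensor behind distance()); the durations and iteration count are placeholders.

# Placeholder sequence -- requires the actual robot hardware.
ileri(1, hiz)    # forward for roughly one second at full speed
sag(0.7, hiz)    # pivot right
servo(5.5)       # centre the sensor servo
cizgi(20)        # follow the line for 20 control iterations with obstacle checks
dur()            # stop the motors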
| 20.848943
| 115
| 0.501522
|
import os
import RPi.GPIO as gpio
import time
import random
from mesafe import distance
motorhizi = 2.5
hiz = 100
aci2 = aci3 = aci4 = 6
aci = 5.5
in4 = 26
in3 = 4
in2 = 12
in1 = 8
solled = 9
sagled = 11
gpio.setwarnings(False)
def init():
gpio.setwarnings(False)
gpio.setmode(gpio.BCM)
gpio.setup(22,gpio.OUT)
gpio.setup(27,gpio.OUT)
gpio.setup(17,gpio.OUT)
gpio.setup(18,gpio.OUT)
gpio.setup(in4,gpio.OUT)
gpio.setup(in3,gpio.OUT)
gpio.setup(in2,gpio.OUT)
gpio.setup(in1,gpio.OUT)
gpio.setup(21,gpio.OUT)
gpio.setup(solled,gpio.OUT)
gpio.setup(sagled,gpio.OUT)
gpio.setup(23,gpio.IN)
gpio.setup(24,gpio.IN)
gpio.output(22,0)
gpio.output(18,0)
gpio.output(17,0)
gpio.output(27,0)
gpio.output(in4,0)
gpio.output(in3,0)
gpio.output(in2,0)
gpio.output(in1,0)
gpio.output(21,0)
gpio.output(solled,0)
gpio.output(sagled,0)
def ileri(tf,ff):
init()
gpio.output(17,0)
gpio.output(22,0)
ip = gpio.PWM(27,50)
ip2 = gpio.PWM(18,50)
ip.start(ff)
ip2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def geri(tf,ff):
init()
gpio.output(18,0)
gpio.output(27,0)
gp = gpio.PWM(22,50)
gp2 = gpio.PWM(17,50)
gp.start(ff)
gp2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def sol(tf,ff):
init()
gpio.output(17,0)
gpio.output(27,0)
sp = gpio.PWM(22,50)
sp2 = gpio.PWM(18,50)
sp.start(ff)
sp2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def sag(tf,ff):
init()
gpio.output(18,0)
gpio.output(22,0)
sap = gpio.PWM(27,50)
sap2 = gpio.PWM(17,50)
sap.start(ff)
sap2.start(ff)
tf = float(tf)
tf = tf / motorhizi
time.sleep(tf)
gpio.cleanup()
def dur():
init()
gpio.output(22,0)
gpio.output(17,0)
gpio.output(18,0)
gpio.output(27,0)
gpio.cleanup()
def adim1(tf,y):
init()
if (y == 1):
gpio.output(in1,1)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,0)
if (y == 0):
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,1)
time.sleep(tf)
gpio.cleanup()
def adim2(tf,y):
init()
if (y == 1):
gpio.output(in1,0)
gpio.output(in2,1)
gpio.output(in3,0)
gpio.output(in4,0)
if (y == 0):
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,1)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def adim3(tf,y):
init()
if (y == 1):
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,1)
gpio.output(in4,0)
if (y == 0):
gpio.output(in1,0)
gpio.output(in2,1)
gpio.output(in3,0)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def adim4(tf,y):
init()
if (y == 1):
gpio.output(in1,0)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,1)
if (y == 0):
gpio.output(in1,1)
gpio.output(in2,0)
gpio.output(in3,0)
gpio.output(in4,0)
time.sleep(tf)
gpio.cleanup()
def stepper(tf,ff,yf):
ff = float(ff)
ff = ff / 1000
if (yf == 0):
for i in range(0,tf):
adim1(ff,0)
adim2(ff,0)
adim3(ff,0)
adim4(ff,0)
if (yf == 1):
for i in range(0,tf):
adim1(ff,1)
adim2(ff,1)
adim3(ff,1)
adim4(ff,1)
def servo(tf):
gpio.setmode(gpio.BCM)
gpio.setup(5,gpio.OUT)
p = gpio.PWM(5,50)
p.start(5.5)
p.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo2(tf):
gpio.setmode(gpio.BCM)
gpio.setup(6,gpio.OUT)
p2 = gpio.PWM(6,50)
p2.start(6)
p2.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo3(tf):
gpio.setmode(gpio.BCM)
gpio.setup(20,gpio.OUT)
p3 = gpio.PWM(20,50)
p3.start(6)
p3.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def servo4(tf):
gpio.setmode(gpio.BCM)
gpio.setup(16,gpio.OUT)
p3 = gpio.PWM(16,50)
p3.start(6)
p3.ChangeDutyCycle(tf)
time.sleep(0.7)
gpio.cleanup()
def ses(tf,ff):
init()
sp = gpio.PWM(21,ff)
sp.start(70)
time.sleep(tf)
gpio.cleanup()
def led(ff,tf,sf):
init()
sp = gpio.PWM(solled,500)
sap = gpio.PWM(sagled,500)
if (sf == 0):
sp.start(ff)
time.sleep(tf)
gpio.cleanup()
elif (sf == 1):
sap.start(ff)
time.sleep(tf)
gpio.cleanup()
elif (sf == 2):
sp.start(ff)
sap.start(ff)
time.sleep(tf)
gpio.cleanup()
def kapat():
os.system("pkill -9 -f main.py")
def kontrol():
x = random.randrange(1,3)
if (x == 1):
print ("sagabak")
servo(3)
time.sleep(0.05)
dis = distance('cm')
print (dis)
if dis < 15:
print ("solabak")
servo(9)
dis = distance('cm')
if dis < 15:
print ("cik")
servo(5.5)
geri(2,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sol(0.7,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sag(0.7,hiz)
if (x == 2):
print ("solabak")
servo(9)
time.sleep(0.05)
dis = distance('cm')
print (dis)
if dis < 15:
print ("sagabak")
servo(3)
dis = distance('cm')
if dis < 15:
print ("cik")
servo(5.5)
geri(2,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sag(0.7,hiz)
else:
servo(5.5)
geri(0.5,hiz)
sol(0.7,hiz)
print (" ")
print ("otonomgorev yazilimi google speech api sesli komutlari ile robotun otonom hareket etmesi icin yazilmistir")
print (" ")
time.sleep(1)
def cizgi(rf):
for i in range(0,rf):
dis = distance('cm')
init()
if (gpio.input(23) == 0 and gpio.input(24) == 0):
ileri(0.1,hiz)
elif (gpio.input(23) == 1 and gpio.input(24) == 0):
sol(0.1,hiz)
elif (gpio.input(23) == 0 and gpio.input(24) == 1):
sag(0.1,hiz)
else:
pass
if dis < 15:
print ("cok dar",dis)
geri(0.5,hiz)
servo(5.5)
kontrol()
elif dis < 25:
print ("dar",dis)
else:
print ("temiz",dis)
dur()
aci2 = aci3 = aci4 = 6
aci = 5.5
| true
| true
|
790a1d37a4ebf26c18ca7f0b51e38c2f5c1b8cb7
| 5,166
|
py
|
Python
|
umn/source/conf.py
|
kucerakk/template
|
30ba12f5b25b6afbb060bfe0379fc1a7b88626ff
|
[
"Apache-2.0"
] | null | null | null |
umn/source/conf.py
|
kucerakk/template
|
30ba12f5b25b6afbb060bfe0379fc1a7b88626ff
|
[
"Apache-2.0"
] | null | null | null |
umn/source/conf.py
|
kucerakk/template
|
30ba12f5b25b6afbb060bfe0379fc1a7b88626ff
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import os
import sys
extensions = [
'otcdocstheme'
]
html_theme = 'otcdocs'
html_theme_options = {
}
otcdocs_auto_name = False
otcdocs_auto_version = False
project = 'Dummy Service' # FIXME
otcdocs_repo_name = 'opentelekomcloud-docs/template' # FIXME
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2022-present, Open Telekom Cloud'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use
# for all documents.
# default_role = None
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Dummy UMN" # FIXME
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'dummydoc' # FIXME
latex_documents = [
('index',
'umn-dummy.tex', # FIXME
u'%s User Manual Documentation' % project,
u'OpenTelekomCloud', 'manual'),
]
| 32.696203
| 79
| 0.714092
|
import os
import sys
extensions = [
'otcdocstheme'
]
html_theme = 'otcdocs'
html_theme_options = {
}
otcdocs_auto_name = False
otcdocs_auto_version = False
project = 'Dummy Service'
otcdocs_repo_name = 'opentelekomcloud-docs/template'
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
source_suffix = '.rst'
master_doc = 'index'
copyright = u'2022-present, Open Telekom Cloud'
show_authors = False
pygments_style = 'native'
html_title = "Dummy UMN"
htmlhelp_basename = 'dummydoc'
latex_documents = [
('index',
'umn-dummy.tex',
u'%s User Manual Documentation' % project,
u'OpenTelekomCloud', 'manual'),
]
| true
| true
|
790a1e1db2413ea25ac85add932a5feef92f08a8
| 8,855
|
py
|
Python
|
qutip/graph.py
|
trxw/qutip
|
b923c973edd9a071d86eb849650661549f73585f
|
[
"BSD-3-Clause"
] | 1
|
2015-11-06T06:35:06.000Z
|
2015-11-06T06:35:06.000Z
|
qutip/graph.py
|
trxw/qutip
|
b923c973edd9a071d86eb849650661549f73585f
|
[
"BSD-3-Clause"
] | null | null | null |
qutip/graph.py
|
trxw/qutip
|
b923c973edd9a071d86eb849650661549f73585f
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of graph theory routines used mainly
to reorder matrices for iterative steady state solvers.
"""
import numpy as np
import scipy.sparse as sp
from qutip.cy.graph_utils import (
_pseudo_peripheral_node, _breadth_first_search, _node_degrees,
_rcm, _bfs_matching, _weighted_bfs_matching)
from qutip.settings import debug
from warnings import warn
if debug:
import inspect
def graph_degree(A):
"""
Returns the degree for the nodes (rows) of a symmetric
graph in sparse CSR or CSC format, or a qobj.
Parameters
----------
A : qobj, csr_matrix, csc_matrix
Input quantum object or csr_matrix.
Returns
-------
degree : array
Array of integers giving the degree for each node (row).
"""
if A.__class__.__name__=='Qobj':
return _node_degrees(A.data.indices, A.data.indptr, A.shape[0])
else:
return _node_degrees(A.indices, A.indptr, A.shape[0])
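# --- Editor's usage sketch: a hypothetical example, not part of the original
# QuTiP source. It shows graph_degree on a small symmetric CSR matrix; the
# 4x4 adjacency pattern below is made up for illustration only.
def _example_graph_degree():
    A = sp.csr_matrix(np.array([[0, 1, 1, 0],
                                [1, 0, 1, 0],
                                [1, 1, 0, 1],
                                [0, 0, 1, 0]]))
    # Each entry counts the nonzeros in the corresponding row: [2, 2, 3, 1].
    return graph_degree(A)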
def breadth_first_search(A,start):
"""
Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting
from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.
This function requires a matrix with symmetric structure.
    Use A+trans(A) if the original matrix is not symmetric or you are not sure.
Parameters
----------
A : qobj, csr_matrix
Input graph in CSR matrix form
start : int
        Starting node for BFS traversal.
Returns
-------
order : array
Order in which nodes are traversed from starting node.
levels : array
Level of the nodes in the order that they are traversed.
"""
if A.__class__.__name__=='Qobj':
A=A.data
num_rows=A.shape[0]
start=int(start)
order, levels = _breadth_first_search(A.indices,A.indptr, num_rows, start)
    # since not all nodes may be reached by the search, filter out unused entries in the arrays
return order[order!=-1], levels[levels!=-1]
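# --- Editor's usage sketch: a hypothetical example, not part of the original
# QuTiP source. BFS over the symmetric path graph 0-1-2-3 starting at node 0.
def _example_breadth_first_search():
    A = sp.csr_matrix(np.array([[0, 1, 0, 0],
                                [1, 0, 1, 0],
                                [0, 1, 0, 1],
                                [0, 0, 1, 0]]))
    order, levels = breadth_first_search(A, 0)
    # For this path graph both arrays come out as [0, 1, 2, 3].
    return order, levels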
def symrcm(A, sym=False):
"""
Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj
in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,
this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).
It is assumed by default (*sym=False*) that the input matrix is not symmetric. This
is because it is faster to do A+Trans(A) than it is to check for symmetry for
a generic matrix. If you are guaranteed that the matrix is symmetric in structure
    (the values of the matrix elements do not matter), then set *sym=True*.
Parameters
----------
A : csr_matrix, qobj
Input sparse csr_matrix or Qobj.
sym : bool {False, True}
Flag to set whether input matrix is symmetric.
Returns
-------
perm : array
Array of permuted row and column indices.
Notes
-----
This routine is used primarily for internal reordering of Lindblad super-operators
for use in iterative solver routines.
References
----------
E. Cuthill and J. McKee, "Reducing the Bandwidth of Sparse Symmetric Matrices",
ACM '69 Proceedings of the 1969 24th national conference, (1969).
"""
nrows = A.shape[0]
if A.__class__.__name__=='Qobj':
if not sym:
A = A.data+A.data.transpose()
return _rcm(A.indices, A.indptr, nrows)
else:
return _rcm(A.data.indices, A.data.indptr, nrows)
else:
if not sym:
A=A+A.transpose()
return _rcm(A.indices, A.indptr, nrows)
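# --- Editor's usage sketch: a hypothetical example, not part of the original
# QuTiP source. Computes an RCM permutation for a small structurally
# symmetric matrix and applies it to rows and columns.
def _example_symrcm():
    A = sp.csr_matrix(np.array([[1, 0, 0, 1],
                                [0, 1, 1, 0],
                                [0, 1, 1, 0],
                                [1, 0, 0, 1]]))
    perm = symrcm(A, sym=True)
    # Permuting rows and columns by perm is meant to reduce the bandwidth.
    return A.toarray()[np.ix_(perm, perm)]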
def bfs_matching(A):
"""
    Returns an array of row permutations that removes zero elements
from the diagonal of a nonsingular square CSC sparse matrix. Such
a permutation is always possible provided that the matrix is
nonsingular.
This function looks at the structure of the matrix only.
Parameters
----------
A : csc_matrix
Input matrix
Returns
-------
perm : array
Array of row permutations.
Notes
-----
This function relies on a maximum cardinality bipartite matching algorithm
based on a breadth-first search (BFS) of the underlying graph[1]_.
References
----------
.. [1] I. S. Duff, K. Kaya, and B. Ucar, "Design, Implementation, and
Analysis of Maximum Transversal Algorithms", ACM Trans. Math. Softw.
38, no. 2, (2011).
"""
nrows = A.shape[0]
if A.shape[0]!=A.shape[1]:
raise ValueError('bfs_matching requires a square matrix.')
if A.__class__.__name__=='Qobj':
A = A.data.tocsc()
elif not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
warn('bfs_matching requires CSC matrix format.',
sp.SparseEfficiencyWarning)
perm = _bfs_matching(A.indices, A.indptr, nrows)
if np.any(perm==-1):
raise Exception('Possibly singular input matrix.')
return perm
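# --- Editor's usage sketch: a hypothetical example, not part of the original
# QuTiP source. The 3x3 matrix below has a zero at position (0, 0); the
# returned row permutation is intended to give a zero-free diagonal.
def _example_bfs_matching():
    A = sp.csc_matrix(np.array([[0., 1., 0.],
                                [1., 0., 0.],
                                [0., 0., 1.]]))
    perm = bfs_matching(A)
    return A.toarray()[perm]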
def weighted_bfs_matching(A):
"""
Returns an array of row permutations that attempts to maximize
the product of the ABS values of the diagonal elements in
a nonsingular square CSC sparse matrix. Such a permutation is
always possible provided that the matrix is nonsingular.
This function looks at both the structure and ABS values of the
underlying matrix.
Parameters
----------
A : csc_matrix
Input matrix
Returns
-------
perm : array
Array of row permutations.
Notes
-----
This function uses a weighted maximum cardinality bipartite matching
algorithm based on breadth-first search (BFS). The columns are weighted
according to the element of max ABS value in the associated rows and
are traversed in descending order by weight. When performing the BFS
traversal, the row associated to a given column is the one with maximum
weight. Unlike other techniques[1]_, this algorithm does not guarantee the
product of the diagonal is maximized. However, this limitation is offset
by the substantially faster runtime of this method.
References
----------
.. [1] I. S. Duff and J. Koster, "The design and use of algorithms for
permuting large entries to the diagonal of sparse matrices", SIAM J.
Matrix Anal. and Applics. 20, no. 4, 889 (1997).
"""
nrows = A.shape[0]
if A.shape[0]!=A.shape[1]:
raise ValueError('weighted_bfs_matching requires a square matrix.')
if A.__class__.__name__=='Qobj':
A = A.data.tocsc()
elif not sp.isspmatrix_csc(A):
A = sp.csc_matrix(A)
warn('weighted_bfs_matching requires CSC matrix format',
sp.SparseEfficiencyWarning)
perm = _weighted_bfs_matching(
np.asarray(np.abs(A.data), dtype=float),
A.indices, A.indptr, nrows)
if np.any(perm==-1):
raise Exception('Possibly singular input matrix.')
return perm
| 34.862205
| 90
| 0.658385
| true
| true
|
|
790a1f0fce49d9911909bd493fe73e1f22541631
| 1,368
|
py
|
Python
|
multithreading/multithreading_simple.py
|
guneykayim/python-examples
|
b5378570fdfa71bc27809801867b53381f8e5346
|
[
"MIT"
] | null | null | null |
multithreading/multithreading_simple.py
|
guneykayim/python-examples
|
b5378570fdfa71bc27809801867b53381f8e5346
|
[
"MIT"
] | null | null | null |
multithreading/multithreading_simple.py
|
guneykayim/python-examples
|
b5378570fdfa71bc27809801867b53381f8e5346
|
[
"MIT"
] | null | null | null |
import threading, queue
import time
import random
import logging
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-9s) %(message)s',)
NUMBER_OF_THREADS = 4
TIMEOUT_SECONDS = 5
class SampleThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, id=None, kwargs=None):
super().__init__(group=group, target=target, name=name)
self.id = id
self.kwargs = kwargs
self.queue = kwargs['queue']
return
def run(self):
# do some work here
        logging.debug(f'Running thread id={self.id}')
r = random.uniform(0, 5)
time.sleep(r)
self.queue.put(f'Thread id={self.id} finished running in {r} seconds')
if __name__ == '__main__':
print('Starting threads')
# create a list to hold running SampleThread object instances
threads = list()
# build a single queue to send to all thread objects
q = queue.Queue()
for i in range(NUMBER_OF_THREADS):
t = SampleThread(id = i, kwargs={'queue':q})
t.start()
threads.append(t)
# wait until all threads are finished
logging.debug('Waiting for all threads to finish running')
[t.join() for t in threads]
logging.debug('All processes are finished running')
logging.debug('Results')
while not q.empty():
logging.debug(q.get())
| 27.918367
| 82
| 0.645468
|
import threading, queue
import time
import random
import logging
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)-9s) %(message)s',)
NUMBER_OF_THREADS = 4
TIMEOUT_SECONDS = 5
class SampleThread(threading.Thread):
def __init__(self, group=None, target=None, name=None, id=None, kwargs=None):
super().__init__(group=group, target=target, name=name)
self.id = id
self.kwargs = kwargs
self.queue = kwargs['queue']
return
def run(self):
        logging.debug(f'Running thread id={self.id}')
r = random.uniform(0, 5)
time.sleep(r)
self.queue.put(f'Thread id={self.id} finished running in {r} seconds')
if __name__ == '__main__':
print('Starting threads')
threads = list()
q = queue.Queue()
for i in range(NUMBER_OF_THREADS):
t = SampleThread(id = i, kwargs={'queue':q})
t.start()
threads.append(t)
logging.debug('Waiting for all threads to finish running')
[t.join() for t in threads]
logging.debug('All processes are finished running')
logging.debug('Results')
while not q.empty():
logging.debug(q.get())
| true
| true
|
790a1f549ca885d773128c7ac9a9b19cad44bb99
| 8,346
|
py
|
Python
|
tec/snake_food_import_counting.py
|
thorwhalen/tec
|
7f23e1ee0750688cfe21838414d83e813217a853
|
[
"Apache-2.0"
] | 1
|
2022-01-19T13:15:30.000Z
|
2022-01-19T13:15:30.000Z
|
tec/snake_food_import_counting.py
|
thorwhalen/tec
|
7f23e1ee0750688cfe21838414d83e813217a853
|
[
"Apache-2.0"
] | null | null | null |
tec/snake_food_import_counting.py
|
thorwhalen/tec
|
7f23e1ee0750688cfe21838414d83e813217a853
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import os
import re
import subprocess
from collections import Counter
from io import StringIO
import pandas as pd
from numpy import unique
file_sep = os.path.sep
def imports_in_module(module):
"""
Get a list of strings showing what is imported in a module.
    :param module: An actual module object, or the file of the module (as given by inspect.getfile(module))
:return: A list of strings showing the imported objects (modules, functions, variables, classes...)
Note: Requires having snakefood installed:
http://furius.ca/snakefood/doc/snakefood-doc.html#installation
You may want to use ``imports_in_py_content(py_content)`` on the actual string content itself.
# >>> print('\\n'.join(imports_in_module(__file__))) # doctest: +SKIP
# StringIO.StringIO
# collections.Counter
# inspect
# numpy.unique
# os
# pandas
# re
# subprocess
# ut.pfile.iter.get_filepath_iterator
# ut.util.code.packages.get_module_name
# ut.util.code.packages.read_requirements
"""
if not isinstance(module, str):
module = inspect.getfile(module)
if module.endswith('c'):
module = module[:-1] # remove the 'c' of '.pyc'
    # subprocess.check_output returns bytes on Python 3, so decode before splitting
    t = subprocess.check_output(['sfood-imports', '-u', module]).decode()
return [x for x in t.split('\n') if len(x) > 0]
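# --- Editor's usage sketch: a hypothetical example, not part of the original
# source. It assumes the snakefood `sfood-imports` tool is installed and on
# the PATH, as the docstring above already requires.
def _example_imports_in_module():
    # List what this very module imports, as in the skipped doctest above.
    return imports_in_module(__file__)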
def base_modules_used_in_module(module):
"""
Get a list of strings showing what base modules that are imported in a module.
    :param module: An actual module object, or the file of the module (as given by inspect.getfile(module))
:return: A list of strings showing the imported base modules (i.e. the X of import X.Y.Z or from X.Y import Z).
Note: Requires having snakefood installed:
http://furius.ca/snakefood/doc/snakefood-doc.html#installation
>>> base_modules_used_in_module(__file__) # doctest: +SKIP
['StringIO', 'collections', 'inspect', 'numpy', 'os', 'pandas', 're', 'subprocess', 'ut']
"""
return list(unique([re.compile('\w+').findall(x)[0] for x in imports_in_module(module)]))
def base_module_imports_in_module_recursive(module):
"""
Get a list of strings showing what base modules that are imported in a module, recursively.
It's the recursive version of the base_modules_used_in_module function.
    Recursive in the sense that if module is a package module (i.e. containing an __init__.py and further submodules),
    the base_modules_used_in_module function will be applied to all .py files under the package folder.
Function returns a count (Counter object) of the number of modules where each base module was found.
    :param module: An actual module object, or the file of the module (as given by inspect.getfile(module))
:param module_names: Modules to filter for.
None: Will grab all modules
A list or tuple: Of modules to grab
If not will assume module_names is a regex to apply to find module names
:return:
"""
# if module_names is None:
# module_names = any_module_import_regex
# elif isinstance(module_names, (tuple, list)):
# module_names = mk_multiple_package_import_regex(module_names)
if inspect.ismodule(module):
module = inspect.getsourcefile(module)
if module.endswith('__init__.py'):
module = os.path.dirname(module)
if os.path.isdir(module):
c = Counter()
it = get_filepath_iterator(module, pattern='.py$')
next(it) # to skip the seed module itself, and not get into an infinite loop
for _module in it:
try:
c.update(base_module_imports_in_module_recursive(_module))
except Exception as e:
if 'sfood-imports' in e.args[1]:
raise RuntimeError("You don't have sfood-imports installed (snakefood), so I can't do my job")
else:
print(("Error with module {}: {}".format(_module, e)))
return c
elif not os.path.isfile(module):
raise ValueError("module file not found: {}".format(module))
return Counter(base_modules_used_in_module(module))
# with open(module) as fp:
# module_contents = fp.read()
# return Counter(map(lambda x: x[1:], unique(module_names.findall(module_contents))))
def requirements_packages_in_module(module, requirements=None):
if requirements is None:
requirements = list(pip_licenses_df(include_module_name=False)['package_name'])
elif isinstance(requirements, str) and os.path.isfile(requirements):
with open(requirements) as fp:
requirements = fp.read().splitlines()
p = re.compile('^[^=]+')
module_names = list()
for x in requirements:
try:
xx = p.findall(x)
if xx:
module_name = get_module_name(xx[0])
module_names.append(module_name)
except Exception as e:
print(("Error with {}\n {}".format(x, e)))
    # base_module_imports_in_module_recursive no longer accepts module_names,
    # so the filtering argument is dropped here to keep the call valid
    return base_module_imports_in_module_recursive(module)
word_or_letter_p = re.compile('\w')
at_least_two_spaces_p = re.compile('\s{2,}')
def pip_licenses_df(package_names=None, include_module_name=True, on_module_search_error=None):
"""
Get a dataframe of pip packages and licences
:return:
"""
    # check_output returns bytes on Python 3, so decode before splitting
    pip_licenses_output = subprocess.check_output(['pip-licenses']).decode()
t = list(map(str.strip,
list(filter(word_or_letter_p.search,
pip_licenses_output.split('\n')))))
t = [at_least_two_spaces_p.sub('\t', x) for x in t]
t = '\n'.join(t)
df = pd.read_csv(StringIO(t), sep='\t')
df = df.rename(columns={'Name': 'package_name', 'Version': 'version', 'License': 'license'})
if include_module_name:
df['module'] = [get_module_name(x, on_error=on_module_search_error) for x in df['package_name']]
df = df[['module', 'package_name', 'version', 'license']] # reorder
if package_names is not None:
df = df[df['package_name'].isin(package_names)]
return df
def get_filepath_iterator(root_folder,
pattern='',
return_full_path=True,
apply_pattern_to_full_path=False):
if apply_pattern_to_full_path:
return recursive_file_walk_iterator_with_name_filter(root_folder, pattern, return_full_path)
else:
return recursive_file_walk_iterator_with_filepath_filter(root_folder, pattern, return_full_path)
def iter_relative_files_and_folder(root_folder):
from glob import iglob
if not root_folder.endswith(file_sep):
root_folder += file_sep
return map(lambda x: x.replace(root_folder, ''), iglob(root_folder + '*'))
def pattern_filter(pattern):
pattern = re.compile(pattern)
def _pattern_filter(s):
return pattern.search(s) is not None
return _pattern_filter
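# --- Editor's usage sketch: a small self-contained example, not part of the
# original source. pattern_filter builds a predicate from a regex.
def _example_pattern_filter():
    is_py = pattern_filter(r'\.py$')
    return [is_py('module.py'), is_py('notes.txt')]  # [True, False]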
def recursive_file_walk_iterator_with_name_filter(root_folder, filt='', return_full_path=True):
if isinstance(filt, str):
filt = pattern_filter(filt)
# if isinstance(pattern, basestring):
# pattern = re.compile(pattern)
for name in iter_relative_files_and_folder(root_folder):
full_path = os.path.join(root_folder, name)
if os.path.isdir(full_path):
for entry in recursive_file_walk_iterator_with_name_filter(full_path, filt, return_full_path):
yield entry
else:
if os.path.isfile(full_path):
if filt(name):
if return_full_path:
yield full_path
else:
yield name
def recursive_file_walk_iterator_with_filepath_filter(root_folder, filt='', return_full_path=True):
if isinstance(filt, str):
filt = pattern_filter(filt)
for name in iter_relative_files_and_folder(root_folder):
full_path = os.path.join(root_folder, name)
if os.path.isdir(full_path):
for entry in recursive_file_walk_iterator_with_filepath_filter(full_path, filt, return_full_path):
yield entry
else:
if os.path.isfile(full_path):
if filt(full_path):
if return_full_path:
yield full_path
else:
yield name
| 38.638889
| 117
| 0.659358
|
import inspect
import os
import re
import subprocess
from collections import Counter
from io import StringIO
import pandas as pd
from numpy import unique
file_sep = os.path.sep
def imports_in_module(module):
if not isinstance(module, str):
module = inspect.getfile(module)
if module.endswith('c'):
module = module[:-1]
    t = subprocess.check_output(['sfood-imports', '-u', module]).decode()
return [x for x in t.split('\n') if len(x) > 0]
def base_modules_used_in_module(module):
return list(unique([re.compile('\w+').findall(x)[0] for x in imports_in_module(module)]))
def base_module_imports_in_module_recursive(module):
if inspect.ismodule(module):
module = inspect.getsourcefile(module)
if module.endswith('__init__.py'):
module = os.path.dirname(module)
if os.path.isdir(module):
c = Counter()
it = get_filepath_iterator(module, pattern='.py$')
next(it)
for _module in it:
try:
c.update(base_module_imports_in_module_recursive(_module))
except Exception as e:
if 'sfood-imports' in e.args[1]:
raise RuntimeError("You don't have sfood-imports installed (snakefood), so I can't do my job")
else:
print(("Error with module {}: {}".format(_module, e)))
return c
elif not os.path.isfile(module):
raise ValueError("module file not found: {}".format(module))
return Counter(base_modules_used_in_module(module))
def requirements_packages_in_module(module, requirements=None):
if requirements is None:
requirements = list(pip_licenses_df(include_module_name=False)['package_name'])
elif isinstance(requirements, str) and os.path.isfile(requirements):
with open(requirements) as fp:
requirements = fp.read().splitlines()
p = re.compile('^[^=]+')
module_names = list()
for x in requirements:
try:
xx = p.findall(x)
if xx:
module_name = get_module_name(xx[0])
module_names.append(module_name)
except Exception as e:
print(("Error with {}\n {}".format(x, e)))
    return base_module_imports_in_module_recursive(module)
word_or_letter_p = re.compile('\w')
at_least_two_spaces_p = re.compile('\s{2,}')
def pip_licenses_df(package_names=None, include_module_name=True, on_module_search_error=None):
    pip_licenses_output = subprocess.check_output(['pip-licenses']).decode()
t = list(map(str.strip,
list(filter(word_or_letter_p.search,
pip_licenses_output.split('\n')))))
t = [at_least_two_spaces_p.sub('\t', x) for x in t]
t = '\n'.join(t)
df = pd.read_csv(StringIO(t), sep='\t')
df = df.rename(columns={'Name': 'package_name', 'Version': 'version', 'License': 'license'})
if include_module_name:
df['module'] = [get_module_name(x, on_error=on_module_search_error) for x in df['package_name']]
df = df[['module', 'package_name', 'version', 'license']]
if package_names is not None:
df = df[df['package_name'].isin(package_names)]
return df
def get_filepath_iterator(root_folder,
pattern='',
return_full_path=True,
apply_pattern_to_full_path=False):
if apply_pattern_to_full_path:
return recursive_file_walk_iterator_with_name_filter(root_folder, pattern, return_full_path)
else:
return recursive_file_walk_iterator_with_filepath_filter(root_folder, pattern, return_full_path)
def iter_relative_files_and_folder(root_folder):
from glob import iglob
if not root_folder.endswith(file_sep):
root_folder += file_sep
return map(lambda x: x.replace(root_folder, ''), iglob(root_folder + '*'))
def pattern_filter(pattern):
pattern = re.compile(pattern)
def _pattern_filter(s):
return pattern.search(s) is not None
return _pattern_filter
def recursive_file_walk_iterator_with_name_filter(root_folder, filt='', return_full_path=True):
if isinstance(filt, str):
filt = pattern_filter(filt)
for name in iter_relative_files_and_folder(root_folder):
full_path = os.path.join(root_folder, name)
if os.path.isdir(full_path):
for entry in recursive_file_walk_iterator_with_name_filter(full_path, filt, return_full_path):
yield entry
else:
if os.path.isfile(full_path):
if filt(name):
if return_full_path:
yield full_path
else:
yield name
def recursive_file_walk_iterator_with_filepath_filter(root_folder, filt='', return_full_path=True):
if isinstance(filt, str):
filt = pattern_filter(filt)
for name in iter_relative_files_and_folder(root_folder):
full_path = os.path.join(root_folder, name)
if os.path.isdir(full_path):
for entry in recursive_file_walk_iterator_with_filepath_filter(full_path, filt, return_full_path):
yield entry
else:
if os.path.isfile(full_path):
if filt(full_path):
if return_full_path:
yield full_path
else:
yield name
| true
| true
|
790a1faf7ca690522cf3511872c292a6508ee3a2
| 5,276
|
py
|
Python
|
tools/metrics/histograms/PRESUBMIT.py
|
Ron423c/chromium
|
2edf7b980065b648f8b2a6e52193d83832fe36b7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575
|
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
tools/metrics/histograms/PRESUBMIT.py
|
Ron423c/chromium
|
2edf7b980065b648f8b2a6e52193d83832fe36b7
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/metrics/histograms/PRESUBMIT.py
|
iridium-browser/iridium-browser
|
907e31cf5ce5ad14d832796e3a7c11e496828959
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52
|
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results):
"""Runs pretty-print command for specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit',
'--non-interactive'], cwd=cwd)
if exit_code != 0:
error_msg = (
'%s is not formatted correctly; run git cl format to fix.' % rel_path)
results.append(output_api.PresubmitError(error_msg))
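# --- Editor's usage sketch: a purely hypothetical illustration, not part of
# the Chromium source. SimpleNamespace objects stand in for the depot_tools
# input_api/output_api just to show how the helper is wired up.
def _ExampleGetPrettyPrintErrors():
  import sys
  import types
  results = []
  fake_input_api = types.SimpleNamespace(
      python_executable=sys.executable,
      subprocess=types.SimpleNamespace(call=lambda *args, **kwargs: 0))
  fake_output_api = types.SimpleNamespace(PresubmitError=lambda msg: msg)
  GetPrettyPrintErrors(fake_input_api, fake_output_api, '.', 'histograms.xml',
                       results)
  return results  # Empty, because the fake subprocess call returned 0.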
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results):
"""Validates histogram prefixes in specified file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd)
if exit_code != 0:
error_msg = ('%s contains histogram(s) with disallowed prefix, please run '
'validate_prefix.py %s to fix.' % (rel_path, rel_path))
results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results):
"""Validates all histograms in the file are obsolete."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'histograms_xml/obsolete_histograms.xml contains non-obsolete '
'histograms, please run validate_obsolete_histograms.py to fix.')
results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results):
"""Validates histograms format and index file."""
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_format.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms are not well-formatted; please run %s/validate_format.py '
'and fix the reported errors.' % cwd)
results.append(output_api.PresubmitError(error_msg))
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms index file is not up-to-date. Please run '
'%s/histogram_paths.py to update it' % cwd)
results.append(output_api.PresubmitError(error_msg))
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
"""Does corresponding validations if histograms.xml or enums.xml is changed.
Args:
input_api: An input_api instance that contains information about changes.
output_api: An output_api instance to create results of the PRESUBMIT check.
file_obj: A file object of one of the changed files.
cwd: Path to current working directory.
results: The returned variable which is a list of output_api results.
Returns:
    A boolean that is True if a histograms.xml or enums.xml file is changed.
"""
p = file_obj.AbsoluteLocalPath()
# Only do PRESUBMIT checks when |p| is under |cwd|.
if input_api.os_path.commonprefix([p, cwd]) != cwd:
return False
filepath = input_api.os_path.relpath(p, cwd)
if 'test_data' in filepath:
return False
# If the changed file is obsolete_histograms.xml, validate all histograms
# inside are obsolete.
if 'obsolete_histograms.xml' in filepath:
GetObsoleteXmlErrors(input_api, output_api, cwd, results)
# Return false here because we don't need to validate format if users only
# change obsolete_histograms.xml.
return False
# If the changed file is histograms.xml or histogram_suffixes_list.xml,
# pretty-print and validate prefix it.
elif ('histograms.xml' in filepath
or 'histogram_suffixes_list.xml' in filepath):
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
# TODO(crbug/1120229): Re-enable validate prefix check once all histograms
# are split.
# GetPrefixErrors(input_api, output_api, cwd, filepath, results)
return True
# If the changed file is enums.xml, pretty-print it.
elif 'enums.xml' in filepath:
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
return True
return False
def CheckChange(input_api, output_api):
"""Checks that histograms.xml is pretty-printed and well-formatted."""
results = []
cwd = input_api.PresubmitLocalPath()
xml_changed = False
# Only for changed files, do corresponding checks if the file is
# histograms.xml, enums.xml or obsolete_histograms.xml.
for file_obj in input_api.AffectedTextFiles():
is_changed = ValidateSingleFile(
input_api, output_api, file_obj, cwd, results)
xml_changed = xml_changed or is_changed
# Run validate_format.py and validate_histograms_index.py, if changed files
# contain histograms.xml or enums.xml.
if xml_changed:
GetValidateHistogramsError(input_api, output_api, cwd, results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| 37.15493
| 80
| 0.736164
|
def GetPrettyPrintErrors(input_api, output_api, cwd, rel_path, results):
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'pretty_print.py', rel_path, '--presubmit',
'--non-interactive'], cwd=cwd)
if exit_code != 0:
error_msg = (
'%s is not formatted correctly; run git cl format to fix.' % rel_path)
results.append(output_api.PresubmitError(error_msg))
def GetPrefixErrors(input_api, output_api, cwd, rel_path, results):
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_prefix.py', rel_path], cwd=cwd)
if exit_code != 0:
error_msg = ('%s contains histogram(s) with disallowed prefix, please run '
'validate_prefix.py %s to fix.' % (rel_path, rel_path))
results.append(output_api.PresubmitError(error_msg))
def GetObsoleteXmlErrors(input_api, output_api, cwd, results):
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_obsolete_histograms.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'histograms_xml/obsolete_histograms.xml contains non-obsolete '
'histograms, please run validate_obsolete_histograms.py to fix.')
results.append(output_api.PresubmitError(error_msg))
def GetValidateHistogramsError(input_api, output_api, cwd, results):
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_format.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms are not well-formatted; please run %s/validate_format.py '
'and fix the reported errors.' % cwd)
results.append(output_api.PresubmitError(error_msg))
exit_code = input_api.subprocess.call(
[input_api.python_executable, 'validate_histograms_index.py'], cwd=cwd)
if exit_code != 0:
error_msg = (
'Histograms index file is not up-to-date. Please run '
'%s/histogram_paths.py to update it' % cwd)
results.append(output_api.PresubmitError(error_msg))
def ValidateSingleFile(input_api, output_api, file_obj, cwd, results):
p = file_obj.AbsoluteLocalPath()
if input_api.os_path.commonprefix([p, cwd]) != cwd:
return False
filepath = input_api.os_path.relpath(p, cwd)
if 'test_data' in filepath:
return False
if 'obsolete_histograms.xml' in filepath:
GetObsoleteXmlErrors(input_api, output_api, cwd, results)
# change obsolete_histograms.xml.
return False
# If the changed file is histograms.xml or histogram_suffixes_list.xml,
# pretty-print and validate prefix it.
elif ('histograms.xml' in filepath
or 'histogram_suffixes_list.xml' in filepath):
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
# TODO(crbug/1120229): Re-enable validate prefix check once all histograms
# are split.
# GetPrefixErrors(input_api, output_api, cwd, filepath, results)
return True
# If the changed file is enums.xml, pretty-print it.
elif 'enums.xml' in filepath:
GetPrettyPrintErrors(input_api, output_api, cwd, filepath, results)
return True
return False
def CheckChange(input_api, output_api):
results = []
cwd = input_api.PresubmitLocalPath()
xml_changed = False
# Only for changed files, do corresponding checks if the file is
# histograms.xml, enums.xml or obsolete_histograms.xml.
for file_obj in input_api.AffectedTextFiles():
is_changed = ValidateSingleFile(
input_api, output_api, file_obj, cwd, results)
xml_changed = xml_changed or is_changed
# Run validate_format.py and validate_histograms_index.py, if changed files
# contain histograms.xml or enums.xml.
if xml_changed:
GetValidateHistogramsError(input_api, output_api, cwd, results)
return results
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
| true
| true
|
790a1fbce2d935b7a37511eef87b9aed2191f79b
| 32
|
py
|
Python
|
linguist/__init__.py
|
jayvdb/linguist
|
bac586366f1fc87eda72c90855a4eabb1f61922c
|
[
"BSD-3-Clause"
] | 75
|
2015-03-14T19:32:23.000Z
|
2022-02-14T13:01:43.000Z
|
linguist/__init__.py
|
jayvdb/linguist
|
bac586366f1fc87eda72c90855a4eabb1f61922c
|
[
"BSD-3-Clause"
] | 16
|
2016-04-08T03:22:02.000Z
|
2021-05-17T14:28:06.000Z
|
linguist/__init__.py
|
jayvdb/linguist
|
bac586366f1fc87eda72c90855a4eabb1f61922c
|
[
"BSD-3-Clause"
] | 22
|
2015-01-14T16:33:58.000Z
|
2021-11-02T11:09:51.000Z
|
VERSION = __version__ = '0.1.1'
| 16
| 31
| 0.65625
|
VERSION = __version__ = '0.1.1'
| true
| true
|