hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71f8d6b054c39b44978826f1f894a184c1e7cce | 212 | py | Python | social/__init__.py | Diolor/python-social-auth | ba4e30d4a11b2e188954770bae4df9426d61a470 | [
"BSD-3-Clause"
] | 1 | 2015-04-19T21:38:46.000Z | 2015-04-19T21:38:46.000Z | social/__init__.py | nvbn/python-social-auth | 3e0e99404f20e7b6847ca069e0844ba8c090415f | [
"BSD-3-Clause"
] | null | null | null | social/__init__.py | nvbn/python-social-auth | 3e0e99404f20e7b6847ca069e0844ba8c090415f | [
"BSD-3-Clause"
] | 1 | 2020-05-23T05:49:36.000Z | 2020-05-23T05:49:36.000Z | """
python-social-auth application, allows OpenId or OAuth user
registration/authentication just adding a few configurations.
"""
version = (0, 1, 13)
extra = ''
__version__ = '.'.join(map(str, version)) + extra
| 26.5 | 61 | 0.721698 | version = (0, 1, 13)
extra = ''
__version__ = '.'.join(map(str, version)) + extra
| true | true |
f71f8de993bce72bcbeae4db86fff99ba77edf5b | 324 | py | Python | AOJ/pra_intro/7-d.py | Nishi05/Competitive-programming | e59a6755b706d9d5c1f359f4511d92c114e6a94e | [
"MIT"
] | null | null | null | AOJ/pra_intro/7-d.py | Nishi05/Competitive-programming | e59a6755b706d9d5c1f359f4511d92c114e6a94e | [
"MIT"
] | null | null | null | AOJ/pra_intro/7-d.py | Nishi05/Competitive-programming | e59a6755b706d9d5c1f359f4511d92c114e6a94e | [
"MIT"
] | null | null | null | n, m, l = map(int, input().split())
a = [list(map(int, input().split())) for i in range(n)]  # n x m matrix, one row per line
b = [list(map(int, input().split())) for i in range(m)]  # m x l matrix, one row per line
C = [[0]*l for _ in range(n)]  # result accumulator: the n x l matrix product A*B
for i in range(n):
    for j in range(l):
        for k in range(m):
            # classic triple-loop matrix multiplication: C[i][j] = sum_k a[i][k]*b[k][j]
            C[i][j] += a[i][k] * b[k][j]
for line in C:
    print(*line)  # print each result row as space-separated values
| 21.6 | 55 | 0.506173 | n, m, l = map(int, input().split())
a = [list(map(int, input().split())) for i in range(n)]
b = [list(map(int, input().split())) for i in range(m)]
C = [[0]*l for _ in range(n)]
for i in range(n):
for j in range(l):
for k in range(m):
C[i][j] += a[i][k] * b[k][j]
for line in C:
print(*line)
| true | true |
f71f8e826c91b8b02c825b8f93c01a539cdf6534 | 1,888 | py | Python | tsutils/cog_settings.py | kary5678/tsutils | ab6ecdcd2f0e10ba19092028909b3f74bf1708a9 | [
"MIT"
] | 1 | 2021-07-28T19:41:18.000Z | 2021-07-28T19:41:18.000Z | tsutils/cog_settings.py | kary5678/tsutils | ab6ecdcd2f0e10ba19092028909b3f74bf1708a9 | [
"MIT"
] | 19 | 2020-09-14T07:55:14.000Z | 2022-03-06T17:23:14.000Z | tsutils/cog_settings.py | kary5678/tsutils | ab6ecdcd2f0e10ba19092028909b3f74bf1708a9 | [
"MIT"
] | 3 | 2020-09-14T07:47:27.000Z | 2021-09-14T02:16:33.000Z | from redbot.core import data_manager
from .json_utils import *
class CogSettings(object):
    """Legacy JSON-backed settings store for a cog.

    Loads ``legacy_settings.json`` from the cog's data folder, creating it
    from :meth:`make_default_settings` when missing and merging in any
    default keys absent from the stored file.
    """

    SETTINGS_FILE_NAME = "legacy_settings.json"

    def __init__(self, cog_name, bot=None):
        self.folder = str(data_manager.cog_data_path(raw_name=cog_name))
        self.file_path = os.path.join(self.folder, CogSettings.SETTINGS_FILE_NAME)
        self.bot = bot
        self.check_folder()
        self.default_settings = self.make_default_settings()
        if not os.path.isfile(self.file_path):
            logger.warning("CogSettings config for {} not found. Creating default...".format(self.file_path))
            self.bot_settings = self.default_settings
            self.save_settings()
        else:
            # JSON stringifies numeric keys; convert them back on load.
            stored = self.intify(read_json_file(self.file_path))
            missing = [key for key in self.default_settings.keys() if key not in stored.keys()]
            for key in missing:
                stored[key] = self.default_settings[key]
            self.bot_settings = stored
            # Persist only when new default keys were merged in.
            if missing:
                self.save_settings()

    def check_folder(self):
        """Create the cog's data folder on first use."""
        if os.path.exists(self.folder):
            return
        logger.info("Creating {}".format(self.folder))
        os.makedirs(self.folder)

    def save_settings(self):
        """Persist the in-memory settings dict to disk."""
        write_json_file(self.file_path, self.bot_settings)

    def make_default_settings(self):
        """Subclasses override this to supply their default settings dict."""
        return {}

    @classmethod
    def intify(cls, key):
        """Recursively convert numeric-looking strings back to int/float."""
        if isinstance(key, dict):
            return {cls.intify(k): cls.intify(v) for k, v in key.items()}
        if isinstance(key, (list, tuple)):
            return [cls.intify(x) for x in key]
        if isinstance(key, str) and key.isdigit():
            return int(key)
        if isinstance(key, str) and key.replace('.', '', 1).isdigit():
            return float(key)
        return key
| 33.122807 | 110 | 0.603814 | from redbot.core import data_manager
from .json_utils import *
class CogSettings(object):
SETTINGS_FILE_NAME = "legacy_settings.json"
def __init__(self, cog_name, bot=None):
self.folder = str(data_manager.cog_data_path(raw_name=cog_name))
self.file_path = os.path.join(self.folder, CogSettings.SETTINGS_FILE_NAME)
self.bot = bot
self.check_folder()
self.default_settings = self.make_default_settings()
if not os.path.isfile(self.file_path):
logger.warning("CogSettings config for {} not found. Creating default...".format(self.file_path))
self.bot_settings = self.default_settings
self.save_settings()
else:
current = self.intify(read_json_file(self.file_path))
updated = False
for key in self.default_settings.keys():
if key not in current.keys():
current[key] = self.default_settings[key]
updated = True
self.bot_settings = current
if updated:
self.save_settings()
def check_folder(self):
if not os.path.exists(self.folder):
logger.info("Creating {}".format(self.folder))
os.makedirs(self.folder)
def save_settings(self):
write_json_file(self.file_path, self.bot_settings)
def make_default_settings(self):
return {}
@classmethod
def intify(cls, key):
if isinstance(key, dict):
return {cls.intify(k): cls.intify(v) for k, v in key.items()}
elif isinstance(key, (list, tuple)):
return [cls.intify(x) for x in key]
elif isinstance(key, str) and key.isdigit():
return int(key)
elif isinstance(key, str) and key.replace('.', '', 1).isdigit():
return float(key)
else:
return key
| true | true |
f71f90277bdccfb77a66e3b8e60c836a40516eb8 | 4,991 | py | Python | preql/core/casts.py | otherJL0/Preql | 958a8dfd3a040f9c40fa394a8bfc3295f32a3019 | [
"MIT"
] | null | null | null | preql/core/casts.py | otherJL0/Preql | 958a8dfd3a040f9c40fa394a8bfc3295f32a3019 | [
"MIT"
] | null | null | null | preql/core/casts.py | otherJL0/Preql | 958a8dfd3a040f9c40fa394a8bfc3295f32a3019 | [
"MIT"
] | null | null | null | from . import pql_objects as objects
from . import sql
from .exceptions import Signal
from .interp_common import call_builtin_func
from .pql_types import ITEM_NAME, T, dp_type
from .types_impl import kernel_type
@dp_type
def _cast(inst_type, target_type, inst):
    # Fallback overload: only allow a no-op cast when the source type is
    # already a subtype of the target; anything else is unsupported.
    if inst_type <= target_type:
        return inst
    raise Signal.make(
        T.TypeError, None, f"Cast not implemented for {inst_type}->{target_type}"
    )
@dp_type
def _cast(inst_type: T.list, target_type: T.list, inst):
    # Cast a list by casting its single item column element-wise.
    if inst is objects.EmptyList:
        # An empty list can adopt any list type without changing the SQL code.
        return inst.replace(type=target_type)
    if inst_type.elem <= target_type.elem:
        # Element type already compatible; nothing to do.
        return inst
    value = inst.get_column(ITEM_NAME)
    elem = _cast(value.type, target_type.elem, value)
    # Re-select the cast column under the canonical item name.
    code = sql.Select(target_type, inst.code, [sql.ColumnAlias(elem.code, ITEM_NAME)])
    return inst.replace(code=code, type=T.list[elem.type])
@dp_type
def _cast(inst_type: T.aggregated, target_type: T.list, inst):
    # Cast the aggregated element type, then re-wrap the result as an
    # aggregate.  NOTE(review): marked "??" by the original author — the
    # correctness of re-aggregating here should be confirmed.
    res = _cast(inst_type.elem, target_type.elem, inst)
    return objects.aggregate(res)
@dp_type
def _cast(inst_type: T.table, target_type: T.list, inst):
    # A one-column table can be cast to a list of that column's values.
    t = inst.type
    if len(t.elems) != 1:
        raise Signal.make(
            T.TypeError,
            None,
            f"Cannot cast {inst_type} to {target_type}. Too many columns",
        )
    if not inst_type.elem <= target_type.elem:
        raise Signal.make(
            T.TypeError,
            None,
            f"Cannot cast {inst_type} to {target_type}. Elements not matching",
        )
    # Exactly one column — unpack its name and type.
    ((elem_name, elem_type),) = inst_type.elems.items()
    # Re-select the column under the canonical item name.
    code = sql.Select(
        T.list[elem_type],
        inst.code,
        [sql.ColumnAlias(sql.Name(elem_type, elem_name), ITEM_NAME)],
    )
    return objects.TableInstance.make(code, T.list[elem_type], [inst])
@dp_type
def _cast(inst_type: T.table, target_type: T.primitive, inst):
    # A 1x1 table (one column, one row) can be cast to its single value.
    # Note: this executes the query via localize().
    t = inst.type
    if len(t.elems) != 1:
        raise Signal.make(
            T.TypeError,
            None,
            f"Cannot cast {inst_type} to {target_type}. Expected exactly 1 column, instead got {len(t.elems)}",
        )
    if not inst_type.elem <= target_type:
        raise Signal.make(
            T.TypeError,
            None,
            f"Cannot cast {inst_type} to {target_type}. Elements type doesn't match",
        )
    res = inst.localize()
    if len(res) != 1:
        raise Signal.make(
            T.TypeError,
            None,
            f"Cannot cast {inst_type} to {target_type}. Expected exactly 1 row, instead got {len(res)}",
        )
    (item,) = res
    return objects.pyvalue_inst(item, inst_type.elem)
@dp_type
def _cast(_inst_type: T.t_id, _target_type: T.int, inst):
    # An id is stored as an integer; relabel the type without new SQL.
    return inst.replace(type=T.int)
@dp_type
def _cast(_inst_type: T.int, target_type: T.t_id, inst):
    # Relabel an integer as an id of the requested table.
    return inst.replace(type=target_type)
@dp_type
def _cast(_inst_type: T.union[T.float, T.bool], _target_type: T.int, inst):
    # Emit a SQL-level CAST to integer.
    code = sql.Cast(T.int, inst.code)
    return objects.Instance.make(code, T.int, [inst])
@dp_type
def _cast(_inst_type: T.number, _target_type: T.bool, inst):
    # A number is true when non-zero, mirroring Python truthiness.
    code = sql.Compare('!=', [inst.code, sql.make_value(0)])
    return objects.Instance.make(code, T.bool, [inst])
@dp_type
def _cast(_inst_type: T.string, _target_type: T.bool, inst):
    # A string is true when non-empty, mirroring Python truthiness.
    code = sql.Compare('!=', [inst.code, sql.make_value('')])
    return objects.Instance.make(code, T.bool, [inst])
@dp_type
def _cast(_inst_type: T.string, _target_type: T.text, inst):
    # Same representation; just relabel the type.
    return inst.replace(type=T.text)
@dp_type
def _cast(_inst_type: T.text, _target_type: T.string, inst):
    # Same representation; just relabel the type.
    return inst.replace(type=T.string)
@dp_type
def _cast(
    _inst_type: T.string, _target_type: T.string, inst
):  # Disambiguate text<->string due to inheritance
    return inst
@dp_type
def _cast(_inst_type: T.union[T.int, T.bool], _target_type: T.float, inst):
    # Emit a SQL-level CAST to float.
    code = sql.Cast(T.float, inst.code)
    return objects.Instance.make(code, T.float, [inst])
@dp_type
def _cast(_inst_type: T.string, _target_type: T.int, inst):
    # Delegate to the builtin helper for string-to-int conversion.
    return call_builtin_func("_cast_string_to_int", [inst])
# @dp_type
# def _cast(_inst_type: T.string, _target_type: T.datetime, inst):
# # XXX unsafe cast, bad strings won't throw an error
# return objects.Instance.make(inst.code, T.datetime, [inst])
@dp_type
def _cast(_inst_type: T.primitive, _target_type: T.string, inst):
    # Emit a SQL-level CAST to string.
    code = sql.Cast(T.string, inst.code)
    return objects.Instance.make(code, T.string, [inst])
@dp_type
def _cast(_inst_type: T.t_relation, target_type: T.t_id, inst):
    # Relabel a relation column as an id.
    # TODO verify same table? same type?
    return inst.replace(type=target_type)
@dp_type
def _cast(inst_type: T.t_relation, target_type: T.int, inst):
    # A relation can be treated as an int only when the related element
    # type is itself an int.
    if inst.type.elem <= T.int:
        return inst.replace(type=target_type)
    raise Signal.make(
        T.TypeError, None, f"Cast not implemented for {inst_type}->{target_type}"
    )
def cast(obj, t):
    """Cast ``obj`` to type ``t``, preserving any phantom type of the source.

    Dispatches on the kernel type of ``obj`` through the ``_cast`` overloads.
    """
    res = _cast(kernel_type(obj.type), t, obj)
    return objects.inherit_phantom_type(res, [obj])
| 28.19774 | 111 | 0.661992 | from . import pql_objects as objects
from . import sql
from .exceptions import Signal
from .interp_common import call_builtin_func
from .pql_types import ITEM_NAME, T, dp_type
from .types_impl import kernel_type
@dp_type
def _cast(inst_type, target_type, inst):
if inst_type <= target_type:
return inst
raise Signal.make(
T.TypeError, None, f"Cast not implemented for {inst_type}->{target_type}"
)
@dp_type
def _cast(inst_type: T.list, target_type: T.list, inst):
if inst is objects.EmptyList:
return inst.replace(type=target_type)
if inst_type.elem <= target_type.elem:
return inst
value = inst.get_column(ITEM_NAME)
elem = _cast(value.type, target_type.elem, value)
code = sql.Select(target_type, inst.code, [sql.ColumnAlias(elem.code, ITEM_NAME)])
return inst.replace(code=code, type=T.list[elem.type])
@dp_type
def _cast(inst_type: T.aggregated, target_type: T.list, inst):
res = _cast(inst_type.elem, target_type.elem, inst)
return objects.aggregate(res)
@dp_type
def _cast(inst_type: T.table, target_type: T.list, inst):
t = inst.type
if len(t.elems) != 1:
raise Signal.make(
T.TypeError,
None,
f"Cannot cast {inst_type} to {target_type}. Too many columns",
)
if not inst_type.elem <= target_type.elem:
raise Signal.make(
T.TypeError,
None,
f"Cannot cast {inst_type} to {target_type}. Elements not matching",
)
((elem_name, elem_type),) = inst_type.elems.items()
code = sql.Select(
T.list[elem_type],
inst.code,
[sql.ColumnAlias(sql.Name(elem_type, elem_name), ITEM_NAME)],
)
return objects.TableInstance.make(code, T.list[elem_type], [inst])
@dp_type
def _cast(inst_type: T.table, target_type: T.primitive, inst):
t = inst.type
if len(t.elems) != 1:
raise Signal.make(
T.TypeError,
None,
f"Cannot cast {inst_type} to {target_type}. Expected exactly 1 column, instead got {len(t.elems)}",
)
if not inst_type.elem <= target_type:
raise Signal.make(
T.TypeError,
None,
f"Cannot cast {inst_type} to {target_type}. Elements type doesn't match",
)
res = inst.localize()
if len(res) != 1:
raise Signal.make(
T.TypeError,
None,
f"Cannot cast {inst_type} to {target_type}. Expected exactly 1 row, instead got {len(res)}",
)
(item,) = res
return objects.pyvalue_inst(item, inst_type.elem)
@dp_type
def _cast(_inst_type: T.t_id, _target_type: T.int, inst):
return inst.replace(type=T.int)
@dp_type
def _cast(_inst_type: T.int, target_type: T.t_id, inst):
return inst.replace(type=target_type)
@dp_type
def _cast(_inst_type: T.union[T.float, T.bool], _target_type: T.int, inst):
code = sql.Cast(T.int, inst.code)
return objects.Instance.make(code, T.int, [inst])
@dp_type
def _cast(_inst_type: T.number, _target_type: T.bool, inst):
code = sql.Compare('!=', [inst.code, sql.make_value(0)])
return objects.Instance.make(code, T.bool, [inst])
@dp_type
def _cast(_inst_type: T.string, _target_type: T.bool, inst):
code = sql.Compare('!=', [inst.code, sql.make_value('')])
return objects.Instance.make(code, T.bool, [inst])
@dp_type
def _cast(_inst_type: T.string, _target_type: T.text, inst):
return inst.replace(type=T.text)
@dp_type
def _cast(_inst_type: T.text, _target_type: T.string, inst):
return inst.replace(type=T.string)
@dp_type
def _cast(
_inst_type: T.string, _target_type: T.string, inst
): # Disambiguate text<->string due to inheritance
return inst
@dp_type
def _cast(_inst_type: T.union[T.int, T.bool], _target_type: T.float, inst):
code = sql.Cast(T.float, inst.code)
return objects.Instance.make(code, T.float, [inst])
@dp_type
def _cast(_inst_type: T.string, _target_type: T.int, inst):
return call_builtin_func("_cast_string_to_int", [inst])
# @dp_type
# def _cast(_inst_type: T.string, _target_type: T.datetime, inst):
# # XXX unsafe cast, bad strings won't throw an error
@dp_type
def _cast(_inst_type: T.primitive, _target_type: T.string, inst):
code = sql.Cast(T.string, inst.code)
return objects.Instance.make(code, T.string, [inst])
@dp_type
def _cast(_inst_type: T.t_relation, target_type: T.t_id, inst):
return inst.replace(type=target_type)
@dp_type
def _cast(inst_type: T.t_relation, target_type: T.int, inst):
if inst.type.elem <= T.int:
return inst.replace(type=target_type)
raise Signal.make(
T.TypeError, None, f"Cast not implemented for {inst_type}->{target_type}"
)
def cast(obj, t):
res = _cast(kernel_type(obj.type), t, obj)
return objects.inherit_phantom_type(res, [obj])
| true | true |
f71f912ae78f467af2ef0d275e047683484c9024 | 132 | py | Python | clvm/EvalError.py | ChiaMineJP/clvm | 1a5cb17895d8707f784a85180bc97d3c6ebe71a0 | [
"Apache-2.0"
] | 83 | 2020-02-23T13:02:41.000Z | 2022-03-31T06:27:11.000Z | clvm/EvalError.py | ChiaMineJP/clvm | 1a5cb17895d8707f784a85180bc97d3c6ebe71a0 | [
"Apache-2.0"
] | 56 | 2020-01-30T00:28:33.000Z | 2022-03-29T10:38:14.000Z | venv/lib/python3.8/site-packages/clvm/EvalError.py | hu12305204/chia-analyze | 1c9e2104dbe340412e79051fad4cb5b591f6d1a3 | [
"Apache-2.0"
] | 31 | 2019-12-06T09:27:37.000Z | 2022-03-21T13:38:14.000Z | class EvalError(Exception):
    def __init__(self, message: str, sexp):
        # Keep a reference to the sexp being evaluated when the error
        # occurred, so callers can inspect where evaluation failed.
        super().__init__(message)
        self._sexp = sexp
| 26.4 | 43 | 0.643939 | class EvalError(Exception):
def __init__(self, message: str, sexp):
super().__init__(message)
self._sexp = sexp
| true | true |
f71f91ba96fb0454f17f5b10798f2ab9bc54d086 | 7,164 | py | Python | src/socket_proxy/utils.py | fkantelberg/socket-proxy | 4fc971cfef29282f30299f40106263b53463cdd3 | [
"MIT"
] | 1 | 2021-05-18T02:28:37.000Z | 2021-05-18T02:28:37.000Z | src/socket_proxy/utils.py | fkantelberg/socket-proxy | 4fc971cfef29282f30299f40106263b53463cdd3 | [
"MIT"
] | null | null | null | src/socket_proxy/utils.py | fkantelberg/socket-proxy | 4fc971cfef29282f30299f40106263b53463cdd3 | [
"MIT"
] | null | null | null | import argparse
import ipaddress
import itertools
import logging
import os
import re
import secrets
import socket
import ssl
import sys
from random import shuffle
from typing import List, Tuple, Union
from urllib.parse import urlsplit
from . import base
_logger = logging.getLogger(__name__)
def configure_logging(log_file: str, level: str) -> None:
    """Configure the root logger.

    Always logs to stdout and additionally to ``log_file`` when given.
    ``level`` is looked up case-insensitively in ``base.LOG_LEVELS`` and
    falls back to DEBUG when unknown.
    """
    level = base.LOG_LEVELS.get(level.lower(), logging.DEBUG)
    log = logging.getLogger()
    log.setLevel(level)
    # Handlers accept everything; level filtering happens on the root logger
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter(base.LOG_FORMAT, style="{"))
    log.addHandler(handler)
    if log_file:
        handler = logging.FileHandler(log_file)
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(logging.Formatter(base.LOG_FORMAT, style="{"))
        log.addHandler(handler)
def format_transfer(b: int) -> str:
    """Render a byte count in a human readable unit, e.g. ``1.5 M``.

    :param b: number of bytes; must not be negative
    :raises ValueError: if ``b`` is negative
    """
    if b < 0:
        raise ValueError("Must be bigger than 0")
    units = (("T", 1 << 40), ("G", 1 << 30), ("M", 1 << 20), ("K", 1 << 10))
    # Pick the largest unit that fits; below 1 KiB fall back to the raw number
    fitting = next(((sym, sz) for sym, sz in units if b >= sz), None)
    if fitting is None:
        return str(b)
    symbol, size = fitting
    return f"{b / size:.1f} {symbol}"
def generate_token() -> bytes:
    """Return ``base.CLIENT_NAME_SIZE`` cryptographically random bytes used
    for identification of clients and tunnels."""
    return secrets.token_bytes(base.CLIENT_NAME_SIZE)
def generate_ssl_context(
    *,
    cert: str = None,
    key: str = None,
    ca: str = None,
    server: bool = False,
    ciphers: List[str] = None,
    check_hostname: bool = False,
) -> ssl.SSLContext:
    """Generate a SSL context for the tunnel.

    :param cert: path of a PEM certificate (chain) to present
    :param key: path of the matching private key (optional if bundled)
    :param ca: path of a CA file; when given the peer must present a
        certificate verifiable against it (CERT_REQUIRED)
    :param server: build a server-side context instead of a client one
    :param ciphers: OpenSSL cipher string to restrict the cipher suites
    :param check_hostname: verify the peer certificate's hostname
    """
    # Set the protocol and create the basic context; TLS < 1.2 is refused
    proto = ssl.PROTOCOL_TLS_SERVER if server else ssl.PROTOCOL_TLS_CLIENT
    ctx = ssl.SSLContext(proto)
    ctx.check_hostname = check_hostname
    ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    # Prevent the reuse of (EC)DH parameters across connections
    if server:
        ctx.options |= ssl.OP_SINGLE_DH_USE | ssl.OP_SINGLE_ECDH_USE
    # Load a certificate and key for the connection
    if cert:
        ctx.load_cert_chain(cert, keyfile=key)
    # Load the CA to verify the other side
    if ca:
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.load_verify_locations(cafile=ca)
    # Set possible ciphers to use
    if ciphers:
        ctx.set_ciphers(ciphers)
    # Log the effective configuration for debugging
    _logger.info("CA usage: %s", bool(ca))
    _logger.info("Certificate: %s", bool(cert))
    _logger.info("Hostname verification: %s", bool(check_hostname))
    _logger.info("Minimal TLS Versions: %s", ctx.minimum_version.name)
    ciphers = sorted(c["name"] for c in ctx.get_ciphers())
    _logger.info("Ciphers: %s", ", ".join(ciphers))
    return ctx
def get_unused_port(min_port: int, max_port: int, udp: bool = False) -> int:
    """Return a random unused port within the given (inclusive) range, or
    ``None`` if all of them are in use.

    :param min_port: lower bound of the port range
    :param max_port: upper bound of the port range
    :param udp: probe UDP ports instead of TCP ports
    """
    ports = list(range(min_port, max_port + 1))
    shuffle(ports)
    for port in ports:
        # Use a fresh socket per attempt and close it deterministically, so
        # a failed bind can't leave a descriptor dangling until GC and we
        # never re-bind an already-closed socket.
        sock = socket.socket(type=socket.SOCK_DGRAM) if udp else socket.socket()
        try:
            sock.bind(("", port))
            return port
        except OSError:
            # Port already in use (or not bindable) -> try the next one
            continue
        finally:
            sock.close()
    return None
def merge_settings(a: int, b: int) -> int:
    """Merge two tunnel settings.

    A value of 0 means "unset", so if either side is 0 the other one wins;
    otherwise the stricter (lower) value is used.
    """
    if a and b:
        return min(a, b)
    return max(a, b)
def optimize_networks(*networks: "base.IPvXNetwork") -> "List[base.IPvXNetwork]":
    """Reduce the given networks to a minimal covering set.

    Duplicates are removed and any network that is a subnet of another given
    network is dropped, since the covering supernet already includes it.
    The result is sorted by IP version and then by network.

    Fixes two defects of the previous implementation: ``itertools.groupby``
    on an input not sorted by version silently overwrote earlier groups of
    the same version, and the pruning kept the subnet instead of the
    covering supernet.
    """
    # Group by IP version first; subnet_of() is only defined between
    # networks of the same version.
    groups = {}
    for network in networks:
        groups.setdefault(network.version, set()).add(network)
    result = []
    for version in sorted(groups):
        nets = sorted(groups[version])
        # Keep a network only if it is not contained in a different one
        result.extend(
            net
            for net in nets
            if not any(other != net and net.subnet_of(other) for other in nets)
        )
    return result
def parse_address(
    address: str, host: str = None, port: int = None, multiple: bool = False
) -> Tuple[Union[str, List[str]], int]:
    """Split an address into host(s) and port.

    The port is mandatory, either inside ``address`` or via the ``port``
    default.  ``host`` is the fallback hostname ("" means all interfaces).
    Multiple comma-separated hosts are only accepted when ``multiple`` is
    set, and then only as IPs.
    """
    # Only host[:port] without scheme or path; hostnames are allowed solely
    # for single-host addresses.
    allowed = r"[0-9.:\[\],]*?" if multiple else r"[0-9a-zA-Z.:\[\],]*?"
    m = re.match(fr"^(?P<hosts>{allowed})(:(?P<port>\d+))?$", address)
    if m is None:
        raise argparse.ArgumentTypeError(
            "Invalid address parsed. Only host and port are supported."
        )

    parts = m.groupdict()
    # A port inside the address overrides the default one
    if parts.get("port"):
        port = int(parts["port"])
        if not 0 < port < 65536:
            raise argparse.ArgumentTypeError("Invalid address parsed. Invalid port.")
    if port is None:
        raise argparse.ArgumentTypeError("Port required.")

    # Collect every hostname, substituting the default for empty entries
    hosts = set()
    for entry in parts.get("hosts", "").split(","):
        if not entry:
            hosts.add(entry or host)
            continue
        try:
            hosts.add(urlsplit(f"http://{entry}").hostname)
        except Exception as e:
            raise argparse.ArgumentTypeError(
                "Invalid address parsed. Invalid host."
            ) from e

    # Multiple hosts are supported if the flag is set
    if len(hosts) > 1 and multiple:
        return sorted(hosts), port
    if len(hosts) > 1:
        raise argparse.ArgumentTypeError(
            "Invalid address parsed. Only one host is required."
        )
    if len(hosts) == 1:
        host = hosts.pop() or host
    if host is not None:
        return host, port
    raise argparse.ArgumentTypeError("Invalid address parsed. Host required.")
def parse_networks(network: str) -> List[base.IPvXNetwork]:
    """Parse a comma-separated list of CIDR networks and return them optimized.

    :raises argparse.ArgumentTypeError: if any entry is not a valid network
    """
    try:
        return optimize_networks(*map(ipaddress.ip_network, network.split(",")))
    except Exception as e:
        raise argparse.ArgumentTypeError("Invalid network format") from e
def valid_file(path: str) -> str:
    """Resolve ``path`` to an absolute path, requiring it to be an existing
    file.  Intended for use as an argparse type callable.

    :raises argparse.ArgumentTypeError: if the path is not a regular file
    """
    resolved = os.path.abspath(path)
    if os.path.isfile(resolved):
        return resolved
    raise argparse.ArgumentTypeError("Not a file.")
def valid_ports(ports: str) -> Tuple[int, int]:
    """Parse a ``min:max`` port-range argument into a sorted (min, max) tuple.

    Both ports are now mandatory: the previous pattern made the second one
    optional, so input like ``"80:"`` crashed with a TypeError inside
    ``map(int, ...)`` instead of reporting a usable argparse error.  The
    parameter annotation is also fixed (the input is a string).

    :raises argparse.ArgumentTypeError: on bad format or ports outside
        the open range (1, 65536) with min < max
    """
    m = re.match(r"^(\d+):(\d+)$", ports, re.IGNORECASE)
    if not m:
        raise argparse.ArgumentTypeError("Invalid port scheme.")
    a, b = sorted(map(int, m.groups()))
    if 0 < a < b < 65536:
        return a, b
    raise argparse.ArgumentTypeError("Port must be in range (1, 65536)")
| 31.012987 | 87 | 0.630374 | import argparse
import ipaddress
import itertools
import logging
import os
import re
import secrets
import socket
import ssl
import sys
from random import shuffle
from typing import List, Tuple, Union
from urllib.parse import urlsplit
from . import base
_logger = logging.getLogger(__name__)
def configure_logging(log_file: str, level: str) -> None:
level = base.LOG_LEVELS.get(level.lower(), logging.DEBUG)
log = logging.getLogger()
log.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(base.LOG_FORMAT, style="{"))
log.addHandler(handler)
if log_file:
handler = logging.FileHandler(log_file)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(base.LOG_FORMAT, style="{"))
log.addHandler(handler)
def format_transfer(b: int) -> str:
symbols = [("T", 1 << 40), ("G", 1 << 30), ("M", 1 << 20), ("K", 1 << 10)]
if b < 0:
raise ValueError("Must be bigger than 0")
for symbol, size in symbols:
if b >= size:
return f"{b / size:.1f} {symbol}"
return str(b)
def generate_token() -> bytes:
return secrets.token_bytes(base.CLIENT_NAME_SIZE)
def generate_ssl_context(
*,
cert: str = None,
key: str = None,
ca: str = None,
server: bool = False,
ciphers: List[str] = None,
check_hostname: bool = False,
) -> ssl.SSLContext:
proto = ssl.PROTOCOL_TLS_SERVER if server else ssl.PROTOCOL_TLS_CLIENT
ctx = ssl.SSLContext(proto)
ctx.check_hostname = check_hostname
ctx.minimum_version = ssl.TLSVersion.TLSv1_2
if server:
ctx.options |= ssl.OP_SINGLE_DH_USE | ssl.OP_SINGLE_ECDH_USE
if cert:
ctx.load_cert_chain(cert, keyfile=key)
if ca:
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cafile=ca)
if ciphers:
ctx.set_ciphers(ciphers)
_logger.info("CA usage: %s", bool(ca))
_logger.info("Certificate: %s", bool(cert))
_logger.info("Hostname verification: %s", bool(check_hostname))
_logger.info("Minimal TLS Versions: %s", ctx.minimum_version.name)
ciphers = sorted(c["name"] for c in ctx.get_ciphers())
_logger.info("Ciphers: %s", ", ".join(ciphers))
return ctx
def get_unused_port(min_port: int, max_port: int, udp: bool = False) -> int:
sock = socket.socket(type=socket.SOCK_DGRAM) if udp else socket.socket()
ports = list(range(min_port, max_port + 1))
shuffle(ports)
for port in ports:
try:
sock.bind(("", port))
sock.close()
return port
except Exception:
pass
return None
def merge_settings(a: int, b: int) -> int:
return min(a, b) if a and b else max(a, b)
def optimize_networks(*networks: List[base.IPvXNetwork]) -> List[base.IPvXNetwork]:
grouped = itertools.groupby(networks, lambda n: n.version)
groups = {}
for version, group in grouped:
group = sorted(set(group))
tmp = set()
for i, a in enumerate(group):
for b in group[i + 1 :]:
if b.subnet_of(a):
tmp.add(b)
break
else:
tmp.add(a)
groups[version] = sorted(tmp)
return sum([g for _, g in sorted(groups.items())], [])
def parse_address(
address: str, host: str = None, port: int = None, multiple: bool = False
) -> Tuple[Union[str, List[str]], int]:
pattern = r"[0-9.:\[\],]*?" if multiple else r"[0-9a-zA-Z.:\[\],]*?"
match = re.match(fr"^(?P<hosts>{pattern})(:(?P<port>\d+))?$", address)
if not match:
raise argparse.ArgumentTypeError(
"Invalid address parsed. Only host and port are supported."
)
data = match.groupdict()
if data.get("port"):
port = int(data["port"])
if port <= 0 or port >= 65536:
raise argparse.ArgumentTypeError("Invalid address parsed. Invalid port.")
if port is None:
raise argparse.ArgumentTypeError("Port required.")
hosts = set()
for h in data.get("hosts", "").split(","):
if not h:
hosts.add(h or host)
continue
try:
parsed = urlsplit(f"http://{h}")
hosts.add(parsed.hostname)
except Exception as e:
raise argparse.ArgumentTypeError(
"Invalid address parsed. Invalid host."
) from e
if len(hosts) > 1 and multiple:
return sorted(hosts), port
if len(hosts) > 1:
raise argparse.ArgumentTypeError(
"Invalid address parsed. Only one host is required."
)
if len(hosts) == 1:
host = hosts.pop() or host
if host is not None:
return host, port
raise argparse.ArgumentTypeError("Invalid address parsed. Host required.")
def parse_networks(network: str) -> List[base.IPvXNetwork]:
try:
return optimize_networks(*map(ipaddress.ip_network, network.split(",")))
except Exception as e:
raise argparse.ArgumentTypeError("Invalid network format") from e
def valid_file(path: str) -> str:
path = os.path.abspath(path)
if not os.path.isfile(path):
raise argparse.ArgumentTypeError("Not a file.")
return path
def valid_ports(ports: Tuple[int, int]) -> Tuple[int, int]:
m = re.match(r"^(\d+):(\d+)?$", ports, re.IGNORECASE)
if m:
a, b = sorted(map(int, m.groups()))
if 0 < a < b < 65536:
return a, b
raise argparse.ArgumentTypeError("Port must be in range (1, 65536)")
raise argparse.ArgumentTypeError("Invalid port scheme.")
| true | true |
f71f91d24c0fd013e9b7c15807da9faf8ffce3f9 | 63 | py | Python | src/com/view/__init__.py | amzpiper/synchronize_data | a0ef983a6445ac5b793691dd3d4a86790c0581a7 | [
"Apache-2.0"
] | null | null | null | src/com/view/__init__.py | amzpiper/synchronize_data | a0ef983a6445ac5b793691dd3d4a86790c0581a7 | [
"Apache-2.0"
] | null | null | null | src/com/view/__init__.py | amzpiper/synchronize_data | a0ef983a6445ac5b793691dd3d4a86790c0581a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env/python
# -*- coding:utf-8 -*-
# Author:guoyuhang | 21 | 22 | 0.634921 | true | true | |
f71f928563424af0b3d6d9d923982a0a08ad1255 | 910 | py | Python | nautilus/network/events/util.py | AlecAivazis/python | 70e2acef27a2f87355590be1a6ca60ce3ab4d09c | [
"MIT"
] | 9 | 2019-02-17T01:33:43.000Z | 2022-02-03T02:14:12.000Z | nautilus/network/events/util.py | AlecAivazis/python | 70e2acef27a2f87355590be1a6ca60ce3ab4d09c | [
"MIT"
] | 59 | 2016-03-14T15:55:50.000Z | 2016-07-17T15:22:56.000Z | nautilus/network/events/util.py | AlecAivazis/python | 70e2acef27a2f87355590be1a6ca60ce3ab4d09c | [
"MIT"
] | 3 | 2017-08-03T20:18:59.000Z | 2018-07-18T02:03:41.000Z | """
This module defines various utilities for dealing with the network.
"""
from asyncio import iscoroutinefunction, iscoroutine
def combine_action_handlers(*handlers):
    """
    Merge several action handlers into a single coroutine function that
    invokes each of them, in order, with the same arguments.
    """
    # Reject anything that is not awaitable up front
    invalid = [h for h in handlers if not (iscoroutinefunction(h) or iscoroutine(h))]
    if invalid:
        raise ValueError("Provided handler is not a coroutine: %s" % invalid[0])

    async def combined_handler(*args, **kwds):
        """Invoke every wrapped handler with the given arguments."""
        for handler in handlers:
            await handler(*args, **kwds)

    return combined_handler
| 33.703704 | 81 | 0.668132 | from asyncio import iscoroutinefunction, iscoroutine
def combine_action_handlers(*handlers):
for handler in handlers:
if not (iscoroutinefunction(handler) or iscoroutine(handler)):
raise ValueError("Provided handler is not a coroutine: %s" % handler)
async def combined_handler(*args, **kwds):
for handler in handlers:
await handler(*args, **kwds)
return combined_handler
| true | true |
f71f92cb89c3fdddcc269f270e17130bd0cea3de | 2,945 | py | Python | app/auth/forms.py | xdhuxc/xblog | ff0383140a5a0c1e8422223154cb98defee73121 | [
"W3C"
] | null | null | null | app/auth/forms.py | xdhuxc/xblog | ff0383140a5a0c1e8422223154cb98defee73121 | [
"W3C"
] | null | null | null | app/auth/forms.py | xdhuxc/xblog | ff0383140a5a0c1e8422223154cb98defee73121 | [
"W3C"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import PasswordField
from wtforms import BooleanField
from wtforms import SubmitField
from wtforms.validators import DataRequired
from wtforms.validators import Length
from wtforms.validators import Email
from wtforms.validators import Regexp
from wtforms.validators import EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
    """
    User login form.
    """
    # First argument of each field is the label rendered on the page.
    user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField('密码', validators=[DataRequired()])
    remember_me = BooleanField('记住我')
    submit = SubmitField('登录')
class RegistrationForm(FlaskForm):
    """User registration form."""
    # The first argument is the label rendered on the page.
    user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()])
    user_name = StringField('用户名', validators=[DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$',
                                                             0, 'User Name must have two letters,numbers dots or underscores')])
    password = PasswordField('密码', validators=[DataRequired(), EqualTo('password2', message='两次输入的密码必须一致。')])
    password2 = PasswordField('确认密码', validators=[DataRequired()])
    submit = SubmitField('注册')
    """
    这个表单中还有两个自定义的验证函数,以方法的形式实现。
    如果表单类中定义了以validate_开头且后面跟着字段名的方法,这个方法就和常规的验证函数一起调用。
    """
    # The two methods below are inline validators: WTForms automatically runs
    # any method named validate_<fieldname> alongside the regular validators.
    def validate_user_email(self, field):
        if User.query.filter_by(user_email=field.data).first():
            raise ValidationError('该邮件地址已经被注册。')
    def validate_user_name(self, field):
        if User.query.filter_by(user_name=field.data).first():
            raise ValidationError('该用户名已经被使用。')
class ChangePasswordForm(FlaskForm):
    """Form for changing the password of a logged-in user."""
    old_password = PasswordField('旧密码', validators=[DataRequired()])
    # New password must be entered twice and match.
    password = PasswordField('新密码', validators=[
        DataRequired(), EqualTo('password2', message='两次输入的密码必须一致。')])
    password2 = PasswordField('确认新密码', validators=[DataRequired()])
    submit = SubmitField('更改密码')
class PasswordResetRequestForm(FlaskForm):
    """Form for requesting a password-reset e-mail."""
    user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()])
    submit = SubmitField('重置密码')
class PasswordResetForm(FlaskForm):
    """Form for setting a new password during a reset."""
    # New password must be entered twice and match.
    password = PasswordField('新密码', validators=[
        DataRequired(), EqualTo('password2', message='两次输入的密码不一致。')])
    password2 = PasswordField('确认密码', validators=[DataRequired()])
    submit = SubmitField('重置密码')
class ChangeEmailForm(FlaskForm):
    """Form for changing the account's e-mail address.

    ``validate_user_email`` is a WTForms inline validator and must be a
    regular bound method: WTForms calls ``form.validate_user_email(field)``.
    The previous ``@staticmethod`` decorator broke that convention — the
    field object landed in ``self`` and the real ``field`` argument went
    missing, raising TypeError during validation — so it is removed.
    """
    user_email = StringField('新电子邮件地址', validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField('密码', validators=[DataRequired()])
    submit = SubmitField('更改电子邮箱')

    def validate_user_email(self, field):
        # Reject addresses that are already registered.
        if User.query.filter_by(user_email=field.data).first():
            raise ValidationError('该邮箱已经注册。')
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import PasswordField
from wtforms import BooleanField
from wtforms import SubmitField
from wtforms.validators import DataRequired
from wtforms.validators import Length
from wtforms.validators import Email
from wtforms.validators import Regexp
from wtforms.validators import EqualTo
from wtforms import ValidationError
from ..models import User
class LoginForm(FlaskForm):
user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()])
password = PasswordField('密码', validators=[DataRequired()])
remember_me = BooleanField('记住我')
submit = SubmitField('登录')
class RegistrationForm(FlaskForm):
user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()])
user_name = StringField('用户名', validators=[DataRequired(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$',
0, 'User Name must have two letters,numbers dots or underscores')])
password = PasswordField('密码', validators=[DataRequired(), EqualTo('password2', message='两次输入的密码必须一致。')])
password2 = PasswordField('确认密码', validators=[DataRequired()])
submit = SubmitField('注册')
def validate_user_email(self, field):
if User.query.filter_by(user_email=field.data).first():
raise ValidationError('该邮件地址已经被注册。')
def validate_user_name(self, field):
if User.query.filter_by(user_name=field.data).first():
raise ValidationError('该用户名已经被使用。')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('旧密码', validators=[DataRequired()])
password = PasswordField('新密码', validators=[
DataRequired(), EqualTo('password2', message='两次输入的密码必须一致。')])
password2 = PasswordField('确认新密码', validators=[DataRequired()])
submit = SubmitField('更改密码')
class PasswordResetRequestForm(FlaskForm):
user_email = StringField('电子邮箱', validators=[DataRequired(), Length(1, 64), Email()])
submit = SubmitField('重置密码')
class PasswordResetForm(FlaskForm):
password = PasswordField('新密码', validators=[
DataRequired(), EqualTo('password2', message='两次输入的密码不一致。')])
password2 = PasswordField('确认密码', validators=[DataRequired()])
submit = SubmitField('重置密码')
class ChangeEmailForm(FlaskForm):
    """Form for changing the account's e-mail address.

    Note: the ``@staticmethod`` decorator was removed from
    ``validate_user_email`` — WTForms invokes inline validators as bound
    methods (``form.validate_user_email(field)``), and a staticmethod with a
    ``self`` parameter shifted the field into ``self`` and raised TypeError.
    """
    user_email = StringField('新电子邮件地址', validators=[DataRequired(), Length(1, 64), Email()])
    password = PasswordField('密码', validators=[DataRequired()])
    submit = SubmitField('更改电子邮箱')

    def validate_user_email(self, field):
        # Reject addresses that are already registered.
        if User.query.filter_by(user_email=field.data).first():
            raise ValidationError('该邮箱已经注册。')
f71f9324bc59454b4d777a1a8dfc0cbafae774e6 | 9,717 | py | Python | mbam/parsing/base.py | danebjork/AutomatedMBAM | 91183dcfef634ad9150ee187da8172cff6845fe3 | [
"MIT"
] | 2 | 2018-11-10T17:06:36.000Z | 2018-12-19T23:47:26.000Z | mbam/parsing/base.py | danebjork/AutomatedMBAM | 91183dcfef634ad9150ee187da8172cff6845fe3 | [
"MIT"
] | 6 | 2018-05-16T21:06:34.000Z | 2019-01-14T22:23:15.000Z | mbam/parsing/base.py | danebjork/AutomatedMBAM | 91183dcfef634ad9150ee187da8172cff6845fe3 | [
"MIT"
] | 2 | 2018-11-14T13:30:55.000Z | 2019-01-14T20:49:15.000Z | import os
from sympy.printing import julia_code
from sympy import Symbol
import json
import logging
import re
class BaseParser:
    """Parent class for all model parsers.

    Subclasses assemble a runnable Julia script for the Models /
    ParametricModels packages from an ``mbammodel``; the ``write_*``
    methods each emit one fragment of that script as a string.
    """
    def __init__(self, mbam_model, data_path):
        """
        Parameters
        ----------
        mbam_model : ``mbammodel``
            Can be any of the following: Function, ODE, DAE.
        data_path : ``str``
            The full path to the hdf5 file to be included in the
            model.
        """
        self.logger = logging.getLogger("MBAM.BaseParser")
        self.logger.debug("Initializing BaseParser")
        self.mm = mbam_model
        self.create_default_options()
        # self.data_path = data_path
        # NOTE(review): the data_path argument is ignored; the HDF5 file is
        # hard-coded as 'temp.h5' — confirm this is intentional.
        self.data_path = 'temp.h5' # os.path.join(os.pardir, 'temp.h5') # data should be in parent directory
        self.script = '\n'
        # Generated scripts land in julia_scripts/models/<model name>.jl.
        self.dir = os.path.join("julia_scripts", "models")
        self.name = self.mm.name
        self.file_name = self.name + ".jl"
        self.file_path = os.path.join(os.getcwd(), os.path.join(self.dir, self.file_name))
        self.create_julia_swap()

    def update_options(self, options):
        """Creates and saves a new script generated with the given options.

        Parameters
        ----------
        options : ``dict``
            Must follow format: {"bare": ``bool``, "weights": ``bool``, "imports": ``string``, "args": ``string``, "kwargs": ``string``}
        """
        self.logger.debug("Updating options: %s" %options)
        self.options = options

    def create_default_options(self):
        """ Used until options are updated with update_options
        """
        self.options = {}
        self.options['bare'] = False
        self.options['weights'] = False
        self.options["imports"] = ""
        self.options["args"] = ""
        self.options["kwargs"] = ""

    def create_julia_swap(self):
        """ Generates a list of sympy substitution tuples to sub out sympy vars
        with julia formating.

        Example
        -------
        Time update: t => _t.
        Vars update: x1 => _x[1].
        Params update: p1 => ps.p1.
        Inputs update: u1 => _inp[1].
        """
        self.julia_swap = []
        self.julia_swap.append(('t', '_t'))
        for p in self.mm.model_ps.list:
            self.julia_swap.append((p, 'ps.' + p))
        # Julia arrays are 1-indexed, hence the i+1 below.
        for i, v in enumerate(self.mm.model_vs.list):
            self.julia_swap.append((v, '_x[{0}]'.format(i+1)))
        for i, u in enumerate(self.mm.model_eqs['inp'].eq_symbols):
            self.julia_swap.append((u.strip(), '_inp[{0}]'.format(i+1)))

    def write_xi(self):
        """Return the Julia line extracting parameter values from the model."""
        return 'xi = ParametricModels.xvalues(parametricmodel)\n'

    def write_end(self):
        """Return the closing line of the generated Julia module."""
        return 'end # module'

    ## model specific
    def write_bare_model(self):
        """Return Julia code defining the zero-data 'bare' model."""
        ret = ''
        # NOTE(review): '"%s"_zero' places the _zero suffix outside the quotes
        # in the generated Julia; possibly intended "%s_zero" — confirm against
        # the Julia side before changing.
        ret += 'zerodata = ParametricModels.OLSData("%s"_zero, zero(ydata))\n' % self.name
        ret += 'bareparametricmodel = @ParametricModels.ODEModel(zerodata, %s, ic, rhs, obs, _t, (), Tuple{Symbol, Any}[])\n' % self.name
        ret += self.write_param_transforms(bare=True)
        ret += 'modelbare = Models.Model(bareparametricmodel)\n'
        return ret

    def write_imports(self):
        """Return the module header and import lines of the Julia script."""
        ret = 'module {0}_Model\n\n'.format(self.name)
        ret += 'import Models\n'
        ret += 'import ParametricModels\n'
        ret += 'using Parameters\n'
        self.logger.debug("Extra modules to import: %s" %self.options["imports"])
        if self.options["imports"] != "":
            ret += "import %s\n\n" %self.options["imports"]
        else:
            ret += "\n"
        return ret

    def write_params(self):
        """Return the Julia parameter-space struct with initial values."""
        ret = '@with_kw mutable struct %s{T<:Real} <: ParametricModels.AbstractParameterSpace{T} @deftype T\n' % self.name
        for p in self.mm.model_ps.dict['ps']:
            ret += '\t'
            ret += p['name']
            ret += ' = '
            ret += str(p['init_val'])
            ret += '\n'
        ret += 'end\n\n'
        return ret

    def write_inputs(self):
        """Return the Julia `inp` function built from the input equations."""
        ret = 'function inp(ps::%s{T}, _t) where T <: Real\n' % self.name
        ret += self.write_substitutions(self.mm.model_eqs['inp'].sbs_sym_list)
        ret += self.write_equation_return(self.mm.model_eqs['inp'].eqs_sym_list)
        return ret

    def write_data(self):
        """Return Julia code that loads ydata/t (and optional weights) from HDF5."""
        ret = "import HDF5\n"
        # Backslashes are doubled so Windows paths survive inside the Julia string.
        ret += 'ydata = HDF5.h5read("%s", "/ydata")\n' % self.data_path.replace("\\", "\\\\")
        ret += '_t = HDF5.h5read("%s", "/t")\n' % self.data_path.replace("\\", "\\\\")
        if not self.options['weights']:
            ret += 'data = ParametricModels.OLSData("%s", ydata)\n' % self.name
        else:
            # NOTE(review): unlike the reads above, this path is not
            # backslash-escaped — confirm whether that is intentional.
            ret += 'weights = HDF5.h5read("%s", "/weights")\n' % self.data_path
            ret += 'data = ParametricModels.WLSData("%s", ydata, weights)\n' % self.name
        return ret

    def write_substitutions(self, sub_list):
        """ Given a list of substitutions, write them out prior to the return
        statement or the main equations.

        Parameters
        ----------
        sub_list: ``list``
            A list containing equation dictionaries.

        Example
        -------
        c = a + b => {"sym": "c", "eq": "a + b"}
        """
        ret = ''
        for sub in sub_list:
            ret += '\t'
            ret += str(sub['sym'])
            ret += ' = '
            ret += julia_code(sub['eq'].subs(self.julia_swap))
            ret += '\n'
        return ret

    def write_equation_return(self, eq_list):
        """ Given a list of equations, write them out as a julia array following
        the return statement.

        Parameters
        ----------
        eq_list: ``list``
            A list containing equation dictionaries. Where each equation is
            only on the right-hand side. There should be no symbol for this
            function.

        Example
        -------
        a + b => {"sym": "", "eq": "a + b"}
        """
        ret = '\treturn '
        ret += self.write_equation_list(eq_list)
        ret += '\nend\n\n'
        return ret

    def write_equation_list(self, eq_list):
        """ Given a list of equations, write them out as a julia array.

        Parameters
        ----------
        eq_list: ``list``
            A list containing equation dictionaries. Where each equation is
            only on the right-hand side. There should be no symbol for this
            function. The non-julia values will be subbed with julia values.

        Example
        -------
        x + b => {"sym": "", "eq": "a + b"}
        """
        ret = 'T['
        for i, eq in enumerate(eq_list):
            ret += julia_code(eq['eq'].subs(self.julia_swap))
            # No trailing comma after the last element.
            if i != len(eq_list)-1:
                ret += ', '
        ret += ']'
        return ret

    def write_constants(self):
        """Return the Julia macro call marking 'constant' parameters."""
        ret = '@ParametricModels.SetConstant(parametricmodel, '
        for i, p in enumerate(self.mm.model_ps.dict['ps']):
            if p['transform'] == 'constant':
                ret += p['name']
                ret += ', '
        # Drop the trailing ", " before closing the call.
        ret = ret[:-2]
        ret += ')\n'
        return ret

    def write_param_transforms(self, bare=False):
        """Return a Julia loop applying log/sinh transforms to parameters."""
        if not bare:
            ret = 'for p in parametricmodel.parameters\n'
        else:
            ret = 'for p in bareparametricmodel.parameters\n'
        ret += '\tif p.s in ' + self.list_out_param_type('log') + '\n'
        ret += '\t\tp.t = exp\n'
        ret += '\t\tp.invt = log\n'
        ret += '\telseif p.s in ' + self.list_out_param_type('sinh') + '\n'
        ret += '\t\tp.t = sinh\n'
        ret += '\t\tp.invt = asinh\n'
        ret += '\tend\n'
        ret += 'end\n\n'
        return ret

    def list_out_param_type(self, p_type):
        """Return a Julia symbol array of parameters with the given transform."""
        ret = '['
        for i, p in enumerate(self.mm.model_ps.dict['ps']):
            if p['transform'] == p_type:
                ret += ':'+ p['name'] + ', '
        # Only strip the trailing ", " if at least one symbol was added.
        if len(ret) > 1:
            ret = ret[:-2]
        ret += ']'
        return ret

    def init_models_dir(self):
        """ Generates a directory: julia_scripts/models/ to save the model
        file within.
        """
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)

    def save_to_file(self, script):
        """ Overwrites the current script with the given script string.

        Parameters
        ----------
        script: ``str``
            A string representation of a full julia model script.
        """
        self.logger.info("Writing script to file: %s" %self.file_path)
        self.init_models_dir()
        with open(self.file_path, "w", encoding="utf-8") as jl:
            jl.write(script)

    def parse_args(self):
        """Return the options' args formatted as a Julia tuple literal."""
        self.logger.debug("Parsing args: %s", self.options["args"])
        if self.options["args"] == "":
            args = "()"
        else:
            # Trailing comma keeps a single argument a tuple in Julia.
            args = "(%s,)" %self.options["args"]
        self.logger.debug("Parsed args = %s" %args)
        return args

    def parse_kwargs(self):
        """Return the options' kwargs as a Julia Tuple{Symbol, Any} array."""
        self.logger.debug("Parsing kwargs: %s", self.options["kwargs"])
        if self.options["kwargs"] == "":
            kwargs = "Tuple{Symbol, Any}[]"
        else:
            kwargs = "Tuple{Symbol, Any}["
            # "a=1, b=2" becomes [(:a, 1),(:b, 2),]
            for kwarg in self.options["kwargs"].split(','):
                s,v = kwarg.split("=")
                kwargs += "(:%s, %s)," %(s.strip(),v.strip())
            kwargs += "]"
        self.logger.debug("Parsed kwargs = %s" %kwargs)
        return kwargs

    def find_replace_vectorized(self,string):
        """Pad Julia element-wise operators (.*, ./, .^) with spaces."""
        d = {"\.\*": ' .* ', "\.\/": ' ./ ', "\.\^": ' .^ '}
        for item in d.keys():
            # sub item for item's paired value in string
            string = re.sub(item, d[item], string)
        return string
| 34.580071 | 137 | 0.529793 | import os
from sympy.printing import julia_code
from sympy import Symbol
import json
import logging
import re
class BaseParser:
def __init__(self, mbam_model, data_path):
self.logger = logging.getLogger("MBAM.BaseParser")
self.logger.debug("Initializing BaseParser")
self.mm = mbam_model
self.create_default_options()
self.data_path = 'temp.h5' self.dir = os.path.join("julia_scripts", "models")
self.name = self.mm.name
self.file_name = self.name + ".jl"
self.file_path = os.path.join(os.getcwd(), os.path.join(self.dir, self.file_name))
self.create_julia_swap()
def update_options(self, options):
self.logger.debug("Updating options: %s" %options)
self.options = options
def create_default_options(self):
self.options = {}
self.options['bare'] = False
self.options['weights'] = False
self.options["imports"] = ""
self.options["args"] = ""
self.options["kwargs"] = ""
def create_julia_swap(self):
self.julia_swap = []
self.julia_swap.append(('t', '_t'))
for p in self.mm.model_ps.list:
self.julia_swap.append((p, 'ps.' + p))
for i, v in enumerate(self.mm.model_vs.list):
self.julia_swap.append((v, '_x[{0}]'.format(i+1)))
for i, u in enumerate(self.mm.model_eqs['inp'].eq_symbols):
self.julia_swap.append((u.strip(), '_inp[{0}]'.format(i+1)))
def write_xi(self):
return 'xi = ParametricModels.xvalues(parametricmodel)\n'
def write_end(self):
return 'end # module'
are_model(self):
ret = ''
ret += 'zerodata = ParametricModels.OLSData("%s"_zero, zero(ydata))\n' % self.name
ret += 'bareparametricmodel = @ParametricModels.ODEModel(zerodata, %s, ic, rhs, obs, _t, (), Tuple{Symbol, Any}[])\n' % self.name
ret += self.write_param_transforms(bare=True)
ret += 'modelbare = Models.Model(bareparametricmodel)\n'
return ret
def write_imports(self):
ret = 'module {0}_Model\n\n'.format(self.name)
ret += 'import Models\n'
ret += 'import ParametricModels\n'
ret += 'using Parameters\n'
self.logger.debug("Extra modules to import: %s" %self.options["imports"])
if self.options["imports"] != "":
ret += "import %s\n\n" %self.options["imports"]
else:
ret += "\n"
return ret
def write_params(self):
ret = '@with_kw mutable struct %s{T<:Real} <: ParametricModels.AbstractParameterSpace{T} @deftype T\n' % self.name
for p in self.mm.model_ps.dict['ps']:
ret += '\t'
ret += p['name']
ret += ' = '
ret += str(p['init_val'])
ret += '\n'
ret += 'end\n\n'
return ret
def write_inputs(self):
ret = 'function inp(ps::%s{T}, _t) where T <: Real\n' % self.name
ret += self.write_substitutions(self.mm.model_eqs['inp'].sbs_sym_list)
ret += self.write_equation_return(self.mm.model_eqs['inp'].eqs_sym_list)
return ret
def write_data(self):
ret = "import HDF5\n"
ret += 'ydata = HDF5.h5read("%s", "/ydata")\n' % self.data_path.replace("\\", "\\\\")
ret += '_t = HDF5.h5read("%s", "/t")\n' % self.data_path.replace("\\", "\\\\")
if not self.options['weights']:
ret += 'data = ParametricModels.OLSData("%s", ydata)\n' % self.name
else:
ret += 'weights = HDF5.h5read("%s", "/weights")\n' % self.data_path
ret += 'data = ParametricModels.WLSData("%s", ydata, weights)\n' % self.name
return ret
def write_substitutions(self, sub_list):
ret = ''
for sub in sub_list:
ret += '\t'
ret += str(sub['sym'])
ret += ' = '
ret += julia_code(sub['eq'].subs(self.julia_swap))
ret += '\n'
return ret
def write_equation_return(self, eq_list):
ret = '\treturn '
ret += self.write_equation_list(eq_list)
ret += '\nend\n\n'
return ret
def write_equation_list(self, eq_list):
ret = 'T['
for i, eq in enumerate(eq_list):
ret += julia_code(eq['eq'].subs(self.julia_swap))
if i != len(eq_list)-1:
ret += ', '
ret += ']'
return ret
def write_constants(self):
ret = '@ParametricModels.SetConstant(parametricmodel, '
for i, p in enumerate(self.mm.model_ps.dict['ps']):
if p['transform'] == 'constant':
ret += p['name']
ret += ', '
ret = ret[:-2]
ret += ')\n'
return ret
def write_param_transforms(self, bare=False):
if not bare:
ret = 'for p in parametricmodel.parameters\n'
else:
ret = 'for p in bareparametricmodel.parameters\n'
ret += '\tif p.s in ' + self.list_out_param_type('log') + '\n'
ret += '\t\tp.t = exp\n'
ret += '\t\tp.invt = log\n'
ret += '\telseif p.s in ' + self.list_out_param_type('sinh') + '\n'
ret += '\t\tp.t = sinh\n'
ret += '\t\tp.invt = asinh\n'
ret += '\tend\n'
ret += 'end\n\n'
return ret
def list_out_param_type(self, p_type):
ret = '['
for i, p in enumerate(self.mm.model_ps.dict['ps']):
if p['transform'] == p_type:
ret += ':'+ p['name'] + ', '
if len(ret) > 1:
ret = ret[:-2]
ret += ']'
return ret
def init_models_dir(self):
if not os.path.exists(self.dir):
os.makedirs(self.dir)
def save_to_file(self, script):
self.logger.info("Writing script to file: %s" %self.file_path)
self.init_models_dir()
with open(self.file_path, "w", encoding="utf-8") as jl:
jl.write(script)
def parse_args(self):
self.logger.debug("Parsing args: %s", self.options["args"])
if self.options["args"] == "":
args = "()"
else:
args = "(%s,)" %self.options["args"]
self.logger.debug("Parsed args = %s" %args)
return args
def parse_kwargs(self):
self.logger.debug("Parsing kwargs: %s", self.options["kwargs"])
if self.options["kwargs"] == "":
kwargs = "Tuple{Symbol, Any}[]"
else:
kwargs = "Tuple{Symbol, Any}["
for kwarg in self.options["kwargs"].split(','):
s,v = kwarg.split("=")
kwargs += "(:%s, %s)," %(s.strip(),v.strip())
kwargs += "]"
self.logger.debug("Parsed kwargs = %s" %kwargs)
return kwargs
def find_replace_vectorized(self,string):
d = {"\.\*": ' .* ', "\.\/": ' ./ ', "\.\^": ' .^ '}
for item in d.keys():
string = re.sub(item, d[item], string)
return string
| true | true |
f71f93774042467cdcd4ca52abc67522df7b11cf | 649 | py | Python | intro-python/part2/structure.py | cobeam/DevNetRepo | d824bb6ad7d21bcae03485b571e97fc2b6b61df9 | [
"MIT"
] | 90 | 2018-04-07T00:39:23.000Z | 2020-06-09T02:44:02.000Z | intro-python/part2/structure.py | cobeam/DevNetRepo | d824bb6ad7d21bcae03485b571e97fc2b6b61df9 | [
"MIT"
] | 38 | 2018-04-06T18:11:36.000Z | 2020-05-11T23:36:24.000Z | intro-python/part2/structure.py | cobeam/DevNetRepo | d824bb6ad7d21bcae03485b571e97fc2b6b61df9 | [
"MIT"
] | 143 | 2018-04-20T00:17:24.000Z | 2020-06-12T15:07:42.000Z | #!/usr/bin/env python
"""Module docstring."""
# Imports
import os
import sys
# Module Constants
START_MESSAGE = "CLI Inspection Script"
# Module "Global" Variables
location = os.path.abspath(__file__)
# Module Functions and Classes
def main(*args):
    """My main script function.

    Displays the full path to this script, and a list of the arguments passed
    to the script.
    """
    print(START_MESSAGE)
    print("Script Location:", location)
    print("Arguments Passed:", args)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    # sys.argv[0] is the script name; everything after it goes to main().
    main(*sys.argv[1:])
| 19.088235 | 78 | 0.694915 |
import os
import sys
START_MESSAGE = "CLI Inspection Script"
location = os.path.abspath(__file__)
def main(*args):
    """Print the script banner, this script's location, and its arguments."""
    print(START_MESSAGE)
    print("Script Location:", location)
    print("Arguments Passed:", args)
if __name__ == '__main__':
_, *script_args = sys.argv
main(*script_args)
| true | true |
f71f943b61674db1e41754a8a4cbc52954162b47 | 10,921 | py | Python | setup.py | dineshsonachalam/atheris | 7c96c70056478b29d81d634b197c356f479fb6d7 | [
"Apache-2.0"
] | null | null | null | setup.py | dineshsonachalam/atheris | 7c96c70056478b29d81d634b197c356f479fb6d7 | [
"Apache-2.0"
] | null | null | null | setup.py | dineshsonachalam/atheris | 7c96c70056478b29d81d634b197c356f479fb6d7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
# Copyright 2021 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setuptools for Atheris."""
import os
import shutil
import subprocess
import sys
import tempfile
import setuptools
from setuptools import Extension
from setuptools import setup
from setuptools.command.build_ext import build_ext
# The release version can be overridden via the ATHERIS_VERSION env variable.
__version__ = os.getenv("ATHERIS_VERSION", "1.0.11")
# "python setup.py print_version" prints the version and exits without building.
if len(sys.argv) > 1 and sys.argv[1] == "print_version":
  print(__version__)
  quit()
# Shared tail of the error messages below: how to build a Clang that ships
# a usable libFuzzer.
clang_install_instructions = """download and build the latest version of Clang:
 git clone https://github.com/llvm/llvm-project.git
 cd llvm-project
 mkdir build
 cd build
 cmake -DLLVM_ENABLE_PROJECTS='clang;compiler-rt' -G "Unix Makefiles" ../llvm
 make -j 100  # This step is very slow

Then, set $CLANG_BIN="$(pwd)/bin/clang" and run pip again.

You should use this same Clang for building any Python extensions you plan to fuzz.
"""

# Messages raised when libFuzzer is missing or too old to link against.
too_old_error = """Your libFuzzer version is too old; set either $CLANG_BIN to point to a more recent Clang, or $LIBFUZZER_VERSION to point directly to a more recent libFuzzer .a file. If needed, """ + clang_install_instructions
no_libfuzzer_error = """Failed to find libFuzzer; set either $CLANG_BIN to point to your Clang binary, or $LIBFUZZER_LIB to point directly to your libFuzzer .a file. If needed, """ + clang_install_instructions

# On macOS the system compiler is Apple Clang, which ships without libFuzzer,
# so both messages get a more specific explanation.
if sys.platform == "darwin":
  too_old_error = ("Your libFuzzer version is too old.\nPlease" +
                   clang_install_instructions + "Do not use Apple "
                   "Clang; Apple Clang does not come with libFuzzer.")
  no_libfuzzer_error = ("Failed to find libFuzzer; you may be building using "
                        "Apple Clang. Apple Clang does not come with "
                        "libFuzzer.\nPlease " + clang_install_instructions)
class PybindIncludeGetter(object):
  """Helper class to determine the pybind11 include path.

  The purpose of this class is to postpone importing pybind11
  until it is actually installed, so that the ``get_include()``
  method can be invoked.
  """

  def __str__(self):
    # Imported lazily: pybind11 is a setup_requires dependency and may not be
    # installed when this module is first imported.
    import pybind11  # pylint: disable=g-import-not-at-top
    return pybind11.get_include()
def check_libfuzzer_version(libfuzzer):
  """Verifies that the specified libFuzzer is of a sufficiently high version.

  Runs setup_utils/check_libfuzzer_version.sh on the given .a file and
  returns its status string (e.g. "up-to-date", "outdated-recoverable",
  "outdated-unrecoverable"). If the check itself fails, assumes the
  library is up-to-date rather than blocking the build.
  """
  current_path = os.path.dirname(os.path.realpath(__file__))
  try:
    version = subprocess.check_output(
        [current_path + "/setup_utils/check_libfuzzer_version.sh", libfuzzer])
  except subprocess.CalledProcessError as e:
    # Fixed: e.stderr is always None for check_output without stderr capture,
    # so report the exception itself; also fixed the "libFuxzzer" typo and
    # added newlines so the two messages don't run together.
    sys.stderr.write("Failed to check libFuzzer version: %s\n" % e)
    sys.stderr.write("Assuming libFuzzer is up-to-date.\n")
    return "up-to-date"
  version = version.strip().decode("utf-8")
  return version
def upgrade_libfuzzer(libfuzzer):
  """Hacky code for upgrading libFuzzer to be compatible with Atheris.

  Invokes setup_utils/upgrade_libfuzzer.sh on the given archive and returns
  the path of the upgraded copy; on failure, returns the original path
  unchanged so the caller can re-check the version and report the error.
  """
  script_dir = os.path.dirname(os.path.realpath(__file__))
  try:
    output = subprocess.check_output(
        [script_dir + "/setup_utils/upgrade_libfuzzer.sh", libfuzzer])
  except subprocess.CalledProcessError as err:
    sys.stderr.write("libFuzzer upgrade failed: %s" % err.stderr)
    return libfuzzer
  return output.strip().decode("utf-8")
def get_libfuzzer_lib():
  """Returns path to the libFuzzer .a library.

  The $LIBFUZZER_LIB environment variable takes precedence; otherwise
  setup_utils/find_libfuzzer.sh is consulted.

  Raises:
    RuntimeError: if no libFuzzer archive can be located.
  """
  libfuzzer_lib = os.getenv("LIBFUZZER_LIB", "")
  if libfuzzer_lib:
    return libfuzzer_lib

  current_path = os.path.dirname(os.path.realpath(__file__))
  try:
    libfuzzer = subprocess.check_output(
        [current_path + "/setup_utils/find_libfuzzer.sh"])
  except subprocess.CalledProcessError as e:
    sys.stderr.write(no_libfuzzer_error + "\n")
    # Chain the subprocess failure so the traceback shows the root cause
    # (previously `e` was captured but never used).
    raise RuntimeError(no_libfuzzer_error) from e
  libfuzzer = libfuzzer.strip().decode("utf-8")
  return libfuzzer
# Three C++ extension modules: the instrumentation entry point plus two
# builds of the core — one linked against libFuzzer, one without.
ext_modules = [
    Extension(
        "atheris.atheris",
        sorted([
            "atheris.cc",
            "util.cc",
            "fuzzed_data_provider.cc",
        ]),
        include_dirs=[
            # Path to pybind11 headers
            PybindIncludeGetter(),
        ],
        language="c++"),
    Extension(
        "atheris.core_with_libfuzzer",
        sorted([
            "core.cc",
            "tracer.cc",
            "util.cc",
        ]),
        include_dirs=[
            # Path to pybind11 headers
            PybindIncludeGetter(),
        ],
        language="c++"),
    Extension(
        "atheris.core_without_libfuzzer",
        sorted([
            "core.cc",
            "tracer.cc",
            "util.cc",
        ]),
        include_dirs=[
            # Path to pybind11 headers
            PybindIncludeGetter(),
        ],
        language="c++"),
]
# cf http://bugs.python.org/issue26689
def has_flag(compiler, flagname):
  """Return True if the compiler accepts the given flag.

  A throwaway C++ source file is compiled with the flag appended; a
  CompileError means the flag is unsupported. The temp file is removed
  either way.
  """
  with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as src:
    src.write("int main (int argc, char **argv) { return 0; }")
    src_path = src.name
  try:
    compiler.compile([src_path], extra_postargs=[flagname])
    return True
  except setuptools.distutils.errors.CompileError:
    return False
  finally:
    # Best-effort cleanup of the temporary source file.
    try:
      os.remove(src_path)
    except OSError:
      pass
def cpp_flag(compiler):
  """Return the highest-supported -std=c++[11/14] compiler flag.

  $FORCE_MIN_VERSION pins C++11 (used in testing); $FORCE_VERSION pins an
  explicit standard. C++17 is deliberately excluded by default to work
  around https://github.com/pybind/pybind11/issues/1818.
  """
  forced = os.getenv("FORCE_VERSION")
  if os.getenv("FORCE_MIN_VERSION"):
    # Testing hook: verify Atheris still builds as plain C++11.
    candidates = ["-std=c++11"]
  elif forced:
    candidates = ["-std=c++" + forced]
  else:
    candidates = ["-std=c++14", "-std=c++11"]

  for candidate in candidates:
    if has_flag(compiler, candidate):
      return candidate

  raise RuntimeError("Unsupported compiler -- at least C++11 support "
                     "is needed!")
class BuildExt(build_ext):
  """A custom build extension for adding compiler-specific options.

  Locates libFuzzer (upgrading it in place when possible), injects
  platform/standard flags into each extension, and deploys libFuzzer plus
  sanitizer libraries merged with libFuzzer next to the built module.
  """

  def build_extensions(self):
    # Find libFuzzer and remember its original path/name for deployment.
    libfuzzer = get_libfuzzer_lib()
    orig_libfuzzer = libfuzzer
    orig_libfuzzer_name = os.path.basename(libfuzzer)

    version = check_libfuzzer_version(libfuzzer)
    # On macOS an in-place upgrade is not attempted; anything stale is fatal.
    if sys.platform == "darwin" and version != "up-to-date":
      raise RuntimeError(too_old_error)

    if version == "outdated-unrecoverable":
      raise RuntimeError(too_old_error)
    elif version == "outdated-recoverable":
      sys.stderr.write("Your libFuzzer version is too old, but it's possible "
                       "to attempt an in-place upgrade. Trying that now.\n")
      libfuzzer = upgrade_libfuzzer(libfuzzer)

      if check_libfuzzer_version(libfuzzer) != "up-to-date":
        sys.stderr.write("Upgrade failed.")
        raise RuntimeError(too_old_error)
    elif version != "up-to-date":
      # Defensive: the version script returned a status we don't understand.
      raise RuntimeError("Unexpected up-to-date status: " + version)
    sys.stderr.write("Your libFuzzer is up-to-date.\n")

    # Per-platform compile (c_opts) and link (l_opts) flags.
    c_opts = []
    l_opts = []

    if sys.platform == "darwin":
      darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
      c_opts += darwin_opts
      l_opts += darwin_opts

    ct = self.compiler.compiler_type
    if ct == "unix":
      c_opts.append(cpp_flag(self.compiler))

    for ext in self.extensions:
      # ATHERIS_MODULE_NAME is the submodule part of e.g. "atheris.atheris".
      ext.define_macros = [("VERSION_INFO",
                            "'{}'".format(self.distribution.get_version())),
                           ("ATHERIS_MODULE_NAME", ext.name.split(".")[1])]
      ext.extra_compile_args = c_opts
      # Only the "with_libfuzzer" core actually links the libFuzzer archive.
      if ext.name == "atheris.core_with_libfuzzer":
        ext.extra_link_args = l_opts + [libfuzzer]
      else:
        ext.extra_link_args = l_opts
    build_ext.build_extensions(self)

    # Deployment below is best-effort: failures are reported but non-fatal.
    try:
      self.deploy_file(libfuzzer, orig_libfuzzer_name)
    except Exception as e:
      sys.stderr.write(str(e))
      sys.stderr.write("\n")

    # Deploy versions of ASan and UBSan that have been merged with libFuzzer
    asan_name = orig_libfuzzer.replace(".fuzzer_no_main-", ".asan-")
    merged_asan_name = "asan_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, asan_name, merged_asan_name,
        "asan_preinit.cc.o asan_preinit.cpp.o")

    ubsan_name = orig_libfuzzer.replace(".fuzzer_no_main-",
                                        ".ubsan_standalone-")
    merged_ubsan_name = "ubsan_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, ubsan_name, merged_ubsan_name,
        "ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
    )

    ubsanxx_name = orig_libfuzzer.replace(".fuzzer_no_main-",
                                          ".ubsan_standalone_cxx-")
    merged_ubsanxx_name = "ubsan_cxx_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, ubsanxx_name, merged_ubsanxx_name,
        "ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
    )

  def deploy_file(self, name, target_filename):
    """Copy `name` next to the built atheris extension as `target_filename`."""
    atheris = self.get_ext_fullpath("atheris")
    dest_file = os.path.join(os.path.dirname(atheris), target_filename)
    shutil.copy(name, dest_file)

  def merge_libfuzzer_sanitizer(self, libfuzzer, sanitizer, strip_preinit):
    """Generate a .so that contains both libFuzzer and a sanitizer."""
    current_path = os.path.dirname(os.path.realpath(__file__))
    # Delegates the actual archive surgery to a helper shell script; returns
    # the path of the merged library it produced.
    new_sanitizer = subprocess.check_output([
        os.path.join(current_path, "setup_utils/merge_libfuzzer_sanitizer.sh"),
        libfuzzer, sanitizer, strip_preinit
    ])
    return new_sanitizer.strip().decode("utf-8")

  def merge_deploy_libfuzzer_sanitizer(self, libfuzzer, lib_name,
                                       merged_lib_name, preinit):
    """Merge a sanitizer with libFuzzer and deploy it; failures are non-fatal."""
    try:
      merged_lib = self.merge_libfuzzer_sanitizer(libfuzzer, lib_name, preinit)
      self.deploy_file(merged_lib, merged_lib_name)
    except Exception as e:
      sys.stderr.write(str(e))
      sys.stderr.write("\n")
# Package registration; pybind11 is required at build time (setup_requires)
# to locate its headers, and BuildExt injects the libFuzzer-aware build steps.
setup(
    name="atheris",
    version=__version__,
    author="Ian Eldred Pudney",
    author_email="puddles@google.com",
    url="https://pypi.org/project/atheris/",
    description="A coverage-guided fuzzer for Python and Python extensions.",
    long_description=open("README.md", "r").read(),
    long_description_content_type="text/markdown",
    packages=["atheris"],
    ext_modules=ext_modules,
    setup_requires=["pybind11>=2.5.0"],
    cmdclass={"build_ext": BuildExt},
    zip_safe=False,
)
| 34.451104 | 228 | 0.672557 |
import os
import shutil
import subprocess
import sys
import tempfile
import setuptools
from setuptools import Extension
from setuptools import setup
from setuptools.command.build_ext import build_ext
__version__ = os.getenv("ATHERIS_VERSION", "1.0.11")
if len(sys.argv) > 1 and sys.argv[1] == "print_version":
print(__version__)
quit()
clang_install_instructions = """download and build the latest version of Clang:
git clone https://github.com/llvm/llvm-project.git
cd llvm-project
mkdir build
cd build
cmake -DLLVM_ENABLE_PROJECTS='clang;compiler-rt' -G "Unix Makefiles" ../llvm
make -j 100 # This step is very slow
Then, set $CLANG_BIN="$(pwd)/bin/clang" and run pip again.
You should use this same Clang for building any Python extensions you plan to fuzz.
"""
too_old_error = """Your libFuzzer version is too old; set either $CLANG_BIN to point to a more recent Clang, or $LIBFUZZER_VERSION to point directly to a more recent libFuzzer .a file. If needed, """ + clang_install_instructions
no_libfuzzer_error = """Failed to find libFuzzer; set either $CLANG_BIN to point to your Clang binary, or $LIBFUZZER_LIB to point directly to your libFuzzer .a file. If needed, """ + clang_install_instructions
if sys.platform == "darwin":
too_old_error = ("Your libFuzzer version is too old.\nPlease" +
clang_install_instructions + "Do not use Apple "
"Clang; Apple Clang does not come with libFuzzer.")
no_libfuzzer_error = ("Failed to find libFuzzer; you may be building using "
"Apple Clang. Apple Clang does not come with "
"libFuzzer.\nPlease " + clang_install_instructions)
class PybindIncludeGetter(object):
def __str__(self):
import pybind11
return pybind11.get_include()
def check_libfuzzer_version(libfuzzer):
current_path = os.path.dirname(os.path.realpath(__file__))
try:
version = subprocess.check_output(
[current_path + "/setup_utils/check_libfuzzer_version.sh", libfuzzer])
except subprocess.CalledProcessError as e:
sys.stderr.write("Failed to check libFuzzer version: %s" % e.stderr)
sys.stderr.write("Assuming libFuxzzer is up-to-date.")
return "up-to-date"
version = version.strip().decode("utf-8")
return version
def upgrade_libfuzzer(libfuzzer):
  """Attempt an in-place upgrade of `libfuzzer` via upgrade_libfuzzer.sh.

  Returns the path of the upgraded archive on success, or the original
  path unchanged when the upgrade script fails.
  """
  script_dir = os.path.dirname(os.path.realpath(__file__))
  try:
    upgraded = subprocess.check_output(
        [script_dir + "/setup_utils/upgrade_libfuzzer.sh", libfuzzer])
  except subprocess.CalledProcessError as e:
    # Fall back to the original archive; the caller re-checks its version.
    sys.stderr.write("libFuzzer upgrade failed: %s" % e.stderr)
    return libfuzzer
  return upgraded.strip().decode("utf-8")
def get_libfuzzer_lib():
  """Locate the libFuzzer .a archive to link against.

  $LIBFUZZER_LIB, when set, wins outright; otherwise the bundled
  find_libfuzzer.sh helper probes for one. Raises RuntimeError when no
  archive can be found.
  """
  override = os.getenv("LIBFUZZER_LIB", "")
  if override:
    return override
  helper_dir = os.path.dirname(os.path.realpath(__file__))
  try:
    found = subprocess.check_output(
        [helper_dir + "/setup_utils/find_libfuzzer.sh"])
  except subprocess.CalledProcessError:
    sys.stderr.write(no_libfuzzer_error + "\n")
    raise RuntimeError(no_libfuzzer_error)
  return found.strip().decode("utf-8")
def _make_extension(name, sources):
  """Build one C++ Extension wired up with the pybind11 include path."""
  return Extension(
      name,
      sorted(sources),
      include_dirs=[
          PybindIncludeGetter(),
      ],
      language="c++")


# The three native modules: the pure instrumentation module, and the core
# module in with- and without-libFuzzer flavors.
ext_modules = [
    _make_extension("atheris.atheris",
                    ["atheris.cc", "util.cc", "fuzzed_data_provider.cc"]),
    _make_extension("atheris.core_with_libfuzzer",
                    ["core.cc", "tracer.cc", "util.cc"]),
    _make_extension("atheris.core_without_libfuzzer",
                    ["core.cc", "tracer.cc", "util.cc"]),
]
def has_flag(compiler, flagname):
  """Return True if `compiler` accepts the given command-line flag.

  Compiles a trivial translation unit with the flag; a CompileError from
  the compiler means the flag is unsupported.
  """
  with tempfile.NamedTemporaryFile("w", suffix=".cpp", delete=False) as src:
    src.write("int main (int argc, char **argv) { return 0; }")
    src_path = src.name
  try:
    compiler.compile([src_path], extra_postargs=[flagname])
  except setuptools.distutils.errors.CompileError:
    return False
  finally:
    # Best-effort cleanup of the scratch source file.
    try:
      os.remove(src_path)
    except OSError:
      pass
  return True
def cpp_flag(compiler):
  """Pick the newest supported -std=c++ flag for `compiler`.

  $FORCE_MIN_VERSION pins C++11, $FORCE_VERSION pins an explicit
  standard; otherwise C++14 is preferred with a C++11 fallback.
  Raises RuntimeError when no candidate flag is accepted.
  """
  forced = os.getenv("FORCE_VERSION")
  if os.getenv("FORCE_MIN_VERSION"):
    candidates = ["-std=c++11"]
  elif forced:
    candidates = ["-std=c++" + forced]
  else:
    candidates = [
        "-std=c++14",
        "-std=c++11"]
  for candidate in candidates:
    if has_flag(compiler, candidate):
      return candidate
  raise RuntimeError("Unsupported compiler -- at least C++11 support "
                     "is needed!")
class BuildExt(build_ext):
  """build_ext that links libFuzzer and deploys merged sanitizer libraries.

  Verifies (and if possible upgrades) the libFuzzer archive, injects
  per-extension compile/link flags, builds, then copies libFuzzer plus
  ASan/UBSan libraries merged with libFuzzer next to the built modules.
  """

  def build_extensions(self):
    """Configure flags for every extension, build, and deploy support libs."""
    libfuzzer = get_libfuzzer_lib()
    # Keep the original archive path: the sanitizer archives below are
    # located by rewriting its file name, even if an upgraded copy is
    # what actually gets linked.
    orig_libfuzzer = libfuzzer
    orig_libfuzzer_name = os.path.basename(libfuzzer)
    version = check_libfuzzer_version(libfuzzer)
    # On macOS no in-place upgrade is attempted; the archive must already
    # be recent enough.
    if sys.platform == "darwin" and version != "up-to-date":
      raise RuntimeError(too_old_error)
    if version == "outdated-unrecoverable":
      raise RuntimeError(too_old_error)
    elif version == "outdated-recoverable":
      sys.stderr.write("Your libFuzzer version is too old, but it's possible "
                       "to attempt an in-place upgrade. Trying that now.\n")
      libfuzzer = upgrade_libfuzzer(libfuzzer)
      if check_libfuzzer_version(libfuzzer) != "up-to-date":
        sys.stderr.write("Upgrade failed.")
        raise RuntimeError(too_old_error)
    elif version != "up-to-date":
      # Defensive: the helper emitted a verdict this code doesn't know.
      raise RuntimeError("Unexpected up-to-date status: " + version)
    sys.stderr.write("Your libFuzzer is up-to-date.\n")
    c_opts = []
    l_opts = []
    if sys.platform == "darwin":
      darwin_opts = ["-stdlib=libc++", "-mmacosx-version-min=10.7"]
      c_opts += darwin_opts
      l_opts += darwin_opts
    ct = self.compiler.compiler_type
    if ct == "unix":
      c_opts.append(cpp_flag(self.compiler))
    for ext in self.extensions:
      # ATHERIS_MODULE_NAME lets one source tree build several modules.
      ext.define_macros = [("VERSION_INFO",
                            "'{}'".format(self.distribution.get_version())),
                           ("ATHERIS_MODULE_NAME", ext.name.split(".")[1])]
      ext.extra_compile_args = c_opts
      # Only core_with_libfuzzer links the libFuzzer archive directly.
      if ext.name == "atheris.core_with_libfuzzer":
        ext.extra_link_args = l_opts + [libfuzzer]
      else:
        ext.extra_link_args = l_opts
    build_ext.build_extensions(self)
    # Everything below is best-effort deployment: failures are logged to
    # stderr but never fail the build.
    try:
      self.deploy_file(libfuzzer, orig_libfuzzer_name)
    except Exception as e:
      sys.stderr.write(str(e))
      sys.stderr.write("\n")
    # Deploy versions of ASan and UBSan that have been merged with libFuzzer
    asan_name = orig_libfuzzer.replace(".fuzzer_no_main-", ".asan-")
    merged_asan_name = "asan_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, asan_name, merged_asan_name,
        "asan_preinit.cc.o asan_preinit.cpp.o")
    ubsan_name = orig_libfuzzer.replace(".fuzzer_no_main-",
                                        ".ubsan_standalone-")
    merged_ubsan_name = "ubsan_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, ubsan_name, merged_ubsan_name,
        "ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
    )
    ubsanxx_name = orig_libfuzzer.replace(".fuzzer_no_main-",
                                          ".ubsan_standalone_cxx-")
    merged_ubsanxx_name = "ubsan_cxx_with_fuzzer.so"
    self.merge_deploy_libfuzzer_sanitizer(
        libfuzzer, ubsanxx_name, merged_ubsanxx_name,
        "ubsan_init_standalone_preinit.cc.o ubsan_init_standalone_preinit.cpp.o"
    )

  def deploy_file(self, name, target_filename):
    """Copy `name` into the build output directory as `target_filename`."""
    atheris = self.get_ext_fullpath("atheris")
    dest_file = os.path.join(os.path.dirname(atheris), target_filename)
    shutil.copy(name, dest_file)

  def merge_libfuzzer_sanitizer(self, libfuzzer, sanitizer, strip_preinit):
    """Merge libFuzzer into a sanitizer archive; return the merged path.

    `strip_preinit` names the preinit object files the helper script
    strips from the sanitizer archive.
    """
    current_path = os.path.dirname(os.path.realpath(__file__))
    new_sanitizer = subprocess.check_output([
        os.path.join(current_path, "setup_utils/merge_libfuzzer_sanitizer.sh"),
        libfuzzer, sanitizer, strip_preinit
    ])
    return new_sanitizer.strip().decode("utf-8")

  def merge_deploy_libfuzzer_sanitizer(self, libfuzzer, lib_name,
                                       merged_lib_name, preinit):
    """Best-effort: merge a sanitizer with libFuzzer and deploy the result."""
    try:
      merged_lib = self.merge_libfuzzer_sanitizer(libfuzzer, lib_name, preinit)
      self.deploy_file(merged_lib, merged_lib_name)
    except Exception as e:
      # Sanitizer deployment is optional; log and continue.
      sys.stderr.write(str(e))
      sys.stderr.write("\n")
# Read the long description up front under a context manager so the file
# handle is closed deterministically (the original inline
# open("README.md").read() leaked the handle until garbage collection).
# encoding="utf-8" makes the read independent of the platform default.
with open("README.md", "r", encoding="utf-8") as _readme_file:
  _long_description = _readme_file.read()

setup(
    name="atheris",
    version=__version__,
    author="Ian Eldred Pudney",
    author_email="puddles@google.com",
    url="https://pypi.org/project/atheris/",
    description="A coverage-guided fuzzer for Python and Python extensions.",
    long_description=_long_description,
    long_description_content_type="text/markdown",
    packages=["atheris"],
    ext_modules=ext_modules,
    setup_requires=["pybind11>=2.5.0"],
    cmdclass={"build_ext": BuildExt},
    zip_safe=False,
)
| true | true |
f71f94cc9ad067fb869ddb7fb431d9594d731530 | 361 | py | Python | fast_gui/core.py | asvcode/nbdev_test | e5bc1a1da28e7c5d87cbba0a207e016219644ee4 | [
"Apache-2.0"
] | 2 | 2020-06-04T08:38:00.000Z | 2020-07-15T15:42:13.000Z | fast_gui/core.py | asvcode/nbdev_test | e5bc1a1da28e7c5d87cbba0a207e016219644ee4 | [
"Apache-2.0"
] | 2 | 2021-09-28T03:23:17.000Z | 2022-02-26T08:14:46.000Z | fast_gui/core.py | asvcode/nbdev_test | e5bc1a1da28e7c5d87cbba0a207e016219644ee4 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['repeat_one']
# Cell
import ipywidgets as widgets
from fastai2.vision.all import*
from .dashboard_two import ds_choice
# Cell
def repeat_one(source, n=128):
    """Single image helper for displaying a batch.

    Returns a list containing one image path repeated `n` times.

    NOTE(review): the `source` argument is ignored — the path is taken
    from the module-level `ds_choice.source`, and index 9 is hard-coded.
    Presumably intentional for the demo dashboard; confirm with callers.
    """
    return [get_image_files(ds_choice.source)[9]]*n
__all__ = ['repeat_one']
import ipywidgets as widgets
from fastai2.vision.all import*
from .dashboard_two import ds_choice
def repeat_one(source, n=128):
return [get_image_files(ds_choice.source)[9]]*n | true | true |
f71f978be69f377fe3ca350b8404a8eecbbbb6b5 | 11,993 | py | Python | tests/contrib/falcon/test_suite.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/contrib/falcon/test_suite.py | discord/dd-trace-py | 3f6bca078e751bf7459fd02b7aff7f96eff0eeb6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2022-02-16T09:35:37.000Z | 2022-03-04T16:48:45.000Z | tests/contrib/falcon/test_suite.py | goodspark/dd-trace-py | e2089c7b348e9d1a70e01f96927d85a643d6ae56 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-02-11T16:34:22.000Z | 2022-02-11T16:34:22.000Z | from ddtrace import config
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import ERROR_TYPE
from ddtrace.contrib.falcon.patch import FALCON_VERSION
from ddtrace.ext import http as httpx
from tests.opentracer.utils import init_tracer
from tests.utils import assert_is_measured
from tests.utils import assert_span_http_status_code
class FalconTestMixin(object):
    """Shared helper for driving requests through falcon's test client."""

    def make_test_call(self, url, method="get", expected_status_code=None, **kwargs):
        """Simulate an HTTP request and optionally check its status code.

        Falcon < 2.0 exposes a numeric `status_code` on the response;
        newer releases only provide the textual `status` (e.g. "200 OK"),
        so the first three characters are compared instead.
        """
        simulate = getattr(self.client, "simulate_%s" % (method,))
        response = simulate(url, **kwargs)
        if expected_status_code is not None:
            if FALCON_VERSION < (2, 0, 0):
                assert response.status_code == expected_status_code
            else:
                assert response.status[:3] == str(expected_status_code)
        return response
class FalconTestCase(FalconTestMixin):
    """Falcon mixin test case that includes all possible tests. If you need
    to add new tests, add them here so that they're shared across manual
    and automatic instrumentation.
    """
    def test_404(self):
        """An unmatched route produces a single 'GET 404' resource span."""
        self.make_test_call("/fake_endpoint", expected_status_code=404)
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert span.resource == "GET 404"
        assert_span_http_status_code(span, 404)
        assert span.get_tag(httpx.URL) == "http://falconframework.org/fake_endpoint"
        assert httpx.QUERY_STRING not in span.get_tags()
        assert span.parent_id is None
        assert span.error == 0
    def test_exception(self):
        """An uncaught resource exception yields an errored 500 span."""
        try:
            self.make_test_call("/exception")
        except Exception:
            pass
        else:
            # Falcon >= 3 handles the exception itself instead of re-raising,
            # so only older versions are expected to reach this branch.
            if FALCON_VERSION < (3, 0, 0):
                assert 0
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert span.resource == "GET tests.contrib.falcon.app.resources.ResourceException"
        assert_span_http_status_code(span, 500)
        assert span.get_tag(httpx.URL) == "http://falconframework.org/exception"
        assert span.parent_id is None
        assert span.error == 1
    def test_200(self, query_string="", trace_query_string=False):
        """A 200 response produces a non-errored web span.

        Parameterized so the query-string variants below can reuse it:
        `query_string` is sent with the request, `trace_query_string`
        says whether the URL tag is expected to include it.
        """
        out = self.make_test_call("/200", expected_status_code=200, query_string=query_string)
        assert out.content.decode("utf-8") == "Success"
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert span.resource == "GET tests.contrib.falcon.app.resources.Resource200"
        assert_span_http_status_code(span, 200)
        fqs = ("?" + query_string) if query_string and trace_query_string else ""
        assert span.get_tag(httpx.URL) == "http://falconframework.org/200" + fqs
        if config.falcon.trace_query_string:
            assert span.get_tag(httpx.QUERY_STRING) == query_string
        else:
            assert httpx.QUERY_STRING not in span.get_tags()
        assert span.parent_id is None
        assert span.span_type == "web"
        assert span.error == 0
    def test_200_qs(self):
        """200 with a single query-string pair (tracing disabled)."""
        return self.test_200("foo=bar")
    def test_200_multi_qs(self):
        """200 with repeated/multiple query-string pairs (tracing disabled)."""
        return self.test_200("foo=bar&foo=baz&x=y")
    def test_200_qs_trace(self):
        """200 with a single query-string pair and tracing enabled."""
        with self.override_http_config("falcon", dict(trace_query_string=True)):
            return self.test_200("foo=bar", trace_query_string=True)
    def test_200_multi_qs_trace(self):
        """200 with multiple query-string pairs and tracing enabled."""
        with self.override_http_config("falcon", dict(trace_query_string=True)):
            return self.test_200("foo=bar&foo=baz&x=y", trace_query_string=True)
    def test_analytics_global_on_integration_default(self):
        """
        When making a request
            When the integration sample rate is unset and trace search is
            globally enabled
                We expect the root span to carry the analytics tag at 1.0
        """
        with self.override_global_config(dict(analytics_enabled=True)):
            out = self.make_test_call("/200", expected_status_code=200)
            self.assertEqual(out.content.decode("utf-8"), "Success")
        self.assert_structure(dict(name="falcon.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}))
    def test_analytics_global_on_integration_on(self):
        """
        When making a request
            When the integration enables trace search with a sample rate and
            trace search is globally enabled
                We expect the root span to carry the configured sample rate
        """
        with self.override_global_config(dict(analytics_enabled=True)):
            with self.override_config("falcon", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                out = self.make_test_call("/200", expected_status_code=200)
                self.assertEqual(out.content.decode("utf-8"), "Success")
        self.assert_structure(dict(name="falcon.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}))
    def test_analytics_global_off_integration_default(self):
        """
        When making a request
            When the integration sample rate is unset and trace search is
            globally disabled
                We expect the root span to carry no analytics tag
        """
        with self.override_global_config(dict(analytics_enabled=False)):
            out = self.make_test_call("/200", expected_status_code=200)
            self.assertEqual(out.content.decode("utf-8"), "Success")
        root = self.get_root_span()
        self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
    def test_analytics_global_off_integration_on(self):
        """
        When making a request
            When the integration enables trace search with a sample rate and
            trace search is globally disabled
                We expect the root span to carry the configured sample rate
        """
        with self.override_global_config(dict(analytics_enabled=False)):
            with self.override_config("falcon", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
                out = self.make_test_call("/200", expected_status_code=200)
                self.assertEqual(out.content.decode("utf-8"), "Success")
        self.assert_structure(dict(name="falcon.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}))
    def test_201(self):
        """A POST returning 201 produces a non-errored span."""
        out = self.make_test_call("/201", method="post", expected_status_code=201)
        assert out.status_code == 201
        assert out.content.decode("utf-8") == "Success"
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert span.resource == "POST tests.contrib.falcon.app.resources.Resource201"
        assert_span_http_status_code(span, 201)
        assert span.get_tag(httpx.URL) == "http://falconframework.org/201"
        assert span.parent_id is None
        assert span.error == 0
    def test_500(self):
        """A 500 response (no exception raised) produces an errored span."""
        out = self.make_test_call("/500", expected_status_code=500)
        assert out.content.decode("utf-8") == "Failure"
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert span.resource == "GET tests.contrib.falcon.app.resources.Resource500"
        assert_span_http_status_code(span, 500)
        assert span.get_tag(httpx.URL) == "http://falconframework.org/500"
        assert span.parent_id is None
        assert span.error == 1
    def test_404_exception(self):
        """An HTTPNotFound raised by a resource yields a non-errored 404 span."""
        self.make_test_call("/not_found", expected_status_code=404)
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert span.resource == "GET tests.contrib.falcon.app.resources.ResourceNotFound"
        assert_span_http_status_code(span, 404)
        assert span.get_tag(httpx.URL) == "http://falconframework.org/not_found"
        assert span.parent_id is None
        assert span.error == 0
    def test_404_exception_no_stacktracer(self):
        # it should not have the stacktrace when a 404 exception is raised
        self.make_test_call("/not_found", expected_status_code=404)
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        assert_is_measured(span)
        assert span.name == "falcon.request"
        assert span.service == self._service
        assert_span_http_status_code(span, 404)
        assert span.get_tag(ERROR_TYPE) is None
        assert span.parent_id is None
        assert span.error == 0
    def test_200_ot(self):
        """OpenTracing version of test_200."""
        writer = self.tracer._writer
        ot_tracer = init_tracer("my_svc", self.tracer)
        ot_tracer._dd_tracer.configure(writer=writer)
        with ot_tracer.start_active_span("ot_span"):
            out = self.make_test_call("/200", expected_status_code=200)
        assert out.content.decode("utf-8") == "Success"
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 2
        ot_span, dd_span = traces[0]
        # confirm the parenting
        assert ot_span.parent_id is None
        assert dd_span.parent_id == ot_span.span_id
        assert ot_span.service == "my_svc"
        assert ot_span.resource == "ot_span"
        assert_is_measured(dd_span)
        assert dd_span.name == "falcon.request"
        assert dd_span.service == self._service
        assert dd_span.resource == "GET tests.contrib.falcon.app.resources.Resource200"
        assert_span_http_status_code(dd_span, 200)
        assert dd_span.get_tag(httpx.URL) == "http://falconframework.org/200"
        assert dd_span.error == 0
    def test_falcon_request_hook(self):
        """A registered 'request' hook can tag the request span."""
        @config.falcon.hooks.on("request")
        def on_falcon_request(span, request, response):
            span.set_tag("my.custom", "tag")
        out = self.make_test_call("/200", expected_status_code=200)
        assert out.content.decode("utf-8") == "Success"
        traces = self.tracer.pop_traces()
        assert len(traces) == 1
        assert len(traces[0]) == 1
        span = traces[0][0]
        # Header tagging is not configured here, so no header tags appear.
        assert span.get_tag("http.request.headers.my_header") is None
        assert span.get_tag("http.response.headers.my_response_header") is None
        assert span.name == "falcon.request"
        assert span.get_tag("my.custom") == "tag"
        assert span.error == 0
    def test_http_header_tracing(self):
        """Configured request/response headers are recorded as span tags."""
        with self.override_config("falcon", {}):
            config.falcon.http.trace_headers(["my-header", "my-response-header"])
            self.make_test_call("/200", headers={"my-header": "my_value"})
            traces = self.tracer.pop_traces()
            assert len(traces) == 1
            assert len(traces[0]) == 1
            span = traces[0][0]
            assert span.get_tag("http.request.headers.my-header") == "my_value"
            assert span.get_tag("http.response.headers.my-response-header") == "my_response_value"
| 41.071918 | 117 | 0.650546 | from ddtrace import config
from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
from ddtrace.constants import ERROR_TYPE
from ddtrace.contrib.falcon.patch import FALCON_VERSION
from ddtrace.ext import http as httpx
from tests.opentracer.utils import init_tracer
from tests.utils import assert_is_measured
from tests.utils import assert_span_http_status_code
class FalconTestMixin(object):
def make_test_call(self, url, method="get", expected_status_code=None, **kwargs):
func = getattr(self.client, "simulate_%s" % (method,))
out = func(url, **kwargs)
if FALCON_VERSION < (2, 0, 0):
if expected_status_code is not None:
assert out.status_code == expected_status_code
else:
if expected_status_code is not None:
assert out.status[:3] == str(expected_status_code)
return out
class FalconTestCase(FalconTestMixin):
def test_404(self):
self.make_test_call("/fake_endpoint", expected_status_code=404)
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert span.resource == "GET 404"
assert_span_http_status_code(span, 404)
assert span.get_tag(httpx.URL) == "http://falconframework.org/fake_endpoint"
assert httpx.QUERY_STRING not in span.get_tags()
assert span.parent_id is None
assert span.error == 0
def test_exception(self):
try:
self.make_test_call("/exception")
except Exception:
pass
else:
if FALCON_VERSION < (3, 0, 0):
assert 0
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert span.resource == "GET tests.contrib.falcon.app.resources.ResourceException"
assert_span_http_status_code(span, 500)
assert span.get_tag(httpx.URL) == "http://falconframework.org/exception"
assert span.parent_id is None
assert span.error == 1
def test_200(self, query_string="", trace_query_string=False):
out = self.make_test_call("/200", expected_status_code=200, query_string=query_string)
assert out.content.decode("utf-8") == "Success"
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert span.resource == "GET tests.contrib.falcon.app.resources.Resource200"
assert_span_http_status_code(span, 200)
fqs = ("?" + query_string) if query_string and trace_query_string else ""
assert span.get_tag(httpx.URL) == "http://falconframework.org/200" + fqs
if config.falcon.trace_query_string:
assert span.get_tag(httpx.QUERY_STRING) == query_string
else:
assert httpx.QUERY_STRING not in span.get_tags()
assert span.parent_id is None
assert span.span_type == "web"
assert span.error == 0
def test_200_qs(self):
return self.test_200("foo=bar")
def test_200_multi_qs(self):
return self.test_200("foo=bar&foo=baz&x=y")
def test_200_qs_trace(self):
with self.override_http_config("falcon", dict(trace_query_string=True)):
return self.test_200("foo=bar", trace_query_string=True)
def test_200_multi_qs_trace(self):
with self.override_http_config("falcon", dict(trace_query_string=True)):
return self.test_200("foo=bar&foo=baz&x=y", trace_query_string=True)
def test_analytics_global_on_integration_default(self):
with self.override_global_config(dict(analytics_enabled=True)):
out = self.make_test_call("/200", expected_status_code=200)
self.assertEqual(out.content.decode("utf-8"), "Success")
self.assert_structure(dict(name="falcon.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}))
def test_analytics_global_on_integration_on(self):
with self.override_global_config(dict(analytics_enabled=True)):
with self.override_config("falcon", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
out = self.make_test_call("/200", expected_status_code=200)
self.assertEqual(out.content.decode("utf-8"), "Success")
self.assert_structure(dict(name="falcon.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}))
def test_analytics_global_off_integration_default(self):
with self.override_global_config(dict(analytics_enabled=False)):
out = self.make_test_call("/200", expected_status_code=200)
self.assertEqual(out.content.decode("utf-8"), "Success")
root = self.get_root_span()
self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
def test_analytics_global_off_integration_on(self):
with self.override_global_config(dict(analytics_enabled=False)):
with self.override_config("falcon", dict(analytics_enabled=True, analytics_sample_rate=0.5)):
out = self.make_test_call("/200", expected_status_code=200)
self.assertEqual(out.content.decode("utf-8"), "Success")
self.assert_structure(dict(name="falcon.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}))
def test_201(self):
out = self.make_test_call("/201", method="post", expected_status_code=201)
assert out.status_code == 201
assert out.content.decode("utf-8") == "Success"
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert span.resource == "POST tests.contrib.falcon.app.resources.Resource201"
assert_span_http_status_code(span, 201)
assert span.get_tag(httpx.URL) == "http://falconframework.org/201"
assert span.parent_id is None
assert span.error == 0
def test_500(self):
out = self.make_test_call("/500", expected_status_code=500)
assert out.content.decode("utf-8") == "Failure"
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert span.resource == "GET tests.contrib.falcon.app.resources.Resource500"
assert_span_http_status_code(span, 500)
assert span.get_tag(httpx.URL) == "http://falconframework.org/500"
assert span.parent_id is None
assert span.error == 1
def test_404_exception(self):
self.make_test_call("/not_found", expected_status_code=404)
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert span.resource == "GET tests.contrib.falcon.app.resources.ResourceNotFound"
assert_span_http_status_code(span, 404)
assert span.get_tag(httpx.URL) == "http://falconframework.org/not_found"
assert span.parent_id is None
assert span.error == 0
def test_404_exception_no_stacktracer(self):
self.make_test_call("/not_found", expected_status_code=404)
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert_is_measured(span)
assert span.name == "falcon.request"
assert span.service == self._service
assert_span_http_status_code(span, 404)
assert span.get_tag(ERROR_TYPE) is None
assert span.parent_id is None
assert span.error == 0
def test_200_ot(self):
writer = self.tracer._writer
ot_tracer = init_tracer("my_svc", self.tracer)
ot_tracer._dd_tracer.configure(writer=writer)
with ot_tracer.start_active_span("ot_span"):
out = self.make_test_call("/200", expected_status_code=200)
assert out.content.decode("utf-8") == "Success"
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
ot_span, dd_span = traces[0]
assert ot_span.parent_id is None
assert dd_span.parent_id == ot_span.span_id
assert ot_span.service == "my_svc"
assert ot_span.resource == "ot_span"
assert_is_measured(dd_span)
assert dd_span.name == "falcon.request"
assert dd_span.service == self._service
assert dd_span.resource == "GET tests.contrib.falcon.app.resources.Resource200"
assert_span_http_status_code(dd_span, 200)
assert dd_span.get_tag(httpx.URL) == "http://falconframework.org/200"
assert dd_span.error == 0
def test_falcon_request_hook(self):
@config.falcon.hooks.on("request")
def on_falcon_request(span, request, response):
span.set_tag("my.custom", "tag")
out = self.make_test_call("/200", expected_status_code=200)
assert out.content.decode("utf-8") == "Success"
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert span.get_tag("http.request.headers.my_header") is None
assert span.get_tag("http.response.headers.my_response_header") is None
assert span.name == "falcon.request"
assert span.get_tag("my.custom") == "tag"
assert span.error == 0
def test_http_header_tracing(self):
with self.override_config("falcon", {}):
config.falcon.http.trace_headers(["my-header", "my-response-header"])
self.make_test_call("/200", headers={"my-header": "my_value"})
traces = self.tracer.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
span = traces[0][0]
assert span.get_tag("http.request.headers.my-header") == "my_value"
assert span.get_tag("http.response.headers.my-response-header") == "my_response_value"
| true | true |
f71f98aa12c458b74228cfac53a00bd4f6d9a013 | 11,247 | py | Python | tensorflow/python/ops/standard_ops.py | noahl/tensorflow | b95d8cce7323d328565378e0d60d72603393f87d | [
"Apache-2.0"
] | 5 | 2018-09-22T20:16:46.000Z | 2022-02-28T10:35:19.000Z | tensorflow/python/ops/standard_ops.py | noahl/tensorflow | b95d8cce7323d328565378e0d60d72603393f87d | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/standard_ops.py | noahl/tensorflow | b95d8cce7323d328565378e0d60d72603393f87d | [
"Apache-2.0"
] | 2 | 2019-08-14T09:04:37.000Z | 2022-02-02T20:08:02.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unused-import
"""Import names of Tensor Flow standard Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
# pylint: disable=g-bad-import-order
# Imports the following modules so that @RegisterGradient get executed.
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import spectral_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.util.all_util import remove_undocumented
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.array_ops import *
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
# TODO(vrv): Switch to import * once we're okay with exposing the module.
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import *
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import *
from tensorflow.python.ops.variables import *
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
#### For use in remove_undocumented below:
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import check_ops as _check_ops
from tensorflow.python.ops import clip_ops as _clip_ops
from tensorflow.python.ops import confusion_matrix as _confusion_matrix
from tensorflow.python.ops import control_flow_ops as _control_flow_ops
from tensorflow.python.ops import data_flow_ops as _data_flow_ops
from tensorflow.python.ops import functional_ops as _functional_ops
from tensorflow.python.ops import gradients as _gradients
from tensorflow.python.ops import histogram_ops as _histogram_ops
from tensorflow.python.ops import init_ops as _init_ops
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import linalg_ops as _linalg_ops
from tensorflow.python.ops import logging_ops as _logging_ops
from tensorflow.python.ops import manip_ops as _manip_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops import numerics as _numerics
from tensorflow.python.ops import parsing_ops as _parsing_ops
from tensorflow.python.ops import partitioned_variables as _partitioned_variables
from tensorflow.python.ops import random_ops as _random_ops
from tensorflow.python.ops import script_ops as _script_ops
from tensorflow.python.ops import session_ops as _session_ops
from tensorflow.python.ops import sparse_ops as _sparse_ops
from tensorflow.python.ops import special_math_ops as _special_math_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.ops import string_ops as _string_ops
from tensorflow.python.ops import template as _template
from tensorflow.python.ops import tensor_array_ops as _tensor_array_ops
from tensorflow.python.ops import variable_scope as _variable_scope
from tensorflow.python.ops import variables as _variables
# Symbols listed below are intentionally kept in the public namespace even
# though they lack API documentation; remove_undocumented() (called at the
# bottom of this module) strips every other undocumented name.
_allowed_symbols_math_ops = [
    # TODO(drpng): decide if we want to reference these in the documentation.
    "reduced_shape",
    "sparse_segment_mean_grad",
    "sparse_segment_sqrt_n_grad",
    # Legacy: will be removed.
    "arg_max",
    "arg_min",
    "lin_space",
    "sparse_matmul", # Use tf.matmul.
    # Deprecated (see versions.h):
    "batch_fft",
    "batch_fft2d",
    "batch_fft3d",
    "batch_ifft",
    "batch_ifft2d",
    "batch_ifft3d",
    "mul", # use tf.multiply instead.
    "neg", # use tf.negative instead.
    "sub", # use tf.subtract instead.
    # These are documented in nn.
    # We are not importing nn because it would create a circular dependency.
    "sigmoid",
    "log_sigmoid",
    "tanh",
]
_allowed_symbols_array_ops = [
    # TODO(drpng): make sure they are documented.
    # Scalars:
    "NEW_AXIS",
    "SHRINK_AXIS",
    "newaxis",
    # Documented in training.py.
    # I do not import train, to avoid circular dependencies.
    # TODO(drpng): this is defined in gen_array_ops, clearly not the right
    # place.
    "stop_gradient",
    # See gen_docs_combined for tf.copy documentation.
    "copy",
    ## TODO(drpng): make them inaccessible directly.
    ## TODO(drpng): Below, to-doc means that we need to find an appropriate
    ## documentation section to reference.
    ## For re-exporting to tf.*:
    "constant",
    "edit_distance", # to-doc
    # From gen_array_ops:
    "copy_host", # to-doc
    "immutable_const", # to-doc
    "invert_permutation", # to-doc
    "quantize_and_dequantize", # to-doc
    # TODO(drpng): legacy symbols to be removed.
    "batch_matrix_diag",
    "batch_matrix_band_part",
    "batch_matrix_diag_part",
    "batch_matrix_set_diag",
]
_allowed_symbols_partitioned_variables = [
    "PartitionedVariable", # Requires doc link.
    # Legacy.
    "create_partitioned_variables",
    "variable_axis_size_partitioner",
    "min_max_variable_partitioner",
    "fixed_size_partitioner",
]
_allowed_symbols_control_flow_ops = [
    # TODO(drpng): Find a place in the documentation to reference these or
    # remove.
    "control_trigger",
    "loop_cond",
    "merge",
    "switch",
]
_allowed_symbols_functional_ops = [
    "nest", # Used by legacy code.
]
_allowed_symbols_gradients = [
    # Documented in training.py:
    # Not importing training.py to avoid complex graph dependencies.
    "AggregationMethod",
    "GradientTape",
    "custom_gradient",
    "gradients", # tf.gradients = gradients.gradients
    "hessians",
]
_allowed_symbols_clip_ops = [
    # Documented in training.py:
    # Not importing training.py to avoid complex graph dependencies.
    "clip_by_average_norm",
    "clip_by_global_norm",
    "clip_by_norm",
    "clip_by_value",
    "global_norm",
]
_allowed_symbols_logging_ops = [
    # Documented in training.py.
    # We are not importing training.py to avoid complex dependencies.
    "audio_summary",
    "histogram_summary",
    "image_summary",
    "merge_all_summaries",
    "merge_summary",
    "scalar_summary",
    # TODO(drpng): link in training.py if it should be documented.
    "get_summary_op",
]
_allowed_symbols_variable_scope_ops = [
    "get_local_variable", # Documented in framework package.
]
_allowed_symbols_misc = [
    "deserialize_many_sparse",
    "parse_single_sequence_example",
    "serialize_many_sparse",
    "serialize_sparse",
    "confusion_matrix",
]
# The full allow-list handed to remove_undocumented(): the concatenation of
# every per-category list above (order is irrelevant to the pruning logic).
_allowed_symbols = (_allowed_symbols_array_ops +
                    _allowed_symbols_clip_ops +
                    _allowed_symbols_control_flow_ops +
                    _allowed_symbols_functional_ops +
                    _allowed_symbols_gradients +
                    _allowed_symbols_logging_ops +
                    _allowed_symbols_math_ops +
                    _allowed_symbols_variable_scope_ops +
                    _allowed_symbols_misc +
                    _allowed_symbols_partitioned_variables)
# Strip every public name of this module that is neither documented nor in
# _allowed_symbols; the third argument lists the (privately aliased) source
# modules whose doc strings remove_undocumented() consults.
remove_undocumented(__name__, _allowed_symbols, [
    _sys.modules[__name__],
    _array_ops,
    _check_ops,
    _clip_ops,
    _confusion_matrix,
    _control_flow_ops,
    _constant_op,
    _data_flow_ops,
    _functional_ops,
    _gradients,
    _histogram_ops,
    _init_ops,
    _io_ops,
    _linalg_ops,
    _logging_ops,
    _manip_ops,
    _math_ops,
    _numerics,
    _parsing_ops,
    _partitioned_variables,
    _random_ops,
    _script_ops,
    _session_ops,
    _sparse_ops,
    _special_math_ops,
    _state_ops,
    _string_ops,
    _template,
    _tensor_array_ops,
    _variable_scope,
    _variables,
])
| 36.163987 | 93 | 0.767049 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys as _sys
from tensorflow.python.ops import array_grad
from tensorflow.python.ops import cudnn_rnn_grad
from tensorflow.python.ops import data_flow_grad
from tensorflow.python.ops import manip_grad
from tensorflow.python.ops import math_grad
from tensorflow.python.ops import sparse_grad
from tensorflow.python.ops import spectral_grad
from tensorflow.python.ops import state_grad
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.ops.array_ops import *
from tensorflow.python.ops.check_ops import *
from tensorflow.python.ops.clip_ops import *
from tensorflow.python.ops.special_math_ops import *
from tensorflow.python.ops.confusion_matrix import confusion_matrix
from tensorflow.python.ops.control_flow_ops import Assert
from tensorflow.python.ops.control_flow_ops import case
from tensorflow.python.ops.control_flow_ops import cond
from tensorflow.python.ops.control_flow_ops import group
from tensorflow.python.ops.control_flow_ops import no_op
from tensorflow.python.ops.control_flow_ops import tuple # pylint: disable=redefined-builtin
# pylint: enable=redefined-builtin
from tensorflow.python.ops.control_flow_ops import while_loop
from tensorflow.python.ops.data_flow_ops import *
from tensorflow.python.ops.functional_ops import *
from tensorflow.python.ops.gradients import *
from tensorflow.python.ops.histogram_ops import *
from tensorflow.python.ops.init_ops import *
from tensorflow.python.ops.io_ops import *
from tensorflow.python.ops.linalg_ops import *
from tensorflow.python.ops.logging_ops import Print
from tensorflow.python.ops.logging_ops import get_summary_op
from tensorflow.python.ops.logging_ops import timestamp
from tensorflow.python.ops.lookup_ops import initialize_all_tables
from tensorflow.python.ops.lookup_ops import tables_initializer
from tensorflow.python.ops.manip_ops import *
from tensorflow.python.ops.math_ops import *
from tensorflow.python.ops.numerics import *
from tensorflow.python.ops.parsing_ops import *
from tensorflow.python.ops.partitioned_variables import *
from tensorflow.python.ops.random_ops import *
from tensorflow.python.ops.script_ops import py_func
from tensorflow.python.ops.session_ops import *
from tensorflow.python.ops.sparse_ops import *
from tensorflow.python.ops.state_ops import assign
from tensorflow.python.ops.state_ops import assign_add
from tensorflow.python.ops.state_ops import assign_sub
from tensorflow.python.ops.state_ops import count_up_to
from tensorflow.python.ops.state_ops import scatter_add
from tensorflow.python.ops.state_ops import scatter_div
from tensorflow.python.ops.state_ops import scatter_mul
from tensorflow.python.ops.state_ops import scatter_sub
from tensorflow.python.ops.state_ops import scatter_min
from tensorflow.python.ops.state_ops import scatter_max
from tensorflow.python.ops.state_ops import scatter_update
from tensorflow.python.ops.state_ops import scatter_nd_add
from tensorflow.python.ops.state_ops import scatter_nd_sub
# TODO(simister): Re-enable once binary size increase due to scatter_nd
# ops is under control.
# from tensorflow.python.ops.state_ops import scatter_nd_mul
# from tensorflow.python.ops.state_ops import scatter_nd_div
from tensorflow.python.ops.state_ops import scatter_nd_update
from tensorflow.python.ops.string_ops import *
from tensorflow.python.ops.template import *
from tensorflow.python.ops.tensor_array_ops import *
from tensorflow.python.ops.variable_scope import *
from tensorflow.python.ops.variables import *
# pylint: enable=wildcard-import
# pylint: enable=g-bad-import-order
#### For use in remove_undocumented below:
from tensorflow.python.framework import constant_op as _constant_op
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import check_ops as _check_ops
from tensorflow.python.ops import clip_ops as _clip_ops
from tensorflow.python.ops import confusion_matrix as _confusion_matrix
from tensorflow.python.ops import control_flow_ops as _control_flow_ops
from tensorflow.python.ops import data_flow_ops as _data_flow_ops
from tensorflow.python.ops import functional_ops as _functional_ops
from tensorflow.python.ops import gradients as _gradients
from tensorflow.python.ops import histogram_ops as _histogram_ops
from tensorflow.python.ops import init_ops as _init_ops
from tensorflow.python.ops import io_ops as _io_ops
from tensorflow.python.ops import linalg_ops as _linalg_ops
from tensorflow.python.ops import logging_ops as _logging_ops
from tensorflow.python.ops import manip_ops as _manip_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.ops import numerics as _numerics
from tensorflow.python.ops import parsing_ops as _parsing_ops
from tensorflow.python.ops import partitioned_variables as _partitioned_variables
from tensorflow.python.ops import random_ops as _random_ops
from tensorflow.python.ops import script_ops as _script_ops
from tensorflow.python.ops import session_ops as _session_ops
from tensorflow.python.ops import sparse_ops as _sparse_ops
from tensorflow.python.ops import special_math_ops as _special_math_ops
from tensorflow.python.ops import state_ops as _state_ops
from tensorflow.python.ops import string_ops as _string_ops
from tensorflow.python.ops import template as _template
from tensorflow.python.ops import tensor_array_ops as _tensor_array_ops
from tensorflow.python.ops import variable_scope as _variable_scope
from tensorflow.python.ops import variables as _variables
_allowed_symbols_math_ops = [
# TODO(drpng): decide if we want to reference these in the documentation.
"reduced_shape",
"sparse_segment_mean_grad",
"sparse_segment_sqrt_n_grad",
# Legacy: will be removed.
"arg_max",
"arg_min",
"lin_space",
"sparse_matmul", # Use tf.matmul.
# Deprecated (see versions.h):
"batch_fft",
"batch_fft2d",
"batch_fft3d",
"batch_ifft",
"batch_ifft2d",
"batch_ifft3d",
"mul", # use tf.multiply instead.
"neg", # use tf.negative instead.
"sub", # use tf.subtract instead.
# These are documented in nn.
# We are not importing nn because it would create a circular dependency.
"sigmoid",
"log_sigmoid",
"tanh",
]
_allowed_symbols_array_ops = [
# TODO(drpng): make sure they are documented.
# Scalars:
"NEW_AXIS",
"SHRINK_AXIS",
"newaxis",
# Documented in training.py.
# I do not import train, to avoid circular dependencies.
# TODO(drpng): this is defined in gen_array_ops, clearly not the right
# place.
"stop_gradient",
# See gen_docs_combined for tf.copy documentation.
"copy",
## TODO(drpng): make them inaccessible directly.
## TODO(drpng): Below, to-doc means that we need to find an appropriate
## documentation section to reference.
## For re-exporting to tf.*:
"constant",
"edit_distance", # to-doc
# From gen_array_ops:
"copy_host", # to-doc
"immutable_const", # to-doc
"invert_permutation", # to-doc
"quantize_and_dequantize", # to-doc
# TODO(drpng): legacy symbols to be removed.
"batch_matrix_diag",
"batch_matrix_band_part",
"batch_matrix_diag_part",
"batch_matrix_set_diag",
]
_allowed_symbols_partitioned_variables = [
"PartitionedVariable", # Requires doc link.
# Legacy.
"create_partitioned_variables",
"variable_axis_size_partitioner",
"min_max_variable_partitioner",
"fixed_size_partitioner",
]
_allowed_symbols_control_flow_ops = [
# TODO(drpng): Find a place in the documentation to reference these or
# remove.
"control_trigger",
"loop_cond",
"merge",
"switch",
]
_allowed_symbols_functional_ops = [
"nest", # Used by legacy code.
]
_allowed_symbols_gradients = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"AggregationMethod",
"GradientTape",
"custom_gradient",
"gradients", # tf.gradients = gradients.gradients
"hessians",
]
_allowed_symbols_clip_ops = [
# Documented in training.py:
# Not importing training.py to avoid complex graph dependencies.
"clip_by_average_norm",
"clip_by_global_norm",
"clip_by_norm",
"clip_by_value",
"global_norm",
]
_allowed_symbols_logging_ops = [
# Documented in training.py.
# We are not importing training.py to avoid complex dependencies.
"audio_summary",
"histogram_summary",
"image_summary",
"merge_all_summaries",
"merge_summary",
"scalar_summary",
# TODO(drpng): link in training.py if it should be documented.
"get_summary_op",
]
_allowed_symbols_variable_scope_ops = [
"get_local_variable", # Documented in framework package.
]
_allowed_symbols_misc = [
"deserialize_many_sparse",
"parse_single_sequence_example",
"serialize_many_sparse",
"serialize_sparse",
"confusion_matrix",
]
_allowed_symbols = (_allowed_symbols_array_ops +
_allowed_symbols_clip_ops +
_allowed_symbols_control_flow_ops +
_allowed_symbols_functional_ops +
_allowed_symbols_gradients +
_allowed_symbols_logging_ops +
_allowed_symbols_math_ops +
_allowed_symbols_variable_scope_ops +
_allowed_symbols_misc +
_allowed_symbols_partitioned_variables)
remove_undocumented(__name__, _allowed_symbols, [
_sys.modules[__name__],
_array_ops,
_check_ops,
_clip_ops,
_confusion_matrix,
_control_flow_ops,
_constant_op,
_data_flow_ops,
_functional_ops,
_gradients,
_histogram_ops,
_init_ops,
_io_ops,
_linalg_ops,
_logging_ops,
_manip_ops,
_math_ops,
_numerics,
_parsing_ops,
_partitioned_variables,
_random_ops,
_script_ops,
_session_ops,
_sparse_ops,
_special_math_ops,
_state_ops,
_string_ops,
_template,
_tensor_array_ops,
_variable_scope,
_variables,
])
| true | true |
f71f992d007cb05563bc79a20eaf79c8910f3047 | 7,240 | py | Python | run_scripts/FreeSurfer/nipype_reconall_with_tracker.py | neurodatascience/watts_up_compute | 1ed41e62690f99f699b44180208689cc19616bb7 | [
"MIT"
] | null | null | null | run_scripts/FreeSurfer/nipype_reconall_with_tracker.py | neurodatascience/watts_up_compute | 1ed41e62690f99f699b44180208689cc19616bb7 | [
"MIT"
] | null | null | null | run_scripts/FreeSurfer/nipype_reconall_with_tracker.py | neurodatascience/watts_up_compute | 1ed41e62690f99f699b44180208689cc19616bb7 | [
"MIT"
] | null | null | null | # Import modules
import os
import sys
from os.path import join as opj
import pandas as pd
import time
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.utility import IdentityInterface
from nipype.pipeline.engine import Workflow, Node
from pypapi import events, papi_high as high
import argparse
# Add paths (singularity should see these)
# FastSurfer and carbon trackers are in the mounted dir as these repos keep getting updated.
# TODO replace this with setup.py once the dependencis become stable
# sys.path.append('../../../experiment-impact-tracker/')
# sys.path.append('../../../codecarbon/')
from experiment_impact_tracker.compute_tracker import ImpactTracker
from codecarbon import EmissionsTracker, OfflineEmissionsTracker
def get_reconall(recon_directive, fs_folder):
    """Build the nipype Node that wraps the FreeSurfer recon-all command.

    ``recon_directive`` selects the recon-all stage (e.g. 'autorecon1')
    and ``fs_folder`` is used as the FreeSurfer subjects directory.
    """
    recon_node = Node(
        ReconAll(
            directive=recon_directive,
            flags='-nuintensitycor -3T',
            subjects_dir=fs_folder,
        ),
        name="reconall",
    )
    return recon_node
# Resolves, for one subject, the on-disk path of its structural image.
def pathfinder(subject, foldername, filename):
    """Return ``foldername/subject/filename`` as a single path string.

    The ``os.path.join`` import is deliberately local: nipype serialises
    this function's source when it is used inside a workflow connection,
    so the body has to be self-contained.
    """
    from os.path import join as opj
    return opj(foldername, subject, filename)
def main():
    """Run FreeSurfer recon-all for one subject while tracking compute cost.

    Parses CLI arguments, starts the experiment-impact-tracker and
    codecarbon energy trackers (and optionally a PAPI FLOP counter),
    then executes the requested recon-all stage(s) through a nipype
    workflow.
    """
    # setup
    # NOTE(review): exp_start_time is never read afterwards -- kept for
    # parity with the original; confirm whether it was meant to be logged.
    exp_start_time = time.time()
    # argparse
    parser = argparse.ArgumentParser(description='Script to run freesurfer reconall with nipype and track compute costs', epilog='$Id: fast_surfer_cnn, v 1.0 2019/09/30$')
    # Data
    parser.add_argument('--experiment_dir', dest='experiment_dir', help='path to directory to store freesurfer derived data.')
    parser.add_argument('--data_dir', help="path to input data", default='/neurohub/ukbb/imaging/')
    parser.add_argument('--subject_id', dest='subject_id', help='subject_id')
    parser.add_argument('--T1_identifier', help='T1 identifier string relateive to the subject directory')
    # FreeSurfer
    parser.add_argument('--recon_directive', dest='recon_directive', help='recon_directive (autorecon 1, 2, or 3)', default='1') #MTL
    # Trackers
    parser.add_argument('--tracker_log_dir', dest='tracker_log_dir',
                        help="log dir for experiment impact tracker",
                        type=str, default='./tracker_logs/')
    parser.add_argument('--geo_loc', dest='geo_loc',
                        help="(lat,log) coords for experiment impact tracker",
                        type=str, default='45.4972159,-73.6103642') #MTL Beluga
    parser.add_argument('--CC_offline',
                        help="Run CC in offline mode",
                        action='store_true')
    parser.add_argument('--TZ', dest='TZ',
                        help="TimeZone",
                        type=str, default='America/New_York')
    parser.add_argument('--iso_code', dest='iso_code',
                        help="Country ISO code",
                        type=str, default='USA')
    # PAPI
    parser.add_argument('--count_FLOPs', dest='count_FLOPs',help="Count FLOPs using PAPI",action='store_true')
    args = parser.parse_args()
    # Data
    experiment_dir = args.experiment_dir
    data_dir = args.data_dir
    subject_id = args.subject_id
    T1_identifier = args.T1_identifier
    # FreeSurfer
    recon_directive = args.recon_directive
    # FLOPs
    count_FLOPs = args.count_FLOPs
    # Trackers
    tracker_log_dir = args.tracker_log_dir
    geo_loc = args.geo_loc
    CC_offline = args.CC_offline
    TZ = args.TZ
    iso_code = args.iso_code
    print(f'Using offline mode for CC tracker: {CC_offline}')
    if CC_offline:
        print(f'Using {TZ} timezone and {iso_code} country iso code')
    print(f'Starting subject: {subject_id}')
    # Set up the trackers: one log sub-directory per tracker per subject.
    log_dir = '{}/{}/'.format(tracker_log_dir,subject_id)
    log_dir_EIT = f'{log_dir}/EIT/'
    log_dir_CC = f'{log_dir}/CC/'
    for d in [log_dir_EIT,log_dir_CC]:
        if not os.path.exists(d):
            os.makedirs(d)
    # Use specified geo location for the HPC
    # NOTE(review): the CLI help says "(lat,log)" and the split below takes
    # the first element as ly (latitude) -- confirm the expected ordering.
    ly,lx = float(geo_loc.split(',')[0]), float(geo_loc.split(',')[1])
    coords = (ly,lx)
    print(f'Using geographical coordinates (long,lat): {coords}')
    # EIT tracker; keeping the reference alive keeps its monitor running.
    tracker_EIT = ImpactTracker(log_dir_EIT,coords)
    tracker_EIT.launch_impact_monitor()
    # CodeCarbon tracker
    os.environ['TZ']= TZ
    if CC_offline:
        tracker_CC = OfflineEmissionsTracker(output_dir=log_dir_CC, country_iso_code=iso_code)
    else:
        tracker_CC = EmissionsTracker(output_dir=log_dir_CC)
    tracker_CC.start()
    if count_FLOPs:
        print('Counting flops using PAPI')
        # NOTE(review): plain concatenation assumes tracker_log_dir ends
        # with '/' (true for the default value) -- verify for custom paths.
        flop_csv = tracker_log_dir + 'compute_costs_flop.csv'
        flop_df = pd.DataFrame(columns=['task','start_time','duration','DP'])
    # Start FS processing for a given subject
    subject_list = [subject_id]
    fs_folder = opj(experiment_dir, 'freesurfer') # location of freesurfer folder
    # Create the output folder - FreeSurfer can only run if this folder exists
    os.system('mkdir -p %s' % fs_folder)
    # Specify recon workflow stages
    if recon_directive == 'all':
        recon_directives = ['autorecon1','autorecon2','autorecon3']
    else:
        recon_directives = [recon_directive]
    for r, recon_directive in enumerate(recon_directives):
        print('\nStarting stage: {}'.format(recon_directive))
        # Create the pipeline that runs the recon-all command
        reconflow = Workflow(name="reconflow")
        reconflow.base_dir = opj(experiment_dir, 'workingdir_reconflow')
        # Some magical stuff happens here (not important for now)
        infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
        infosource.iterables = ('subject_id', subject_list)
        # Specify recon-all stage based on recon-directive
        reconall = get_reconall(recon_directive, fs_folder)
        # This section connects all the nodes of the pipeline to each other;
        # pathfinder resolves each subject id to its T1 file path.
        reconflow.connect([(infosource, reconall, [('subject_id', 'subject_id')]),
                           (infosource, reconall, [(('subject_id', pathfinder,
                                                     data_dir, T1_identifier),
                                                    'T1_files')]),
                           ])
        if count_FLOPs:
            # start flop counter
            start_time = time.time()
            high.start_counters([events.PAPI_DP_OPS,]) #default: PAPI_FP_OPS
        # This command runs the recon-all pipeline in parallel (using n_procs cores)
        # reconflow.run('MultiProc', plugin_args={'n_procs': 4})
        reconflow.run()
        if count_FLOPs:
            # stop flop counter
            DP = high.stop_counters()[0]
            end_time = time.time()
            duration = end_time - start_time
            print('Duration: {}, Flops: {}'.format(duration, DP))
            flop_df.loc[r] = [recon_directive,start_time, duration, DP]
    ## code-carbon tracker
    tracker_CC.stop()
    if count_FLOPs:
        flop_df.to_csv(flop_csv)
# Script entry point.
if __name__ == "__main__":
    main()
| 36.938776 | 171 | 0.642403 |
import os
import sys
from os.path import join as opj
import pandas as pd
import time
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.utility import IdentityInterface
from nipype.pipeline.engine import Workflow, Node
from pypapi import events, papi_high as high
import argparse
from experiment_impact_tracker.compute_tracker import ImpactTracker
from codecarbon import EmissionsTracker, OfflineEmissionsTracker
def get_reconall(recon_directive,fs_folder):
reconall = Node(ReconAll(directive=recon_directive,
flags='-nuintensitycor -3T',
subjects_dir=fs_folder),
name="reconall")
return reconall
def pathfinder(subject, foldername, filename):
from os.path import join as opj
struct_path = opj(foldername, subject, filename)
return struct_path
def main():
exp_start_time = time.time()
parser = argparse.ArgumentParser(description='Script to run freesurfer reconall with nipype and track compute costs', epilog='$Id: fast_surfer_cnn, v 1.0 2019/09/30$')
parser.add_argument('--experiment_dir', dest='experiment_dir', help='path to directory to store freesurfer derived data.')
parser.add_argument('--data_dir', help="path to input data", default='/neurohub/ukbb/imaging/')
parser.add_argument('--subject_id', dest='subject_id', help='subject_id')
parser.add_argument('--T1_identifier', help='T1 identifier string relateive to the subject directory')
parser.add_argument('--recon_directive', dest='recon_directive', help='recon_directive (autorecon 1, 2, or 3)', default='1')
parser.add_argument('--tracker_log_dir', dest='tracker_log_dir',
help="log dir for experiment impact tracker",
type=str, default='./tracker_logs/')
parser.add_argument('--geo_loc', dest='geo_loc',
help="(lat,log) coords for experiment impact tracker",
type=str, default='45.4972159,-73.6103642')
parser.add_argument('--CC_offline',
help="Run CC in offline mode",
action='store_true')
parser.add_argument('--TZ', dest='TZ',
help="TimeZone",
type=str, default='America/New_York')
parser.add_argument('--iso_code', dest='iso_code',
help="Country ISO code",
type=str, default='USA')
parser.add_argument('--count_FLOPs', dest='count_FLOPs',help="Count FLOPs using PAPI",action='store_true')
args = parser.parse_args()
experiment_dir = args.experiment_dir
data_dir = args.data_dir
subject_id = args.subject_id
T1_identifier = args.T1_identifier
recon_directive = args.recon_directive
count_FLOPs = args.count_FLOPs
tracker_log_dir = args.tracker_log_dir
geo_loc = args.geo_loc
CC_offline = args.CC_offline
TZ = args.TZ
iso_code = args.iso_code
print(f'Using offline mode for CC tracker: {CC_offline}')
if CC_offline:
print(f'Using {TZ} timezone and {iso_code} country iso code')
print(f'Starting subject: {subject_id}')
log_dir = '{}/{}/'.format(tracker_log_dir,subject_id)
log_dir_EIT = f'{log_dir}/EIT/'
log_dir_CC = f'{log_dir}/CC/'
for d in [log_dir_EIT,log_dir_CC]:
if not os.path.exists(d):
os.makedirs(d)
ly,lx = float(geo_loc.split(',')[0]), float(geo_loc.split(',')[1])
coords = (ly,lx)
print(f'Using geographical coordinates (long,lat): {coords}')
tracker_EIT = ImpactTracker(log_dir_EIT,coords)
tracker_EIT.launch_impact_monitor()
os.environ['TZ']= TZ
if CC_offline:
tracker_CC = OfflineEmissionsTracker(output_dir=log_dir_CC, country_iso_code=iso_code)
else:
tracker_CC = EmissionsTracker(output_dir=log_dir_CC)
tracker_CC.start()
if count_FLOPs:
print('Counting flops using PAPI')
flop_csv = tracker_log_dir + 'compute_costs_flop.csv'
flop_df = pd.DataFrame(columns=['task','start_time','duration','DP'])
subject_list = [subject_id]
fs_folder = opj(experiment_dir, 'freesurfer')
os.system('mkdir -p %s' % fs_folder)
if recon_directive == 'all':
recon_directives = ['autorecon1','autorecon2','autorecon3']
else:
recon_directives = [recon_directive]
for r, recon_directive in enumerate(recon_directives):
print('\nStarting stage: {}'.format(recon_directive))
reconflow = Workflow(name="reconflow")
reconflow.base_dir = opj(experiment_dir, 'workingdir_reconflow')
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', subject_list)
reconall = get_reconall(recon_directive, fs_folder)
reconflow.connect([(infosource, reconall, [('subject_id', 'subject_id')]),
(infosource, reconall, [(('subject_id', pathfinder,
data_dir, T1_identifier),
'T1_files')]),
])
if count_FLOPs:
start_time = time.time()
high.start_counters([events.PAPI_DP_OPS,])
reconflow.run()
if count_FLOPs:
DP = high.stop_counters()[0]
end_time = time.time()
duration = end_time - start_time
print('Duration: {}, Flops: {}'.format(duration, DP))
flop_df.loc[r] = [recon_directive,start_time, duration, DP]
)
if count_FLOPs:
flop_df.to_csv(flop_csv)
if __name__=='__main__':
main()
| true | true |
f71f9baff849e1b3e85a4e00a676e11b093d2eb9 | 7,777 | py | Python | examples/vae.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 222 | 2019-02-13T07:56:28.000Z | 2022-03-28T07:07:54.000Z | examples/vae.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 107 | 2019-02-12T21:56:39.000Z | 2022-03-12T01:08:03.000Z | examples/vae.py | strint/myia | 3d00d3fb3df80ab7a264a724226c5f56c6ff1a8a | [
"MIT"
] | 27 | 2017-11-14T17:58:15.000Z | 2019-01-14T01:36:09.000Z | """Example of an MLP in Myia.
Myia is still a work in progress, and this example may change in the future.
"""
import time
from dataclasses import dataclass
import numpy
import torch
from numpy.random import RandomState
from torchvision import datasets, transforms
import myia.public_api as pub
from myia import ArithmeticData, myia, value_and_grad
from myia.api import to_device
from myia.debug import traceback # noqa
from myia.operations import array_exp, array_pow, random_initialize
###########
# Options #
###########
# Numpy dtype name used for every parameter and data array below.
dtype = "float32"
backend = "pytorch"
# backend = 'relay' # Uncomment to use relay backend
device_type = "cpu"
# device_type = 'cuda' # Uncomment to run on the gpu
# Per-backend option dicts; only the entry selected by ``backend`` is used.
backend_options_dict = {
    "pytorch": {"device": device_type},
    "relay": {"target": device_type, "device_id": 0},
}
backend_options = backend_options_dict[backend]
###############
# Hyperparams #
###############
# Learning rate (re-assigned with the same value further down the file).
lr = getattr(numpy, dtype)(0.01)
########
# Data #
########
# This just generates random data so we don't have to load a real dataset,
# but the model will work just as well on a real dataset.
def param(R, *size, dtype="float32"):
    """Generate a random array with entries uniform in [-1, 1).

    Previously this read the module-level ``dtype`` global; it is now a
    keyword parameter defaulting to the same value ("float32"), which
    removes the hidden global dependency while staying backward
    compatible for all existing positional callers.

    Parameters
    ----------
    R : numpy.random.RandomState
        Source of randomness.
    *size : int
        Shape of the array to generate.
    dtype : str, optional
        Name of the numpy dtype of the result (default "float32").

    Returns
    -------
    numpy.ndarray
        Array of shape ``size`` with values drawn uniformly from [-1, 1).
    """
    return numpy.array(R.rand(*size) * 2 - 1, dtype=dtype)
def generate_data(n, batch_size, input_size, target_size, *, seed=87):
    """Generate ``n`` batches of random (input, target) pairs.

    Each batch pairs a (batch_size, input_size) input array with a
    (batch_size, target_size) target array; all values come from one
    RandomState seeded with ``seed``, so output is reproducible.
    """
    rng = RandomState(seed=seed)
    batches = []
    for _ in range(n):
        inputs = param(rng, batch_size, input_size)
        targets = param(rng, batch_size, target_size)
        batches.append((inputs, targets))
    return batches
def mlp_parameters(*layer_sizes, seed=90909):
    """Build (weights, biases) pairs for consecutive MLP layer sizes.

    For each adjacent pair (n_in, n_out) in ``layer_sizes``, produces a
    random (n_in, n_out) weight matrix and a (1, n_out) bias row, all
    drawn from a single seeded RandomState.
    """
    rng = RandomState(seed=seed)
    pairs = []
    for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        pairs.append((param(rng, n_in, n_out), param(rng, 1, n_out)))
    return pairs
#########
# Model #
#########
# We generate a MLP model with some arbitrary number of layers and tanh
# activations.
@dataclass(frozen=True)
class Linear(ArithmeticData):
    """Fully-connected (affine) layer: x -> x @ W + b."""

    W: "Weights array"
    b: "Biases vector"

    def apply(self, input):
        """Return the affine transform of ``input``."""
        projected = input @ self.W
        return projected + self.b
@dataclass(frozen=True)
class Tanh(ArithmeticData):
    """Elementwise tanh activation layer (stateless)."""

    def apply(self, input):
        """Return ``numpy.tanh`` applied elementwise to ``input``."""
        activated = numpy.tanh(input)
        return activated
@dataclass(frozen=True)
class Sequential(ArithmeticData):
    """Container layer that applies its sub-layers one after another."""

    layers: "Tuple of layers"

    def apply(self, x):
        """Thread ``x`` through every sub-layer, in order."""
        out = x
        for layer in self.layers:
            out = layer.apply(out)
        return out
@dataclass(frozen=True)
class VAE(ArithmeticData):
    """Variational auto-encoder over flattened 28x28 (=784) inputs.

    fc1 -> (fc21, fc22) form the encoder (fc21 produces mu, fc22
    produces logvar); fc3 -> fc4 form the decoder. Random state is
    threaded explicitly through the sampling step, myia-style.
    """
    fc1: "layer fc1"
    fc21: "layer fc21"
    fc22: "layer fc22"
    fc3: "layer fc3"
    fc4: "layer fc4"
    def encode(self, x):
        # Map x to the latent Gaussian parameters (mu, logvar).
        h1 = pub.relu(self.fc1.apply(x))
        return self.fc21.apply(h1), self.fc22.apply(h1)
    def reparameterize(self, mu, logvar, rstate):
        # Sample z = mu + eps * std; returns (z, new random state).
        # NOTE(review): eps is drawn from uniform(-1, 1) with a hard-coded
        # (2, 20) shape (batch_size=2, latent size 20); a standard VAE uses
        # standard-normal noise shaped like ``mu`` -- confirm intentional.
        std = array_exp(0.5 * logvar)
        eps, rstate = pub.uniform(rstate, (2, 20), -1.0, 1.0)
        return mu + eps * std, rstate
    def decode(self, z):
        # Map a latent sample back to pixel probabilities in (0, 1).
        h3 = pub.relu(self.fc3.apply(z))
        return pub.sigmoid(self.fc4.apply(h3))
    def forward(self, x, rstate):
        # Full pass: flatten -> encode -> reparameterize -> decode.
        mu, logvar = self.encode(pub.reshape(x, (-1, 784)))
        z, rstate = self.reparameterize(mu, logvar, rstate)
        return self.decode(z), mu, logvar, rstate
# Initial (W, b) parameters for the five affine layers of the VAE; each
# call uses the default seed, so values are reproducible.
_layer_shapes = ((784, 400), (400, 20), (400, 20), (20, 400), (400, 784))
params = tuple(mlp_parameters(n_in, n_out)[0] for n_in, n_out in _layer_shapes)
# Wrap each (W, b) pair in a Linear layer and assemble the VAE.
model = VAE(*(Linear(W, b) for W, b in params))
model = to_device(model, backend, backend_options, broaden=False)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
    """Return BCE(recon_x, flattened x) + KL(N(mu, sigma) || N(0, 1))."""
    BCE = pub.binary_cross_entropy(
        recon_x, pub.reshape(x, (-1, 784)), reduction="sum"
    )
    # see Appendix B from VAE paper:
    # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
    # https://arxiv.org/abs/1312.6114
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
    KLD = -0.5 * pub._sum(1 + logvar - array_pow(mu, 2) - array_exp(logvar))
    return BCE + KLD
def cost(model, data, rstate):
    """Compute the scalar VAE loss on one batch.

    Returns (loss value, new random state); the state comes back so the
    caller can keep threading it through subsequent steps.
    """
    reconstruction, mu, logvar, new_rstate = model.forward(data, rstate)
    total_loss = loss_function(reconstruction, data, mu, logvar)
    return total_loss.item(), new_rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step(model, data, lr, rstate):
    """Run one SGD step; return (loss, updated model, new random state).

    value_and_grad(cost, "model") returns cost(model, data, rstate)
    together with d(cost)/d(model). dout=(1, 1) seeds the backward pass
    for the (loss, rstate) output pair.
    """
    (_cost, rstate), dmodel = value_and_grad(cost, "model")(
        model, data, rstate, dout=(1, 1)
    )
    return _cost, model - lr * dmodel, rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_eval(model, data, rstate):
    """Evaluate the loss on one batch without computing gradients.

    Returns (loss, new random state); the model is left unchanged.
    (The previous docstring, copy-pasted from ``step``, wrongly claimed
    this returned parameter gradients.)
    """
    return cost(model, data, rstate)
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_init_seed():
    """Creates and returns the initial backend random state (seed 1)."""
    return random_initialize(1)
# Scalar learning rate cast to the configured numpy dtype.
# NOTE(review): `dtype` is defined earlier in the file -- presumably a string
# such as "float32"; confirm against the file header.
lr = getattr(numpy, dtype)(0.01)
if __name__ == "__main__":
    # Run configuration: CPU by default, tiny batch/epoch count (demo-sized).
    seed = 123
    cuda = False
    batch_size = 2
    epochs = 1
    torch.manual_seed(seed)
    device = torch.device("cuda" if cuda else "cpu")  # NOTE(review): unused below
    kwargs = {"num_workers": 1, "pin_memory": True} if cuda else {}
    # MNIST training images; each 28x28 image is flattened to 784 below.
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            "../data",
            train=True,
            download=True,
            transform=transforms.ToTensor(),
        ),
        batch_size=batch_size,
        shuffle=True,
        **kwargs,
    )
    # Backend-side RNG state, threaded through every training/eval step.
    rand_state = step_init_seed()
    for _ in range(epochs):
        costs = []
        t0 = time.time()
        for i, (data, _) in enumerate(train_loader):
            print("i", i + 1, "/", len(train_loader))
            _cost, model, rand_state = step(
                model, data.reshape((batch_size, 784)).numpy(), lr, rand_state
            )
            costs.append(_cost)
        # Per-batch costs live on the backend; copy back before averaging.
        costs = [float(c.from_device()) for c in costs]
        c = sum(costs) / len(costs)
        t = time.time() - t0
        print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
    # Evaluation pass over the MNIST test split (no parameter updates).
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST("../data", train=False, transform=transforms.ToTensor()),
        batch_size=batch_size,
        shuffle=True,
        **kwargs,
    )
    costs = []
    t0 = time.time()
    for i, (data, _) in enumerate(test_loader):
        _cost, rand_state = step_eval(
            model, data.reshape((batch_size, 784)).numpy(), rand_state
        )
        costs.append(_cost)
    costs = [float(c.from_device()) for c in costs]
    c = sum(costs) / len(costs)
    t = time.time() - t0
    print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
| 26.542662 | 80 | 0.626077 |
import time
from dataclasses import dataclass
import numpy
import torch
from numpy.random import RandomState
from torchvision import datasets, transforms
import myia.public_api as pub
from myia import ArithmeticData, myia, value_and_grad
from myia.api import to_device
from myia.debug import traceback
from myia.operations import array_exp, array_pow, random_initialize
evice_id": 0},
}
backend_options = backend_options_dict[backend]
andomState(seed=seed)
return [
(param(R, batch_size, input_size), param(R, batch_size, target_size))
for i in range(n)
]
def mlp_parameters(*layer_sizes, seed=90909):
R = RandomState(seed=seed)
parameters = []
for i, o in zip(layer_sizes[:-1], layer_sizes[1:]):
W = param(R, i, o)
b = param(R, 1, o)
parameters.append((W, b))
return parameters
#########
# Model #
#########
# We generate a MLP model with some arbitrary number of layers and tanh
# activations.
@dataclass(frozen=True)
class Linear(ArithmeticData):
W: "Weights array"
b: "Biases vector"
def apply(self, input):
return input @ self.W + self.b
@dataclass(frozen=True)
class Tanh(ArithmeticData):
def apply(self, input):
return numpy.tanh(input)
@dataclass(frozen=True)
class Sequential(ArithmeticData):
layers: "Tuple of layers"
def apply(self, x):
for layer in self.layers:
x = layer.apply(x)
return x
@dataclass(frozen=True)
class VAE(ArithmeticData):
fc1: "layer fc1"
fc21: "layer fc21"
fc22: "layer fc22"
fc3: "layer fc3"
fc4: "layer fc4"
def encode(self, x):
h1 = pub.relu(self.fc1.apply(x))
return self.fc21.apply(h1), self.fc22.apply(h1)
def reparameterize(self, mu, logvar, rstate):
std = array_exp(0.5 * logvar)
eps, rstate = pub.uniform(rstate, (2, 20), -1.0, 1.0)
return mu + eps * std, rstate
def decode(self, z):
h3 = pub.relu(self.fc3.apply(z))
return pub.sigmoid(self.fc4.apply(h3))
def forward(self, x, rstate):
mu, logvar = self.encode(pub.reshape(x, (-1, 784)))
z, rstate = self.reparameterize(mu, logvar, rstate)
return self.decode(z), mu, logvar, rstate
params = (
mlp_parameters(*(784, 400))[0],
mlp_parameters(*(400, 20))[0],
mlp_parameters(*(400, 20))[0],
mlp_parameters(*(20, 400))[0],
mlp_parameters(*(400, 784))[0],
)
model = VAE(
Linear(params[0][0], params[0][1]),
Linear(params[1][0], params[1][1]),
Linear(params[2][0], params[2][1]),
Linear(params[3][0], params[3][1]),
Linear(params[4][0], params[4][1]),
)
model = to_device(model, backend, backend_options, broaden=False)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = pub.binary_cross_entropy(
recon_x, pub.reshape(x, (-1, 784)), reduction="sum"
)
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * pub._sum(1 + logvar - array_pow(mu, 2) - array_exp(logvar))
return BCE + KLD
def cost(model, data, rstate):
recon_batch, mu, logvar, _rstate = model.forward(data, rstate)
loss = loss_function(recon_batch, data, mu, logvar)
return loss.item(), _rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step(model, data, lr, rstate):
(_cost, rstate), dmodel = value_and_grad(cost, "model")(
model, data, rstate, dout=(1, 1)
)
return _cost, model - lr * dmodel, rstate
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_eval(model, data, rstate):
return cost(model, data, rstate)
@myia(backend=backend, backend_options=backend_options, return_backend=True)
def step_init_seed():
return random_initialize(1)
lr = getattr(numpy, dtype)(0.01)
if __name__ == "__main__":
seed = 123
cuda = False
batch_size = 2
epochs = 1
torch.manual_seed(seed)
device = torch.device("cuda" if cuda else "cpu")
kwargs = {"num_workers": 1, "pin_memory": True} if cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=True,
download=True,
transform=transforms.ToTensor(),
),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
rand_state = step_init_seed()
for _ in range(epochs):
costs = []
t0 = time.time()
for i, (data, _) in enumerate(train_loader):
print("i", i + 1, "/", len(train_loader))
_cost, model, rand_state = step(
model, data.reshape((batch_size, 784)).numpy(), lr, rand_state
)
costs.append(_cost)
costs = [float(c.from_device()) for c in costs]
c = sum(costs) / len(costs)
t = time.time() - t0
print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
test_loader = torch.utils.data.DataLoader(
datasets.MNIST("../data", train=False, transform=transforms.ToTensor()),
batch_size=batch_size,
shuffle=True,
**kwargs,
)
costs = []
t0 = time.time()
for i, (data, _) in enumerate(test_loader):
_cost, rand_state = step_eval(
model, data.reshape((batch_size, 784)).numpy(), rand_state
)
costs.append(_cost)
costs = [float(c.from_device()) for c in costs]
c = sum(costs) / len(costs)
t = time.time() - t0
print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")
| true | true |
f71f9c72961197bdb094aca591c521ff5e6e78f6 | 2,944 | py | Python | scripts/cfg/cfg.py | jepler/aocl-libm-ose | 4033e022da428125747e118ccd6fdd9cee21c470 | [
"BSD-3-Clause"
] | 66 | 2020-11-04T17:06:10.000Z | 2022-03-10T08:03:12.000Z | scripts/cfg/cfg.py | HollowMan6/aocl-libm-ose | 4033e022da428125747e118ccd6fdd9cee21c470 | [
"BSD-3-Clause"
] | 8 | 2021-04-18T18:37:53.000Z | 2022-03-11T12:49:31.000Z | scripts/cfg/cfg.py | HollowMan6/aocl-libm-ose | 4033e022da428125747e118ccd6fdd9cee21c470 | [
"BSD-3-Clause"
] | 8 | 2020-11-09T03:45:01.000Z | 2021-11-08T02:25:31.000Z | #
# Copyright (C) 2008-2020 Advanced Micro Devices, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from SCons.Variables import Variables as SVariables
from SCons.Script import AddOption
class LocalOption:
    """Thin wrapper over SCons AddOption that records aligned help text.

    Each registered option's flags and description are accumulated so the
    caller can print a custom, column-aligned usage listing later via
    GetHelpTexts().
    """

    def __init__(self):
        self.__help_texts = ""

    def Add(self, *args, **kwargs):
        """Registers the option with SCons and remembers its help line."""
        col_width = 30
        entry = "  " + ", ".join(args)
        if "help" in kwargs:
            # Pad (or wrap) the flag names so descriptions start at the same
            # column.
            if len(entry) >= col_width:
                entry = entry + "\n" + " " * col_width
            else:
                entry = entry + " " * (col_width - len(entry))
            entry = entry + kwargs["help"]
            self.__help_texts += entry + "\n"
        AddOption(*args, **kwargs)

    def GetHelpTexts(self):
        """Returns the accumulated help text block."""
        return self.__help_texts
class Variables(SVariables):
    """SCons Variables subclass that supports marking variables as required.

    Variables added with required=True are remembered and checked for
    presence in the environment during Update(); missing ones are reported.
    """

    def __init__(self, files=None, args=None, is_global=1):
        # Avoid shared mutable default arguments; None maps to the original
        # [] / {} defaults, so the call signature stays backward-compatible.
        self.required = []
        # Bug fix: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(Variables, self).__init__(
            files if files is not None else [],
            args if args is not None else {},
            is_global)

    def Add(self, key, help="", default=None, validator=None, converter=None, required=False):
        """Adds a variable; with required=True it is tracked for Update() checks."""
        SVariables.Add(self, key, help, default, validator, converter)
        if required:
            print("adding required option ", key[0])
            self.required.append(key[0])

    def Update(self, env):
        """Applies variables to `env`, then reports missing required ones."""
        print("required options are: ", self.required)
        SVariables.Update(self, env)
        for requirement in self.required:
            # Bug fix: dict.has_key() does not exist under Python 3; use `in`.
            if requirement not in env:
                print('violation: ', requirement)
| 41.464789 | 94 | 0.686481 |
from SCons.Variables import Variables as SVariables
from SCons.Script import AddOption
class LocalOption:
def __init__(self):
self.__help_texts = ""
pass
def Add(self, *args, **kwargs):
col_width = 30
help = " " + ", ".join(args)
if "help" in kwargs:
length = len(help)
if length >= col_width:
help += "\n" + " " * col_width
else:
help += " " * (col_width - length)
help += kwargs["help"]
self.__help_texts += help + "\n"
AddOption(*args, **kwargs)
def GetHelpTexts(self):
return self.__help_texts
class Variables(SVariables):
def __init__(self, files=[], args={}, is_global=1):
self.required = []
super(self.__class__,self).__init__(files, args, is_global)
def Add(self, key, help="", default=None, validator=None, converter=None, required=False):
SVariables.Add(self, key, help, default, validator, converter)
if required:
print("adding required option ", key[0])
self.required.append(key[0])
def Update(self, env):
print("required options are: ", self.required)
SVariables.Update(self, env)
for requirement in self.required:
if not env.has_key(requirement):
print('violation: ', requirement)
| true | true |
f71f9c7e66717452a4ba40fb6f3d1934e7331d68 | 7,731 | py | Python | sadedegel/bblock/vocabulary.py | GlobalMaksimum/sadedegel | 8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b | [
"MIT"
] | 100 | 2020-07-06T05:50:49.000Z | 2022-03-21T21:56:55.000Z | sadedegel/bblock/vocabulary.py | LyotardPostmodernizm/sadedegel | 8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b | [
"MIT"
] | 244 | 2020-07-06T06:31:01.000Z | 2022-02-26T10:40:17.000Z | sadedegel/bblock/vocabulary.py | LyotardPostmodernizm/sadedegel | 8e28dbeabc3bf0d6f2222089ac5e3a849f9d3a6b | [
"MIT"
] | 23 | 2020-07-27T16:32:48.000Z | 2022-03-18T11:13:07.000Z | import warnings
from collections import defaultdict
from os.path import dirname
from pathlib import Path
import h5py
import numpy as np
from cached_property import cached_property
from rich.console import Console
from .util import tr_lower, normalize_tokenizer_name
console = Console()
class InvalidTokenizer(Exception):
    """Raised when a tokenizer name has no generated vocabulary ('bert', 'icu', 'simple' are valid)."""
def vocabulary_file(tokenizer: str, verify_exists=True):
    """Returns the path of the vocabulary HDF5 file for a tokenizer.

    Args:
        tokenizer: Tokenizer name (normalized via normalize_tokenizer_name).
        verify_exists: If True, raise when the vocabulary file is missing.

    Raises:
        InvalidTokenizer: If the normalized name is not 'bert', 'icu' or 'simple'.
        FileNotFoundError: If verify_exists is True and the file does not exist.
    """
    normalized_name = normalize_tokenizer_name(tokenizer)
    if normalized_name not in ['bert', 'icu', 'simple']:
        # Bug fix: the second literal was missing its f-prefix, so the error
        # message showed "{normalized_name}" verbatim instead of the name.
        raise InvalidTokenizer(
            (f"Currently only valid tokenizers are BERT, ICU Tokenizer for vocabulary generation."
             f" {normalized_name} found"))
    vocab_file = Path(dirname(__file__)) / 'data' / normalized_name / 'vocabulary.hdf5'
    if not vocab_file.exists() and verify_exists:
        raise FileNotFoundError(f"Vocabulary file for {tokenizer} ({normalized_name}) tokenizer not found.")
    return vocab_file
class VocabularyCounter:
    """Accumulates term and document frequencies to build a vocabulary dump.

    Words are tracked either case sensitively or lower-cased via tr_lower.
    Counts can be pruned by minimum tf/df and serialized into the tokenizer's
    vocabulary HDF5 file, optionally together with word2vec vectors.
    """

    def __init__(self, tokenizer, case_sensitive=True, min_tf=1, min_df=1):
        """Args:
            tokenizer: tokenizer name this vocabulary belongs to.
            case_sensitive: keep surface forms as-is, else lower-case them.
            min_tf: minimum term frequency kept by prune().
            min_df: minimum document frequency kept by prune().
        """
        self.tokenizer = tokenizer
        self.doc_counter = defaultdict(set)  # word -> set of document ids
        self.doc_set = set()  # all document ids seen
        self.term_freq = defaultdict(int)  # word -> total term frequency
        self.min_tf = min_tf
        self.min_df = min_df
        self.case_sensitive = case_sensitive

    def inc(self, word: str, document_id: int, count: int = 1):
        """Records `count` occurrences of `word` in document `document_id`."""
        if self.case_sensitive:
            w = word
        else:
            w = tr_lower(word)
        self.doc_counter[w].add(document_id)
        self.doc_set.add(document_id)
        self.term_freq[w] += count

    def add_word_to_doc(self, word: str, document_id: int):
        """Implemented for backward compatibility; records one occurrence."""
        self.inc(word, document_id, 1)

    @property
    def vocabulary_size(self):
        """Number of distinct words counted so far."""
        return len(self.term_freq)

    @property
    def document_count(self):
        """Number of distinct documents seen so far."""
        return len(self.doc_set)

    def prune(self):
        """Drops words below the min_tf or min_df thresholds; returns self."""
        to_remove = []
        for w in self.term_freq:
            if self.term_freq[w] < self.min_tf or len(self.doc_counter[w]) < self.min_df:
                to_remove.append(w)
        for w in to_remove:
            del self.doc_counter[w]
            del self.term_freq[w]
        console.log(
            f"{len(to_remove)} terms (case sensitive={self.case_sensitive}) are pruned by tf (>= {self.min_tf}) or df filter(>= {self.min_df})")
        return self

    def df(self, w: str):
        """Document frequency of `w` (lower-cased first unless case_sensitive)."""
        if self.case_sensitive:
            return len(self.doc_counter[w])
        else:
            return len(self.doc_counter[tr_lower(w)])

    def tf(self, w: str):
        """Term frequency of `w` (lower-cased first unless case_sensitive)."""
        if self.case_sensitive:
            return self.term_freq[w]
        else:
            return self.term_freq[tr_lower(w)]

    def to_hdf5(self, w2v=None):
        """Appends the counts to the tokenizer's vocabulary HDF5 file.

        Writes into group 'form_' (case sensitive) or 'lower_': word/df/tf
        datasets plus, when `w2v` is given, vector/has_vector datasets.
        """
        with h5py.File(vocabulary_file(self.tokenizer, verify_exists=False), "a") as fp:
            if self.case_sensitive:
                group = fp.create_group("form_")
            else:
                group = fp.create_group("lower_")
            words = sorted(list(self.term_freq.keys()), key=lambda w: tr_lower(w))
            group.attrs['size'] = len(words)
            group.attrs['document_count'] = len(self.doc_set)
            group.attrs['tokenizer'] = self.tokenizer
            group.attrs['min_tf'] = self.min_tf
            group.attrs['min_df'] = self.min_df
            if w2v is not None:
                group.attrs['vector_size'] = w2v.vector_size
                group.create_dataset("vector", data=np.array(
                    [w2v[w] if w in w2v else np.zeros(w2v.vector_size) for w in words]).astype(
                    np.float32),
                                     compression="gzip",
                                     compression_opts=9)
                # Bug fix: was `w in w2v in w2v` -- a chained comparison equal
                # to `(w in w2v) and (w2v in w2v)`, not the intended per-word
                # membership test.
                group.create_dataset("has_vector", data=np.array([w in w2v for w in words]),
                                     compression="gzip",
                                     compression_opts=9)
            group.create_dataset("word", data=words, compression="gzip", compression_opts=9)
            group.create_dataset("df", data=np.array([self.df(w) for w in words]), compression="gzip",
                                 compression_opts=9)
            group.create_dataset("tf", data=np.array([self.tf(w) for w in words]), compression="gzip",
                                 compression_opts=9)
        console.print(f"|D|: {self.document_count}, |V|: {self.vocabulary_size} (case sensitive={self.case_sensitive})")
class Vocabulary:
    """Lazy, read-only view over a tokenizer's vocabulary HDF5 file.

    The file contains two groups: 'form_' (case sensitive surface forms) and
    'lower_' (lower-cased forms). Each group stores 'word', 'df' and 'tf'
    datasets -- plus optional 'vector'/'has_vector' datasets -- and 'size' /
    'document_count' attributes. Datasets are read from disk on first use and
    cached on the instance.
    """
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer
        # vocabulary_file() also validates the tokenizer name and that the
        # file exists on disk.
        self.file_name = vocabulary_file(tokenizer)
        # Lazily loaded dataset caches (None until first access).
        self._df = None
        self._df_cs = None
        self._has_vector = None
        self._vector = None
        # word -> row-index maps; both are built together on first id lookup.
        self.dword_cs = None
        self.dword = None
    @cached_property
    def size_cs(self) -> int:
        """Number of case sensitive vocabulary entries."""
        with h5py.File(self.file_name, "r") as fp:
            return fp['form_'].attrs['size']
    @cached_property
    def size(self) -> int:
        """Number of lower-cased vocabulary entries."""
        with h5py.File(self.file_name, "r") as fp:
            return fp['lower_'].attrs['size']
    def __len__(self):
        # len(vocab) counts lower-cased entries, matching `size`.
        return self.size
    def id_cs(self, word: str, default: int = -1):
        """Returns the case sensitive row index of `word`, or `default`."""
        if self.dword_cs is None:
            # Build both maps in one pass so the file is only read once.
            with h5py.File(self.file_name, "r") as fp:
                self.dword = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['lower_']['word'])))
                self.dword_cs = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['form_']['word'])))
        return self.dword_cs.get(word, default)
    def id(self, word: str, default: int = -1):
        """Returns the lower-cased row index of `word`, or `default`."""
        if self.dword is None:
            with h5py.File(self.file_name, "r") as fp:
                self.dword = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['lower_']['word'])))
                self.dword_cs = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['form_']['word'])))
        return self.dword.get(tr_lower(word), default)
    def df(self, word: str):
        """Document frequency of `word` (case insensitive); 0 if unknown."""
        i = self.id(word)
        if i == -1:
            return 0
        else:
            if self._df is None:
                with h5py.File(self.file_name, "r") as fp:
                    self._df = np.array(fp['lower_']['df'])
            return self._df[i]
    def df_cs(self, word: str):
        """Document frequency of `word` (case sensitive); 0 if unknown."""
        i = self.id_cs(word)
        if i == -1:
            return 0
        else:
            if self._df_cs is None:
                with h5py.File(self.file_name, "r") as fp:
                    self._df_cs = np.array(fp['form_']['df'])
            return self._df_cs[i]
    def has_vector(self, word: str):
        """Whether a word2vec vector was stored for `word`.

        NOTE(review): returns False (not None) for unknown words and for
        vocabularies built without vectors -- callers appear to rely on
        falsiness only.
        """
        with h5py.File(self.file_name, "r") as fp:
            if "has_vector" in fp['lower_']:
                i = self.id(word)
                if i == -1:
                    return False
                else:
                    if self._has_vector is None:
                        self._has_vector = np.array(fp['lower_']['has_vector'])
                    return self._has_vector[i]
            else:
                return False
    def vector(self, word: str):
        """Returns the stored vector row for `word`, or False when absent."""
        # TODO: Performance improvement required -- the file is reopened on
        # every call even after `self._vector` has been cached.
        with h5py.File(self.file_name, "r") as fp:
            if "vector" in fp['lower_']:
                i = self.id(word)
                if i == -1:
                    return False
                else:
                    if self._vector is None:
                        self._vector = np.array(fp['lower_']['vector'])
                    return self._vector[i, :]
            else:
                return False
    @cached_property
    def document_count(self):
        """Number of documents the vocabulary was counted over."""
        with h5py.File(self.file_name, "r") as fp:
            return fp['form_'].attrs['document_count']
| 32.078838 | 144 | 0.56073 | import warnings
from collections import defaultdict
from os.path import dirname
from pathlib import Path
import h5py
import numpy as np
from cached_property import cached_property
from rich.console import Console
from .util import tr_lower, normalize_tokenizer_name
console = Console()
class InvalidTokenizer(Exception):
def vocabulary_file(tokenizer: str, verify_exists=True):
normalized_name = normalize_tokenizer_name(tokenizer)
if normalized_name not in ['bert', 'icu', 'simple']:
raise InvalidTokenizer(
(f"Currently only valid tokenizers are BERT, ICU Tokenizer for vocabulary generation."
" {normalized_name} found"))
vocab_file = Path(dirname(__file__)) / 'data' / normalized_name / 'vocabulary.hdf5'
if not vocab_file.exists() and verify_exists:
raise FileNotFoundError(f"Vocabulary file for {tokenizer} ({normalized_name}) tokenizer not found.")
return vocab_file
class VocabularyCounter:
def __init__(self, tokenizer, case_sensitive=True, min_tf=1, min_df=1):
self.tokenizer = tokenizer
self.doc_counter = defaultdict(set)
self.doc_set = set()
self.term_freq = defaultdict(int)
self.min_tf = min_tf
self.min_df = min_df
self.case_sensitive = case_sensitive
def inc(self, word: str, document_id: int, count: int = 1):
if self.case_sensitive:
w = word
else:
w = tr_lower(word)
self.doc_counter[w].add(document_id)
self.doc_set.add(document_id)
self.term_freq[w] += count
def add_word_to_doc(self, word: str, document_id: int):
self.inc(word, document_id, 1)
@property
def vocabulary_size(self):
return len(self.term_freq)
@property
def document_count(self):
return len(self.doc_set)
def prune(self):
to_remove = []
for w in self.term_freq:
if self.term_freq[w] < self.min_tf or len(self.doc_counter[w]) < self.min_df:
to_remove.append(w)
for w in to_remove:
del self.doc_counter[w]
del self.term_freq[w]
console.log(
f"{len(to_remove)} terms (case sensitive={self.case_sensitive}) are pruned by tf (>= {self.min_tf}) or df filter(>= {self.min_df})")
return self
def df(self, w: str):
if self.case_sensitive:
return len(self.doc_counter[w])
else:
return len(self.doc_counter[tr_lower(w)])
def tf(self, w: str):
if self.case_sensitive:
return self.term_freq[w]
else:
return self.term_freq[tr_lower(w)]
def to_hdf5(self, w2v=None):
with h5py.File(vocabulary_file(self.tokenizer, verify_exists=False), "a") as fp:
if self.case_sensitive:
group = fp.create_group("form_")
else:
group = fp.create_group("lower_")
words = sorted(list(self.term_freq.keys()), key=lambda w: tr_lower(w))
group.attrs['size'] = len(words)
group.attrs['document_count'] = len(self.doc_set)
group.attrs['tokenizer'] = self.tokenizer
group.attrs['min_tf'] = self.min_tf
group.attrs['min_df'] = self.min_df
if w2v is not None:
group.attrs['vector_size'] = w2v.vector_size
group.create_dataset("vector", data=np.array(
[w2v[w] if w in w2v else np.zeros(w2v.vector_size) for w in words]).astype(
np.float32),
compression="gzip",
compression_opts=9)
group.create_dataset("has_vector", data=np.array([w in w2v in w2v for w in words]),
compression="gzip",
compression_opts=9)
group.create_dataset("word", data=words, compression="gzip", compression_opts=9)
group.create_dataset("df", data=np.array([self.df(w) for w in words]), compression="gzip",
compression_opts=9)
group.create_dataset("tf", data=np.array([self.tf(w) for w in words]), compression="gzip",
compression_opts=9)
console.print(f"|D|: {self.document_count}, |V|: {self.vocabulary_size} (case sensitive={self.case_sensitive})")
class Vocabulary:
def __init__(self, tokenizer):
self.tokenizer = tokenizer
self.file_name = vocabulary_file(tokenizer)
self._df = None
self._df_cs = None
self._has_vector = None
self._vector = None
self.dword_cs = None
self.dword = None
@cached_property
def size_cs(self) -> int:
with h5py.File(self.file_name, "r") as fp:
return fp['form_'].attrs['size']
@cached_property
def size(self) -> int:
with h5py.File(self.file_name, "r") as fp:
return fp['lower_'].attrs['size']
def __len__(self):
return self.size
def id_cs(self, word: str, default: int = -1):
if self.dword_cs is None:
with h5py.File(self.file_name, "r") as fp:
self.dword = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['lower_']['word'])))
self.dword_cs = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['form_']['word'])))
return self.dword_cs.get(word, default)
def id(self, word: str, default: int = -1):
if self.dword is None:
with h5py.File(self.file_name, "r") as fp:
self.dword = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['lower_']['word'])))
self.dword_cs = dict((b.decode("utf-8"), i) for i, b in enumerate(list(fp['form_']['word'])))
return self.dword.get(tr_lower(word), default)
def df(self, word: str):
i = self.id(word)
if i == -1:
return 0
else:
if self._df is None:
with h5py.File(self.file_name, "r") as fp:
self._df = np.array(fp['lower_']['df'])
return self._df[i]
def df_cs(self, word: str):
i = self.id_cs(word)
if i == -1:
return 0
else:
if self._df_cs is None:
with h5py.File(self.file_name, "r") as fp:
self._df_cs = np.array(fp['form_']['df'])
return self._df_cs[i]
def has_vector(self, word: str):
with h5py.File(self.file_name, "r") as fp:
if "has_vector" in fp['lower_']:
i = self.id(word)
if i == -1:
return False
else:
if self._has_vector is None:
self._has_vector = np.array(fp['lower_']['has_vector'])
return self._has_vector[i]
else:
return False
def vector(self, word: str):
with h5py.File(self.file_name, "r") as fp:
if "vector" in fp['lower_']:
i = self.id(word)
if i == -1:
return False
else:
if self._vector is None:
self._vector = np.array(fp['lower_']['vector'])
return self._vector[i, :]
else:
return False
@cached_property
def document_count(self):
with h5py.File(self.file_name, "r") as fp:
return fp['form_'].attrs['document_count']
| true | true |
f71f9cca7a4b7ebe90d9da227393163bb8dccc2f | 215 | py | Python | employee_management/employee_management/doctype/category_module_info/test_category_module_info.py | Vivekananthan112599/Frappe-Vivek | 6a2b70c736e17e9748c6a30e5722341acfb3b5c5 | [
"MIT"
] | null | null | null | employee_management/employee_management/doctype/category_module_info/test_category_module_info.py | Vivekananthan112599/Frappe-Vivek | 6a2b70c736e17e9748c6a30e5722341acfb3b5c5 | [
"MIT"
] | null | null | null | employee_management/employee_management/doctype/category_module_info/test_category_module_info.py | Vivekananthan112599/Frappe-Vivek | 6a2b70c736e17e9748c6a30e5722341acfb3b5c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Gopi and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCategoryModuleInfo(unittest.TestCase):
	"""Placeholder TestCase for the Category Module Info doctype; no tests yet."""
	pass
| 19.545455 | 48 | 0.767442 |
from __future__ import unicode_literals
import unittest
class TestCategoryModuleInfo(unittest.TestCase):
pass
| true | true |
f71f9d8b86afa01dd3ceaf3a886e43128a317c40 | 600 | py | Python | sources/t05/t05ej12.py | workready/pythonbasic | 59bd82caf99244f5e711124e1f6f4dec8de22141 | [
"MIT"
] | null | null | null | sources/t05/t05ej12.py | workready/pythonbasic | 59bd82caf99244f5e711124e1f6f4dec8de22141 | [
"MIT"
] | null | null | null | sources/t05/t05ej12.py | workready/pythonbasic | 59bd82caf99244f5e711124e1f6f4dec8de22141 | [
"MIT"
] | null | null | null | import argparse
# CLI: computes x**y; -v/-q choose verbose or quiet output (mutually exclusive).
cli = argparse.ArgumentParser(description="Este programa calcula X^Y")
verbosity = cli.add_mutually_exclusive_group()
verbosity.add_argument("-v", "--verbose", action="store_true")
verbosity.add_argument("-q", "--quiet", action="store_true")
cli.add_argument("x", type=int, help="la base")
cli.add_argument("y", type=int, help="el exponente")
ns = cli.parse_args()

result = ns.x**ns.y
if ns.quiet:
    print(result)
elif ns.verbose:
    print("{} elevado a {} es igual a {}".format(ns.x, ns.y, result))
else:
    print("{}^{} == {}".format(ns.x, ns.y, result))
| 33.333333 | 73 | 0.695 | import argparse
parser = argparse.ArgumentParser(description="Este programa calcula X^Y")
group = parser.add_mutually_exclusive_group()
group.add_argument("-v", "--verbose", action="store_true")
group.add_argument("-q", "--quiet", action="store_true")
parser.add_argument("x", type=int, help="la base")
parser.add_argument("y", type=int, help="el exponente")
args = parser.parse_args()
answer = args.x**args.y
if args.quiet:
print(answer)
elif args.verbose:
print("{} elevado a {} es igual a {}".format(args.x, args.y, answer))
else:
print("{}^{} == {}".format(args.x, args.y, answer))
| true | true |
f71f9dd0c9b57edc5e44757006a6fb2ad0870d9a | 8,176 | py | Python | open_spiel/python/examples/hearts_supervised_learning.py | xujing1994/open_spiel | 7663a2717f16ff84c0d6a6bfdf19a9c21b37b765 | [
"Apache-2.0"
] | null | null | null | open_spiel/python/examples/hearts_supervised_learning.py | xujing1994/open_spiel | 7663a2717f16ff84c0d6a6bfdf19a9c21b37b765 | [
"Apache-2.0"
] | 4 | 2020-11-13T18:59:55.000Z | 2022-02-10T02:08:27.000Z | open_spiel/python/examples/hearts_supervised_learning.py | xujing1994/open_spiel | 7663a2717f16ff84c0d6a6bfdf19a9c21b37b765 | [
"Apache-2.0"
] | 1 | 2020-12-25T03:01:37.000Z | 2020-12-25T03:01:37.000Z | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Train a policy net on Hearts actions based given a dataset of trajectories.
Trajectories from the Hearts bot Xinxin can be generated using
open_spiel/games/hearts/xinxin_game_generator.cc.
"""
import os
import pickle
from typing import Any, Tuple
from absl import app
from absl import flags
import haiku as hk
import jax
from jax import numpy as jnp
from jax.experimental import optix
import numpy as np
import pyspiel
# Type aliases for readability of the training-step signatures below.
OptState = Any
Params = Any

FLAGS = flags.FLAGS
GAME = pyspiel.load_game('hearts')
NUM_CARDS = 52
NUM_ACTIONS = NUM_CARDS  # one action per card
NUM_PLAYERS = 4
TOP_K_ACTIONS = 5  # How many alternative actions to display
DEFAULT_LAYER_SIZES = [1024, 1024, 1024, 1024]

flags.DEFINE_integer('iterations', 100000, 'Number of iterations')
flags.DEFINE_string('data_path', None, 'Location for data')
flags.DEFINE_integer('eval_every', 10000, 'How often to evaluate the policy')
flags.DEFINE_integer('num_examples', 3,
                     'How many examples to print per evaluation')
flags.DEFINE_integer('train_batch', 128, 'Batch size for training step')
flags.DEFINE_integer('eval_batch', 10000, 'Batch size when evaluating')
flags.DEFINE_float('step_size', 1e-4, 'Step size for training')
flags.DEFINE_list('hidden_layer_sizes', None,
                  'Number of hidden units and layers in the network')
flags.DEFINE_integer('rng_seed', 42, 'Seed for initial network weights')
flags.DEFINE_string('save_path', None, 'Location for saved networks')
# Typo fix in the user-facing help text: "optimzer" -> "optimizer".
flags.DEFINE_string('checkpoint_file', None,
                    'Provides weights and optimizer state to resume training')
def _trajectory(line: str):
"""Returns parsed action trajectory."""
actions = [int(x) for x in line.split(' ')]
return tuple(actions)
def make_dataset(file: str):
  """Creates dataset as an endless generator of single training examples.

  Each example is a uniformly sampled decision point from a trajectory: the
  information state tensor after replaying a prefix of the trajectory, paired
  with the action actually taken at that point.

  Args:
    file: path to a text file with one space-separated action trajectory per
      line.

  Yields:
    (information_state_tensor, action) tuples forever, reshuffling the lines
    on each pass over the file.
  """
  # Read all trajectories up front and close the file; the original leaked
  # the open file handle for the lifetime of the generator.
  with open(file) as f:
    lines = f.readlines()
  while True:
    np.random.shuffle(lines)
    for line in lines:
      trajectory = _trajectory(line)
      # Skip the pass_dir action and the NUM_CARDS deal actions at the head
      # of every trajectory; only sample real play decisions.
      action_index = np.random.randint(NUM_CARDS + 1, len(trajectory))
      state = GAME.new_initial_state()
      for action in trajectory[:action_index]:
        state.apply_action(action)
      yield (state.information_state_tensor(), trajectory[action_index])
def batch(dataset, batch_size: int):
  """Groups single examples from `dataset` into fixed-size batches forever.

  Note: the same two buffers are refilled in place and re-yielded each time.
  """
  obs_shape = [batch_size] + GAME.information_state_tensor_shape()
  obs_buffer = np.zeros(obs_shape, np.float32)
  label_buffer = np.zeros(batch_size, dtype=np.int32)
  while True:
    for row in range(batch_size):
      obs_buffer[row], label_buffer[row] = next(dataset)
    yield obs_buffer, label_buffer
def one_hot(x, k):
  """Returns `x` one-hot encoded over `k` classes as a float32 array."""
  classes = jnp.arange(k)
  return jnp.array(x[..., jnp.newaxis] == classes, dtype=np.float32)
def net_fn(x):
  """Haiku policy network: ReLU MLP ending in a log-softmax over actions."""
  stack = []
  for width in FLAGS.hidden_layer_sizes:
    stack += [hk.Linear(int(width)), jax.nn.relu]
  stack += [hk.Linear(NUM_ACTIONS), jax.nn.log_softmax]
  return hk.Sequential(stack)(x)
def main(argv):
  """Trains a Hearts policy by behavioral cloning on logged trajectories."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  if FLAGS.hidden_layer_sizes is None:
    # Cannot pass default arguments as lists due to style requirements, so we
    # override it here if they are not set.
    FLAGS.hidden_layer_sizes = DEFAULT_LAYER_SIZES
  # Make the network.
  net = hk.without_apply_rng(hk.transform(net_fn, apply_rng=True))
  # Make the optimiser.
  opt = optix.adam(FLAGS.step_size)
  @jax.jit
  def loss(
      params: Params,
      inputs: np.ndarray,
      targets: np.ndarray,
  ) -> jnp.DeviceArray:
    """Cross-entropy loss."""
    assert targets.dtype == np.int32
    log_probs = net.apply(params, inputs)
    return -jnp.mean(one_hot(targets, NUM_ACTIONS) * log_probs)
  @jax.jit
  def accuracy(
      params: Params,
      inputs: np.ndarray,
      targets: np.ndarray,
  ) -> jnp.DeviceArray:
    """Classification accuracy."""
    predictions = net.apply(params, inputs)
    return jnp.mean(jnp.argmax(predictions, axis=-1) == targets)
  @jax.jit
  def update(
      params: Params,
      opt_state: OptState,
      inputs: np.ndarray,
      targets: np.ndarray,
  ) -> Tuple[Params, OptState]:
    """Learning rule (stochastic gradient descent)."""
    _, gradient = jax.value_and_grad(loss)(params, inputs, targets)
    updates, opt_state = opt.update(gradient, opt_state)
    new_params = optix.apply_updates(params, updates)
    return new_params, opt_state
  def output_samples(params: Params, max_samples: int):
    """Output some cases where the policy disagrees with the dataset action."""
    if max_samples == 0:
      return
    count = 0
    with open(os.path.join(FLAGS.data_path, 'test.txt')) as f:
      lines = list(f)
    np.random.shuffle(lines)
    for line in lines:
      state = GAME.new_initial_state()
      actions = _trajectory(line)
      for action in actions:
        if not state.is_chance_node():
          observation = np.array(state.information_state_tensor(), np.float32)
          policy = np.exp(net.apply(params, observation))
          probs_actions = [(p, a) for a, p in enumerate(policy)]
          pred = max(probs_actions)[1]
          if pred != action:
            print(state)
            for p, a in reversed(sorted(probs_actions)[-TOP_K_ACTIONS:]):
              print('{:7} {:.2f}'.format(state.action_to_string(a), p))
            print('Ground truth {}\n'.format(state.action_to_string(action)))
            count += 1
            # At most one printed disagreement per trajectory.
            break
        state.apply_action(action)
      if count >= max_samples:
        return
  # Store what we need to rebuild the Haiku net.
  if FLAGS.save_path:
    filename = os.path.join(FLAGS.save_path, 'layers.txt')
    with open(filename, 'w') as layer_def_file:
      for s in FLAGS.hidden_layer_sizes:
        layer_def_file.write(f'{s} ')
      layer_def_file.write('\n')
  # Make datasets.
  if FLAGS.data_path is None:
    raise app.UsageError(
        'Please generate your own supervised training data and supply the local'
        'location as --data_path')
  train = batch(
      make_dataset(os.path.join(FLAGS.data_path, 'train.txt')),
      FLAGS.train_batch)
  test = batch(
      make_dataset(os.path.join(FLAGS.data_path, 'test.txt')), FLAGS.eval_batch)
  # Initialize network and optimiser.
  if FLAGS.checkpoint_file:
    # Resume: restore both weights and optimizer state from the pickle.
    with open(FLAGS.checkpoint_file, 'rb') as pkl_file:
      params, opt_state = pickle.load(pkl_file)
  else:
    rng = jax.random.PRNGKey(FLAGS.rng_seed)  # seed used for network weights
    inputs, unused_targets = next(train)
    params = net.init(rng, inputs)
    opt_state = opt.init(params)
  # Train/eval loop.
  for step in range(FLAGS.iterations):
    # Do SGD on a batch of training examples.
    inputs, targets = next(train)
    params, opt_state = update(params, opt_state, inputs, targets)
    # Periodically evaluate classification accuracy on the test set.
    if (1 + step) % FLAGS.eval_every == 0:
      inputs, targets = next(test)
      test_accuracy = accuracy(params, inputs, targets)
      print(f'After {1+step} steps, test accuracy: {test_accuracy}.')
      if FLAGS.save_path:
        filename = os.path.join(FLAGS.save_path, f'checkpoint-{1 + step}.pkl')
        with open(filename, 'wb') as pkl_file:
          pickle.dump((params, opt_state), pkl_file)
      output_samples(params, FLAGS.num_examples)
# Script entry point: absl parses command-line flags before invoking main().
if __name__ == '__main__':
  app.run(main)
| 34.209205 | 80 | 0.689946 |
import os
import pickle
from typing import Any, Tuple
from absl import app
from absl import flags
import haiku as hk
import jax
from jax import numpy as jnp
from jax.experimental import optix
import numpy as np
import pyspiel
# Opaque optimizer/parameter pytrees from optix/haiku; aliased for signatures.
OptState = Any
Params = Any
FLAGS = flags.FLAGS
# Loaded once at import time; all trajectories are replayed on this game.
GAME = pyspiel.load_game('hearts')
# Standard 52-card deck; one network action per card.
NUM_CARDS = 52
NUM_ACTIONS = NUM_CARDS
NUM_PLAYERS = 4
# How many highest-probability actions to print per disagreement sample.
TOP_K_ACTIONS = 5
DEFAULT_LAYER_SIZES = [1024, 1024, 1024, 1024]
flags.DEFINE_integer('iterations', 100000, 'Number of iterations')
flags.DEFINE_string('data_path', None, 'Location for data')
flags.DEFINE_integer('eval_every', 10000, 'How often to evaluate the policy')
flags.DEFINE_integer('num_examples', 3,
                     'How many examples to print per evaluation')
flags.DEFINE_integer('train_batch', 128, 'Batch size for training step')
flags.DEFINE_integer('eval_batch', 10000, 'Batch size when evaluating')
flags.DEFINE_float('step_size', 1e-4, 'Step size for training')
flags.DEFINE_list('hidden_layer_sizes', None,
                  'Number of hidden units and layers in the network')
flags.DEFINE_integer('rng_seed', 42, 'Seed for initial network weights')
flags.DEFINE_string('save_path', None, 'Location for saved networks')
flags.DEFINE_string('checkpoint_file', None,
                    'Provides weights and optimzer state to resume training')
def _trajectory(line: str):
actions = [int(x) for x in line.split(' ')]
return tuple(actions)
def make_dataset(file: str):
  """Yields (information_state_tensor, action) examples forever.

  Reads the whole trajectory file once, then loops endlessly: every pass
  reshuffles the lines, and for each trajectory a single decision point
  strictly after the deal is sampled.
  """
  # Fix: read inside a context manager so the handle is closed promptly.
  # The original `[line for line in open(file)]` leaked the file object for
  # the lifetime of this (infinite) generator.
  with open(file) as f:
    lines = f.readlines()
  while True:
    np.random.shuffle(lines)
    for line in lines:
      trajectory = _trajectory(line)
      # Skip the first NUM_CARDS + 1 chance actions (the deal) when sampling
      # the prediction target.
      action_index = np.random.randint(NUM_CARDS + 1, len(trajectory))
      state = GAME.new_initial_state()
      for action in trajectory[:action_index]:
        state.apply_action(action)
      yield (state.information_state_tensor(), trajectory[action_index])
def batch(dataset, batch_size: int):
  """Groups single examples from `dataset` into arrays of `batch_size`."""
  obs_shape = [batch_size] + GAME.information_state_tensor_shape()
  observations = np.zeros(obs_shape, np.float32)
  labels = np.zeros(batch_size, dtype=np.int32)
  while True:
    for slot in range(batch_size):
      observations[slot], labels[slot] = next(dataset)
    yield observations, labels
def one_hot(x, k):
  """One-hot encodes `x` over `k` classes as float32."""
  hits = x[..., jnp.newaxis] == jnp.arange(k)
  return jnp.asarray(hits, dtype=np.float32)
def net_fn(x):
  """Haiku module: MLP over the info-state tensor with a log-softmax head."""
  layers = []
  # One Linear + ReLU pair per configured hidden layer size.
  for layer_size in FLAGS.hidden_layer_sizes:
    layers.append(hk.Linear(int(layer_size)))
    layers.append(jax.nn.relu)
  layers.append(hk.Linear(NUM_ACTIONS))
  # Log-probabilities pair with the cross-entropy loss; callers exponentiate
  # when they need probabilities.
  layers.append(jax.nn.log_softmax)
  net = hk.Sequential(layers)
  return net(x)
def main(argv):
  """Trains a Hearts policy by behavioral cloning on logged trajectories."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')
  if FLAGS.hidden_layer_sizes is None:
    # Lists cannot be flag defaults here, so fill in the default lazily.
    FLAGS.hidden_layer_sizes = DEFAULT_LAYER_SIZES
  # Build the network and the optimizer.
  net = hk.without_apply_rng(hk.transform(net_fn, apply_rng=True))
  opt = optix.adam(FLAGS.step_size)
  @jax.jit
  def loss(
      params: Params,
      inputs: np.ndarray,
      targets: np.ndarray,
  ) -> jnp.DeviceArray:
    """Cross-entropy loss against the dataset action."""
    assert targets.dtype == np.int32
    log_probs = net.apply(params, inputs)
    return -jnp.mean(one_hot(targets, NUM_ACTIONS) * log_probs)
  @jax.jit
  def accuracy(
      params: Params,
      inputs: np.ndarray,
      targets: np.ndarray,
  ) -> jnp.DeviceArray:
    """Top-1 classification accuracy."""
    predictions = net.apply(params, inputs)
    return jnp.mean(jnp.argmax(predictions, axis=-1) == targets)
  @jax.jit
  def update(
      params: Params,
      opt_state: OptState,
      inputs: np.ndarray,
      targets: np.ndarray,
  ) -> Tuple[Params, OptState]:
    """One SGD (adam) step; returns the new params and optimizer state."""
    _, gradient = jax.value_and_grad(loss)(params, inputs, targets)
    updates, opt_state = opt.update(gradient, opt_state)
    new_params = optix.apply_updates(params, updates)
    return new_params, opt_state
  def output_samples(params: Params, max_samples: int):
    """Prints up to `max_samples` cases where the policy disagrees with data."""
    if max_samples == 0:
      return
    count = 0
    with open(os.path.join(FLAGS.data_path, 'test.txt')) as f:
      lines = list(f)
    np.random.shuffle(lines)
    for line in lines:
      state = GAME.new_initial_state()
      actions = _trajectory(line)
      for action in actions:
        if not state.is_chance_node():
          observation = np.array(state.information_state_tensor(), np.float32)
          policy = np.exp(net.apply(params, observation))
          probs_actions = [(p, a) for a, p in enumerate(policy)]
          pred = max(probs_actions)[1]
          if pred != action:
            print(state)
            # Show the TOP_K_ACTIONS most probable actions, best first.
            for p, a in reversed(sorted(probs_actions)[-TOP_K_ACTIONS:]):
              print('{:7} {:.2f}'.format(state.action_to_string(a), p))
            print('Ground truth {}\n'.format(state.action_to_string(action)))
            count += 1
            break
        state.apply_action(action)
      if count >= max_samples:
        return
  # Persist the layer sizes needed to rebuild the Haiku net later.
  if FLAGS.save_path:
    filename = os.path.join(FLAGS.save_path, 'layers.txt')
    with open(filename, 'w') as layer_def_file:
      for s in FLAGS.hidden_layer_sizes:
        layer_def_file.write(f'{s} ')
      layer_def_file.write('\n')
  if FLAGS.data_path is None:
    raise app.UsageError(
        'Please generate your own supervised training data and supply the local'
        'location as --data_path')
  # Infinite, shuffled train/test batch streams.
  train = batch(
      make_dataset(os.path.join(FLAGS.data_path, 'train.txt')),
      FLAGS.train_batch)
  test = batch(
      make_dataset(os.path.join(FLAGS.data_path, 'test.txt')), FLAGS.eval_batch)
  # Initialize (or restore) network parameters and optimizer state.
  if FLAGS.checkpoint_file:
    with open(FLAGS.checkpoint_file, 'rb') as pkl_file:
      params, opt_state = pickle.load(pkl_file)
  else:
    rng = jax.random.PRNGKey(FLAGS.rng_seed)
    inputs, unused_targets = next(train)
    params = net.init(rng, inputs)
    opt_state = opt.init(params)
  # Train/eval loop: SGD every step, evaluate + checkpoint every eval_every.
  for step in range(FLAGS.iterations):
    inputs, targets = next(train)
    params, opt_state = update(params, opt_state, inputs, targets)
    if (1 + step) % FLAGS.eval_every == 0:
      inputs, targets = next(test)
      test_accuracy = accuracy(params, inputs, targets)
      print(f'After {1+step} steps, test accuracy: {test_accuracy}.')
      if FLAGS.save_path:
        filename = os.path.join(FLAGS.save_path, f'checkpoint-{1 + step}.pkl')
        with open(filename, 'wb') as pkl_file:
          pickle.dump((params, opt_state), pkl_file)
      output_samples(params, FLAGS.num_examples)
# Script entry point: absl parses command-line flags before invoking main().
if __name__ == '__main__':
  app.run(main)
| true | true |
f71f9e454eb33061bb98dbbfbd0ff5e4e58bf745 | 38,248 | py | Python | conda_build/render.py | scopatz/conda-build | dd74b17f4e7cb4286fe9a403895f9d34feb8e071 | [
"BSD-3-Clause"
] | null | null | null | conda_build/render.py | scopatz/conda-build | dd74b17f4e7cb4286fe9a403895f9d34feb8e071 | [
"BSD-3-Clause"
] | null | null | null | conda_build/render.py | scopatz/conda-build | dd74b17f4e7cb4286fe9a403895f9d34feb8e071 | [
"BSD-3-Clause"
] | null | null | null | # (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function
from collections import OrderedDict, defaultdict
from locale import getpreferredencoding
import json
import os
from os.path import isdir, isfile, abspath
import random
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import yaml
from .conda_interface import (PY3, UnsatisfiableError, ProgressiveFetchExtract,
TemporaryDirectory)
from .conda_interface import execute_actions
from .conda_interface import pkgs_dirs
from .conda_interface import conda_43
from .conda_interface import specs_from_url
from .conda_interface import memoized
from conda_build import exceptions, utils, environ
from conda_build.metadata import MetaData, combine_top_level_metadata_with_output
import conda_build.source as source
from conda_build.variants import (get_package_variants, list_of_dicts_to_dict_of_lists,
filter_by_key_value)
from conda_build.exceptions import DependencyNeedsBuildingError
from conda_build.index import get_build_index
# from conda_build.jinja_context import pin_subpackage_against_outputs
try:
from conda.base.constants import CONDA_TARBALL_EXTENSIONS
except Exception:
from conda.base.constants import CONDA_TARBALL_EXTENSION
CONDA_TARBALL_EXTENSIONS = (CONDA_TARBALL_EXTENSION,)
def odict_representer(dumper, data):
    # Represent an OrderedDict as a regular YAML mapping (insertion order kept).
    items = data.items()
    return dumper.represent_dict(items)
# Teach yaml to serialize sets and tuples as plain lists, and OrderedDicts as
# ordinary mappings, so rendered recipe metadata dumps cleanly.
yaml.add_representer(set, yaml.representer.SafeRepresenter.represent_list)
yaml.add_representer(tuple, yaml.representer.SafeRepresenter.represent_list)
yaml.add_representer(OrderedDict, odict_representer)
def bldpkg_path(m):
    '''
    Returns path to built package's tarball given its ``Metadata``.
    '''
    subdir = 'noarch' if m.noarch or m.noarch_python else m.config.host_subdir
    if hasattr(m, 'type'):
        pkg_type = m.type
    elif m.config.conda_pkg_format == "2":
        pkg_type = "conda_v2"
    else:
        pkg_type = "conda"
    # the default case will switch over to conda_v2 at some point
    if pkg_type == "conda":
        filename = '%s%s' % (m.dist(), CONDA_TARBALL_EXTENSIONS[0])
        path = os.path.join(m.config.output_folder, subdir, filename)
    elif pkg_type == "conda_v2":
        filename = '%s%s' % (m.dist(), '.conda')
        path = os.path.join(m.config.output_folder, subdir, filename)
    else:
        # Non-conda output (e.g. wheel): describe where it lands instead.
        path = '{} file for {} in: {}'.format(m.type, m.name(),
                                              os.path.join(m.config.output_folder, subdir))
    return path
def actions_to_pins(actions):
    """Turns the LINK entries of an install plan into 'name version build' pins."""
    if conda_43:
        def spec_name(spec):
            return spec.dist_name
    else:
        def spec_name(spec):
            return str(spec)
    if 'LINK' not in actions:
        return []
    pins = []
    for spec in actions['LINK']:
        # dist names look like name-version-build; split from the right.
        pins.append(' '.join(spec_name(spec).split()[0].rsplit('-', 2)))
    return pins
def _categorize_deps(m, specs, exclude_pattern, variant):
    """Splits `specs` into (subpackages, dependencies, pass_through_deps).

    Specs matching `exclude_pattern` are passed through untouched (they are
    excluded from versioning/hash calculation); specs naming one of this
    recipe's outputs become subpackage pins; everything else is a dependency,
    possibly augmented with a version from `variant`.
    """
    subpackages = []
    dependencies = []
    pass_through_deps = []
    dash_or_under = re.compile("[-_]")
    # ones that get filtered from actual versioning, to exclude them from the hash calculation
    for spec in specs:
        if not exclude_pattern or not exclude_pattern.match(spec):
            is_subpackage = False
            spec_name = spec.split()[0]
            for entry in m.get_section('outputs'):
                name = entry.get('name')
                if name == spec_name:
                    subpackages.append(' '.join((name, m.version())))
                    is_subpackage = True
            if not is_subpackage:
                dependencies.append(spec)
            # fill in variant version iff no version at all is provided
            for key, value in variant.items():
                # for sake of comparison, ignore dashes and underscores
                if (dash_or_under.sub("", key) == dash_or_under.sub("", spec_name) and
                        not re.search(r'%s\s+[0-9a-zA-Z\_\.\<\>\=\*]' % spec_name, spec)):
                    dependencies.append(" ".join((spec_name, value)))
        elif exclude_pattern.match(spec):
            pass_through_deps.append(spec)
    return subpackages, dependencies, pass_through_deps
def get_env_dependencies(m, env, variant, exclude_pattern=None,
                         permit_unsatisfiable_variants=False,
                         merge_build_host_on_same_platform=True):
    """Resolves the dependencies of `env` ('build'/'host'/'run') for metadata `m`.

    Returns (specs, actions, unsat): the pinned specs, the conda install plan
    used to pin them, and a string describing unsatisfiable packages (or None).
    With permit_unsatisfiable_variants, an unsatisfiable env yields empty
    actions instead of raising.
    """
    specs = m.get_depends_top_and_out(env)
    # replace x.x with our variant's numpy version, or else conda tries to literally go get x.x
    if env in ('build', 'host'):
        no_xx_specs = []
        for spec in specs:
            if ' x.x' in spec:
                pkg_name = spec.split()[0]
                no_xx_specs.append(' '.join((pkg_name, variant.get(pkg_name, ""))))
            else:
                no_xx_specs.append(spec)
        specs = no_xx_specs
    subpackages, dependencies, pass_through_deps = _categorize_deps(m, specs, exclude_pattern, variant)
    dependencies = set(dependencies)
    unsat = None
    # Random suffix keeps concurrent solves from colliding on the temp prefix.
    random_string = ''.join(random.choice(string.ascii_uppercase + string.digits)
                            for _ in range(10))
    with TemporaryDirectory(prefix="_", suffix=random_string) as tmpdir:
        try:
            actions = environ.get_install_actions(tmpdir, tuple(dependencies), env,
                                                  subdir=getattr(m.config, '{}_subdir'.format(env)),
                                                  debug=m.config.debug,
                                                  verbose=m.config.verbose,
                                                  locking=m.config.locking,
                                                  bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),
                                                  timeout=m.config.timeout,
                                                  disable_pip=m.config.disable_pip,
                                                  max_env_retry=m.config.max_env_retry,
                                                  output_folder=m.config.output_folder,
                                                  channel_urls=tuple(m.config.channel_urls))
        except (UnsatisfiableError, DependencyNeedsBuildingError) as e:
            # we'll get here if the environment is unsatisfiable
            if hasattr(e, 'packages'):
                unsat = ', '.join(e.packages)
            else:
                unsat = e.message
            if permit_unsatisfiable_variants:
                actions = {}
            else:
                raise
    specs = actions_to_pins(actions)
    return (utils.ensure_list((specs + subpackages + pass_through_deps) or
                              m.meta.get('requirements', {}).get(env, [])),
            actions, unsat)
def strip_channel(spec_str):
    """Drops any 'channel::' prefix from a spec string, decoding bytes first."""
    if hasattr(spec_str, 'decode'):
        spec_str = spec_str.decode()
    return spec_str.split("::")[-1] if ':' in spec_str else spec_str
def get_pin_from_build(m, dep, build_dep_versions):
    """Pins a run dependency `dep` to its build-time version where configured.

    Applies the variant's pin_run_as_build expressions (or numpy's 'x.x'
    convention) using the resolved versions in `build_dep_versions`; returns
    the possibly-rewritten spec string.
    """
    dep_split = dep.split()
    dep_name = dep_split[0]
    build = ''
    if len(dep_split) >= 3:
        build = dep_split[2]
    pin = None
    version = build_dep_versions.get(dep_name) or m.config.variant.get(dep_name)
    # python on noarch packages is deliberately left unpinned.
    if (version and dep_name in m.config.variant.get('pin_run_as_build', {}) and
            not (dep_name == 'python' and (m.noarch or m.noarch_python)) and
            dep_name in build_dep_versions):
        pin_cfg = m.config.variant['pin_run_as_build'][dep_name]
        if isinstance(pin_cfg, str):
            # if pin arg is a single 'x.x', use the same value for min and max
            pin_cfg = dict(min_pin=pin_cfg, max_pin=pin_cfg)
        pin = utils.apply_pin_expressions(version.split()[0], **pin_cfg)
    elif dep.startswith('numpy') and 'x.x' in dep:
        if not build_dep_versions.get(dep_name):
            raise ValueError("numpy x.x specified, but numpy not in build requirements.")
        pin = utils.apply_pin_expressions(version.split()[0], min_pin='x.x', max_pin='x.x')
    if pin:
        dep = " ".join((dep_name, pin, build)).strip()
    return dep
def _filter_run_exports(specs, ignore_list):
filtered_specs = {}
for agent, specs_list in specs.items():
for spec in specs_list:
if hasattr(spec, 'decode'):
spec = spec.decode()
if not any((ignore_spec == '*' or spec == ignore_spec or
spec.startswith(ignore_spec + ' ')) for ignore_spec in ignore_list):
filtered_specs[agent] = filtered_specs.get(agent, []) + [spec]
return filtered_specs
def find_pkg_dir_or_file_in_pkgs_dirs(pkg_dist, m, files_only=False):
    """Locates an extracted package dir or tarball for `pkg_dist` in the caches.

    Searches conda's package caches plus this build's bldpkgs dirs. With
    files_only, an extracted dir is re-archived on demand so callers always
    get a tarball path. Returns None if nothing is found.
    """
    _pkgs_dirs = pkgs_dirs + list(m.config.bldpkgs_dirs)
    pkg_loc = None
    for pkgs_dir in _pkgs_dirs:
        pkg_dir = os.path.join(pkgs_dir, pkg_dist)
        pkg_file = os.path.join(pkgs_dir, pkg_dist + CONDA_TARBALL_EXTENSIONS[0])
        if not files_only and os.path.isdir(pkg_dir):
            pkg_loc = pkg_dir
            break
        elif os.path.isfile(pkg_file):
            pkg_loc = pkg_file
            break
        elif files_only and os.path.isdir(pkg_dir):
            pkg_loc = pkg_file
            # create the tarball on demand. This is so that testing on archives works.
            with tarfile.open(pkg_file, 'w:bz2') as archive:
                for entry in os.listdir(pkg_dir):
                    archive.add(os.path.join(pkg_dir, entry), arcname=entry)
            pkg_subdir = os.path.join(m.config.croot, m.config.host_subdir)
            pkg_loc = os.path.join(pkg_subdir, os.path.basename(pkg_file))
            shutil.move(pkg_file, pkg_loc)
            # NOTE(review): this branch does not `break`, so a later cache dir
            # could overwrite pkg_loc — confirm whether that is intended.
    return pkg_loc
@memoized
def _read_specs_from_package(pkg_loc, pkg_dist):
    """Reads run_exports specs from an extracted package dir or a tarball.

    Tries, in order: info/run_exports (legacy plain text), .yaml, .json in an
    extracted directory; then the same files inside the tarball. Returns a
    dict mapping export strength (e.g. 'weak') to lists of spec strings.
    """
    specs = {}
    if pkg_loc and os.path.isdir(pkg_loc):
        downstream_file = os.path.join(pkg_loc, 'info/run_exports')
        if os.path.isfile(downstream_file):
            with open(downstream_file) as f:
                specs = {'weak': [spec.rstrip() for spec in f.readlines()]}
        # a later attempt: record more info in the yaml file, to support "strong" run exports
        elif os.path.isfile(downstream_file + '.yaml'):
            with open(downstream_file + '.yaml') as f:
                specs = yaml.safe_load(f)
        elif os.path.isfile(downstream_file + '.json'):
            with open(downstream_file + '.json') as f:
                specs = json.load(f)
    if not specs and pkg_loc and os.path.isfile(pkg_loc):
        # switching to json for consistency in conda-build 4
        specs_yaml = utils.package_has_file(pkg_loc, 'info/run_exports.yaml')
        specs_json = utils.package_has_file(pkg_loc, 'info/run_exports.json')
        if hasattr(specs_json, "decode"):
            specs_json = specs_json.decode("utf-8")
        if specs_json:
            specs = json.loads(specs_json)
        elif specs_yaml:
            specs = yaml.safe_load(specs_yaml)
        else:
            legacy_specs = utils.package_has_file(pkg_loc, 'info/run_exports')
            # exclude packages pinning themselves (makes no sense)
            if legacy_specs:
                weak_specs = set()
                if hasattr(pkg_dist, "decode"):
                    pkg_dist = pkg_dist.decode("utf-8")
                for spec in legacy_specs.splitlines():
                    if hasattr(spec, "decode"):
                        spec = spec.decode("utf-8")
                    if not spec.startswith(pkg_dist.rsplit('-', 2)[0]):
                        weak_specs.add(spec.rstrip())
                specs = {'weak': sorted(list(weak_specs))}
    return specs
def execute_download_actions(m, actions, env, package_subset=None, require_files=False):
    """Ensures the packages in `actions` are downloaded; returns their locations.

    Returns a dict mapping each (optionally subset-filtered) LINK package to a
    (pkg_loc, pkg_dist) tuple; pkg_loc may be None if download failed.
    """
    index, _, _ = get_build_index(getattr(m.config, '{}_subdir'.format(env)), bldpkgs_dir=m.config.bldpkgs_dir,
                                  output_folder=m.config.output_folder, channel_urls=m.config.channel_urls,
                                  debug=m.config.debug, verbose=m.config.verbose, locking=m.config.locking,
                                  timeout=m.config.timeout)
    # this should be just downloading packages. We don't need to extract them -
    # we read contents directly
    if 'FETCH' in actions or 'EXTRACT' in actions:
        # this is to force the download
        execute_actions(actions, index, verbose=m.config.debug)
    pkg_files = {}
    packages = actions.get('LINK', [])
    package_subset = utils.ensure_list(package_subset)
    selected_packages = set()
    if package_subset:
        # subset entries may be package records or 'name [version]' strings.
        for pkg in package_subset:
            if hasattr(pkg, 'name'):
                if pkg in packages:
                    selected_packages.add(pkg)
            else:
                pkg_name = pkg.split()[0]
                for link_pkg in packages:
                    if pkg_name == link_pkg.name:
                        selected_packages.add(link_pkg)
                        break
        packages = selected_packages
    for pkg in packages:
        if hasattr(pkg, 'dist_name'):
            pkg_dist = pkg.dist_name
        else:
            pkg = strip_channel(pkg)
            pkg_dist = pkg.split(' ')[0]
        pkg_loc = find_pkg_dir_or_file_in_pkgs_dirs(pkg_dist, m, files_only=require_files)
        # ran through all pkgs_dirs, and did not find package or folder. Download it.
        # TODO: this is a vile hack reaching into conda's internals. Replace with
        # proper conda API when available.
        if not pkg_loc and conda_43:
            try:
                pkg_record = [_ for _ in index if _.dist_name == pkg_dist][0]
                # the conda 4.4 API uses a single `link_prefs` kwarg
                # whereas conda 4.3 used `index` and `link_dists` kwargs
                pfe = ProgressiveFetchExtract(link_prefs=(index[pkg_record],))
            except TypeError:
                # TypeError: __init__() got an unexpected keyword argument 'link_prefs'
                pfe = ProgressiveFetchExtract(link_dists=[pkg], index=index)
            with utils.LoggingContext():
                pfe.execute()
            for pkg_dir in pkgs_dirs:
                _loc = os.path.join(pkg_dir, index[pkg].fn)
                if os.path.isfile(_loc):
                    pkg_loc = _loc
                    break
        pkg_files[pkg] = pkg_loc, pkg_dist
    return pkg_files
def get_upstream_pins(m, actions, env):
    """Download packages from specs, then inspect each downloaded package for additional
    downstream dependency specs. Return these additional specs."""
    env_specs = m.meta.get('requirements', {}).get(env, [])
    explicit_specs = [req.split(' ')[0] for req in env_specs] if env_specs else []
    # Only consider run_exports from packages the recipe names explicitly,
    # not from transitive dependencies in the solve.
    linked_packages = actions.get('LINK', [])
    linked_packages = [pkg for pkg in linked_packages if pkg.name in explicit_specs]
    ignore_list = utils.ensure_list(m.get_value('build/ignore_run_exports'))
    additional_specs = {}
    for pkg in linked_packages:
        # run_exports are served via channeldata, keyed by package version.
        channeldata = utils.download_channeldata(pkg.channel)
        run_exports = channeldata.get('packages', {}).get(pkg.name, {}).get('run_exports', {}).get(pkg.version, {})
        specs = _filter_run_exports(run_exports, ignore_list)
        if specs:
            additional_specs = utils.merge_dicts_of_lists(additional_specs, specs)
    return additional_specs
def _read_upstream_pin_files(m, env, permit_unsatisfiable_variants, exclude_pattern):
    """Resolves deps for `env` and collects run_exports pins from those deps."""
    deps, actions, unsat = get_env_dependencies(
        m, env, m.config.variant, exclude_pattern,
        permit_unsatisfiable_variants=permit_unsatisfiable_variants)
    # extend host deps with strong build run exports. This is important for things like
    # vc feature activation to work correctly in the host env.
    extra_run_specs = get_upstream_pins(m, actions, env)
    resolved = list(set(deps)) or m.meta.get('requirements', {}).get(env, [])
    return resolved, unsat, extra_run_specs
def add_upstream_pins(m, permit_unsatisfiable_variants, exclude_pattern):
    """Applies run_exports from any build deps to host and run sections.

    Returns (build_unsat, host_unsat): strings describing unsatisfiable
    packages in each env, or None/empty when satisfiable.
    """
    # if we have host deps, they're more important than the build deps.
    requirements = m.meta.get('requirements', {})
    build_deps, build_unsat, extra_run_specs_from_build = _read_upstream_pin_files(
        m, 'build', permit_unsatisfiable_variants, exclude_pattern)
    # is there a 'host' section?
    if m.is_cross:
        # this must come before we read upstream pins, because it will enforce things
        # like vc version from the compiler.
        host_reqs = utils.ensure_list(m.get_value('requirements/host'))
        # ensure host_reqs is present, so in-place modification below is actually in-place
        requirements = m.meta.setdefault('requirements', {})
        requirements['host'] = host_reqs
        if not host_reqs:
            matching_output = [out for out in m.meta.get('outputs', []) if
                               out.get('name') == m.name()]
            if matching_output:
                requirements = utils.expand_reqs(matching_output[0].get('requirements', {}))
                matching_output[0]['requirements'] = requirements
                host_reqs = requirements.setdefault('host', [])
        # in-place modification of above thingie
        host_reqs.extend(extra_run_specs_from_build.get('strong', []))
        host_deps, host_unsat, extra_run_specs_from_host = _read_upstream_pin_files(
            m, 'host', permit_unsatisfiable_variants, exclude_pattern)
        extra_run_specs = set(extra_run_specs_from_host.get('strong', []) +
                              extra_run_specs_from_host.get('weak', []) +
                              extra_run_specs_from_build.get('strong', []))
    else:
        host_deps = []
        host_unsat = []
        extra_run_specs = set(extra_run_specs_from_build.get('strong', []))
        if m.build_is_host:
            extra_run_specs.update(extra_run_specs_from_build.get('weak', []))
            # BUG FIX: the original wrote
            #     build_deps = set(build_deps or []).update(...)
            # set.update() returns None, so build_deps became None and the
            # 'build' section below was silently never updated.
            build_deps = set(build_deps or [])
            build_deps.update(extra_run_specs_from_build.get('weak', []))
        else:
            host_deps = set(extra_run_specs_from_build.get('strong', []))
    run_deps = extra_run_specs | set(utils.ensure_list(requirements.get('run')))
    for section, deps in (('build', build_deps), ('host', host_deps), ('run', run_deps)):
        if deps:
            requirements[section] = list(deps)
    m.meta['requirements'] = requirements
    return build_unsat, host_unsat
def _simplify_to_exact_constraints(metadata):
    """
    For metapackages that are pinned exactly, we want to bypass all dependencies that may
    be less exact.
    """
    requirements = metadata.meta.get('requirements', {})
    # collect deps on a per-section basis
    for section in 'build', 'host', 'run':
        deps = utils.ensure_list(requirements.get(section, []))
        # group spec parts (version/build fragments) by package name
        deps_dict = defaultdict(list)
        for dep in deps:
            spec_parts = utils.ensure_valid_spec(dep).split()
            name = spec_parts[0]
            if len(spec_parts) > 1:
                deps_dict[name].append(spec_parts[1:])
            else:
                deps_dict[name].append([])
        deps_list = []
        for name, values in deps_dict.items():
            exact_pins = []
            for dep in values:
                if len(dep) > 1:
                    version, build = dep[:2]
                    # a pin is "exact" only with no wildcards/range operators
                    if not (any(c in version for c in ('>', '<', '*')) or '*' in build):
                        exact_pins.append(dep)
            if len(values) == 1 and not any(values):
                # single bare name: keep as-is
                deps_list.append(name)
            elif exact_pins:
                # an exact pin supersedes looser constraints, but conflicting
                # exact pins are an error
                if not all(pin == exact_pins[0] for pin in exact_pins):
                    raise ValueError("Conflicting exact pins: {}".format(exact_pins))
                else:
                    deps_list.append(' '.join([name] + exact_pins[0]))
            else:
                deps_list.extend(' '.join([name] + dep) for dep in values if dep)
        if section in requirements and deps_list:
            requirements[section] = deps_list
    metadata.meta['requirements'] = requirements
def finalize_metadata(m, parent_metadata=None, permit_unsatisfiable_variants=False):
    """Fully render a recipe. Fill in versions for build/host dependencies.

    Returns the finalized MetaData; m.final is False when one or more
    dependency environments were unsatisfiable (only possible with
    permit_unsatisfiable_variants).
    """
    if not parent_metadata:
        parent_metadata = m
    if m.skip():
        m.final = True
    else:
        exclude_pattern = None
        excludes = set(m.config.variant.get('ignore_version', []))
        # pin_run_as_build wins over ignore_version for the same package.
        for key in m.config.variant.get('pin_run_as_build', {}).keys():
            if key in excludes:
                excludes.remove(key)
        output_excludes = set()
        if hasattr(m, 'other_outputs'):
            output_excludes = set(name for (name, variant) in m.other_outputs.keys())
        if excludes or output_excludes:
            exclude_pattern = re.compile(r'|'.join(r'(?:^{}(?:\s|$|\Z))'.format(exc)
                                                   for exc in excludes | output_excludes))
        parent_recipe = m.meta.get('extra', {}).get('parent_recipe', {})
        # extract the topmost section where variables are defined, and put it on top of the
        # requirements for a particular output
        # Re-parse the output from the original recipe, so that we re-consider any jinja2 stuff
        output = parent_metadata.get_rendered_output(m.name(), variant=m.config.variant)
        is_top_level = True
        if output:
            if 'package' in output or 'name' not in output:
                # it's just a top-level recipe
                output = {'name': m.name()}
            else:
                is_top_level = False
            if not parent_recipe or parent_recipe['name'] == m.name():
                combine_top_level_metadata_with_output(m, output)
            requirements = utils.expand_reqs(output.get('requirements', {}))
            m.meta['requirements'] = requirements
        if m.meta.get('requirements'):
            utils.insert_variant_versions(m.meta['requirements'],
                                          m.config.variant, 'build')
            utils.insert_variant_versions(m.meta['requirements'],
                                          m.config.variant, 'host')
        m = parent_metadata.get_output_metadata(m.get_rendered_output(m.name()))
        build_unsat, host_unsat = add_upstream_pins(m,
                                                    permit_unsatisfiable_variants,
                                                    exclude_pattern)
        # getting this AFTER add_upstream_pins is important, because that function adds deps
        # to the metadata.
        requirements = m.meta.get('requirements', {})
        # here's where we pin run dependencies to their build time versions. This happens based
        # on the keys in the 'pin_run_as_build' key in the variant, which is a list of package
        # names to have this behavior.
        if output_excludes:
            exclude_pattern = re.compile(r'|'.join(r'(?:^{}(?:\s|$|\Z))'.format(exc)
                                                   for exc in output_excludes))
        pinning_env = 'host' if m.is_cross else 'build'
        build_reqs = requirements.get(pinning_env, [])
        # if python is in the build specs, but doesn't have a specific associated
        # version, make sure to add one
        if build_reqs and 'python' in build_reqs:
            build_reqs.append('python {}'.format(m.config.variant['python']))
            m.meta['requirements'][pinning_env] = build_reqs
        full_build_deps, _, _ = get_env_dependencies(m, pinning_env,
                                                     m.config.variant,
                                                     exclude_pattern=exclude_pattern,
                                                     permit_unsatisfiable_variants=permit_unsatisfiable_variants)
        full_build_dep_versions = {dep.split()[0]: " ".join(dep.split()[1:])
                                   for dep in full_build_deps}
        if isfile(m.requirements_path) and not requirements.get('run'):
            requirements['run'] = specs_from_url(m.requirements_path)
        run_deps = requirements.get('run', [])
        versioned_run_deps = [get_pin_from_build(m, dep, full_build_dep_versions)
                              for dep in run_deps]
        versioned_run_deps = [utils.ensure_valid_spec(spec, warn=True)
                              for spec in versioned_run_deps]
        requirements[pinning_env] = full_build_deps
        requirements['run'] = versioned_run_deps
        m.meta['requirements'] = requirements
        # append other requirements, such as python.app, appropriately
        m.append_requirements()
        if m.pin_depends == 'strict':
            m.meta['requirements']['run'] = environ.get_pinned_deps(
                m, 'run')
        test_deps = m.get_value('test/requires')
        if test_deps:
            versioned_test_deps = list({get_pin_from_build(m, dep, full_build_dep_versions)
                                        for dep in test_deps})
            versioned_test_deps = [utils.ensure_valid_spec(spec, warn=True)
                                   for spec in versioned_test_deps]
            m.meta['test']['requires'] = versioned_test_deps
        extra = m.meta.get('extra', {})
        extra['copy_test_source_files'] = m.config.copy_test_source_files
        m.meta['extra'] = extra
        # if source/path is relative, then the output package makes no sense at all. The next
        # best thing is to hard-code the absolute path. This probably won't exist on any
        # system other than the original build machine, but at least it will work there.
        if m.meta.get('source'):
            if 'path' in m.meta['source']:
                source_path = m.meta['source']['path']
                # BUG FIX: the original called os.path.expanduser(source_path)
                # and discarded the result, so '~/...' paths were treated as
                # relative and wrongly joined onto the recipe path.
                source_path = os.path.expanduser(source_path)
                if not os.path.isabs(source_path):
                    m.meta['source']['path'] = os.path.normpath(
                        os.path.join(m.path, source_path))
                else:
                    m.meta['source']['path'] = source_path
            elif ('git_url' in m.meta['source'] and not (
                    # absolute paths are not relative paths
                    os.path.isabs(m.meta['source']['git_url']) or
                    # real urls are not relative paths
                    ":" in m.meta['source']['git_url'])):
                m.meta['source']['git_url'] = os.path.normpath(
                    os.path.join(m.path, m.meta['source']['git_url']))
        if not m.meta.get('build'):
            m.meta['build'] = {}
        _simplify_to_exact_constraints(m)
        if build_unsat or host_unsat:
            m.final = False
            log = utils.get_logger(__name__)
            log.warn("Returning non-final recipe for {}; one or more dependencies "
                     "was unsatisfiable:".format(m.dist()))
            if build_unsat:
                log.warn("Build: {}".format(build_unsat))
            if host_unsat:
                log.warn("Host: {}".format(host_unsat))
        else:
            m.final = True
        # NOTE: rebinding the local name has no effect on the caller; retained
        # from the original code for fidelity.
        if is_top_level:
            parent_metadata = m
    return m
def try_download(metadata, no_download_source, raise_error=False):
    """Fetches/patches the recipe source if needed; warn, raise, or pass through."""
    if not metadata.source_provided and not no_download_source:
        # The download tool may itself be declared in meta.yaml and not yet be
        # installed in the builder env, so failure here is only a warning.
        try:
            source.provide(metadata)
        except subprocess.CalledProcessError as error:
            print("Warning: failed to download source. If building, will try "
                  "again after downloading recipe dependencies.")
            print("Error was: ")
            print(error)
    if metadata.source_provided:
        return
    if no_download_source:
        raise ValueError("no_download_source specified, but can't fully render recipe without"
                         " downloading source. Please fix the recipe, or don't use "
                         "no_download_source.")
    if raise_error:
        raise RuntimeError("Failed to download or patch source. Please see build log for info.")
def reparse(metadata):
    """Some things need to be parsed again after the build environment has been created
    and activated."""
    metadata.final = False
    # Put the freshly-created envs (and their site-packages) at the front of
    # sys.path so jinja2/selector evaluation can import from them.
    sys.path.insert(0, metadata.config.build_prefix)
    sys.path.insert(0, metadata.config.host_prefix)
    py_ver = '.'.join(metadata.config.variant['python'].split('.')[:2])
    sys.path.insert(0, utils.get_site_packages(metadata.config.host_prefix, py_ver))
    metadata.parse_until_resolved()
    # Re-finalize: dependency pins may change now that the envs exist.
    metadata = finalize_metadata(metadata)
    return metadata
def distribute_variants(metadata, variants, permit_unsatisfiable_variants=False,
allow_no_other_outputs=False, bypass_env_check=False):
rendered_metadata = {}
need_source_download = True
# don't bother distributing python if it's a noarch package
if metadata.noarch or metadata.noarch_python:
variants = filter_by_key_value(variants, 'python', variants[0]['python'],
'noarch_reduction')
# store these for reference later
metadata.config.variants = variants
# These are always the full set. just 'variants' is the one that gets
# used mostly, and can be reduced
metadata.config.input_variants = variants
recipe_requirements = metadata.extract_requirements_text()
recipe_package_and_build_text = metadata.extract_package_and_build_text()
recipe_text = recipe_package_and_build_text + recipe_requirements
if PY3 and hasattr(recipe_text, 'decode'):
recipe_text = recipe_text.decode()
elif not PY3 and hasattr(recipe_text, 'encode'):
recipe_text = recipe_text.encode()
metadata.config.variant = variants[0]
used_variables = metadata.get_used_loop_vars(force_global=False)
top_loop = metadata.get_reduced_variant_set(used_variables)
for variant in top_loop:
mv = metadata.copy()
mv.config.variant = variant
pin_run_as_build = variant.get('pin_run_as_build', {})
if mv.numpy_xx and 'numpy' not in pin_run_as_build:
pin_run_as_build['numpy'] = {'min_pin': 'x.x', 'max_pin': 'x.x'}
conform_dict = {}
for key in used_variables:
# We use this variant in the top-level recipe.
# constrain the stored variants to only this version in the output
# variant mapping
conform_dict[key] = variant[key]
for key, values in conform_dict.items():
mv.config.variants = (filter_by_key_value(mv.config.variants, key, values,
'distribute_variants_reduction') or
mv.config.variants)
pin_run_as_build = variant.get('pin_run_as_build', {})
if mv.numpy_xx and 'numpy' not in pin_run_as_build:
pin_run_as_build['numpy'] = {'min_pin': 'x.x', 'max_pin': 'x.x'}
numpy_pinned_variants = []
for _variant in mv.config.variants:
_variant['pin_run_as_build'] = pin_run_as_build
numpy_pinned_variants.append(_variant)
mv.config.variants = numpy_pinned_variants
mv.config.squished_variants = list_of_dicts_to_dict_of_lists(mv.config.variants)
if mv.needs_source_for_render and mv.variant_in_source:
mv.parse_again()
utils.rm_rf(mv.config.work_dir)
source.provide(mv)
mv.parse_again()
try:
mv.parse_until_resolved(allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
except SystemExit:
pass
need_source_download = (not mv.needs_source_for_render or not mv.source_provided)
rendered_metadata[(mv.dist(),
mv.config.variant.get('target_platform', mv.config.subdir),
tuple((var, mv.config.variant.get(var))
for var in mv.get_used_vars()))] = \
(mv, need_source_download, None)
# list of tuples.
# each tuple item is a tuple of 3 items:
# metadata, need_download, need_reparse_in_env
return list(rendered_metadata.values())
def expand_outputs(metadata_tuples):
"""Obtain all metadata objects for all outputs from recipe. Useful for outputting paths."""
expanded_outputs = OrderedDict()
for (_m, download, reparse) in metadata_tuples:
for (output_dict, m) in _m.copy().get_output_metadata_set(permit_unsatisfiable_variants=False):
expanded_outputs[m.dist()] = (output_dict, m)
return list(expanded_outputs.values())
def render_recipe(recipe_path, config, no_download_source=False, variants=None,
permit_unsatisfiable_variants=True, reset_build_id=True, bypass_env_check=False):
"""Returns a list of tuples, each consisting of
(metadata-object, needs_download, needs_render_in_env)
You get one tuple per variant. Outputs are not factored in here (subpackages won't affect these
results returned here.)
"""
arg = recipe_path
# Don't use byte literals for paths in Python 2
if not PY3:
arg = arg.decode(getpreferredencoding() or 'utf-8')
if isfile(arg):
if arg.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):
recipe_dir = tempfile.mkdtemp()
t = tarfile.open(arg, 'r:*')
t.extractall(path=recipe_dir)
t.close()
need_cleanup = True
elif arg.endswith('.yaml'):
recipe_dir = os.path.dirname(arg)
need_cleanup = False
else:
print("Ignoring non-recipe: %s" % arg)
return None, None
else:
recipe_dir = abspath(arg)
need_cleanup = False
if not isdir(recipe_dir):
sys.exit("Error: no such directory: %s" % recipe_dir)
try:
m = MetaData(recipe_dir, config=config)
except exceptions.YamlParsingError as e:
sys.stderr.write(e.error_msg())
sys.exit(1)
rendered_metadata = {}
# important: set build id *before* downloading source. Otherwise source goes into a different
# build folder.
if config.set_build_id:
m.config.compute_build_id(m.name(), reset=reset_build_id)
# this source may go into a folder that doesn't match the eventual build folder.
# There's no way around it AFAICT. We must download the source to be able to render
# the recipe (from anything like GIT_FULL_HASH), but we can't know the final build
# folder until rendering is complete, because package names can have variant jinja2 in them.
if m.needs_source_for_render and not m.source_provided:
try_download(m, no_download_source=no_download_source)
if m.final:
if not hasattr(m.config, 'variants') or not m.config.variant:
m.config.ignore_system_variants = True
if os.path.isfile(os.path.join(m.path, 'conda_build_config.yaml')):
m.config.variant_config_files = [os.path.join(m.path, 'conda_build_config.yaml')]
m.config.variants = get_package_variants(m, variants=variants)
m.config.variant = m.config.variants[0]
rendered_metadata = [(m, False, False), ]
else:
# merge any passed-in variants with any files found
variants = get_package_variants(m, variants=variants)
# when building, we don't want to fully expand all outputs into metadata, only expand
# whatever variants we have (i.e. expand top-level variants, not output-only variants)
rendered_metadata = distribute_variants(m, variants,
permit_unsatisfiable_variants=permit_unsatisfiable_variants,
allow_no_other_outputs=True, bypass_env_check=bypass_env_check)
if need_cleanup:
utils.rm_rf(recipe_dir)
return rendered_metadata
# Keep this out of the function below so it can be imported by other modules.
FIELDS = ["package", "source", "build", "requirements", "test", "app", "outputs", "about", "extra"]
# Next bit of stuff is to support YAML output in the order we expect.
# http://stackoverflow.com/a/17310199/1170370
class _MetaYaml(dict):
fields = FIELDS
def to_omap(self):
return [(field, self[field]) for field in _MetaYaml.fields if field in self]
def _represent_omap(dumper, data):
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.to_omap())
def _unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
return node
class _IndentDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super(_IndentDumper, self).increase_indent(flow, False)
def ignore_aliases(self, data):
return True
yaml.add_representer(_MetaYaml, _represent_omap)
if PY3:
yaml.add_representer(str, _unicode_representer)
unicode = None # silence pyflakes about unicode not existing in py3
else:
yaml.add_representer(unicode, _unicode_representer)
def output_yaml(metadata, filename=None, suppress_outputs=False):
local_metadata = metadata.copy()
utils.trim_empty_keys(local_metadata.meta)
if suppress_outputs and local_metadata.is_output and 'outputs' in local_metadata.meta:
del local_metadata.meta['outputs']
output = yaml.dump(_MetaYaml(local_metadata.meta), Dumper=_IndentDumper,
default_flow_style=False, indent=4)
if filename:
if any(sep in filename for sep in ('\\', '/')):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, "w") as f:
f.write(output)
return "Wrote yaml to %s" % filename
else:
return output
| 43.912744 | 115 | 0.616189 |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict, defaultdict
from locale import getpreferredencoding
import json
import os
from os.path import isdir, isfile, abspath
import random
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import yaml
from .conda_interface import (PY3, UnsatisfiableError, ProgressiveFetchExtract,
TemporaryDirectory)
from .conda_interface import execute_actions
from .conda_interface import pkgs_dirs
from .conda_interface import conda_43
from .conda_interface import specs_from_url
from .conda_interface import memoized
from conda_build import exceptions, utils, environ
from conda_build.metadata import MetaData, combine_top_level_metadata_with_output
import conda_build.source as source
from conda_build.variants import (get_package_variants, list_of_dicts_to_dict_of_lists,
filter_by_key_value)
from conda_build.exceptions import DependencyNeedsBuildingError
from conda_build.index import get_build_index
try:
from conda.base.constants import CONDA_TARBALL_EXTENSIONS
except Exception:
from conda.base.constants import CONDA_TARBALL_EXTENSION
CONDA_TARBALL_EXTENSIONS = (CONDA_TARBALL_EXTENSION,)
def odict_representer(dumper, data):
return dumper.represent_dict(data.items())
yaml.add_representer(set, yaml.representer.SafeRepresenter.represent_list)
yaml.add_representer(tuple, yaml.representer.SafeRepresenter.represent_list)
yaml.add_representer(OrderedDict, odict_representer)
def bldpkg_path(m):
subdir = 'noarch' if m.noarch or m.noarch_python else m.config.host_subdir
if not hasattr(m, 'type'):
if m.config.conda_pkg_format == "2":
pkg_type = "conda_v2"
else:
pkg_type = "conda"
else:
pkg_type = m.type
if pkg_type == "conda":
path = os.path.join(m.config.output_folder, subdir, '%s%s' % (m.dist(), CONDA_TARBALL_EXTENSIONS[0]))
elif pkg_type == "conda_v2":
path = os.path.join(m.config.output_folder, subdir, '%s%s' % (m.dist(), '.conda'))
else:
path = '{} file for {} in: {}'.format(m.type, m.name(), os.path.join(m.config.output_folder, subdir))
return path
def actions_to_pins(actions):
specs = []
if conda_43:
spec_name = lambda x: x.dist_name
else:
spec_name = lambda x: str(x)
if 'LINK' in actions:
specs = [' '.join(spec_name(spec).split()[0].rsplit('-', 2)) for spec in actions['LINK']]
return specs
def _categorize_deps(m, specs, exclude_pattern, variant):
subpackages = []
dependencies = []
pass_through_deps = []
dash_or_under = re.compile("[-_]")
for spec in specs:
if not exclude_pattern or not exclude_pattern.match(spec):
is_subpackage = False
spec_name = spec.split()[0]
for entry in m.get_section('outputs'):
name = entry.get('name')
if name == spec_name:
subpackages.append(' '.join((name, m.version())))
is_subpackage = True
if not is_subpackage:
dependencies.append(spec)
for key, value in variant.items():
if (dash_or_under.sub("", key) == dash_or_under.sub("", spec_name) and
not re.search(r'%s\s+[0-9a-zA-Z\_\.\<\>\=\*]' % spec_name, spec)):
dependencies.append(" ".join((spec_name, value)))
elif exclude_pattern.match(spec):
pass_through_deps.append(spec)
return subpackages, dependencies, pass_through_deps
def get_env_dependencies(m, env, variant, exclude_pattern=None,
permit_unsatisfiable_variants=False,
merge_build_host_on_same_platform=True):
specs = m.get_depends_top_and_out(env)
if env in ('build', 'host'):
no_xx_specs = []
for spec in specs:
if ' x.x' in spec:
pkg_name = spec.split()[0]
no_xx_specs.append(' '.join((pkg_name, variant.get(pkg_name, ""))))
else:
no_xx_specs.append(spec)
specs = no_xx_specs
subpackages, dependencies, pass_through_deps = _categorize_deps(m, specs, exclude_pattern, variant)
dependencies = set(dependencies)
unsat = None
random_string = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
with TemporaryDirectory(prefix="_", suffix=random_string) as tmpdir:
try:
actions = environ.get_install_actions(tmpdir, tuple(dependencies), env,
subdir=getattr(m.config, '{}_subdir'.format(env)),
debug=m.config.debug,
verbose=m.config.verbose,
locking=m.config.locking,
bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),
timeout=m.config.timeout,
disable_pip=m.config.disable_pip,
max_env_retry=m.config.max_env_retry,
output_folder=m.config.output_folder,
channel_urls=tuple(m.config.channel_urls))
except (UnsatisfiableError, DependencyNeedsBuildingError) as e:
# we'll get here if the environment is unsatisfiable
if hasattr(e, 'packages'):
unsat = ', '.join(e.packages)
else:
unsat = e.message
if permit_unsatisfiable_variants:
actions = {}
else:
raise
specs = actions_to_pins(actions)
return (utils.ensure_list((specs + subpackages + pass_through_deps) or
m.meta.get('requirements', {}).get(env, [])),
actions, unsat)
def strip_channel(spec_str):
if hasattr(spec_str, 'decode'):
spec_str = spec_str.decode()
if ':' in spec_str:
spec_str = spec_str.split("::")[-1]
return spec_str
def get_pin_from_build(m, dep, build_dep_versions):
dep_split = dep.split()
dep_name = dep_split[0]
build = ''
if len(dep_split) >= 3:
build = dep_split[2]
pin = None
version = build_dep_versions.get(dep_name) or m.config.variant.get(dep_name)
if (version and dep_name in m.config.variant.get('pin_run_as_build', {}) and
not (dep_name == 'python' and (m.noarch or m.noarch_python)) and
dep_name in build_dep_versions):
pin_cfg = m.config.variant['pin_run_as_build'][dep_name]
if isinstance(pin_cfg, str):
pin_cfg = dict(min_pin=pin_cfg, max_pin=pin_cfg)
pin = utils.apply_pin_expressions(version.split()[0], **pin_cfg)
elif dep.startswith('numpy') and 'x.x' in dep:
if not build_dep_versions.get(dep_name):
raise ValueError("numpy x.x specified, but numpy not in build requirements.")
pin = utils.apply_pin_expressions(version.split()[0], min_pin='x.x', max_pin='x.x')
if pin:
dep = " ".join((dep_name, pin, build)).strip()
return dep
def _filter_run_exports(specs, ignore_list):
filtered_specs = {}
for agent, specs_list in specs.items():
for spec in specs_list:
if hasattr(spec, 'decode'):
spec = spec.decode()
if not any((ignore_spec == '*' or spec == ignore_spec or
spec.startswith(ignore_spec + ' ')) for ignore_spec in ignore_list):
filtered_specs[agent] = filtered_specs.get(agent, []) + [spec]
return filtered_specs
def find_pkg_dir_or_file_in_pkgs_dirs(pkg_dist, m, files_only=False):
_pkgs_dirs = pkgs_dirs + list(m.config.bldpkgs_dirs)
pkg_loc = None
for pkgs_dir in _pkgs_dirs:
pkg_dir = os.path.join(pkgs_dir, pkg_dist)
pkg_file = os.path.join(pkgs_dir, pkg_dist + CONDA_TARBALL_EXTENSIONS[0])
if not files_only and os.path.isdir(pkg_dir):
pkg_loc = pkg_dir
break
elif os.path.isfile(pkg_file):
pkg_loc = pkg_file
break
elif files_only and os.path.isdir(pkg_dir):
pkg_loc = pkg_file
with tarfile.open(pkg_file, 'w:bz2') as archive:
for entry in os.listdir(pkg_dir):
archive.add(os.path.join(pkg_dir, entry), arcname=entry)
pkg_subdir = os.path.join(m.config.croot, m.config.host_subdir)
pkg_loc = os.path.join(pkg_subdir, os.path.basename(pkg_file))
shutil.move(pkg_file, pkg_loc)
return pkg_loc
@memoized
def _read_specs_from_package(pkg_loc, pkg_dist):
specs = {}
if pkg_loc and os.path.isdir(pkg_loc):
downstream_file = os.path.join(pkg_loc, 'info/run_exports')
if os.path.isfile(downstream_file):
with open(downstream_file) as f:
specs = {'weak': [spec.rstrip() for spec in f.readlines()]}
elif os.path.isfile(downstream_file + '.yaml'):
with open(downstream_file + '.yaml') as f:
specs = yaml.safe_load(f)
elif os.path.isfile(downstream_file + '.json'):
with open(downstream_file + '.json') as f:
specs = json.load(f)
if not specs and pkg_loc and os.path.isfile(pkg_loc):
specs_yaml = utils.package_has_file(pkg_loc, 'info/run_exports.yaml')
specs_json = utils.package_has_file(pkg_loc, 'info/run_exports.json')
if hasattr(specs_json, "decode"):
specs_json = specs_json.decode("utf-8")
if specs_json:
specs = json.loads(specs_json)
elif specs_yaml:
specs = yaml.safe_load(specs_yaml)
else:
legacy_specs = utils.package_has_file(pkg_loc, 'info/run_exports')
if legacy_specs:
weak_specs = set()
if hasattr(pkg_dist, "decode"):
pkg_dist = pkg_dist.decode("utf-8")
for spec in legacy_specs.splitlines():
if hasattr(spec, "decode"):
spec = spec.decode("utf-8")
if not spec.startswith(pkg_dist.rsplit('-', 2)[0]):
weak_specs.add(spec.rstrip())
specs = {'weak': sorted(list(weak_specs))}
return specs
def execute_download_actions(m, actions, env, package_subset=None, require_files=False):
index, _, _ = get_build_index(getattr(m.config, '{}_subdir'.format(env)), bldpkgs_dir=m.config.bldpkgs_dir,
output_folder=m.config.output_folder, channel_urls=m.config.channel_urls,
debug=m.config.debug, verbose=m.config.verbose, locking=m.config.locking,
timeout=m.config.timeout)
# we read contents directly
if 'FETCH' in actions or 'EXTRACT' in actions:
# this is to force the download
execute_actions(actions, index, verbose=m.config.debug)
pkg_files = {}
packages = actions.get('LINK', [])
package_subset = utils.ensure_list(package_subset)
selected_packages = set()
if package_subset:
for pkg in package_subset:
if hasattr(pkg, 'name'):
if pkg in packages:
selected_packages.add(pkg)
else:
pkg_name = pkg.split()[0]
for link_pkg in packages:
if pkg_name == link_pkg.name:
selected_packages.add(link_pkg)
break
packages = selected_packages
for pkg in packages:
if hasattr(pkg, 'dist_name'):
pkg_dist = pkg.dist_name
else:
pkg = strip_channel(pkg)
pkg_dist = pkg.split(' ')[0]
pkg_loc = find_pkg_dir_or_file_in_pkgs_dirs(pkg_dist, m, files_only=require_files)
# ran through all pkgs_dirs, and did not find package or folder. Download it.
# TODO: this is a vile hack reaching into conda's internals. Replace with
if not pkg_loc and conda_43:
try:
pkg_record = [_ for _ in index if _.dist_name == pkg_dist][0]
pfe = ProgressiveFetchExtract(link_prefs=(index[pkg_record],))
except TypeError:
pfe = ProgressiveFetchExtract(link_dists=[pkg], index=index)
with utils.LoggingContext():
pfe.execute()
for pkg_dir in pkgs_dirs:
_loc = os.path.join(pkg_dir, index[pkg].fn)
if os.path.isfile(_loc):
pkg_loc = _loc
break
pkg_files[pkg] = pkg_loc, pkg_dist
return pkg_files
def get_upstream_pins(m, actions, env):
env_specs = m.meta.get('requirements', {}).get(env, [])
explicit_specs = [req.split(' ')[0] for req in env_specs] if env_specs else []
linked_packages = actions.get('LINK', [])
linked_packages = [pkg for pkg in linked_packages if pkg.name in explicit_specs]
ignore_list = utils.ensure_list(m.get_value('build/ignore_run_exports'))
additional_specs = {}
for pkg in linked_packages:
channeldata = utils.download_channeldata(pkg.channel)
run_exports = channeldata.get('packages', {}).get(pkg.name, {}).get('run_exports', {}).get(pkg.version, {})
specs = _filter_run_exports(run_exports, ignore_list)
if specs:
additional_specs = utils.merge_dicts_of_lists(additional_specs, specs)
return additional_specs
def _read_upstream_pin_files(m, env, permit_unsatisfiable_variants, exclude_pattern):
deps, actions, unsat = get_env_dependencies(m, env, m.config.variant,
exclude_pattern,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
extra_run_specs = get_upstream_pins(m, actions, env)
return list(set(deps)) or m.meta.get('requirements', {}).get(env, []), unsat, extra_run_specs
def add_upstream_pins(m, permit_unsatisfiable_variants, exclude_pattern):
requirements = m.meta.get('requirements', {})
build_deps, build_unsat, extra_run_specs_from_build = _read_upstream_pin_files(m, 'build',
permit_unsatisfiable_variants, exclude_pattern)
# is there a 'host' section?
if m.is_cross:
# this must come before we read upstream pins, because it will enforce things
# like vc version from the compiler.
host_reqs = utils.ensure_list(m.get_value('requirements/host'))
# ensure host_reqs is present, so in-place modification below is actually in-place
requirements = m.meta.setdefault('requirements', {})
requirements['host'] = host_reqs
if not host_reqs:
matching_output = [out for out in m.meta.get('outputs', []) if
out.get('name') == m.name()]
if matching_output:
requirements = utils.expand_reqs(matching_output[0].get('requirements', {}))
matching_output[0]['requirements'] = requirements
host_reqs = requirements.setdefault('host', [])
# in-place modification of above thingie
host_reqs.extend(extra_run_specs_from_build.get('strong', []))
host_deps, host_unsat, extra_run_specs_from_host = _read_upstream_pin_files(m, 'host',
permit_unsatisfiable_variants, exclude_pattern)
extra_run_specs = set(extra_run_specs_from_host.get('strong', []) +
extra_run_specs_from_host.get('weak', []) +
extra_run_specs_from_build.get('strong', []))
else:
host_deps = []
host_unsat = []
extra_run_specs = set(extra_run_specs_from_build.get('strong', []))
if m.build_is_host:
extra_run_specs.update(extra_run_specs_from_build.get('weak', []))
build_deps = set(build_deps or []).update(extra_run_specs_from_build.get('weak', []))
else:
host_deps = set(extra_run_specs_from_build.get('strong', []))
run_deps = extra_run_specs | set(utils.ensure_list(requirements.get('run')))
for section, deps in (('build', build_deps), ('host', host_deps), ('run', run_deps)):
if deps:
requirements[section] = list(deps)
m.meta['requirements'] = requirements
return build_unsat, host_unsat
def _simplify_to_exact_constraints(metadata):
requirements = metadata.meta.get('requirements', {})
# collect deps on a per-section basis
for section in 'build', 'host', 'run':
deps = utils.ensure_list(requirements.get(section, []))
deps_dict = defaultdict(list)
for dep in deps:
spec_parts = utils.ensure_valid_spec(dep).split()
name = spec_parts[0]
if len(spec_parts) > 1:
deps_dict[name].append(spec_parts[1:])
else:
deps_dict[name].append([])
deps_list = []
for name, values in deps_dict.items():
exact_pins = []
for dep in values:
if len(dep) > 1:
version, build = dep[:2]
if not (any(c in version for c in ('>', '<', '*')) or '*' in build):
exact_pins.append(dep)
if len(values) == 1 and not any(values):
deps_list.append(name)
elif exact_pins:
if not all(pin == exact_pins[0] for pin in exact_pins):
raise ValueError("Conflicting exact pins: {}".format(exact_pins))
else:
deps_list.append(' '.join([name] + exact_pins[0]))
else:
deps_list.extend(' '.join([name] + dep) for dep in values if dep)
if section in requirements and deps_list:
requirements[section] = deps_list
metadata.meta['requirements'] = requirements
def finalize_metadata(m, parent_metadata=None, permit_unsatisfiable_variants=False):
if not parent_metadata:
parent_metadata = m
if m.skip():
m.final = True
else:
exclude_pattern = None
excludes = set(m.config.variant.get('ignore_version', []))
for key in m.config.variant.get('pin_run_as_build', {}).keys():
if key in excludes:
excludes.remove(key)
output_excludes = set()
if hasattr(m, 'other_outputs'):
output_excludes = set(name for (name, variant) in m.other_outputs.keys())
if excludes or output_excludes:
exclude_pattern = re.compile(r'|'.join(r'(?:^{}(?:\s|$|\Z))'.format(exc)
for exc in excludes | output_excludes))
parent_recipe = m.meta.get('extra', {}).get('parent_recipe', {})
# extract the topmost section where variables are defined, and put it on top of the
# requirements for a particular output
# Re-parse the output from the original recipe, so that we re-consider any jinja2 stuff
output = parent_metadata.get_rendered_output(m.name(), variant=m.config.variant)
is_top_level = True
if output:
if 'package' in output or 'name' not in output:
# it's just a top-level recipe
output = {'name': m.name()}
else:
is_top_level = False
if not parent_recipe or parent_recipe['name'] == m.name():
combine_top_level_metadata_with_output(m, output)
requirements = utils.expand_reqs(output.get('requirements', {}))
m.meta['requirements'] = requirements
if m.meta.get('requirements'):
utils.insert_variant_versions(m.meta['requirements'],
m.config.variant, 'build')
utils.insert_variant_versions(m.meta['requirements'],
m.config.variant, 'host')
m = parent_metadata.get_output_metadata(m.get_rendered_output(m.name()))
build_unsat, host_unsat = add_upstream_pins(m,
permit_unsatisfiable_variants,
exclude_pattern)
requirements = m.meta.get('requirements', {})
# on the keys in the 'pin_run_as_build' key in the variant, which is a list of package
# names to have this behavior.
if output_excludes:
exclude_pattern = re.compile(r'|'.join(r'(?:^{}(?:\s|$|\Z))'.format(exc)
for exc in output_excludes))
pinning_env = 'host' if m.is_cross else 'build'
build_reqs = requirements.get(pinning_env, [])
# if python is in the build specs, but doesn't have a specific associated
if build_reqs and 'python' in build_reqs:
build_reqs.append('python {}'.format(m.config.variant['python']))
m.meta['requirements'][pinning_env] = build_reqs
full_build_deps, _, _ = get_env_dependencies(m, pinning_env,
m.config.variant,
exclude_pattern=exclude_pattern,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
full_build_dep_versions = {dep.split()[0]: " ".join(dep.split()[1:])
for dep in full_build_deps}
if isfile(m.requirements_path) and not requirements.get('run'):
requirements['run'] = specs_from_url(m.requirements_path)
run_deps = requirements.get('run', [])
versioned_run_deps = [get_pin_from_build(m, dep, full_build_dep_versions)
for dep in run_deps]
versioned_run_deps = [utils.ensure_valid_spec(spec, warn=True)
for spec in versioned_run_deps]
requirements[pinning_env] = full_build_deps
requirements['run'] = versioned_run_deps
m.meta['requirements'] = requirements
m.append_requirements()
if m.pin_depends == 'strict':
m.meta['requirements']['run'] = environ.get_pinned_deps(
m, 'run')
test_deps = m.get_value('test/requires')
if test_deps:
versioned_test_deps = list({get_pin_from_build(m, dep, full_build_dep_versions)
for dep in test_deps})
versioned_test_deps = [utils.ensure_valid_spec(spec, warn=True)
for spec in versioned_test_deps]
m.meta['test']['requires'] = versioned_test_deps
extra = m.meta.get('extra', {})
extra['copy_test_source_files'] = m.config.copy_test_source_files
m.meta['extra'] = extra
# system other than the original build machine, but at least it will work there.
if m.meta.get('source'):
if 'path' in m.meta['source']:
source_path = m.meta['source']['path']
os.path.expanduser(source_path)
if not os.path.isabs(source_path):
m.meta['source']['path'] = os.path.normpath(
os.path.join(m.path, source_path))
elif ('git_url' in m.meta['source'] and not (
# absolute paths are not relative paths
os.path.isabs(m.meta['source']['git_url']) or
# real urls are not relative paths
":" in m.meta['source']['git_url'])):
m.meta['source']['git_url'] = os.path.normpath(
os.path.join(m.path, m.meta['source']['git_url']))
if not m.meta.get('build'):
m.meta['build'] = {}
_simplify_to_exact_constraints(m)
if build_unsat or host_unsat:
m.final = False
log = utils.get_logger(__name__)
log.warn("Returning non-final recipe for {}; one or more dependencies "
"was unsatisfiable:".format(m.dist()))
if build_unsat:
log.warn("Build: {}".format(build_unsat))
if host_unsat:
log.warn("Host: {}".format(host_unsat))
else:
m.final = True
if is_top_level:
parent_metadata = m
return m
def try_download(metadata, no_download_source, raise_error=False):
if not metadata.source_provided and not no_download_source:
# this try/catch is for when the tool to download source is actually in
# meta.yaml, and not previously installed in builder env.
try:
source.provide(metadata)
except subprocess.CalledProcessError as error:
print("Warning: failed to download source. If building, will try "
"again after downloading recipe dependencies.")
print("Error was: ")
print(error)
if not metadata.source_provided:
if no_download_source:
raise ValueError("no_download_source specified, but can't fully render recipe without"
" downloading source. Please fix the recipe, or don't use "
"no_download_source.")
elif raise_error:
raise RuntimeError("Failed to download or patch source. Please see build log for info.")
def reparse(metadata):
metadata.final = False
sys.path.insert(0, metadata.config.build_prefix)
sys.path.insert(0, metadata.config.host_prefix)
py_ver = '.'.join(metadata.config.variant['python'].split('.')[:2])
sys.path.insert(0, utils.get_site_packages(metadata.config.host_prefix, py_ver))
metadata.parse_until_resolved()
metadata = finalize_metadata(metadata)
return metadata
def distribute_variants(metadata, variants, permit_unsatisfiable_variants=False,
allow_no_other_outputs=False, bypass_env_check=False):
rendered_metadata = {}
need_source_download = True
# don't bother distributing python if it's a noarch package
if metadata.noarch or metadata.noarch_python:
variants = filter_by_key_value(variants, 'python', variants[0]['python'],
'noarch_reduction')
# store these for reference later
metadata.config.variants = variants
# These are always the full set. just 'variants' is the one that gets
# used mostly, and can be reduced
metadata.config.input_variants = variants
recipe_requirements = metadata.extract_requirements_text()
recipe_package_and_build_text = metadata.extract_package_and_build_text()
recipe_text = recipe_package_and_build_text + recipe_requirements
if PY3 and hasattr(recipe_text, 'decode'):
recipe_text = recipe_text.decode()
elif not PY3 and hasattr(recipe_text, 'encode'):
recipe_text = recipe_text.encode()
metadata.config.variant = variants[0]
used_variables = metadata.get_used_loop_vars(force_global=False)
top_loop = metadata.get_reduced_variant_set(used_variables)
for variant in top_loop:
mv = metadata.copy()
mv.config.variant = variant
pin_run_as_build = variant.get('pin_run_as_build', {})
if mv.numpy_xx and 'numpy' not in pin_run_as_build:
pin_run_as_build['numpy'] = {'min_pin': 'x.x', 'max_pin': 'x.x'}
conform_dict = {}
for key in used_variables:
# We use this variant in the top-level recipe.
# constrain the stored variants to only this version in the output
# variant mapping
conform_dict[key] = variant[key]
for key, values in conform_dict.items():
mv.config.variants = (filter_by_key_value(mv.config.variants, key, values,
'distribute_variants_reduction') or
mv.config.variants)
pin_run_as_build = variant.get('pin_run_as_build', {})
if mv.numpy_xx and 'numpy' not in pin_run_as_build:
pin_run_as_build['numpy'] = {'min_pin': 'x.x', 'max_pin': 'x.x'}
numpy_pinned_variants = []
for _variant in mv.config.variants:
_variant['pin_run_as_build'] = pin_run_as_build
numpy_pinned_variants.append(_variant)
mv.config.variants = numpy_pinned_variants
mv.config.squished_variants = list_of_dicts_to_dict_of_lists(mv.config.variants)
if mv.needs_source_for_render and mv.variant_in_source:
mv.parse_again()
utils.rm_rf(mv.config.work_dir)
source.provide(mv)
mv.parse_again()
try:
mv.parse_until_resolved(allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
except SystemExit:
pass
need_source_download = (not mv.needs_source_for_render or not mv.source_provided)
rendered_metadata[(mv.dist(),
mv.config.variant.get('target_platform', mv.config.subdir),
tuple((var, mv.config.variant.get(var))
for var in mv.get_used_vars()))] = \
(mv, need_source_download, None)
# list of tuples.
# each tuple item is a tuple of 3 items:
# metadata, need_download, need_reparse_in_env
return list(rendered_metadata.values())
def expand_outputs(metadata_tuples):
expanded_outputs = OrderedDict()
for (_m, download, reparse) in metadata_tuples:
for (output_dict, m) in _m.copy().get_output_metadata_set(permit_unsatisfiable_variants=False):
expanded_outputs[m.dist()] = (output_dict, m)
return list(expanded_outputs.values())
def render_recipe(recipe_path, config, no_download_source=False, variants=None,
permit_unsatisfiable_variants=True, reset_build_id=True, bypass_env_check=False):
arg = recipe_path
# Don't use byte literals for paths in Python 2
if not PY3:
arg = arg.decode(getpreferredencoding() or 'utf-8')
if isfile(arg):
if arg.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):
recipe_dir = tempfile.mkdtemp()
t = tarfile.open(arg, 'r:*')
t.extractall(path=recipe_dir)
t.close()
need_cleanup = True
elif arg.endswith('.yaml'):
recipe_dir = os.path.dirname(arg)
need_cleanup = False
else:
print("Ignoring non-recipe: %s" % arg)
return None, None
else:
recipe_dir = abspath(arg)
need_cleanup = False
if not isdir(recipe_dir):
sys.exit("Error: no such directory: %s" % recipe_dir)
try:
m = MetaData(recipe_dir, config=config)
except exceptions.YamlParsingError as e:
sys.stderr.write(e.error_msg())
sys.exit(1)
rendered_metadata = {}
if config.set_build_id:
m.config.compute_build_id(m.name(), reset=reset_build_id)
# There's no way around it AFAICT. We must download the source to be able to render
# folder until rendering is complete, because package names can have variant jinja2 in them.
if m.needs_source_for_render and not m.source_provided:
try_download(m, no_download_source=no_download_source)
if m.final:
if not hasattr(m.config, 'variants') or not m.config.variant:
m.config.ignore_system_variants = True
if os.path.isfile(os.path.join(m.path, 'conda_build_config.yaml')):
m.config.variant_config_files = [os.path.join(m.path, 'conda_build_config.yaml')]
m.config.variants = get_package_variants(m, variants=variants)
m.config.variant = m.config.variants[0]
rendered_metadata = [(m, False, False), ]
else:
# merge any passed-in variants with any files found
variants = get_package_variants(m, variants=variants)
# when building, we don't want to fully expand all outputs into metadata, only expand
rendered_metadata = distribute_variants(m, variants,
permit_unsatisfiable_variants=permit_unsatisfiable_variants,
allow_no_other_outputs=True, bypass_env_check=bypass_env_check)
if need_cleanup:
utils.rm_rf(recipe_dir)
return rendered_metadata
FIELDS = ["package", "source", "build", "requirements", "test", "app", "outputs", "about", "extra"]
class _MetaYaml(dict):
fields = FIELDS
def to_omap(self):
return [(field, self[field]) for field in _MetaYaml.fields if field in self]
def _represent_omap(dumper, data):
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.to_omap())
def _unicode_representer(dumper, uni):
    """Represent a text value as a plain YAML string scalar node."""
    return yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
class _IndentDumper(yaml.Dumper):
    """yaml.Dumper subclass that indents block sequences under their key
    and never emits YAML anchors/aliases for repeated objects."""

    def increase_indent(self, flow=False, indentless=False):
        # Force indentless=False so list items are indented relative to
        # their parent key (PyYAML's default writes them flush-left).
        # Py2/Py3-compatible explicit super() form is kept on purpose.
        return super(_IndentDumper, self).increase_indent(flow, False)

    def ignore_aliases(self, data):
        # Always inline repeated objects instead of emitting &anchor/*alias.
        return True
# Register representers: dump _MetaYaml in canonical section order, and make
# sure text values are emitted as plain strings on both Python 2 and 3.
yaml.add_representer(_MetaYaml, _represent_omap)
if PY3:
    yaml.add_representer(str, _unicode_representer)
    # NOTE(review): presumably bound so the name `unicode` exists on PY3 for
    # later references in this module — confirm against the rest of the file.
    unicode = None
else:
    yaml.add_representer(unicode, _unicode_representer)
def output_yaml(metadata, filename=None, suppress_outputs=False):
    """Serialize a metadata object to YAML, optionally writing it to a file.

    Returns the YAML text when ``filename`` is falsy, otherwise a short
    status message naming the file that was written.  When
    ``suppress_outputs`` is true and the metadata is itself an output, the
    'outputs' section is dropped before dumping.
    """
    local_metadata = metadata.copy()
    utils.trim_empty_keys(local_metadata.meta)
    if suppress_outputs and local_metadata.is_output and 'outputs' in local_metadata.meta:
        del local_metadata.meta['outputs']
    dumped = yaml.dump(_MetaYaml(local_metadata.meta), Dumper=_IndentDumper,
                       default_flow_style=False, indent=4)
    if not filename:
        return dumped
    # Create parent directories only when the target actually has a path
    # component; an existing directory raises OSError, which is fine.
    if any(sep in filename for sep in ('\\', '/')):
        try:
            os.makedirs(os.path.dirname(filename))
        except OSError:
            pass
    with open(filename, "w") as fh:
        fh.write(dumped)
    return "Wrote yaml to %s" % filename
| true | true |
f71f9e495e3652fb8077b2a05bb9db9df47c5f74 | 8,148 | py | Python | addon-sdk-1.17/python-lib/cuddlefish/xpi.py | hankduan/firefoxExtension | a5fd86ef024a5ed21e039eb2f4b50fb6d0cf3567 | [
"MIT"
] | 102 | 2015-01-09T22:12:00.000Z | 2021-04-21T01:18:51.000Z | addon-sdk-1.17/python-lib/cuddlefish/xpi.py | hankduan/firefoxExtension | a5fd86ef024a5ed21e039eb2f4b50fb6d0cf3567 | [
"MIT"
] | 17 | 2015-01-24T22:30:47.000Z | 2020-11-19T01:13:32.000Z | addon-sdk-1.17/python-lib/cuddlefish/xpi.py | hankduan/firefoxExtension | a5fd86ef024a5ed21e039eb2f4b50fb6d0cf3567 | [
"MIT"
] | 33 | 2015-01-15T16:11:15.000Z | 2021-06-11T12:15:29.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import zipfile
import simplejson as json
from cuddlefish.util import filter_filenames, filter_dirnames
class HarnessOptionAlreadyDefinedError(Exception):
    """Raised when a --harness-option key collides with one that already
    exists in harness-options.json."""
ZIPSEP = "/"  # zip archives always use forward slashes, regardless of host OS


def make_zipfile_path(localroot, localpath):
    """Return *localpath* relative to *localroot*, joined with zip separators."""
    relative = localpath[len(localroot) + 1:]
    return ZIPSEP.join(relative.split(os.sep))
def mkzipdir(zf, path):
    """Add an empty directory entry named *path* to the open ZipFile *zf*.

    *path* should end with a trailing "/" so zip tools recognize it as a
    directory.  POSIX mode 040755 (drwxr-xr-x) is stored in the high 16
    bits of the external-attributes field, as the zip format expects.
    """
    dirinfo = zipfile.ZipInfo(path)
    # The Python 2-only "16L" long-literal suffix was dropped: it is a
    # syntax error under Python 3, and plain ints auto-promote on Python 2.
    dirinfo.external_attr = int("040755", 8) << 16
    zf.writestr(dirinfo, "")
def build_xpi(template_root_dir, manifest, xpi_path,
              harness_options, limit_to=None, extra_harness_options=None,
              bundle_sdk=True, pkgdir=""):
    """Assemble an add-on XPI (a zip archive) at *xpi_path*.

    Parameters:
      template_root_dir -- harness template directory copied wholesale.
      manifest -- object whose str() is written as install.rdf.
      xpi_path -- output path of the archive.
      harness_options -- dict of add-on options; consumed keys ('icon',
          'icon64', 'preferences', 'packages', 'locale') are removed and
          the remainder is written into harness-options.json.
      limit_to -- optional whitelist of absolute file paths to include
          from package sections.
      extra_harness_options -- extra key/value pairs merged into
          harness-options.json; a key collision raises
          HarnessOptionAlreadyDefinedError.  (The mutable ``{}`` default
          literal was replaced by ``None`` for safety; behavior is
          unchanged.)
      bundle_sdk -- when False, the 'addon-sdk' package is not bundled.
      pkgdir -- package directory searched for chrome.manifest / chrome/.

    Python 2-only literals (``0644``, ``16L``) were modernized to
    ``0o644 << 16``, which parses identically on Python 2.6+ and 3.
    """
    if extra_harness_options is None:
        extra_harness_options = {}
    IGNORED_FILES = [".hgignore", ".DS_Store", "install.rdf",
                     "application.ini", xpi_path]

    files_to_copy = {}  # maps zipfile path to local-disk abspath
    dirs_to_create = set()  # zipfile paths, no trailing slash

    zf = zipfile.ZipFile(xpi_path, "w", zipfile.ZIP_DEFLATED)

    # install.rdf is staged through a temp file so zf.write can pick it up;
    # `with` guarantees the handle is flushed/closed before zipping.
    with open('.install.rdf', 'w') as f:
        f.write(str(manifest))
    zf.write('.install.rdf', 'install.rdf')
    os.remove('.install.rdf')

    # Handle add-on icon
    if 'icon' in harness_options:
        zf.write(str(harness_options['icon']), 'icon.png')
        del harness_options['icon']
    if 'icon64' in harness_options:
        zf.write(str(harness_options['icon64']), 'icon64.png')
        del harness_options['icon64']

    # chrome.manifest
    if os.path.isfile(os.path.join(pkgdir, 'chrome.manifest')):
        files_to_copy['chrome.manifest'] = os.path.join(pkgdir,
                                                        'chrome.manifest')

    # chrome folder (would contain content, skin, and locale folders typically)
    folder = 'chrome'
    if os.path.exists(os.path.join(pkgdir, folder)):
        dirs_to_create.add('chrome')
        # cp -r folder
        abs_dirname = os.path.join(pkgdir, folder)
        for dirpath, dirnames, filenames in os.walk(abs_dirname):
            goodfiles = list(filter_filenames(filenames, IGNORED_FILES))
            dirnames[:] = filter_dirnames(dirnames)
            for dirname in dirnames:
                arcpath = make_zipfile_path(template_root_dir,
                                            os.path.join(dirpath, dirname))
                dirs_to_create.add(arcpath)
            for filename in goodfiles:
                abspath = os.path.join(dirpath, filename)
                arcpath = ZIPSEP.join(
                    [folder,
                     make_zipfile_path(abs_dirname,
                                       os.path.join(dirpath, filename)),
                     ])
                files_to_copy[str(arcpath)] = str(abspath)

    # Handle simple-prefs
    if 'preferences' in harness_options:
        from options_xul import parse_options, validate_prefs
        validate_prefs(harness_options["preferences"])
        opts_xul = parse_options(harness_options["preferences"],
                                 harness_options["jetpackID"],
                                 harness_options["preferencesBranch"])
        with open('.options.xul', 'wb') as f:
            f.write(opts_xul.encode("utf-8"))
        zf.write('.options.xul', 'options.xul')
        os.remove('.options.xul')

        from options_defaults import parse_options_defaults
        prefs_js = parse_options_defaults(harness_options["preferences"],
                                          harness_options["preferencesBranch"])
        with open('.prefs.js', 'wb') as f:
            f.write(prefs_js.encode("utf-8"))
    else:
        # b"" == "" on Python 2 and keeps the binary-mode write valid on 3.
        with open('.prefs.js', 'wb') as f:
            f.write(b"")

    zf.write('.prefs.js', 'defaults/preferences/prefs.js')
    os.remove('.prefs.js')

    # Copy the whole harness template tree into the archive.
    for dirpath, dirnames, filenames in os.walk(template_root_dir):
        filenames = list(filter_filenames(filenames, IGNORED_FILES))
        dirnames[:] = filter_dirnames(dirnames)
        for dirname in dirnames:
            arcpath = make_zipfile_path(template_root_dir,
                                        os.path.join(dirpath, dirname))
            dirs_to_create.add(arcpath)
        for filename in filenames:
            abspath = os.path.join(dirpath, filename)
            arcpath = make_zipfile_path(template_root_dir, abspath)
            files_to_copy[arcpath] = abspath

    # `packages` attribute contains a dictionary of dictionaries
    # of all packages sections directories
    for packageName in harness_options['packages']:
        base_arcpath = ZIPSEP.join(['resources', packageName])
        # Eventually strip sdk files. We need to do that in addition to the
        # whitelist as the whitelist is only used for `cfx xpi`:
        if not bundle_sdk and packageName == 'addon-sdk':
            continue
        # Always write the top directory, even if it contains no files, since
        # the harness will try to access it.
        dirs_to_create.add(base_arcpath)
        for sectionName in harness_options['packages'][packageName]:
            abs_dirname = harness_options['packages'][packageName][sectionName]
            base_arcpath = ZIPSEP.join(['resources', packageName, sectionName])
            # Always write the top directory, even if it contains no files,
            # since the harness will try to access it.
            dirs_to_create.add(base_arcpath)
            # cp -r stuff from abs_dirname/ into ZIP/resources/RESOURCEBASE/
            for dirpath, dirnames, filenames in os.walk(abs_dirname):
                goodfiles = list(filter_filenames(filenames, IGNORED_FILES))
                dirnames[:] = filter_dirnames(dirnames)
                for filename in goodfiles:
                    abspath = os.path.join(dirpath, filename)
                    if limit_to is not None and abspath not in limit_to:
                        continue  # strip unused files
                    arcpath = ZIPSEP.join(
                        ['resources',
                         packageName,
                         sectionName,
                         make_zipfile_path(abs_dirname,
                                           os.path.join(dirpath, filename)),
                         ])
                    files_to_copy[str(arcpath)] = str(abspath)
    del harness_options['packages']

    locales_json_data = {"locales": []}
    mkzipdir(zf, "locale/")
    for language in sorted(harness_options['locale']):
        locales_json_data["locales"].append(language)
        locale = harness_options['locale'][language]
        # Be careful about strings: always encode explicitly as UTF-8.
        jsonStr = json.dumps(locale, indent=1, sort_keys=True,
                             ensure_ascii=False)
        info = zipfile.ZipInfo('locale/' + language + '.json')
        # rw-r--r-- file mode in the external-attributes high bits.
        info.external_attr = 0o644 << 16
        zf.writestr(info, jsonStr.encode("utf-8"))
    del harness_options['locale']

    jsonStr = json.dumps(locales_json_data, ensure_ascii=True) + "\n"
    info = zipfile.ZipInfo('locales.json')
    info.external_attr = 0o644 << 16
    zf.writestr(info, jsonStr.encode("utf-8"))

    # now figure out which directories we need: all retained files' parents
    for arcpath in files_to_copy:
        bits = arcpath.split("/")
        for i in range(1, len(bits)):
            parentpath = ZIPSEP.join(bits[0:i])
            dirs_to_create.add(parentpath)

    # Create zipfile in alphabetical order, with each directory before its
    # files
    for name in sorted(dirs_to_create.union(set(files_to_copy))):
        if name in dirs_to_create:
            mkzipdir(zf, name + "/")
        if name in files_to_copy:
            zf.write(files_to_copy[name], name)

    # Add extra harness options (copy first so the caller's dict is untouched)
    harness_options = harness_options.copy()
    for key, value in extra_harness_options.items():
        if key in harness_options:
            msg = "Can't use --harness-option for existing key '%s'" % key
            raise HarnessOptionAlreadyDefinedError(msg)
        harness_options[key] = value

    # Write harness-options.json
    with open('.options.json', 'w') as f:
        f.write(json.dumps(harness_options, indent=1, sort_keys=True))
    zf.write('.options.json', 'harness-options.json')
    os.remove('.options.json')
    zf.close()
| 42.4375 | 83 | 0.632793 |
import os
import zipfile
import simplejson as json
from cuddlefish.util import filter_filenames, filter_dirnames
class HarnessOptionAlreadyDefinedError(Exception):
"""You cannot use --harness-option on keys that already exist in
harness-options.json"""
ZIPSEP = "/"
def make_zipfile_path(localroot, localpath):
return ZIPSEP.join(localpath[len(localroot)+1:].split(os.sep))
def mkzipdir(zf, path):
dirinfo = zipfile.ZipInfo(path)
dirinfo.external_attr = int("040755", 8) << 16L
zf.writestr(dirinfo, "")
def build_xpi(template_root_dir, manifest, xpi_path,
harness_options, limit_to=None, extra_harness_options={},
bundle_sdk=True, pkgdir=""):
IGNORED_FILES = [".hgignore", ".DS_Store", "install.rdf",
"application.ini", xpi_path]
files_to_copy = {}
dirs_to_create = set()
zf = zipfile.ZipFile(xpi_path, "w", zipfile.ZIP_DEFLATED)
open('.install.rdf', 'w').write(str(manifest))
zf.write('.install.rdf', 'install.rdf')
os.remove('.install.rdf')
if 'icon' in harness_options:
zf.write(str(harness_options['icon']), 'icon.png')
del harness_options['icon']
if 'icon64' in harness_options:
zf.write(str(harness_options['icon64']), 'icon64.png')
del harness_options['icon64']
if os.path.isfile(os.path.join(pkgdir, 'chrome.manifest')):
files_to_copy['chrome.manifest'] = os.path.join(pkgdir, 'chrome.manifest')
folder = 'chrome'
if os.path.exists(os.path.join(pkgdir, folder)):
dirs_to_create.add('chrome')
abs_dirname = os.path.join(pkgdir, folder)
for dirpath, dirnames, filenames in os.walk(abs_dirname):
goodfiles = list(filter_filenames(filenames, IGNORED_FILES))
dirnames[:] = filter_dirnames(dirnames)
for dirname in dirnames:
arcpath = make_zipfile_path(template_root_dir,
os.path.join(dirpath, dirname))
dirs_to_create.add(arcpath)
for filename in goodfiles:
abspath = os.path.join(dirpath, filename)
arcpath = ZIPSEP.join(
[folder,
make_zipfile_path(abs_dirname, os.path.join(dirpath, filename)),
])
files_to_copy[str(arcpath)] = str(abspath)
if 'preferences' in harness_options:
from options_xul import parse_options, validate_prefs
validate_prefs(harness_options["preferences"])
opts_xul = parse_options(harness_options["preferences"],
harness_options["jetpackID"],
harness_options["preferencesBranch"])
open('.options.xul', 'wb').write(opts_xul.encode("utf-8"))
zf.write('.options.xul', 'options.xul')
os.remove('.options.xul')
from options_defaults import parse_options_defaults
prefs_js = parse_options_defaults(harness_options["preferences"],
harness_options["preferencesBranch"])
open('.prefs.js', 'wb').write(prefs_js.encode("utf-8"))
else:
open('.prefs.js', 'wb').write("")
zf.write('.prefs.js', 'defaults/preferences/prefs.js')
os.remove('.prefs.js')
for dirpath, dirnames, filenames in os.walk(template_root_dir):
filenames = list(filter_filenames(filenames, IGNORED_FILES))
dirnames[:] = filter_dirnames(dirnames)
for dirname in dirnames:
arcpath = make_zipfile_path(template_root_dir,
os.path.join(dirpath, dirname))
dirs_to_create.add(arcpath)
for filename in filenames:
abspath = os.path.join(dirpath, filename)
arcpath = make_zipfile_path(template_root_dir, abspath)
files_to_copy[arcpath] = abspath
for packageName in harness_options['packages']:
base_arcpath = ZIPSEP.join(['resources', packageName])
if not bundle_sdk and packageName == 'addon-sdk':
continue
dirs_to_create.add(base_arcpath)
for sectionName in harness_options['packages'][packageName]:
abs_dirname = harness_options['packages'][packageName][sectionName]
base_arcpath = ZIPSEP.join(['resources', packageName, sectionName])
dirs_to_create.add(base_arcpath)
for dirpath, dirnames, filenames in os.walk(abs_dirname):
goodfiles = list(filter_filenames(filenames, IGNORED_FILES))
dirnames[:] = filter_dirnames(dirnames)
for filename in goodfiles:
abspath = os.path.join(dirpath, filename)
if limit_to is not None and abspath not in limit_to:
continue
arcpath = ZIPSEP.join(
['resources',
packageName,
sectionName,
make_zipfile_path(abs_dirname,
os.path.join(dirpath, filename)),
])
files_to_copy[str(arcpath)] = str(abspath)
del harness_options['packages']
locales_json_data = {"locales": []}
mkzipdir(zf, "locale/")
for language in sorted(harness_options['locale']):
locales_json_data["locales"].append(language)
locale = harness_options['locale'][language]
jsonStr = json.dumps(locale, indent=1, sort_keys=True, ensure_ascii=False)
info = zipfile.ZipInfo('locale/' + language + '.json')
info.external_attr = 0644 << 16L
zf.writestr(info, jsonStr.encode( "utf-8" ))
del harness_options['locale']
jsonStr = json.dumps(locales_json_data, ensure_ascii=True) +"\n"
info = zipfile.ZipInfo('locales.json')
info.external_attr = 0644 << 16L
zf.writestr(info, jsonStr.encode("utf-8"))
for arcpath in files_to_copy:
bits = arcpath.split("/")
for i in range(1,len(bits)):
parentpath = ZIPSEP.join(bits[0:i])
dirs_to_create.add(parentpath)
for name in sorted(dirs_to_create.union(set(files_to_copy))):
if name in dirs_to_create:
mkzipdir(zf, name+"/")
if name in files_to_copy:
zf.write(files_to_copy[name], name)
harness_options = harness_options.copy()
for key,value in extra_harness_options.items():
if key in harness_options:
msg = "Can't use --harness-option for existing key '%s'" % key
raise HarnessOptionAlreadyDefinedError(msg)
harness_options[key] = value
# Write harness-options.json
open('.options.json', 'w').write(json.dumps(harness_options, indent=1,
sort_keys=True))
zf.write('.options.json', 'harness-options.json')
os.remove('.options.json')
zf.close()
| false | true |
f71fa02523d9f3e25a04474d5b9b67ff8827679a | 346 | py | Python | .history/routes_20200723125644.py | rkustas/taskmanager | 3218b277a235c4e8d30b1d548ba28be3ab3f628f | [
"MIT"
] | null | null | null | .history/routes_20200723125644.py | rkustas/taskmanager | 3218b277a235c4e8d30b1d548ba28be3ab3f628f | [
"MIT"
] | null | null | null | .history/routes_20200723125644.py | rkustas/taskmanager | 3218b277a235c4e8d30b1d548ba28be3ab3f628f | [
"MIT"
] | null | null | null | from app import app
from flask import render_template
import forms
# Basic route
@app.route('/')
@app.route('/index')
def index():
    """Render the home page with a custom title."""
    context = {'current_title': 'Custom Title'}
    return render_template('index.html', **context)
@app.route('/about', methods=['GET', 'POST'])
def about():
    """Render the about page, passing in an AddTaskForm instance."""
    task_form = forms.AddTaskForm()
    return render_template('about.html', form=task_form)
from flask import render_template
import forms
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html', current_title= 'Custom Title')
@app.route('/about', methods=['GET','POST'])
def about():
form = forms.AddTaskForm()
return render_template('about.html', form=form) | true | true |
f71fa109caf8f0da6ce9758d0b1182b0c641a35c | 2,136 | py | Python | python/cuml/test/test_trustworthiness.py | efajardo-nv/cuml | bc86714836284ed4752c267513e5d447e884e1c5 | [
"Apache-2.0"
] | 3 | 2019-10-17T21:46:07.000Z | 2019-10-22T20:13:55.000Z | python/cuml/test/test_trustworthiness.py | efajardo-nv/cuml | bc86714836284ed4752c267513e5d447e884e1c5 | [
"Apache-2.0"
] | 1 | 2020-02-03T22:43:57.000Z | 2020-02-29T02:32:40.000Z | python/cuml/test/test_trustworthiness.py | efajardo-nv/cuml | bc86714836284ed4752c267513e5d447e884e1c5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018-2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sklearn.manifold.t_sne import trustworthiness as sklearn_trustworthiness
from cuml.metrics import trustworthiness as cuml_trustworthiness
from sklearn.datasets.samples_generator import make_blobs
from umap import UMAP
import cudf
import numpy as np
@pytest.mark.parametrize('input_type', ['ndarray'])
@pytest.mark.parametrize('n_samples', [10, 100])
@pytest.mark.parametrize('n_features', [10, 100])
@pytest.mark.parametrize('n_components', [2, 8])
def test_trustworthiness(input_type, n_samples, n_features, n_components):
    """cuML's trustworthiness should match scikit-learn's to within 0.1%
    on a UMAP embedding of synthetic blob data."""
    n_centers = round(n_samples * 0.4)
    X, _ = make_blobs(n_samples=n_samples, centers=n_centers,
                      n_features=n_features)

    X_embedded = UMAP(n_components=n_components).fit_transform(X)
    X = X.astype(np.float32)
    X_embedded = X_embedded.astype(np.float32)

    if input_type == 'dataframe':
        # Mirror the numpy inputs as cuDF dataframes, column by column.
        gdf = cudf.DataFrame()
        for col in range(X.shape[1]):
            gdf[str(col)] = np.asarray(X[:, col], dtype=np.float32)

        gdf_embedded = cudf.DataFrame()
        for col in range(X_embedded.shape[1]):
            gdf_embedded[str(col)] = np.asarray(X_embedded[:, col],
                                                dtype=np.float32)

        score = cuml_trustworthiness(gdf, gdf_embedded)
    else:
        score = cuml_trustworthiness(X, X_embedded)

    sk_score = sklearn_trustworthiness(X, X_embedded)

    # 0.1% relative tolerance; ideally the two scores are equal.
    eps = 0.001
    assert sk_score * (1 - eps) <= score <= sk_score * (1 + eps)
# assert cu_score == sk_score ideally
| 35.6 | 77 | 0.690543 |
import pytest
from sklearn.manifold.t_sne import trustworthiness as sklearn_trustworthiness
from cuml.metrics import trustworthiness as cuml_trustworthiness
from sklearn.datasets.samples_generator import make_blobs
from umap import UMAP
import cudf
import numpy as np
@pytest.mark.parametrize('input_type', ['ndarray'])
@pytest.mark.parametrize('n_samples', [10, 100])
@pytest.mark.parametrize('n_features', [10, 100])
@pytest.mark.parametrize('n_components', [2, 8])
def test_trustworthiness(input_type, n_samples, n_features, n_components):
centers = round(n_samples*0.4)
X, y = make_blobs(n_samples=n_samples, centers=centers,
n_features=n_features)
X_embedded = \
UMAP(n_components=n_components).fit_transform(X)
X = X.astype(np.float32)
X_embedded = X_embedded.astype(np.float32)
if input_type == 'dataframe':
gdf = cudf.DataFrame()
for i in range(X.shape[1]):
gdf[str(i)] = np.asarray(X[:, i], dtype=np.float32)
gdf_embedded = cudf.DataFrame()
for i in range(X_embedded.shape[1]):
gdf_embedded[str(i)] = np.asarray(X_embedded[:, i],
dtype=np.float32)
score = cuml_trustworthiness(gdf, gdf_embedded)
else:
score = cuml_trustworthiness(X, X_embedded)
sk_score = sklearn_trustworthiness(X, X_embedded)
eps = 0.001
assert (sk_score * (1 - eps) <= score and
score <= sk_score * (1 + eps))
| true | true |
f71fa140056dc835bd2625ea657d951e01d571d7 | 7,163 | py | Python | bindings/python/ensmallen_graph/datasets/networkrepository/socfbucsc68.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/socfbucsc68.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/networkrepository/socfbucsc68.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph socfb-UCSC68.
The graph is automatically retrieved from the NetworkRepository repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-06 11:50:55.897921
The undirected graph socfb-UCSC68 has 8991 nodes and 224584 unweighted
edges, of which none are self-loops. The graph is sparse as it has a density
of 0.00556 and has 7 connected components, where the component with most
nodes has 8979 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 39, the mean node degree is 49.96, and
the node degree mode is 1. The top 5 most central nodes are 2840 (degree
454), 7542 (degree 400), 4763 (degree 329), 692 (degree 323) and 2949 (degree
315).
References
---------------------
Please cite the following if you use the data:
@inproceedings{nr,
title = {The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle = {AAAI},
url={http://networkrepository.com},
year={2015}
}
@article{traud2012social,
title={Social structure of {F}acebook networks},
author={Traud, Amanda L and Mucha, Peter J and Porter, Mason A},
journal={Phys. A},
month={Aug},
number={16},
pages={4165--4180},
volume={391},
year={2012}
}
@article{Traud:2011fs,
title={Comparing Community Structure to Characteristics in Online Collegiate Social Networks},
author={Traud, Amanda L and Kelsic, Eric D and Mucha, Peter J and Porter, Mason A},
journal={SIAM Rev.},
number={3},
pages={526--543},
volume={53},
year={2011}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.networkrepository import SocfbUcsc68
# Then load the graph
graph = SocfbUcsc68()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Wether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def SocfbUcsc68(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/networkrepository",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Retrieve and return the socfb-UCSC68 graph from NetworkRepository.

    At rendering time the undirected graph had 8991 nodes and 224584
    unweighted edges (density 0.00556, 7 connected components).  The data
    is downloaded on first use and cached under *cache_path*.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
    verbose: int = 2,
        Whether to show loading bars during retrieval and building.
    cache_path: str = "graphs/networkrepository",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs forwarded to the loader.

    Returns
    -----------------------
    Instance of the socfb-UCSC68 graph.

    References
    -----------------------
    Rossi & Ahmed, "The Network Data Repository with Interactive Graph
    Analytics and Visualization", AAAI 2015 (http://networkrepository.com);
    Traud, Mucha & Porter, "Social structure of Facebook networks",
    Phys. A 391(16), 2012; Traud, Kelsic, Mucha & Porter, SIAM Rev. 53(3),
    2011.
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="SocfbUcsc68",
        dataset="networkrepository",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 31.835556 | 103 | 0.668993 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def SocfbUcsc68(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/networkrepository",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
return AutomaticallyRetrievedGraph(
graph_name="SocfbUcsc68",
dataset="networkrepository",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f71fa2527a6d8ee637811cf737524b29b2058b63 | 8,210 | py | Python | var/spack/repos/builtin/packages/graphviz/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/graphviz/package.py | varioustoxins/spack | cab0e4cb240f34891a6d753f3393e512f9a99e9a | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2022-01-08T08:41:11.000Z | 2022-03-14T19:28:07.000Z | var/spack/repos/builtin/packages/graphviz/package.py | foeroyingur/spack | 5300cbbb2e569190015c72d0970d25425ea38647 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
from spack import *
from spack.operating_systems.mac_os import macos_version
MACOS_VERSION = macos_version() if sys.platform == 'darwin' else None
class Graphviz(AutotoolsPackage):
"""Graph Visualization Software"""
homepage = 'http://www.graphviz.org'
git = 'https://gitlab.com/graphviz/graphviz.git'
url = 'https://gitlab.com/graphviz/graphviz/-/archive/2.46.0/graphviz-2.46.0.tar.bz2'
version('2.49.0', sha256='b129555743bb9bfb7b63c55825da51763b2f1ee7c0eaa6234a42a61a3aff6cc9')
version('2.47.2', sha256='b5ebb00d4283c6d12cf16b2323e1820b535cc3823c8f261b783f7903b1d5b7fb')
version('2.46.0', sha256='1b11684fd5488940b45bf4624393140da6032abafae08f33dc3e986cffd55d71')
version('2.44.1', sha256='0f8f3fbeaddd474e0a270dc9bb0e247a1ae4284ae35125af4adceffae5c7ae9b')
version('2.42.4', sha256='a1ca0c4273d96bbf32fbfcbb784c8da2e38da13e7d2bbf9b24fe94ae45e79c4c')
version('2.40.1', sha256='581596aaeac5dae3f57da6ecde62ad7709a992df341e8f7c6177b41e8b1ae4f6')
version('2.38.0', sha256='c1b1e326b5d1f45b0ce91edd7acc68e80ff6be6b470008766e4d466aafc9801f', deprecated=True)
# Language bindings
language_bindings = ['java']
# Additional language bindings are nominally supported by GraphViz via SWIG
# but are untested and need the proper dependencies added:
# language_bindings += ['sharp', 'go', 'guile', 'io', 'lua', 'ocaml',
# 'perl', 'php', 'python', 'r', 'ruby', 'tcl']
for lang in language_bindings:
variant(lang, default=False,
description='Enable for optional {0} language '
'bindings'.format(lang))
# Feature variants
variant('doc', default=False,
description='Build and install graphviz documentation')
variant('expat', default=False,
description='Build with Expat support (enables HTML-like labels)')
variant('gts', default=False,
description='Build with GNU Triangulated Surface Library')
variant('ghostscript', default=False,
description='Build with Ghostscript support')
variant('gtkplus', default=False,
description='Build with GTK+ support')
variant('libgd', default=False,
description='Build with libgd support (more output formats)')
variant('pangocairo', default=False,
description='Build with pango+cairo support (more output formats)')
variant('poppler', default=False,
description='Build with poppler support (pdf formats)')
variant('qt', default=False,
description='Build with Qt support')
variant('quartz', default=(MACOS_VERSION is not None),
description='Build with Quartz and PDF support')
variant('x', default=False,
description='Use the X Window System')
patch('https://www.linuxfromscratch.org/patches/blfs/9.0/graphviz-2.40.1-qt5-1.patch',
sha256='bd532df325df811713e311d17aaeac3f5d6075ea4fd0eae8d989391e6afba930',
when='@:2.40+qt^qt@5:')
patch('https://raw.githubusercontent.com/easybuilders/easybuild-easyconfigs/master/easybuild/easyconfigs/g/Graphviz/Graphviz-2.38.0_icc_sfio.patch',
sha256='393a0a772315a89dcc970b5efd4765d22dba83493d7956303673eb89c45b949f',
level=0,
when='@:2.40%intel')
patch('https://raw.githubusercontent.com/easybuilders/easybuild-easyconfigs/master/easybuild/easyconfigs/g/Graphviz/Graphviz-2.40.1_icc_vmalloc.patch',
sha256='813e6529e79161a18b0f24a969b7de22f8417b2e942239e658b5402884541bc2',
when='@:2.40%intel')
patch('ps2pdf.patch', when='@:2.45')
patch('implicit.patch', level=0, when='@:2.44.0')
if not MACOS_VERSION:
conflicts('+quartz',
msg="Graphviz can only be build with Quartz on macOS.")
elif MACOS_VERSION >= Version('10.9'):
# Doesn't detect newer mac os systems as being new
patch('fix-quartz-darwin.patch', when='@:2.47.2')
# Language dependencies
for lang in language_bindings:
depends_on('swig', when=('+' + lang))
depends_on(lang, when=('+' + lang))
# Feature dependencies
depends_on('zlib')
depends_on('groff', type='build', when='+doc')
depends_on('ghostscript', type='build', when='+doc')
depends_on('expat', when='+expat')
depends_on('libgd', when='+libgd')
depends_on('fontconfig', when='+libgd')
depends_on('freetype', when='+libgd')
depends_on('ghostscript', when='+ghostscript')
depends_on('gtkplus', when='+gtkplus')
depends_on('gts', when='+gts')
depends_on('cairo+pdf+png+svg', when='+pangocairo')
depends_on('fontconfig', when='+pangocairo')
depends_on('freetype', when='+pangocairo')
depends_on('glib', when='+pangocairo')
depends_on('libpng', when='+pangocairo')
depends_on('pango', when='+pangocairo')
depends_on('poppler+glib', when='+poppler')
depends_on('qt', when='+qt')
depends_on('libx11', when="+x")
# Build dependencies (graphviz binaries don't include configure file)
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('bison@3.0.4:', type='build')
depends_on('flex', type='build')
depends_on('sed', type='build')
depends_on('libtool', type='build')
depends_on('pkgconfig', type='build')
# to process f-strings used in gen_version.py
depends_on('python@3.6:', when='@2.47:', type='build')
conflicts('~doc',
when='@:2.45',
msg='graphviz always builds documentation below version 2.46')
conflicts('%gcc@:5.9',
when='@2.40.1+qt ^qt@5:',
msg='graphviz-2.40.1 needs gcc-6 or greater to compile with QT5 '
'suppport')
def autoreconf(self, spec, prefix):
    """Generate ``configure`` for builds straight from git.

    Release tarballs (and any previous bootstrap) already ship a
    ``configure`` script, in which case nothing needs to be done.
    """
    if os.path.exists(self.configure_abs_path):
        return
    # Bootstrap via autogen.sh; NOCONFIG keeps it from auto-running
    # configure afterwards.
    which('bash')('./autogen.sh', 'NOCONFIG')
def setup_build_environment(self, env):
    """Point the Objective-C compiler at the C compiler for +quartz."""
    spec = self.spec
    if '+quartz' in spec:
        env.set('OBJC', self.compiler.cc)
@when('%clang platform=darwin')
def patch(self):
    """Replace GCC's libstdc++ with LLVM's libc++ when using Clang."""
    makefiles = [subdir + '/Makefile.am'
                 for subdir in ('cmd/dot', 'cmd/edgepaint', 'cmd/mingle',
                                'plugin/gdiplus')]
    filter_file(r'-lstdc\+\+', '-lc++', 'configure.ac', *makefiles)
@when('%apple-clang')
def patch(self):
    """Replace GCC's libstdc++ with LLVM's libc++ when using Clang."""
    targets = ['configure.ac']
    for subdir in ('cmd/dot', 'cmd/edgepaint', 'cmd/mingle',
                   'plugin/gdiplus'):
        targets.append(subdir + '/Makefile.am')
    filter_file(r'-lstdc\+\+', '-lc++', *targets)
def configure_args(self):
    """Assemble the argument list handed to ``configure``.

    Flags are derived from the active variants; the historical argument
    order is preserved exactly.
    """
    spec = self.spec
    args = ['--disable-silent-rules']

    # Optional language bindings -- all of them are generated via SWIG,
    # so SWIG is enabled whenever at least one binding is active.
    active_langs = [lang for lang in self.language_bindings
                    if '+' + lang in spec]
    args.extend('--enable-' + lang for lang in active_langs)
    args.append('--enable-swig' if active_langs else '--disable-swig')

    # Plain feature toggles.
    for feature in ("expat", "gts", "ghostscript", "libgd", "pangocairo",
                    "poppler", "qt", "quartz", "x"):
        args += self.with_or_without(feature)

    # Dependencies whose header/library locations must be spelled out.
    for dep in ("zlib", "expat", "java"):
        if '+' + dep in spec:
            dep_prefix = spec[dep].prefix
            args.append('--with-{0}includedir={1}'.format(
                dep, dep_prefix.include))
            args.append('--with-{0}libdir={1}'.format(
                dep, dep_prefix.lib))

    args.append('--with-gtk' if '+gtkplus' in spec else '--without-gtk')

    # The man-pdfs switch only exists from 2.46 onwards.
    if spec.version >= Version('2.46'):
        args.append('--enable-man-pdfs' if '+doc' in spec
                    else '--disable-man-pdfs')
    return args
| 43.439153 | 155 | 0.637881 |
import os
import sys
from spack import *
from spack.operating_systems.mac_os import macos_version
MACOS_VERSION = macos_version() if sys.platform == 'darwin' else None
class Graphviz(AutotoolsPackage):
    """Builds Graphviz, the graph-visualization software distributed at
    http://www.graphviz.org."""

    homepage = 'http://www.graphviz.org'
    git = 'https://gitlab.com/graphviz/graphviz.git'
    url = 'https://gitlab.com/graphviz/graphviz/-/archive/2.46.0/graphviz-2.46.0.tar.bz2'

    version('2.49.0', sha256='b129555743bb9bfb7b63c55825da51763b2f1ee7c0eaa6234a42a61a3aff6cc9')
    version('2.47.2', sha256='b5ebb00d4283c6d12cf16b2323e1820b535cc3823c8f261b783f7903b1d5b7fb')
    version('2.46.0', sha256='1b11684fd5488940b45bf4624393140da6032abafae08f33dc3e986cffd55d71')
    version('2.44.1', sha256='0f8f3fbeaddd474e0a270dc9bb0e247a1ae4284ae35125af4adceffae5c7ae9b')
    version('2.42.4', sha256='a1ca0c4273d96bbf32fbfcbb784c8da2e38da13e7d2bbf9b24fe94ae45e79c4c')
    version('2.40.1', sha256='581596aaeac5dae3f57da6ecde62ad7709a992df341e8f7c6177b41e8b1ae4f6')
    version('2.38.0', sha256='c1b1e326b5d1f45b0ce91edd7acc68e80ff6be6b470008766e4d466aafc9801f', deprecated=True)

    # Optional language bindings, each exposed as its own variant below.
    language_bindings = ['java']

    for lang in language_bindings:
        variant(lang, default=False,
                description='Enable for optional {0} language '
                            'bindings'.format(lang))

    # Feature variants.
    variant('doc', default=False,
            description='Build and install graphviz documentation')
    variant('expat', default=False,
            description='Build with Expat support (enables HTML-like labels)')
    variant('gts', default=False,
            description='Build with GNU Triangulated Surface Library')
    variant('ghostscript', default=False,
            description='Build with Ghostscript support')
    variant('gtkplus', default=False,
            description='Build with GTK+ support')
    variant('libgd', default=False,
            description='Build with libgd support (more output formats)')
    variant('pangocairo', default=False,
            description='Build with pango+cairo support (more output formats)')
    variant('poppler', default=False,
            description='Build with poppler support (pdf formats)')
    variant('qt', default=False,
            description='Build with Qt support')
    # Quartz defaults to on only when building on macOS.
    variant('quartz', default=(MACOS_VERSION is not None),
            description='Build with Quartz and PDF support')
    variant('x', default=False,
            description='Use the X Window System')

    # Build fixes for old releases / Intel compilers (patches maintained
    # by BLFS and EasyBuild respectively).
    patch('https://www.linuxfromscratch.org/patches/blfs/9.0/graphviz-2.40.1-qt5-1.patch',
          sha256='bd532df325df811713e311d17aaeac3f5d6075ea4fd0eae8d989391e6afba930',
          when='@:2.40+qt^qt@5:')
    patch('https://raw.githubusercontent.com/easybuilders/easybuild-easyconfigs/master/easybuild/easyconfigs/g/Graphviz/Graphviz-2.38.0_icc_sfio.patch',
          sha256='393a0a772315a89dcc970b5efd4765d22dba83493d7956303673eb89c45b949f',
          level=0,
          when='@:2.40%intel')
    patch('https://raw.githubusercontent.com/easybuilders/easybuild-easyconfigs/master/easybuild/easyconfigs/g/Graphviz/Graphviz-2.40.1_icc_vmalloc.patch',
          sha256='813e6529e79161a18b0f24a969b7de22f8417b2e942239e658b5402884541bc2',
          when='@:2.40%intel')
    patch('ps2pdf.patch', when='@:2.45')
    patch('implicit.patch', level=0, when='@:2.44.0')

    if not MACOS_VERSION:
        # Quartz is a macOS-only framework, so forbid it elsewhere.
        # (Fixed message grammar: "be build" -> "be built".)
        conflicts('+quartz',
                  msg="Graphviz can only be built with Quartz on macOS.")
    elif MACOS_VERSION >= Version('10.9'):
        # Doesn't detect newer macOS systems as being new.
        patch('fix-quartz-darwin.patch', when='@:2.47.2')

    # Language dependencies: all bindings go through SWIG and need the
    # language itself as well.
    for lang in language_bindings:
        depends_on('swig', when=('+' + lang))
        depends_on(lang, when=('+' + lang))

    # Feature dependencies.
    depends_on('zlib')
    depends_on('groff', type='build', when='+doc')
    depends_on('ghostscript', type='build', when='+doc')
    depends_on('expat', when='+expat')
    depends_on('libgd', when='+libgd')
    depends_on('fontconfig', when='+libgd')
    depends_on('freetype', when='+libgd')
    depends_on('ghostscript', when='+ghostscript')
    depends_on('gtkplus', when='+gtkplus')
    depends_on('gts', when='+gts')
    depends_on('cairo+pdf+png+svg', when='+pangocairo')
    depends_on('fontconfig', when='+pangocairo')
    depends_on('freetype', when='+pangocairo')
    depends_on('glib', when='+pangocairo')
    depends_on('libpng', when='+pangocairo')
    depends_on('pango', when='+pangocairo')
    depends_on('poppler+glib', when='+poppler')
    depends_on('qt', when='+qt')
    depends_on('libx11', when="+x")

    # Build dependencies (graphviz binaries don't include configure file).
    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('bison@3.0.4:', type='build')
    depends_on('flex', type='build')
    depends_on('sed', type='build')
    depends_on('libtool', type='build')
    depends_on('pkgconfig', type='build')
    # To process f-strings used in gen_version.py.
    depends_on('python@3.6:', when='@2.47:', type='build')

    conflicts('~doc',
              when='@:2.45',
              msg='graphviz always builds documentation below version 2.46')
    # (Fixed message typo: "suppport" -> "support".)
    conflicts('%gcc@:5.9',
              when='@2.40.1+qt ^qt@5:',
              msg='graphviz-2.40.1 needs gcc-6 or greater to compile with QT5 '
                  'support')

    def autoreconf(self, spec, prefix):
        """Generate ``configure`` when building from a git checkout."""
        # Release tarballs (and prior bootstraps) already ship 'configure'.
        if os.path.exists(self.configure_abs_path):
            return
        # Bootstrap; NOCONFIG disables the automatic configure run.
        bash = which('bash')
        bash('./autogen.sh', 'NOCONFIG')

    def setup_build_environment(self, env):
        """Use the C compiler as the Objective-C compiler for +quartz."""
        if '+quartz' in self.spec:
            env.set('OBJC', self.compiler.cc)

    @when('%clang platform=darwin')
    def patch(self):
        """Replace GCC's libstdc++ with LLVM's libc++ when using Clang."""
        mkdirs = ['cmd/dot', 'cmd/edgepaint', 'cmd/mingle', 'plugin/gdiplus']
        filter_file(r'-lstdc\+\+', '-lc++', 'configure.ac',
                    *(d + '/Makefile.am' for d in mkdirs))

    @when('%apple-clang')
    def patch(self):
        """Replace GCC's libstdc++ with LLVM's libc++ when using Clang."""
        mkdirs = ['cmd/dot', 'cmd/edgepaint', 'cmd/mingle', 'plugin/gdiplus']
        filter_file(r'-lstdc\+\+', '-lc++', 'configure.ac',
                    *(d + '/Makefile.am' for d in mkdirs))

    def configure_args(self):
        """Build the configure argument list from the active variants."""
        spec = self.spec
        args = ['--disable-silent-rules']

        # Language bindings all require SWIG.
        use_swig = False
        for lang in self.language_bindings:
            if '+' + lang in spec:
                use_swig = True
                args.append('--enable-' + lang)
        args.append('--{0}-swig'.format('enable' if use_swig else 'disable'))

        for var in ["expat", "gts", "ghostscript", "libgd", "pangocairo",
                    "poppler", "qt", "quartz", "x"]:
            args += self.with_or_without(var)

        # Dependencies whose header/library locations must be spelled out.
        for var in ["zlib", "expat", "java"]:
            if '+' + var in spec:
                args.append('--with-{0}includedir={1}'.format(
                    var, spec[var].prefix.include))
                args.append('--with-{0}libdir={1}'.format(
                    var, spec[var].prefix.lib))

        args.append('--{0}-gtk'.format(
            "with" if "+gtkplus" in spec else "without"))

        # The man-pdfs switch only exists from 2.46 onwards.
        if spec.version >= Version('2.46'):
            args.append('--{0}-man-pdfs'.format(
                'enable' if '+doc' in spec else 'disable'))
        return args
| true | true |
f71fa28fe37d5ad73815c67dfd54a10cbdef33d0 | 84,349 | py | Python | python/src/chirpstack_api/as_pb/external/api/application_pb2_grpc.py | maxreb/chirpstack-api | c591dd556e70b384318cdf61de19c0350715d61d | [
"MIT"
] | 55 | 2019-11-05T15:46:49.000Z | 2022-03-23T14:31:33.000Z | python/src/chirpstack_api/as_pb/external/api/application_pb2_grpc.py | maxreb/chirpstack-api | c591dd556e70b384318cdf61de19c0350715d61d | [
"MIT"
] | 39 | 2019-11-08T21:03:45.000Z | 2022-03-01T12:40:36.000Z | python/src/chirpstack_api/as_pb/external/api/application_pb2_grpc.py | maxreb/chirpstack-api | c591dd556e70b384318cdf61de19c0350715d61d | [
"MIT"
] | 101 | 2019-11-22T13:59:59.000Z | 2022-03-14T09:52:46.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from chirpstack_api.as_pb.external.api import application_pb2 as chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ApplicationServiceStub(object):
    """ApplicationService is the service managing applications.

    Client-side stub: exposes one unary-unary callable per RPC of the
    ``api.ApplicationService`` gRPC service.  (This module is produced by
    the gRPC protocol compiler; regenerate rather than hand-edit.)
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        msgs = chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2
        empty = google_dot_protobuf_dot_empty__pb2.Empty

        def _rpc(name, request_name, response_message):
            # One unary-unary callable per RPC, wired to the proto codecs.
            setattr(self, name, channel.unary_unary(
                '/api.ApplicationService/' + name,
                request_serializer=getattr(
                    msgs, request_name).SerializeToString,
                response_deserializer=response_message.FromString,
            ))

        # Application CRUD and listing.
        _rpc('Create', 'CreateApplicationRequest',
             msgs.CreateApplicationResponse)
        _rpc('Get', 'GetApplicationRequest', msgs.GetApplicationResponse)
        _rpc('Update', 'UpdateApplicationRequest', empty)
        _rpc('Delete', 'DeleteApplicationRequest', empty)
        _rpc('List', 'ListApplicationRequest', msgs.ListApplicationResponse)

        # Per-integration Create/Get/Update/Delete.  Only Get has a
        # dedicated response message; the rest return
        # google.protobuf.Empty.  Order matches the .proto declaration.
        for kind in ('HTTP', 'InfluxDB', 'ThingsBoard', 'MyDevices',
                     'LoRaCloud', 'GCPPubSub', 'AWSSNS', 'AzureServiceBus',
                     'PilotThings'):
            for op in ('Create', 'Get', 'Update', 'Delete'):
                name = op + kind + 'Integration'
                if op == 'Get':
                    response = getattr(msgs, name + 'Response')
                else:
                    response = empty
                _rpc(name, name + 'Request', response)

        _rpc('ListIntegrations', 'ListIntegrationRequest',
             msgs.ListIntegrationResponse)
        _rpc('GenerateMQTTIntegrationClientCertificate',
             'GenerateMQTTIntegrationClientCertificateRequest',
             msgs.GenerateMQTTIntegrationClientCertificateResponse)
class ApplicationServiceServicer(object):
"""ApplicationService is the service managing applications.
"""
def Create(self, request, context):
"""Create creates the given application.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get returns the requested application.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update updates the given application.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete deletes the given application.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List lists the available applications.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateHTTPIntegration(self, request, context):
"""CreateHTTPIntegration creates a HTTP application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetHTTPIntegration(self, request, context):
"""GetHTTPIntegration returns the HTTP application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateHTTPIntegration(self, request, context):
"""UpdateHTTPIntegration updates the HTTP application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteHTTPIntegration(self, request, context):
"""DeleteIntegration deletes the HTTP application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateInfluxDBIntegration(self, request, context):
"""CreateInfluxDBIntegration create an InfluxDB application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInfluxDBIntegration(self, request, context):
"""GetInfluxDBIntegration returns the InfluxDB application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateInfluxDBIntegration(self, request, context):
"""UpdateInfluxDBIntegration updates the InfluxDB application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteInfluxDBIntegration(self, request, context):
"""DeleteInfluxDBIntegration deletes the InfluxDB application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateThingsBoardIntegration(self, request, context):
"""CreateThingsBoardIntegration creates a ThingsBoard application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetThingsBoardIntegration(self, request, context):
"""GetThingsBoardIntegration returns the ThingsBoard application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateThingsBoardIntegration(self, request, context):
"""UpdateThingsBoardIntegration updates the ThingsBoard application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteThingsBoardIntegration(self, request, context):
"""DeleteThingsBoardIntegration deletes the ThingsBoard application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateMyDevicesIntegration(self, request, context):
"""CreateMyDevicesIntegration creates a MyDevices application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetMyDevicesIntegration(self, request, context):
"""GetMyDevicesIntegration returns the MyDevices application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateMyDevicesIntegration(self, request, context):
"""UpdateMyDevicesIntegration updates the MyDevices application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteMyDevicesIntegration(self, request, context):
"""DeleteMyDevicesIntegration deletes the MyDevices application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateLoRaCloudIntegration(self, request, context):
"""CreateLoRaCloudIntegration creates A LoRaCloud application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetLoRaCloudIntegration(self, request, context):
"""GetLoRaCloudIntegration returns the LoRaCloud application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateLoRaCloudIntegration(self, request, context):
"""UpdateLoRaCloudIntegration updates the LoRaCloud application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteLoRaCloudIntegration(self, request, context):
"""DeleteLoRaCloudIntegration deletes the LoRaCloud application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateGCPPubSubIntegration(self, request, context):
"""CreateGCPPubSubIntegration creates a GCP PubSub application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetGCPPubSubIntegration(self, request, context):
"""GetGCPPubSubIntegration returns the GCP PubSub application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateGCPPubSubIntegration(self, request, context):
"""UpdateGCPPubSubIntegration updates the GCP PubSub application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteGCPPubSubIntegration(self, request, context):
"""DeleteGCPPubSubIntegration deletes the GCP PubSub application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateAWSSNSIntegration(self, request, context):
"""CreateAWSSNSIntegration creates a AWS SNS application-integration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
    def GetAWSSNSIntegration(self, request, context):
        """GetAWSSNSIntegration returns the AWS SNS application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdateAWSSNSIntegration(self, request, context):
        """UpdateAWSSNSIntegration updates the AWS SNS application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeleteAWSSNSIntegration(self, request, context):
        """DeleteAWSSNSIntegration deletes the AWS SNS application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CreateAzureServiceBusIntegration(self, request, context):
        """CreateAzureServiceBusIntegration creates an Azure Service-Bus application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetAzureServiceBusIntegration(self, request, context):
        """GetAzureServiceBusIntegration returns the Azure Service-Bus application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdateAzureServiceBusIntegration(self, request, context):
        """UpdateAzureServiceBusIntegration updates the Azure Service-Bus application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeleteAzureServiceBusIntegration(self, request, context):
        """DeleteAzureServiceBusIntegration deletes the Azure Service-Bus application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CreatePilotThingsIntegration(self, request, context):
        """CreatePilotThingsIntegration creates an Pilot Things application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetPilotThingsIntegration(self, request, context):
        """GetPilotThingsIntegration returns the Pilot Things application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdatePilotThingsIntegration(self, request, context):
        """UpdatePilotThingsIntegration updates the Pilot Things application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeletePilotThingsIntegration(self, request, context):
        """DeletePilotThingsIntegration deletes the Pilot Things application-integration.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def ListIntegrations(self, request, context):
        """ListIntegrations lists all configured integrations.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GenerateMQTTIntegrationClientCertificate(self, request, context):
        """GenerateMQTTIntegrationClientCertificate generates an application ID specific TLS certificate
        to connect to the MQTT broker.

        Auto-generated stub: reports UNIMPLEMENTED and raises; override in a subclass.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ApplicationServiceServicer_to_server(servicer, server):
    """Register every api.ApplicationService RPC of *servicer* on *server*.

    Builds one unary-unary handler per RPC. Each handler wires the RPC's
    request message deserializer and response message serializer; mutating
    RPCs (Create*/Update*/Delete* integrations, application Update/Delete)
    respond with google.protobuf.Empty. Handler names follow the generator's
    strict naming scheme, so they are derived here instead of spelled out.
    """
    msgs = chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2
    empty_msg = google_dot_protobuf_dot_empty__pb2.Empty

    def make_handler(method_name, request_name, response_cls):
        # One unary-unary handler bound to the servicer method of the same name.
        return grpc.unary_unary_rpc_method_handler(
            getattr(servicer, method_name),
            request_deserializer=getattr(msgs, request_name).FromString,
            response_serializer=response_cls.SerializeToString,
        )

    rpc_method_handlers = {}

    # Application CRUD: Create/Get/List return dedicated responses,
    # Update/Delete return Empty.
    for verb, has_response in (('Create', True), ('Get', True),
                               ('Update', False), ('Delete', False),
                               ('List', True)):
        response_cls = (getattr(msgs, verb + 'ApplicationResponse')
                        if has_response else empty_msg)
        rpc_method_handlers[verb] = make_handler(
            verb, verb + 'ApplicationRequest', response_cls)

    # Per-integration CRUD: only the Get* RPC has a dedicated response message.
    for integration in ('HTTP', 'InfluxDB', 'ThingsBoard', 'MyDevices',
                        'LoRaCloud', 'GCPPubSub', 'AWSSNS',
                        'AzureServiceBus', 'PilotThings'):
        for verb in ('Create', 'Get', 'Update', 'Delete'):
            name = verb + integration + 'Integration'
            response_cls = (getattr(msgs, name + 'Response')
                            if verb == 'Get' else empty_msg)
            rpc_method_handlers[name] = make_handler(
                name, name + 'Request', response_cls)

    # RPCs that do not fit the CRUD naming pattern.
    rpc_method_handlers['ListIntegrations'] = make_handler(
        'ListIntegrations', 'ListIntegrationRequest',
        msgs.ListIntegrationResponse)
    rpc_method_handlers['GenerateMQTTIntegrationClientCertificate'] = make_handler(
        'GenerateMQTTIntegrationClientCertificate',
        'GenerateMQTTIntegrationClientCertificateRequest',
        msgs.GenerateMQTTIntegrationClientCertificateResponse)

    generic_handler = grpc.method_handlers_generic_handler(
        'api.ApplicationService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ApplicationService(object):
"""ApplicationService is the service managing applications.
"""
    @staticmethod
    def Create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/Create; returns CreateApplicationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Create',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateApplicationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateApplicationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/Get; returns GetApplicationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Get',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetApplicationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetApplicationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/Update; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Update',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateApplicationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/Delete; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Delete',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteApplicationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def List(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/List; returns ListApplicationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/List',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListApplicationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListApplicationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/CreateHTTPIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateHTTPIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/GetHTTPIntegration; returns GetHTTPIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetHTTPIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetHTTPIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/UpdateHTTPIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateHTTPIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/DeleteHTTPIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteHTTPIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/CreateInfluxDBIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateInfluxDBIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/GetInfluxDBIntegration; returns GetInfluxDBIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetInfluxDBIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetInfluxDBIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/UpdateInfluxDBIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateInfluxDBIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/DeleteInfluxDBIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteInfluxDBIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/CreateThingsBoardIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateThingsBoardIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/GetThingsBoardIntegration; returns GetThingsBoardIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetThingsBoardIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetThingsBoardIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/UpdateThingsBoardIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateThingsBoardIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/DeleteThingsBoardIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteThingsBoardIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/CreateMyDevicesIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateMyDevicesIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/GetMyDevicesIntegration; returns GetMyDevicesIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetMyDevicesIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetMyDevicesIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/UpdateMyDevicesIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateMyDevicesIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/DeleteMyDevicesIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteMyDevicesIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateLoRaCloudIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/CreateLoRaCloudIntegration; returns google.protobuf.Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateLoRaCloudIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateLoRaCloudIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetLoRaCloudIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Experimental stub-less client call to /api.ApplicationService/GetLoRaCloudIntegration; returns GetLoRaCloudIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetLoRaCloudIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetLoRaCloudIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetLoRaCloudIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateLoRaCloudIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateLoRaCloudIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateLoRaCloudIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteLoRaCloudIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteLoRaCloudIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteLoRaCloudIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateGCPPubSubIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateGCPPubSubIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateGCPPubSubIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetGCPPubSubIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetGCPPubSubIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetGCPPubSubIntegrationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetGCPPubSubIntegrationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateGCPPubSubIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateGCPPubSubIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateGCPPubSubIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteGCPPubSubIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteGCPPubSubIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteGCPPubSubIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateAWSSNSIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateAWSSNSIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateAWSSNSIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAWSSNSIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetAWSSNSIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAWSSNSIntegrationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAWSSNSIntegrationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateAWSSNSIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateAWSSNSIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateAWSSNSIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteAWSSNSIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteAWSSNSIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteAWSSNSIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreateAzureServiceBusIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateAzureServiceBusIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateAzureServiceBusIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetAzureServiceBusIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetAzureServiceBusIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAzureServiceBusIntegrationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAzureServiceBusIntegrationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateAzureServiceBusIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateAzureServiceBusIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateAzureServiceBusIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteAzureServiceBusIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteAzureServiceBusIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteAzureServiceBusIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CreatePilotThingsIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreatePilotThingsIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreatePilotThingsIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetPilotThingsIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetPilotThingsIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetPilotThingsIntegrationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetPilotThingsIntegrationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdatePilotThingsIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdatePilotThingsIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdatePilotThingsIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeletePilotThingsIntegration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeletePilotThingsIntegration',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeletePilotThingsIntegrationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListIntegrations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/ListIntegrations',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListIntegrationRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListIntegrationResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GenerateMQTTIntegrationClientCertificate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GenerateMQTTIntegrationClientCertificate',
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GenerateMQTTIntegrationClientCertificateRequest.SerializeToString,
chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GenerateMQTTIntegrationClientCertificateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 56.195203 | 176 | 0.719013 |
import grpc
from chirpstack_api.as_pb.external.api import application_pb2 as chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ApplicationServiceStub(object):
    """Client-side stub for the api.ApplicationService gRPC service.

    One unary-unary callable is exposed per RPC; request/response message
    classes come from the generated application_pb2 module, with
    google.protobuf.Empty as the response for fire-and-forget mutations.
    """

    def __init__(self, channel):
        """Bind one unary-unary callable per ApplicationService RPC.

        Args:
            channel: A grpc.Channel used to create the per-RPC callables.
        """
        pb2 = chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2
        empty = google_dot_protobuf_dot_empty__pb2.Empty

        def unary(path, request_cls, response_cls):
            # Every ApplicationService method is a plain unary-unary RPC.
            return channel.unary_unary(
                path,
                request_serializer=request_cls.SerializeToString,
                response_deserializer=response_cls.FromString,
            )

        self.Create = unary('/api.ApplicationService/Create', pb2.CreateApplicationRequest, pb2.CreateApplicationResponse)
        self.Get = unary('/api.ApplicationService/Get', pb2.GetApplicationRequest, pb2.GetApplicationResponse)
        self.Update = unary('/api.ApplicationService/Update', pb2.UpdateApplicationRequest, empty)
        self.Delete = unary('/api.ApplicationService/Delete', pb2.DeleteApplicationRequest, empty)
        self.List = unary('/api.ApplicationService/List', pb2.ListApplicationRequest, pb2.ListApplicationResponse)
        self.CreateHTTPIntegration = unary('/api.ApplicationService/CreateHTTPIntegration', pb2.CreateHTTPIntegrationRequest, empty)
        self.GetHTTPIntegration = unary('/api.ApplicationService/GetHTTPIntegration', pb2.GetHTTPIntegrationRequest, pb2.GetHTTPIntegrationResponse)
        self.UpdateHTTPIntegration = unary('/api.ApplicationService/UpdateHTTPIntegration', pb2.UpdateHTTPIntegrationRequest, empty)
        self.DeleteHTTPIntegration = unary('/api.ApplicationService/DeleteHTTPIntegration', pb2.DeleteHTTPIntegrationRequest, empty)
        self.CreateInfluxDBIntegration = unary('/api.ApplicationService/CreateInfluxDBIntegration', pb2.CreateInfluxDBIntegrationRequest, empty)
        self.GetInfluxDBIntegration = unary('/api.ApplicationService/GetInfluxDBIntegration', pb2.GetInfluxDBIntegrationRequest, pb2.GetInfluxDBIntegrationResponse)
        self.UpdateInfluxDBIntegration = unary('/api.ApplicationService/UpdateInfluxDBIntegration', pb2.UpdateInfluxDBIntegrationRequest, empty)
        self.DeleteInfluxDBIntegration = unary('/api.ApplicationService/DeleteInfluxDBIntegration', pb2.DeleteInfluxDBIntegrationRequest, empty)
        self.CreateThingsBoardIntegration = unary('/api.ApplicationService/CreateThingsBoardIntegration', pb2.CreateThingsBoardIntegrationRequest, empty)
        self.GetThingsBoardIntegration = unary('/api.ApplicationService/GetThingsBoardIntegration', pb2.GetThingsBoardIntegrationRequest, pb2.GetThingsBoardIntegrationResponse)
        self.UpdateThingsBoardIntegration = unary('/api.ApplicationService/UpdateThingsBoardIntegration', pb2.UpdateThingsBoardIntegrationRequest, empty)
        self.DeleteThingsBoardIntegration = unary('/api.ApplicationService/DeleteThingsBoardIntegration', pb2.DeleteThingsBoardIntegrationRequest, empty)
        self.CreateMyDevicesIntegration = unary('/api.ApplicationService/CreateMyDevicesIntegration', pb2.CreateMyDevicesIntegrationRequest, empty)
        self.GetMyDevicesIntegration = unary('/api.ApplicationService/GetMyDevicesIntegration', pb2.GetMyDevicesIntegrationRequest, pb2.GetMyDevicesIntegrationResponse)
        self.UpdateMyDevicesIntegration = unary('/api.ApplicationService/UpdateMyDevicesIntegration', pb2.UpdateMyDevicesIntegrationRequest, empty)
        self.DeleteMyDevicesIntegration = unary('/api.ApplicationService/DeleteMyDevicesIntegration', pb2.DeleteMyDevicesIntegrationRequest, empty)
        self.CreateLoRaCloudIntegration = unary('/api.ApplicationService/CreateLoRaCloudIntegration', pb2.CreateLoRaCloudIntegrationRequest, empty)
        self.GetLoRaCloudIntegration = unary('/api.ApplicationService/GetLoRaCloudIntegration', pb2.GetLoRaCloudIntegrationRequest, pb2.GetLoRaCloudIntegrationResponse)
        self.UpdateLoRaCloudIntegration = unary('/api.ApplicationService/UpdateLoRaCloudIntegration', pb2.UpdateLoRaCloudIntegrationRequest, empty)
        self.DeleteLoRaCloudIntegration = unary('/api.ApplicationService/DeleteLoRaCloudIntegration', pb2.DeleteLoRaCloudIntegrationRequest, empty)
        self.CreateGCPPubSubIntegration = unary('/api.ApplicationService/CreateGCPPubSubIntegration', pb2.CreateGCPPubSubIntegrationRequest, empty)
        self.GetGCPPubSubIntegration = unary('/api.ApplicationService/GetGCPPubSubIntegration', pb2.GetGCPPubSubIntegrationRequest, pb2.GetGCPPubSubIntegrationResponse)
        self.UpdateGCPPubSubIntegration = unary('/api.ApplicationService/UpdateGCPPubSubIntegration', pb2.UpdateGCPPubSubIntegrationRequest, empty)
        self.DeleteGCPPubSubIntegration = unary('/api.ApplicationService/DeleteGCPPubSubIntegration', pb2.DeleteGCPPubSubIntegrationRequest, empty)
        self.CreateAWSSNSIntegration = unary('/api.ApplicationService/CreateAWSSNSIntegration', pb2.CreateAWSSNSIntegrationRequest, empty)
        self.GetAWSSNSIntegration = unary('/api.ApplicationService/GetAWSSNSIntegration', pb2.GetAWSSNSIntegrationRequest, pb2.GetAWSSNSIntegrationResponse)
        self.UpdateAWSSNSIntegration = unary('/api.ApplicationService/UpdateAWSSNSIntegration', pb2.UpdateAWSSNSIntegrationRequest, empty)
        self.DeleteAWSSNSIntegration = unary('/api.ApplicationService/DeleteAWSSNSIntegration', pb2.DeleteAWSSNSIntegrationRequest, empty)
        self.CreateAzureServiceBusIntegration = unary('/api.ApplicationService/CreateAzureServiceBusIntegration', pb2.CreateAzureServiceBusIntegrationRequest, empty)
        self.GetAzureServiceBusIntegration = unary('/api.ApplicationService/GetAzureServiceBusIntegration', pb2.GetAzureServiceBusIntegrationRequest, pb2.GetAzureServiceBusIntegrationResponse)
        self.UpdateAzureServiceBusIntegration = unary('/api.ApplicationService/UpdateAzureServiceBusIntegration', pb2.UpdateAzureServiceBusIntegrationRequest, empty)
        self.DeleteAzureServiceBusIntegration = unary('/api.ApplicationService/DeleteAzureServiceBusIntegration', pb2.DeleteAzureServiceBusIntegrationRequest, empty)
        self.CreatePilotThingsIntegration = unary('/api.ApplicationService/CreatePilotThingsIntegration', pb2.CreatePilotThingsIntegrationRequest, empty)
        self.GetPilotThingsIntegration = unary('/api.ApplicationService/GetPilotThingsIntegration', pb2.GetPilotThingsIntegrationRequest, pb2.GetPilotThingsIntegrationResponse)
        self.UpdatePilotThingsIntegration = unary('/api.ApplicationService/UpdatePilotThingsIntegration', pb2.UpdatePilotThingsIntegrationRequest, empty)
        self.DeletePilotThingsIntegration = unary('/api.ApplicationService/DeletePilotThingsIntegration', pb2.DeletePilotThingsIntegrationRequest, empty)
        self.ListIntegrations = unary('/api.ApplicationService/ListIntegrations', pb2.ListIntegrationRequest, pb2.ListIntegrationResponse)
        self.GenerateMQTTIntegrationClientCertificate = unary('/api.ApplicationService/GenerateMQTTIntegrationClientCertificate', pb2.GenerateMQTTIntegrationClientCertificateRequest, pb2.GenerateMQTTIntegrationClientCertificateResponse)
class ApplicationServiceServicer(object):
def Create(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateHTTPIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetHTTPIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateHTTPIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteHTTPIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateInfluxDBIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetInfluxDBIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateInfluxDBIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteInfluxDBIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateThingsBoardIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetThingsBoardIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateThingsBoardIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteThingsBoardIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateMyDevicesIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetMyDevicesIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateMyDevicesIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteMyDevicesIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateLoRaCloudIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetLoRaCloudIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateLoRaCloudIntegration(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
    def DeleteLoRaCloudIntegration(self, request, context):
        """Handler stub for the DeleteLoRaCloudIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CreateGCPPubSubIntegration(self, request, context):
        """Handler stub for the CreateGCPPubSubIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetGCPPubSubIntegration(self, request, context):
        """Handler stub for the GetGCPPubSubIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdateGCPPubSubIntegration(self, request, context):
        """Handler stub for the UpdateGCPPubSubIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeleteGCPPubSubIntegration(self, request, context):
        """Handler stub for the DeleteGCPPubSubIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CreateAWSSNSIntegration(self, request, context):
        """Handler stub for the CreateAWSSNSIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetAWSSNSIntegration(self, request, context):
        """Handler stub for the GetAWSSNSIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdateAWSSNSIntegration(self, request, context):
        """Handler stub for the UpdateAWSSNSIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeleteAWSSNSIntegration(self, request, context):
        """Handler stub for the DeleteAWSSNSIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CreateAzureServiceBusIntegration(self, request, context):
        """Handler stub for the CreateAzureServiceBusIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetAzureServiceBusIntegration(self, request, context):
        """Handler stub for the GetAzureServiceBusIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdateAzureServiceBusIntegration(self, request, context):
        """Handler stub for the UpdateAzureServiceBusIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeleteAzureServiceBusIntegration(self, request, context):
        """Handler stub for the DeleteAzureServiceBusIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def CreatePilotThingsIntegration(self, request, context):
        """Handler stub for the CreatePilotThingsIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetPilotThingsIntegration(self, request, context):
        """Handler stub for the GetPilotThingsIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def UpdatePilotThingsIntegration(self, request, context):
        """Handler stub for the UpdatePilotThingsIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def DeletePilotThingsIntegration(self, request, context):
        """Handler stub for the DeletePilotThingsIntegration RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def ListIntegrations(self, request, context):
        """Handler stub for the ListIntegrations RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GenerateMQTTIntegrationClientCertificate(self, request, context):
        """Handler stub for the GenerateMQTTIntegrationClientCertificate RPC; reports UNIMPLEMENTED to the client and raises NotImplementedError. Override in a subclass."""
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ApplicationServiceServicer_to_server(servicer, server):
    """Register *servicer*'s ApplicationService RPC handlers with *server*.

    Builds a unary-unary handler for every RPC of the
    'api.ApplicationService' service and attaches them to the gRPC server
    as one generic handler. Replaces the generator's fully unrolled
    handler dict with an equivalent data-driven table (same method names,
    same request/response message classes).

    Args:
        servicer: Object providing the ApplicationService methods
            (typically an ApplicationServiceServicer subclass).
        server: A grpc.Server to register the handlers on.
    """
    _pb2 = chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2
    # (method name, request message name, response message name);
    # a None response means the RPC returns google.protobuf.Empty.
    method_specs = [
        ('Create', 'CreateApplicationRequest', 'CreateApplicationResponse'),
        ('Get', 'GetApplicationRequest', 'GetApplicationResponse'),
        ('Update', 'UpdateApplicationRequest', None),
        ('Delete', 'DeleteApplicationRequest', None),
        ('List', 'ListApplicationRequest', 'ListApplicationResponse'),
        ('ListIntegrations', 'ListIntegrationRequest',
         'ListIntegrationResponse'),
        ('GenerateMQTTIntegrationClientCertificate',
         'GenerateMQTTIntegrationClientCertificateRequest',
         'GenerateMQTTIntegrationClientCertificateResponse'),
    ]
    # Every integration kind exposes the same Create/Get/Update/Delete
    # quartet; only Get has a dedicated response message.
    for kind in ('HTTP', 'InfluxDB', 'ThingsBoard', 'MyDevices',
                 'LoRaCloud', 'GCPPubSub', 'AWSSNS', 'AzureServiceBus',
                 'PilotThings'):
        for verb in ('Create', 'Get', 'Update', 'Delete'):
            name = verb + kind + 'Integration'
            response_name = name + 'Response' if verb == 'Get' else None
            method_specs.append((name, name + 'Request', response_name))
    rpc_method_handlers = {}
    for name, request_name, response_name in method_specs:
        if response_name is None:
            response_cls = google_dot_protobuf_dot_empty__pb2.Empty
        else:
            response_cls = getattr(_pb2, response_name)
        rpc_method_handlers[name] = grpc.unary_unary_rpc_method_handler(
            getattr(servicer, name),
            request_deserializer=getattr(_pb2, request_name).FromString,
            response_serializer=response_cls.SerializeToString,
        )
    generic_handler = grpc.method_handlers_generic_handler(
        'api.ApplicationService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class ApplicationService(object):
    @staticmethod
    def Create(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/Create via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Create',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateApplicationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateApplicationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Get(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/Get via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Get',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetApplicationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetApplicationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Update(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/Update via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Update',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateApplicationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def Delete(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/Delete via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/Delete',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteApplicationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def List(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/List via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/List',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListApplicationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListApplicationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/CreateHTTPIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateHTTPIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/GetHTTPIntegration via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetHTTPIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetHTTPIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/UpdateHTTPIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateHTTPIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteHTTPIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/DeleteHTTPIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteHTTPIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteHTTPIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/CreateInfluxDBIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateInfluxDBIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/GetInfluxDBIntegration via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetInfluxDBIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetInfluxDBIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/UpdateInfluxDBIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateInfluxDBIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteInfluxDBIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/DeleteInfluxDBIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteInfluxDBIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteInfluxDBIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/CreateThingsBoardIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateThingsBoardIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/GetThingsBoardIntegration via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetThingsBoardIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetThingsBoardIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/UpdateThingsBoardIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateThingsBoardIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteThingsBoardIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/DeleteThingsBoardIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteThingsBoardIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteThingsBoardIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/CreateMyDevicesIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateMyDevicesIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/GetMyDevicesIntegration via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetMyDevicesIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetMyDevicesIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/UpdateMyDevicesIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateMyDevicesIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteMyDevicesIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/DeleteMyDevicesIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteMyDevicesIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteMyDevicesIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateLoRaCloudIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/CreateLoRaCloudIntegration via grpc.experimental.unary_unary (response is google.protobuf.Empty)."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateLoRaCloudIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateLoRaCloudIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetLoRaCloudIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """One-off client invocation of /api.ApplicationService/GetLoRaCloudIntegration via grpc.experimental.unary_unary."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetLoRaCloudIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetLoRaCloudIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetLoRaCloudIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateLoRaCloudIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/UpdateLoRaCloudIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateLoRaCloudIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateLoRaCloudIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteLoRaCloudIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/DeleteLoRaCloudIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteLoRaCloudIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteLoRaCloudIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateGCPPubSubIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/CreateGCPPubSubIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateGCPPubSubIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateGCPPubSubIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetGCPPubSubIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/GetGCPPubSubIntegration; returns GetGCPPubSubIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetGCPPubSubIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetGCPPubSubIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetGCPPubSubIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateGCPPubSubIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/UpdateGCPPubSubIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateGCPPubSubIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateGCPPubSubIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteGCPPubSubIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/DeleteGCPPubSubIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteGCPPubSubIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteGCPPubSubIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateAWSSNSIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/CreateAWSSNSIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateAWSSNSIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateAWSSNSIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetAWSSNSIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/GetAWSSNSIntegration; returns GetAWSSNSIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetAWSSNSIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAWSSNSIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAWSSNSIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateAWSSNSIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/UpdateAWSSNSIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateAWSSNSIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateAWSSNSIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteAWSSNSIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/DeleteAWSSNSIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteAWSSNSIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteAWSSNSIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreateAzureServiceBusIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/CreateAzureServiceBusIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreateAzureServiceBusIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreateAzureServiceBusIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetAzureServiceBusIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/GetAzureServiceBusIntegration; returns GetAzureServiceBusIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetAzureServiceBusIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAzureServiceBusIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetAzureServiceBusIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdateAzureServiceBusIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/UpdateAzureServiceBusIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdateAzureServiceBusIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdateAzureServiceBusIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeleteAzureServiceBusIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/DeleteAzureServiceBusIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeleteAzureServiceBusIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeleteAzureServiceBusIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def CreatePilotThingsIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/CreatePilotThingsIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/CreatePilotThingsIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.CreatePilotThingsIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetPilotThingsIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/GetPilotThingsIntegration; returns GetPilotThingsIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GetPilotThingsIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetPilotThingsIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GetPilotThingsIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def UpdatePilotThingsIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/UpdatePilotThingsIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/UpdatePilotThingsIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.UpdatePilotThingsIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def DeletePilotThingsIntegration(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/DeletePilotThingsIntegration (unary-unary); response is Empty."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/DeletePilotThingsIntegration',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.DeletePilotThingsIntegrationRequest.SerializeToString,
            google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def ListIntegrations(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/ListIntegrations; returns ListIntegrationResponse."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/ListIntegrations',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListIntegrationRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.ListIntegrationResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GenerateMQTTIntegrationClientCertificate(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        """Invoke /api.ApplicationService/GenerateMQTTIntegrationClientCertificate; returns the certificate response."""
        return grpc.experimental.unary_unary(request, target, '/api.ApplicationService/GenerateMQTTIntegrationClientCertificate',
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GenerateMQTTIntegrationClientCertificateRequest.SerializeToString,
            chirpstack__api_dot_as__pb_dot_external_dot_api_dot_application__pb2.GenerateMQTTIntegrationClientCertificateResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| true | true |
f71fa2a559498de7857b82dce82c2cf35e13d842 | 1,475 | py | Python | heterogeneous_client.py | samiul272/fed_ml_proj | 16b6ab0e8a5a5c8ca1a7c6636ec167238f63b31b | [
"MIT"
] | null | null | null | heterogeneous_client.py | samiul272/fed_ml_proj | 16b6ab0e8a5a5c8ca1a7c6636ec167238f63b31b | [
"MIT"
] | null | null | null | heterogeneous_client.py | samiul272/fed_ml_proj | 16b6ab0e8a5a5c8ca1a7c6636ec167238f63b31b | [
"MIT"
] | null | null | null | import logging
class HeterogeneousClient:
    """One federated-learning participant holding its own local data split.

    The client keeps references to its local train/test data and sample
    count, and delegates the actual optimization/evaluation to the supplied
    ``model_trainer`` running on ``device``.
    """

    def __init__(self, client_idx, local_training_data, local_test_data, local_sample_number, args, device,
                 model_trainer):
        self.client_idx = client_idx
        self.local_training_data = local_training_data
        self.local_test_data = local_test_data
        self.local_sample_number = local_sample_number
        # Lazy %-style args: the message is only rendered when INFO is enabled.
        logging.info("self.local_sample_number = %s", self.local_sample_number)
        self.args = args
        self.device = device
        self.model_trainer = model_trainer

    def update_local_dataset(self, client_idx, local_training_data, local_test_data, local_sample_number):
        """Re-point this client object at another client's local data split."""
        self.client_idx = client_idx
        self.local_training_data = local_training_data
        self.local_test_data = local_test_data
        self.local_sample_number = local_sample_number

    def get_sample_number(self):
        """Return the number of local training samples."""
        return self.local_sample_number

    def train(self, w_global):
        """Load global weights ``w_global``, train on local data, return updated weights."""
        self.model_trainer.set_model_params(w_global)
        self.model_trainer.train(self.local_training_data, self.device, self.args)
        weights = self.model_trainer.get_model_params()
        return weights

    def local_test(self, b_use_test_dataset):
        """Evaluate the current model; on the test split if requested, else the training split."""
        if b_use_test_dataset:
            test_data = self.local_test_data
        else:
            test_data = self.local_training_data
        metrics = self.model_trainer.runtest(test_data, self.device, self.args)
        return metrics
| 36.875 | 107 | 0.711864 | import logging
class HeterogeneousClient:
    """Wraps a single client's local dataset and delegates work to a model trainer."""

    def __init__(self, client_idx, local_training_data, local_test_data, local_sample_number, args, device,
                 model_trainer):
        self.client_idx = client_idx
        self.local_training_data = local_training_data
        self.local_test_data = local_test_data
        self.local_sample_number = local_sample_number
        logging.info("self.local_sample_number = " + str(self.local_sample_number))
        self.args = args
        self.device = device
        self.model_trainer = model_trainer

    def update_local_dataset(self, client_idx, local_training_data, local_test_data, local_sample_number):
        """Switch this client object over to a different local train/test split."""
        self.client_idx = client_idx
        self.local_training_data = local_training_data
        self.local_test_data = local_test_data
        self.local_sample_number = local_sample_number

    def get_sample_number(self):
        """Number of samples in the local training set."""
        return self.local_sample_number

    def train(self, w_global):
        """Install the global weights, run local training, and hand back the result."""
        trainer = self.model_trainer
        trainer.set_model_params(w_global)
        trainer.train(self.local_training_data, self.device, self.args)
        return trainer.get_model_params()

    def local_test(self, b_use_test_dataset):
        """Run evaluation on the test split when asked for, otherwise the training split."""
        dataset = self.local_test_data if b_use_test_dataset else self.local_training_data
        return self.model_trainer.runtest(dataset, self.device, self.args)
| true | true |
f71fa355b3f48d5c42b444b5071f36c04ba8f953 | 5,989 | py | Python | src/utils.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | null | null | null | src/utils.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | null | null | null | src/utils.py | SimonPerche/PersonalitiesWars | 495803a5be5e9fde572c3f39086d8a3510c75f58 | [
"MIT"
] | 1 | 2022-03-08T22:07:50.000Z | 2022-03-08T22:07:50.000Z | from typing import Dict, List, Optional, Union
import contextlib
import asyncio
import discord
from discord.ext import pages
from database import DatabasePersonality, DatabaseDeck
# Set authorized guilds for slash command (return [] for global command - might take up to 1h to register)
def get_authorized_guild_ids():
    """Guild ids the slash commands are registered to (an empty list means global)."""
    authorized_guilds = [550631040826343427]
    return authorized_guilds
async def personalities_name_searcher(ctx: discord.AutocompleteContext):
    """Autocomplete: names of all personalities containing the typed value."""
    needle = ctx.value.lower()
    all_persos = DatabasePersonality.get().get_all_personalities()
    return [p['name'] for p in all_persos if needle in p['name'].lower()]
async def personalities_group_searcher(ctx: discord.AutocompleteContext):
    """Autocomplete: every personality group containing the typed value."""
    needle = ctx.value.lower()
    matches = []
    for group in DatabasePersonality.get().get_all_groups():
        if needle in group.lower():
            matches.append(group)
    return matches
async def wishlist_name_searcher(ctx: discord.AutocompleteContext):
    """Autocomplete over the invoking user's wishlist in the current guild."""
    needle = ctx.value.lower()
    ids = DatabaseDeck.get().get_wishlist(ctx.interaction.guild.id, ctx.interaction.user.id)
    return [p['name']
            for p in DatabasePersonality.get().get_multiple_perso_information(ids)
            if needle in p['name'].lower()]
async def shopping_list_name_searcher(ctx: discord.AutocompleteContext):
    """Autocomplete over the invoking user's shopping list in the current guild."""
    needle = ctx.value.lower()
    ids = DatabaseDeck.get().get_shopping_list(ctx.interaction.guild.id, ctx.interaction.user.id)
    return [p['name']
            for p in DatabasePersonality.get().get_multiple_perso_information(ids)
            if needle in p['name'].lower()]
async def deck_name_searcher(ctx: discord.AutocompleteContext):
    """Autocomplete over the invoking user's deck in the current guild."""
    needle = ctx.value.lower()
    ids = DatabaseDeck.get().get_user_deck(ctx.interaction.guild.id, ctx.interaction.user.id)
    return [p['name']
            for p in DatabasePersonality.get().get_multiple_perso_information(ids)
            if needle in p['name'].lower()]
async def badges_name_searcher(ctx: discord.AutocompleteContext):
    """Autocomplete: the current guild's badges whose name contains the typed value."""
    needle = ctx.value.lower()
    guild_badges = DatabaseDeck.get().get_all_badges(ctx.interaction.guild.id)
    return [b['name'] for b in guild_badges if needle in b['name'].lower()]
class ConfirmView(discord.ui.View):
    """A yes/no confirmation prompt answerable by a single authorized user.

    After an answer (or timeout), ``is_accepted`` holds True, False, or
    None when nobody answered before the timeout.
    """
    def __init__(self, authorized_user: discord.User, timeout: int = 60):
        super().__init__(timeout=timeout)
        # None until the authorized user presses one of the buttons.
        self.is_accepted = None
        self.authorized_user = authorized_user
    @discord.ui.button(label="Yes", style=discord.ButtonStyle.green)
    async def yes(
            self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        """Record acceptance, relabel the chosen button, and close the view."""
        self.is_accepted = True
        button.label = 'Yes (chosen)'
        await self.disable_update_and_stop(interaction)
    @discord.ui.button(label="No", style=discord.ButtonStyle.red)
    async def no(
            self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        """Record refusal, relabel the chosen button, and close the view."""
        self.is_accepted = False
        button.label = 'No (chosen)'
        await self.disable_update_and_stop(interaction)
    async def interaction_check(self, interaction: discord.Interaction):
        """Only let ``authorized_user`` interact; warn everyone else ephemerally."""
        if interaction.user != self.authorized_user:
            await interaction.response.send_message('You cannot answer, you are not the recipient.', ephemeral=True)
            return False
        return True
    async def on_timeout(self):
        # Gray out the buttons once the view expires without an answer.
        await self.disable()
    async def disable_update_and_stop(self, interaction: discord.Interaction):
        """Disable all buttons, push the updated view to Discord, and stop listening."""
        await self.disable()
        await interaction.response.edit_message(view=self)
        self.stop()
    async def disable(self):
        # Mark every child component (both buttons) as disabled.
        for child in self.children:
            child.disabled = True
class PaginatorCustomStartPage(pages.Paginator):
    """A pycord Paginator that can open on an arbitrary page instead of page 0."""
    def __init__(
            self,
            pages: Union[List[str], List[discord.Embed]],
            author_check=True,
            custom_view: Optional[discord.ui.View] = None,
            timeout: Optional[float] = 180.0,
            first_page: int = 0
    ) -> None:
        super().__init__(pages=pages, show_disabled=True, show_indicator=True, author_check=author_check,
                         disable_on_timeout=True, custom_view=custom_view, timeout=timeout)
        # Clamp the requested start page into the valid range [0, len(pages) - 1].
        if first_page >= len(pages):
            first_page = len(pages) - 1
        elif first_page < 0:
            first_page = 0
        self.current_page = first_page
        self.update_buttons()
    async def respond(self, interaction: discord.Interaction, ephemeral: bool = False):
        """Sends an interaction response or followup with the paginated items.
        Parameters
        ------------
        interaction: :class:`discord.Interaction`
            The interaction associated with this response.
        ephemeral: :class:`bool`
            Choose whether the message is ephemeral or not.
        Returns
        --------
        :class:`~discord.Interaction`
            The message sent with the paginator.
        """
        page = self.pages[self.current_page]
        self.user = interaction.user
        # Use a followup when the interaction has already been responded to.
        if interaction.response.is_done():
            msg = await interaction.followup.send(
                content=page if isinstance(page, str) else None, embed=page if isinstance(page, discord.Embed) else None, view=self, ephemeral=ephemeral
            )
        else:
            msg = await interaction.response.send_message(
                content=page if isinstance(page, str) else None, embed=page if isinstance(page, discord.Embed) else None, view=self, ephemeral=ephemeral
            )
        if isinstance(msg, (discord.WebhookMessage, discord.Message)):
            self.message = msg
        elif isinstance(msg, discord.Interaction):
            self.message = await msg.original_message()
        return self.message
# https://stackoverflow.com/questions/49622924/wait-for-timeout-or-event-being-set-for-asyncio-event
async def event_wait(event: asyncio.Event, timeout: float):
# suppress TimeoutError because we'll return False in case of timeout
with contextlib.suppress(asyncio.TimeoutError):
await asyncio.wait_for(event.wait(), timeout)
return event.is_set()
| 39.143791 | 152 | 0.688429 | from typing import Dict, List, Optional, Union
import contextlib
import asyncio
import discord
from discord.ext import pages
from database import DatabasePersonality, DatabaseDeck
def get_authorized_guild_ids():
    # Guilds the slash commands are registered to; an empty list would make
    # the commands global (which may take longer to propagate on Discord).
    return [550631040826343427]
async def personalities_name_searcher(ctx: discord.AutocompleteContext):
    # Case-insensitive substring match on all personality names.
    return [perso['name'] for perso in DatabasePersonality.get().get_all_personalities()
            if ctx.value.lower() in perso['name'].lower()]
async def personalities_group_searcher(ctx: discord.AutocompleteContext):
    # Case-insensitive substring match on all group names.
    return [group for group in DatabasePersonality.get().get_all_groups() if ctx.value.lower() in group.lower()]
async def wishlist_name_searcher(ctx: discord.AutocompleteContext):
    # Autocomplete restricted to the calling user's wishlist in this guild.
    ids = DatabaseDeck.get().get_wishlist(ctx.interaction.guild.id, ctx.interaction.user.id)
    personalities = DatabasePersonality.get().get_multiple_perso_information(ids)
    return [perso['name'] for perso in personalities
            if ctx.value.lower() in perso['name'].lower()]
async def shopping_list_name_searcher(ctx: discord.AutocompleteContext):
    # Autocomplete restricted to the calling user's shopping list in this guild.
    ids = DatabaseDeck.get().get_shopping_list(ctx.interaction.guild.id, ctx.interaction.user.id)
    personalities = DatabasePersonality.get().get_multiple_perso_information(ids)
    return [perso['name'] for perso in personalities
            if ctx.value.lower() in perso['name'].lower()]
async def deck_name_searcher(ctx: discord.AutocompleteContext):
    # Autocomplete restricted to the calling user's deck in this guild.
    ids = DatabaseDeck.get().get_user_deck(ctx.interaction.guild.id, ctx.interaction.user.id)
    personalities = DatabasePersonality.get().get_multiple_perso_information(ids)
    return [perso['name'] for perso in personalities
            if ctx.value.lower() in perso['name'].lower()]
async def badges_name_searcher(ctx: discord.AutocompleteContext):
    # Autocomplete over the current guild's badge names.
    badges = DatabaseDeck.get().get_all_badges(ctx.interaction.guild.id)
    return [badge['name'] for badge in badges if ctx.value.lower() in badge['name'].lower()]
class ConfirmView(discord.ui.View):
    """Yes/No confirmation view for one authorized user.

    ``is_accepted`` ends up True, False, or None (timed out with no answer).
    """
    def __init__(self, authorized_user: discord.User, timeout: int = 60):
        super().__init__(timeout=timeout)
        self.is_accepted = None
        self.authorized_user = authorized_user
    @discord.ui.button(label="Yes", style=discord.ButtonStyle.green)
    async def yes(
            self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        # Accept: store the answer, mark the pressed button, close the view.
        self.is_accepted = True
        button.label = 'Yes (chosen)'
        await self.disable_update_and_stop(interaction)
    @discord.ui.button(label="No", style=discord.ButtonStyle.red)
    async def no(
            self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        # Refuse: store the answer, mark the pressed button, close the view.
        self.is_accepted = False
        button.label = 'No (chosen)'
        await self.disable_update_and_stop(interaction)
    async def interaction_check(self, interaction: discord.Interaction):
        # Gate every button press: only the authorized user may answer.
        if interaction.user != self.authorized_user:
            await interaction.response.send_message('You cannot answer, you are not the recipient.', ephemeral=True)
            return False
        return True
    async def on_timeout(self):
        await self.disable()
    async def disable_update_and_stop(self, interaction: discord.Interaction):
        # Disable buttons, push the edited view, and stop the listener.
        await self.disable()
        await interaction.response.edit_message(view=self)
        self.stop()
    async def disable(self):
        for child in self.children:
            child.disabled = True
class PaginatorCustomStartPage(pages.Paginator):
    """Paginator variant that can start on an arbitrary page (clamped to range)."""
    def __init__(
            self,
            pages: Union[List[str], List[discord.Embed]],
            author_check=True,
            custom_view: Optional[discord.ui.View] = None,
            timeout: Optional[float] = 180.0,
            first_page: int = 0
    ) -> None:
        super().__init__(pages=pages, show_disabled=True, show_indicator=True, author_check=author_check,
                         disable_on_timeout=True, custom_view=custom_view, timeout=timeout)
        # Clamp first_page into [0, len(pages) - 1] before using it.
        if first_page >= len(pages):
            first_page = len(pages) - 1
        elif first_page < 0:
            first_page = 0
        self.current_page = first_page
        self.update_buttons()
    async def respond(self, interaction: discord.Interaction, ephemeral: bool = False):
        """Send the current page as an interaction response (or followup) and return the message."""
        page = self.pages[self.current_page]
        self.user = interaction.user
        # A followup is required once the interaction has been responded to.
        if interaction.response.is_done():
            msg = await interaction.followup.send(
                content=page if isinstance(page, str) else None, embed=page if isinstance(page, discord.Embed) else None, view=self, ephemeral=ephemeral
            )
        else:
            msg = await interaction.response.send_message(
                content=page if isinstance(page, str) else None, embed=page if isinstance(page, discord.Embed) else None, view=self, ephemeral=ephemeral
            )
        if isinstance(msg, (discord.WebhookMessage, discord.Message)):
            self.message = msg
        elif isinstance(msg, discord.Interaction):
            self.message = await msg.original_message()
        return self.message
async def event_wait(event: asyncio.Event, timeout: float):
    # Wait at most `timeout` seconds for the event; a timeout is not an error,
    # the caller just gets the event's final state back.
    with contextlib.suppress(asyncio.TimeoutError):
        await asyncio.wait_for(event.wait(), timeout)
    return event.is_set()
| true | true |
f71fa39892b1a2c86c574196d784cf419690c32e | 449 | py | Python | app/commons/errorCodes.py | handdola/ai-chatbot | f0a336afb873db10b7a5f068b4e1eaa07bf62967 | [
"MIT"
] | 3 | 2017-12-27T19:29:27.000Z | 2018-01-07T02:51:44.000Z | app/commons/errorCodes.py | handdola/ai-chatbot | f0a336afb873db10b7a5f068b4e1eaa07bf62967 | [
"MIT"
] | 108 | 2018-03-26T05:44:22.000Z | 2020-12-14T15:08:38.000Z | app/commons/errorCodes.py | handdola/ai-chatbot | f0a336afb873db10b7a5f068b4e1eaa07bf62967 | [
"MIT"
] | 1 | 2019-05-30T10:50:49.000Z | 2019-05-30T10:50:49.000Z | emptyInput = {"errorCode": 601, "description": "empty input"}
# 6xx: input-validation errors.
InvalidInput = {"errorCode": 602, "description": "Invalid input"}
# 7xx: intent-classification errors.
UnidentifiedIntent = {
    "errorCode": 701,
    "description": "Can't identify the intent"}
NotEnoughData = {
    "errorCode": 702,
    "description": "Not enough Training Data. Please Add more stories"}
# 8xx: entity-extraction errors.
UnableToextractentities = {"errorCode": 801,
                           "description": "Unable extract entities"}
| 34.538462 | 71 | 0.659243 | emptyInput = {"errorCode": 601, "description": "empty input"}
# Error-code dictionaries returned to API clients, grouped by hundreds:
# 6xx input validation, 7xx intent classification, 8xx entity extraction.
InvalidInput = {"errorCode": 602, "description": "Invalid input"}
UnidentifiedIntent = {
    "errorCode": 701,
    "description": "Can't identify the intent"}
NotEnoughData = {
    "errorCode": 702,
    "description": "Not enough Training Data. Please Add more stories"}
UnableToextractentities = {"errorCode": 801,
                           "description": "Unable extract entities"}
| true | true |
f71fa3cd63c33d94a24ea578cb181d5f2238b651 | 108,132 | py | Python | airflow/jobs.py | yujiantao/incubator-airflow | 97ac37d0b936fd565b113b79f418ff25b245de14 | [
"Apache-2.0"
] | 1 | 2020-05-03T04:34:08.000Z | 2020-05-03T04:34:08.000Z | airflow/jobs.py | yujiantao/incubator-airflow | 97ac37d0b936fd565b113b79f418ff25b245de14 | [
"Apache-2.0"
] | 4 | 2018-03-20T21:24:26.000Z | 2020-05-03T04:23:02.000Z | airflow/jobs.py | yujiantao/incubator-airflow | 97ac37d0b936fd565b113b79f418ff25b245de14 | [
"Apache-2.0"
] | 1 | 2018-10-23T08:58:10.000Z | 2018-10-23T08:58:10.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from time import sleep
import six
from past.builtins import basestring
from sqlalchemy import (Column, Index, Integer, String, and_, func, not_, or_)
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from airflow import configuration as conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun
from airflow.models.dagpickle import DagPickle
from airflow.settings import Stats
from airflow.task.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import create_session, provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.net import get_hostname
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
# Shorthand aliases for the SQLAlchemy declarative base and the common
# identifier column length used by Airflow models.
Base = models.base.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
    """
    Abstract class to be derived for jobs. Jobs are processing items with state
    and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
    and end time.
    """
    __tablename__ = "job"
    # ORM columns persisted to the `job` table.
    id = Column(Integer, primary_key=True)
    dag_id = Column(String(ID_LEN),)
    state = Column(String(20))
    job_type = Column(String(30))
    start_date = Column(UtcDateTime())
    end_date = Column(UtcDateTime())
    latest_heartbeat = Column(UtcDateTime())
    executor_class = Column(String(500))
    hostname = Column(String(500))
    unixname = Column(String(1000))
    # Single-table inheritance: subclasses are discriminated via `job_type`.
    __mapper_args__ = {
        'polymorphic_on': job_type,
        'polymorphic_identity': 'BaseJob'
    }
    __table_args__ = (
        Index('job_type_heart', job_type, latest_heartbeat),
        Index('idx_job_state_heartbeat', state, latest_heartbeat),
    )
    def __init__(
            self,
            # NOTE: both defaults are evaluated once, at function definition
            # time — every BaseJob created without arguments shares the same
            # executor instance and the heartrate read at import time.
            executor=executors.GetDefaultExecutor(),
            heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
            *args, **kwargs):
        self.hostname = get_hostname()
        self.executor = executor
        self.executor_class = executor.__class__.__name__
        self.start_date = timezone.utcnow()
        self.latest_heartbeat = timezone.utcnow()
        self.heartrate = heartrate
        self.unixname = getpass.getuser()
        self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
        # Remaining keyword arguments are column values for the ORM base.
        super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(timezone.utcnow() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
    @provide_session
    def kill(self, session=None):
        """Mark this job finished in the DB, run the on_kill hook, then raise."""
        job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
        job.end_date = timezone.utcnow()
        try:
            self.on_kill()
        except Exception as e:
            # The hook is best-effort; a failure must not prevent shutdown.
            self.log.error('on_kill() method failed: {}'.format(e))
        session.merge(job)
        session.commit()
        # Always raise so the caller's run loop unwinds.
        raise AirflowException("Job shut down externally.")
    def on_kill(self):
        """
        Will be called when an external kill command is received.

        Default implementation is a no-op; subclasses override to clean up.
        """
        pass
    def heartbeat_callback(self, session=None):
        """Hook invoked on every heartbeat; no-op by default, overridden by subclasses."""
        pass
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (timezone.utcnow() -
job.latest_heartbeat).total_seconds())
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e))
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
with create_session() as session:
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
try:
self._execute()
# In case of max runs or max duration
self.state = State.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
self.state = State.SUCCESS
except Exception:
self.state = State.FAILED
raise
finally:
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
@provide_session
def reset_state_for_orphaned_tasks(self, filter_by_dag_run=None, session=None):
"""
This function checks if there are any tasks in the dagrun (or all)
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
The batch option is for performance reasons as the queries are made in
sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:type filter_by_dag_run: models.DagRun
:return: the TIs reset (in expired SQLAlchemy state)
:rtype: List(TaskInstance)
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running_tis = self.executor.running
resettable_states = [State.SCHEDULED, State.QUEUED]
TI = models.TaskInstance
DR = models.DagRun
if filter_by_dag_run is None:
resettable_tis = (
session
.query(TI)
.join(
DR,
and_(
TI.dag_id == DR.dag_id,
TI.execution_date == DR.execution_date))
.filter(
DR.state == State.RUNNING,
DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'),
TI.state.in_(resettable_states))).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states,
session=session)
tis_to_reset = []
# Can't use an update here since it doesn't support joins
for ti in resettable_tis:
if ti.key not in queued_tis and ti.key not in running_tis:
tis_to_reset.append(ti)
if len(tis_to_reset) == 0:
return []
def query(result, items):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in items])
reset_tis = (
session
.query(TI)
.filter(or_(*filter_for_tis), TI.state.in_(resettable_states))
.with_for_update()
.all())
for ti in reset_tis:
ti.state = State.NONE
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query,
tis_to_reset,
[],
self.max_tis_per_query)
task_instance_str = '\n\t'.join(
["{}".format(x) for x in reset_tis])
session.commit()
self.log.info(
"Reset the following %s TaskInstances:\n\t%s",
len(reset_tis), task_instance_str
)
return reset_tis
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
    """Helps call SchedulerJob.process_file() in a separate process.

    Wraps a multiprocessing.Process plus a result queue so the scheduler can
    parse one DAG definition file out-of-process and poll for completion.
    """

    # Counter that increments every time an instance of this class is created;
    # used to give each launched process a unique name.
    class_creation_counter = 0

    def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
        """
        :param file_path: a Python file containing Airflow DAG definitions
        :type file_path: unicode
        :param pickle_dags: whether to serialize the DAG objects to the DB
        :type pickle_dags: bool
        :param dag_id_white_list: If specified, only look at these DAG ID's
        :type dag_id_white_list: list[unicode]
        :param zombies: zombie task instances to kill
        :type zombies: list[SimpleTaskInstance]
        """
        self._file_path = file_path
        # Queue that's used to pass results from the child process.
        self._result_queue = multiprocessing.Queue()
        # The process that was launched to process the given file.
        self._process = None
        self._dag_id_white_list = dag_id_white_list
        self._pickle_dags = pickle_dags
        self._zombies = zombies
        # The result of Scheduler.process_file(file_path).
        self._result = None
        # Whether the process is done running.
        self._done = False
        # When the process started.
        self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
        # by this processor instance
        self._instance_id = DagFileProcessor.class_creation_counter
        DagFileProcessor.class_creation_counter += 1

    @property
    def file_path(self):
        # Path of the DAG definition file this processor works on.
        return self._file_path

    @staticmethod
    def _launch_process(result_queue,
                        file_path,
                        pickle_dags,
                        dag_id_white_list,
                        thread_name,
                        zombies):
        """
        Launch a process to process the given file.

        :param result_queue: the queue to use for passing back the result
        :type result_queue: multiprocessing.Queue
        :param file_path: the file to process
        :type file_path: unicode
        :param pickle_dags: whether to pickle the DAGs found in the file and
            save them to the DB
        :type pickle_dags: bool
        :param dag_id_white_list: if specified, only examine DAG ID's that are
            in this list
        :type dag_id_white_list: list[unicode]
        :param thread_name: the name to use for the process that is launched
        :type thread_name: unicode
        :param zombies: zombie task instances to kill
        :type zombies: list[SimpleTaskInstance]
        :return: the process that was launched
        :rtype: multiprocessing.Process
        """
        def helper():
            # This helper runs in the newly created process
            log = logging.getLogger("airflow.processor")

            stdout = StreamLogWriter(log, logging.INFO)
            stderr = StreamLogWriter(log, logging.WARN)

            set_context(log, file_path)

            try:
                # redirect stdout/stderr to log
                sys.stdout = stdout
                sys.stderr = stderr

                # Re-configure the ORM engine as there are issues with multiple processes
                settings.configure_orm()

                # Change the thread name to differentiate log lines. This is
                # really a separate process, but changing the name of the
                # process doesn't work, so changing the thread name instead.
                threading.current_thread().name = thread_name
                start_time = time.time()

                log.info("Started process (PID=%s) to work on %s",
                         os.getpid(), file_path)
                scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
                result = scheduler_job.process_file(file_path,
                                                    zombies,
                                                    pickle_dags)
                result_queue.put(result)
                end_time = time.time()
                log.info(
                    "Processing %s took %.3f seconds", file_path, end_time - start_time
                )
            except Exception:
                # Log exceptions through the logging framework.
                log.exception("Got an exception! Propagating...")
                raise
            finally:
                # Always restore the real stdout/stderr, even on failure.
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
                # We re-initialized the ORM within this Process above so we need to
                # tear it down manually here
                settings.dispose_orm()

        p = multiprocessing.Process(target=helper,
                                    args=(),
                                    name="{}-Process".format(thread_name))
        p.start()
        return p

    def start(self):
        """
        Launch the process and start processing the DAG.
        """
        self._process = DagFileProcessor._launch_process(
            self._result_queue,
            self.file_path,
            self._pickle_dags,
            self._dag_id_white_list,
            "DagFileProcessor{}".format(self._instance_id),
            self._zombies)
        self._start_time = timezone.utcnow()

    def terminate(self, sigkill=False):
        """
        Terminate (and then kill) the process launched to process the file.

        :param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
        :type sigkill: bool
        :raises AirflowException: if the process was never started
        """
        if self._process is None:
            raise AirflowException("Tried to call stop before starting!")
        # The queue will likely get corrupted, so remove the reference
        self._result_queue = None
        self._process.terminate()
        # Arbitrarily wait 5s for the process to die
        self._process.join(5)
        if sigkill and self._process.is_alive():
            self.log.warning("Killing PID %s", self._process.pid)
            os.kill(self._process.pid, signal.SIGKILL)

    @property
    def pid(self):
        """
        :return: the PID of the process launched to process the given file
        :rtype: int
        :raises AirflowException: if the process was never started
        """
        if self._process is None:
            raise AirflowException("Tried to get PID before starting!")
        return self._process.pid

    @property
    def exit_code(self):
        """
        After the process is finished, this can be called to get the return code

        :return: the exit code of the process
        :rtype: int
        :raises AirflowException: if the process has not finished yet
        """
        if not self._done:
            raise AirflowException("Tried to call retcode before process was finished!")
        return self._process.exitcode

    @property
    def done(self):
        """
        Check if the process launched to process this file is done.

        :return: whether the process is finished running
        :rtype: bool
        :raises AirflowException: if the process was never started
        """
        if self._process is None:
            raise AirflowException("Tried to see if it's done before starting!")

        if self._done:
            return True

        # In case result queue is corrupted.
        if self._result_queue and not self._result_queue.empty():
            self._result = self._result_queue.get_nowait()
            self._done = True
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            return True

        # Potential error case when process dies
        if self._result_queue and not self._process.is_alive():
            self._done = True
            # Get the object from the queue or else join() can hang.
            # NOTE(review): empty() on a multiprocessing.Queue is inherently
            # racy; a result put just after this check would be dropped.
            if not self._result_queue.empty():
                self._result = self._result_queue.get_nowait()
            self.log.debug("Waiting for %s", self._process)
            self._process.join()
            return True

        return False

    @property
    def result(self):
        """
        :return: result of running SchedulerJob.process_file()
        :rtype: SimpleDag
        :raises AirflowException: if the process has not finished yet
        """
        if not self.done:
            raise AirflowException("Tried to get the result before it's done!")
        return self._result

    @property
    def start_time(self):
        """
        :return: when this started to process the file
        :rtype: datetime
        :raises AirflowException: if the process was never started
        """
        if self._start_time is None:
            raise AirflowException("Tried to get start time before it started!")
        return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
    def __init__(
            self,
            dag_id=None,
            dag_ids=None,
            subdir=settings.DAGS_FOLDER,
            num_runs=-1,
            processor_poll_interval=1.0,
            run_duration=None,
            do_pickle=False,
            log=None,
            *args, **kwargs):
        """
        :param dag_id: if specified, only schedule tasks with this DAG ID
        :type dag_id: unicode
        :param dag_ids: if specified, only schedule tasks with these DAG IDs
        :type dag_ids: list[unicode]
        :param subdir: directory containing Python files with Airflow DAG
            definitions, or a specific path to a file
        :type subdir: unicode
        :param num_runs: The number of times to try to schedule each DAG file.
            -1 for unlimited within the run_duration.
        :type num_runs: int
        :param processor_poll_interval: The number of seconds to wait between
            polls of running processors
        :type processor_poll_interval: int
        :param run_duration: how long to run (in seconds) before exiting; when
            None, the ``scheduler.run_duration`` config value is used.
        :type run_duration: int
        :param do_pickle: once a DAG object is obtained by executing the Python
            file, whether to serialize the DAG object to the DB
        :type do_pickle: bool
        :param log: logger to use instead of the class-provided one
        """
        # for BaseJob compatibility
        self.dag_id = dag_id
        self.dag_ids = [dag_id] if dag_id else []
        if dag_ids:
            self.dag_ids.extend(dag_ids)

        self.subdir = subdir

        self.num_runs = num_runs
        self.run_duration = run_duration
        self._processor_poll_interval = processor_poll_interval

        self.do_pickle = do_pickle
        super(SchedulerJob, self).__init__(*args, **kwargs)

        # Deliberately set AFTER super().__init__: the scheduler overrides the
        # generic JOB_HEARTBEAT_SEC heartrate that BaseJob configured.
        self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
        self.max_threads = conf.getint('scheduler', 'max_threads')

        if log:
            self._log = log

        # SQLite can't handle concurrent writers, so some code paths below
        # (e.g. row-level locking) are switched off when it's the backend.
        self.using_sqlite = False
        if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
            self.using_sqlite = True

        self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
        if run_duration is None:
            self.run_duration = conf.getint('scheduler',
                                            'run_duration')

        self.processor_agent = None
        self._last_loop = False

        # NOTE(review): installing process-wide signal handlers here means the
        # last-constructed SchedulerJob wins; assumes one scheduler per process.
        signal.signal(signal.SIGINT, self._exit_gracefully)
        signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal {}".format(signum))
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
    @provide_session
    def manage_slas(self, dag, session=None):
        """
        Finding all tasks that have SLAs defined, and sending alert emails
        where needed. New SLA misses are also recorded in the database.

        Where assuming that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.

        :param dag: the DAG whose tasks are checked for SLA misses
        :param session: ORM session, injected by @provide_session
        """
        # Fast exit: nothing to do if no task in the DAG declares an SLA.
        if not any([ti.sla for ti in dag.tasks]):
            self.log.info(
                "Skipping SLA check for %s because no tasks in DAG have SLAs",
                dag
            )
            return

        TI = models.TaskInstance
        # Subquery: per task, the latest execution_date that finished
        # (SUCCESS or SKIPPED) — the point from which to scan forward.
        sq = (
            session
            .query(
                TI.task_id,
                func.max(TI.execution_date).label('max_ti'))
            .with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
            .filter(TI.dag_id == dag.dag_id)
            .filter(or_(
                TI.state == State.SUCCESS,
                TI.state == State.SKIPPED))
            .filter(TI.task_id.in_(dag.task_ids))
            .group_by(TI.task_id).subquery('sq')
        )

        max_tis = session.query(TI).filter(
            TI.dag_id == dag.dag_id,
            TI.task_id == sq.c.task_id,
            TI.execution_date == sq.c.max_ti,
        ).all()

        ts = timezone.utcnow()
        SlaMiss = models.SlaMiss
        for ti in max_tis:
            task = dag.get_task(ti.task_id)
            dttm = ti.execution_date
            if task.sla:
                # Walk each schedule after the last finished run; every period
                # whose deadline (period end + sla) has passed is a miss.
                dttm = dag.following_schedule(dttm)
                while dttm < timezone.utcnow():
                    following_schedule = dag.following_schedule(dttm)
                    if following_schedule + task.sla < timezone.utcnow():
                        # merge() makes recording idempotent across scheduler
                        # loops (same primary key -> no duplicate row).
                        session.merge(models.SlaMiss(
                            task_id=ti.task_id,
                            dag_id=ti.dag_id,
                            execution_date=dttm,
                            timestamp=ts))
                    dttm = dag.following_schedule(dttm)
        session.commit()

        # All recorded misses for this DAG that haven't been notified yet.
        slas = (
            session
            .query(SlaMiss)
            .filter(SlaMiss.notification_sent == False)  # noqa: E712
            .filter(SlaMiss.dag_id == dag.dag_id)
            .all()
        )

        if slas:
            sla_dates = [sla.execution_date for sla in slas]
            qry = (
                session
                .query(TI)
                .filter(TI.state != State.SUCCESS)
                .filter(TI.execution_date.in_(sla_dates))
                .filter(TI.dag_id == dag.dag_id)
                .all()
            )
            blocking_tis = []
            for ti in qry:
                if ti.task_id in dag.task_ids:
                    ti.task = dag.get_task(ti.task_id)
                    blocking_tis.append(ti)
                else:
                    # NOTE(review): TIs whose task no longer exists in the DAG
                    # are deleted here as a side effect of SLA handling.
                    session.delete(ti)
                    session.commit()

            task_list = "\n".join([
                sla.task_id + ' on ' + sla.execution_date.isoformat()
                for sla in slas])
            blocking_task_list = "\n".join([
                ti.task_id + ' on ' + ti.execution_date.isoformat()
                for ti in blocking_tis])
            # Track whether email or any alert notification sent
            # We consider email or the alert callback as notifications
            email_sent = False
            notification_sent = False
            if dag.sla_miss_callback:
                # Execute the alert callback
                self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
                try:
                    dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
                                          blocking_tis)
                    notification_sent = True
                except Exception:
                    # A broken user callback must not break the scheduler loop.
                    self.log.exception("Could not call sla_miss_callback for DAG %s",
                                       dag.dag_id)
            email_content = """\
            Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n<code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}<code></pre>
            """.format(bug=asciiart.bug, **locals())
            emails = set()
            for task in dag.tasks:
                if task.email:
                    if isinstance(task.email, basestring):
                        emails |= set(get_email_address_list(task.email))
                    elif isinstance(task.email, (list, tuple)):
                        emails |= set(task.email)
            if emails and len(slas):
                try:
                    send_email(
                        emails,
                        "[airflow] SLA miss on DAG=" + dag.dag_id,
                        email_content)
                    email_sent = True
                    notification_sent = True
                except Exception:
                    # Mail failures are logged but don't fail the scheduler.
                    self.log.exception("Could not send SLA Miss email notification for"
                                       " DAG %s", dag.dag_id)
            # If we sent any notification, update the sla_miss table
            if notification_sent:
                for sla in slas:
                    if email_sent:
                        sla.email_sent = True
                    sla.notification_sent = True
                    session.merge(sla)
            session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: models.Dagbag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
    @provide_session
    def create_dag_run(self, dag, session=None):
        """
        This method checks whether a new DagRun needs to be created
        for a DAG based on scheduling interval.
        Returns DagRun if one is scheduled. Otherwise returns None.

        :param dag: the DAG to possibly create a run for
        :param session: ORM session, injected by @provide_session
        """
        if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
            active_runs = DagRun.find(
                dag_id=dag.dag_id,
                state=State.RUNNING,
                external_trigger=False,
                session=session
            )
            # return if already reached maximum active runs and no timeout setting
            if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
                return
            # Fail runs that exceeded dagrun_timeout; they free up active-run
            # slots for the capacity check below.
            timedout_runs = 0
            for dr in active_runs:
                if (
                        dr.start_date and dag.dagrun_timeout and
                        dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
                    dr.state = State.FAILED
                    dr.end_date = timezone.utcnow()
                    dag.handle_callback(dr, success=False, reason='dagrun_timeout',
                                        session=session)
                    timedout_runs += 1
            session.commit()
            if len(active_runs) - timedout_runs >= dag.max_active_runs:
                return

            # this query should be replaced by find dagrun
            qry = (
                session.query(func.max(DagRun.execution_date))
                .filter_by(dag_id=dag.dag_id)
                .filter(or_(
                    DagRun.external_trigger == False,  # noqa: E712
                    # add % as a wildcard for the like query
                    DagRun.run_id.like(DagRun.ID_PREFIX + '%')
                ))
            )
            last_scheduled_run = qry.scalar()

            # don't schedule @once again
            if dag.schedule_interval == '@once' and last_scheduled_run:
                return None

            # don't do scheduler catchup for dag's that don't have dag.catchup = True
            if not (dag.catchup or dag.schedule_interval == '@once'):
                # The logic is that we move start_date up until
                # one period before, so that timezone.utcnow() is AFTER
                # the period end, and the job can be created...
                now = timezone.utcnow()
                next_start = dag.following_schedule(now)
                last_start = dag.previous_schedule(now)
                if next_start <= now:
                    new_start = last_start
                else:
                    new_start = dag.previous_schedule(last_start)

                # Only move the start_date forward, never backward.
                if dag.start_date:
                    if new_start >= dag.start_date:
                        dag.start_date = new_start
                else:
                    dag.start_date = new_start

            next_run_date = None
            if not last_scheduled_run:
                # First run
                task_start_dates = [t.start_date for t in dag.tasks]
                if task_start_dates:
                    next_run_date = dag.normalize_schedule(min(task_start_dates))
                    self.log.debug(
                        "Next run date based on tasks %s",
                        next_run_date
                    )
            else:
                next_run_date = dag.following_schedule(last_scheduled_run)

            # make sure backfills are also considered
            last_run = dag.get_last_dagrun(session=session)
            if last_run and next_run_date:
                while next_run_date <= last_run.execution_date:
                    next_run_date = dag.following_schedule(next_run_date)

            # don't ever schedule prior to the dag's start_date
            if dag.start_date:
                next_run_date = (dag.start_date if not next_run_date
                                 else max(next_run_date, dag.start_date))
                if next_run_date == dag.start_date:
                    next_run_date = dag.normalize_schedule(dag.start_date)

                self.log.debug(
                    "Dag start date: %s. Next run date: %s",
                    dag.start_date, next_run_date
                )

            # don't ever schedule in the future
            # NOTE(review): if next_run_date is still None here (no tasks and
            # no dag.start_date) this comparison raises on Python 3 — confirm
            # whether that combination can occur for scheduled DAGs.
            if next_run_date > timezone.utcnow():
                return

            # this structure is necessary to avoid a TypeError from concatenating
            # NoneType
            if dag.schedule_interval == '@once':
                period_end = next_run_date
            elif next_run_date:
                period_end = dag.following_schedule(next_run_date)

            # Don't schedule a dag beyond its end_date (as specified by the dag param)
            if next_run_date and dag.end_date and next_run_date > dag.end_date:
                return

            # Don't schedule a dag beyond its end_date (as specified by the task params)
            # Get the min task end date, which may come from the dag.default_args
            min_task_end_date = []
            task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
            if task_end_dates:
                min_task_end_date = min(task_end_dates)
            if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
                return

            # Only create the run once its full schedule period has elapsed.
            if next_run_date and period_end and period_end <= timezone.utcnow():
                next_run = dag.create_dagrun(
                    run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
                    execution_date=next_run_date,
                    start_date=timezone.utcnow(),
                    state=State.RUNNING,
                    external_trigger=False
                )
                return next_run
    @provide_session
    def _process_task_instances(self, dag, queue, session=None):
        """
        This method schedules the tasks for a single DAG by looking at the
        active DAG runs and adding task instances that should run to the
        queue.

        :param dag: the DAG to schedule task instances for
        :param queue: mutable list that runnable TI keys are appended to
        :param session: ORM session, injected by @provide_session
        """
        # update the state of the previously active dag runs
        dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
        active_dag_runs = []
        for run in dag_runs:
            self.log.info("Examining DAG run %s", run)
            # don't consider runs that are executed in the future
            if run.execution_date > timezone.utcnow():
                self.log.error(
                    "Execution date is in future: %s",
                    run.execution_date
                )
                continue

            if len(active_dag_runs) >= dag.max_active_runs:
                self.log.info("Active dag runs > max_active_run.")
                continue

            # skip backfill dagruns for now as long as they are not really scheduled
            if run.is_backfill:
                continue

            # todo: run.dag is transient but needs to be set
            run.dag = dag
            # todo: preferably the integrity check happens at dag collection time
            run.verify_integrity(session=session)
            run.update_state(session=session)
            if run.state == State.RUNNING:
                # Detach from the session so later session operations don't
                # expire/reload the run while we iterate over it.
                make_transient(run)
                active_dag_runs.append(run)

        for run in active_dag_runs:
            self.log.debug("Examining active DAG run: %s", run)
            # this needs a fresh session sometimes tis get detached
            tis = run.get_task_instances(state=(State.NONE,
                                                State.UP_FOR_RETRY))

            # this loop is quite slow as it uses are_dependencies_met for
            # every task (in ti.is_runnable). This is also called in
            # update_state above which has already checked these tasks
            for ti in tis:
                task = dag.get_task(ti.task_id)

                # fixme: ti.task is transient but needs to be set
                ti.task = task

                # future: remove adhoc
                if task.adhoc:
                    continue

                # flag_upstream_failed=True lets this call also mark TIs as
                # upstream_failed/skipped as a side effect of the dep check.
                if ti.are_dependencies_met(
                        dep_context=DepContext(flag_upstream_failed=True),
                        session=session):
                    self.log.debug('Queuing task: %s', ti)
                    queue.append(ti.key)
    @provide_session
    def _change_state_for_tis_without_dagrun(self,
                                             simple_dag_bag,
                                             old_states,
                                             new_state,
                                             session=None):
        """
        For all DAG IDs in the SimpleDagBag, look for task instances in the
        old_states and set them to new_state if the corresponding DagRun
        does not exist or exists but is not in the running state. This
        normally should not happen, but it can if the state of DagRuns are
        changed manually.

        :param old_states: examine TaskInstances in this state
        :type old_states: list[State]
        :param new_state: set TaskInstances to this state
        :type new_state: State
        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag and with states in the old_state will be examined
        :type simple_dag_bag: SimpleDagBag
        """
        tis_changed = 0
        # Outer join so TIs whose DagRun row is missing entirely are included
        # (DagRun.state IS NULL in that case).
        query = session \
            .query(models.TaskInstance) \
            .outerjoin(models.DagRun, and_(
                models.TaskInstance.dag_id == models.DagRun.dag_id,
                models.TaskInstance.execution_date == models.DagRun.execution_date)) \
            .filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
            .filter(models.TaskInstance.state.in_(old_states)) \
            .filter(or_(
                models.DagRun.state != State.RUNNING,
                models.DagRun.state.is_(None)))
        if self.using_sqlite:
            # SQLite can't UPDATE with a joined subquery; fall back to
            # row-by-row updates under SELECT ... FOR UPDATE.
            tis_to_change = query \
                .with_for_update() \
                .all()
            for ti in tis_to_change:
                ti.set_state(new_state, session=session)
                tis_changed += 1
        else:
            # Single bulk UPDATE keyed on the subquery's primary-key columns.
            subq = query.subquery()
            tis_changed = session \
                .query(models.TaskInstance) \
                .filter(and_(
                    models.TaskInstance.dag_id == subq.c.dag_id,
                    models.TaskInstance.task_id == subq.c.task_id,
                    models.TaskInstance.execution_date ==
                    subq.c.execution_date)) \
                .update({models.TaskInstance.state: new_state},
                        synchronize_session=False)
            session.commit()

        if tis_changed > 0:
            self.log.warning(
                "Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
                tis_changed, new_state
            )
@provide_session
def __get_task_concurrency_map(self, states, session=None):
"""
Returns a map from tasks to number in the states list given.
:param states: List of states to query for
:type states: List[State]
:return: A map from (dag_id, task_id) to count of tasks in states
:rtype: Dict[[String, String], Int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
task_map[(dag_id, task_id)] = count
return task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: List[TaskInstance]
"""
executable_tis = []
# Get all the queued task instances from associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711
not_(DM.is_paused)))
)
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.log.info("{} tasks up for execution:\n\t{}"
.format(len(task_instances_to_examine),
task_instance_str))
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
task_concurrency_map = self.__get_task_concurrency_map(
states=states_to_count_as_running, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
open_slots = 0
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name={pool}) with {open_slots} "
"open slots and {num_queued} task instances in queue".format(
**locals()
)
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# DAG IDs with running tasks that equal the concurrency limit of the dag
dag_id_to_possibly_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
if dag_id not in dag_id_to_possibly_running_task_count:
dag_id_to_possibly_running_task_count[dag_id] = \
DAG.get_num_task_instances(
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids,
states=states_to_count_as_running,
session=session)
current_task_concurrency = dag_id_to_possibly_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_task_concurrency, task_concurrency_limit
)
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, task_concurrency_limit
)
continue
task_concurrency = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency is not None:
num_running = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if num_running >= task_concurrency:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
else:
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_id_to_possibly_running_task_count[dag_id] += 1
task_instance_str = "\n\t".join(
["{}".format(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these dont expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
    @provide_session
    def _change_state_for_executable_task_instances(self, task_instances,
                                                    acceptable_states, session=None):
        """
        Changes the state of task instances in the list with one of the given states
        to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.

        :param task_instances: TaskInstances to change the state of
        :type task_instances: List[TaskInstance]
        :param acceptable_states: Filters the TaskInstances updated to be in these states
        :type acceptable_states: Iterable[State]
        :return: List[SimpleTaskInstance]
        """
        if len(task_instances) == 0:
            session.commit()
            return []

        TI = models.TaskInstance
        # Re-select the TIs by primary key so we can lock exactly these rows.
        filter_for_ti_state_change = (
            [and_(
                TI.dag_id == ti.dag_id,
                TI.task_id == ti.task_id,
                TI.execution_date == ti.execution_date)
                for ti in task_instances])
        ti_query = (
            session
            .query(TI)
            .filter(or_(*filter_for_ti_state_change)))

        if None in acceptable_states:
            # NULL state needs an explicit IS NULL test; IN () won't match it.
            ti_query = ti_query.filter(
                or_(TI.state == None, TI.state.in_(acceptable_states))  # noqa: E711
            )
        else:
            ti_query = ti_query.filter(TI.state.in_(acceptable_states))

        # Row locks make the read-check-update atomic against concurrent
        # schedulers; TIs whose state changed since selection drop out here.
        tis_to_set_to_queued = (
            ti_query
            .with_for_update()
            .all())
        if len(tis_to_set_to_queued) == 0:
            self.log.info("No tasks were able to have their state changed to queued.")
            session.commit()
            return []

        # set TIs to queued state
        for task_instance in tis_to_set_to_queued:
            task_instance.state = State.QUEUED
            # Preserve an existing queued_dttm so requeue attempts keep the
            # original enqueue time.
            task_instance.queued_dttm = (timezone.utcnow()
                                         if not task_instance.queued_dttm
                                         else task_instance.queued_dttm)
            session.merge(task_instance)

        # Generate a list of SimpleTaskInstance for the use of queuing
        # them in the executor.
        simple_task_instances = [SimpleTaskInstance(ti) for ti in
                                 tis_to_set_to_queued]

        task_instance_str = "\n\t".join(
            ["{}".format(x) for x in tis_to_set_to_queued])

        session.commit()
        self.log.info("Setting the following {} tasks to queued state:\n\t{}"
                      .format(len(tis_to_set_to_queued), task_instance_str))
        return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: List[SimpleTaskInstance]
:param simple_dag_bag: Should contains all of the task_instances' dags
:type simple_dag_bag: SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
    @provide_session
    def _execute_task_instances(self,
                                simple_dag_bag,
                                states,
                                session=None):
        """
        Attempts to execute TaskInstances that should be executed by the scheduler.

        There are three steps:
        1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do exceed max_active_runs or pool limits.
        2. Change the state for the TIs above atomically.
        3. Enqueue the TIs in the executor.

        :param simple_dag_bag: TaskInstances associated with DAGs in the
            simple_dag_bag will be fetched from the DB and executed
        :type simple_dag_bag: SimpleDagBag
        :param states: Execute TaskInstances in these states
        :type states: Tuple[State]
        :param session: ORM session (injected by @provide_session)
        :return: Number of task instance with state changed.
        """
        executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
                                                              session=session)
        # Reducer passed to reduce_in_chunks: `result` is the running count of
        # queued TIs, `items` is one chunk (at most max_tis_per_query TIs).
        def query(result, items):
            simple_tis_with_state_changed = \
                self._change_state_for_executable_task_instances(items,
                                                                 states,
                                                                 session=session)
            self._enqueue_task_instances_with_queued_state(
                simple_dag_bag,
                simple_tis_with_state_changed)
            session.commit()
            return result + len(simple_tis_with_state_changed)
        # Process in chunks so a single huge query/update is never issued.
        return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
    @provide_session
    def _change_state_for_tasks_failed_to_execute(self, session):
        """
        If there are tasks left over in the executor,
        we set them back to SCHEDULED to avoid creating hanging tasks.

        :param session: session for ORM operations
        """
        if self.executor.queued_tasks:
            TI = models.TaskInstance
            # Match only rows that are still QUEUED for the exact try that
            # stayed stuck in the executor's queue.
            filter_for_ti_state_change = (
                [and_(
                    TI.dag_id == dag_id,
                    TI.task_id == task_id,
                    TI.execution_date == execution_date,
                    # The TI.try_number will return raw try_number+1 since the
                    # ti is not running. And we need to -1 to match the DB record.
                    TI._try_number == try_number - 1,
                    TI.state == State.QUEUED)
                    for dag_id, task_id, execution_date, try_number
                    in self.executor.queued_tasks.keys()])
            ti_query = (session.query(TI)
                        .filter(or_(*filter_for_ti_state_change)))
            # Row-lock the TIs so no other scheduler flips their state
            # while we reset them.
            tis_to_set_to_scheduled = (ti_query
                                       .with_for_update()
                                       .all())
            if len(tis_to_set_to_scheduled) == 0:
                session.commit()
                return
            # set TIs back to scheduled state so they get picked up again
            # (the original comment incorrectly said "queued")
            for task_instance in tis_to_set_to_scheduled:
                task_instance.state = State.SCHEDULED
            task_instance_str = "\n\t".join(
                ["{}".format(x) for x in tis_to_set_to_scheduled])
            session.commit()
            self.log.info("Set the following tasks to scheduled state:\n\t{}"
                          .format(task_instance_str))
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:return: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports %s.%s execution_date=%s as %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
    def _execute(self):
        """
        Set up DAG-file discovery and the DagFileProcessorAgent, run the main
        scheduling loop (``_execute_helper``), and always stop the agent on
        exit.
        """
        self.log.info("Starting the scheduler")
        # DAGs can be pickled for easier remote execution by some executors
        pickle_dags = False
        if self.do_pickle and self.executor.__class__ not in \
                (executors.LocalExecutor, executors.SequentialExecutor):
            pickle_dags = True
        self.log.info("Running execute loop for %s seconds", self.run_duration)
        self.log.info("Processing each file at most %s times", self.num_runs)
        # Build up a list of Python files that could contain DAGs
        self.log.info("Searching for files in %s", self.subdir)
        known_file_paths = list_py_file_paths(self.subdir)
        self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
        # Factory handed to the agent: builds one DagFileProcessor per file,
        # closing over pickle_dags and this scheduler's dag_ids filter.
        def processor_factory(file_path, zombies):
            return DagFileProcessor(file_path,
                                    pickle_dags,
                                    self.dag_ids,
                                    zombies)
        # When using sqlite, we do not use async_mode
        # so the scheduler job and DAG parser don't access the DB at the same time.
        async_mode = not self.using_sqlite
        self.processor_agent = DagFileProcessorAgent(self.subdir,
                                                     known_file_paths,
                                                     self.num_runs,
                                                     processor_factory,
                                                     async_mode)
        try:
            self._execute_helper()
        except Exception:
            self.log.exception("Exception when executing execute_helper")
        finally:
            # Always tear down the parser processes, even after a crash.
            self.processor_agent.end()
            self.log.info("Exited execute loop")
    def _execute_helper(self):
        """
        The actual scheduler loop. The main steps in the loop are:

        #. Harvest DAG parsing results through DagFileProcessorAgent
        #. Find and queue executable tasks
        #. Change task instance state in DB
        #. Queue tasks in executor
        #. Heartbeat executor
        #. Execute queued tasks in executor asynchronously
        #. Sync on the states of running tasks

        Following is a graphic representation of these steps.

        .. image:: ../docs/img/scheduler_loop.jpg

        :return: None
        """
        self.executor.start()
        self.log.info("Resetting orphaned tasks for active dag runs")
        self.reset_state_for_orphaned_tasks()
        # Start after resetting orphaned tasks to avoid stressing out DB.
        self.processor_agent.start()
        execute_start_time = timezone.utcnow()
        # Last time that self.heartbeat() was called.
        last_self_heartbeat_time = timezone.utcnow()
        # For the execute duration, parse and schedule DAGs
        # (a negative run_duration means the loop never times out).
        while (timezone.utcnow() - execute_start_time).total_seconds() < \
                self.run_duration or self.run_duration < 0:
            self.log.debug("Starting Loop...")
            loop_start_time = time.time()
            if self.using_sqlite:
                self.processor_agent.heartbeat()
                # For the sqlite case w/ 1 thread, wait until the processor
                # is finished to avoid concurrent access to the DB.
                self.log.debug(
                    "Waiting for processors to finish since we're using sqlite")
                self.processor_agent.wait_until_finished()
            self.log.info("Harvesting DAG parsing results")
            simple_dags = self.processor_agent.harvest_simple_dags()
            self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
            # Send tasks for execution if available
            simple_dag_bag = SimpleDagBag(simple_dags)
            if len(simple_dags) > 0:
                try:
                    # NOTE(review): simple_dag_bag was already constructed
                    # just above; this second construction is redundant.
                    simple_dag_bag = SimpleDagBag(simple_dags)
                    # Handle cases where a DAG run state is set (perhaps manually) to
                    # a non-running state. Handle task instances that belong to
                    # DAG runs in those states
                    # If a task instance is up for retry but the corresponding DAG run
                    # isn't running, mark the task instance as FAILED so we don't try
                    # to re-run it.
                    self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                              [State.UP_FOR_RETRY],
                                                              State.FAILED)
                    # If a task instance is scheduled or queued, but the corresponding
                    # DAG run isn't running, set the state to NONE so we don't try to
                    # re-run it.
                    self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                              [State.QUEUED,
                                                               State.SCHEDULED],
                                                              State.NONE)
                    self._execute_task_instances(simple_dag_bag,
                                                 (State.SCHEDULED,))
                except Exception as e:
                    self.log.error("Error queuing tasks")
                    self.log.exception(e)
                    # NOTE(review): this `continue` also skips the executor
                    # heartbeat and event processing for this iteration.
                    continue
            # Call heartbeats
            self.log.debug("Heartbeating the executor")
            self.executor.heartbeat()
            # Requeue (back to SCHEDULED) anything the executor failed to run.
            self._change_state_for_tasks_failed_to_execute()
            # Process events from the executor
            self._process_executor_events(simple_dag_bag)
            # Heartbeat the scheduler periodically
            time_since_last_heartbeat = (timezone.utcnow() -
                                         last_self_heartbeat_time).total_seconds()
            if time_since_last_heartbeat > self.heartrate:
                self.log.debug("Heartbeating the scheduler")
                self.heartbeat()
                last_self_heartbeat_time = timezone.utcnow()
            loop_end_time = time.time()
            loop_duration = loop_end_time - loop_start_time
            self.log.debug(
                "Ran scheduling loop in %.2f seconds",
                loop_duration)
            self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
            time.sleep(self._processor_poll_interval)
            # Exit early for a test mode, run one additional scheduler loop
            # to reduce the possibility that parsed DAG was put into the queue
            # by the DAG manager but not yet received by DAG agent.
            if self.processor_agent.done:
                self._last_loop = True
            if self._last_loop:
                self.log.info("Exiting scheduler loop as all files"
                              " have been processed {} times".format(self.num_runs))
                break
            # Throttle the loop to at most one iteration per second.
            if loop_duration < 1:
                sleep_length = 1 - loop_duration
                self.log.debug(
                    "Sleeping for {0:.2f} seconds to prevent excessive logging"
                    .format(sleep_length))
                sleep(sleep_length)
        # Stop any processors
        self.processor_agent.terminate()
        # Verify that all files were processed, and if so, deactivate DAGs that
        # haven't been touched by the scheduler as they likely have been
        # deleted.
        if self.processor_agent.all_files_processed:
            self.log.info(
                "Deactivating DAGs that haven't been touched since %s",
                execute_start_time.isoformat()
            )
            models.DAG.deactivate_stale_dags(execute_start_time)
        self.executor.end()
        settings.Session.remove()
    @provide_session
    def process_file(self, file_path, zombies, pickle_dags=False, session=None):
        """
        Process a Python file containing Airflow DAGs.

        This includes:

        1. Execute the file and look for DAG objects in the namespace.
        2. Pickle the DAG and save it to the DB (if necessary).
        3. For each DAG, see what tasks should run and create appropriate task
           instances in the DB.
        4. Record any errors importing the file into ORM
        5. Kill (in ORM) any task instances belonging to the DAGs that haven't
           issued a heartbeat in a while.

        Returns a list of SimpleDag objects that represent the DAGs found in
        the file

        :param file_path: the path to the Python file that should be executed
        :type file_path: unicode
        :param zombies: zombie task instances to kill.
        :type zombies: list[SimpleTaskInstance]
        :param pickle_dags: whether serialize the DAGs found in the file and
            save them to the db
        :type pickle_dags: bool
        :param session: ORM session (injected by @provide_session)
        :return: a list of SimpleDags made from the Dags found in the file
        :rtype: list[SimpleDag]
        """
        self.log.info("Processing file %s for tasks to queue", file_path)
        # As DAGs are parsed from this file, they will be converted into SimpleDags
        simple_dags = []
        try:
            dagbag = models.DagBag(file_path, include_examples=False)
        except Exception:
            # A broken DAG file must not take the scheduler down; record the
            # metric and move on.
            self.log.exception("Failed at reloading the DAG file %s", file_path)
            Stats.incr('dag_file_refresh_error', 1, 1)
            return []
        if len(dagbag.dags) > 0:
            self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
        else:
            self.log.warning("No viable dags retrieved from %s", file_path)
            self.update_import_errors(session, dagbag)
            return []
        # Save individual DAGs in the ORM and update DagModel.last_scheduled_time
        for dag in dagbag.dags.values():
            dag.sync_to_db()
        paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
                          if dag.is_paused]
        # Pickle the DAGs (if necessary) and put them into a SimpleDag
        for dag_id in dagbag.dags:
            dag = dagbag.get_dag(dag_id)
            pickle_id = None
            if pickle_dags:
                pickle_id = dag.pickle(session).id
            # Only return DAGs that are not paused
            if dag_id not in paused_dag_ids:
                simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
        # Restrict to this scheduler's dag_ids filter when one is configured;
        # subdags (dag.parent_dag set) are excluded in the unfiltered case.
        if len(self.dag_ids) > 0:
            dags = [dag for dag in dagbag.dags.values()
                    if dag.dag_id in self.dag_ids and
                    dag.dag_id not in paused_dag_ids]
        else:
            dags = [dag for dag in dagbag.dags.values()
                    if not dag.parent_dag and
                    dag.dag_id not in paused_dag_ids]
        # Not using multiprocessing.Queue() since it's no longer a separate
        # process and due to some unusual behavior. (empty() incorrectly
        # returns true?)
        ti_keys_to_schedule = []
        self._process_dags(dagbag, dags, ti_keys_to_schedule)
        for ti_key in ti_keys_to_schedule:
            # ti_key is a (dag_id, task_id, execution_date) tuple.
            dag = dagbag.dags[ti_key[0]]
            task = dag.get_task(ti_key[1])
            ti = models.TaskInstance(task, ti_key[2])
            ti.refresh_from_db(session=session, lock_for_update=True)
            # We can defer checking the task dependency checks to the worker themselves
            # since they can be expensive to run in the scheduler.
            dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
            # Only schedule tasks that have their dependencies met, e.g. to avoid
            # a task that recently got its state changed to RUNNING from somewhere
            # other than the scheduler from getting its state overwritten.
            # TODO(aoen): It's not great that we have to check all the task instance
            # dependencies twice; once to get the task scheduled, and again to actually
            # run the task. We should try to come up with a way to only check them once.
            if ti.are_dependencies_met(
                    dep_context=dep_context,
                    session=session,
                    verbose=True):
                # Task starts out in the scheduled state. All tasks in the
                # scheduled state will be sent to the executor
                ti.state = State.SCHEDULED
            # Also save this task instance to the DB.
            self.log.info("Creating / updating %s in ORM", ti)
            session.merge(ti)
        # commit batch
        session.commit()
        # Record import errors into the ORM
        try:
            self.update_import_errors(session, dagbag)
        except Exception:
            self.log.exception("Error logging import errors!")
        try:
            dagbag.kill_zombies(zombies)
        except Exception:
            self.log.exception("Error killing zombies!")
        return simple_dags
    @provide_session
    def heartbeat_callback(self, session=None):
        """Emit a scheduler_heartbeat metric on each job heartbeat."""
        Stats.incr('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
    """
    A backfill job consists of a dag or subdag for a specific time range. It
    triggers a set of task instance runs, in the right order and lasts for
    as long as it takes for the set of task instance to be completed.
    """
    # run_id prefix for dag runs created by a backfill; ID_FORMAT_PREFIX is
    # completed with the run's execution date (see _get_dag_run).
    ID_PREFIX = 'backfill_'
    ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
    # Single-table inheritance discriminator for the jobs table.
    __mapper_args__ = {
        'polymorphic_identity': 'BackfillJob'
    }
    class _DagRunTaskStatus(object):
        """
        Internal status of the backfill job. This class is intended to be instantiated
        only within a BackfillJob instance and will track the execution of tasks,
        e.g. running, skipped, succeeded, failed, etc. Information about the dag runs
        related to the backfill job are also being tracked in this structure,
        .e.g finished runs, etc. Any other status related information related to the
        execution of dag runs / tasks can be included in this structure since it makes
        it easier to pass it around.
        """
        # TODO(edgarRd): AIRFLOW-1444: Add consistency check on counts
        def __init__(self,
                     to_run=None,
                     running=None,
                     skipped=None,
                     succeeded=None,
                     failed=None,
                     not_ready=None,
                     deadlocked=None,
                     active_runs=None,
                     executed_dag_run_dates=None,
                     finished_runs=0,
                     total_runs=0,
                     ):
            """
            :param to_run: Tasks to run in the backfill
            :type to_run: dict[Tuple[String, String, DateTime], TaskInstance]
            :param running: Maps running task instance key to task instance object
            :type running: dict[Tuple[String, String, DateTime], TaskInstance]
            :param skipped: Tasks that have been skipped
            :type skipped: set[Tuple[String, String, DateTime]]
            :param succeeded: Tasks that have succeeded so far
            :type succeeded: set[Tuple[String, String, DateTime]]
            :param failed: Tasks that have failed
            :type failed: set[Tuple[String, String, DateTime]]
            :param not_ready: Tasks not ready for execution
            :type not_ready: set[Tuple[String, String, DateTime]]
            :param deadlocked: Deadlocked tasks
            :type deadlocked: set[Tuple[String, String, DateTime]]
            :param active_runs: Active dag runs at a certain point in time
            :type active_runs: list[DagRun]
            :param executed_dag_run_dates: Datetime objects for the executed dag runs
            :type executed_dag_run_dates: set[Datetime]
            :param finished_runs: Number of finished runs so far
            :type finished_runs: int
            :param total_runs: Number of total dag runs able to run
            :type total_runs: int
            """
            # `x or <container>()` creates fresh containers per instance,
            # avoiding the shared mutable-default pitfall. Note it also
            # replaces an explicitly-passed empty container with a new one.
            self.to_run = to_run or dict()
            self.running = running or dict()
            self.skipped = skipped or set()
            self.succeeded = succeeded or set()
            self.failed = failed or set()
            self.not_ready = not_ready or set()
            self.deadlocked = deadlocked or set()
            self.active_runs = active_runs or list()
            self.executed_dag_run_dates = executed_dag_run_dates or set()
            self.finished_runs = finished_runs
            self.total_runs = total_runs
    def __init__(
            self,
            dag,
            start_date=None,
            end_date=None,
            mark_success=False,
            donot_pickle=False,
            ignore_first_depends_on_past=False,
            ignore_task_deps=False,
            pool=None,
            delay_on_limit_secs=1.0,
            verbose=False,
            conf=None,
            rerun_failed_tasks=False,
            *args, **kwargs):
        """
        :param dag: DAG object.
        :type dag: `class DAG`.
        :param start_date: start date for the backfill date range.
        :type start_date: datetime.
        :param end_date: end date for the backfill date range.
        :type end_date: datetime
        :param mark_success: flag whether to mark the task auto success.
        :type mark_success: bool
        :param donot_pickle: whether pickle
        :type donot_pickle: bool
        :param ignore_first_depends_on_past: whether to ignore depend on past
        :type ignore_first_depends_on_past: bool
        :param ignore_task_deps: whether to ignore the task dependency
        :type ignore_task_deps: bool
        :param pool: pool to run the backfilled task instances in
            (presumably a pool name string -- confirm against callers)
        :type pool: str
        :param delay_on_limit_secs: seconds to wait between attempts when a
            limit (e.g. max_active_runs) prevents creating a new dag run
        :type delay_on_limit_secs: float
        :param verbose: flag whether to display verbose messages to
            the backfill console
        :type verbose: bool
        :param conf: a dictionary which user could pass k-v pairs for backfill
        :type conf: dictionary
        :param rerun_failed_tasks: flag to whether to
            auto rerun the failed task in backfill
        :type rerun_failed_tasks: bool
        :param args: passed through to BaseJob
        :param kwargs: passed through to BaseJob
        """
        self.dag = dag
        self.dag_id = dag.dag_id
        self.bf_start_date = start_date
        self.bf_end_date = end_date
        self.mark_success = mark_success
        self.donot_pickle = donot_pickle
        self.ignore_first_depends_on_past = ignore_first_depends_on_past
        self.ignore_task_deps = ignore_task_deps
        self.pool = pool
        self.delay_on_limit_secs = delay_on_limit_secs
        self.verbose = verbose
        self.conf = conf
        self.rerun_failed_tasks = rerun_failed_tasks
        super(BackfillJob, self).__init__(*args, **kwargs)
    def _update_counters(self, ti_status):
        """
        Updates the counters per state of the tasks that were running. Can re-add
        to tasks to run in case required.

        :param ti_status: the internal status of the backfill job tasks
        :type ti_status: BackfillJob._DagRunTaskStatus
        """
        # Iterate over a copy since we pop from ti_status.running in the loop.
        for key, ti in list(ti_status.running.items()):
            # Re-read the state from the DB; the executor may have finished it.
            ti.refresh_from_db()
            if ti.state == State.SUCCESS:
                ti_status.succeeded.add(key)
                self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
                ti_status.running.pop(key)
                continue
            elif ti.state == State.SKIPPED:
                ti_status.skipped.add(key)
                self.log.debug("Task instance %s skipped. Don't rerun.", ti)
                ti_status.running.pop(key)
                continue
            elif ti.state == State.FAILED:
                self.log.error("Task instance %s failed", ti)
                ti_status.failed.add(key)
                ti_status.running.pop(key)
                continue
            # special case: if the task needs to run again put it back
            elif ti.state == State.UP_FOR_RETRY:
                self.log.warning("Task instance %s is up for retry", ti)
                ti_status.running.pop(key)
                ti_status.to_run[key] = ti
            # special case: The state of the task can be set to NONE by the task itself
            # when it reaches concurrency limits. It could also happen when the state
            # is changed externally, e.g. by clearing tasks from the ui. We need to cover
            # for that as otherwise those tasks would fall outside of the scope of
            # the backfill suddenly.
            elif ti.state == State.NONE:
                self.log.warning(
                    "FIXME: task instance %s state was set to none externally or "
                    "reaching concurrency limits. Re-adding task to queue.",
                    ti
                )
                ti.set_state(State.SCHEDULED)
                ti_status.running.pop(key)
                ti_status.to_run[key] = ti
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
@provide_session
def _get_dag_run(self, run_date, session=None):
"""
Returns a dag run for the given run date, which will be matched to an existing
dag run if available or create a new dag run otherwise. If the max_active_runs
limit is reached, this function will return None.
:param run_date: the execution date for the dag run
:type run_date: datetime
:param session: the database session object
:type session: Session
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
# consider max_active_runs but ignore when running subdags
respect_dag_max_active_limit = (True
if (self.dag.schedule_interval and
not self.dag.is_subdag)
else False)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
# check if we are scheduling on top of a already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = self.dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
@provide_session
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: models.DagRun
:param session: the database session object
:type session: Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
# for some reason if we don't refresh the reference to run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run
def _log_progress(self, ti_status):
msg = ' | '.join([
"[backfill progress]",
"finished run {0} of {1}",
"tasks waiting: {2}",
"succeeded: {3}",
"running: {4}",
"failed: {5}",
"skipped: {6}",
"deadlocked: {7}",
"not ready: {8}"
]).format(
ti_status.finished_runs,
ti_status.total_runs,
len(ti_status.to_run),
len(ti_status.succeeded),
len(ti_status.running),
len(ti_status.failed),
len(ti_status.skipped),
len(ti_status.deadlocked),
len(ti_status.not_ready))
self.log.info(msg)
self.log.debug(
"Finished dag run loop iteration. Remaining tasks %s",
ti_status.to_run.values()
)
@provide_session
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime
:param session: the current session object
:type session: Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
for task in self.dag.topological_sort():
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
# guard against externally modified tasks instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance {} state was set to None "
"externally. This should not happen"
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
# Rerun failed tasks or upstreamed failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
ti.refresh_from_db(lock_for_update=True, session=session)
if ti.state == State.SCHEDULED or ti.state == State.UP_FOR_RETRY:
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
continue
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
continue
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status, session=None):
err = ''
if ti_status.failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(ti_status.failed))
if ti_status.deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=self.verbose)
for t in ti_status.deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(ti_status.succeeded)
err += ' These tasks are running:\n{}\n'.format(ti_status.running)
err += ' These tasks have failed:\n{}\n'.format(ti_status.failed)
err += ' These tasks are skipped:\n{}\n'.format(ti_status.skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(ti_status.deadlocked)
return err
@provide_session
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
:param ti_status: internal BackfillJob status structure to tis track progress
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime
:param session: the current session object
:type session: Session
"""
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
@provide_session
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
finally:
executor.end()
session.commit()
self.log.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
    """
    Runs a single task instance on the local machine.

    A LocalTaskJob starts a task runner subprocess for the given task
    instance, heartbeats while the task is running so the scheduler does not
    treat it as a zombie, and tears the runner down when the task finishes
    or when the job is killed.
    """

    __mapper_args__ = {
        'polymorphic_identity': 'LocalTaskJob'
    }

    def __init__(
            self,
            task_instance,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            mark_success=False,
            pickle_id=None,
            pool=None,
            *args, **kwargs):
        """
        :param task_instance: the task instance to run
        :param ignore_all_deps: ignore all dependencies for this run
        :param ignore_depends_on_past: ignore depends_on_past for this run
        :param ignore_task_deps: ignore the task's own dependencies
        :param ignore_ti_state: run even if the task instance is in a
            finished state
        :param mark_success: mark the task as succeeded without running it
        :param pickle_id: numeric id of the pickled dag, None if not pickled
        :param pool: pool to run the task instance in
        """
        self.task_instance = task_instance
        self.dag_id = task_instance.dag_id
        self.ignore_all_deps = ignore_all_deps
        self.ignore_depends_on_past = ignore_depends_on_past
        self.ignore_task_deps = ignore_task_deps
        self.ignore_ti_state = ignore_ti_state
        self.pool = pool
        self.pickle_id = pickle_id
        self.mark_success = mark_success

        # terminating state is used so that a job don't try to
        # terminate multiple times
        self.terminating = False

        super(LocalTaskJob, self).__init__(*args, **kwargs)

    def _execute(self):
        """Start the task runner and heartbeat until the task completes."""
        self.task_runner = get_task_runner(self)

        def signal_handler(signum, frame):
            """Setting kill signal handler"""
            self.log.error("Received SIGTERM. Terminating subprocesses")
            self.on_kill()
            raise AirflowException("LocalTaskJob received SIGTERM signal")
        signal.signal(signal.SIGTERM, signal_handler)

        if not self.task_instance._check_and_change_state_before_execution(
                mark_success=self.mark_success,
                ignore_all_deps=self.ignore_all_deps,
                ignore_depends_on_past=self.ignore_depends_on_past,
                ignore_task_deps=self.ignore_task_deps,
                ignore_ti_state=self.ignore_ti_state,
                job_id=self.id,
                pool=self.pool):
            self.log.info("Task is not able to be run")
            return

        try:
            self.task_runner.start()

            last_heartbeat_time = time.time()
            heartbeat_time_limit = conf.getint('scheduler',
                                               'scheduler_zombie_task_threshold')
            while True:
                # Monitor the task to see if it's done
                return_code = self.task_runner.return_code()
                if return_code is not None:
                    self.log.info("Task exited with return code %s", return_code)
                    return

                # Periodically heartbeat so that the scheduler doesn't think this
                # is a zombie
                try:
                    self.heartbeat()
                    last_heartbeat_time = time.time()
                except OperationalError:
                    Stats.incr('local_task_job_heartbeat_failure', 1, 1)
                    self.log.exception(
                        "Exception while trying to heartbeat! Sleeping for %s seconds",
                        self.heartrate
                    )
                    time.sleep(self.heartrate)

                # If it's been too long since we've heartbeat, then it's possible that
                # the scheduler rescheduled this task, so kill launched processes.
                time_since_last_heartbeat = time.time() - last_heartbeat_time
                if time_since_last_heartbeat > heartbeat_time_limit:
                    Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
                    # BUGFIX: message previously read "time limited exceeded"
                    self.log.error("Heartbeat time limit exceeded!")
                    raise AirflowException("Time since last heartbeat({:.2f}s) "
                                           "exceeded limit ({}s)."
                                           .format(time_since_last_heartbeat,
                                                   heartbeat_time_limit))
        finally:
            self.on_kill()

    def on_kill(self):
        """Stop the task runner subprocess and run its cleanup hooks."""
        self.task_runner.terminate()
        self.task_runner.on_finish()

    @provide_session
    def heartbeat_callback(self, session=None):
        """Self destruct task if state has been moved away from running externally"""
        if self.terminating:
            # ensure termination if processes are created later
            self.task_runner.terminate()
            return

        self.task_instance.refresh_from_db()
        ti = self.task_instance

        fqdn = get_hostname()
        same_hostname = fqdn == ti.hostname
        same_process = ti.pid == os.getpid()

        if ti.state == State.RUNNING:
            # Another host or process has claimed this task instance;
            # raising here kills this (now stale) runner.
            if not same_hostname:
                self.log.warning("The recorded hostname {ti.hostname} "
                                 "does not match this instance's hostname "
                                 "{fqdn}".format(**locals()))
                raise AirflowException("Hostname of job runner does not match")
            elif not same_process:
                current_pid = os.getpid()
                self.log.warning("Recorded pid {ti.pid} does not match "
                                 "the current pid "
                                 "{current_pid}".format(**locals()))
                raise AirflowException("PID of job runner does not match")
        elif (
                self.task_runner.return_code() is None and
                hasattr(self.task_runner, 'process')
        ):
            # State changed externally (e.g. cleared from the UI) while the
            # runner is still alive: take the poison pill.
            self.log.warning(
                "State of this instance has been externally set to %s. "
                "Taking the poison pill.",
                ti.state
            )
            self.task_runner.terminate()
            self.terminating = True
| 41.47756 | 104 | 0.558447 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from time import sleep
import six
from past.builtins import basestring
from sqlalchemy import (Column, Index, Integer, String, and_, func, not_, or_)
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from airflow import configuration as conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun
from airflow.models.dagpickle import DagPickle
from airflow.settings import Stats
from airflow.task.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import create_session, provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.net import get_hostname
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
Base = models.base.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(UtcDateTime())
end_date = Column(UtcDateTime())
latest_heartbeat = Column(UtcDateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
Index('idx_job_state_heartbeat', state, latest_heartbeat),
)
def __init__(
self,
executor=executors.GetDefaultExecutor(),
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = get_hostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
self.heartrate = heartrate
self.unixname = getpass.getuser()
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(timezone.utcnow() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
@provide_session
def kill(self, session=None):
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = timezone.utcnow()
try:
self.on_kill()
except Exception as e:
self.log.error('on_kill() method failed: {}'.format(e))
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (timezone.utcnow() -
job.latest_heartbeat).total_seconds())
sleep(sleep_for)
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e))
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
with create_session() as session:
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
try:
self._execute()
self.state = State.SUCCESS
except SystemExit:
self.state = State.SUCCESS
except Exception:
self.state = State.FAILED
raise
finally:
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
@provide_session
def reset_state_for_orphaned_tasks(self, filter_by_dag_run=None, session=None):
queued_tis = self.executor.queued_tasks
running_tis = self.executor.running
resettable_states = [State.SCHEDULED, State.QUEUED]
TI = models.TaskInstance
DR = models.DagRun
if filter_by_dag_run is None:
resettable_tis = (
session
.query(TI)
.join(
DR,
and_(
TI.dag_id == DR.dag_id,
TI.execution_date == DR.execution_date))
.filter(
DR.state == State.RUNNING,
DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'),
TI.state.in_(resettable_states))).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states,
session=session)
tis_to_reset = []
for ti in resettable_tis:
if ti.key not in queued_tis and ti.key not in running_tis:
tis_to_reset.append(ti)
if len(tis_to_reset) == 0:
return []
def query(result, items):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in items])
reset_tis = (
session
.query(TI)
.filter(or_(*filter_for_tis), TI.state.in_(resettable_states))
.with_for_update()
.all())
for ti in reset_tis:
ti.state = State.NONE
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query,
tis_to_reset,
[],
self.max_tis_per_query)
task_instance_str = '\n\t'.join(
["{}".format(x) for x in reset_tis])
session.commit()
self.log.info(
"Reset the following %s TaskInstances:\n\t%s",
len(reset_tis), task_instance_str
)
return reset_tis
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
self._result_queue = multiprocessing.Queue()
# The process that was launched to process the given .
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is use to uniquely name the process / thread that's launched
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
def helper():
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
try:
sys.stdout = stdout
sys.stderr = stderr
settings.configure_orm()
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_queue.put(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
def start(self):
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow()
def terminate(self, sigkill=False):
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._result_queue and not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
if self._result_queue and not self._process.is_alive():
self._done = True
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
@property
def result(self):
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=-1,
processor_poll_interval=1.0,
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.using_sqlite = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
self.processor_agent = None
self._last_loop = False
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
self.log.info("Exiting gracefully upon receiving signal {}".format(signum))
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
@provide_session
def manage_slas(self, dag, session=None):
if not any([ti.sla for ti in dag.tasks]):
self.log.info(
"Skipping SLA check for %s because no tasks in DAG have SLAs",
dag
)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False) # noqa: E712
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n<code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}<code></pre>
""".format(bug=asciiart.bug, **locals())
emails = set()
for task in dag.tasks:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails and len(slas):
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dag's that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
if next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, queue, session=None):
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Active dag runs > max_active_run.")
continue
if run.is_backfill:
continue
run.dag = dag
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
for ti in tis:
task = dag.get_task(ti.task_id)
ti.task = task
if task.adhoc:
continue
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
queue.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
@provide_session
def __get_task_concurrency_map(self, states, session=None):
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
task_map[(dag_id, task_id)] = count
return task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
executable_tis = []
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None,
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None,
not_(DM.is_paused)))
)
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states))
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.log.info("{} tasks up for execution:\n\t{}"
.format(len(task_instances_to_examine),
task_instance_str))
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
task_concurrency_map = self.__get_task_concurrency_map(
states=states_to_count_as_running, session=session)
for pool, task_instances in pool_to_task_instances.items():
if not pool:
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
open_slots = 0
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name={pool}) with {open_slots} "
"open slots and {num_queued} task instances in queue".format(
**locals()
)
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
dag_id_to_possibly_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
break
# Check to make sure that the task concurrency of the DAG hasn't been
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
if dag_id not in dag_id_to_possibly_running_task_count:
dag_id_to_possibly_running_task_count[dag_id] = \
DAG.get_num_task_instances(
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids,
states=states_to_count_as_running,
session=session)
current_task_concurrency = dag_id_to_possibly_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_task_concurrency, task_concurrency_limit
)
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, task_concurrency_limit
)
continue
task_concurrency = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency is not None:
num_running = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if num_running >= task_concurrency:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
else:
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_id_to_possibly_running_task_count[dag_id] += 1
task_instance_str = "\n\t".join(
["{}".format(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these dont expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
    @provide_session
    def _change_state_for_executable_task_instances(self, task_instances,
                                                    acceptable_states, session=None):
        """
        Atomically transition the given task instances to the QUEUED state.

        Only rows whose current state is in ``acceptable_states`` are updated;
        the rows are locked with SELECT ... FOR UPDATE so a concurrent
        scheduler cannot queue the same task instance twice.

        :param task_instances: TaskInstances to change the state of
        :param acceptable_states: states a TI must currently be in to be
            queued; may contain None to also match TIs with NULL state
        :param session: DB session (injected by @provide_session)
        :return: list of SimpleTaskInstance for the TIs that were queued
        """
        if len(task_instances) == 0:
            session.commit()
            return []
        TI = models.TaskInstance
        # One OR-of-ANDs filter identifying exactly the requested TIs by
        # their (dag_id, task_id, execution_date) key.
        filter_for_ti_state_change = (
            [and_(
                TI.dag_id == ti.dag_id,
                TI.task_id == ti.task_id,
                TI.execution_date == ti.execution_date)
                for ti in task_instances])
        ti_query = (
            session
            .query(TI)
            .filter(or_(*filter_for_ti_state_change)))
        if None in acceptable_states:
            # SQL NULL never matches IN (...), so a NULL state needs an
            # explicit IS NULL test alongside the IN clause.
            ti_query = ti_query.filter(
                or_(TI.state == None, TI.state.in_(acceptable_states))  # noqa: E711
            )
        else:
            ti_query = ti_query.filter(TI.state.in_(acceptable_states))
        tis_to_set_to_queued = (
            ti_query
            .with_for_update()
            .all())
        if len(tis_to_set_to_queued) == 0:
            self.log.info("No tasks were able to have their state changed to queued.")
            session.commit()
            return []
        # set TIs to queued state
        for task_instance in tis_to_set_to_queued:
            task_instance.state = State.QUEUED
            # Keep an existing queued timestamp; only stamp it on first queue.
            task_instance.queued_dttm = (timezone.utcnow()
                                         if not task_instance.queued_dttm
                                         else task_instance.queued_dttm)
            session.merge(task_instance)
        # Generate a list of SimpleTaskInstance for the use of queuing
        # them in the executor.
        simple_task_instances = [SimpleTaskInstance(ti) for ti in
                                 tis_to_set_to_queued]
        task_instance_str = "\n\t".join(
            ["{}".format(x) for x in tis_to_set_to_queued])
        session.commit()
        self.log.info("Setting the following {} tasks to queued state:\n\t{}"
                      .format(len(tis_to_set_to_queued), task_instance_str))
        return simple_task_instances
    def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
                                                  simple_task_instances):
        """
        Hand already-QUEUED task instances over to the executor.

        Builds the run command for each SimpleTaskInstance and queues it on
        self.executor with the TI's priority weight and queue.

        :param simple_dag_bag: bag providing the SimpleDag for each TI
        :param simple_task_instances: TIs (already set to QUEUED) to enqueue
        """
        TI = models.TaskInstance
        # actually enqueue them
        for simple_task_instance in simple_task_instances:
            simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
            # All ignore_* flags are False: the worker re-checks dependencies.
            command = TI.generate_command(
                simple_task_instance.dag_id,
                simple_task_instance.task_id,
                simple_task_instance.execution_date,
                local=True,
                mark_success=False,
                ignore_all_deps=False,
                ignore_depends_on_past=False,
                ignore_task_deps=False,
                ignore_ti_state=False,
                pool=simple_task_instance.pool,
                file_path=simple_dag.full_filepath,
                pickle_id=simple_dag.pickle_id)
            priority = simple_task_instance.priority_weight
            queue = simple_task_instance.queue
            self.log.info(
                "Sending %s to executor with priority %s and queue %s",
                simple_task_instance.key, priority, queue
            )
            self.executor.queue_command(
                simple_task_instance,
                command,
                priority=priority,
                queue=queue)
    @provide_session
    def _execute_task_instances(self,
                                simple_dag_bag,
                                states,
                                session=None):
        """
        Find executable task instances, set them to QUEUED and enqueue them
        on the executor, working in chunks of at most max_tis_per_query.

        :param simple_dag_bag: TaskInstances associated with DAGs in this bag
        :param states: only TIs currently in these states are considered
        :return: total number of task instances queued
        """
        executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
                                                              session=session)
        def query(result, items):
            # Chunk worker: queue one batch and accumulate the queued count.
            simple_tis_with_state_changed = \
                self._change_state_for_executable_task_instances(items,
                                                                 states,
                                                                 session=session)
            self._enqueue_task_instances_with_queued_state(
                simple_dag_bag,
                simple_tis_with_state_changed)
            session.commit()
            return result + len(simple_tis_with_state_changed)
        return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
    @provide_session
    def _change_state_for_tasks_failed_to_execute(self, session):
        """
        Requeue tasks the executor failed to launch.

        If the executor still holds queued_tasks whose DB state is QUEUED,
        they were never actually started, so flip them back to SCHEDULED to
        be picked up again on a later loop.
        """
        if self.executor.queued_tasks:
            TI = models.TaskInstance
            filter_for_ti_state_change = (
                [and_(
                    TI.dag_id == dag_id,
                    TI.task_id == task_id,
                    TI.execution_date == execution_date,
                    # The TI.try_number will return raw try_number+1 since the
                    # ti is not running. And we need to -1 to match the DB record.
                    TI._try_number == try_number - 1,
                    TI.state == State.QUEUED)
                    for dag_id, task_id, execution_date, try_number
                    in self.executor.queued_tasks.keys()])
            ti_query = (session.query(TI)
                        .filter(or_(*filter_for_ti_state_change)))
            tis_to_set_to_scheduled = (ti_query
                                       .with_for_update()
                                       .all())
            if len(tis_to_set_to_scheduled) == 0:
                session.commit()
                return
            # set TIs back to SCHEDULED (they never really launched)
            for task_instance in tis_to_set_to_scheduled:
                task_instance.state = State.SCHEDULED
            task_instance_str = "\n\t".join(
                ["{}".format(x) for x in tis_to_set_to_scheduled])
            session.commit()
            self.log.info("Set the following tasks to scheduled state:\n\t{}"
                          .format(task_instance_str))
def _process_dags(self, dagbag, dags, tis_out):
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
    @provide_session
    def _process_executor_events(self, simple_dag_bag, session=None):
        """
        Respond to executor events: when the executor reports a TI finished
        (FAILED/SUCCESS) but the DB still shows it QUEUED for the same try,
        assume it was killed externally and fail it.
        """
        TI = models.TaskInstance
        for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
                               .items()):
            dag_id, task_id, execution_date, try_number = key
            self.log.info(
                "Executor reports %s.%s execution_date=%s as %s for try_number %s",
                dag_id, task_id, execution_date, state, try_number
            )
            if state == State.FAILED or state == State.SUCCESS:
                qry = session.query(TI).filter(TI.dag_id == dag_id,
                                               TI.task_id == task_id,
                                               TI.execution_date == execution_date)
                ti = qry.first()
                if not ti:
                    # NOTE(review): ti is None here, so this logs literally
                    # "TaskInstance None ..."; logging `key` would be more useful.
                    self.log.warning("TaskInstance %s went missing from the database", ti)
                    continue
                # Only treat it as externally killed if the DB still says
                # QUEUED for the very try the executor reported on.
                if ti.try_number == try_number and ti.state == State.QUEUED:
                    msg = ("Executor reports task instance {} finished ({}) "
                           "although the task says its {}. Was the task "
                           "killed externally?".format(ti, state, ti.state))
                    self.log.error(msg)
                    try:
                        simple_dag = simple_dag_bag.get_dag(dag_id)
                        dagbag = models.DagBag(simple_dag.full_filepath)
                        dag = dagbag.get_dag(dag_id)
                        ti.task = dag.get_task(task_id)
                        ti.handle_failure(msg)
                    except Exception:
                        # Best effort: if the DAG can't even be loaded, fail
                        # the TI directly without callbacks or retries.
                        self.log.error("Cannot load the dag bag to handle failure for %s"
                                       ". Setting task to FAILED without callbacks or "
                                       "retries. Do you have enough resources?", ti)
                        ti.state = State.FAILED
                        session.merge(ti)
                        session.commit()
    def _execute(self):
        """
        Top-level scheduler entry point: start the DAG-file processor agent,
        run the scheduling loop until done, and always tear the agent down.
        """
        self.log.info("Starting the scheduler")
        # DAGs can be pickled for easier remote execution by some executors;
        # pointless for executors that run on this same machine.
        pickle_dags = False
        if self.do_pickle and self.executor.__class__ not in \
                (executors.LocalExecutor, executors.SequentialExecutor):
            pickle_dags = True
        self.log.info("Running execute loop for %s seconds", self.run_duration)
        self.log.info("Processing each file at most %s times", self.num_runs)
        self.log.info("Searching for files in %s", self.subdir)
        known_file_paths = list_py_file_paths(self.subdir)
        self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
        def processor_factory(file_path, zombies):
            # One DagFileProcessor per DAG-definition file, created on demand
            # by the agent.
            return DagFileProcessor(file_path,
                                    pickle_dags,
                                    self.dag_ids,
                                    zombies)
        # sqlite cannot handle concurrent writers, so run synchronously there.
        async_mode = not self.using_sqlite
        self.processor_agent = DagFileProcessorAgent(self.subdir,
                                                     known_file_paths,
                                                     self.num_runs,
                                                     processor_factory,
                                                     async_mode)
        try:
            self._execute_helper()
        except Exception:
            self.log.exception("Exception when executing execute_helper")
        finally:
            self.processor_agent.end()
            self.log.info("Exited execute loop")
    def _execute_helper(self):
        """
        The scheduler's main loop: harvest parsed DAGs from the processor
        agent, queue runnable task instances, heartbeat the executor and
        itself, and exit once all files have been processed num_runs times
        (or run_duration elapses).
        """
        self.executor.start()
        self.log.info("Resetting orphaned tasks for active dag runs")
        self.reset_state_for_orphaned_tasks()
        # Start after resetting orphaned tasks to avoid stressing out DB.
        self.processor_agent.start()
        execute_start_time = timezone.utcnow()
        # Last time that self.heartbeat() was called.
        last_self_heartbeat_time = timezone.utcnow()
        # For the execute duration, parse and schedule DAGs
        # (a negative run_duration means "run forever").
        while (timezone.utcnow() - execute_start_time).total_seconds() < \
                self.run_duration or self.run_duration < 0:
            self.log.debug("Starting Loop...")
            loop_start_time = time.time()
            if self.using_sqlite:
                self.processor_agent.heartbeat()
                # For the sqlite case w/ 1 thread, wait until the processor
                # is finished to avoid concurrent access to the DB.
                self.log.debug(
                    "Waiting for processors to finish since we're using sqlite")
                self.processor_agent.wait_until_finished()
            self.log.info("Harvesting DAG parsing results")
            simple_dags = self.processor_agent.harvest_simple_dags()
            self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
            simple_dag_bag = SimpleDagBag(simple_dags)
            if len(simple_dags) > 0:
                try:
                    simple_dag_bag = SimpleDagBag(simple_dags)
                    # Fail TIs stuck in UP_FOR_RETRY with no dagrun, and
                    # un-schedule QUEUED/SCHEDULED TIs with no dagrun.
                    self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                              [State.UP_FOR_RETRY],
                                                              State.FAILED)
                    self._change_state_for_tis_without_dagrun(simple_dag_bag,
                                                              [State.QUEUED,
                                                               State.SCHEDULED],
                                                              State.NONE)
                    self._execute_task_instances(simple_dag_bag,
                                                 (State.SCHEDULED,))
                except Exception as e:
                    self.log.error("Error queuing tasks")
                    self.log.exception(e)
                    continue
            self.log.debug("Heartbeating the executor")
            self.executor.heartbeat()
            self._change_state_for_tasks_failed_to_execute()
            # Respond to executor events (e.g. externally killed tasks).
            self._process_executor_events(simple_dag_bag)
            time_since_last_heartbeat = (timezone.utcnow() -
                                         last_self_heartbeat_time).total_seconds()
            if time_since_last_heartbeat > self.heartrate:
                self.log.debug("Heartbeating the scheduler")
                self.heartbeat()
                last_self_heartbeat_time = timezone.utcnow()
            loop_end_time = time.time()
            loop_duration = loop_end_time - loop_start_time
            self.log.debug(
                "Ran scheduling loop in %.2f seconds",
                loop_duration)
            self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
            time.sleep(self._processor_poll_interval)
            # Exit on the loop *after* the agent reports done, so the last
            # harvest is still scheduled.
            # NOTE(review): assumes self._last_loop is initialized elsewhere
            # (e.g. in __init__); not visible here -- confirm.
            if self.processor_agent.done:
                self._last_loop = True
            if self._last_loop:
                self.log.info("Exiting scheduler loop as all files"
                              " have been processed {} times".format(self.num_runs))
                break
            # Throttle to at most one loop per second to limit log volume.
            if loop_duration < 1:
                sleep_length = 1 - loop_duration
                self.log.debug(
                    "Sleeping for {0:.2f} seconds to prevent excessive logging"
                    .format(sleep_length))
                sleep(sleep_length)
        self.processor_agent.terminate()
        # If all files were processed, DAGs whose files disappeared can be
        # deactivated ("deleted" DAGs).
        if self.processor_agent.all_files_processed:
            self.log.info(
                "Deactivating DAGs that haven't been touched since %s",
                execute_start_time.isoformat()
            )
            models.DAG.deactivate_stale_dags(execute_start_time)
        self.executor.end()
        settings.Session.remove()
    @provide_session
    def process_file(self, file_path, zombies, pickle_dags=False, session=None):
        """
        Process one DAG-definition file: load its DAGs, sync them to the DB,
        create dag runs / task instances, and flip runnable TIs to SCHEDULED.

        :param file_path: path of the python file to load DAGs from
        :param zombies: zombie task instances to kill in this file's DAGs
        :param pickle_dags: whether to pickle the DAGs to the DB
        :return: list of SimpleDags for the (non-paused) DAGs found
        """
        self.log.info("Processing file %s for tasks to queue", file_path)
        simple_dags = []
        try:
            dagbag = models.DagBag(file_path, include_examples=False)
        except Exception:
            self.log.exception("Failed at reloading the DAG file %s", file_path)
            Stats.incr('dag_file_refresh_error', 1, 1)
            return []
        if len(dagbag.dags) > 0:
            self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
        else:
            self.log.warning("No viable dags retrieved from %s", file_path)
            self.update_import_errors(session, dagbag)
            return []
        # Sync DAG definitions to the DB before scheduling anything.
        for dag in dagbag.dags.values():
            dag.sync_to_db()
        paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
                          if dag.is_paused]
        # Build SimpleDags (optionally pickling) for all non-paused DAGs.
        for dag_id in dagbag.dags:
            dag = dagbag.get_dag(dag_id)
            pickle_id = None
            if pickle_dags:
                pickle_id = dag.pickle(session).id
            if dag_id not in paused_dag_ids:
                simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
        # Restrict to the whitelisted dag_ids when this processor has one;
        # otherwise take all non-paused, top-level (non-subdag) DAGs.
        if len(self.dag_ids) > 0:
            dags = [dag for dag in dagbag.dags.values()
                    if dag.dag_id in self.dag_ids and
                    dag.dag_id not in paused_dag_ids]
        else:
            dags = [dag for dag in dagbag.dags.values()
                    if not dag.parent_dag and
                    dag.dag_id not in paused_dag_ids]
        # Collect TI keys in a plain list rather than a multiprocessing queue,
        # since this no longer runs in a separate
        # process and due to some unusual behavior. (empty() incorrectly
        # returns true?)
        ti_keys_to_schedule = []
        self._process_dags(dagbag, dags, ti_keys_to_schedule)
        for ti_key in ti_keys_to_schedule:
            # ti_key is (dag_id, task_id, execution_date).
            dag = dagbag.dags[ti_key[0]]
            task = dag.get_task(ti_key[1])
            ti = models.TaskInstance(task, ti_key[2])
            ti.refresh_from_db(session=session, lock_for_update=True)
            # We can defer checking the task dependency checks to the worker themselves
            # since they can be expensive to run in the scheduler.
            dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
            # Only schedule tasks that have their dependencies met, e.g. to avoid
            # a task that recently got its state changed to RUNNING from somewhere
            # other than the scheduler from getting its state overwritten.
            # TODO(aoen): It's not great that we have to check all the task instance
            if ti.are_dependencies_met(
                    dep_context=dep_context,
                    session=session,
                    verbose=True):
                ti.state = State.SCHEDULED
            self.log.info("Creating / updating %s in ORM", ti)
            session.merge(ti)
        session.commit()
        # Record import errors into the ORM (best effort).
        try:
            self.update_import_errors(session, dagbag)
        except Exception:
            self.log.exception("Error logging import errors!")
        try:
            dagbag.kill_zombies(zombies)
        except Exception:
            self.log.exception("Error killing zombies!")
        return simple_dags
    @provide_session
    def heartbeat_callback(self, session=None):
        """Emit a liveness metric on every scheduler heartbeat."""
        Stats.incr('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
    """
    A backfill job runs a dag (or subdag) for a specified date range,
    triggering the set of task instance runs in the right order and lasting
    as long as it takes for them to complete.
    """
    # Run-id prefix used to recognize dag runs created by a backfill.
    ID_PREFIX = 'backfill_'
    ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
    __mapper_args__ = {
        'polymorphic_identity': 'BackfillJob'
    }
    class _DagRunTaskStatus(object):
        """
        Internal, mutable bookkeeping for a backfill: which TI keys are
        waiting / running / finished, which dag runs are still active, and
        progress counters. Intended for use only inside BackfillJob.
        """
        def __init__(self,
                     to_run=None,
                     running=None,
                     skipped=None,
                     succeeded=None,
                     failed=None,
                     not_ready=None,
                     deadlocked=None,
                     active_runs=None,
                     executed_dag_run_dates=None,
                     finished_runs=0,
                     total_runs=0,
                     ):
            """
            :param to_run: dict of TIs to run, keyed by TI key
            :param running: dict of TIs currently running, keyed by TI key
            :param skipped: set of TI keys that skipped
            :param succeeded: set of TI keys that succeeded
            :param failed: set of TI keys that failed
            :param not_ready: set of TI keys not ready to run yet
            :param deadlocked: set of deadlocked TIs
            :param active_runs: list of active dag runs
            :param executed_dag_run_dates: execution dates of finished runs
            :param finished_runs: number of finished runs so far
            :param total_runs: total number of runs in this backfill
            """
            self.to_run = to_run or dict()
            self.running = running or dict()
            self.skipped = skipped or set()
            self.succeeded = succeeded or set()
            self.failed = failed or set()
            self.not_ready = not_ready or set()
            self.deadlocked = deadlocked or set()
            self.active_runs = active_runs or list()
            self.executed_dag_run_dates = executed_dag_run_dates or set()
            self.finished_runs = finished_runs
            self.total_runs = total_runs
    def __init__(
            self,
            dag,
            start_date=None,
            end_date=None,
            mark_success=False,
            donot_pickle=False,
            ignore_first_depends_on_past=False,
            ignore_task_deps=False,
            pool=None,
            delay_on_limit_secs=1.0,
            verbose=False,
            conf=None,
            rerun_failed_tasks=False,
            *args, **kwargs):
        """
        :param dag: DAG object to backfill
        :param start_date: start of the backfill date range
        :param end_date: end of the backfill date range
        :param mark_success: whether to mark tasks as success without running
        :param donot_pickle: whether to skip pickling the DAG
        :param ignore_first_depends_on_past: ignore depends_on_past for the
            first run date only
        :param ignore_task_deps: whether to ignore task-level dependencies
        :param pool: pool to run the backfill tasks in
        :param delay_on_limit_secs: seconds to wait when max_active_runs is hit
        :param verbose: whether to log dependency-check details
        :param conf: dict of key/value pairs passed to the created dag runs
        :param rerun_failed_tasks: whether to automatically rerun failed tasks
        :param args: passed through to BaseJob
        :param kwargs: passed through to BaseJob
        """
        self.dag = dag
        self.dag_id = dag.dag_id
        self.bf_start_date = start_date
        self.bf_end_date = end_date
        self.mark_success = mark_success
        self.donot_pickle = donot_pickle
        self.ignore_first_depends_on_past = ignore_first_depends_on_past
        self.ignore_task_deps = ignore_task_deps
        self.pool = pool
        self.delay_on_limit_secs = delay_on_limit_secs
        self.verbose = verbose
        self.conf = conf
        self.rerun_failed_tasks = rerun_failed_tasks
        super(BackfillJob, self).__init__(*args, **kwargs)
    def _update_counters(self, ti_status):
        """
        Update the per-state counters for the tasks that were running,
        re-adding tasks to to_run when they need another attempt.

        :param ti_status: the internal status of the backfill job
        """
        for key, ti in list(ti_status.running.items()):
            ti.refresh_from_db()
            if ti.state == State.SUCCESS:
                ti_status.succeeded.add(key)
                self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
                ti_status.running.pop(key)
                continue
            elif ti.state == State.SKIPPED:
                ti_status.skipped.add(key)
                self.log.debug("Task instance %s skipped. Don't rerun.", ti)
                ti_status.running.pop(key)
                continue
            elif ti.state == State.FAILED:
                self.log.error("Task instance %s failed", ti)
                ti_status.failed.add(key)
                ti_status.running.pop(key)
                continue
            # Needs another attempt later: move it back to the to_run pool.
            elif ti.state == State.UP_FOR_RETRY:
                self.log.warning("Task instance %s is up for retry", ti)
                ti_status.running.pop(key)
                ti_status.to_run[key] = ti
            # State was wiped (externally, or by concurrency limits); requeue
            # it so it isn't stuck forever.
            elif ti.state == State.NONE:
                self.log.warning(
                    "FIXME: task instance %s state was set to none externally or "
                    "reaching concurrency limits. Re-adding task to queue.",
                    ti
                )
                ti.set_state(State.SCHEDULED)
                ti_status.running.pop(key)
                ti_status.to_run[key] = ti
    def _manage_executor_state(self, running):
        """
        Check that the executor agrees with the DB state of running task
        instances; fail TIs the executor reports finished while the DB still
        shows them RUNNING/QUEUED (likely killed externally).

        :param running: dict of TI key -> task instance to verify
        """
        executor = self.executor
        for key, state in list(executor.get_event_buffer().items()):
            if key not in running:
                self.log.warning(
                    "%s state %s not in running=%s",
                    key, state, running.values()
                )
                continue
            ti = running[key]
            ti.refresh_from_db()
            self.log.debug("Executor state: %s task %s", state, ti)
            if state == State.FAILED or state == State.SUCCESS:
                if ti.state == State.RUNNING or ti.state == State.QUEUED:
                    # Executor says done but the DB disagrees.
                    msg = ("Executor reports task instance {} finished ({}) "
                           "although the task says its {}. Was the task "
                           "killed externally?".format(ti, state, ti.state))
                    self.log.error(msg)
                    ti.handle_failure(msg)
@provide_session
def _get_dag_run(self, run_date, session=None):
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
respect_dag_max_active_limit = (True
if (self.dag.schedule_interval and
not self.dag.is_subdag)
else False)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
run.dag = self.dag
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
    @provide_session
    def _task_instances_for_dag_run(self, dag_run, session=None):
        """
        Return a map {ti.key: ti} of the task instances to run in dag_run
        (excluding REMOVED TIs), after resetting orphaned TIs and flipping
        state-less TIs to SCHEDULED.
        """
        tasks_to_run = {}
        if dag_run is None:
            return tasks_to_run
        # Reclaim any TIs orphaned by a previous (crashed) job first.
        self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
        dag_run.refresh_from_db()
        make_transient(dag_run)
        # TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
        for ti in dag_run.get_task_instances():
            # all tasks part of the backfill are scheduled to run
            if ti.state == State.NONE:
                ti.set_state(State.SCHEDULED, session=session)
            if ti.state != State.REMOVED:
                tasks_to_run[ti.key] = ti
        return tasks_to_run
def _log_progress(self, ti_status):
msg = ' | '.join([
"[backfill progress]",
"finished run {0} of {1}",
"tasks waiting: {2}",
"succeeded: {3}",
"running: {4}",
"failed: {5}",
"skipped: {6}",
"deadlocked: {7}",
"not ready: {8}"
]).format(
ti_status.finished_runs,
ti_status.total_runs,
len(ti_status.to_run),
len(ti_status.succeeded),
len(ti_status.running),
len(ti_status.failed),
len(ti_status.skipped),
len(ti_status.deadlocked),
len(ti_status.not_ready))
self.log.info(msg)
self.log.debug(
"Finished dag run loop iteration. Remaining tasks %s",
ti_status.to_run.values()
)
@provide_session
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
for task in self.dag.topological_sort():
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance {} state was set to None "
"externally. This should not happen"
)
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
ti.set_state(State.SCHEDULED, session=session)
else:
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
ti.refresh_from_db(lock_for_update=True, session=session)
if ti.state == State.SCHEDULED or ti.state == State.UP_FOR_RETRY:
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
ti.state = State.QUEUED
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
continue
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
continue
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
self.heartbeat()
executor.heartbeat()
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status, session=None):
err = ''
if ti_status.failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(ti_status.failed))
if ti_status.deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=self.verbose)
for t in ti_status.deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(ti_status.succeeded)
err += ' These tasks are running:\n{}\n'.format(ti_status.running)
err += ' These tasks have failed:\n{}\n'.format(ti_status.failed)
err += ' These tasks are skipped:\n{}\n'.format(ti_status.skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(ti_status.deadlocked)
return err
    @provide_session
    def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
                               start_date, session=None):
        """
        Compute (and execute) the dag runs for the given run dates, updating
        ti_status in place with active runs and the executed run dates.

        :param run_dates: execution dates for the dag runs to process
        :param ti_status: the internal status of the backfill job
        :param executor: the executor to run the task instances on
        :param pickle_id: numeric id of the pickled dag, None if not pickled
        :param start_date: backfill start date (for depends_on_past handling)
        :param session: DB session (injected by @provide_session)
        """
        for next_run_date in run_dates:
            dag_run = self._get_dag_run(next_run_date, session=session)
            tis_map = self._task_instances_for_dag_run(dag_run,
                                                       session=session)
            # dag_run is None when max_active_runs blocked its creation.
            if dag_run is None:
                continue
            ti_status.active_runs.append(dag_run)
            ti_status.to_run.update(tis_map or {})
        processed_dag_run_dates = self._process_backfill_task_instances(
            ti_status=ti_status,
            executor=executor,
            pickle_id=pickle_id,
            start_date=start_date,
            session=session)
        ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
    @provide_session
    def _execute(self, session=None):
        """
        Initialize everything needed to run the dag for the requested date
        range and loop until all run dates have been executed (waiting out
        max_active_runs limits between passes).
        """
        ti_status = BackfillJob._DagRunTaskStatus()
        start_date = self.bf_start_date
        # Get intervals between the start/end dates, which will turn into dag runs
        run_dates = self.dag.get_run_dates(start_date=start_date,
                                           end_date=self.bf_end_date)
        if len(run_dates) == 0:
            self.log.info("No run dates were found for the given dates and dag interval.")
            return
        # picklin'
        pickle_id = None
        if not self.donot_pickle and self.executor.__class__ not in (
                executors.LocalExecutor, executors.SequentialExecutor):
            pickle = DagPickle(self.dag)
            session.add(pickle)
            session.commit()
            pickle_id = pickle.id
        executor = self.executor
        executor.start()
        ti_status.total_runs = len(run_dates)
        try:
            remaining_dates = ti_status.total_runs
            while remaining_dates > 0:
                # Only process dates not yet executed in a previous pass.
                dates_to_process = [run_date for run_date in run_dates
                                    if run_date not in ti_status.executed_dag_run_dates]
                self._execute_for_run_dates(run_dates=dates_to_process,
                                            ti_status=ti_status,
                                            executor=executor,
                                            pickle_id=pickle_id,
                                            start_date=start_date,
                                            session=session)
                remaining_dates = (
                    ti_status.total_runs - len(ti_status.executed_dag_run_dates)
                )
                err = self._collect_errors(ti_status=ti_status, session=session)
                if err:
                    raise AirflowException(err)
                if remaining_dates > 0:
                    self.log.info(
                        "max_active_runs limit for dag %s has been reached "
                        " - waiting for other dag runs to finish",
                        self.dag_id
                    )
                    time.sleep(self.delay_on_limit_secs)
        finally:
            executor.end()
            session.commit()
        self.log.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
    """
    Runs a single task instance on the local machine via a TaskRunner,
    heartbeating while the task executes.
    """
    __mapper_args__ = {
        'polymorphic_identity': 'LocalTaskJob'
    }
    def __init__(
            self,
            task_instance,
            ignore_all_deps=False,
            ignore_depends_on_past=False,
            ignore_task_deps=False,
            ignore_ti_state=False,
            mark_success=False,
            pickle_id=None,
            pool=None,
            *args, **kwargs):
        """
        :param task_instance: the TaskInstance to run
        :param ignore_all_deps: skip all dependency checks
        :param ignore_depends_on_past: skip depends_on_past checks
        :param ignore_task_deps: skip task-level dependency checks
        :param ignore_ti_state: run regardless of the TI's current state
        :param mark_success: mark the task successful without running it
        :param pickle_id: id of the pickled dag, if any
        :param pool: pool to run the task instance in
        :param args: passed through to BaseJob
        :param kwargs: passed through to BaseJob
        """
        self.task_instance = task_instance
        self.dag_id = task_instance.dag_id
        self.ignore_all_deps = ignore_all_deps
        self.ignore_depends_on_past = ignore_depends_on_past
        self.ignore_task_deps = ignore_task_deps
        self.ignore_ti_state = ignore_ti_state
        self.pool = pool
        self.pickle_id = pickle_id
        self.mark_success = mark_success
        # Remember that we already started terminating so we don't
        # terminate multiple times
        self.terminating = False
        super(LocalTaskJob, self).__init__(*args, **kwargs)
    def _execute(self):
        """
        Start the task via a TaskRunner and monitor it until it exits,
        heartbeating along the way; a prolonged heartbeat failure is treated
        as fatal (the scheduler may have rescheduled this task elsewhere).
        """
        self.task_runner = get_task_runner(self)
        def signal_handler(signum, frame):
            # Propagate SIGTERM to the child process, then abort this job.
            self.log.error("Received SIGTERM. Terminating subprocesses")
            self.on_kill()
            raise AirflowException("LocalTaskJob received SIGTERM signal")
        signal.signal(signal.SIGTERM, signal_handler)
        if not self.task_instance._check_and_change_state_before_execution(
                mark_success=self.mark_success,
                ignore_all_deps=self.ignore_all_deps,
                ignore_depends_on_past=self.ignore_depends_on_past,
                ignore_task_deps=self.ignore_task_deps,
                ignore_ti_state=self.ignore_ti_state,
                job_id=self.id,
                pool=self.pool):
            self.log.info("Task is not able to be run")
            return
        try:
            self.task_runner.start()
            last_heartbeat_time = time.time()
            heartbeat_time_limit = conf.getint('scheduler',
                                               'scheduler_zombie_task_threshold')
            while True:
                # Monitor the task to see if it's done
                return_code = self.task_runner.return_code()
                if return_code is not None:
                    self.log.info("Task exited with return code %s", return_code)
                    return
                # Periodically heartbeat so the scheduler doesn't decide this
                # is a zombie
                try:
                    self.heartbeat()
                    last_heartbeat_time = time.time()
                except OperationalError:
                    Stats.incr('local_task_job_heartbeat_failure', 1, 1)
                    self.log.exception(
                        "Exception while trying to heartbeat! Sleeping for %s seconds",
                        self.heartrate
                    )
                    time.sleep(self.heartrate)
                # If it's been too long since we've heartbeat, then it's possible that
                # the scheduler considers us dead; abort rather than double-run.
                time_since_last_heartbeat = time.time() - last_heartbeat_time
                if time_since_last_heartbeat > heartbeat_time_limit:
                    Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
                    self.log.error("Heartbeat time limited exceeded!")
                    raise AirflowException("Time since last heartbeat({:.2f}s) "
                                           "exceeded limit ({}s)."
                                           .format(time_since_last_heartbeat,
                                                   heartbeat_time_limit))
        finally:
            self.on_kill()
    def on_kill(self):
        """Stop the underlying task process and run task-runner cleanup."""
        self.task_runner.terminate()
        self.task_runner.on_finish()
    @provide_session
    def heartbeat_callback(self, session=None):
        """
        Self-destruct check run on every heartbeat: verify the recorded
        hostname/pid still match this process while RUNNING, and take the
        poison pill if the TI's state was changed externally.
        """
        if self.terminating:
            # Already terminating; make sure any late-created processes die too.
            self.task_runner.terminate()
            return
        self.task_instance.refresh_from_db()
        ti = self.task_instance
        fqdn = get_hostname()
        same_hostname = fqdn == ti.hostname
        same_process = ti.pid == os.getpid()
        if ti.state == State.RUNNING:
            # Another host/process claims this TI -> we must not keep running.
            if not same_hostname:
                self.log.warning("The recorded hostname {ti.hostname} "
                                 "does not match this instance's hostname "
                                 "{fqdn}".format(**locals()))
                raise AirflowException("Hostname of job runner does not match")
            elif not same_process:
                current_pid = os.getpid()
                self.log.warning("Recorded pid {ti.pid} does not match "
                                 "the current pid "
                                 "{current_pid}".format(**locals()))
                raise AirflowException("PID of job runner does not match")
        elif (
                self.task_runner.return_code() is None and
                hasattr(self.task_runner, 'process')
        ):
            # State moved away from RUNNING externally while the child is
            # still alive: terminate it.
            self.log.warning(
                "State of this instance has been externally set to %s. "
                "Taking the poison pill.",
                ti.state
            )
            self.task_runner.terminate()
            self.terminating = True
| true | true |
f71fa3db4ff531443af2a92cd1b1a2d567ddaf8d | 188 | py | Python | lightningrun.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | lightningrun.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | lightningrun.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | import os
os.system("git clone https://github.com/justteen/BUZZ-USERBOT /root/userbot && mkdir /root/userbot/bin/ && cd /root/userbot/ && chmod +x /usr/local/bin/* && python3 -m userbot")
| 62.666667 | 177 | 0.702128 | import os
os.system("git clone https://github.com/justteen/BUZZ-USERBOT /root/userbot && mkdir /root/userbot/bin/ && cd /root/userbot/ && chmod +x /usr/local/bin/* && python3 -m userbot")
| true | true |
f71fa5c1c650d81c5044415e020b232623ab58c2 | 37,109 | py | Python | src/m1_Line.py | chenx15rose/10-MoreImplementingClasses | 2bce636c73e968111c22bc245d90a596276d4679 | [
"MIT"
] | null | null | null | src/m1_Line.py | chenx15rose/10-MoreImplementingClasses | 2bce636c73e968111c22bc245d90a596276d4679 | [
"MIT"
] | null | null | null | src/m1_Line.py | chenx15rose/10-MoreImplementingClasses | 2bce636c73e968111c22bc245d90a596276d4679 | [
"MIT"
] | null | null | null | """
A simple Line class.
NOTE: This is NOT rosegraphics -- it is your OWN Line class.
Authors: David Mutchler, Vibha Alangar, Dave Fisher, Amanda Stouder,
their colleagues and Xiaolong Chen.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
import m1t_test_Line as m1t
########################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
########################################################################
# ----------------------------------------------------------------------
# DONE: 2. With your instructor, READ THE INSTRUCTIONS
# in file m0_INSTRUCTIONS.txt, asking questions as needed.
# Once you understand the instructions, mark this TO DO as DONE.
#
# Also go ahead and mark the src as a Sources Root (right click src folder
# Mark Directory as --> Sources Root. Expand the imports above
# and notice the red line go away from the m1t_test_Line module.
# This step is not critically important this time, but still good to do.
# ----------------------------------------------------------------------
########################################################################
# NOTE: For ALL of the methods that you implement, the method is allowed
# to have additional side effects as needed by it and/or other methods.
########################################################################
def main():
    """
    Calls the TEST functions in this module, but ONLY if the method
    to be tested has at least a partial implementation.  That is,
    a TEST function is not called until work has begun on the code
    that it tests (as reported by m1t.is_implemented).
    """
    # (method name, corresponding test function), in testing order:
    tests_by_method = (
        ('__init__', run_test_init),
        ('clone', run_test_clone),
        ('reverse', run_test_reverse),
        ('slope', run_test_slope),
        ('length', run_test_length),
        ('get_number_of_clones', run_test_get_number_of_clones),
        ('line_plus', run_test_line_plus),
        ('line_minus', run_test_line_minus),
        ('midpoint', run_test_midpoint),
        ('is_parallel', run_test_is_parallel),
        ('reset', run_test_reset),
    )
    for method_name, test_function in tests_by_method:
        if m1t.is_implemented(method_name):
            test_function()
########################################################################
# Students:
# Do NOT touch the following Point class - it has no TO DO.
# Do NOT copy code from the methods in this Point class.
#
# DO ** READ ** this Point class,
# asking questions about any of it that you do not understand.
#
# DO ** CALL ** methods in this Point class as needed
# in implementing and testing the methods of the ** Line ** class.
#
# IMPORTANT, IMPORTANT, IMPORTANT:
# *** In your ** Line ** class methods, you should NEVER have code
# *** that a ** Point ** class method could do for you.
########################################################################
# The Point class (and its methods) begins here.
########################################################################
class Point(object):
    """ Represents a point in 2-dimensional space. """

    def __init__(self, x: float, y: float) -> None:
        """
        Sets instance variables x and y to the given coordinates.
        """
        self.x = x
        self.y = y

    def __repr__(self) -> str:
        """
        Returns a string representation of this Point.
        For each coordinate (x and y), the representation
        - uses no decimal points if the number is close to an integer,
        - else it uses decimal_places places after the decimal point, where decimal_places = 2.
        Examples:
            Point(10, 3.14)
            Point(3.01, 2.99)
        """
        decimal_places = 2  # Use 2 places after the decimal point
        formats = []
        numbers = []
        for coordinate in (self.x, self.y):
            # "Close to an integer" means within 10 ** -decimal_places (0.01):
            if abs(coordinate - round(coordinate)) < (10 ** -decimal_places):
                # Treat it as an integer:
                formats.append('{}')
                numbers.append(round(coordinate))
            else:
                # Treat it as a float to decimal_places decimal places:
                formats.append('{:.' + str(decimal_places) + 'f}')
                numbers.append(round(coordinate, decimal_places))
        format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')'
        return format_string.format(numbers[0], numbers[1])

    def __eq__(self, p2: 'Point') -> bool:
        """
        Defines == for Points:  a == b   is equivalent to  a.__eq__(b).
        Treats two numbers as "equal" if they are within 6 decimal
        places of each other for both x and y coordinates.
        """
        # NOTE(review): defining __eq__ without __hash__ makes Point
        # instances unhashable in Python 3 -- fine for this exercise,
        # but Points cannot be used as dict keys or set members.
        return (round(self.x, 6) == round(p2.x, 6) and
                round(self.y, 6) == round(p2.y, 6))

    def clone(self) -> 'Point':
        """ Returns a new Point at the same (x, y) as this Point. """
        return Point(self.x, self.y)

    def distance_from(self, p2: 'Point') -> float:
        """ Returns the distance this Point is from the given Point. """
        dx_squared = (self.x - p2.x) ** 2
        dy_squared = (self.y - p2.y) ** 2
        return math.sqrt(dx_squared + dy_squared)

    def halfway_to(self, p2: 'Point') -> 'Point':
        """
        Given another Point object p2, returns a new Point
        that is half-way between this Point and the given Point (p2).
        """
        return Point((self.x + p2.x) / 2,
                     (self.y + p2.y) / 2)

    def plus(self, p2: 'Point') -> 'Point':
        """
        Returns a Point whose coordinates are those of this Point
        plus the given Point.  For example:
            p1 = Point(500, 20)
            p2 = Point(100, 13)
            p3 = p1.plus(p2)
            print(p3)
        would print:   Point(600, 33)
        """
        return Point(self.x + p2.x, self.y + p2.y)

    def minus(self, p2: 'Point') -> 'Point':
        """
        Returns a Point whose coordinates are those of this Point
        minus the given Point.  For example:
            p1 = Point(500, 20)
            p2 = Point(100, 13)
            p3 = p1.minus(p2)
            print(p3)
        would print:   Point(400, 7)
        """
        return Point(self.x - p2.x, self.y - p2.y)
########################################################################
# The Line class (and its methods) begins here.
########################################################################
class Line(object):
    """
    Represents a (mutable, directed) line segment in 2-dimensional
    space, from a start Point to an end Point.

    Instance variables:
      start, end -- the CURRENT endpoints (Point objects) of this Line.
    Internal bookkeeping (used only by methods of this class):
      nclone           -- how many times  clone  has been called on
                          this particular Line.
      oristart, oriend -- pristine copies of the endpoints supplied at
                          construction time; used only by  reset.
    """

    def __init__(self, start, end):
        """
        Stores CLONES of the given Points as this Line's  start  and
        end  instance variables, so later mutation of the caller's
        Points cannot affect this Line (and vice versa).  Also stores
        extra clones for  reset  and initializes the clone counter.

        Type hints:
          :type start: Point
          :type end: Point
        """
        self.start = start.clone()
        self.end = end.clone()
        self.nclone = 0
        self.oristart = start.clone()
        self.oriend = end.clone()

    def __repr__(self):
        """
        Returns a string representation of this Line, in the form:
            Line[(x1, y1), (x2, y2)]

        Type hints:
          :rtype: str
        """
        start = repr(self.start).replace('Point', '')
        end = repr(self.end).replace('Point', '')
        return 'Line[{}, {}]'.format(start, end)

    def __eq__(self, line2):
        """
        Two Lines are equal when their start Points are equal AND
        their end Points are equal (so direction matters).
        Note:  a == b   is equivalent to  a.__eq__(b).

        Type hints:
          :type line2: Line
          :rtype: bool
        """
        return (self.start == line2.start) and (self.end == line2.end)

    def clone(self):
        """
        Returns a new Line whose endpoints are clones of this Line's
        CURRENT endpoints.  Side effect: increments this Line's clone
        counter (see  get_number_of_clones).  The returned clone's own
        counter starts at 0.

        Type hints:
          :rtype: Line
        """
        self.nclone += 1
        return Line(self.start, self.end)  # Line.__init__ does the cloning.

    def reverse(self):
        """
        MUTATES this Line so that its direction is reversed
        (that is, its start and end Points are swapped).
        """
        self.start, self.end = self.end, self.start

    def slope(self):
        """
        Returns the slope of this Line, or math.inf if this Line
        is vertical.

        Type hints:
          :rtype: float
        """
        if self.start.x == self.end.x:
            return math.inf
        return (self.end.y - self.start.y) / (self.end.x - self.start.x)

    def length(self):
        """
        Returns the length of this Line.

        Type hints:
          :rtype: float
        """
        # Point already knows how to measure distance -- delegate to it
        # (per this file's rule: never re-implement a Point method here).
        return self.start.distance_from(self.end)

    def get_number_of_clones(self):
        """
        Returns the number of times that this Line has been cloned
        (via the  clone  method).

        Type hints:
          :rtype: int
        """
        return self.nclone

    def line_plus(self, other_line):
        """
        Returns a new Line whose start is the (Point) sum of the two
        Lines' starts and whose end is the sum of the two Lines' ends.

        Type hints:
          :type other_line: Line
          :rtype: Line
        """
        return Line(self.start.plus(other_line.start),
                    self.end.plus(other_line.end))

    def line_minus(self, other_line):
        """
        Returns a new Line whose start is this Line's start minus
        other_line's start, and whose end is this Line's end minus
        other_line's end.

        Type hints:
          :type other_line: Line
          :rtype: Line
        """
        return Line(self.start.minus(other_line.start),
                    self.end.minus(other_line.end))

    def midpoint(self):
        """
        Returns a new Point at the midpoint of this Line.

        Type hints:
          :rtype: Point
        """
        return self.start.halfway_to(self.end)

    def is_parallel(self, line2):
        """
        Returns True if this Line is parallel to the given Line
        (line2), else False.  Two vertical Lines count as parallel.

        Type hints:
          :type line2: Line
          :rtype: bool
        """
        # Round to 12 decimal places so that floating-point imprecision
        # does not make truly-parallel lines compare unequal.  round()
        # leaves math.inf unchanged, so vertical lines compare correctly.
        return round(self.slope(), 12) == round(line2.slope(), 12)

    def reset(self):
        """
        MUTATES this Line so that its start and end Points revert to
        (clones of) the Points given when this Line was constructed.
        """
        # BUG FIX: assign CLONES of the saved originals.  Assigning the
        # stored Points themselves would let a later mutation of
        # self.start / self.end corrupt them, so a second reset would
        # restore the wrong coordinates.
        self.start = self.oristart.clone()
        self.end = self.oriend.clone()
########################################################################
# The TEST functions for the Line class begin here.
#
# We have already written the TEST functions. They all take the form:
# -- m1t.run_test_BLAH() # This runs OUR tests.
# -- One more test (or tests) that came directly from the Example
# in the specification.
########################################################################
def run_test_init():
    """ Tests the __init__ method of the Line class. """
    m1t.run_test_init()  # Our instructor-supplied tests.

    # One additional test, straight from the Example in the spec:
    start = Point(30, 17)
    end = Point(50, 80)
    line = Line(start, end)  # invokes __init__

    print(line.start)           # Point(30, 17)
    print(line.end)             # Point(50, 80)
    print(line.start == start)  # True  (equal in value ...)
    print(line.start is start)  # False (... but a distinct clone)

    print('The above should print:')
    for expected_line in (' Point(30, 17)',
                          ' Point(50, 80)',
                          ' True',
                          ' False'):
        print(expected_line)
def run_test_clone():
    """ Tests the clone method of the Line class. """
    m1t.run_test_clone()  # This runs OUR tests.

    # One ADDITIONAL test (from the Example in the clone spec).
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line1 = Line(p1, p2)
    line2 = line1.clone()

    print(line1)           # Should print: Line[(30, 17), (50, 80)]
    print(line2)           # Should print: Line[(30, 17), (50, 80)]
    print(line1 == line2)  # Should print: True
    print(line1 is line2)  # Should print: False
    print(line1.start is line2.start)  # Should print: False
    print(line1.end is line2.end)      # Should print: False

    # Mutating the original must not affect the clone:
    line1.start = Point(11, 12)
    print(line1)           # Should print: Line[(11, 12), (50, 80)]
    print(line2)           # Should print: Line[(30, 17), (50, 80)]
    print(line1 == line2)  # Should now print: False

    print('The above should print:')
    print(' Line[(30, 17), (50, 80)]')
    print(' Line[(30, 17), (50, 80)]')
    print(' True')
    print(' False')
    print(' False')
    print(' False')
    print(' Line[(11, 12), (50, 80)]')
    # BUG FIX: this expected-output line was missing its closing ']':
    print(' Line[(30, 17), (50, 80)]')
    print(' False')
def run_test_reverse():
    """ Tests the reverse method of the Line class. """
    m1t.run_test_reverse()  # This runs OUR tests.

    # One ADDITIONAL test (from the Example in the reverse spec).
    p1 = Point(30, 17)
    p2 = Point(50, 80)
    line1 = Line(p1, p2)
    line2 = line1.clone()

    print(line1)           # Should print: Line[(30, 17), (50, 80)]
    line1.reverse()
    print(line1)           # Should print: Line[(50, 80), (30, 17)]
    print(line1 == line2)  # Should print: False
    line1.reverse()        # Reversing twice restores the original direction.
    print(line1 == line2)  # Should now print: True

    print('The above should print:')
    print(' Line[(30, 17), (50, 80)]')
    # BUG FIX: this expected-output line was missing its closing ']':
    print(' Line[(50, 80), (30, 17)]')
    print(' False')
    print(' True')
def run_test_slope():
    """ Tests the slope method of the Line class. """
    m1t.run_test_slope()  # Our instructor-supplied tests.

    # Additional tests from the Example in the slope spec.
    rising = Line(Point(30, 3), Point(50, 8))
    print(rising.slope())             # (8 - 3) / (50 - 30) == 0.25
    vertical = Line(Point(10, 10), Point(10, 5))
    print(vertical.slope())           # math.inf
    print(vertical.slope() == 'inf')  # False: math.inf is not the STRING 'inf'

    print('The above should print:')
    for expected_line in (' 0.25 (approximately)', ' inf', ' False'):
        print(expected_line)
def run_test_length():
    """ Tests the length method of the Line class. """
    m1t.run_test_length()  # Our instructor-supplied tests.

    # Additional tests from the Example in the length spec.
    horizontal = Line(Point(166, 10), Point(100, 10))
    print(horizontal.length())    # |166 - 100| == 66.0
    triangle_345 = Line(Point(0, 0), Point(3, 4))
    print(triangle_345.length())  # 5.0 (a 3-4-5 right triangle)

    print('The above should print:')
    for expected_line in (' 66.0', ' 5.0 (approximately)'):
        print(expected_line)
def run_test_get_number_of_clones():
    """ Tests the get_number_of_clones method of the Line class. """
    m1t.run_test_get_number_of_clones()  # Our instructor-supplied tests.

    # Additional test from the Example in the spec: the counter belongs
    # to the Line that  clone  was called ON, and clones start at 0.
    line1 = Line(Point(500, 20), Point(100, 8))
    line2 = line1.clone()  # line1 cloned: 1st time
    line3 = line1.clone()  # line1 cloned: 2nd time
    line4 = line3.clone()  # line3 cloned: 1st time
    line5 = line1.clone()  # line1 cloned: 3rd time
    for line in (line1, line2, line3, line4, line5):
        print(line.get_number_of_clones())
    print('The above should print 3, then 0, then 1, then 0, then 0.')
def run_test_line_plus():
    """ Tests the line_plus method of the Line class. """
    m1t.run_test_line_plus()  # Our instructor-supplied tests.

    # Additional test from the Example in the line_plus spec.
    augend = Line(Point(500, 20), Point(100, 8))
    addend = Line(Point(100, 13), Point(400, 8))
    total = augend.line_plus(addend)
    print(total)
    print('The above should print: Line[(600, 33), (500, 16)]')
def run_test_line_minus():
    """ Tests the line_minus method of the Line class. """
    m1t.run_test_line_minus()  # Our instructor-supplied tests.

    # Additional test from the Example in the line_minus spec.
    minuend = Line(Point(500, 20), Point(100, 8))
    subtrahend = Line(Point(100, 13), Point(400, 8))
    difference = minuend.line_minus(subtrahend)
    print(difference)
    print('The above should print: Line[(400, 7), (-300, 0)]')
def run_test_midpoint():
    """ Tests the midpoint method of the Line class. """
    m1t.run_test_midpoint()  # Our instructor-supplied tests.

    # Additional test from the Example in the midpoint spec.
    segment = Line(Point(3, 10), Point(9, 20))
    print(segment.midpoint())  # Point(6, 15)
    print('The above should print: Point(6, 15)')
def run_test_is_parallel():
    """ Tests the is_parallel method of the Line class. """
    m1t.run_test_is_parallel()  # Our instructor-supplied tests.

    # Additional tests from the Example in the is_parallel spec.
    line1 = Line(Point(15, 30), Point(17, 50))  # slope 10.0
    line2 = Line(Point(10, 10), Point(15, 60))  # slope 10.0
    line3 = Line(Point(10, 10), Point(80, 80))  # slope 1.0
    line4 = Line(Point(10, 10), Point(10, 20))  # vertical (slope math.inf)

    print(line1.is_parallel(line2))  # True
    print(line2.is_parallel(line1))  # True  (symmetric)
    print(line1.is_parallel(line3))  # False
    print(line1.is_parallel(line4))  # False
    print(line1.is_parallel(line1))  # True  (a line is parallel to itself)
    print(line4.is_parallel(line4))  # True  (even a vertical one)

    print('The above should print:')
    print(' True, True, False, False, True, True')
def run_test_reset():
    """ Tests the reset method of the Line class. """
    m1t.run_test_reset()  # Our instructor-supplied tests.

    # Additional tests from the Example in the reset spec.
    line1 = Line(Point(-3, -4), Point(3, 4))
    line2 = Line(Point(0, 1), Point(10, 20))

    # Mutate both lines in assorted ways:
    line1.start = Point(100, 300)
    line2.end = Point(99, 4)
    line1.reverse()

    print(line1)  # the CURRENT endpoints of line1
    print(line2)  # similarly for line2

    # reset must restore the endpoints given at construction time:
    line1.reset()
    line2.reset()
    print(line1)  # Line[(-3, -4), (3, 4)]
    print(line2)  # Line[(0, 1), (10, 20)]

    print('The above should print:')
    for expected_line in (' Line[(3, 4), (100, 300)]',
                          ' Line[(0, 1), (99, 4)]',
                          ' Line[(-3, -4), (3, 4)]',
                          ' Line[(0, 1), (10, 20)]'):
        print(expected_line)
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
    main()  # Run the tests only when this file is executed directly.
| 37.789206 | 97 | 0.487052 |
import math
import m1t_test_Line as m1t
| true | true |
f71fa5eb456e72440a4f739d0f3da6d1a8af1718 | 696 | py | Python | corrector/utils/cut_word/mm.py | mamachengcheng/corrector | e87c49f7dd7d9f236084e963906f414f72a884c9 | [
"MIT"
] | 4 | 2020-11-11T14:08:56.000Z | 2022-02-15T01:31:27.000Z | corrector/utils/cut_word/mm.py | mamachengcheng/corrector | e87c49f7dd7d9f236084e963906f414f72a884c9 | [
"MIT"
] | null | null | null | corrector/utils/cut_word/mm.py | mamachengcheng/corrector | e87c49f7dd7d9f236084e963906f414f72a884c9 | [
"MIT"
] | null | null | null |
def mm(word_dict, text):
    """
    Abandoned first draft of maximum matching; it is immediately
    shadowed by the full implementation of  mm  defined below and
    always returns an empty result list.

    BUG FIX: the original body contained the incomplete statement
    `max_length =`, which is a SyntaxError and made the whole module
    unimportable.  The dead assignment has been removed.
    """
    result = list()
    return result
def mm(word_dict, token, window_size=5):
    """
    Forward maximum matching: scans  token  left to right, and at each
    position tries the longest candidate substring (up to  window_size
    characters) first, shrinking one character at a time until a
    candidate is found in  word_dict.

    Returns a pair (start_indexes, words): the start index recorded for
    each matched word, and the matched words themselves.  Characters
    that match nothing are skipped (not returned).

    NOTE(review): when  window_size + position  exceeds  len(token),
    several stop values yield the same slice, and a match found at an
    overflowing stop records an index past the true match position --
    this quirk of the original index arithmetic is preserved here.
    """
    start_indexes = []
    words = []
    position = 0
    total = len(token)
    while position < total:
        # Longest candidate first, shrinking one character at a time:
        for stop in range(window_size + position, position, -1):
            candidate = token[position:stop]
            if candidate in word_dict:
                position = stop - 1  # advance past the match (+1 below)
                start_indexes.append(position - len(candidate) + 1)
                words.append(candidate)
                break
        position += 1
    return start_indexes, words
| 26.769231 | 81 | 0.541667 |
def mm(word_dict, text):
result = list()
max_length =
return result
def mm(word_dict, token, window_size=5):
idxs = []
result = []
index = 0
text_size = len(token)
while text_size > index:
for size in range(window_size + index, index, -1):
]
if piece in word_dict:
index = size - 1
idxs.append(index-len(piece)+1)
result.append(piece)
break
index = index + 1
return idxs, result
| false | true |
f71fa77e0d385cdca5a886aa27031fa0a8e90b0c | 1,251 | py | Python | canary-dedup-groomer.py | open-cluster-management/canary-reporting | d97d49fb83e8e8d5e56b937261928285303e475e | [
"Apache-2.0"
] | null | null | null | canary-dedup-groomer.py | open-cluster-management/canary-reporting | d97d49fb83e8e8d5e56b937261928285303e475e | [
"Apache-2.0"
] | 35 | 2021-03-24T14:57:42.000Z | 2021-09-23T18:37:58.000Z | canary-dedup-groomer.py | open-cluster-management/canary-reporting | d97d49fb83e8e8d5e56b937261928285303e475e | [
"Apache-2.0"
] | 1 | 2021-08-02T14:26:55.000Z | 2021-08-02T14:26:55.000Z | import os, sys, json, db_utils
import pymysql
from github import Github, UnknownObjectException
from datetime import datetime
# Aligns all defects recorded in database with their github status
# Name of the DB table holding canary issues (consumed by db_utils).
TABLE_NAME = "canary_issues"
# Module-level DB handles; not used directly in this script -- presumably
# placeholders for db_utils state.  TODO confirm they are still needed.
c = None
conn = None

# GitHub connection settings, read from the environment.
github_token=os.getenv('GITHUB_TOKEN')
github_org=os.getenv('GITHUB_ORG')
github_repo=os.getenv('GITHUB_REPO')
def query_github_status(defect):
    """
    Returns the state ('open' / 'closed') of issue number  defect  in
    the module-level  repo , or None when the issue does not exist.
    """
    try:
        return repo.get_issue(defect).state
    except UnknownObjectException:
        return None
#
# Get connected to the Database.
#
ret = db_utils.connect_to_db()

#
# Get connected to GitHub.
# (BUG FIX: removed a duplicate  github_token = os.getenv(...)  line;
# the token is already read from the environment at the top of the file.)
#
try:
    g = Github(github_token)
    org = g.get_organization(github_org)
    repo = org.get_repo(github_repo)
except UnknownObjectException as ex:
    print(ex)
    sys.exit(1)  # sys.exit is the reliable form in scripts; exit() is REPL-oriented

# Reconcile each open defect recorded in the DB with its live GitHub state.
ret = db_utils.pull_open_defects(github_repo)
if ret is not None:
    for row in ret:
        issue_id = row[0]  # renamed from 'id', which shadows the builtin
        defect = row[1]
        status = query_github_status(int(defect))
        # Only persist a change when the issue exists and is no longer open:
        if status is not None and status != "open":
            db_utils.update_status(issue_id, status, repo)
else:
    print("No open defects!")

ret = db_utils.disconnect_from_db()
| 24.529412 | 66 | 0.710631 | import os, sys, json, db_utils
import pymysql
from github import Github, UnknownObjectException
from datetime import datetime
TABLE_NAME = "canary_issues"
c = None
conn = None
github_token=os.getenv('GITHUB_TOKEN')
github_org=os.getenv('GITHUB_ORG')
github_repo=os.getenv('GITHUB_REPO')
def query_github_status(defect):
try:
issue_state = repo.get_issue(defect).state
except UnknownObjectException:
issue_state = None
return issue_state
ret = db_utils.connect_to_db()
github_token=os.getenv('GITHUB_TOKEN')
try:
g = Github(github_token)
org = g.get_organization(github_org)
repo = org.get_repo(github_repo)
except UnknownObjectException as ex:
print(ex)
exit(1)
ret = db_utils.pull_open_defects(github_repo)
if ret != None:
open_defects = list(ret)
for row in open_defects:
id = list(row)[0]
defect = list(row)[1]
status = query_github_status(int(defect))
if (status != "open") and (status != None):
db_utils.update_status(id, status, repo)
else:
print("No open defects!")
ret = db_utils.disconnect_from_db()
| true | true |
f71fa7d89da3839e79f74760543e4ed894dcc3ac | 3,351 | py | Python | imagr_images/tests.py | sazlin/cfpydev-imagr | e34ac025e357694f40034ab1c02ed3be5294c2d8 | [
"MIT"
] | null | null | null | imagr_images/tests.py | sazlin/cfpydev-imagr | e34ac025e357694f40034ab1c02ed3be5294c2d8 | [
"MIT"
] | null | null | null | imagr_images/tests.py | sazlin/cfpydev-imagr | e34ac025e357694f40034ab1c02ed3be5294c2d8 | [
"MIT"
] | null | null | null | from django.test import TestCase
from models import Photo, Album
from imagr_users.models import ImagrUser
from imagr_images.models import get_file_owner_username
from admin import PhotoAdmin, AlbumAdmin, ImageSizeListFilter
from django.core.urlresolvers import reverse
from django.contrib.admin.sites import AdminSite
import datetime
from django.test.utils import setup_test_environment
setup_test_environment()
from django.test.client import Client
client = Client()
class ImagrTests(TestCase):
    """Tests for Photo/Album models, their admin helpers and the stream view."""

    def setUp(self):
        # Three users; u1 follows the other two so all three photos can
        # appear in u1's stream.
        u1 = ImagrUser.objects.create(username='testuser')
        u2 = ImagrUser.objects.create(username='testuser2')
        u3 = ImagrUser.objects.create(username='testuser3')
        u1.follow(u2)
        u1.follow(u3)
        Photo.objects.create(
            image='test.png',
            title='u1 test image',
            owner=u1,
            published=1)
        Photo.objects.create(
            image='test.png',
            title='u2 test image',
            owner=u2,
            published=1)
        Photo.objects.create(
            image='test.png',
            title='u3 test image',
            owner=u3,
            published=1)
        Album.objects.create(
            title='test album',
            owner=u1,
            published=1,
        )
        self.site = AdminSite()

    def test_get_file_owner(self):
        """Upload paths are built as '<username>/<year>/<month>/<basename>'."""
        test_photo = Photo.objects.get(title='u1 test image')
        self.assertEqual(isinstance(test_photo, Photo), True)
        test_filename = '/garbage/garbage/garbage/test.png'
        result = get_file_owner_username(test_photo, test_filename)
        today = datetime.datetime.utcnow()
        expected = 'testuser/{}/{}/{}'.format(unicode(today.year), unicode(today.month), u'test.png')
        self.assertEqual(result, expected)

    def test_photo_save(self):
        """Saving a Photo records a positive image_size."""
        test_photo = Photo.objects.get(title='u1 test image')
        self.assertGreater(test_photo.image_size, 0)

    def test_album_owner_link(self):
        """AlbumAdmin.owner_link renders an anchor to the owner's admin page."""
        test_album = Album.objects.get(title='test album')
        expected = "<a href='../../imagr_users/imagruser/{}/'>{}</a>".format(
            test_album.owner.id,
            test_album.owner)
        test_album_admin = AlbumAdmin(test_album, self.site)
        self.assertEqual(test_album_admin.owner_link(test_album), expected)

    def test_photo_owner_link(self):
        """owner_link also works for Photo objects (same admin mixin behavior)."""
        test_photo = Photo.objects.get(title='u1 test image')
        expected = "<a href='../../imagr_users/imagruser/{}/'>{}</a>".format(
            test_photo.owner.id,
            test_photo.owner)
        test_photo_admin = AlbumAdmin(test_photo, self.site)
        self.assertEqual(test_photo_admin.owner_link(test_photo), expected)

    def test_view_stream_page(self):
        """The stream page lists all three photos, newest first."""
        client.logout()
        response = client.get(reverse('stream_page'))
        self.assertEqual(response.status_code, 200)
        actual_photos = response.context['photos']
        self.assertEqual(len(actual_photos), 3)
        self.assertEqual(actual_photos[0].title, 'u3 test image')
        self.assertEqual(actual_photos[1].title, 'u2 test image')
        self.assertEqual(actual_photos[2].title, 'u1 test image')
| 37.651685 | 101 | 0.64906 | from django.test import TestCase
from models import Photo, Album
from imagr_users.models import ImagrUser
from imagr_images.models import get_file_owner_username
from admin import PhotoAdmin, AlbumAdmin, ImageSizeListFilter
from django.core.urlresolvers import reverse
from django.contrib.admin.sites import AdminSite
import datetime
from django.test.utils import setup_test_environment
setup_test_environment()
from django.test.client import Client
client = Client()
class ImagrTests(TestCase):
def setUp(self):
u1 = ImagrUser.objects.create(username='testuser')
u2 = ImagrUser.objects.create(username='testuser2')
u3 = ImagrUser.objects.create(username='testuser3')
u1.follow(u2)
u1.follow(u3)
Photo.objects.create(
image='test.png',
title='u1 test image',
owner=u1,
published=1)
Photo.objects.create(
image='test.png',
title='u2 test image',
owner=u2,
published=1)
Photo.objects.create(
image='test.png',
title='u3 test image',
owner=u3,
published=1)
Album.objects.create(
title='test album',
owner=u1,
published=1,
)
self.site = AdminSite()
def test_get_file_owner(self):
test_photo = Photo.objects.get(title='u1 test image')
self.assertEqual(isinstance(test_photo, Photo), True)
test_filename = '/garbage/garbage/garbage/test.png'
result = get_file_owner_username(test_photo, test_filename)
today = datetime.datetime.utcnow()
expected = 'testuser/{}/{}/{}'.format(unicode(today.year), unicode(today.month), u'test.png')
self.assertEquals(result, expected)
def test_photo_save(self):
test_photo = Photo.objects.get(title='u1 test image')
self.assertGreater(test_photo.image_size, 0)
def test_album_owner_link(self):
test_album = Album.objects.get(title='test album')
expected = "<a href='../../imagr_users/imagruser/{}/'>{}</a>".format(
test_album.owner.id,
test_album.owner)
test_album_admin = AlbumAdmin(test_album, self.site)
self.assertEquals(test_album_admin.owner_link(test_album), expected)
def test_photo_owner_link(self):
test_photo = Photo.objects.get(title='u1 test image')
expected = "<a href='../../imagr_users/imagruser/{}/'>{}</a>".format(
test_photo.owner.id,
test_photo.owner)
test_photo_admin = AlbumAdmin(test_photo, self.site)
self.assertEquals(test_photo_admin.owner_link(test_photo), expected)
def test_view_stream_page(self):
user = ImagrUser.objects.get(username='testuser')
client.logout()
response = client.get(reverse('stream_page'))
self.assertEquals(response.status_code, 200)
actual_photos = response.context['photos']
self.assertEquals(len(actual_photos), 3)
self.assertEquals(actual_photos[0].title, 'u3 test image')
self.assertEquals(actual_photos[1].title, 'u2 test image')
self.assertEquals(actual_photos[2].title, 'u1 test image')
| true | true |
f71fa83d6989172da3743eb9fd560fc906515688 | 10,607 | py | Python | tests/commandline/stubber_cli_test.py | Josverl/mipystubber | 504814224b38208e9886661b181a57d2b9077be1 | [
"MIT"
] | 1 | 2019-03-26T16:03:04.000Z | 2019-03-26T16:03:04.000Z | tests/commandline/stubber_cli_test.py | Josverl/mipystubber | 504814224b38208e9886661b181a57d2b9077be1 | [
"MIT"
] | null | null | null | tests/commandline/stubber_cli_test.py | Josverl/mipystubber | 504814224b38208e9886661b181a57d2b9077be1 | [
"MIT"
] | null | null | null | from typing import List
import pytest
from pytest_mock import MockerFixture
from mock import MagicMock
from pathlib import Path
from click.testing import CliRunner
# module under test :
import stubber.stubber as stubber
def test_stubber_help():
    """CLI sanity: `stubber --help` exits cleanly and prints usage information."""
    outcome = CliRunner().invoke(stubber.stubber_cli, ["--help"])
    assert outcome.exit_code == 0
    for fragment in ("Usage:", "Commands:"):
        assert fragment in outcome.output
##########################################################################################
# clone
##########################################################################################
def test_stubber_clone(mocker: MockerFixture, tmp_path: Path):
    """`stubber clone` with no --path works on both repos under ./repos,
    cloning them when absent or fetching when already present."""
    runner = CliRunner()
    mock_clone: MagicMock = mocker.patch("stubber.stubber.git.clone", autospec=True, return_value=0)
    mock_fetch: MagicMock = mocker.patch("stubber.stubber.git.fetch", autospec=True, return_value=0)
    result = runner.invoke(stubber.stubber_cli, ["clone"])
    assert result.exit_code == 0
    # two repos are processed in total, via either clone or fetch
    assert mock_clone.call_count + mock_fetch.call_count == 2
    if mock_clone.call_count > 0:
        # fresh checkout: both upstream repos cloned into ./repos
        mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython.git", path=Path("repos/micropython"))
        mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython-lib.git", path=Path("repos/micropython-lib"))
    else:
        # repos already exist on disk: only fetched
        mock_fetch.assert_any_call(Path("repos/micropython"))
        mock_fetch.assert_any_call(Path("repos/micropython-lib"))
def test_stubber_clone_path(mocker: MockerFixture, tmp_path: Path):
    """`stubber clone --path foobar` clones both repos under the given folder."""
    runner = CliRunner()
    mock_clone: MagicMock = mocker.MagicMock(return_value=0)
    mocker.patch("stubber.stubber.git.clone", mock_clone)
    m_tag = mocker.patch("stubber.stubber.git.get_tag", autospec=True)
    # patched so the test does not create real directories
    m_dir = mocker.patch("stubber.stubber.os.mkdir", autospec=True)
    # now test with path specified
    result = runner.invoke(stubber.stubber_cli, ["clone", "--path", "foobar"])
    assert result.exit_code == 0
    assert mock_clone.call_count >= 2
    mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython.git", path=Path("foobar/micropython"))
    mock_clone.assert_any_call(remote_repo="https://github.com/micropython/micropython-lib.git", path=Path("foobar/micropython-lib"))
    # the version tag is looked up for each cloned repo
    assert m_tag.call_count >= 2
##########################################################################################
# switch
##########################################################################################
@pytest.mark.parametrize(
    "params",
    [
        ["switch", "--version", "latest", "--path", "foobar"],
        ["switch", "--version", "v1.10", "--path", "foobar"],
    ],
)
def test_stubber_switch(mocker: MockerFixture, params: List[str]):
    """`stubber switch` fetches both repos, then either switches to the default
    branch (--version latest) or checks out the matching tag (fixed version)."""
    runner = CliRunner()
    # Mock Path.exists so no real checkout on disk is required
    m_fetch: MagicMock = mocker.patch("stubber.stubber.git.fetch", autospec=True, return_value=0)
    m_switch: MagicMock = mocker.patch("stubber.stubber.git.switch_branch", autospec=True, return_value=0)
    m_checkout: MagicMock = mocker.patch("stubber.stubber.git.checkout_tag", autospec=True, return_value=0)
    m_get_tag: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
    m_match = mocker.patch("stubber.stubber.get_mpy.match_lib_with_mpy", autospec=True)
    m_exists = mocker.patch("stubber.stubber.Path.exists", return_value=True)
    result = runner.invoke(stubber.stubber_cli, params)
    assert result.exit_code == 0
    # both repos are fetched
    assert m_fetch.call_count == 2
    # "foobar" from params is used as the path
    m_fetch.assert_any_call(Path("foobar/micropython"))
    m_fetch.assert_any_call(Path("foobar/micropython-lib"))
    # micropython-lib is re-matched against the core repo exactly once
    m_match.assert_called_once()
    if "latest" in params:
        m_switch.assert_called_once()
        m_checkout.assert_not_called()
    else:
        m_switch.assert_not_called()
        m_checkout.assert_called_once()
##########################################################################################
# minify
##########################################################################################
def test_stubber_minify(mocker: MockerFixture):
    """`stubber minify` with no flags minifies only createstubs.py with defaults."""
    minify_mock: MagicMock = mocker.MagicMock(return_value=0)
    mocker.patch("stubber.stubber.minify", minify_mock)
    outcome = CliRunner().invoke(stubber.stubber_cli, ["minify"])
    assert outcome.exit_code == 0
    minify_mock.assert_called_once_with("board/createstubs.py", "./minified", True, False, False)
def test_stubber_minify_all(mocker: MockerFixture):
    """`stubber minify --all` minifies all three createstubs variants."""
    minify_mock: MagicMock = mocker.MagicMock(return_value=0)
    mocker.patch("stubber.stubber.minify", minify_mock)
    outcome = CliRunner().invoke(stubber.stubber_cli, ["minify", "--all"])
    assert outcome.exit_code == 0
    assert minify_mock.call_count == 3
    for script in ("createstubs.py", "createstubs_db.py", "createstubs_mem.py"):
        minify_mock.assert_any_call("board/" + script, "./minified", True, False, False)
##########################################################################################
# stub
##########################################################################################
def test_stubber_stub(mocker: MockerFixture):
    """`stubber stub --source .` generates .pyi files for the given folder."""
    gen_mock: MagicMock = mocker.patch(
        "stubber.stubber.utils.generate_pyi_files", autospec=True, return_value=True
    )
    # run against the current folder
    outcome = CliRunner().invoke(stubber.stubber_cli, ["stub", "--source", "."])
    gen_mock.assert_called_once_with(Path("."))
    assert outcome.exit_code == 0
##########################################################################################
# get-frozen
##########################################################################################
def test_stubber_get_frozen(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-frozen` extracts frozen stubs for the current tag and
    post-processes them into <stub-folder>/micropython-<tag>-frozen."""
    runner = CliRunner()
    mock_version: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
    mock: MagicMock = mocker.patch("stubber.stubber.get_mpy.get_frozen", autospec=True)
    mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # fake run - tmp_path ensures the destination folder exists
    result = runner.invoke(stubber.stubber_cli, ["get-frozen", "--stub-folder", tmp_path.as_posix()])
    assert result.exit_code == 0
    # FIXME: test fails in CI
    mock.assert_called_once()
    mock_version.assert_called_once()
    # folder name is derived from the mocked tag v1.42
    mock_post.assert_called_once_with([tmp_path / "micropython-v1_42-frozen"], True, True)
##########################################################################################
# get-lobo
##########################################################################################
def test_stubber_get_lobo(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-lobo` extracts the loboris frozen stubs and post-processes
    them into <stub-folder>/loboris-v3_2_24-frozen."""
    runner = CliRunner()
    mock: MagicMock = mocker.patch("stubber.stubber.get_lobo.get_frozen", autospec=True)
    mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # fake run
    result = runner.invoke(stubber.stubber_cli, ["get-lobo", "--stub-folder", tmp_path.as_posix()])
    assert result.exit_code == 0
    mock.assert_called_once()
    # assert_called_once_with subsumes the redundant assert_called_once() check
    mock_post.assert_called_once_with([tmp_path / "loboris-v3_2_24-frozen"], True, True)
##########################################################################################
# get-core
##########################################################################################
def test_stubber_get_core(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-core` builds the two cpython-core stub flavors
    (pycopy and micropython) and post-processes both."""
    runner = CliRunner()
    mock: MagicMock = mocker.patch("stubber.stubber.get_cpython.get_core", autospec=True)
    mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # fake run
    result = runner.invoke(stubber.stubber_cli, ["get-core", "--stub-folder", tmp_path.as_posix()])
    assert result.exit_code == 0
    # get_core is called once per flavor
    assert mock.call_count == 2
    # post-processing is called once with both output folders
    mock_post.assert_called_with([tmp_path / "cpython_core-pycopy", tmp_path / "cpython_core-micropython"], True, True)
##########################################################################################
# get-docstubs
##########################################################################################
def test_stubber_get_docstubs(mocker: MockerFixture, tmp_path: Path):
    """`stubber get-docstubs` generates stubs from the .rst docs for the
    current tag and post-processes them (no black formatting: first flag False)."""
    runner = CliRunner()
    mock_version: MagicMock = mocker.patch("stubber.stubber.git.get_tag", autospec=True, return_value="v1.42")
    mock: MagicMock = mocker.patch("stubber.stubber.generate_from_rst", autospec=True)
    mock_post: MagicMock = mocker.patch("stubber.stubber.utils.do_post_processing", autospec=True)
    # fake run
    result = runner.invoke(stubber.stubber_cli, ["get-docstubs", "--stub-folder", tmp_path.as_posix()])
    assert result.exit_code == 0
    # rst generation runs exactly once
    assert mock.call_count == 1
    mock.assert_called_once()
    # the tag may be looked up more than once
    assert mock_version.call_count >= 1
    # folder name is derived from the mocked tag v1.42
    mock_post.assert_called_with([tmp_path / "micropython-v1_42-docstubs"], False, True)
##########################################################################################
# get-lobo
##########################################################################################
def test_stubber_fallback(mocker: MockerFixture, tmp_path: Path):
    """`stubber update-fallback` refreshes the fallback stubs in the stub folder."""
    runner = CliRunner()
    mock: MagicMock = mocker.patch("stubber.stubber.update_fallback", autospec=True)
    # fake run
    result = runner.invoke(stubber.stubber_cli, ["update-fallback", "--stub-folder", tmp_path.as_posix()])
    mock.assert_called_once()
    assert result.exit_code == 0
| 42.258964 | 136 | 0.614217 | from typing import List
import pytest
from pytest_mock import MockerFixture
from mock import MagicMock
from pathlib import Path
from click.testing import CliRunner
import stubber.stubber as stubber
def test_stubber_help():
runner = CliRunner()
result = runner.invoke(stubber.stubber_cli, ["--help"])
assert result.exit_code == 0
assert "Usage:" in result.output
assert "Commands:" in result.output
| true | true |
f71fa8aed93c8b08ae1ae6669c787edf3afcae3d | 12,641 | py | Python | python/ccxt/async_support/base/exchange.py | gabvladov/ccxt | c26ba54afe1617d7314bf6714427a4db6d0c6381 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | gabvladov/ccxt | c26ba54afe1617d7314bf6714427a4db6d0c6381 | [
"MIT"
] | null | null | null | python/ccxt/async_support/base/exchange.py | gabvladov/ccxt | c26ba54afe1617d7314bf6714427a4db6d0c6381 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.20.94'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
# -----------------------------------------------------------------------------
from ccxt.async_support.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
    # Async variant of the base ccxt Exchange: same public API, but HTTP is
    # performed with aiohttp on an asyncio event loop and most methods are
    # coroutines.
    def __init__(self, config={}):
        # NOTE(review): mutable default `config={}` is shared across calls;
        # safe only while it is never mutated here - confirm before changing.
        if 'asyncio_loop' in config:
            self.asyncio_loop = config['asyncio_loop']
        self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
        self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
        self.verify = config.get('verify', self.verify)
        # a caller-supplied session is not owned (and so never closed) by us
        self.own_session = 'session' not in config
        self.cafile = config.get('cafile', certifi.where())
        super(Exchange, self).__init__(config)
        self.init_rest_rate_limiter()
    def init_rest_rate_limiter(self):
        # token-bucket throttler bound to our event loop; awaited in fetch2()
        self.throttle = throttle(self.extend({
            'loop': self.asyncio_loop,
        }, self.tokenBucket))
    def __del__(self):
        # warn (not clean up) when the user forgot to await close()
        if self.session is not None:
            self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
    if sys.version_info >= (3, 5):
        # async context manager support: `async with Exchange() as e: ...`
        async def __aenter__(self):
            self.open()
            return self
        async def __aexit__(self, exc_type, exc, tb):
            await self.close()
    def open(self):
        # Lazily create the aiohttp session (idempotent); skipped entirely
        # when the session was supplied by the caller.
        if self.own_session and self.session is None:
            # Create our SSL context object with our CA cert file
            context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
            # Pass this SSL context to aiohttp and create a TCPConnector
            connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
            self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
    async def close(self):
        # Release the aiohttp session; only closes sessions we created.
        if self.session is not None:
            if self.own_session:
                await self.session.close()
            self.session = None
    async def wait_for_token(self):
        # Alternative rate limiter: sleep in small random increments until a
        # token is available, then consume one.
        while self.rateLimitTokens <= 1:
            # if self.verbose:
            #     print('Waiting for tokens: Exchange: {0}'.format(self.id))
            self.add_new_tokens()
            seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
            delay = random.choice(seconds_delays)
            await asyncio.sleep(delay)
        self.rateLimitTokens -= 1
    def add_new_tokens(self):
        # Refill the token bucket proportionally to elapsed monotonic time,
        # capped at rateLimitMaxTokens.
        # if self.verbose:
        #     print('Adding new tokens: Exchange: {0}'.format(self.id))
        now = time.monotonic()
        time_since_update = now - self.rateLimitUpdateTime
        new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
        if new_tokens > 1:
            self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
            self.rateLimitUpdateTime = now
    async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """A wrapper over request that throttles, then signs the request
        at the last possible moment (deferred signing)."""
        if self.enableRateLimit:
            await self.throttle()
        self.lastRestRequestTimestamp = self.milliseconds()
        request = self.sign(path, api, method, params, headers, body)
        return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
    async def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data.

        Network-level failures are translated into ccxt exception types;
        protocol-level errors are delegated to the handle_* hooks.
        """
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            print("\nRequest:", method, url, headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
        request_body = body
        encoded_body = body.encode() if body else None
        self.open()
        # resolve e.g. self.session.get / self.session.post from the verb
        session_method = getattr(self.session, method.lower())
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            # encoded=True stops yarl from re-encoding an already-signed URL
            async with session_method(yarl.URL(url, encoded=True),
                                      data=encoded_body,
                                      headers=request_headers,
                                      timeout=(self.timeout / 1000),
                                      proxy=self.aiohttp_proxy) as response:
                http_response = await response.text()
                http_status_code = response.status
                http_status_text = response.reason
                json_response = self.parse_json(http_response)
                # from here on, `headers` refers to the *response* headers
                headers = response.headers
                if self.enableLastHttpResponse:
                    self.last_http_response = http_response
                if self.enableLastResponseHeaders:
                    self.last_response_headers = headers
                if self.enableLastJsonResponse:
                    self.last_json_response = json_response
                if self.verbose:
                    print("\nResponse:", method, url, http_status_code, headers, http_response)
                self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
        except socket.gaierror as e:
            # DNS resolution failure
            raise ExchangeNotAvailable(method + ' ' + url)
        except concurrent.futures._base.TimeoutError as e:
            raise RequestTimeout(method + ' ' + url)
        except aiohttp.client_exceptions.ClientConnectionError as e:
            raise ExchangeNotAvailable(method + ' ' + url)
        except aiohttp.client_exceptions.ClientError as e: # base exception class
            raise ExchangeError(method + ' ' + url)
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
        self.handle_rest_response(http_response, json_response, url, method)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        # NOTE(review): `response` is referenced outside the `async with`
        # block here; if the try body raised before binding it, this line
        # would NameError - confirm intended behavior upstream.
        return response.content
    async def load_markets(self, reload=False, params={}):
        # Cached market loader: only hits the exchange when reload=True or
        # markets were never fetched.
        if not reload:
            if self.markets:
                if not self.markets_by_id:
                    return self.set_markets(self.markets)
                return self.markets
        currencies = None
        if self.has['fetchCurrencies']:
            currencies = await self.fetch_currencies()
        markets = await self.fetch_markets(params)
        return self.set_markets(markets, currencies)
    async def fetch_fees(self):
        # Combine trading and funding fees where the exchange supports them.
        trading = {}
        funding = {}
        if self.has['fetchTradingFees']:
            trading = await self.fetch_trading_fees()
        if self.has['fetchFundingFees']:
            funding = await self.fetch_funding_fees()
        return {
            'trading': trading,
            'funding': funding,
        }
    async def load_fees(self, reload=False):
        # Cached wrapper around fetch_fees(); merges results into loaded_fees.
        if not reload:
            if self.loaded_fees != Exchange.loaded_fees:
                return self.loaded_fees
        self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
        return self.loaded_fees
    async def fetch_markets(self, params={}):
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.to_array(self.markets)
    async def fetch_currencies(self, params={}):
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.currencies
    async def fetch_status(self, params={}):
        # Refresh the status timestamp from the exchange clock when available.
        if self.has['fetchTime']:
            updated = await self.fetch_time(params)
            self.status['updated'] = updated
        return self.status
    async def fetch_order_status(self, id, symbol=None, params={}):
        """Convenience: fetch an order and return just its 'status' field."""
        order = await self.fetch_order(id, symbol, params)
        return order['status']
    async def fetch_partial_balance(self, part, params={}):
        # `part` is e.g. 'free', 'used' or 'total'
        balance = await self.fetch_balance(params)
        return balance[part]
    async def fetch_l2_order_book(self, symbol, limit=None, params={}):
        # Level-2 book: aggregate volumes per price level and sort
        # bids descending, asks ascending.
        orderbook = await self.fetch_order_book(symbol, limit, params)
        return self.extend(orderbook, {
            'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
            'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
        })
    async def perform_order_book_request(self, market, limit=None, params={}):
        # exchange-specific subclasses must override this
        raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
    async def fetch_order_book(self, symbol, limit=None, params={}):
        # Template method: request the raw book, then parse it uniformly.
        await self.load_markets()
        market = self.market(symbol)
        orderbook = await self.perform_order_book_request(market, limit, params)
        return self.parse_order_book(orderbook, market, limit, params)
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # Fallback OHLCV: build candles client-side from the trade history.
        if not self.has['fetchTrades']:
            raise NotSupported('fetch_ohlcv() not implemented yet')
        await self.load_markets()
        trades = await self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcv(trades, timeframe, since, limit)
    async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # camelCase alias kept for API parity with the JS version of ccxt
        return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
    async def fetch_full_tickers(self, symbols=None, params={}):
        return await self.fetch_tickers(symbols, params)
    async def edit_order(self, id, symbol, *args):
        # Emulated edit: cancel then re-create; requires rate limiting so the
        # two calls are properly spaced.
        if not self.enableRateLimit:
            raise ExchangeError('updateOrder() requires enableRateLimit = true')
        await self.cancel_order(id, symbol)
        return await self.create_order(symbol, *args)
    async def fetch_trading_fees(self, params={}):
        raise NotSupported('fetch_trading_fees() not supported yet')
    async def fetch_trading_fee(self, symbol, params={}):
        # Single-symbol variant; defers to fetch_trading_fees when supported.
        if not self.has['fetchTradingFees']:
            raise NotSupported('fetch_trading_fee() not supported yet')
        return await self.fetch_trading_fees(params)
    async def load_trading_limits(self, symbols=None, reload=False, params={}):
        # Merge per-symbol trading limits into self.markets; cached via
        # the 'limitsLoaded' timestamp in self.options.
        if self.has['fetchTradingLimits']:
            if reload or not('limitsLoaded' in list(self.options.keys())):
                response = await self.fetch_trading_limits(symbols)
                for i in range(0, len(symbols)):
                    symbol = symbols[i]
                    self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
                self.options['limitsLoaded'] = self.milliseconds()
        return self.markets
    async def load_accounts(self, reload=False, params={}):
        # Cached account list; also maintains the accountsById index.
        if reload:
            self.accounts = await self.fetch_accounts(params)
        else:
            if self.accounts:
                return self.accounts
            else:
                self.accounts = await self.fetch_accounts(params)
        self.accountsById = self.index_by(self.accounts, 'id')
        return self.accounts
    async def fetch_ticker(self, symbol, params={}):
        # exchange-specific subclasses must override this
        raise NotSupported('fetch_ticker() not supported yet')
| 41.719472 | 355 | 0.610316 |
__version__ = '1.20.94'
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import sys
import yarl
from ccxt.async_support.base.throttle import throttle
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.exchange import Exchange as BaseExchange
__all__ = [
'BaseExchange',
'Exchange',
]
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.aiohttp_trust_env = config.get('aiohttp_trust_env', self.aiohttp_trust_env)
self.verify = config.get('verify', self.verify)
self.own_session = 'session' not in config
self.cafile = config.get('cafile', certifi.where())
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + " requires to release all resources with an explicit call to the .close() coroutine. If you are using the exchange instance with async coroutines, add exchange.close() to your code into a place when you're done with the exchange and don't need the exchange instance anymore (at the end of your async coroutine).")
if sys.version_info >= (3, 5):
async def __aenter__(self):
self.open()
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
def open(self):
if self.own_session and self.session is None:
context = ssl.create_default_context(cafile=self.cafile) if self.verify else self.verify
connector = aiohttp.TCPConnector(ssl=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector, trust_env=self.aiohttp_trust_env)
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
    async def fetch(self, url, method='GET', headers=None, body=None):
        """Perform one HTTP request and return the decoded response.

        Returns parsed JSON when the body parses as JSON, the raw text for
        text responses, otherwise the raw aiohttp content.  Network-level
        failures are mapped onto ccxt exception types below.
        """
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            print("\nRequest:", method, url, headers, body)
        self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
        request_body = body
        encoded_body = body.encode() if body else None
        self.open()
        session_method = getattr(self.session, method.lower())
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            # encoded=True stops yarl from re-quoting an already-signed URL
            async with session_method(yarl.URL(url, encoded=True),
                                      data=encoded_body,
                                      headers=request_headers,
                                      timeout=(self.timeout / 1000),
                                      proxy=self.aiohttp_proxy) as response:
                http_response = await response.text()
                http_status_code = response.status
                http_status_text = response.reason
                json_response = self.parse_json(http_response)
                # NOTE: this rebinds the 'headers' parameter to the *response*
                # headers; everything below refers to response headers.
                headers = response.headers
                if self.enableLastHttpResponse:
                    self.last_http_response = http_response
                if self.enableLastResponseHeaders:
                    self.last_response_headers = headers
                if self.enableLastJsonResponse:
                    self.last_json_response = json_response
                if self.verbose:
                    print("\nResponse:", method, url, http_status_code, headers, http_response)
                self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
        # Map low-level network errors onto the ccxt exception hierarchy.
        except socket.gaierror as e:
            raise ExchangeNotAvailable(method + ' ' + url)
        except concurrent.futures._base.TimeoutError as e:
            raise RequestTimeout(method + ' ' + url)
        except aiohttp.client_exceptions.ClientConnectionError as e:
            raise ExchangeNotAvailable(method + ' ' + url)
        except aiohttp.client_exceptions.ClientError as e:
            raise ExchangeError(method + ' ' + url)
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        self.handle_rest_errors(http_status_code, http_status_text, http_response, url, method)
        self.handle_rest_response(http_response, json_response, url, method)
        if json_response is not None:
            return json_response
        if self.is_text_response(headers):
            return http_response
        # NOTE(review): 'response' leaks out of the async-with block here and
        # is accessed after the response is closed — presumably intentional
        # for binary payloads, but verify against aiohttp semantics.
        return response.content
async def load_markets(self, reload=False, params={}):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
markets = await self.fetch_markets(params)
return self.set_markets(markets, currencies)
async def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = await self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = await self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
async def load_fees(self, reload=False):
if not reload:
if self.loaded_fees != Exchange.loaded_fees:
return self.loaded_fees
self.loaded_fees = self.deep_extend(self.loaded_fees, await self.fetch_fees())
return self.loaded_fees
    async def fetch_markets(self, params={}):
        # Default implementation for statically-described markets; subclasses
        # that can query the exchange override this method.
        return self.to_array(self.markets)
    async def fetch_currencies(self, params={}):
        # Default implementation returns the statically-configured currencies.
        return self.currencies
async def fetch_status(self, params={}):
if self.has['fetchTime']:
updated = await self.fetch_time(params)
self.status['updated'] = updated
return self.status
async def fetch_order_status(self, id, symbol=None, params={}):
order = await self.fetch_order(id, symbol, params)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
    async def perform_order_book_request(self, market, limit=None, params={}):
        # Abstract hook: subclasses issue the exchange-specific request here.
        raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # Fallback OHLCV emulation: build candles from the public trade feed
        # when the exchange exposes trades but no native OHLCV endpoint.
        if not self.has['fetchTrades']:
            raise NotSupported('fetch_ohlcv() not implemented yet')
        await self.load_markets()
        trades = await self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcv(trades, timeframe, since, limit)
    async def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        # camelCase alias kept for API parity with the JavaScript version.
        return await self.fetch_ohlcv(symbol, timeframe, since, limit, params)
    async def fetch_full_tickers(self, symbols=None, params={}):
        # Thin alias over fetch_tickers.
        return await self.fetch_tickers(symbols, params)
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
raise ExchangeError('updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
    async def fetch_trading_fees(self, params={}):
        # Abstract: implemented by exchanges that expose a fee endpoint.
        raise NotSupported('fetch_trading_fees() not supported yet')
    async def fetch_trading_fee(self, symbol, params={}):
        # Per-symbol fee lookup delegates to the all-symbols endpoint.
        if not self.has['fetchTradingFees']:
            raise NotSupported('fetch_trading_fee() not supported yet')
        return await self.fetch_trading_fees(params)
async def load_trading_limits(self, symbols=None, reload=False, params={}):
if self.has['fetchTradingLimits']:
if reload or not('limitsLoaded' in list(self.options.keys())):
response = await self.fetch_trading_limits(symbols)
for i in range(0, len(symbols)):
symbol = symbols[i]
self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
self.options['limitsLoaded'] = self.milliseconds()
return self.markets
async def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = await self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = await self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
    async def fetch_ticker(self, symbol, params={}):
        # Abstract: subclasses must implement ticker retrieval.
        raise NotSupported('fetch_ticker() not supported yet')
| true | true |
f71fa96f8684f8b86d25128c15599561e0aa97b2 | 11,711 | py | Python | fs_patches_of_hybrid_cloud/cherry_for_B038/nova_cascaded/nova/virt/vmwareapi/vmware_images.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 2 | 2015-06-15T02:16:33.000Z | 2022-02-23T07:10:38.000Z | patches_tool/vcloud_patch/code/nova/virt/vmwareapi/vmware_images.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 7 | 2016-05-13T06:39:45.000Z | 2016-05-20T02:55:31.000Z | fs_patches_of_hybrid_cloud/cherry_for_B038/nova_cascaded/nova/virt/vmwareapi/vmware_images.py | Hybrid-Cloud/badam | 390ad3a6fc03948008f7c04ed2f9fcc8514cc1eb | [
"Apache-2.0"
] | 4 | 2015-11-02T04:02:50.000Z | 2021-05-13T17:06:00.000Z | # Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer and manipulation.
"""
import os
from oslo.config import cfg
from nova import exception
from nova import image
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
class VMwareImage(object):
    """Value object describing a Glance image for VMware VM builds."""
    def __init__(self, image_id,
                 file_size=0,
                 os_type=constants.DEFAULT_OS_TYPE,
                 adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                 disk_type=constants.DEFAULT_DISK_TYPE,
                 file_type=constants.DEFAULT_DISK_FORMAT,
                 linked_clone=None,
                 vif_model=constants.DEFAULT_VIF_MODEL):
        """VMwareImage holds values for use in building VMs.

        image_id (str): uuid of the image
        file_size (int): size of file in bytes
        os_type (str): name of guest os (use vSphere names only)
        adapter_type (str): name of the adapter's type
        disk_type (str): type of disk in thin, thick, etc
        file_type (str): vmdk or iso
        linked_clone(bool): use linked clone, or don't
        """
        self.image_id = image_id
        self.file_size = file_size
        self.os_type = os_type
        self.adapter_type = adapter_type
        self.disk_type = disk_type
        self.file_type = file_type
        # NOTE(vui): This should be removed when we restore the
        # descriptor-based validation.
        if (self.file_type is not None and
                self.file_type not in constants.DISK_FORMATS_ALL):
            raise exception.InvalidDiskFormat(disk_format=self.file_type)
        if linked_clone is not None:
            self.linked_clone = linked_clone
        else:
            # Fall back to the deployment-wide linked-clone setting.
            self.linked_clone = CONF.vmware.use_linked_clone
        self.vif_model = vif_model
    @property
    def file_size_in_kb(self):
        return self.file_size / units.Ki
    @property
    def file_size_in_gb(self):
        return self.file_size / units.Gi
    @property
    def is_sparse(self):
        return self.disk_type == constants.DISK_TYPE_SPARSE
    @property
    def is_iso(self):
        return self.file_type == constants.DISK_FORMAT_ISO
    @classmethod
    def from_image(cls, image_id, image_meta=None):
        """Returns VMwareImage, the subset of properties the driver uses.

        :param image_id - image id of image
        :param image_meta - image metadata we are working with
        :return: vmware image object
        :rtype: nova.virt.vmwareapi.vmware_images.VmwareImage
        """
        if image_meta is None:
            image_meta = {}
        properties = image_meta.get("properties", {})
        # calculate linked_clone flag, allow image properties to override the
        # global property set in the configurations.
        image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
                                            CONF.vmware.use_linked_clone)
        # catch any string values that need to be interpreted as boolean values
        linked_clone = strutils.bool_from_string(image_linked_clone)
        props = {
            'image_id': image_id,
            'linked_clone': linked_clone
        }
        if 'size' in image_meta:
            props['file_size'] = image_meta['size']
        if 'disk_format' in image_meta:
            props['file_type'] = image_meta['disk_format']
        # Map Glance property names onto VMwareImage constructor kwargs.
        props_map = {
            'vmware_ostype': 'os_type',
            'vmware_adaptertype': 'adapter_type',
            'vmware_disktype': 'disk_type',
            'hw_vif_model': 'vif_model'
        }
        for k, v in props_map.iteritems():
            if k in properties:
                props[v] = properties[k]
        return cls(**props)
def start_transfer(context, read_file_handle, data_size,
                   write_file_handle=None, image_id=None, image_meta=None):
    """Start the data transfer from the reader to the writer.

    Reader writes to the pipe and the writer reads from the pipe. This means
    that the total transfer time boils down to the slower of the read/write
    and not the addition of the two times.

    :param context: request context
    :param read_file_handle: file-like object producing the data
    :param data_size: total number of bytes to transfer
    :param write_file_handle: target handle (Glance -> VMware transfers)
    :param image_id: Glance image id (VMware -> Glance transfers)
    :param image_meta: metadata to set on the uploaded Glance image
    :raises ValueError: if neither write_file_handle nor image_id is given
    :raises exception.NovaException: if the transfer fails
    """
    if not image_meta:
        image_meta = {}
    # The pipe that acts as an intermediate store of data for reader to write
    # to and writer to grab from.
    thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
    # The read thread. In case of glance it is the instance of the
    # GlanceFileRead class. The glance client read returns an iterator
    # and this class wraps that iterator to provide datachunks in calls
    # to read.
    read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
    # In case of Glance - VMware transfer, we just need a handle to the
    # HTTP Connection that is to send transfer data to the VMware datastore.
    if write_file_handle:
        write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
    # In case of VMware - Glance transfer, we relinquish VMware HTTP file read
    # handle to Glance Client instance, but to be sure of the transfer we need
    # to be sure of the status of the image on glance changing to active.
    # The GlanceWriteThread handles the same for us.
    elif image_id:
        write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
                                                 image_id, image_meta)
    else:
        # Bug fix: previously this fell through and crashed below with an
        # unhelpful NameError on 'write_thread'; fail fast instead.
        raise ValueError("start_transfer requires either write_file_handle "
                         "or image_id")
    # Start the read and write threads.
    read_event = read_thread.start()
    write_event = write_thread.start()
    try:
        # Wait on the read and write events to signal their end
        read_event.wait()
        write_event.wait()
    except Exception as exc:
        # In case of any of the reads or writes raising an exception,
        # stop the threads so that we un-necessarily don't keep the other one
        # waiting.
        read_thread.stop()
        write_thread.stop()
        # Log and raise the exception.
        LOG.exception(exc)
        raise exception.NovaException(exc)
    finally:
        # No matter what, try closing the read and write handles, if it so
        # applies.
        read_file_handle.close()
        if write_file_handle:
            write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
    """Stream a local ISO file to a VMware datastore over HTTP."""
    LOG.debug("Uploading iso %s to datastore", iso_path,
              instance=instance)
    with open(iso_path, 'r') as iso_file:
        iso_size = os.fstat(iso_file.fileno()).st_size
        write_file_handle = read_write_util.VMwareHTTPWriteFile(
            kwargs.get("host"),
            kwargs.get("data_center_name"),
            kwargs.get("datastore_name"),
            kwargs.get("cookies"),
            kwargs.get("file_path"),
            iso_size)
        LOG.debug("Uploading iso of size : %s ",
                  iso_size)
        chunk_size = 0x10000
        # iter(callable, sentinel) yields chunks until read() returns ''.
        for chunk in iter(lambda: iso_file.read(chunk_size), ''):
            write_file_handle.write(chunk)
        write_file_handle.close()
    LOG.debug("Uploaded iso %s to datastore", iso_path,
              instance=instance)
def fetch_image(context, instance, host, dc_name, ds_name, file_path,
                cookies=None):
    """Download image from the glance image server."""
    image_ref = instance['image_ref']
    LOG.debug("Downloading image file data %(image_ref)s to the "
              "data store %(data_store_name)s",
              {'image_ref': image_ref,
               'data_store_name': ds_name},
              instance=instance)
    metadata = IMAGE_API.get(context, image_ref)
    image_size = int(metadata['size'])
    glance_iter = IMAGE_API.download(context, image_ref)
    reader = read_write_util.GlanceFileRead(glance_iter)
    writer = read_write_util.VMwareHTTPWriteFile(
        host, dc_name, ds_name, cookies, file_path, image_size)
    start_transfer(context, reader, image_size,
                   write_file_handle=writer)
    LOG.debug("Downloaded image file data %(image_ref)s to "
              "%(upload_name)s on the data store "
              "%(data_store_name)s",
              {'image_ref': image_ref,
               'upload_name': 'n/a' if file_path is None else file_path,
               'data_store_name': 'n/a' if ds_name is None else ds_name},
              instance=instance)
def upload_image(context, image, instance, **kwargs):
    """Upload the snapshotted vm disk file to Glance image server."""
    LOG.debug("Uploading image %s to the Glance image server", image,
              instance=instance)
    # Open an HTTP read handle on the snapshot's VMDK in the datastore.
    read_file_handle = read_write_util.VMwareHTTPReadFile(
        kwargs.get("host"),
        kwargs.get("data_center_name"),
        kwargs.get("datastore_name"),
        kwargs.get("cookies"),
        kwargs.get("file_path"))
    file_size = read_file_handle.get_size()
    metadata = IMAGE_API.get(context, image)
    # The properties and other fields that we need to set for the image.
    image_metadata = {"disk_format": "vmdk",
                      "is_public": "false",
                      "name": metadata['name'],
                      "status": "active",
                      "container_format": "bare",
                      "size": file_size,
                      "properties": {"vmware_adaptertype":
                                     kwargs.get("adapter_type"),
                                     "vmware_disktype":
                                     kwargs.get("disk_type"),
                                     "vmware_ostype": kwargs.get("os_type"),
                                     "vmware_image_version":
                                     kwargs.get("image_version"),
                                     "owner_id": instance['project_id']}}
    # VMware -> Glance direction: pass image_id so a GlanceWriteThread is used.
    start_transfer(context, read_file_handle, file_size,
                   image_id=metadata['id'], image_meta=image_metadata)
    LOG.debug("Uploaded image %s to the Glance image server", image,
              instance=instance)
| 40.663194 | 80 | 0.61566 |
import os
from oslo.config import cfg
from nova import exception
from nova import image
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import units
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
LINKED_CLONE_PROPERTY = 'vmware_linked_clone'
class VMwareImage(object):
    """Value object describing a Glance image for VMware VM builds."""
    def __init__(self, image_id,
                 file_size=0,
                 os_type=constants.DEFAULT_OS_TYPE,
                 adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                 disk_type=constants.DEFAULT_DISK_TYPE,
                 file_type=constants.DEFAULT_DISK_FORMAT,
                 linked_clone=None,
                 vif_model=constants.DEFAULT_VIF_MODEL):
        """Store image properties; file_size is in bytes, file_type is
        validated against the known VMware disk formats."""
        self.image_id = image_id
        self.file_size = file_size
        self.os_type = os_type
        self.adapter_type = adapter_type
        self.disk_type = disk_type
        self.file_type = file_type
        if (self.file_type is not None and
                self.file_type not in constants.DISK_FORMATS_ALL):
            raise exception.InvalidDiskFormat(disk_format=self.file_type)
        if linked_clone is not None:
            self.linked_clone = linked_clone
        else:
            # Fall back to the deployment-wide linked-clone setting.
            self.linked_clone = CONF.vmware.use_linked_clone
        self.vif_model = vif_model
    @property
    def file_size_in_kb(self):
        return self.file_size / units.Ki
    @property
    def file_size_in_gb(self):
        return self.file_size / units.Gi
    @property
    def is_sparse(self):
        return self.disk_type == constants.DISK_TYPE_SPARSE
    @property
    def is_iso(self):
        return self.file_type == constants.DISK_FORMAT_ISO
    @classmethod
    def from_image(cls, image_id, image_meta=None):
        """Build a VMwareImage from Glance image metadata; image properties
        may override the global linked-clone configuration."""
        if image_meta is None:
            image_meta = {}
        properties = image_meta.get("properties", {})
        image_linked_clone = properties.get(LINKED_CLONE_PROPERTY,
                                            CONF.vmware.use_linked_clone)
        # Interpret string values ('true'/'false') as booleans.
        linked_clone = strutils.bool_from_string(image_linked_clone)
        props = {
            'image_id': image_id,
            'linked_clone': linked_clone
        }
        if 'size' in image_meta:
            props['file_size'] = image_meta['size']
        if 'disk_format' in image_meta:
            props['file_type'] = image_meta['disk_format']
        # Map Glance property names onto constructor kwargs.
        props_map = {
            'vmware_ostype': 'os_type',
            'vmware_adaptertype': 'adapter_type',
            'vmware_disktype': 'disk_type',
            'hw_vif_model': 'vif_model'
        }
        for k, v in props_map.iteritems():
            if k in properties:
                props[v] = properties[k]
        return cls(**props)
def start_transfer(context, read_file_handle, data_size,
                   write_file_handle=None, image_id=None, image_meta=None):
    """Pump data_size bytes from read_file_handle to either write_file_handle
    (Glance -> VMware) or a Glance upload identified by image_id
    (VMware -> Glance), using a reader thread and a writer thread joined by
    a bounded pipe."""
    if not image_meta:
        image_meta = {}
    thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
    read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
    if write_file_handle:
        write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
    elif image_id:
        write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
                                                 image_id, image_meta)
    # NOTE(review): if neither write_file_handle nor image_id is supplied,
    # 'write_thread' is unbound and the line below raises NameError — verify
    # callers always pass one of the two.
    read_event = read_thread.start()
    write_event = write_thread.start()
    try:
        read_event.wait()
        write_event.wait()
    except Exception as exc:
        # Stop both threads so neither is left blocked on the pipe,
        # waiting.
        read_thread.stop()
        write_thread.stop()
        # Log and raise the exception.
        LOG.exception(exc)
        raise exception.NovaException(exc)
    finally:
        # No matter what, try closing the read and write handles, if it so
        # applies.
        read_file_handle.close()
        if write_file_handle:
            write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
    """Stream a local ISO file to a VMware datastore over HTTP in 64 KiB
    chunks."""
    LOG.debug("Uploading iso %s to datastore", iso_path,
              instance=instance)
    with open(iso_path, 'r') as iso_file:
        write_file_handle = read_write_util.VMwareHTTPWriteFile(
            kwargs.get("host"),
            kwargs.get("data_center_name"),
            kwargs.get("datastore_name"),
            kwargs.get("cookies"),
            kwargs.get("file_path"),
            os.fstat(iso_file.fileno()).st_size)
        LOG.debug("Uploading iso of size : %s ",
                  os.fstat(iso_file.fileno()).st_size)
        block_size = 0x10000
        data = iso_file.read(block_size)
        while len(data) > 0:
            write_file_handle.write(data)
            data = iso_file.read(block_size)
        write_file_handle.close()
    LOG.debug("Uploaded iso %s to datastore", iso_path,
              instance=instance)
def fetch_image(context, instance, host, dc_name, ds_name, file_path,
                cookies=None):
    """Download the instance's Glance image into the given datastore path."""
    image_ref = instance['image_ref']
    LOG.debug("Downloading image file data %(image_ref)s to the "
              "data store %(data_store_name)s",
              {'image_ref': image_ref,
               'data_store_name': ds_name},
              instance=instance)
    metadata = IMAGE_API.get(context, image_ref)
    file_size = int(metadata['size'])
    read_iter = IMAGE_API.download(context, image_ref)
    read_file_handle = read_write_util.GlanceFileRead(read_iter)
    write_file_handle = read_write_util.VMwareHTTPWriteFile(
        host, dc_name, ds_name, cookies, file_path, file_size)
    start_transfer(context, read_file_handle, file_size,
                   write_file_handle=write_file_handle)
    LOG.debug("Downloaded image file data %(image_ref)s to "
              "%(upload_name)s on the data store "
              "%(data_store_name)s",
              {'image_ref': image_ref,
               'upload_name': 'n/a' if file_path is None else file_path,
               'data_store_name': 'n/a' if ds_name is None else ds_name},
              instance=instance)
def upload_image(context, image, instance, **kwargs):
    """Upload the snapshotted VM disk file to the Glance image server."""
    LOG.debug("Uploading image %s to the Glance image server", image,
              instance=instance)
    read_file_handle = read_write_util.VMwareHTTPReadFile(
        kwargs.get("host"),
        kwargs.get("data_center_name"),
        kwargs.get("datastore_name"),
        kwargs.get("cookies"),
        kwargs.get("file_path"))
    file_size = read_file_handle.get_size()
    metadata = IMAGE_API.get(context, image)
    # The properties and other fields that we need to set for the image.
    image_metadata = {"disk_format": "vmdk",
                      "is_public": "false",
                      "name": metadata['name'],
                      "status": "active",
                      "container_format": "bare",
                      "size": file_size,
                      "properties": {"vmware_adaptertype":
                                     kwargs.get("adapter_type"),
                                     "vmware_disktype":
                                     kwargs.get("disk_type"),
                                     "vmware_ostype": kwargs.get("os_type"),
                                     "vmware_image_version":
                                     kwargs.get("image_version"),
                                     "owner_id": instance['project_id']}}
    # image_id triggers the Glance writer thread in start_transfer.
    start_transfer(context, read_file_handle, file_size,
                   image_id=metadata['id'], image_meta=image_metadata)
    LOG.debug("Uploaded image %s to the Glance image server", image,
              instance=instance)
| true | true |
f71fa9e010cfc60587fdfbd98f199c76f3a648ad | 30,587 | py | Python | mypyc/irbuild/expression.py | sileht/mypy | 334876a0cdb80d76333e4976238fd7f42fbaabf2 | [
"PSF-2.0"
] | 1 | 2021-09-25T16:12:01.000Z | 2021-09-25T16:12:01.000Z | mypyc/irbuild/expression.py | sileht/mypy | 334876a0cdb80d76333e4976238fd7f42fbaabf2 | [
"PSF-2.0"
] | 1 | 2021-08-21T07:40:45.000Z | 2021-08-21T07:40:45.000Z | mypyc/irbuild/expression.py | sileht/mypy | 334876a0cdb80d76333e4976238fd7f42fbaabf2 | [
"PSF-2.0"
] | 1 | 2021-08-21T07:39:57.000Z | 2021-08-21T07:39:57.000Z | """Transform mypy expression ASTs to mypyc IR (Intermediate Representation).
The top-level AST transformation logic is implemented in mypyc.irbuild.visitor
and mypyc.irbuild.builder.
"""
from typing import List, Optional, Union, Callable, cast
from mypy.nodes import (
Expression, NameExpr, MemberExpr, SuperExpr, CallExpr, UnaryExpr, OpExpr, IndexExpr,
ConditionalExpr, ComparisonExpr, IntExpr, FloatExpr, ComplexExpr, StrExpr,
BytesExpr, EllipsisExpr, ListExpr, TupleExpr, DictExpr, SetExpr, ListComprehension,
SetComprehension, DictionaryComprehension, SliceExpr, GeneratorExpr, CastExpr, StarExpr,
AssignmentExpr,
Var, RefExpr, MypyFile, TypeInfo, TypeApplication, LDEF, ARG_POS
)
from mypy.types import TupleType, Instance, TypeType, ProperType, get_proper_type
from mypyc.common import MAX_SHORT_INT
from mypyc.ir.ops import (
Value, Register, TupleGet, TupleSet, BasicBlock, Assign, LoadAddress, RaiseStandardError
)
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, is_none_rprimitive, int_rprimitive, is_int_rprimitive
)
from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD
from mypyc.irbuild.format_str_tokenizer import (
tokenizer_printf_style, join_formatted_strings, convert_expr
)
from mypyc.primitives.bytes_ops import bytes_slice_op
from mypyc.primitives.registry import CFunctionDescription, builtin_names, binary_ops
from mypyc.primitives.generic_ops import iter_op
from mypyc.primitives.misc_ops import new_slice_op, ellipsis_op, type_op, get_module_dict_op
from mypyc.primitives.list_ops import list_append_op, list_extend_op, list_slice_op
from mypyc.primitives.tuple_ops import list_tuple_op, tuple_slice_op
from mypyc.primitives.dict_ops import dict_new_op, dict_set_item_op, dict_get_item_op
from mypyc.primitives.set_ops import set_add_op, set_update_op
from mypyc.primitives.str_ops import str_slice_op
from mypyc.primitives.int_ops import int_comparison_op_mapping
from mypyc.irbuild.specialize import specializers
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.for_helpers import (
translate_list_comprehension, translate_set_comprehension,
comprehension_helper
)
# Name and attribute references
def transform_name_expr(builder: IRBuilder, expr: NameExpr) -> Value:
    """Generate IR for a plain name reference (builtin, final, local, global)."""
    if expr.node is None:
        builder.add(RaiseStandardError(RaiseStandardError.RUNTIME_ERROR,
                                       "mypyc internal error: should be unreachable",
                                       expr.line))
        return builder.none()
    fullname = expr.node.fullname
    if fullname in builtin_names:
        # Known builtins are loaded directly from their C-level address.
        typ, src = builtin_names[fullname]
        return builder.add(LoadAddress(typ, src, expr.line))
    # special cases
    if fullname == 'builtins.None':
        return builder.none()
    if fullname == 'builtins.True':
        return builder.true()
    if fullname == 'builtins.False':
        return builder.false()
    if isinstance(expr.node, Var) and expr.node.is_final:
        # Final values may be loaded as constants.
        value = builder.emit_load_final(
            expr.node,
            fullname,
            expr.name,
            builder.is_native_ref_expr(expr),
            builder.types[expr],
            expr.line,
        )
        if value is not None:
            return value
    if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
        return builder.load_module(expr.node.fullname)
    # If the expression is locally defined, then read the result from the corresponding
    # assignment target and return it. Otherwise if the expression is a global, load it from
    # the globals dictionary.
    # Except for imports, that currently always happens in the global namespace.
    if expr.kind == LDEF and not (isinstance(expr.node, Var)
                                  and expr.node.is_suppressed_import):
        # Try to detect and error when we hit the irritating mypy bug
        # where a local variable is cast to None. (#5423)
        if (isinstance(expr.node, Var) and is_none_rprimitive(builder.node_type(expr))
                and expr.node.is_inferred):
            builder.error(
                'Local variable "{}" has inferred type None; add an annotation'.format(
                    expr.node.name),
                expr.node.line)
        # TODO: Behavior currently only defined for Var, FuncDef and MypyFile node types.
        if isinstance(expr.node, MypyFile):
            # Load reference to a module imported inside function from
            # the modules dictionary. It would be closer to Python
            # semantics to access modules imported inside functions
            # via local variables, but this is tricky since the mypy
            # AST doesn't include a Var node for the module. We
            # instead load the module separately on each access.
            mod_dict = builder.call_c(get_module_dict_op, [], expr.line)
            obj = builder.call_c(dict_get_item_op,
                                 [mod_dict, builder.load_str(expr.node.fullname)],
                                 expr.line)
            return obj
        else:
            return builder.read(builder.get_assignment_target(expr), expr.line)
    return builder.load_global(expr)
def transform_member_expr(builder: IRBuilder, expr: MemberExpr) -> Value:
    """Generate IR for an attribute access expression 'obj.attr'."""
    # First check if this is maybe a final attribute.
    final = builder.get_final_ref(expr)
    if final is not None:
        fullname, final_var, native = final
        value = builder.emit_load_final(final_var, fullname, final_var.name, native,
                                        builder.types[expr], expr.line)
        if value is not None:
            return value
    if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
        return builder.load_module(expr.node.fullname)
    obj = builder.accept(expr.expr)
    rtype = builder.node_type(expr)
    # Special case: for named tuples transform attribute access to faster index access.
    typ = get_proper_type(builder.types.get(expr.expr))
    if isinstance(typ, TupleType) and typ.partial_fallback.type.is_named_tuple:
        fields = typ.partial_fallback.type.metadata['namedtuple']['fields']
        if expr.name in fields:
            index = builder.builder.load_int(fields.index(expr.name))
            return builder.gen_method_call(obj, '__getitem__', [index], rtype, expr.line)
    # Emit a compile error when an instance attribute is read via the class.
    check_instance_attribute_access_through_class(builder, expr, typ)
    return builder.builder.get_attr(obj, expr.name, rtype, expr.line)
def check_instance_attribute_access_through_class(builder: IRBuilder,
                                                  expr: MemberExpr,
                                                  typ: Optional[ProperType]) -> None:
    """Report error if accessing an instance attribute through class object."""
    if isinstance(expr.expr, RefExpr):
        node = expr.expr.node
        if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
            # TODO: Handle other item types
            node = typ.item.type
        if isinstance(node, TypeInfo):
            class_ir = builder.mapper.type_to_ir.get(node)
            # Only native (extension) classes store instance attributes in
            # fixed slots, where class-level access cannot work.
            if class_ir is not None and class_ir.is_ext_class:
                sym = node.get(expr.name)
                if (sym is not None
                        and isinstance(sym.node, Var)
                        and not sym.node.is_classvar
                        and not sym.node.is_final):
                    builder.error(
                        'Cannot access instance attribute "{}" through class object'.format(
                            expr.name),
                        expr.line
                    )
                    builder.note(
                        '(Hint: Use "x: Final = ..." or "x: ClassVar = ..." to define '
                        'a class attribute)',
                        expr.line
                    )
def transform_super_expr(builder: IRBuilder, o: SuperExpr) -> Value:
    """Generate IR for a super() attribute access via the builtins.super call."""
    # warning(builder, 'can not optimize super() expression', o.line)
    sup_val = builder.load_module_attr_by_fullname('builtins.super', o.line)
    if o.call.args:
        args = [builder.accept(arg) for arg in o.call.args]
    else:
        # Zero-argument super(): synthesize the (type, self) argument pair.
        assert o.info is not None
        typ = builder.load_native_type_object(o.info.fullname)
        ir = builder.mapper.type_to_ir[o.info]
        iter_env = iter(builder.builder.args)
        # Grab first argument
        vself: Value = next(iter_env)
        if builder.fn_info.is_generator:
            # grab sixth argument (see comment in translate_super_method_call)
            self_targ = list(builder.symtables[-1].values())[6]
            vself = builder.read(self_targ, builder.fn_info.fitem.line)
        elif not ir.is_ext_class:
            vself = next(iter_env)  # second argument is self if non_extension class
        args = [typ, vself]
    res = builder.py_call(sup_val, args, o.line)
    return builder.py_get_attr(res, o.name, o.line)
# Calls
def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value:
    """Dispatch a call expression to the appropriate translator."""
    if isinstance(expr.analyzed, CastExpr):
        return translate_cast_expr(builder, expr.analyzed)
    callee = expr.callee
    if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication):
        # Unwrap type application, e.g. f[int](x) -> f(x)
        callee = callee.analyzed.expr
    if isinstance(callee, SuperExpr):
        return translate_super_method_call(builder, expr, callee)
    if isinstance(callee, MemberExpr):
        return translate_method_call(builder, expr, callee)
    return translate_call(builder, expr, callee)
def translate_call(builder: IRBuilder, expr: CallExpr, callee: Expression) -> Value:
    """Translate a call whose callee is an arbitrary (non-member) expression."""
    if isinstance(callee, RefExpr):
        # Common fast path: calls through a plain name reference.
        return translate_refexpr_call(builder, expr, callee)
    callee_val = builder.accept(callee)
    arg_vals = [builder.accept(arg) for arg in expr.args]
    return builder.py_call(callee_val, arg_vals, expr.line,
                           arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
def translate_refexpr_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value:
    """Translate a non-method call."""
    # TODO: Allow special cases to have default args or named args. Currently they don't since
    # they check that everything in arg_kinds is ARG_POS.
    # Consult registered specializers for this function first; the first one
    # that produces a value wins.
    if callee.fullname:
        for specializer in specializers.get((callee.fullname, None), []):
            result = specializer(builder, expr, callee)
            if result is not None:
                return result
    # Generic path: evaluate the arguments and emit a regular call.
    arg_values = [builder.accept(arg) for arg in expr.args]
    return builder.call_refexpr_with_args(expr, callee, arg_values)
def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr) -> Value:
    """Generate IR for an arbitrary call of form e.m(...).
    This can also deal with calls to module-level functions.
    """
    if builder.is_native_ref_expr(callee):
        # Call to module-level native function or such
        return translate_call(builder, expr, callee)
    elif (
        isinstance(callee.expr, RefExpr)
        and isinstance(callee.expr.node, TypeInfo)
        and callee.expr.node in builder.mapper.type_to_ir
        and builder.mapper.type_to_ir[callee.expr.node].has_method(callee.name)
    ):
        # Call a method via the *class*
        assert isinstance(callee.expr.node, TypeInfo)
        ir = builder.mapper.type_to_ir[callee.expr.node]
        decl = ir.method_decl(callee.name)
        args = []
        arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
        # Add the class argument for class methods in extension classes
        if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class:
            args.append(builder.load_native_type_object(callee.expr.node.fullname))
            arg_kinds.insert(0, ARG_POS)
            arg_names.insert(0, None)
        args += [builder.accept(arg) for arg in expr.args]
        if ir.is_ext_class:
            # Native classes can be called directly through the declaration.
            return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line)
        else:
            obj = builder.accept(callee.expr)
            return builder.gen_method_call(obj,
                                           callee.name,
                                           args,
                                           builder.node_type(expr),
                                           expr.line,
                                           expr.arg_kinds,
                                           expr.arg_names)
    elif builder.is_module_member_expr(callee):
        # Fall back to a PyCall for non-native module calls
        function = builder.accept(callee)
        args = [builder.accept(arg) for arg in expr.args]
        return builder.py_call(function, args, expr.line,
                               arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
    else:
        receiver_typ = builder.node_type(callee.expr)
        # If there is a specializer for this method name/type, try calling it.
        # We would return the first successful one.
        if (callee.name, receiver_typ) in specializers:
            for specializer in specializers[callee.name, receiver_typ]:
                val = specializer(builder, expr, callee)
                if val is not None:
                    return val
        # Generic case: evaluate the receiver, then dispatch the method call.
        obj = builder.accept(callee.expr)
        args = [builder.accept(arg) for arg in expr.args]
        return builder.gen_method_call(obj,
                                       callee.name,
                                       args,
                                       builder.node_type(expr),
                                       expr.line,
                                       expr.arg_kinds,
                                       expr.arg_names)
def translate_super_method_call(builder: IRBuilder, expr: CallExpr, callee: SuperExpr) -> Value:
    """Translate a method call through super().

    Specialized to a direct native call only for zero-argument super()
    or super(CurrentClass, self) when the method is found in the MRO;
    everything else falls back to a generic call.
    """
    if callee.info is None or (len(callee.call.args) != 0 and len(callee.call.args) != 2):
        return translate_call(builder, expr, callee)
    # We support two-argument super but only when it is super(CurrentClass, self)
    # TODO: We could support it when it is a parent class in many cases?
    if len(callee.call.args) == 2:
        self_arg = callee.call.args[1]
        if (
            not isinstance(self_arg, NameExpr)
            or not isinstance(self_arg.node, Var)
            or not self_arg.node.is_self
        ):
            return translate_call(builder, expr, callee)
        typ_arg = callee.call.args[0]
        if (
            not isinstance(typ_arg, NameExpr)
            or not isinstance(typ_arg.node, TypeInfo)
            or callee.info is not typ_arg.node
        ):
            return translate_call(builder, expr, callee)
    ir = builder.mapper.type_to_ir[callee.info]
    # Search for the method in the mro, skipping ourselves.
    for base in ir.mro[1:]:
        if callee.name in base.method_decls:
            break
    else:
        return translate_call(builder, expr, callee)
    decl = base.method_decl(callee.name)
    arg_values = [builder.accept(arg) for arg in expr.args]
    arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
    if decl.kind != FUNC_STATICMETHOD:
        # Grab first argument
        vself: Value = builder.self()
        if decl.kind == FUNC_CLASSMETHOD:
            vself = builder.call_c(type_op, [vself], expr.line)
        elif builder.fn_info.is_generator:
            # For generator classes, the self target is the 6th value
            # in the symbol table (which is an ordered dict). This is sort
            # of ugly, but we can't search by name since the 'self' parameter
            # could be named anything, and it doesn't get added to the
            # environment indexes.
            self_targ = list(builder.symtables[-1].values())[6]
            vself = builder.read(self_targ, builder.fn_info.fitem.line)
        arg_values.insert(0, vself)
        arg_kinds.insert(0, ARG_POS)
        arg_names.insert(0, None)
    return builder.builder.call(decl, arg_values, arg_kinds, arg_names, expr.line)
def translate_cast_expr(builder: IRBuilder, expr: CastExpr) -> Value:
    """Translate cast(T, x): evaluate x and coerce it to T's runtime type."""
    operand = builder.accept(expr.expr)
    return builder.coerce(operand, builder.type_to_rtype(expr.type), expr.line)
# Operators
def transform_unary_expr(builder: IRBuilder, expr: UnaryExpr) -> Value:
    """Translate a unary operation such as -x, ~x or not x."""
    operand = builder.accept(expr.expr)
    return builder.unary_op(operand, expr.op, expr.line)
def transform_op_expr(builder: IRBuilder, expr: OpExpr) -> Value:
    """Translate a binary operation, with fast paths for and/or and %-formatting."""
    if expr.op in ('and', 'or'):
        return builder.shortcircuit_expr(expr)
    # Special case for string formatting
    if expr.op == '%' and isinstance(expr.left, StrExpr):
        return translate_str_format_percent_sign(builder, expr.left, expr.right)
    return builder.binary_op(
        builder.accept(expr.left), builder.accept(expr.right), expr.op, expr.line
    )
def transform_index_expr(builder: IRBuilder, expr: IndexExpr) -> Value:
    """Translate an indexing operation obj[index].

    Fast paths: direct element access for RTuple values indexed by an int
    literal, and specialized slice ops for some slice expressions.
    """
    base = builder.accept(expr.base)
    index = expr.index
    if isinstance(base.type, RTuple) and isinstance(index, IntExpr):
        return builder.add(TupleGet(base, index.value, expr.line))
    if isinstance(index, SliceExpr):
        value = try_gen_slice_op(builder, base, index)
        if value:
            return value
    # Generic case: dispatch to __getitem__.
    index_reg = builder.accept(expr.index)
    return builder.gen_method_call(
        base, '__getitem__', [index_reg], builder.node_type(expr), expr.line)
def try_gen_slice_op(builder: IRBuilder, base: Value, index: SliceExpr) -> Optional[Value]:
    """Generate specialized slice op for some index expressions.
    Return None if a specialized op isn't available.
    This supports obj[x:y], obj[:x], and obj[x:] for a few types.
    """
    if index.stride:
        # We can only handle the default stride of 1.
        return None
    if index.begin_index:
        begin_type = builder.node_type(index.begin_index)
    else:
        begin_type = int_rprimitive
    if index.end_index:
        end_type = builder.node_type(index.end_index)
    else:
        end_type = int_rprimitive
    # Both begin and end index must be int (or missing).
    if is_int_rprimitive(begin_type) and is_int_rprimitive(end_type):
        if index.begin_index:
            begin = builder.accept(index.begin_index)
        else:
            begin = builder.load_int(0)
        if index.end_index:
            end = builder.accept(index.end_index)
        else:
            # Replace missing end index with the largest short integer
            # (a sequence can't be longer).
            end = builder.load_int(MAX_SHORT_INT)
        # Try the list, tuple, str and bytes slice primitives in turn.
        candidates = [list_slice_op, tuple_slice_op, str_slice_op, bytes_slice_op]
        return builder.builder.matching_call_c(candidates, [base, begin, end], index.line)
    return None
def transform_conditional_expr(builder: IRBuilder, expr: ConditionalExpr) -> Value:
    """Translate a conditional expression (x if c else y).

    Both branches assign (after coercion to the common type) into a
    shared register that becomes the expression's value.
    """
    if_body, else_body, next = BasicBlock(), BasicBlock(), BasicBlock()
    builder.process_conditional(expr.cond, if_body, else_body)
    expr_type = builder.node_type(expr)
    # Having actual Phi nodes would be really nice here!
    target = Register(expr_type)
    builder.activate_block(if_body)
    true_value = builder.accept(expr.if_expr)
    true_value = builder.coerce(true_value, expr_type, expr.line)
    builder.add(Assign(target, true_value))
    builder.goto(next)
    builder.activate_block(else_body)
    false_value = builder.accept(expr.else_expr)
    false_value = builder.coerce(false_value, expr_type, expr.line)
    builder.add(Assign(target, false_value))
    builder.goto(next)
    builder.activate_block(next)
    return target
def transform_comparison_expr(builder: IRBuilder, e: ComparisonExpr) -> Value:
    """Translate a (possibly chained) comparison such as a < b <= c.

    Also special-cases 'in'/'not in' against a tuple/list literal by
    rewriting it into a chain of ==/!= comparisons at the AST level.
    """
    # x in (...)/[...]
    # x not in (...)/[...]
    if (e.operators[0] in ['in', 'not in']
            and len(e.operators) == 1
            and isinstance(e.operands[1], (TupleExpr, ListExpr))):
        items = e.operands[1].items
        n_items = len(items)
        # x in y -> x == y[0] or ... or x == y[n]
        # x not in y -> x != y[0] and ... and x != y[n]
        # 16 is arbitrarily chosen to limit code size
        if 1 < n_items < 16:
            if e.operators[0] == 'in':
                bin_op = 'or'
                cmp_op = '=='
            else:
                bin_op = 'and'
                cmp_op = '!='
            lhs = e.operands[0]
            mypy_file = builder.graph['builtins'].tree
            assert mypy_file is not None
            bool_type = Instance(cast(TypeInfo, mypy_file.names['bool'].node), [])
            exprs = []
            for item in items:
                expr = ComparisonExpr([cmp_op], [lhs, item])
                builder.types[expr] = bool_type
                exprs.append(expr)
            or_expr: Expression = exprs.pop(0)
            for expr in exprs:
                or_expr = OpExpr(bin_op, or_expr, expr)
                builder.types[or_expr] = bool_type
            return builder.accept(or_expr)
        # x in [y]/(y) -> x == y
        # x not in [y]/(y) -> x != y
        elif n_items == 1:
            if e.operators[0] == 'in':
                cmp_op = '=='
            else:
                cmp_op = '!='
            e.operators = [cmp_op]
            e.operands[1] = items[0]
        # x in []/() -> False
        # x not in []/() -> True
        elif n_items == 0:
            if e.operators[0] == 'in':
                return builder.false()
            else:
                return builder.true()
    # TODO: Don't produce an expression when used in conditional context
    # All of the trickiness here is due to support for chained conditionals
    # (`e1 < e2 > e3`, etc). `e1 < e2 > e3` is approximately equivalent to
    # `e1 < e2 and e2 > e3` except that `e2` is only evaluated once.
    expr_type = builder.node_type(e)
    # go(i, prev) generates code for `ei opi e{i+1} op{i+1} ... en`,
    # assuming that prev contains the value of `ei`.
    def go(i: int, prev: Value) -> Value:
        if i == len(e.operators) - 1:
            return transform_basic_comparison(builder,
                e.operators[i], prev, builder.accept(e.operands[i + 1]), e.line)
        next = builder.accept(e.operands[i + 1])
        return builder.builder.shortcircuit_helper(
            'and', expr_type,
            lambda: transform_basic_comparison(builder,
                e.operators[i], prev, next, e.line),
            lambda: go(i + 1, next),
            e.line)
    return go(0, builder.accept(e.operands[0]))
def transform_basic_comparison(builder: IRBuilder,
                               op: str,
                               left: Value,
                               right: Value,
                               line: int) -> Value:
    """Translate a single (non-chained) comparison between two values."""
    # Tagged ints have a dedicated fast comparison path.
    if (is_int_rprimitive(left.type) and is_int_rprimitive(right.type)
            and op in int_comparison_op_mapping.keys()):
        return builder.compare_tagged(left, right, op, line)
    # 'is not' / 'not in' are emitted as the positive op plus a 'not'.
    negate = False
    if op == 'is not':
        op, negate = 'is', True
    elif op == 'not in':
        op, negate = 'in', True
    target = builder.binary_op(left, right, op, line)
    if negate:
        target = builder.unary_op(target, 'not', line)
    return target
def translate_str_format_percent_sign(builder: IRBuilder,
                                      format_expr: StrExpr,
                                      rhs: Expression) -> Value:
    """Translate printf-style formatting: 'fmt' % rhs.

    Try a specialized join of literal pieces and converted values first;
    fall back to the generic '%' binary op when the format string or the
    substituted expressions can't be handled that way.
    """
    tokens = tokenizer_printf_style(format_expr.value)
    if tokens is not None:
        literals, format_ops = tokens
        # A tuple on the rhs supplies one value per conversion specifier.
        exprs = []
        if isinstance(rhs, TupleExpr):
            exprs = rhs.items
        elif isinstance(rhs, Expression):
            exprs.append(rhs)
        substitutions = convert_expr(builder, format_ops, exprs, format_expr.line)
        if substitutions is not None:
            return join_formatted_strings(builder, literals, substitutions, format_expr.line)
    # Fall back to the runtime '%' operator.
    call_c_ops_candidates = binary_ops.get('%', [])
    ret = builder.builder.matching_call_c(call_c_ops_candidates,
                                          [builder.accept(format_expr), builder.accept(rhs)],
                                          format_expr.line)
    assert ret is not None, 'Cannot use binary op % at line {}'.format(format_expr.line)
    return ret
# Literals
def transform_int_expr(builder: IRBuilder, expr: IntExpr) -> Value:
    """Load an int literal as an IR value."""
    literal_value = expr.value
    return builder.builder.load_int(literal_value)
def transform_float_expr(builder: IRBuilder, expr: FloatExpr) -> Value:
    """Load a float literal as an IR value."""
    literal_value = expr.value
    return builder.builder.load_float(literal_value)
def transform_complex_expr(builder: IRBuilder, expr: ComplexExpr) -> Value:
    """Load a complex literal as an IR value."""
    literal_value = expr.value
    return builder.builder.load_complex(literal_value)
def transform_str_expr(builder: IRBuilder, expr: StrExpr) -> Value:
    """Load a str literal as an IR value."""
    literal_value = expr.value
    return builder.load_str(literal_value)
def transform_bytes_expr(builder: IRBuilder, expr: BytesExpr) -> Value:
    """Load a bytes literal as an IR value.

    expr.value holds the literal in escaped textual form; round-trip it
    through the escape codecs to recover the actual byte contents.
    """
    escaped = bytes(expr.value, 'utf8').decode('unicode-escape')
    raw = escaped.encode('raw-unicode-escape')
    return builder.builder.load_bytes(raw)
def transform_ellipsis(builder: IRBuilder, o: EllipsisExpr) -> Value:
    """Load the builtin Ellipsis singleton."""
    load = LoadAddress(ellipsis_op.type, ellipsis_op.src, o.line)
    return builder.add(load)
# Display expressions
def transform_list_expr(builder: IRBuilder, expr: ListExpr) -> Value:
    """Translate a list display such as [x, y, *rest]."""
    items, line = expr.items, expr.line
    return _visit_list_display(builder, items, line)
def _visit_list_display(builder: IRBuilder, items: List[Expression], line: int) -> Value:
    """Build a list value from display items (star items are extended in)."""
    return _visit_display(
        builder, items, builder.new_list_op, list_append_op, list_extend_op, line, True
    )
def transform_tuple_expr(builder: IRBuilder, expr: TupleExpr) -> Value:
    """Translate a tuple display.

    A display containing star items produces a variable-length tuple
    object; otherwise a fixed-length RTuple value is built, coercing each
    item to its slot type.
    """
    if any(isinstance(item, StarExpr) for item in expr.items):
        # create a tuple of unknown length
        return _visit_tuple_display(builder, expr)
    # create a tuple of fixed length (RTuple)
    tuple_type = builder.node_type(expr)
    # When handling NamedTuple et. al we might not have proper type info,
    # so make some up if we need it.
    types = (tuple_type.types if isinstance(tuple_type, RTuple)
             else [object_rprimitive] * len(expr.items))
    items = []
    for item_expr, item_type in zip(expr.items, types):
        reg = builder.accept(item_expr)
        items.append(builder.coerce(reg, item_type, item_expr.line))
    return builder.add(TupleSet(items, expr.line))
def _visit_tuple_display(builder: IRBuilder, expr: TupleExpr) -> Value:
    """Build a variable-length tuple: materialize a list, then convert it."""
    as_list = _visit_list_display(builder, expr.items, expr.line)
    return builder.call_c(list_tuple_op, [as_list], expr.line)
def transform_dict_expr(builder: IRBuilder, expr: DictExpr) -> Value:
    """Translate a dict display.

    All keys and values are evaluated first (a None key marks a **
    unpacking item), then the dict is constructed from the pairs.
    """
    pairs = [
        (builder.accept(key) if key is not None else None, builder.accept(value))
        for key, value in expr.items
    ]
    return builder.builder.make_dict(pairs, expr.line)
def transform_set_expr(builder: IRBuilder, expr: SetExpr) -> Value:
    """Translate a set display such as {x, y, *rest}."""
    return _visit_display(
        builder, expr.items, builder.new_set_op, set_add_op, set_update_op,
        expr.line, False
    )
def _visit_display(builder: IRBuilder,
                   items: List[Expression],
                   constructor_op: Callable[[List[Value], int], Value],
                   append_op: CFunctionDescription,
                   extend_op: CFunctionDescription,
                   line: int,
                   is_list: bool
                   ) -> Value:
    """Build a list or set from display items.

    Leading non-star items of a list can be passed directly to the
    constructor; every later item (and all set items) is added one at a
    time via append_op, or extend_op for star items.
    """
    # Evaluate all items up front, remembering which ones were starred.
    accepted_items = []
    for item in items:
        if isinstance(item, StarExpr):
            accepted_items.append((True, builder.accept(item.expr)))
        else:
            accepted_items.append((False, builder.accept(item)))
    result: Union[Value, None] = None
    initial_items = []
    for starred, value in accepted_items:
        if result is None and not starred and is_list:
            initial_items.append(value)
            continue
        # Construct the collection lazily, at the first item that can't
        # go into the initial constructor batch.
        if result is None:
            result = constructor_op(initial_items, line)
        builder.call_c(extend_op if starred else append_op, [result, value], line)
    if result is None:
        result = constructor_op(initial_items, line)
    return result
# Comprehensions
def transform_list_comprehension(builder: IRBuilder, o: ListComprehension) -> Value:
    """Translate [x for ...]; async comprehensions are rejected."""
    if any(o.generator.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    return translate_list_comprehension(builder, o.generator)
def transform_set_comprehension(builder: IRBuilder, o: SetComprehension) -> Value:
    """Translate {x for ...}; async comprehensions are rejected."""
    if any(o.generator.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    return translate_set_comprehension(builder, o.generator)
def transform_dictionary_comprehension(builder: IRBuilder, o: DictionaryComprehension) -> Value:
    """Translate {k: v for ...}; async comprehensions are rejected."""
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    d = builder.call_c(dict_new_op, [], o.line)
    loop_params = list(zip(o.indices, o.sequences, o.condlists))
    def gen_inner_stmts() -> None:
        # Innermost loop body: evaluate the key and value and store them.
        k = builder.accept(o.key)
        v = builder.accept(o.value)
        builder.call_c(dict_set_item_op, [d, k, v], o.line)
    comprehension_helper(builder, loop_params, gen_inner_stmts, o.line)
    return d
# Misc
def transform_slice_expr(builder: IRBuilder, expr: SliceExpr) -> Value:
    """Translate a slice expression into a slice object (missing parts become None)."""
    def materialize(bound: Optional[Expression]) -> Value:
        # An absent bound is represented by the None object.
        return builder.none_object() if bound is None else builder.accept(bound)
    args = [
        materialize(expr.begin_index),
        materialize(expr.end_index),
        materialize(expr.stride),
    ]
    return builder.call_c(new_slice_op, args, expr.line)
def transform_generator_expr(builder: IRBuilder, o: GeneratorExpr) -> Value:
    """Translate a generator expression.

    Lazy evaluation isn't supported here, so the comprehension is
    evaluated eagerly as a list and an iterator over it is returned
    (a warning is emitted about the changed semantics).
    """
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    builder.warning('Treating generator comprehension as list', o.line)
    return builder.call_c(
        iter_op, [translate_list_comprehension(builder, o)], o.line
    )
def transform_assignment_expr(builder: IRBuilder, o: AssignmentExpr) -> Value:
    """Translate a walrus expression (x := value); the assigned value is the result."""
    result = builder.accept(o.value)
    builder.assign(builder.get_assignment_target(o.target), result, o.line)
    return result
| 38.914758 | 96 | 0.639193 |
from typing import List, Optional, Union, Callable, cast
from mypy.nodes import (
Expression, NameExpr, MemberExpr, SuperExpr, CallExpr, UnaryExpr, OpExpr, IndexExpr,
ConditionalExpr, ComparisonExpr, IntExpr, FloatExpr, ComplexExpr, StrExpr,
BytesExpr, EllipsisExpr, ListExpr, TupleExpr, DictExpr, SetExpr, ListComprehension,
SetComprehension, DictionaryComprehension, SliceExpr, GeneratorExpr, CastExpr, StarExpr,
AssignmentExpr,
Var, RefExpr, MypyFile, TypeInfo, TypeApplication, LDEF, ARG_POS
)
from mypy.types import TupleType, Instance, TypeType, ProperType, get_proper_type
from mypyc.common import MAX_SHORT_INT
from mypyc.ir.ops import (
Value, Register, TupleGet, TupleSet, BasicBlock, Assign, LoadAddress, RaiseStandardError
)
from mypyc.ir.rtypes import (
RTuple, object_rprimitive, is_none_rprimitive, int_rprimitive, is_int_rprimitive
)
from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD
from mypyc.irbuild.format_str_tokenizer import (
tokenizer_printf_style, join_formatted_strings, convert_expr
)
from mypyc.primitives.bytes_ops import bytes_slice_op
from mypyc.primitives.registry import CFunctionDescription, builtin_names, binary_ops
from mypyc.primitives.generic_ops import iter_op
from mypyc.primitives.misc_ops import new_slice_op, ellipsis_op, type_op, get_module_dict_op
from mypyc.primitives.list_ops import list_append_op, list_extend_op, list_slice_op
from mypyc.primitives.tuple_ops import list_tuple_op, tuple_slice_op
from mypyc.primitives.dict_ops import dict_new_op, dict_set_item_op, dict_get_item_op
from mypyc.primitives.set_ops import set_add_op, set_update_op
from mypyc.primitives.str_ops import str_slice_op
from mypyc.primitives.int_ops import int_comparison_op_mapping
from mypyc.irbuild.specialize import specializers
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.for_helpers import (
translate_list_comprehension, translate_set_comprehension,
comprehension_helper
)
def transform_name_expr(builder: IRBuilder, expr: NameExpr) -> Value:
    """Translate a plain name reference.

    Handles builtin singletons/objects, final values, imported modules,
    and local reads; everything else is a global lookup.
    """
    if expr.node is None:
        # The semantic analyzer should have resolved every name.
        builder.add(RaiseStandardError(RaiseStandardError.RUNTIME_ERROR,
                                       "mypyc internal error: should be unreachable",
                                       expr.line))
        return builder.none()
    fullname = expr.node.fullname
    if fullname in builtin_names:
        typ, src = builtin_names[fullname]
        return builder.add(LoadAddress(typ, src, expr.line))
    # special cases
    if fullname == 'builtins.None':
        return builder.none()
    if fullname == 'builtins.True':
        return builder.true()
    if fullname == 'builtins.False':
        return builder.false()
    if isinstance(expr.node, Var) and expr.node.is_final:
        # Finals may be loadable as constants without a runtime lookup.
        value = builder.emit_load_final(
            expr.node,
            fullname,
            expr.name,
            builder.is_native_ref_expr(expr),
            builder.types[expr],
            expr.line,
        )
        if value is not None:
            return value
    if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
        return builder.load_module(expr.node.fullname)
    if expr.kind == LDEF and not (isinstance(expr.node, Var)
                                  and expr.node.is_suppressed_import):
        if (isinstance(expr.node, Var) and is_none_rprimitive(builder.node_type(expr))
                and expr.node.is_inferred):
            builder.error(
                'Local variable "{}" has inferred type None; add an annotation'.format(
                    expr.node.name),
                expr.node.line)
        if isinstance(expr.node, MypyFile):
            # Don't cache the module object in a local variable;
            # instead load the module separately on each access.
            mod_dict = builder.call_c(get_module_dict_op, [], expr.line)
            obj = builder.call_c(dict_get_item_op,
                                 [mod_dict, builder.load_str(expr.node.fullname)],
                                 expr.line)
            return obj
        else:
            # Ordinary local variable read.
            return builder.read(builder.get_assignment_target(expr), expr.line)
    return builder.load_global(expr)
def transform_member_expr(builder: IRBuilder, expr: MemberExpr) -> Value:
    """Translate an attribute access (obj.attr)."""
    # First check if this is maybe a final attribute.
    final = builder.get_final_ref(expr)
    if final is not None:
        fullname, final_var, native = final
        value = builder.emit_load_final(final_var, fullname, final_var.name, native,
                                        builder.types[expr], expr.line)
        if value is not None:
            return value
    if isinstance(expr.node, MypyFile) and expr.node.fullname in builder.imports:
        return builder.load_module(expr.node.fullname)
    obj = builder.accept(expr.expr)
    rtype = builder.node_type(expr)
    # Special case: for named tuples transform attribute access to faster index access.
    typ = get_proper_type(builder.types.get(expr.expr))
    if isinstance(typ, TupleType) and typ.partial_fallback.type.is_named_tuple:
        fields = typ.partial_fallback.type.metadata['namedtuple']['fields']
        if expr.name in fields:
            index = builder.builder.load_int(fields.index(expr.name))
            return builder.gen_method_call(obj, '__getitem__', [index], rtype, expr.line)
    check_instance_attribute_access_through_class(builder, expr, typ)
    return builder.builder.get_attr(obj, expr.name, rtype, expr.line)
def check_instance_attribute_access_through_class(builder: IRBuilder,
                                                  expr: MemberExpr,
                                                  typ: Optional[ProperType]) -> None:
    """Report an error if an instance attribute is accessed via a class object.

    Only native (extension) classes are checked; class variables and
    final attributes are allowed.
    """
    if isinstance(expr.expr, RefExpr):
        node = expr.expr.node
        if isinstance(typ, TypeType) and isinstance(typ.item, Instance):
            # TODO: Handle other item types
            node = typ.item.type
        if isinstance(node, TypeInfo):
            class_ir = builder.mapper.type_to_ir.get(node)
            if class_ir is not None and class_ir.is_ext_class:
                sym = node.get(expr.name)
                if (sym is not None
                        and isinstance(sym.node, Var)
                        and not sym.node.is_classvar
                        and not sym.node.is_final):
                    builder.error(
                        'Cannot access instance attribute "{}" through class object'.format(
                            expr.name),
                        expr.line
                    )
                    builder.note(
                        '(Hint: Use "x: Final = ..." or "x: ClassVar = ..." to define '
                        'a class attribute)',
                        expr.line
                    )
def transform_super_expr(builder: IRBuilder, o: SuperExpr) -> Value:
    """Translate a super() attribute access that wasn't specialized.

    Calls the builtins.super object at runtime, supplying the current
    class and self when the source used zero-argument super().
    """
    # warning(builder, 'can not optimize super() expression', o.line)
    sup_val = builder.load_module_attr_by_fullname('builtins.super', o.line)
    if o.call.args:
        args = [builder.accept(arg) for arg in o.call.args]
    else:
        assert o.info is not None
        typ = builder.load_native_type_object(o.info.fullname)
        ir = builder.mapper.type_to_ir[o.info]
        iter_env = iter(builder.builder.args)
        # Grab first argument
        vself: Value = next(iter_env)
        if builder.fn_info.is_generator:
            # grab sixth argument (see comment in translate_super_method_call)
            self_targ = list(builder.symtables[-1].values())[6]
            vself = builder.read(self_targ, builder.fn_info.fitem.line)
        elif not ir.is_ext_class:
            vself = next(iter_env)  # second argument is self if non_extension class
        args = [typ, vself]
    res = builder.py_call(sup_val, args, o.line)
    return builder.py_get_attr(res, o.name, o.line)
# Calls
def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value:
if isinstance(expr.analyzed, CastExpr):
return translate_cast_expr(builder, expr.analyzed)
callee = expr.callee
if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication):
callee = callee.analyzed.expr # Unwrap type application
if isinstance(callee, MemberExpr):
return translate_method_call(builder, expr, callee)
elif isinstance(callee, SuperExpr):
return translate_super_method_call(builder, expr, callee)
else:
return translate_call(builder, expr, callee)
def translate_call(builder: IRBuilder, expr: CallExpr, callee: Expression) -> Value:
    """Translate a call whose callee is an arbitrary expression.

    Ref expressions get a specialized path; anything else is evaluated
    and called through the generic Python calling convention.
    """
    # The common case of calls is refexprs
    if isinstance(callee, RefExpr):
        return translate_refexpr_call(builder, expr, callee)
    function = builder.accept(callee)
    args = [builder.accept(arg) for arg in expr.args]
    return builder.py_call(function, args, expr.line,
                           arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
def translate_refexpr_call(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value:
    """Translate a non-method call to a name reference."""
    # TODO: Allow special cases to have default args or named args. Currently they don't since
    # they check that everything in arg_kinds is ARG_POS.
    # If there is a specializer for this function, try it first and
    # return the first successful result.
    if callee.fullname and (callee.fullname, None) in specializers:
        for specializer in specializers[callee.fullname, None]:
            val = specializer(builder, expr, callee)
            if val is not None:
                return val
    # Generate the argument values, then the generic call.
    arg_values = [builder.accept(arg) for arg in expr.args]
    return builder.call_refexpr_with_args(expr, callee, arg_values)
def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr) -> Value:
    """Generate IR for an arbitrary call of form e.m(...).
    This can also deal with calls to module-level functions.
    """
    if builder.is_native_ref_expr(callee):
        # Call to module-level native function or such
        return translate_call(builder, expr, callee)
    elif (
        isinstance(callee.expr, RefExpr)
        and isinstance(callee.expr.node, TypeInfo)
        and callee.expr.node in builder.mapper.type_to_ir
        and builder.mapper.type_to_ir[callee.expr.node].has_method(callee.name)
    ):
        # Call a method via the *class*
        assert isinstance(callee.expr.node, TypeInfo)
        ir = builder.mapper.type_to_ir[callee.expr.node]
        decl = ir.method_decl(callee.name)
        args = []
        arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
        # Add the class argument for class methods in extension classes
        if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class:
            args.append(builder.load_native_type_object(callee.expr.node.fullname))
            arg_kinds.insert(0, ARG_POS)
            arg_names.insert(0, None)
        args += [builder.accept(arg) for arg in expr.args]
        if ir.is_ext_class:
            # Native classes can be called directly through the declaration.
            return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line)
        else:
            obj = builder.accept(callee.expr)
            return builder.gen_method_call(obj,
                                           callee.name,
                                           args,
                                           builder.node_type(expr),
                                           expr.line,
                                           expr.arg_kinds,
                                           expr.arg_names)
    elif builder.is_module_member_expr(callee):
        # Fall back to a PyCall for non-native module calls
        function = builder.accept(callee)
        args = [builder.accept(arg) for arg in expr.args]
        return builder.py_call(function, args, expr.line,
                               arg_kinds=expr.arg_kinds, arg_names=expr.arg_names)
    else:
        receiver_typ = builder.node_type(callee.expr)
        # If there is a specializer for this method name/type, try calling it.
        # We would return the first successful one.
        if (callee.name, receiver_typ) in specializers:
            for specializer in specializers[callee.name, receiver_typ]:
                val = specializer(builder, expr, callee)
                if val is not None:
                    return val
        # Generic case: evaluate the receiver, then dispatch the method call.
        obj = builder.accept(callee.expr)
        args = [builder.accept(arg) for arg in expr.args]
        return builder.gen_method_call(obj,
                                       callee.name,
                                       args,
                                       builder.node_type(expr),
                                       expr.line,
                                       expr.arg_kinds,
                                       expr.arg_names)
def translate_super_method_call(builder: IRBuilder, expr: CallExpr, callee: SuperExpr) -> Value:
    """Translate a method call through super().

    Specialized to a direct native call only for zero-argument super()
    or super(CurrentClass, self) when the method is found in the MRO;
    everything else falls back to a generic call.
    """
    if callee.info is None or (len(callee.call.args) != 0 and len(callee.call.args) != 2):
        return translate_call(builder, expr, callee)
    # Two-argument super is supported only as super(CurrentClass, self).
    if len(callee.call.args) == 2:
        self_arg = callee.call.args[1]
        if (
            not isinstance(self_arg, NameExpr)
            or not isinstance(self_arg.node, Var)
            or not self_arg.node.is_self
        ):
            return translate_call(builder, expr, callee)
        typ_arg = callee.call.args[0]
        if (
            not isinstance(typ_arg, NameExpr)
            or not isinstance(typ_arg.node, TypeInfo)
            or callee.info is not typ_arg.node
        ):
            return translate_call(builder, expr, callee)
    ir = builder.mapper.type_to_ir[callee.info]
    # Search for the method in the mro, skipping ourselves.
    for base in ir.mro[1:]:
        if callee.name in base.method_decls:
            break
    else:
        return translate_call(builder, expr, callee)
    decl = base.method_decl(callee.name)
    arg_values = [builder.accept(arg) for arg in expr.args]
    arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:]
    if decl.kind != FUNC_STATICMETHOD:
        # Grab the first (self) argument.
        vself: Value = builder.self()
        if decl.kind == FUNC_CLASSMETHOD:
            vself = builder.call_c(type_op, [vself], expr.line)
        elif builder.fn_info.is_generator:
            # For generator classes, the self target is the 6th value in
            # the symbol table (an ordered dict). We can't search by name:
            # the 'self' parameter could be named anything.
            self_targ = list(builder.symtables[-1].values())[6]
            vself = builder.read(self_targ, builder.fn_info.fitem.line)
        arg_values.insert(0, vself)
        arg_kinds.insert(0, ARG_POS)
        arg_names.insert(0, None)
    return builder.builder.call(decl, arg_values, arg_kinds, arg_names, expr.line)
def translate_cast_expr(builder: IRBuilder, expr: CastExpr) -> Value:
    """Translate cast(T, x): evaluate x and coerce it to T's runtime type."""
    operand = builder.accept(expr.expr)
    return builder.coerce(operand, builder.type_to_rtype(expr.type), expr.line)
def transform_unary_expr(builder: IRBuilder, expr: UnaryExpr) -> Value:
    """Translate a unary operation such as -x, ~x or not x."""
    operand = builder.accept(expr.expr)
    return builder.unary_op(operand, expr.op, expr.line)
def transform_op_expr(builder: IRBuilder, expr: OpExpr) -> Value:
    """Translate a binary operation, with fast paths for and/or and %-formatting."""
    if expr.op in ('and', 'or'):
        return builder.shortcircuit_expr(expr)
    # Special case for string formatting
    if expr.op == '%' and isinstance(expr.left, StrExpr):
        return translate_str_format_percent_sign(builder, expr.left, expr.right)
    return builder.binary_op(
        builder.accept(expr.left), builder.accept(expr.right), expr.op, expr.line
    )
def transform_index_expr(builder: IRBuilder, expr: IndexExpr) -> Value:
    """Translate an indexing operation obj[index].

    Fast paths: direct element access for RTuple values indexed by an int
    literal, and specialized slice ops for some slice expressions.
    """
    base = builder.accept(expr.base)
    index = expr.index
    if isinstance(base.type, RTuple) and isinstance(index, IntExpr):
        return builder.add(TupleGet(base, index.value, expr.line))
    if isinstance(index, SliceExpr):
        value = try_gen_slice_op(builder, base, index)
        if value:
            return value
    # Generic case: dispatch to __getitem__.
    index_reg = builder.accept(expr.index)
    return builder.gen_method_call(
        base, '__getitem__', [index_reg], builder.node_type(expr), expr.line)
def try_gen_slice_op(builder: IRBuilder, base: Value, index: SliceExpr) -> Optional[Value]:
    """Generate specialized slice op for some index expressions.
    Return None if a specialized op isn't available.
    This supports obj[x:y], obj[:x], and obj[x:] for a few types.
    """
    if index.stride:
        # Only the default stride of 1 is handled.
        return None
    if index.begin_index:
        begin_type = builder.node_type(index.begin_index)
    else:
        begin_type = int_rprimitive
    if index.end_index:
        end_type = builder.node_type(index.end_index)
    else:
        end_type = int_rprimitive
    # Both begin and end index must be int (or missing).
    if is_int_rprimitive(begin_type) and is_int_rprimitive(end_type):
        if index.begin_index:
            begin = builder.accept(index.begin_index)
        else:
            begin = builder.load_int(0)
        if index.end_index:
            end = builder.accept(index.end_index)
        else:
            # Replace missing end index with the largest short integer
            # (a sequence can't be longer).
            end = builder.load_int(MAX_SHORT_INT)
        # Try the list, tuple, str and bytes slice primitives in turn.
        candidates = [list_slice_op, tuple_slice_op, str_slice_op, bytes_slice_op]
        return builder.builder.matching_call_c(candidates, [base, begin, end], index.line)
    return None
def transform_conditional_expr(builder: IRBuilder, expr: ConditionalExpr) -> Value:
    """Translate a conditional expression (x if c else y).

    Both branches assign (after coercion to the common type) into a
    shared register that becomes the expression's value.
    """
    if_body, else_body, next = BasicBlock(), BasicBlock(), BasicBlock()
    builder.process_conditional(expr.cond, if_body, else_body)
    expr_type = builder.node_type(expr)
    # Having actual Phi nodes would be really nice here!
    target = Register(expr_type)
    builder.activate_block(if_body)
    true_value = builder.accept(expr.if_expr)
    true_value = builder.coerce(true_value, expr_type, expr.line)
    builder.add(Assign(target, true_value))
    builder.goto(next)
    builder.activate_block(else_body)
    false_value = builder.accept(expr.else_expr)
    false_value = builder.coerce(false_value, expr_type, expr.line)
    builder.add(Assign(target, false_value))
    builder.goto(next)
    builder.activate_block(next)
    return target
def transform_comparison_expr(builder: IRBuilder, e: ComparisonExpr) -> Value:
    """Translate a (possibly chained) comparison such as a < b <= c.

    Also special-cases 'in'/'not in' against a tuple/list literal by
    rewriting it into a chain of ==/!= comparisons at the AST level.
    """
    # x in (...)/[...]
    # x not in (...)/[...]
    if (e.operators[0] in ['in', 'not in']
            and len(e.operators) == 1
            and isinstance(e.operands[1], (TupleExpr, ListExpr))):
        items = e.operands[1].items
        n_items = len(items)
        # x in y -> x == y[0] or ... or x == y[n]
        # x not in y -> x != y[0] and ... and x != y[n]
        # 16 is arbitrarily chosen to limit code size
        if 1 < n_items < 16:
            if e.operators[0] == 'in':
                bin_op = 'or'
                cmp_op = '=='
            else:
                bin_op = 'and'
                cmp_op = '!='
            lhs = e.operands[0]
            mypy_file = builder.graph['builtins'].tree
            assert mypy_file is not None
            bool_type = Instance(cast(TypeInfo, mypy_file.names['bool'].node), [])
            exprs = []
            for item in items:
                expr = ComparisonExpr([cmp_op], [lhs, item])
                builder.types[expr] = bool_type
                exprs.append(expr)
            or_expr: Expression = exprs.pop(0)
            for expr in exprs:
                or_expr = OpExpr(bin_op, or_expr, expr)
                builder.types[or_expr] = bool_type
            return builder.accept(or_expr)
        # x in [y]/(y) -> x == y
        # x not in [y]/(y) -> x != y
        elif n_items == 1:
            if e.operators[0] == 'in':
                cmp_op = '=='
            else:
                cmp_op = '!='
            e.operators = [cmp_op]
            e.operands[1] = items[0]
        # x in []/() -> False
        # x not in []/() -> True
        elif n_items == 0:
            if e.operators[0] == 'in':
                return builder.false()
            else:
                return builder.true()
    # TODO: Don't produce an expression when used in conditional context
    # All of the trickiness here is due to support for chained comparisons
    # (`e1 < e2 > e3`, etc). `e1 < e2 > e3` is approximately equivalent to
    # `e1 < e2 and e2 > e3` except that `e2` is only evaluated once.
    expr_type = builder.node_type(e)
    # go(i, prev) generates code for `ei opi e{i+1} op{i+1} ... en`,
    # assuming that prev contains the value of `ei`.
    def go(i: int, prev: Value) -> Value:
        if i == len(e.operators) - 1:
            return transform_basic_comparison(builder,
                e.operators[i], prev, builder.accept(e.operands[i + 1]), e.line)
        next = builder.accept(e.operands[i + 1])
        return builder.builder.shortcircuit_helper(
            'and', expr_type,
            lambda: transform_basic_comparison(builder,
                e.operators[i], prev, next, e.line),
            lambda: go(i + 1, next),
            e.line)
    return go(0, builder.accept(e.operands[0]))
def transform_basic_comparison(builder: IRBuilder,
                               op: str,
                               left: Value,
                               right: Value,
                               line: int) -> Value:
    """Translate a single (non-chained) comparison between two values."""
    # Tagged ints have a dedicated fast comparison path.
    if (is_int_rprimitive(left.type) and is_int_rprimitive(right.type)
            and op in int_comparison_op_mapping.keys()):
        return builder.compare_tagged(left, right, op, line)
    # 'is not' / 'not in' are emitted as the positive op plus a 'not'.
    negate = False
    if op == 'is not':
        op, negate = 'is', True
    elif op == 'not in':
        op, negate = 'in', True
    target = builder.binary_op(left, right, op, line)
    if negate:
        target = builder.unary_op(target, 'not', line)
    return target
def translate_str_format_percent_sign(builder: IRBuilder,
                                      format_expr: StrExpr,
                                      rhs: Expression) -> Value:
    """Lower a printf-style ``'...' % rhs`` formatting expression.

    Tries a specialized path first: the format string is tokenized and, if
    every conversion can be handled, the result is built by joining the
    literal pieces with the converted operands.  Otherwise it falls back to
    the generic runtime '%' binary operator.
    """
    tokens = tokenizer_printf_style(format_expr.value)
    if tokens is not None:
        literals, format_ops = tokens
        # Normalize the right-hand side into a list of operand expressions:
        # a tuple contributes its items, anything else is a single operand.
        exprs = []
        if isinstance(rhs, TupleExpr):
            exprs = rhs.items
        elif isinstance(rhs, Expression):
            exprs.append(rhs)
        substitutions = convert_expr(builder, format_ops, exprs, format_expr.line)
        if substitutions is not None:
            return join_formatted_strings(builder, literals, substitutions, format_expr.line)
    # Fallback: emit a generic call to the runtime '%' operator.
    call_c_ops_candidates = binary_ops.get('%', [])
    ret = builder.builder.matching_call_c(call_c_ops_candidates,
                                          [builder.accept(format_expr), builder.accept(rhs)],
                                          format_expr.line)
    assert ret is not None, 'Cannot use binary op % at line {}'.format(format_expr.line)
    return ret
def transform_int_expr(builder: IRBuilder, expr: IntExpr) -> Value:
    """Load an integer literal as an IR value."""
    return builder.builder.load_int(expr.value)
def transform_float_expr(builder: IRBuilder, expr: FloatExpr) -> Value:
    """Load a float literal as an IR value."""
    return builder.builder.load_float(expr.value)
def transform_complex_expr(builder: IRBuilder, expr: ComplexExpr) -> Value:
    """Load a complex literal as an IR value."""
    return builder.builder.load_complex(expr.value)
def transform_str_expr(builder: IRBuilder, expr: StrExpr) -> Value:
    """Load a str literal as an IR value."""
    return builder.load_str(expr.value)
def transform_bytes_expr(builder: IRBuilder, expr: BytesExpr) -> Value:
    """Load a bytes literal as an IR value.

    ``expr.value`` holds the literal's text with escape sequences intact, so
    it is round-tripped through 'unicode-escape' to materialize the actual
    byte values before loading.
    """
    value = bytes(expr.value, 'utf8').decode('unicode-escape').encode('raw-unicode-escape')
    return builder.builder.load_bytes(value)
def transform_ellipsis(builder: IRBuilder, o: EllipsisExpr) -> Value:
    """Load the singleton Ellipsis object via its static address."""
    return builder.add(LoadAddress(ellipsis_op.type, ellipsis_op.src, o.line))
def transform_list_expr(builder: IRBuilder, expr: ListExpr) -> Value:
    """Lower a list display expression (items may include '*' unpacking)."""
    return _visit_list_display(builder, expr.items, expr.line)
def _visit_list_display(builder: IRBuilder, items: List[Expression], line: int) -> Value:
    """Build a list object from display items (handles starred elements)."""
    return _visit_display(builder, items, builder.new_list_op,
                          list_append_op, list_extend_op, line, True)
def transform_tuple_expr(builder: IRBuilder, expr: TupleExpr) -> Value:
    """Lower a tuple display.

    Displays containing '*' unpacking go through the list-based path;
    otherwise each item is evaluated, coerced to its slot's type, and the
    tuple is built directly with TupleSet.
    """
    if any(isinstance(item, StarExpr) for item in expr.items):
        return _visit_tuple_display(builder, expr)

    tuple_type = builder.node_type(expr)
    # Untyped tuples fall back to boxed object slots.
    if isinstance(tuple_type, RTuple):
        element_types = tuple_type.types
    else:
        element_types = [object_rprimitive] * len(expr.items)

    coerced = [
        builder.coerce(builder.accept(item_expr), item_type, item_expr.line)
        for item_expr, item_type in zip(expr.items, element_types)
    ]
    return builder.add(TupleSet(coerced, expr.line))
def _visit_tuple_display(builder: IRBuilder, expr: TupleExpr) -> Value:
    """Lower a tuple display with '*' items: build a list, then convert it."""
    val_as_list = _visit_list_display(builder, expr.items, expr.line)
    return builder.call_c(list_tuple_op, [val_as_list], expr.line)
def transform_dict_expr(builder: IRBuilder, expr: DictExpr) -> Value:
    """Lower a dict display; a None key marks a '**' expansion entry."""
    pairs = [
        (builder.accept(key_expr) if key_expr is not None else None,
         builder.accept(value_expr))
        for key_expr, value_expr in expr.items
    ]
    return builder.builder.make_dict(pairs, expr.line)
def transform_set_expr(builder: IRBuilder, expr: SetExpr) -> Value:
    """Lower a set display expression (items may include '*' unpacking)."""
    return _visit_display(
        builder,
        expr.items,
        builder.new_set_op,
        set_add_op,
        set_update_op,
        expr.line,
        False
    )
def _visit_display(builder: IRBuilder,
                   items: List[Expression],
                   constructor_op: Callable[[List[Value], int], Value],
                   append_op: CFunctionDescription,
                   extend_op: CFunctionDescription,
                   line: int,
                   is_list: bool
                   ) -> Value:
    """Lower a list/set display, supporting starred (``*``) items.

    For lists, a leading run of non-starred items is collected and handed
    straight to the constructor; every remaining item is then added one by
    one (append for plain items, extend for starred ones).
    """
    # Evaluate every item first, remembering which ones were starred.
    accepted_items = []
    for item in items:
        if isinstance(item, StarExpr):
            accepted_items.append((True, builder.accept(item.expr)))
        else:
            accepted_items.append((False, builder.accept(item)))
    result: Union[Value, None] = None
    initial_items = []
    for starred, value in accepted_items:
        if result is None and not starred and is_list:
            # Still collecting the leading non-starred prefix of a list.
            initial_items.append(value)
            continue
        if result is None:
            # First starred item (or any item for a set): materialize the
            # container from the prefix gathered so far.
            result = constructor_op(initial_items, line)
        builder.call_c(extend_op if starred else append_op, [result, value], line)
    if result is None:
        # Nothing forced construction (all-prefix list or empty display).
        result = constructor_op(initial_items, line)
    return result
def transform_list_comprehension(builder: IRBuilder, o: ListComprehension) -> Value:
    """Lower a list comprehension (async comprehensions are rejected)."""
    if any(o.generator.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    return translate_list_comprehension(builder, o.generator)
def transform_set_comprehension(builder: IRBuilder, o: SetComprehension) -> Value:
    """Lower a set comprehension (async comprehensions are rejected)."""
    if any(o.generator.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    return translate_set_comprehension(builder, o.generator)
def transform_dictionary_comprehension(builder: IRBuilder, o: DictionaryComprehension) -> Value:
    """Lower a dict comprehension: a fresh dict filled inside nested loops."""
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    d = builder.call_c(dict_new_op, [], o.line)
    loop_params = list(zip(o.indices, o.sequences, o.condlists))

    def gen_inner_stmts() -> None:
        # Innermost loop body: evaluate key and value, then d[k] = v.
        k = builder.accept(o.key)
        v = builder.accept(o.value)
        builder.call_c(dict_set_item_op, [d, k, v], o.line)

    comprehension_helper(builder, loop_params, gen_inner_stmts, o.line)
    return d
def transform_slice_expr(builder: IRBuilder, expr: SliceExpr) -> Value:
    """Lower a slice expression; missing bounds become the None object."""
    def lower(part: Optional[Expression]) -> Value:
        return builder.none_object() if part is None else builder.accept(part)

    bounds = [lower(expr.begin_index),
              lower(expr.end_index),
              lower(expr.stride)]
    return builder.call_c(new_slice_op, bounds, expr.line)
def transform_generator_expr(builder: IRBuilder, o: GeneratorExpr) -> Value:
    """Lower a generator expression by materializing it as a list.

    True lazy generators are not produced here: the comprehension is
    evaluated eagerly into a list and wrapped in iter(); a warning is
    emitted because this differs from real generator semantics.
    """
    if any(o.is_async):
        builder.error('async comprehensions are unimplemented', o.line)
    builder.warning('Treating generator comprehension as list', o.line)
    return builder.call_c(
        iter_op, [translate_list_comprehension(builder, o)], o.line
    )
def transform_assignment_expr(builder: IRBuilder, o: AssignmentExpr) -> Value:
    """Lower a walrus (``:=``) expression: assign, then yield the value."""
    value = builder.accept(o.value)
    target = builder.get_assignment_target(o.target)
    builder.assign(target, value, o.line)
    return value
| true | true |
f71fac3e2c7d6447e6fb71445d88074908c05f79 | 507 | py | Python | neighborapp/migrations/0005_auto_20220104_1254.py | Kips-alih/neighborhood | 216d81b352c0f7f61812280f3aa816f8450a61bc | [
"MIT"
] | null | null | null | neighborapp/migrations/0005_auto_20220104_1254.py | Kips-alih/neighborhood | 216d81b352c0f7f61812280f3aa816f8450a61bc | [
"MIT"
] | null | null | null | neighborapp/migrations/0005_auto_20220104_1254.py | Kips-alih/neighborhood | 216d81b352c0f7f61812280f3aa816f8450a61bc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.24 on 2022-01-04 09:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Neighborhood.location: nullable FK, cascade-delete with its Location."""

    dependencies = [
        ('neighborapp', '0004_neighborhood_description'),
    ]

    operations = [
        migrations.AlterField(
            model_name='neighborhood',
            name='location',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighborapp.Location'),
        ),
    ]
| 25.35 | 119 | 0.658777 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighborapp', '0004_neighborhood_description'),
]
operations = [
migrations.AlterField(
model_name='neighborhood',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='neighborapp.Location'),
),
]
| true | true |
f71fada5cf3ee5e227b5f46f44161960eda0e4a2 | 4,783 | py | Python | main.py | PiotrBosowski/prl-browser | eabfcc9307e0ff27d490841b80f9d1cdc06f022f | [
"MIT"
] | null | null | null | main.py | PiotrBosowski/prl-browser | eabfcc9307e0ff27d490841b80f9d1cdc06f022f | [
"MIT"
] | null | null | null | main.py | PiotrBosowski/prl-browser | eabfcc9307e0ff27d490841b80f9d1cdc06f022f | [
"MIT"
] | null | null | null | from bottle import *
import os
import settings
from domain.training_model import Training
from training_utils.combined_outputs import overview_csv
@get(f'/<session_name>/<model_name>/refresh')
def invalidate_cache(session_name, model_name):
    """Reload all trainings from disk, then redirect back to the model page."""
    global global_models
    global_models = Training.load_all()
    return redirect(f"/{session_name}/{model_name}")
@post(f'/<session_name>/<model_name>/delete')
def delete_model(session_name, model_name):
    """Delete a model; redirect to the form-supplied next model, or home on failure."""
    was_deleted = Training.delete_model(global_models, session_name, model_name)
    if not was_deleted:
        return redirect('/')
    next_url = f"/{request.forms.next_session}/{request.forms.next_model}"
    return redirect(next_url)
@route(f'/<session>/<model>/<filename>')
def send_image(session, model, filename):
    """
    Sends an image stored directly under a model's directory.

    :param session: training session name (directory under models_dir)
    :param model: model name (subdirectory of the session)
    :param filename: image name
    :return: static file of the requested image
    """
    model_path = os.path.join(settings.models_dir, session, model)
    return static_file(filename, root=model_path, mimetype='image/png')
@route(f'/<session>/<model>/<report>/<filename>')
def send_report_image(session, model, report, filename):
    """
    Sends an image stored under a model's report subdirectory.

    :param session: training session name (directory under models_dir)
    :param model: model name (subdirectory of the session)
    :param report: report subdirectory name
    :param filename: image name
    :return: static file of the requested image
    """
    model_path = os.path.join(settings.models_dir, session, model, report)
    return static_file(filename, root=model_path, mimetype='image/png')
@get(f'/<session_name>/<model_name>')
@view('model_template')
def model_page(session_name, model_name):
    """
    Returns the model view page rendered from model_template.

    :param session_name: training session the model belongs to
    :param model_name: model name
    :return: rendered model view, or a redirect to '/' when the model is
             filtered out of the current selection
    """
    Training.refresh_models(global_models)
    current_model = global_models[session_name][model_name]
    # Optional ?test=<index> query selects a specific report; otherwise the
    # most recent report is shown (or none when the model has no reports).
    test_id = request.query.test
    if test_id and current_model.reports:
        current_test = current_model.reports[int(test_id)]
        current_test_url = os.path.basename(current_test.path)
    elif current_model.reports:
        current_test = current_model.get_last_report()
        current_test_url = os.path.basename(current_test.path)
    else:
        current_test = None
        current_test_url = ""
    # Apply the module-level selection settings (recent/filter/sortby).
    models = Training.models_flat(global_models)
    models = Training.models_select(models, recent, filter, sortby)
    datasets = current_model.datasets
    if current_model in models:
        index = models.index(current_model)
        # NOTE(review): models[index - 1] wraps to the LAST model when the
        # current one is first (Python negative indexing) — presumably
        # intended for cyclic prev/next navigation; confirm.
        return template('browser/model_template',
                        models=models,
                        model=current_model,
                        datasets=datasets,
                        validation=current_model.history[-1].report.confusion,
                        previous=models[index - 1],
                        following=Training.get_next_model(index, models),
                        current_test=current_test,
                        current_test_url=current_test_url,
                        settings=settings)
    else:
        return redirect('/')
@route('/favicon.ico', method='GET')
def get_favicon():
    """
    Serve the favicon browsers request automatically.

    :return: favicon static file
    """
    return static_file('favicon.ico', root='browser')
@route('/style.css')
def send_style():
    """
    Serve the shared stylesheet.

    :return: style.css static file
    """
    return static_file('style.css', root='browser')
@route('/navigation.js')
def send_js():
    """Serve the navigation script used by the browser pages."""
    return static_file('navigation.js', root='browser')
@route('/jquery-3.5.1.min.js')
def send_jquery():
    """Serve the bundled jQuery library.

    Renamed from ``send_js``: the original definition reused the name of the
    /navigation.js handler above, rebinding (shadowing) it at module level.
    Routing still worked because bottle registers handlers at decoration
    time, but the duplicate name was misleading.
    """
    return static_file('jquery-3.5.1.min.js', root='browser')
@get('/overview.csv')
def generate_csv():
    """Build the overview CSV for all loaded models and serve it."""
    filename = overview_csv(global_models)
    return static_file(filename, root=settings.models_dir)
@route('/')
@view('report_template')
def index():
    """
    Main page: redirect to the first model of the filtered/sorted list,
    or show a plain message when no models match.
    """
    Training.refresh_models(global_models)
    models = Training.models_flat(global_models)
    models = Training.models_select(models, recent, filter, sortby)
    if models:
        return redirect(models[0].url())
    else:
        return "no models to show"
def browse_results():
    """
    Launches the (blocking) bottle server at localhost:8080.
    """
    run(host='localhost', port=8080)
if __name__ == "__main__":
    # Model-selection defaults used by the route handlers above:
    # show all (recent=0), no name filter, sorted by accuracy.
    recent = 0
    filter = ""  # NOTE(review): shadows the builtin 'filter'
    sortby = "accuracy"
    reverse_order = True
    global_models = Training.load_all(skip_raw_outputs=True,
                                      skip_wrong_preds=True)
    browse_results()
| 29.708075 | 81 | 0.655237 | from bottle import *
import os
import settings
from domain.training_model import Training
from training_utils.combined_outputs import overview_csv
@get(f'/<session_name>/<model_name>/refresh')
def invalidate_cache(session_name, model_name):
global global_models
global_models = Training.load_all()
return redirect(f"/{session_name}/{model_name}")
@post(f'/<session_name>/<model_name>/delete')
def delete_model(session_name, model_name):
deleted = Training.delete_model(global_models, session_name, model_name)
if deleted:
return redirect(
f"/{request.forms.next_session}/{request.forms.next_model}")
else:
return redirect('/')
@route(f'/<session>/<model>/<filename>')
def send_image(session, model, filename):
model_path = os.path.join(settings.models_dir, session, model)
return static_file(filename, root=model_path, mimetype='image/png')
@route(f'/<session>/<model>/<report>/<filename>')
def send_report_image(session, model, report, filename):
model_path = os.path.join(settings.models_dir, session, model, report)
return static_file(filename, root=model_path, mimetype='image/png')
@get(f'/<session_name>/<model_name>')
@view('model_template')
def model_page(session_name, model_name):
Training.refresh_models(global_models)
current_model = global_models[session_name][model_name]
test_id = request.query.test
if test_id and current_model.reports:
current_test = current_model.reports[int(test_id)]
current_test_url = os.path.basename(current_test.path)
elif current_model.reports:
current_test = current_model.get_last_report()
current_test_url = os.path.basename(current_test.path)
else:
current_test = None
current_test_url = ""
models = Training.models_flat(global_models)
models = Training.models_select(models, recent, filter, sortby)
datasets = current_model.datasets
if current_model in models:
index = models.index(current_model)
return template('browser/model_template',
models=models,
model=current_model,
datasets=datasets,
validation=current_model.history[-1].report.confusion,
previous=models[index - 1],
following=Training.get_next_model(index, models),
current_test=current_test,
current_test_url=current_test_url,
settings=settings)
else:
return redirect('/')
@route('/favicon.ico', method='GET')
def get_favicon():
return static_file('favicon.ico', root='browser')
@route('/style.css')
def send_style():
return static_file('style.css', root='browser')
@route('/navigation.js')
def send_js():
return static_file('navigation.js', root='browser')
@route('/jquery-3.5.1.min.js')
def send_js():
return static_file('jquery-3.5.1.min.js', root='browser')
@get('/overview.csv')
def generate_csv():
filename = overview_csv(global_models)
return static_file(filename, root=settings.models_dir)
@route('/')
@view('report_template')
def index():
Training.refresh_models(global_models)
models = Training.models_flat(global_models)
models = Training.models_select(models, recent, filter, sortby)
if models:
return redirect(models[0].url())
else:
return "no models to show"
def browse_results():
run(host='localhost', port=8080)
if __name__ == "__main__":
recent = 0
filter = ""
sortby = "accuracy"
reverse_order = True
global_models = Training.load_all(skip_raw_outputs=True,
skip_wrong_preds=True)
browse_results()
| true | true |
f71fada6cd8a268a05f3827d0723a58a4d32aa28 | 9,366 | py | Python | MDSimsEval/pca_analysis.py | MikeXydas/MDSimsEval | 6c32bd8b74e421120beca18d18c3e58fc8f85247 | [
"MIT"
] | 1 | 2020-06-30T12:56:41.000Z | 2020-06-30T12:56:41.000Z | MDSimsEval/pca_analysis.py | MikeXydas/MDSimsEval | 6c32bd8b74e421120beca18d18c3e58fc8f85247 | [
"MIT"
] | 2 | 2021-06-08T21:53:33.000Z | 2021-12-13T20:43:42.000Z | MDSimsEval/pca_analysis.py | MikeXydas/MDSimsEval | 6c32bd8b74e421120beca18d18c3e58fc8f85247 | [
"MIT"
] | null | null | null | import math
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tqdm import tqdm
def scree_plot(analysis_actors_dict, dir_path, pcs_on_scree_plot=50, variance_ratio_line=0.75):
    """
    Creates a plot with the scree plots for each ligand and saves it on the specified ``dir_path``. With blue color is
    class 1 and with orange color class 2.

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
        pcs_on_scree_plot(int): The number of the first PCs that will be used on the scree plots
        variance_ratio_line(float): Float from 0.0 to 1.0 which specifies the variance ratio that a vertical line will
                                    be plotted
    """
    plot_cols = 3
    total_ligands = len(analysis_actors_dict['Agonists']) + len(analysis_actors_dict['Antagonists'])
    # BUG FIX: the whole ligand count must be divided by the column count.
    # The old expression ceil(len(ag) + len(ant) / cols) divided only the
    # antagonist count (operator precedence), allocating far too many rows.
    plot_rows = math.ceil(total_ligands / plot_cols)
    fig = plt.figure(figsize=(18, 6 * plot_rows))

    plot_index = 1
    # Agonists first (default matplotlib color cycle -> blue) ...
    for which_ligand in analysis_actors_dict['Agonists']:
        _scree_subplot(fig, plot_rows, plot_cols, plot_index, which_ligand,
                       pcs_on_scree_plot, variance_ratio_line, None)
        plot_index += 1

    # ... then antagonists in orange.
    for which_ligand in analysis_actors_dict['Antagonists']:
        _scree_subplot(fig, plot_rows, plot_cols, plot_index, which_ligand,
                       pcs_on_scree_plot, variance_ratio_line, 'orange')
        plot_index += 1

    fig.suptitle('PCA Scree Plots\nAgonists: Blue\nAntagonists: Orange', fontsize=26, y=0.93)
    plt.savefig(f'{dir_path}pca_scree_plots.png', format='png')


def _scree_subplot(fig, rows, cols, index, ligand, pcs_shown, variance_ratio_line, color):
    """Draw one ligand's scree plot into subplot *index* of *fig*.

    color=None uses matplotlib's default cycle color. The legend label is
    'Variance' for both classes (the old agonist branch mislabeled the same
    explained_variance_ data as 'Variance Ratio').
    """
    fig.add_subplot(rows, cols, index)
    # Vertical marker at the first PC whose cumulative variance ratio
    # exceeds the requested threshold.
    cutoff = np.where(np.cumsum(ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0]
    plt.axvline(x=cutoff, ls='--', c='grey',
                label=f"Reached {int(variance_ratio_line * 100)}% variance")
    variances = ligand.pca_res.explained_variance_[:pcs_shown]
    plt.plot(np.arange(len(variances)), variances, label="Variance", color=color)
    plt.ylabel("Variance")
    plt.xlabel("#PC")
    plt.title(ligand.drug_name)
    plt.legend()
def populate_variance_showcase_df(analysis_actors_dict, drug_type):
    """
    Creates a DataFrame having for each drug the number of PCs needed in order to have 50%, 75% and 95% variance

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        drug_type (str): The class name ('Agonists' or 'Antagonists')

    Returns:
        pd.DataFrame: A DataFrame with columns ``['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance']``
    """
    columns = ['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance']

    def pcs_needed(ligand, ratio):
        # +1 because np.where yields the 0-based index of the first PC whose
        # cumulative explained variance exceeds *ratio*.
        cumulative = np.cumsum(ligand.pca_res.explained_variance_ratio_)
        return np.where(cumulative > ratio)[0][0] + 1

    rows = [[which_ligand.drug_name,
             drug_type,
             pcs_needed(which_ligand, 0.5),
             pcs_needed(which_ligand, 0.75),
             pcs_needed(which_ligand, 0.95)]
            for which_ligand in analysis_actors_dict[drug_type]]

    # DataFrame.append was deprecated and removed in pandas 2.0 (and was
    # quadratic when used in a loop); build the frame in one shot instead.
    return pd.DataFrame(rows, columns=columns)
def project_pca_on_2d(analysis_actors_dict, drug_type, dir_path):
    """
    Plots the 2d projection on the first two PCs of the atom space. The colorbar expresses the progression
    of the frames (color0 -> frame0, color1 -> last_frame).
    The plot is saved inside the function under ``dir_path``.

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        drug_type (str): 'Agonists' or 'Antagonists'
        dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
    """
    cols = 3
    rows = math.ceil(len(analysis_actors_dict[drug_type]) / cols)
    # NOTE(review): figure height is fixed at 25 while the number of rows
    # varies — large ligand sets may render cramped; confirm intended.
    fig = plt.figure(figsize=(18, 25))
    plot_index = 1

    for which_ligand in tqdm(analysis_actors_dict[drug_type], desc="Projecting " + drug_type):
        pca_space_2D = which_ligand.pca_res.transform(
            which_ligand.pca_xyz)  # Transform on the atom selection that PCA was fitted
        step = 1  # Frames we are skipping for computational reasons (if step == 1 then no frame is skipped)

        # Scatter plot colored by normalized frame index (0.0 .. 1.0).
        ax = fig.add_subplot(rows, cols, plot_index)
        plt.scatter(pca_space_2D[::step, 0], pca_space_2D[::step, 1],
                    c=np.arange(len(pca_space_2D) / step) / (len(pca_space_2D) / step), marker='o')
        plt.xlabel('PC1')
        plt.ylabel('PC2')
        # Variance captured by the two plotted components, shown in the title.
        explained_variance_2PC = which_ligand.pca_res.explained_variance_ratio_[0] + \
                                 which_ligand.pca_res.explained_variance_ratio_[1]
        plt.title(f'{which_ligand.drug_name} | Structural Motion Variance: {explained_variance_2PC}')
        plt.colorbar()  # Add the colorbar which goes from color0 to color1 as frames progress
        plot_index += 1

    fig.suptitle(f'PCA 2D Projection of {drug_type} as frames progress', fontsize=26, y=1.03)
    plt.tight_layout()
    plt.savefig(f'{dir_path}pca_{drug_type}_2d_projection.png', format='png')

    return None
def sort_residues_by_loadings(ligand, variance_explained=0.5):
    """
    Compute per-residue PCA loadings for a single ligand.

    The first k PCs are combined, where k is the smallest number of PCs whose
    cumulative explained variance exceeds ``variance_explained``.

    Args:
        ligand(AnalysisActor.class): An AnalysisActor object in which PCA is calculated
        variance_explained (float): Defines which PCs will be combined to calculate the final loadings

    Returns:
        pd.DataFrame where ResidueId is the index and each row contains the loadings of the residue
    """
    pca = ligand.get_pca()

    # Smallest number of PCs whose cumulative variance ratio passes the target.
    cumulative = np.cumsum(pca.explained_variance_ratio_)
    n_pcs = np.where(cumulative > variance_explained)[0][0] + 1

    # loadings = |eigenvectors| @ sqrt(eigenvalues), restricted to the top PCs.
    weights = np.sqrt(pca.explained_variance_[:n_pcs])
    loadings = np.abs(pca.components_[:n_pcs, :]).T @ weights

    # Each residue owns three consecutive coordinates (x, y, z); sum triples.
    per_residue = np.add.reduceat(loadings, range(0, len(loadings), 3))

    frame = pd.DataFrame(enumerate(per_residue),
                         columns=['ResidueId', ligand.drug_name])
    return frame.set_index('ResidueId')
def loadings_heatmap(analysis_actors_dict, dir_path, explained_variance=0.75):
    """
    | Creates a heatmap of the loadings of the residues for all the ligands. The blue line separates Class 1 from Class 2
    |

    .. figure:: ../_static/pca_loadings_heatmap.png
        :width: 550
        :align: center
        :height: 500px
        :alt: pca loadings heatmap missing

        PCA Loadings Heatmap, click for higher resolution.

    Args:
        analysis_actors_dict: ``{ "Agonists": List[AnalysisActor.class], "Antagonists": List[AnalysisActor.class] }``
        dir_path (str): The path of the directory the plot will be saved (must end with a ``/``)
        explained_variance(float 0.0 - 1.0): Defines the number of PCs that will be used for the loadings calculation
    """
    # Seed the frame with the first agonist, then join the rest of the agonists.
    loadings_df = sort_residues_by_loadings(analysis_actors_dict['Agonists'][0], explained_variance)
    for which_ligand in analysis_actors_dict['Agonists'][1:]:
        loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))

    # BUG FIX: this loop previously iterated Antagonists[1:], silently
    # dropping the first antagonist from the heatmap — only the first
    # *agonist* is already present in loadings_df.
    for which_ligand in analysis_actors_dict['Antagonists']:
        loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))

    fig, ax = plt.subplots(figsize=(20, 15))
    sns.heatmap(loadings_df)  # Seaborn heatmap of the loadings
    plt.axvline(len(analysis_actors_dict['Agonists']))  # Vertical line separating agonists from antagonists
    ax.axis('tight')
    ax.set(xticks=np.arange(len(loadings_df.columns)), xticklabels=loadings_df.columns,
           yticks=np.arange(0, len(loadings_df.index), 10), yticklabels=np.arange(0, len(loadings_df.index), 10))
    plt.xticks(rotation=45)
    plt.xlabel('Ligand', fontsize=18)
    plt.ylabel('Residue Id', fontsize=18)
    plt.title(f"Heatmap of Loadings of each ligand | Explained Variance: {int(explained_variance * 100)}%", fontsize=18)
    plt.tight_layout()
    plt.savefig(f'{dir_path}pca_loadings_heatmap.png', format='png')

    return None
| 46.366337 | 120 | 0.69069 | import math
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from tqdm import tqdm
def scree_plot(analysis_actors_dict, dir_path, pcs_on_scree_plot=50, variance_ratio_line=0.75):
plot_cols = 3
plot_rows = math.ceil(len(analysis_actors_dict['Agonists']) + len(analysis_actors_dict['Antagonists']) / plot_cols)
fig = plt.figure(figsize=(18, 6 * plot_rows))
plot_index = 1
for which_ligand in analysis_actors_dict['Agonists']:
ax = fig.add_subplot(plot_rows, plot_cols, plot_index)
plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],
ls='--', c='grey', label=f"Reached {int(variance_ratio_line * 100)}% variance")
plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),
which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label="Variance Ratio")
plt.ylabel("Variance")
plt.xlabel("#PC")
plt.title(which_ligand.drug_name)
plt.legend()
plot_index += 1
for which_ligand in analysis_actors_dict['Antagonists']:
ax = fig.add_subplot(plot_rows, plot_cols, plot_index)
plt.axvline(x=np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > variance_ratio_line)[0][0],
ls='--', c='grey', label=f"Reached {int(variance_ratio_line * 100)}% variance")
plt.plot(np.arange(len(which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot])),
which_ligand.pca_res.explained_variance_[:pcs_on_scree_plot], label="Variance", color='orange')
plt.ylabel("Variance")
plt.xlabel("#PC")
plt.title(which_ligand.drug_name)
plt.legend()
plot_index += 1
fig.suptitle('PCA Scree Plots\nAgonists: Blue\nAntagonists: Orange', fontsize=26, y=0.93)
plt.savefig(f'{dir_path}pca_scree_plots.png', format='png')
def populate_variance_showcase_df(analysis_actors_dict, drug_type):
inp_df = pd.DataFrame(columns=['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance'])
for which_ligand in analysis_actors_dict[drug_type]:
pca_var_row = pd.DataFrame([[
which_ligand.drug_name,
drug_type,
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.5)[0][0] + 1,
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.75)[0][0] + 1,
np.where(np.cumsum(which_ligand.pca_res.explained_variance_ratio_) > 0.95)[0][0] + 1]
], columns=['Drug Name', 'Type', '50% Variance', '75% Variance', '95% Variance'])
inp_df = inp_df.append(pca_var_row, ignore_index=True)
return inp_df
def project_pca_on_2d(analysis_actors_dict, drug_type, dir_path):
cols = 3
rows = math.ceil(len(analysis_actors_dict[drug_type]) / cols)
fig = plt.figure(figsize=(18, 25))
plot_index = 1
for which_ligand in tqdm(analysis_actors_dict[drug_type], desc="Projecting " + drug_type):
pca_space_2D = which_ligand.pca_res.transform(
which_ligand.pca_xyz)
step = 1
ax = fig.add_subplot(rows, cols, plot_index)
plt.scatter(pca_space_2D[::step, 0], pca_space_2D[::step, 1],
c=np.arange(len(pca_space_2D) / step) / (len(pca_space_2D) / step), marker='o')
plt.xlabel('PC1')
plt.ylabel('PC2')
explained_variance_2PC = which_ligand.pca_res.explained_variance_ratio_[0] + \
which_ligand.pca_res.explained_variance_ratio_[1]
plt.title(f'{which_ligand.drug_name} | Structural Motion Variance: {explained_variance_2PC}')
plt.colorbar()
plot_index += 1
fig.suptitle(f'PCA 2D Projection of {drug_type} as frames progress', fontsize=26, y=1.03)
plt.tight_layout()
plt.savefig(f'{dir_path}pca_{drug_type}_2d_projection.png', format='png')
return None
def sort_residues_by_loadings(ligand, variance_explained=0.5):
pca_res = ligand.get_pca()
pcs_numb = np.where(np.cumsum(pca_res.explained_variance_ratio_) > variance_explained)[0][0] + 1
loadings = np.abs(pca_res.components_[:pcs_numb, :]).T @ np.sqrt(pca_res.explained_variance_[:pcs_numb])
me(enumerate(residue_loading), columns=['ResidueId', ligand.drug_name]).set_index('ResidueId')
def loadings_heatmap(analysis_actors_dict, dir_path, explained_variance=0.75):
loadings_df = sort_residues_by_loadings(analysis_actors_dict['Agonists'][0], explained_variance)
for which_ligand in analysis_actors_dict['Agonists'][1:]:
loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))
for which_ligand in analysis_actors_dict['Antagonists'][1:]:
loadings_df = loadings_df.join(sort_residues_by_loadings(which_ligand, explained_variance))
fig, ax = plt.subplots(figsize=(20, 15))
sns.heatmap(loadings_df)
plt.axvline(len(analysis_actors_dict['Agonists']))
ax.axis('tight')
ax.set(xticks=np.arange(len(loadings_df.columns)), xticklabels=loadings_df.columns,
yticks=np.arange(0, len(loadings_df.index), 10), yticklabels=np.arange(0, len(loadings_df.index), 10))
plt.xticks(rotation=45)
plt.xlabel('Ligand', fontsize=18)
plt.ylabel('Residue Id', fontsize=18)
plt.title(f"Heatmap of Loadings of each ligand | Explained Variance: {int(explained_variance * 100)}%", fontsize=18)
plt.tight_layout()
plt.savefig(f'{dir_path}pca_loadings_heatmap.png', format='png')
return None
| true | true |
f71faed40fe9843e23daeb4a3ae28c21eb2bec96 | 1,395 | py | Python | tests/test_rand_spatial_crop_samples.py | sudohainguyen/MONAI | 89f8a39a1c0bc6f480522c443ee7813cea21df47 | [
"Apache-2.0"
] | 2 | 2020-06-23T16:03:45.000Z | 2020-06-25T05:30:45.000Z | tests/test_rand_spatial_crop_samples.py | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 | [
"Apache-2.0"
] | null | null | null | tests/test_rand_spatial_crop_samples.py | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 | [
"Apache-2.0"
] | 1 | 2020-09-14T13:16:01.000Z | 2020-09-14T13:16:01.000Z | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import RandSpatialCropSamples
# Each case: (RandSpatialCropSamples params, input array, expected per-sample shape).
TEST_CASE_1 = [
    {"roi_size": [3, 3, 3], "num_samples": 4, "random_center": True},
    np.random.randint(0, 2, size=[3, 3, 3, 3]),
    (3, 3, 3, 3),
]

TEST_CASE_2 = [
    {"roi_size": [3, 3, 3], "num_samples": 8, "random_center": False},
    np.random.randint(0, 2, size=[3, 3, 3, 3]),
    (3, 3, 3, 3),
]
class TestRandSpatialCropSamples(unittest.TestCase):
    """Shape checks for monai.transforms.RandSpatialCropSamples."""

    @parameterized.expand([TEST_CASE_1, TEST_CASE_2])
    def test_shape(self, input_param, input_data, expected_shape):
        # Every one of the num_samples crops must have the expected shape.
        result = RandSpatialCropSamples(**input_param)(input_data)
        for item in result:
            self.assertTupleEqual(item.shape, expected_shape)
if __name__ == "__main__":
    unittest.main()  # allow running this test module directly
| 34.875 | 74 | 0.712545 |
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import RandSpatialCropSamples
TEST_CASE_1 = [
{"roi_size": [3, 3, 3], "num_samples": 4, "random_center": True},
np.random.randint(0, 2, size=[3, 3, 3, 3]),
(3, 3, 3, 3),
]
TEST_CASE_2 = [
{"roi_size": [3, 3, 3], "num_samples": 8, "random_center": False},
np.random.randint(0, 2, size=[3, 3, 3, 3]),
(3, 3, 3, 3),
]
class TestRandSpatialCropSamples(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2])
def test_shape(self, input_param, input_data, expected_shape):
result = RandSpatialCropSamples(**input_param)(input_data)
for item in result:
self.assertTupleEqual(item.shape, expected_shape)
if __name__ == "__main__":
unittest.main()
| true | true |
f71fafdcdc00789924a0f65e4fdb20825b916e5f | 52,274 | py | Python | pymilvus/orm/collection.py | jingkl/pymilvus | f74b4741b7480d4e1740e1ea2d120c96f01bb56a | [
"Apache-2.0"
] | null | null | null | pymilvus/orm/collection.py | jingkl/pymilvus | f74b4741b7480d4e1740e1ea2d120c96f01bb56a | [
"Apache-2.0"
] | null | null | null | pymilvus/orm/collection.py | jingkl/pymilvus | f74b4741b7480d4e1740e1ea2d120c96f01bb56a | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2019-2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
import copy
import json
import pandas
from .connections import get_connection
from .schema import (
CollectionSchema,
FieldSchema,
parse_fields_from_data,
infer_dtype_bydata,
)
from .prepare import Prepare
from .partition import Partition
from .index import Index
from .search import SearchResult
from .mutation import MutationResult
from .types import DataType
from .exceptions import (
SchemaNotReadyException,
DataTypeNotMatchException,
DataNotMatchException,
ConnectionNotExistException,
PartitionAlreadyExistException,
PartitionNotExistException,
IndexNotExistException,
AutoIDException,
ExceptionsMessage,
)
from .future import SearchFuture, MutationFuture
def _check_schema(schema):
    """Validate a CollectionSchema before collection creation.

    The schema must exist, contain at least one field, and declare at least
    one vector (float or binary) field; otherwise SchemaNotReadyException
    is raised.
    """
    if schema is None:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
    if len(schema.fields) < 1:
        raise SchemaNotReadyException(0, ExceptionsMessage.EmptySchema)
    has_vector = any(
        f.dtype in (DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR)
        for f in schema.fields
    )
    if not has_vector:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
def _check_data_schema(fields, data):
    """Verify that every value in `data` infers to its field's declared dtype.

    `data` is either a pandas.DataFrame (columns addressed by field name) or
    a column-wise list of sequences (addressed by position). Raises
    DataNotMatchException on the first mismatch.
    """
    if isinstance(data, pandas.DataFrame):
        for field in fields:
            column = data[field.name]
            for row_idx in range(len(column)):
                if infer_dtype_bydata(column.iloc[row_idx]) != field.dtype:
                    raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent)
    else:
        for col_idx, field in enumerate(fields):
            for value in data[col_idx]:
                if infer_dtype_bydata(value) != field.dtype:
                    raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent)
class Collection:
"""
This is a class corresponding to collection in milvus.
"""
def __init__(self, name, schema=None, using="default", shards_num=2, **kwargs):
    """
    Constructs a collection handle, creating the collection server-side when
    it does not exist yet.

    :param name: the name of the collection
    :type name: str
    :param schema: the schema of the collection; optional when the collection
                   already exists on the server
    :type schema: class `schema.CollectionSchema`
    :param using: alias of the connection used to reach Milvus
    :type using: str
    :param shards_num: how wide to scale the collection (number of active
                       datanodes usable on insert)
    :type shards_num: int
    :raises SchemaNotReadyException: if the schema is missing, of the wrong
        type, or inconsistent with the schema the server reports.
    """
    self._name = name
    self._using = using
    self._shards_num = shards_num
    self._kwargs = kwargs
    conn = self._get_connection()
    if conn.has_collection(self._name):
        # Existing collection: reconcile the caller-provided schema (if any)
        # with the schema described by the server.
        resp = conn.describe_collection(self._name)
        server_schema = CollectionSchema.construct_from_dict(resp)
        if schema is None:
            self._schema = server_schema
        else:
            if not isinstance(schema, CollectionSchema):
                raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
            if server_schema != schema:
                raise SchemaNotReadyException(0, ExceptionsMessage.SchemaInconsistent)
            self._schema = schema
    else:
        # New collection: a valid CollectionSchema is mandatory to create it.
        if schema is None:
            raise SchemaNotReadyException(0, ExceptionsMessage.CollectionNotExistNoSchema % name)
        if not isinstance(schema, CollectionSchema):
            raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
        _check_schema(schema)
        conn.create_collection(self._name, fields=schema.to_dict(), shards_num=self._shards_num)
        self._schema = schema
def __repr__(self):
    """Return a JSON string summarizing name, schema, partitions and description."""
    summary = {
        'name': self.name,
        'schema': self._schema.to_dict(),
        'partitions': [json.loads(p.__repr__()) for p in self.partitions],
        'description': self.description,
    }
    return json.dumps(summary)
def _get_connection(self):
    """Resolve the connection registered under this collection's alias.

    :raises ConnectionNotExistException: if no connection is registered.
    """
    handler = get_connection(self._using)
    if handler is None:
        raise ConnectionNotExistException(0, ExceptionsMessage.ConnectFirst)
    return handler
def _check_insert_data_schema(self, data):
    """
    Checks whether the data type matches the schema.
    """
    if self._schema is None:
        return False
    if self._schema.auto_id:
        if isinstance(data, pandas.DataFrame):
            # With auto_id the primary-key column must not carry user values;
            # an all-null column is tolerated and dropped before inference.
            if self._schema.primary_field.name in data:
                if not data[self._schema.primary_field.name].isnull().all():
                    raise DataNotMatchException(0, ExceptionsMessage.AutoIDWithData)
                data = data.drop(self._schema.primary_field.name, axis=1)

    infer_fields = parse_fields_from_data(data)

    # Compare against the schema minus the auto-generated primary key:
    # callers never supply values for that field.
    tmp_fields = copy.deepcopy(self._schema.fields)
    for i, field in enumerate(self._schema.fields):
        if field.is_primary and field.auto_id:
            # NOTE(review): index `i` comes from the untouched schema list,
            # so this is only safe while at most one field is popped.
            tmp_fields.pop(i)

    if len(infer_fields) != len(tmp_fields):
        raise DataTypeNotMatchException(0, ExceptionsMessage.FieldsNumInconsistent)
    _check_data_schema(infer_fields, data)

    for x, y in zip(infer_fields, tmp_fields):
        if x.dtype != y.dtype:
            return False
        if isinstance(data, pandas.DataFrame):
            # DataFrame columns additionally carry names that must match.
            if x.name != y.name:
                return False
    # todo check dim
    return True
def _check_schema(self):
    """Raise SchemaNotReadyException when no schema has been attached."""
    if self._schema is not None:
        return
    raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
def _get_vector_field(self) -> str:
    """Return the name of the first vector (float or binary) field.

    :raises SchemaNotReadyException: if the schema declares no vector field.
    """
    for candidate in self._schema.fields:
        if candidate.dtype in (DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR):
            return candidate.name
    raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
@classmethod
def construct_from_dataframe(cls, name, dataframe, **kwargs):
    # Build (and create server-side, if needed) a collection whose schema is
    # inferred from a pandas DataFrame, then insert the frame's rows.
    # Returns a (Collection, insert-result) pair.
    #
    # kwargs must carry `primary_field` (column name of the primary key) and
    # may carry `auto_id` (bool); remaining kwargs are forwarded onward.
    if dataframe is None:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoneDataFrame)
    if not isinstance(dataframe, pandas.DataFrame):
        raise SchemaNotReadyException(0, ExceptionsMessage.DataFrameType)
    primary_field = kwargs.pop("primary_field", None)
    if primary_field is None:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoPrimaryKey)
    # Locate the primary-key column position so an auto-id INT64 field can
    # later be inserted at the same index.
    pk_index = -1
    for i, field in enumerate(dataframe):
        if field == primary_field:
            pk_index = i
    if pk_index == -1:
        raise SchemaNotReadyException(0, ExceptionsMessage.PrimaryKeyNotExist)
    if "auto_id" in kwargs:
        if not isinstance(kwargs.get("auto_id", None), bool):
            raise AutoIDException(0, ExceptionsMessage.AutoIDType)
    auto_id = kwargs.pop("auto_id", False)
    if auto_id:
        # Auto-generated ids: the primary column must be entirely empty and
        # is dropped before inferring field types from the data.
        if dataframe[primary_field].isnull().all():
            dataframe = dataframe.drop(primary_field, axis=1)
        else:
            raise SchemaNotReadyException(0, ExceptionsMessage.AutoIDWithData)
    fields = parse_fields_from_data(dataframe)
    _check_data_schema(fields, dataframe)
    if auto_id:
        # NOTE(review): leftover kwargs are passed both to FieldSchema here
        # and to the Collection constructor below — confirm this is intended.
        fields.insert(pk_index, FieldSchema(name=primary_field, dtype=DataType.INT64, is_primary=True, auto_id=True,
                                            **kwargs))
    else:
        for field in fields:
            if field.name == primary_field:
                field.is_primary = True
                field.auto_id = False
    schema = CollectionSchema(fields=fields)
    _check_schema(schema)
    collection = cls(name, schema, **kwargs)
    res = collection.insert(data=dataframe)
    return collection, res
@property
def schema(self) -> CollectionSchema:
    """
    Returns the schema this collection was constructed with.

    :return schema.CollectionSchema: the collection schema.
    """
    return self._schema
@property
def description(self) -> str:
    """
    Returns the text description of the collection.

    :return str: the description stored in the collection schema.
    """
    return self._schema.description
@property
def name(self) -> str:
    """
    Returns the name of the collection.

    :return str: the collection name.
    """
    return self._name
@property
def is_empty(self) -> bool:
    """
    Whether the collection holds no entities.

    Note: this delegates to `num_entities`, which flushes the collection
    before counting.

    :return bool: True when the collection contains zero entities,
                  False otherwise.
    """
    return self.num_entities == 0
# read-only
@property
def num_entities(self) -> int:
    """
    Returns the number of entities in the collection.

    The collection is flushed first so the count reflects all
    previously-inserted data.

    :return int: entity count reported by the server.
    :raises CollectionNotExistException: If the collection does not exist.
    """
    conn = self._get_connection()
    conn.flush([self._name])
    stats = conn.get_collection_stats(db_name="", collection_name=self._name)
    return stats["row_count"]
@property
def primary_field(self) -> FieldSchema:
    """
    Returns the primary-key field of the collection schema.

    :return schema.FieldSchema: the primary field.
    """
    return self._schema.primary_field
def drop(self, timeout=None, **kwargs):
    """
    Drops the collection together with its index files.

    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :raises CollectionNotExistException: If the collection does not exist.
    """
    conn = self._get_connection()
    # Indexes must be removed before the collection itself.
    for idx in self.indexes:
        idx.drop(timeout=timeout, **kwargs)
    conn.drop_collection(self._name, timeout=timeout, **kwargs)
def load(self, partition_names=None, timeout=None, **kwargs):
    """
    Loads the collection (or only the given partitions) into memory.

    :param partition_names: partitions to load; the whole collection is
                            loaded when None.
    :type partition_names: list[str]
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :param kwargs:
        * *_async* (``bool``) -- invoke asynchronously when True.
    :raises CollectionNotExistException: If the collection does not exist.
    :raises ParamError: If the parameters are invalid.
    :raises BaseException: If a specified field, index or partition does
        not exist.
    """
    conn = self._get_connection()
    if partition_names is None:
        conn.load_collection(self._name, timeout=timeout, **kwargs)
    else:
        conn.load_partitions(self._name, partition_names, timeout=timeout, **kwargs)
def release(self, timeout=None, **kwargs):
    """
    Releases the collection from memory.

    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :raises CollectionNotExistException: If the collection does not exist.
    :raises BaseException: If the collection has not been loaded to memory.
    """
    self._get_connection().release_collection(self._name, timeout=timeout, **kwargs)
def insert(self, data, partition_name=None, timeout=None, **kwargs):
    """
    Inserts data into the collection.

    :param data: column-wise data whose layout must match the schema
    :type data: list-like(list, tuple) object or pandas.DataFrame
    :param partition_name: target partition; "_default" when omitted
    :type partition_name: str
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :return: MutationResult, or MutationFuture when ``_async`` is set.
    :raises CollectionNotExistException: If the collection does not exist.
    :raises ParamError: If input parameters are invalid.
    :raises BaseException: If the specified partition does not exist.
    """
    if data is None:
        return MutationResult(data)
    if not self._check_insert_data_schema(data):
        raise SchemaNotReadyException(0, ExceptionsMessage.TypeOfDataAndSchemaInconsistent)
    conn = self._get_connection()
    entities = Prepare.prepare_insert_data(data, self._schema)
    response = conn.insert(collection_name=self._name, entities=entities, ids=None,
                           partition_name=partition_name, timeout=timeout, **kwargs)
    if kwargs.get("_async", False):
        return MutationFuture(response)
    return MutationResult(response)
def delete(self, expr, partition_name=None, timeout=None, **kwargs):
    """
    Deletes the entities matching a boolean expression.

    :param expr: expression selecting the entities to delete
    :type expr: str
    :param partition_name: name of the partition holding the entities
    :type partition_name: str
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :return: MutationResult carrying the deleted primary keys, or a
             MutationFuture when ``_async`` is set.
    :raises RpcError: If gRPC encounters an error.
    :raises ParamError: If the parameters are invalid.
    :raises BaseException: If the server returns a failure.
    """
    conn = self._get_connection()
    response = conn.delete(collection_name=self._name, expr=expr,
                           partition_name=partition_name, timeout=timeout, **kwargs)
    if kwargs.get("_async", False):
        return MutationFuture(response)
    return MutationResult(response)
def search(self, data, anns_field, param, limit, expr=None, partition_names=None,
           output_fields=None, timeout=None, round_decimal=-1, **kwargs):
    """
    Runs a vector similarity search, optionally filtered by a boolean
    expression.

    :param data: the query vectors; one hit list is produced per vector
                 (nq results), and each vector's dim must match the target
                 vector field
    :type data: list[list[float]]
    :param anns_field: name of the vector field to search
    :type anns_field: str
    :param param: search parameters, such as ``nprobe``
    :type param: dict
    :param limit: maximum number of hits per query, a.k.a. ``topk``
    :type limit: int
    :param expr: boolean expression used to filter by attribute
    :type expr: str
    :param partition_names: partitions to restrict the search to
    :type partition_names: list[str]
    :param output_fields: fields to return per hit (not supported now)
    :type output_fields: list[str]
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :param round_decimal: number of decimal places kept in returned distances
    :type round_decimal: int
    :param kwargs:
        * *_async* (``bool``) --
          Return a SearchFuture instead of blocking for the result.
        * *_callback* (``function``) --
          Invoked after the server responds successfully; only effective
          together with ``_async``.
        * *guarantee_timestamp* (``int``) --
          Make Milvus observe all operations performed before this
          timestamp; when absent, all operations performed to date are
          visible to the search.
    :return: SearchResult, a 2d-array-like of nq x limit hits (or a
             SearchFuture when ``_async`` is set).
    :rtype: SearchResult
    :raises RpcError: If gRPC encounters an error.
    :raises ParamError: If the parameters are invalid.
    :raises DataTypeNotMatchException: If ``expr`` is not a string.
    :raises BaseException: If the server returns a failure.
    """
    if expr is not None and not isinstance(expr, str):
        raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
    conn = self._get_connection()
    response = conn.search(self._name, data, anns_field, param, limit, expr,
                           partition_names, output_fields, timeout, round_decimal,
                           **kwargs)
    if kwargs.get("_async", False):
        return SearchFuture(response)
    return SearchResult(response)
def query(self, expr, output_fields=None, partition_names=None, timeout=None):
    """
    Retrieves the records matching an expression exactly.

    :param expr: the query expression
    :type expr: str
    :param output_fields: fields to include in each returned record
    :type output_fields: list[str]
    :param partition_names: partitions to restrict the query to
    :type partition_names: list[str]
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :return: all matching records
    :rtype: list
    :raises RpcError: If gRPC encounters an error.
    :raises ParamError: If the parameters are invalid.
    :raises DataTypeNotMatchException: If ``expr`` is not a string.
    :raises BaseException: If the server returns a failure.
    """
    if not isinstance(expr, str):
        raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
    handler = self._get_connection()
    return handler.query(self._name, expr, output_fields, partition_names, timeout)
@property
def partitions(self) -> list:
    """
    Returns all partitions of the collection.

    :return list[Partition]: one Partition object per server-side partition.
    :raises CollectionNotExistException: If the collection does not exist.
    """
    conn = self._get_connection()
    names = conn.list_partitions(self._name)
    return [Partition(self, n, construct_only=True) for n in names]
def partition(self, partition_name) -> Partition:
    """
    Returns the partition with the given name, or None when it is absent.

    :param partition_name: name of the partition to fetch
    :type partition_name: str
    :return Partition: the matching Partition object, or None.
    :raises CollectionNotExistException: If the collection does not exist.
    """
    if self.has_partition(partition_name) is False:
        return None
    return Partition(self, partition_name, construct_only=True)
def create_partition(self, partition_name, description=""):
    """
    Creates a new partition in the collection.

    :param partition_name: name of the partition to create
    :type partition_name: str
    :param description: description attached to the new partition
    :type description: str
    :return Partition: the newly created Partition object.
    :raises CollectionNotExistException: If the collection does not exist.
    :raises PartitionAlreadyExistException: If the partition already exists.
    """
    if self.has_partition(partition_name) is True:
        raise PartitionAlreadyExistException(0, ExceptionsMessage.PartitionAlreadyExist)
    return Partition(self, partition_name, description=description)
def has_partition(self, partition_name, timeout=None) -> bool:
    """
    Checks whether a partition with the given name exists.

    :param partition_name: name of the partition to look up
    :type partition_name: str
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :return bool: True when the partition exists, False otherwise.
    :raises CollectionNotExistException: If the collection does not exist.
    """
    handler = self._get_connection()
    return handler.has_partition(self._name, partition_name, timeout=timeout)
def drop_partition(self, partition_name, timeout=None, **kwargs):
    """
    Drops a partition together with its index files.

    :param partition_name: name of the partition to drop
    :type partition_name: str
    :param timeout: optional RPC timeout in seconds; None waits until the
                    server responds or an error occurs.
    :type timeout: float
    :raises CollectionNotExistException: If the collection does not exist.
    :raises PartitionNotExistException: If the partition does not exist.
    """
    if self.has_partition(partition_name) is False:
        raise PartitionNotExistException(0, ExceptionsMessage.PartitionNotExist)
    handler = self._get_connection()
    return handler.drop_partition(self._name, partition_name, timeout=timeout, **kwargs)
# The server side not yet finished to return aliases by the describe_collection api.
# Disable this property until the work is done.
# @property
# def aliases(self) -> list:
# """
# Returns alias list of the collection.
#
# :return list of str:
# The collection aliases, returned when the operation succeeds.
#
# :example:
# >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
# >>> connections.connect()
# >>> fields = [
# ... FieldSchema("film_id", DataType.INT64, is_primary=True),
# ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
# ... ]
# >>> schema = CollectionSchema(fields)
# >>> collection = Collection("test_collection_name", schema)
# >>> collection.create_alias("tom")
# >>> collection.alias
# ['tom']
# """
# conn = self._get_connection()
# has = conn.has_collection(self._name)
# aliases = []
# if has:
# resp = conn.describe_collection(self._name)
# aliases = resp['aliases']
# return aliases
@property
def indexes(self) -> list:
    """
    Returns all indexes of the collection.

    :return list[Index]: the collection's Index objects (empty when no
        index has been created).
    :raises CollectionNotExistException: If the collection does not exist.
    """
    conn = self._get_connection()
    result = []
    described = conn.describe_index(self._name)
    if described is not None:
        field = described.pop("field_name", None)
        result.append(Index(self, field, described, construct_only=True))
    return result
def index(self) -> Index:
"""
Fetches the index object of the of the specified name.
:return Index:
Index object corresponding to index_name.
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the specified index does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
Status(code=0, message='')
>>> collection.indexes
[<pymilvus.index.Index object at 0x7f4435587e20>]
>>> collection.index()
<pymilvus.index.Index object at 0x7f44355a1460>
"""
conn = self._get_connection()
tmp_index = conn.describe_index(self._name)
if tmp_index is not None:
field_name = tmp_index.pop("field_name", None)
return Index(self, field_name, tmp_index, construct_only=True)
raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
def create_index(self, field_name, index_params, timeout=None, **kwargs) -> Index:
"""
Creates index for a specified field. Return Index Object.
:param field_name: The name of the field to create an index for.
:type field_name: str
:param index_params: The indexing parameters.
:type index_params: dict
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:raises CollectionNotExistException: If the collection does not exist.
:raises ParamError: If the index parameters are invalid.
:raises BaseException: If field does not exist.
:raises BaseException: If the index has been created.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_create_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
Status(code=0, message='')
>>> collection.index()
<pymilvus.index.Index object at 0x7f44355a1460>
"""
conn = self._get_connection()
return conn.create_index(self._name, field_name, index_params,
timeout=timeout, **kwargs)
def has_index(self, timeout=None) -> bool:
"""
Checks whether a specified index exists.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:return bool:
Whether the specified index exists.
:raises CollectionNotExistException: If the collection does not exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_has_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
>>> collection.has_index()
True
"""
conn = self._get_connection()
# TODO(yukun): Need field name, but provide index name
if conn.describe_index(self._name, "", timeout=timeout) is None:
return False
return True
def drop_index(self, timeout=None, **kwargs):
"""
Drop index and its corresponding index files.
:param timeout:
* *timeout* (``float``) --
An optional duration of time in seconds to allow for the RPC. If timeout
is set to None, the client keeps waiting until the server responds or an error occurs.
Optional. A duration of time in seconds.
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the index does not exist or has been dropped.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_has_index", schema)
>>> index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
>>> collection.create_index("films", index)
>>> collection.has_index()
True
>>> collection.drop_index()
>>> collection.has_index()
False
"""
if self.has_index() is False:
raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
conn = self._get_connection()
tmp_index = conn.describe_index(self._name, "")
if tmp_index is not None:
index = Index(self, tmp_index['field_name'], tmp_index, construct_only=True)
index.drop(timeout=timeout, **kwargs)
def create_alias(self, alias, timeout=None, **kwargs):
"""
Specify alias for a collection.
Alias cannot be duplicated, you can't assign same alias to different collections.
But you can specify multiple aliases for a collection, for example:
before create_alias("collection_1", "bob"):
collection_1's aliases = ["tom"]
after create_alias("collection_1", "bob"):
collection_1's aliases = ["tom", "bob"]
:param alias: The alias of the collection.
:type alias: str.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the alias failed to create.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_create_index", schema)
>>> collection.create_alias("alias")
Status(code=0, message='')
"""
conn = self._get_connection()
conn.create_alias(self._name, alias, timeout=timeout, **kwargs)
def drop_alias(self, alias, timeout=None, **kwargs):
"""
Delete an alias.
This api no need to specify collection name because the milvus server knows which collection it belongs.
For example:
before drop_alias("bob"):
collection_1's aliases = ["tom", "bob"]
after drop_alias("bob"):
collection_1's aliases = ["tom"]
:param alias: The alias of the collection.
:type alias: str.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the alias doesn't exist.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_create_index", schema)
>>> collection.create_alias("alias")
>>> collection.drop_alias("alias")
Status(code=0, message='')
"""
conn = self._get_connection()
conn.drop_alias(alias, timeout=timeout, **kwargs)
def alter_alias(self, alias, timeout=None, **kwargs):
"""
Change alias of a collection to another collection. If the alias doesn't exist, the api will return error.
Alias cannot be duplicated, you can't assign same alias to different collections.
This api can change alias owner collection, for example:
before alter_alias("collection_2", "bob"):
collection_1's aliases = ["bob"]
collection_2's aliases = []
after alter_alias("collection_2", "bob"):
collection_1's aliases = []
collection_2's aliases = ["bob"]
:param alias: The alias of the collection.
:type alias: str.
:param timeout: An optional duration of time in seconds to allow for the RPC. When timeout
is set to None, client waits until server response or error occur
:type timeout: float
:raises CollectionNotExistException: If the collection does not exist.
:raises BaseException: If the alias failed to alter.
:example:
>>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
>>> connections.connect()
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_create_index", schema)
>>> collection.alter_alias("alias")
if the alias exists, return Status(code=0, message='')
otherwise return Status(code=1, message='alias does not exist')
"""
conn = self._get_connection()
conn.alter_alias(self._name, alias, timeout=timeout, **kwargs)
| 43.309031 | 120 | 0.600318 |
import copy
import json
import pandas
from .connections import get_connection
from .schema import (
CollectionSchema,
FieldSchema,
parse_fields_from_data,
infer_dtype_bydata,
)
from .prepare import Prepare
from .partition import Partition
from .index import Index
from .search import SearchResult
from .mutation import MutationResult
from .types import DataType
from .exceptions import (
SchemaNotReadyException,
DataTypeNotMatchException,
DataNotMatchException,
ConnectionNotExistException,
PartitionAlreadyExistException,
PartitionNotExistException,
IndexNotExistException,
AutoIDException,
ExceptionsMessage,
)
from .future import SearchFuture, MutationFuture
def _check_schema(schema):
    """Validate a collection schema before creating a collection.

    :param schema: the CollectionSchema to validate.
    :raises SchemaNotReadyException: if *schema* is None, declares no fields,
        or declares no vector (FLOAT_VECTOR / BINARY_VECTOR) field.
    """
    if schema is None:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
    if not schema.fields:
        raise SchemaNotReadyException(0, ExceptionsMessage.EmptySchema)
    # A collection must contain at least one vector field.
    has_vector = any(field.dtype in (DataType.FLOAT_VECTOR, DataType.BINARY_VECTOR)
                     for field in schema.fields)
    if not has_vector:
        raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
def _check_data_schema(fields, data):
    """Verify that every value in *data* matches its field's declared dtype.

    :param fields: sequence of FieldSchema objects aligned with *data*.
    :param data: a pandas.DataFrame keyed by field name, or a list of columns
                 aligned positionally with *fields*.
    :raises DataNotMatchException: if any value's inferred dtype differs from
                                   its field's declared dtype.
    """
    for position, field in enumerate(fields):
        # DataFrames are addressed by column name, plain lists by position;
        # iterating the column directly avoids per-item iloc indexing.
        column = data[field.name] if isinstance(data, pandas.DataFrame) else data[position]
        for value in column:
            if infer_dtype_bydata(value) != field.dtype:
                raise DataNotMatchException(0, ExceptionsMessage.DataTypeInconsistent)
class Collection:
def __init__(self, name, schema=None, using="default", shards_num=2, **kwargs):
self._name = name
self._using = using
self._shards_num = shards_num
self._kwargs = kwargs
conn = self._get_connection()
has = conn.has_collection(self._name)
if has:
resp = conn.describe_collection(self._name)
server_schema = CollectionSchema.construct_from_dict(resp)
if schema is None:
self._schema = server_schema
else:
if not isinstance(schema, CollectionSchema):
raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
if server_schema != schema:
raise SchemaNotReadyException(0, ExceptionsMessage.SchemaInconsistent)
self._schema = schema
else:
if schema is None:
raise SchemaNotReadyException(0, ExceptionsMessage.CollectionNotExistNoSchema % name)
if isinstance(schema, CollectionSchema):
_check_schema(schema)
conn.create_collection(self._name, fields=schema.to_dict(), shards_num=self._shards_num)
self._schema = schema
else:
raise SchemaNotReadyException(0, ExceptionsMessage.SchemaType)
def __repr__(self):
return json.dumps({
'name': self.name,
'schema': self._schema.to_dict(),
'partitions': [json.loads(p.__repr__()) for p in self.partitions],
'description': self.description,
})
def _get_connection(self):
conn = get_connection(self._using)
if conn is None:
raise ConnectionNotExistException(0, ExceptionsMessage.ConnectFirst)
return conn
def _check_insert_data_schema(self, data):
if self._schema is None:
return False
if self._schema.auto_id:
if isinstance(data, pandas.DataFrame):
if self._schema.primary_field.name in data:
if not data[self._schema.primary_field.name].isnull().all():
raise DataNotMatchException(0, ExceptionsMessage.AutoIDWithData)
data = data.drop(self._schema.primary_field.name, axis=1)
infer_fields = parse_fields_from_data(data)
tmp_fields = copy.deepcopy(self._schema.fields)
for i, field in enumerate(self._schema.fields):
if field.is_primary and field.auto_id:
tmp_fields.pop(i)
if len(infer_fields) != len(tmp_fields):
raise DataTypeNotMatchException(0, ExceptionsMessage.FieldsNumInconsistent)
_check_data_schema(infer_fields, data)
for x, y in zip(infer_fields, tmp_fields):
if x.dtype != y.dtype:
return False
if isinstance(data, pandas.DataFrame):
if x.name != y.name:
return False
return True
def _check_schema(self):
if self._schema is None:
raise SchemaNotReadyException(0, ExceptionsMessage.NoSchema)
def _get_vector_field(self) -> str:
for field in self._schema.fields:
if field.dtype == DataType.FLOAT_VECTOR or field.dtype == DataType.BINARY_VECTOR:
return field.name
raise SchemaNotReadyException(0, ExceptionsMessage.NoVector)
@classmethod
def construct_from_dataframe(cls, name, dataframe, **kwargs):
if dataframe is None:
raise SchemaNotReadyException(0, ExceptionsMessage.NoneDataFrame)
if not isinstance(dataframe, pandas.DataFrame):
raise SchemaNotReadyException(0, ExceptionsMessage.DataFrameType)
primary_field = kwargs.pop("primary_field", None)
if primary_field is None:
raise SchemaNotReadyException(0, ExceptionsMessage.NoPrimaryKey)
pk_index = -1
for i, field in enumerate(dataframe):
if field == primary_field:
pk_index = i
if pk_index == -1:
raise SchemaNotReadyException(0, ExceptionsMessage.PrimaryKeyNotExist)
if "auto_id" in kwargs:
if not isinstance(kwargs.get("auto_id", None), bool):
raise AutoIDException(0, ExceptionsMessage.AutoIDType)
auto_id = kwargs.pop("auto_id", False)
if auto_id:
if dataframe[primary_field].isnull().all():
dataframe = dataframe.drop(primary_field, axis=1)
else:
raise SchemaNotReadyException(0, ExceptionsMessage.AutoIDWithData)
fields = parse_fields_from_data(dataframe)
_check_data_schema(fields, dataframe)
if auto_id:
fields.insert(pk_index, FieldSchema(name=primary_field, dtype=DataType.INT64, is_primary=True, auto_id=True,
**kwargs))
else:
for field in fields:
if field.name == primary_field:
field.is_primary = True
field.auto_id = False
schema = CollectionSchema(fields=fields)
_check_schema(schema)
collection = cls(name, schema, **kwargs)
res = collection.insert(data=dataframe)
return collection, res
@property
def schema(self) -> CollectionSchema:
return self._schema
@property
def description(self) -> str:
return self._schema.description
@property
def name(self) -> str:
return self._name
@property
def is_empty(self) -> bool:
return self.num_entities == 0
@property
def num_entities(self) -> int:
conn = self._get_connection()
conn.flush([self._name])
status = conn.get_collection_stats(db_name="", collection_name=self._name)
return status["row_count"]
@property
def primary_field(self) -> FieldSchema:
return self._schema.primary_field
def drop(self, timeout=None, **kwargs):
conn = self._get_connection()
indexes = self.indexes
for index in indexes:
index.drop(timeout=timeout, **kwargs)
conn.drop_collection(self._name, timeout=timeout, **kwargs)
def load(self, partition_names=None, timeout=None, **kwargs):
conn = self._get_connection()
if partition_names is not None:
conn.load_partitions(self._name, partition_names, timeout=timeout, **kwargs)
else:
conn.load_collection(self._name, timeout=timeout, **kwargs)
def release(self, timeout=None, **kwargs):
conn = self._get_connection()
conn.release_collection(self._name, timeout=timeout, **kwargs)
def insert(self, data, partition_name=None, timeout=None, **kwargs):
if data is None:
return MutationResult(data)
if not self._check_insert_data_schema(data):
raise SchemaNotReadyException(0, ExceptionsMessage.TypeOfDataAndSchemaInconsistent)
conn = self._get_connection()
entities = Prepare.prepare_insert_data(data, self._schema)
res = conn.insert(collection_name=self._name, entities=entities, ids=None,
partition_name=partition_name, timeout=timeout, **kwargs)
if kwargs.get("_async", False):
return MutationFuture(res)
return MutationResult(res)
def delete(self, expr, partition_name=None, timeout=None, **kwargs):
conn = self._get_connection()
res = conn.delete(collection_name=self._name, expr=expr,
partition_name=partition_name, timeout=timeout, **kwargs)
if kwargs.get("_async", False):
return MutationFuture(res)
return MutationResult(res)
def search(self, data, anns_field, param, limit, expr=None, partition_names=None,
output_fields=None, timeout=None, round_decimal=-1, **kwargs):
if expr is not None and not isinstance(expr, str):
raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
conn = self._get_connection()
res = conn.search(self._name, data, anns_field, param, limit, expr,
partition_names, output_fields, timeout, round_decimal, **kwargs)
if kwargs.get("_async", False):
return SearchFuture(res)
return SearchResult(res)
def query(self, expr, output_fields=None, partition_names=None, timeout=None):
if not isinstance(expr, str):
raise DataTypeNotMatchException(0, ExceptionsMessage.ExprType % type(expr))
conn = self._get_connection()
res = conn.query(self._name, expr, output_fields, partition_names, timeout)
return res
@property
def partitions(self) -> list:
conn = self._get_connection()
partition_strs = conn.list_partitions(self._name)
partitions = []
for partition in partition_strs:
partitions.append(Partition(self, partition, construct_only=True))
return partitions
def partition(self, partition_name) -> Partition:
if self.has_partition(partition_name) is False:
return None
return Partition(self, partition_name, construct_only=True)
def create_partition(self, partition_name, description=""):
if self.has_partition(partition_name) is True:
raise PartitionAlreadyExistException(0, ExceptionsMessage.PartitionAlreadyExist)
return Partition(self, partition_name, description=description)
def has_partition(self, partition_name, timeout=None) -> bool:
conn = self._get_connection()
return conn.has_partition(self._name, partition_name, timeout=timeout)
def drop_partition(self, partition_name, timeout=None, **kwargs):
if self.has_partition(partition_name) is False:
raise PartitionNotExistException(0, ExceptionsMessage.PartitionNotExist)
conn = self._get_connection()
return conn.drop_partition(self._name, partition_name, timeout=timeout, **kwargs)
# Returns alias list of the collection.
#
# :return list of str:
# The collection aliases, returned when the operation succeeds.
#
# :example:
# >>> from pymilvus import connections, Collection, FieldSchema, CollectionSchema, DataType
# >>> connections.connect()
# >>> fields = [
# ... FieldSchema("film_id", DataType.INT64, is_primary=True),
# ... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=128)
# ... ]
# >>> schema = CollectionSchema(fields)
# >>> collection = Collection("test_collection_name", schema)
# >>> collection.create_alias("tom")
# >>> collection.alias
# ['tom']
# """
@property
def indexes(self) -> list:
conn = self._get_connection()
indexes = []
tmp_index = conn.describe_index(self._name)
if tmp_index is not None:
field_name = tmp_index.pop("field_name", None)
indexes.append(Index(self, field_name, tmp_index, construct_only=True))
return indexes
def index(self) -> Index:
conn = self._get_connection()
tmp_index = conn.describe_index(self._name)
if tmp_index is not None:
field_name = tmp_index.pop("field_name", None)
return Index(self, field_name, tmp_index, construct_only=True)
raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
def create_index(self, field_name, index_params, timeout=None, **kwargs) -> Index:
conn = self._get_connection()
return conn.create_index(self._name, field_name, index_params,
timeout=timeout, **kwargs)
def has_index(self, timeout=None) -> bool:
conn = self._get_connection()
if conn.describe_index(self._name, "", timeout=timeout) is None:
return False
return True
def drop_index(self, timeout=None, **kwargs):
if self.has_index() is False:
raise IndexNotExistException(0, ExceptionsMessage.IndexNotExist)
conn = self._get_connection()
tmp_index = conn.describe_index(self._name, "")
if tmp_index is not None:
index = Index(self, tmp_index['field_name'], tmp_index, construct_only=True)
index.drop(timeout=timeout, **kwargs)
def create_alias(self, alias, timeout=None, **kwargs):
conn = self._get_connection()
conn.create_alias(self._name, alias, timeout=timeout, **kwargs)
def drop_alias(self, alias, timeout=None, **kwargs):
conn = self._get_connection()
conn.drop_alias(alias, timeout=timeout, **kwargs)
def alter_alias(self, alias, timeout=None, **kwargs):
conn = self._get_connection()
conn.alter_alias(self._name, alias, timeout=timeout, **kwargs)
| true | true |
f71fb01cbdb1f124478ac2b092b7ac4885231833 | 119 | py | Python | examples/test_error.py | ak1ra24/pytest-md-report | 9d861a9237176e9dd1e6872c197f5bb5985ee049 | [
"MIT"
] | 9 | 2020-05-06T20:54:29.000Z | 2022-03-27T04:11:38.000Z | examples/test_error.py | solisa986/pytest-md-report | a6cdeda92ef8f1ab64c346a86a085ce9e1585880 | [
"MIT"
] | null | null | null | examples/test_error.py | solisa986/pytest-md-report | a6cdeda92ef8f1ab64c346a86a085ce9e1585880 | [
"MIT"
] | 3 | 2021-05-05T19:58:33.000Z | 2021-08-12T07:14:52.000Z | def test_error(invalid_fixture):
pass
class Test:
    # Class-based variant of the undefined-fixture pattern: requesting a
    # fixture that presumably does not exist makes pytest report an ERROR
    # for this test as well — confirm `invalid_fixture` is never defined.
    def test_error(self, invalid_fixture):
        assert True
| 14.875 | 42 | 0.697479 | def test_error(invalid_fixture):
pass
class Test:
def test_error(self, invalid_fixture):
assert True
| true | true |
f71fb03c0051a7dd823c621bfe4bd61238f148c4 | 2,121 | py | Python | ciphers/rabin_miller.py | joeyzhou85/python | 9c0cbe33076a570a3c02825b7c6d9866a760e777 | [
"MIT"
] | 1,568 | 2019-04-25T11:54:45.000Z | 2022-03-31T23:35:23.000Z | ciphers/rabin_miller.py | joeyzhou85/python | 9c0cbe33076a570a3c02825b7c6d9866a760e777 | [
"MIT"
] | 58 | 2019-02-20T10:45:50.000Z | 2020-09-30T12:18:45.000Z | ciphers/rabin_miller.py | joeyzhou85/python | 9c0cbe33076a570a3c02825b7c6d9866a760e777 | [
"MIT"
] | 464 | 2019-04-17T04:57:16.000Z | 2022-03-31T04:12:57.000Z | from __future__ import print_function
# Primality Testing with the Rabin-Miller Algorithm
import random
def rabinMiller(num):
    """Miller-Rabin probabilistic primality test.

    Runs 5 rounds with random bases: a prime always returns True, and a
    composite slips through one round with probability at most 1/4, so the
    overall false-positive rate is at most 4**-5.

    :param num: odd integer > 3 to test (isPrime() pre-filters the rest).
    :return: True if num is probably prime, False if certainly composite.
    """
    # Factor num - 1 as (2 ** t) * s with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):  # 5 independent rounds with random bases
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:  # a**s == 1 already passes this round
            i = 0
            # Square repeatedly; for a prime the sequence must reach num - 1
            # before the exponent reaches (num - 1) / 2.
            while v != (num - 1):
                if i == t - 1:
                    return False  # a is a witness: num is composite
                i = i + 1
                v = (v * v) % num
    return True
def isPrime(num):
    """Primality test: cheap deterministic screens first, Rabin-Miller last.

    :param num: integer to test.
    :return: True if num is (probably) prime, False otherwise.
    """
    if num < 2:
        return False  # 0, 1 and negative numbers are not prime

    # Primes below 1000: used both as an exact membership check and as
    # trial divisors, which quickly rules out most composites.
    lowPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
                 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
                 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191,
                 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
                 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331,
                 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401,
                 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
                 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
                 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
                 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
                 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797,
                 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877,
                 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
                 971, 977, 983, 991, 997]

    if num in lowPrimes:
        return True

    # A multiple of a small prime (other than the prime itself) is composite.
    if any(num % prime == 0 for prime in lowPrimes):
        return False

    # No small factor found: fall back to the probabilistic test.
    return rabinMiller(num)
def generateLargePrime(keysize = 1024):
    """Return a random prime of roughly *keysize* bits.

    Draws uniform candidates from [2 ** (keysize - 1), 2 ** keysize) until
    one passes isPrime().
    """
    while True:
        candidate = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if isPrime(candidate):
            return candidate
if __name__ == '__main__':
    # Demo: generate a 1024-bit prime and re-verify it.
    # (Each print receives a single tuple argument, so the output shows the
    # tuple's repr.)
    num = generateLargePrime()
    print(('Prime number:', num))
    print(('isPrime:', isPrime(num)))
| 32.630769 | 80 | 0.474305 | from __future__ import print_function
import random
def rabinMiller(num):
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for trials in range(5):
a = random.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v ** 2) % num
return True
def isPrime(num):
if (num < 2):
return False
lowPrimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,
131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191,
193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331,
337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401,
409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467,
479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709,
719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797,
809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877,
881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967,
971, 977, 983, 991, 997]
if num in lowPrimes:
return True
for prime in lowPrimes:
if (num % prime) == 0:
return False
return rabinMiller(num)
def generateLargePrime(keysize = 1024):
while True:
num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
if isPrime(num):
return num
if __name__ == '__main__':
num = generateLargePrime()
print(('Prime number:', num))
print(('isPrime:', isPrime(num)))
| true | true |
f71fb0d49c3e93cb8477acd1a5f1a4062ffa2e9a | 25,304 | py | Python | qa/rpc-tests/fundrawtransaction.py | suncoin-network/suncoin-core | 0477040d12804678df90d65410052b1b80e786d8 | [
"MIT"
] | 7 | 2018-03-25T21:29:19.000Z | 2019-05-01T02:29:38.000Z | qa/rpc-tests/fundrawtransaction.py | suncoin-network/suncoin-core | 0477040d12804678df90d65410052b1b80e786d8 | [
"MIT"
] | 1 | 2018-12-17T22:52:45.000Z | 2018-12-19T10:40:02.000Z | qa/rpc-tests/fundrawtransaction.py | suncoin-network/suncoin-core | 0477040d12804678df90d65410052b1b80e786d8 | [
"MIT"
] | 5 | 2018-04-07T07:59:38.000Z | 2021-06-04T11:26:29.000Z | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
    def setup_chain(self):
        # Initialize a clean (empty) regtest data directory for 4 nodes
        # instead of reusing a cached pre-mined chain.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)
    def setup_network(self, split=False):
        # Start 4 nodes, each launched with '-usehd=0', and connect them
        # bidirectionally as 0-1, 1-2, 0-2 and 0-3 (node 3 is reachable only
        # via node 0).  The `split` argument is accepted but ignored; the
        # network is always marked as not split before syncing.
        self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
# neg. delta always fail the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enought inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 22 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 50:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
break
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 10:
utx = aUtx
if aUtx['amount'] == 50:
utx2 = aUtx
assert(utx!=False)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 10}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
raise AssertionError("Spent more than available")
except JSONRPCException as e:
assert("Insufficient" in e.error['message'])
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():11,self.nodes[1].getnewaddress():12,self.nodes[1].getnewaddress():1,self.nodes[1].getnewaddress():13,self.nodes[1].getnewaddress():2,self.nodes[1].getnewaddress():3}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
inputs = []
outputs = {mSigObj:11}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 11)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
# send 12 SUNCOIN to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 12)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():11}
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawTx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.nodes[1].encryptwallet("test")
self.nodes.pop(1)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
try:
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('Keypool ran out' in e.error['message'])
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].walletlock()
try:
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 12)
raise AssertionError("Wallet unlocked without passphrase")
except JSONRPCException as e:
assert('walletpassphrase' in e.error['message'])
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 100)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('511.0000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawTx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('500.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
# Entry point: run this functional test directly as a script.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 40.681672 | 214 | 0.557501 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(4, self.options.tmpdir, [['-usehd=0'], ['-usehd=0'], ['-usehd=0'], ['-usehd=0']])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print "Mining blocks..."
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(2000)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
ansaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
| false | true |
f71fb110925949ff47a2cad996420b5abb79125c | 4,468 | py | Python | dimod/generators/chimera.py | pau557/dimod | d3c6d3abf23182b035e1100c46f7c947202edefb | [
"Apache-2.0"
] | null | null | null | dimod/generators/chimera.py | pau557/dimod | d3c6d3abf23182b035e1100c46f7c947202edefb | [
"Apache-2.0"
] | 24 | 2021-07-09T08:19:47.000Z | 2022-03-08T08:15:48.000Z | dimod/generators/chimera.py | pau557/dimod | d3c6d3abf23182b035e1100c46f7c947202edefb | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
from __future__ import absolute_import
import numpy as np
import numpy.random
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.decorators import graph_argument
from dimod.vartypes import SPIN
__all__ = ['chimera_anticluster']
@graph_argument('subgraph', allow_None=True)
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
                        cls=BinaryQuadraticModel, subgraph=None, seed=None):
    """Build an anticluster problem on a Chimera(m, n, t) lattice.

    Couplers inside a tile get random +/-1 biases; couplers between tiles
    get the same random signs scaled by ``multiplier``, making intertile
    interactions the strong ones.

    Args:
        m (int): Number of rows of Chimera tiles.
        n (int, optional): Number of columns of tiles; defaults to ``m``.
        t (int, optional, default=4): Shore size of each tile.
        multiplier (number, optional, default=3.0): Scale applied to the
            intertile couplers.
        cls (type, optional): Binary quadratic model class to construct;
            defaults to :class:`.BinaryQuadraticModel`.
        subgraph (int/tuple[nodes, edges]/list[edge]/:obj:`~networkx.Graph`):
            Optional subgraph of Chimera(m, n, t) to restrict the problem to.
        seed (int, optional): Seed for the random signs; drawn randomly
            when omitted.

    Returns:
        :obj:`.BinaryQuadraticModel`: A spin-valued binary quadratic model.
    """
    if seed is None:
        seed = numpy.random.randint(2**32, dtype=np.uint32)
    rng = numpy.random.RandomState(seed)

    m = int(m)
    n = m if n is None else int(n)
    t = int(t)

    # One linear bias (all zero) per Chimera node.
    linear = np.zeros(2 * m * n * t)

    if not (m and n and t):
        # Degenerate lattice: no nodes, no couplers.
        row = col = quad = tuple()
    else:
        row_in, col_in = zip(*_iter_chimera_tile_edges(m, n, t))

        if m > 1 or n > 1:
            row_out, col_out = zip(*_iter_chimera_intertile_edges(m, n, t))
        else:
            # A single tile has no intertile couplers.
            row_out = col_out = tuple()

        # Random signs for every coupler; intertile ones are scaled up.
        quad = rng.choice((-1., 1.), size=len(row_in) + len(row_out))
        quad[len(row_in):] *= multiplier

        row = row_in + row_out
        col = col_in + col_out

    bqm = cls.from_numpy_vectors(linear, (row, col, quad), 0.0, SPIN)

    if subgraph is not None:
        nodes, edges = subgraph

        subbqm = cls.empty(SPIN)

        try:
            subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
        except KeyError:
            msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
            raise ValueError(msg)

        try:
            subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
        except KeyError:
            msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
            raise ValueError(msg)

        bqm = subbqm

    return bqm
def _iter_chimera_tile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
# tile edges
for edge in ((k0, k1)
for i in range(0, ni, hoff)
for j in range(i, mi, voff)
for k0 in range(j, j + t)
for k1 in range(j + t, j + 2 * t)):
yield edge
def _iter_chimera_intertile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
# horizontal edges
for edge in ((k, k + hoff)
for i in range(t, 2 * t)
for j in range(i, ni - hoff, hoff)
for k in range(j, mi, voff)):
yield edge
# vertical edges
for edge in ((k, k + voff)
for i in range(t)
for j in range(i, ni, hoff)
for k in range(j, mi - voff, voff)):
yield edge
| 29.012987 | 94 | 0.574754 |
from __future__ import absolute_import
import numpy as np
import numpy.random
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.decorators import graph_argument
from dimod.vartypes import SPIN
__all__ = ['chimera_anticluster']
@graph_argument('subgraph', allow_None=True)
def chimera_anticluster(m, n=None, t=4, multiplier=3.0,
cls=BinaryQuadraticModel, subgraph=None, seed=None):
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
m = int(m)
if n is None:
n = m
else:
n = int(n)
t = int(t)
ldata = np.zeros(m*n*t*2)
if m and n and t:
inrow, incol = zip(*_iter_chimera_tile_edges(m, n, t))
if m > 1 or n > 1:
outrow, outcol = zip(*_iter_chimera_intertile_edges(m, n, t))
else:
outrow = outcol = tuple()
qdata = r.choice((-1., 1.), size=len(inrow)+len(outrow))
qdata[len(inrow):] *= multiplier
irow = inrow + outrow
icol = incol + outcol
else:
irow = icol = qdata = tuple()
bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), 0.0, SPIN)
if subgraph is not None:
nodes, edges = subgraph
subbqm = cls.empty(SPIN)
try:
subbqm.add_variables_from((v, bqm.linear[v]) for v in nodes)
except KeyError:
msg = "given 'subgraph' contains nodes not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
try:
subbqm.add_interactions_from((u, v, bqm.adj[u][v]) for u, v in edges)
except KeyError:
msg = "given 'subgraph' contains edges not in Chimera({}, {}, {})".format(m, n, t)
raise ValueError(msg)
bqm = subbqm
return bqm
def _iter_chimera_tile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
for edge in ((k0, k1)
for i in range(0, ni, hoff)
for j in range(i, mi, voff)
for k0 in range(j, j + t)
for k1 in range(j + t, j + 2 * t)):
yield edge
def _iter_chimera_intertile_edges(m, n, t):
hoff = 2 * t
voff = n * hoff
mi = m * voff
ni = n * hoff
for edge in ((k, k + hoff)
for i in range(t, 2 * t)
for j in range(i, ni - hoff, hoff)
for k in range(j, mi, voff)):
yield edge
for edge in ((k, k + voff)
for i in range(t)
for j in range(i, ni, hoff)
for k in range(j, mi - voff, voff)):
yield edge
| true | true |
f71fb1734f8db11d01bd46d0696b2f6a7c2a050c | 1,845 | py | Python | mediagoblin/plugins/subtitles/models.py | mtlynch/mediagoblin | b5ee42aed44052de114c6e45edb56856d2868858 | [
"CC0-1.0"
] | 7 | 2020-05-27T03:57:21.000Z | 2021-04-21T02:17:39.000Z | mediagoblin/plugins/subtitles/models.py | jgarte/mediagoblin-mirror | c4599508b02f2e61df3a97ff314766a62a3e5934 | [
"CC0-1.0"
] | null | null | null | mediagoblin/plugins/subtitles/models.py | jgarte/mediagoblin-mirror | c4599508b02f2e61df3a97ff314766a62a3e5934 | [
"CC0-1.0"
] | 2 | 2019-05-13T14:42:34.000Z | 2021-08-28T10:36:46.000Z | # GNU MediaGoblin -- federated, autonomous media hosting
# Copyright (C) 2016 MediaGoblin contributors. See AUTHORS.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime

from sqlalchemy import Column, Integer, Unicode, ForeignKey, DateTime
from sqlalchemy.orm import relationship
from sqlalchemy.ext.associationproxy import association_proxy

from mediagoblin.db.models import User
from mediagoblin.db.base import Base,MediaEntry
from mediagoblin.db.base import DictReadAttrProxy
from mediagoblin.db.extratypes import PathTupleWithSlashes
class MediaSubtitleFile(Base):
    """A subtitle file attached to a media entry.

    NOTE(review): as shown, this module never imports ``datetime``,
    ``DateTime``, ``association_proxy``, ``PathTupleWithSlashes`` or
    ``DictReadAttrProxy`` -- confirm the import block provides them.
    """
    __tablename__ = "core__subtitle_files"

    # Primary key.
    id = Column(Integer, primary_key=True)
    # FK to the media entry this subtitle file belongs to.
    media_entry = Column(
        Integer, ForeignKey(MediaEntry.id),
        nullable=False)
    # Human-readable name of the subtitle track.
    name = Column(Unicode, nullable=False)
    # Storage path, stored as a tuple of path components.
    filepath = Column(PathTupleWithSlashes)
    # Creation timestamp (UTC, set at insert time).
    created = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)

    @property
    def dict_view(self):
        """A dict-like read-only view on this object's attributes."""
        return DictReadAttrProxy(self)

    # NOTE(review): these two attributes look like they belong on
    # MediaEntry (a one-to-many from the entry to its subtitle files);
    # defining a self-referential relationship here is suspect -- confirm
    # placement against the upstream plugin.
    subtitle_files_helper = relationship("MediaSubtitleFile",
        cascade="all, delete-orphan",
        order_by="MediaSubtitleFile.created"
        )
    # Exposes each related MediaSubtitleFile as its dict_view, and lets
    # callers append plain dicts with "name"/"filepath" keys.
    subtitle_files = association_proxy("subtitle_files_helper", "dict_view",
        creator=lambda v: MediaSubtitleFile(
        name=v["name"], filepath=v["filepath"])
        )
# Models this plugin registers with MediaGoblin's database machinery.
MODELS = [
    MediaSubtitleFile
]
| 36.9 | 80 | 0.732249 |
from sqlalchemy import Column, Integer, Unicode, ForeignKey
from sqlalchemy.orm import relationship
from mediagoblin.db.models import User
from mediagoblin.db.base import Base,MediaEntry
class MediaSubtitleFile(Base):
__tablename__ = "core__subtitle_files"
id = Column(Integer, primary_key=True)
media_entry = Column(
Integer, ForeignKey(MediaEntry.id),
nullable=False)
name = Column(Unicode, nullable=False)
filepath = Column(PathTupleWithSlashes)
created = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
@property
def dict_view(self):
return DictReadAttrProxy(self)
subtitle_files_helper = relationship("MediaSubtitleFile",
cascade="all, delete-orphan",
order_by="MediaSubtitleFile.created"
)
subtitle_files = association_proxy("subtitle_files_helper", "dict_view",
creator=lambda v: MediaSubtitleFile(
name=v["name"], filepath=v["filepath"])
)
MODELS = [
MediaSubtitleFile
]
| true | true |
f71fb1cc1129767d19d13b370609bf72cca258f1 | 1,830 | py | Python | scripts/agregar_empresas_gentor.py | edgarvalli/gentor_backend | 115cadfc802cb5130b62aba5c9b6050cb5f0a466 | [
"MIT"
] | null | null | null | scripts/agregar_empresas_gentor.py | edgarvalli/gentor_backend | 115cadfc802cb5130b62aba5c9b6050cb5f0a466 | [
"MIT"
] | null | null | null | scripts/agregar_empresas_gentor.py | edgarvalli/gentor_backend | 115cadfc802cb5130b62aba5c9b6050cb5f0a466 | [
"MIT"
] | null | null | null | from set_root_path import set_root_path
set_root_path()
import db.sql_server as db
empresas = [
{
"RazonSocial": "GENTOR, S.A. DE C.V.",
"RFC": "GEN760712EM0"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS GENTOR, S.A.",
"RFC": "SCG931026LW1"
},
{
"RazonSocial": "GENTOR SERVICIOS, S.A. DE C.V.",
"RFC": "GSE9212163I9"
},
{
"RazonSocial": "SISTEMAS DE ENERGIA INTERNACIONAL, S.A. DE C.V.",
"RFC": "SEI920618TC5"
},
{
"RazonSocial": "BIOENERGIA DE NUEVO LEON, S.A. DE C.V.",
"RFC": "BNL020412HB8"
},
{
"RazonSocial": "SEISA SERVICIOS Y TECNOLOGIA, S.A. DE C.V.",
"RFC": "SST951003FL8"
},
{
"RazonSocial": "ASTRA LYRA S.A. DE C.V.",
"RFC": "ALY200814LZ4"
},
{
"RazonSocial": "ENVIRONMENT & SOCIETY S.A. DE C.V.",
"RFC": "EAS200814BU1"
},
{
"RazonSocial": "LAND OPERATION S.A. DE C.V.",
"RFC": "LOP191204CD6"
},
{
"RazonSocial": "CHP SOLUTIONS, S.A. DE C.V.",
"RFC": "CSO200716G27"
},
{
"RazonSocial": "DOMOS TELECOMUNICACIONES, S.A. DE C.V.",
"RFC": "DTE940421L42"
},
{
"RazonSocial": "RECOLECCION Y DISPOSICION DE DESECHOS AMBIENTALES SA DE CV",
"RFC": "RDD101105Q91"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS DMS, S.A DE C.V.",
"RFC": "SCD9609068X7"
},
{
"RazonSocial": "INGENIERIA Y MEJORAMIENTO AMBIENTAL, S.A. DE C.V.",
"RFC": "IMA960906DW2"
},
{
"RazonSocial": "PROTECCION INTEGRAL ESPECIALIZADA, S.A. DE C.V.",
"RFC": "PIE950316LU6"
},
{
"RazonSocial": "ODALTA S.A. DE C.V.",
"RFC": "ODA200122KH6"
}
]
db.insertmany("Empresas", empresas)
| 24.4 | 84 | 0.519672 | from set_root_path import set_root_path
set_root_path()
import db.sql_server as db
empresas = [
{
"RazonSocial": "GENTOR, S.A. DE C.V.",
"RFC": "GEN760712EM0"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS GENTOR, S.A.",
"RFC": "SCG931026LW1"
},
{
"RazonSocial": "GENTOR SERVICIOS, S.A. DE C.V.",
"RFC": "GSE9212163I9"
},
{
"RazonSocial": "SISTEMAS DE ENERGIA INTERNACIONAL, S.A. DE C.V.",
"RFC": "SEI920618TC5"
},
{
"RazonSocial": "BIOENERGIA DE NUEVO LEON, S.A. DE C.V.",
"RFC": "BNL020412HB8"
},
{
"RazonSocial": "SEISA SERVICIOS Y TECNOLOGIA, S.A. DE C.V.",
"RFC": "SST951003FL8"
},
{
"RazonSocial": "ASTRA LYRA S.A. DE C.V.",
"RFC": "ALY200814LZ4"
},
{
"RazonSocial": "ENVIRONMENT & SOCIETY S.A. DE C.V.",
"RFC": "EAS200814BU1"
},
{
"RazonSocial": "LAND OPERATION S.A. DE C.V.",
"RFC": "LOP191204CD6"
},
{
"RazonSocial": "CHP SOLUTIONS, S.A. DE C.V.",
"RFC": "CSO200716G27"
},
{
"RazonSocial": "DOMOS TELECOMUNICACIONES, S.A. DE C.V.",
"RFC": "DTE940421L42"
},
{
"RazonSocial": "RECOLECCION Y DISPOSICION DE DESECHOS AMBIENTALES SA DE CV",
"RFC": "RDD101105Q91"
},
{
"RazonSocial": "SERVICIOS CORPORATIVOS DMS, S.A DE C.V.",
"RFC": "SCD9609068X7"
},
{
"RazonSocial": "INGENIERIA Y MEJORAMIENTO AMBIENTAL, S.A. DE C.V.",
"RFC": "IMA960906DW2"
},
{
"RazonSocial": "PROTECCION INTEGRAL ESPECIALIZADA, S.A. DE C.V.",
"RFC": "PIE950316LU6"
},
{
"RazonSocial": "ODALTA S.A. DE C.V.",
"RFC": "ODA200122KH6"
}
]
db.insertmany("Empresas", empresas)
| true | true |
f71fb21ddb1fedaa154d0f69fe5a945c4b945b9c | 13,241 | py | Python | scrapy/core/engine.py | lizhaoxing1/scrapy-comment-zh | 17c6279c63d9733598539589091c5a9551f341f6 | [
"BSD-3-Clause"
] | null | null | null | scrapy/core/engine.py | lizhaoxing1/scrapy-comment-zh | 17c6279c63d9733598539589091c5a9551f341f6 | [
"BSD-3-Clause"
] | null | null | null | scrapy/core/engine.py | lizhaoxing1/scrapy-comment-zh | 17c6279c63d9733598539589091c5a9551f341f6 | [
"BSD-3-Clause"
] | null | null | null | """
This is the Scrapy engine which controls the Scheduler, Downloader and Spiders.
For more information see docs/topics/architecture.rst
"""
import logging
from time import time
from twisted.internet import defer, task
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter, failure_to_exc_info
logger = logging.getLogger(__name__)
class Slot(object):
    """Book-keeping for one open spider: in-flight requests, pending start
    requests, its scheduler and the reactor scheduling hooks."""

    def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
        # False while active; becomes a Deferred once close() is called.
        self.closing = False
        # Requests handed out and not yet fully processed.
        self.inprogress = set()
        self.start_requests = iter(start_requests)
        self.close_if_idle = close_if_idle
        self.nextcall = nextcall
        self.scheduler = scheduler
        # Periodically wakes the engine by scheduling nextcall.
        self.heartbeat = task.LoopingCall(nextcall.schedule)

    def add_request(self, request):
        """Mark *request* as in progress."""
        self.inprogress.add(request)

    def remove_request(self, request):
        """Mark *request* as finished and maybe complete a pending close."""
        self.inprogress.remove(request)
        self._maybe_fire_closing()

    def close(self):
        """Begin closing; returns a Deferred fired once nothing is in flight."""
        self.closing = defer.Deferred()
        self._maybe_fire_closing()
        return self.closing

    def _maybe_fire_closing(self):
        # Only fire once closing was requested and no requests remain.
        if not self.closing or self.inprogress:
            return
        if self.nextcall:
            self.nextcall.cancel()
            if self.heartbeat.running:
                self.heartbeat.stop()
        self.closing.callback(None)
class ExecutionEngine(object):
    # __init__ initializes the crawler's core components.
    def __init__(self, crawler, spider_closed_callback):
        """Wire this engine to *crawler*'s settings, signals and components.

        :param crawler: the crawler owning this engine; its settings,
            signals and log formatter are reused directly.
        :param spider_closed_callback: callable invoked once a spider has
            been fully closed.
        """
        self.crawler = crawler
        self.settings = crawler.settings
        self.signals = crawler.signals
        self.logformatter = crawler.logformatter
        self.slot = None    # per-spider Slot; None while no spider is open
        self.spider = None
        self.running = False
        self.paused = False
        # Scheduler class is resolved from settings but instantiated later,
        # once per spider; downloader and scraper are created right away.
        self.scheduler_cls = load_object(self.settings['SCHEDULER'])
        downloader_cls = load_object(self.settings['DOWNLOADER'])
        self.downloader = downloader_cls(crawler)
        self.scraper = Scraper(crawler)
        self._spider_closed_callback = spider_closed_callback
@defer.inlineCallbacks
def start(self):
"""Start the execution engine"""
assert not self.running, "Engine already running"
self.start_time = time()
yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
self.running = True
self._closewait = defer.Deferred()
yield self._closewait
def stop(self):
"""Stop the execution engine gracefully"""
assert self.running, "Engine not running"
self.running = False
dfd = self._close_all_spiders()
return dfd.addBoth(lambda _: self._finish_stopping_engine())
def close(self):
"""Close the execution engine gracefully.
If it has already been started, stop it. In all cases, close all spiders
and the downloader.
"""
if self.running:
# Will also close spiders and downloader
return self.stop()
elif self.open_spiders:
# Will also close downloader
return self._close_all_spiders()
else:
return defer.succeed(self.downloader.close())
def pause(self):
"""Pause the execution engine"""
self.paused = True
def unpause(self):
"""Resume the execution engine"""
self.paused = False
def _next_request(self, spider):
slot = self.slot
if not slot:
return
if self.paused:
return
while not self._needs_backout(spider):
if not self._next_request_from_scheduler(spider):
break
if slot.start_requests and not self._needs_backout(spider):
try:
request = next(slot.start_requests)
except StopIteration:
slot.start_requests = None
except Exception:
slot.start_requests = None
logger.error('Error while obtaining start requests',
exc_info=True, extra={'spider': spider})
else:
self.crawl(request, spider)
if self.spider_is_idle(spider) and slot.close_if_idle:
self._spider_idle(spider)
def _needs_backout(self, spider):
slot = self.slot
return not self.running \
or slot.closing \
or self.downloader.needs_backout() \
or self.scraper.slot.needs_backout()
def _next_request_from_scheduler(self, spider):
slot = self.slot
request = slot.scheduler.next_request()
if not request:
return
d = self._download(request, spider)
d.addBoth(self._handle_downloader_output, request, spider)
d.addErrback(lambda f: logger.info('Error while handling downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.remove_request(request))
d.addErrback(lambda f: logger.info('Error while removing request from slot',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.nextcall.schedule())
d.addErrback(lambda f: logger.info('Error while scheduling new request',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def _handle_downloader_output(self, response, request, spider):
assert isinstance(response, (Request, Response, Failure)), response
# downloader middleware can return requests (for example, redirects)
if isinstance(response, Request):
self.crawl(response, spider)
return
# response is a Response or Failure
d = self.scraper.enqueue_scrape(response, request, spider)
d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def spider_is_idle(self, spider):
if not self.scraper.slot.is_idle():
# scraper is not idle
return False
if self.downloader.active:
# downloader has pending requests
return False
if self.slot.start_requests is not None:
# not all start requests are handled
return False
if self.slot.scheduler.has_pending_requests():
# scheduler has pending requests
return False
return True
@property
def open_spiders(self):
return [self.spider] if self.spider else []
def has_capacity(self):
"""Does the engine have capacity to handle more spiders"""
return not bool(self.slot)
def crawl(self, request, spider):
assert spider in self.open_spiders, \
"Spider %r not opened when crawling: %s" % (spider.name, request)
self.schedule(request, spider)
self.slot.nextcall.schedule()
def schedule(self, request, spider):
self.signals.send_catch_log(signal=signals.request_scheduled,
request=request, spider=spider)
if not self.slot.scheduler.enqueue_request(request):
self.signals.send_catch_log(signal=signals.request_dropped,
request=request, spider=spider)
def download(self, request, spider):
d = self._download(request, spider)
d.addBoth(self._downloaded, self.slot, request, spider)
return d
def _downloaded(self, response, slot, request, spider):
slot.remove_request(request)
return self.download(response, spider) \
if isinstance(response, Request) else response
def _download(self, request, spider):
slot = self.slot
slot.add_request(request)
def _on_success(response):
assert isinstance(response, (Response, Request))
if isinstance(response, Response):
response.request = request # tie request to response received
logkws = self.logformatter.crawled(request, response, spider)
logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
self.signals.send_catch_log(signal=signals.response_received, \
response=response, request=request, spider=spider)
return response
def _on_complete(_):
slot.nextcall.schedule()
return _
dwld = self.downloader.fetch(request, spider)
dwld.addCallbacks(_on_success)
dwld.addBoth(_on_complete)
return dwld
@defer.inlineCallbacks
def open_spider(self, spider, start_requests=(), close_if_idle=True):
# 函数 实例了调度器, 如何爬取,爬取过滤方法等等.
assert self.has_capacity(), "No free spider slot when opening %r" % \
spider.name
logger.info("Spider opened", extra={'spider': spider})
nextcall = CallLaterOnce(self._next_request, spider) # 这是给异步 的循环调用用的东西.
scheduler = self.scheduler_cls.from_crawler(self.crawler) # 调度器的实例化
start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider) # 这里调用spdiermw 会读取你配置文件中中间件并处理
slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
self.slot = slot
self.spider = spider
yield scheduler.open(spider)
yield self.scraper.open_spider(spider)
self.crawler.stats.open_spider(spider)
yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
slot.nextcall.schedule()
slot.heartbeat.start(5)
def _spider_idle(self, spider):
"""Called when a spider gets idle. This function is called when there
are no remaining pages to download or schedule. It can be called
multiple times. If some extension raises a DontCloseSpider exception
(in the spider_idle signal handler) the spider is not closed until the
next loop and this function is guaranteed to be called (at least) once
again for this spider.
"""
res = self.signals.send_catch_log(signal=signals.spider_idle, \
spider=spider, dont_log=DontCloseSpider)
if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
for _, x in res):
return
if self.spider_is_idle(spider):
self.close_spider(spider, reason='finished')
def close_spider(self, spider, reason='cancelled'):
"""Close (cancel) spider and clear all its outstanding requests"""
slot = self.slot
if slot.closing:
return slot.closing
logger.info("Closing spider (%(reason)s)",
{'reason': reason},
extra={'spider': spider})
dfd = slot.close()
def log_failure(msg):
def errback(failure):
logger.error(
msg,
exc_info=failure_to_exc_info(failure),
extra={'spider': spider}
)
return errback
dfd.addBoth(lambda _: self.downloader.close())
dfd.addErrback(log_failure('Downloader close failure'))
dfd.addBoth(lambda _: self.scraper.close_spider(spider))
dfd.addErrback(log_failure('Scraper close failure'))
dfd.addBoth(lambda _: slot.scheduler.close(reason))
dfd.addErrback(log_failure('Scheduler close failure'))
dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
signal=signals.spider_closed, spider=spider, reason=reason))
dfd.addErrback(log_failure('Error while sending spider_close signal'))
dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
dfd.addErrback(log_failure('Stats close failure'))
dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
{'reason': reason},
extra={'spider': spider}))
dfd.addBoth(lambda _: setattr(self, 'slot', None))
dfd.addErrback(log_failure('Error while unassigning slot'))
dfd.addBoth(lambda _: setattr(self, 'spider', None))
dfd.addErrback(log_failure('Error while unassigning spider'))
dfd.addBoth(lambda _: self._spider_closed_callback(spider))
return dfd
def _close_all_spiders(self):
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
dlist = defer.DeferredList(dfds)
return dlist
@defer.inlineCallbacks
def _finish_stopping_engine(self):
yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
self._closewait.callback(None)
| 37.939828 | 130 | 0.624726 | import logging
from time import time
from twisted.internet import defer, task
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter, failure_to_exc_info
logger = logging.getLogger(__name__)
class Slot(object):
    """Per-spider crawl state used by the ExecutionEngine."""

    def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
        self.closing = False  # set to a Deferred once close() is requested
        self.inprogress = set()  # requests in progress
        self.start_requests = iter(start_requests)
        self.close_if_idle = close_if_idle
        self.nextcall = nextcall
        self.scheduler = scheduler
        # periodically re-triggers the engine loop
        self.heartbeat = task.LoopingCall(nextcall.schedule)

    def add_request(self, request):
        # track a newly started request
        self.inprogress.add(request)

    def remove_request(self, request):
        # untrack a finished request; may complete a pending close()
        self.inprogress.remove(request)
        self._maybe_fire_closing()

    def close(self):
        """Request closing; the returned Deferred fires once drained."""
        self.closing = defer.Deferred()
        self._maybe_fire_closing()
        return self.closing

    def _maybe_fire_closing(self):
        # fire the closing Deferred only when close() was requested and no
        # requests remain in flight
        if self.closing and not self.inprogress:
            if self.nextcall:
                self.nextcall.cancel()
            if self.heartbeat.running:
                self.heartbeat.stop()
            self.closing.callback(None)
class ExecutionEngine(object):
    """Coordinates the Scheduler, Downloader and Scraper for a single crawl.

    (Comment-stripped duplicate copy of the engine; docstrings restored.)
    """

    def __init__(self, crawler, spider_closed_callback):
        # Initializes the crawler's core components from settings.
        self.crawler = crawler
        self.settings = crawler.settings
        self.signals = crawler.signals
        self.logformatter = crawler.logformatter
        self.slot = None
        self.spider = None
        self.running = False
        self.paused = False
        self.scheduler_cls = load_object(self.settings['SCHEDULER'])
        downloader_cls = load_object(self.settings['DOWNLOADER'])
        self.downloader = downloader_cls(crawler)
        self.scraper = Scraper(crawler)
        self._spider_closed_callback = spider_closed_callback

    @defer.inlineCallbacks
    def start(self):
        """Start the execution engine"""
        assert not self.running, "Engine already running"
        self.start_time = time()
        yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
        self.running = True
        # _closewait keeps start() pending until stop() fires it
        self._closewait = defer.Deferred()
        yield self._closewait

    def stop(self):
        """Stop the execution engine gracefully"""
        assert self.running, "Engine not running"
        self.running = False
        dfd = self._close_all_spiders()
        return dfd.addBoth(lambda _: self._finish_stopping_engine())

    def close(self):
        """Close the engine: stop it if running, else close spiders/downloader."""
        if self.running:
            # will also close spiders and downloader
            return self.stop()
        elif self.open_spiders:
            # will also close downloader
            return self._close_all_spiders()
        else:
            return defer.succeed(self.downloader.close())

    def pause(self):
        """Pause the execution engine"""
        self.paused = True

    def unpause(self):
        """Resume the execution engine"""
        self.paused = False

    def _next_request(self, spider):
        # Engine loop: drain the scheduler until a component needs backout,
        # then feed at most one new start request per pass.
        slot = self.slot
        if not slot:
            return
        if self.paused:
            return
        while not self._needs_backout(spider):
            if not self._next_request_from_scheduler(spider):
                break
        if slot.start_requests and not self._needs_backout(spider):
            try:
                request = next(slot.start_requests)
            except StopIteration:
                slot.start_requests = None
            except Exception:
                # a broken start_requests iterator is disabled, not retried
                slot.start_requests = None
                logger.error('Error while obtaining start requests',
                             exc_info=True, extra={'spider': spider})
            else:
                self.crawl(request, spider)
        if self.spider_is_idle(spider) and slot.close_if_idle:
            self._spider_idle(spider)

    def _needs_backout(self, spider):
        # True when any downstream component cannot accept more work.
        slot = self.slot
        return not self.running \
            or slot.closing \
            or self.downloader.needs_backout() \
            or self.scraper.slot.needs_backout()

    def _next_request_from_scheduler(self, spider):
        # Pop one request from the scheduler and push it through download ->
        # scrape, re-scheduling the loop when the request completes.
        slot = self.slot
        request = slot.scheduler.next_request()
        if not request:
            return
        d = self._download(request, spider)
        d.addBoth(self._handle_downloader_output, request, spider)
        d.addErrback(lambda f: logger.info('Error while handling downloader output',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        d.addBoth(lambda _: slot.remove_request(request))
        d.addErrback(lambda f: logger.info('Error while removing request from slot',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        d.addBoth(lambda _: slot.nextcall.schedule())
        d.addErrback(lambda f: logger.info('Error while scheduling new request',
                                           exc_info=failure_to_exc_info(f),
                                           extra={'spider': spider}))
        return d

    def _handle_downloader_output(self, response, request, spider):
        assert isinstance(response, (Request, Response, Failure)), response
        # downloader middleware can return requests (for example, redirects)
        if isinstance(response, Request):
            self.crawl(response, spider)
            return
        # response is a Response or Failure
        d = self.scraper.enqueue_scrape(response, request, spider)
        d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
                                            exc_info=failure_to_exc_info(f),
                                            extra={'spider': spider}))
        return d

    def spider_is_idle(self, spider):
        """Return True when no component has pending work for this spider."""
        if not self.scraper.slot.is_idle():
            # scraper is not idle
            return False
        if self.downloader.active:
            # downloader has pending requests
            return False
        if self.slot.start_requests is not None:
            # not all start requests are handled
            return False
        if self.slot.scheduler.has_pending_requests():
            # scheduler has pending requests
            return False
        return True

    @property
    def open_spiders(self):
        # single-spider engine: at most one spider is ever open
        return [self.spider] if self.spider else []

    def has_capacity(self):
        """Does the engine have capacity to handle more spiders"""
        return not bool(self.slot)

    def crawl(self, request, spider):
        """Schedule *request* and kick the engine loop."""
        assert spider in self.open_spiders, \
            "Spider %r not opened when crawling: %s" % (spider.name, request)
        self.schedule(request, spider)
        self.slot.nextcall.schedule()

    def schedule(self, request, spider):
        """Enqueue *request* on the scheduler, signalling scheduled/dropped."""
        self.signals.send_catch_log(signal=signals.request_scheduled,
                                    request=request, spider=spider)
        if not self.slot.scheduler.enqueue_request(request):
            self.signals.send_catch_log(signal=signals.request_dropped,
                                        request=request, spider=spider)

    def download(self, request, spider):
        """Download *request*, following downloader-returned redirects."""
        d = self._download(request, spider)
        d.addBoth(self._downloaded, self.slot, request, spider)
        return d

    def _downloaded(self, response, slot, request, spider):
        slot.remove_request(request)
        # a Request result means redirect: download it in turn
        return self.download(response, spider) \
            if isinstance(response, Request) else response

    def _download(self, request, spider):
        # Track the request in the slot and fetch it through the downloader.
        slot = self.slot
        slot.add_request(request)

        def _on_success(response):
            assert isinstance(response, (Response, Request))
            if isinstance(response, Response):
                response.request = request  # tie request to response received
                logkws = self.logformatter.crawled(request, response, spider)
                logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
                self.signals.send_catch_log(signal=signals.response_received, \
                    response=response, request=request, spider=spider)
            return response

        def _on_complete(_):
            # keep the loop running regardless of success or failure
            slot.nextcall.schedule()
            return _

        dwld = self.downloader.fetch(request, spider)
        dwld.addCallbacks(_on_success)
        dwld.addBoth(_on_complete)
        return dwld

    @defer.inlineCallbacks
    def open_spider(self, spider, start_requests=(), close_if_idle=True):
        # Instantiates the scheduler and wires the crawl loop for *spider*.
        assert self.has_capacity(), "No free spider slot when opening %r" % \
            spider.name
        logger.info("Spider opened", extra={'spider': spider})
        nextcall = CallLaterOnce(self._next_request, spider)
        scheduler = self.scheduler_cls.from_crawler(self.crawler)
        start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)
        slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
        self.slot = slot
        self.spider = spider
        yield scheduler.open(spider)
        yield self.scraper.open_spider(spider)
        self.crawler.stats.open_spider(spider)
        yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
        slot.nextcall.schedule()
        slot.heartbeat.start(5)

    def _spider_idle(self, spider):
        """Called when a spider gets idle; closes it unless some extension
        raised DontCloseSpider from the spider_idle signal handler."""
        res = self.signals.send_catch_log(signal=signals.spider_idle, \
            spider=spider, dont_log=DontCloseSpider)
        if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
                for _, x in res):
            return
        if self.spider_is_idle(spider):
            self.close_spider(spider, reason='finished')

    def close_spider(self, spider, reason='cancelled'):
        """Close (cancel) spider and clear all its outstanding requests"""
        slot = self.slot
        if slot.closing:
            return slot.closing
        logger.info("Closing spider (%(reason)s)",
                    {'reason': reason},
                    extra={'spider': spider})
        dfd = slot.close()

        def log_failure(msg):
            # build an errback that logs *msg* and swallows the failure so the
            # shutdown chain below always runs to completion
            def errback(failure):
                logger.error(
                    msg,
                    exc_info=failure_to_exc_info(failure),
                    extra={'spider': spider}
                )
            return errback

        dfd.addBoth(lambda _: self.downloader.close())
        dfd.addErrback(log_failure('Downloader close failure'))
        dfd.addBoth(lambda _: self.scraper.close_spider(spider))
        dfd.addErrback(log_failure('Scraper close failure'))
        dfd.addBoth(lambda _: slot.scheduler.close(reason))
        dfd.addErrback(log_failure('Scheduler close failure'))
        dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
            signal=signals.spider_closed, spider=spider, reason=reason))
        dfd.addErrback(log_failure('Error while sending spider_close signal'))
        dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
        dfd.addErrback(log_failure('Stats close failure'))
        dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
                                          {'reason': reason},
                                          extra={'spider': spider}))
        dfd.addBoth(lambda _: setattr(self, 'slot', None))
        dfd.addErrback(log_failure('Error while unassigning slot'))
        dfd.addBoth(lambda _: setattr(self, 'spider', None))
        dfd.addErrback(log_failure('Error while unassigning spider'))
        dfd.addBoth(lambda _: self._spider_closed_callback(spider))
        return dfd

    def _close_all_spiders(self):
        # close every open spider (at most one) and gather the Deferreds
        dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
        dlist = defer.DeferredList(dfds)
        return dlist

    @defer.inlineCallbacks
    def _finish_stopping_engine(self):
        # fire engine_stopped, then release the Deferred start() is waiting on
        yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
        self._closewait.callback(None)
| true | true |
f71fb24b1ca2ef3817592da8e3c5f8b5ac48df99 | 780 | py | Python | nicos_mlz/kws2/setups/uvspectro.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/kws2/setups/uvspectro.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/kws2/setups/uvspectro.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | # -*- coding: utf-8 -*-
# NICOS setup file: the names below (description, group, devices) and the
# device() constructor are interpreted by the NICOS setup loader.
description = 'controlling the UV-vis spectrometer and LEDs'
group = 'optional'

# base address of the Tango server hosting the spectrometer PLC devices
tango_base = 'tango://phys.kws2.frm2:10000/kws2/'

devices = dict(
    OceanView = device('nicos.devices.entangle.DigitalOutput',
        description = 'spectrometer trigger interval (0 to switch off)',
        tangodevice = tango_base + 'uvspectro/plc_trigger',
    ),
    LEDdelay = device('nicos.devices.entangle.DigitalOutput',
        description = 'delay for LEDs switching on',
        tangodevice = tango_base + 'uvspectro/plc_leddelay',
    ),
    LEDswitch = device('nicos.devices.entangle.NamedDigitalOutput',
        description = 'LED switcher',
        tangodevice = tango_base + 'uvspectro/plc_led',
        # integer PLC values mapped to symbolic LED states
        mapping = {'off': 0, 'uv': 1, 'blue': 2},
    ),
)
| 33.913043 | 72 | 0.65641 |
description = 'controlling the UV-vis spectrometer and LEDs'
group = 'optional'
tango_base = 'tango://phys.kws2.frm2:10000/kws2/'
devices = dict(
OceanView = device('nicos.devices.entangle.DigitalOutput',
description = 'spectrometer trigger interval (0 to switch off)',
tangodevice = tango_base + 'uvspectro/plc_trigger',
),
LEDdelay = device('nicos.devices.entangle.DigitalOutput',
description = 'delay for LEDs switching on',
tangodevice = tango_base + 'uvspectro/plc_leddelay',
),
LEDswitch = device('nicos.devices.entangle.NamedDigitalOutput',
description = 'LED switcher',
tangodevice = tango_base + 'uvspectro/plc_led',
mapping = {'off': 0, 'uv': 1, 'blue': 2},
),
)
| true | true |
f71fb300004e91ff987107bb558165bb8d7b340e | 14,538 | py | Python | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/sqlalchemy_datastore.py | chatto-hub-test2/Spaceboy2 | 7b6b91baf06290e6b047ae75e7ea61cee4846b3a | [
"Unlicense",
"MIT"
] | null | null | null | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
# One cached MetaData per engine.
metadatas = {}

def get_engine_metadata(engine):
    """Return the MetaData bound to *engine*, creating and caching it on
    first use."""
    try:
        return metadatas[engine]
    except KeyError:
        metadata = MetaData()
        metadata.bind = engine
        metadatas[engine] = metadata
        return metadata
def get_reflected_metadata(engine, schema_name=None):
    """Build a fresh MetaData bound to *engine*, with all existing tables
    (optionally restricted to one database schema) reflected into it."""
    meta = MetaData()
    meta.bind = engine
    meta.reflect(bind=engine, schema=schema_name)
    return meta
########################################################################
# For these column types the sqlalchemy storage target behaves exactly like
# the pandas one, so the pandas check/transform registry is copied verbatim.
for col_type in [dt, delta, num, bool_]:
    col_type._storage_target_registry['sqlalchemy'] = col_type._storage_target_registry['pandas'].copy()

@cat.register_check('sqlalchemy')
def _(col):
    # categorical columns travel as plain object (string) columns
    return col.dtype == 'object'

@cat.register_transform('sqlalchemy')
def _(col):
    return col.astype('object')

@id_.register_check('sqlalchemy')
def _(col):
    # id columns travel as object columns as well
    return col.dtype == 'object'

@id_.register_transform('sqlalchemy')
def _(col):
    return col.astype('object')

########################################################################
# Per-type factories producing the sqlalchemy Column for a schema column.

@cat.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Text, nullable=True)

@id_.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Integer, nullable=True)

@dt.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)

@delta.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Interval, nullable=True)

@big_dt.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.DateTime(timezone=True), nullable=True)

@num.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Float, nullable=True)

@bool_.register_metadata('sqlalchemy')
def _(self):
    return sql.schema.Column(self.name, sql.sqltypes.Boolean, nullable=True)
########################################################################
@lru_cache()
def schema_as_table(schema, engine):
    """Translate *schema* into a sqlalchemy Table on *engine* (memoized).

    Honors the schema options 'temporary' (CREATE TEMPORARY TABLE) and
    'db_schema' (database schema qualifier).
    """
    prefixes = ['TEMPORARY'] if schema.options.get('temporary', False) else []
    sa_columns = [col.metadata('sqlalchemy') for col in schema.cols]
    return Table(schema.name, get_engine_metadata(engine), *sa_columns,
                 schema=schema.options.get('db_schema', None), prefixes=prefixes)
# Mapping from sqlalchemy column types to schema column constructors; used by
# table_as_schema when reflecting an existing Table back into a Schema.
sa_type_2_col_type = {
    sql.sqltypes.Integer: num,
    sql.sqltypes.String: cat,
    sql.sqltypes.Date: dt,
    sql.sqltypes.DateTime: dt,
    sql.sqltypes.Interval: delta,
    sql.sqltypes.Numeric: num,
    sql.sqltypes.Boolean: bool_
}
def table_as_schema(table):
    """Derive a Schema from a sqlalchemy Table (inverse of schema_as_table).

    Integer primary/foreign keys become id_ columns; other column types are
    mapped through sa_type_2_col_type (first matching entry wins).
    """
    schema_cols = []
    for sa_col in table.c:
        for sa_type, col_type in sa_type_2_col_type.items():
            if not isinstance(sa_col.type, sa_type):
                continue
            if isinstance(sa_col.type, sql.sqltypes.Integer) and (sa_col.primary_key or sa_col.foreign_keys):
                schema_cols.append(id_(sa_col.name))
            else:
                schema_cols.append(col_type(sa_col.name))
            break
    options = {}
    if table.schema is not None:
        options['db_schema'] = table.schema
    return Schema(table.name, schema_cols, options=options)
########################################################################
def fast_sql_to_df(table, schema):
    """Load *table* (a Table or selectable bound to an engine) into a
    DataFrame, using a dialect-specific fast path for MySQL and PostgreSQL
    and falling back to odo for other dialects."""
    engine = table.bind
    if engine.dialect.name == 'mysql':
        return fast_mysql_to_df(table, schema)
    elif engine.dialect.name == 'postgresql':
        return fast_postgresql_to_df(table, schema)
    # generic fallback: let odo do the conversion, then reorder the columns
    # to match the schema
    ods = OdoDataStore(schema, table)
    df = ods.load()
    df = df[schema.col_names()]
    return df
def fast_mysql_to_df(table, schema):
    """Dump *table* to a temporary CSV via MySQL's SELECT ... INTO OUTFILE
    and read it back with pandas.

    *table* may be a Table or an arbitrary selectable bound to a MySQL
    engine. dt columns are parsed with pandas.to_datetime; big_dt columns
    are parsed into datetime.datetime objects via strptime.
    """
    f = tempfile.NamedTemporaryFile('w', suffix='.csv', dir=config.data_dir+'tmp')
    try:
        # Only the unique path is needed; close the handle so the MySQL
        # server can create and write the file itself.
        f.close()
        table_name = str(table)
        if not isinstance(table, Table):
            # arbitrary selectables must be parenthesized
            table_name = '({})'.format(table_name)

        # converting to csv
        # BUG FIX: the OUTFILE placeholder was missing ('(unknown)' literal),
        # so the filename kwarg below was silently ignored and the dump went
        # to the wrong file.
        sql = """SELECT {cols} FROM {table} INTO OUTFILE '{filename}'
        FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"'
        ESCAPED BY '\\\\'
        LINES TERMINATED BY '\n'""".format(
            cols=', '.join('`'+colname+'`' for colname in schema.col_names()),
            filename=f.name,
            table=table_name)
        table.bind.execute(sql)

        # reading csv (MySQL writes NULL as \N)
        df = pandas.read_csv(f.name, header=None, names=schema.col_names(), na_values=['\\N'])
    finally:
        os.remove(f.name)

    for col in schema.cols:
        if isinstance(col, dt):
            # converting datetime column
            df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", coerce=True)
        if isinstance(col, big_dt):
            # converting big_dt column: parsed into datetime.datetime objects
            strptime = datetime.datetime.strptime
            parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
            df[col.name] = df[col.name].map(parse_func, na_action='ignore')
    return df
def fast_postgresql_to_df(table, schema):
    """Load *table* (Table or selectable on a PostgreSQL engine) into a
    DataFrame using COPY ... TO STDOUT through the raw psycopg connection."""
    engine = table.bind
    conn = engine.raw_connection()
    with conn.cursor() as cur:
        with io.StringIO() as f:
            table_name = str(table)
            if not isinstance(table, Table):
                # arbitrary selectables must be parenthesized inside COPY
                table_name = '({})'.format(table_name)
            sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
                table_name=table_name)
            # stream the CSV into an in-memory buffer, then parse it
            cur.copy_expert(sql, f)
            f.seek(0)
            df = pandas.read_csv(f)
    for col in schema.cols:
        if isinstance(col, dt):
            # converting datetime column
            df[col.name] = pandas.to_datetime(df[col.name], format="%Y-%m-%d %H:%M:%S", coerce=True)
        if isinstance(col, big_dt):
            # converting big_dt column: parsed into datetime.datetime objects
            strptime = datetime.datetime.strptime
            parse_func = (lambda x: strptime(x, "%Y-%m-%d %H:%M:%S"))
            df[col.name] = df[col.name].map(parse_func, na_action='ignore')
    return df
def fast_postgresql_to_csv(table, file_path):
    """Dump *table* (Table or selectable on a PostgreSQL engine) straight to
    a CSV file at *file_path* using COPY ... TO STDOUT."""
    conn = table.bind.raw_connection()
    target = str(table)
    if not isinstance(table, Table):
        # arbitrary selectables must be parenthesized inside COPY
        target = '({})'.format(target)
    copy_sql = "COPY {table_name} TO STDOUT WITH (FORMAT CSV, HEADER TRUE)".format(
        table_name=target)
    with conn.cursor() as cur, open(file_path, 'w') as f:
        cur.copy_expert(copy_sql, f)
def fast_df_to_sql(df, table, schema):
    """Store *df* into *table* via odo's sqlalchemy storage target."""
    ods = OdoDataStore(schema, table, storage_target_type='sqlalchemy')
    ods.store(df)
class SATableDataStore(DataStore):
    """DataStore backed by a single database table.

    Optional *where_clauses* turn the store into a filtered, read-only view:
    _store/_update/delete then raise NotImplementedError.
    """

    def __init__(self, schema, engine, where_clauses=None):
        super().__init__(schema)
        self.engine = engine
        self.table = schema_as_table(self.schema, self.engine)
        self.where_clauses = where_clauses

    def storage_target(self):
        return 'sqlalchemy'

    def _load(self):
        """Load the table (with any where clauses applied) into a DataFrame."""
        query = self.table
        if self.where_clauses is not None:
            query = query.select()
            for where_clause in self.where_clauses:
                query = query.where(where_clause)
        df = fast_sql_to_df(query, self.schema)
        return df

    def to_csv(self, file_path):
        """Dump the table (with any where clauses applied) straight to a CSV
        file. PostgreSQL only."""
        if self.engine.dialect.name != 'postgresql':
            raise NotImplementedError('converting directly to csv not supported for non-postgres databases')
        query = self.table
        if self.where_clauses is not None:
            query = query.select()
            for where_clause in self.where_clauses:
                query = query.where(where_clause)
        fast_postgresql_to_csv(query, file_path)

    def _store(self, df):
        """Store *df* into the table."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot store to a query (where_clauses must be left blank)')
        df = df.copy()
        # BUG FIX: the df argument was previously dropped
        # (fast_df_to_sql(self.table, self.schema)), which passed the table
        # where the DataFrame belongs; cf. the correct call in _update.
        fast_df_to_sql(df, self.table, self.schema)

    def _update(self, df):
        """Upsert *df* by the schema's 'index' option: load it into a
        temporary table, update matching rows, then insert new ones."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot update to a query (where_clauses must be left blank)')
        df = df.copy()
        with self.engine.connect() as conn:
            temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
            temp_schema.options['temporary'] = True
            temp_table = schema_as_table(temp_schema, self.engine)
            print('storing new df in temp table')
            fast_df_to_sql(df, temp_table, temp_schema)

            print('updating table from matching rows')
            index = self.schema.options['index']
            update = self.table.update(
                values={
                    col_name: temp_table.c[col_name] for col_name in self.schema.col_names()
                },
                whereclause=self.table.c[index] == temp_table.c[index]
            )
            update_res = conn.execute(update)

            print('inserting new rows into table')
            exists_query = self.table.select().where(self.table.c[index] == temp_table.c[index]).exists()
            insert = self.table.insert().from_select(
                temp_schema.col_names(),
                temp_table.select().where(~exists_query))
            ins_res = conn.execute(insert)

    def delete(self):
        """Drop the underlying table."""
        if self.where_clauses is not None:
            raise NotImplementedError('Cannot delete a query (where_clauses must be left blank)')
        self.table.drop(self.engine)
class SAJoinDataStore(DataStore):
    """DataStore that loads the outer join of a root table with its has-many
    and belongs-to related tables, labelling each column '<prefix>.<name>'
    with the prefix taken from the source schema's options."""

    def __init__(self, root_schema, engine, has_schemas=None, belongs_to_schemas=None, root_conditions=None, where_clauses=None):
        self.engine = engine
        self.root_schema = root_schema
        self.root_table = schema_as_table(self.root_schema, self.engine)
        self.has_schemas, self.has_join_conditions = self._parse_schema_list(has_schemas)
        self.has_tables = [schema_as_table(h_schema, self.engine) for h_schema in self.has_schemas]
        self.belongs_to_schemas, self.belongs_to_join_conditions = self._parse_schema_list(belongs_to_schemas)
        self.belongs_to_tables = [schema_as_table(b_schema, self.engine) for b_schema in self.belongs_to_schemas]
        self.root_conditions = root_conditions
        self.where_clauses = where_clauses
        # the resulting schema is the prefixed union of all joined schemas
        schema = Schema.union([self.root_schema] + self.has_schemas + self.belongs_to_schemas, with_prefix=True, schema_name=self.root_schema.name+'_join')
        super().__init__(schema)

    def _parse_schema_list(self, schema_list=None):
        # Each entry is either a schema or a (schema, extra_join_conditions)
        # tuple; split them into a list of schemas and a per-schema dict.
        if schema_list is None:
            schema_list = []
        schemas = []
        join_conditions = {}
        for schema in schema_list:
            if isinstance(schema, tuple):
                schema, j_c = schema
                join_conditions[schema] = j_c
            schemas.append(schema)
        return schemas, join_conditions

    def storage_target(self):
        return 'sqlalchemy'

    def _load(self):
        """Execute the join into a temporary table, then load it."""
        root = self.root_table
        if self.root_conditions is not None:
            root = root.select().where(and_(*self.root_conditions)).alias()
        join_clause = root
        select_clause = []
        root_col_prefix = self.root_schema.options['prefix']
        for col in root.c:
            select_clause.append(col.label("{}.{}".format(root_col_prefix, col.name)))
        # has-many: child tables carry a '<root_prefix>_id' foreign key
        for h_table, h_schema in zip(self.has_tables, self.has_schemas):
            col_prefix = h_schema.options['prefix']
            h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]
            for join_condition in self.has_join_conditions.get(h_schema, []):
                h_join_conditions.append(join_condition)
            join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))
            for col in h_table.c:
                select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
        # belongs-to: the root table carries a '<prefix>_id' foreign key
        for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):
            col_prefix = b_schema.options['prefix']
            b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]
            for join_condition in self.belongs_to_join_conditions.get(b_schema, []):
                b_join_conditions.append(join_condition)
            join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))
            for col in b_table.c:
                select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
        temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
        temp_table = schema_as_table(temp_schema, self.engine)
        try:
            temp_table.create(self.engine)
            query = select(select_clause).select_from(join_clause)
            if self.where_clauses is not None:
                query = query.where(and_(*self.where_clauses))
            insert = temp_table.insert().from_select(temp_schema.col_names(), query)
            start = time.time()
            print('executing join into temp table')
            self.engine.execute(insert)
            joined = time.time()
            print('loading rows from temp table')
            df = fast_sql_to_df(temp_table, temp_schema)
            loaded = time.time()
        finally:
            # always drop the temp table, even if the join or load failed
            temp_table.drop(self.engine)
        print('type checking and sorting')
        print('took', joined - start, 'seconds to perform the join')
        print('took', loaded - joined, 'seconds to load the results')
        return df
class SAQueryDataStore(DataStore):
    """Data store backed by an arbitrary SQL query run through SQLAlchemy.

    Unlike the table/join based stores, this simply executes a caller
    supplied query and exposes the result set as a pandas DataFrame.
    """

    def __init__(self, schema, engine, query):
        # Just capture collaborators; nothing executes until _load().
        self.engine = engine
        self.query = query
        self.schema = schema

    def _load(self):
        # Delegate execution and DataFrame construction to pandas.
        return pandas.read_sql(self.query, self.engine)
| 35.896296 | 155 | 0.623882 | import pandas
from ..schema.schema_base import *
from .datastore_base import DataStore
from .odo_datastore import OdoDataStore
from ..config import config
from functools import lru_cache, partial
from sqlalchemy import Table, MetaData, select
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Select, and_
from sqlalchemy import sql
import io
import tempfile
import time
import os
import datetime
import ciso8601
import odo
metadatas = {}
def get_engine_metadata(engine):
    """Return the module-level cached MetaData bound to *engine*.

    A new MetaData is created, bound and cached on first use; subsequent
    calls for the same engine return the cached instance.
    """
    cached = metadatas.get(engine)
    if cached is not None:
        return cached
    metadata = MetaData()
    metadata.bind = engine
    metadatas[engine] = metadata
    return metadata
def get_reflected_metadata(engine, schema_name=None):
    """Return a fresh MetaData populated by reflecting the live database.

    Unlike get_engine_metadata, the result is NOT cached; every call
    re-reflects the schema (optionally limited to *schema_name*).
    """
    metadata = MetaData()
    metadata.reflect(bind=engine, schema=schema_name)
    metadata.bind = engine
    return metadata
)
return schemas, join_conditions
def storage_target(self):
return 'sqlalchemy'
def _load(self):
root = self.root_table
if self.root_conditions is not None:
root = root.select().where(and_(*self.root_conditions)).alias()
join_clause = root
select_clause = []
root_col_prefix = self.root_schema.options['prefix']
for col in root.c:
select_clause.append(col.label("{}.{}".format(root_col_prefix, col.name)))
for h_table, h_schema in zip(self.has_tables, self.has_schemas):
col_prefix = h_schema.options['prefix']
h_join_conditions = [root.c.id == h_table.c['{}_id'.format(root_col_prefix)]]
for join_condition in self.has_join_conditions.get(h_schema, []):
h_join_conditions.append(join_condition)
join_clause = join_clause.outerjoin(h_table, and_(*h_join_conditions))
for col in h_table.c:
select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
for b_table, b_schema in zip(self.belongs_to_tables, self.belongs_to_schemas):
col_prefix = b_schema.options['prefix']
b_join_conditions = [root.c['{}_id'.format(col_prefix)] == b_table.c.id]
for join_condition in self.belongs_to_join_conditions.get(b_schema, []):
b_join_conditions.append(join_condition)
join_clause = join_clause.outerjoin(b_table, and_(*b_join_conditions))
for col in b_table.c:
select_clause.append(col.label("{}.{}".format(col_prefix, col.name)))
temp_schema = Schema.rename(self.schema, 'temp_'+self.schema.name)
temp_table = schema_as_table(temp_schema, self.engine)
try:
temp_table.create(self.engine)
query = select(select_clause).select_from(join_clause)
if self.where_clauses is not None:
query = query.where(and_(*self.where_clauses))
insert = temp_table.insert().from_select(temp_schema.col_names(), query)
start = time.time()
print('executing join into temp table')
self.engine.execute(insert)
joined = time.time()
print('loading rows from temp table')
df = fast_sql_to_df(temp_table, temp_schema)
loaded = time.time()
finally:
temp_table.drop(self.engine)
print('type checking and sorting')
print('took', joined - start, 'seconds to perform the join')
print('took', loaded - joined, 'seconds to load the results')
return df
class SAQueryDataStore(DataStore):
def __init__(self, schema, engine, query):
self.engine = engine
self.query = query
self.schema = schema
def _load(self):
df = pandas.read_sql(self.query, self.engine)
return df
| true | true |
f71fb3203ce46c39849c5a3bac229726738a23a1 | 4,139 | py | Python | src/core/network/llnms-scan-network.py | marvins/LLNMS | ebc15418e1a5dddafdb3e55cea4e8cb71f619b2d | [
"MIT"
] | null | null | null | src/core/network/llnms-scan-network.py | marvins/LLNMS | ebc15418e1a5dddafdb3e55cea4e8cb71f619b2d | [
"MIT"
] | null | null | null | src/core/network/llnms-scan-network.py | marvins/LLNMS | ebc15418e1a5dddafdb3e55cea4e8cb71f619b2d | [
"MIT"
] | 1 | 2020-12-16T09:28:26.000Z | 2020-12-16T09:28:26.000Z | #!/usr/bin/env python
#
# File: llnms-scan-network.py
# Author: Marvin Smith
# Date: 6/13/2015
#
# Purpose: Scan LLNMS networks
#
__author__ = 'Marvin Smith'
# Python Libraries
import argparse, os, sys
# LLNMS Libraries
# Make the bundled LLNMS python library importable when LLNMS_HOME is set.
# os.environ.get() avoids the KeyError a bare os.environ[...] lookup raised
# when the variable was not defined at all.
if os.environ.get('LLNMS_HOME') is not None:
    sys.path.append(os.environ['LLNMS_HOME'] + '/lib')
import llnms
# --------------------------------------------- #
# - Parse Command-Line Arguments - #
# --------------------------------------------- #
def Parse_Command_Line():
    """Build the llnms-scan-network argument parser and parse sys.argv."""
    arg_parser = argparse.ArgumentParser(description='Scan an LLNMS network.')

    # Informational flags.
    arg_parser.add_argument('-v', '--version',
                            action='version',
                            version='%(prog)s ' + llnms.info.Get_Version_String(),
                            help='Print the version information.')
    arg_parser.add_argument('--verbose',
                            action='store_true',
                            dest='verbose_flag',
                            default=False,
                            required=False,
                            help='Print with verbose output.')
    arg_parser.add_argument('--quiet',
                            action='store_true',
                            default=False,
                            required=False,
                            help='Do not print output.')

    # Required scan inputs: which network, with which scanner.
    arg_parser.add_argument('-n', '--network',
                            dest='network_input',
                            required=True,
                            help='ID of the network to scan.')
    arg_parser.add_argument('-s', '--scanner',
                            dest='scanner_input',
                            required=True,
                            help='ID of the scanner to use.')

    # Output configuration.
    arg_parser.add_argument('-om', '--output-mode',
                            dest='output_mode',
                            default=None,
                            required=False,
                            help='Output mode. Supported options are xml and stdout. If xml provided, then user must provide filename.')

    return arg_parser.parse_args()
# ---------------------------- #
# - Main Function - #
# ---------------------------- #
def Main():
    """Entry point: resolve the network and scanner, run the scan, print results.

    Raises:
        Exception: if the named network or scanner cannot be found, or if the
            scanner is not registered on the network.
    """
    # Grab LLNMS_HOME if present.  os.environ.get() avoids the KeyError the
    # old os.environ['LLNMS_HOME'] lookup raised when the variable was unset.
    llnms_home = os.environ.get('LLNMS_HOME')

    # Parse command-line arguments.
    options = Parse_Command_Line()

    # Load the network definition and make sure it exists.
    network = llnms.Network.find_network(network_name=options.network_input,
                                         llnms_home=llnms_home)
    if network is None:
        raise Exception('No network found matching name ' + options.network_input)
    if options.verbose_flag is True:
        print(network.To_Debug_String())

    # Load the scanner definition and make sure it exists.
    scanner = llnms.Scanner.find_scanner(scanner_id=options.scanner_input,
                                         llnms_home=llnms_home)
    if scanner is None:
        raise Exception('No scanner found matching name ' + options.scanner_input)
    if options.verbose_flag is True:
        print(scanner.To_Debug_String())

    # Validate the scanner is registered within the network.
    if network.Has_Scanner(scanner_id=scanner.id) is False:
        raise Exception("Network does not have a scanner registered with id=" + scanner.id)

    # Run the scan across the network's address range.
    results = scanner.Run_Scan_Range(endpoint_list=network.Get_Network_Range(),
                                     arg_list=network.Get_Scanner_Args(scanner.id),
                                     num_threads=4)

    # Print results.  zip() replaces the Python-2-only xrange() index loop
    # and cannot index past the shorter of the two sequences.
    addresses = network.Get_Network_Range()
    for address, result in zip(addresses, results):
        print(address + ' - ' + str(result))
# ----------------------------- #
# - Run Main Script - #
# ----------------------------- #
if __name__ == '__main__':
Main()
| 32.590551 | 132 | 0.530563 |
__author__ = 'Marvin Smith'
import argparse, os, sys
if os.environ['LLNMS_HOME'] is not None:
sys.path.append(os.environ['LLNMS_HOME'] + '/lib')
import llnms
def Parse_Command_Line():
parser = argparse.ArgumentParser( description='Scan an LLNMS network.' )
parser.add_argument('-v', '--version',
action='version',
version='%(prog)s ' + llnms.info.Get_Version_String(),
help='Print the version information.')
parser.add_argument('--verbose',
dest='verbose_flag',
required=False,
default=False,
action='store_true',
help='Print with verbose output.')
parser.add_argument('--quiet',
required=False,
default=False,
action='store_true',
help='Do not print output.')
parser.add_argument('-n', '--network',
required=True,
dest='network_input',
help='ID of the network to scan.')
parser.add_argument('-s', '--scanner',
required=True,
dest='scanner_input',
help='ID of the scanner to use.')
parser.add_argument('-om', '--output-mode',
required=False,
dest='output_mode',
default=None,
help='Output mode. Supported options are xml and stdout. If xml provided, then user must provide filename.')
return parser.parse_args()
def Main():
llnms_home=None
if os.environ['LLNMS_HOME'] is not None:
llnms_home=os.environ['LLNMS_HOME']
options = Parse_Command_Line()
network = llnms.Network.find_network( network_name=options.network_input,
llnms_home=llnms_home)
if network is None:
raise Exception('No network found matching name ' + options.network_input)
if options.verbose_flag is True:
print(network.To_Debug_String())
scanner = llnms.Scanner.find_scanner( scanner_id=options.scanner_input,
llnms_home=llnms_home )
if scanner is None:
raise Exception('No scanner found matching name ' + options.scanner_input)
if options.verbose_flag is True:
print(scanner.To_Debug_String())
if network.Has_Scanner( scanner_id=scanner.id ) is False:
raise Exception("Network does not have a scanner registered with id=" + scanner.id )
results = scanner.Run_Scan_Range(endpoint_list=network.Get_Network_Range(),
arg_list=network.Get_Scanner_Args(scanner.id),
num_threads=4)
addresses = network.Get_Network_Range()
for x in xrange(0, len(results)):
print(addresses[x] + ' - ' + str(results[x]))
if __name__ == '__main__':
Main()
| true | true |
f71fb3d05a7fffde16b9485af0a723ccfc10ba6f | 16,760 | py | Python | maven_proj_graph/pkg1/mvnsortmod1.py | lg-alabris/swagger-ui | fdb06ad6dc3dd9c416b08c8f7909c37cfcf1ece4 | [
"Apache-2.0"
] | null | null | null | maven_proj_graph/pkg1/mvnsortmod1.py | lg-alabris/swagger-ui | fdb06ad6dc3dd9c416b08c8f7909c37cfcf1ece4 | [
"Apache-2.0"
] | null | null | null | maven_proj_graph/pkg1/mvnsortmod1.py | lg-alabris/swagger-ui | fdb06ad6dc3dd9c416b08c8f7909c37cfcf1ece4 | [
"Apache-2.0"
] | null | null | null | '''
======================================================================
Created on Jan 14, 2018
PURPOSE: this module provides classes to read Maven projects from git or other repos
specifically intended to create the graph of multiple project dependencies
ROADMAP: TODO -
1. review how properties are distributed and could break things
2. review subproject dependencies on top level, are props declared?
2. review parent POM, are props declared?
3. are external property files used?
@author: Larry
======================================================================
'''
import os
import subprocess
#import json
#import xml.etree.ElementTree as ET
#import urllib2
#import csv
import xml.etree.cElementTree as ET
import re
import urllib.request
#=======================================================================
# static functions and constants
class Util(object):
    """Static helper functions and constants shared by the POM classes."""

    # Namespace map used for every Maven POM XPath lookup.
    mvn_pom_ns = {"mvn": "http://maven.apache.org/POM/4.0.0"}

    def __init__(self):
        pass

    @staticmethod
    def get_tag_value(name, section):
        """Return the text of child element <name> under *section*, or '' if absent."""
        elem = section.find('mvn:%s' % name, Util.mvn_pom_ns)
        if elem is None:
            return ''
        return elem.text

    @staticmethod
    def get_path(dirs):
        """Join path components with '/' (no trailing slash; '' for an empty list)."""
        # str.join replaces the old manual concatenate-then-trim loop.
        return '/'.join(dirs)

    @staticmethod
    def run_process_2(cmd_args):
        """Run *cmd_args* through the shell and print its stdout (debug aid)."""
        # NOTE(review): the first run ('cd ..') cannot change this process's
        # working directory and its result is immediately discarded; kept only
        # for behavioral parity with the original experiment.
        result = subprocess.run(['cd', '..'], stdout=subprocess.PIPE, shell=True)
        result = subprocess.run(cmd_args, stdout=subprocess.PIPE, shell=True)
        print(result.stdout.decode('utf-8'))

    @staticmethod
    def run_process(cmd_args, args_in):
        """Run *cmd_args* through the shell, piping *args_in* to stdin; print raw stdout."""
        cmd = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
        if (args_in):
            cmd.stdin.write(args_in.encode('utf-8'))
            cmd.stdin.flush()  # Must include this to ensure data is passed to child process
        result = cmd.stdout.read()
        print(args_in.encode('utf-8'))
        print(result)

    # Example batch input once piped through run_process (was a dead string
    # literal; preserved here as documentation):
    #   cmdline = ["cmd", "/q", "/k", "echo off"]
    #   rem vsinstr -coverage helloclass.exe /exclude:std::*
    #   vsperfcmd /start:coverage /output:run.coverage
    #   helloclass
    #   vsperfcmd /shutdown
    #   exit

    def test_map_update(self):
        """Scratch check of dict.update semantics (invoked as Util.test_map_update(None))."""
        A = {'a': 1, 'b': 2, 'c': 3}
        B = {'c': 99, 'd': 4, 'e': 5}
        A.update(B)
        print(A)
#=======================================================================
# identifies Maven coordinates for a project or dependnecy
class MavenCoords(object):
    """Maven coordinates (groupId/artifactId/version) of a project or dependency."""

    def __init__(self, element, props):
        if not element:
            # Placeholder coordinates: every field empty, key ''.
            self.groupid = ''
            self.artifactid = ''
            self.version = ''
            self.scope = ''
            self.relative_path = ''
            self.key = ''
            return
        # Pull each coordinate field out of the POM element in one pass.
        (self.groupid,
         self.artifactid,
         self.version,
         self.relative_path,
         self.scope) = [Util.get_tag_value(tag, element)
                        for tag in ('groupId', 'artifactId', 'version',
                                    'relativePath', 'scope')]
        self.refresh_key(props)

    def refresh_key(self, props):
        """Substitute a ${property} version if *props* knows it, then rebuild the key."""
        if props and self.version in props:
            self.version = props[self.version]
        self.key = '{}|{}|{}'.format(self.groupid, self.artifactid, self.version)
#=======================================================================
# a maven project POM complete with properties and dependencies
class MavenProject(object):
    """A Maven project POM, complete with properties, dependencies and modules.

    Constructing a MavenProject parses the POM (local path or http(s) URL),
    inherits properties/dependencies from an already-registered parent POM,
    registers itself in *project_map* under its coordinate key, and then
    recursively loads any <modules> sub-projects.
    """

    def __init__(self, pom_url, project_map):
        self.pom_url = pom_url
        self.project_map = project_map
        self.pom_file = self.get_pom_file(self.pom_url)
        self.name = Util.get_tag_value('name', self.pom_file)
        self.packaging = Util.get_tag_value('packaging', self.pom_file)
        # Seed properties/dependencies from the parent POM (if any) ...
        self.init_from_parent()
        # ... then overlay this POM's own declarations on top.
        self.properties.update(self.get_properties(self.pom_file))
        self.coord = MavenCoords(self.pom_file, self.properties)
        self.dependencies.update(self.get_dependencies(self.pom_file))
        # Register globally so children and consumers can look this project up.
        self.project_map[self.coord.key] = self
        self.get_sub_modules(self.pom_file)
        self.history = []    # reorder history used for cycle detection
        self.consumers = []  # projects that depend on this one

    def init_from_parent(self):
        """Inherit properties/dependencies from the parent POM, if one is declared.

        Parent POMs are expected to pre-exist their children and are looked up
        by coordinate key in the shared project map.
        """
        parent_section = self.pom_file.findall('mvn:parent', Util.mvn_pom_ns)
        if parent_section:
            self.parent_coord = MavenCoords(parent_section[0], None)
            # .get() instead of [] so an unresolved parent is reported rather
            # than raising KeyError; the old error branch also crashed by
            # reading .key off the missing (falsy) parent object.
            parent = self.project_map.get(self.parent_coord.key)
            if parent:
                self.properties = parent.properties.copy()
                self.dependencies = parent.dependencies.copy()
            else:
                print('Error: POM {} has unresolved parent POM reference {}'.format(
                    self.name, self.parent_coord.key))
                # Fall back to empty maps so __init__ can still proceed.
                self.properties = {}
                self.dependencies = {}
        else:
            self.dependencies = {}
            self.properties = {}
            self.coord = MavenCoords(None, None)
            # Debug output for POMs with no parent declaration.
            dirs = self.pom_url.split('/')
            print(dirs)
            print(Util.get_path(dirs))

    def get_sub_modules(self, pom_file):
        """Load every <modules> entry as a child MavenProject and register it."""
        section = pom_file.findall('mvn:modules', Util.mvn_pom_ns)
        self.modules = {}
        if not section:
            return
        for elem in section[0].findall('*'):
            sub_proj = self.get_sub_module(elem.text)
            self.modules[sub_proj.coord.key] = sub_proj
            self.project_map[sub_proj.coord.key] = sub_proj

    def get_sub_module(self, sub_dir):
        """Build the child module located in *sub_dir* next to this POM."""
        # Replace the trailing 'pom.xml' segment with '<sub_dir>/pom.xml'.
        dirs = self.pom_url.split('/')
        x = len(dirs)
        dirs[x - 1] = 'pom.xml'
        dirs.insert(x - 1, sub_dir)
        path = Util.get_path(dirs)
        return MavenProject(path, self.project_map)

    def get_properties(self, pom):
        """Return {'${name}': value} for every child of the <properties> section."""
        section = pom.findall('mvn:properties', Util.mvn_pom_ns)
        props = {}
        if len(section) == 0:
            return props
        for elem in section[0].findall('*'):
            k = re.sub('{.*?}', '', elem.tag)  # strip the XML namespace prefix
            props['${%s}' % k] = elem.text
        return props

    def get_dependencies(self, pom):
        """Return {coordinate_key: MavenCoords} for every declared <dependency>."""
        section = pom.findall('mvn:dependencies', Util.mvn_pom_ns)
        deps_map = {}
        if len(section) == 0:
            return deps_map
        for dep_section in section[0].findall('mvn:dependency', Util.mvn_pom_ns):
            obj = MavenCoords(dep_section, self.properties)
            deps_map[obj.key] = obj
        return deps_map

    @staticmethod
    def get_pom_file(pomfile):
        """Parse *pomfile* (local path or http(s) URL) and return the XML root element."""
        if pomfile.find("http://") >= 0 or pomfile.find("https://") >= 0:
            opener = urllib.request.build_opener()
            return ET.parse(opener.open(pomfile)).getroot()
        return ET.parse(pomfile).getroot()

    def logx(self, level):
        """Print a debug summary; level 0 = header only, higher = full details."""
        print()
        print('---------Maven Project---------')
        print('key: {0} * Name: {1} * Group: {2} * Id: {3} * Ver: {4}'.format(
            self.coord.key, self.name, self.coord.groupid,
            self.coord.artifactid, self.coord.version))
        print()
        if level == 0:
            return
        print(' dependencies')
        for k, v in self.dependencies.items():
            print('    key: %s * Group: %s * Id: %s * Ver: %s' % (k, v.groupid, v.artifactid, v.version))
        print()
        print(' properties: ', self.properties)
        print(' consumers')
        for proj in self.consumers:
            print('    ', proj.coord.key)
class DAGerror(Exception):
    """Raised when a circular dependency is detected while ordering projects."""

    def __init__(self, arg):
        # Forward to Exception so str(e) / print(e) show the message; the old
        # version skipped this, so printing the exception produced ''.
        super(DAGerror, self).__init__(arg)
        self.arg = arg
#=======================================================================
#
class MavenProjectGraph(object):
    """Builds and dependency-orders the graph over a set of Maven POM files."""
    def __init__(self, pom_url_list):
        # pom_url_list: POM paths/URLs to load.  proj_list/proj_map are
        # populated by generate_pom_list() (map key = MavenCoords key).
        self.pom_url_list = pom_url_list
        self.proj_list = []
        self.proj_map = {}
        #self.validation = {}
    def generate_pom_list(self):
        # Constructing a MavenProject registers it (and its sub-modules) in
        # self.proj_map as a constructor side effect, so the return value is
        # intentionally unused here.
        for pom_url in self.pom_url_list:
            MavenProject(pom_url, self.proj_map)
            #self.proj_list.append(proj)
            #self.proj_map[proj.coord.key] = proj
        self.proj_list = list(self.proj_map.values())
        for proj in self.proj_list:
            proj.logx(1) #$$ debug dump of each loaded project
        print()
    def set_options(self):
        # Placeholder for future configuration options.
        pass
    # PURPOSE: sort the list in DAG dependency order and capture each project consumers
    #
    #
    def resolve_graph(self):
        self.resolve_dependencies()
        self.resolve_consumers()
    # PURPOSE: reorder the project list such that each projects dependencies appear before that project
    #
    # NOTE #1: iterate thru the list looking fwd in the list for each project's dependencies
    #          for each dependency found, move it behind that project
    #
    # NOTE #2: the DAG is complete when the list is scanned and no dependencies exist fwd of each project
    #
    # NOTE #3: a history of each dependency relocation is maintained for each project;
    #          moving the same dependency behind the same base twice indicates a cycle
    #
    def resolve_dependencies(self):
        try:
            while True:
                # Debug: dump the current ordering before each pass.
                for p in self.proj_list:
                    print(p.name)
                i = 0
                #dependency_found = False
                while i < len(self.proj_list):
                    # Reset per base project; set when a forward dependency is moved.
                    dependency_found = False
                    proj_base = self.proj_list[i]
                    print('loop i={}, base={}'.format(i, proj_base.name))
                    j = i + 1
                    while j < len(self.proj_list):
                        print('    loop j {}'.format(j))
                        proj_scan = self.proj_list[j]
                        # a forward project dependency is found for the base project, move it behind the base project
                        if proj_scan.coord.key in proj_base.dependencies:
                            # dejavu - a repeated reorder indicates circular dependency
                            if proj_scan.coord.key in proj_base.history:
                                raise DAGerror("Error: base project - {} - encountered duplicate reorder for dependency - {} -".format
                                               ( proj_base.name, proj_scan.name))
                            # remove the fwd item first to avoid order issues
                            del self.proj_list[j] #self.proj_list.remove(j)
                            # insert behind the base project
                            self.proj_list.insert(i, proj_scan)
                            print('    reorded scan {} from j={} to i={}'.format( proj_scan.name, j, i))
                            for p in self.proj_list:
                                print(p.name)
                            proj_base.history.append(proj_scan.coord.key)
                            dependency_found = True
                            # Step back so the moved project becomes the next base.
                            i = i -1
                            break
                        j =j+1 # while j
                    i=i+1 # while i
                # repeat outer loop until nothing is reordered
                # NOTE(review): dependency_found is reset at the top of each
                # i-iteration, so it reflects only the final pass of the inner
                # loop here — the i-loop itself already re-scans after every
                # move; confirm the outer while ever repeats in practice.
                if not dependency_found:
                    break
                else:
                    i = 0
        except DAGerror as e:
            print(e)
    # PURPOSE: for each project in the list, discover the set of consuming projects
    #
    # NOTE #1: call this method AFTER the dependency graph has been properly resolved
    #          consuming projects will be forward in the list
    #
    def resolve_consumers(self):
        for i in range(len(self.proj_list)):
            proj_base = self.proj_list[i]
            j = i
            while j < len(self.proj_list)-1:
                j = j+1
                proj_scan = self.proj_list[j]
                if (proj_base.coord.key in proj_scan.dependencies):
                    proj_base.consumers.append(proj_scan)
    def list_projects(self):
        # Verbose dump of every project in resolved order.
        for proj in self.proj_list:
            proj.logx(1)
#==========================================================================
def main():
    """Build the Maven project graph from a fixed list of POM files and print it.

    The earlier candidate path lists and the unused mvn command-line
    experiments that shadowed this assignment have been removed; only the
    last (effective) pom_files value is kept.
    """
    pom_files = ['C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-A.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-B.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-C.xml',
                 'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-D.xml',
                 ]
    graph = MavenProjectGraph(pom_files)
    graph.generate_pom_list()
    graph.resolve_graph()
    graph.list_projects()
#==========================================================================
# see this article for opening remote xml files
# https://stackoverflow.com/questions/28238713/python-xml-parsing-lxml-urllib-request
def main2():
    """Scratch/experimental entry point for reading a POM from a remote URL.

    NOTE(review): this function looks dead/abandoned — the local pom parse is
    discarded, MavenProject is called with one argument although its
    constructor takes (pom_url, project_map), and logx() is called without
    its required level argument.  Confirm before reusing.
    """
    cwd = os.getcwd()
    cwd = 'D:\\devspaces\\wks4\\py1\\'
    pom_file = cwd + 'snipits2.xml'
    pom_file = 'D:\\devspaces\\wks4\\py1\\snipits2.xml'
    pom = ET.parse(pom_file).getroot()
    # https://github.com/LeonardoZ/java-concurrency-patterns.git
    # this is the correct patttern for reading single files from github
    # https://raw.githubusercontent.com/user/repository/branch/filename
    # this is the web page containing the file
    # 'https://github.com/LeonardoZ/java-concurrency-patterns/blob/master/pom.xml'
    pom_file_url = 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml'
    opener = urllib.request.build_opener()
    f = opener.open(pom_file_url)
    # ng, file=urllib.urlopen(file=urllib.urlopen())
    #parser = ET.HTMLParser()
    #with urlopen('https://pypi.python.org/simple') as f:
    #tree = ET.parse(f, parser)
    #pom_file = urllib.request.urlopen(pom_file)
    pom = ET.parse(opener.open(pom_file_url)).getroot()
    project = MavenProject(pom)
    project.logx()
if __name__ == '__main__':
main()
#main()
'''
=====================================================================
notes:
alternatives - use maven to get equiv pom
> mvn help:effective-pom
https://stackoverflow.com/questions/4760215/running-shell-command-from-python-and-capturing-the-output
'''
| 36.514161 | 178 | 0.515752 | import os
import subprocess
import xml.etree.cElementTree as ET
import re
import urllib.request
class Util(object):
mvn_pom_ns = {"mvn":"http://maven.apache.org/POM/4.0.0"}
def __init__(self):
pass
@staticmethod
def get_tag_value(name, section):
s = ('mvn:%s' % name)
elem = section.find(s, Util.mvn_pom_ns)
if elem ==None:
return''
return elem.text
@staticmethod
def get_path(dirs):
path = ''
for d in dirs:
path += d + '/'
return path[:len(path) -1]
@staticmethod
def run_process_2(cmd_args):
result = subprocess.run(['cd', '..'], stdout=subprocess.PIPE, shell=True)
result = subprocess.run(cmd_args, stdout=subprocess.PIPE, shell=True)
print(result.stdout.decode('utf-8'))
@staticmethod
def run_process(cmd_args, args_in):
cmd = subprocess.Popen(cmd_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
if (args_in):
cmd.stdin.write(args_in.encode('utf-8'))
cmd.stdin.flush()
result = cmd.stdout.read()
print(args_in.encode('utf-8'))
print(result)
def test_map_update(self):
A = {'a':1, 'b':2, 'c': 3}
B = {'c':99, 'd':4, 'e':5}
A.update(B)
print(A)
class MavenCoords(object):
def __init__(self, element, props):
if (not element):
self.groupid =''
self.artifactid = ''
self.version = ''
self.scope = ''
self.relative_path = ''
self.key =''
return
self.groupid = Util.get_tag_value('groupId', element)
self.artifactid = Util.get_tag_value('artifactId', element)
self.version = Util.get_tag_value('version', element)
self.relative_path = Util.get_tag_value('relativePath', element)
self.scope = Util.get_tag_value('scope', element)
self.refresh_key(props)
def refresh_key(self, props):
if (props and self.version in props):
self.version = props[self.version]
self.key = '%s|%s|%s' % (self.groupid, self.artifactid, self.version)
class MavenProject(object):
def __init__(self, pom_url, project_map):
self.pom_url = pom_url;
self.project_map = project_map
self.pom_file = self.get_pom_file(self.pom_url)
self.name = Util.get_tag_value('name', self.pom_file)
self.packaging = Util.get_tag_value('packaging', self.pom_file)
self.init_from_parent()
self.properties.update(self.get_properties(self.pom_file))
self.coord = MavenCoords(self.pom_file, self.properties)
self.dependencies.update(self.get_dependencies(self.pom_file))
self.project_map[self.coord.key] = self
self.get_sub_modules(self.pom_file)
self.history = []
self.consumers = []
def init_from_parent(self):
parent_section = self.pom_file.findall('mvn:parent', Util.mvn_pom_ns)
if (parent_section):
self.parent_coord = MavenCoords(parent_section[0], None)
parent = self.project_map[self.parent_coord.key]
if (parent):
self.properties = parent.properties.copy()
self.dependencies = parent.dependencies.copy()
else:
print('Error: POM {} has unresolved parent POM reference {}'.format(self.name, parent.key))
else:
self.dependencies = {}
self.properties = {}
self.coord = MavenCoords(None, None)
dirs = self.pom_url.split('/')
print(dirs)
print (Util.get_path(dirs))
def get_sub_modules(self, pom_file):
section = pom_file.findall('mvn:modules', Util.mvn_pom_ns)
self.modules = {}
if (not section):
return
for elem in section[0].findall('*'):
sub_proj = self.get_sub_module(elem.text)
self.modules[sub_proj.coord.key] = sub_proj
self.project_map[sub_proj.coord.key] = sub_proj
def get_sub_module(self, sub_dir):
dirs = self.pom_url.split('/')
x = len(dirs)
dirs[x-1] = 'pom.xml'
dirs.insert(x-1, sub_dir)
path = Util.get_path(dirs)
module = MavenProject(path, self.project_map)
return module
def get_properties(self, pom):
section = pom.findall('mvn:properties', Util.mvn_pom_ns)
props = {}
if (len(section)==0):
return props
for elem in section[0].findall('*'):
k = re.sub('{.*?}', '', elem.tag)
k = '${%s}' % k
props[k] = elem.text
return props
def get_dependencies(self, pom):
section = pom.findall('mvn:dependencies', Util.mvn_pom_ns)
deps_map = {}
if (len(section)==0):
return deps_map
for dep_section in section[0].findall('mvn:dependency', Util.mvn_pom_ns):
obj = MavenCoords(dep_section, self.properties)
deps_map[obj.key] = obj
return deps_map
@staticmethod
def get_pom_file(pomfile):
if pomfile.find("http://") >=0 or pomfile.find("https://") >=0:
opener = urllib.request.build_opener()
pom = ET.parse( opener.open(pomfile) ).getroot()
else:
pom = ET.parse(pomfile).getroot()
return pom
def logx(self, level):
print()
print('---------Maven Project---------')
print('key: {0} * Name: {1} * Group: {2} * Id: {3} * Ver: {4}'.format(self.coord.key, self.name, self.coord.groupid, self.coord.artifactid, self.coord.version))
print()
if level ==0:
return
print(' dependencies')
for k, v in self.dependencies.items():
print(' key: %s * Group: %s * Id: %s * Ver: %s' % (k, v.groupid, v.artifactid, v.version))
print()
print(' properties: ', self.properties)
print (' consumers')
for proj in self.consumers:
print(' ', proj.coord.key)
class DAGerror(Exception):
def __init__(self, arg):
self.arg = arg
class MavenProjectGraph(object):
def __init__(self, pom_url_list):
self.pom_url_list = pom_url_list
self.proj_list = []
self.proj_map = {}
def generate_pom_list(self):
for pom_url in self.pom_url_list:
MavenProject(pom_url, self.proj_map)
self.proj_list = list(self.proj_map.values())
for proj in self.proj_list:
proj.logx(1)
print()
def set_options(self):
pass
def resolve_graph(self):
self.resolve_dependencies()
self.resolve_consumers()
# NOTE #2: the DAG is complete when the list is scanned and no dependencies exist fwd of each project
#
# NOTE #3: a history of each dependency relocation is maintained for each project
# a circular reference will be detected if that
#
def resolve_dependencies(self):
try:
while True:
for p in self.proj_list:
print(p.name)
i = 0
#dependency_found = False
while i < len(self.proj_list):
dependency_found = False
proj_base = self.proj_list[i]
print('loop i={}, base={}'.format(i, proj_base.name))
j = i + 1
while j < len(self.proj_list):
print(' loop j {}'.format(j))
proj_scan = self.proj_list[j]
# a forward project dependency is found for the base project, move it behind the base project
if proj_scan.coord.key in proj_base.dependencies:
# dejavu - a repeated reorder indicates circular dependency
if proj_scan.coord.key in proj_base.history:
raise DAGerror("Error: base project - {} - encountered duplicate reorder for dependency - {} -".format
( proj_base.name, proj_scan.name))
# remove the fwd item first to avoid order issues
del self.proj_list[j] #self.proj_list.remove(j)
# insert behind the base project
self.proj_list.insert(i, proj_scan)
print(' reorded scan {} from j={} to i={}'.format( proj_scan.name, j, i))
for p in self.proj_list:
print(p.name)
proj_base.history.append(proj_scan.coord.key)
dependency_found = True
i = i -1
break
j =j+1 # while j
i=i+1 # while i
# repeat outer loop until nothing is reordered
if not dependency_found:
break
else:
i = 0
except DAGerror as e:
print(e)
# PURPOSE: for each project in the list, discover the set of consuming projects
#
# NOTE #1: call this method AFTER the dependency graph has been properly resolved
# consuming projects will be forward in the list
#
def resolve_consumers(self):
for i in range(len(self.proj_list)):
proj_base = self.proj_list[i]
j = i
while j < len(self.proj_list)-1:
j = j+1
proj_scan = self.proj_list[j]
if (proj_base.coord.key in proj_scan.dependencies):
proj_base.consumers.append(proj_scan)
def list_projects(self):
for proj in self.proj_list:
proj.logx(1)
#==========================================================================
def main():
pom_files = ['D:\\devspaces\\wks4\\py1\\snipits2.xml',
'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml']
pom_files = ['D:\\devspaces\\wks4\\py1\\pom-A.xml',
'D:\\devspaces\\wks4\\py1\\pom-B.xml',
'D:\\devspaces\\wks4\\py1\\pom-C.xml',
'D:\\devspaces\\wks4\\py1\\pom-D.xml',
]
pom_files = ['C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-A.xml',
'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-B.xml',
'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-C.xml',
'C:/Users/Larry/Dropbox/gitcode/gh/maven_proj_graph/pom-D.xml',
]
# C:\Users\Larry\Dropbox\gitcode\gh\maven_proj_graph
s = ['dir', '*']
s = ['C:/apps/maven352/bin/mvn', 'help:effective-pom']
s2 = ['C:\\apps\\maven352\\bin\\mvn', 'help:effective-pom']
#Util.run_process(['cd', '..'], 'C:\\apps\\maven352\\bin\\mvn help:effective-pom')
#Util.run_process('C:\\apps\\maven352\\bin\\mvn help:effective-pom', '')
#Util.test_map_update(None)
#return()
graph = MavenProjectGraph(pom_files)
graph.generate_pom_list()
graph.resolve_graph()
graph.list_projects()
#==========================================================================
# see this article for opening remote xml files
# https://stackoverflow.com/questions/28238713/python-xml-parsing-lxml-urllib-request
def main2():
cwd = os.getcwd()
cwd = 'D:\\devspaces\\wks4\\py1\\'
pom_file = cwd + 'snipits2.xml'
pom_file = 'D:\\devspaces\\wks4\\py1\\snipits2.xml'
pom = ET.parse(pom_file).getroot()
# https://github.com/LeonardoZ/java-concurrency-patterns.git
# this is the correct patttern for reading single files from github
# https://raw.githubusercontent.com/user/repository/branch/filename
# this is the web page containing the file
# 'https://github.com/LeonardoZ/java-concurrency-patterns/blob/master/pom.xml'
pom_file_url = 'https://raw.githubusercontent.com/LeonardoZ/java-concurrency-patterns/master/pom.xml'
opener = urllib.request.build_opener()
f = opener.open(pom_file_url)
# ng, file=urllib.urlopen(file=urllib.urlopen())
#parser = ET.HTMLParser()
#with urlopen('https://pypi.python.org/simple') as f:
#tree = ET.parse(f, parser)
#pom_file = urllib.request.urlopen(pom_file)
pom = ET.parse(opener.open(pom_file_url)).getroot()
project = MavenProject(pom)
project.logx()
# Script entry point: build and print the multi-module Maven project graph.
# main2() (the remote-POM experiment) must be invoked manually.
if __name__ == '__main__':
    main()
| true | true |
f71fb4b548decad7d92f6c012d1d10217c8e029e | 2,063 | py | Python | Union_Find/1070.Accounts Merge/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 12 | 2019-05-04T04:21:27.000Z | 2022-03-02T07:06:57.000Z | Union_Find/1070.Accounts Merge/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 1 | 2019-07-24T18:43:53.000Z | 2019-07-24T18:43:53.000Z | Union_Find/1070.Accounts Merge/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 10 | 2019-07-01T04:03:04.000Z | 2022-03-09T03:57:37.000Z | from collections import defaultdict
class Solution:
    """
    @param accounts: List[List[str]]
    @return: return a List[List[str]]
    """

    def accountsMerge(self, accounts):
        """Merge accounts that share at least one email address.

        Each input row is ``[name, email1, email2, ...]``.  Rows sharing an
        email belong to the same person; the result lists each person once
        as ``[name, sorted unique emails...]``.  Implemented with union-find
        over the account row indices.
        """
        merged = []
        if not accounts:
            return merged
        # account index -> list of its emails
        self.forward_index = self.create_forward_index(accounts)
        # email -> list of account indices that contain it
        self.inverted_index = self.create_inverted_index(accounts)
        # union-find parent pointers, one node per non-empty account row
        self.parents = {i : i for i in range(len(accounts)) if len(accounts[i]) >= 1}
        # Union every pair of accounts that shares an email.
        for email, people in self.inverted_index.items():
            if len(people) > 1:
                p1 = people[0]
                for i in range(1, len(people)):
                    self.connect(p1, people[i])
        # Emit one row per union-find root.  connect() drains the absorbed
        # account's email list into its new root, so only roots still carry
        # a non-empty email list here.
        for people, email in self.forward_index.items():
            if len(email) > 0:
                curr = [accounts[people][0]]
                curr.extend(sorted(set(email)))
                merged.append(curr)
        return merged

    def create_forward_index(self, accounts):
        """Map each account index to its list of emails (name excluded)."""
        forward_index = defaultdict(list)
        for idx, account in enumerate(accounts):
            forward_index[idx].extend(account[1:])
        return forward_index

    def create_inverted_index(self, accounts):
        """Map each email to the list of account indices that contain it."""
        inverted_index = defaultdict(list)
        for idx, account in enumerate(accounts):
            for email in account[1:]:
                inverted_index[email].append(idx)
        return inverted_index

    def connect(self, p1, p2):
        """Union the sets of p1 and p2, moving p1's emails under p2's root."""
        parent1 = self.find(p1)
        parent2 = self.find(p2)
        if parent2 != parent1:
            self.parents[parent1] = parent2
            # Transfer the absorbed root's emails so each component's emails
            # all live on a single surviving root.
            self.forward_index[parent2].extend(self.forward_index[parent1])
            self.forward_index[parent1] = []

    def find(self, p):
        """Return p's root, compressing the path for amortized O(alpha)."""
        path = []
        while p != self.parents[p]:
            path.append(p)
            p = self.parents[p]
        for ppl in path:
            self.parents[ppl] = p
        return p
class Solution:
def accountsMerge(self, accounts):
merged = []
if not accounts or len(accounts) == 0:
return merged
self.forward_index = self.create_forward_index(accounts)
self.inverted_index = self.create_inverted_index(accounts)
self.parents = {i : i for i in range(len(accounts)) if len(accounts[i]) >= 1}
for email, people in self.inverted_index.items():
if len(people) > 1:
p1 = people[0]
for i in range(1, len(people)):
self.connect(p1, people[i])
curr = None
for people, email in self.forward_index.items():
if len(email) > 0:
curr = []
curr.append(accounts[people][0])
curr.extend(sorted(list(set(email))))
merged.append(curr)
return merged
def create_forward_index(self, accounts):
forward_index = defaultdict(list)
for idx, account in enumerate(accounts):
forward_index[idx].extend(account[1:])
return forward_index
def create_inverted_index(self, accounts):
inverted_index = defaultdict(list)
for idx, account in enumerate(accounts):
name = account[0]
for email in account[1:]:
inverted_index[email].append(idx)
return inverted_index
def connect(self, p1, p2):
parent1 = self.find(p1)
parent2 = self.find(p2)
if parent2 != parent1:
self.parents[parent1] = parent2
self.forward_index[parent2].extend(self.forward_index[parent1])
self.forward_index[parent1] = []
def find(self, p):
path = []
while p != self.parents[p]:
path.append(p)
p = self.parents[p]
for ppl in path:
self.parents[ppl] = p
return p | true | true |
f71fb575e9c0c22da60dd6194084df2483a9ba88 | 3,979 | py | Python | tests/test_detect_score.py | pgftennis/tennis_analysis_tool | 9f43545fa2b502930ec27a4de634ebc45e65cb19 | [
"MIT"
] | 1 | 2022-01-14T10:35:00.000Z | 2022-01-14T10:35:00.000Z | tests/test_detect_score.py | pgftennis/tennis_analysis_tool | 9f43545fa2b502930ec27a4de634ebc45e65cb19 | [
"MIT"
] | null | null | null | tests/test_detect_score.py | pgftennis/tennis_analysis_tool | 9f43545fa2b502930ec27a4de634ebc45e65cb19 | [
"MIT"
] | null | null | null | import unittest
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src/predict"))
import src.predict.detect_score as detect_score
class TestDetectScore(unittest.TestCase):
    """Unit tests for detect_score.DetectScore tennis-score text parsing."""

    def setUp(self):# create a fresh detector for each test
        self.ds=detect_score.DetectScore()
    def test_fix_text(self):
        # regression test: a recognized "10" must be split into "1 0"
        # (two separate game digits), not kept as one token
        text="3 6 10 6 3 4 15"# want "10" split into "1 0"
        text=self.ds.fix_text(text)
        self.assertEqual("3 6 1 0 6 3 4 15",text)
    def test_fix_in_ad(self):
        print("text_fix_in_ad")
        # When one side shows "Ad" (advantage), the other side's missing
        # point score must be filled in as "40".
        text_array=['3','6','Ad']
        text_array=self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', '40','6','Ad'],text_array)
        text_array=['3','Ad','6']
        text_array=self.ds.fix_in_ad(text_array)
        self.assertEqual(['3','Ad','6', '40'],text_array)
        # Same behaviour with multiple completed-set columns present.
        text_array=['3', '6', '1', '6', '3', '4', 'Ad']
        text_array=self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', '6', '1', '40','6', '3', '4', 'Ad'],text_array)
        text_array=['3', '6', '1', 'Ad','6', '3', '4']
        text_array=self.ds.fix_in_ad(text_array)
        self.assertEqual(['3', '6', '1', 'Ad','6', '3', '4', '40'],text_array)
    def test_text2score(self):
        # get_set_text_num: number of whitespace-separated tokens in the text.
        text="A 40"
        set_num = self.ds.get_set_text_num(text)
        self.assertEqual(2,set_num)
        text="4 1 15\n6 1 15"
        set_num = self.ds.get_set_text_num(text)
        self.assertEqual(6,set_num)
        # text2score: split OCR'd score text into
        # (games A, games B, points A, points B).
        text="1 15 \n0\n0"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("1",game_a)
        self.assertEqual("0",game_b)
        self.assertEqual("15",score_a)
        self.assertEqual("0",score_b)
        text="1 A \n5\n40"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("1",game_a)
        self.assertEqual("5",game_b)
        self.assertEqual("A",score_a)
        self.assertEqual("40",score_b)
        # Points only (no game columns yet): games default to "0".
        text="30 15"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("0",game_a)
        self.assertEqual("0",game_b)
        self.assertEqual("30",score_a)
        self.assertEqual("15",score_b)
        text="A 40"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("0",game_a)
        self.assertEqual("0",game_b)
        self.assertEqual("A",score_a)
        self.assertEqual("40",score_b)
        # Partially recognized text: missing fields come back empty.
        text="15 "
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("0",game_a)
        self.assertEqual("0",game_b)
        self.assertEqual("15",score_a)
        self.assertEqual("",score_b)
        text=""
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("",game_a)
        self.assertEqual("",game_b)
        self.assertEqual("",score_a)
        self.assertEqual("",score_b)
        # Multi-set lines: only the current (last) set's games and the
        # current points are returned; earlier set columns are ignored.
        text="4 1 15\n6 2 30"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("1",game_a)
        self.assertEqual("15",score_a)
        self.assertEqual("2",game_b)
        self.assertEqual("30",score_b)
        text="4 6 4 15\n6 2 2 30"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("4",game_a)
        self.assertEqual("15",score_a)
        self.assertEqual("2",game_b)
        self.assertEqual("30",score_b)
        text="6 4 6 4 15\n4 6 2 2 30"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("4",game_a)
        self.assertEqual("15",score_a)
        self.assertEqual("2",game_b)
        self.assertEqual("30",score_b)
        text="5 6 4 6 4 15\n7 4 6 2 2 30"
        game_a,game_b,score_a,score_b=self.ds.text2score(text)
        self.assertEqual("4",game_a)
        self.assertEqual("15",score_a)
        self.assertEqual("2",game_b)
        self.assertEqual("30",score_b)
# if __name__ == "__main__":
# unittest.main()
| 32.08871 | 78 | 0.60191 | import unittest
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
sys.path.append(str(Path(__file__).parent.parent / "src/predict"))
import src.predict.detect_score as detect_score
class TestDetectScore(unittest.TestCase):
def setUp(self):
self.ds=detect_score.DetectScore()
def test_fix_text(self):
text="3 6 10 6 3 4 15"
text=self.ds.fix_text(text)
self.assertEqual("3 6 1 0 6 3 4 15",text)
def test_fix_in_ad(self):
print("text_fix_in_ad")
text_array=['3','6','Ad']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '40','6','Ad'],text_array)
text_array=['3','Ad','6']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3','Ad','6', '40'],text_array)
text_array=['3', '6', '1', '6', '3', '4', 'Ad']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '6', '1', '40','6', '3', '4', 'Ad'],text_array)
text_array=['3', '6', '1', 'Ad','6', '3', '4']
text_array=self.ds.fix_in_ad(text_array)
self.assertEqual(['3', '6', '1', 'Ad','6', '3', '4', '40'],text_array)
def test_text2score(self):
text="A 40"
set_num = self.ds.get_set_text_num(text)
self.assertEqual(2,set_num)
text="4 1 15\n6 1 15"
set_num = self.ds.get_set_text_num(text)
self.assertEqual(6,set_num)
text="1 15 \n0\n0"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("0",game_b)
self.assertEqual("15",score_a)
self.assertEqual("0",score_b)
text="1 A \n5\n40"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("5",game_b)
self.assertEqual("A",score_a)
self.assertEqual("40",score_b)
text="30 15"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("30",score_a)
self.assertEqual("15",score_b)
text="A 40"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("A",score_a)
self.assertEqual("40",score_b)
text="15 "
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("0",game_a)
self.assertEqual("0",game_b)
self.assertEqual("15",score_a)
self.assertEqual("",score_b)
text=""
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("",game_a)
self.assertEqual("",game_b)
self.assertEqual("",score_a)
self.assertEqual("",score_b)
text="4 1 15\n6 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("1",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="4 6 4 15\n6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="6 4 6 4 15\n4 6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
text="5 6 4 6 4 15\n7 4 6 2 2 30"
game_a,game_b,score_a,score_b=self.ds.text2score(text)
self.assertEqual("4",game_a)
self.assertEqual("15",score_a)
self.assertEqual("2",game_b)
self.assertEqual("30",score_b)
| true | true |
f71fb5d233f60d4940a1f40506e107449b9cb848 | 837 | py | Python | aql/connector.py | ryansb/aql | bc9f11aaf06caabe15981fb33b5ef37a60ce700a | [
"MIT"
] | 24 | 2020-07-16T11:47:28.000Z | 2021-12-02T20:38:52.000Z | aql/connector.py | ryansb/aql | bc9f11aaf06caabe15981fb33b5ef37a60ce700a | [
"MIT"
] | 37 | 2020-10-17T14:04:05.000Z | 2022-02-05T05:01:08.000Z | aql/connector.py | ryansb/aql | bc9f11aaf06caabe15981fb33b5ef37a60ce700a | [
"MIT"
] | 1 | 2019-10-26T03:45:16.000Z | 2019-10-26T03:45:16.000Z | # Copyright 2020 John Reese
# Licensed under the MIT license
import re
from typing import Any, Pattern, Union
from .engines.base import Connection
from .errors import InvalidURI
from .types import Location
_uri_regex: Pattern = re.compile(r"(?P<engine>\w+)://(?P<location>.+)")
def connect(location: Union[str, Location], *args: Any, **kwargs: Any) -> Connection:
    """Connect to the specified database.

    ``location`` may be a :class:`Location` or a URI string of the form
    ``engine://database``; a string is parsed into a ``Location`` first.
    Raises :class:`InvalidURI` if a string does not match that form.
    """
    if isinstance(location, str):
        parsed = _uri_regex.match(location)
        if parsed is None:
            raise InvalidURI(f"Invalid database connection URI {location}")
        engine, database = parsed.groups()
        location = Location(engine, database=database)
    connector, engine_kls = Connection.get_connector(location.engine)
    return connector(engine_kls(), location, *args, **kwargs)
| 32.192308 | 85 | 0.688172 |
import re
from typing import Any, Pattern, Union
from .engines.base import Connection
from .errors import InvalidURI
from .types import Location
_uri_regex: Pattern = re.compile(r"(?P<engine>\w+)://(?P<location>.+)")
def connect(location: Union[str, Location], *args: Any, **kwargs: Any) -> Connection:
if isinstance(location, str):
match = _uri_regex.match(location)
if match:
engine, database = match.groups()
location = Location(engine, database=database)
else:
raise InvalidURI(f"Invalid database connection URI {location}")
connector, engine_kls = Connection.get_connector(location.engine)
return connector(engine_kls(), location, *args, **kwargs)
| true | true |
f71fb6028cbb2b09f79e06a91c06d14c015af377 | 2,712 | py | Python | utils.py | TNLC/pycalc | a60e996c5e4e1b6fdae5da124864cdf9a7178d19 | [
"Apache-2.0"
] | null | null | null | utils.py | TNLC/pycalc | a60e996c5e4e1b6fdae5da124864cdf9a7178d19 | [
"Apache-2.0"
] | null | null | null | utils.py | TNLC/pycalc | a60e996c5e4e1b6fdae5da124864cdf9a7178d19 | [
"Apache-2.0"
] | null | null | null | import math
from rply import LexerGenerator, ParserGenerator
def build_lexer():
    """Build and return the rply lexer for calculator input.

    Tokens: NUM (signed decimal), the arithmetic operators, parentheses
    and the absolute-value bar.  Whitespace is skipped.
    """
    lexer_generator = LexerGenerator()

    # Skip whitespace between tokens.
    lexer_generator.ignore(r'\s+')

    # Numbers: optional minus, optional decimal point, at least one digit.
    # NOTE(review): because NUM is registered before SUB and allows a
    # leading '-', input like "3-2" lexes as NUM(3) NUM(-2) -- confirm this
    # is intended before reordering.
    lexer_generator.add('NUM', r'-?[0-9]*\.?[0-9]+')

    # Operators.
    lexer_generator.add('ADD', r'\+')                # ADD => plus
    lexer_generator.add('SUB', r'-')                 # SUB => minus
    lexer_generator.add('MUL', r'\*')                # MUL => multiply
    lexer_generator.add('DIV', r'/')                 # DIV => divide
    lexer_generator.add('MOD', r'%')                 # MOD => modulo
    # BUG FIX: the pattern was r'^|\*\*'; an unescaped '^' is the regex
    # start-anchor (it matches the empty string), so the caret operator
    # never produced a token.  It must be escaped.
    lexer_generator.add('EXP', r'\^|\*\*')           # EXP => exponentiate
    lexer_generator.add('BR_O', r'\(')               # BR_O => bracket open
    lexer_generator.add('BR_C', r'\)')               # BR_C => bracket close
    lexer_generator.add('ABS_P', r'\|')              # ABS_P => absolute bar

    return lexer_generator.build()
def build_parser():
    """Build and return the rply parser for the calculator grammar.

    Grammar: expr handles +/- (lowest precedence, left-assoc); factor
    handles * / % ** at one level; term is a number, a parenthesised
    expression, or an absolute value.
    """
    parser_generator = ParserGenerator([
        'NUM',
        'ADD', 'SUB', 'MUL', 'DIV', 'MOD', 'EXP',
        'ABS_P',
        'BR_O', 'BR_C'
    ])

    # Production callbacks are registered via the decorator; they were all
    # previously named term_zahl (shadowing each other), now named after
    # the rule they implement for readability.
    @parser_generator.production('main : expr')
    def main_rule(x): return x[0]

    @parser_generator.production('expr : factor')
    def expr_factor(x): return x[0]
    @parser_generator.production('expr : expr SUB factor')
    def expr_sub(x): return x[0] - x[2]
    @parser_generator.production('expr : expr ADD factor')
    def expr_add(x): return x[0] + x[2]

    # Multiplicative / power operations.
    @parser_generator.production('factor : term')
    def factor_term(x): return x[0]
    @parser_generator.production('factor : factor EXP term')
    def factor_exp(x): return x[0] ** x[2]
    @parser_generator.production('factor : factor DIV term')
    def factor_div(x): return x[0] / x[2]
    @parser_generator.production('factor : factor MOD term')
    def factor_mod(x): return x[0] % x[2]
    @parser_generator.production('factor : factor MUL term')
    def factor_mul(x): return x[0] * x[2]

    @parser_generator.production('term : NUM')
    def term_num(x): return float(x[0].getstr())

    # Parentheses.
    @parser_generator.production('term : BR_O expr BR_C')
    def term_parens(x): return x[1]

    # Absolute value.
    # BUG FIX: the original returned x[0], which is the opening '|' *token*
    # (comparing a Token with 0 raises), instead of x[1], the enclosed
    # expression's value.
    @parser_generator.production('term : ABS_P expr ABS_P')
    def term_abs(x): return x[1] if x[1] >= 0 else x[1] * -1

    return parser_generator.build()
# Module-level singletons shared by the calculator front end: built once
# at import time because lexer/parser construction is relatively expensive.
lexer = build_lexer()
parser = build_parser()
| 32.674699 | 68 | 0.625369 | import math
from rply import LexerGenerator, ParserGenerator
def build_lexer():
lexer_generator = LexerGenerator()
lexer_generator.ignore(r'\s+')
lexer_generator.add('NUM', r'-?[0-9]*\.?[0-9]+')
lexer_generator.add('ADD', r'\+')
lexer_generator.add('SUB', r'-')
lexer_generator.add('MUL', r'\*')
lexer_generator.add('DIV', r'/')
lexer_generator.add('MOD', r'%')
lexer_generator.add('EXP', r'^|\*\*')
lexer_generator.add('BR_O', r'\(')
lexer_generator.add('BR_C', r'\)')
lexer_generator.add('ABS_P', r'\|')
return lexer_generator.build()
def build_parser():
parser_generator = ParserGenerator([
'NUM',
'ADD', 'SUB', 'MUL', 'DIV', 'MOD', 'EXP',
'ABS_P',
'BR_O', 'BR_C'
])
@parser_generator.production('main : expr')
def main(x): return x[0]
@parser_generator.production('expr : factor')
def term_zahl(x): return x[0]
@parser_generator.production('expr : expr SUB factor')
def term_zahl(x): return x[0] - x[2]
@parser_generator.production('expr : expr ADD factor')
def term_zahl(x): return x[0] + x[2]
@parser_generator.production('factor : term')
def term_zahl(x): return x[0]
@parser_generator.production('factor : factor EXP term')
def term_zahl(x): return x[0] ** x[2]
@parser_generator.production('factor : factor DIV term')
def term_zahl(x): return x[0] / x[2]
@parser_generator.production('factor : factor MOD term')
def term_zahl(x): return x[0] % x[2]
@parser_generator.production('factor : factor MUL term')
def term_zahl(x): return x[0] * x[2]
@parser_generator.production('term : NUM')
def term_zahl(x): return float(x[0].getstr())
@parser_generator.production('term : BR_O expr BR_C')
def term_zahl(x): return x[1]
@parser_generator.production('term : ABS_P expr ABS_P')
def term_zahl(x): return x[0] if x[0] >= 0 else x[0] * -1
return parser_generator.build()
lexer = build_lexer()
parser = build_parser()
| true | true |
f71fb63636601da0239ffd402fc4be7612c8b4ab | 4,587 | py | Python | ca_municipalities/people.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 19 | 2015-05-26T03:18:50.000Z | 2022-01-31T03:27:41.000Z | ca_municipalities/people.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 119 | 2015-01-09T06:09:35.000Z | 2022-01-20T23:05:05.000Z | ca_municipalities/people.py | dogooderapp/scrapers-ca | 5e852eea93f05f3f397eec318d3094a3b1b0b458 | [
"MIT"
] | 17 | 2015-11-23T05:00:10.000Z | 2021-09-15T16:03:33.000Z | from utils import CSVScraper, CanadianPerson as Person
from pupa.scrape import Organization, Post
from collections import defaultdict
import re
class CanadaMunicipalitiesPersonScraper(CSVScraper):
    """Scrape municipal council members for many municipalities from one
    shared Google Sheets CSV, yielding one legislature Organization per
    municipality plus a Post and Person per row."""
    # Published Google Sheets CSV listing every municipality's members.
    csv_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRrGXQy8qk16OhuTjlccoGB4jL5e8X1CEqRbg896ufLdh67DQk9nuGm-oufIT0HRMPEnwePw2HDx1Vj/pub?gid=0&single=true&output=csv'
    encoding = 'utf-8'

    def is_valid_row(self, row):
        """Returns whether the row should be imported."""
        # FIX: this docstring was previously a stray class-level string
        # sitting *before* the method, where it documented nothing.
        # Rows without an organization cannot be attributed to a
        # municipality, so they are skipped.
        return super().is_valid_row(row) and row['organization']

    def scrape(self):
        organizations = {}
        # role -> district -> running seat count, used to disambiguate
        # districts that elect more than one member per role.
        seat_numbers = defaultdict(lambda: defaultdict(int))

        reader = self.csv_reader(self.csv_url, delimiter=self.delimiter, header=True, encoding=self.encoding, skip_rows=self.skip_rows)
        reader.fieldnames = [self.header_converter(field) for field in reader.fieldnames]
        for row in reader:
            try:
                if self.is_valid_row(row):
                    # Apply per-column corrections: either a callable
                    # transform or a lookup table of replacements.
                    for key, corrections in self.corrections.items():
                        if not isinstance(corrections, dict):
                            row[key] = corrections(row[key])
                        elif row[key] in corrections:
                            row[key] = corrections[row[key]]

                    organization_classification = 'legislature'
                    organization_name = row['organization']
                    organization_key = organization_name.lower()
                    # Yield each municipality's Organization only once,
                    # keyed case-insensitively.
                    if organization_key in organizations:
                        organization = organizations[organization_key]
                    else:
                        organization = Organization(organization_name, classification=organization_classification)
                        organization.add_source(self.csv_url)
                        yield organization
                        organizations[organization_key] = organization

                    if not row['primary role']:
                        row['primary role'] = 'Councillor'
                    role = row['primary role']

                    post = Post(role=role, label=organization_name, organization_id=organization._id)
                    yield post

                    name = row['name'].strip(' .,')
                    district = row['district name']

                    # Number repeated seats within a district, e.g.
                    # "Ward 1 (seat 2)", except for unique roles like Mayor.
                    if self.many_posts_per_area and role not in self.unique_roles:
                        seat_numbers[role][district] += 1
                        district = '{} (seat {})'.format(district, seat_numbers[role][district])

                    p = Person(primary_org=organization_classification, name=name, district=district, role=role, party=row.get('party name'))
                    p.add_source(self.csv_url)

                    # All remaining columns are optional; copy whichever
                    # the sheet provides.
                    if row.get('gender'):
                        p.gender = row['gender']
                    if row.get('photo url'):
                        p.image = row['photo url']
                    if row.get('source url'):
                        p.add_source(row['source url'].strip(' .,'))
                    if row.get('website'):
                        p.add_link(row['website'], note='web site')
                    if row.get('facebook'):
                        # Drop tracking fragments/query strings from the URL.
                        p.add_link(re.sub(r'[#?].+', '', row['facebook']))
                    if row.get('twitter'):
                        p.add_link(row['twitter'])
                    if row['email']:
                        p.add_contact('email', row['email'].strip(' .,'))
                    if row['address']:
                        p.add_contact('address', row['address'], 'legislature')
                    if row.get('phone'):
                        p.add_contact('voice', row['phone'], 'legislature')
                    if row.get('fax'):
                        p.add_contact('fax', row['fax'], 'legislature')
                    if row.get('cell'):
                        p.add_contact('cell', row['cell'], 'legislature')
                    if row.get('birth date'):
                        p.birth_date = row['birth date']
                    if row.get('incumbent'):
                        p.extras['incumbent'] = row['incumbent']
                    if name in self.other_names:
                        for other_name in self.other_names[name]:
                            p.add_name(other_name)

                    # Validate person entity so that we can catch the exception if needed.
                    p.validate()
                    yield p
            except Exception as e:
                # NOTE(review): deliberately best-effort -- a bad row is
                # logged and skipped rather than aborting the whole scrape.
                print(repr(e))
                continue
| 42.869159 | 176 | 0.514716 | from utils import CSVScraper, CanadianPerson as Person
from pupa.scrape import Organization, Post
from collections import defaultdict
import re
class CanadaMunicipalitiesPersonScraper(CSVScraper):
csv_url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vRrGXQy8qk16OhuTjlccoGB4jL5e8X1CEqRbg896ufLdh67DQk9nuGm-oufIT0HRMPEnwePw2HDx1Vj/pub?gid=0&single=true&output=csv'
encoding = 'utf-8'
def is_valid_row(self, row):
return super().is_valid_row(row) and row['organization']
def scrape(self):
organizations = {}
seat_numbers = defaultdict(lambda: defaultdict(int))
reader = self.csv_reader(self.csv_url, delimiter=self.delimiter, header=True, encoding=self.encoding, skip_rows=self.skip_rows)
reader.fieldnames = [self.header_converter(field) for field in reader.fieldnames]
for row in reader:
try:
if self.is_valid_row(row):
for key, corrections in self.corrections.items():
if not isinstance(corrections, dict):
row[key] = corrections(row[key])
elif row[key] in corrections:
row[key] = corrections[row[key]]
organization_classification = 'legislature'
organization_name = row['organization']
organization_key = organization_name.lower()
if organization_key in organizations:
organization = organizations[organization_key]
else:
organization = Organization(organization_name, classification=organization_classification)
organization.add_source(self.csv_url)
yield organization
organizations[organization_key] = organization
if not row['primary role']:
row['primary role'] = 'Councillor'
role = row['primary role']
post = Post(role=role, label=organization_name, organization_id=organization._id)
yield post
name = row['name'].strip(' .,')
district = row['district name']
if self.many_posts_per_area and role not in self.unique_roles:
seat_numbers[role][district] += 1
district = '{} (seat {})'.format(district, seat_numbers[role][district])
p = Person(primary_org=organization_classification, name=name, district=district, role=role, party=row.get('party name'))
p.add_source(self.csv_url)
if row.get('gender'):
p.gender = row['gender']
if row.get('photo url'):
p.image = row['photo url']
if row.get('source url'):
p.add_source(row['source url'].strip(' .,'))
if row.get('website'):
p.add_link(row['website'], note='web site')
if row.get('facebook'):
p.add_link(re.sub(r'[#?].+', '', row['facebook']))
if row.get('twitter'):
p.add_link(row['twitter'])
if row['email']:
p.add_contact('email', row['email'].strip(' .,'))
if row['address']:
p.add_contact('address', row['address'], 'legislature')
if row.get('phone'):
p.add_contact('voice', row['phone'], 'legislature')
if row.get('fax'):
p.add_contact('fax', row['fax'], 'legislature')
if row.get('cell'):
p.add_contact('cell', row['cell'], 'legislature')
if row.get('birth date'):
p.birth_date = row['birth date']
if row.get('incumbent'):
p.extras['incumbent'] = row['incumbent']
if name in self.other_names:
for other_name in self.other_names[name]:
p.add_name(other_name)
p.validate()
yield p
except Exception as e:
print(repr(e))
continue
| true | true |
f71fb66f5197af2f7a2dd9fb62e51560772987ee | 398 | py | Python | tests/connection/test_cursor.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 5 | 2019-07-08T15:38:06.000Z | 2022-03-24T20:36:19.000Z | tests/connection/test_cursor.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 2 | 2019-07-07T23:26:32.000Z | 2020-06-04T07:43:24.000Z | tests/connection/test_cursor.py | coverwallet/pysoni | 49d3a8acb101436ad0724749572be2ad9d86f3ae | [
"MIT"
] | 1 | 2019-05-31T09:11:22.000Z | 2019-05-31T09:11:22.000Z | def test_cursor_triggers_cursor_in_the_connection(open_connection):
open_connection.cursor()
open_connection._connection_handler.cursor.assert_called_once()
def test_cursor_returns_a_cursor_in_the_handler(open_connection, mocker):
    """cursor() must return whatever the handler's cursor() produces,
    not a wrapper of its own."""
    cursor_mock = mocker.Mock()
    open_connection._connection_handler.cursor.return_value = cursor_mock
    assert open_connection.cursor() == cursor_mock
open_connection.cursor()
open_connection._connection_handler.cursor.assert_called_once()
def test_cursor_returns_a_cursor_in_the_handler(open_connection, mocker):
cursor_mock = mocker.Mock()
open_connection._connection_handler.cursor.return_value = cursor_mock
assert open_connection.cursor() == cursor_mock | true | true |
f71fb695ceaa12f53778fae43e8d0268e9cde5f9 | 3,235 | py | Python | server/config/settings.py | sudosubin/playground-gunicorn | 770b2db062446e47a92b37fd3488f0e657157293 | [
"MIT"
] | null | null | null | server/config/settings.py | sudosubin/playground-gunicorn | 770b2db062446e47a92b37fd3488f0e657157293 | [
"MIT"
] | null | null | null | server/config/settings.py | sudosubin/playground-gunicorn | 770b2db062446e47a92b37fd3488f0e657157293 | [
"MIT"
] | null | null | null | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this checked-in key, DEBUG=True and the empty
# ALLOWED_HOSTS are development-only defaults generated by startproject;
# production deployments must override all three (e.g. from environment
# variables) -- confirm before deploying.
SECRET_KEY = 'django-insecure-n1vl4be=11s&5oo0^453rw&9(g3v0pjb6=t4ze@d_3j4i3)y+y'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.08871 | 91 | 0.703246 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-n1vl4be=11s&5oo0^453rw&9(g3v0pjb6=t4ze@d_3j4i3)y+y'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true | true |
f71fb6ffafd818eedc205dc12f215cb79fa5ad0e | 680 | py | Python | sa/profiles/Vyatta/Vyatta/get_capabilities.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/Vyatta/Vyatta/get_capabilities.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/Vyatta/Vyatta/get_capabilities.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vyatta.Vyatta.get_capabilities
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
class Script(BaseScript):
    name = "Vyatta.Vyatta.get_capabilities"

    def has_lldp_cli(self):
        """
        Check box has lldp enabled
        """
        output = self.cli("show lldp neighbors")
        lldp_disabled = "LLDP not configured" in output
        return not lldp_disabled
| 30.909091 | 73 | 0.457353 |
from noc.sa.profiles.Generic.get_capabilities import Script as BaseScript
class Script(BaseScript):
name = "Vyatta.Vyatta.get_capabilities"
def has_lldp_cli(self):
r = self.cli("show lldp neighbors")
return "LLDP not configured" not in r
| true | true |
f71fb8631c4b145396e2bba66374e05637da08a5 | 130 | py | Python | db_utils.py | bizmarcin/thefridge | 13bde29a57aea09fecf5ec2f28ce013adf6c4d08 | [
"MIT"
] | 1 | 2019-07-01T13:04:02.000Z | 2019-07-01T13:04:02.000Z | db_utils.py | bizmarcin/thefridge | 13bde29a57aea09fecf5ec2f28ce013adf6c4d08 | [
"MIT"
] | 2 | 2020-07-17T09:05:59.000Z | 2021-05-09T06:42:06.000Z | db_utils.py | bizmarcin/thefridge | 13bde29a57aea09fecf5ec2f28ce013adf6c4d08 | [
"MIT"
] | null | null | null | import sqlite3
def get_connection(db_path='fridge.db'):
    """Open a SQLite connection with name-based row access.

    Keyword arguments:
        db_path (str): path to the database file (default: 'fridge.db').
            Pass ':memory:' for an in-memory database. Previously the path
            was hard-coded; the default keeps existing callers working.

    Returns:
        sqlite3.Connection: connection whose rows support row['column'].
    """
    conn = sqlite3.connect(db_path)
    # sqlite3.Row allows column access by name as well as by index
    conn.row_factory = sqlite3.Row
    return conn
| 16.25 | 39 | 0.7 | import sqlite3
def get_connection():
conn = sqlite3.connect('fridge.db')
conn.row_factory = sqlite3.Row
return conn
| true | true |
f71fb9c25aa2d31e0378e8ac7911871707f58f10 | 6,328 | py | Python | tests/components/switch/test_command_line.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | 1 | 2016-07-14T05:20:54.000Z | 2016-07-14T05:20:54.000Z | tests/components/switch/test_command_line.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | null | null | null | tests/components/switch/test_command_line.py | beschouten/home-assistant | f50c30bbbad4d92e342c8547630c63c0c7882803 | [
"MIT"
] | 1 | 2018-11-22T13:55:23.000Z | 2018-11-22T13:55:23.000Z | """the tests for the Command line switch platform."""
import json
import os
import tempfile
import unittest
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
import homeassistant.components.switch.command_line as command_line
from tests.common import get_test_home_assistant
class TestCommandSwitch(unittest.TestCase):
    """Test the command switch."""

    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()

    def test_state_none(self):
        """Test with none state."""
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            test_switch = {
                'oncmd': 'echo 1 > {}'.format(path),
                'offcmd': 'echo 0 > {}'.format(path),
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)

            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

    def test_state_value(self):
        """Test with state value."""
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            test_switch = {
                'statecmd': 'cat {}'.format(path),
                'oncmd': 'echo 1 > {}'.format(path),
                'offcmd': 'echo 0 > {}'.format(path),
                'value_template': '{{ value=="1" }}'
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)

            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

    def test_state_json_value(self):
        """Test with state JSON value."""
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            oncmd = json.dumps({'status': 'ok'})
            offcmd = json.dumps({'status': 'nope'})
            test_switch = {
                'statecmd': 'cat {}'.format(path),
                'oncmd': 'echo \'{}\' > {}'.format(oncmd, path),
                'offcmd': 'echo \'{}\' > {}'.format(offcmd, path),
                'value_template': '{{ value_json.status=="ok" }}'
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)

            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

    def test_state_code(self):
        """Test with state code."""
        with tempfile.TemporaryDirectory() as tempdirname:
            path = os.path.join(tempdirname, 'switch_status')
            # no value_template here, so the state presumably comes from the
            # command's exit code; `cat` keeps succeeding, hence the final
            # assertion below expects the switch to still report ON
            test_switch = {
                'statecmd': 'cat {}'.format(path),
                'oncmd': 'echo 1 > {}'.format(path),
                'offcmd': 'echo 0 > {}'.format(path),
            }
            self.assertTrue(switch.setup(self.hass, {
                'switch': {
                    'platform': 'command_line',
                    'switches': {
                        'test': test_switch
                    }
                }
            }))

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_OFF, state.state)

            switch.turn_on(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)

            switch.turn_off(self.hass, 'switch.test')
            self.hass.pool.block_till_done()

            state = self.hass.states.get('switch.test')
            self.assertEqual(STATE_ON, state.state)

    def test_assumed_state_should_be_true_if_command_state_is_false(self):
        """Test that assumed_state reflects the presence of a state command."""
        # BUGFIX: a redundant second call to get_test_home_assistant() was
        # removed here -- it replaced the instance created in setUp(), so
        # tearDown() only stopped the replacement and leaked the original.

        # Set state command to false
        statecmd = False

        no_state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
                                                     "echo", statecmd, None)
        self.assertTrue(no_state_device.assumed_state)

        # Set state command
        statecmd = 'cat {}'

        state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
                                                  "echo", statecmd, None)
        self.assertFalse(state_device.assumed_state)
| 35.751412 | 79 | 0.533028 | import json
import os
import tempfile
import unittest
from homeassistant.const import STATE_ON, STATE_OFF
import homeassistant.components.switch as switch
import homeassistant.components.switch.command_line as command_line
from tests.common import get_test_home_assistant
class TestCommandSwitch(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
def tearDown(self):
self.hass.stop()
def test_state_none(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_value(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
'value_template': '{{ value=="1" }}'
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_json_value(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
oncmd = json.dumps({'status': 'ok'})
offcmd = json.dumps({'status': 'nope'})
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo \'{}\' > {}'.format(oncmd, path),
'offcmd': 'echo \'{}\' > {}'.format(offcmd, path),
'value_template': '{{ value_json.status=="ok" }}'
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
def test_state_code(self):
with tempfile.TemporaryDirectory() as tempdirname:
path = os.path.join(tempdirname, 'switch_status')
test_switch = {
'statecmd': 'cat {}'.format(path),
'oncmd': 'echo 1 > {}'.format(path),
'offcmd': 'echo 0 > {}'.format(path),
}
self.assertTrue(switch.setup(self.hass, {
'switch': {
'platform': 'command_line',
'switches': {
'test': test_switch
}
}
}))
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_OFF, state.state)
switch.turn_on(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
switch.turn_off(self.hass, 'switch.test')
self.hass.pool.block_till_done()
state = self.hass.states.get('switch.test')
self.assertEqual(STATE_ON, state.state)
def test_assumed_state_should_be_true_if_command_state_is_false(self):
self.hass = get_test_home_assistant()
statecmd = False
no_state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
"echo", statecmd, None)
self.assertTrue(no_state_device.assumed_state)
statecmd = 'cat {}'
state_device = command_line.CommandSwitch(self.hass, "Test", "echo",
"echo", statecmd, None)
self.assertFalse(state_device.assumed_state)
| true | true |
f71fb9e8ec48b2f9258997378e50488a34fc3cb5 | 2,033 | py | Python | scripts/utils/connection.py | CostaDiego/product-complaint-classification | 42d44210553577616dcf8ac4bf616b587fa02e8c | [
"MIT"
] | null | null | null | scripts/utils/connection.py | CostaDiego/product-complaint-classification | 42d44210553577616dcf8ac4bf616b587fa02e8c | [
"MIT"
] | null | null | null | scripts/utils/connection.py | CostaDiego/product-complaint-classification | 42d44210553577616dcf8ac4bf616b587fa02e8c | [
"MIT"
] | null | null | null | import psycopg2
from getpass import getpass
class DatabaseConection(object):
    """Small convenience wrapper around a psycopg2 connection.

    The password is requested interactively via getpass whenever a
    connection is (re-)established. `self.connected` tells callers
    whether the last connection attempt succeeded.
    """

    def __init__(self, host: str, database: str, user: str):
        # connection handle; stays None until a connection succeeds
        self._con = None
        self._host = host
        self._database = database
        self._user = user
        self.connected = False
        # establish the initial connection (prompts for the password);
        # previously this logic was duplicated verbatim in connect()
        self.connect()

    def send(self, sql: str):
        """Execute *sql* and commit; return True on success, False on failure."""
        try:
            cursor = self._con.cursor()
            cursor.execute(str(sql))
            self._con.commit()
            return True
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; best-effort contract (False) is kept
        except Exception:
            return False

    def request(self, sql: str):
        """Execute *sql* and return all fetched rows, or None on failure."""
        try:
            cursor = self._con.cursor()
            cursor.execute(str(sql))
            return cursor.fetchall()
        except Exception:
            return None

    def closeConnection(self):
        """Close the underlying connection and clear the connected flag."""
        self._con.close()
        self.connected = False

    def connect(self, host=None, database=None, user=None):
        """(Re-)establish the connection, optionally overriding credentials."""
        if host:
            self._host = host
        if database:
            self._database = database
        if user:
            self._user = user
        try:
            self._con = psycopg2.connect(
                host=self._host,
                database=self._database,
                user=self._user,
                password=getpass(
                    prompt='Input the password:\n',
                    stream=None
                ))
            self.connected = True
            print('\tConnection established!')
        except Exception:
            self.connected = False
            print('\tFailed to establish connection!')
from getpass import getpass
class DatabaseConection(object):
def __init__(self, host: str, database: str, user: str):
self._con = None
self._host = host
self._database = database
self._user = user
self.connected = False
try:
self._con = psycopg2.connect(
host= self._host,
database= self._database,
user= self._user,
password = getpass(
prompt= 'Input the password:\n',
stream= None
))
self.connected = True
print('\tConnection established!')
except:
print('\tFailed to establish connection!')
def send(self, sql: str):
try:
cursor = self._con.cursor()
cursor.execute(str(sql))
self._con.commit()
return True
except:
return False
def request(self, sql: str):
try:
cursor = self._con.cursor()
cursor.execute(str(sql))
request = cursor.fetchall()
return request
except:
return None
def closeConnection(self):
self._con.close()
self.connected = False
def connect(self, host = None, database = None, user = None):
if host:
self._host = host
if database:
self._database = database
if user:
self._user = user
try:
self._con = psycopg2.connect(
host= self._host,
database= self._database,
user= self._user,
password = getpass(
prompt= 'Input the password:\n',
stream= None
))
self.connected = True
print('\tConnection established!')
except:
self.connected = False
print('\tFailed to establish connection!') | true | true |
f71fbb8b5e0dc57ce6f0bd235b70ff2b45cd4410 | 4,913 | py | Python | ilustrado/util.py | ml-evs/ilustrado | 3121ecaff9cb517f3946b2283bf50dce499caad9 | [
"MIT"
] | 3 | 2019-10-31T20:54:55.000Z | 2022-01-05T16:39:43.000Z | ilustrado/util.py | ml-evs/ilustrado | 3121ecaff9cb517f3946b2283bf50dce499caad9 | [
"MIT"
] | null | null | null | ilustrado/util.py | ml-evs/ilustrado | 3121ecaff9cb517f3946b2283bf50dce499caad9 | [
"MIT"
] | 2 | 2019-11-29T11:34:11.000Z | 2020-08-12T12:31:48.000Z | # coding: utf-8
""" Catch-all file for utility functions.
"""
import sys
import logging
import numpy as np
from matador.compute import ComputeTask
from matador.utils.cell_utils import cart2frac, cart2abc
LOG = logging.getLogger("ilustrado")
LOG.setLevel(logging.DEBUG)
def strip_useless(doc, to_run=False):
    """ Strip useless information from a matador doc.

    Parameters:
        doc (dict): structure to strip information from.

    Arguments:
        to_run (bool): whether the structure needs to be rerun,
            i.e. whether to delete data from previous run.

    Returns:
        dict: matador document stripped of useless keys

    """
    if to_run:
        wanted = (
            "source",
            "parents",
            "mutations",
            "elems",
            "stoichiometry",
            "lattice_abc",
            "lattice_cart",
            "positions_frac",
            "num_atoms",
            "atom_types",
        )
    else:
        wanted = (
            "source",
            "parents",
            "mutations",
            "elems",
            "stoichiometry",
            "lattice_abc",
            "lattice_cart",
            "cell_volume",
            "space_group",
            "positions_frac",
            "num_atoms",
            "atom_types",
            "enthalpy",
            "enthalpy_per_atom",
            "total_energy",
            "total_energy_per_atom",
            "pressure",
            "max_force_on_atom",
            "optimised",
            "date",
            "total_time_hrs",
            "peak_mem_MB",
        )
    stripped = {}
    for field_name in wanted:
        if field_name in doc:
            value = doc[field_name]
            # numpy arrays are converted so the result stays JSON/BSON friendly
            stripped[field_name] = (
                value.tolist() if isinstance(value, np.ndarray) else value
            )
    return stripped
class FakeComputeTask(ComputeTask):
    """ Fake Relaxer for testing, with same parameters as the real one
    from matador.compute.
    """

    def __init__(self, *args, **kwargs):
        self.structure = kwargs["res"]
        self.output_queue = kwargs["output_queue"]

    def relax(self):
        fake_number_crunch = True
        if fake_number_crunch:
            # burn a little CPU to emulate a real calculation
            matrix_size = np.random.randint(low=3, high=50)
            random_matrix = np.random.rand(matrix_size, matrix_size)
            np.linalg.eig(random_matrix)
        per_atom = -505 + np.random.rand()
        self.structure["enthalpy_per_atom"] = per_atom
        self.structure["enthalpy"] = per_atom * self.structure["num_atoms"]
        # ~80% of fake relaxations "succeed"
        self.structure["optimised"] = bool(np.random.rand() < 0.8)
        self.output_queue.put(self.structure)
class NewbornProcess:
    """ Simple container of process data for a newborn structure. """

    def __init__(self, newborn_id, node, process, ncores=None):
        # identifier of the newborn structure this process is relaxing
        self.newborn_id = newborn_id
        # node the process was dispatched to
        self.node = node
        # the process handle itself
        self.process = process
        # number of cores assigned, if known
        self.ncores = ncores

    def __repr__(self):
        # added for easier debugging of the process book-keeping
        return (
            f"{type(self).__name__}(newborn_id={self.newborn_id!r}, "
            f"node={self.node!r}, process={self.process!r}, "
            f"ncores={self.ncores!r})"
        )
class AseRelaxation:
    """ Perform a variable cell relaxation with ASE,
    using a predefined calculator.
    """
    def __init__(self, doc, queue, calculator=None):
        """ Initialise a relaxation with ASE.
        Parameters:
            doc (dict): the structure to optimise.
            queue (mp.Queue): the queue to push the result to.
        Keyword arguments:
            calculator (ase.Calculator): the calculator object
                to use for force/energy computation. Default is
                LennardJones.
        """
        from copy import deepcopy
        from matador.utils.viz_utils import doc2ase
        from ase.constraints import UnitCellFilter
        if calculator is None:
            from ase.calculators.lj import LennardJones
            self.calc = LennardJones()
        else:
            self.calc = calculator
        # keep our own copy so the caller's doc is never mutated here
        self.doc = deepcopy(doc)
        self.atoms = doc2ase(doc)
        self.atoms.set_calculator(self.calc)
        # UnitCellFilter exposes cell degrees of freedom to the optimizer,
        # making this a variable-cell relaxation
        self.ucf = UnitCellFilter(self.atoms)
        self.queue = queue
    def relax(self):
        """ Run up to 100 LBFGS steps (fmax=0.05), update the structure doc
        with the relaxed geometry/energies, and push it onto the queue.
        Any optimizer failure is recorded as optimised=False rather than raised.
        """
        from ase.optimize import LBFGS
        # NOTE(review): stdout is restored to sys.__stdout__ at the end but is
        # never redirected here -- presumably guards against ASE/LBFGS hijacking
        # stdout during the run; confirm before changing.
        cached = sys.__stdout__
        try:
            optimizer = LBFGS(self.ucf)
            optimizer.logfile = None
            optimised = optimizer.run(fmax=0.05, steps=100)
        except Exception:
            # best-effort: a crashed optimisation is reported as not optimised
            optimised = False
        self.doc["optimised"] = bool(optimised)
        self.doc["positions_abs"] = self.atoms.get_positions().tolist()
        self.doc["lattice_cart"] = self.atoms.get_cell().tolist()
        self.doc["lattice_abc"] = cart2abc(self.doc["lattice_cart"])
        self.doc["positions_frac"] = cart2frac(self.doc["lattice_cart"], self.doc["positions_abs"])
        # energies are taken from the calculator's last results cache
        self.doc["enthalpy_per_atom"] = float(self.calc.results["energy"] / len(
            self.doc["atom_types"]
        ))
        self.doc["enthalpy"] = float(self.calc.results["energy"])
        self.queue.put(self.doc)
        sys.stdout = cached
| 28.730994 | 102 | 0.574802 |
import sys
import logging
import numpy as np
from matador.compute import ComputeTask
from matador.utils.cell_utils import cart2frac, cart2abc
LOG = logging.getLogger("ilustrado")
LOG.setLevel(logging.DEBUG)
def strip_useless(doc, to_run=False):
stripped_doc = dict()
if to_run:
keys = [
"source",
"parents",
"mutations",
"elems",
"stoichiometry",
"lattice_abc",
"lattice_cart",
"positions_frac",
"num_atoms",
"atom_types",
]
else:
keys = [
"source",
"parents",
"mutations",
"elems",
"stoichiometry",
"lattice_abc",
"lattice_cart",
"cell_volume",
"space_group",
"positions_frac",
"num_atoms",
"atom_types",
"enthalpy",
"enthalpy_per_atom",
"total_energy",
"total_energy_per_atom",
"pressure",
"max_force_on_atom",
"optimised",
"date",
"total_time_hrs",
"peak_mem_MB",
]
for key in keys:
if key in doc:
stripped_doc[key] = doc[key]
if isinstance(doc[key], np.ndarray):
stripped_doc[key] = doc[key].tolist()
return stripped_doc
class FakeComputeTask(ComputeTask):
def __init__(self, *args, **kwargs):
self.structure = kwargs["res"]
self.output_queue = kwargs["output_queue"]
def relax(self):
fake_number_crunch = True
if fake_number_crunch:
size = np.random.randint(low=3, high=50)
array = np.random.rand(size, size)
np.linalg.eig(array)
self.structure["enthalpy_per_atom"] = -505 + np.random.rand()
self.structure["enthalpy"] = self.structure["enthalpy_per_atom"] * self.structure["num_atoms"]
if np.random.rand() < 0.8:
self.structure["optimised"] = True
else:
self.structure["optimised"] = False
self.output_queue.put(self.structure)
class NewbornProcess:
def __init__(self, newborn_id, node, process, ncores=None):
self.newborn_id = newborn_id
self.node = node
self.process = process
self.ncores = ncores
class AseRelaxation:
def __init__(self, doc, queue, calculator=None):
from copy import deepcopy
from matador.utils.viz_utils import doc2ase
from ase.constraints import UnitCellFilter
if calculator is None:
from ase.calculators.lj import LennardJones
self.calc = LennardJones()
else:
self.calc = calculator
self.doc = deepcopy(doc)
self.atoms = doc2ase(doc)
self.atoms.set_calculator(self.calc)
self.ucf = UnitCellFilter(self.atoms)
self.queue = queue
def relax(self):
from ase.optimize import LBFGS
cached = sys.__stdout__
try:
optimizer = LBFGS(self.ucf)
optimizer.logfile = None
optimised = optimizer.run(fmax=0.05, steps=100)
except Exception:
optimised = False
self.doc["optimised"] = bool(optimised)
self.doc["positions_abs"] = self.atoms.get_positions().tolist()
self.doc["lattice_cart"] = self.atoms.get_cell().tolist()
self.doc["lattice_abc"] = cart2abc(self.doc["lattice_cart"])
self.doc["positions_frac"] = cart2frac(self.doc["lattice_cart"], self.doc["positions_abs"])
self.doc["enthalpy_per_atom"] = float(self.calc.results["energy"] / len(
self.doc["atom_types"]
))
self.doc["enthalpy"] = float(self.calc.results["energy"])
self.queue.put(self.doc)
sys.stdout = cached
| true | true |
f71fbc58ba45ce332cf36e06106fa51f669a4b79 | 16,543 | py | Python | services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py | mguidon/osparc-simcore | 77e64777728f20a5b21362372aefa0e0db5072cd | [
"MIT"
] | null | null | null | services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py | mguidon/osparc-simcore | 77e64777728f20a5b21362372aefa0e0db5072cd | [
"MIT"
] | 29 | 2018-11-13T09:39:29.000Z | 2022-03-22T10:11:32.000Z | services/director-v2/src/simcore_service_director_v2/modules/comp_scheduler/base_scheduler.py | mguidon/osparc-simcore | 77e64777728f20a5b21362372aefa0e0db5072cd | [
"MIT"
] | null | null | null | """The scheduler shall be run as a background task.
Based on oSparc pipelines, it monitors when to start the next celery task(s), either one at a time or as a group of tasks.
In principle the Scheduler maintains the comp_runs table in the database.
It contains how the pipeline was run and by whom.
It also contains the final result of the pipeline run.
When a pipeline is scheduled first all the tasks contained in the DAG are set to PUBLISHED state.
Once the scheduler determines a task shall run, its state is set to PENDING, so that the sidecar can pick up the task.
The sidecar will then change the state to STARTED, then to SUCCESS or FAILED.
"""
import asyncio
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Set, Tuple, cast
import networkx as nx
from aiopg.sa.engine import Engine
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
from pydantic import PositiveInt
from ...core.errors import (
ComputationalBackendNotConnectedError,
InsuficientComputationalResourcesError,
InvalidPipelineError,
MissingComputationalResourcesError,
PipelineNotFoundError,
SchedulerError,
)
from ...models.domains.comp_pipelines import CompPipelineAtDB
from ...models.domains.comp_runs import CompRunsAtDB
from ...models.domains.comp_tasks import CompTaskAtDB, Image
from ...models.schemas.constants import ClusterID, UserID
from ...utils.computations import get_pipeline_state_from_task_states
from ...utils.scheduler import COMPLETED_STATES, Iteration, get_repository
from ..db.repositories.comp_pipelines import CompPipelinesRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
logger = logging.getLogger(__name__)
@dataclass
class ScheduledPipelineParams:
    # parameters attached to one scheduled (user, project, iteration) entry
    cluster_id: ClusterID
    # set True by stop_pipeline(); the scheduling loop then cancels the run
    mark_for_cancellation: bool = False
@dataclass
class BaseCompScheduler(ABC):
scheduled_pipelines: Dict[
Tuple[UserID, ProjectID, Iteration], ScheduledPipelineParams
]
db_engine: Engine
wake_up_event: asyncio.Event = field(default_factory=asyncio.Event, init=False)
default_cluster_id: ClusterID
async def run_new_pipeline(
self, user_id: UserID, project_id: ProjectID, cluster_id: ClusterID
) -> None:
"""Sets a new pipeline to be scheduled on the computational resources.
Passing cluster_id=0 will use the default cluster. Passing an existing ID will instruct
the scheduler to run the tasks on the defined cluster"""
# ensure the pipeline exists and is populated with something
dag = await self._get_pipeline_dag(project_id)
if not dag:
logger.warning(
"project %s has no computational dag defined. not scheduled for a run."
)
return
runs_repo: CompRunsRepository = get_repository(
self.db_engine, CompRunsRepository
) # type: ignore
new_run: CompRunsAtDB = await runs_repo.create(
user_id=user_id,
project_id=project_id,
cluster_id=cluster_id,
default_cluster_id=self.default_cluster_id,
)
self.scheduled_pipelines[
(user_id, project_id, new_run.iteration)
] = ScheduledPipelineParams(cluster_id=cluster_id)
# ensure the scheduler starts right away
self._wake_up_scheduler_now()
async def stop_pipeline(
self, user_id: UserID, project_id: ProjectID, iteration: Optional[int] = None
) -> None:
if not iteration:
# if no iteration given find the latest one in the list
possible_iterations = {
it
for u_id, p_id, it in self.scheduled_pipelines
if u_id == user_id and p_id == project_id
}
if not possible_iterations:
raise SchedulerError(
f"There are no pipeline scheduled for {user_id}:{project_id}"
)
iteration = max(possible_iterations)
# mark the scheduled pipeline for stopping
self.scheduled_pipelines[
(user_id, project_id, iteration)
].mark_for_cancellation = True
# ensure the scheduler starts right away
self._wake_up_scheduler_now()
async def schedule_all_pipelines(self) -> None:
self.wake_up_event.clear()
# if one of the task throws, the other are NOT cancelled which is what we want
await asyncio.gather(
*[
self._schedule_pipeline(
user_id,
project_id,
pipeline_params.cluster_id,
iteration,
pipeline_params.mark_for_cancellation,
)
for (
user_id,
project_id,
iteration,
), pipeline_params in self.scheduled_pipelines.items()
]
)
    async def reconnect_backend(self) -> None:
        # thin public wrapper: the reconnection logic itself is backend-specific
        await self._reconnect_backend()
async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph:
comp_pipeline_repo: CompPipelinesRepository = get_repository(
self.db_engine, CompPipelinesRepository
) # type: ignore
pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline(
project_id
)
pipeline_dag = pipeline_at_db.get_graph()
return pipeline_dag
async def _get_pipeline_tasks(
self, project_id: ProjectID, pipeline_dag: nx.DiGraph
) -> Dict[str, CompTaskAtDB]:
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
) # type: ignore
pipeline_comp_tasks: Dict[str, CompTaskAtDB] = {
str(t.node_id): t
for t in await comp_tasks_repo.get_comp_tasks(project_id)
if (str(t.node_id) in list(pipeline_dag.nodes()))
}
if len(pipeline_comp_tasks) != len(pipeline_dag.nodes()):
raise InvalidPipelineError(
f"{project_id}"
f"The tasks defined for {project_id} do not contain all the tasks defined in the pipeline [{list(pipeline_dag.nodes)}]! Please check."
)
return pipeline_comp_tasks
async def _update_run_result_from_tasks(
self,
user_id: UserID,
project_id: ProjectID,
iteration: PositiveInt,
pipeline_tasks: Dict[str, CompTaskAtDB],
) -> RunningState:
pipeline_state_from_tasks: RunningState = get_pipeline_state_from_task_states(
list(pipeline_tasks.values()),
)
await self._set_run_result(
user_id, project_id, iteration, pipeline_state_from_tasks
)
return pipeline_state_from_tasks
async def _set_run_result(
self,
user_id: UserID,
project_id: ProjectID,
iteration: PositiveInt,
run_result: RunningState,
) -> None:
comp_runs_repo: CompRunsRepository = get_repository(
self.db_engine, CompRunsRepository
) # type: ignore
await comp_runs_repo.set_run_result(
user_id=user_id,
project_id=project_id,
iteration=iteration,
result_state=run_result,
final_state=(run_result in COMPLETED_STATES),
)
    @abstractmethod
    async def _start_tasks(
        self,
        user_id: UserID,
        project_id: ProjectID,
        cluster_id: ClusterID,
        scheduled_tasks: Dict[NodeID, Image],
        callback: Callable[[], None],
    ) -> None:
        """Start the given tasks on the backend; *callback* wakes the scheduler."""
        ...
    @abstractmethod
    async def _stop_tasks(self, tasks: List[CompTaskAtDB]) -> None:
        """Stop the given tasks on the backend (backend-specific)."""
        ...
    @abstractmethod
    async def _reconnect_backend(self) -> None:
        """Re-establish the connection to the computational backend."""
        ...
    async def _schedule_pipeline(
        self,
        user_id: UserID,
        project_id: ProjectID,
        cluster_id: ClusterID,
        iteration: PositiveInt,
        marked_for_stopping: bool,
    ) -> None:
        """Run one scheduling step for a single pipeline run.

        Refreshes task/run states from the DB, aborts the downstream of failed
        tasks, handles completion and cancellation, then submits the next batch
        of runnable (PUBLISHED, in-degree 0) tasks.
        """
        logger.debug(
            "checking run of project [%s:%s] for user [%s]",
            project_id,
            iteration,
            user_id,
        )
        pipeline_dag = nx.DiGraph()
        pipeline_tasks: Dict[str, CompTaskAtDB] = {}
        pipeline_result: RunningState = RunningState.UNKNOWN
        # 1. Update the run states
        try:
            pipeline_dag = await self._get_pipeline_dag(project_id)
            pipeline_tasks: Dict[str, CompTaskAtDB] = await self._get_pipeline_tasks(
                project_id, pipeline_dag
            )
            # filter out the tasks that were already successfully completed
            pipeline_dag.remove_nodes_from(
                {
                    node_id
                    for node_id, t in pipeline_tasks.items()
                    if t.state == RunningState.SUCCESS
                }
            )
            # find the tasks that need scheduling (no unfinished upstream left)
            tasks_to_schedule = [node_id for node_id, degree in pipeline_dag.in_degree() if degree == 0]  # type: ignore
            tasks_to_mark_as_aborted: Set[NodeID] = set()
            tasks_to_start: Set[NodeID] = set()
            for node_id in tasks_to_schedule:
                if pipeline_tasks[str(node_id)].state == RunningState.FAILED:
                    # a failed task aborts its whole downstream subtree
                    tasks_to_mark_as_aborted.update(nx.bfs_tree(pipeline_dag, node_id))
                    tasks_to_mark_as_aborted.remove(
                        node_id
                    )  # do not mark the failed one as aborted
                if pipeline_tasks[str(node_id)].state == RunningState.PUBLISHED:
                    # the nodes that are published shall be started
                    tasks_to_start.add(node_id)
            comp_tasks_repo: CompTasksRepository = cast(
                CompTasksRepository, get_repository(self.db_engine, CompTasksRepository)
            )
            if tasks_to_mark_as_aborted:
                await comp_tasks_repo.set_project_tasks_state(
                    project_id, list(tasks_to_mark_as_aborted), RunningState.ABORTED
                )
            # update the current (in-memory) states to match what we just wrote
            for node_id in tasks_to_mark_as_aborted:
                pipeline_tasks[f"{node_id}"].state = RunningState.ABORTED
            # compute and update the current status of the run
            pipeline_result = await self._update_run_result_from_tasks(
                user_id, project_id, iteration, pipeline_tasks
            )
        except PipelineNotFoundError:
            logger.warning(
                "pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler",
                project_id,
            )
            pipeline_result = RunningState.ABORTED
            await self._set_run_result(user_id, project_id, iteration, pipeline_result)
        except InvalidPipelineError as exc:
            logger.warning(
                "pipeline %s appears to be misconfigured, it will be removed from scheduler. Please check pipeline:\n%s",
                project_id,
                exc,
            )
            pipeline_result = RunningState.ABORTED
            await self._set_run_result(user_id, project_id, iteration, pipeline_result)
        # 2. Are we finished??
        if not pipeline_dag.nodes() or pipeline_result in COMPLETED_STATES:
            # there is nothing left, the run is completed, we're done here
            self.scheduled_pipelines.pop((user_id, project_id, iteration))
            logger.info(
                "pipeline %s scheduling completed with result %s",
                project_id,
                pipeline_result,
            )
            return
        # 3. Are we stopping??
        if marked_for_stopping:
            # get any running task and stop them
            comp_tasks_repo: CompTasksRepository = get_repository(
                self.db_engine, CompTasksRepository
            )  # type: ignore
            await comp_tasks_repo.mark_project_tasks_as_aborted(project_id)
            # stop any remaining running task
            running_tasks = [
                t
                for t in pipeline_tasks.values()
                if t.state in [RunningState.STARTED, RunningState.RETRY]
            ]
            await self._stop_tasks(running_tasks)
            logger.debug(
                "pipeline '%s' is marked for cancellation. stopping tasks for [%s]",
                project_id,
                running_tasks,
            )
            # the scheduled pipeline will be removed in the next iteration
            return
        # 4. Schedule the next tasks:
        # these tasks are in PUBLISHED state and all their preceding tasks are completed
        next_tasks: List[NodeID] = [
            node_id
            for node_id, degree in pipeline_dag.in_degree()  # type: ignore
            if degree == 0 and pipeline_tasks[node_id].state == RunningState.PUBLISHED
        ]
        if not next_tasks:
            # nothing to run at the moment
            return
        # let's schedule the tasks, mark them as PENDING so the sidecar will take them
        await self._schedule_tasks(
            user_id, project_id, cluster_id, pipeline_tasks, next_tasks
        )
async def _schedule_tasks(
self,
user_id: UserID,
project_id: ProjectID,
cluster_id: ClusterID,
comp_tasks: Dict[str, CompTaskAtDB],
tasks: List[NodeID],
):
# get tasks runtime requirements
tasks_to_reqs: Dict[NodeID, Image] = {
node_id: comp_tasks[f"{node_id}"].image for node_id in tasks
}
# The sidecar only pick up tasks that are in PENDING state
comp_tasks_repo: CompTasksRepository = get_repository(
self.db_engine, CompTasksRepository
) # type: ignore
await comp_tasks_repo.set_project_tasks_state(
project_id, tasks, RunningState.PENDING
)
# we pass the tasks to the dask-client
results = await asyncio.gather(
*[
self._start_tasks(
user_id,
project_id,
cluster_id,
scheduled_tasks={t: r},
callback=self._wake_up_scheduler_now,
)
for t, r in tasks_to_reqs.items()
],
return_exceptions=True,
)
for r, t in zip(results, tasks_to_reqs):
if isinstance(
r,
(
MissingComputationalResourcesError,
InsuficientComputationalResourcesError,
),
):
logger.error(
"Project '%s''s task '%s' could not be scheduled due to the following: %s",
project_id,
r.node_id,
f"{r}",
)
await comp_tasks_repo.set_project_tasks_state(
project_id, [r.node_id], RunningState.FAILED
)
# TODO: we should set some specific state so the user may know what to do
elif isinstance(r, ComputationalBackendNotConnectedError):
logger.error(
"The computational backend is disconnected. Tasks are set back "
"to PUBLISHED state until scheduler comes back!"
)
# we should try re-connecting.
# in the meantime we cannot schedule tasks on the scheduler,
# let's put these tasks back to PUBLISHED, so they might be re-submitted later
await asyncio.gather(
comp_tasks_repo.set_project_tasks_state(
project_id, tasks, RunningState.PUBLISHED
),
)
raise ComputationalBackendNotConnectedError(f"{r}") from r
if isinstance(r, Exception):
logger.error(
"Unexpected error happened when scheduling task due to following error %s",
f"{r}",
)
await comp_tasks_repo.set_project_tasks_state(
project_id, [t], RunningState.FAILED
)
    def _wake_up_scheduler_now(self) -> None:
        """Unblock the scheduling loop so it re-evaluates the pipelines immediately."""
        self.wake_up_event.set()
| 38.651869 | 150 | 0.609684 |
import asyncio
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Optional, Set, Tuple, cast
import networkx as nx
from aiopg.sa.engine import Engine
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.projects_state import RunningState
from pydantic import PositiveInt
from ...core.errors import (
ComputationalBackendNotConnectedError,
InsuficientComputationalResourcesError,
InvalidPipelineError,
MissingComputationalResourcesError,
PipelineNotFoundError,
SchedulerError,
)
from ...models.domains.comp_pipelines import CompPipelineAtDB
from ...models.domains.comp_runs import CompRunsAtDB
from ...models.domains.comp_tasks import CompTaskAtDB, Image
from ...models.schemas.constants import ClusterID, UserID
from ...utils.computations import get_pipeline_state_from_task_states
from ...utils.scheduler import COMPLETED_STATES, Iteration, get_repository
from ..db.repositories.comp_pipelines import CompPipelinesRepository
from ..db.repositories.comp_runs import CompRunsRepository
from ..db.repositories.comp_tasks import CompTasksRepository
logger = logging.getLogger(__name__)
@dataclass
class ScheduledPipelineParams:
    """Per-(user, project, iteration) scheduling state held by the scheduler."""

    # cluster on which this pipeline run was requested
    cluster_id: ClusterID
    # set to True when a stop is requested; the scheduling loop acts on it
    mark_for_cancellation: bool = False
@dataclass
class BaseCompScheduler(ABC):
    """Abstract scheduler driving computational pipelines stored in the DB.

    Concrete subclasses implement backend-specific task start/stop/reconnect.
    The scheduler keeps an in-memory map of currently scheduled pipelines and
    is woken up via :attr:`wake_up_event`.
    """

    # (user, project, iteration) -> scheduling parameters for that run
    scheduled_pipelines: Dict[
        Tuple[UserID, ProjectID, Iteration], ScheduledPipelineParams
    ]
    db_engine: Engine
    # excluded from __init__; signals the scheduling loop to run now
    wake_up_event: asyncio.Event = field(default_factory=asyncio.Event, init=False)
    default_cluster_id: ClusterID

    async def run_new_pipeline(
        self, user_id: UserID, project_id: ProjectID, cluster_id: ClusterID
    ) -> None:
        """Create a new run for *project_id* and register it for scheduling."""
        dag = await self._get_pipeline_dag(project_id)
        if not dag:
            # BUGFIX: the %s placeholder previously had no argument, which
            # makes the logging call fail to format the message
            logger.warning(
                "project %s has no computational dag defined. not scheduled for a run.",
                project_id,
            )
            return
        runs_repo: CompRunsRepository = get_repository(
            self.db_engine, CompRunsRepository
        )
        new_run: CompRunsAtDB = await runs_repo.create(
            user_id=user_id,
            project_id=project_id,
            cluster_id=cluster_id,
            default_cluster_id=self.default_cluster_id,
        )
        self.scheduled_pipelines[
            (user_id, project_id, new_run.iteration)
        ] = ScheduledPipelineParams(cluster_id=cluster_id)
        self._wake_up_scheduler_now()

    async def stop_pipeline(
        self, user_id: UserID, project_id: ProjectID, iteration: Optional[int] = None
    ) -> None:
        """Mark a scheduled run for cancellation (latest iteration by default).

        Raises SchedulerError when no run is scheduled for the user/project.
        """
        if not iteration:
            possible_iterations = {
                it
                for u_id, p_id, it in self.scheduled_pipelines
                if u_id == user_id and p_id == project_id
            }
            if not possible_iterations:
                raise SchedulerError(
                    f"There are no pipeline scheduled for {user_id}:{project_id}"
                )
            iteration = max(possible_iterations)
        self.scheduled_pipelines[
            (user_id, project_id, iteration)
        ].mark_for_cancellation = True
        self._wake_up_scheduler_now()

    async def schedule_all_pipelines(self) -> None:
        """Run one scheduling pass over every registered pipeline concurrently."""
        self.wake_up_event.clear()
        await asyncio.gather(
            *[
                self._schedule_pipeline(
                    user_id,
                    project_id,
                    pipeline_params.cluster_id,
                    iteration,
                    pipeline_params.mark_for_cancellation,
                )
                for (
                    user_id,
                    project_id,
                    iteration,
                ), pipeline_params in self.scheduled_pipelines.items()
            ]
        )

    async def reconnect_backend(self) -> None:
        """Ask the concrete scheduler to re-establish its backend connection."""
        await self._reconnect_backend()

    async def _get_pipeline_dag(self, project_id: ProjectID) -> nx.DiGraph:
        """Load the pipeline DAG for *project_id* from the database."""
        comp_pipeline_repo: CompPipelinesRepository = get_repository(
            self.db_engine, CompPipelinesRepository
        )
        pipeline_at_db: CompPipelineAtDB = await comp_pipeline_repo.get_pipeline(
            project_id
        )
        pipeline_dag = pipeline_at_db.get_graph()
        return pipeline_dag

    async def _get_pipeline_tasks(
        self, project_id: ProjectID, pipeline_dag: nx.DiGraph
    ) -> Dict[str, CompTaskAtDB]:
        """Return the DB tasks belonging to the DAG, keyed by node id string.

        Raises InvalidPipelineError when the DB tasks do not cover the DAG.
        """
        comp_tasks_repo: CompTasksRepository = get_repository(
            self.db_engine, CompTasksRepository
        )
        pipeline_comp_tasks: Dict[str, CompTaskAtDB] = {
            str(t.node_id): t
            for t in await comp_tasks_repo.get_comp_tasks(project_id)
            if (str(t.node_id) in list(pipeline_dag.nodes()))
        }
        if len(pipeline_comp_tasks) != len(pipeline_dag.nodes()):
            raise InvalidPipelineError(
                f"{project_id}"
                f"The tasks defined for {project_id} do not contain all the tasks defined in the pipeline [{list(pipeline_dag.nodes)}]! Please check."
            )
        return pipeline_comp_tasks

    async def _update_run_result_from_tasks(
        self,
        user_id: UserID,
        project_id: ProjectID,
        iteration: PositiveInt,
        pipeline_tasks: Dict[str, CompTaskAtDB],
    ) -> RunningState:
        """Derive the run state from the task states, persist it and return it."""
        pipeline_state_from_tasks: RunningState = get_pipeline_state_from_task_states(
            list(pipeline_tasks.values()),
        )
        await self._set_run_result(
            user_id, project_id, iteration, pipeline_state_from_tasks
        )
        return pipeline_state_from_tasks

    async def _set_run_result(
        self,
        user_id: UserID,
        project_id: ProjectID,
        iteration: PositiveInt,
        run_result: RunningState,
    ) -> None:
        """Persist *run_result* for the run; flags it final when completed."""
        comp_runs_repo: CompRunsRepository = get_repository(
            self.db_engine, CompRunsRepository
        )
        await comp_runs_repo.set_run_result(
            user_id=user_id,
            project_id=project_id,
            iteration=iteration,
            result_state=run_result,
            final_state=(run_result in COMPLETED_STATES),
        )

    @abstractmethod
    async def _start_tasks(
        self,
        user_id: UserID,
        project_id: ProjectID,
        cluster_id: ClusterID,
        scheduled_tasks: Dict[NodeID, Image],
        callback: Callable[[], None],
    ) -> None:
        """Submit *scheduled_tasks* to the backend (backend-specific)."""
        ...

    @abstractmethod
    async def _stop_tasks(self, tasks: List[CompTaskAtDB]) -> None:
        """Stop the given running tasks on the backend (backend-specific)."""
        ...

    @abstractmethod
    async def _reconnect_backend(self) -> None:
        """Re-establish the backend connection (backend-specific)."""
        ...

    async def _schedule_pipeline(
        self,
        user_id: UserID,
        project_id: ProjectID,
        cluster_id: ClusterID,
        iteration: PositiveInt,
        marked_for_stopping: bool,
    ) -> None:
        """Advance one pipeline: update states, handle stop requests, start next tasks."""
        logger.debug(
            "checking run of project [%s:%s] for user [%s]",
            project_id,
            iteration,
            user_id,
        )
        pipeline_dag = nx.DiGraph()
        pipeline_tasks: Dict[str, CompTaskAtDB] = {}
        pipeline_result: RunningState = RunningState.UNKNOWN
        try:
            # 1. refresh DAG/tasks and propagate failures downstream
            pipeline_dag = await self._get_pipeline_dag(project_id)
            pipeline_tasks: Dict[str, CompTaskAtDB] = await self._get_pipeline_tasks(
                project_id, pipeline_dag
            )
            # completed nodes no longer constrain what can run next
            pipeline_dag.remove_nodes_from(
                {
                    node_id
                    for node_id, t in pipeline_tasks.items()
                    if t.state == RunningState.SUCCESS
                }
            )
            tasks_to_schedule = [node_id for node_id, degree in pipeline_dag.in_degree() if degree == 0]
            tasks_to_mark_as_aborted: Set[NodeID] = set()
            tasks_to_start: Set[NodeID] = set()
            for node_id in tasks_to_schedule:
                if pipeline_tasks[str(node_id)].state == RunningState.FAILED:
                    # everything downstream of a failed node is aborted,
                    # the failed node itself keeps its FAILED state
                    tasks_to_mark_as_aborted.update(nx.bfs_tree(pipeline_dag, node_id))
                    tasks_to_mark_as_aborted.remove(
                        node_id
                    )
                if pipeline_tasks[str(node_id)].state == RunningState.PUBLISHED:
                    tasks_to_start.add(node_id)
            comp_tasks_repo: CompTasksRepository = cast(
                CompTasksRepository, get_repository(self.db_engine, CompTasksRepository)
            )
            if tasks_to_mark_as_aborted:
                await comp_tasks_repo.set_project_tasks_state(
                    project_id, list(tasks_to_mark_as_aborted), RunningState.ABORTED
                )
            for node_id in tasks_to_mark_as_aborted:
                pipeline_tasks[f"{node_id}"].state = RunningState.ABORTED
            pipeline_result = await self._update_run_result_from_tasks(
                user_id, project_id, iteration, pipeline_tasks
            )
        except PipelineNotFoundError:
            logger.warning(
                "pipeline %s does not exist in comp_pipeline table, it will be removed from scheduler",
                project_id,
            )
            pipeline_result = RunningState.ABORTED
            await self._set_run_result(user_id, project_id, iteration, pipeline_result)
        except InvalidPipelineError as exc:
            logger.warning(
                "pipeline %s appears to be misconfigured, it will be removed from scheduler. Please check pipeline:\n%s",
                project_id,
                exc,
            )
            pipeline_result = RunningState.ABORTED
            await self._set_run_result(user_id, project_id, iteration, pipeline_result)
        # 2. are we finished? then drop the pipeline from the scheduler
        if not pipeline_dag.nodes() or pipeline_result in COMPLETED_STATES:
            self.scheduled_pipelines.pop((user_id, project_id, iteration))
            logger.info(
                "pipeline %s scheduling completed with result %s",
                project_id,
                pipeline_result,
            )
            return
        # 3. Are we stopping??
        if marked_for_stopping:
            # get any running task and stop them
            comp_tasks_repo: CompTasksRepository = get_repository(
                self.db_engine, CompTasksRepository
            )  # type: ignore
            await comp_tasks_repo.mark_project_tasks_as_aborted(project_id)
            # stop any remaining running task
            running_tasks = [
                t
                for t in pipeline_tasks.values()
                if t.state in [RunningState.STARTED, RunningState.RETRY]
            ]
            await self._stop_tasks(running_tasks)
            logger.debug(
                "pipeline '%s' is marked for cancellation. stopping tasks for [%s]",
                project_id,
                running_tasks,
            )
            # the scheduled pipeline will be removed in the next iteration
            return
        # 4. Schedule the next tasks,
        # these tasks are in PUBLISHED state and all their preceeding tasks are completed
        next_tasks: List[NodeID] = [
            node_id
            for node_id, degree in pipeline_dag.in_degree()  # type: ignore
            if degree == 0 and pipeline_tasks[node_id].state == RunningState.PUBLISHED
        ]
        if not next_tasks:
            # nothing to run at the moment
            return
        # let's schedule the tasks, mark them as PENDING so the sidecar will take them
        await self._schedule_tasks(
            user_id, project_id, cluster_id, pipeline_tasks, next_tasks
        )

    async def _schedule_tasks(
        self,
        user_id: UserID,
        project_id: ProjectID,
        cluster_id: ClusterID,
        comp_tasks: Dict[str, CompTaskAtDB],
        tasks: List[NodeID],
    ):
        """Mark *tasks* as PENDING and submit them to the computational backend."""
        # get the runtime requirements (image) of each task to submit
        tasks_to_reqs: Dict[NodeID, Image] = {
            node_id: comp_tasks[f"{node_id}"].image for node_id in tasks
        }
        # the sidecar only picks up tasks that are in PENDING state
        comp_tasks_repo: CompTasksRepository = get_repository(
            self.db_engine, CompTasksRepository
        )
        await comp_tasks_repo.set_project_tasks_state(
            project_id, tasks, RunningState.PENDING
        )
        # submit each task; return_exceptions=True so one failure does not
        # cancel the remaining submissions
        results = await asyncio.gather(
            *[
                self._start_tasks(
                    user_id,
                    project_id,
                    cluster_id,
                    scheduled_tasks={t: r},
                    callback=self._wake_up_scheduler_now,
                )
                for t, r in tasks_to_reqs.items()
            ],
            return_exceptions=True,
        )
        for r, t in zip(results, tasks_to_reqs):
            if isinstance(
                r,
                (
                    MissingComputationalResourcesError,
                    InsuficientComputationalResourcesError,
                ),
            ):
                logger.error(
                    "Project '%s''s task '%s' could not be scheduled due to the following: %s",
                    project_id,
                    r.node_id,
                    f"{r}",
                )
                await comp_tasks_repo.set_project_tasks_state(
                    project_id, [r.node_id], RunningState.FAILED
                )
                # TODO: we should set some specific state so the user may know what to do
            elif isinstance(r, ComputationalBackendNotConnectedError):
                logger.error(
                    "The computational backend is disconnected. Tasks are set back "
                    "to PUBLISHED state until scheduler comes back!"
                )
                # we should try re-connecting.
                # in the meantime we cannot schedule tasks on the scheduler,
                # let's put these tasks back to PUBLISHED, so they might be re-submitted later
                await asyncio.gather(
                    comp_tasks_repo.set_project_tasks_state(
                        project_id, tasks, RunningState.PUBLISHED
                    ),
                )
                raise ComputationalBackendNotConnectedError(f"{r}") from r
            # BUGFIX: elif here — the resource errors above are Exceptions too
            # and were previously handled a second time by this branch
            elif isinstance(r, Exception):
                logger.error(
                    "Unexpected error happened when scheduling task due to following error %s",
                    f"{r}",
                )
                await comp_tasks_repo.set_project_tasks_state(
                    project_id, [t], RunningState.FAILED
                )

    def _wake_up_scheduler_now(self) -> None:
        """Unblock the scheduling loop so it re-evaluates the pipelines immediately."""
        self.wake_up_event.set()
| true | true |
f71fbf2af7ee4f4be9f4ac329665e4e092b4627e | 1,045 | py | Python | tests/pyre/filesystem/local_rootNotDirectory.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | tests/pyre/filesystem/local_rootNotDirectory.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | tests/pyre/filesystem/local_rootNotDirectory.py | BryanRiel/pyre | 179359634a7091979cced427b6133dd0ec4726ea | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that attempts to create local filesystems with nonexistent roots fails as expected
"""
def test():
    """Mounting a local filesystem on a regular file must raise MountPointError."""
    # support
    import pyre.primitives
    # my package
    import pyre.filesystem
    # a path that refers to a regular file, not a directory
    bad_root = pyre.primitives.path("./local_rootNotDirectory.py")
    try:
        # mounting here must not succeed
        pyre.filesystem.local(root=bad_root)
    except pyre.filesystem.MountPointError as error:
        # the report must identify the offending mount point
        expected = "error while mounting '{}': invalid mount point".format(
            bad_root.resolve())
        assert str(error) == expected
        # all done
        return
    # reaching this point means the mount unexpectedly succeeded
    assert False
# main
if __name__ == "__main__":
    # skip pyre initialization since we don't rely on the executive
    pyre_noboot = True
    # run the test
    test()
# end of file
| 21.770833 | 89 | 0.643062 |
def test():
    """Verify that mounting a local filesystem on a regular file fails."""
    # support package for paths
    import pyre.primitives
    # the package under test
    import pyre.filesystem
    # a path that refers to a regular file, not a directory
    dummy = pyre.primitives.path("./local_rootNotDirectory.py")
    try:
        # attempting to mount here must raise
        pyre.filesystem.local(root=dummy)
        # unreachable when the mount correctly fails
        assert False
    # if it fails as expected
    except pyre.filesystem.MountPointError as error:
        # check that the error message is correct
        assert str(error) == (
            "error while mounting '{}': invalid mount point".format(dummy.resolve()))
    # all done
    return
# main
if __name__ == "__main__":
    # skip pyre initialization since we don't rely on the executive
    pyre_noboot = True
    # run the test
    test()
| true | true |
f71fc0279e6c6df13e14b65f4c5bea90b17f596c | 21,395 | py | Python | pytorchtools/ptnetworks/ResNetCIFAR.py | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f | [
"MIT"
] | null | null | null | pytorchtools/ptnetworks/ResNetCIFAR.py | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f | [
"MIT"
] | null | null | null | pytorchtools/ptnetworks/ResNetCIFAR.py | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f | [
"MIT"
] | null | null | null | from collections import OrderedDict
import torch
from torch import Tensor
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from ptnetworks.ActivationTracker import ActivationTracker
from typing import Type, Any, Callable, Union, List, Optional
class ResNetCIFAR(nn.Module):
    """Wrapper around torchvision-style ResNet variants adapted for CIFAR.

    Builds the requested variant, optionally freezes feature layers, removes
    trailing residual blocks, loads a state dict, and exposes helpers to run
    the model and to collect intermediate activations.
    """

    def __init__(self,
        variant='resnet050',
        n_classes=100,
        pretrained=False,
        freeze_features_until='', #exclusive
        no_gradient_required=False,
        enforce_batchnorm_requires_gradient=False,
        n_layers_to_be_removed_from_blocks=None,  # avoid mutable default []
        no_classifier=False,
        activation='relu',
        init_mode='kaiming_normal',
        statedict='',
        strict_loading=True):
        super().__init__()

        arg_dict = {
            'pretrained' : pretrained,
            'num_classes' : n_classes,
            'init_mode' : init_mode,
            'activation' : activation,
        }

        # dispatch table avoids the long if/elif chain and makes the valid
        # variant names explicit
        factories = {
            'resnet018': resnet18,
            'resnet034': resnet34,
            'resnet050': resnet50,
            'resnet101': resnet101,
            'resnet152': resnet152,
            'resnext050_32x4d': resnext50_32x4d,
            'resnext101_32x8d': resnext101_32x8d,
            'wide_resnet050_2': wide_resnet50_2,
            'wide_resnet101_2': wide_resnet101_2,
        }
        if variant not in factories:
            # BUGFIX: used to print and continue, which crashed later with an
            # AttributeError because self.embedded_model was never set
            raise ValueError(f"invalid model variant '{variant}', choose one of {sorted(factories)}")
        self.embedded_model = factories[variant](**arg_dict)

        if no_classifier:
            self.embedded_model.classifier = nn.Identity()

        # modules ordered from the head backwards; used to unfreeze the tail
        module_dict = OrderedDict([
            ('classifier', self.embedded_model.classifier),
            ('layer4', self.embedded_model.layer4),
            ('layer3', self.embedded_model.layer3),
            ('layer2', self.embedded_model.layer2),
            ('layer1', self.embedded_model.layer1),
        ])

        if freeze_features_until:
            # freeze everything, then re-enable gradients from the classifier
            # backwards until (and including) the named module
            for param in self.embedded_model.parameters():
                param.requires_grad = False
            if freeze_features_until not in module_dict:
                # BUGFIX: typo "freeue_features_until" in the message
                raise ValueError("freeze_features_until does not match any network module")
            for key, module in module_dict.items():
                for param in module.parameters():
                    param.requires_grad = True
                if freeze_features_until == key:
                    break

        if n_layers_to_be_removed_from_blocks:
            # replace the last n blocks of each layer group with identities
            modules = [
                self.embedded_model.layer1,
                self.embedded_model.layer2,
                self.embedded_model.layer3,
                self.embedded_model.layer4,
            ]
            for n_layers, layer in zip(n_layers_to_be_removed_from_blocks, modules):
                for i in range(n_layers):
                    layer[-i-1] = nn.Identity()

        if statedict:
            pretrained_dict = torch.load(statedict, map_location=torch.device('cpu'))
            missing = self.load_state_dict(pretrained_dict, strict=strict_loading)
            print('Loading weights from statedict. Missing and unexpected keys:')
            print(missing)

        if enforce_batchnorm_requires_gradient:
            # re-enable gradients on all batch-norm parameters, regardless of
            # any freezing applied above
            for m in self.embedded_model.modules():
                if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
                    for param in m.parameters():
                        param.requires_grad = True

        if no_gradient_required:
            for param in self.embedded_model.parameters():
                param.requires_grad = False

    def forward(self, batch):
        """Run the model; accepts a raw tensor or a dict with a 'data' key."""
        if isinstance(batch, dict) and 'data' in batch:
            logits = self.embedded_model(batch['data'])
            out = {'logits' : logits}
            return out
        else:
            return self.embedded_model(batch)

    def forward_features(self, batch, module=None):
        """Run the model while recording intermediate activations."""
        track_modules = ActivationTracker()

        assert isinstance(batch, dict) and 'data' in batch
        logits, activation_dict = track_modules.collect_stats(self.embedded_model, batch['data'], module)

        out = {'logits' : logits, 'activations' : activation_dict}
        return out

    def save(self, statedict_name):
        """Persist the full wrapper state dict to *statedict_name*."""
        torch.save(self.state_dict(), statedict_name)
# local cache directory where downloaded pretrained weights are stored
MODEL_DIR = '/nfshome/linse/NO_INB_BACKUP/ModelZoo'

# official torchvision download URLs for the ImageNet-pretrained checkpoints
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Build a bias-free 3x3 convolution whose padding equals its dilation."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
    return layer
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Build a bias-free 1x1 (pointwise) convolution."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions (ResNet-18/34 style).

    Identical to the torchvision block except that the activation layer
    is configurable via *activation_layer*.
    """

    # output channels = planes * expansion
    expansion: int = 1

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer=nn.ReLU
    ) -> None:
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        # inplace=False so activation inputs can be inspected (activation tracking)
        self.relu_1 = activation_layer(inplace=False)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.relu_2 = activation_layer(inplace=False)
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """conv-bn-act, conv-bn, add the (possibly downsampled) input, activate."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu_1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # project the residual so shapes/channels match the main branch
            identity = self.downsample(x)
        out += identity
        out = self.relu_2(out)
        return out
class Bottleneck(nn.Module):
    """Residual bottleneck block (1x1 reduce, 3x3, 1x1 expand) for deeper ResNets."""

    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    # output channels = planes * expansion
    expansion: int = 4

    def __init__(
        self,
        inplanes: int,
        planes: int,
        stride: int = 1,
        downsample: Optional[nn.Module] = None,
        groups: int = 1,
        base_width: int = 64,
        dilation: int = 1,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer=nn.ReLU
    ) -> None:
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # inner width scales with base_width and groups (ResNeXt / wide variants)
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        # inplace=False so activation inputs can be inspected (activation tracking)
        self.relu_1 = activation_layer(inplace=False)
        self.relu_2 = activation_layer(inplace=False)
        self.relu_3 = activation_layer(inplace=False)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        """Three conv-bn(-act) stages, residual addition, final activation."""
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu_1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu_2(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # project the residual so shapes/channels match the main branch
            identity = self.downsample(x)
        out += identity
        out = self.relu_3(out)
        return out
class ResNet(nn.Module):
    """ResNet backbone adapted for CIFAR: 3x3 stem convolution, no stem max-pool.

    Otherwise follows the torchvision ResNet layout (4 layer groups, global
    average pooling, linear classifier).
    """

    def __init__(
        self,
        block: Type[Union[BasicBlock, Bottleneck]],
        layers: List[int],
        num_classes: int = 1000,
        zero_init_residual: bool = False,
        groups: int = 1,
        width_per_group: int = 64,
        replace_stride_with_dilation: Optional[List[bool]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        init_mode='kaiming_normal',
        activation='relu',
    ) -> None:
        super().__init__()
        self.ID = 'ResNet'

        # NOTE(review): unknown `activation` values leave activation_layer
        # unbound and raise NameError below — only 'relu'/'leaky_relu' supported
        if activation == 'relu':
            activation_layer = nn.ReLU
        elif activation == 'leaky_relu':
            activation_layer = nn.LeakyReLU
        self._activation_layer = activation_layer

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        #for CIFAR we choose a kernel size of 3 in the first convolutional layer
        # NOTE(review): stride=2 with padding=3 for a 3x3 kernel is unusual for
        # CIFAR (typical stems use stride=1, padding=1) — confirm this is intended
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
                               bias=False)
        self.conv1.ID = self.ID + '_first_layer'
        self.bn1 = norm_layer(self.inplanes)
        self.relu = self._activation_layer(inplace=False)
        #we do not apply maxpooling after the first layer for CIFAR
        self.maxpool = nn.Identity() #nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Linear(512 * block.expansion, num_classes)

        self.reinitialize(init_mode, activation, zero_init_residual)

    def reinitialize(self, init_mode, activation, zero_init_residual):
        """Re-initialize weights according to *init_mode*; see the init_ calls below."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if init_mode == 'kaiming_normal':
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=activation)
                elif init_mode == 'kaiming_uniform':
                    nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity=activation)
                elif init_mode == 'sparse':
                    nn.init.sparse_(m.weight, sparsity=0.1, std=0.01)
                elif init_mode == 'orthogonal':
                    nn.init.orthogonal_(m.weight, gain=1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]

    def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
                    stride: int = 1, dilate: bool = False) -> nn.Sequential:
        """Stack *blocks* residual blocks; the first one may downsample/project."""
        norm_layer = self._norm_layer
        downsample = None
        activation_layer = self._activation_layer
        previous_dilation = self.dilation
        if dilate:
            # trade the stride for dilation (keeps spatial resolution)
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # projection shortcut to match shape/channels of the residual
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer, activation_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer, activation_layer=activation_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        """Stem, the four layer groups, pooling, and the linear classifier."""
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        """Return class logits for input images *x*."""
        return self._forward_impl(x)
def _resnet(
    arch: str,
    block: Type[Union[BasicBlock, Bottleneck]],
    layers: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any
) -> ResNet:
    """Build a ResNet and optionally load the ImageNet checkpoint for *arch*.

    strict=False because the CIFAR-adapted stem/classifier differ from the
    torchvision checkpoints, so mismatching keys are tolerated.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress, model_dir=MODEL_DIR)
        model.load_state_dict(state_dict, strict=False)
    return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-18 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-34 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-50 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-101 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNet-152 (`"Deep Residual Learning for Image Recognition"
    <https://arxiv.org/pdf/1512.03385.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNeXt-50 32x4d (`"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # force the ResNeXt cardinality/width (overrides any caller-supplied values,
    # matching the original behavior)
    kwargs.update(groups=32, width_per_group=4)
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a ResNeXt-101 32x8d (`"Aggregated Residual Transformation for Deep
    Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # force the ResNeXt cardinality/width (overrides any caller-supplied values,
    # matching the original behavior)
    kwargs.update(groups=32, width_per_group=8)
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Build a Wide ResNet-50-2 (`"Wide Residual Networks"
    <https://arxiv.org/pdf/1605.07146.pdf>`_).

    Same layout as ResNet-50 but with the bottleneck inner width doubled; the
    outer 1x1 channel counts are unchanged (last block: 2048-1024-2048 instead
    of 2048-512-2048).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # double the per-group width (overrides any caller-supplied value,
    # matching the original behavior)
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
    Identical to ResNet-101 except that every bottleneck block uses twice as
    many internal channels; the outer 1x1 convolutions keep their widths,
    e.g. the last block is 2048-1024-2048 instead of 2048-512-2048.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Doubling the per-group width is what makes this the "wide" variant.
    kwargs.update(width_per_group=64 * 2)
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained,
                   progress, **kwargs)
import torch
from torch import Tensor
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from ptnetworks.ActivationTracker import ActivationTracker
from typing import Type, Any, Callable, Union, List, Optional
class ResNetCIFAR(nn.Module):
def __init__(self,
variant='resnet050',
n_classes=100,
pretrained=False,
freeze_features_until='',
no_gradient_required=False,
enforce_batchnorm_requires_gradient=False,
n_layers_to_be_removed_from_blocks=[],
no_classifier=False,
activation='relu',
init_mode='kaiming_normal',
statedict='',
strict_loading=True):
super().__init__()
arg_dict = {
'pretrained' : pretrained,
'num_classes' : n_classes,
'init_mode' : init_mode,
'activation' : activation,
}
if variant == 'resnet018':
self.embedded_model = resnet18(**arg_dict)
elif variant == 'resnet034':
self.embedded_model = resnet34(**arg_dict)
elif variant == 'resnet050':
self.embedded_model = resnet50(**arg_dict)
elif variant == 'resnet101':
self.embedded_model = resnet101(**arg_dict)
elif variant == 'resnet152':
self.embedded_model = resnet152(**arg_dict)
elif variant == 'resnext050_32x4d':
self.embedded_model = resnext50_32x4d(**arg_dict)
elif variant == 'resnext101_32x8d':
self.embedded_model = resnext101_32x8d(**arg_dict)
elif variant == 'wide_resnet050_2':
self.embedded_model = wide_resnet50_2(**arg_dict)
elif variant == 'wide_resnet101_2':
self.embedded_model = wide_resnet101_2(**arg_dict)
else:
print('select valid model variant')
if no_classifier:
self.embedded_model.classifier = nn.Identity()
module_dict = OrderedDict([
('classifier', self.embedded_model.classifier),
('layer4', self.embedded_model.layer4),
('layer3', self.embedded_model.layer3),
('layer2', self.embedded_model.layer2),
('layer1', self.embedded_model.layer1),
])
if freeze_features_until:
for param in self.embedded_model.parameters():
param.requires_grad = False
if freeze_features_until not in module_dict:
raise ValueError("freeue_features_until does not match any network module")
for key, module in module_dict.items():
for param in module.parameters():
param.requires_grad = True
if freeze_features_until == key:
break
if n_layers_to_be_removed_from_blocks:
modules = [
self.embedded_model.layer1,
self.embedded_model.layer2,
self.embedded_model.layer3,
self.embedded_model.layer4,
]
for n_layers, layer in zip(n_layers_to_be_removed_from_blocks, modules):
for i in range(n_layers):
layer[-i-1] = nn.Identity()
if statedict:
pretrained_dict = torch.load(statedict, map_location=torch.device('cpu'))
missing = self.load_state_dict(pretrained_dict, strict=strict_loading)
print('Loading weights from statedict. Missing and unexpected keys:')
print(missing)
if enforce_batchnorm_requires_gradient:
for m in self.embedded_model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
for param in m.parameters():
param.requires_grad = True
if no_gradient_required:
for param in self.embedded_model.parameters():
param.requires_grad = False
def forward(self, batch):
if isinstance(batch, dict) and 'data' in batch:
logits = self.embedded_model(batch['data'])
out = {'logits' : logits}
return out
else:
return self.embedded_model(batch)
def forward_features(self, batch, module=None):
track_modules = ActivationTracker()
assert isinstance(batch, dict) and 'data' in batch
logits, activation_dict = track_modules.collect_stats(self.embedded_model, batch['data'], module)
out = {'logits' : logits, 'activations' : activation_dict}
return out
def save(self, statedict_name):
torch.save(self.state_dict(), statedict_name)
MODEL_DIR = '/nfshome/linse/NO_INB_BACKUP/ModelZoo'
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer=nn.ReLU
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu_1 = activation_layer(inplace=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.relu_2 = activation_layer(inplace=False)
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu_1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu_2(out)
return out
class Bottleneck(nn.Module):
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer=nn.ReLU
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu_1 = activation_layer(inplace=False)
self.relu_2 = activation_layer(inplace=False)
self.relu_3 = activation_layer(inplace=False)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu_1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu_2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu_3(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
init_mode='kaiming_normal',
activation='relu',
) -> None:
super().__init__()
self.ID = 'ResNet'
if activation == 'relu':
activation_layer = nn.ReLU
elif activation == 'leaky_relu':
activation_layer = nn.LeakyReLU
self._activation_layer = activation_layer
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
bias=False)
self.conv1.ID = self.ID + '_first_layer'
self.bn1 = norm_layer(self.inplanes)
self.relu = self._activation_layer(inplace=False)
self.maxpool = nn.Identity()
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(512 * block.expansion, num_classes)
self.reinitialize(init_mode, activation, zero_init_residual)
def reinitialize(self, init_mode, activation, zero_init_residual):
for m in self.modules():
if isinstance(m, nn.Conv2d):
if init_mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=activation)
elif init_mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity=activation)
elif init_mode == 'sparse':
nn.init.sparse_(m.weight, sparsity=0.1, std=0.01)
elif init_mode == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
activation_layer = self._activation_layer
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, activation_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, activation_layer=activation_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress, model_dir=MODEL_DIR)
model.load_state_dict(state_dict, strict=False)
return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs) | true | true |
f71fc229e7d866aedfc0418b748cc620f97d4880 | 19,666 | py | Python | libs/pdfminer/pdfminer/psparser.py | diverted247/SigningService | a630357a2bf5bea4e5d55106f092e4a2a31cab15 | [
"MIT"
] | null | null | null | libs/pdfminer/pdfminer/psparser.py | diverted247/SigningService | a630357a2bf5bea4e5d55106f092e4a2a31cab15 | [
"MIT"
] | null | null | null | libs/pdfminer/pdfminer/psparser.py | diverted247/SigningService | a630357a2bf5bea4e5d55106f092e4a2a31cab15 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import re
from utils import choplist
STRICT = 0
## PS Exceptions
##
class PSException(Exception):
    """Root of every exception raised by the PS parsers in this module."""
class PSEOF(PSException):
    """Raised when the underlying stream runs out of data."""
class PSSyntaxError(PSException):
    """Raised on malformed PostScript constructs."""
class PSTypeError(PSException):
    """Raised when an object of an unexpected PS type is encountered."""
class PSValueError(PSException):
    """Raised when a PS object carries an invalid value."""
## Basic PostScript Types
##
## PSObject
##
class PSObject(object):
    """Common ancestor of every PS/PDF data type defined in this module."""
## PSLiteral
##
class PSLiteral(PSObject):
    """A PostScript literal such as "/Name".

    Literals act as identifiers (variable names, property names,
    dictionary keys) and are case sensitive.  Do not instantiate this
    class directly; obtain shared instances via PSLiteralTable.intern().
    """

    def __init__(self, name):
        # The literal's name, without the leading slash.
        self.name = name

    def __repr__(self):
        return '/%s' % self.name
## PSKeyword
##
class PSKeyword(PSObject):
    """A PostScript keyword.

    Keywords express commands and directives, and also mark content
    boundaries.  Do not instantiate this class directly; obtain shared
    instances via PSKeywordTable.intern().
    """

    def __init__(self, name):
        # The keyword text exactly as it appears in the source.
        self.name = name

    def __repr__(self):
        return self.name
## PSSymbolTable
##
class PSSymbolTable(object):
    """Intern cache for PSLiteral/PSKeyword objects.

    Each distinct name maps to exactly one instance, so interned objects
    can be compared by identity with the "is" operator.
    """

    def __init__(self, klass):
        # Mapping of name -> the single shared instance for that name.
        self.dict = {}
        # Factory used to create a new instance on first sight of a name.
        self.klass = klass

    def intern(self, name):
        """Return the shared instance for *name*, creating it if needed."""
        try:
            return self.dict[name]
        except KeyError:
            obj = self.klass(name)
            self.dict[name] = obj
            return obj
# Singleton intern tables: each distinct name maps to exactly one
# PSLiteral/PSKeyword instance, so tokens can be compared with "is".
PSLiteralTable = PSSymbolTable(PSLiteral)
PSKeywordTable = PSSymbolTable(PSKeyword)
# Shorthand constructors used throughout the tokenizer below.
LIT = PSLiteralTable.intern
KWD = PSKeywordTable.intern
# Pre-interned keywords for the PostScript structural delimiters.
KEYWORD_PROC_BEGIN = KWD('{')
KEYWORD_PROC_END = KWD('}')
KEYWORD_ARRAY_BEGIN = KWD('[')
KEYWORD_ARRAY_END = KWD(']')
KEYWORD_DICT_BEGIN = KWD('<<')
KEYWORD_DICT_END = KWD('>>')
def literal_name(x):
    """Return the name of the literal *x*.

    In STRICT mode a non-literal argument raises PSTypeError; otherwise
    non-literals are coerced with str() and returned as-is.
    """
    if isinstance(x, PSLiteral):
        return x.name
    if STRICT:
        raise PSTypeError('Literal required: %r' % x)
    return str(x)
def keyword_name(x):
    """Return the name of the keyword *x*.

    In STRICT mode a non-keyword argument raises PSTypeError; otherwise
    non-keywords are coerced with str() and returned as-is.
    """
    if isinstance(x, PSKeyword):
        return x.name
    if STRICT:
        raise PSTypeError('Keyword required: %r' % x)
    return str(x)
## PSBaseParser
##
# Patterns shared by the tokenizer state machine below.
EOL = re.compile(r'[\r\n]')
SPC = re.compile(r'\s')
NONSPC = re.compile(r'\S')
HEX = re.compile(r'[0-9a-fA-F]')
# Characters that terminate a name literal / keyword token.
END_LITERAL = re.compile(r'[#/%\[\]()<>{}\s]')
END_HEX_STRING = re.compile(r'[^\s0-9a-fA-F]')
# Two hex digits, or a single leftover character at the end of a hex string.
HEX_PAIR = re.compile(r'[0-9a-fA-F]{2}|.')
END_NUMBER = re.compile(r'[^0-9]')
END_KEYWORD = re.compile(r'[#/%\[\]()<>{}\s]')
# '(' , ')' or backslash (octal \134) end or alter a literal string run.
END_STRING = re.compile(r'[()\134]')
OCT_STRING = re.compile(r'[0-7]')
# Single-character escapes allowed after a backslash inside a string.
ESC_STRING = {'b': 8, 't': 9, 'n': 10, 'f': 12, 'r': 13, '(': 40, ')': 41, '\\': 92}
class PSBaseParser(object):
    """Most basic PostScript parser that performs only tokenization.

    Implemented as an incremental state machine: self._parse1 always points
    at the handler for the current lexical state, and each handler scans a
    portion of the current buffer chunk and returns the index at which
    scanning should resume.
    """
    # Size of each chunk read from the underlying file object.
    BUFSIZ = 4096
    # Verbosity: 2 or higher prints trace messages to stderr.
    debug = 0
    def __init__(self, fp):
        # fp: a seekable file-like object containing the PostScript data.
        self.fp = fp
        self.seek(0)
        return
    def __repr__(self):
        return '<%s: %r, bufpos=%d>' % (self.__class__.__name__, self.fp, self.bufpos)
    def flush(self):
        # Hook for subclasses; the base tokenizer has nothing to flush.
        return
    def close(self):
        self.flush()
        return
    def tell(self):
        """Return the absolute position of the next character to scan."""
        return self.bufpos+self.charpos
    def poll(self, pos=None, n=80):
        # Debugging aid: dump the next n bytes at pos without disturbing
        # the parser's own file position.
        pos0 = self.fp.tell()
        if not pos:
            pos = self.bufpos+self.charpos
        self.fp.seek(pos)
        print >>sys.stderr, 'poll(%d): %r' % (pos, self.fp.read(n))
        self.fp.seek(pos0)
        return
    def seek(self, pos):
        """Seeks the parser to the given position.
        """
        if 2 <= self.debug:
            print >>sys.stderr, 'seek: %r' % pos
        self.fp.seek(pos)
        # reset the status for nextline()
        self.bufpos = pos
        self.buf = ''
        self.charpos = 0
        # reset the status for nexttoken()
        self._parse1 = self._parse_main
        self._curtoken = ''
        self._curtokenpos = 0
        self._tokens = []
        return
    def fillbuf(self):
        """Ensure self.buf holds unread data; raises PSEOF at end of input."""
        if self.charpos < len(self.buf):
            return
        # fetch next chunk.
        self.bufpos = self.fp.tell()
        self.buf = self.fp.read(self.BUFSIZ)
        if not self.buf:
            raise PSEOF('Unexpected EOF')
        self.charpos = 0
        return
    def nextline(self):
        """Fetches a next line that ends either with \\r or \\n.
        Returns (position, line) with the terminator included.
        """
        linebuf = ''
        linepos = self.bufpos + self.charpos
        eol = False
        while 1:
            self.fillbuf()
            if eol:
                c = self.buf[self.charpos]
                # handle '\r\n'
                if c == '\n':
                    linebuf += c
                    self.charpos += 1
                break
            m = EOL.search(self.buf, self.charpos)
            if m:
                linebuf += self.buf[self.charpos:m.end(0)]
                self.charpos = m.end(0)
                if linebuf[-1] == '\r':
                    # A lone '\r' may be followed by '\n' in the next chunk.
                    eol = True
                else:
                    break
            else:
                # No terminator in this chunk; keep accumulating.
                linebuf += self.buf[self.charpos:]
                self.charpos = len(self.buf)
        if 2 <= self.debug:
            print >>sys.stderr, 'nextline: %r' % ((linepos, linebuf),)
        return (linepos, linebuf)
    def revreadlines(self):
        """Fetches a next line backword.
        This is used to locate the trailers at the end of a file.
        Yields chunks scanning from EOF toward the beginning of the file.
        """
        self.fp.seek(0, 2)
        pos = self.fp.tell()
        buf = ''
        while 0 < pos:
            prevpos = pos
            pos = max(0, pos-self.BUFSIZ)
            self.fp.seek(pos)
            s = self.fp.read(prevpos-pos)
            if not s:
                break
            while 1:
                n = max(s.rfind('\r'), s.rfind('\n'))
                if n == -1:
                    # No newline left in this chunk; carry it over.
                    buf = s + buf
                    break
                yield s[n:]+buf
                s = s[:n]
                buf = ''
        return
    def _parse_main(self, s, i):
        # Neutral state: skip whitespace, then dispatch on the first
        # significant character to the matching sub-parser.
        m = NONSPC.search(s, i)
        if not m:
            return len(s)
        j = m.start(0)
        c = s[j]
        self._curtokenpos = self.bufpos+j
        if c == '%':
            self._curtoken = '%'
            self._parse1 = self._parse_comment
            return j+1
        elif c == '/':
            self._curtoken = ''
            self._parse1 = self._parse_literal
            return j+1
        elif c in '-+' or c.isdigit():
            self._curtoken = c
            self._parse1 = self._parse_number
            return j+1
        elif c == '.':
            self._curtoken = c
            self._parse1 = self._parse_float
            return j+1
        elif c.isalpha():
            self._curtoken = c
            self._parse1 = self._parse_keyword
            return j+1
        elif c == '(':
            self._curtoken = ''
            self.paren = 1
            self._parse1 = self._parse_string
            return j+1
        elif c == '<':
            self._curtoken = ''
            self._parse1 = self._parse_wopen
            return j+1
        elif c == '>':
            self._curtoken = ''
            self._parse1 = self._parse_wclose
            return j+1
        else:
            # Any other single character is a keyword by itself.
            self._add_token(KWD(c))
            return j+1
    def _add_token(self, obj):
        # Queue a completed (position, token) pair for nexttoken().
        self._tokens.append((self._curtokenpos, obj))
        return
    def _parse_comment(self, s, i):
        m = EOL.search(s, i)
        if not m:
            self._curtoken += s[i:]
            # BUGFIX: previously returned the tuple (self._parse_comment,
            # len(s)), which nexttoken() assigned to self.charpos and then
            # crashed whenever a comment spanned a chunk boundary.  The
            # state stays _parse_comment via self._parse1, so simply
            # report that the whole chunk was consumed.
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        self._parse1 = self._parse_main
        # We ignore comments.
        #self._tokens.append(self._curtoken)
        return j
    def _parse_literal(self, s, i):
        m = END_LITERAL.search(s, i)
        if not m:
            self._curtoken += s[i:]
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        c = s[j]
        if c == '#':
            # '#xx' escapes a character by its two-digit hex code.
            self.hex = ''
            self._parse1 = self._parse_literal_hex
            return j+1
        self._add_token(LIT(self._curtoken))
        self._parse1 = self._parse_main
        return j
    def _parse_literal_hex(self, s, i):
        c = s[i]
        if HEX.match(c) and len(self.hex) < 2:
            self.hex += c
            return i+1
        if self.hex:
            self._curtoken += chr(int(self.hex, 16))
        self._parse1 = self._parse_literal
        return i
    def _parse_number(self, s, i):
        m = END_NUMBER.search(s, i)
        if not m:
            self._curtoken += s[i:]
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        c = s[j]
        if c == '.':
            # A decimal point turns the token into a float.
            self._curtoken += c
            self._parse1 = self._parse_float
            return j+1
        try:
            self._add_token(int(self._curtoken))
        except ValueError:
            # Unconvertible digit runs (e.g. a bare sign) are dropped.
            pass
        self._parse1 = self._parse_main
        return j
    def _parse_float(self, s, i):
        m = END_NUMBER.search(s, i)
        if not m:
            self._curtoken += s[i:]
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        try:
            self._add_token(float(self._curtoken))
        except ValueError:
            pass
        self._parse1 = self._parse_main
        return j
    def _parse_keyword(self, s, i):
        m = END_KEYWORD.search(s, i)
        if not m:
            self._curtoken += s[i:]
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        # 'true'/'false' become Python booleans rather than keywords.
        if self._curtoken == 'true':
            token = True
        elif self._curtoken == 'false':
            token = False
        else:
            token = KWD(self._curtoken)
        self._add_token(token)
        self._parse1 = self._parse_main
        return j
    def _parse_string(self, s, i):
        m = END_STRING.search(s, i)
        if not m:
            self._curtoken += s[i:]
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        c = s[j]
        if c == '\\':
            self.oct = ''
            self._parse1 = self._parse_string_1
            return j+1
        if c == '(':
            self.paren += 1
            self._curtoken += c
            return j+1
        if c == ')':
            self.paren -= 1
            if self.paren: # nested ')' stays part of the string until balanced
                self._curtoken += c
                return j+1
        self._add_token(self._curtoken)
        self._parse1 = self._parse_main
        return j+1
    def _parse_string_1(self, s, i):
        # Inside a string, right after a backslash: octal or named escape.
        c = s[i]
        if OCT_STRING.match(c) and len(self.oct) < 3:
            self.oct += c
            return i+1
        if self.oct:
            self._curtoken += chr(int(self.oct, 8))
            self._parse1 = self._parse_string
            return i
        if c in ESC_STRING:
            self._curtoken += chr(ESC_STRING[c])
        self._parse1 = self._parse_string
        return i+1
    def _parse_wopen(self, s, i):
        # After '<': either '<<' (dict begin) or the start of a hex string.
        c = s[i]
        if c == '<':
            self._add_token(KEYWORD_DICT_BEGIN)
            self._parse1 = self._parse_main
            i += 1
        else:
            self._parse1 = self._parse_hexstring
        return i
    def _parse_wclose(self, s, i):
        # After '>': only '>>' (dict end) is meaningful here.
        c = s[i]
        if c == '>':
            self._add_token(KEYWORD_DICT_END)
            i += 1
        self._parse1 = self._parse_main
        return i
    def _parse_hexstring(self, s, i):
        m = END_HEX_STRING.search(s, i)
        if not m:
            self._curtoken += s[i:]
            return len(s)
        j = m.start(0)
        self._curtoken += s[i:j]
        # Strip whitespace, then decode hex pairs; a lone trailing digit is
        # decoded by itself via the '|.' alternative of HEX_PAIR.
        token = HEX_PAIR.sub(lambda m: chr(int(m.group(0), 16)),
                             SPC.sub('', self._curtoken))
        self._add_token(token)
        self._parse1 = self._parse_main
        return j
    def nexttoken(self):
        """Return the next (position, token) pair; raises PSEOF at EOF."""
        while not self._tokens:
            self.fillbuf()
            self.charpos = self._parse1(self.buf, self.charpos)
        token = self._tokens.pop(0)
        if 2 <= self.debug:
            print >>sys.stderr, 'nexttoken: %r' % (token,)
        return token
## PSStackParser
##
class PSStackParser(PSBaseParser):
    """Parser that assembles tokens into compound objects (arrays,
    dictionaries, procedures), keeping a stack of nested container
    contexts.  Subclasses override do_keyword() and flush().
    """
    def __init__(self, fp):
        PSBaseParser.__init__(self, fp)
        self.reset()
        return
    def reset(self):
        """Discard all partially-built objects and pending results."""
        # Stack of enclosing containers: (start position, type, stack).
        self.context = []
        # Type tag of the container being built: 'a', 'd', 'p' or None.
        self.curtype = None
        # (position, object) pairs collected for the current container.
        self.curstack = []
        # Completed objects waiting to be handed out by nextobject().
        self.results = []
        return
    def seek(self, pos):
        PSBaseParser.seek(self, pos)
        self.reset()
        return
    def push(self, *objs):
        # Append (position, object) pairs to the current container.
        self.curstack.extend(objs)
        return
    def pop(self, n):
        """Remove and return the topmost n entries of the current stack."""
        objs = self.curstack[-n:]
        self.curstack[-n:] = []
        return objs
    def popall(self):
        """Remove and return every entry of the current stack."""
        objs = self.curstack
        self.curstack = []
        return objs
    def add_results(self, *objs):
        # Queue finished objects for nextobject() to return.
        if 2 <= self.debug:
            print >>sys.stderr, 'add_results: %r' % (objs,)
        self.results.extend(objs)
        return
    def start_type(self, pos, type):
        """Open a new container of the given type at position pos."""
        self.context.append((pos, self.curtype, self.curstack))
        (self.curtype, self.curstack) = (type, [])
        if 2 <= self.debug:
            print >>sys.stderr, 'start_type: pos=%r, type=%r' % (pos, type)
        return
    def end_type(self, type):
        """Close the current container; raises PSTypeError on mismatch."""
        if self.curtype != type:
            raise PSTypeError('Type mismatch: %r != %r' % (self.curtype, type))
        objs = [obj for (_, obj) in self.curstack]
        (pos, self.curtype, self.curstack) = self.context.pop()
        if 2 <= self.debug:
            print >>sys.stderr, 'end_type: pos=%r, type=%r, objs=%r' % (pos, type, objs)
        return (pos, objs)
    def do_keyword(self, pos, token):
        # Hook for subclasses to handle non-structural keywords.
        return
    def nextobject(self):
        """Yields a list of objects.
        Returns keywords, literals, strings, numbers, arrays and dictionaries.
        Arrays and dictionaries are represented as Python lists and dictionaries.
        """
        while not self.results:
            (pos, token) = self.nexttoken()
            #print (pos,token), (self.curtype, self.curstack)
            if (isinstance(token, int) or
                isinstance(token, float) or
                isinstance(token, bool) or
                isinstance(token, str) or
                isinstance(token, PSLiteral)):
                # normal token
                self.push((pos, token))
            elif token == KEYWORD_ARRAY_BEGIN:
                # begin array
                self.start_type(pos, 'a')
            elif token == KEYWORD_ARRAY_END:
                # end array
                try:
                    self.push(self.end_type('a'))
                except PSTypeError:
                    if STRICT:
                        raise
            elif token == KEYWORD_DICT_BEGIN:
                # begin dictionary
                self.start_type(pos, 'd')
            elif token == KEYWORD_DICT_END:
                # end dictionary
                try:
                    (pos, objs) = self.end_type('d')
                    if len(objs) % 2 != 0:
                        raise PSSyntaxError('Invalid dictionary construct: %r' % objs)
                    # construct a Python dictionary.
                    d = dict((literal_name(k), v) for (k, v) in choplist(2, objs) if v is not None)
                    self.push((pos, d))
                except PSTypeError:
                    if STRICT:
                        raise
            elif token == KEYWORD_PROC_BEGIN:
                # begin proc
                self.start_type(pos, 'p')
            elif token == KEYWORD_PROC_END:
                # end proc
                try:
                    self.push(self.end_type('p'))
                except PSTypeError:
                    if STRICT:
                        raise
            else:
                if 2 <= self.debug:
                    print >>sys.stderr, 'do_keyword: pos=%r, token=%r, stack=%r' % \
                          (pos, token, self.curstack)
                self.do_keyword(pos, token)
            if self.context:
                continue
            else:
                # Top level reached: let flush() move objects to results.
                self.flush()
        obj = self.results.pop(0)
        if 2 <= self.debug:
            print >>sys.stderr, 'nextobject: %r' % (obj,)
        return obj
import unittest
## Simplistic Test cases
##
class TestPSBaseParser(unittest.TestCase):
    """Self-test: tokenize and parse TESTDATA, comparing the output
    against the expected TOKENS and OBJS sequences."""
    TESTDATA = r'''%!PS
begin end
 "  @ #
/a/BCD /Some_Name /foo#5f#xbaa
0 +1 -2 .5 1.234
(abc) () (abc ( def ) ghi)
(def\040\0\0404ghi) (bach\\slask) (foo\nbaa)
(this % is not a comment.)
(foo
baa)
(foo\
baa)
<> <20> < 40 4020 >
<abcd00
12345>
func/a/b{(c)do*}def
[ 1 (z) ! ]
<< /foo (bar) >>
'''
    # Expected (position, token) pairs for the raw tokenizer.
    TOKENS = [
      (5, KWD('begin')), (11, KWD('end')), (16, KWD('"')), (19, KWD('@')),
      (21, KWD('#')), (23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
      (41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
      (65, 1.234), (71, 'abc'), (77, ''), (80, 'abc ( def ) ghi'),
      (98, 'def \x00 4ghi'), (118, 'bach\\slask'), (132, 'foo\nbaa'),
      (143, 'this % is not a comment.'), (170, 'foo\nbaa'), (180, 'foobaa'),
      (191, ''), (194, ' '), (199, '@@ '), (211, '\xab\xcd\x00\x124\x05'),
      (226, KWD('func')), (230, LIT('a')), (232, LIT('b')),
      (234, KWD('{')), (235, 'c'), (238, KWD('do*')), (241, KWD('}')),
      (242, KWD('def')), (246, KWD('[')), (248, 1), (250, 'z'), (254, KWD('!')),
      (256, KWD(']')), (258, KWD('<<')), (261, LIT('foo')), (266, 'bar'),
      (272, KWD('>>'))
    ]
    # Expected (position, object) pairs for the stack parser.
    OBJS = [
      (23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
      (41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
      (65, 1.234), (71, 'abc'), (77, ''), (80, 'abc ( def ) ghi'),
      (98, 'def \x00 4ghi'), (118, 'bach\\slask'), (132, 'foo\nbaa'),
      (143, 'this % is not a comment.'), (170, 'foo\nbaa'), (180, 'foobaa'),
      (191, ''), (194, ' '), (199, '@@ '), (211, '\xab\xcd\x00\x124\x05'),
      (230, LIT('a')), (232, LIT('b')), (234, ['c']), (246, [1, 'z']),
      (258, {'foo': 'bar'}),
    ]
    def get_tokens(self, s):
        # Run the raw tokenizer over s and collect tokens until PSEOF.
        import StringIO
        class MyParser(PSBaseParser):
            def flush(self):
                self.add_results(*self.popall())
        parser = MyParser(StringIO.StringIO(s))
        r = []
        try:
            while 1:
                r.append(parser.nexttoken())
        except PSEOF:
            pass
        return r
    def get_objects(self, s):
        # Run the stack parser over s and collect objects until PSEOF.
        import StringIO
        class MyParser(PSStackParser):
            def flush(self):
                self.add_results(*self.popall())
        parser = MyParser(StringIO.StringIO(s))
        r = []
        try:
            while 1:
                r.append(parser.nextobject())
        except PSEOF:
            pass
        return r
    def test_1(self):
        # Tokenizer output must match the expected TOKENS exactly.
        tokens = self.get_tokens(self.TESTDATA)
        print tokens
        self.assertEqual(tokens, self.TOKENS)
        return
    def test_2(self):
        # Stack-parser output must match the expected OBJS exactly.
        objs = self.get_objects(self.TESTDATA)
        print objs
        self.assertEqual(objs, self.OBJS)
        return
# Run the module's self-tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| 27.582048 | 99 | 0.501119 |
import sys
import re
from utils import choplist
STRICT = 0
on(Exception):
pass
class PSEOF(PSException):
pass
class PSSyntaxError(PSException):
pass
class PSTypeError(PSException):
pass
class PSValueError(PSException):
pass
ase class for all PS or PDF-related data types."""
pass
eral(PSObject):
"""A class that represents a PostScript literal.
Postscript literals are used as identifiers, such as
variable names, property names and dictionary keys.
Literals are case sensitive and denoted by a preceding
slash sign (e.g. "/Name")
Note: Do not create an instance of PSLiteral directly.
Always use PSLiteralTable.intern().
"""
def __init__(self, name):
self.name = name
return
def __repr__(self):
return '/%s' % self.name
word(PSObject):
"""A class that represents a PostScript keyword.
PostScript keywords are a dozen of predefined words.
Commands and directives in PostScript are expressed by keywords.
They are also used to denote the content boundaries.
Note: Do not create an instance of PSKeyword directly.
Always use PSKeywordTable.intern().
"""
def __init__(self, name):
self.name = name
return
def __repr__(self):
return self.name
able(object):
"""A utility class for storing PSLiteral/PSKeyword objects.
Interned objects can be checked its identity with "is" operator.
"""
def __init__(self, klass):
self.dict = {}
self.klass = klass
return
def intern(self, name):
if name in self.dict:
lit = self.dict[name]
else:
lit = self.klass(name)
self.dict[name] = lit
return lit
PSLiteralTable = PSSymbolTable(PSLiteral)
PSKeywordTable = PSSymbolTable(PSKeyword)
LIT = PSLiteralTable.intern
KWD = PSKeywordTable.intern
KEYWORD_PROC_BEGIN = KWD('{')
KEYWORD_PROC_END = KWD('}')
KEYWORD_ARRAY_BEGIN = KWD('[')
KEYWORD_ARRAY_END = KWD(']')
KEYWORD_DICT_BEGIN = KWD('<<')
KEYWORD_DICT_END = KWD('>>')
def literal_name(x):
if not isinstance(x, PSLiteral):
if STRICT:
raise PSTypeError('Literal required: %r' % x)
else:
return str(x)
return x.name
def keyword_name(x):
if not isinstance(x, PSKeyword):
if STRICT:
raise PSTypeError('Keyword required: %r' % x)
else:
return str(x)
return x.name
le(r'[\r\n]')
SPC = re.compile(r'\s')
NONSPC = re.compile(r'\S')
HEX = re.compile(r'[0-9a-fA-F]')
END_LITERAL = re.compile(r'[#/%\[\]()<>{}\s]')
END_HEX_STRING = re.compile(r'[^\s0-9a-fA-F]')
HEX_PAIR = re.compile(r'[0-9a-fA-F]{2}|.')
END_NUMBER = re.compile(r'[^0-9]')
END_KEYWORD = re.compile(r'[#/%\[\]()<>{}\s]')
END_STRING = re.compile(r'[()\134]')
OCT_STRING = re.compile(r'[0-7]')
ESC_STRING = {'b': 8, 't': 9, 'n': 10, 'f': 12, 'r': 13, '(': 40, ')': 41, '\\': 92}
class PSBaseParser(object):
"""Most basic PostScript parser that performs only tokenization.
"""
BUFSIZ = 4096
debug = 0
def __init__(self, fp):
self.fp = fp
self.seek(0)
return
def __repr__(self):
return '<%s: %r, bufpos=%d>' % (self.__class__.__name__, self.fp, self.bufpos)
def flush(self):
return
def close(self):
self.flush()
return
def tell(self):
return self.bufpos+self.charpos
def poll(self, pos=None, n=80):
pos0 = self.fp.tell()
if not pos:
pos = self.bufpos+self.charpos
self.fp.seek(pos)
print >>sys.stderr, 'poll(%d): %r' % (pos, self.fp.read(n))
self.fp.seek(pos0)
return
def seek(self, pos):
"""Seeks the parser to the given position.
"""
if 2 <= self.debug:
print >>sys.stderr, 'seek: %r' % pos
self.fp.seek(pos)
self.bufpos = pos
self.buf = ''
self.charpos = 0
self._parse1 = self._parse_main
self._curtoken = ''
self._curtokenpos = 0
self._tokens = []
return
def fillbuf(self):
if self.charpos < len(self.buf):
return
self.bufpos = self.fp.tell()
self.buf = self.fp.read(self.BUFSIZ)
if not self.buf:
raise PSEOF('Unexpected EOF')
self.charpos = 0
return
def nextline(self):
"""Fetches a next line that ends either with \\r or \\n.
"""
linebuf = ''
linepos = self.bufpos + self.charpos
eol = False
while 1:
self.fillbuf()
if eol:
c = self.buf[self.charpos]
if c == '\n':
linebuf += c
self.charpos += 1
break
m = EOL.search(self.buf, self.charpos)
if m:
linebuf += self.buf[self.charpos:m.end(0)]
self.charpos = m.end(0)
if linebuf[-1] == '\r':
eol = True
else:
break
else:
linebuf += self.buf[self.charpos:]
self.charpos = len(self.buf)
if 2 <= self.debug:
print >>sys.stderr, 'nextline: %r' % ((linepos, linebuf),)
return (linepos, linebuf)
    def revreadlines(self):
        """Fetches lines backwards, starting from the end of the file.
        This is used to locate the trailers at the end of a file.
        Yields chunks that each begin at a '\\r' or '\\n' boundary.
        """
        # Start from EOF and walk back one BUFSIZ window at a time.
        self.fp.seek(0, 2)
        pos = self.fp.tell()
        buf = ''
        while 0 < pos:
            prevpos = pos
            pos = max(0, pos-self.BUFSIZ)
            self.fp.seek(pos)
            s = self.fp.read(prevpos-pos)
            if not s:
                break
            while 1:
                n = max(s.rfind('\r'), s.rfind('\n'))
                if n == -1:
                    # No EOL left in this window: prepend it to the carry
                    # buffer and read further back.
                    buf = s + buf
                    break
                # Emit from the last EOL to the end of what we have so far.
                yield s[n:]+buf
                s = s[:n]
                buf = ''
        return
def _parse_main(self, s, i):
m = NONSPC.search(s, i)
if not m:
return len(s)
j = m.start(0)
c = s[j]
self._curtokenpos = self.bufpos+j
if c == '%':
self._curtoken = '%'
self._parse1 = self._parse_comment
return j+1
elif c == '/':
self._curtoken = ''
self._parse1 = self._parse_literal
return j+1
elif c in '-+' or c.isdigit():
self._curtoken = c
self._parse1 = self._parse_number
return j+1
elif c == '.':
self._curtoken = c
self._parse1 = self._parse_float
return j+1
elif c.isalpha():
self._curtoken = c
self._parse1 = self._parse_keyword
return j+1
elif c == '(':
self._curtoken = ''
self.paren = 1
self._parse1 = self._parse_string
return j+1
elif c == '<':
self._curtoken = ''
self._parse1 = self._parse_wopen
return j+1
elif c == '>':
self._curtoken = ''
self._parse1 = self._parse_wclose
return j+1
else:
self._add_token(KWD(c))
return j+1
def _add_token(self, obj):
self._tokens.append((self._curtokenpos, obj))
return
def _parse_comment(self, s, i):
m = EOL.search(s, i)
if not m:
self._curtoken += s[i:]
return (self._parse_comment, len(s))
j = m.start(0)
self._curtoken += s[i:j]
self._parse1 = self._parse_main
return j
def _parse_literal(self, s, i):
m = END_LITERAL.search(s, i)
if not m:
self._curtoken += s[i:]
return len(s)
j = m.start(0)
self._curtoken += s[i:j]
c = s[j]
if c == '#':
self.hex = ''
self._parse1 = self._parse_literal_hex
return j+1
self._add_token(LIT(self._curtoken))
self._parse1 = self._parse_main
return j
def _parse_literal_hex(self, s, i):
c = s[i]
if HEX.match(c) and len(self.hex) < 2:
self.hex += c
return i+1
if self.hex:
self._curtoken += chr(int(self.hex, 16))
self._parse1 = self._parse_literal
return i
def _parse_number(self, s, i):
m = END_NUMBER.search(s, i)
if not m:
self._curtoken += s[i:]
return len(s)
j = m.start(0)
self._curtoken += s[i:j]
c = s[j]
if c == '.':
self._curtoken += c
self._parse1 = self._parse_float
return j+1
try:
self._add_token(int(self._curtoken))
except ValueError:
pass
self._parse1 = self._parse_main
return j
def _parse_float(self, s, i):
m = END_NUMBER.search(s, i)
if not m:
self._curtoken += s[i:]
return len(s)
j = m.start(0)
self._curtoken += s[i:j]
try:
self._add_token(float(self._curtoken))
except ValueError:
pass
self._parse1 = self._parse_main
return j
def _parse_keyword(self, s, i):
m = END_KEYWORD.search(s, i)
if not m:
self._curtoken += s[i:]
return len(s)
j = m.start(0)
self._curtoken += s[i:j]
if self._curtoken == 'true':
token = True
elif self._curtoken == 'false':
token = False
else:
token = KWD(self._curtoken)
self._add_token(token)
self._parse1 = self._parse_main
return j
def _parse_string(self, s, i):
m = END_STRING.search(s, i)
if not m:
self._curtoken += s[i:]
return len(s)
j = m.start(0)
self._curtoken += s[i:j]
c = s[j]
if c == '\\':
self.oct = ''
self._parse1 = self._parse_string_1
return j+1
if c == '(':
self.paren += 1
self._curtoken += c
return j+1
if c == ')':
self.paren -= 1
if self.paren:
self._curtoken += c
return j+1
self._add_token(self._curtoken)
self._parse1 = self._parse_main
return j+1
def _parse_string_1(self, s, i):
c = s[i]
if OCT_STRING.match(c) and len(self.oct) < 3:
self.oct += c
return i+1
if self.oct:
self._curtoken += chr(int(self.oct, 8))
self._parse1 = self._parse_string
return i
if c in ESC_STRING:
self._curtoken += chr(ESC_STRING[c])
self._parse1 = self._parse_string
return i+1
def _parse_wopen(self, s, i):
c = s[i]
if c == '<':
self._add_token(KEYWORD_DICT_BEGIN)
self._parse1 = self._parse_main
i += 1
else:
self._parse1 = self._parse_hexstring
return i
def _parse_wclose(self, s, i):
c = s[i]
if c == '>':
self._add_token(KEYWORD_DICT_END)
i += 1
self._parse1 = self._parse_main
return i
def _parse_hexstring(self, s, i):
m = END_HEX_STRING.search(s, i)
if not m:
self._curtoken += s[i:]
return len(s)
j = m.start(0)
self._curtoken += s[i:j]
token = HEX_PAIR.sub(lambda m: chr(int(m.group(0), 16)),
SPC.sub('', self._curtoken))
self._add_token(token)
self._parse1 = self._parse_main
return j
    def nexttoken(self):
        """Return the next (position, token) pair; raises PSEOF at end of input."""
        # Pump the state-machine handler until it has produced at least one
        # token; each handler returns the new scan position.
        while not self._tokens:
            self.fillbuf()
            self.charpos = self._parse1(self.buf, self.charpos)
        token = self._tokens.pop(0)
        if 2 <= self.debug:
            print >>sys.stderr, 'nexttoken: %r' % (token,)
        return token
rser(PSBaseParser):
def __init__(self, fp):
PSBaseParser.__init__(self, fp)
self.reset()
return
def reset(self):
self.context = []
self.curtype = None
self.curstack = []
self.results = []
return
def seek(self, pos):
PSBaseParser.seek(self, pos)
self.reset()
return
def push(self, *objs):
self.curstack.extend(objs)
return
def pop(self, n):
objs = self.curstack[-n:]
self.curstack[-n:] = []
return objs
def popall(self):
objs = self.curstack
self.curstack = []
return objs
def add_results(self, *objs):
if 2 <= self.debug:
print >>sys.stderr, 'add_results: %r' % (objs,)
self.results.extend(objs)
return
def start_type(self, pos, type):
self.context.append((pos, self.curtype, self.curstack))
(self.curtype, self.curstack) = (type, [])
if 2 <= self.debug:
print >>sys.stderr, 'start_type: pos=%r, type=%r' % (pos, type)
return
def end_type(self, type):
if self.curtype != type:
raise PSTypeError('Type mismatch: %r != %r' % (self.curtype, type))
objs = [obj for (_, obj) in self.curstack]
(pos, self.curtype, self.curstack) = self.context.pop()
if 2 <= self.debug:
print >>sys.stderr, 'end_type: pos=%r, type=%r, objs=%r' % (pos, type, objs)
return (pos, objs)
def do_keyword(self, pos, token):
return
def nextobject(self):
"""Yields a list of objects.
Returns keywords, literals, strings, numbers, arrays and dictionaries.
Arrays and dictionaries are represented as Python lists and dictionaries.
"""
while not self.results:
(pos, token) = self.nexttoken()
if (isinstance(token, int) or
isinstance(token, float) or
isinstance(token, bool) or
isinstance(token, str) or
isinstance(token, PSLiteral)):
self.push((pos, token))
elif token == KEYWORD_ARRAY_BEGIN:
self.start_type(pos, 'a')
elif token == KEYWORD_ARRAY_END:
try:
self.push(self.end_type('a'))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_DICT_BEGIN:
self.start_type(pos, 'd')
elif token == KEYWORD_DICT_END:
try:
(pos, objs) = self.end_type('d')
if len(objs) % 2 != 0:
raise PSSyntaxError('Invalid dictionary construct: %r' % objs)
d = dict((literal_name(k), v) for (k, v) in choplist(2, objs) if v is not None)
self.push((pos, d))
except PSTypeError:
if STRICT:
raise
elif token == KEYWORD_PROC_BEGIN:
self.start_type(pos, 'p')
elif token == KEYWORD_PROC_END:
try:
self.push(self.end_type('p'))
except PSTypeError:
if STRICT:
raise
else:
if 2 <= self.debug:
print >>sys.stderr, 'do_keyword: pos=%r, token=%r, stack=%r' % \
(pos, token, self.curstack)
self.do_keyword(pos, token)
if self.context:
continue
else:
self.flush()
obj = self.results.pop(0)
if 2 <= self.debug:
print >>sys.stderr, 'nextobject: %r' % (obj,)
return obj
import unittest
unittest.TestCase):
TESTDATA = r'''%!PS
begin end
" @ #
/a/BCD /Some_Name /foo#5f#xbaa
0 +1 -2 .5 1.234
(abc) () (abc ( def ) ghi)
(def\040\0\0404ghi) (bach\\slask) (foo\nbaa)
(this % is not a comment.)
(foo
baa)
(foo\
baa)
<> <20> < 40 4020 >
<abcd00
12345>
func/a/b{(c)do*}def
[ 1 (z) ! ]
<< /foo (bar) >>
'''
TOKENS = [
(5, KWD('begin')), (11, KWD('end')), (16, KWD('"')), (19, KWD('@')),
(21, KWD('#')), (23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, 'abc'), (77, ''), (80, 'abc ( def ) ghi'),
(98, 'def \x00 4ghi'), (118, 'bach\\slask'), (132, 'foo\nbaa'),
(143, 'this % is not a comment.'), (170, 'foo\nbaa'), (180, 'foobaa'),
(191, ''), (194, ' '), (199, '@@ '), (211, '\xab\xcd\x00\x124\x05'),
(226, KWD('func')), (230, LIT('a')), (232, LIT('b')),
(234, KWD('{')), (235, 'c'), (238, KWD('do*')), (241, KWD('}')),
(242, KWD('def')), (246, KWD('[')), (248, 1), (250, 'z'), (254, KWD('!')),
(256, KWD(']')), (258, KWD('<<')), (261, LIT('foo')), (266, 'bar'),
(272, KWD('>>'))
]
OBJS = [
(23, LIT('a')), (25, LIT('BCD')), (30, LIT('Some_Name')),
(41, LIT('foo_xbaa')), (54, 0), (56, 1), (59, -2), (62, 0.5),
(65, 1.234), (71, 'abc'), (77, ''), (80, 'abc ( def ) ghi'),
(98, 'def \x00 4ghi'), (118, 'bach\\slask'), (132, 'foo\nbaa'),
(143, 'this % is not a comment.'), (170, 'foo\nbaa'), (180, 'foobaa'),
(191, ''), (194, ' '), (199, '@@ '), (211, '\xab\xcd\x00\x124\x05'),
(230, LIT('a')), (232, LIT('b')), (234, ['c']), (246, [1, 'z']),
(258, {'foo': 'bar'}),
]
def get_tokens(self, s):
import StringIO
class MyParser(PSBaseParser):
def flush(self):
self.add_results(*self.popall())
parser = MyParser(StringIO.StringIO(s))
r = []
try:
while 1:
r.append(parser.nexttoken())
except PSEOF:
pass
return r
def get_objects(self, s):
import StringIO
class MyParser(PSStackParser):
def flush(self):
self.add_results(*self.popall())
parser = MyParser(StringIO.StringIO(s))
r = []
try:
while 1:
r.append(parser.nextobject())
except PSEOF:
pass
return r
def test_1(self):
tokens = self.get_tokens(self.TESTDATA)
print tokens
self.assertEqual(tokens, self.TOKENS)
return
def test_2(self):
objs = self.get_objects(self.TESTDATA)
print objs
self.assertEqual(objs, self.OBJS)
return
if __name__ == '__main__':
unittest.main()
| false | true |
f71fc26c9b8a2256095ae1cb28a68d64d9c6fef3 | 20,784 | py | Python | gym_minigrid/extendedminigrid.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | null | null | null | gym_minigrid/extendedminigrid.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | 6 | 2021-03-18T21:24:56.000Z | 2022-03-11T23:34:25.000Z | gym_minigrid/extendedminigrid.py | pierg/wiseml-patterns | 2decf2954001296bd04261b00ae144f53359a2b8 | [
"BSD-3-Clause"
] | null | null | null | from gym_minigrid.minigrid import *
from configurations import config_grabber as cg
import math
import operator
from functools import reduce
import traceback
import numpy as np
config = cg.Configuration.grab()
AGENT_VIEW_SIZE = config.agent_view_size
EXTRA_OBSERVATIONS_SIZE = 5
OBS_ARRAY_SIZE = (AGENT_VIEW_SIZE, AGENT_VIEW_SIZE)
def extended_dic(obj_names=None):
    """
    Extend the OBJECT_TO_IDX dictionary with additional objects.

    New names are assigned indices following the current largest index;
    names already present are left untouched.

    :param obj_names: iterable of object-type name strings (default: none)
    :return: None -- OBJECT_TO_IDX is updated in place
    """
    # Fix: the original used a mutable default argument (obj_names=[]).
    if obj_names is None:
        obj_names = ()
    # Next free index is one past the largest index currently in use.
    # (The original seeded this with the last dict value and then scanned
    # all values manually; max() does the same in one step.)
    new_obj_idx = max(OBJECT_TO_IDX.values()) + 1
    for obj_name in obj_names:
        if obj_name not in OBJECT_TO_IDX:
            OBJECT_TO_IDX[obj_name] = new_obj_idx
            new_obj_idx += 1
extended_dic(["water", "lightsw", "dirt", "vase"])
IDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))
class Room:
    """A rectangular region of the grid with a toggleable light."""

    def __init__(self, room, size, position, lightOn):
        # Attribute names are part of the public surface: other objects
        # (e.g. LightSwitch) read them directly.
        self.number = room
        self.size = size
        self.position = position
        self.lightOn = lightOn

    def setLight(self, lightOn):
        self.lightOn = lightOn

    def setEntryDoor(self, position):
        self.entryDoor = position

    def setExitDoor(self, position):
        self.exitDoor = position

    def getLight(self):
        return self.lightOn

    def objectInRoom(self, position):
        """Return True when *position* falls inside this room (bounds inclusive)."""
        px, py = position
        width, height = self.size
        left, top = self.position
        return left <= px <= left + width and top <= py <= top + height
class Water(WorldObj):
    """A hazardous floor tile the agent can step onto."""

    def __init__(self):
        super(Water, self).__init__('water', 'blue')

    def can_overlap(self):
        # The agent may walk onto water; the environment decides the
        # consequence of doing so.
        return True

    def render(self, r):
        """Fill the whole cell with this object's color."""
        self._set_color(r)
        full = CELL_PIXELS
        r.drawPolygon([(0, full), (full, full), (full, 0), (0, 0)])
class LightSwitch(WorldObj):
    """A wall switch that toggles the light of an associated Room."""

    def __init__(self):
        # Current switch state; kept in sync with the room by toggle().
        self.is_on = False
        super(LightSwitch, self).__init__('lightsw', 'yellow')

    def affectRoom(self, room):
        """Bind the Room whose light this switch controls."""
        self.room = room

    def setSwitchPos(self, position):
        self.position = position

    def elements_in_room(self, room):
        # Sequence of (x, y, flag) entries; flagged cells are blacked out
        # while the room light is off.
        self.elements = room

    def toggle(self, env, pos):
        """Flip both the room's light and the switch's own state."""
        self.room.setLight(not self.room.getLight())
        self.is_on = not self.is_on
        return True

    def getRoomNumber(self):
        return self.room.number

    def can_overlap(self):
        return False

    def render(self, r):
        self._set_color(r)
        r.drawPolygon([
            (0, CELL_PIXELS),
            (CELL_PIXELS, CELL_PIXELS),
            (CELL_PIXELS, 0),
            (0, 0)
        ])
        self.dark_light(r)

    def dark_light(self, r):
        """Draw the on/off indicator dot and black out flagged cells when off."""
        if not self.room.getLight():
            # Red dot: the controlled room is dark.
            r.setColor(255, 0, 0)
            r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
            if hasattr(self, 'cur_pos') and hasattr(self, 'elements'):
                (xl, yl) = self.cur_pos
                for elem in self.elements:
                    if elem[2] == 1:
                        # Paint the cell (relative to the switch) black.
                        r.setLineColor(10, 10, 10)
                        r.setColor(10, 10, 10)
                        r.drawPolygon([
                            ((elem[0] - xl) * CELL_PIXELS,
                             (elem[1] - yl + 1) * CELL_PIXELS),
                            ((elem[0] - xl + 1) * CELL_PIXELS,
                             (elem[1] - yl + 1) * CELL_PIXELS),
                            ((elem[0] - xl + 1) * CELL_PIXELS,
                             (elem[1] - yl) * CELL_PIXELS),
                            ((elem[0] - xl) * CELL_PIXELS,
                             (elem[1] - yl) * CELL_PIXELS)
                        ])
        else:
            # Green dot: the room is lit.
            r.setColor(0, 255, 0)
            r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
        # Removed the trailing "r.pop" statement: it accessed the bound
        # method without calling it, so it had no effect (and there was no
        # matching r.push to pair with a real pop() anyway).
class Dirt(WorldObj):
    """Dirt spilled on the floor; removed via the agent's toggle action."""

    def __init__(self):
        super(Dirt, self).__init__('dirt', 'yellow')

    def can_overlap(self):
        return True

    def affect_list(self, list):
        # Shared bookkeeping list of all Dirt tiles currently on the grid.
        self.list = list

    def toggle(self, env, pos):
        """Clean up: empty the cell in front of the agent and drop one entry."""
        x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
        env.grid.set(x, y, None)
        self.list.pop()
        return True

    def render(self, r):
        self._set_color(r)
        r.setColor(240, 150, 0)
        r.setLineColor(81, 41, 0)
        corner = CELL_PIXELS
        r.drawPolygon([(0, corner), (corner, corner), (corner, 0), (0, 0)])
class Vase(WorldObj):
    """A breakable vase; toggling it spills its Dirt content on the grid."""

    def __init__(self):
        super(Vase, self).__init__('vase', 'grey')
        self.content = Dirt()
        self.list = []

    def can_overlap(self):
        return False

    def toggle(self, env, pos):
        """Break the vase: place its dirt in front of the agent and record it."""
        x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
        env.grid.set(x, y, self.content)
        self.list.append(Dirt())
        self.content.affect_list(self.list)

    def render(self, r):
        self._set_color(r)
        quarter = 0.25 * CELL_PIXELS
        half = 0.5 * CELL_PIXELS
        # White body of the vase.
        r.setColor(255, 255, 255)
        r.drawCircle(half, half, half)
        r.drawPolygon([
            (quarter, 3 * quarter),
            (3 * quarter, 3 * quarter),
            (3 * quarter, quarter),
            (quarter, quarter)
        ])
        # Orange dirt visible inside.
        r.setColor(240, 150, 0)
        r.drawPolygon([
            (0.32 * CELL_PIXELS, 0.7 * CELL_PIXELS),
            (0.7 * CELL_PIXELS, 0.7 * CELL_PIXELS),
            (0.7 * CELL_PIXELS, 0.32 * CELL_PIXELS),
            (0.32 * CELL_PIXELS, 0.32 * CELL_PIXELS)
        ])

    def list_dirt(self, list):
        self.list = list
def worldobj_name_to_object(worldobj_name):
    """Instantiate a world object from its type name; None for unknown names."""
    factories = {
        'water': Water,
        'wall': Wall,
        'lightsw': LightSwitch,
        'dirt': Dirt,
        'vase': Vase,
        'goal': Goal,
    }
    factory = factories.get(worldobj_name)
    return factory() if factory is not None else None
class ExGrid(Grid):
    """
    Extending Grid methods to support the new objects
    (water, light switches, dirt, vases).
    """
    # Add new worldobj that need to be decoded (Ex. water)
    def decode(array):
        """
        Decode an array grid encoding back into a grid.
        Implicitly static -- call as ExGrid.decode(array).  The encoding is
        assumed to be a flattened square grid (side = sqrt(flat length)),
        indexed as array[i, j, (typeIdx, colorIdx, openIdx)].
        """
        flatten_dim = array.shape[0]
        width = int(math.sqrt(flatten_dim))
        height = width
        # width = array.shape[0]
        # height = array.shape[1]
        grid = ExGrid(width, height)
        for j in range(0, height):
            for i in range(0, width):
                typeIdx = array[i, j, 0]
                colorIdx = array[i, j, 1]
                openIdx = array[i, j, 2]
                if typeIdx == 0:
                    # Type index 0 encodes an empty cell.
                    continue
                objType = IDX_TO_OBJECT[typeIdx]
                color = IDX_TO_COLOR[colorIdx]
                is_open = True if openIdx == 1 else 0
                if objType == 'wall':
                    v = Wall(color)
                elif objType == 'ball':
                    v = Ball(color)
                elif objType == 'key':
                    v = Key(color)
                elif objType == 'box':
                    v = Box(color)
                elif objType == 'door':
                    v = Door(color, is_open)
                elif objType == 'locked_door':
                    v = LockedDoor(color, is_open)
                elif objType == 'goal':
                    v = Goal()
                elif objType == 'water':
                    v = Water()
                elif objType == 'lightsw':
                    v = LightSwitch()
                elif objType == 'dirt':
                    v = Dirt()
                elif objType == 'vase':
                    v = Vase()
                else:
                    assert False, "unknown obj type in decode '%s'" % objType
                grid.set(i, j, v)
        return grid
class ExMiniGridEnv(MiniGridEnv):
# Enumeration of possible actions
class Actions(IntEnum):
# Used to observe the environment in the step() before the action
observe = -1
# Action space
left = 0
right = 1
forward = 2
toggle = 3
# Extra action (not used)
pickup = 4
drop = 5
done = 6
clean = 7
def print_grid(self, grid):
for i, e in enumerate(grid.grid):
if i % grid.height == 0:
print("")
if e is not None:
print(str(e.type), end="\t")
else:
print("none", end="\t")
print("")
def strings_to_actions(self, actions):
for i, action_name in enumerate(actions):
if action_name == "left":
actions[i] = self.actions.left
elif action_name == "right":
actions[i] = self.actions.right
elif action_name == "forward":
actions[i] = self.actions.forward
elif action_name == "toggle":
actions[i] = self.actions.toggle
elif action_name == "done":
actions[i] = self.actions.done
elif action_name == "clean":
actions[i] = self.actions.clean
elif action_name == "observe":
actions[i] = self.actions.observe
return actions
def action_to_string(self, action):
if action == self.actions.left:
return "left"
elif action == self.actions.right:
return "right"
elif action == self.actions.forward:
return "forward"
elif action == self.actions.toggle:
return "toggle"
elif action == self.actions.done:
return "done"
elif action == self.actions.clean:
return "clean"
elif action == self.actions.observe:
return "observe"
return None
    def __init__(self, grid_size=16, max_steps=-1, see_through_walls=False, seed=1337):
        """
        Extended MiniGrid environment: config-driven step limit, flattened
        Box observation space, restricted Discrete action space.
        """
        # Grab configuration
        self.config = cg.Configuration.grab()
        # Overriding the max_num_steps with the configured value, if present
        max_num_steps = max_steps
        if hasattr(self.config, 'max_num_steps'):
            max_num_steps = self.config.max_num_steps
        super().__init__(grid_size, max_num_steps, see_through_walls, seed)
        self.actions = ExMiniGridEnv.Actions
        """
        Observation Space
        low: lowest element value
        high: highest element value
        shape: imgSize tuple, each element can be of a value between 'low' and 'high'
        """
        # Flattened agent view plus the extra observations appended by gen_obs().
        imgSize = reduce(operator.mul, OBS_ARRAY_SIZE, 1) + EXTRA_OBSERVATIONS_SIZE
        elemSize = len(IDX_TO_OBJECT)
        self.observation_space = spaces.Box(
            low=0,
            high=elemSize,
            shape=(imgSize,),
            dtype='uint8'
        )
        # Restricting action_space to the first N actions
        # (left / right / forward / toggle).
        first_n_actions_available = 4
        self.action_space = spaces.Discrete(first_n_actions_available)
def step(self, action):
self.step_count += 1
reward = 0
done = False
info = {"event": [], "steps_count": self.step_count}
# Get the position in front of the agent
fwd_pos = self.front_pos
# Get the contents of the cell in front of the agent
fwd_cell = self.grid.get(*fwd_pos)
# Rotate left
if action == self.actions.left:
self.agent_dir -= 1
if self.agent_dir < 0:
self.agent_dir += 4
# Rotate right
elif action == self.actions.right:
self.agent_dir = (self.agent_dir + 1) % 4
# Move forward
elif action == self.actions.forward:
if fwd_cell == None or fwd_cell.can_overlap():
self.agent_pos = fwd_pos
# Step into Water
if fwd_cell is not None and fwd_cell.type == 'water':
done = True
reward = self.config.rewards.standard.death
info["event"].append("died")
if self.config.envelope:
print("DIED!! >>>>>>> Problems with envelope!")
# Step into Goal
elif fwd_cell is not None and fwd_cell.type == 'goal':
try:
if self.goal_enabled():
done = True
reward = self.config.rewards.standard.goal
# reward = self.config.rewards.standard.goal - 0.9 * (self.step_count / self.max_steps)
info["event"].append("goal")
except:
done = True
reward = self.config.rewards.standard.goal
# reward = self.config.rewards.standard.goal - 0.9 * (self.step_count / self.max_steps)
info["event"].append("goal")
else:
reward = self.config.rewards.actions.forward
# Pick up an object
elif action == self.actions.pickup:
if fwd_cell and fwd_cell.can_pickup():
if self.carrying is None:
self.carrying = fwd_cell
self.carrying.cur_pos = np.array([-1, -1])
self.grid.set(*fwd_pos, None)
# Drop an object
elif action == self.actions.drop:
if not fwd_cell and self.carrying:
self.grid.set(*fwd_pos, self.carrying)
self.carrying.cur_pos = fwd_pos
self.carrying = None
# Toggle/activate an object
elif action == self.actions.toggle:
if fwd_cell is not None and fwd_cell.type == 'dirt':
reward = self.config.rewards.cleaningenv.clean
if fwd_cell:
fwd_cell.toggle(self, fwd_pos)
# Done action (not used by default)
elif action == self.actions.done:
pass
else:
assert False, "unknown action"
# Adding reward for the step
reward += self.config.rewards.standard.step
if self.step_count == self.config.max_num_steps_episode:
done = True
obs = self.gen_obs()
if self.config.debug_mode: print("reward: " + str(reward) + "\tinfo: " + str(info))
return obs, reward, done, info
    def goal_enabled(self):
        """Return whether stepping on the goal tile should grant the reward.
        Abstract: concrete environments must override this.
        """
        raise NotImplementedError()
    def gen_obs_decoded(self):
        """
        Generate the agent's view (partially observable, low-resolution encoding).

        Returns (grid, extras) where extras is the 5-tuple
        (obs_door_open, obs_light_on, current_room, current_room_light,
        next_room_light).  Cells belonging to unlit rooms are blanked out.
        Returns None if an AttributeError occurs (e.g. no roomList).
        """
        grid, vis_mask = self.gen_obs_grid()
        if self.config.debug_mode:
            print("\nAgent View Original")
            self.print_grid(grid)
        """if Perception.light_on_current_room(self):"""
        try:
            # Agent cell inside its own view: bottom row, middle column.
            agent_pos = (AGENT_VIEW_SIZE // 2, AGENT_VIEW_SIZE - 1)
            obs_door_open = 0
            obs_light_on = 0
            current_room = 0
            current_room_light = 0
            next_room_light = 0
            if self.roomList:
                for x in self.roomList:
                    # Save room number
                    if x.objectInRoom(self.agent_pos):
                        current_room = x.number
                        current_room_light = x.getLight()
                    else:
                        # NOTE(review): with more than two rooms this keeps
                        # only the light state of the last "other" room.
                        next_room_light = x.getLight()
                    # check if room is on the dark
                    if not x.getLight():
                        for j in range(0, grid.height):
                            for i in range(0, grid.width):
                                # pass the obs coordinates (i, j) into the absolute grid coordinates (xpos, ypos).
                                xpos = agent_pos[1] - j
                                ypos = i - agent_pos[0]
                                (xpos, ypos) = self.get_grid_coords_from_view((xpos, ypos))
                                # check if the object position is on the room
                                if x.objectInRoom((xpos, ypos)):
                                    # Dark room: hide whatever is in the cell.
                                    if grid.grid[(j * AGENT_VIEW_SIZE) + i] is not None:
                                        grid.grid[i + (j * AGENT_VIEW_SIZE)] = None
            # Scan the (possibly darkened) view for open doors / lit switches.
            for j in range(0, grid.height):
                for i in range(0, grid.width):
                    v = grid.get(i, j)
                    if hasattr(v, 'is_open') and v.is_open:
                        obs_door_open = 1
                    if hasattr(v, 'is_on') and v.is_on:
                        obs_light_on = 1
            if self.config.debug_mode:
                print("\n\nobs_door_open\t\t" + str(obs_door_open))
                print("obs_light_on\t\t" + str(obs_light_on))
                print("current_room\t\t" + str(current_room))
                print("current_room_light\t" + str(current_room_light*1))
                print("next_room_light\t\t" + str(next_room_light*1) + "\n\n")
            return grid, (obs_door_open, obs_light_on, current_room, current_room_light*1, next_room_light*1)
        except AttributeError:
            # NOTE(review): swallowing the error makes this return None,
            # which callers unpack -- kept as-is to preserve behavior.
            traceback.print_exc()
            print("ERROR!!!")
def gen_obs(self):
"""
Generate the agent's view (partially observable, low-resolution encoding)
"""
grid, extra_observations = self.gen_obs_decoded()
if self.config.debug_mode:
print("\nAgent View Retreived")
self.print_grid(grid)
"""if Perception.light_on_current_room(self):"""
try:
array = np.zeros(shape=(grid.width, grid.height, 1), dtype='uint8')
obs_door_open = 0
obs_light_on = 0
for j in range(0, grid.height):
for i in range(0, grid.width):
v = grid.get(i, j)
if v == None:
continue
array[i, j, 0] = OBJECT_TO_IDX[v.type]
if hasattr(v, 'is_open') and v.is_open:
obs_door_open = 1
if hasattr(v, 'is_on') and v.is_on:
obs_light_on = 1
image = array
flatten_image = image.flatten()
obs = np.append(flatten_image, extra_observations)
return obs
except AttributeError:
traceback.print_exc()
print("ERROR!!!")
# return super().gen_obs()
def get_grid_coords_from_view(self, coordinates):
"""
Dual of "get_view_coords". Translate and rotate relative to the agent coordinates (i, j) into the
absolute grid coordinates.
Need to have tuples of integers for the position of the agent and its direction
:param coordinates: tuples of integers (vertical,horizontal) position from the agent relative to its position
:return : coordinates translated into the absolute grid coordinates.
"""
ax, ay = self.agent_pos
ad = self.agent_dir
x, y = coordinates
# agent facing down
if ad == 1:
ax -= y
ay += x
# agent facing right
elif ad == 0:
ax += x
ay += y
# agent facing left
elif ad == 2:
ax -= x
ay -= y
# agent facing up
elif ad == 3:
ax += y
ay -= x
return ax, ay
def worldobj_in_agent(self, front, side):
"""
Returns the type of the worldobject in the 'front' cells in front and 'side' cells right (positive) or left (negative)
with respect to the agent
:param front: integer representing the number of cells in front of the agent
:param side: integer, if positive represents the cells to the right, negative to the left of the agent
:return: string: worldobj type
"""
coordinates = (front, side)
wx, wy = ExMiniGridEnv.get_grid_coords_from_view(self, coordinates)
if 0 <= wx < self.grid.width and 0 <= wy < self.grid.height:
worldobj = self.grid.get(wx, wy)
if worldobj is not None:
worldobj_type = worldobj.type
return worldobj_type
return None
| 31.301205 | 126 | 0.520737 | from gym_minigrid.minigrid import *
from configurations import config_grabber as cg
import math
import operator
from functools import reduce
import traceback
import numpy as np
config = cg.Configuration.grab()
AGENT_VIEW_SIZE = config.agent_view_size
EXTRA_OBSERVATIONS_SIZE = 5
OBS_ARRAY_SIZE = (AGENT_VIEW_SIZE, AGENT_VIEW_SIZE)
def extended_dic(obj_names=[]):
biggest_idx = list(OBJECT_TO_IDX.values())[-1]
for key in OBJECT_TO_IDX.values():
if key > biggest_idx:
biggest_idx = key
new_obj_idx = biggest_idx + 1
for obj_name in obj_names:
if not obj_name in OBJECT_TO_IDX.keys():
OBJECT_TO_IDX.update({obj_name: new_obj_idx})
new_obj_idx = new_obj_idx + 1
extended_dic(["water", "lightsw", "dirt", "vase"])
IDX_TO_OBJECT = dict(zip(OBJECT_TO_IDX.values(), OBJECT_TO_IDX.keys()))
class Room:
    """A rectangular region of the grid with a toggleable light."""
    def __init__(self, room, size, position, lightOn):
        # room: identifying number; size: (width, height);
        # position: (x, y) of the top-left corner; lightOn: initial state.
        self.number = room
        self.size = size
        self.position = position
        self.lightOn = lightOn
    def setLight(self, lightOn):
        self.lightOn = lightOn
    def setEntryDoor(self, position):
        self.entryDoor = position
    def setExitDoor(self, position):
        self.exitDoor = position
    def getLight(self):
        return self.lightOn
    def objectInRoom(self, position):
        """Return True when *position* lies inside the room (bounds inclusive)."""
        ax, ay = position
        x, y = self.size
        k, l = self.position
        # (k, l)..(x, y) now spans the room's absolute bounding box.
        x += k
        y += l
        if ax <= x and ax >= k:
            if ay <= y and ay >= l:
                return True
        return False
class Water(WorldObj):
def __init__(self):
super(Water, self).__init__('water', 'blue')
def can_overlap(self):
return True
def render(self, r):
self._set_color(r)
r.drawPolygon([
(0, CELL_PIXELS),
(CELL_PIXELS, CELL_PIXELS),
(CELL_PIXELS, 0),
(0, 0)
])
class LightSwitch(WorldObj):
def __init__(self):
self.is_on = False
super(LightSwitch, self).__init__('lightsw', 'yellow')
def affectRoom(self, room):
self.room = room
def setSwitchPos(self, position):
self.position = position
def elements_in_room(self, room):
self.elements = room
def toggle(self, env, pos):
self.room.setLight(not self.room.getLight())
self.is_on = not self.is_on
return True
def getRoomNumber(self):
return self.room.number
def can_overlap(self):
return False
def render(self, r):
self._set_color(r)
r.drawPolygon([
(0, CELL_PIXELS),
(CELL_PIXELS, CELL_PIXELS),
(CELL_PIXELS, 0),
(0, 0)
])
self.dark_light(r)
def dark_light(self, r):
if self.room.getLight() == False:
r.setColor(255, 0, 0)
r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
if hasattr(self, 'cur_pos'):
if hasattr(self, 'elements'):
(xl, yl) = self.cur_pos
for i in range(0, len(self.elements)):
if self.elements[i][2] == 1:
r.setLineColor(10, 10, 10)
r.setColor(10, 10, 10)
r.drawPolygon([
(
(self.elements[i][0] - xl) * CELL_PIXELS,
(self.elements[i][1] - yl + 1) * CELL_PIXELS),
((self.elements[i][0] - xl + 1) * CELL_PIXELS,
(self.elements[i][1] - yl + 1) * CELL_PIXELS),
(
(self.elements[i][0] - xl + 1) * CELL_PIXELS,
(self.elements[i][1] - yl) * CELL_PIXELS),
((self.elements[i][0] - xl) * CELL_PIXELS, (self.elements[i][1] - yl) * CELL_PIXELS)
])
else:
r.setColor(0, 255, 0)
r.drawCircle(0.5 * CELL_PIXELS, 0.5 * CELL_PIXELS, 0.2 * CELL_PIXELS)
r.pop
class Dirt(WorldObj):
def __init__(self):
super(Dirt, self).__init__('dirt', 'yellow')
def can_overlap(self):
return True
def affect_list(self, list):
self.list = list
def toggle(self, env, pos):
x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
env.grid.set(x, y, None)
del self.list[len(self.list) - 1]
return True
def render(self, r):
self._set_color(r)
r.setColor(240, 150, 0)
r.setLineColor(81, 41, 0)
r.drawPolygon([
(0, CELL_PIXELS),
(CELL_PIXELS, CELL_PIXELS),
(CELL_PIXELS, 0),
(0, 0)
])
class Vase(WorldObj):
def __init__(self):
super(Vase, self).__init__('vase', 'grey')
self.content = Dirt()
self.list = []
def can_overlap(self):
return False
def toggle(self, env, pos):
x, y = ExMiniGridEnv.get_grid_coords_from_view(env, (1, 0))
env.grid.set(x, y, self.content)
self.list.append(Dirt())
self.content.affect_list(self.list)
def render(self, r):
self._set_color(r)
r.setColor(255, 255, 255)
QUARTER_CELL = 0.25 * CELL_PIXELS
DEMI_CELL = 0.5 * CELL_PIXELS
r.drawCircle(DEMI_CELL, DEMI_CELL, DEMI_CELL)
r.drawPolygon([
(QUARTER_CELL, 3 * QUARTER_CELL),
(3 * QUARTER_CELL, 3 * QUARTER_CELL),
(3 * QUARTER_CELL, QUARTER_CELL),
(QUARTER_CELL, QUARTER_CELL)
])
r.setColor(240, 150, 0)
r.drawPolygon([
(0.32 * CELL_PIXELS, 0.7 * CELL_PIXELS),
(0.7 * CELL_PIXELS, 0.7 * CELL_PIXELS),
(0.7 * CELL_PIXELS, 0.32 * CELL_PIXELS),
(0.32 * CELL_PIXELS, 0.32 * CELL_PIXELS)
])
def list_dirt(self, list):
self.list = list
def worldobj_name_to_object(worldobj_name):
if worldobj_name == 'water':
return Water()
elif worldobj_name == 'wall':
return Wall()
elif worldobj_name == "lightsw":
return LightSwitch()
elif worldobj_name == "dirt":
return Dirt()
elif worldobj_name == "vase":
return Vase()
elif worldobj_name == "goal":
return Goal()
else:
return None
class ExGrid(Grid):
def decode(array):
flatten_dim = array.shape[0]
width = int(math.sqrt(flatten_dim))
height = width
grid = ExGrid(width, height)
for j in range(0, height):
for i in range(0, width):
typeIdx = array[i, j, 0]
colorIdx = array[i, j, 1]
openIdx = array[i, j, 2]
if typeIdx == 0:
continue
objType = IDX_TO_OBJECT[typeIdx]
color = IDX_TO_COLOR[colorIdx]
is_open = True if openIdx == 1 else 0
if objType == 'wall':
v = Wall(color)
elif objType == 'ball':
v = Ball(color)
elif objType == 'key':
v = Key(color)
elif objType == 'box':
v = Box(color)
elif objType == 'door':
v = Door(color, is_open)
elif objType == 'locked_door':
v = LockedDoor(color, is_open)
elif objType == 'goal':
v = Goal()
elif objType == 'water':
v = Water()
elif objType == 'lightsw':
v = LightSwitch()
elif objType == 'dirt':
v = Dirt()
elif objType == 'vase':
v = Vase()
else:
assert False, "unknown obj type in decode '%s'" % objType
grid.set(i, j, v)
return grid
class ExMiniGridEnv(MiniGridEnv):
    """MiniGrid environment extended with extra actions (clean/observe),
    room/light-aware observations, and rewards driven by the project
    configuration (``cg.Configuration.grab()``).
    """
    class Actions(IntEnum):
        # Full action vocabulary; only the first four non-negative codes
        # are exposed through the gym action space (see __init__).
        observe = -1
        left = 0
        right = 1
        forward = 2
        toggle = 3
        pickup = 4
        drop = 5
        done = 6
        clean = 7
    def print_grid(self, grid):
        """Debug helper: dump the flat grid as rows of cell-type names."""
        for i, e in enumerate(grid.grid):
            # grid.grid is a flat list; start a new row every `height` cells.
            if i % grid.height == 0:
                print("")
            if e is not None:
                print(str(e.type), end="\t")
            else:
                print("none", end="\t")
        print("")
    def strings_to_actions(self, actions):
        """Translate action names to Actions codes, in place.

        Unrecognised names are left unchanged; the same list is returned.
        """
        for i, action_name in enumerate(actions):
            if action_name == "left":
                actions[i] = self.actions.left
            elif action_name == "right":
                actions[i] = self.actions.right
            elif action_name == "forward":
                actions[i] = self.actions.forward
            elif action_name == "toggle":
                actions[i] = self.actions.toggle
            elif action_name == "done":
                actions[i] = self.actions.done
            elif action_name == "clean":
                actions[i] = self.actions.clean
            elif action_name == "observe":
                actions[i] = self.actions.observe
        return actions
    def action_to_string(self, action):
        """Inverse of strings_to_actions for a single code.

        Returns None for codes without a name here (e.g. pickup/drop).
        """
        if action == self.actions.left:
            return "left"
        elif action == self.actions.right:
            return "right"
        elif action == self.actions.forward:
            return "forward"
        elif action == self.actions.toggle:
            return "toggle"
        elif action == self.actions.done:
            return "done"
        elif action == self.actions.clean:
            return "clean"
        elif action == self.actions.observe:
            return "observe"
        return None
    def __init__(self, grid_size=16, max_steps=-1, see_through_walls=False, seed=1337):
        # The configuration may override the step limit passed in.
        self.config = cg.Configuration.grab()
        max_num_steps = max_steps
        if hasattr(self.config, 'max_num_steps'):
            max_num_steps = self.config.max_num_steps
        super().__init__(grid_size, max_num_steps, see_through_walls, seed)
        self.actions = ExMiniGridEnv.Actions
        # Flattened image size plus the scalar extras appended by gen_obs().
        imgSize = reduce(operator.mul, OBS_ARRAY_SIZE, 1) + EXTRA_OBSERVATIONS_SIZE
        elemSize = len(IDX_TO_OBJECT)
        self.observation_space = spaces.Box(
            low=0,
            high=elemSize,
            shape=(imgSize,),
            dtype='uint8'
        )
        # Agents may only choose left/right/forward/toggle.
        first_n_actions_available = 4
        self.action_space = spaces.Discrete(first_n_actions_available)
    def step(self, action):
        """Advance one time step; returns (obs, reward, done, info).

        Rewards come from the configuration tree; info["event"] records
        notable events ("died", "goal").
        """
        self.step_count += 1
        reward = 0
        done = False
        info = {"event": [], "steps_count": self.step_count}
        # Cell directly in front of the agent.
        fwd_pos = self.front_pos
        fwd_cell = self.grid.get(*fwd_pos)
        if action == self.actions.left:
            self.agent_dir -= 1
            if self.agent_dir < 0:
                self.agent_dir += 4
        elif action == self.actions.right:
            self.agent_dir = (self.agent_dir + 1) % 4
        elif action == self.actions.forward:
            if fwd_cell == None or fwd_cell.can_overlap():
                self.agent_pos = fwd_pos
            if fwd_cell is not None and fwd_cell.type == 'water':
                # Stepping into water ends the episode with the death reward.
                done = True
                reward = self.config.rewards.standard.death
                info["event"].append("died")
                if self.config.envelope:
                    print("DIED!! >>>>>>> Problems with envelope!")
            elif fwd_cell is not None and fwd_cell.type == 'goal':
                try:
                    if self.goal_enabled():
                        done = True
                        reward = self.config.rewards.standard.goal
                        info["event"].append("goal")
                except:
                    # NOTE(review): bare except — subclasses that leave
                    # goal_enabled() unimplemented (NotImplementedError)
                    # fall back to treating the goal as always enabled.
                    done = True
                    reward = self.config.rewards.standard.goal
                    info["event"].append("goal")
            else:
                reward = self.config.rewards.actions.forward
        elif action == self.actions.pickup:
            if fwd_cell and fwd_cell.can_pickup():
                if self.carrying is None:
                    self.carrying = fwd_cell
                    # Carried objects have no board position.
                    self.carrying.cur_pos = np.array([-1, -1])
                    self.grid.set(*fwd_pos, None)
        elif action == self.actions.drop:
            if not fwd_cell and self.carrying:
                self.grid.set(*fwd_pos, self.carrying)
                self.carrying.cur_pos = fwd_pos
                self.carrying = None
        elif action == self.actions.toggle:
            # Toggling dirt counts as "cleaning" and earns its own reward.
            if fwd_cell is not None and fwd_cell.type == 'dirt':
                reward = self.config.rewards.cleaningenv.clean
            if fwd_cell:
                fwd_cell.toggle(self, fwd_pos)
        elif action == self.actions.done:
            pass
        else:
            assert False, "unknown action"
        # Constant per-step reward (typically a small penalty) is always added.
        reward += self.config.rewards.standard.step
        if self.step_count == self.config.max_num_steps_episode:
            done = True
        obs = self.gen_obs()
        if self.config.debug_mode: print("reward: " + str(reward) + "\tinfo: " + str(info))
        return obs, reward, done, info
    def goal_enabled(self):
        """Hook for subclasses: whether reaching the goal ends the episode."""
        raise NotImplementedError()
    def gen_obs_decoded(self):
        """Build the agent-view grid plus extra scalar observations.

        Cells inside rooms whose light is off are blanked out.  Returns
        ``(grid, (door_open, light_on, current_room, current_room_light,
        next_room_light))``; on AttributeError the traceback is printed
        and None is returned implicitly.
        """
        grid, vis_mask = self.gen_obs_grid()
        if self.config.debug_mode:
            print("\nAgent View Original")
            self.print_grid(grid)
        try:
            # The agent sits at the bottom-centre of its own view.
            agent_pos = (AGENT_VIEW_SIZE // 2, AGENT_VIEW_SIZE - 1)
            obs_door_open = 0
            obs_light_on = 0
            current_room = 0
            current_room_light = 0
            next_room_light = 0
            if self.roomList:
                for x in self.roomList:
                    if x.objectInRoom(self.agent_pos):
                        current_room = x.number
                        current_room_light = x.getLight()
                    else:
                        # NOTE(review): with more than two rooms this keeps
                        # only the LAST non-current room's light state.
                        next_room_light = x.getLight()
                    if not x.getLight():
                        # Hide every visible cell that belongs to a dark room.
                        for j in range(0, grid.height):
                            for i in range(0, grid.width):
                                # Convert view coords to world coords.
                                xpos = agent_pos[1] - j
                                ypos = i - agent_pos[0]
                                (xpos, ypos) = self.get_grid_coords_from_view((xpos, ypos))
                                if x.objectInRoom((xpos, ypos)):
                                    if grid.grid[(j * AGENT_VIEW_SIZE) + i] is not None:
                                        grid.grid[i + (j * AGENT_VIEW_SIZE)] = None
            for j in range(0, grid.height):
                for i in range(0, grid.width):
                    v = grid.get(i, j)
                    if hasattr(v, 'is_open') and v.is_open:
                        obs_door_open = 1
                    if hasattr(v, 'is_on') and v.is_on:
                        obs_light_on = 1
            if self.config.debug_mode:
                print("\n\nobs_door_open\t\t" + str(obs_door_open))
                print("obs_light_on\t\t" + str(obs_light_on))
                print("current_room\t\t" + str(current_room))
                print("current_room_light\t" + str(current_room_light*1))
                print("next_room_light\t\t" + str(next_room_light*1) + "\n\n")
            # "*1" coerces booleans to ints for the observation tuple.
            return grid, (obs_door_open, obs_light_on, current_room, current_room_light*1, next_room_light*1)
        except AttributeError:
            traceback.print_exc()
            print("ERROR!!!")
    def gen_obs(self):
        """Encode the (light-masked) view into a flat uint8 vector with the
        extra observations appended at the end."""
        grid, extra_observations = self.gen_obs_decoded()
        if self.config.debug_mode:
            print("\nAgent View Retreived")
            self.print_grid(grid)
        try:
            array = np.zeros(shape=(grid.width, grid.height, 1), dtype='uint8')
            obs_door_open = 0
            obs_light_on = 0
            for j in range(0, grid.height):
                for i in range(0, grid.width):
                    v = grid.get(i, j)
                    if v == None:
                        continue
                    array[i, j, 0] = OBJECT_TO_IDX[v.type]
                    if hasattr(v, 'is_open') and v.is_open:
                        obs_door_open = 1
                    if hasattr(v, 'is_on') and v.is_on:
                        obs_light_on = 1
            image = array
            flatten_image = image.flatten()
            obs = np.append(flatten_image, extra_observations)
            return obs
        except AttributeError:
            traceback.print_exc()
            print("ERROR!!!")
    def get_grid_coords_from_view(self, coordinates):
        """Map agent-relative (front, side) offsets to absolute grid
        coordinates, accounting for the agent's heading.

        NOTE(review): the four cases follow MiniGrid's direction encoding
        (0..3) — confirm the exact heading meanings against the base class.
        """
        ax, ay = self.agent_pos
        ad = self.agent_dir
        x, y = coordinates
        if ad == 1:
            ax -= y
            ay += x
        elif ad == 0:
            ax += x
            ay += y
        elif ad == 2:
            ax -= x
            ay -= y
        elif ad == 3:
            ax += y
            ay -= x
        return ax, ay
    def worldobj_in_agent(self, front, side):
        """Return the type name of the object at the given agent-relative
        offset, or None when out of bounds or empty."""
        coordinates = (front, side)
        wx, wy = ExMiniGridEnv.get_grid_coords_from_view(self, coordinates)
        if 0 <= wx < self.grid.width and 0 <= wy < self.grid.height:
            worldobj = self.grid.get(wx, wy)
            if worldobj is not None:
                worldobj_type = worldobj.type
                return worldobj_type
        return None
| true | true |
f71fc2a75dfc0689982cf24fdf8bcd85ccd1cfdc | 3,121 | py | Python | sponsors/notifications.py | geofft/pythondotorg | 4e6747acaffad21ba22d4611b58dccbf04a4ccac | [
"Apache-2.0"
] | 2 | 2021-04-06T16:22:51.000Z | 2021-05-04T13:48:42.000Z | sponsors/notifications.py | vishalsingha/pythondotorg | af59bc03f63cdea16b0f2bd98aae2dcec713c4c1 | [
"Apache-2.0"
] | 6 | 2021-03-19T15:57:15.000Z | 2021-12-13T20:50:11.000Z | sponsors/notifications.py | vishalsingha/pythondotorg | af59bc03f63cdea16b0f2bd98aae2dcec713c4c1 | [
"Apache-2.0"
] | 1 | 2021-08-21T10:36:44.000Z | 2021-08-21T10:36:44.000Z | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
class BaseEmailSponsorshipNotification:
    """Template-method base for sponsorship e-mail notifications.

    Subclasses set the two template paths and the context keys, and
    implement get_recipient_list().
    """
    # Template path rendered into the e-mail subject line.
    subject_template = None
    # Template path rendered into the e-mail body.
    message_template = None
    # Names of the notify() kwargs forwarded as template context.
    email_context_keys = None
    def get_subject(self, context):
        """Render and strip the subject template."""
        return render_to_string(self.subject_template, context).strip()
    def get_message(self, context):
        """Render and strip the message template."""
        return render_to_string(self.message_template, context).strip()
    def get_recipient_list(self, context):
        """Return the recipient addresses; subclasses must override."""
        raise NotImplementedError
    def notify(self, **kwargs):
        """Build the context from email_context_keys and send the e-mail."""
        context = {k: kwargs.get(k) for k in self.email_context_keys}
        send_mail(
            subject=self.get_subject(context),
            message=self.get_message(context),
            recipient_list=self.get_recipient_list(context),
            from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL,
        )
class AppliedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
    """Notify PSF staff that a new sponsorship application was submitted."""
    subject_template = "sponsors/email/psf_new_application_subject.txt"
    message_template = "sponsors/email/psf_new_application.txt"
    email_context_keys = ["request", "sponsorship"]
    def get_recipient_list(self, context):
        # Staff notifications go to the single configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class AppliedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Confirm to the sponsor that their application was received."""
    subject_template = "sponsors/email/sponsor_new_application_subject.txt"
    message_template = "sponsors/email/sponsor_new_application.txt"
    email_context_keys = ["sponsorship"]
    def get_recipient_list(self, context):
        # Only verified sponsor contact addresses receive mail.
        return context["sponsorship"].verified_emails
class RejectedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
    """Notify PSF staff that a sponsorship was rejected."""
    subject_template = "sponsors/email/psf_rejected_sponsorship_subject.txt"
    message_template = "sponsors/email/psf_rejected_sponsorship.txt"
    email_context_keys = ["sponsorship"]
    def get_recipient_list(self, context):
        # Staff notifications go to the single configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class RejectedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Inform the sponsor that their sponsorship was rejected."""
    subject_template = "sponsors/email/sponsor_rejected_sponsorship_subject.txt"
    message_template = "sponsors/email/sponsor_rejected_sponsorship.txt"
    email_context_keys = ["sponsorship"]
    def get_recipient_list(self, context):
        # Only verified sponsor contact addresses receive mail.
        return context["sponsorship"].verified_emails
class StatementOfWorkNotificationToPSF(BaseEmailSponsorshipNotification):
    """Notify PSF staff that a statement of work was generated."""
    subject_template = "sponsors/email/psf_statement_of_work_subject.txt"
    message_template = "sponsors/email/psf_statement_of_work.txt"
    email_context_keys = ["sponsorship"]
    def get_recipient_list(self, context):
        # Staff notifications go to the single configured PSF address.
        return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class StatementOfWorkNotificationToSponsors(BaseEmailSponsorshipNotification):
    """Send the statement of work notification to the sponsor."""
    subject_template = "sponsors/email/sponsor_statement_of_work_subject.txt"
    message_template = "sponsors/email/sponsor_statement_of_work.txt"
    email_context_keys = ["sponsorship"]
    def get_recipient_list(self, context):
        # Only verified sponsor contact addresses receive mail.
        return context["sponsorship"].verified_emails
| 37.60241 | 82 | 0.779878 | from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
class BaseEmailSponsorshipNotification:
subject_template = None
message_template = None
email_context_keys = None
def get_subject(self, context):
return render_to_string(self.subject_template, context).strip()
def get_message(self, context):
return render_to_string(self.message_template, context).strip()
def get_recipient_list(self, context):
raise NotImplementedError
def notify(self, **kwargs):
context = {k: kwargs.get(k) for k in self.email_context_keys}
send_mail(
subject=self.get_subject(context),
message=self.get_message(context),
recipient_list=self.get_recipient_list(context),
from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL,
)
class AppliedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/psf_new_application_subject.txt"
message_template = "sponsors/email/psf_new_application.txt"
email_context_keys = ["request", "sponsorship"]
def get_recipient_list(self, context):
return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class AppliedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/sponsor_new_application_subject.txt"
message_template = "sponsors/email/sponsor_new_application.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return context["sponsorship"].verified_emails
class RejectedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/psf_rejected_sponsorship_subject.txt"
message_template = "sponsors/email/psf_rejected_sponsorship.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class RejectedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/sponsor_rejected_sponsorship_subject.txt"
message_template = "sponsors/email/sponsor_rejected_sponsorship.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return context["sponsorship"].verified_emails
class StatementOfWorkNotificationToPSF(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/psf_statement_of_work_subject.txt"
message_template = "sponsors/email/psf_statement_of_work.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL]
class StatementOfWorkNotificationToSponsors(BaseEmailSponsorshipNotification):
subject_template = "sponsors/email/sponsor_statement_of_work_subject.txt"
message_template = "sponsors/email/sponsor_statement_of_work.txt"
email_context_keys = ["sponsorship"]
def get_recipient_list(self, context):
return context["sponsorship"].verified_emails
| true | true |
f71fc31f870e9d876d456cf459dc6a6019bc3ab0 | 336 | py | Python | py_001/py_contextmanager.py | shawn0915/python-study | 4f6d59ed93cb63295f6e67e661860e1f6a4b18c2 | [
"MIT"
] | null | null | null | py_001/py_contextmanager.py | shawn0915/python-study | 4f6d59ed93cb63295f6e67e661860e1f6a4b18c2 | [
"MIT"
] | null | null | null | py_001/py_contextmanager.py | shawn0915/python-study | 4f6d59ed93cb63295f6e67e661860e1f6a4b18c2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import codecs
from contextlib import contextmanager
@contextmanager
def Open(filename, mode, encoding='utf-8'):
    """Context manager opening *filename* via codecs.open; always closes it.

    The ``finally`` guarantees the handle is closed even when the ``with``
    body raises.  NOTE(review): codecs.open has different newline handling
    than the builtin ``open``, so it is deliberately left as-is.
    """
    fp = codecs.open(filename, mode, encoding)
    try:
        yield fp
    finally:
        fp.close()
# Demo: write a UTF-8 string containing non-ASCII characters through Open.
data = u"context汉字测试"
with Open('data.txt', 'w') as f:
    f.write(data)
| 17.684211 | 46 | 0.636905 |
import codecs
from contextlib import contextmanager
@contextmanager
def Open(filename, mode, encoding='utf-8'):
fp = codecs.open(filename, mode, encoding)
try:
yield fp
finally:
fp.close()
data = u"context汉字测试"
with Open('data.txt', 'w') as f:
f.write(data)
| true | true |
f71fc3e734e5af8996dc6c80f55020bcea9a755a | 2,085 | py | Python | simpleAPI/api/v1/serializers.py | Gleb-Stasuyk/simpleAPI | 9b30202915a4f144921b9bd5204f7bfac8b1201f | [
"MIT"
] | null | null | null | simpleAPI/api/v1/serializers.py | Gleb-Stasuyk/simpleAPI | 9b30202915a4f144921b9bd5204f7bfac8b1201f | [
"MIT"
] | null | null | null | simpleAPI/api/v1/serializers.py | Gleb-Stasuyk/simpleAPI | 9b30202915a4f144921b9bd5204f7bfac8b1201f | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from rest_framework import serializers
from companys.models import Company, News
from users.models import Profile
User = get_user_model()
class NewsSerializer(serializers.ModelSerializer):
    """Serialize every field of a News item."""
    class Meta:
        model = News
        fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
    """Company with its nested news items (authenticated view)."""
    company_news = NewsSerializer(many=True, required=False)
    class Meta:
        model = Company
        exclude = ['id']
class CompanySerializerNotAuth(serializers.ModelSerializer):
    """Company without news — the representation for anonymous users."""
    class Meta:
        model = Company
        exclude = ['id', 'company_news']
class ProfileSerializer(serializers.ModelSerializer):
    """Profile fields; the company is rendered by its str() representation."""
    company = serializers.StringRelatedField()
    class Meta:
        model = Profile
        exclude = ['user']
class UserSerializer(serializers.ModelSerializer):
    """User serializer with a writable nested profile."""
    profile = ProfileSerializer()

    class Meta:
        model = User
        fields = ['id', 'profile', 'username', 'first_name', 'last_name', 'date_joined']

    def create(self, validated_data):
        """Create a User together with its nested Profile."""
        profile_data = validated_data.pop('profile')
        user = User.objects.create(**validated_data)
        Profile.objects.create(user=user, **profile_data)
        return user

    def update(self, instance, validated_data):
        """Update the User and its nested Profile in place."""
        profile_data = validated_data.pop('profile')
        profile = instance.profile
        # * User Info
        instance.first_name = validated_data.get(
            'first_name', instance.first_name)
        instance.last_name = validated_data.get(
            'last_name', instance.last_name)
        # BUG FIX: the User changes were never persisted — only
        # profile.save() was called — so first_name/last_name updates were
        # silently lost.
        instance.save()
        # * AccountProfile Info
        profile.company = profile_data.get(
            'company', profile.company)
        profile.bio = profile_data.get(
            'bio', profile.bio)
        profile.location = profile_data.get(
            'location', profile.location)
        profile.birth_date = profile_data.get(
            'birth_date', profile.birth_date)
        profile.role = profile_data.get(
            'role', profile.role)
        profile.save()
        return instance
| 26.730769 | 88 | 0.655635 | from django.contrib.auth import get_user_model
from rest_framework import serializers
from companys.models import Company, News
from users.models import Profile
User = get_user_model()
class NewsSerializer(serializers.ModelSerializer):
class Meta:
model = News
fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
company_news = NewsSerializer(many=True, required=False)
class Meta:
model = Company
exclude = ['id']
class CompanySerializerNotAuth(serializers.ModelSerializer):
class Meta:
model = Company
exclude = ['id', 'company_news']
class ProfileSerializer(serializers.ModelSerializer):
company = serializers.StringRelatedField()
class Meta:
model = Profile
exclude = ['user']
class UserSerializer(serializers.ModelSerializer):
profile = ProfileSerializer()
class Meta:
model = User
fields = ['id', 'profile', 'username', 'first_name', 'last_name', 'date_joined']
def create(self, validated_data):
profile_data = validated_data.pop('profile')
user = User.objects.create(**validated_data)
Profile.objects.create(user=user, **profile_data)
return user
def update(self, instance, validated_data):
profile_data = validated_data.pop('profile')
profile = instance.profile
instance.first_name = validated_data.get(
'first_name', instance.first_name)
instance.last_name = validated_data.get(
'last_name', instance.last_name)
profile.company = profile_data.get(
'company', profile.company)
profile.bio = profile_data.get(
'bio', profile.bio)
profile.location = profile_data.get(
'location', profile.location)
profile.birth_date = profile_data.get(
'birth_date', profile.birth_date)
profile.role = profile_data.get(
'role', profile.role)
profile.save()
return instance
| true | true |
f71fc5a556957ba4ad43afcc5e2c620d83d7aa7c | 749 | py | Python | Server/tests/queries/test_leaderboards.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | 4 | 2018-06-18T06:50:12.000Z | 2018-11-15T00:08:24.000Z | Server/tests/queries/test_leaderboards.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | null | null | null | Server/tests/queries/test_leaderboards.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | null | null | null | from tests import BasicTestCase
class TestLeaderboards(BasicTestCase):
    """Integration test for the `leaderboards` GraphQL query."""
    def test_leaderboards(self):
        """Query the leaderboard and type-check each returned entry."""
        response = self.request(type="query",
                                call='leaderboards(token :"{0}")'.format(self.access_token),
                                body='''
                                ... on LeaderboardsField{
                                    rank
                                    name
                                    point
                                }
                                ''')
        # Every leaderboard row must expose an int rank, a str name and an
        # int point total.
        for user in response['leaderboards']:
            self.assertEqual(type(user['rank']), int)
            self.assertEqual(type(user['name']), str)
            self.assertEqual(type(user['point']), int)
| 37.45 | 92 | 0.419226 | from tests import BasicTestCase
class TestLeaderboards(BasicTestCase):
def test_leaderboards(self):
response = self.request(type="query",
call='leaderboards(token :"{0}")'.format(self.access_token),
body='''
... on LeaderboardsField{
rank
name
point
}
''')
for user in response['leaderboards']:
self.assertEqual(type(user['rank']), int)
self.assertEqual(type(user['name']), str)
self.assertEqual(type(user['point']), int)
| true | true |
f71fc6aac913f65673fbaf691c47f217c5bc9c25 | 536 | py | Python | imglatex/cli.py | odarbelaeze/imglatex | 8463531ae48dd4c2b4937ef4d27dbf74d6f732e3 | [
"MIT"
] | 2 | 2018-02-17T20:26:56.000Z | 2022-02-10T13:23:55.000Z | imglatex/cli.py | odarbelaeze/imglatex | 8463531ae48dd4c2b4937ef4d27dbf74d6f732e3 | [
"MIT"
] | 325 | 2018-03-18T15:28:48.000Z | 2022-03-28T04:19:18.000Z | imglatex/cli.py | odarbelaeze/imglatex | 8463531ae48dd4c2b4937ef4d27dbf74d6f732e3 | [
"MIT"
] | null | null | null | """Console script for imglatex."""
import click
from imglatex.imglatex import find_images, Image, Document
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--prefix', default='.', help='Prefix for the image paths')
def main(path: click.Path, prefix: str):
    """Console script for imglatex."""
    # Wrap every image found under `path` with the given prefix, then emit
    # the assembled LaTeX document on stdout.
    images = [Image(found, prefix) for found in find_images(path)]
    click.echo(Document(images).latex())
    return 0
if __name__ == "__main__":
    import sys
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
| 23.304348 | 73 | 0.671642 |
import click
from imglatex.imglatex import find_images, Image, Document
@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.option('--prefix', default='.', help='Prefix for the image paths')
def main(path: click.Path, prefix: str):
document = Document(
list(Image(i, prefix) for i in find_images(path))
)
click.echo(document.latex())
return 0
if __name__ == "__main__":
import sys
sys.exit(main())
| true | true |
f71fc728dba3bae392f6f5d4e5b6e05cb75586f8 | 4,525 | py | Python | knox/auth.py | liradb2000/django-rest-knox | 2120bdb44173db121611387b9e1a2e8e358b0123 | [
"MIT"
] | null | null | null | knox/auth.py | liradb2000/django-rest-knox | 2120bdb44173db121611387b9e1a2e8e358b0123 | [
"MIT"
] | null | null | null | knox/auth.py | liradb2000/django-rest-knox | 2120bdb44173db121611387b9e1a2e8e358b0123 | [
"MIT"
] | null | null | null | try:
from hmac import compare_digest
except ImportError:
def compare_digest(a, b):
return a == b
import binascii
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import (
BaseAuthentication, get_authorization_header,
)
from knox.crypto import hash_token
from knox.models import AuthToken
from knox.settings import CONSTANTS, knox_settings
from knox.signals import token_expired
class TokenAuthentication(BaseAuthentication):
    '''
    This authentication scheme uses Knox AuthTokens for authentication.
    Similar to DRF's TokenAuthentication, it overrides a large amount of that
    authentication scheme to cope with the fact that Tokens are not stored
    in plaintext in the database
    If successful
    - `request.user` will be a django `User` instance
    - `request.auth` will be an `AuthToken` instance
    '''
    model = AuthToken

    def authenticate(self, request):
        """Validate the Authorization header and return (user, auth_token).

        Returns None when the header is absent or addressed to another
        backend; raises AuthenticationFailed for malformed headers or
        invalid tokens.
        """
        auth = get_authorization_header(request).split()
        prefix = knox_settings.AUTH_HEADER_PREFIX.encode()
        if not auth:
            return None
        if auth[0].lower() != prefix.lower():
            # Authorization header is possibly for another backend
            return None
        if len(auth) == 1:
            msg = _('Invalid token header. No credentials provided.')
            raise exceptions.AuthenticationFailed(msg)
        elif len(auth) > 2:
            msg = _('Invalid token header. '
                    'Token string should not contain spaces.')
            raise exceptions.AuthenticationFailed(msg)
        if knox_settings.USE_COOKIE:
            # The token may be split: part in the header, remainder in a
            # cookie — concatenate both parts before validation.
            auth[1] = b''.join([auth[1], request.COOKIES.get(knox_settings.COOKIE_SETTINGS['NAME'],'').encode()])
        user, auth_token = self.authenticate_credentials(auth[1])
        return (user, auth_token)

    def authenticate_credentials(self, token):
        '''
        Due to the random nature of hashing a value, this must inspect
        each auth_token individually to find the correct one.
        Tokens that have expired will be deleted and skipped
        '''
        msg = _('Invalid token.')
        token = token.decode("utf-8")
        # Candidate tokens share the same (indexed) token_key prefix.
        for auth_token in AuthToken.objects.select_related('user').filter(
                token_key=token[:CONSTANTS.TOKEN_KEY_LENGTH]):
            if self._cleanup_token(auth_token):
                continue
            try:
                digest = hash_token(token)
            except (TypeError, binascii.Error):
                raise exceptions.AuthenticationFailed(msg)
            # Constant-time comparison against the stored digest.
            if compare_digest(digest, auth_token.digest):
                if knox_settings.AUTO_REFRESH and auth_token.expiry:
                    self.renew_token(auth_token)
                return self.validate_user(auth_token)
        raise exceptions.AuthenticationFailed(msg)

    def renew_token(self, auth_token):
        """Slide the token expiry forward by TOKEN_TTL (write-throttled)."""
        current_expiry = auth_token.expiry
        new_expiry = timezone.now() + knox_settings.TOKEN_TTL
        auth_token.expiry = new_expiry
        # Throttle refreshing of token to avoid db writes
        delta = (new_expiry - current_expiry).total_seconds()
        if delta > knox_settings.MIN_REFRESH_INTERVAL:
            auth_token.save(update_fields=('expiry',))

    def validate_user(self, auth_token):
        """Reject tokens that belong to inactive users."""
        if not auth_token.user.is_active:
            raise exceptions.AuthenticationFailed(
                _('User inactive or deleted.'))
        return (auth_token.user, auth_token)

    def authenticate_header(self, request):
        # Value for the WWW-Authenticate header in 401 responses.
        return knox_settings.AUTH_HEADER_PREFIX

    def _cleanup_token(self, auth_token):
        """Lazily delete *auth_token* if expired; return True when removed."""
        # for other_token in auth_token.user.auth_token_set.all():
        #     if other_token.digest != auth_token.digest and other_token.expiry:
        #         if other_token.expiry < timezone.now():
        #             other_token.delete()
        #             username = other_token.user.get_username()
        #             token_expired.send(sender=self.__class__,
        #                                username=username, source="other_token")
        if auth_token.expiry is not None:
            if auth_token.expiry < timezone.now():
                username = auth_token.user.get_username()
                auth_token.delete()
                token_expired.send(sender=self.__class__,
                                   username=username, source="auth_token")
                return True
        return False
| 39.008621 | 113 | 0.643757 | try:
from hmac import compare_digest
except ImportError:
def compare_digest(a, b):
return a == b
import binascii
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import (
BaseAuthentication, get_authorization_header,
)
from knox.crypto import hash_token
from knox.models import AuthToken
from knox.settings import CONSTANTS, knox_settings
from knox.signals import token_expired
class TokenAuthentication(BaseAuthentication):
model = AuthToken
def authenticate(self, request):
auth = get_authorization_header(request).split()
prefix = knox_settings.AUTH_HEADER_PREFIX.encode()
if not auth:
return None
if auth[0].lower() != prefix.lower():
return None
if len(auth) == 1:
msg = _('Invalid token header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token header. '
'Token string should not contain spaces.')
raise exceptions.AuthenticationFailed(msg)
if knox_settings.USE_COOKIE:
auth[1] = b''.join([auth[1], request.COOKIES.get(knox_settings.COOKIE_SETTINGS['NAME'],'').encode()])
user, auth_token = self.authenticate_credentials(auth[1])
return (user, auth_token)
def authenticate_credentials(self, token):
msg = _('Invalid token.')
token = token.decode("utf-8")
for auth_token in AuthToken.objects.select_related('user').filter(
token_key=token[:CONSTANTS.TOKEN_KEY_LENGTH]):
if self._cleanup_token(auth_token):
continue
try:
digest = hash_token(token)
except (TypeError, binascii.Error):
raise exceptions.AuthenticationFailed(msg)
if compare_digest(digest, auth_token.digest):
if knox_settings.AUTO_REFRESH and auth_token.expiry:
self.renew_token(auth_token)
return self.validate_user(auth_token)
raise exceptions.AuthenticationFailed(msg)
def renew_token(self, auth_token):
current_expiry = auth_token.expiry
new_expiry = timezone.now() + knox_settings.TOKEN_TTL
auth_token.expiry = new_expiry
delta = (new_expiry - current_expiry).total_seconds()
if delta > knox_settings.MIN_REFRESH_INTERVAL:
auth_token.save(update_fields=('expiry',))
def validate_user(self, auth_token):
if not auth_token.user.is_active:
raise exceptions.AuthenticationFailed(
_('User inactive or deleted.'))
return (auth_token.user, auth_token)
def authenticate_header(self, request):
return knox_settings.AUTH_HEADER_PREFIX
def _cleanup_token(self, auth_token):
if auth_token.expiry is not None:
if auth_token.expiry < timezone.now():
username = auth_token.user.get_username()
auth_token.delete()
token_expired.send(sender=self.__class__,
username=username, source="auth_token")
return True
return False
| true | true |
f71fc7ba9f38450681a76a58e0a61f43125749ab | 10,698 | py | Python | codenames/players/codemaster_glove_lookahead.py | gohyun14/Game | 39e6e192590059daade40c95cc177acb0f3a581b | [
"MIT"
] | 1 | 2022-02-25T17:44:02.000Z | 2022-02-25T17:44:02.000Z | codenames/players/codemaster_glove_lookahead.py | gohyun14/Game | 39e6e192590059daade40c95cc177acb0f3a581b | [
"MIT"
] | null | null | null | codenames/players/codemaster_glove_lookahead.py | gohyun14/Game | 39e6e192590059daade40c95cc177acb0f3a581b | [
"MIT"
] | null | null | null | import scipy.spatial.distance
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from math import ceil
import numpy as np
import copy
import itertools
from players.codemaster import Codemaster
THRESHOLD = np.inf
class AICodemaster(Codemaster):
    """GloVe-based Codenames codemaster that picks clues with a look-ahead
    game tree (see the companion Node class)."""
    def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
        super().__init__()
        self.brown_ic = brown_ic
        self.glove_vecs = glove_vecs
        self.word_vectors = word_vectors
        self.wordnet_lemmatizer = WordNetLemmatizer()
        self.lancaster_stemmer = LancasterStemmer()
        # Candidate clue vocabulary, one word per line (read at startup).
        self.cm_wordlist = []
        with open('players/cm_wordlist.txt') as infile:
            for line in infile:
                self.cm_wordlist.append(line.rstrip())
        # Root of the persisted look-ahead tree; rebuilt on board mismatch.
        self.root = None
        self.turn_number = 0
    def set_game_state(self, words, maps):
        """Record the current board and refresh derived word sets/distances.

        `words` are the visible board words ('*'-prefixed once guessed);
        `maps` gives each word's team label.
        """
        if self.turn_number == 0:
            self.original_words = copy.copy(words)
            print(f"original words: {self.original_words}")
        self.words = words
        self.maps = maps
        self.update_board()
        self.init_dists()
        self.turn_number += 1
    def update_board(self):
        """Partition the 25 board words into guessed / red / bad sets."""
        self.red_words = set()
        self.bad_words = set()
        self.words_guessed = set()
        for i in range(25):
            if self.words[i][0] == '*':
                # Revealed cells start with '*'; recover the word from the
                # snapshot taken on turn 0.
                self.words_guessed.add(self.original_words[i].lower())
            elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
                self.bad_words.add(self.words[i].lower())
                if self.maps[i] == "Assassin":
                    self.black_word = self.words[i]
            else:
                self.red_words.add(self.words[i].lower())
    def init_dists(self):
        """Precompute cosine distances from every candidate clue to every
        remaining red/bad board word (GloVe vectors only)."""
        cos_dist = scipy.spatial.distance.cosine
        all_vectors = (self.glove_vecs,)
        self.bad_word_dists = {}
        for word in self.bad_words:
            self.bad_word_dists[word] = {}
            for val in self.cm_wordlist:
                b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                self.bad_word_dists[word][val] = b_dist
        self.red_word_dists = {}
        for word in self.red_words:
            self.red_word_dists[word] = {}
            for val in self.cm_wordlist:
                b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                self.red_word_dists[word][val] = b_dist
    def get_clue(self):
        """Run/extend the look-ahead search and return the best clue tuple.

        The tree root is reused across turns while its guessed-word set
        still matches the real board.
        """
        #self.all_guesses = set()
        if self.root is None or self.root.words_guessed != self.words_guessed:
            if self.root:
                print("board mismatch: initializing new root")
                print(f"game's words guessed: {self.words_guessed} nodes' words guessed: {self.root.words_guessed}")
            self.root = Node(self, copy.copy(self.words_guessed), None, depth = self.turn_number-1)
        self.root.get_val()
        best_clue = self.root.best_clue
        print('chosen_clue is:', best_clue[0])
        # Descend so next turn's search starts from the chosen branch.
        self.root = self.root.best_child
        return best_clue
    def arr_not_in_word(self, word, arr):
        """True if `word` is not a lemma/stem/substring match of any board
        word in `arr` (legal-clue check)."""
        if word in arr:
            return False
        lemm = self.wordnet_lemmatizer.lemmatize(word)
        lancas = self.lancaster_stemmer.stem(word)
        for i in arr:
            if i == lemm or i == lancas:
                return False
            if i.find(word) != -1:
                return False
            if word.find(i) != -1:
                return False
        return True
    def combine(self, words, wordvecs):
        """Average the (concatenated) embeddings of `words`."""
        factor = 1.0 / float(len(words))
        new_word = self.concatenate(words[0], wordvecs) * factor
        for word in words[1:]:
            new_word += self.concatenate(word, wordvecs) * factor
        return new_word
    def concatenate(self, word, wordvecs):
        """Horizontally stack `word`'s vectors from each vector table."""
        concatenated = wordvecs[0][word]
        for vec in wordvecs[1:]:
            concatenated = np.hstack((concatenated, vec[word]))
        return concatenated
class Node:
    """One node of the clue search tree.

    A Node represents a hypothetical board state (``words_guessed``) reached
    after ``depth`` clues.  ``get_val()`` searches depth-first for the clue
    sequence that clears the remaining red words in the fewest turns,
    pruning with ``best`` (the shallowest finished depth found so far).
    """

    def __init__(self, codemaster, words_guessed, parent, depth = 0, best=np.inf):
        # Owning AICodemaster: supplies word lists and precomputed distances.
        self.codemaster = codemaster
        # Words assumed already guessed once this node is reached.
        self.words_guessed = words_guessed
        self.parent = parent
        self.depth = depth
        # Filled in by get_val(): chosen (clue_word, clue_num) and its subtree.
        self.best_clue = None
        self.best_child = None
        self.val = np.inf
        self.terminal = False
        # Lowest terminal depth seen anywhere so far; used for pruning.
        self.best = best

    def get_best_clues(self):
        """Score candidate clue words against groups of 1-3 remaining red words.

        Returns a dict mapping (clue_word, clue_num) -> (red_word_tuple,
        worst_red_distance), where worst_red_distance is the largest cosine
        distance from the clue to any targeted red word.  A clue is kept only
        when that distance is also smaller than its distance to every bad word.
        NOTE(review): the local ``bests`` dict is populated but never returned.
        """
        bests = {}
        possible = {}
        cm = self.codemaster
        red_words = cm.red_words.difference(self.words_guessed)
        bad_words = cm.bad_words.difference(self.words_guessed)
        print(f"calculating best clues")
        #print(f"red word dists: {self.red_word_dists}")
        for clue_num in range(1, 3 + 1):
            best_per_dist = np.inf
            best_per = ''
            best_red_word = ''
            for red_word in list(itertools.combinations(red_words, clue_num)):
                best_word = ''
                best_dist = np.inf
                for word in cm.cm_wordlist:
                    # Clues may not be (sub)strings/lemmas/stems of board words.
                    if not cm.arr_not_in_word(word, red_words.union(bad_words)):
                        continue
                    # Distance to the closest bad word: the clue must be
                    # nearer to every targeted red word than to this.
                    bad_dist = np.inf
                    worst_bad = ''  # NOTE(review): computed but never used
                    for bad_word in bad_words:
                        if cm.bad_word_dists[bad_word][word] < bad_dist:
                            bad_dist = cm.bad_word_dists[bad_word][word]
                            worst_bad = bad_word
                    worst_red = 0
                    for red in red_word:
                        dist = cm.red_word_dists[red][word]
                        if dist > worst_red:
                            worst_red = dist
                    if worst_red < best_dist and worst_red < bad_dist:
                        best_dist = worst_red
                        best_word = word
                        # print(worst_red,red_word,word)
                if best_dist < best_per_dist:
                    best_per_dist = best_dist
                    best_per = best_word
                    best_red_word = red_word
                # THRESHOLD is np.inf at module level, so for clue_num > 1 this
                # only drops combinations with no safe clue (best_dist == inf).
                if best_dist < THRESHOLD or clue_num == 1:
                    possible[(best_word, clue_num)] = (red_word, best_dist)
            bests[clue_num] = (best_red_word, best_per, best_per_dist)
        print(f"length of possibilities: {len(possible)}")
        return possible

    def add_children(self):
        """Expand this node with children for promising clues.

        NOTE(review): ``Node`` defines no ``add_child`` method, so calling this
        would raise AttributeError; nothing in this file calls it (``get_val``
        expands children itself via ``new_child``) -- apparently dead code.
        """
        cos_dist = scipy.spatial.distance.cosine
        cm = self.codemaster
        all_vectors = (cm.glove_vecs,)
        print(f"at depth {self.depth}")
        bests = self.get_best_clues()
        for clue, clue_info in bests.items():
            combined_clue, clue_num = clue
            best_red_word, combined_score = clue_info
            # Worst-case distance from the clue to any targeted red word.
            worst = -np.inf
            for word in best_red_word:
                dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))
                if dist > worst:
                    worst = dist
            if worst < 0.7 and worst != -np.inf or clue_num == 1:
                print(f"adding clue: {clue}")
                self.add_child(clue, best_red_word)

    def check_board(self):
        """Classify this node: assassin hit (inf), all reds done (terminal), or ongoing (25)."""
        cm = self.codemaster
        self.black_guessed = cm.black_word in self.words_guessed
        red_words = cm.red_words.difference(self.words_guessed)
        red_count = len(red_words)
        if self.black_guessed:
            # Guessing the assassin loses the game: worst possible value.
            self.val = np.inf
            self.terminal = True
        elif red_count == 0:
            # All red words found: the value is the number of turns taken.
            self.val = self.depth
            self.terminal = True
            print(f"Terminal Node: depth: {self.depth}")
        else:
            # Placeholder for non-terminal nodes (board has 25 words total).
            self.val = 25

    def new_child(self, expected_words_chosen):
        """Return the child state reached if the team guesses *expected_words_chosen*."""
        new_words_guessed = copy.copy(self.words_guessed)
        for word in expected_words_chosen:
            new_words_guessed.add(word)
        return Node(self.codemaster, new_words_guessed, self, self.depth + 1, self.best)

    def get_val(self, depth=np.inf):
        """Depth-first search: return the fewest total turns needed to finish.

        Explores feasible clues (larger target groups first, closer clues
        first among equals), recursing on the board state that results if all
        targeted words are guessed.  Records best_clue/best_child for
        get_clue(), and propagates the pruning bound ``best`` upward.
        """
        # if self.words_guessed in self.codemaster.all_guesses:
        #     print("Board State already explored")
        #     return self.val
        # self.codemaster.all_guesses.add(self.words_guessed)
        self.check_board()
        if self.not_possible():
            # Branch cannot beat the best known finish: prune.
            print("Skipped")
            return self.val
        if self.terminal:
            if self.val < self.best:
                self.best = self.val
            return self.val
        if self.best_clue is not None:
            # Already evaluated (cached subtree reused across turns).
            return self.val
        best_val = np.inf
        possible = self.get_best_clues()
        # Sort key: descending clue_num, then ascending distance (reverse of
        # (clue_num, -dist)), so big safe groups are explored first.
        for clue, clue_info in sorted(possible.items(), key = lambda x: (x[0][1],-x[1][1]), reverse=True):
            combined_clue, clue_num = clue
            best_red_word, combined_score = clue_info
            if self.check_clue_feasible(clue_num, combined_score):
                print(f"Exploring child, depth: {self.depth+1}, clue: {clue}, dist: {combined_score}")
                child = self.new_child(best_red_word)
                child_val = child.get_val(depth)
                if child_val < best_val:
                    best_val = child_val
                    self.best_clue = clue
                    self.best_child = child
                if child.best < self.best:
                    print(f"Found new best, prev: {self.best} new: {child.best}")
                    self.best = child.best
        self.val = best_val
        return self.val

    # def best_child(self):
    #     best_clue = self.best_clue
    #     for child_key in self.children.keys():
    #         if child_key == best_clue:
    #             best_child = self.children[child_key]
    #     best_child.reset_depth()
    #     return best_child

    def not_possible(self):
        """Prune check: True when this branch cannot beat the best known finish.

        best_possible assumes every future clue covers 3 words (the cap in
        get_best_clues): depth + ceil(remaining_red / 3) turns at minimum.
        """
        red_words = self.codemaster.red_words.difference(self.words_guessed)
        best_possible = self.depth + ceil(len(red_words)/3)
        print(f"BEST POSSIBLE: {best_possible}")
        return self.best <= best_possible or self.depth >= self.best or (not self.terminal and self.depth == self.best - 1)

    def check_clue_feasible(self, clue_num, combined_score):
        """A clue is worth exploring when it targets one word, or its score
        beats THRESHOLD (np.inf here, so only infinite scores are rejected)."""
        return clue_num == 1 or combined_score < THRESHOLD
        # cos_dist = scipy.spatial.distance.cosine
        # cm = self.codemaster
        # all_vectors = (cm.glove_vecs,)
        # worst = -np.inf
        # for word in best_red_word:
        #     dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))
        #     if dist > worst:
        #         worst = dist
        # return worst < 0.7 and worst != -np.inf or clue_num == 1
| 39.476015 | 123 | 0.570667 | import scipy.spatial.distance
from nltk.stem import WordNetLemmatizer
from nltk.stem.lancaster import LancasterStemmer
from math import ceil
import numpy as np
import copy
import itertools
from players.codemaster import Codemaster
THRESHOLD = np.inf
class AICodemaster(Codemaster):
    """Codemaster agent that picks clues with a GloVe-distance game-tree search."""

    def __init__(self, brown_ic=None, glove_vecs=None, word_vectors=None):
        """Store the supplied embedding resources and load the candidate clue wordlist."""
        super().__init__()
        self.brown_ic = brown_ic
        self.glove_vecs = glove_vecs
        self.word_vectors = word_vectors
        self.wordnet_lemmatizer = WordNetLemmatizer()
        self.lancaster_stemmer = LancasterStemmer()
        # Pool of candidate clue words, one per line of the wordlist file.
        self.cm_wordlist = []
        with open('players/cm_wordlist.txt') as infile:
            for line in infile:
                self.cm_wordlist.append(line.rstrip())
        # Search-tree root; (re)built lazily in get_clue().
        self.root = None
        self.turn_number = 0

    def set_game_state(self, words, maps):
        """Record the current board (*words*) and key (*maps*), then refresh caches."""
        if self.turn_number == 0:
            # Keep a pristine copy of the board; later turns mark guessed
            # entries with a leading '*' (see update_board).
            self.original_words = copy.copy(words)
            print(f"original words: {self.original_words}")
        self.words = words
        self.maps = maps
        self.update_board()
        self.init_dists()
        self.turn_number += 1

    def update_board(self):
        """Partition the 25 board slots into guessed, bad, and red word sets."""
        self.red_words = set()
        self.bad_words = set()
        self.words_guessed = set()
        for i in range(25):
            if self.words[i][0] == '*':
                # Slot already revealed: record the original (lowercased) word.
                self.words_guessed.add(self.original_words[i].lower())
            elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
                # Anything that is not ours counts as "bad" for clue safety.
                self.bad_words.add(self.words[i].lower())
                if self.maps[i] == "Assassin":
                    self.black_word = self.words[i]
            else:
                self.red_words.add(self.words[i].lower())

    def init_dists(self):
        """Precompute cosine distances from every candidate clue to each bad/red word."""
        cos_dist = scipy.spatial.distance.cosine
        # Only the GloVe vectors are used for the distance computations here.
        all_vectors = (self.glove_vecs,)
        self.bad_word_dists = {}
        for word in self.bad_words:
            self.bad_word_dists[word] = {}
            for val in self.cm_wordlist:
                b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                self.bad_word_dists[word][val] = b_dist
        self.red_word_dists = {}
        for word in self.red_words:
            self.red_word_dists[word] = {}
            for val in self.cm_wordlist:
                b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                self.red_word_dists[word][val] = b_dist

    def get_clue(self):
        """Search from the current board state and return the chosen clue tuple."""
        if self.root is None or self.root.words_guessed != self.words_guessed:
            if self.root:
                print("board mismatch: initializing new root")
                print(f"game's words guessed: {self.words_guessed} nodes' words guessed: {self.root.words_guessed}")
            self.root = Node(self, copy.copy(self.words_guessed), None, depth = self.turn_number-1)
        self.root.get_val()
        best_clue = self.root.best_clue
        print('chosen_clue is:', best_clue[0])
        # Advance the cached tree so the next turn can reuse the subtree.
        self.root = self.root.best_child
        return best_clue

    def arr_not_in_word(self, word, arr):
        """Return True iff *word* is lexically unrelated to every entry of *arr*."""
        if word in arr:
            return False
        lemm = self.wordnet_lemmatizer.lemmatize(word)
        lancas = self.lancaster_stemmer.stem(word)
        for i in arr:
            if i == lemm or i == lancas:
                return False
            # Reject substring relationships in either direction.
            if i.find(word) != -1:
                return False
            if word.find(i) != -1:
                return False
        return True

    def combine(self, words, wordvecs):
        """Return the uniform average of the concatenated vectors for *words*."""
        factor = 1.0 / float(len(words))
        new_word = self.concatenate(words[0], wordvecs) * factor
        for word in words[1:]:
            new_word += self.concatenate(word, wordvecs) * factor
        return new_word

    def concatenate(self, word, wordvecs):
        """Look up *word* in each vector table and hstack the results."""
        concatenated = wordvecs[0][word]
        for vec in wordvecs[1:]:
            concatenated = np.hstack((concatenated, vec[word]))
        return concatenated
class Node:
def __init__(self, codemaster, words_guessed, parent, depth = 0, best=np.inf):
self.codemaster = codemaster
self.words_guessed = words_guessed
self.parent = parent
self.depth = depth
self.best_clue = None
self.best_child = None
self.val = np.inf
self.terminal = False
self.best = best
def get_best_clues(self):
bests = {}
possible = {}
cm = self.codemaster
red_words = cm.red_words.difference(self.words_guessed)
bad_words = cm.bad_words.difference(self.words_guessed)
print(f"calculating best clues")
for clue_num in range(1, 3 + 1):
best_per_dist = np.inf
best_per = ''
best_red_word = ''
for red_word in list(itertools.combinations(red_words, clue_num)):
best_word = ''
best_dist = np.inf
for word in cm.cm_wordlist:
if not cm.arr_not_in_word(word, red_words.union(bad_words)):
continue
bad_dist = np.inf
worst_bad = ''
for bad_word in bad_words:
if cm.bad_word_dists[bad_word][word] < bad_dist:
bad_dist = cm.bad_word_dists[bad_word][word]
worst_bad = bad_word
worst_red = 0
for red in red_word:
dist = cm.red_word_dists[red][word]
if dist > worst_red:
worst_red = dist
if worst_red < best_dist and worst_red < bad_dist:
best_dist = worst_red
best_word = word
if best_dist < best_per_dist:
best_per_dist = best_dist
best_per = best_word
best_red_word = red_word
if best_dist < THRESHOLD or clue_num == 1:
possible[(best_word, clue_num)] = (red_word, best_dist)
bests[clue_num] = (best_red_word, best_per, best_per_dist)
print(f"length of possibilities: {len(possible)}")
return possible
def add_children(self):
cos_dist = scipy.spatial.distance.cosine
cm = self.codemaster
all_vectors = (cm.glove_vecs,)
print(f"at depth {self.depth}")
bests = self.get_best_clues()
for clue, clue_info in bests.items():
combined_clue, clue_num = clue
best_red_word, combined_score = clue_info
worst = -np.inf
for word in best_red_word:
dist = cos_dist(cm.concatenate(word, all_vectors), cm.concatenate(combined_clue, all_vectors))
if dist > worst:
worst = dist
if worst < 0.7 and worst != -np.inf or clue_num == 1:
print(f"adding clue: {clue}")
self.add_child(clue, best_red_word)
def check_board(self):
cm = self.codemaster
self.black_guessed = cm.black_word in self.words_guessed
red_words = cm.red_words.difference(self.words_guessed)
red_count = len(red_words)
if self.black_guessed:
self.val = np.inf
self.terminal = True
elif red_count == 0:
self.val = self.depth
self.terminal = True
print(f"Terminal Node: depth: {self.depth}")
else:
self.val = 25
def new_child(self, expected_words_chosen):
new_words_guessed = copy.copy(self.words_guessed)
for word in expected_words_chosen:
new_words_guessed.add(word)
return Node(self.codemaster, new_words_guessed, self, self.depth + 1, self.best)
def get_val(self, depth=np.inf):
self.check_board()
if self.not_possible():
print("Skipped")
return self.val
if self.terminal:
if self.val < self.best:
self.best = self.val
return self.val
if self.best_clue is not None:
return self.val
best_val = np.inf
possible = self.get_best_clues()
for clue, clue_info in sorted(possible.items(), key = lambda x: (x[0][1],-x[1][1]), reverse=True):
combined_clue, clue_num = clue
best_red_word, combined_score = clue_info
if self.check_clue_feasible(clue_num, combined_score):
print(f"Exploring child, depth: {self.depth+1}, clue: {clue}, dist: {combined_score}")
child = self.new_child(best_red_word)
child_val = child.get_val(depth)
if child_val < best_val:
best_val = child_val
self.best_clue = clue
self.best_child = child
if child.best < self.best:
print(f"Found new best, prev: {self.best} new: {child.best}")
self.best = child.best
self.val = best_val
return self.val
def not_possible(self):
red_words = self.codemaster.red_words.difference(self.words_guessed)
best_possible = self.depth + ceil(len(red_words)/3)
print(f"BEST POSSIBLE: {best_possible}")
return self.best <= best_possible or self.depth >= self.best or (not self.terminal and self.depth == self.best - 1)
def check_clue_feasible(self, clue_num, combined_score):
return clue_num == 1 or combined_score < THRESHOLD
| true | true |
f71fc8515d160a9327f631beba72fefeffeddf90 | 1,049 | py | Python | misc/pytorch_toolkit/machine_translation/core/dataset/text_container.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | misc/pytorch_toolkit/machine_translation/core/dataset/text_container.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | misc/pytorch_toolkit/machine_translation/core/dataset/text_container.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import io
from torch.utils.data import Dataset
from tqdm import tqdm
class TextContainer(Dataset):
    """Map-style dataset over a plain-text corpus: one stripped line per item."""

    def __init__(self, corpus):
        """Load *corpus* (a UTF-8 text file) fully into memory, one entry per line."""
        with io.open(corpus, mode='r', encoding='utf-8') as handle:
            self.data = [raw.strip() for raw in tqdm(handle)]

    def __len__(self):
        """Number of lines loaded from the corpus."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the idx-th line together with its index as a stable key."""
        return {
            "text": self.data[idx],
            "key": idx
        }
| 32.78125 | 73 | 0.675882 | import io
from torch.utils.data import Dataset
from tqdm import tqdm
class TextContainer(Dataset):
    """Map-style dataset over a plain-text corpus: one stripped line per item."""

    def __init__(self, corpus):
        """Load *corpus* (a UTF-8 text file) fully into memory, one entry per line."""
        self.data = []
        with io.open(corpus, mode='r', encoding='utf-8') as f:
            # tqdm shows load progress for large corpora.
            for line in tqdm(f):
                self.data.append(line.strip())

    def __len__(self):
        """Number of lines loaded from the corpus."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return the idx-th line together with its index as a stable key."""
        return {
            "text": self.data[idx],
            "key": idx
        }
| true | true |
f71fc8b40616224f0c26320e75087d6d233ed41a | 3,180 | py | Python | wurst/searching.py | pjamesjoyce/wurst | 95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba | [
"BSD-2-Clause"
] | 1 | 2022-03-29T14:59:13.000Z | 2022-03-29T14:59:13.000Z | wurst/searching.py | pjamesjoyce/wurst | 95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba | [
"BSD-2-Clause"
] | null | null | null | wurst/searching.py | pjamesjoyce/wurst | 95b37e72eaa18b33bdd83cd4a51d37d9eb4ae7ba | [
"BSD-2-Clause"
] | null | null | null | from .errors import MultipleResults, NoResults
def equals(field, value):
    """Return a predicate that is true when a dataset's ``field`` equals ``value``."""
    def _check(ds):
        return ds.get(field) == value
    return _check
def contains(field, value):
    """Return a predicate true when ``value`` is contained in a dataset's ``field``.

    Note: like the original, a missing ``field`` yields ``None`` and the
    ``in`` test then raises TypeError.
    """
    def _check(ds):
        return value in ds.get(field)
    return _check
def startswith(field, value):
    """Return a predicate true when a dataset's ``field`` starts with ``value``.

    A missing field defaults to the empty string (predicate is False).
    """
    def _check(ds):
        return ds.get(field, '').startswith(value)
    return _check
def either(*funcs):
    """Return a predicate that is true when ANY of ``funcs`` evaluates true."""
    def _check(ds):
        return any(f(ds) for f in funcs)
    return _check
def exclude(func):
    """Return the logical negation of predicate ``func``."""
    def _check(ds):
        return not func(ds)
    return _check
def doesnt_contain_any(field, values):
    """Return a predicate true when a dataset's ``field`` contains none of ``values``.

    Equivalent to all(exclude(contains(field, v)) for v in values), written
    directly with De Morgan's law: not any(v in field-value).
    """
    def _check(ds):
        return not any(value in ds.get(field) for value in values)
    return _check
def get_many(data, *funcs):
    """Lazily apply every filter function in ``funcs`` to ``data``.

    Returns an iterator (or ``data`` itself when no filters are given).
    """
    result = data
    for func in funcs:
        result = filter(func, result)
    return result
def get_one(data, *funcs):
    """Apply filter functions ``funcs`` to ``data`` and return the single match.

    Raises ``wurst.errors.NoResults`` or ``wurst.errors.MultipleResults`` if
    zero or multiple results are returned.
    """
    # Per item, each filter is tried in order with short-circuiting --
    # the same evaluation order as chaining filter() calls.
    matches = [ds for ds in data if all(func(ds) for func in funcs)]
    if not matches:
        raise NoResults
    if len(matches) > 1:
        raise MultipleResults
    return matches[0]
def _exchanges(ds, kind, *funcs):
    """Return the exchanges of ``ds`` whose ``type`` is ``kind``, filtered by ``funcs``.

    Bug fix: the previous guard compared the *tuple* ``funcs`` against the
    *list* ``[None]`` -- a comparison that is always False -- so a ``None``
    placeholder filter leaked into the filter chain (where ``filter(None, ...)``
    silently becomes a truthiness filter).  ``None`` entries are now dropped
    explicitly, anywhere in ``funcs``.
    """
    funcs = [func for func in funcs if func is not None]
    return get_many(
        filter(lambda x: x['type'] == kind, ds['exchanges']),
        *funcs
    )
def technosphere(ds, *funcs):
    """Get all technosphere exchanges in ``ds`` that pass filtering functions ``funcs``.

    Returns a lazy iterator (see ``get_many``).
    """
    return _exchanges(ds, 'technosphere', *funcs)
def biosphere(ds, *funcs):
    """Get all biosphere exchanges in ``ds`` that pass filtering functions ``funcs``.

    Returns a lazy iterator (see ``get_many``).
    """
    return _exchanges(ds, 'biosphere', *funcs)
def production(ds, *funcs):
    """Get all production exchanges in ``ds`` that pass filtering functions ``funcs``.

    Returns a lazy iterator (see ``get_many``).
    """
    return _exchanges(ds, 'production', *funcs)
def reference_product(ds):
    """Get the single reference product exchange from a dataset.

    A reference product is a ``production`` exchange with a nonzero amount.

    Raises ``wurst.errors.NoResults`` or ``wurst.errors.MultipleResults`` if
    zero or multiple results are returned.
    """
    excs = [exc for exc in ds['exchanges']
            if exc['amount']
            and exc['type'] == 'production']
    if not excs:
        # Fixed typo in the user-facing message: "founds" -> "found".
        raise NoResults("No suitable production exchanges found")
    elif len(excs) > 1:
        raise MultipleResults("Multiple production exchanges found")
    return excs[0]
def best_geo_match(possibles, ordered_locations):
    """Pick the dataset from ``possibles`` whose location is first in ``ordered_locations``.

    ``possibles`` is an iterable of objects with a ``location`` field.
    ``ordered_locations`` is a list of locations in preference order.

    Returns an element from ``possibles``, or ``None`` when no candidate's
    location is listed.
    """
    weights = {loc: rank for rank, loc in enumerate(ordered_locations)}
    candidates = (obj for obj in possibles if obj['location'] in weights)
    # min() is O(n) versus the previous sort's O(n log n); ties resolve to the
    # first candidate encountered, matching the old stable-sort behaviour.
    return min(candidates, key=lambda obj: weights[obj['location']], default=None)
| 30.576923 | 118 | 0.654088 | from .errors import MultipleResults, NoResults
def equals(field, value):
return lambda x: x.get(field) == value
def contains(field, value):
return lambda x: value in x.get(field)
def startswith(field, value):
return lambda x: x.get(field, '').startswith(value)
def either(*funcs):
return lambda x: any(f(x) for f in funcs)
def exclude(func):
return lambda x: not func(x)
def doesnt_contain_any(field, values):
return lambda x: all(exclude(contains(field, value))(x) for value in values)
def get_many(data, *funcs):
for fltr in funcs:
data = filter(fltr, data)
return data
def get_one(data, *funcs):
results = list(get_many(data, *funcs))
if not results:
raise NoResults
if not len(results) == 1:
raise MultipleResults
return results[0]
def _exchanges(ds, kind, *funcs):
if funcs == [None]:
funcs = []
return get_many(
filter(lambda x: x['type'] == kind, ds['exchanges']),
*funcs
)
def technosphere(ds, *funcs):
return _exchanges(ds, 'technosphere', *funcs)
def biosphere(ds, *funcs):
return _exchanges(ds, 'biosphere', *funcs)
def production(ds, *funcs):
return _exchanges(ds, 'production', *funcs)
def reference_product(ds):
excs = [exc for exc in ds['exchanges']
if exc['amount']
and exc['type'] == 'production']
if not excs:
raise NoResults("No suitable production exchanges founds")
elif len(excs) > 1:
raise MultipleResults("Multiple production exchanges found")
return excs[0]
def best_geo_match(possibles, ordered_locations):
weights = {y: x for x, y in enumerate(ordered_locations)}
filtered = (obj for obj in possibles if obj['location'] in weights)
ordered = sorted(filtered, key=lambda x: weights[x['location']])
if ordered:
return ordered[0]
| true | true |
f71fca16be9fa3da257d719ec7efdae5f61d8f68 | 4,100 | py | Python | sandbox/jorvis/generate_gff3_feature_identifiers.py | senjoro/biocode | 6697c17570126d99fb1cbeabf5b8322db006643d | [
"MIT"
] | 355 | 2015-01-15T18:11:20.000Z | 2022-03-26T19:23:30.000Z | sandbox/jorvis/generate_gff3_feature_identifiers.py | senjoro/biocode | 6697c17570126d99fb1cbeabf5b8322db006643d | [
"MIT"
] | 43 | 2015-03-20T08:40:14.000Z | 2022-03-09T22:37:38.000Z | sandbox/jorvis/generate_gff3_feature_identifiers.py | senjoro/biocode | 6697c17570126d99fb1cbeabf5b8322db006643d | [
"MIT"
] | 217 | 2015-01-29T08:40:33.000Z | 2022-03-26T19:23:45.000Z | #!/usr/bin/env python3
"""
This script is used to take any GFF3 file and re-generate feature identifiers within
it to match the convention used at IGS. This is:
$prefix.$type.$id.$version
The mode here defines what the identifier. For example, if using --mode=sequential for
an organism (--prefix) of b_microti, the ID for the 100th gene might be:
The mode here defines what the identifier. Examples:
--mode=sequential (the default)
b_microti.gene.100.1
--mode=uuid (UUID, as specified in RFC 4122, using uuid4())
b_microti.gene.8d8f9231-262e-48e7-b066-a84b6a939746.1
--mode=hex8
b_microti.gene.c08ca446.1
--mode=hex12
b_microti.gene.191ccac20a56.1
The only values that are replaced are in the ID and Parent attributes in the 9th column.
"""
import argparse
import os
import sys
from binascii import hexlify
from collections import defaultdict
from uuid import uuid4
from biocode import gff
## constants
next_ids_sequential = defaultdict(lambda: 1)
def main():
    """Rewrite ID/Parent attributes in a GFF3 file with newly generated identifiers.

    Reads --input_file line by line; lines that do not have 9 tab-separated
    columns (headers, comments) are passed through unchanged.  Each ID gets a
    fresh identifier (see get_new_id) and the old->new mapping is reused so
    Parent references stay consistent.  Raises if a Parent is referenced
    before appearing as an ID.
    """
    parser = argparse.ArgumentParser( description='Generates new identifiers in GFF3 files following the IGS identifier convention.')
    ## output file to be written
    parser.add_argument('-i', '--input_file', type=str, required=True, help='TA file of source molecules' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Optional output file path (else STDOUT)' )
    parser.add_argument('-p', '--prefix', type=str, required=True, help='The prefix portion of IDs to be generated')
    parser.add_argument('-m', '--mode', type=str, required=False, default='sequential', help='ID modes (see embedded documentation): sequential, uuid, hex8, hex12')
    args = parser.parse_args()
    check_arguments(args)

    # Maps old identifiers to their replacements, so Parent references resolve.
    id_map = dict()

    ## output will either be a file or STDOUT
    fout = sys.stdout
    if args.output_file is not None:
        fout = open(args.output_file, 'wt')

    for line in open(args.input_file):
        line = line.rstrip()
        cols = line.split("\t")

        if len(cols) != 9:
            # Not a feature row (e.g. header/comment): emit unchanged.
            fout.write(line + "\n")
            continue

        # grab the ID column if any
        id = gff.column_9_value(cols[8], 'ID')
        parent = gff.column_9_value(cols[8], 'Parent')
        new_id = None
        new_parent = None
        # Column 3 of GFF3 is the feature type (gene, mRNA, exon, ...).
        type = cols[2]

        if id is not None:
            if id in id_map:
                new_id = id_map[id]
            else:
                new_id = get_new_id(args.prefix, type, args.mode)
                id_map[id] = new_id
            cols[8] = cols[8].replace("ID={0}".format(id), "ID={0}".format(new_id))

        if parent is not None:
            if parent in id_map:
                new_parent = id_map[parent]
            else:
                # GFF3 features must be defined before they are referenced.
                raise Exception("ERROR: parent ({0}) referenced before it was used as an ID".format(parent))
            cols[8] = cols[8].replace("Parent={0}".format(parent), "Parent={0}".format(new_parent))

        #print("DEBUG: old_id:{0} - old_parent:{1}, new_id:{2} - new_parent:{3}".format(id, parent, new_id, new_parent))
        fout.write("\t".join(cols) + "\n")
#>>> binascii.hexlify(os.urandom(4))
#b'c08ca446'
#>>> uuid.uuid4()
#UUID('37cd0fbf-bdc3-49bc-8351-a7ebc5a93ea5')
def get_new_id(prefix, type, mode):
    """Generate a feature identifier of the form <prefix>.<type>.<unique>.1.

    The <unique> part depends on *mode*: a per-type counter ('sequential'),
    an RFC 4122 UUID ('uuid'), or 4/6 random bytes as hex ('hex8'/'hex12').
    """
    if mode == 'sequential':
        unique = str(next_ids_sequential[type])
        next_ids_sequential[type] += 1
    elif mode == 'uuid':
        unique = str(uuid4())
    elif mode == 'hex8':
        unique = hexlify(os.urandom(4)).decode('ascii')
    elif mode == 'hex12':
        unique = hexlify(os.urandom(6)).decode('ascii')
    else:
        # Unknown mode (check_arguments normally prevents this): empty middle,
        # matching the original concatenation behaviour.
        unique = ''
    return "{0}.{1}.{2}.1".format(prefix, type, unique)
def check_arguments( args ):
    """Validate parsed command-line arguments; raises on an unsupported --mode."""
    valid_modes = {'sequential', 'uuid', 'hex8', 'hex12'}
    if args.mode in valid_modes:
        return
    raise Exception("ERROR: The --mode provided ({0}) isn't supported. Please check the documentation again.".format(args.mode))
if __name__ == '__main__':
main()
| 29.496403 | 164 | 0.629268 |
import argparse
import os
import sys
from binascii import hexlify
from collections import defaultdict
from uuid import uuid4
from biocode import gff
next_ids_sequential = defaultdict(lambda: 1)
def main():
    """Rewrite ID/Parent attributes in a GFF3 file with newly generated identifiers.

    Lines without 9 tab-separated columns pass through unchanged; each ID gets
    a fresh identifier (see get_new_id) and the old->new mapping is reused so
    Parent references stay consistent.

    NOTE: this copy of the function was corrupted (several statements were
    truncated mid-line); they have been restored below.
    """
    parser = argparse.ArgumentParser( description='Generates new identifiers in GFF3 files following the IGS identifier convention.')
    # Restored truncated statement: the --input_file option definition.
    parser.add_argument('-i', '--input_file', type=str, required=True, help='TA file of source molecules' )
    parser.add_argument('-o', '--output_file', type=str, required=False, help='Optional output file path (else STDOUT)' )
    parser.add_argument('-p', '--prefix', type=str, required=True, help='The prefix portion of IDs to be generated')
    parser.add_argument('-m', '--mode', type=str, required=False, default='sequential', help='ID modes (see embedded documentation): sequential, uuid, hex8, hex12')
    args = parser.parse_args()
    check_arguments(args)
    # Maps old identifiers to their replacements so Parent references resolve.
    id_map = dict()
    # Restored truncated statements: STDOUT default and the output-file test.
    fout = sys.stdout
    if args.output_file is not None:
        fout = open(args.output_file, 'wt')
    for line in open(args.input_file):
        line = line.rstrip()
        cols = line.split("\t")
        if len(cols) != 9:
            # Not a feature row (header/comment): emit unchanged.
            fout.write(line + "\n")
            continue
        id = gff.column_9_value(cols[8], 'ID')
        parent = gff.column_9_value(cols[8], 'Parent')
        new_id = None
        new_parent = None
        # Column 3 of GFF3 is the feature type (gene, mRNA, exon, ...).
        type = cols[2]
        if id is not None:
            if id in id_map:
                new_id = id_map[id]
            else:
                new_id = get_new_id(args.prefix, type, args.mode)
                id_map[id] = new_id
            cols[8] = cols[8].replace("ID={0}".format(id), "ID={0}".format(new_id))
        if parent is not None:
            if parent in id_map:
                new_parent = id_map[parent]
            else:
                # GFF3 features must be defined before they are referenced.
                raise Exception("ERROR: parent ({0}) referenced before it was used as an ID".format(parent))
            cols[8] = cols[8].replace("Parent={0}".format(parent), "Parent={0}".format(new_parent))
        fout.write("\t".join(cols) + "\n")
def get_new_id(prefix, type, mode):
new_id = "{0}.{1}.".format(prefix, type)
if mode == 'sequential':
new_id += str(next_ids_sequential[type])
next_ids_sequential[type] += 1
elif mode == 'uuid':
new_id += str(uuid4())
elif mode == 'hex8':
new_id += hexlify(os.urandom(4)).decode('ascii')
elif mode == 'hex12':
new_id += hexlify(os.urandom(6)).decode('ascii')
new_id += '.1'
return new_id
def check_arguments( args ):
mode_options = ('sequential', 'uuid', 'hex8', 'hex12')
if args.mode not in mode_options:
raise Exception("ERROR: The --mode provided ({0}) isn't supported. Please check the documentation again.".format(args.mode))
if __name__ == '__main__':
main()
| true | true |
f71fca6c7b717f1271683d16d11ce61370e99869 | 2,628 | py | Python | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 74 | 2019-10-22T02:00:53.000Z | 2022-03-15T12:56:13.000Z | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 375 | 2019-10-22T20:53:50.000Z | 2021-11-09T21:28:43.000Z | Apps/phforescoutcounteract/forescoutcounteract_consts.py | ryanbsaunders/phantom-apps | 1befda793a08d366fbd443894f993efb1baf9635 | [
"Apache-2.0"
] | 175 | 2019-10-23T15:30:42.000Z | 2021-11-05T21:33:31.000Z | # File: forescoutcounteract_consts.py
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
#
# --
# Define your constants here
FS_DEX_HOST_ENDPOINT = '/fsapi/niCore/Hosts'
FS_DEX_LIST_ENDPOINT = '/fsapi/niCore/Lists'
FS_DEX_TEST_CONNECTIVITY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="true"/>
<HOST_KEY NAME="ip" VALUE="{host_key_value}"/>
<PROPERTIES></PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="{create_host}"/>
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}">
<VALUE>{property_value}</VALUE>
</PROPERTY>
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_DELETE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="delete">
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}" />
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_LIST_PROPERTY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="2.0">
<TRANSACTION TYPE="{transaction_type}">
<LISTS>
{list_body}
</LISTS>
</TRANSACTION>
</FSAPI>"""
FS_WEB_LOGIN = '/api/login'
FS_WEB_HOSTS = '/api/hosts'
FS_WEB_HOSTFIELDS = '/api/hostfields'
FS_WEB_POLICIES = '/api/policies'
# Error message constants
FS_ERR_CODE_MSG = "Error code unavailable"
FS_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
FS_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
# validate integer
ERR_VALID_INT_MSG = "Please provide a valid integer value in the {}"
ERR_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the {}"
ERR_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
HOST_ID_INT_PARAM = "'host_id' action parameter"
| 36.5 | 117 | 0.619102 |
FS_DEX_HOST_ENDPOINT = '/fsapi/niCore/Hosts'
FS_DEX_LIST_ENDPOINT = '/fsapi/niCore/Lists'
FS_DEX_TEST_CONNECTIVITY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="true"/>
<HOST_KEY NAME="ip" VALUE="{host_key_value}"/>
<PROPERTIES></PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="update">
<OPTIONS CREATE_NEW_HOST="{create_host}"/>
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}">
<VALUE>{property_value}</VALUE>
</PROPERTY>
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_DELETE_SIMPLE_PROPERTY = \
"""<?xml version='1.0' encoding='utf-8'?>
<FSAPI TYPE="request" API_VERSION="1.0">
<TRANSACTION TYPE="delete">
<HOST_KEY NAME="{host_key_name}" VALUE="{host_key_value}"/>
<PROPERTIES>
<PROPERTY NAME="{property_name}" />
</PROPERTIES>
</TRANSACTION>
</FSAPI>"""
FS_DEX_UPDATE_LIST_PROPERTY = \
"""<?xml version="1.0" encoding="UTF-8"?>
<FSAPI TYPE="request" API_VERSION="2.0">
<TRANSACTION TYPE="{transaction_type}">
<LISTS>
{list_body}
</LISTS>
</TRANSACTION>
</FSAPI>"""
FS_WEB_LOGIN = '/api/login'
FS_WEB_HOSTS = '/api/hosts'
FS_WEB_HOSTFIELDS = '/api/hostfields'
FS_WEB_POLICIES = '/api/policies'
FS_ERR_CODE_MSG = "Error code unavailable"
FS_ERR_MSG_UNAVAILABLE = "Error message unavailable. Please check the asset configuration and|or action parameters"
FS_PARSE_ERR_MSG = "Unable to parse the error message. Please check the asset configuration and|or action parameters"
ERR_VALID_INT_MSG = "Please provide a valid integer value in the {}"
ERR_NON_NEG_INT_MSG = "Please provide a valid non-negative integer value in the {}"
ERR_POSITIVE_INTEGER_MSG = "Please provide a valid non-zero positive integer value in the {}"
HOST_ID_INT_PARAM = "'host_id' action parameter"
| true | true |
f71fcd6b02e4155fea4b2f52039a9ce2c32ad6f6 | 7,009 | py | Python | nmeta2dpae/tc_policy_dpae.py | mattjhayes/nmeta2-dpae | f441d78f0cdbd63495d4f96cddb462b801d82925 | [
"Apache-2.0"
] | 1 | 2016-03-19T20:42:49.000Z | 2016-03-19T20:42:49.000Z | nmeta2dpae/tc_policy_dpae.py | mattjhayes/nmeta2-dpae | f441d78f0cdbd63495d4f96cddb462b801d82925 | [
"Apache-2.0"
] | 2 | 2016-04-09T01:17:39.000Z | 2016-04-09T01:30:58.000Z | nmeta2dpae/tc_policy_dpae.py | mattjhayes/nmeta2-dpae | f441d78f0cdbd63495d4f96cddb462b801d82925 | [
"Apache-2.0"
] | 4 | 2016-05-04T08:42:29.000Z | 2021-07-16T02:11:40.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is part of nmeta Data Plane Auxiliary Engine (DPAE)
.
It is used to contain the Traffic Classification (TC) policy and provide
methods and direct variables to access it
.
Version 2.x Toulouse Code
"""
#*** Logging imports:
import logging
import logging.handlers
import coloredlogs
import sys
#*** YAML for config and policy file parsing:
import yaml
#*** Keys that must exist under 'identity' in the policy:
IDENTITY_KEYS = ('arp',
'lldp',
'dns',
'dhcp')
class TCPolicy(object):
    """
    This class is instantiated by nmeta2.py and provides methods
    to ingest the policy file main_policy.yaml and validate
    that it is correctly structured.

    Policies and optimised Traffic Classification (TC) rules are held
    per sniff interface in ``self.main_policy`` and ``self.opt_rules``.
    """
    def __init__(self, _config):
        """
        Configure logging (syslog and/or console, as per the supplied
        config object) and initialise empty per-interface policy stores.
        """
        #*** Get logging config values from config class:
        _logging_level_s = _config.get_value \
                                    ('tc_policy_dpae_logging_level_s')
        _logging_level_c = _config.get_value \
                                    ('tc_policy_dpae_logging_level_c')
        _syslog_enabled = _config.get_value('syslog_enabled')
        _loghost = _config.get_value('loghost')
        _logport = _config.get_value('logport')
        _logfacility = _config.get_value('logfacility')
        _syslog_format = _config.get_value('syslog_format')
        _console_log_enabled = _config.get_value('console_log_enabled')
        _coloredlogs_enabled = _config.get_value('coloredlogs_enabled')
        _console_format = _config.get_value('console_format')
        #*** Set up Logging:
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        #*** Don't bubble records up to the root logger (avoids duplicates):
        self.logger.propagate = False
        #*** Syslog:
        if _syslog_enabled:
            #*** Log to syslog on host specified in config.yaml:
            self.syslog_handler = logging.handlers.SysLogHandler(address=(
                                                _loghost, _logport),
                                                facility=_logfacility)
            syslog_formatter = logging.Formatter(_syslog_format)
            self.syslog_handler.setFormatter(syslog_formatter)
            self.syslog_handler.setLevel(_logging_level_s)
            #*** Add syslog log handler to logger:
            self.logger.addHandler(self.syslog_handler)
        #*** Console logging:
        if _console_log_enabled:
            #*** Log to the console:
            if _coloredlogs_enabled:
                #*** Colourise the logs to make them easier to understand:
                coloredlogs.install(level=_logging_level_c,
                        logger=self.logger, fmt=_console_format,
                        datefmt='%H:%M:%S')
            else:
                #*** Add console log handler to logger:
                self.console_handler = logging.StreamHandler()
                console_formatter = logging.Formatter(_console_format)
                self.console_handler.setFormatter(console_formatter)
                self.console_handler.setLevel(_logging_level_c)
                self.logger.addHandler(self.console_handler)
        #*** Object to hold Controller main policies per interface in YAML:
        self.main_policy = dict()
        #*** Object to hold Controller optimised TC rules per iface in YAML:
        self.opt_rules = dict()

    def ingest_main_policy(self, main_policy_text, if_name):
        """
        Turn a plain text main policy file object into a YAML object
        and store it as a class variable, keyed by interface name.

        Returns 1 on success, 0 on failure.
        """
        #*** Ingest the policy file:
        try:
            #*** SECURITY NOTE: yaml.load without an explicit SafeLoader can
            #*** construct arbitrary Python objects. Policy files come from
            #*** the Controller, but consider yaml.safe_load here.
            self.main_policy[if_name] = yaml.load(main_policy_text)
        except Exception:
            #*** Narrowed from a bare 'except:' which would also swallow
            #*** SystemExit and KeyboardInterrupt:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.error("Failed to convert main policy to YAML "
                              "%s, %s, %s",
                              exc_type, exc_value, exc_traceback)
            return 0
        self.logger.debug("Successfully ingested main policy into YAML")
        return 1

    def ingest_optimised_rules(self, opt_rules_text, if_name):
        """
        Turn a plain optimised TC rules file object into a YAML object
        and store it as a class variable, keyed by interface name.

        Returns 1 on success, 0 on failure.
        """
        #*** Ingest the policy file:
        try:
            #*** SECURITY NOTE: see ingest_main_policy re yaml.load:
            self.opt_rules[if_name] = yaml.load(opt_rules_text)
        except Exception:
            #*** Narrowed from a bare 'except:' (see ingest_main_policy):
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.error("Failed to convert optimised TC rules to YAML "
                              "%s, %s, %s",
                              exc_type, exc_value, exc_traceback)
            return 0
        self.logger.debug("Successfully ingested optimised TC rules into YAML")
        return 1

    def get_id_flag(self, if_name, id_key):
        """
        Get a value for an Identity Indicator harvesting flag.

        Returns 0 if id_key is not one of IDENTITY_KEYS, otherwise the
        flag value from the interface's main policy.
        """
        if id_key not in IDENTITY_KEYS:
            self.logger.error("The key %s is not valid", id_key)
            return 0
        return self.main_policy[if_name]['identity'][id_key]

    def get_tc_classifiers(self, if_name):
        """
        Return a list of traffic classifiers
        that should be run against ingress packets on a sniff interface.
        Each entry is a tuple of type (statistical or payload) and
        classifier name, example:
        [('statistical', 'statistical_qos_bandwidth_1')]
        """
        classifiers = []
        for idx, fe_match_list in enumerate(self.opt_rules[if_name]):
            self.logger.info("Optimised fe_match_list %s is %s", idx,
                             fe_match_list)
            #*** Skip (but log) malformed entries without an install_type:
            if 'install_type' not in fe_match_list:
                self.logger.error("no install_type key")
                continue
            #*** Only rules installed to the DPAE are run locally:
            if fe_match_list['install_type'] == 'to_dpae':
                self.logger.debug("Matched a DPAE TC condition...")
                classifiers.append((fe_match_list['type'],
                                    fe_match_list['value']))
        return classifiers

    def tc_mode(self, if_name):
        """
        Return the tc mode for the policy (active or passive).

        Falls back to 'passive' (and logs an error) if the mode value
        is missing or falsy.
        """
        _tc_policies = self.main_policy[if_name]['tc_policies']
        #*** Only a single TC policy is expected; take the first name:
        _tc_policies_keys = list(_tc_policies.keys())
        _tc_policy_name = _tc_policies_keys[0]
        tc_mode = _tc_policies[_tc_policy_name]['mode']
        if tc_mode:
            return tc_mode
        else:
            self.logger.error("Could not find tc mode in policy")
            return 'passive'
| 40.75 | 79 | 0.612356 |
import logging
import logging.handlers
import coloredlogs
import sys
import yaml
# Valid identity-harvesting flag names; TCPolicy.get_id_flag rejects any
# key that is not in this tuple.
IDENTITY_KEYS = ('arp',
                 'lldp',
                 'dns',
                 'dhcp')
class TCPolicy(object):
    """Holds the DPAE Traffic Classification (TC) policy.

    Ingests the Controller's main policy and optimised TC rules (YAML),
    per sniff interface, and provides lookups against them.
    """
    def __init__(self, _config):
        """Configure logging per the supplied config object and
        initialise empty per-interface policy containers."""
        # Retrieve logging configuration from the config object:
        _logging_level_s = _config.get_value \
            ('tc_policy_dpae_logging_level_s')
        _logging_level_c = _config.get_value \
            ('tc_policy_dpae_logging_level_c')
        _syslog_enabled = _config.get_value('syslog_enabled')
        _loghost = _config.get_value('loghost')
        _logport = _config.get_value('logport')
        _logfacility = _config.get_value('logfacility')
        _syslog_format = _config.get_value('syslog_format')
        _console_log_enabled = _config.get_value('console_log_enabled')
        _coloredlogs_enabled = _config.get_value('coloredlogs_enabled')
        _console_format = _config.get_value('console_format')
        # Module-level logger; propagate disabled to avoid duplicate records
        # via the root logger:
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
        self.logger.propagate = False
        # Optional syslog handler (host/port/facility from config):
        if _syslog_enabled:
            self.syslog_handler = logging.handlers.SysLogHandler(address=(
                _loghost, _logport),
                facility=_logfacility)
            syslog_formatter = logging.Formatter(_syslog_format)
            self.syslog_handler.setFormatter(syslog_formatter)
            self.syslog_handler.setLevel(_logging_level_s)
            self.logger.addHandler(self.syslog_handler)
        # Optional console handler, colourised if configured:
        if _console_log_enabled:
            if _coloredlogs_enabled:
                coloredlogs.install(level=_logging_level_c,
                    logger=self.logger, fmt=_console_format, datefmt='%H:%M:%S')
            else:
                self.console_handler = logging.StreamHandler()
                console_formatter = logging.Formatter(_console_format)
                self.console_handler.setFormatter(console_formatter)
                self.console_handler.setLevel(_logging_level_c)
                self.logger.addHandler(self.console_handler)
        # Per-interface stores for ingested YAML policy documents:
        self.main_policy = dict()
        self.opt_rules = dict()
    def ingest_main_policy(self, main_policy_text, if_name):
        """Parse main policy text as YAML and store it under if_name.

        Returns 1 on success, 0 on failure.
        """
        try:
            # NOTE(review): yaml.load without an explicit SafeLoader can
            # construct arbitrary Python objects; consider yaml.safe_load.
            self.main_policy[if_name] = yaml.load(main_policy_text)
        except:
            # NOTE(review): bare 'except:' also swallows SystemExit and
            # KeyboardInterrupt; 'except Exception:' would be safer.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.error("Failed to convert main policy to YAML "
                              "%s, %s, %s",
                              exc_type, exc_value, exc_traceback)
            return 0
        self.logger.debug("Successfully ingested main policy into YAML")
        return 1
    def ingest_optimised_rules(self, opt_rules_text, if_name):
        """Parse optimised TC rules text as YAML and store under if_name.

        Returns 1 on success, 0 on failure.
        """
        try:
            # NOTE(review): see ingest_main_policy re yaml.load safety.
            self.opt_rules[if_name] = yaml.load(opt_rules_text)
        except:
            # NOTE(review): bare 'except:' — see ingest_main_policy.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            self.logger.error("Failed to convert optimised TC rules to YAML "
                              "%s, %s, %s",
                              exc_type, exc_value, exc_traceback)
            return 0
        self.logger.debug("Successfully ingested optimised TC rules into YAML")
        return 1
    def get_id_flag(self, if_name, id_key):
        """Return the identity-harvesting flag `id_key` for `if_name`,
        or 0 if id_key is not one of IDENTITY_KEYS."""
        # NOTE(review): 'id_key not in IDENTITY_KEYS' is the idiomatic form.
        if not id_key in IDENTITY_KEYS:
            self.logger.error("The key %s is not valid", id_key)
            return 0
        return self.main_policy[if_name]['identity'][id_key]
    def get_tc_classifiers(self, if_name):
        """Return (type, name) tuples for classifiers to run on ingress
        packets of the given sniff interface, e.g.
        [('statistical', 'statistical_qos_bandwidth_1')]."""
        classifiers = []
        for idx, fe_match_list in enumerate(self.opt_rules[if_name]):
            self.logger.info("Optimised fe_match_list %s is %s", idx,
                             fe_match_list)
            # Skip (but log) entries lacking an install_type:
            if not 'install_type' in fe_match_list:
                self.logger.error("no install_type key")
                continue
            # Only rules destined for the DPAE are run locally:
            if fe_match_list['install_type'] == 'to_dpae':
                self.logger.debug("Matched a DPAE TC condition...")
                classifiers.append((fe_match_list['type'],
                                    fe_match_list['value']))
        return classifiers
    def tc_mode(self, if_name):
        """Return the TC mode ('active' or 'passive') for the interface's
        policy, defaulting to 'passive' (with an error log) when the
        mode value is missing or falsy."""
        _tc_policies = self.main_policy[if_name]['tc_policies']
        # A single TC policy is expected; take the first key's entry:
        _tc_policies_keys = list(_tc_policies.keys())
        _tc_policy_name = _tc_policies_keys[0]
        tc_mode = _tc_policies[_tc_policy_name]['mode']
        if tc_mode:
            return tc_mode
        else:
            self.logger.error("Could not find tc mode in policy")
            return 'passive'
| true | true |
f71fcd8293089c972b431387d1197b53dd7b564d | 516 | py | Python | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | null | null | null | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | 2 | 2018-12-20T20:10:52.000Z | 2019-02-04T17:44:01.000Z | src/event_representations.py | ATTPC/VAE-event-classification | aae331d44bffffec2ca8a6cdef71208899db0052 | [
"MIT"
] | null | null | null | import numpy as np
def make_histograms(x, bins=40, interval=(1e-1, 1)):
    """Build a per-event histogram representation of `x`.

    Each event (row along the first axis) is flattened and its values are
    counted into bins whose edges are `bins` evenly spaced points over
    `interval`. Bin ``i`` (for ``i >= 1``) counts values ``v`` with
    ``edges[i-1] < v <= edges[i]``.

    Note: column 0 is always zero (the loop starts at 1), so values at or
    below ``interval[0]``, or above ``interval[1]``, are not counted.

    Args:
        x: array of shape (n_events, ...); trailing dims are flattened.
        bins: number of output columns (also the number of bin edges).
        interval: (low, high) range spanned by the bin edges. The default
            was a mutable list; changed to a tuple to avoid the
            mutable-default-argument pitfall (accepted values unchanged).

    Returns:
        Array of shape (n_events, bins) with per-bin counts.
    """
    intervals = np.linspace(interval[0], interval[1], bins)
    flat_x = x.reshape((x.shape[0], -1))
    hist_x = np.zeros((x.shape[0], bins))
    for i in range(1, bins):
        # Values in the half-open-from-below bin (intervals[i-1], intervals[i]]
        mask = flat_x <= intervals[i]
        mask = np.logical_and(mask, flat_x > intervals[i - 1])
        hist_x[:, i] = mask.sum(1)
    return hist_x
def make_net_count(x, **kwargs):
    """Return the total (summed) value of each event in `x`.

    Everything after the first axis is flattened and summed, yielding one
    scalar per event. Extra keyword arguments are accepted and ignored so
    the function is call-compatible with other representation builders.
    """
    per_event = x.reshape((x.shape[0], -1))
    return per_event.sum(axis=1)
| 28.666667 | 60 | 0.604651 | import numpy as np
def make_histograms(x, bins=40, interval=[1e-1, 1]):
    """Count, per event, how many flattened values land in each bin.

    Bin edges are `bins` evenly spaced points across `interval`; bin ``i``
    (``i >= 1``) counts values ``v`` with ``edges[i-1] < v <= edges[i]``.
    Column 0 is never filled, and values outside the edge range are
    not counted.
    """
    edges = np.linspace(interval[0], interval[1], bins)
    samples = x.reshape((x.shape[0], -1))
    counts = np.zeros((x.shape[0], bins))
    for idx in range(1, bins):
        in_bin = (samples > edges[idx - 1]) & (samples <= edges[idx])
        counts[:, idx] = in_bin.sum(axis=1)
    return counts
def make_net_count(x, **kwargs):
    """Sum all values of each event, giving one scalar per row of `x`.

    Unused keyword arguments are tolerated for interface compatibility
    with the other representation functions.
    """
    flattened = x.reshape((x.shape[0], -1))
    totals = flattened.sum(axis=1)
    return totals
| true | true |
f71fce257398e75ef7291d3d8d8cd6d620bff9ed | 122 | py | Python | zpy/classes/bases/utility/pretty.py | yu-ichiro/zpy | 65e5ae7616d6e7fce91a03f20f663caa4af834b5 | [
"MIT"
] | null | null | null | zpy/classes/bases/utility/pretty.py | yu-ichiro/zpy | 65e5ae7616d6e7fce91a03f20f663caa4af834b5 | [
"MIT"
] | null | null | null | zpy/classes/bases/utility/pretty.py | yu-ichiro/zpy | 65e5ae7616d6e7fce91a03f20f663caa4af834b5 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class Pretty(ABC):
    """Abstract base for objects that provide a pretty string form.

    Subclasses must implement ``__pretty__`` to return that string;
    presumably consumed by a pretty-printing utility — confirm with
    call sites.
    """
    @abstractmethod
    def __pretty__(self) -> str:
        """Return the pretty string representation of this object."""
        ...
| 15.25 | 35 | 0.639344 | from abc import ABC, abstractmethod
class Pretty(ABC):
    """Abstract interface: implementors render themselves as a string
    via ``__pretty__`` (presumably for pretty-printing — confirm with
    call sites)."""
    @abstractmethod
    def __pretty__(self) -> str:
        """Return the pretty string representation of this object."""
        ...
| true | true |
f71fcef13051a8d05b65c648d775148a97470b27 | 167,246 | py | Python | tensorflow/python/ops/nn_ops.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | 5 | 2019-01-13T16:15:25.000Z | 2019-07-07T16:17:32.000Z | tensorflow/python/ops/nn_ops.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/nn_ops.py | handongke/tensorflow | c6bb5cd0447a0af2764c195fb14d218df8ae6471 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
# Aliases for some automatically-generated names.
# `gen_nn_ops.lrn` is the generated Local Response Normalization op;
# re-export it here under the conventional long name.
local_response_normalization = gen_nn_ops.lrn
# pylint: disable=protected-access
def _non_atrous_convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    data_format=None,  # pylint: disable=redefined-builtin
    strides=None,
    name=None):
  """Computes sums of N-D convolutions (actually cross correlation).

  It is required that 1 <= N <= 3.

  This is used to implement the more generic `convolution` function, which
  extends the interface of this function with a `dilation_rate` parameter.

  Args:
    input: Rank N+2 tensor of type T of shape
      `[batch_size] + input_spatial_shape + [in_channels]` if `data_format`
      does not start with `"NC"`, or
      `[batch_size, in_channels] + input_spatial_shape` if `data_format`
      starts with `"NC"`.
    filter: Rank N+2 tensor of type T of shape
      `filter_spatial_shape + [in_channels, out_channels]`.  Rank of either
      `input` or `filter` must be known.
    padding: Padding method to use, must be either "VALID" or "SAME".
    data_format: A string or None.  Specifies whether the channel dimension
      of the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC").  For N=1, the valid values are "NWC"
      (default) and "NCW".  For N=2, the valid values are "NHWC" (default)
      and "NCHW".  For N=3, the valid values are "NDHWC" (default) and
      "NCDHW".
    strides: Sequence of N positive integers, defaults to `[1] * N`.
    name: Name prefix to use.

  Returns:
    Rank N+2 tensor of type T of shape
    `[batch_size] + output_spatial_shape + [out_channels]`, where
    if padding == "SAME":
      output_spatial_shape = input_spatial_shape
    if padding == "VALID":
      output_spatial_shape = input_spatial_shape - filter_spatial_shape + 1.

  Raises:
    ValueError: if ranks are incompatible.
  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    input_shape = input.get_shape()
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    filter_shape = filter.get_shape()
    # The helper selects the concrete conv op (conv1d/conv2d/conv3d) from the
    # static ranks; shapes passed to __call__ must match these static shapes.
    op = _NonAtrousConvolution(
        input_shape,
        filter_shape=filter_shape,
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return op(input, filter)
class _NonAtrousConvolution(object):
  """Helper class for _non_atrous_convolution.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments:
    input_shape: static input shape, i.e. input.get_shape().
    filter_shape: static filter shape, i.e. filter.get_shape().
    padding: see _non_atrous_convolution.
    data_format: see _non_atrous_convolution.
    strides: see _non_atrous_convolution.
    name: see _non_atrous_convolution.

  Raises:
    ValueError: from __init__, if the convolution rank is unknown, not
      between 3 and 5, or inconsistent with `strides`/`data_format`.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,  # pylint: disable=redefined-builtin
      padding,
      data_format=None,
      strides=None,
      name=None):
    # Merging ranks in both directions validates that input and filter have
    # the same (known) rank.
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    # Number of spatial dimensions = rank minus batch and channel dims.
    conv_dims = input_shape.ndims - 2
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    # Select the concrete conv op and normalize `strides` into the layout it
    # expects: a scalar for conv1d; a length-4/5 list (with 1s in the batch
    # and channel positions) for conv2d/conv3d.
    if conv_dims == 1:
      # conv1d uses the 2-d data format names
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Note that we need this adapter since argument names for conv1d don't match
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    """Adapter that maps the generic conv argument names onto `conv1d`'s."""
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)
  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Runs the selected convolution op on `inp` and `filter`."""
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
    input,  # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    strides,
    padding,
    data_format,
    dilations,
    name=None):
  """Computes the grayscale dilation of 4-D `input` and 3-D `filters` tensors.

  The `input` tensor has shape `[batch, in_height, in_width, depth]` and the
  `filters` tensor has shape `[filter_height, filter_width, depth]`, i.e., each
  input channel is processed independently of the others with its own
  structuring function. The `output` tensor has shape
  `[batch, out_height, out_width, depth]`. The spatial dimensions of the output
  tensor depend on the `padding` algorithm. We currently only support the
  default "NHWC" `data_format`.

  In detail, the grayscale morphological 2-D dilation is the max-sum
  correlation (for consistency with `conv2d`, we use unmirrored filters):

      output[b, y, x, c] =
         max_{dy, dx} input[b,
                            strides[1] * y + rates[1] * dy,
                            strides[2] * x + rates[2] * dx,
                            c] +
                      filters[dy, dx, c]

  Max-pooling is a special case when the filter has size equal to the pooling
  kernel size and contains all zeros.

  Note on duality: The dilation of `input` by the `filters` is equal to the
  negation of the erosion of `-input` by the reflected `filters`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`,
      `uint16`, `half`, `uint32`, `uint64`.
      4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor`. Must have the same type as `input`.
      3-D with shape `[filter_height, filter_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the input
      tensor. Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`.
      The input stride for atrous morphological dilation. Must be:
      `[1, rate_height, rate_width, 1]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.

  Raises:
    ValueError: if `data_format` is not "NHWC".
  """
  # BUGFIX: this previously rejected everything EXCEPT "NCHW", contradicting
  # both this docstring and the op's [batch, height, width, depth] layout;
  # only NHWC is supported.
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")

  return gen_nn_ops.dilation2d(input=input,
                               filter=filters,
                               strides=strides,
                               rates=dilations,
                               padding=padding,
                               name=name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
    input,  # pylint: disable=redefined-builtin
    dilation_rate,
    padding,
    op,
    filter_shape=None,
    spatial_dims=None,
    data_format=None):
  """Performs `op` on the space-to-batch representation of `input`.

  This has the effect of transforming sliding window operations into the
  corresponding "atrous" operation in which the input is sampled at the
  specified `dilation_rate`.

  In the special case that `dilation_rate` is uniformly 1, this simply returns:

    op(input, num_spatial_dims, padding)

  Otherwise, it returns:

    batch_to_space_nd(
      op(space_to_batch_nd(input, adjusted_dilation_rate, adjusted_paddings),
         num_spatial_dims,
         "VALID")
      adjusted_dilation_rate,
      adjusted_crops),

  where:

    adjusted_dilation_rate is an int64 tensor of shape [max(spatial_dims)],
    adjusted_{paddings,crops} are int64 tensors of shape [max(spatial_dims), 2]

  defined as follows:

  We first define two int64 tensors `paddings` and `crops` of shape
  `[num_spatial_dims, 2]` based on the value of `padding` and the spatial
  dimensions of the `input`:

  If `padding = "VALID"`, then:

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate)

  If `padding = "SAME"`, then:

    dilated_filter_shape =
      filter_shape + (filter_shape - 1) * (dilation_rate - 1)

    paddings, crops = required_space_to_batch_paddings(
      input_shape[spatial_dims],
      dilation_rate,
      [(dilated_filter_shape - 1) // 2,
       dilated_filter_shape - 1 - (dilated_filter_shape - 1) // 2])

  Because `space_to_batch_nd` and `batch_to_space_nd` assume that the spatial
  dimensions are contiguous starting at the second dimension, but the
  specified `spatial_dims` may not be, we must adjust `dilation_rate`,
  `paddings` and `crops` in order to be usable with these operations.  For a
  given dimension, if the block size is 1, and both the starting and ending
  padding and crop amounts are 0, then space_to_batch_nd effectively leaves
  that dimension alone, which is what is needed for dimensions not part of
  `spatial_dims`.  Furthermore, `space_to_batch_nd` and `batch_to_space_nd`
  handle this case efficiently for any number of leading and trailing
  dimensions.

  For 0 <= i < len(spatial_dims), we assign:

    adjusted_dilation_rate[spatial_dims[i] - 1] = dilation_rate[i]
    adjusted_paddings[spatial_dims[i] - 1, :] = paddings[i, :]
    adjusted_crops[spatial_dims[i] - 1, :] = crops[i, :]

  All unassigned values of `adjusted_dilation_rate` default to 1, while all
  unassigned values of `adjusted_paddings` and `adjusted_crops` default to 0.

  Note in the case that `dilation_rate` is not uniformly 1, specifying "VALID"
  padding is equivalent to specifying `padding = "SAME"` with a filter_shape
  of `[1]*N`.

  Advanced usage. Note the following optimization: A sequence of
  `with_space_to_batch` operations with identical (not uniformly 1)
  `dilation_rate` parameters and "VALID" padding

    net = with_space_to_batch(net, dilation_rate, "VALID", op_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "VALID", op_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "VALID")
      ...
      result = op_k(result, num_spatial_dims, "VALID")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  This eliminates the overhead of `k-1` calls to `space_to_batch_nd` and
  `batch_to_space_nd`.

  Similarly, a sequence of `with_space_to_batch` operations with identical
  (not uniformly 1) `dilation_rate` parameters, "SAME" padding, and odd filter
  dimensions

    net = with_space_to_batch(net, dilation_rate, "SAME", op_1, filter_shape_1)
    ...
    net = with_space_to_batch(net, dilation_rate, "SAME", op_k, filter_shape_k)

  can be combined into a single `with_space_to_batch` operation as follows:

    def combined_op(converted_input, num_spatial_dims, _):
      result = op_1(converted_input, num_spatial_dims, "SAME")
      ...
      result = op_k(result, num_spatial_dims, "SAME")

    net = with_space_to_batch(net, dilation_rate, "VALID", combined_op)

  Args:
    input: Tensor of rank > max(spatial_dims).
    dilation_rate: int32 Tensor of *known* shape [num_spatial_dims].
    padding: str constant equal to "VALID" or "SAME"
    op: Function that maps (input, num_spatial_dims, padding) -> output
    filter_shape: If padding = "SAME", specifies the shape of the convolution
      kernel/pooling window as an integer Tensor of shape
      [>=num_spatial_dims].  If padding = "VALID", filter_shape is ignored
      and need not be specified.
    spatial_dims: Monotonically increasing sequence of `num_spatial_dims`
      integers (which are >= 1) specifying the spatial dimensions of `input`
      and output.  Defaults to: `range(1, num_spatial_dims+1)`.
    data_format: A string or None.  Specifies whether the channel dimension
      of the `input` and output is the last dimension (default, or if
      `data_format` does not start with "NC"), or the second dimension (if
      `data_format` starts with "NC").  For N=1, the valid values are "NWC"
      (default) and "NCW".  For N=2, the valid values are "NHWC" (default)
      and "NCHW".  For N=3, the valid values are "NDHWC" (default) and
      "NCDHW".

  Returns:
    The output Tensor as described above, dimensions will vary based on the
    op provided.

  Raises:
    ValueError: if `padding` is invalid or the arguments are incompatible.
    ValueError: if `spatial_dims` are invalid.
  """
  input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
  input_shape = input.get_shape()
  # _WithSpaceToBatch expects a factory so it can bind the padding actually
  # used ("VALID" internally when the rate is not uniformly 1); the second
  # argument of the returned callable (the filter) is unused here because
  # `op` closes over its own filter.
  def build_op(num_spatial_dims, padding):
    return lambda inp, _: op(inp, num_spatial_dims, padding)
  new_op = _WithSpaceToBatch(
      input_shape,
      dilation_rate,
      padding,
      build_op,
      filter_shape=filter_shape,
      spatial_dims=spatial_dims,
      data_format=data_format)
  return new_op(input, None)
class _WithSpaceToBatch(object):
  """Helper class for with_space_to_batch.

  Note that this class assumes that shapes of input and filter passed to
  __call__ are compatible with input_shape and filter_shape passed to the
  constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    dilation_rate: see with_space_to_batch
    padding: see with_space_to_batch
    build_op: Function that maps (num_spatial_dims, paddings) -> (function
      that maps (input, filter) -> output).
    filter_shape: see with_space_to_batch
    spatial_dims: see with_space_to_batch
    data_format: see with_space_to_batch
  """

  def __init__(self,
               input_shape,
               dilation_rate,
               padding,
               build_op,
               filter_shape=None,
               spatial_dims=None,
               data_format=None):
    """Helper class for _with_space_to_batch."""
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")
    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")
    num_spatial_dims = rate_shape.dims[0].value
    # Spatial dims start after batch (and channel, for "NC*" layouts).
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1
    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      raise ValueError(
          "spatial_dims must be a montonically increasing sequence of positive "
          "integers")  # pylint: disable=line-too-long
    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1
    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))
    # If the rate is statically known, validate it; if it is uniformly 1
    # there is nothing to transform, so bind the op directly and return.
    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        self.call = build_op(num_spatial_dims, padding)
        return
    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.
    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape only known at runtime: defer base_paddings computation
        # to __call__ (see _with_space_to_batch_call).
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)
    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    # The wrapped op always runs with "VALID" padding; the SAME/VALID choice
    # above is absorbed into base_paddings.
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Call functionality for with_space_to_batch."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic (runtime) shape of the input.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])
    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)
    # Expand rate/paddings/crops to cover every dimension up to
    # max(spatial_dims), using identity values for non-spatial dims.
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)
    result = self.op(input_converted, filter)
    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)
    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)
    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Dispatches to either the wrapped op directly (uniform rate 1) or the
    space-to-batch transformed call (see __init__)."""
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Computes the base paddings that reduce "SAME" to "VALID" behavior.

  Returns a `[num_spatial_dims, 2]` tensor of (start, end) padding amounts
  derived from the effective (dilated) filter size.
  """
  # Effective filter size per spatial dimension after inserting
  # (rate - 1) zeros between consecutive filter values:
  # k -> k + (k - 1) * (rate - 1).
  spatial = filter_shape[:num_spatial_dims]
  dilated = spatial + (spatial - 1) * (rate_or_const_rate - 1)
  total_pad = dilated - 1
  # When the total padding is odd, pad more at the end, following the same
  # convention as conv2d.
  start = total_pad // 2
  end = total_pad - start
  return array_ops.stack(
      [[start[d], end[d]] for d in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Returns an `adjusted` version of `orig` based on `spatial_dims`.

  Tensor of the same type as `orig` and with shape
  `[max(spatial_dims), ...]` where:

    adjusted[spatial_dims[i] - 1, ...] = orig[i, ...]

  for 0 <= i < len(spatial_dims), and

    adjusted[j, ...] = fill_value

  for j != spatial_dims[i] - 1 for some i.

  If `orig` is a constant value, then the result will be a constant value.

  Args:
    orig: Tensor of rank > max(spatial_dims).
    fill_value: Numpy scalar (of same data type as `orig`) specifying the fill
      value for non-spatial dimensions.
    spatial_dims: See with_space_to_batch.

  Returns:
    `adjusted` tensor.
  """
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  # If `orig` is statically known, build the result entirely in numpy so the
  # output is also a constant; otherwise fall back to graph ops at the end.
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    # Copy the whole contiguous run from `orig` with a single slice.
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
"""Helper function for verifying strides and dilation_rate arguments.
This is used by `convolution` and `pool`.
Args:
num_spatial_dims: int
strides: Optional. List of N ints >= 1. Defaults to [1]*N. If any value
of strides is > 1, then all values of dilation_rate must be 1.
dilation_rate: Optional. List of N ints >= 1. Defaults to [1]*N. If any
value of dilation_rate is > 1, then all values of strides must be 1.
Returns:
Normalized (strides, dilation_rate) as int32 numpy arrays of shape
[num_spatial_dims].
Raises:
ValueError: if the parameters are invalid.
"""
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
input, # pylint: disable=redefined-builtin
filter, # pylint: disable=redefined-builtin
padding,
strides=None,
dilation_rate=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
"""Computes sums of N-D convolutions (actually cross-correlation).
This also supports either output striding via the optional `strides` parameter
or atrous convolution (also known as convolution with holes or dilated
convolution, based on the French word "trous" meaning holes in English) via
the optional `dilation_rate` parameter. Currently, however, output striding
is not supported for atrous convolutions.
Specifically, in the case that `data_format` does not start with "NC", given
a rank (N+2) `input` Tensor of shape
[num_batches,
input_spatial_shape[0],
...,
input_spatial_shape[N-1],
num_input_channels],
a rank (N+2) `filter` Tensor of shape
[spatial_filter_shape[0],
...,
spatial_filter_shape[N-1],
num_input_channels,
num_output_channels],
an optional `dilation_rate` tensor of shape [N] (defaulting to [1]*N)
specifying the filter upsampling/input downsampling rate, and an optional list
of N `strides` (defaulting [1]*N), this computes for each N-D spatial output
position (x[0], ..., x[N-1]):
```
output[b, x[0], ..., x[N-1], k] =
sum_{z[0], ..., z[N-1], q}
filter[z[0], ..., z[N-1], q, k] *
padded_input[b,
x[0]*strides[0] + dilation_rate[0]*z[0],
...,
x[N-1]*strides[N-1] + dilation_rate[N-1]*z[N-1],
q]
```
where b is the index into the batch, k is the output channel number, q is the
input channel number, and z is the N-D spatial offset within the filter. Here,
`padded_input` is obtained by zero padding the input using an effective
spatial filter shape of `(spatial_filter_shape-1) * dilation_rate + 1` and
output striding `strides` as described in the
[comment here](https://tensorflow.org/api_guides/python/nn#Convolution).
In the case that `data_format` does start with `"NC"`, the `input` and output
(but not the `filter`) are simply transposed as follows:
convolution(input, data_format, **kwargs) =
tf.transpose(convolution(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
It is required that 1 <= N <= 3.
Args:
input: An (N+2)-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
filter: An (N+2)-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
Defaults to [1]*N. If any value of strides is > 1, then all values of
dilation_rate must be 1.
dilation_rate: Optional. Sequence of N ints >= 1. Specifies the filter
upsampling/input downsampling rate. In the literature, the same parameter
is sometimes called `input stride` or `dilation`. The effective filter
size used for the convolution will be `spatial_filter_shape +
(spatial_filter_shape - 1) * (rate - 1)`, obtained by inserting
(dilation_rate[i]-1) zeros between consecutive elements of the original
filter in each spatial dimension i. If any value of dilation_rate is > 1,
then all values of strides must be 1.
name: Optional name for the returned tensor.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
if data_format is None or does not start with "NC", or
`[batch_size, out_channels] + output_spatial_shape`
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of `padding`.
If padding == "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding == "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] -
(spatial_filter_shape[i]-1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: If input/output depth does not match `filter` shape, if padding
is other than `"VALID"` or `"SAME"`, or if data_format is invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "convolution", [input, filter]) as name:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
input_shape = input.get_shape()
filter = ops.convert_to_tensor(filter, name="filter") # pylint: disable=redefined-builtin
filter_shape = filter.get_shape()
op = Convolution(
input_shape,
filter_shape,
padding,
strides=strides,
dilation_rate=dilation_rate,
name=name,
data_format=data_format)
return op(input, filter)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
input, # pylint: disable=redefined-builtin
filters,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
return convolution(
input, # pylint: disable=redefined-builtin
filters,
padding=padding,
strides=strides,
dilation_rate=dilations,
name=name,
data_format=data_format)
# Derive the v2 docstring from the v1 one by rewriting the renamed argument
# names (`dilation_rate` -> `dilations`, `filter` -> `filters`) so that the
# exported `tf.nn.convolution` documentation matches its v2 signature.
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
class Convolution(object):
  """Helper class for convolution.

  Note that this class assumes that shapes of input and filter passed to
  `__call__` are compatible with `input_shape` and `filter_shape` passed to
  the constructor.

  Arguments
    input_shape: static shape of input. i.e. input.get_shape().
    filter_shape: static shape of the filter. i.e. filter.get_shape().
    padding: see convolution.
    strides: see convolution.
    dilation_rate: see convolution.
    name: see convolution.
    data_format: see convolution.
  """

  def __init__(self,
               input_shape,
               filter_shape,
               padding,
               strides=None,
               dilation_rate=None,
               name=None,
               data_format=None):
    """Helper function for convolution."""
    # Prefer the filter's rank; fall back to the input's if unknown.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")

    num_spatial_dims = num_total_dims - 2

    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))

    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))

    if data_format is None or not data_format.startswith("NC"):
      # Channels-last layout: channel axis is last, spatial axes are 1..N.
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      # Channels-first ("NC...") layout: channel axis is 1, spatial axes
      # are 2..N+1.
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)

    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))

    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)

    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.name = name
    # Dilation is implemented by wrapping the plain (non-atrous) convolution
    # in space-to-batch / batch-to-space transformations.
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    """Builds the non-atrous convolution used inside the space-to-batch wrap."""
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Applies the configured convolution to `inp` with kernel `filter`."""
    return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
padding,
dilation_rate=None,
strides=None,
name=None,
data_format=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape
`[batch_size] + input_spatial_shape + [num_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N.
If any value of strides is > 1, then all values of dilation_rate must be
1.
name: Optional. Name of the op.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
# pylint: enable=line-too-long
with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
[input]) as scope:
input = ops.convert_to_tensor(input, name="input") # pylint: disable=redefined-builtin
num_spatial_dims = len(window_shape)
if num_spatial_dims < 1 or num_spatial_dims > 3:
raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
input.get_shape().with_rank(num_spatial_dims + 2)
strides, dilation_rate = _get_strides_and_dilation_rate(
num_spatial_dims, strides, dilation_rate)
if padding == "SAME" and np.any(dilation_rate > 1):
raise ValueError(
"pooling with SAME padding is not implemented for dilation_rate > 1")
if np.any(strides > window_shape):
raise ValueError(
"strides > window_shape not supported due to inconsistency between "
"CPU and GPU implementations")
pooling_ops = {
("MAX", 1): max_pool,
("MAX", 2): max_pool,
("MAX", 3): max_pool3d, # pylint: disable=undefined-variable
("AVG", 1): avg_pool,
("AVG", 2): avg_pool,
("AVG", 3): avg_pool3d, # pylint: disable=undefined-variable
}
op_key = (pooling_type, num_spatial_dims)
if op_key not in pooling_ops:
raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
op_key[0]))
if data_format is None or not data_format.startswith("NC"):
adjusted_window_shape = [1] + list(window_shape) + [1]
adjusted_strides = [1] + list(strides) + [1]
spatial_dims = range(1, num_spatial_dims + 1)
else:
adjusted_window_shape = [1, 1] + list(window_shape)
adjusted_strides = [1, 1] + list(strides)
spatial_dims = range(2, num_spatial_dims + 2)
if num_spatial_dims == 1:
if data_format is None or data_format == "NWC":
data_format_kwargs = dict(data_format="NHWC")
elif data_format == "NCW":
data_format_kwargs = dict(data_format="NCHW")
else:
raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
adjusted_window_shape = [1] + adjusted_window_shape
adjusted_strides = [1] + adjusted_strides
else:
data_format_kwargs = dict(data_format=data_format)
def op(converted_input, _, converted_padding): # pylint: disable=missing-docstring
if num_spatial_dims == 1:
converted_input = array_ops.expand_dims(converted_input,
spatial_dims[0])
result = pooling_ops[op_key](
converted_input,
adjusted_window_shape,
adjusted_strides,
converted_padding,
name=scope,
**data_format_kwargs)
if num_spatial_dims == 1:
result = array_ops.squeeze(result, [spatial_dims[0]])
return result
return with_space_to_batch(
input=input,
dilation_rate=dilation_rate,
padding=padding,
op=op,
spatial_dims=spatial_dims,
filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
input, # pylint: disable=redefined-builtin
window_shape,
pooling_type,
strides=None,
padding="VALID",
data_format=None,
dilations=None,
name=None):
# pylint: disable=line-too-long
"""Performs an N-D pooling operation.
In the case that `data_format` does not start with "NC", computes for
0 <= b < batch_size,
0 <= x[i] < output_spatial_shape[i],
0 <= c < num_channels:
```
output[b, x[0], ..., x[N-1], c] =
REDUCE_{z[0], ..., z[N-1]}
input[b,
x[0] * strides[0] - pad_before[0] + dilation_rate[0]*z[0],
...
x[N-1]*strides[N-1] - pad_before[N-1] + dilation_rate[N-1]*z[N-1],
c],
```
where the reduction function REDUCE depends on the value of `pooling_type`,
and pad_before is defined based on the value of `padding` as described in
the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
simply transposed as follows:
```
pool(input, data_format, **kwargs) =
tf.transpose(pool(tf.transpose(input, [0] + range(2,N+2) + [1]),
**kwargs),
[0, N+1] + range(1, N+1))
```
Args:
input: Tensor of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]` if data_format does not start with "NC" (default), or
`[batch_size, num_channels] + input_spatial_shape` if data_format starts
with "NC". Pooling happens over the spatial dimensions only.
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
strides: Optional. Sequence of N ints >= 1. Defaults to [1]*N. If any value of
strides is > 1, then all values of dilation_rate must be 1.
padding: The padding algorithm, must be "SAME" or "VALID". Defaults to "SAME".
See the "returns" section of `tf.nn.convolution` for details.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW". For
N=3, the valid values are "NDHWC" (default) and "NCDHW".
dilations: Optional. Dilation rate. List of N ints >= 1. Defaults to
[1]*N. If any value of dilation_rate is > 1, then all values of strides
must be 1.
name: Optional. Name of the op.
Returns:
Tensor of rank N+2, of shape
[batch_size] + output_spatial_shape + [num_channels]
if data_format is None or does not start with "NC", or
[batch_size, num_channels] + output_spatial_shape
if data_format starts with "NC",
where `output_spatial_shape` depends on the value of padding:
If padding = "SAME":
output_spatial_shape[i] = ceil(input_spatial_shape[i] / strides[i])
If padding = "VALID":
output_spatial_shape[i] =
ceil((input_spatial_shape[i] - (window_shape[i] - 1) * dilation_rate[i])
/ strides[i]).
Raises:
ValueError: if arguments are invalid.
"""
return pool(
input=input,
window_shape=window_shape,
pooling_type=pooling_type,
padding=padding,
dilation_rate=dilations,
strides=strides,
name=name,
data_format=data_format)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
`tf.nn.convolution`, and exists only for backwards compatibility. You can
use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
Computes a 2-D atrous convolution, also known as convolution with holes or
dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
parameter is equal to one, it performs regular 2-D convolution. If the `rate`
parameter is greater than one, it performs convolution with holes, sampling
the input values every `rate` pixels in the `height` and `width` dimensions.
This is equivalent to convolving the input with a set of upsampled filters,
produced by inserting `rate - 1` zeros between two consecutive values of the
filters along the `height` and `width` dimensions, hence the name atrous
convolution or convolution with holes (the French word trous means holes in
English).
More specifically:
```
output[batch, height, width, out_channel] =
sum_{dheight, dwidth, in_channel} (
filters[dheight, dwidth, in_channel, out_channel] *
value[batch, height + rate*dheight, width + rate*dwidth, in_channel]
)
```
Atrous convolution allows us to explicitly control how densely to compute
feature responses in fully convolutional networks. Used in conjunction with
bilinear interpolation, it offers an alternative to `conv2d_transpose` in
dense prediction tasks such as semantic image segmentation, optical flow
computation, or depth estimation. It also allows us to effectively enlarge
the field of view of filters without increasing the number of parameters or
the amount of computation.
For a description of atrous convolution and how it can be used for dense
feature extraction, please see: [Semantic Image Segmentation with Deep
Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
The same operation is investigated further in [Multi-Scale Context Aggregation
by Dilated Convolutions](http://arxiv.org/abs/1511.07122). Previous works
that effectively use atrous convolution in different ways are, among others,
[OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks](http://arxiv.org/abs/1312.6229) and [Fast Image
Scanning with Deep Max-Pooling Convolutional Neural
Networks](http://arxiv.org/abs/1302.1700).
Atrous convolution is also closely related to the so-called noble identities
in multi-rate signal processing.
There are many different ways to implement atrous convolution (see the refs
above). The implementation here reduces
```python
atrous_conv2d(value, filters, rate, padding=padding)
```
to the following three operations:
```python
paddings = ...
net = space_to_batch(value, paddings, block_size=rate)
net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
crops = ...
net = batch_to_space(net, crops, block_size=rate)
```
Advanced usage. Note the following optimization: A sequence of `atrous_conv2d`
operations with identical `rate` parameters, 'SAME' `padding`, and filters
with odd heights/ widths:
```python
net = atrous_conv2d(net, filters1, rate, padding="SAME")
net = atrous_conv2d(net, filters2, rate, padding="SAME")
...
net = atrous_conv2d(net, filtersK, rate, padding="SAME")
```
can be equivalently performed cheaper in terms of computation and memory as:
```python
pad = ... # padding so that the input dims are multiples of rate
net = space_to_batch(net, paddings=pad, block_size=rate)
net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
...
net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
net = batch_to_space(net, crops=pad, block_size=rate)
```
because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
the same `block_size` cancel out when their respective `paddings` and `crops`
inputs are identical.
Args:
value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
height `filter_height + (filter_height - 1) * (rate - 1)` and effective
width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
inserting `rate - 1` zeros along consecutive elements across the
`filters`' spatial dimensions.
rate: A positive int32. The stride with which we sample input values across
the `height` and `width` dimensions. Equivalently, the rate by which we
upsample the filter values by inserting zeros across the `height` and
`width` dimensions. In the literature, the same parameter is sometimes
called `input stride` or `dilation`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
name: Optional name for the returned tensor.
Returns:
A `Tensor` with the same type as `value`.
Output shape with `'VALID'` padding is:
[batch, height - 2 * (filter_width - 1),
width - 2 * (filter_height - 1), out_channels].
Output shape with `'SAME'` padding is:
[batch, height, width, out_channels].
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
padding is other than `'VALID'` or `'SAME'`.
"""
return convolution(
input=value,
filter=filters,
padding=padding,
dilation_rate=np.broadcast_to(rate, (2,)),
name=name)
def _convert_padding(padding):
"""Converts Python padding to C++ padding for ops which take EXPLICIT padding.
Args:
padding: the `padding` argument for a Python op which supports EXPLICIT
padding.
Returns:
(padding, explicit_paddings) pair, which should be passed as attributes to a
C++ op.
Raises:
ValueError: If padding is invalid.
"""
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
data_format="NHWC",
dilations=None,
name=None):
# pylint: disable=line-too-long
r"""Computes a 2-D convolution given 4-D `input` and `filters` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filters: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
# pylint: enable=line-too-long
if dilations is None:
dilations = [1, 1, 1, 1]
return conv2d(input, # pylint: disable=redefined-builtin
filters,
strides,
padding,
use_cudnn_on_gpu=True,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d( # pylint: disable=redefined-builtin,dangerous-default-value
input,
filter,
strides,
padding,
use_cudnn_on_gpu=True,
data_format="NHWC",
dilations=[1, 1, 1, 1],
name=None):
r"""Computes a 2-D convolution given 4-D `input` and `filter` tensors.
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:
1. Flattens the filter to a 2-D matrix with shape
`[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
tensor of shape `[batch, out_height, out_width,
filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
vector.
In detail, with the default NHWC format,
output[b, i, j, k] =
sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q]
* filter[di, dj, q, k]
Must have `strides[0] = strides[3] = 1`. For the most common case of the same
horizontal and vertices strides, `strides = [1, stride, stride, 1]`.
Args:
input: A `Tensor`. Must be one of the following types:
`half`, `bfloat16`, `float32`, `float64`.
A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.
filter: A `Tensor`. Must have the same type as `input`.
A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`
strides: A list of `ints`.
1-D tensor of length 4. The stride of the sliding window for each
dimension of `input`. The dimension order is determined by the value of
`data_format`, see below for details.
padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
padding algorithm to use, or a list indicating the explicit paddings at
the start and end of each dimension. When explicit padding is used and
data_format is `"NHWC"`, this should be in the form `[[0, 0], [pad_top,
pad_bottom], [pad_left, pad_right], [0, 0]]`. When explicit padding used
and data_format is `"NCHW"`, this should be in the form `[[0, 0], [0, 0],
[pad_top, pad_bottom], [pad_left, pad_right]]`.
use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
data_format: An optional `string` from: `"NHWC", "NCHW"`.
Defaults to `"NHWC"`.
Specify the data format of the input and output data. With the
default format "NHWC", the data is stored in the order of:
[batch, height, width, channels].
Alternatively, the format could be "NCHW", the data storage order of:
[batch, channels, height, width].
dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
1-D tensor of length 4. The dilation factor for each dimension of
`input`. If set to k > 1, there will be k-1 skipped cells between each
filter element on that dimension. The dimension order is determined by the
value of `data_format`, see above for details. Dilations in the batch and
depth dimensions must be 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
padding, explicit_paddings = _convert_padding(padding)
return gen_nn_ops.conv2d(input, # pylint: disable=redefined-builtin
filter,
strides,
padding,
use_cudnn_on_gpu=use_cudnn_on_gpu,
explicit_paddings=explicit_paddings,
data_format=data_format,
dilations=dilations,
name=name)
@tf_export("nn.conv2d_backprop_filter", v1=[])
def conv2d_backprop_filter_v2(input,  # pylint: disable=redefined-builtin
                              filter_sizes,
                              out_backprop,
                              strides,
                              padding,
                              data_format="NHWC",
                              dilations=None,
                              name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`. An integer vector giving the
      shape of the 4-D
      `[filter_height, filter_width, in_channels, out_channels]` filter tensor
      whose gradient is computed.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input of the convolution, in the order given by
      `data_format`.
    padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list of explicit start/end paddings per
      dimension. With explicit padding and `data_format="NHWC"` use
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`; with
      `"NCHW"` use
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
      `"NHWC"`. With `"NHWC"` data is stored as
      [batch, height, width, channels]; with `"NCHW"` as
      [batch, channels, height, width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4 giving the dilation factor for each dimension of
      `input`; k > 1 leaves k-1 skipped cells between filter elements on that
      dimension. Batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # The v1 endpoint requires a concrete dilations list; substitute the
  # identity dilation when the caller did not provide one.
  return conv2d_backprop_filter(input,  # pylint: disable=redefined-builtin
                                filter_sizes,
                                out_backprop,
                                strides,
                                padding,
                                use_cudnn_on_gpu=True,
                                data_format=data_format,
                                dilations=[1, 1, 1, 1] if dilations is None
                                else dilations,
                                name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the filter.

  Args:
    input: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`.
      4-D with shape `[batch, in_height, in_width, in_channels]`.
    filter_sizes: A `Tensor` of type `int32`. An integer vector giving the
      shape of the 4-D
      `[filter_height, filter_width, in_channels, out_channels]` filter tensor
      whose gradient is computed.
    out_backprop: A `Tensor`. Must have the same type as `input`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input of the convolution, in the order given by
      `data_format`.
    padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list of explicit start/end paddings per
      dimension. With explicit padding and `data_format="NHWC"` use
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`; with
      `"NCHW"` use
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
      `"NHWC"`. With `"NHWC"` data is stored as
      [batch, in_height, in_width, in_channels]; with `"NCHW"` as
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4 giving the dilation factor for each dimension of
      `input`; batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  # Normalize `padding` into the string attribute plus an explicit-paddings
  # list understood by the generated op.
  padding_attr, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_filter(input, filter_sizes, out_backprop,
                                           strides, padding_attr,
                                           use_cudnn_on_gpu, explicit_paddings,
                                           data_format, dilations, name)
@tf_export("nn.conv2d_backprop_input", v1=[])
def conv2d_backprop_input_v2(input_sizes,
                             filters,
                             out_backprop,
                             strides,
                             padding,
                             data_format="NHWC",
                             dilations=None,
                             name=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`. An integer vector giving the
      shape of the 4-D `[batch, height, width, channels]` input whose gradient
      is computed.
    filters: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filters`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input of the convolution, in the order given by
      `data_format`.
    padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list of explicit start/end paddings per
      dimension. With explicit padding and `data_format="NHWC"` use
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`; with
      `"NCHW"` use
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
      `"NHWC"`. With `"NHWC"` data is stored as
      [batch, in_height, in_width, in_channels]; with `"NCHW"` as
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4 giving the dilation factor for each dimension of
      `input`; batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filters`.
  """
  # Substitute the identity dilation when none is given and delegate to the
  # v1 endpoint with cuDNN enabled.
  return conv2d_backprop_input(input_sizes,
                               filters,
                               out_backprop,
                               strides,
                               padding,
                               use_cudnn_on_gpu=True,
                               data_format=data_format,
                               dilations=[1, 1, 1, 1] if dilations is None
                               else dilations,
                               name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input(  # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  r"""Computes the gradients of convolution with respect to the input.

  Args:
    input_sizes: A `Tensor` of type `int32`. An integer vector giving the
      shape of the 4-D `[batch, height, width, channels]` input whose gradient
      is computed.
    filter: A `Tensor`. Must be one of the following types:
      `half`, `bfloat16`, `float32`, `float64`. 4-D with shape
      `[filter_height, filter_width, in_channels, out_channels]`.
    out_backprop: A `Tensor`. Must have the same type as `filter`.
      4-D with shape `[batch, out_height, out_width, out_channels]`.
      Gradients w.r.t. the output of the convolution.
    strides: A list of `ints`. The stride of the sliding window for each
      dimension of the input of the convolution, in the order given by
      `data_format`.
    padding: Either the `string `"SAME"` or `"VALID"` indicating the type of
      padding algorithm to use, or a list of explicit start/end paddings per
      dimension. With explicit padding and `data_format="NHWC"` use
      `[[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]`; with
      `"NCHW"` use
      `[[0, 0], [0, 0], [pad_top, pad_bottom], [pad_left, pad_right]]`.
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from: `"NHWC", "NCHW"`. Defaults to
      `"NHWC"`. With `"NHWC"` data is stored as
      [batch, in_height, in_width, in_channels]; with `"NCHW"` as
      [batch, in_channels, in_height, in_width].
    dilations: An optional list of `ints`. Defaults to `[1, 1, 1, 1]`.
      1-D tensor of length 4 giving the dilation factor for each dimension of
      `input`; batch and depth dilations must be 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `filter`.
  """
  # Normalize `padding` into the string attribute plus an explicit-paddings
  # list understood by the generated op.
  padding_attr, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_input(input_sizes, filter, out_backprop,
                                          strides, padding_attr,
                                          use_cudnn_on_gpu, explicit_paddings,
                                          data_format, dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    name=None):
  """The transpose of `conv2d`.

  Often called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), though it
  is really the transpose (gradient) of `conv2d`, not an actual deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]` for `NHWC` data format or
      `[batch, in_channels, height, width]` for `NCHW` data format.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`. `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "conv2d_transpose",
                      [value, filter, output_shape]) as name:
    if data_format not in ("NCHW", "NHWC"):
      raise ValueError("data_format has to be either NCHW or NHWC.")
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    # Channel axis position depends on the layout.
    if data_format == "NHWC":
      axis = 3
    else:
      axis = 1
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[3]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[3]))
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}".format(
          output_shape_.get_shape()))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape is statically known here, so the output-channel count can
      # also be validated eagerly.
      if not filter.get_shape().dims[2].is_compatible_with(output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis], filter.get_shape()[2]))
    if padding not in ("VALID", "SAME"):
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # The transposed convolution is computed as the input gradient of conv2d.
    return gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
    input,
    filters,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    name=None):
  # Thin v2 wrapper over conv2d_transpose: same computation, v2 argument
  # names. The docstring is attached below via rewrite_argument_docstring.
  return conv2d_transpose(
      input,
      filters,
      output_shape,
      strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
# Give the v2 wrapper conv2d_transpose's docstring, rewriting the v1 argument
# names ("filter" -> "filters", "value" -> "input").
conv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv2d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,
                            filters,
                            output_shape,
                            rate,
                            padding,
                            name=None):
  """The transpose of `atrous_conv2d`.
  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `atrous_conv2d` rather than an actual
  deconvolution.
  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default `NHWC`
      format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, out_channels, in_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution is
      equivalent to standard convolution with upsampled filters with effective
      height `filter_height + (filter_height - 1) * (rate - 1)` and effective
      width `filter_width + (filter_width - 1) * (rate - 1)`, produced by
      inserting `rate - 1` zeros along consecutive elements across the
      `filters`' spatial dimensions.
    output_shape: A 1-D `Tensor` of shape representing the output shape of the
      deconvolution op.
    rate: A positive int32. The stride with which we sample input values across
      the `height` and `width` dimensions. Equivalently, the rate by which we
      upsample the filter values by inserting zeros across the `height` and
      `width` dimensions. In the literature, the same parameter is sometimes
      called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.
  Returns:
    A `Tensor` with the same type as `value`.
  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`, or if the `rate` is less
      than one, or if the output_shape is not a tensor with 4 elements.
  """
  with ops.name_scope(name, "atrous_conv2d_transpose",
                      [value, filters, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filters = ops.convert_to_tensor(filters, name="filters")
    if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
      raise ValueError(
          "value's input channels does not match filters' input channels, "
          "{} != {}".format(value.get_shape()[3],
                            filters.get_shape()[3]))
    if rate < 1:
      raise ValueError("rate {} cannot be less than one".format(rate))
    # A unit rate is an ordinary transposed convolution; delegate directly.
    if rate == 1:
      return conv2d_transpose(
          value,
          filters,
          output_shape,
          strides=[1, 1, 1, 1],
          padding=padding,
          data_format="NHWC")
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}".format(
          output_shape_.get_shape()))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3],
                              filters.get_shape()[2]))
    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.
    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      # Handle filters whose shape is unknown during graph creation.
      if filters.get_shape().is_fully_defined():
        filter_shape = filters.get_shape().as_list()
      else:
        filter_shape = array_ops.shape(filters)
      filter_height, filter_width = filter_shape[0], filter_shape[1]
      # Spatial dimensions of the filters and the upsampled filters in which we
      # introduce (rate - 1) zeros between consecutive filter values.
      filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
      filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
      pad_height = filter_height_up - 1
      pad_width = filter_width_up - 1
      # When pad_height (pad_width) is odd, we pad more to bottom (right),
      # following the same convention as conv2d().
      pad_top = pad_height // 2
      pad_bottom = pad_height - pad_top
      pad_left = pad_width // 2
      pad_right = pad_width - pad_left
    elif padding == "VALID":
      pad_top = 0
      pad_bottom = 0
      pad_left = 0
      pad_right = 0
    else:
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    in_height = output_shape[1] + pad_top + pad_bottom
    in_width = output_shape[2] + pad_left + pad_right
    # More padding so that rate divides the height and width of the input.
    pad_bottom_extra = (rate - in_height % rate) % rate
    pad_right_extra = (rate - in_width % rate) % rate
    # The paddings argument to space_to_batch is just the extra padding
    # component.
    space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
    value = array_ops.space_to_batch(
        input=value, paddings=space_to_batch_pad, block_size=rate)
    input_sizes = [
        rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
        (in_width + pad_right_extra) // rate, output_shape[3]
    ]
    # The atrous transposed convolution is realized as the input gradient of a
    # VALID conv2d applied to the space-to-batch-rearranged value tensor.
    value = gen_nn_ops.conv2d_backprop_input(
        input_sizes=input_sizes,
        filter=filters,
        out_backprop=value,
        strides=[1, 1, 1, 1],
        padding="VALID",
        data_format="NHWC")
    # The crops argument to batch_to_space includes both padding components.
    batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
                           [pad_left, pad_right + pad_right_extra]]
    return array_ops.batch_to_space(
        input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input,  # pylint: disable=redefined-builtin,missing-docstring
              filters,
              strides,
              padding,
              data_format="NDHWC",
              dilations=None,
              name=None):
  # The docstring is attached below via rewrite_argument_docstring. Supply the
  # identity dilation when the caller leaves `dilations` unset.
  return gen_nn_ops.conv3d(input,  # pylint: disable=redefined-builtin
                           filters,
                           strides,
                           padding,
                           data_format=data_format,
                           dilations=[1, 1, 1, 1, 1] if dilations is None
                           else dilations,
                           name=name)
# Export the generated op directly as the v1 endpoint, and give the v2 wrapper
# the generated op's docstring with "filter" renamed to "filters".
tf_export(v1=["nn.conv3d"])(gen_nn_ops.conv3d)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
    gen_nn_ops.conv3d.__doc__, "filter", "filters")
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  """The transpose of `conv3d`.

  Often called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), though it
  is really the transpose (gradient) of `conv3d`, not an actual deconvolution.

  Args:
    value: A 5-D `Tensor` of type `float` and shape
      `[batch, depth, height, width, in_channels]`.
    filter: A 5-D `Tensor` with the same type as `value` and shape
      `[depth, height, width, output_channels, in_channels]`. `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string, either `'NDHWC'` or `'NCDHW`' specifying the layout
      of the input and output tensors. Defaults to `'NDHWC'`.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "conv3d_transpose",
                      [value, filter, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    # Channel axis position depends on the layout.
    if data_format == "NCDHW":
      axis = 1
    else:
      axis = 4
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[4]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[4]))
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
      raise ValueError("output_shape must have shape (5,), got {}".format(
          output_shape_.get_shape()))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape is statically known here, so the output-channel count can
      # also be validated eagerly.
      if not filter.get_shape().dims[3].is_compatible_with(output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis], filter.get_shape()[3]))
    if padding not in ("VALID", "SAME"):
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # The transposed convolution is computed as the input gradient of conv3d.
    return gen_nn_ops.conv3d_backprop_input_v2(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(
    input,
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  # Thin v2 wrapper over conv3d_transpose: same computation, v2 argument
  # names. The docstring is attached below via rewrite_argument_docstring.
  return conv3d_transpose(
      input,
      filters,
      output_shape,
      strides,
      padding=padding,
      data_format=data_format,
      name=name)
# pylint: enable=redefined-builtin
# Give the v2 wrapper conv3d_transpose's docstring, rewriting the v1 argument
# names ("filter" -> "filters", "value" -> "input").
conv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv3d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` to `value`.

  Mostly a special case of `tf.add` where `bias` is constrained to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, `bias` may have a different type from `value` when both
  types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` whose size matches the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    # In graph mode, convert both arguments up front so the bias picks up
    # `value`'s dtype; eager inputs are handed straight to the op.
    if not context.executing_eagerly():
      value = ops.convert_to_tensor(value, name="input")
      bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value` (deprecated variant of `bias_add`).

  This is a deprecated version of bias_add and will soon to be removed.
  Mostly a special case of `tf.add` where `bias` is constrained to 1-D.
  Broadcasting is supported, so `value` may have any number of dimensions.
  Unlike `tf.add`, `bias` may have a different type from `value` when both
  types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, `complex64`, or `complex128`.
    bias: A 1-D `Tensor` whose size matches the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add_v1(value, bias, name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
  """Computes Concatenated ReLU.

  Applies ReLU to the concatenation of `features` with its negation, keeping
  both the positive and the *negative* parts of the activation. As a result
  this non-linearity doubles the depth of the activations.

  Source: [Understanding and Improving Convolutional Neural Networks via
  Concatenated Rectified Linear Units. W. Shang, et
  al.](https://arxiv.org/abs/1603.05201)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).
    axis: The axis that the output values are concatenated along. Default is -1.

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops.relu(
        array_ops.concat([features, -features], axis, name=name))
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
  # v2 endpoint: identical to crelu but with `axis` before `name`.
  return crelu(features, axis=axis, name=name)
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`.

  Source: [Convolutional Deep Belief Networks on CIFAR-10. A.
  Krizhevsky](http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf)

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.name_scope(name, "Relu6", [features]) as name:
    return gen_nn_ops.relu6(
        ops.convert_to_tensor(features, name="features"), name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
  """Compute the Leaky ReLU activation function.

  "Rectifier Nonlinearities Improve Neural Network Acoustic Models"
  AL Maas, AY Hannun, AY Ng - Proc. ICML, 2013
  https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf

  Args:
    features: A `Tensor` representing preactivation values. Must be one of
      the following types: `float16`, `float32`, `float64`, `int32`, `int64`.
    alpha: Slope of the activation function at x < 0.
    name: A name for the operation (optional).

  Returns:
    The activation value.
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
    features = ops.convert_to_tensor(features, name="features")
    # The fused kernel is float-only; promote integer inputs first.
    if features.dtype.is_integer:
      features = math_ops.to_float(features)
    if compat.forward_compatible(2018, 11, 1):
      if isinstance(alpha, np.ndarray):
        # The op's `alpha` attr must be a Python scalar. np.asscalar() was
        # deprecated in NumPy 1.16 and removed in 1.23; ndarray.item() is its
        # documented replacement and behaves identically here.
        alpha = alpha.item()
      return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
    # Pre-forward-compat fallback: express leaky ReLU with elementwise ops.
    alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
    return math_ops.maximum(alpha * features, features, name=name)
def _flatten_outer_dims(logits):
  """Reshapes `logits` to 2-D, collapsing every dimension but the last."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
  # Propagate a static shape when all outer dimensions are known (graph mode
  # only; eager tensors already carry their concrete shape).
  if not context.executing_eagerly():
    static_shape = logits.get_shape()
    if static_shape is not None and static_shape.dims is not None:
      dims = static_shape.as_list()
      outer_dims = dims[:-1]
      if all(d is not None for d in outer_dims):
        flat_rows = 1
        for d in outer_dims:
          flat_rows *= d
        output.set_shape([flat_rows, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  It reshapes and transposes the input logits into a 2-D Tensor and then invokes
  the tf.nn._softmax or tf.nn._log_softmax function. The output would be
  transposed and reshaped back.

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax
    dim: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
      dimension of `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)

  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # Fixed: compare with `==`, not `is`. `dim is -1` tested object identity
  # against an int literal, which only worked because CPython caches small
  # integers (and emits a SyntaxWarning on Python >= 3.8); equality is the
  # correct, portable comparison.
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    return compute_op(logits, name=name)

  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))

  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.

  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)

  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))

  # Do the actual softmax on its last dimension.
  output = compute_op(logits)

  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)

  # Make shape inference work since transpose may erase its static shape.
  output.set_shape(shape)

  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
  """Computes softmax activations.

  Equivalent to

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Resolve the deprecated `dim` spelling, then fall back to the last axis.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.softmax,
                  -1 if axis is None else axis, name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
  """Computes softmax activations.

  This function performs the equivalent of

      softmax = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), axis)

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # An unspecified axis means the last dimension.
  return _softmax(logits, gen_nn_ops.softmax,
                  -1 if axis is None else axis, name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # Resolve the deprecated `dim` alias before defaulting to the last axis.
  chosen_axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                       "dim", dim)
  if chosen_axis is None:
    chosen_axis = -1
  return _softmax(logits, gen_nn_ops.log_softmax, chosen_axis, name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log softmax activations.

  For each batch `i` and class `j` we have

      logsoftmax = logits - log(reduce_sum(exp(logits), axis))

  Args:
    logits: A non-empty `Tensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    axis: The dimension softmax would be performed on. The default is -1 which
      indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`. Same shape as `logits`.

  Raises:
    InvalidArgumentError: if `logits` is empty or `axis` is beyond the last
      dimension of `logits`.
  """
  # An unspecified axis means the last dimension.
  return _softmax(logits, gen_nn_ops.log_softmax,
                  -1 if axis is None else axis, name)
def _ensure_xent_args(name, sentinel, labels, logits):
# Make sure that all arguments were passed as named arguments.
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is
  a valid probability distribution. If they are not, the computation of the
  gradient will be incorrect. If using exclusive `labels` (wherein one and only
  one class is true at a time), see
  `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with
  the `axis` argument specifying the class dimension. `logits` and `labels`
  must have the same dtype (either `float16`, `float32`, or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
  before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  # All of the real work happens in the (deprecation-aware) helper.
  return softmax_cross_entropy_with_logits_v2_helper(
      labels=labels, logits=logits, axis=axis, name=name)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
    labels, logits, axis=None, name=None, dim=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is
  a valid probability distribution. If they are not, the computation of the
  gradient will be incorrect.

  If using exclusive `labels` (wherein one and only
  one class is true at a time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with
  the `axis` argument specifying the class dimension.

  `logits` and `labels` must have the same dtype (either `float16`, `float32`,
  or `float64`).

  Backpropagation will happen into both `logits` and `labels`. To disallow
  backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
  before feeding it to this function.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    axis: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).
    dim: Deprecated alias for axis.

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  # TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
  # could break users who call this with bad labels, but disregard the bad
  # results.
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  # Only `axis` is used from here on; drop the deprecated alias.
  del dim
  if axis is None:
    axis = -1

  with ops.name_scope(name, "softmax_cross_entropy_with_logits",
                      [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    # Half-precision inputs are computed in float32 and cast back at the end.
    convert_to_float32 = (
        logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
    precise_logits = math_ops.cast(
        logits, dtypes.float32) if convert_to_float32 else logits
    # labels and logits must be of the same type
    labels = math_ops.cast(labels, precise_logits.dtype)
    input_rank = array_ops.rank(precise_logits)
    # For shape inference.
    shape = logits.get_shape()

    # Move the dim to the end if dim is not the last dimension.
    if axis != -1:

      def _move_dim_to_end(tensor, dim_index, rank):
        # Permutation: [0..dim_index) ++ (dim_index..rank) ++ [dim_index].
        return array_ops.transpose(
            tensor,
            array_ops.concat([
                math_ops.range(dim_index),
                math_ops.range(dim_index + 1, rank), [dim_index]
            ], 0))

      precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
      labels = _move_dim_to_end(labels, axis, input_rank)

    input_shape = array_ops.shape(precise_logits)

    # Make precise_logits and labels into matrices.
    precise_logits = _flatten_outer_dims(precise_logits)
    labels = _flatten_outer_dims(labels)

    # Do the actual op computation.
    # The second output tensor contains the gradients. We use it in
    # _CrossEntropyGrad() in nn_grad but not here.
    cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
        precise_logits, labels, name=name)

    # The output cost shape should be the input minus axis.
    output_shape = array_ops.slice(input_shape, [0],
                                   [math_ops.subtract(input_rank, 1)])
    cost = array_ops.reshape(cost, output_shape)

    # Make shape inference work since reshape and transpose may erase its static
    # shape.
    if not context.executing_eagerly(
    ) and shape is not None and shape.dims is not None:
      shape = shape.as_list()
      del shape[axis]
      cost.set_shape(shape)

    # Restore the caller's dtype if we upcast for precision above.
    if convert_to_float32:
      return math_ops.cast(cost, logits.dtype)
    else:
      return cost
# Instructions text appended to the deprecation warning emitted by the v1
# `softmax_cross_entropy_with_logits` endpoint defined below.
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    dim=-1,
    name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be. All that is required is that each row of `labels` is
  a valid probability distribution. If they are not, the computation of the
  gradient will be incorrect. If using exclusive `labels` (wherein one and
  only one class is true at a time), see
  `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits and labels of shape
  `[batch_size, num_classes]`, but higher dimensions are supported, with
  the `dim` argument specifying the class dimension.

  Backpropagation will happen only into `logits`. To calculate a cross entropy
  loss that allows backpropagation into both `logits` and `labels`, see
  `tf.nn.softmax_cross_entropy_with_logits_v2`.

  **Note that to avoid confusion, it is required to pass only named arguments
  to this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: Each vector along the class dimension should hold a valid
      probability distribution e.g. for the case in which labels are of shape
      `[batch_size, num_classes]`, each row of `labels[i]` must be a valid
      probability distribution.
    logits: Unscaled log probabilities.
    dim: The class dimension. Defaulted to -1 which is the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that contains the softmax cross entropy loss. Its type is the
    same as `logits` and its shape is the same as `labels` except that it does
    not have the last dimension of `labels`.
  """
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)

  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # v1 semantics: gradients never flow into the labels, so freeze them
    # before delegating to the v2 implementation.
    frozen_labels = array_ops.stop_gradient(labels,
                                            name="labels_stop_gradient")

  return softmax_cross_entropy_with_logits_v2(
      labels=frozen_labels, logits=logits, axis=dim, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,  # pylint: disable=invalid-name
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which the
  classes are mutually exclusive (each entry is in exactly one class). For
  example, each CIFAR-10 image is labeled with one and only one label: an image
  can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is considered
  exclusive. That is, soft classes are not allowed, and the `labels` vector
  must provide a single specific index for the true class for each row of
  `logits` (each minibatch entry). For soft softmax classification with
  a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits_v2`.

  **WARNING:** This op expects unscaled logits, since it performs a `softmax`
  on `logits` internally for efficiency. Do not call this op with the
  output of `softmax`, as it will produce incorrect results.

  A common use case is to have logits of shape
  `[batch_size, num_classes]` and have labels of shape
  `[batch_size]`, but higher dimensions are supported, in which
  case the `dim`-th dimension is assumed to be of size `num_classes`.
  `logits` must have the dtype of `float16`, `float32`, or `float64`, and
  `labels` must have the dtype of `int32` or `int64`.

  **Note that to avoid confusion, it is required to pass only named arguments to
  this function.**

  Args:
    _sentinel: Used to prevent positional parameters. Internal, do not use.
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is rank of
      `labels` and result) and dtype `int32` or `int64`. Each entry in `labels`
      must be an index in `[0, num_classes)`. Other values will raise an
      exception when this op is run on CPU, and return `NaN` for corresponding
      loss and gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]` and dtype `float16`, `float32`, or
      `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of the same shape as `labels` and of the same type as `logits`
    with the softmax cross entropy loss.

  Raises:
    ValueError: If logits are scalars (need to have rank >= 1) or if the rank
      of the labels is not equal to the rank of the logits minus one.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)

  # TODO(pcmurray) Raise an error when the label is not an index in
  # [0, num_classes). Note: This could break users who call this with bad
  # labels, but disregard the bad results.

  # Reshape logits and labels to rank 2.
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # float16 inputs are computed in float32 and cast back before returning.
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits

    # Store label shape for result later.
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Check if no reshapes are required.
    # Fast path: rank-2 logits can be fed to the kernel directly.
    if logits.get_shape().ndims == 2:
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost

    # Perform a check of the dynamic shapes if the static shapes are not fully
    # defined.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      # Reshape logits to 2 dim, labels to 1 dim.
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      # The second output tensor contains the gradients. We use it in
      # _CrossEntropyGrad() in nn_grad but not here.
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      # Restore the original label shape, both dynamically and statically.
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
      `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list or tuple of 4 ints. The size of the window for each dimension
      of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`. The average pooled output tensor.
  """
  with ops.name_scope(name, "AvgPool", [value]) as name:
    pool_input = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.avg_pool(
        pool_input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    value: A 4-D `Tensor` of the format specified by `data_format`.
    ksize: A list or tuple of 4 ints. The size of the window for each dimension
      of the input tensor.
    strides: A list or tuple of 4 ints. The stride of the sliding window for
      each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` of format specified by `data_format`.
    The max pooled output tensor.
  """
  with ops.name_scope(name, "MaxPool", [value]) as name:
    pool_input = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.max_pool(
        pool_input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            output_dtype=dtypes.int64,
                            name=None):
  """Performs max pooling on the input and outputs both max values and indices.

  The indices in `argmax` are flattened, so that a maximum value at position
  `[b, y, x, c]` becomes flattened index
  `((b * height + y) * width + x) * channels + c`.

  The indices returned are always in `[0, height) x [0, width)` before
  flattening, even if padding is involved and the mathematically correct answer
  is outside (either negative or too large). This is a bug, but fixing it is
  difficult to do in a safe backwards compatible way, especially due to
  flattening.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`,
      `uint32`, `uint64`.
      4-D with shape `[batch, height, width, channels]`. Input to pool over.
    ksize: A list of `ints` that has length `>= 4`.
      The size of the window for each dimension of the input tensor.
    strides: A list of `ints` that has length `>= 4`.
      The stride of the sliding window for each dimension of the
      input tensor.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.
    data_format: An optional `string`, must be set to `"NHWC"`. Defaults to
      `"NHWC"`. Specify the data format of the input and output data.
    output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`.
      Defaults to `tf.int64`. The dtype of the returned argmax tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, argmax).

    output: A `Tensor`. Has the same type as `input`.
    argmax: A `Tensor` of type `output_dtype`.
  """
  # The underlying kernel only handles channels-last layout.
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")

  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      Targmax=output_dtype,
      name=name)
# pylint: enable=redefined-builtin
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  kernel_in_depth = int(kernel_shape[2])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # Each output element costs one multiply plus one add per filter tap per
  # input channel, hence the factor of 2.
  return ops.OpStats(
      "flops",
      (out_count * kernel_in_depth * kernel_height * kernel_width * 2))
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for DepthwiseConv2dNative."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  kernel_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  kernel_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  kernel_height = int(kernel_shape[0])
  kernel_width = int(kernel_shape[1])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # Depthwise convolution: one multiply-add per filter tap per output element
  # (no accumulation across input channels), hence the factor of 2.
  return ops.OpStats("flops", (out_count * kernel_height * kernel_width * 2))
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  # One addition per input element.
  element_count = np.prod(in_shape.as_list())
  return ops.OpStats("flops", element_count)
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    product = math_ops.matmul(x, weights)
    # bias_add broadcasts the 1-D biases across the rows of the product.
    return bias_add(product, biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  This is a deprecated version of that will soon be removed.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    product = math_ops.matmul(x, weights)
    # Uses the v1 bias add, unlike `xw_plus_b`.
    return bias_add_v1(product, biases, name=name)
def _get_noise_shape(x, noise_shape):
  """Best-effort resolution of `noise_shape` against the static shape of `x`.

  Returns the dynamic shape of `x` when `noise_shape` is None; otherwise
  fills in any unknown `noise_shape` dimensions from `x`'s known static
  dimensions when the ranks match. Falls back to the original value when the
  shape cannot be interpreted statically, letting the downstream op (or eager
  execution) report any error.
  """
  # If noise_shape is none return immediately.
  if noise_shape is None:
    return array_ops.shape(x)

  try:
    # Best effort to figure out the intended shape.
    # If not possible, let the op to handle it.
    # In eager mode exception will show up.
    static_noise_shape = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    return noise_shape

  x_dims = x.shape.dims
  if x_dims is not None and len(x_dims) == len(static_noise_shape.dims):
    # Substitute known x dimensions for unspecified noise dimensions.
    resolved = [
        x_dim.value
        if noise_dim.value is None and x_dim.value is not None
        else noise_dim.value
        for x_dim, noise_dim in zip(x_dims, static_noise_shape.dims)
    ]
    return tensor_shape.TensorShape(resolved)

  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):  # pylint: disable=invalid-name
  """Computes dropout.

  For each element of `x`, with probability `rate`, outputs `0`, and otherwise
  scales up the input by `1 / (1-rate)`. The scaling is such that the expected
  sum is unchanged.

  By default, each element is kept or dropped independently. If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    keep_prob: (deprecated) A deprecated alias for `(1-rate)`.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    name: A name for this operation (optional).
    rate: A scalar `Tensor` with the same type as `x`. The probability that
      each element of `x` is discarded.

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  # Translate the deprecated keep-probability into a drop rate.
  if keep_prob is None:
    keep = None
  else:
    try:
      keep = 1. - keep_prob
    except TypeError:
      raise ValueError("keep_prob must be a floating point number or Tensor "
                       "(got %r)" % keep_prob)
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")

  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):  # pylint: disable=invalid-name
  """Computes dropout.

  With probability `rate`, drops elements of `x`. Input that are kept are
  scaled up by `1 / (1 - rate)`, otherwise outputs `0`. The scaling is so that
  the expected sum is unchanged.

  By default, each element is kept or dropped independently. If `noise_shape`
  is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
  will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
  and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
  kept independently and each row and column will be kept or not kept together.

  Args:
    x: A floating point tensor.
    rate: A scalar `Tensor` with the same type as x. The probability
      that each element is dropped. For example, setting rate=0.1 would drop
      10% of input elements.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed`
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `rate` is not in `[0, 1)` or if `x` is not a floating
      point tensor.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going to"
                       " be scaled. Got a %s tensor instead." % x.dtype)
    # Validate Python-number rates eagerly; tensor rates are checked below.
    if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)

    # Early return if nothing needs to be dropped.
    if isinstance(rate, numbers.Real) and rate == 0:
      return x
    if context.executing_eagerly():
      if isinstance(rate, ops.EagerTensor):
        if rate.numpy() == 0:
          return x
    else:
      rate = ops.convert_to_tensor(
          rate, dtype=x.dtype, name="rate")
      rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())

      # Do nothing if we know rate == 0
      if tensor_util.constant_value(rate) == 0:
        return x

    noise_shape = _get_noise_shape(x, noise_shape)
    keep_prob = 1 - rate
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    # Scale kept elements by 1/keep_prob so the expected sum is unchanged.
    ret = math_ops.divide(x, keep_prob) * binary_tensor
    # Reshape/multiply above may lose the static shape; restore it in graph
    # mode for downstream shape inference.
    if not context.executing_eagerly():
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries for the last dimension.

  If the input is a vector (rank=1), finds the `k` largest entries in the
  vector and outputs their values and indices as vectors. Thus `values[j]` is
  the `j`-th largest entry in `input`, and its index is `indices[j]`.

  For matrices (resp. higher rank input), computes the top `k` entries in each
  row (resp. vector along the last dimension). Thus,

      values.shape = indices.shape = input.shape[:-1] + [k]

  If two elements are equal, the lower-index element appears first.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
      dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values in
      descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  values_and_indices = gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
  return values_and_indices
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  r"""Finds values of the `n`-th order statistic for the last dimension.

  If the input is a vector (rank-1), finds the entry which is the n-th
  smallest value in the vector and outputs its value as a scalar tensor.

  For matrices (resp. higher rank input), computes the entry which is the
  n-th smallest value in each row (resp. vector along the last dimension).
  Thus,

      values.shape = input.shape[:-1]

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `n+1`.
    n: A `Tensor` of type `int32`.
      0-D. Position of sorted vector to select along the last dimension (along
      each row for matrices). Valid range of n is `[0, input.shape[-1])`.
    reverse: An optional `bool`. Defaults to `False`.
      When set to True, find the nth-largest value in the vector and vice
      versa.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
    The `n`-th order statistic along each last dimensional slice.
  """
  # Docstring fixes only: "dmension" typo, subject/verb agreement, and the
  # valid range of `n`, which is bounded by the *last* dimension's size.
  return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):   # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Deprecated variant that still exposes the kernel's raw `deterministic` and
  `seed2` arguments; prefer `fractional_max_pool_v2`.

  Fractional max pooling differs from regular max pooling in that the overall
  reduction ratio N per spatial dimension need not be an integer. Pooling
  region boundaries are generated (pseudo-)randomly so that, with
  K = floor(input_length / output_length), every interval has size K or K+1,
  the sequence starts at 0 and ends at the input length, and the sequence has
  output_length+1 entries. See [Benjamin Graham, Fractional
  Max-Pooling](http://arxiv.org/abs/1412.6071) for details.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
      each dimension of `value`; only the row and col dimensions support
      ratios > 1.0, so the first and last entries must be 1.0 (no pooling on
      batch or channels). Example: [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`. Defaults to `False`. When `True` the
      pooling sequence is generated pseudorandomly instead of randomly; see
      the paper for the distinction.
    overlapping: An optional `bool`. Defaults to `False`. When `True`, values
      at the boundary of adjacent pooling cells are used by both cells.
    deterministic: An optional `bool`. Deprecated; use
      `fractional_max_pool_v2` instead.
    seed: An optional `int`. Defaults to `0`. A non-zero value seeds the
      random number generator; otherwise a random seed is used.
    seed2: An optional `int`. Deprecated; use `fractional_max_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`): the pooled output (same type as `value`) and two
    `int64` boundary sequences.
  """
  return gen_nn_ops.fractional_max_pool(
      value, pooling_ratio, pseudo_random, overlapping, deterministic,
      seed, seed2, name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional max pooling on the input.

  Fractional max pooling differs from regular max pooling in that the overall
  reduction ratio N per spatial dimension need not be an integer. Pooling
  region boundaries are generated (pseudo-)randomly so that, with
  K = floor(input_length / output_length), every interval has size K or K+1,
  the sequence starts at 0 and ends at the input length, and the sequence has
  output_length+1 entries. See [Benjamin Graham, Fractional
  Max-Pooling](http://arxiv.org/abs/1412.6071) for details.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
      each dimension of `value`; only the row and col dimensions support
      ratios > 1.0, so the first and last entries must be 1.0 (no pooling on
      batch or channels). Example: [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`. Defaults to `False`. When `True` the
      pooling sequence is generated pseudorandomly instead of randomly; see
      the paper for the distinction.
    overlapping: An optional `bool`. Defaults to `False`. When `True`, values
      at the boundary of adjacent pooling cells are used by both cells.
    seed: An optional `int`. Defaults to `0`. A non-zero value seeds the
      random number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`): the pooled output (same type as `value`) and two
    `int64` boundary sequences.
  """
  # A user-supplied seed is split into the kernel's (seed, seed2) pair and
  # forces deterministic sequence generation.
  if seed != 0:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_max_pool(
        value, pooling_ratio, pseudo_random, overlapping,
        deterministic=True, seed=seed1, seed2=seed2, name=name)
  return gen_nn_ops.fractional_max_pool(
      value, pooling_ratio, pseudo_random, overlapping,
      deterministic=False, seed=0, seed2=0, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Deprecated variant that still exposes the kernel's raw `deterministic` and
  `seed2` arguments; prefer `fractional_avg_pool_v2`.

  Pooling regions are generated exactly as in fractional max pooling; the
  only difference is that each region is reduced with a mean instead of a
  max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
      each dimension of `value`; only the row and col dimensions support
      ratios > 1.0, so the first and last entries must be 1.0 (no pooling on
      batch or channels). Example: [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`. Defaults to `False`. When `True` the
      pooling sequence is generated pseudorandomly instead of randomly; see
      [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071).
    overlapping: An optional `bool`. Defaults to `False`. When `True`, values
      at the boundary of adjacent pooling cells are used by both cells.
    deterministic: An optional `bool`. Deprecated; use
      `fractional_avg_pool_v2` instead.
    seed: An optional `int`. Defaults to `0`. A non-zero value seeds the
      random number generator; otherwise a random seed is used.
    seed2: An optional `int`. Deprecated; use `fractional_avg_pool_v2`
      instead.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`): the pooled output (same type as `value`) and two
    `int64` boundary sequences.
  """
  return gen_nn_ops.fractional_avg_pool(
      value, pooling_ratio, pseudo_random, overlapping, deterministic,
      seed, seed2, name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  r"""Performs fractional average pooling on the input.

  Pooling regions are generated exactly as in fractional max pooling; the
  only difference is that each region is reduced with a mean instead of a
  max.

  Args:
    value: A `Tensor`. 4-D with shape `[batch, height, width, channels]`.
    pooling_ratio: A list of `floats` that has length >= 4. Pooling ratio for
      each dimension of `value`; only the row and col dimensions support
      ratios > 1.0, so the first and last entries must be 1.0 (no pooling on
      batch or channels). Example: [1.0, 1.44, 1.73, 1.0].
    pseudo_random: An optional `bool`. Defaults to `False`. When `True` the
      pooling sequence is generated pseudorandomly instead of randomly; see
      [Benjamin Graham, Fractional
      Max-Pooling](http://arxiv.org/abs/1412.6071).
    overlapping: An optional `bool`. Defaults to `False`. When `True`, values
      at the boundary of adjacent pooling cells are used by both cells.
    seed: An optional `int`. Defaults to `0`. A non-zero value seeds the
      random number generator; otherwise a random seed is used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (`output`, `row_pooling_sequence`,
    `col_pooling_sequence`): the pooled output (same type as `value`) and two
    `int64` boundary sequences.
  """
  # A user-supplied seed is split into the kernel's (seed, seed2) pair and
  # forces deterministic sequence generation.
  if seed != 0:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_avg_pool(
        value, pooling_ratio, pseudo_random, overlapping,
        deterministic=True, seed=seed1, seed2=seed2, name=name)
  return gen_nn_ops.fractional_avg_pool(
      value, pooling_ratio, pseudo_random, overlapping,
      deterministic=False, seed=0, seed2=0, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(value,
           filters,
           stride,
           padding,
           use_cudnn_on_gpu=None,
           data_format=None,
           name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Internally the input of shape [batch, in_width, in_channels] (or
  [batch, in_channels, in_width] for "NCW") gains a unit spatial dimension,
  the filter [filter_width, in_channels, out_channels] is reshaped to
  [1, filter_width, in_channels, out_channels], and the work is delegated to
  `tf.nn.conv2d`. The result is squeezed back to 3-D, yielding
  [batch, out_width, out_channels] where out_width follows conv2d's
  stride/padding rules.

  Args:
    value: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
    filters: A 3D `Tensor`. Must have the same type as `value`.
    stride: An `integer`. The number of entries by which the filter is moved
      right at each step.
    padding: 'SAME' or 'VALID'
    use_cudnn_on_gpu: An optional `bool`. Defaults to `True`.
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to
      `"NWC"` ([batch, in_width, in_channels]); `"NCW"` stores data as
      [batch, in_channels, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Insert the dummy height dimension at the position dictated by the
    # layout, and place the stride on the width dimension.
    if data_format in (None, "NHWC", "NWC"):
      data_format = "NHWC"
      expand_axis = 1
      strides = [1, 1, stride, 1]
    elif data_format in ("NCHW", "NCW"):
      data_format = "NCHW"
      expand_axis = 2
      strides = [1, 1, 1, stride]
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    value = array_ops.expand_dims(value, expand_axis)
    filters = array_ops.expand_dims(filters, 0)
    result = gen_nn_ops.conv2d(
        value,
        filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format)
    return array_ops.squeeze(result, [expand_axis])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              stride,
              padding,
              data_format=None,
              name=None):
  r"""Computes a 1-D convolution given 3-D input and filter tensors.

  Thin v2 wrapper around `conv1d`: the input of shape
  [batch, in_width, in_channels] (or [batch, in_channels, in_width] for
  "NCW") gains a unit spatial dimension, the filter
  [filter_width, in_channels, out_channels] is reshaped to
  [1, filter_width, in_channels, out_channels], and `tf.nn.conv2d` performs
  the equivalent convolution before the result is squeezed back to
  [batch, out_width, out_channels].

  Args:
    input: A 3D `Tensor`. Must be of type `float16`, `float32`, or `float64`.
    filters: A 3D `Tensor`. Must have the same type as `input`.
    stride: An `integer`. The number of entries by which the filter is moved
      right at each step.
    padding: 'SAME' or 'VALID'
    data_format: An optional `string` from `"NWC", "NCW"`. Defaults to
      `"NWC"` ([batch, in_width, in_channels]); `"NCW"` stores data as
      [batch, in_channels, in_width].
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as input.

  Raises:
    ValueError: if `data_format` is invalid.
  """
  return conv1d(
      value=input,
      filters=filters,
      stride=stride,
      padding=padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      name=name)
def conv1d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    stride,
    padding="SAME",
    data_format="NWC",
    name=None):
  """The transpose of `conv1d`.
  This operation is sometimes called "deconvolution" after [Deconvolutional
  Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf), but is
  actually the transpose (gradient) of `conv1d` rather than an actual
  deconvolution.
  Args:
    value: A 3-D `Tensor` of type `float` and shape
      `[batch, in_width, in_channels]` for `NWC` data format or
      `[batch, in_channels, in_width]` for `NCW` data format.
    filter: A 3-D `Tensor` with the same type as `value` and shape
      `[filter_width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    stride: An `integer`.  The number of entries by which
      the filter is moved right at each step.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
      See the "returns" section of `tf.nn.convolution` for details.
    data_format: A string. 'NHWC' and 'NCHW' are supported.
    name: Optional name for the returned tensor.
  Returns:
    A `Tensor` with the same type as `value`.
  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [value, filter, output_shape]) as name:
    # Dynamic check that output_shape describes the 3 dims [batch, width,
    # channels] (order depends on data_format).
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
      raise ValueError("output_shape must have shape (3,), got {}".format(
          output_shape_.get_shape()))
    # The format could be either NWC or NCW, map to NHWC or NCHW; `axis` is
    # the channel dimension of `value` under the chosen layout.
    if data_format is None or data_format == "NWC":
      data_format_2d = "NHWC"
      axis = 2
    elif data_format == "NCW":
      data_format_2d = "NCHW"
      axis = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    # Static check: value's channel count must match filter's in_channels
    # (dimension 2 of the filter).
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[2]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[2]))
    # Only possible when output_shape was given as concrete Python data; a
    # symbolic Tensor can't be indexed for a static check.
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [3] if reached this point.
      if not filter.get_shape().dims[1].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[1]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # Reshape the input tensor to [batch, 1, in_width, in_channels]: insert
    # a dummy height dimension so conv2d_backprop_input can do the work, and
    # widen output_shape_ to 4-D to match.
    if data_format_2d == "NHWC":
      output_shape_ = array_ops.concat(
          [output_shape_[:1], [1], output_shape_[1:]], axis=0)
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    else:
      output_shape_ = array_ops.concat(
          [output_shape_[:2], [1], output_shape_[2:]], axis=0)
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    value = array_ops.expand_dims(value, spatial_start_dim)
    filter = array_ops.expand_dims(filter, 0)  # pylint: disable=redefined-builtin
    # The transpose of conv2d is its input gradient; drop the dummy height
    # dimension from the 4-D result before returning.
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format_2d,
        name=name)
    return array_ops.squeeze(result, [spatial_start_dim])
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Calculates the compute resources needed for Dilation2D."""
  # All three shapes must be fully defined or the estimate is meaningless.
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(
      graph, node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  kernel_h = int(filter_shape[0])
  kernel_w = int(filter_shape[1])
  num_outputs = np.prod(output_shape.as_list(), dtype=np.int64)
  # Two ops per filter tap per output element.
  return ops.OpStats("flops", num_outputs * kernel_h * kernel_w * 2)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `kernel` tensors.

  Each input channel is eroded independently with its own structuring
  function; only the default "NHWC" `data_format` is currently supported.
  The output has shape `[batch, out_height, out_width, depth]`, the spatial
  sizes being determined by the `padding` algorithm.

  The grayscale morphological 2-D erosion is

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - rates[1] * dy,
                            strides[2] * x - rates[2] * dx,
                            c] -
                      kernel[dy, dx, c]

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    kernel: A `Tensor` of the same type as `value`, 3-D with shape
      `[kernel_height, kernel_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      Must be: `[1, stride_height, stride_width, 1]`.
    rates: A list of `ints` that has length `>= 4`. The input stride for
      atrous morphological dilation. Must be: `[1, rate_height, rate_width,
      1]`.
    padding: A `string` from: `"SAME", "VALID"`.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor` of the same type as `value`, 4-D with shape
    `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `kernel`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # By duality, eroding `value` with `kernel` equals negating the dilation
    # of `-value` by the spatially reflected kernel.
    reflected_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes the grayscale erosion of 4-D `value` and 3-D `filters` tensors.

  Each input channel is eroded independently with its own structuring
  function; only the default "NHWC" `data_format` is currently supported.
  The output has shape `[batch, out_height, out_width, depth]`, the spatial
  sizes being determined by the `padding` algorithm.

  The grayscale morphological 2-D erosion is

      output[b, y, x, c] =
         min_{dy, dx} value[b,
                            strides[1] * y - dilations[1] * dy,
                            strides[2] * x - dilations[2] * dx,
                            c] -
                      filters[dy, dx, c]

  Args:
    value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
    filters: A `Tensor` of the same type as `value`, 3-D with shape
      `[filters_height, filters_width, depth]`.
    strides: A list of `ints` that has length `>= 4`.
      Must be: `[1, stride_height, stride_width, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
    data_format: A `string`, only `"NHWC"` is currently supported.
    dilations: A list of `ints` that has length `>= 4`. The input stride for
      atrous morphological dilation. Must be: `[1, rate_height, rate_width,
      1]`.
    name: A name for the operation (optional). If not specified "erosion2d"
      is used.

  Returns:
    A `Tensor` of the same type as `value`, 4-D with shape
    `[batch, out_height, out_width, depth]`.

  Raises:
    ValueError: If the `value` depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    # By duality, eroding `value` with `filters` equals negating the dilation
    # of `-value` by the spatially reflected filters.
    reflected_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=reflected_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  r"""Says whether the targets are in the top `K` predictions.

  Produces a `batch_size` bool vector whose entry `i` is `True` when the
  prediction for the target class of example `i` is among the top `k`
  predictions for that example. Unlike the `TopK` op, ties straddling the
  top-`k` boundary are all counted as being in the top `k`.

  More formally, with \\(predictions_i\\) the predictions for all classes of
  example `i`, \\(targets_i\\) its target class, and \\(out_i\\) its output,

  $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$

  Args:
    predictions: A `float32` `Tensor` of shape `batch_size` x `classes`.
    targets: A `batch_size` vector of class ids; `int32` or `int64`.
    k: An `int`. Number of top elements to look at for computing precision.
    name: A name for the operation (optional).

  Returns:
    A `bool` `Tensor`: computed precision at `k`.
  """
  with ops.name_scope(name, "in_top_k"):
    return gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # The v2 endpoint swaps (predictions, targets) in its signature; delegate
  # with explicit keywords to keep the mapping obvious.
  return in_top_k(predictions=predictions, targets=targets, k=k, name=name)
in_top_k_v2.__doc__ = in_top_k.__doc__
# Export the generated quantized ops directly under the v1 `tf.nn` namespace;
# they need no Python-side wrapping.
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
| 40.941493 | 96 | 0.668919 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.gen_nn_ops import *
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.deprecation import deprecated_argument_lookup
from tensorflow.python.util.tf_export import tf_export
local_response_normalization = gen_nn_ops.lrn
def _non_atrous_convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    data_format=None,
    strides=None,
    name=None):
  """Computes a non-atrous N-D convolution of `input` with `filter`.

  Builds a `_NonAtrousConvolution` from the static shapes of `input` and
  `filter` and applies it immediately; the rank of the inputs selects the
  underlying 1-D, 2-D or 3-D convolution op (see `_NonAtrousConvolution`).

  Args:
    input: `Tensor`-convertible value of rank N+2.
    filter: `Tensor`-convertible filter of the same rank as `input`.
    padding: Padding algorithm string, forwarded to the underlying op.
    data_format: Optional data format string; valid values depend on rank
      (see `_NonAtrousConvolution`).
    strides: Optional sequence of N per-dimension strides.
    name: Optional name for the operation.

  Returns:
    A `Tensor` holding the convolution result.
  """
  with ops.name_scope(name, "non_atrous_convolution", [input, filter]) as scope:
    input = ops.convert_to_tensor(input, name="input")
    input_shape = input.get_shape()
    filter = ops.convert_to_tensor(filter, name="filter")
    filter_shape = filter.get_shape()
    op = _NonAtrousConvolution(
        input_shape,
        filter_shape=filter_shape,
        padding=padding,
        data_format=data_format,
        strides=strides,
        name=scope)
    return op(input, filter)
class _NonAtrousConvolution(object):
  """Helper that dispatches a rank-specific non-atrous convolution op.

  Strides and data format are validated and normalized once, from static
  shapes, in the constructor; the object can then be applied repeatedly via
  `__call__`. Supported input ranks are 3, 4 and 5, binding `conv_op` to
  `conv1d`, `conv2d` and `gen_nn_ops.conv3d` respectively.
  """

  def __init__(
      self,
      input_shape,
      filter_shape,
      padding,
      data_format=None,
      strides=None,
      name=None):
    """Validates shapes and precomputes strides/data_format for `__call__`.

    Raises:
      ValueError: if ranks are unknown or outside [3, 5], if `strides` has
        the wrong length, or if `data_format` is invalid for the rank.
    """
    # Constrain both ranks to agree before reading `ndims`.
    filter_shape = filter_shape.with_rank(input_shape.ndims)
    self.padding = padding
    self.name = name
    input_shape = input_shape.with_rank(filter_shape.ndims)
    if input_shape.ndims is None:
      raise ValueError("Rank of convolution must be known")
    if input_shape.ndims < 3 or input_shape.ndims > 5:
      raise ValueError(
          "`input` and `filter` must have rank at least 3 and at most 5")
    conv_dims = input_shape.ndims - 2  # number of spatial dimensions
    if strides is None:
      strides = [1] * conv_dims
    elif len(strides) != conv_dims:
      raise ValueError("len(strides)=%d, but should be %d" % (len(strides),
                                                              conv_dims))
    if conv_dims == 1:
      # conv1d takes a single integer stride rather than a stride list.
      if data_format is None:
        data_format = "NWC"
      elif data_format not in {"NCW", "NWC", "NCHW", "NHWC"}:
        raise ValueError("data_format must be \"NWC\" or \"NCW\".")
      self.strides = strides[0]
      self.data_format = data_format
      self.conv_op = self._conv1d
    elif conv_dims == 2:
      # Pad the spatial strides with 1s for the batch/channel dimensions, in
      # the position dictated by the layout.
      if data_format is None or data_format == "NHWC":
        data_format = "NHWC"
        strides = [1] + list(strides) + [1]
      elif data_format == "NCHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NHWC\" or \"NCHW\".")
      self.strides = strides
      self.data_format = data_format
      self.conv_op = conv2d
    elif conv_dims == 3:
      if data_format is None or data_format == "NDHWC":
        strides = [1] + list(strides) + [1]
      elif data_format == "NCDHW":
        strides = [1, 1] + list(strides)
      else:
        raise ValueError("data_format must be \"NDHWC\" or \"NCDHW\". Have: %s"
                         % data_format)
      self.strides = strides
      self.data_format = data_format
      self.conv_op = gen_nn_ops.conv3d

  # Adapter needed because conv1d's argument names (value/filters/stride)
  # differ from
  # those for gen_nn_ops.conv2d and gen_nn_ops.conv3d.
  # pylint: disable=redefined-builtin
  def _conv1d(self, input, filter, strides, padding, data_format, name):
    """Maps `__call__`'s uniform keyword names onto `conv1d`'s."""
    return conv1d(
        value=input,
        filters=filter,
        stride=strides,
        padding=padding,
        data_format=data_format,
        name=name)
  # pylint: enable=redefined-builtin

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Applies the precomputed convolution op to `inp` and `filter`."""
    return self.conv_op(
        input=inp,
        filter=filter,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        name=self.name)
@tf_export("nn.dilation2d", v1=[])
def dilation2d_v2(
    input,   # pylint: disable=redefined-builtin
    filters,  # pylint: disable=redefined-builtin
    strides,
    padding,
    data_format,
    dilations,
    name=None):
  """Computes the grayscale dilation of `input` by `filters`.

  Thin v2 wrapper around `gen_nn_ops.dilation2d` that renames `filters` ->
  `filter` and `dilations` -> `rates`.  The underlying op has no
  `data_format` attribute and operates on NHWC-laid-out data only.

  Args:
    input: A 4-D input `Tensor` (NHWC layout).
    filters: A 3-D filter `Tensor`.
    strides: Stride of the sliding window for each dimension of `input`.
    padding: The padding algorithm, forwarded to the op.
    data_format: Must be "NHWC"; other layouts are not yet supported.
    dilations: Input stride for atrous morphological dilation (forwarded as
      the op's `rates` attribute).
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `input`.

  Raises:
    ValueError: If `data_format` is not "NHWC".
  """
  # BUG FIX: this check previously read `data_format != "NCHW"`, which
  # rejected the only layout the kernel implements (NHWC) and let NCHW
  # through to an op that would silently interpret it as NHWC.
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  return gen_nn_ops.dilation2d(input=input,
                               filter=filters,
                               strides=strides,
                               rates=dilations,
                               padding=padding,
                               name=name)
@tf_export("nn.with_space_to_batch")
def with_space_to_batch(
    input,  # pylint: disable=redefined-builtin
    dilation_rate,
    padding,
    op,
    filter_shape=None,
    spatial_dims=None,
    data_format=None):
  """Performs `op` on the space-to-batch representation of `input`.

  Builds a `_WithSpaceToBatch` helper around `op` and immediately invokes it,
  so dilation is implemented by wrapping `op` between `space_to_batch_nd` and
  `batch_to_space_nd` transformations.
  """
  input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin

  def build_op(num_spatial_dims, padding):
    # Adapt the user-supplied `op` to the (input, filter) calling convention
    # that _WithSpaceToBatch expects; the filter argument is unused here.
    return lambda inp, _: op(inp, num_spatial_dims, padding)

  return _WithSpaceToBatch(
      input.get_shape(),
      dilation_rate,
      padding,
      build_op,
      filter_shape=filter_shape,
      spatial_dims=spatial_dims,
      data_format=data_format)(input, None)
class _WithSpaceToBatch(object):
  """Wraps an op so it is applied to the space-to-batch view of its input.

  When the dilation rate is statically all ones, `self.call` is just the op
  built with the original padding.  Otherwise `self.call` pads and reshapes
  the input with `space_to_batch_nd`, applies the op with "VALID" padding,
  and undoes the transform with `batch_to_space_nd`.
  """

  def __init__(self,
               input_shape,  # TensorShape of the eventual input.
               dilation_rate,  # Rank-1, fully-defined int32 tensor of rates.
               padding,  # "SAME" or "VALID".
               build_op,  # Callable (num_spatial_dims, padding) -> op.
               filter_shape=None,  # Required for "SAME" padding.
               spatial_dims=None,  # Optional explicit spatial dim indices.
               data_format=None):  # Layout string; "NC*" puts channels first.
    """Validates arguments and precomputes as much padding as possible."""
    dilation_rate = ops.convert_to_tensor(
        dilation_rate, dtypes.int32, name="dilation_rate")
    try:
      rate_shape = dilation_rate.get_shape().with_rank(1)
    except ValueError:
      raise ValueError("rate must be rank 1")
    if not dilation_rate.get_shape().is_fully_defined():
      raise ValueError("rate must have known shape")
    # The length of dilation_rate determines the number of spatial dims.
    num_spatial_dims = rate_shape.dims[0].value
    if data_format is not None and data_format.startswith("NC"):
      starting_spatial_dim = 2
    else:
      starting_spatial_dim = 1
    if spatial_dims is None:
      spatial_dims = range(starting_spatial_dim,
                           num_spatial_dims + starting_spatial_dim)
    orig_spatial_dims = list(spatial_dims)
    spatial_dims = sorted(set(int(x) for x in orig_spatial_dims))
    if spatial_dims != orig_spatial_dims or any(x < 1 for x in spatial_dims):
      raise ValueError(
          "spatial_dims must be a montonically increasing sequence of positive "
          "integers")  # pylint: disable=line-too-long
    if data_format is not None and data_format.startswith("NC"):
      expected_input_rank = spatial_dims[-1]
    else:
      expected_input_rank = spatial_dims[-1] + 1
    try:
      input_shape.with_rank_at_least(expected_input_rank)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d at least" % (expected_input_rank))
    const_rate = tensor_util.constant_value(dilation_rate)
    rate_or_const_rate = dilation_rate
    if const_rate is not None:
      rate_or_const_rate = const_rate
      if np.any(const_rate < 1):
        raise ValueError("dilation_rate must be positive")
      if np.all(const_rate == 1):
        # No dilation: skip the space-to-batch transform entirely.
        self.call = build_op(num_spatial_dims, padding)
        return
    # We have two padding contributions. The first is used for converting "SAME"
    # to "VALID". The second is required so that the height and width of the
    # zero-padded value tensor are multiples of rate.
    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      if filter_shape is None:
        raise ValueError("filter_shape must be specified for SAME padding")
      filter_shape = ops.convert_to_tensor(filter_shape, name="filter_shape")
      const_filter_shape = tensor_util.constant_value(filter_shape)
      if const_filter_shape is not None:
        filter_shape = const_filter_shape
        self.base_paddings = _with_space_to_batch_base_paddings(
            const_filter_shape, num_spatial_dims, rate_or_const_rate)
      else:
        # Filter shape unknown statically: defer computing base_paddings
        # until __call__, when the dynamic filter shape is available.
        self.num_spatial_dims = num_spatial_dims
        self.rate_or_const_rate = rate_or_const_rate
        self.base_paddings = None
    elif padding == "VALID":
      self.base_paddings = np.zeros([num_spatial_dims, 2], np.int32)
    else:
      raise ValueError("Invalid padding method %r" % padding)
    self.input_shape = input_shape
    self.spatial_dims = spatial_dims
    self.dilation_rate = dilation_rate
    self.data_format = data_format
    self.op = build_op(num_spatial_dims, "VALID")
    self.call = self._with_space_to_batch_call

  def _with_space_to_batch_call(self, inp, filter):  # pylint: disable=redefined-builtin
    """Applies `self.op` between space_to_batch_nd and batch_to_space_nd."""
    # Handle input whose shape is unknown during graph creation.
    input_spatial_shape = None
    input_shape = self.input_shape
    spatial_dims = self.spatial_dims
    if input_shape.ndims is not None:
      input_shape_list = input_shape.as_list()
      input_spatial_shape = [input_shape_list[i] for i in spatial_dims]
    if input_spatial_shape is None or None in input_spatial_shape:
      # Fall back to the dynamic shape when any spatial dim is unknown.
      input_shape_tensor = array_ops.shape(inp)
      input_spatial_shape = array_ops.stack(
          [input_shape_tensor[i] for i in spatial_dims])
    base_paddings = self.base_paddings
    if base_paddings is None:
      # base_paddings could not be computed at build time since static filter
      # shape was not fully defined.
      filter_shape = array_ops.shape(filter)
      base_paddings = _with_space_to_batch_base_paddings(
          filter_shape, self.num_spatial_dims, self.rate_or_const_rate)
    paddings, crops = array_ops.required_space_to_batch_paddings(
        input_shape=input_spatial_shape,
        base_paddings=base_paddings,
        block_shape=self.dilation_rate)
    # Expand rate/paddings/crops over the full dimension range, filling
    # non-spatial positions with the identity values (rate 1, padding 0).
    dilation_rate = _with_space_to_batch_adjust(self.dilation_rate, 1,
                                                spatial_dims)
    paddings = _with_space_to_batch_adjust(paddings, 0, spatial_dims)
    crops = _with_space_to_batch_adjust(crops, 0, spatial_dims)
    input_converted = array_ops.space_to_batch_nd(
        input=inp, block_shape=dilation_rate, paddings=paddings)
    result = self.op(input_converted, filter)
    result_converted = array_ops.batch_to_space_nd(
        input=result, block_shape=dilation_rate, crops=crops)
    # Recover channel information for output shape if channels are not last.
    if self.data_format is not None and self.data_format.startswith("NC"):
      if not result_converted.shape.dims[1].value and filter is not None:
        output_shape = result_converted.shape.as_list()
        output_shape[1] = filter.shape[-1]
        result_converted.set_shape(output_shape)
    return result_converted

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Invokes whichever implementation was selected in the constructor."""
    return self.call(inp, filter)
def _with_space_to_batch_base_paddings(filter_shape, num_spatial_dims,
                                       rate_or_const_rate):
  """Returns the [start, end] base paddings per spatial dim for "SAME".

  Computes the padding needed to reduce a "SAME" dilated convolution to a
  "VALID" one, given the filter's spatial shape and the dilation rate.
  """
  spatial = filter_shape[:num_spatial_dims]
  # Effective filter extent once (rate - 1) zeros are inserted between
  # consecutive filter values.
  dilated = spatial + (spatial - 1) * (rate_or_const_rate - 1)
  total_pad = dilated - 1
  # When the total padding is odd, pad more at the end — the same convention
  # conv2d uses for "SAME".
  pad_start = total_pad // 2
  pad_end = total_pad - pad_start
  return array_ops.stack(
      [[pad_start[i], pad_end[i]] for i in range(num_spatial_dims)])
def _with_space_to_batch_adjust(orig, fill_value, spatial_dims):
  """Expands `orig` (indexed by spatial dim) over the full dimension range.

  Returns a tensor/array whose entry for spatial dimension `spatial_dims[i]`
  (1-indexed) is `orig[i]`, and whose entries for all other dimensions up to
  `spatial_dims[-1]` are `fill_value`.  Stays in NumPy when `orig` is a
  constant; otherwise emits an `array_ops.concat`.
  """
  fill_dims = orig.get_shape().as_list()[1:]
  dtype = orig.dtype.as_numpy_dtype
  parts = []
  const_orig = tensor_util.constant_value(orig)
  const_or_orig = const_orig if const_orig is not None else orig
  prev_spatial_dim = 0
  i = 0
  while i < len(spatial_dims):
    start_i = i
    start_spatial_dim = spatial_dims[i]
    if start_spatial_dim > 1:
      # Fill in any gap from the previous spatial dimension (or dimension 1 if
      # this is the first spatial dimension) with `fill_value`.
      parts.append(
          np.full(
              [start_spatial_dim - 1 - prev_spatial_dim] + fill_dims,
              fill_value,
              dtype=dtype))
    # Find the largest value of i such that:
    #   [spatial_dims[start_i], ..., spatial_dims[i]]
    #     == [start_spatial_dim, ..., start_spatial_dim + i - start_i],
    # i.e. the end of a contiguous group of spatial dimensions.
    while (i + 1 < len(spatial_dims) and
           spatial_dims[i + 1] == spatial_dims[i] + 1):
      i += 1
    parts.append(const_or_orig[start_i:i + 1])
    prev_spatial_dim = spatial_dims[i]
    i += 1
  if const_orig is not None:
    return np.concatenate(parts)
  else:
    return array_ops.concat(parts, 0)
def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):
if dilation_rate is None:
dilation_rate = [1] * num_spatial_dims
elif len(dilation_rate) != num_spatial_dims:
raise ValueError("len(dilation_rate)=%d but should be %d" %
(len(dilation_rate), num_spatial_dims))
dilation_rate = np.array(dilation_rate, dtype=np.int32)
if np.any(dilation_rate < 1):
raise ValueError("all values of dilation_rate must be positive")
if strides is None:
strides = [1] * num_spatial_dims
elif len(strides) != num_spatial_dims:
raise ValueError("len(strides)=%d but should be %d" % (len(strides),
num_spatial_dims))
strides = np.array(strides, dtype=np.int32)
if np.any(strides < 1):
raise ValueError("all values of strides must be positive")
if np.any(strides > 1) and np.any(dilation_rate > 1):
raise ValueError(
"strides > 1 not supported in conjunction with dilation_rate > 1")
return strides, dilation_rate
@tf_export(v1=["nn.convolution"])
def convolution(
    input,  # pylint: disable=redefined-builtin
    filter,  # pylint: disable=redefined-builtin
    padding,
    strides=None,
    dilation_rate=None,
    name=None,
    data_format=None):
  """Computes sums of N-D convolutions by delegating to `Convolution`.

  Converts `input` and `filter` to tensors, builds a `Convolution` object
  from their static shapes, and applies it.
  """
  with ops.name_scope(name, "convolution", [input, filter]) as name:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    conv_op = Convolution(
        input.get_shape(),
        filter.get_shape(),
        padding,
        strides=strides,
        dilation_rate=dilation_rate,
        name=name,
        data_format=data_format)
    return conv_op(input, filter)
@tf_export("nn.convolution", v1=[])
def convolution_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  """V2 wrapper for `convolution` with renamed arguments.

  Forwards to the v1 endpoint, mapping `filters` -> `filter` and
  `dilations` -> `dilation_rate`.
  """
  return convolution(
      input,  # pylint: disable=redefined-builtin
      filters,
      padding=padding,
      strides=strides,
      dilation_rate=dilations,
      data_format=data_format,
      name=name)
# The v2 docstring is the v1 docstring with the renamed arguments
# (dilation_rate -> dilations, filter -> filters) substituted in.
convolution_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        convolution.__doc__, "dilation_rate", "dilations"),
    "filter", "filters")
class Convolution(object):
  """Helper that computes an N-D convolution, possibly dilated.

  Validates input/filter ranks and channel compatibility once, then composes
  `_WithSpaceToBatch` (which handles dilation) with `_NonAtrousConvolution`
  (which performs the actual convolution with "VALID" padding).
  """

  def __init__(self,
               input_shape,  # TensorShape of the input tensor.
               filter_shape,  # TensorShape of the filter tensor.
               padding,  # "SAME" or "VALID".
               strides=None,  # Per-spatial-dim strides; default all ones.
               dilation_rate=None,  # Per-spatial-dim rates; default all ones.
               name=None,  # Optional op name.
               data_format=None):  # Layout string; "NC*" is channels-first.
    """Validates shapes and builds the space-to-batch-wrapped conv op."""
    # Determine the total rank from whichever shape has a known rank.
    num_total_dims = filter_shape.ndims
    if num_total_dims is None:
      num_total_dims = input_shape.ndims
    if num_total_dims is None:
      raise ValueError("rank of input or filter must be known")
    num_spatial_dims = num_total_dims - 2
    try:
      input_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "input tensor must have rank %d" % (num_spatial_dims + 2))
    try:
      filter_shape.with_rank(num_spatial_dims + 2)
    except ValueError:
      raise ValueError(
          "filter tensor must have rank %d" % (num_spatial_dims + 2))
    # Locate the channel dimension and the spatial dimensions by layout.
    if data_format is None or not data_format.startswith("NC"):
      input_channels_dim = tensor_shape.dimension_at_index(
          input_shape, num_spatial_dims + 1)
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
      spatial_dims = range(2, num_spatial_dims + 2)
    if not input_channels_dim.is_compatible_with(
        filter_shape[num_spatial_dims]):
      raise ValueError(
          "number of input channels does not match corresponding dimension of "
          "filter, {} != {}".format(input_channels_dim,
                                    filter_shape[num_spatial_dims]))
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)
    self.input_shape = input_shape
    self.filter_shape = filter_shape
    self.data_format = data_format
    self.strides = strides
    self.name = name
    self.conv_op = _WithSpaceToBatch(
        input_shape,
        dilation_rate=dilation_rate,
        padding=padding,
        build_op=self._build_op,
        filter_shape=filter_shape,
        spatial_dims=spatial_dims,
        data_format=data_format)

  def _build_op(self, _, padding):
    """Builds the inner non-dilated conv op for `_WithSpaceToBatch`."""
    return _NonAtrousConvolution(
        self.input_shape,
        filter_shape=self.filter_shape,
        padding=padding,
        data_format=self.data_format,
        strides=self.strides,
        name=self.name)

  def __call__(self, inp, filter):  # pylint: disable=redefined-builtin
    """Applies the composed convolution to `inp` and `filter`."""
    return self.conv_op(inp, filter)
@tf_export(v1=["nn.pool"])
def pool(
    input,  # pylint: disable=redefined-builtin
    window_shape,  # Sequence of 1, 2, or 3 window sizes.
    pooling_type,  # "MAX" or "AVG".
    padding,  # "SAME" or "VALID".
    dilation_rate=None,  # Per-spatial-dim rates; default all ones.
    strides=None,  # Per-spatial-dim strides; default all ones.
    name=None,  # Optional op name.
    data_format=None):  # Layout string; "NC*" is channels-first.
  # pylint: disable=line-too-long
  """Performs an N-D pooling operation.

  Dispatches to max_pool/avg_pool (1-D and 2-D) or max_pool3d/avg_pool3d
  based on `(pooling_type, len(window_shape))`, with dilation handled via
  `with_space_to_batch`.  1-D inputs are pooled by temporarily inserting a
  spatial dimension and squeezing it back out afterwards.
  """
  # pylint: enable=line-too-long
  with ops.name_scope(name, "%s_pool" % (pooling_type.lower()),
                      [input]) as scope:
    input = ops.convert_to_tensor(input, name="input")  # pylint: disable=redefined-builtin
    num_spatial_dims = len(window_shape)
    if num_spatial_dims < 1 or num_spatial_dims > 3:
      raise ValueError("It is required that 1 <= num_spatial_dims <= 3.")
    input.get_shape().with_rank(num_spatial_dims + 2)
    strides, dilation_rate = _get_strides_and_dilation_rate(
        num_spatial_dims, strides, dilation_rate)
    if padding == "SAME" and np.any(dilation_rate > 1):
      raise ValueError(
          "pooling with SAME padding is not implemented for dilation_rate > 1")
    if np.any(strides > window_shape):
      raise ValueError(
          "strides > window_shape not supported due to inconsistency between "
          "CPU and GPU implementations")
    # Map (pooling_type, spatial rank) to the concrete pooling op.
    pooling_ops = {
        ("MAX", 1): max_pool,
        ("MAX", 2): max_pool,
        ("MAX", 3): max_pool3d,  # pylint: disable=undefined-variable
        ("AVG", 1): avg_pool,
        ("AVG", 2): avg_pool,
        ("AVG", 3): avg_pool3d,  # pylint: disable=undefined-variable
    }
    op_key = (pooling_type, num_spatial_dims)
    if op_key not in pooling_ops:
      raise ValueError("%d-D %s pooling is not supported." % (op_key[1],
                                                              op_key[0]))
    # Pad window/strides with identity entries for batch and channel dims.
    if data_format is None or not data_format.startswith("NC"):
      adjusted_window_shape = [1] + list(window_shape) + [1]
      adjusted_strides = [1] + list(strides) + [1]
      spatial_dims = range(1, num_spatial_dims + 1)
    else:
      adjusted_window_shape = [1, 1] + list(window_shape)
      adjusted_strides = [1, 1] + list(strides)
      spatial_dims = range(2, num_spatial_dims + 2)
    if num_spatial_dims == 1:
      # 1-D pooling is implemented on top of the 2-D ops, so translate the
      # data format and add an extra unit dimension to window and strides.
      if data_format is None or data_format == "NWC":
        data_format_kwargs = dict(data_format="NHWC")
      elif data_format == "NCW":
        data_format_kwargs = dict(data_format="NCHW")
      else:
        raise ValueError("data_format must be either \"NWC\" or \"NCW\".")
      adjusted_window_shape = [1] + adjusted_window_shape
      adjusted_strides = [1] + adjusted_strides
    else:
      data_format_kwargs = dict(data_format=data_format)
    def op(converted_input, _, converted_padding):  # pylint: disable=missing-docstring
      if num_spatial_dims == 1:
        converted_input = array_ops.expand_dims(converted_input,
                                                spatial_dims[0])
      result = pooling_ops[op_key](
          converted_input,
          adjusted_window_shape,
          adjusted_strides,
          converted_padding,
          name=scope,
          **data_format_kwargs)
      if num_spatial_dims == 1:
        result = array_ops.squeeze(result, [spatial_dims[0]])
      return result
    return with_space_to_batch(
        input=input,
        dilation_rate=dilation_rate,
        padding=padding,
        op=op,
        spatial_dims=spatial_dims,
        filter_shape=window_shape)
@tf_export("nn.pool", v1=[])
def pool_v2(
    input,  # pylint: disable=redefined-builtin
    window_shape,
    pooling_type,
    strides=None,
    padding="VALID",
    data_format=None,
    dilations=None,
    name=None):
  """V2 wrapper for `pool` with `dilations` in place of `dilation_rate`."""
  return pool(
      input=input,
      window_shape=window_shape,
      pooling_type=pooling_type,
      strides=strides,
      padding=padding,
      data_format=data_format,
      dilation_rate=dilations,
      name=name)
@tf_export("nn.atrous_conv2d")
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous (dilated) 2-D convolution, implemented via `convolution`.

  The scalar `rate` is broadcast to both spatial dimensions and passed as
  the dilation rate.
  """
  dilation = np.broadcast_to(rate, (2,))
  return convolution(
      input=value,
      filter=filters,
      dilation_rate=dilation,
      padding=padding,
      name=name)
def _convert_padding(padding):
explicit_paddings = []
if padding == "EXPLICIT":
# Give a better error message if EXPLICIT is passed.
raise ValueError('"EXPLICIT" is not a valid value for the padding '
"parameter. To use explicit padding, the padding "
"parameter must be a list.")
if isinstance(padding, (list, tuple)):
for i, dim_paddings in enumerate(padding):
if not isinstance(dim_paddings, (list, tuple)):
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding is not a list/tuple" % i)
if len(dim_paddings) != 2:
raise ValueError("When padding is a list, each element of padding must "
"be a list/tuple of size 2. Element with index %d of "
"padding has size %d" % (i, len(dim_paddings)))
explicit_paddings.extend(dim_paddings)
if len(padding) != 4:
raise ValueError("When padding is a list, it must be of size 4. Got "
"padding of size: %d" % len(padding))
padding = "EXPLICIT"
return padding, explicit_paddings
@tf_export("nn.conv2d", v1=[])
def conv2d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              strides,
              padding,
              data_format="NHWC",
              dilations=None,
              name=None):
  """V2 wrapper for `conv2d`: cuDNN always enabled, `dilations` optional."""
  return conv2d(input,  # pylint: disable=redefined-builtin
                filters,
                strides,
                padding,
                use_cudnn_on_gpu=True,
                data_format=data_format,
                dilations=[1, 1, 1, 1] if dilations is None else dilations,
                name=name)
@tf_export(v1=["nn.conv2d"])
def conv2d(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  """Computes a 2-D convolution given 4-D `input` and `filter` tensors.

  Normalizes `padding` (string or explicit pad list) via `_convert_padding`
  and forwards everything to the generated `gen_nn_ops.conv2d` op.
  """
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d(
      input,  # pylint: disable=redefined-builtin
      filter,
      strides,
      padding,
      use_cudnn_on_gpu=use_cudnn_on_gpu,
      explicit_paddings=explicit_paddings,
      data_format=data_format,
      dilations=dilations,
      name=name)
@tf_export("nn.conv2d_backprop_filter", v1=[])
def conv2d_backprop_filter_v2(input,  # pylint: disable=redefined-builtin
                              filter_sizes,
                              out_backprop,
                              strides,
                              padding,
                              data_format="NHWC",
                              dilations=None,
                              name=None):
  """V2 wrapper for `conv2d_backprop_filter` (cuDNN always enabled)."""
  return conv2d_backprop_filter(
      input,  # pylint: disable=redefined-builtin
      filter_sizes,
      out_backprop,
      strides,
      padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      dilations=[1, 1, 1, 1] if dilations is None else dilations,
      name=name)
@tf_export(v1=["nn.conv2d_backprop_filter"])
def conv2d_backprop_filter(  # pylint: disable=redefined-builtin,dangerous-default-value
    input,
    filter_sizes,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  """Computes the gradient of conv2d with respect to the filter.

  Normalizes `padding` via `_convert_padding` and forwards to the generated
  op.
  """
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_filter(input, filter_sizes, out_backprop,
                                           strides, padding, use_cudnn_on_gpu,
                                           explicit_paddings, data_format,
                                           dilations, name)
@tf_export("nn.conv2d_backprop_input", v1=[])
def conv2d_backprop_input_v2(input_sizes,
                             filters,
                             out_backprop,
                             strides,
                             padding,
                             data_format="NHWC",
                             dilations=None,
                             name=None):
  """V2 wrapper for `conv2d_backprop_input` (cuDNN always enabled)."""
  return conv2d_backprop_input(
      input_sizes,
      filters,
      out_backprop,
      strides,
      padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      dilations=[1, 1, 1, 1] if dilations is None else dilations,
      name=name)
@tf_export(v1=["nn.conv2d_backprop_input"])
def conv2d_backprop_input(  # pylint: disable=redefined-builtin,dangerous-default-value
    input_sizes,
    filter,
    out_backprop,
    strides,
    padding,
    use_cudnn_on_gpu=True,
    data_format="NHWC",
    dilations=[1, 1, 1, 1],
    name=None):
  """Computes the gradient of conv2d with respect to the input.

  Normalizes `padding` via `_convert_padding` and forwards to the generated
  op.
  """
  padding, explicit_paddings = _convert_padding(padding)
  return gen_nn_ops.conv2d_backprop_input(input_sizes, filter, out_backprop,
                                          strides, padding, use_cudnn_on_gpu,
                                          explicit_paddings, data_format,
                                          dilations, name)
@tf_export(v1=["nn.conv2d_transpose"])
def conv2d_transpose(
    value,  # 4-D input `Tensor`.
    filter,  # pylint: disable=redefined-builtin
    output_shape,  # 1-D tensor/list of 4 ints: shape of the deconv output.
    strides,  # Strides, forwarded to the backprop-input op.
    padding="SAME",  # "SAME" or "VALID".
    data_format="NHWC",  # "NHWC" or "NCHW".
    name=None):  # Optional op name.
  """The transpose of `conv2d`.

  Implemented as the gradient of conv2d with respect to its input, i.e. via
  `gen_nn_ops.conv2d_backprop_input` with `value` as the backprop signal and
  `output_shape` as the input sizes.

  Raises:
    ValueError: If data_format/padding is invalid, or if the channel
      dimensions of `value`, `filter`, and `output_shape` are incompatible.
  """
  with ops.name_scope(name, "conv2d_transpose",
                      [value, filter, output_shape]) as name:
    if data_format not in ("NCHW", "NHWC"):
      raise ValueError("data_format has to be either NCHW or NHWC.")
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    # Index of the channel dimension in `value` for the chosen layout.
    axis = 3 if data_format == "NHWC" else 1
    # filter is [height, width, output_channels, input_channels] here, so
    # filter dim 3 must match value's channel dim.
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[3]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[3]))
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}".format(
          output_shape_.get_shape()))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filter.get_shape().dims[2].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[2]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    return gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.conv2d_transpose", v1=[])
def conv2d_transpose_v2(
    input,  # pylint: disable=redefined-builtin
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NHWC",
    name=None):
  """V2 wrapper for `conv2d_transpose` with renamed arguments."""
  return conv2d_transpose(
      input,
      filters,
      output_shape,
      strides,
      data_format=data_format,
      padding=padding,
      name=name)
# The v2 docstring is the v1 docstring with the renamed arguments
# (filter -> filters, value -> input) substituted in.
conv2d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv2d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.atrous_conv2d_transpose")
def atrous_conv2d_transpose(value,  # 4-D NHWC input `Tensor`.
                            filters,  # 4-D filter `Tensor`.
                            output_shape,  # 1-D tensor/list of 4 ints.
                            rate,  # Positive int dilation rate.
                            padding,  # "SAME" or "VALID".
                            name=None):  # Optional op name.
  """The transpose of `atrous_conv2d`.

  For rate == 1 this is just `conv2d_transpose`.  Otherwise the input is
  rearranged with `space_to_batch`, a "VALID" `conv2d_backprop_input` is
  applied, and the result is restored with `batch_to_space`.

  Raises:
    ValueError: On incompatible channel dimensions, rate < 1, a malformed
      `output_shape`, or an invalid `padding`.
  """
  with ops.name_scope(name, "atrous_conv2d_transpose",
                      [value, filters, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filters = ops.convert_to_tensor(filters, name="filters")
    if not value.get_shape().dims[3].is_compatible_with(filters.get_shape()[3]):
      raise ValueError(
          "value's input channels does not match filters' input channels, "
          "{} != {}".format(value.get_shape()[3],
                            filters.get_shape()[3]))
    if rate < 1:
      raise ValueError("rate {} cannot be less than one".format(rate))
    if rate == 1:
      # No dilation: plain transposed convolution.
      return conv2d_transpose(
          value,
          filters,
          output_shape,
          strides=[1, 1, 1, 1],
          padding=padding,
          data_format="NHWC")
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}".format(
          output_shape_.get_shape()))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filters.get_shape().dims[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3],
                              filters.get_shape()[2]))
    if padding == "SAME":
      # Compute "SAME" padding from the dilated (upsampled) filter extent:
      # (rate - 1) zeros are conceptually inserted between filter taps.
      if filters.get_shape().is_fully_defined():
        filter_shape = filters.get_shape().as_list()
      else:
        filter_shape = array_ops.shape(filters)
      filter_height, filter_width = filter_shape[0], filter_shape[1]
      filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
      filter_width_up = filter_width + (filter_width - 1) * (rate - 1)
      pad_height = filter_height_up - 1
      pad_width = filter_width_up - 1
      # Odd totals pad more at the bottom/right (conv2d's convention).
      pad_top = pad_height // 2
      pad_bottom = pad_height - pad_top
      pad_left = pad_width // 2
      pad_right = pad_width - pad_left
    elif padding == "VALID":
      pad_top = 0
      pad_bottom = 0
      pad_left = 0
      pad_right = 0
    else:
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    in_height = output_shape[1] + pad_top + pad_bottom
    in_width = output_shape[2] + pad_left + pad_right
    # Extra padding so the padded extents are exact multiples of `rate`,
    # as required by space_to_batch.
    pad_bottom_extra = (rate - in_height % rate) % rate
    pad_right_extra = (rate - in_width % rate) % rate
    space_to_batch_pad = [[0, pad_bottom_extra], [0, pad_right_extra]]
    value = array_ops.space_to_batch(
        input=value, paddings=space_to_batch_pad, block_size=rate)
    input_sizes = [
        rate * rate * output_shape[0], (in_height + pad_bottom_extra) // rate,
        (in_width + pad_right_extra) // rate, output_shape[3]
    ]
    value = gen_nn_ops.conv2d_backprop_input(
        input_sizes=input_sizes,
        filter=filters,
        out_backprop=value,
        strides=[1, 1, 1, 1],
        padding="VALID",
        data_format="NHWC")
    # Crop away both the "SAME" padding and the rate-alignment padding.
    batch_to_space_crop = [[pad_top, pad_bottom + pad_bottom_extra],
                           [pad_left, pad_right + pad_right_extra]]
    return array_ops.batch_to_space(
        input=value, crops=batch_to_space_crop, block_size=rate)
@tf_export("nn.conv3d", v1=[])
def conv3d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              strides,
              padding,
              data_format="NDHWC",
              dilations=None,
              name=None):
  """V2 wrapper for the generated conv3d op; `dilations` defaults to ones."""
  return gen_nn_ops.conv3d(
      input,  # pylint: disable=redefined-builtin
      filters,
      strides,
      padding,
      data_format=data_format,
      dilations=[1, 1, 1, 1, 1] if dilations is None else dilations,
      name=name)
# Export the generated conv3d op directly as the v1 endpoint, and reuse its
# docstring (with "filter" renamed to "filters") for the v2 wrapper above.
tf_export(v1=["nn.conv3d"])(gen_nn_ops.conv3d)
conv3d_v2.__doc__ = deprecation.rewrite_argument_docstring(
    gen_nn_ops.conv3d.__doc__, "filter", "filters")
@tf_export(v1=["nn.conv3d_transpose"])
def conv3d_transpose(
    value,  # 5-D input `Tensor`.
    filter,  # pylint: disable=redefined-builtin
    output_shape,  # 1-D tensor/list of 5 ints: shape of the deconv output.
    strides,  # Strides, forwarded to the backprop-input op.
    padding="SAME",  # "SAME" or "VALID".
    data_format="NDHWC",  # "NDHWC" or "NCDHW".
    name=None):  # Optional op name.
  """The transpose of `conv3d`.

  Implemented as the gradient of conv3d with respect to its input, via
  `gen_nn_ops.conv3d_backprop_input_v2`.

  Raises:
    ValueError: On incompatible channel dimensions, a malformed
      `output_shape`, or an invalid `padding`.
  """
  with ops.name_scope(name, "conv3d_transpose",
                      [value, filter, output_shape]) as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")  # pylint: disable=redefined-builtin
    # Index of the channel dimension in `value` for the chosen layout.
    axis = 1 if data_format == "NCDHW" else 4
    # filter dim 4 is the input-channel dimension here.
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[4]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[4]))
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(5)):
      raise ValueError("output_shape must have shape (5,), got {}".format(
          output_shape_.get_shape()))
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [5] if reached this point.
      if not filter.get_shape().dims[3].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[3]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    return gen_nn_ops.conv3d_backprop_input_v2(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
# pylint: disable=redefined-builtin
@tf_export("nn.conv3d_transpose", v1=[])
def conv3d_transpose_v2(
    input,
    filters,
    output_shape,
    strides,
    padding="SAME",
    data_format="NDHWC",
    name=None):
  """V2 wrapper for `conv3d_transpose` with renamed arguments."""
  return conv3d_transpose(
      input,
      filters,
      output_shape,
      strides,
      data_format=data_format,
      padding=padding,
      name=name)
# pylint: enable=redefined-builtin
# The v2 docstring is the v1 docstring with the renamed arguments
# (filter -> filters, value -> input) substituted in.
conv3d_transpose_v2.__doc__ = deprecation.rewrite_argument_docstring(
    deprecation.rewrite_argument_docstring(
        conv3d_transpose.__doc__, "filter", "filters"),
    "value", "input")
@tf_export("nn.bias_add")
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` (a 1-D tensor) to `value` via `gen_nn_ops.bias_add`."""
  with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
    # In eager mode, skip the explicit conversions and let the op handle them.
    if context.executing_eagerly():
      return gen_nn_ops.bias_add(value, bias, data_format=data_format,
                                 name=name)
    value = ops.convert_to_tensor(value, name="input")
    bias = ops.convert_to_tensor(bias, dtype=value.dtype, name="bias")
    return gen_nn_ops.bias_add(value, bias, data_format=data_format, name=name)
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value` using the legacy BiasAddV1 op (no data_format)."""
  with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
    value = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.bias_add_v1(
        value,
        ops.convert_to_tensor(bias, dtype=value.dtype, name="bias"),
        name=name)
@tf_export(v1=["nn.crelu"])
def crelu(features, name=None, axis=-1):
  """Concatenated ReLU: relu of `features` and `-features` along `axis`."""
  with ops.name_scope(name, "CRelu", [features]) as name:
    features = ops.convert_to_tensor(features, name="features")
    # Concatenate the positive and negated activations, then apply ReLU once.
    mirrored = array_ops.concat([features, -features], axis, name=name)
    return gen_nn_ops.relu(mirrored)
@tf_export("nn.crelu", v1=[])
def crelu_v2(features, axis=-1, name=None):
  """V2 wrapper for `crelu` (argument order changed only)."""
  return crelu(features, name=name, axis=axis)
# The v2 endpoint shares the v1 docstring verbatim.
crelu_v2.__doc__ = crelu.__doc__
@tf_export("nn.relu6")
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)` via the op."""
  with ops.name_scope(name, "Relu6", [features]) as name:
    return gen_nn_ops.relu6(
        ops.convert_to_tensor(features, name="features"), name=name)
@tf_export("nn.leaky_relu")
def leaky_relu(features, alpha=0.2, name=None):
  """Compute the Leaky ReLU activation function.

  Args:
    features: A `Tensor` representing preactivation values.  Integer inputs
      are cast to float32.
    alpha: Slope of the activation function at x < 0.
    name: A name for the operation (optional).

  Returns:
    The activation value, `max(alpha * features, features)`.
  """
  with ops.name_scope(name, "LeakyRelu", [features, alpha]) as name:
    features = ops.convert_to_tensor(features, name="features")
    if features.dtype.is_integer:
      features = math_ops.to_float(features)
    if compat.forward_compatible(2018, 11, 1):
      if isinstance(alpha, np.ndarray):
        # FIX: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
        # ndarray.item() is the documented, behavior-identical replacement.
        alpha = alpha.item()
      return gen_nn_ops.leaky_relu(features, alpha=alpha, name=name)
    # Fallback path for graphs that must stay compatible with older runtimes
    # that lack the fused LeakyRelu kernel.
    alpha = ops.convert_to_tensor(alpha, dtype=features.dtype, name="alpha")
    return math_ops.maximum(alpha * features, features, name=name)
def _flatten_outer_dims(logits):
  """Flattens logits' outer dimensions, keeping its last dimension."""
  rank = array_ops.rank(logits)
  last_dim_size = array_ops.slice(
      array_ops.shape(logits), [math_ops.subtract(rank, 1)], [1])
  output = array_ops.reshape(logits, array_ops.concat([[-1], last_dim_size], 0))
  # Propagate a static shape when every outer dimension is known.
  if not context.executing_eagerly():
    shape = logits.get_shape()
    if shape is not None and shape.dims is not None:
      dims = shape.as_list()
      outer_dims = dims[:-1]
      if None not in outer_dims:
        flattened = 1
        for d in outer_dims:
          flattened *= d
        output.set_shape([flattened, dims[-1]])
  return output
def _softmax(logits, compute_op, dim=-1, name=None):
  """Helper function for softmax and log_softmax.

  Applies `compute_op` along dimension `dim` of `logits`.  When `dim` is the
  last dimension the op is applied directly; otherwise the tensor is
  transposed so `dim` becomes last, the op is applied, and the transpose is
  undone.

  Args:
    logits: A non-empty `Tensor`.
    compute_op: Either gen_nn_ops.softmax or gen_nn_ops.log_softmax.
    dim: The dimension softmax would be performed on. The default is -1,
      which indicates the last dimension.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type and shape as `logits`.

  Raises:
    InvalidArgumentError: if a constant `dim` is out of range for `logits`.
  """

  def _swap_axis(logits, dim_index, last_index, name=None):
    """Swaps logits's dim_index and last_index."""
    return array_ops.transpose(
        logits,
        array_ops.concat([
            math_ops.range(dim_index), [last_index],
            math_ops.range(dim_index + 1, last_index), [dim_index]
        ], 0),
        name=name)

  logits = ops.convert_to_tensor(logits)
  # We need its original shape for shape inference.
  shape = logits.get_shape()
  # FIX: was `dim is -1`, an identity comparison on an int literal that only
  # worked via CPython's small-int cache and raises a SyntaxWarning on
  # modern interpreters; equality is the intended check.
  is_last_dim = (dim == -1) or (dim == shape.ndims - 1)
  if is_last_dim:
    return compute_op(logits, name=name)
  dim_val = dim
  if isinstance(dim, ops.Tensor):
    dim_val = tensor_util.constant_value(dim)
  if dim_val is not None and (dim_val < -shape.ndims or dim_val >= shape.ndims):
    raise errors_impl.InvalidArgumentError(
        None, None,
        "Dimension (%d) must be in the range [%d, %d) where %d is the number of"
        " dimensions in the input." % (dim_val, -shape.ndims, shape.ndims,
                                       shape.ndims))
  # If dim is not the last dimension, we have to do a transpose so that we can
  # still perform softmax on its last dimension.
  # In case dim is negative (and is not last dimension -1), add shape.ndims
  ndims = array_ops.rank(logits)
  if not isinstance(dim, ops.Tensor):
    if dim < 0:
      dim += ndims
  else:
    dim = array_ops.where(math_ops.less(dim, 0), dim + ndims, dim)
  # Swap logits' dimension of dim and its last dimension.
  input_rank = array_ops.rank(logits)
  dim_axis = dim % shape.ndims
  logits = _swap_axis(logits, dim_axis, math_ops.subtract(input_rank, 1))
  output = compute_op(logits)
  # Undo the transpose and restore the statically known shape.
  output = _swap_axis(
      output, dim_axis, math_ops.subtract(input_rank, 1), name=name)
  output.set_shape(shape)
  return output
@tf_export(v1=["nn.softmax", "math.softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax(logits, axis=None, name=None, dim=None):
  """Computes softmax activations along `axis` (default: last dimension)."""
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.softmax, -1 if axis is None else axis,
                  name)
@tf_export("nn.softmax", "math.softmax", v1=[])
def softmax_v2(logits, axis=None, name=None):
  """Computes softmax activations along `axis` (default: last dimension)."""
  return _softmax(logits, gen_nn_ops.softmax, -1 if axis is None else axis,
                  name)
@tf_export(v1=["nn.log_softmax", "math.log_softmax"])
@deprecation.deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def log_softmax(logits, axis=None, name=None, dim=None):
  """Computes log-softmax activations along `axis` (default: last dim)."""
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
  return _softmax(logits, gen_nn_ops.log_softmax, -1 if axis is None else axis,
                  name)
@tf_export("nn.log_softmax", "math.log_softmax", v1=[])
def log_softmax_v2(logits, axis=None, name=None):
  """Computes log-softmax activations along `axis` (default: last dim)."""
  return _softmax(logits, gen_nn_ops.log_softmax, -1 if axis is None else axis,
                  name)
def _ensure_xent_args(name, sentinel, labels, logits):
if sentinel is not None:
raise ValueError("Only call `%s` with "
"named arguments (labels=..., logits=..., ...)" % name)
if labels is None or logits is None:
raise ValueError("Both labels and logits must be provided.")
@tf_export("nn.softmax_cross_entropy_with_logits", v1=[])
def softmax_cross_entropy_with_logits_v2(labels, logits, axis=-1, name=None):
  """Computes softmax cross entropy between `labels` and `logits` (v2)."""
  return softmax_cross_entropy_with_logits_v2_helper(
      labels=labels, logits=logits, name=name, axis=axis)
@tf_export(v1=["nn.softmax_cross_entropy_with_logits_v2"])
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def softmax_cross_entropy_with_logits_v2_helper(
    labels, logits, axis=None, name=None, dim=None):
  """Computes softmax cross entropy between `labels` and `logits`.

  Args:
    labels: Class probabilities; each slice along `axis` should sum to 1.
    logits: Unscaled log probabilities, same shape as `labels`.
    axis: The class dimension; defaults to -1 (the last dimension).
    name: Optional op name.
    dim: Deprecated alias for `axis`.

  Returns:
    A `Tensor` of per-example losses: the shape of `logits` with `axis`
    removed, in the same dtype as `logits`.
  """
  # Fold the deprecated `dim` alias into `axis`.
  axis = deprecated_argument_lookup("axis", axis, "dim", dim)
  del dim
  if axis is None:
    axis = -1
  with ops.name_scope(name, "softmax_cross_entropy_with_logits",
                      [logits, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    labels = ops.convert_to_tensor(labels, name="labels")
    # Low-precision inputs are computed in float32 and cast back at the end.
    convert_to_float32 = (
        logits.dtype == dtypes.float16 or logits.dtype == dtypes.bfloat16)
    precise_logits = math_ops.cast(
        logits, dtypes.float32) if convert_to_float32 else logits
    labels = math_ops.cast(labels, precise_logits.dtype)
    input_rank = array_ops.rank(precise_logits)
    # Remember the static shape so it can be restored on the result below.
    shape = logits.get_shape()
    # The raw kernel reduces over the last dimension, so move `axis` there.
    if axis != -1:
      def _move_dim_to_end(tensor, dim_index, rank):
        # Transpose so dimension `dim_index` becomes the last one.
        return array_ops.transpose(
            tensor,
            array_ops.concat([
                math_ops.range(dim_index),
                math_ops.range(dim_index + 1, rank), [dim_index]
            ], 0))
      precise_logits = _move_dim_to_end(precise_logits, axis, input_rank)
      labels = _move_dim_to_end(labels, axis, input_rank)
    input_shape = array_ops.shape(precise_logits)
    # The raw kernel only accepts 2-D input: collapse all outer dims into one.
    precise_logits = _flatten_outer_dims(precise_logits)
    labels = _flatten_outer_dims(labels)
    cost, unused_backprop = gen_nn_ops.softmax_cross_entropy_with_logits(
        precise_logits, labels, name=name)
    # Restore the original outer dimensions (everything but the class dim).
    output_shape = array_ops.slice(input_shape, [0],
                                   [math_ops.subtract(input_rank, 1)])
    cost = array_ops.reshape(cost, output_shape)
    # In graph mode, propagate whatever static shape information we had.
    if not context.executing_eagerly(
    ) and shape is not None and shape.dims is not None:
      shape = shape.as_list()
      del shape[axis]
      cost.set_shape(shape)
    if convert_to_float32:
      return math_ops.cast(cost, logits.dtype)
    else:
      return cost
# Deprecation notice attached to the v1 softmax_cross_entropy_with_logits
# endpoint via the `deprecation.deprecated` decorator.
_XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@tf_export(v1=["nn.softmax_cross_entropy_with_logits"])
@deprecation.deprecated(date=None, instructions=_XENT_DEPRECATION)
def softmax_cross_entropy_with_logits(
    _sentinel=None,
    labels=None,
    logits=None,
    dim=-1,
    name=None):
  """Computes softmax cross entropy between `labels` and `logits` (v1 API).

  Must be called with keyword arguments (enforced via `_sentinel`).  Unlike
  the v2 endpoint, gradients are blocked from flowing into `labels`.
  """
  _ensure_xent_args("softmax_cross_entropy_with_logits", _sentinel, labels,
                    logits)
  with ops.name_scope(name, "softmax_cross_entropy_with_logits_sg",
                      [logits, labels]) as name:
    # v1 semantics: treat labels as constant so no gradient flows into them.
    labels = array_ops.stop_gradient(labels, name="labels_stop_gradient")
  return softmax_cross_entropy_with_logits_v2(
      labels=labels, logits=logits, axis=dim, name=name)
@tf_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
    _sentinel=None,
    labels=None,
    logits=None,
    name=None):
  """Computes sparse softmax cross entropy between `labels` and `logits`.

  `labels` holds integer class indices of shape `logits.shape[:-1]` (rather
  than full probability distributions).  Must be called with keyword
  arguments (enforced via `_sentinel`).

  Returns:
    A `Tensor` of per-example losses with the shape of `labels`, in the
    dtype of `logits`.

  Raises:
    ValueError: if logits are scalars, or the static ranks/shapes of
      `labels` and `logits` are incompatible.
  """
  _ensure_xent_args("sparse_softmax_cross_entropy_with_logits", _sentinel,
                    labels, logits)
  with ops.name_scope(name, "SparseSoftmaxCrossEntropyWithLogits",
                      [labels, logits]):
    labels = ops.convert_to_tensor(labels)
    logits = ops.convert_to_tensor(logits)
    # float16 is computed in float32 for stability and cast back at the end.
    precise_logits = math_ops.cast(logits, dtypes.float32) if (dtypes.as_dtype(
        logits.dtype) == dtypes.float16) else logits
    labels_static_shape = labels.get_shape()
    labels_shape = array_ops.shape(labels)
    static_shapes_fully_defined = (
        labels_static_shape.is_fully_defined() and
        logits.get_shape()[:-1].is_fully_defined())
    if logits.get_shape().ndims is not None and logits.get_shape().ndims == 0:
      raise ValueError(
          "Logits cannot be scalars - received shape %s." % logits.get_shape())
    if logits.get_shape().ndims is not None and (
        labels_static_shape.ndims is not None and
        labels_static_shape.ndims != logits.get_shape().ndims - 1):
      raise ValueError("Rank mismatch: Rank of labels (received %s) should "
                       "equal rank of logits minus 1 (received %s)." %
                       (labels_static_shape.ndims, logits.get_shape().ndims))
    if (static_shapes_fully_defined and
        labels_static_shape != logits.get_shape()[:-1]):
      raise ValueError("Shape mismatch: The shape of labels (received %s) "
                       "should equal the shape of logits except for the last "
                       "dimension (received %s)." % (labels_static_shape,
                                                     logits.get_shape()))
    # Fast path: 2-D logits can be handed straight to the raw kernel.
    if logits.get_shape().ndims == 2:
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
    # General case: validate shapes dynamically when not statically known,
    # flatten to 2-D, run the kernel, then reshape the losses back.
    shape_checks = []
    if not static_shapes_fully_defined:
      shape_checks.append(
          check_ops.assert_equal(
              array_ops.shape(labels),
              array_ops.shape(logits)[:-1]))
    with ops.control_dependencies(shape_checks):
      num_classes = array_ops.shape(logits)[array_ops.rank(logits) - 1]
      precise_logits = array_ops.reshape(precise_logits, [-1, num_classes])
      labels = array_ops.reshape(labels, [-1])
      cost, _ = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
          precise_logits, labels, name=name)
      cost = array_ops.reshape(cost, labels_shape)
      cost.set_shape(labels_static_shape)
      if logits.dtype == dtypes.float16:
        return math_ops.cast(cost, dtypes.float16)
      else:
        return cost
@tf_export("nn.avg_pool")
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs average pooling on the input tensor."""
  with ops.name_scope(name, "AvgPool", [value]) as name:
    pooled_input = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.avg_pool(
        pooled_input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.max_pool")
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs max pooling on the input tensor."""
  with ops.name_scope(name, "MaxPool", [value]) as name:
    pooled_input = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops.max_pool(
        pooled_input,
        ksize=ksize,
        strides=strides,
        padding=padding,
        data_format=data_format,
        name=name)
@tf_export("nn.max_pool_with_argmax", v1=[])
def max_pool_with_argmax_v2(input,  # pylint: disable=redefined-builtin
                            ksize,
                            strides,
                            padding,
                            data_format="NHWC",
                            output_dtype=dtypes.int64,
                            name=None):
  """Performs max pooling and also returns the argmax indices.

  Only the NHWC data format is supported by the underlying kernel.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than 'NHWC' are not yet supported")
  return gen_nn_ops.max_pool_with_argmax(
      input=input,
      ksize=ksize,
      strides=strides,
      padding=padding,
      Targmax=output_dtype,
      name=name)
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for a Conv2D node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  f_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  f_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  f_height = int(f_shape[0])
  f_width = int(f_shape[1])
  f_in_depth = int(f_shape[2])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # One multiply plus one add (2 flops) per filter tap per output element.
  return ops.OpStats(
      "flops", out_count * f_in_depth * f_height * f_width * 2)
@ops.RegisterStatistics("DepthwiseConv2dNative", "flops")
def _calc_depthwise_conv_flops(graph, node):
  """Calculates the compute resources needed for a DepthwiseConv2dNative node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  f_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  f_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  f_height = int(f_shape[0])
  f_width = int(f_shape[1])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # One multiply plus one add (2 flops) per filter tap per output element;
  # depthwise means the input-depth factor does not appear.
  return ops.OpStats("flops", out_count * f_height * f_width * 2)
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the compute resources needed for a BiasAdd node.

  One add per input element.
  """
  input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  input_shape.assert_is_fully_defined()
  # Accumulate in int64 so the element count cannot overflow on platforms
  # where numpy's default integer is 32-bit; this also matches the other
  # flops calculators in this file (e.g. _calc_conv_flops).
  input_count = np.prod(input_shape.as_list(), dtype=np.int64)
  return ops.OpStats("flops", input_count)
@tf_export(v1=["nn.xw_plus_b"])
def xw_plus_b(x, weights, biases, name=None):
  """Computes `matmul(x, weights) + biases`."""
  with ops.name_scope(name, "xw_plus_b", [x, weights, biases]) as name:
    x_t = ops.convert_to_tensor(x, name="x")
    w_t = ops.convert_to_tensor(weights, name="weights")
    b_t = ops.convert_to_tensor(biases, name="biases")
    product = math_ops.matmul(x_t, w_t)
    return bias_add(product, b_t, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):
  """Computes `matmul(x, weights) + biases`, using the v1 bias_add variant."""
  with ops.name_scope(name, "xw_plus_b_v1", [x, weights, biases]) as name:
    x_t = ops.convert_to_tensor(x, name="x")
    w_t = ops.convert_to_tensor(weights, name="weights")
    b_t = ops.convert_to_tensor(biases, name="biases")
    product = math_ops.matmul(x_t, w_t)
    return bias_add_v1(product, b_t, name=name)
def _get_noise_shape(x, noise_shape):
  """Resolves the dropout noise shape, filling unknown dims from `x`'s shape."""
  # A None noise_shape means "same shape as x", computed dynamically.
  if noise_shape is None:
    return array_ops.shape(x)
  try:
    noise_shape_ = tensor_shape.as_shape(noise_shape)
  except (TypeError, ValueError):
    # Not statically interpretable (e.g. a Tensor); return it unchanged.
    return noise_shape
  if x.shape.dims is not None and len(x.shape.dims) == len(noise_shape_.dims):
    # Fill dims that are unknown in noise_shape but known in x.
    filled = [
        x_dim.value
        if (n_dim.value is None and x_dim.value is not None) else n_dim.value
        for x_dim, n_dim in zip(x.shape.dims, noise_shape_.dims)
    ]
    return tensor_shape.TensorShape(filled)
  return noise_shape
@tf_export(v1=["nn.dropout"])
@deprecation.deprecated_args(None, "Please use `rate` instead of `keep_prob`. "
                             "Rate should be set to `rate = 1 - keep_prob`.",
                             "keep_prob")
def dropout(x, keep_prob=None, noise_shape=None, seed=None, name=None,
            rate=None):
  """Computes dropout (v1 API; `keep_prob` is deprecated in favor of `rate`).

  Raises:
    ValueError: if `keep_prob` is not numeric/Tensor-like, or if neither
      `rate` nor `keep_prob` was supplied.
  """
  try:
    # Translate the legacy keep_prob into a drop rate; a TypeError here
    # means keep_prob could not take part in arithmetic.
    keep = 1. - keep_prob if keep_prob is not None else None
  except TypeError:
    raise ValueError("keep_prob must be a floating point number or Tensor "
                     "(got %r)" % keep_prob)
  # Prefer `rate` if given; otherwise fall back to the converted keep_prob.
  rate = deprecation.deprecated_argument_lookup(
      "rate", rate,
      "keep_prob", keep)
  if rate is None:
    raise ValueError("You must provide a rate to dropout.")
  return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
@tf_export("nn.dropout", v1=[])
def dropout_v2(x, rate, noise_shape=None, seed=None, name=None):
  """Randomly zeroes elements of `x` with probability `rate`.

  Kept elements are scaled by `1 / (1 - rate)` so the expected sum is
  unchanged.  `noise_shape` lets a single keep/drop decision be broadcast
  across dimensions.

  Raises:
    ValueError: if `x` is not floating point, or `rate` is a Python number
      outside `[0, 1)`.
  """
  with ops.name_scope(name, "dropout", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_floating:
      raise ValueError("x has to be a floating point tensor since it's going to"
                       " be scaled. Got a %s tensor instead." % x.dtype)
    if isinstance(rate, numbers.Real) and not (rate >= 0 and rate < 1):
      raise ValueError("rate must be a scalar tensor or a float in the "
                       "range [0, 1), got %g" % rate)
    # Early return if nothing needs to be dropped.
    if isinstance(rate, numbers.Real) and rate == 0:
      return x
    if context.executing_eagerly():
      if isinstance(rate, ops.EagerTensor):
        if rate.numpy() == 0:
          return x
    else:
      rate = ops.convert_to_tensor(
          rate, dtype=x.dtype, name="rate")
      rate.get_shape().assert_is_compatible_with(tensor_shape.scalar())
      # Do nothing if we know rate == 0
      if tensor_util.constant_value(rate) == 0:
        return x
    noise_shape = _get_noise_shape(x, noise_shape)
    keep_prob = 1 - rate
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    # Scale the survivors so E[output] == input.
    ret = math_ops.divide(x, keep_prob) * binary_tensor
    if not context.executing_eagerly():
      ret.set_shape(x.get_shape())
    return ret
@tf_export("math.top_k", "nn.top_k")
def top_k(input, k=1, sorted=True, name=None):  # pylint: disable=redefined-builtin
  """Finds values and indices of the `k` largest entries of the last dim."""
  values_and_indices = gen_nn_ops.top_kv2(input, k=k, sorted=sorted, name=name)
  return values_and_indices
def nth_element(input, n, reverse=False, name=None):  # pylint: disable=redefined-builtin
  """Finds the nth order statistic along the last dimension of `input`."""
  result = gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)
  return result
@tf_export(v1=["nn.fractional_max_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_max_pool_v2.")
def fractional_max_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):   # pylint: disable=redefined-builtin
  """Performs fractional max pooling on `value` (deprecated v1 API).

  Forwards all arguments, including the deprecated `deterministic` and
  `seed2`, straight to the generated kernel.
  """
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic, seed, seed2,
                                        name)
@tf_export("nn.fractional_max_pool", v1=[])
def fractional_max_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  """Performs fractional max pooling on `value` (v2 API).

  A nonzero `seed` is split into the kernel's (seed, seed2) pair and the
  pooling-region choice becomes deterministic for that seed.
  """
  if seed != 0:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
  return gen_nn_ops.fractional_max_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic=False,
                                        seed=0, seed2=0, name=name)
@tf_export(v1=["nn.fractional_avg_pool"])
@deprecation.deprecated(date=None, instructions="`seed2` and `deterministic` "
                        "args are deprecated. Use fractional_avg_pool_v2.")
def fractional_avg_pool(value,
                        pooling_ratio,
                        pseudo_random=False,
                        overlapping=False,
                        deterministic=False,
                        seed=0,
                        seed2=0,
                        name=None):  # pylint: disable=redefined-builtin
  """Performs fractional average pooling on `value` (deprecated v1 API).

  Forwards all arguments, including the deprecated `deterministic` and
  `seed2`, straight to the generated kernel.
  """
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic, seed, seed2,
                                        name=name)
@tf_export("nn.fractional_avg_pool", v1=[])
def fractional_avg_pool_v2(value,
                           pooling_ratio,
                           pseudo_random=False,
                           overlapping=False,
                           seed=0,
                           name=None):  # pylint: disable=redefined-builtin
  """Performs fractional average pooling on `value` (v2 API).

  A nonzero `seed` is split into the kernel's (seed, seed2) pair and the
  pooling-region choice becomes deterministic for that seed.
  """
  if seed != 0:
    seed1, seed2 = random_seed.get_seed(seed)
    return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                          overlapping, deterministic=True,
                                          seed=seed1, seed2=seed2, name=name)
  return gen_nn_ops.fractional_avg_pool(value, pooling_ratio, pseudo_random,
                                        overlapping, deterministic=False,
                                        seed=0, seed2=0, name=name)
@tf_export(v1=["nn.conv1d"])
@deprecation.deprecated_arg_values(
    None,
    "`NCHW` for data_format is deprecated, use `NCW` instead",
    warn_once=True,
    data_format="NCHW")
@deprecation.deprecated_arg_values(
    None,
    "`NHWC` for data_format is deprecated, use `NWC` instead",
    warn_once=True,
    data_format="NHWC")
def conv1d(value,
           filters,
           stride,
           padding,
           use_cudnn_on_gpu=None,
           data_format=None,
           name=None):
  """Computes a 1-D convolution by inserting a dummy dim and calling conv2d.

  Args:
    value: 3-D input tensor in NWC (default) or NCW layout.
    filters: 3-D filter tensor.
    stride: Integer stride along the width dimension.
    padding: 'SAME' or 'VALID'.
    use_cudnn_on_gpu: Optional bool, forwarded to conv2d.
    data_format: 'NWC' (default) or 'NCW'; the deprecated 2-D aliases
      NHWC/NCHW are also accepted.
    name: Optional op name.

  Raises:
    ValueError: if `data_format` is not one of the accepted values.
  """
  with ops.name_scope(name, "conv1d", [value, filters]) as name:
    # Reshape the input tensor to [batch, 1, in_width, in_channels]
    if data_format is None or data_format == "NHWC" or data_format == "NWC":
      data_format = "NHWC"
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    elif data_format == "NCHW" or data_format == "NCW":
      data_format = "NCHW"
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    value = array_ops.expand_dims(value, spatial_start_dim)
    # Give the filters a dummy height dimension of 1.
    filters = array_ops.expand_dims(filters, 0)
    result = gen_nn_ops.conv2d(
        value,
        filters,
        strides,
        padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format)
    # Drop the dummy spatial dimension again.
    return array_ops.squeeze(result, [spatial_start_dim])
@tf_export("nn.conv1d", v1=[])
def conv1d_v2(input,  # pylint: disable=redefined-builtin
              filters,
              stride,
              padding,
              data_format=None,
              name=None):
  """1-D convolution (v2 API); delegates to the v1 `conv1d` with cuDNN enabled."""
  return conv1d(
      input,  # pylint: disable=redefined-builtin
      filters,
      stride,
      padding,
      use_cudnn_on_gpu=True,
      data_format=data_format,
      name=name)
def conv1d_transpose(
    value,
    filter,  # pylint: disable=redefined-builtin
    output_shape,
    stride,
    padding="SAME",
    data_format="NWC",
    name=None):
  """Transposed 1-D convolution, implemented via the 2-D conv input-gradient op.

  Args:
    value: 3-D tensor in NWC (default) or NCW layout.
    filter: 3-D filter tensor; its last dimension must match `value`'s
      channel dimension.
    output_shape: 1-D tensor of length 3 with the desired output shape.
    stride: Integer stride along the width dimension.
    padding: 'SAME' or 'VALID'.
    data_format: 'NWC' (default) or 'NCW'.
    name: Optional op name.

  Raises:
    ValueError: on shape, data_format, or padding mismatches.
  """
  with ops.name_scope(name, "conv1d_transpose",
                      [value, filter, output_shape]) as name:
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(3)):
      raise ValueError("output_shape must have shape (3,), got {}".format(
          output_shape_.get_shape()))
    # The format could be either NWC or NCW, map to NHWC or NCHW
    if data_format is None or data_format == "NWC":
      data_format_2d = "NHWC"
      axis = 2
    elif data_format == "NCW":
      data_format_2d = "NCHW"
      axis = 1
    else:
      raise ValueError("data_format must be \"NWC\" or \"NCW\".")
    # The input channel count must agree with the filter's last dimension.
    if not value.get_shape().dims[axis].is_compatible_with(
        filter.get_shape()[2]):
      raise ValueError("input channels does not match filter's input channels, "
                       "{} != {}".format(value.get_shape()[axis],
                                         filter.get_shape()[2]))
    # When output_shape is statically known, also validate output channels.
    if isinstance(output_shape, (list, np.ndarray)):
      if not filter.get_shape().dims[1].is_compatible_with(
          output_shape[axis]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[axis],
                              filter.get_shape()[1]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # Insert a dummy spatial (height) dimension of size 1 so the 2-D op can
    # be used, extending output_shape and strides accordingly.
    if data_format_2d == "NHWC":
      output_shape_ = array_ops.concat(
          [output_shape_[:1], [1], output_shape_[1:]], axis=0)
      spatial_start_dim = 1
      strides = [1, 1, stride, 1]
    else:
      output_shape_ = array_ops.concat(
          [output_shape_[:2], [1], output_shape_[2:]], axis=0)
      spatial_start_dim = 2
      strides = [1, 1, 1, stride]
    value = array_ops.expand_dims(value, spatial_start_dim)
    filter = array_ops.expand_dims(filter, 0)
    # Transposed convolution == gradient of conv2d w.r.t. its input.
    result = gen_nn_ops.conv2d_backprop_input(
        input_sizes=output_shape_,
        filter=filter,
        out_backprop=value,
        strides=strides,
        padding=padding,
        data_format=data_format_2d,
        name=name)
    # Remove the dummy spatial dimension again.
    return array_ops.squeeze(result, [spatial_start_dim])
@ops.RegisterStatistics("Dilation2D", "flops")
def _calc_dilation2d_flops(graph, node):
  """Calculates the compute resources needed for a Dilation2D node."""
  in_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  in_shape.assert_is_fully_defined()
  f_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])
  f_shape.assert_is_fully_defined()
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  f_height = int(f_shape[0])
  f_width = int(f_shape[1])
  out_count = np.prod(out_shape.as_list(), dtype=np.int64)
  # Two flops per filter tap per output element.
  return ops.OpStats("flops", out_count * f_height * f_width * 2)
@tf_export(v1=["nn.erosion2d"])
def erosion2d(value, kernel, strides, rates, padding, name=None):
  """Computes grayscale erosion of 4-D `value` by 3-D `kernel` (v1 API)."""
  with ops.name_scope(name, "erosion2d", [value, kernel]) as name:
    # Erosion is the dual of dilation: erode(x, k) == -dilate(-x, flip(k)).
    flipped_kernel = array_ops.reverse_v2(kernel, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=flipped_kernel,
        strides=strides,
        rates=rates,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export("nn.erosion2d", v1=[])
def erosion2d_v2(value,
                 filters,
                 strides,
                 padding,
                 data_format,
                 dilations,
                 name=None):
  """Computes grayscale erosion of 4-D `value` by 3-D `filters` (v2 API).

  Only the NHWC data format is supported.
  """
  if data_format != "NHWC":
    raise ValueError("Data formats other than NHWC are not yet supported")
  with ops.name_scope(name, "erosion2d", [value, filters]) as name:
    # Erosion is the dual of dilation: erode(x, f) == -dilate(-x, flip(f)).
    flipped_filters = array_ops.reverse_v2(filters, [0, 1])
    dilated = gen_nn_ops.dilation2d(
        input=math_ops.negative(value),
        filter=flipped_filters,
        strides=strides,
        rates=dilations,
        padding=padding,
        name=name)
    return math_ops.negative(dilated)
@tf_export(v1=["math.in_top_k", "nn.in_top_k"])
def in_top_k(predictions, targets, k, name=None):
  """Says whether each target class is within the top `k` predictions."""
  with ops.name_scope(name, "in_top_k"):
    result = gen_nn_ops.in_top_kv2(predictions, targets, k, name=name)
  return result
@tf_export("math.in_top_k", "nn.in_top_k", v1=[])
def in_top_k_v2(targets, predictions, k, name=None):
  # v2 endpoint: identical to `in_top_k` with the (targets, predictions)
  # argument order swapped; the docstring is copied below.
  return in_top_k(predictions, targets, k, name)
in_top_k_v2.__doc__ = in_top_k.__doc__
# Re-export the generated quantized ops under the v1 `tf.nn` namespace.
tf_export(v1=["nn.quantized_avg_pool"])(gen_nn_ops.quantized_avg_pool)
tf_export(v1=["nn.quantized_conv2d"])(gen_nn_ops.quantized_conv2d)
tf_export(v1=["nn.quantized_relu_x"])(gen_nn_ops.quantized_relu_x)
tf_export(v1=["nn.quantized_max_pool"])(gen_nn_ops.quantized_max_pool)
| true | true |
f71fd03a67d84d601549326e242f921bbfd460d7 | 3,974 | py | Python | alipay/aop/api/request/ZhimaCreditEpProductCodeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/ZhimaCreditEpProductCodeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/ZhimaCreditEpProductCodeQueryRequest.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditEpProductCodeQueryModel import ZhimaCreditEpProductCodeQueryModel
class ZhimaCreditEpProductCodeQueryRequest(object):
    """Request wrapper for the Alipay `zhima.credit.ep.product.code.query` API.

    Holds the business payload (`biz_content`/`biz_model`) plus common
    gateway parameters (version, terminal info, notify/return URLs, extra
    user-defined params) and serializes them into the flat parameter dict
    expected by the gateway.
    """
    def __init__(self, biz_model=None):
        # Typed business model; serialized into P_BIZ_CONTENT when set.
        self._biz_model = biz_model
        # Business payload; coerced to the query model in the setter below.
        self._biz_content = None
        # Gateway API version.
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        # Asynchronous notification callback URL.
        self._notify_url = None
        # Browser redirect URL after the request completes.
        self._return_url = None
        # Extra user-defined text parameters merged into the request dict.
        self._udf_params = None
        # Whether the request content should be encrypted by the client.
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # Accept either a ready model instance or a plain dict convertible
        # into one.
        if isinstance(value, ZhimaCreditEpProductCodeQueryModel):
            self._biz_content = value
        else:
            self._biz_content = ZhimaCreditEpProductCodeQueryModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # Silently ignore non-dict values (keeps the previous params).
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Adds one user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Builds the flat request-parameter dict sent to the gateway.

        `biz_model` takes precedence; an explicit `biz_content` (set after
        it) overwrites the same key.  Only non-empty fields are included.
        """
        params = dict()
        params[P_METHOD] = 'zhima.credit.ep.product.code.query'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """This API has no file parameters; returns an empty dict."""
        multipart_params = dict()
        return multipart_params
| 27.406897 | 148 | 0.64469 |
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZhimaCreditEpProductCodeQueryModel import ZhimaCreditEpProductCodeQueryModel
class ZhimaCreditEpProductCodeQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, ZhimaCreditEpProductCodeQueryModel):
self._biz_content = value
else:
self._biz_content = ZhimaCreditEpProductCodeQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'zhima.credit.ep.product.code.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true | true |
f71fd0d406c540386851f557a8dd36be11fad94d | 37,269 | py | Python | pandas/core/indexes/datetimes.py | cgangwar11/pandas | 972f491cb7fdcc3c1c2cb9f05644128f13457f87 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-09-05T07:09:39.000Z | 2020-09-05T07:09:39.000Z | pandas/core/indexes/datetimes.py | cgangwar11/pandas | 972f491cb7fdcc3c1c2cb9f05644128f13457f87 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/datetimes.py | cgangwar11/pandas | 972f491cb7fdcc3c1c2cb9f05644128f13457f87 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | from datetime import date, datetime, time, timedelta, tzinfo
import operator
from typing import Optional
import warnings
import numpy as np
from pandas._libs import NaT, Period, Timestamp, index as libindex, lib
from pandas._libs.tslibs import (
Resolution,
ints_to_pydatetime,
parsing,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_scalar,
)
from pandas.core.dtypes.missing import is_valid_nat_for_dtype
from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
def _new_DatetimeIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't
    have arguments and breaks __new__.

    `d` is the attribute dict saved by the pickle; its "data" entry may be
    a raw ndarray from older pickles or a DatetimeArray from newer ones.
    """
    if "data" in d and not isinstance(d["data"], DatetimeIndex):
        # Avoid need to verify integrity by calling simple_new directly
        data = d.pop("data")
        if not isinstance(data, DatetimeArray):
            # For backward compat with older pickles, we may need to construct
            # a DatetimeArray to adapt to the newer _simple_new signature
            tz = d.pop("tz")
            freq = d.pop("freq")
            dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
        else:
            dta = data
            for key in ["tz", "freq"]:
                # These are already stored in our DatetimeArray; if they are
                # also in the pickle and don't match, we have a problem.
                if key in d:
                    assert d.pop(key) == getattr(dta, key)
        result = cls._simple_new(dta, **d)
    else:
        with warnings.catch_warnings():
            # TODO: If we knew what was going in to **d, we might be able to
            # go through _simple_new instead
            warnings.simplefilter("ignore")
            result = cls.__new__(cls, **d)
    return result
@inherit_names(
["to_perioddelta", "to_julian_date", "strftime", "isocalendar"]
+ DatetimeArray._field_ops
+ [
method
for method in DatetimeArray._datetimelike_methods
if method not in ("tz_localize",)
],
DatetimeArray,
wrap=True,
)
@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"tz",
"tzinfo",
"dtype",
"to_pydatetime",
"_has_same_tz",
"_format_native_types",
"date",
"time",
"timetz",
]
+ DatetimeArray._bool_ops,
DatetimeArray,
)
class DatetimeIndex(DatetimeTimedeltaMixin):
"""
Immutable ndarray-like of datetime64 data.
Represented internally as int64, and which can be boxed to Timestamp objects
that are subclasses of datetime and carry metadata.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
freq : str or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation.
tz : pytz.timezone or dateutil.tz.tzfile or datetime.tzinfo or str
Set the Timezone of the data.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
closed : {'left', 'right'}, optional
Set whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from 03:00
DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC
and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter
dictates how ambiguous times should be handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times.
dayfirst : bool, default False
If True, parse dates in `data` with the day first order.
yearfirst : bool, default False
If True parse dates in `data` with the year first order.
dtype : numpy.dtype or DatetimeTZDtype or str, default None
Note that the only NumPy dtype allowed is ‘datetime64[ns]’.
copy : bool, default False
Make a copy of input ndarray.
name : label, default None
Name to be stored in the index.
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
timetz
dayofyear
weekofyear
week
dayofweek
weekday
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_period
to_perioddelta
to_pydatetime
to_series
to_frame
month_name
day_name
mean
See Also
--------
Index : The base pandas Index type.
TimedeltaIndex : Index of timedelta64 data.
PeriodIndex : Index of Period data.
to_datetime : Convert argument to datetime.
date_range : Create a fixed-frequency DatetimeIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
"""
_typ = "datetimeindex"
_engine_type = libindex.DatetimeEngine
_supports_partial_string_indexing = True
_comparables = ["name", "freqstr", "tz"]
_attributes = ["name", "tz", "freq"]
_is_numeric_dtype = False
_data: DatetimeArray
tz: Optional[tzinfo]
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in DatetimeIndex
@doc(DatetimeArray.tz_localize)
def tz_localize(
self, tz, ambiguous="raise", nonexistent="raise"
) -> "DatetimeIndex":
arr = self._data.tz_localize(tz, ambiguous, nonexistent)
return type(self)._simple_new(arr, name=self.name)
@doc(DatetimeArray.to_period)
def to_period(self, freq=None) -> "DatetimeIndex":
arr = self._data.to_period(freq)
return type(self)._simple_new(arr, name=self.name)
# --------------------------------------------------------------------
# Constructors
    def __new__(
        cls,
        data=None,
        freq=lib.no_default,
        tz=None,
        normalize=False,
        closed=None,
        ambiguous="raise",
        dayfirst=False,
        yearfirst=False,
        dtype=None,
        copy=False,
        name=None,
    ):
        """
        Construct a DatetimeIndex from array-like ``data``.

        Parsing, tz handling, and freq validation are all delegated to
        ``DatetimeArray._from_sequence``; the resulting array is wrapped
        without further validation via ``_simple_new``.
        """
        if is_scalar(data):
            raise TypeError(
                f"{cls.__name__}() must be called with a "
                f"collection of some kind, {repr(data)} was passed"
            )
        # - Cases checked above all return/raise before reaching here - #
        name = maybe_extract_name(name, data, cls)
        dtarr = DatetimeArray._from_sequence(
            data,
            dtype=dtype,
            copy=copy,
            tz=tz,
            freq=freq,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        subarr = cls._simple_new(dtarr, name=name)
        return subarr
    @classmethod
    def _simple_new(cls, values: DatetimeArray, name: Label = None):
        """
        Fastpath constructor: wrap an existing DatetimeArray without
        validation. ``values`` must already be a DatetimeArray.
        """
        assert isinstance(values, DatetimeArray), type(values)
        result = object.__new__(cls)
        result._data = values
        result.name = name
        result._cache = {}
        result._no_setting_name = False
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._data
        result._reset_identity()
        return result
# --------------------------------------------------------------------
@cache_readonly
def _is_dates_only(self) -> bool:
"""
Return a boolean if we are only dates (and don't have a timezone)
Returns
-------
bool
"""
from pandas.io.formats.format import _is_dates_only
return self.tz is None and _is_dates_only(self._values)
    def __reduce__(self):
        # we use a special reduce here because we need
        # to simply set the .tz (and not reinterpret it)
        d = dict(data=self._data)
        d.update(self._get_attributes_dict())
        # Unpickling goes through the module-level _new_DatetimeIndex helper.
        return _new_DatetimeIndex, (type(self), d), None
def _convert_for_op(self, value):
"""
Convert value to be insertable to ndarray.
"""
if self._has_same_tz(value):
return Timestamp(value).asm8
raise ValueError("Passed item and index have different timezone")
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
"""
Can we compare values of the given dtype to our own?
"""
if not is_datetime64_any_dtype(dtype):
return False
if self.tz is not None:
# If we have tz, we can compare to tzaware
return is_datetime64tz_dtype(dtype)
# if we dont have tz, we can only compare to tznaive
return is_datetime64_dtype(dtype)
# --------------------------------------------------------------------
# Rendering Methods
    def _mpl_repr(self):
        # how to represent ourselves to matplotlib
        # Returns an object ndarray of datetime.datetime values (tz applied).
        return ints_to_pydatetime(self.asi8, self.tz)
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_datetime64
formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
return lambda x: f"'{formatter(x, tz=self.tz)}'"
# --------------------------------------------------------------------
# Set Operation Methods
    def union_many(self, others):
        """
        A bit of a hack to accelerate unioning a collection of indexes.
        """
        this = self
        for other in others:
            if not isinstance(this, DatetimeIndex):
                # Once we've fallen back to a generic Index, stay generic.
                this = Index.union(this, other)
                continue
            if not isinstance(other, DatetimeIndex):
                try:
                    other = DatetimeIndex(other)
                except TypeError:
                    # Leave `other` as-is; the union below handles mismatches.
                    pass
            this, other = this._maybe_utc_convert(other)
            if this._can_fast_union(other):
                # Fast path for overlapping/adjacent indexes with shared freq.
                this = this._fast_union(other)
            else:
                this = Index.union(this, other)
        return this
# --------------------------------------------------------------------
    def _get_time_micros(self):
        """
        Return the number of microseconds since midnight.

        Returns
        -------
        ndarray[int64_t]
            Microseconds-since-midnight per element; NaT positions are -1.
        """
        values = self.asi8
        if self.tz is not None and not timezones.is_utc(self.tz):
            # Use wall-clock (local) times so time-of-day is tz-correct.
            values = self._data._local_timestamps()
        nanos = values % (24 * 3600 * 1_000_000_000)
        micros = nanos // 1000
        # NaT (iNaT) positions are sentinel-marked with -1.
        micros[self._isnan] = -1
        return micros
    def to_series(self, keep_tz=lib.no_default, index=None, name=None):
        """
        Create a Series with both index and values equal to the index keys
        useful with map for returning an indexer based on an index.

        Parameters
        ----------
        keep_tz : optional, defaults True
            Return the data keeping the timezone.
            If keep_tz is True:
                If the timezone is not set, the resulting
                Series will have a datetime64[ns] dtype.
                Otherwise the Series will have a datetime64[ns, tz] dtype; the
                tz will be preserved.
            If keep_tz is False:
                Series will have a datetime64[ns] dtype. TZ aware
                objects will have the tz removed.

            .. versionchanged:: 1.0.0
                The default value is now True. In a future version,
                this keyword will be removed entirely. Stop passing the
                argument to obtain the future behavior and silence the warning.
        index : Index, optional
            Index of resulting Series. If None, defaults to original index.
        name : str, optional
            Name of resulting Series. If None, defaults to name of original
            index.

        Returns
        -------
        Series
        """
        from pandas import Series
        if index is None:
            index = self._shallow_copy()
        if name is None:
            name = self.name
        # Deprecation handling: any explicit keep_tz triggers a warning.
        if keep_tz is not lib.no_default:
            if keep_tz:
                warnings.warn(
                    "The 'keep_tz' keyword in DatetimeIndex.to_series "
                    "is deprecated and will be removed in a future version. "
                    "You can stop passing 'keep_tz' to silence this warning.",
                    FutureWarning,
                    stacklevel=2,
                )
            else:
                warnings.warn(
                    "Specifying 'keep_tz=False' is deprecated and this "
                    "option will be removed in a future release. If "
                    "you want to remove the timezone information, you "
                    "can do 'idx.tz_convert(None)' before calling "
                    "'to_series'.",
                    FutureWarning,
                    stacklevel=2,
                )
        else:
            keep_tz = True
        if keep_tz and self.tz is not None:
            # preserve the tz & copy
            values = self.copy(deep=True)
        else:
            # Drop the tz by viewing the underlying i8 data as naive M8[ns].
            values = self._values.view("M8[ns]").copy()
        return Series(values, index=index, name=name)
    def snap(self, freq="S"):
        """
        Snap time stamps to nearest occurring frequency.

        Parameters
        ----------
        freq : str or DateOffset, default 'S'
            The frequency to snap each timestamp to.

        Returns
        -------
        DatetimeIndex
        """
        # Superdumb, punting on any optimizing
        freq = to_offset(freq)
        snapped = np.empty(len(self), dtype=DT64NS_DTYPE)
        for i, v in enumerate(self):
            s = v
            if not freq.is_on_offset(s):
                # Pick the nearer of the previous/next on-offset timestamps;
                # ties go to rollforward (t1).
                t0 = freq.rollback(s)
                t1 = freq.rollforward(s)
                if abs(s - t0) < abs(t1 - s):
                    s = t0
                else:
                    s = t1
            snapped[i] = s
        dta = DatetimeArray(snapped, dtype=self.dtype)
        return DatetimeIndex._simple_new(dta, name=self.name)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : str
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
assert isinstance(reso, Resolution), (type(reso), reso)
valid_resos = {
"year",
"month",
"quarter",
"day",
"hour",
"minute",
"second",
"minute",
"second",
"microsecond",
}
if reso.attrname not in valid_resos:
raise KeyError
grp = reso.freq_group
per = Period(parsed, freq=grp)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
# timezone
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
elif self.tz is not None:
start = start.tz_localize(self.tz)
end = end.tz_localize(self.tz)
return start, end
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
if (
self.is_monotonic
and reso.attrname in ["day", "hour", "minute", "second"]
and self._resolution_obj >= reso
):
# These resolution/monotonicity validations came from GH3931,
# GH3452 and GH2369.
# See also GH14826
raise KeyError
if reso == "microsecond":
# _partial_date_slice doesn't allow microsecond resolution, but
# _parsed_string_to_bounds allows it.
raise KeyError
    def get_loc(self, key, method=None, tolerance=None):
        """
        Get integer location for requested label.

        Returns
        -------
        loc : int
        """
        if not is_scalar(key):
            raise InvalidIndexError(key)
        # Keep the original key for error reporting after coercion.
        orig_key = key
        if is_valid_nat_for_dtype(key, self.dtype):
            key = NaT
        if isinstance(key, self._data._recognized_scalars):
            # needed to localize naive datetimes
            key = self._maybe_cast_for_get_loc(key)
        elif isinstance(key, str):
            # Strings: try partial-string indexing first, then fall back to
            # parsing as a single Timestamp.
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError, OverflowError):
                pass
            try:
                key = self._maybe_cast_for_get_loc(key)
            except ValueError as err:
                raise KeyError(key) from err
        elif isinstance(key, timedelta):
            # GH#20464
            raise TypeError(
                f"Cannot index {type(self).__name__} with {type(key).__name__}"
            )
        elif isinstance(key, time):
            if method is not None:
                raise NotImplementedError(
                    "cannot yet lookup inexact labels when key is a time object"
                )
            # datetime.time keys select all rows at that time of day.
            return self.indexer_at_time(key)
        else:
            # unrecognized type
            raise KeyError(key)
        try:
            return Index.get_loc(self, key, method, tolerance)
        except KeyError as err:
            raise KeyError(orig_key) from err
def _maybe_cast_for_get_loc(self, key) -> Timestamp:
# needed to localize naive datetimes
key = Timestamp(key)
if key.tzinfo is None:
key = key.tz_localize(self.tz)
else:
key = key.tz_convert(self.tz)
return key
    def _maybe_cast_slice_bound(self, label, side: str, kind):
        """
        If label is a string, cast it to datetime according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : {'loc', 'getitem'} or None

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        assert kind in ["loc", "getitem", None]
        if is_float(label) or isinstance(label, time) or is_integer(label):
            # Numeric and time-of-day labels are not valid slice bounds here.
            self._invalid_indexer("slice", label)
        if isinstance(label, str):
            freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
            parsed, reso = parsing.parse_time_string(label, freq)
            reso = Resolution.from_attrname(reso)
            lower, upper = self._parsed_string_to_bounds(reso, parsed)
            # lower, upper form the half-open interval:
            #   [parsed, parsed + 1 freq)
            # because label may be passed to searchsorted
            # the bounds need swapped if index is reverse sorted and has a
            # length > 1 (is_monotonic_decreasing gives True for empty
            # and length 1 index)
            if self._is_strictly_monotonic_decreasing and len(self) > 1:
                return upper if side == "left" else lower
            return lower if side == "left" else upper
        else:
            return label
    def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
        """
        Locate the positions matching a partial date string, e.g. "2020-01".
        """
        freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
        parsed, reso = parsing.parse_time_string(key, freq)
        reso = Resolution.from_attrname(reso)
        loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
        return loc
    def slice_indexer(self, start=None, end=None, step=None, kind=None):
        """
        Return indexer for specified label slice.

        Index.slice_indexer, customized to handle time slicing.
        In addition to functionality provided by Index.slice_indexer, does the
        following:
        - if both `start` and `end` are instances of `datetime.time`, it
          invokes `indexer_between_time`
        - if `start` and `end` are both either string or None perform
          value-based selection in non-monotonic cases.
        """
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, time) and isinstance(end, time):
            if step is not None and step != 1:
                raise ValueError("Must have step size of 1 with time slices")
            return self.indexer_between_time(start, end)
        if isinstance(start, time) or isinstance(end, time):
            raise KeyError("Cannot mix time and non-time slice keys")
        # Pandas supports slicing with dates, treated as datetimes at midnight.
        # https://github.com/pandas-dev/pandas/issues/31501
        if isinstance(start, date) and not isinstance(start, datetime):
            start = datetime.combine(start, time(0, 0))
        if isinstance(end, date) and not isinstance(end, datetime):
            end = datetime.combine(end, time(0, 0))
        try:
            return Index.slice_indexer(self, start, end, step, kind=kind)
        except KeyError:
            # For historical reasons DatetimeIndex by default supports
            # value-based partial (aka string) slices on non-monotonic arrays,
            # let's try that.
            if (start is None or isinstance(start, str)) and (
                end is None or isinstance(end, str)
            ):
                # Build a boolean mask of rows between the casted bounds.
                mask = True
                if start is not None:
                    start_casted = self._maybe_cast_slice_bound(start, "left", kind)
                    mask = start_casted <= self
                if end is not None:
                    end_casted = self._maybe_cast_slice_bound(end, "right", kind)
                    mask = (self <= end_casted) & mask
                indexer = mask.nonzero()[0][::step]
                # Selecting everything is normalized to slice(None).
                if len(indexer) == len(self):
                    return slice(None)
                else:
                    return indexer
            else:
                raise
# --------------------------------------------------------------------
def is_type_compatible(self, typ) -> bool:
return typ == self.inferred_type or typ == "datetime"
    @property
    def inferred_type(self) -> str:
        """
        Always "datetime64" for a DatetimeIndex.
        """
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing
        return "datetime64"
    def indexer_at_time(self, time, asof=False):
        """
        Return index locations of values at particular time of day
        (e.g. 9:30AM).

        Parameters
        ----------
        time : datetime.time or str
            Time passed in either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p").

        Returns
        -------
        values_at_time : array of integers

        See Also
        --------
        indexer_between_time : Get index locations of values between particular
            times of day.
        DataFrame.at_time : Select values at particular time of day.
        """
        # NOTE(review): the parameter name `time` shadows the module-level
        # `datetime.time` import within this function.
        if asof:
            raise NotImplementedError("'asof' argument is not supported")
        if isinstance(time, str):
            from dateutil.parser import parse

            time = parse(time).time()
        if time.tzinfo:
            if self.tz is None:
                raise ValueError("Index must be timezone aware.")
            # Compare in the query time's timezone.
            time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
        else:
            time_micros = self._get_time_micros()
        micros = _time_to_micros(time)
        return (micros == time_micros).nonzero()[0]
    def indexer_between_time(
        self, start_time, end_time, include_start=True, include_end=True
    ):
        """
        Return index locations of values between particular times of day
        (e.g., 9:00-9:30AM).

        Parameters
        ----------
        start_time, end_time : datetime.time, str
            Time passed either as object (datetime.time) or as string in
            appropriate format ("%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
            "%H:%M:%S", "%H%M%S", "%I:%M:%S%p","%I%M%S%p").
        include_start : bool, default True
        include_end : bool, default True

        Returns
        -------
        values_between_time : array of integers

        See Also
        --------
        indexer_at_time : Get index locations of values at particular time of day.
        DataFrame.between_time : Select values between particular times of day.
        """
        start_time = to_time(start_time)
        end_time = to_time(end_time)
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)
        # Select strict/inclusive comparison for each boundary.
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt
        if start_time <= end_time:
            join_op = operator.and_
        else:
            # Interval wraps past midnight (e.g. 23:00-01:00): match times
            # after start OR before end.
            join_op = operator.or_
        mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))
        return mask.nonzero()[0]
# Logical reductions (any/all) are not meaningful for datetimes; disable them.
DatetimeIndex._add_logical_methods_disabled()
def date_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    tz=None,
    normalize=False,
    name=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex.

    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified.  If ``freq`` is omitted, the resulting
    ``DatetimeIndex`` has ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    Parameters
    ----------
    start, end : str or datetime-like, optional
        Left/right bounds for generating dates.
    periods : int, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency string (multiples allowed, e.g. '5H') or offset object.
    tz : str or tzinfo, optional
        Time zone name for a localized DatetimeIndex, e.g. 'Asia/Hong_Kong'.
        By default the result is timezone-naive.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    closed : {None, 'left', 'right'}, optional
        Make the interval closed with respect to the given frequency on the
        'left', 'right', or both sides (None, the default).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    rng : DatetimeIndex

    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.

    Examples
    --------
    >>> pd.date_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')
    """
    # Default to calendar-day frequency unless start/end/periods together
    # fully determine the spacing (all three given -> linearly spaced).
    if freq is None and com.any_none(periods, start, end):
        freq = "D"

    dtarr = DatetimeArray._generate_range(
        start=start, end=end, periods=periods, freq=freq,
        tz=tz, normalize=normalize, closed=closed, **kwargs,
    )
    return DatetimeIndex._simple_new(dtarr, name=name)
def bdate_range(
    start=None,
    end=None,
    periods=None,
    freq="B",
    tz=None,
    normalize=True,
    name=None,
    weekmask=None,
    holidays=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed frequency DatetimeIndex, with business day as the default
    frequency.

    Exactly three of ``start``, ``end``, ``periods`` and ``freq`` must be
    specified.  Unlike ``date_range``, specifying ``freq`` is required here.

    Parameters
    ----------
    start, end : str or datetime-like, default None
        Left/right bounds for generating dates.
    periods : int, default None
        Number of periods to generate.
    freq : str or DateOffset, default 'B' (business daily)
        Frequency string; multiples such as '5H' are allowed.
    tz : str or None
        Time zone name for a localized DatetimeIndex, e.g. Asia/Beijing.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    weekmask : str or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``;
        only used with custom frequency strings. None is equivalent to
        'Mon Tue Wed Thu Fri'.
    holidays : list-like or None, default None
        Dates excluded from the set of valid business days, passed to
        ``numpy.busdaycalendar``; only used with custom frequency strings.
    closed : str, default None
        Make the interval closed on the 'left', 'right', or both sides (None).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex
    """
    if freq is None:
        raise TypeError(
            "freq must be specified for bdate_range; use date_range instead"
        )

    if isinstance(freq, str) and freq.startswith("C"):
        # Custom business-day frequencies ('C...') take weekmask/holidays.
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            raise ValueError(f"invalid custom frequency string: {freq}") from err
    elif holidays or weekmask:
        raise ValueError(
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )

    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        closed=closed,
        **kwargs,
    )
def _time_to_micros(time_obj: time) -> int:
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
return 1_000_000 * seconds + time_obj.microsecond
| 33.395161 | 96 | 0.584937 | from datetime import date, datetime, time, timedelta, tzinfo
import operator
from typing import Optional
import warnings
import numpy as np
from pandas._libs import NaT, Period, Timestamp, index as libindex, lib
from pandas._libs.tslibs import (
Resolution,
ints_to_pydatetime,
parsing,
timezones,
to_offset,
)
from pandas._libs.tslibs.offsets import prefix_mapping
from pandas._typing import DtypeObj, Label
from pandas.errors import InvalidIndexError
from pandas.util._decorators import cache_readonly, doc
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
is_datetime64_any_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_scalar,
)
from pandas.core.dtypes.missing import is_valid_nat_for_dtype
from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype
import pandas.core.common as com
from pandas.core.indexes.base import Index, maybe_extract_name
from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin
from pandas.core.indexes.extension import inherit_names
from pandas.core.tools.times import to_time
def _new_DatetimeIndex(cls, d):
    """
    This is called upon unpickling, rather than the default which doesn't
    have arguments and breaks __new__.
    """
    if "data" in d and not isinstance(d["data"], DatetimeIndex):
        # Avoid need to verify integrity by calling simple_new directly
        data = d.pop("data")
        if not isinstance(data, DatetimeArray):
            # For backward compat with older pickles, we may need to construct
            #  a DatetimeArray to adapt to the newer _simple_new signature
            tz = d.pop("tz")
            freq = d.pop("freq")
            dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq)
        else:
            dta = data
            for key in ["tz", "freq"]:
                # These are already stored in the DatetimeArray; if they are
                #  also present in the pickle they must agree.
                if key in d:
                    assert d.pop(key) == getattr(dta, key)
        result = cls._simple_new(dta, **d)
    else:
        with warnings.catch_warnings():
            # TODO: If we knew what was going in to **d, we might be able to
            # go through _simple_new instead
            warnings.simplefilter("ignore")
            result = cls.__new__(cls, **d)
    return result
@inherit_names(
["to_perioddelta", "to_julian_date", "strftime", "isocalendar"]
+ DatetimeArray._field_ops
+ [
method
for method in DatetimeArray._datetimelike_methods
if method not in ("tz_localize",)
],
DatetimeArray,
wrap=True,
)
@inherit_names(["is_normalized", "_resolution_obj"], DatetimeArray, cache=True)
@inherit_names(
[
"_bool_ops",
"_object_ops",
"_field_ops",
"_datetimelike_ops",
"_datetimelike_methods",
"tz",
"tzinfo",
"dtype",
"to_pydatetime",
"_has_same_tz",
"_format_native_types",
"date",
"time",
"timetz",
]
+ DatetimeArray._bool_ops,
DatetimeArray,
)
class DatetimeIndex(DatetimeTimedeltaMixin):
_typ = "datetimeindex"
_engine_type = libindex.DatetimeEngine
_supports_partial_string_indexing = True
_comparables = ["name", "freqstr", "tz"]
_attributes = ["name", "tz", "freq"]
_is_numeric_dtype = False
_data: DatetimeArray
tz: Optional[tzinfo]
# --------------------------------------------------------------------
# methods that dispatch to array and wrap result in DatetimeIndex
    @doc(DatetimeArray.tz_localize)
    def tz_localize(
        self, tz, ambiguous="raise", nonexistent="raise"
    ) -> "DatetimeIndex":
        # Delegate to the backing array and re-wrap, keeping the name.
        arr = self._data.tz_localize(tz, ambiguous, nonexistent)
        return type(self)._simple_new(arr, name=self.name)
@doc(DatetimeArray.to_period)
def to_period(self, freq=None) -> "DatetimeIndex":
arr = self._data.to_period(freq)
return type(self)._simple_new(arr, name=self.name)
# --------------------------------------------------------------------
# Constructors
    def __new__(
        cls,
        data=None,
        freq=lib.no_default,
        tz=None,
        normalize=False,
        closed=None,
        ambiguous="raise",
        dayfirst=False,
        yearfirst=False,
        dtype=None,
        copy=False,
        name=None,
    ):
        """
        Construct a DatetimeIndex from array-like ``data``; parsing and
        validation are delegated to ``DatetimeArray._from_sequence``.
        """
        if is_scalar(data):
            raise TypeError(
                f"{cls.__name__}() must be called with a "
                f"collection of some kind, {repr(data)} was passed"
            )
        # - Cases checked above all return/raise before reaching here - #
        name = maybe_extract_name(name, data, cls)
        dtarr = DatetimeArray._from_sequence(
            data,
            dtype=dtype,
            copy=copy,
            tz=tz,
            freq=freq,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            ambiguous=ambiguous,
        )
        # Wrap the validated array without re-checking it.
        subarr = cls._simple_new(dtarr, name=name)
        return subarr
    @classmethod
    def _simple_new(cls, values: DatetimeArray, name: Label = None):
        """
        Fastpath constructor: wrap an existing DatetimeArray without
        validation. ``values`` must already be a DatetimeArray.
        """
        assert isinstance(values, DatetimeArray), type(values)
        result = object.__new__(cls)
        result._data = values
        result.name = name
        result._cache = {}
        result._no_setting_name = False
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._data
        result._reset_identity()
        return result
# --------------------------------------------------------------------
    @cache_readonly
    def _is_dates_only(self) -> bool:
        """
        Return True if the index is tz-naive and holds only date-boundary
        timestamps (used to choose a compact repr format).
        """
        from pandas.io.formats.format import _is_dates_only
        return self.tz is None and _is_dates_only(self._values)
    def __reduce__(self):
        # we use a special reduce here because we need
        # to simply set the .tz (and not reinterpret it)
        d = dict(data=self._data)
        d.update(self._get_attributes_dict())
        # Unpickling goes through the module-level _new_DatetimeIndex helper.
        return _new_DatetimeIndex, (type(self), d), None
    def _convert_for_op(self, value):
        """
        Convert value to be insertable to ndarray.
        """
        if self._has_same_tz(value):
            return Timestamp(value).asm8
        raise ValueError("Passed item and index have different timezone")
    def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
        """
        Can we compare values of the given dtype to our own?
        """
        if not is_datetime64_any_dtype(dtype):
            return False
        if self.tz is not None:
            # If we have tz, we can compare to tzaware
            return is_datetime64tz_dtype(dtype)
        # if we dont have tz, we can only compare to tznaive
        return is_datetime64_dtype(dtype)
# --------------------------------------------------------------------
# Rendering Methods
    def _mpl_repr(self):
        # how to represent ourselves to matplotlib
        # Returns an object ndarray of datetime.datetime values (tz applied).
        return ints_to_pydatetime(self.asi8, self.tz)
    @property
    def _formatter_func(self):
        """
        Callable used by the repr machinery to render a single timestamp.
        """
        from pandas.io.formats.format import _get_format_datetime64
        formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
        # Each formatted timestamp is wrapped in single quotes.
        return lambda x: f"'{formatter(x, tz=self.tz)}'"
# --------------------------------------------------------------------
# Set Operation Methods
    def union_many(self, others):
        """
        A bit of a hack to accelerate unioning a collection of indexes.
        """
        this = self
        for other in others:
            if not isinstance(this, DatetimeIndex):
                # Once we've fallen back to a generic Index, stay generic.
                this = Index.union(this, other)
                continue
            if not isinstance(other, DatetimeIndex):
                try:
                    other = DatetimeIndex(other)
                except TypeError:
                    pass
            this, other = this._maybe_utc_convert(other)
            if this._can_fast_union(other):
                # Fast path for overlapping/adjacent indexes with shared freq.
                this = this._fast_union(other)
            else:
                this = Index.union(this, other)
        return this
# --------------------------------------------------------------------
    def _get_time_micros(self):
        """
        Return the number of microseconds since midnight per element;
        NaT positions are marked with -1.
        """
        values = self.asi8
        if self.tz is not None and not timezones.is_utc(self.tz):
            # Use wall-clock (local) times so time-of-day is tz-correct.
            values = self._data._local_timestamps()
        nanos = values % (24 * 3600 * 1_000_000_000)
        micros = nanos // 1000
        micros[self._isnan] = -1
        return micros
    def to_series(self, keep_tz=lib.no_default, index=None, name=None):
        """
        Create a Series with both index and values equal to the index keys.

        ``keep_tz`` is deprecated; any explicit value triggers a
        FutureWarning and the default behavior is to keep the timezone.
        """
        from pandas import Series
        if index is None:
            index = self._shallow_copy()
        if name is None:
            name = self.name
        if keep_tz is not lib.no_default:
            if keep_tz:
                warnings.warn(
                    "The 'keep_tz' keyword in DatetimeIndex.to_series "
                    "is deprecated and will be removed in a future version. "
                    "You can stop passing 'keep_tz' to silence this warning.",
                    FutureWarning,
                    stacklevel=2,
                )
            else:
                warnings.warn(
                    "Specifying 'keep_tz=False' is deprecated and this "
                    "option will be removed in a future release. If "
                    "you want to remove the timezone information, you "
                    "can do 'idx.tz_convert(None)' before calling "
                    "'to_series'.",
                    FutureWarning,
                    stacklevel=2,
                )
        else:
            keep_tz = True
        if keep_tz and self.tz is not None:
            # preserve the tz & copy
            values = self.copy(deep=True)
        else:
            # Drop the tz by viewing the underlying i8 data as naive M8[ns].
            values = self._values.view("M8[ns]").copy()
        return Series(values, index=index, name=name)
    def snap(self, freq="S"):
        """
        Snap time stamps to nearest occurring frequency.

        Returns
        -------
        DatetimeIndex
        """
        # Superdumb, punting on any optimizing
        freq = to_offset(freq)
        snapped = np.empty(len(self), dtype=DT64NS_DTYPE)
        for i, v in enumerate(self):
            s = v
            if not freq.is_on_offset(s):
                # Pick the nearer of the previous/next on-offset timestamps;
                # ties go to rollforward (t1).
                t0 = freq.rollback(s)
                t1 = freq.rollforward(s)
                if abs(s - t0) < abs(t1 - s):
                    s = t0
                else:
                    s = t1
            snapped[i] = s
        dta = DatetimeArray(snapped, dtype=self.dtype)
        return DatetimeIndex._simple_new(dta, name=self.name)
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
assert isinstance(reso, Resolution), (type(reso), reso)
valid_resos = {
"year",
"month",
"quarter",
"day",
"hour",
"minute",
"second",
"minute",
"second",
"microsecond",
}
if reso.attrname not in valid_resos:
raise KeyError
grp = reso.freq_group
per = Period(parsed, freq=grp)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
elif self.tz is not None:
start = start.tz_localize(self.tz)
end = end.tz_localize(self.tz)
return start, end
def _validate_partial_date_slice(self, reso: Resolution):
assert isinstance(reso, Resolution), (type(reso), reso)
if (
self.is_monotonic
and reso.attrname in ["day", "hour", "minute", "second"]
and self._resolution_obj >= reso
):
raise KeyError
if reso == "microsecond":
# _parsed_string_to_bounds allows it.
raise KeyError
def get_loc(self, key, method=None, tolerance=None):
    """
    Get the integer location for the requested label.

    Returns
    -------
    loc : int (or slice/array for partial-string matches)

    Raises
    ------
    KeyError
        Re-raised with the *original* key so the message matches what the
        caller passed in, even after internal casting.
    """
    if not is_scalar(key):
        raise InvalidIndexError(key)

    orig_key = key
    if is_valid_nat_for_dtype(key, self.dtype):
        key = NaT

    if isinstance(key, self._data._recognized_scalars):
        # needed to localize naive datetimes
        key = self._maybe_cast_for_get_loc(key)

    elif isinstance(key, str):
        # First try interpreting the string as a partial-date slice...
        try:
            return self._get_string_slice(key)
        except (TypeError, KeyError, ValueError, OverflowError):
            pass

        # ...otherwise treat it as a single timestamp label.
        try:
            key = self._maybe_cast_for_get_loc(key)
        except ValueError as err:
            raise KeyError(key) from err

    elif isinstance(key, timedelta):
        # GH#20464
        raise TypeError(
            f"Cannot index {type(self).__name__} with {type(key).__name__}"
        )

    elif isinstance(key, time):
        if method is not None:
            raise NotImplementedError(
                "cannot yet lookup inexact labels when key is a time object"
            )
        return self.indexer_at_time(key)

    else:
        # unrecognized type
        raise KeyError(key)

    try:
        return Index.get_loc(self, key, method, tolerance)
    except KeyError as err:
        raise KeyError(orig_key) from err
def _maybe_cast_for_get_loc(self, key) -> Timestamp:
# needed to localize naive datetimes
key = Timestamp(key)
if key.tzinfo is None:
key = key.tz_localize(self.tz)
else:
key = key.tz_convert(self.tz)
return key
def _maybe_cast_slice_bound(self, label, side: str, kind):
    """
    Cast a slice bound (possibly a date string) to a concrete bound.

    Parameters
    ----------
    label : object
    side : {'left', 'right'}
    kind : {'loc', 'getitem'} or None

    Returns
    -------
    label : object
        Strings are resolved to one end of the period they denote; other
        labels pass through unchanged.
    """
    assert kind in ["loc", "getitem", None]

    if is_float(label) or isinstance(label, time) or is_integer(label):
        self._invalid_indexer("slice", label)

    if isinstance(label, str):
        freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
        parsed, reso = parsing.parse_time_string(label, freq)
        reso = Resolution.from_attrname(reso)
        lower, upper = self._parsed_string_to_bounds(reso, parsed)
        # lower, upper form the half-open interval:
        #   [parsed, parsed + 1 freq)
        # because label may be passed to searchsorted
        # the bounds need swapped if index is reverse sorted and has a
        # length > 1 (is_monotonic_decreasing gives True for empty
        # and length 1 index)
        if self._is_strictly_monotonic_decreasing and len(self) > 1:
            return upper if side == "left" else lower
        return lower if side == "left" else upper
    else:
        return label
def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True):
    # Parse the string (using the index frequency as a hint), then delegate
    # to the shared partial-date-slice machinery.
    freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
    parsed, reso = parsing.parse_time_string(key, freq)
    reso = Resolution.from_attrname(reso)
    loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs, use_rhs=use_rhs)
    return loc
def slice_indexer(self, start=None, end=None, step=None, kind=None):
    """
    Return an indexer for the requested label slice.

    Supports, for historical reasons, both datetime.time slices (applied
    as a time-of-day mask) and value-based partial-string slices even on
    non-monotonic indexes.
    """
    # For historical reasons DatetimeIndex supports slices between two
    # instances of datetime.time as if it were applying a slice mask to
    # an array of (self.hour, self.minute, self.seconds, self.microsecond).
    if isinstance(start, time) and isinstance(end, time):
        if step is not None and step != 1:
            raise ValueError("Must have step size of 1 with time slices")
        return self.indexer_between_time(start, end)

    if isinstance(start, time) or isinstance(end, time):
        raise KeyError("Cannot mix time and non-time slice keys")

    # Pandas supports slicing with dates, treated as datetimes at midnight.
    # https://github.com/pandas-dev/pandas/issues/31501
    if isinstance(start, date) and not isinstance(start, datetime):
        start = datetime.combine(start, time(0, 0))
    if isinstance(end, date) and not isinstance(end, datetime):
        end = datetime.combine(end, time(0, 0))

    try:
        return Index.slice_indexer(self, start, end, step, kind=kind)
    except KeyError:
        # For historical reasons DatetimeIndex by default supports
        # value-based partial (aka string) slices on non-monotonic arrays,
        # let's try that.
        if (start is None or isinstance(start, str)) and (
            end is None or isinstance(end, str)
        ):
            # Build a boolean mask selecting values within the casted bounds.
            mask = True
            if start is not None:
                start_casted = self._maybe_cast_slice_bound(start, "left", kind)
                mask = start_casted <= self
            if end is not None:
                end_casted = self._maybe_cast_slice_bound(end, "right", kind)
                mask = (self <= end_casted) & mask

            indexer = mask.nonzero()[0][::step]
            if len(indexer) == len(self):
                return slice(None)
            else:
                return indexer
        else:
            raise
def is_type_compatible(self, typ) -> bool:
    """Whether ``typ`` names a type this index can be considered compatible with."""
    return typ in (self.inferred_type, "datetime")
@property
def inferred_type(self) -> str:
    # Constant for this index type; used by is_type_compatible above.
    return "datetime64"
def indexer_at_time(self, time, asof=False):
    """
    Return index locations of values at a particular time of day.

    Parameters
    ----------
    time : datetime.time or str
        Time of day, either as an object or as a parseable string.
    asof : bool, default False
        Not supported; passing True raises NotImplementedError.

    Returns
    -------
    ndarray of integer positions
    """
    if asof:
        raise NotImplementedError("'asof' argument is not supported")

    if isinstance(time, str):
        from dateutil.parser import parse

        time = parse(time).time()

    if time.tzinfo:
        if self.tz is None:
            raise ValueError("Index must be timezone aware.")
        # Compare times-of-day in the query time's own timezone.
        time_micros = self.tz_convert(time.tzinfo)._get_time_micros()
    else:
        time_micros = self._get_time_micros()
    micros = _time_to_micros(time)
    return (micros == time_micros).nonzero()[0]
def indexer_between_time(
    self, start_time, end_time, include_start=True, include_end=True
):
    """
    Return index locations of values between particular times of day.

    Parameters
    ----------
    start_time, end_time : datetime.time or str
    include_start : bool, default True
    include_end : bool, default True

    Returns
    -------
    ndarray of integer positions
    """
    start_time = to_time(start_time)
    end_time = to_time(end_time)
    time_micros = self._get_time_micros()
    start_micros = _time_to_micros(start_time)
    end_micros = _time_to_micros(end_time)

    # Pick strict/non-strict comparison for each bound.
    if include_start and include_end:
        lop = rop = operator.le
    elif include_start:
        lop = operator.le
        rop = operator.lt
    elif include_end:
        lop = operator.lt
        rop = operator.le
    else:
        lop = rop = operator.lt

    # If the interval wraps past midnight (start > end), a value matches
    # when it is after start OR before end.
    if start_time <= end_time:
        join_op = operator.and_
    else:
        join_op = operator.or_

    mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros))

    return mask.nonzero()[0]
DatetimeIndex._add_logical_methods_disabled()
def date_range(
    start=None,
    end=None,
    periods=None,
    freq=None,
    tz=None,
    normalize=False,
    name=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed-frequency DatetimeIndex.

    Of ``start``, ``end``, ``periods`` and ``freq``, three must be
    determined; ``freq`` defaults to daily ("D") when any of the other
    three is missing.
    """
    if freq is None and com.any_none(periods, start, end):
        freq = "D"

    dtarr = DatetimeArray._generate_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        closed=closed,
        **kwargs,
    )
    return DatetimeIndex._simple_new(dtarr, name=name)
def bdate_range(
    start=None,
    end=None,
    periods=None,
    freq="B",
    tz=None,
    normalize=True,
    name=None,
    weekmask=None,
    holidays=None,
    closed=None,
    **kwargs,
) -> DatetimeIndex:
    """
    Return a fixed-frequency DatetimeIndex with business day as the
    default frequency.

    ``weekmask`` and ``holidays`` are only honoured together with a
    custom ("C"-prefixed) frequency string; otherwise they are rejected.
    """
    if freq is None:
        msg = "freq must be specified for bdate_range; use date_range instead"
        raise TypeError(msg)

    if isinstance(freq, str) and freq.startswith("C"):
        # Custom business frequency: construct the offset with the
        # requested holiday calendar / weekmask.
        try:
            weekmask = weekmask or "Mon Tue Wed Thu Fri"
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError) as err:
            msg = f"invalid custom frequency string: {freq}"
            raise ValueError(msg) from err
    elif holidays or weekmask:
        msg = (
            "a custom frequency string is required when holidays or "
            f"weekmask are passed, got frequency {freq}"
        )
        raise ValueError(msg)

    return date_range(
        start=start,
        end=end,
        periods=periods,
        freq=freq,
        tz=tz,
        normalize=normalize,
        name=name,
        closed=closed,
        **kwargs,
    )
def _time_to_micros(time_obj: time) -> int:
seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second
return 1_000_000 * seconds + time_obj.microsecond
| true | true |
f71fd14cad832cbba2759947ed66a936ca0786bb | 5,432 | py | Python | src/python/sim_doc.py | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | src/python/sim_doc.py | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | src/python/sim_doc.py | csiu/kick | 0ebc9166074b702fc8b5835685ad102957ab349c | [
"MIT"
] | null | null | null | import sys
sys.path.append("/Users/csiu/repo/kick/src/python")
import argparse
import custom
import pandas as pd
import numpy as np
import re
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
usage = """
For finding similar documents
"""
def get_args():
parser = argparse.ArgumentParser(description=usage)
parser.add_argument('-s', '--num_singular_values', default=100, type=int,
help="Number of singular values to use from SVD")
parser.add_argument('-n', '--num_results', default=None, type=int,
help="Number of similar documents to print in the results")
parser.add_argument('-w', '--term_weight', default="tfidf",
choices=["tfidf", "raw"],
help="How should terms in document be weighted? 'tfidf' or 'raw' counts")
parser.add_argument('-d', '--distance', default="cosine",
help="Metric for calculating the distance between documents.")
parser.add_argument('-i', '--document0_id', default=None, type=int,
help="Kickstarter ID of query document")
parser.add_argument('-c', '--cache_dir', default=".",
help="Specify cache dir")
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
return(args)
def get_data():
"""
Output dataframe w/ 2 columns: "id", "document"
"""
# Get data
dk = custom.DatabaseKick()
cur = dk.connect()
cur.execute("SELECT id, concat_ws(name, blurb) FROM info")
rows = cur.fetchall()
df = pd.DataFrame(rows, columns=["id", "document"])
dk.disconnect()
return(df)
def preprocess_data(df):
    """
    Preprocess 'document' of dataframe by
    - to lowercase
    - remove nonletters
    - tokenize
    - remove stopwords
    - stem

    Dataframe will contain additional 'doc_processed' column
    and df['doc_processed'] will be returned
    """
    def join_output(func):
        """
        Decorator function to join list output to string
        """
        def func_wrapper(text, *arg, **karg):
            return ' '.join(func(text, *arg, **karg))
        return func_wrapper

    def doc_to_string(doc):
        """
        Replace None -> empty string, and strip text newlines (\\n, \\r).
        """
        # `is None` rather than `== None` (PEP 8).
        if doc is None:
            return ""
        # NOTE: newlines are deleted outright (not replaced by a space),
        # so words split across lines get fused together.
        return re.sub("[\n\r]", "", doc)

    # No lambda wrapper needed: apply the function directly.
    df['document'] = df['document'].apply(doc_to_string)

    text_processing = join_output(custom.text_processing)
    df['doc_processed'] = df['document'].apply(
        lambda x: text_processing(x, method="stem"))

    return df['doc_processed']
def compute_distance(U, i=None, sort=False, top_n=None, metric='euclidean'):
    """
    Compute distance of document U[i] with all documents in U

    Parameters
    ----------
    U : matrix of document vectors (one row per document)
    i : Kickstarter id of the query document, or None for row 0
    sort : sort results by ascending distance
    top_n : keep only the top_n closest documents
    metric : metric name accepted by sklearn pairwise_distances

    Returns
    -------
    DataFrame with a single "dist" column, indexed like U's rows
    """
    if i is not None:  # `is not None` rather than `!= None` (PEP 8)
        # NOTE(review): relies on the module-level ``df`` built in the
        # __main__ block to map the id to a row -- consider passing it in.
        index_document0 = df[df["id"] == i].index.tolist()
    else:
        index_document0 = 0

    document0 = np.asmatrix(U[index_document0])
    dist = pairwise_distances(document0, U, metric=metric)
    df_dist = pd.DataFrame(np.transpose(dist), columns=["dist"])

    if sort:
        df_dist.sort_values(by="dist", inplace=True)

    if top_n is not None:
        assert isinstance(top_n, int)
        df_dist = df_dist.head(top_n)

    return df_dist
if __name__ == '__main__':
    # Parse command-line options.
    args = get_args()
    num_singular_values = args.num_singular_values
    document0_id = args.document0_id
    num_results = args.num_results
    cache_dir = args.cache_dir
    verbose = args.verbose
    term_weight = args.term_weight
    distance_metric = args.distance

    # The preprocessed corpus is cached as a pickle in the cache dir.
    preprocess_file = os.path.join(os.path.abspath(cache_dir),
                                   "preprocessed.pkl")

    # Load the corpus from cache when available, otherwise query the DB
    # and preprocess (preprocess_data mutates df in place).
    msg = "# Getting and preprocessing data..."
    if os.path.isfile(preprocess_file):
        if verbose: print(msg, "from cache...")
        df = pd.read_pickle(preprocess_file)
    else:
        if verbose: print(msg)
        df = get_data()
        _ = preprocess_data(df)
        df.to_pickle(preprocess_file)

    # Build the term-document matrix: raw counts or TF-IDF weights.
    if term_weight == "raw":
        if verbose: print("# Making count matrix...")
        cv = CountVectorizer()
        X = cv.fit_transform(df['doc_processed'])
    else:
        if verbose: print("# Making TF-IDF matrix...")
        vectorizer = TfidfVectorizer()
        X = vectorizer.fit_transform(df['doc_processed'])

    # Reduce to the requested number of latent dimensions (LSA).
    if verbose: print("# Computing SVD for %s singular values..." %
                      num_singular_values)
    U, s, Vh = randomized_svd(X, n_components=num_singular_values,
                              n_iter=5, random_state=5)

    # Rank documents by distance to the query document in latent space.
    if verbose: print("# Computing distances (%s)..." % distance_metric)
    top_n = compute_distance(U, i=document0_id,
                             sort=True, top_n=num_results,
                             metric=distance_metric)

    # Print each hit: id, processed text, then the original document.
    if verbose: print("# Printing results...")
    results = []
    counter = 0
    for index, row in df.iloc[top_n.index].iterrows():
        row["dist"] = top_n.iloc[counter]["dist"]
        results.append(row)
        counter += 1

        print('>> %s | %s' % (row['id'], row['doc_processed']),
              row['document'], "\n", sep="\n")
| 30.01105 | 97 | 0.60475 | import sys
sys.path.append("/Users/csiu/repo/kick/src/python")
import argparse
import custom
import pandas as pd
import numpy as np
import re
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils.extmath import randomized_svd
from sklearn.metrics import pairwise_distances
usage = """
For finding similar documents
"""
def get_args():
parser = argparse.ArgumentParser(description=usage)
parser.add_argument('-s', '--num_singular_values', default=100, type=int,
help="Number of singular values to use from SVD")
parser.add_argument('-n', '--num_results', default=None, type=int,
help="Number of similar documents to print in the results")
parser.add_argument('-w', '--term_weight', default="tfidf",
choices=["tfidf", "raw"],
help="How should terms in document be weighted? 'tfidf' or 'raw' counts")
parser.add_argument('-d', '--distance', default="cosine",
help="Metric for calculating the distance between documents.")
parser.add_argument('-i', '--document0_id', default=None, type=int,
help="Kickstarter ID of query document")
parser.add_argument('-c', '--cache_dir', default=".",
help="Specify cache dir")
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
return(args)
def get_data():
dk = custom.DatabaseKick()
cur = dk.connect()
cur.execute("SELECT id, concat_ws(name, blurb) FROM info")
rows = cur.fetchall()
df = pd.DataFrame(rows, columns=["id", "document"])
dk.disconnect()
return(df)
def preprocess_data(df):
def join_output(func):
def func_wrapper(text, *arg, **karg):
return ' '.join(func(text, *arg, **karg))
return func_wrapper
def doc_to_string(doc):
if doc == None:
return("")
else:
return(re.sub("[\n\r]", "", doc))
df['document'] = df['document'].apply(
lambda x: doc_to_string(x))
text_processing = join_output(custom.text_processing)
df['doc_processed'] = df['document'].apply(
lambda x: text_processing(x, method="stem"))
return(df['doc_processed'])
def compute_distance(U, i=None, sort=False, top_n=None, metric='euclidean'):
if i != None:
index_document0 = df[df["id"] == i].index.tolist()
else:
index_document0 = 0
document0 = np.asmatrix(U[index_document0])
dist = pairwise_distances(document0, U, metric=metric)
df_dist = pd.DataFrame(np.transpose(dist), columns=["dist"])
if sort:
df_dist.sort_values(by="dist", inplace=True)
if top_n != None:
assert type(top_n) is int
df_dist = df_dist.head(top_n)
return(df_dist)
if __name__ == '__main__':
args = get_args()
num_singular_values = args.num_singular_values
document0_id = args.document0_id
num_results = args.num_results
cache_dir = args.cache_dir
verbose = args.verbose
term_weight = args.term_weight
distance_metric = args.distance
preprocess_file = os.path.join(os.path.abspath(cache_dir),
"preprocessed.pkl")
msg = "# Getting and preprocessing data..."
if os.path.isfile(preprocess_file):
if verbose: print(msg, "from cache...")
df = pd.read_pickle(preprocess_file)
else:
if verbose: print(msg)
df = get_data()
_ = preprocess_data(df)
df.to_pickle(preprocess_file)
if term_weight == "raw":
if verbose: print("# Making count matrix...")
cv = CountVectorizer()
X = cv.fit_transform(df['doc_processed'])
else:
if verbose: print("# Making TF-IDF matrix...")
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(df['doc_processed'])
if verbose: print("# Computing SVD for %s singular values..." %
num_singular_values)
U, s, Vh = randomized_svd(X, n_components=num_singular_values,
n_iter=5, random_state=5)
if verbose: print("# Computing distances (%s)..." % distance_metric)
top_n = compute_distance(U, i=document0_id,
sort=True, top_n=num_results,
metric=distance_metric)
if verbose: print("# Printing results...")
results = []
counter = 0
for index, row in df.iloc[top_n.index].iterrows():
row["dist"] = top_n.iloc[counter]["dist"]
results.append(row)
counter += 1
print('>> %s | %s' % (row['id'], row['doc_processed']),
row['document'], "\n", sep="\n")
| true | true |
f71fd21c199e5a31cb8e95fea4d6ad447b4eb6cf | 2,082 | py | Python | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | null | null | null | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | 36 | 2017-06-23T20:29:22.000Z | 2020-03-18T15:04:27.000Z | adsputils/tests/test_init.py | adsabs/ADSPipelineUtils | eb8cc988f57c19a256ebc8802cc2a812d5279d12 | [
"MIT"
] | 12 | 2017-06-21T18:10:57.000Z | 2021-11-01T19:13:32.000Z | # -*- coding: utf-8 -*-
import adsputils
import unittest
import os
import json
import time
from inspect import currentframe, getframeinfo
from adsputils.exceptions import UnicodeHandlerError
def _read_file(fpath):
with open(fpath, 'r') as fi:
return fi.read()
class TestInit(unittest.TestCase):
    """Unit tests for the top-level adsputils helpers."""

    def test_logging(self):
        # setup_logging should create a JSON-formatted log file named
        # after the logger under <repo>/logs.
        logdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs'))
        foo_log = logdir + '/foo.bar.log'
        if os.path.exists(foo_log):
            os.remove(foo_log)
        logger = adsputils.setup_logging('foo.bar')
        logger.warning('first')
        frameinfo = getframeinfo(currentframe())
        #print foo_log
        self.assertTrue(os.path.exists(foo_log))
        c = _read_file(foo_log)

        # Each record is a JSON object carrying message metadata.
        j = json.loads(c)
        self.assertEqual(j['message'], 'first')
        self.assertTrue('hostname' in j)

        # verify warning has filename and linenumber
        # (the warning was issued one line before getframeinfo was called)
        self.assertEqual(os.path.basename(frameinfo.filename), j['filename'])
        self.assertEqual(j['lineno'], frameinfo.lineno - 1)

        # small delay so the next records get distinct timestamps
        time.sleep(0.01)

        # now multiline message
        logger.warning(u'second\nthird')
        logger.warning('last')
        c = _read_file(foo_log)

        # A multiline message must survive as a single JSON record, and
        # timestamps must carry sub-second (microsecond) precision.
        found = False
        msecs = False
        for x in c.strip().split('\n'):
            j = json.loads(x)
            self.assertTrue(j)
            if j['message'] == u'second\nthird':
                found = True
            t = adsputils.get_date(j['asctime'])
            if t.microsecond > 0:
                msecs = True
        self.assertTrue(found)
        self.assertTrue(msecs)

    def test_u2asc(self):
        # Accented unicode should be transliterated to plain ASCII.
        input1 = 'benìtez, n'
        input2 = u'izzet, sakallı'

        output1 = adsputils.u2asc(input1)
        output2 = adsputils.u2asc(input2)

        self.assertEqual(output1,'benitez, n')
        self.assertEqual(output2,u'izzet, sakalli')

        # Bytes in an unexpected encoding must raise UnicodeHandlerError.
        input3 = input2.encode('utf16')
        self.assertRaises(UnicodeHandlerError, adsputils.u2asc, input3)
unittest.main()
| 28.135135 | 87 | 0.604707 |
import adsputils
import unittest
import os
import json
import time
from inspect import currentframe, getframeinfo
from adsputils.exceptions import UnicodeHandlerError
def _read_file(fpath):
with open(fpath, 'r') as fi:
return fi.read()
class TestInit(unittest.TestCase):
def test_logging(self):
logdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../logs'))
foo_log = logdir + '/foo.bar.log'
if os.path.exists(foo_log):
os.remove(foo_log)
logger = adsputils.setup_logging('foo.bar')
logger.warning('first')
frameinfo = getframeinfo(currentframe())
self.assertTrue(os.path.exists(foo_log))
c = _read_file(foo_log)
j = json.loads(c)
self.assertEqual(j['message'], 'first')
self.assertTrue('hostname' in j)
self.assertEqual(os.path.basename(frameinfo.filename), j['filename'])
self.assertEqual(j['lineno'], frameinfo.lineno - 1)
time.sleep(0.01)
logger.warning(u'second\nthird')
logger.warning('last')
c = _read_file(foo_log)
found = False
msecs = False
for x in c.strip().split('\n'):
j = json.loads(x)
self.assertTrue(j)
if j['message'] == u'second\nthird':
found = True
t = adsputils.get_date(j['asctime'])
if t.microsecond > 0:
msecs = True
self.assertTrue(found)
self.assertTrue(msecs)
def test_u2asc(self):
input1 = 'benìtez, n'
input2 = u'izzet, sakallı'
output1 = adsputils.u2asc(input1)
output2 = adsputils.u2asc(input2)
self.assertEqual(output1,'benitez, n')
self.assertEqual(output2,u'izzet, sakalli')
input3 = input2.encode('utf16')
self.assertRaises(UnicodeHandlerError, adsputils.u2asc, input3)
if __name__ == '__main__':
unittest.main()
| true | true |
f71fd223ff855bd602d59319796b96fc483982ca | 20,550 | py | Python | log_complete_bcl2/model_76.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete_bcl2/model_76.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete_bcl2/model_76.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 19000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.138889 | 798 | 0.804136 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 19000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
f71fd3f3075081a6f7b219e41391ee28001ad25c | 2,073 | py | Python | codes/poop/fizzbuzz.py | cassiobotaro/go_slides | 1e9bedff22acc78a39cc054c0de432ef80c2df7b | [
"MIT"
] | 2 | 2019-02-27T14:45:39.000Z | 2021-09-27T03:46:20.000Z | codes/poop/fizzbuzz.py | cassiobotaro/go_slides | 1e9bedff22acc78a39cc054c0de432ef80c2df7b | [
"MIT"
] | null | null | null | codes/poop/fizzbuzz.py | cassiobotaro/go_slides | 1e9bedff22acc78a39cc054c0de432ef80c2df7b | [
"MIT"
] | 2 | 2017-04-13T14:42:31.000Z | 2021-09-27T03:46:22.000Z | # smalltalk infected fizzbuzz version
from forbiddenfruit import curse
from collections import deque
def if_true(self, block):
# simulate blocks using functions
self and block()
# close circuit when object is truthy
return self
def if_false(self, block):
# simulate blocks using functions
not self and block()
# close circuit when object is falsy
return self
def println(self):
"""Prints the values to a stream, or to sys.stdout by default.
>>> "Fizz".print()
Fizz
>>> "FizzBuzz".print()
FizzBuzz
"""
print(self)
def do(self, block):
"""Evaluate the receiver for each element in aBlock.
>>> range(1, 11).do(lambda number: number.print())
"""
deque(map(block, self), maxlen=0)
return self
curse(bool, "if_true", if_true)
curse(bool, "if_false", if_false)
curse(str, "print", println)
curse(int, "print", println)
curse(range, "do", do)
# lambdas are used to simulate blocks
"""Summary
We add a do methd on range objects that evaluates a block
for each element on interval.
This block will receive a number, that evaluated
in the expression "number % 15 == 0", This will result in a boolean object,
to which we will send two messages,
one with a block to be evaluated if the expression is true and
another for if it is false.
If true, we will send a print message to a "FizzBuzz" object.
If it is false, we will use the same numeric object
to evaluate the expression number% 5 == 0.
And so we repeat the cycle, until at last a message
is sent to the number printed.
"""
range(1, 101).do(
lambda number: (number % 15 == 0)
.if_true("FizzBuzz".print)
.if_false(
lambda: (number % 5 == 0)
.if_true("Buzz".print)
.if_false(
lambda: (number % 3 == 0)
.if_true("Fizz".print)
.if_false(number.print)
)
)
)
"""
Notes:
- A message is sent to an object for printing
- Lambdas are used to simulate a block
- Add method do for a range, evaluating a block on each number on interval
- Objects and messages
"""
| 25.280488 | 75 | 0.670043 |
from forbiddenfruit import curse
from collections import deque
def if_true(self, block):
self and block()
return self
def if_false(self, block):
not self and block()
return self
def println(self):
print(self)
def do(self, block):
deque(map(block, self), maxlen=0)
return self
curse(bool, "if_true", if_true)
curse(bool, "if_false", if_false)
curse(str, "print", println)
curse(int, "print", println)
curse(range, "do", do)
range(1, 101).do(
lambda number: (number % 15 == 0)
.if_true("FizzBuzz".print)
.if_false(
lambda: (number % 5 == 0)
.if_true("Buzz".print)
.if_false(
lambda: (number % 3 == 0)
.if_true("Fizz".print)
.if_false(number.print)
)
)
)
| true | true |
f71fd4a1f731db04f57e93f442976ca80b5b4b5d | 8,349 | py | Python | s2e_env/tui/tui.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/tui/tui.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | s2e_env/tui/tui.py | michaelbrownuc/s2e-env | 4bd6a45bf1ec9456ed5acf5047b6aac3fcd19683 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2017 Cyberhaven
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import curses
import time
_s_screen = None
# TODO: this module requires clean up
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=no-self-use
class Form:
def __init__(self, parent, x, y, w=None, h=None):
self._children = []
self._parent = parent
self._x = x
self._y = y
self._h = h
self._w = w
self._vcenter, self._hcenter = False, False
self.set_size(w, h)
ax, ay = self.get_screen_coords(0, 0)
self._wnd = curses.newwin(self._h, self._w, ay, ax)
if parent is not None:
parent._children.append(self)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def width(self):
return self._w
@width.setter
def width(self, value):
self._w = value
@property
def height(self):
return self._h
@height.setter
def height(self, value):
self._h = value
@property
def window(self):
return self._wnd
def get_screen_coords(self, x, y):
form = self
ax, ay = x, y
while form is not None:
ax, ay = ax + form.x, ay + form.y
form = form.get_parent()
return ax, ay
def set_pos(self, x, y):
self._x, self._y = x, y
def set_centering(self, hcenter, vcenter):
if self._parent is None:
raise Exception('Form must have a parent')
self._vcenter = vcenter
self._hcenter = hcenter
def set_size(self, w=None, h=None):
"""
Width and Height can be set to None to expand the window
to the size of the parent container.
"""
if w is None or h is None:
form = self.get_parent()
if form is None:
mh, mw = _s_screen.getmaxyx()
else:
mh, mw = form.height, form.width
if w is None:
w = mw
if h is None:
h = mh
self._w, self._h = w, h
def get_parent(self):
return self._parent
def get_draw_coords(self, ax, ay):
x, y = self.x, self.y
# Center the form in the parent window if needed
if self._hcenter:
x = (self._parent._w - self._w) // 2
if self._vcenter:
y = (self._parent._h - self._h) // 2
x += ax
y += ay
return x, y
def draw(self, ax, ay):
x, y = self.get_draw_coords(ax, ay)
# TODO: clipping
self.do_draw(x, y)
for child in self._children:
child.draw(x, y)
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.border()
self._wnd.refresh()
class Label(Form):
def __init__(self, parent, x, y, text):
super().__init__(parent, x, y, len(text) + 2, 1)
self._text = f' {text}'
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.addstr(0, 0, self._text)
self._wnd.refresh()
class Table(Form):
def __init__(self, parent, x, y, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self.set_data(data, legend, layout)
w, h = self._get_dimensions()
super().__init__(parent, x, y, w, h)
def _get_dimensions(self):
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
return w, h
def _update_dimensions(self):
w, h = self._get_dimensions()
self.width = w
self.height = h
def set_data(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._update_dimensions()
def _compute_bounding_box(self, lw, dw, h):
return lw + dw + 5, h
def _compute_data_size(self):
max_legend_width = 0
max_data_width = 0
max_height = len(self._layout)
for k, v in self._data.items():
l = self._legend[k]
max_legend_width = max(max_legend_width, len(l))
max_data_width = max(max_data_width, len(str(v)))
return max_legend_width, max_data_width, max_height
def do_draw(self, ax, ay):
y = 0
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
self._wnd.clear()
self._wnd.resize(h, w)
self._wnd.mvwin(ay, ax)
# self._wnd.border()
for k in self._layout:
l = self._legend[k]
if not k in self._data:
continue
v = self._data[k]
self._wnd.addstr(y, 0, l + ':')
self._wnd.addstr(y, lw + 3, str(v))
y += 1
self._wnd.refresh()
class Tui:
def __init__(self):
self._updated = True
self._data = {}
self._legend = {}
self._layout = {}
self._desktop = None
self._stats = None
self._title = None
self._exitmsg = None
self._table = None
def _create_desktop(self):
global _s_screen
_s_screen = curses.initscr()
curses.noecho()
curses.curs_set(0)
curses.start_color()
self._desktop = Form(None, 0, 0)
self._stats = Form(self._desktop, 0, 0, 70, 20)
self._stats.set_centering(True, True)
self._title = Label(self._stats, 0, 0, 'S2E')
self._title.set_centering(True, False)
self._exitmsg = Label(self._stats, 0, 17, 'Press q to exit')
self._exitmsg.set_centering(True, False)
self._table = Table(self._stats, 2, 2, self._data, self._legend,
self._layout)
self._table.set_centering(True, True)
def _cleanup(self):
curses.nocbreak()
_s_screen.keypad(0)
curses.echo()
curses.endwin()
def _redraw(self):
self._desktop.window.clear()
self._desktop.set_size()
self._desktop.draw(0, 0)
def set_content(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._updated = True
self._table.set_data(data, legend, layout)
def _run(self, callback):
self._create_desktop()
if not callback(self):
return
self._redraw()
self._desktop.window.nodelay(True)
while True:
c = self._desktop.window.getch()
if c == curses.ERR:
if not callback(self):
return
time.sleep(1)
elif c == ord('q'):
break
elif c == curses.KEY_RESIZE:
self._updated = True
if self._updated:
self._redraw()
self._updated = False
def run(self, callback):
try:
self._run(callback)
except Exception:
self._cleanup()
# Print message only after screen is restored, otherwise we might
# get unreadable garbage.
raise
finally:
self._cleanup()
| 26.674121 | 78 | 0.573602 |
import curses
import time
_s_screen = None
class Form:
def __init__(self, parent, x, y, w=None, h=None):
self._children = []
self._parent = parent
self._x = x
self._y = y
self._h = h
self._w = w
self._vcenter, self._hcenter = False, False
self.set_size(w, h)
ax, ay = self.get_screen_coords(0, 0)
self._wnd = curses.newwin(self._h, self._w, ay, ax)
if parent is not None:
parent._children.append(self)
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def width(self):
return self._w
@width.setter
def width(self, value):
self._w = value
@property
def height(self):
return self._h
@height.setter
def height(self, value):
self._h = value
@property
def window(self):
return self._wnd
def get_screen_coords(self, x, y):
form = self
ax, ay = x, y
while form is not None:
ax, ay = ax + form.x, ay + form.y
form = form.get_parent()
return ax, ay
def set_pos(self, x, y):
self._x, self._y = x, y
def set_centering(self, hcenter, vcenter):
if self._parent is None:
raise Exception('Form must have a parent')
self._vcenter = vcenter
self._hcenter = hcenter
def set_size(self, w=None, h=None):
if w is None or h is None:
form = self.get_parent()
if form is None:
mh, mw = _s_screen.getmaxyx()
else:
mh, mw = form.height, form.width
if w is None:
w = mw
if h is None:
h = mh
self._w, self._h = w, h
def get_parent(self):
return self._parent
def get_draw_coords(self, ax, ay):
x, y = self.x, self.y
if self._hcenter:
x = (self._parent._w - self._w) // 2
if self._vcenter:
y = (self._parent._h - self._h) // 2
x += ax
y += ay
return x, y
def draw(self, ax, ay):
x, y = self.get_draw_coords(ax, ay)
self.do_draw(x, y)
for child in self._children:
child.draw(x, y)
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.border()
self._wnd.refresh()
class Label(Form):
def __init__(self, parent, x, y, text):
super().__init__(parent, x, y, len(text) + 2, 1)
self._text = f' {text}'
def do_draw(self, ax, ay):
self._wnd.mvwin(ay, ax)
self._wnd.resize(self._h, self._w)
self._wnd.addstr(0, 0, self._text)
self._wnd.refresh()
class Table(Form):
def __init__(self, parent, x, y, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self.set_data(data, legend, layout)
w, h = self._get_dimensions()
super().__init__(parent, x, y, w, h)
def _get_dimensions(self):
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
return w, h
def _update_dimensions(self):
w, h = self._get_dimensions()
self.width = w
self.height = h
def set_data(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._update_dimensions()
def _compute_bounding_box(self, lw, dw, h):
return lw + dw + 5, h
def _compute_data_size(self):
max_legend_width = 0
max_data_width = 0
max_height = len(self._layout)
for k, v in self._data.items():
l = self._legend[k]
max_legend_width = max(max_legend_width, len(l))
max_data_width = max(max_data_width, len(str(v)))
return max_legend_width, max_data_width, max_height
def do_draw(self, ax, ay):
y = 0
lw, dw, h = self._compute_data_size()
w, h = self._compute_bounding_box(lw, dw, h)
self._wnd.clear()
self._wnd.resize(h, w)
self._wnd.mvwin(ay, ax)
for k in self._layout:
l = self._legend[k]
if not k in self._data:
continue
v = self._data[k]
self._wnd.addstr(y, 0, l + ':')
self._wnd.addstr(y, lw + 3, str(v))
y += 1
self._wnd.refresh()
class Tui:
def __init__(self):
self._updated = True
self._data = {}
self._legend = {}
self._layout = {}
self._desktop = None
self._stats = None
self._title = None
self._exitmsg = None
self._table = None
def _create_desktop(self):
global _s_screen
_s_screen = curses.initscr()
curses.noecho()
curses.curs_set(0)
curses.start_color()
self._desktop = Form(None, 0, 0)
self._stats = Form(self._desktop, 0, 0, 70, 20)
self._stats.set_centering(True, True)
self._title = Label(self._stats, 0, 0, 'S2E')
self._title.set_centering(True, False)
self._exitmsg = Label(self._stats, 0, 17, 'Press q to exit')
self._exitmsg.set_centering(True, False)
self._table = Table(self._stats, 2, 2, self._data, self._legend,
self._layout)
self._table.set_centering(True, True)
def _cleanup(self):
curses.nocbreak()
_s_screen.keypad(0)
curses.echo()
curses.endwin()
def _redraw(self):
self._desktop.window.clear()
self._desktop.set_size()
self._desktop.draw(0, 0)
def set_content(self, data, legend, layout):
self._data = data
self._legend = legend
self._layout = layout
self._updated = True
self._table.set_data(data, legend, layout)
def _run(self, callback):
self._create_desktop()
if not callback(self):
return
self._redraw()
self._desktop.window.nodelay(True)
while True:
c = self._desktop.window.getch()
if c == curses.ERR:
if not callback(self):
return
time.sleep(1)
elif c == ord('q'):
break
elif c == curses.KEY_RESIZE:
self._updated = True
if self._updated:
self._redraw()
self._updated = False
def run(self, callback):
try:
self._run(callback)
except Exception:
self._cleanup()
raise
finally:
self._cleanup()
| true | true |
f71fd745dc747e5cd621bba11088fb4afbc2acb3 | 1,092 | py | Python | jmatcher/users/migrations/0004_auto_20170303_2141.py | jamesaud/se1-group4 | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | [
"MIT"
] | 1 | 2021-09-09T15:43:09.000Z | 2021-09-09T15:43:09.000Z | jmatcher/users/migrations/0004_auto_20170303_2141.py | jamesaud/se1-group4 | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | [
"MIT"
] | null | null | null | jmatcher/users/migrations/0004_auto_20170303_2141.py | jamesaud/se1-group4 | 5280b13dff33e72ce717318a8dd78a06cd6effb3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 21:41
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_remove_student_student_name'),
]
operations = [
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('skill', models.CharField(choices=[('Django', 'Django'), ('Python', 'Python'), ('Java', 'Java'), ('Ruby', 'Ruby')], max_length=255)),
],
),
migrations.RemoveField(
model_name='student',
name='user',
),
migrations.AddField(
model_name='user',
name='connections',
field=models.ManyToManyField(related_name='_user_connections_+', to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Student',
),
]
| 30.333333 | 150 | 0.57967 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_remove_student_student_name'),
]
operations = [
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('skill', models.CharField(choices=[('Django', 'Django'), ('Python', 'Python'), ('Java', 'Java'), ('Ruby', 'Ruby')], max_length=255)),
],
),
migrations.RemoveField(
model_name='student',
name='user',
),
migrations.AddField(
model_name='user',
name='connections',
field=models.ManyToManyField(related_name='_user_connections_+', to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Student',
),
]
| true | true |
f71fd8d00f312300adb54ea7c3dae8e0ed739a61 | 9,839 | py | Python | datasets/preprocess/mpi_inf_3dhp.py | virgile-hernicot/SPIN | 21871e3d333ef37866402ae21498b331aa771b2d | [
"BSD-3-Clause"
] | 555 | 2019-09-30T01:03:23.000Z | 2022-03-30T03:56:09.000Z | datasets/preprocess/mpi_inf_3dhp.py | virgile-hernicot/SPIN | 21871e3d333ef37866402ae21498b331aa771b2d | [
"BSD-3-Clause"
] | 110 | 2019-10-01T05:51:07.000Z | 2022-03-23T13:51:05.000Z | datasets/preprocess/mpi_inf_3dhp.py | virgile-hernicot/SPIN | 21871e3d333ef37866402ae21498b331aa771b2d | [
"BSD-3-Clause"
] | 158 | 2019-09-30T07:06:48.000Z | 2022-03-22T02:32:03.000Z | import os
import sys
import cv2
import glob
import h5py
import json
import numpy as np
import scipy.io as sio
import scipy.misc
from .read_openpose import read_openpose
def read_calibration(calib_file, vid_list):
Ks, Rs, Ts = [], [], []
file = open(calib_file, 'r')
content = file.readlines()
for vid_i in vid_list:
K = np.array([float(s) for s in content[vid_i*7+5][11:-2].split()])
K = np.reshape(K, (4, 4))
RT = np.array([float(s) for s in content[vid_i*7+6][11:-2].split()])
RT = np.reshape(RT, (4, 4))
R = RT[:3,:3]
T = RT[:3,3]/1000
Ks.append(K)
Rs.append(R)
Ts.append(T)
return Ks, Rs, Ts
def train_data(dataset_path, openpose_path, out_path, joints_idx, scaleFactor, extract_img=False, fits_3d=None):
joints17_idx = [4, 18, 19, 20, 23, 24, 25, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]
h, w = 2048, 2048
imgnames_, scales_, centers_ = [], [], []
parts_, Ss_, openposes_ = [], [], []
# training data
user_list = range(1,9)
seq_list = range(1,3)
vid_list = list(range(3)) + list(range(4,9))
counter = 0
for user_i in user_list:
for seq_i in seq_list:
seq_path = os.path.join(dataset_path,
'S' + str(user_i),
'Seq' + str(seq_i))
# mat file with annotations
annot_file = os.path.join(seq_path, 'annot.mat')
annot2 = sio.loadmat(annot_file)['annot2']
annot3 = sio.loadmat(annot_file)['annot3']
# calibration file and camera parameters
calib_file = os.path.join(seq_path, 'camera.calibration')
Ks, Rs, Ts = read_calibration(calib_file, vid_list)
for j, vid_i in enumerate(vid_list):
# image folder
imgs_path = os.path.join(seq_path,
'imageFrames',
'video_' + str(vid_i))
# extract frames from video file
if extract_img:
# if doesn't exist
if not os.path.isdir(imgs_path):
os.makedirs(imgs_path)
# video file
vid_file = os.path.join(seq_path,
'imageSequence',
'video_' + str(vid_i) + '.avi')
vidcap = cv2.VideoCapture(vid_file)
# process video
frame = 0
while 1:
# extract all frames
success, image = vidcap.read()
if not success:
break
frame += 1
# image name
imgname = os.path.join(imgs_path,
'frame_%06d.jpg' % frame)
# save image
cv2.imwrite(imgname, image)
# per frame
cam_aa = cv2.Rodrigues(Rs[j])[0].T[0]
pattern = os.path.join(imgs_path, '*.jpg')
img_list = glob.glob(pattern)
for i, img_i in enumerate(img_list):
# for each image we store the relevant annotations
img_name = img_i.split('/')[-1]
img_view = os.path.join('S' + str(user_i),
'Seq' + str(seq_i),
'imageFrames',
'video_' + str(vid_i),
img_name)
joints = np.reshape(annot2[vid_i][0][i], (28, 2))[joints17_idx]
S17 = np.reshape(annot3[vid_i][0][i], (28, 3))/1000
S17 = S17[joints17_idx] - S17[4] # 4 is the root
bbox = [min(joints[:,0]), min(joints[:,1]),
max(joints[:,0]), max(joints[:,1])]
center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
# check that all joints are visible
x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(joints_idx):
continue
part = np.zeros([24,3])
part[joints_idx] = np.hstack([joints, np.ones([17,1])])
json_file = os.path.join(openpose_path, 'mpi_inf_3dhp',
img_view.replace('.jpg', '_keypoints.json'))
openpose = read_openpose(json_file, part, 'mpi_inf_3dhp')
S = np.zeros([24,4])
S[joints_idx] = np.hstack([S17, np.ones([17,1])])
# because of the dataset size, we only keep every 10th frame
counter += 1
if counter % 10 != 1:
continue
# store the data
imgnames_.append(img_view)
centers_.append(center)
scales_.append(scale)
parts_.append(part)
Ss_.append(S)
openposes_.append(openpose)
# store the data struct
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'mpi_inf_3dhp_train.npz')
if fits_3d is not None:
fits_3d = np.load(fits_3d)
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
pose=fits_3d['pose'],
shape=fits_3d['shape'],
has_smpl=fits_3d['has_smpl'],
S=Ss_,
openpose=openposes_)
else:
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
S=Ss_,
openpose=openposes_)
def test_data(dataset_path, out_path, joints_idx, scaleFactor):
joints17_idx = [14, 11, 12, 13, 8, 9, 10, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]
imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []
# training data
user_list = range(1,7)
for user_i in user_list:
seq_path = os.path.join(dataset_path,
'mpi_inf_3dhp_test_set',
'TS' + str(user_i))
# mat file with annotations
annot_file = os.path.join(seq_path, 'annot_data.mat')
mat_as_h5 = h5py.File(annot_file, 'r')
annot2 = np.array(mat_as_h5['annot2'])
annot3 = np.array(mat_as_h5['univ_annot3'])
valid = np.array(mat_as_h5['valid_frame'])
for frame_i, valid_i in enumerate(valid):
if valid_i == 0:
continue
img_name = os.path.join('mpi_inf_3dhp_test_set',
'TS' + str(user_i),
'imageSequence',
'img_' + str(frame_i+1).zfill(6) + '.jpg')
joints = annot2[frame_i,0,joints17_idx,:]
S17 = annot3[frame_i,0,joints17_idx,:]/1000
S17 = S17 - S17[0]
bbox = [min(joints[:,0]), min(joints[:,1]),
max(joints[:,0]), max(joints[:,1])]
center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
# check that all joints are visible
img_file = os.path.join(dataset_path, img_name)
I = scipy.misc.imread(img_file)
h, w, _ = I.shape
x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(joints_idx):
continue
part = np.zeros([24,3])
part[joints_idx] = np.hstack([joints, np.ones([17,1])])
S = np.zeros([24,4])
S[joints_idx] = np.hstack([S17, np.ones([17,1])])
# store the data
imgnames_.append(img_name)
centers_.append(center)
scales_.append(scale)
parts_.append(part)
Ss_.append(S)
# store the data struct
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'mpi_inf_3dhp_test.npz')
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
S=Ss_)
def mpi_inf_3dhp_extract(dataset_path, openpose_path, out_path, mode, extract_img=False, static_fits=None):
scaleFactor = 1.2
joints_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]
if static_fits is not None:
fits_3d = os.path.join(static_fits,
'mpi-inf-3dhp_mview_fits.npz')
else:
fits_3d = None
if mode == 'train':
train_data(dataset_path, openpose_path, out_path,
joints_idx, scaleFactor, extract_img=extract_img, fits_3d=fits_3d)
elif mode == 'test':
test_data(dataset_path, out_path, joints_idx, scaleFactor)
| 39.514056 | 112 | 0.468035 | import os
import sys
import cv2
import glob
import h5py
import json
import numpy as np
import scipy.io as sio
import scipy.misc
from .read_openpose import read_openpose
def read_calibration(calib_file, vid_list):
Ks, Rs, Ts = [], [], []
file = open(calib_file, 'r')
content = file.readlines()
for vid_i in vid_list:
K = np.array([float(s) for s in content[vid_i*7+5][11:-2].split()])
K = np.reshape(K, (4, 4))
RT = np.array([float(s) for s in content[vid_i*7+6][11:-2].split()])
RT = np.reshape(RT, (4, 4))
R = RT[:3,:3]
T = RT[:3,3]/1000
Ks.append(K)
Rs.append(R)
Ts.append(T)
return Ks, Rs, Ts
def train_data(dataset_path, openpose_path, out_path, joints_idx, scaleFactor, extract_img=False, fits_3d=None):
joints17_idx = [4, 18, 19, 20, 23, 24, 25, 3, 5, 6, 7, 9, 10, 11, 14, 15, 16]
h, w = 2048, 2048
imgnames_, scales_, centers_ = [], [], []
parts_, Ss_, openposes_ = [], [], []
user_list = range(1,9)
seq_list = range(1,3)
vid_list = list(range(3)) + list(range(4,9))
counter = 0
for user_i in user_list:
for seq_i in seq_list:
seq_path = os.path.join(dataset_path,
'S' + str(user_i),
'Seq' + str(seq_i))
annot_file = os.path.join(seq_path, 'annot.mat')
annot2 = sio.loadmat(annot_file)['annot2']
annot3 = sio.loadmat(annot_file)['annot3']
calib_file = os.path.join(seq_path, 'camera.calibration')
Ks, Rs, Ts = read_calibration(calib_file, vid_list)
for j, vid_i in enumerate(vid_list):
imgs_path = os.path.join(seq_path,
'imageFrames',
'video_' + str(vid_i))
if extract_img:
if not os.path.isdir(imgs_path):
os.makedirs(imgs_path)
# video file
vid_file = os.path.join(seq_path,
'imageSequence',
'video_' + str(vid_i) + '.avi')
vidcap = cv2.VideoCapture(vid_file)
# process video
frame = 0
while 1:
# extract all frames
success, image = vidcap.read()
if not success:
break
frame += 1
# image name
imgname = os.path.join(imgs_path,
'frame_%06d.jpg' % frame)
# save image
cv2.imwrite(imgname, image)
# per frame
cam_aa = cv2.Rodrigues(Rs[j])[0].T[0]
pattern = os.path.join(imgs_path, '*.jpg')
img_list = glob.glob(pattern)
for i, img_i in enumerate(img_list):
# for each image we store the relevant annotations
img_name = img_i.split('/')[-1]
img_view = os.path.join('S' + str(user_i),
'Seq' + str(seq_i),
'imageFrames',
'video_' + str(vid_i),
img_name)
joints = np.reshape(annot2[vid_i][0][i], (28, 2))[joints17_idx]
S17 = np.reshape(annot3[vid_i][0][i], (28, 3))/1000
S17 = S17[joints17_idx] - S17[4] # 4 is the root
bbox = [min(joints[:,0]), min(joints[:,1]),
max(joints[:,0]), max(joints[:,1])]
center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
# check that all joints are visible
x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(joints_idx):
continue
part = np.zeros([24,3])
part[joints_idx] = np.hstack([joints, np.ones([17,1])])
json_file = os.path.join(openpose_path, 'mpi_inf_3dhp',
img_view.replace('.jpg', '_keypoints.json'))
openpose = read_openpose(json_file, part, 'mpi_inf_3dhp')
S = np.zeros([24,4])
S[joints_idx] = np.hstack([S17, np.ones([17,1])])
# because of the dataset size, we only keep every 10th frame
counter += 1
if counter % 10 != 1:
continue
# store the data
imgnames_.append(img_view)
centers_.append(center)
scales_.append(scale)
parts_.append(part)
Ss_.append(S)
openposes_.append(openpose)
# store the data struct
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'mpi_inf_3dhp_train.npz')
if fits_3d is not None:
fits_3d = np.load(fits_3d)
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
pose=fits_3d['pose'],
shape=fits_3d['shape'],
has_smpl=fits_3d['has_smpl'],
S=Ss_,
openpose=openposes_)
else:
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
S=Ss_,
openpose=openposes_)
def test_data(dataset_path, out_path, joints_idx, scaleFactor):
joints17_idx = [14, 11, 12, 13, 8, 9, 10, 15, 1, 16, 0, 5, 6, 7, 2, 3, 4]
imgnames_, scales_, centers_, parts_, Ss_ = [], [], [], [], []
# training data
user_list = range(1,7)
for user_i in user_list:
seq_path = os.path.join(dataset_path,
'mpi_inf_3dhp_test_set',
'TS' + str(user_i))
# mat file with annotations
annot_file = os.path.join(seq_path, 'annot_data.mat')
mat_as_h5 = h5py.File(annot_file, 'r')
annot2 = np.array(mat_as_h5['annot2'])
annot3 = np.array(mat_as_h5['univ_annot3'])
valid = np.array(mat_as_h5['valid_frame'])
for frame_i, valid_i in enumerate(valid):
if valid_i == 0:
continue
img_name = os.path.join('mpi_inf_3dhp_test_set',
'TS' + str(user_i),
'imageSequence',
'img_' + str(frame_i+1).zfill(6) + '.jpg')
joints = annot2[frame_i,0,joints17_idx,:]
S17 = annot3[frame_i,0,joints17_idx,:]/1000
S17 = S17 - S17[0]
bbox = [min(joints[:,0]), min(joints[:,1]),
max(joints[:,0]), max(joints[:,1])]
center = [(bbox[2]+bbox[0])/2, (bbox[3]+bbox[1])/2]
scale = scaleFactor*max(bbox[2]-bbox[0], bbox[3]-bbox[1])/200
# check that all joints are visible
img_file = os.path.join(dataset_path, img_name)
I = scipy.misc.imread(img_file)
h, w, _ = I.shape
x_in = np.logical_and(joints[:, 0] < w, joints[:, 0] >= 0)
y_in = np.logical_and(joints[:, 1] < h, joints[:, 1] >= 0)
ok_pts = np.logical_and(x_in, y_in)
if np.sum(ok_pts) < len(joints_idx):
continue
part = np.zeros([24,3])
part[joints_idx] = np.hstack([joints, np.ones([17,1])])
S = np.zeros([24,4])
S[joints_idx] = np.hstack([S17, np.ones([17,1])])
# store the data
imgnames_.append(img_name)
centers_.append(center)
scales_.append(scale)
parts_.append(part)
Ss_.append(S)
# store the data struct
if not os.path.isdir(out_path):
os.makedirs(out_path)
out_file = os.path.join(out_path, 'mpi_inf_3dhp_test.npz')
np.savez(out_file, imgname=imgnames_,
center=centers_,
scale=scales_,
part=parts_,
S=Ss_)
def mpi_inf_3dhp_extract(dataset_path, openpose_path, out_path, mode, extract_img=False, static_fits=None):
scaleFactor = 1.2
joints_idx = [14, 3, 4, 5, 2, 1, 0, 16, 12, 17, 18, 9, 10, 11, 8, 7, 6]
if static_fits is not None:
fits_3d = os.path.join(static_fits,
'mpi-inf-3dhp_mview_fits.npz')
else:
fits_3d = None
if mode == 'train':
train_data(dataset_path, openpose_path, out_path,
joints_idx, scaleFactor, extract_img=extract_img, fits_3d=fits_3d)
elif mode == 'test':
test_data(dataset_path, out_path, joints_idx, scaleFactor)
| true | true |
f71fd8ed4a60f1f0cb0713800b0c028bb7bc4489 | 16,140 | py | Python | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/cert.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 3 | 2018-01-29T14:16:02.000Z | 2019-02-05T21:33:05.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/cert.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 4 | 2021-03-11T04:02:00.000Z | 2022-03-27T08:31:56.000Z | desktop/core/ext-py/pysaml2-4.4.0/src/saml2/cert.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2 | 2019-06-17T11:51:56.000Z | 2020-07-25T08:29:56.000Z | __author__ = 'haho0032'
import base64
import datetime
import dateutil.parser
import pytz
import six
from OpenSSL import crypto
from os.path import join
from os import remove
from Cryptodome.Util import asn1
class WrongInput(Exception):
pass
class CertificateError(Exception):
pass
class PayloadError(Exception):
pass
class OpenSSLWrapper(object):
def __init__(self):
pass
def create_certificate(self, cert_info, request=False, valid_from=0,
valid_to=315360000, sn=1, key_length=1024,
hash_alg="sha256", write_to_file=False, cert_dir="",
cipher_passphrase=None):
"""
Can create certificate requests, to be signed later by another
certificate with the method
create_cert_signed_certificate. If request is True.
Can also create self signed root certificates if request is False.
This is default behaviour.
:param cert_info: Contains information about the certificate.
Is a dictionary that must contain the keys:
cn = Common name. This part
must match the host being authenticated
country_code = Two letter description
of the country.
state = State
city = City
organization = Organization, can be a
company name.
organization_unit = A unit at the
organization, can be a department.
Example:
cert_info_ca = {
"cn": "company.com",
"country_code": "se",
"state": "AC",
"city": "Dorotea",
"organization":
"Company",
"organization_unit":
"Sales"
}
:param request: True if this is a request for certificate,
that should be signed.
False if this is a self signed certificate,
root certificate.
:param valid_from: When the certificate starts to be valid.
Amount of seconds from when the
certificate is generated.
:param valid_to: How long the certificate will be valid from
when it is generated.
The value is in seconds. Default is
315360000 seconds, a.k.a 10 years.
:param sn: Serial number for the certificate. Default
is 1.
:param key_length: Length of the key to be generated. Defaults
to 1024.
:param hash_alg: Hash algorithm to use for the key. Default
is sha256.
:param write_to_file: True if you want to write the certificate
to a file. The method will then return
a tuple with path to certificate file and
path to key file.
False if you want to get the result as
strings. The method will then return a tuple
with the certificate string and the key as
string.
WILL OVERWRITE ALL EXISTING FILES WITHOUT
ASKING!
:param cert_dir: Where to save the files if write_to_file is
true.
:param cipher_passphrase A dictionary with cipher and passphrase.
Example::
{"cipher": "blowfish", "passphrase": "qwerty"}
:return: string representation of certificate,
string representation of private key
if write_to_file parameter is False otherwise
path to certificate file, path to private
key file
"""
cn = cert_info["cn"]
c_f = None
k_f = None
if write_to_file:
cert_file = "%s.crt" % cn
key_file = "%s.key" % cn
try:
remove(cert_file)
except:
pass
try:
remove(key_file)
except:
pass
c_f = join(cert_dir, cert_file)
k_f = join(cert_dir, key_file)
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, key_length)
# create a self-signed cert
cert = crypto.X509()
if request:
cert = crypto.X509Req()
if (len(cert_info["country_code"]) != 2):
raise WrongInput("Country code must be two letters!")
cert.get_subject().C = cert_info["country_code"]
cert.get_subject().ST = cert_info["state"]
cert.get_subject().L = cert_info["city"]
cert.get_subject().O = cert_info["organization"]
cert.get_subject().OU = cert_info["organization_unit"]
cert.get_subject().CN = cn
if not request:
cert.set_serial_number(sn)
cert.gmtime_adj_notBefore(valid_from) #Valid before present time
cert.gmtime_adj_notAfter(valid_to) #3 650 days
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, hash_alg)
filesCreated = False
try:
if request:
tmp_cert = crypto.dump_certificate_request(crypto.FILETYPE_PEM,
cert)
else:
tmp_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
tmp_key = None
if cipher_passphrase is not None:
passphrase = cipher_passphrase["passphrase"]
if isinstance(cipher_passphrase["passphrase"],
six.string_types):
passphrase = passphrase.encode('utf-8')
tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k,
cipher_passphrase["cipher"],
passphrase)
else:
tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
if write_to_file:
fc = open(c_f, "wt")
fk = open(k_f, "wt")
if request:
fc.write(tmp_cert.decode('utf-8'))
else:
fc.write(tmp_cert.decode('utf-8'))
fk.write(tmp_key.decode('utf-8'))
filesCreated = True
try:
fc.close()
except:
pass
try:
fk.close()
except:
pass
return c_f, k_f
return tmp_cert, tmp_key
except Exception as ex:
raise CertificateError("Certificate cannot be generated.", ex)
def write_str_to_file(self, file, str_data):
f = open(file, "wt")
f.write(str_data)
f.close()
def read_str_from_file(self, file, type="pem"):
f = open(file, 'rt')
str_data = f.read()
f.close()
if type == "pem":
return str_data
if type in ["der", "cer", "crt"]:
return base64.b64encode(str(str_data))
def create_cert_signed_certificate(self, sign_cert_str, sign_key_str,
request_cert_str, hash_alg="sha256",
valid_from=0, valid_to=315360000, sn=1,
passphrase=None):
"""
Will sign a certificate request with a give certificate.
:param sign_cert_str: This certificate will be used to sign with.
Must be a string representation of
the certificate. If you only have a file
use the method read_str_from_file to
get a string representation.
:param sign_key_str: This is the key for the ca_cert_str
represented as a string.
If you only have a file use the method
read_str_from_file to get a string
representation.
:param request_cert_str: This is the prepared certificate to be
signed. Must be a string representation of
the requested certificate. If you only have
a file use the method read_str_from_file
to get a string representation.
:param hash_alg: Hash algorithm to use for the key. Default
is sha256.
:param valid_from: When the certificate starts to be valid.
Amount of seconds from when the
certificate is generated.
:param valid_to: How long the certificate will be valid from
when it is generated.
The value is in seconds. Default is
315360000 seconds, a.k.a 10 years.
:param sn: Serial number for the certificate. Default
is 1.
:param passphrase: Password for the private key in sign_key_str.
:return: String representation of the signed
certificate.
"""
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, sign_cert_str)
ca_key = None
if passphrase is not None:
ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str,
passphrase)
else:
ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str)
req_cert = crypto.load_certificate_request(crypto.FILETYPE_PEM,
request_cert_str)
cert = crypto.X509()
cert.set_subject(req_cert.get_subject())
cert.set_serial_number(sn)
cert.gmtime_adj_notBefore(valid_from)
cert.gmtime_adj_notAfter(valid_to)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(req_cert.get_pubkey())
cert.sign(ca_key, hash_alg)
cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
if isinstance(cert_dump, six.string_types):
return cert_dump
return cert_dump.decode('utf-8')
def verify_chain(self, cert_chain_str_list, cert_str):
"""
:param cert_chain_str_list: Must be a list of certificate strings,
where the first certificate to be validate
is in the beginning and the root certificate is last.
:param cert_str: The certificate to be validated.
:return:
"""
for tmp_cert_str in cert_chain_str_list:
valid, message = self.verify(tmp_cert_str, cert_str)
if not valid:
return False, message
else:
cert_str = tmp_cert_str
return (True,
"Signed certificate is valid and correctly signed by CA "
"certificate.")
def certificate_not_valid_yet(self, cert):
starts_to_be_valid = dateutil.parser.parse(cert.get_notBefore())
now = pytz.UTC.localize(datetime.datetime.utcnow())
if starts_to_be_valid < now:
return False
return True
    def verify(self, signing_cert_str, cert_str):
        """
        Verifies if a certificate is valid and signed by a given certificate.

        :param signing_cert_str: This certificate will be used to verify the
                                 signature. Must be a string representation
                                 of the certificate. If you only have a file
                                 use the method read_str_from_file to
                                 get a string representation.
        :param cert_str: This certificate will be verified if it is
                         correct. Must be a string representation
                         of the certificate. If you only have a file
                         use the method read_str_from_file to
                         get a string representation.
        :return: Valid, Message
                 Valid = True if the certificate is valid,
                 otherwise false.
                 Message = Why the validation failed.
        """
        try:
            ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM,
                                              signing_cert_str)
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)

            # Reject the pair early on any validity-period problem, for
            # either the CA certificate or the certificate under test.
            if self.certificate_not_valid_yet(ca_cert):
                return False, "CA certificate is not valid yet."

            if ca_cert.has_expired() == 1:
                return False, "CA certificate is expired."

            if cert.has_expired() == 1:
                return False, "The signed certificate is expired."

            if self.certificate_not_valid_yet(cert):
                return False, "The signed certificate is not valid yet."

            # A certificate must not claim to be issued by itself here:
            # equal CNs for issuer and subject are refused by policy.
            if ca_cert.get_subject().CN == cert.get_subject().CN:
                return False, ("CN may not be equal for CA certificate and the "
                               "signed certificate.")

            cert_algorithm = cert.get_signature_algorithm()
            if six.PY3:
                # get_signature_algorithm() returns bytes on Python 3;
                # crypto.verify below expects the algorithm name as str.
                cert_algorithm = cert_algorithm.decode('ascii')

            # Re-serialize to DER so the signed portion and the signature
            # can be pulled out of the ASN.1 structure by hand.
            cert_asn1 = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)

            # An X.509 certificate is a SEQUENCE of:
            #   [0] tbsCertificate, [1] signatureAlgorithm, [2] signatureValue
            der_seq = asn1.DerSequence()
            der_seq.decode(cert_asn1)

            cert_certificate = der_seq[0]
            #cert_signature_algorithm=der_seq[1]
            cert_signature = der_seq[2]

            cert_signature_decoded = asn1.DerObject()
            cert_signature_decoded.decode(cert_signature)
            signature_payload = cert_signature_decoded.payload

            # The signature is a DER BIT STRING whose first payload byte is
            # the count of unused bits; for a well-formed certificate
            # signature it must be zero. The first byte is an int on
            # Python 3 and a one-character str on Python 2, hence the
            # double check.
            sig_pay0 = signature_payload[0]
            if ((isinstance(sig_pay0, int) and sig_pay0 != 0) or
                    (isinstance(sig_pay0, str) and sig_pay0 != '\x00')):
                return (False,
                        "The certificate should not contain any unused bits.")

            # Strip the unused-bits byte to obtain the raw signature.
            signature = signature_payload[1:]

            try:
                # crypto.verify raises crypto.Error when the signature does
                # not match; success means the CA key signed tbsCertificate.
                crypto.verify(ca_cert, signature, cert_certificate,
                              cert_algorithm)
                return True, "Signed certificate is valid and correctly signed by CA certificate."
            except crypto.Error as e:
                return False, "Certificate is incorrectly signed."
        except Exception as e:
            # Best-effort catch-all: any parsing/loading failure is reported
            # as an invalid certificate rather than propagated to the caller.
            return False, "Certificate is not valid for an unknown reason. %s" % str(e)
| 43.621622 | 98 | 0.495291 | __author__ = 'haho0032'
import base64
import datetime
import dateutil.parser
import pytz
import six
from OpenSSL import crypto
from os.path import join
from os import remove
from Cryptodome.Util import asn1
class WrongInput(Exception):
pass
class CertificateError(Exception):
pass
class PayloadError(Exception):
pass
class OpenSSLWrapper(object):
def __init__(self):
pass
def create_certificate(self, cert_info, request=False, valid_from=0,
valid_to=315360000, sn=1, key_length=1024,
hash_alg="sha256", write_to_file=False, cert_dir="",
cipher_passphrase=None):
cn = cert_info["cn"]
c_f = None
k_f = None
if write_to_file:
cert_file = "%s.crt" % cn
key_file = "%s.key" % cn
try:
remove(cert_file)
except:
pass
try:
remove(key_file)
except:
pass
c_f = join(cert_dir, cert_file)
k_f = join(cert_dir, key_file)
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, key_length)
cert = crypto.X509()
if request:
cert = crypto.X509Req()
if (len(cert_info["country_code"]) != 2):
raise WrongInput("Country code must be two letters!")
cert.get_subject().C = cert_info["country_code"]
cert.get_subject().ST = cert_info["state"]
cert.get_subject().L = cert_info["city"]
cert.get_subject().O = cert_info["organization"]
cert.get_subject().OU = cert_info["organization_unit"]
cert.get_subject().CN = cn
if not request:
cert.set_serial_number(sn)
cert.gmtime_adj_notBefore(valid_from)
cert.gmtime_adj_notAfter(valid_to)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, hash_alg)
filesCreated = False
try:
if request:
tmp_cert = crypto.dump_certificate_request(crypto.FILETYPE_PEM,
cert)
else:
tmp_cert = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
tmp_key = None
if cipher_passphrase is not None:
passphrase = cipher_passphrase["passphrase"]
if isinstance(cipher_passphrase["passphrase"],
six.string_types):
passphrase = passphrase.encode('utf-8')
tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k,
cipher_passphrase["cipher"],
passphrase)
else:
tmp_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, k)
if write_to_file:
fc = open(c_f, "wt")
fk = open(k_f, "wt")
if request:
fc.write(tmp_cert.decode('utf-8'))
else:
fc.write(tmp_cert.decode('utf-8'))
fk.write(tmp_key.decode('utf-8'))
filesCreated = True
try:
fc.close()
except:
pass
try:
fk.close()
except:
pass
return c_f, k_f
return tmp_cert, tmp_key
except Exception as ex:
raise CertificateError("Certificate cannot be generated.", ex)
def write_str_to_file(self, file, str_data):
f = open(file, "wt")
f.write(str_data)
f.close()
def read_str_from_file(self, file, type="pem"):
f = open(file, 'rt')
str_data = f.read()
f.close()
if type == "pem":
return str_data
if type in ["der", "cer", "crt"]:
return base64.b64encode(str(str_data))
def create_cert_signed_certificate(self, sign_cert_str, sign_key_str,
request_cert_str, hash_alg="sha256",
valid_from=0, valid_to=315360000, sn=1,
passphrase=None):
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, sign_cert_str)
ca_key = None
if passphrase is not None:
ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str,
passphrase)
else:
ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, sign_key_str)
req_cert = crypto.load_certificate_request(crypto.FILETYPE_PEM,
request_cert_str)
cert = crypto.X509()
cert.set_subject(req_cert.get_subject())
cert.set_serial_number(sn)
cert.gmtime_adj_notBefore(valid_from)
cert.gmtime_adj_notAfter(valid_to)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(req_cert.get_pubkey())
cert.sign(ca_key, hash_alg)
cert_dump = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
if isinstance(cert_dump, six.string_types):
return cert_dump
return cert_dump.decode('utf-8')
def verify_chain(self, cert_chain_str_list, cert_str):
for tmp_cert_str in cert_chain_str_list:
valid, message = self.verify(tmp_cert_str, cert_str)
if not valid:
return False, message
else:
cert_str = tmp_cert_str
return (True,
"Signed certificate is valid and correctly signed by CA "
"certificate.")
def certificate_not_valid_yet(self, cert):
starts_to_be_valid = dateutil.parser.parse(cert.get_notBefore())
now = pytz.UTC.localize(datetime.datetime.utcnow())
if starts_to_be_valid < now:
return False
return True
def verify(self, signing_cert_str, cert_str):
try:
ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM,
signing_cert_str)
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_str)
if self.certificate_not_valid_yet(ca_cert):
return False, "CA certificate is not valid yet."
if ca_cert.has_expired() == 1:
return False, "CA certificate is expired."
if cert.has_expired() == 1:
return False, "The signed certificate is expired."
if self.certificate_not_valid_yet(cert):
return False, "The signed certificate is not valid yet."
if ca_cert.get_subject().CN == cert.get_subject().CN:
return False, ("CN may not be equal for CA certificate and the "
"signed certificate.")
cert_algorithm = cert.get_signature_algorithm()
if six.PY3:
cert_algorithm = cert_algorithm.decode('ascii')
cert_asn1 = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
der_seq = asn1.DerSequence()
der_seq.decode(cert_asn1)
cert_certificate = der_seq[0]
cert_signature = der_seq[2]
cert_signature_decoded = asn1.DerObject()
cert_signature_decoded.decode(cert_signature)
signature_payload = cert_signature_decoded.payload
sig_pay0 = signature_payload[0]
if ((isinstance(sig_pay0, int) and sig_pay0 != 0) or
(isinstance(sig_pay0, str) and sig_pay0 != '\x00')):
return (False,
"The certificate should not contain any unused bits.")
signature = signature_payload[1:]
try:
crypto.verify(ca_cert, signature, cert_certificate,
cert_algorithm)
return True, "Signed certificate is valid and correctly signed by CA certificate."
except crypto.Error as e:
return False, "Certificate is incorrectly signed."
except Exception as e:
return False, "Certificate is not valid for an unknown reason. %s" % str(e)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.