hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ca0c4db4947c4f147ab6c53413af197da534268c | 15,907 | py | Python | pandas/core/computation/ops.py | vimalromeo/pandas | 7c14e4f14aff216be558bf5d4d2d00b4838c2360 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 69 | 2020-03-31T06:40:17.000Z | 2022-02-25T11:48:18.000Z | venv/lib/python3.7/site-packages/pandas/core/computation/ops.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 8 | 2019-12-04T23:44:11.000Z | 2022-02-10T08:31:40.000Z | venv/lib/python3.7/site-packages/pandas/core/computation/ops.py | John1001Song/Big-Data-Robo-Adviser | 9444dce96954c546333d5aecc92a06c3bfd19aa5 | [
"MIT"
] | 28 | 2020-04-15T15:24:17.000Z | 2021-12-26T04:05:02.000Z | """Operator classes for eval.
"""
import operator as op
from functools import partial
from datetime import datetime
import numpy as np
from pandas.core.dtypes.common import is_list_like, is_scalar
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded
from pandas.core.base import StringMixin
from pandas.core.computation.common import _ensure_decoded, _result_type_many
from pandas.core.computation.scope import _DEFAULT_GLOBALS
_reductions = 'sum', 'prod'
_unary_math_ops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p',
'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos',
'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs')
_binary_math_ops = ('arctan2',)
_mathops = _unary_math_ops + _binary_math_ops
_LOCAL_TAG = '__pd_eval_local_'
class UndefinedVariableError(NameError):
    """NameError subclass for local variables."""

    def __init__(self, name, is_local):
        # choose the message template based on whether the name was a
        # local (@-prefixed) variable or a plain name
        template = ('local variable {0!r} is not defined' if is_local
                    else 'name {0!r} is not defined')
        super(UndefinedVariableError, self).__init__(template.format(name))
class Term(StringMixin):
    """A leaf node of an eval expression: a name resolved against a Scope,
    or (via the Constant subclass) a literal value."""

    def __new__(cls, name, env, side=None, encoding=None):
        # non-string "names" are literal values, so dispatch to Constant
        klass = Constant if not isinstance(name, string_types) else cls
        supr_new = super(Term, klass).__new__
        return supr_new(klass)

    def __init__(self, name, env, side=None, encoding=None):
        self._name = name
        self.env = env
        # side: presumably 'left'/'right' operand position set by the parser
        # -- TODO confirm against callers
        self.side = side
        tname = text_type(name)
        # local (@-prefixed) names are tagged with _LOCAL_TAG by the parser;
        # names in _DEFAULT_GLOBALS are also resolved as locals
        self.is_local = (tname.startswith(_LOCAL_TAG) or
                         tname in _DEFAULT_GLOBALS)
        # resolve eagerly; raises UndefinedVariableError via env.resolve
        self._value = self._resolve_name()
        self.encoding = encoding

    @property
    def local_name(self):
        # the user-visible name, with the internal local tag stripped
        return self.name.replace(_LOCAL_TAG, '')

    def __unicode__(self):
        return pprint_thing(self.name)

    def __call__(self, *args, **kwargs):
        # calling a Term simply yields its resolved value
        return self.value

    def evaluate(self, *args, **kwargs):
        # a Term is already "evaluated"; return self unchanged
        return self

    def _resolve_name(self):
        """Look the name up in the scope and reject >2-dimensional objects."""
        res = self.env.resolve(self.local_name, is_local=self.is_local)
        self.update(res)
        if hasattr(res, 'ndim') and res.ndim > 2:
            raise NotImplementedError("N-dimensional objects, where N > 2,"
                                      " are not supported with eval")
        return res

    def update(self, value):
        """
        search order for local (i.e., @variable) variables:
        scope, key_variable
        [('locals', 'local_name'),
         ('globals', 'local_name'),
         ('locals', 'key'),
         ('globals', 'key')]
        """
        key = self.name
        # if it's a variable name (otherwise a constant)
        if isinstance(key, string_types):
            self.env.swapkey(self.local_name, key, new_value=value)
        self.value = value

    @property
    def is_scalar(self):
        return is_scalar(self._value)

    @property
    def type(self):
        """The dtype (or Python type for scalars) of the resolved value."""
        try:
            # potentially very slow for large, mixed dtype frames
            return self._value.values.dtype
        except AttributeError:
            try:
                # ndarray
                return self._value.dtype
            except AttributeError:
                # scalar
                return type(self._value)

    # alias used by Op.return_type machinery
    return_type = type

    @property
    def raw(self):
        # debug representation: class, name and resolved type
        return pprint_thing('{0}(name={1!r}, type={2})'
                            ''.format(self.__class__.__name__, self.name,
                                      self.type))

    @property
    def is_datetime(self):
        # self.type may be a numpy dtype (has .type) or a plain Python type
        try:
            t = self.type.type
        except AttributeError:
            t = self.type
        return issubclass(t, (datetime, np.datetime64))

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, new_value):
        self._value = new_value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_name):
        self._name = new_name

    @property
    def ndim(self):
        # assumes the resolved value exposes .ndim (array-like)
        return self._value.ndim
class Constant(Term):
    """A Term holding a literal value (no name lookup needed)."""

    def __init__(self, value, env, side=None, encoding=None):
        super(Constant, self).__init__(value, env, side=side,
                                       encoding=encoding)

    def _resolve_name(self):
        # a constant resolves to itself; _name holds the literal value
        return self._name

    # NOTE(review): overriding only the getter makes `name` read-only here,
    # dropping Term's name.setter -- presumably intentional for literals
    @property
    def name(self):
        return self.value

    def __unicode__(self):
        # in python 2 str() of float
        # can truncate shorter than repr()
        return repr(self.name)
_bool_op_map = {'not': '~', 'and': '&', 'or': '|'}
class Op(StringMixin):
    """Hold an operator of arbitrary arity
    """

    def __init__(self, op, operands, *args, **kwargs):
        # map word-form boolean ops ('and'/'or'/'not') to their bitwise
        # equivalents so they work elementwise on arrays
        self.op = _bool_op_map.get(op, op)
        self.operands = operands
        self.encoding = kwargs.get('encoding', None)

    def __iter__(self):
        # iterating an Op yields its operands (used by com.flatten)
        return iter(self.operands)

    def __unicode__(self):
        """Print a generic n-ary operator and its operands using infix
        notation"""
        # recurse over the operands
        parened = ('({0})'.format(pprint_thing(opr))
                   for opr in self.operands)
        return pprint_thing(' {0} '.format(self.op).join(parened))

    @property
    def return_type(self):
        # clobber types to bool if the op is a boolean operator
        if self.op in (_cmp_ops_syms + _bool_ops_syms):
            return np.bool_
        return _result_type_many(*(term.type for term in com.flatten(self)))

    @property
    def has_invalid_return_type(self):
        # object return type is only valid when every operand is object dtype
        types = self.operand_types
        obj_dtype_set = frozenset([np.dtype('object')])
        return self.return_type == object and types - obj_dtype_set

    @property
    def operand_types(self):
        # set of types across the whole (flattened) expression tree
        return frozenset(term.type for term in com.flatten(self))

    @property
    def is_scalar(self):
        return all(operand.is_scalar for operand in self.operands)

    @property
    def is_datetime(self):
        # return_type may be a numpy dtype (has .type) or a plain type
        try:
            t = self.return_type.type
        except AttributeError:
            t = self.return_type
        return issubclass(t, (datetime, np.datetime64))
def _in(x, y):
"""Compute the vectorized membership of ``x in y`` if possible, otherwise
use Python.
"""
try:
return x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return y.isin(x)
except AttributeError:
pass
return x in y
def _not_in(x, y):
"""Compute the vectorized membership of ``x not in y`` if possible,
otherwise use Python.
"""
try:
return ~x.isin(y)
except AttributeError:
if is_list_like(x):
try:
return ~y.isin(x)
except AttributeError:
pass
return x not in y
# comparison operators; 'in'/'not in' use the vectorized helpers above
_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in'
_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in
_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs))
# boolean operators: word and symbol forms share the same bitwise
# implementations so they work elementwise on boolean arrays
_bool_ops_syms = '&', '|', 'and', 'or'
_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_
_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs))
# arithmetic operators; '/' is true division on Python 3
_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%'
_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div,
                    op.pow, op.floordiv, op.mod)
_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs))
# operators that need special-case handling (e.g. casting) in some engines
_special_case_arith_ops_syms = '**', '//', '%'
_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod
_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms,
                                        _special_case_arith_ops_funcs))
# single lookup table combining comparison, boolean and arithmetic ops
_binary_ops_dict = {}
for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict):
    _binary_ops_dict.update(d)
def _cast_inplace(terms, acceptable_dtypes, dtype):
"""Cast an expression inplace.
Parameters
----------
terms : Op
The expression that should cast.
acceptable_dtypes : list of acceptable numpy.dtype
Will not cast if term's dtype in this list.
.. versionadded:: 0.19.0
dtype : str or numpy.dtype
The dtype to cast to.
"""
dt = np.dtype(dtype)
for term in terms:
if term.type in acceptable_dtypes:
continue
try:
new_value = term.value.astype(dt)
except AttributeError:
new_value = dt.type(term.value)
term.update(new_value)
def is_term(obj):
    """Return True if *obj* is a Term (leaf) node of an expression."""
    return isinstance(obj, Term)
class BinOp(Op):
    """Hold a binary operator and its operands

    Parameters
    ----------
    op : str
    left : Term or Op
    right : Term or Op
    """

    def __init__(self, op, lhs, rhs, **kwargs):
        super(BinOp, self).__init__(op, (lhs, rhs))
        self.lhs = lhs
        self.rhs = rhs
        # validate scalar boolean ops and normalize datetime comparisons
        # eagerly, at construction time
        self._disallow_scalar_only_bool_ops()
        self.convert_values()
        try:
            self.func = _binary_ops_dict[op]
        except KeyError:
            # has to be made a list for python3
            keys = list(_binary_ops_dict.keys())
            raise ValueError('Invalid binary operator {0!r}, valid'
                             ' operators are {1}'.format(op, keys))

    def __call__(self, env):
        """Recursively evaluate an expression in Python space.

        Parameters
        ----------
        env : Scope

        Returns
        -------
        object
            The result of an evaluated expression.
        """
        # handle truediv
        if self.op == '/' and env.scope['truediv']:
            self.func = op.truediv
        # recurse over the left/right nodes
        left = self.lhs(env)
        right = self.rhs(env)
        return self.func(left, right)

    def evaluate(self, env, engine, parser, term_type, eval_in_python):
        """Evaluate a binary operation *before* being passed to the engine.

        Parameters
        ----------
        env : Scope
        engine : str
        parser : str
        term_type : type
        eval_in_python : list

        Returns
        -------
        term_type
            The "pre-evaluated" expression as an instance of ``term_type``
        """
        if engine == 'python':
            res = self(env)
        else:
            # recurse over the left/right nodes
            left = self.lhs.evaluate(env, engine=engine, parser=parser,
                                     term_type=term_type,
                                     eval_in_python=eval_in_python)
            right = self.rhs.evaluate(env, engine=engine, parser=parser,
                                      term_type=term_type,
                                      eval_in_python=eval_in_python)
            # base cases
            if self.op in eval_in_python:
                # ops the engine can't handle are computed in Python space
                res = self.func(left.value, right.value)
            else:
                res = pd.eval(self, local_dict=env, engine=engine,
                              parser=parser)
        # stash the result under a temporary name and wrap it as a new Term
        name = env.add_tmp(res)
        return term_type(name, env=env)

    def convert_values(self):
        """Convert datetimes to a comparable value in an expression.
        """
        def stringify(value):
            # encode per self.encoding when set, else plain pretty-print
            if self.encoding is not None:
                encoder = partial(pprint_thing_encoded,
                                  encoding=self.encoding)
            else:
                encoder = pprint_thing
            return encoder(value)

        lhs, rhs = self.lhs, self.rhs
        # datetime column compared against a scalar: coerce the scalar to a
        # UTC Timestamp so comparisons are well defined
        if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar:
            v = rhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.rhs.update(v)
        # and symmetrically for a scalar on the left-hand side
        if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar:
            v = lhs.value
            if isinstance(v, (int, float)):
                v = stringify(v)
            v = pd.Timestamp(_ensure_decoded(v))
            if v.tz is not None:
                v = v.tz_convert('UTC')
            self.lhs.update(v)

    def _disallow_scalar_only_bool_ops(self):
        # boolean ops on scalars are only allowed when both sides are bools
        if ((self.lhs.is_scalar or self.rhs.is_scalar) and
                self.op in _bool_ops_dict and
                (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and
                      issubclass(self.lhs.return_type, (bool, np.bool_))))):
            raise NotImplementedError("cannot evaluate scalar only bool ops")
def isnumeric(dtype):
    """Return True when *dtype* resolves to a numpy numeric scalar type."""
    scalar_type = np.dtype(dtype).type
    return issubclass(scalar_type, np.number)
class Div(BinOp):
    """Div operator to special case casting.

    Parameters
    ----------
    lhs, rhs : Term or Op
        The Terms or Ops in the ``/`` expression.
    truediv : bool
        Whether or not to use true division. With Python 3 this happens
        regardless of the value of ``truediv``.
    """

    def __init__(self, lhs, rhs, truediv, *args, **kwargs):
        super(Div, self).__init__('/', lhs, rhs, *args, **kwargs)
        # division only makes sense for numeric operands
        if not isnumeric(lhs.return_type) or not isnumeric(rhs.return_type):
            raise TypeError("unsupported operand type(s) for {0}:"
                            " '{1}' and '{2}'".format(self.op,
                                                      lhs.return_type,
                                                      rhs.return_type))
        if truediv or PY3:
            # do not upcast float32s to float64 un-necessarily
            # NOTE(review): np.float_ is removed in numpy >= 2.0
            acceptable_dtypes = [np.float32, np.float_]
            _cast_inplace(com.flatten(self), acceptable_dtypes, np.float_)
# unary operators; 'not' maps to bitwise invert so it can be applied
# elementwise to boolean arrays (Python's `not` is not overloadable)
_unary_ops_syms = '+', '-', '~', 'not'
_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert
_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs))
class UnaryOp(Op):
    """Hold a unary operator and its operands

    Parameters
    ----------
    op : str
        The token used to represent the operator.
    operand : Term or Op
        The Term or Op operand to the operator.

    Raises
    ------
    ValueError
        * If no function associated with the passed operator token is found.
    """

    def __init__(self, op, operand):
        super(UnaryOp, self).__init__(op, (operand,))
        self.operand = operand
        try:
            self.func = _unary_ops_dict[op]
        except KeyError:
            raise ValueError('Invalid unary operator {0!r}, valid operators '
                             'are {1}'.format(op, _unary_ops_syms))

    def __call__(self, env):
        # evaluate the operand in the scope, then apply the operator
        operand = self.operand(env)
        return self.func(operand)

    def __unicode__(self):
        return pprint_thing('{0}({1})'.format(self.op, self.operand))

    @property
    def return_type(self):
        # unary ops on booleans (or on comparison/boolean sub-expressions)
        # stay boolean; everything else is reported as int
        operand = self.operand
        if operand.return_type == np.dtype('bool'):
            return np.dtype('bool')
        if (isinstance(operand, Op) and
                (operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict)):
            return np.dtype('bool')
        return np.dtype('int')
class MathCall(Op):
    """A call to a supported math function (see FuncNode) on some operands."""

    def __init__(self, func, args):
        # func is a FuncNode; its name becomes the op token
        super(MathCall, self).__init__(func.name, args)
        self.func = func

    def __call__(self, env):
        # NOTE(review): the loop variable `op` shadows the operator module
        # imported at module level (harmless here, but confusing)
        operands = [op(env) for op in self.operands]
        # suppress numpy floating-point warnings (e.g. log of 0)
        with np.errstate(all='ignore'):
            return self.func.func(*operands)

    def __unicode__(self):
        operands = map(str, self.operands)
        return pprint_thing('{0}({1})'.format(self.op, ','.join(operands)))
class FuncNode(object):
    """A named numpy math function usable inside an eval expression."""

    def __init__(self, name):
        # only functions whitelisted in _mathops are allowed
        if name not in _mathops:
            raise ValueError(
                "\"{0}\" is not a supported function".format(name))
        self.name = name
        # resolve the implementation from the numpy namespace
        self.func = getattr(np, name)

    def __call__(self, *args):
        # calling the node builds a MathCall expression over *args
        return MathCall(self, args)
| 28.921818 | 79 | 0.57635 |
3b358c2274f7380753cacf138817dd2fb62dfea5 | 1,162 | py | Python | src/livecli/plugins/powerapp.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | 1 | 2019-12-04T11:54:52.000Z | 2019-12-04T11:54:52.000Z | src/livecli/plugins/powerapp.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | null | null | null | src/livecli/plugins/powerapp.py | NghiemTrung/livecli | 6a21b1b144b045963b6d1db8d4d8dc8471b62737 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import print_function
import re
from livecli.plugin import Plugin
from livecli.plugin.api import http
from livecli.plugin.api import validate
from livecli.stream import HLSStream
__livecli_docs__ = {
"domains": [
"powerapp.com.tr",
],
"geo_blocked": [],
"notes": "",
"live": True,
"vod": False,
"last_update": "2016-12-16",
}
class PowerApp(Plugin):
    """Livecli plugin for live TV channels on powerapp.com.tr."""

    # NOTE(review): the dots in the domain are unescaped regex dots, so they
    # match any character; r"powerapp\.com\.tr" would be stricter
    url_re = re.compile(r"https?://(?:www.)?powerapp.com.tr/tv/(\w+)")
    api_url = "http://api.powergroup.com.tr/Channels/{0}/?appRef=iPowerWeb&apiVersion=11"
    # accept only responses with errorCode == 0, then extract the
    # "response" dict which must contain a valid channel_stream_url
    api_schema = validate.Schema(validate.all({
        "errorCode": 0,
        "response": {
            "channel_stream_url": validate.url()
        }
    }, validate.get("response")))

    @classmethod
    def can_handle_url(cls, url):
        # a URL is handled iff it matches url_re
        return cls.url_re.match(url) is not None

    def _get_streams(self):
        # the channel slug is the first capture group of url_re
        channel = self.url_re.match(self.url).group(1)
        res = http.get(self.api_url.format(channel))
        data = http.json(res, schema=self.api_schema)
        # the API returns an HLS master playlist URL; expand its variants
        return HLSStream.parse_variant_playlist(self.session, data["channel_stream_url"])
__plugin__ = PowerApp
| 25.822222 | 89 | 0.650602 |
fddf675c8b17d1f8e36aebfe5443696102a92f86 | 5,206 | py | Python | remediation_worker/jobs/security_group_close_port_3389/security_group_close_port_3389.py | kshrutik/secure-state-remediation-jobs | dc0a5acc3a74dd70d0b18e448124761a8481990d | [
"Apache-2.0"
] | null | null | null | remediation_worker/jobs/security_group_close_port_3389/security_group_close_port_3389.py | kshrutik/secure-state-remediation-jobs | dc0a5acc3a74dd70d0b18e448124761a8481990d | [
"Apache-2.0"
] | null | null | null | remediation_worker/jobs/security_group_close_port_3389/security_group_close_port_3389.py | kshrutik/secure-state-remediation-jobs | dc0a5acc3a74dd70d0b18e448124761a8481990d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 VMware Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from botocore.exceptions import ClientError
import json
import logging
import sys
import boto3
logging.basicConfig(level=logging.INFO)
class SecurityGroupClosePort3389(object):
    """Remediation job that revokes public (IPv4 and IPv6) ingress rules for
    TCP port 3389 (RDP) on an AWS security group."""

    def parse(self, payload):
        """Parse payload received from Remediation Service.
        :param payload: JSON string containing parameters received from the remediation service.
        :type payload: str.
        :returns: Dictionary of parsed parameters
        :rtype: dict
        :raises: Exception, JSONDecodeError
        """
        remediation_entry = json.loads(payload)
        # NOTE(review): if "notificationInfo" or "FindingInfo" is absent these
        # .get() chains yield None and the next line raises AttributeError
        # rather than the explicit Exception below
        notification_info = remediation_entry.get("notificationInfo", None)
        finding_info = notification_info.get("FindingInfo", None)
        security_group_id = finding_info.get("ObjectId", None)
        if security_group_id is None:
            logging.error("Missing parameters for 'payload.notificationInfo.ObjectId'.")
            raise Exception(
                "Missing parameters for 'payload.notificationInfo.ObjectId'."
            )
        region = finding_info.get("Region", None)
        if region is None:
            logging.warning("no region specified - defaulting to us-east-1")
            region = "us-east-1"
        logging.info("parsed params")
        logging.info(f"  security_group_id: {security_group_id}")
        logging.info(f"  region: {region}")
        # region is returned separately because it configures the client,
        # not the remediate() call
        return {"security_group_id": security_group_id}, region

    def remediate(self, client, security_group_id):
        """Block public access to port 3389 for both IPv4 and IPv6.
        :param client: Instance of the AWS boto3 client.
        :param security_group_id: The ID of the security group. You must specify either the security group ID or the
            security group name in the request. For security groups in a nondefault VPC, you must specify the security
            group ID.
        :type security_group_id: str.
        :returns: Integer signaling success or failure
        :rtype: int
        :raises: botocore.exceptions.ClientError
        """
        # Revoke ipv4 permission
        # NOTE(review): "ivp4"/"ivp6" in the log messages are typos, but they
        # are runtime strings and are preserved here
        logging.info("revoking ivp4 permissions")
        port = 3389
        try:
            logging.info("  executing client.revoke_security_group_ingress")
            logging.info('  CidrIp="0.0.0.0/0"')
            logging.info(f"  FromPort={port}")
            logging.info(f"  GroupId={security_group_id}")
            logging.info('  IpProtocol="tcp"')
            logging.info(f"  ToPort={port}")
            client.revoke_security_group_ingress(
                CidrIp="0.0.0.0/0",
                FromPort=port,
                GroupId=security_group_id,
                IpProtocol="tcp",
                ToPort=port,
            )
        except ClientError as e:
            # a missing rule is fine (already remediated); re-raise the rest
            if "InvalidPermission.NotFound" not in str(e):
                logging.error(f"{str(e)}")
                raise
        # Revoke ipv6 permission
        logging.info("revoking ivp6 permissions")
        try:
            logging.info("  executing client.revoke_security_group_ingress")
            logging.info(f"  FromPort={port}")
            logging.info(f"  GroupId={security_group_id}")
            logging.info('  IpProtocol="tcp"')
            logging.info('  "Ipv6Ranges": [{"CidrIpv6": "::/0"}]')
            logging.info(f"  ToPort={port}")
            client.revoke_security_group_ingress(
                GroupId=security_group_id,
                IpPermissions=[
                    {
                        "FromPort": port,
                        "IpProtocol": "tcp",
                        "Ipv6Ranges": [{"CidrIpv6": "::/0"}],
                        "ToPort": port,
                    },
                ],
            )
        except ClientError as e:
            # same tolerance for an already-absent IPv6 rule
            if "InvalidPermission.NotFound" not in str(e):
                logging.error(f"{str(e)}")
                raise
        logging.info("successfully executed remediation")
        return 0

    def run(self, args):
        """Run the remediation job.
        :param args: List of arguments provided to the job.
        :type args: list.
        :returns: int
        """
        # args[1] carries the JSON payload from the remediation service
        params, region = self.parse(args[1])
        client = boto3.client("ec2", region_name=region)
        logging.info("acquired ec2 client and parsed params - starting remediation")
        rc = self.remediate(client=client, **params)
        return rc
if __name__ == "__main__":
    # CLI entry point: sys.argv[1] is the JSON remediation payload
    logging.info("security_group_close_port_3389.py called - running now")
    obj = SecurityGroupClosePort3389()
    obj.run(sys.argv)
| 36.661972 | 118 | 0.605263 |
f12d108fc8e783185b70d5f5dfb9da21a1e0f661 | 754 | py | Python | pingle/core/game.py | advancedresearch/pingle | ae5535d8d427bea4bbc174712c5f94a41729653a | [
"MIT"
] | 2 | 2018-05-23T11:29:46.000Z | 2018-05-28T17:49:24.000Z | pingle/core/game.py | advancedresearch/pingle | ae5535d8d427bea4bbc174712c5f94a41729653a | [
"MIT"
] | 6 | 2018-05-24T10:49:52.000Z | 2018-05-28T20:56:44.000Z | pingle/core/game.py | advancedresearch/pingle | ae5535d8d427bea4bbc174712c5f94a41729653a | [
"MIT"
] | 1 | 2018-05-23T11:30:17.000Z | 2018-05-23T11:30:17.000Z | import abc
import pingle.core
class Game:
    """Abstract interface for a pingle game.

    NOTE(review): the methods are marked with abc.abstractmethod, but the
    class does not use abc.ABC / ABCMeta, so instantiation of incomplete
    subclasses is not actually prevented.
    """

    def __init__(self):
        pass

    @abc.abstractmethod
    def play(self, **players):
        """Play a game with the given players -- contract TBD by subclasses.
        """
        pass

    @abc.abstractmethod
    def step(self, *
             # NOTE(review): the bare ``*`` above makes player_id collect
             # varargs; a keyword-only marker ``*,`` was probably intended
             player_id,
             policy,
             duration):
        """
        Advance one player by following a policy for a fixed duration.

        Parameters
        ----------
        player_id: int
            Id of the player to step.
        policy: Policy
            Policy to follow for some duration.
        duration: float
            Length of time to follow the policy before asking for
            additional instruction.
        """
        pass

    @abc.abstractmethod
    def done(self):
        """Return whether the game has finished -- contract TBD."""
        pass

    @abc.abstractmethod
    def results(self):
        """Return the game's results -- contract TBD."""
        pass
| 18.390244 | 65 | 0.496021 |
33f6437802050c6714775bee671200270d8f59cc | 3,336 | py | Python | bert_seq2seq/basic_bert.py | jackie930/bert-seq2seq-textsummary | e685f28b650667f24adc055ca7ca27c0b44d30bd | [
"Apache-2.0"
] | 5 | 2021-04-12T07:38:12.000Z | 2022-03-17T09:49:50.000Z | bert_seq2seq/basic_bert.py | jackie930/bert-seq2seq-textsummary | e685f28b650667f24adc055ca7ca27c0b44d30bd | [
"Apache-2.0"
] | null | null | null | bert_seq2seq/basic_bert.py | jackie930/bert-seq2seq-textsummary | e685f28b650667f24adc055ca7ca27c0b44d30bd | [
"Apache-2.0"
] | 2 | 2021-12-07T14:13:35.000Z | 2022-03-17T09:49:52.000Z |
import torch
import torch.nn as nn
class BasicBert(nn.Module):
    """Base class for BERT-style seq2seq models: device bookkeeping plus
    checkpoint loading/saving helpers."""

    def __init__(self):
        super().__init__()
        # default to CPU; call set_device() to move the model elsewhere
        self.device = torch.device("cpu")

    def load_pretrain_params(self, pretrain_model_path, keep_tokens=None):
        """Load pretrained BERT weights, keeping only "bert*" parameters.

        :param pretrain_model_path: path to the pretrained checkpoint.
        :param keep_tokens: optional index list used to filter the word
            embedding rows when the vocabulary was pruned.
        """
        checkpoint = torch.load(pretrain_model_path, map_location=self.device)
        # At the start of training we need to load the pretrained BERT:
        # keep only the "bert*" entries and drop the pooler weights.
        checkpoint = {k: v for k, v in checkpoint.items()
                      if k[:4] == "bert" and "pooler" not in k}
        if keep_tokens is not None:
            # The vocabulary was pruned, so the embedding layer must be
            # filtered down to the kept token rows as well.
            embedding_weight_name = "bert.embeddings.word_embeddings.weight"
            checkpoint[embedding_weight_name] = checkpoint[embedding_weight_name][keep_tokens]
        self.load_state_dict(checkpoint, strict=False)
        torch.cuda.empty_cache()
        print("{} loaded!".format(pretrain_model_path))

    def load_all_params(self, model_path, device="cuda"):
        """Load a full (fine-tuned) state dict with strict key matching."""
        checkpoint = torch.load(model_path, map_location=device)
        self.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        print(str(model_path) + " loaded!")

    def forward(self, x):
        # BUG FIX: was `raise NotImplemented`, which raises TypeError because
        # NotImplemented is not an exception; subclasses must override.
        raise NotImplementedError

    def set_device(self, device):
        """Record *device* and move all parameters onto it."""
        self.device = torch.device(device)
        self.to(device)

    def save_all_params(self, save_path):
        """Persist the full state dict to *save_path*."""
        torch.save(self.state_dict(), save_path)
class BasicGPT(nn.Module):
    """Base class for GPT-style models: device bookkeeping plus checkpoint
    loading/saving helpers."""

    def __init__(self):
        super().__init__()
        # default to CPU; call set_device() to move the model elsewhere
        self.device = torch.device("cpu")

    def load_pretrain_params(self, pretrain_model_path):
        """Load pretrained weights, prefixing every key with "model."."""
        checkpoint = torch.load(pretrain_model_path, map_location=self.device)
        # the wrapped module lives under the "model." attribute, so remap keys
        checkpoint = {"model." + k: v for k, v in checkpoint.items()}
        self.load_state_dict(checkpoint, strict=True)
        torch.cuda.empty_cache()
        print("{} loaded!".format(pretrain_model_path))

    def load_all_params(self, model_path, device="cuda"):
        """Load a full (fine-tuned) state dict with strict key matching."""
        checkpoint = torch.load(model_path, map_location=device)
        self.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        print(str(model_path) + " loaded!")

    def forward(self, x):
        # BUG FIX: was `raise NotImplemented`, which raises TypeError because
        # NotImplemented is not an exception; subclasses must override.
        raise NotImplementedError

    def set_device(self, device):
        """Record *device* and move all parameters onto it."""
        self.device = torch.device(device)
        self.to(device)

    def save_all_params(self, save_path):
        """Persist the full state dict to *save_path*."""
        torch.save(self.state_dict(), save_path)
class BasicT5(nn.Module):
    """Base class for T5-style models: device bookkeeping plus checkpoint
    loading/saving helpers."""

    def __init__(self):
        super().__init__()
        # default to CPU; call set_device() to move the model elsewhere
        self.device = torch.device("cpu")

    def load_pretrain_params(self, pretrain_model_path):
        """Load pretrained weights, prefixing every key with "model."."""
        checkpoint = torch.load(pretrain_model_path, map_location=self.device)
        # the wrapped module lives under the "model." attribute, so remap keys
        checkpoint = {"model." + k: v for k, v in checkpoint.items()}
        self.load_state_dict(checkpoint, strict=True)
        torch.cuda.empty_cache()
        print("{} loaded!".format(pretrain_model_path))

    def load_all_params(self, model_path, device="cuda"):
        """Load a full (fine-tuned) state dict with strict key matching."""
        checkpoint = torch.load(model_path, map_location=device)
        self.load_state_dict(checkpoint)
        torch.cuda.empty_cache()
        print(str(model_path) + " loaded!")

    def forward(self, x):
        # BUG FIX: was `raise NotImplemented`, which raises TypeError because
        # NotImplemented is not an exception; subclasses must override.
        raise NotImplementedError

    def set_device(self, device):
        """Record *device* and move all parameters onto it."""
        self.device = torch.device(device)
        self.to(device)

    def save_all_params(self, save_path):
        """Persist the full state dict to *save_path*."""
        torch.save(self.state_dict(), save_path)
| 33.36 | 94 | 0.647482 |
c7e3c8c0276185fec1ce7dd7dadd792a83c2b690 | 1,327 | py | Python | homeassistant/components/damper5/__init__.py | magicmatt007/core | a162ccb3f961f98a8bb40e57b162ce3dbc42922a | [
"Apache-2.0"
] | 1 | 2020-09-14T19:44:47.000Z | 2020-09-14T19:44:47.000Z | homeassistant/components/damper5/__init__.py | magicmatt007/core | a162ccb3f961f98a8bb40e57b162ce3dbc42922a | [
"Apache-2.0"
] | null | null | null | homeassistant/components/damper5/__init__.py | magicmatt007/core | a162ccb3f961f98a8bb40e57b162ce3dbc42922a | [
"Apache-2.0"
] | null | null | null | """The damper5a integration."""
import asyncio
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DOMAIN
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
# TODO List the platforms that you want to support.
# For your initial PR, limit it to 1 platform.
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the damper5a component."""
    # YAML-based setup is a no-op; configuration happens via config entries
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up damper5a from a config entry."""
    # TODO Store an API object for your platforms to access
    # hass.data[DOMAIN][entry.entry_id] = MyApi(...)

    # schedule each supported platform's setup as an independent task
    for platform in PLATFORMS:
        forward = hass.config_entries.async_forward_entry_setup(entry, platform)
        hass.async_create_task(forward)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # unload every platform concurrently; success only if all succeed
    pending = [
        hass.config_entries.async_forward_entry_unload(entry, platform)
        for platform in PLATFORMS
    ]
    results = await asyncio.gather(*pending)
    unload_ok = all(results)
    if unload_ok:
        # drop this entry's stored data once everything unloaded cleanly
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
| 26.54 | 80 | 0.68425 |
dcec5c5a013fe73fc268a5dbc5386c1089e3ed91 | 82 | py | Python | CodingBat/Warmup-1/parrot_trouble.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | CodingBat/Warmup-1/parrot_trouble.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | CodingBat/Warmup-1/parrot_trouble.py | arthxvr/coding--python | 1e91707be6cb8fef816dad0c1a65f2cc3327357e | [
"MIT"
] | null | null | null | def parrot_trouble(talking, hour):
return talking and (hour < 7 or hour > 20)
| 27.333333 | 46 | 0.695122 |
43e3bbdb6dc3eca28ffbdf6f8aecd779d3ef22a5 | 921 | py | Python | src/hicolor.py | tateishi/py-urwid-test | f5545840a5320633df23a48c03a6422d6e965e6c | [
"MIT"
] | null | null | null | src/hicolor.py | tateishi/py-urwid-test | f5545840a5320633df23a48c03a6422d6e965e6c | [
"MIT"
] | null | null | null | src/hicolor.py | tateishi/py-urwid-test | f5545840a5320633df23a48c03a6422d6e965e6c | [
"MIT"
] | null | null | null | import urwid
def exit_on_q(key):
    # quit the urwid main loop when 'q' (either case) is pressed
    if key in 'qQ':
        raise urwid.ExitMainLoop()
# 256-color palette: (name, fg16, bg16, mono, fg-high, bg-high)
palette = [
    ('banner', '', '', '', '#ffa', '#60d'),
    ('streak', '', '', '', 'g50', '#60a'),
    ('inside', '', '', '', 'g38', '#808'),
    ('outside', '', '', '', 'g27', '#a06'),
    ('bg', '', '', '', 'g7', '#d06'),
]

placeholder = urwid.SolidFill()
loop = urwid.MainLoop(placeholder, palette, unhandled_input=exit_on_q)
loop.screen.set_terminal_properties(colors=256)
loop.widget = urwid.AttrMap(placeholder, 'bg')
# BUG FIX: was misspelled "origianl_widget", which merely set a bogus
# attribute and left base_widget resolving to the SolidFill (which has no
# .contents), so the append loop below would fail.
loop.widget.original_widget = urwid.Filler(urwid.Pile([]))

div = urwid.Divider()
outside = urwid.AttrMap(div, 'outside')
inside = urwid.AttrMap(div, 'inside')
txt = urwid.Text(('banner', 'Hello World '), align='center')
streak = urwid.AttrMap(txt, 'streak')

# base_widget unwraps the AttrMap/Filler decorations down to the Pile
pile = loop.widget.base_widget
for item in [outside, inside, streak, inside, outside]:
    pile.contents.append((item, pile.options()))

loop.run()
| 29.709677 | 70 | 0.617807 |
ae8c3353dbcc3a82c9b0eaac0a65f5c55bd1cb09 | 5,372 | py | Python | lib/carbon/storage.py | jblaine/carbon | d3903c5e1c9fb4c401161d380a432f58cfda9d20 | [
"Apache-2.0"
] | null | null | null | lib/carbon/storage.py | jblaine/carbon | d3903c5e1c9fb4c401161d380a432f58cfda9d20 | [
"Apache-2.0"
] | null | null | null | lib/carbon/storage.py | jblaine/carbon | d3903c5e1c9fb4c401161d380a432f58cfda9d20 | [
"Apache-2.0"
] | null | null | null | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os, re
import whisper
from os.path import join, exists, sep
from carbon.conf import OrderedConfigParser, settings
from carbon.util import pickle
from carbon import log
STORAGE_SCHEMAS_CONFIG = join(settings.CONF_DIR, 'storage-schemas.conf')
STORAGE_AGGREGATION_CONFIG = join(settings.CONF_DIR, 'storage-aggregation.conf')
STORAGE_LISTS_DIR = join(settings.CONF_DIR, 'lists')
def getFilesystemPath(metric):
  """Map a dotted metric name onto its .wsp file under LOCAL_DATA_DIR."""
  relative_path = metric.replace('.', sep).lstrip(sep) + '.wsp'
  return join(settings.LOCAL_DATA_DIR, relative_path)
class Schema:
  """Base class for metric-matching schemas."""

  def test(self, metric):
    """Subclasses decide whether *metric* matches; must be overridden."""
    raise NotImplementedError()

  def matches(self, metric):
    """Coerce the (possibly non-bool) result of test() to a plain bool."""
    return bool(self.test(metric))
class DefaultSchema(Schema):
  """Catch-all schema used when a section declares match-all."""

  def __init__(self, name, archives):
    self.name = name
    self.archives = archives

  def test(self, metric):
    # the default schema accepts every metric unconditionally
    return True
class PatternSchema(Schema):
  """Schema whose match rule is a regex searched within the metric name."""

  def __init__(self, name, pattern, archives):
    self.name = name
    self.pattern = pattern
    self.archives = archives
    # compile once up front; test() reuses the compiled regex
    self.regex = re.compile(pattern)

  def test(self, metric):
    # returns the match object (truthy) or None; matches() coerces to bool
    return self.regex.search(metric)
class ListSchema(Schema):
  """Schema matching metrics contained in a pickled whitelist file; the
  member set is lazily reloaded whenever the file's mtime advances."""

  def __init__(self, name, listName, archives):
    self.name = name
    self.listName = listName
    self.archives = archives
    self.path = join(settings.WHITELISTS_DIR, listName)
    if exists(self.path):
      self.mtime = os.stat(self.path).st_mtime
      fh = open(self.path, 'rb')
      # NOTE(review): unpickles a local config file; assumed trusted input
      self.members = pickle.load(fh)
      fh.close()
    else:
      # no whitelist file yet: empty membership until one appears
      self.mtime = 0
      self.members = frozenset()

  def test(self, metric):
    # re-read the membership file if it changed since we last loaded it
    if exists(self.path):
      current_mtime = os.stat(self.path).st_mtime
      if current_mtime > self.mtime:
        self.mtime = current_mtime
        fh = open(self.path, 'rb')
        self.members = pickle.load(fh)
        fh.close()
    return metric in self.members
class Archive:
  """A single whisper retention level: precision and number of points."""

  def __init__(self, secondsPerPoint, points):
    self.secondsPerPoint = int(secondsPerPoint)
    self.points = int(points)

  def __str__(self):
    return "Archive = (Seconds per point: %d, Datapoints to save: %d)" % (
        self.secondsPerPoint, self.points)

  def getTuple(self):
    # the (precision, points) pair expected by whisper APIs
    return (self.secondsPerPoint, self.points)

  @staticmethod
  def fromString(retentionDef):
    """Build an Archive from a retention string like "60:1440"."""
    secondsPerPoint, points = whisper.parseRetentionDef(retentionDef)
    return Archive(secondsPerPoint, points)
def loadStorageSchemas():
  """Parse storage-schemas.conf into an ordered list of Schema objects.

  Each config section yields one schema (match-all, pattern, or list based).
  Sections with invalid or missing matcher/retention definitions fall back
  to defaultSchema so writes for their metrics still succeed.
  """
  schemaList = []
  config = OrderedConfigParser()
  config.read(STORAGE_SCHEMAS_CONFIG)
  for section in config.sections():
    options = dict( config.items(section) )
    matchAll = options.get('match-all')
    pattern = options.get('pattern')
    listName = options.get('list')
    retentions = options['retentions'].split(',')
    archives = [ Archive.fromString(s) for s in retentions ]
    if matchAll:
      mySchema = DefaultSchema(section, archives)
    elif pattern:
      mySchema = PatternSchema(section, pattern, archives)
    elif listName:
      mySchema = ListSchema(section, listName, archives)
    else:
      # Previously this fell through with mySchema unbound and crashed with
      # a NameError; treat it like any other invalid section instead.
      log.msg("Section %s has no match-all/pattern/list option, using defaults" % section)
      schemaList.append(defaultSchema)
      continue
    archiveList = [a.getTuple() for a in archives]
    try:
      whisper.validateArchiveList(archiveList)
      schemaList.append(mySchema)
    except whisper.InvalidConfiguration as e:  # 'as' form works on py2.6+ and py3
      log.msg("Invalid schemas found in %s: %s" % (section, e) )
      schemaList.append(defaultSchema)
  return schemaList
def loadAggregationSchemas():
  """Parse storage-aggregation.conf into Schema objects whose 'archives'
  attribute holds an (xFilesFactor, aggregationMethod) pair."""
  # NOTE: This abuses the Schema classes above, and should probably be refactored.
  schemaList = []
  config = OrderedConfigParser()
  try:
    config.read(STORAGE_AGGREGATION_CONFIG)
  except IOError:
    log.msg("%s not found, ignoring." % STORAGE_AGGREGATION_CONFIG)
  for section in config.sections():
    options = dict( config.items(section) )
    matchAll = options.get('match-all')
    pattern = options.get('pattern')
    listName = options.get('list')
    xFilesFactor = options.get('xfilesfactor')
    aggregationMethod = options.get('aggregationmethod')
    try:
      if xFilesFactor is not None:
        xFilesFactor = float(xFilesFactor)
        # Explicit ValueError instead of assert: asserts vanish under -O.
        if not 0 <= xFilesFactor <= 1:
          raise ValueError("xFilesFactor must be within [0, 1]")
      if aggregationMethod is not None:
        if aggregationMethod not in whisper.aggregationMethods:
          raise ValueError("unknown aggregation method")
    except ValueError:
      # Narrowed from a bare 'except:', which also swallowed
      # KeyboardInterrupt/SystemExit and hid programming errors.
      log.msg("Invalid schemas found in %s." % section )
      continue
    archives = (xFilesFactor, aggregationMethod)
    if matchAll:
      mySchema = DefaultSchema(section, archives)
    elif pattern:
      mySchema = PatternSchema(section, pattern, archives)
    elif listName:
      mySchema = ListSchema(section, listName, archives)
    else:
      # Previously fell through with mySchema unbound -> NameError at append.
      log.msg("Section %s has no match-all/pattern/list option, skipping." % section)
      continue
    schemaList.append(mySchema)
  schemaList.append(defaultAggregation)
  return schemaList
defaultArchive = Archive(60, 60 * 24 * 7) #default retention for unclassified data (7 days of minutely data)
# Fallback schema used when a config section is invalid or nothing matches.
defaultSchema = DefaultSchema('default', [defaultArchive])
# Default aggregation pair (xFilesFactor, aggregationMethod): both None -> whisper defaults.
defaultAggregation = DefaultSchema('default', (None, None))
| 27.548718 | 109 | 0.708116 |
a48664e32869d0ddd095f1e2b856f8331bb78054 | 23,925 | py | Python | equideepdmri/layers/layer_builders.py | philip-mueller/equivariant-deep-dmri | 4391de51eacea045783742bfd2ca2f1e2f7e964e | [
"MIT"
] | 13 | 2021-02-19T06:08:53.000Z | 2022-02-21T10:30:05.000Z | equideepdmri/layers/layer_builders.py | philip-mueller/equivariant-deep-dmri | 4391de51eacea045783742bfd2ca2f1e2f7e964e | [
"MIT"
] | 3 | 2021-03-25T14:18:40.000Z | 2021-11-05T12:32:57.000Z | equideepdmri/layers/layer_builders.py | philip-mueller/equivariant-deep-dmri | 4391de51eacea045783742bfd2ca2f1e2f7e964e | [
"MIT"
] | 5 | 2021-04-23T12:59:00.000Z | 2021-12-26T21:09:37.000Z | from collections import OrderedDict
from functools import partial
from typing import Union, List
import torch
from torch import nn
from e3nn.non_linearities.rescaled_act import sigmoid
from e3nn.non_linearities.gated_block import GatedBlock
from equideepdmri.layers.EquivariantPQLayer import EquivariantPQLayer
from equideepdmri.layers.BatchNormalization import BatchNorm
from equideepdmri.layers.QLengthWeightedPool import QLengthWeightedAvgPool
from equideepdmri.utils.q_space import Q_SamplingSchema
from equideepdmri.utils.spherical_tensor import SphericalTensorType
from equideepdmri.layers.filter.utils import get_scalar_non_linearity
def build_pq_layer(type_in: Union[SphericalTensorType, List[int]],
type_out: Union[SphericalTensorType, List[int]],
p_kernel_size: int,
kernel: str,
q_sampling_schema_in: Union[Q_SamplingSchema, torch.Tensor, List, None],
q_sampling_schema_out: Union[Q_SamplingSchema, torch.Tensor, List, None],
non_linearity_config=None,
use_non_linearity=True,
batch_norm_config=None,
use_batch_norm=True,
transposed=False,
auto_recompute=True,
**kernel_kwargs) -> nn.Module:
"""
Builds a pq-layer consisting of an EquivariantPQLayer followed by a nonlinearity (e.g. gated nonlinearity).
:param type_in: The spherical tensor type of the input feature map.
This defines how many channels of each tensor order the input feature map has.
It can either be given as SphericalTensorType object or as List[int]] the element at index i of the list
defines the number of order-i channels,
e.g. the first element defines the number of order-0 (scalar) channels
and the second the number of order-1 (vector) channels and so on.
For all orders corresponding to out-of-range indices the number of channels is 0.
:param type_out: The spherical tensor type of the output feature map (after non-linearity).
This defines how many channels of each tensor order the output feature map has.
It can either be given as SphericalTensorType object or as List[int]] the element at index i of the list
defines the number of order-i channels,
e.g. the first element defines the number of order-0 (scalar) channels
and the second the number of order-1 (vector) channels and so on.
For all orders corresponding to out-of-range indices the number of channels is 0.
:param p_kernel_size: Size of the kernel in p-space.
Note that the kernel always covers the whole q-space (as it is not translationally equivariant),
so there is no q_kernel_size.
:param kernel: Which filter basis to use in the EquivariantPQLayer layer.
Valid options are:
- "p_space": to use the p-space filter basis
using only p-space coordinate offsets in the angular and radial part.
- "q_space": to use the q-space filter basis
using only q-space coordinate offsets in the angular part
and q-space coordinates from input and output in the radial part.
- "pq_diff": to use the pq-diff filter basis
using difference between p- and q-space coordinate offsets in the angular part
and p-space coordinate offsets, q-space coordinates from input and output in the radial part.
- "pq_TP": to use the TP (tensor product) filter basis
using the tensor product of the p- and q-space filters in the angular part
and p-space coordinate offsets, q-space coordinates from input and output in the radial part.
- "sum(<filters>)": where <filters> is a ";"-separated list (without spaces) of valid options for kernel_definition,
e.g. "sum(pq_diff;p_space)" or "sum(pq_diff;q_space)". This uses the sum of the named basis filters.
- "concat(<filters>)" where <filters> is a ";"-separated list (without spaces) of strings "<output_channels>:<filter_type>"
where <output_channels> lists the channels of each order where the named filter is to be used
(e.g. "[3, 4]" to use it for 3 scalar and 4 vector output channelw) and
<filter_type> names a valid kernel_definition to use for these output channels.
The number of all concatenated channels needs to math type_out.
Example: "concat([3,4]:pq_diff,[5,2,1]:p_space)" which would require type_out = [8,6,1]
:param q_sampling_schema_in: The q-sampling schema of input feature map.
The q-sampling schema may either be given as a Q_SamplingSchema object,
a Tensor of size (Q_in, 3) or a list of length Q_in (one element for each vector) of lists of size 3 of floats.
Note that Q_in is not explicitly given but derived form the length of this parameter.
If this is None (default) then the input does not have q-space but only p-space.
:param q_sampling_schema_out: The q-sampling schema of output feature map.
The q-sampling schema may either be given as a Q_SamplingSchema object,
a Tensor of size (Q_out, 3) or a list of length Q_out (one element for each vector) of lists of size 3 of floats.
Note that Q_out is not explicitly given but derived form the length of this parameter.
If this is None (default) then the output does not have q-space but only p-space.
:param non_linearity_config: Dict with the following optional keys:
- tensor_non_lin: The nonlinearity to use for channels with l>0 (non-scalar channels).
Default (and currently only option) is "gated".
- scalar_non_lin: The nonlinearity to use for channles with l=0 (scalar channels).
Valid options are "swish" and "relu".
Default is "swish".
:param use_non_linearity: Whether to use a nonlinearity.
:param batch_norm_config: Dict with the following optional keys:
- eps: avoid division by zero when we normalize by the variance
- momentum: momentum of the running average
- affine: do we have weight and bias parameters
- reduce: method to contract over the spacial dimensions
:param use_batch_norm: Whether to use a batch normalization
:param transposed: Whether to perform a transposed convolution using the equivariant kernel
:param auto_recompute: Whether to automatically recompute the kernel in each forward pass.
By default it is recomputed each time.
If this parameter is set to false, it is not recomputed and the method recompute() needs to be called
explicitly after parameters of this nn.Module have been updated.
:param kernel_selection_rule: Rule defining which angular filter orders (l_filter) to use
for a paths form input orders l_in to output orders l_out.
Defaults to using all possible filter orders,
i.e. all l_filter with \|l_in - l_out\| <= l_filter <= l_in + l_out.
Options are:
- dict with key "lmax" and int value which additionally defines a maximum l_filter.
- dict with int-pairs as keys and list of ints as values that defines
for each pair of l_in and l_out the list of l_filter to use.
E.g. {(0,0): [0], (1,1): [0,1], (0,1): [1]}
:param p_radial_basis_type: The radial basis function type used for p-space.
Valid options are "gaussian" (default), "cosine", "bessel".
Note that this parameter is ignored if there is no basis filter using p-space.
:param p_radial_basis_params: A (optional) dict of additional parameters for the radial basis function used for p-space.
Valid keys in this dict are:
- num_layers: Number of layers in the FC applied to the radial basis function.
If num_layers = 0 (default) then no FC is applied to the radial basis function.
- num_units: Number of units (neurons) in each of the layer in the FC applied to the radial basis function.
No default, this parameter is required and must be >0 if num_layers > 0.
- activation_function: activation function used in the FC applied to the radial basis function,
valid are "relu" (default) or "swish"
Note that this parameter is ignored if there is no basis filter using p-space.
:param q_radial_basis_type: The radial basis function type used for q-space (q-in and q-out).
Valid options are "gaussian" (default), "cosine", "bessel".
Note that this parameter is ignored if there is no basis filter using q-space.
:param q_out_radial_basis_type: The radial basis function type used for q-out (q-space of output feature map).
See q_radial_basis_type but only for q-out.
Defaults to q_radial_basis_type.
:param q_in_radial_basis_type: The radial basis function type used for q-in (q-space of input feature map).
See q_radial_basis_type but only for q-in.
Defaults to q_radial_basis_type.
:param q_radial_basis_params: A (optional) dict of additional parameters for the radial basis function used for q-space.
Valid keys in this dict are:
- num_layers: Number of layers in the FC applied to the radial basis function.
If num_layers = 0 (default) then no FC is applied to the radial basis function.
- num_units: Number of units (neurons) in each of the layer in the FC applied to the radial basis function.
No default, this parameter is required and must be >0 if num_layers > 0.
- activation_function: activation function used in the FC applied to the radial basis function,
valid are "relu" (default) or "swish"
Note that this parameter is ignored if there is no basis filter using q-space.
:param q_out_radial_basis_params: A dict of additional parameters for the radial basis function used for q-out (q-space of output feature map).
See q_radial_basis_params but only for q-out.
Defaults to q_radial_basis_params.
:param q_in_radial_basis_params: A dict of additional parameters for the radial basis function used for q-in (q-space of input feature map).
See q_radial_basis_params but only for q-in.
Defaults to q_radial_basis_params.
:param sub_kernel_selection_rule:
Rule defining for the TP filter which pairs of l_p and l_q to use for each l_filter.
Defaults to "TP\pm 1".
Options are:
- dict with string keys: defines some constraints which combinations to use.
The following constraint always holds:
\|l_p - l_q\| <= l_filter <= l_p + l_q
Additionally constraints can be defined by the following keys in the dict:
- "l_diff_to_out_max": Maximum difference between l_p and l_filter as well as l_q and l_filter.
Default to 1 (as in "TP\pm 1")
- "l_max" (optional): Maximum value for l_p and l_q.
- "l_in_diff_max" (optional): Maximum difference between l_p and l_q.
- dict with ints as keys and list of int-pairs as values that defines
for each l_filter the used pairs of l_p and l_q.
E.g. {0: [(0, 0), (1, 1)], 1: [(0, 1), (1, 0), (1, 1)]}
Note that this parameter is ignored if no TP-filter basis is used.
For additional parameters see EquivariantPQLayer.
"""
type_in = SphericalTensorType.from_multiplicities_or_type(type_in)
type_out = SphericalTensorType.from_multiplicities_or_type(type_out)
if batch_norm_config is None:
batch_norm_config = {}
if non_linearity_config is None:
non_linearity_config = {}
if use_non_linearity:
type_non_lin_in, non_linearity = build_non_linearity(type_out, **non_linearity_config)
conv = EquivariantPQLayer(type_in, type_non_lin_in,
kernel_definition=kernel,
p_kernel_size=p_kernel_size,
q_sampling_schema_in=q_sampling_schema_in,
q_sampling_schema_out=q_sampling_schema_out,
transposed=transposed,
auto_recompute_kernel=auto_recompute,
**kernel_kwargs)
if use_batch_norm:
batch_norm = BatchNorm(type_non_lin_in.Rs, **batch_norm_config)
return nn.Sequential(
OrderedDict([('conv', conv), ('batch_norm', batch_norm), ('non_linearity', non_linearity)]))
else:
return nn.Sequential(OrderedDict([('conv', conv), ('non_linearity', non_linearity)]))
else:
conv = EquivariantPQLayer(type_in, type_out,
kernel_definition=kernel,
p_kernel_size=p_kernel_size,
q_sampling_schema_in=q_sampling_schema_in,
q_sampling_schema_out=q_sampling_schema_out,
transposed=transposed,
auto_recompute_kernel=auto_recompute,
**kernel_kwargs)
if use_batch_norm:
batch_norm = BatchNorm(type_out.Rs, **batch_norm_config)
return nn.Sequential(OrderedDict([('conv', conv), ('batch_norm', batch_norm)]))
else:
return conv
def build_p_layer(type_in: Union[SphericalTensorType, List[int]],
                  type_out: Union[SphericalTensorType, List[int]],
                  kernel_size: int,
                  non_linearity_config=None,
                  use_non_linearity=True,
                  batch_norm_config=None,
                  use_batch_norm=True,
                  transposed=False,
                  auto_recompute=True,
                  **kernel_kwargs):
    """
    Builds a p-layer: an EquivariantPLayer optionally followed by batch
    normalization and a (gated) nonlinearity.

    This is a thin wrapper around build_pq_layer with kernel='p_space' and no
    q-space on either side, so only positional (p-space) structure is used.

    :param type_in: Spherical tensor type of the input feature map, given as a
        SphericalTensorType or a list of channel multiplicities per order
        (index i = number of order-i channels).
    :param type_out: Spherical tensor type of the output feature map (after
        the nonlinearity), same format as type_in.
    :param kernel_size: Size of the kernel in p-space.
        (Note: an earlier docstring called this ``p_kernel_size``; the actual
        parameter name is ``kernel_size``.)
    :param non_linearity_config: Optional dict with keys "tensor_non_lin"
        (default "gated", currently the only option) and "scalar_non_lin"
        ("swish" (default) or "relu").
    :param use_non_linearity: Whether to append a nonlinearity.
    :param batch_norm_config: Optional dict of BatchNorm options
        (eps, momentum, affine, reduce).
    :param use_batch_norm: Whether to append batch normalization.
    :param transposed: Whether to perform a transposed convolution.
    :param auto_recompute: Whether the kernel is recomputed on every forward
        pass; if False, recompute() must be called after parameter updates.
    :param kernel_kwargs: Additional kernel options (kernel_selection_rule,
        p_radial_basis_type, p_radial_basis_params, ...); see EquivariantPLayer.
    :return: The assembled layer as a single nn.Module.
    """
    return build_pq_layer(type_in, type_out, kernel_size,
                          kernel='p_space',
                          q_sampling_schema_in=None, q_sampling_schema_out=None,
                          non_linearity_config=non_linearity_config,
                          use_non_linearity=use_non_linearity,
                          batch_norm_config=batch_norm_config,
                          use_batch_norm=use_batch_norm,
                          transposed=transposed,
                          auto_recompute=auto_recompute,
                          **kernel_kwargs)
def build_q_reduction_layer(type_in: Union[SphericalTensorType, List[int]], q_sampling_schema_in: Q_SamplingSchema,
                            reduction='length_weighted_average',
                            auto_recompute=True,
                            **kwargs):
    """
    Builds a q-reduction layer that globally reduces q-space, leaving only p-space.

    :param type_in: Spherical tensor type of the input feature map
        (SphericalTensorType or list of per-order channel multiplicities).
    :param q_sampling_schema_in: q-sampling schema of the input feature map.
    :param reduction: The reduction type:
        - 'length_weighted_average': QLengthWeightedAvgPool over q-space
          (extra kwargs passed through to it),
        - 'mean': plain average over the q dimension
          (NOTE: returns a functools.partial, not an nn.Module),
        - 'conv': an EquivariantPQLayer without output q-space
          (kwargs as in build_pq_layer, except type_out/q_sampling_schema_out).
    :param auto_recompute: Whether kernels are recomputed each forward pass.
    :return: Tuple (reduction_layer, type_out) with the created layer and the
        spherical tensor type of its output feature map.
    """
    type_in = SphericalTensorType.from_multiplicities_or_type(type_in)
    if reduction == 'length_weighted_average':
        pool = QLengthWeightedAvgPool(type_in, q_sampling_schema_in,
                                      auto_recompute=auto_recompute, **kwargs)
        return pool, type_in
    if reduction == 'mean':
        # Global average over the q axis (dim 2); output type is unchanged.
        return partial(torch.mean, dim=2), type_in
    if reduction == 'conv':
        requested_type = kwargs.pop('type_out', type_in)
        type_out = SphericalTensorType.from_multiplicities_or_type(requested_type)
        layer = build_pq_layer(type_in, type_out,
                               q_sampling_schema_in=q_sampling_schema_in,
                               q_sampling_schema_out=None,
                               **kwargs)
        return layer, type_out
    raise ValueError(f'q-reduction "{reduction}" not supported.')
def build_non_linearity(type_out: SphericalTensorType, tensor_non_lin='gated', scalar_non_lin='swish') -> (
        SphericalTensorType, nn.Module):
    """
    Builds a nonlinearity for spherical tensor feature maps.
    Currently only the gated nonlinearity is supported.

    :param type_out: Spherical tensor type of the output feature map
        (SphericalTensorType or list of per-order channel multiplicities).
    :param tensor_non_lin: Nonlinearity for the l>0 channels; only "gated".
    :param scalar_non_lin: Nonlinearity for the l=0 channels; "swish" or "relu".
    :return: Tuple (type_in, nonlinearity) where type_in is the expected input
        type (output type plus the gate channels) and nonlinearity the module.
    """
    type_out = SphericalTensorType.from_multiplicities_or_type(type_out)
    if tensor_non_lin != 'gated':
        raise ValueError(f'Tensor Non-linearity "{tensor_non_lin}" not supported.')
    scalar_act = get_scalar_non_linearity(scalar_non_lin)
    gated = GatedBlockNonLin(type_out.Rs, scalar_act, sigmoid)
    return SphericalTensorType.from_Rs(gated.Rs_in), gated
class GatedBlockNonLin(GatedBlock):
    """GatedBlock that always applies the gate along the channel dimension (dim=1)."""

    def forward(self, x):
        return super(GatedBlockNonLin, self).forward(x, dim=1)
| 59.962406 | 147 | 0.680418 |
ee255cc669db8c6a4b11e2b11078561f883e28e7 | 758 | py | Python | mylogging.py | PavelPu/PyIOT | 8d970e2106c6befa5e076096167faea489edd17c | [
"MIT"
] | null | null | null | mylogging.py | PavelPu/PyIOT | 8d970e2106c6befa5e076096167faea489edd17c | [
"MIT"
] | null | null | null | mylogging.py | PavelPu/PyIOT | 8d970e2106c6befa5e076096167faea489edd17c | [
"MIT"
] | null | null | null | import time
class Logging:
    """Simple file logger writing per-day log files under /home/pi/PyIOT/logs."""

    def __init__(self):
        """Record an 'App started' line in today's application log."""
        self._launchDate = time.strftime("%d %b %Y", time.localtime(time.time()))
        self._appLogFileName = '/home/pi/PyIOT/logs/applog/' + self._launchDate + '_applog.txt'
        # 'with' guarantees the handle is closed even if write() raises;
        # the original used open()/close() and leaked the handle on errors.
        with open(self._appLogFileName, 'a') as logfile:
            logfile.write(time.asctime(time.localtime(time.time())) + '\t' + 'App started\n')

    def logStateString(self, logStateString):
        """Append one line to today's JSON state log file."""
        self._dt = time.strftime("%d %b %Y", time.localtime(time.time()))
        self._logname = '/home/pi/PyIOT/logs/json/' + self._dt + '_log.txt'
        with open(self._logname, 'a') as state_log:
            state_log.write(logStateString + '\n')
9d6b6d7eca8819bb43dc7f73e3226a01f187c5d5 | 265 | py | Python | tests/artificial/transf_Anscombe/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Anscombe_MovingMedian_7__0.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/artificial/transf_Anscombe/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Anscombe_MovingMedian_7__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/artificial/transf_Anscombe/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Anscombe_MovingMedian_7__0.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 0); | 37.857143 | 165 | 0.732075 |
6ebfcf92d10f3189711ad2188630296080e73709 | 8,676 | py | Python | stable_baselines/plot.py | shanlior/OAL | 39c9eb24f64a27d3da09e92b6da9bf60326baabe | [
"MIT"
] | 3 | 2021-04-08T12:49:16.000Z | 2022-03-11T00:53:47.000Z | stable_baselines/plot.py | shanlior/OAL | 39c9eb24f64a27d3da09e92b6da9bf60326baabe | [
"MIT"
] | null | null | null | stable_baselines/plot.py | shanlior/OAL | 39c9eb24f64a27d3da09e92b6da9bf60326baabe | [
"MIT"
] | null | null | null | # DEPRECATED, use baselines.common.plot_util instead
on_policy = False
import os
import matplotlib.pyplot as plt
import numpy as np
import json
import seaborn as sns
sns.set()
import glob2
import argparse
import visdom;
vis = visdom.Visdom()
from PIL import Image
sns.set_style("whitegrid")
sns.set_context("paper")
# sns.set(rc={'figure.figsize':(200, 200)})
def smooth_reward_curve(x, y):
    """Smooth y with a centered moving average whose window scales with len(x).

    x is returned unchanged; y is convolved with a box filter and renormalized
    so boundary points are averaged over the valid overlap only.
    """
    k = int(np.ceil(len(x) / 60))  # halfwidth of the smoothing window
    window = np.ones(2 * k + 1)
    weights = np.convolve(np.ones_like(y), window, mode='same')
    return x, np.convolve(y, window, mode='same') / weights
def load_results(file):
    """Parse a progress.csv into {column_name: 1-D np.array}, or None.

    Returns None if the file is missing or has fewer than two lines
    (header plus at least one data row).
    """
    if not os.path.exists(file):
        return None
    with open(file, 'r') as f:
        lines = list(f)
    if len(lines) < 2:
        return None
    keys = [name.strip() for name in lines[0].split(',')]
    print("keys", keys, file)
    data = np.genfromtxt(file, delimiter=',', skip_header=1, filling_values=0.)
    if data.ndim == 1:
        # A single data row parses as 1-D; normalize to (1, n_columns).
        data = data.reshape(1, -1)
    assert data.ndim == 2
    assert data.shape[-1] == len(keys)
    return {key: data[:, idx] for idx, key in enumerate(keys)}
def pad(xs, value=np.nan):
    """Right-pad each array in xs along axis 0 to the longest length.

    Shorter arrays are extended with `value`; the result is stacked into a
    single np.array of shape (len(xs), maxlen, ...).
    """
    maxlen = np.max([len(x) for x in xs])
    padded = []
    for x in xs:
        if x.shape[0] < maxlen:
            filler = np.ones((maxlen - x.shape[0],) + x.shape[1:]) * value
            x = np.concatenate([x, filler], axis=0)
        assert x.shape[0] == maxlen
        padded.append(x)
    return np.array(padded)
# Command line: positional results directory, optional smoothing toggle.
parser = argparse.ArgumentParser()
parser.add_argument('dir', type=str)
parser.add_argument('--smooth', type=int, default=1)
args = parser.parse_args()
off_policy = not on_policy
if on_policy:
    method = 'on_policy'
else:
    method = 'off_policy'
# Load all data: one entry per <dir>/<method>/<env>/<algo>/progress.csv.
data = {}
paths = [os.path.abspath(os.path.join(path, '..')) for path in glob2.glob(os.path.join(args.dir, method, '**', 'progress.csv'))]
for curr_path in paths:
    if not os.path.isdir(curr_path):
        continue
    results = load_results(os.path.join(curr_path, 'progress.csv'))
    if not results:
        print('skipping {}'.format(curr_path))
        continue
    # Different loggers name their columns differently; fall back through the
    # known variants. Narrowed from bare 'except:' to KeyError so real errors
    # (e.g. KeyboardInterrupt) are no longer swallowed.
    try:
        rewards = np.array(results['ep_rewmean'])
        steps = results['steps']  # np.arange(len(results['EpisodesSoFar'])) + 1
    except KeyError:
        rewards = np.array(results['EpTrueRewMean'])
        try:
            steps = results['TimestepsSoFar']  # np.arange(len(results['n_updates'])) + 1
        except KeyError:
            steps = results['steps']  # np.arange(len(results['EpisodesSoFar'])) + 1
    # Directory layout is .../<env>/<algo>/progress.csv.
    env_algo = os.path.split(os.path.split(curr_path)[0])
    env = os.path.split(env_algo[0])[1]
    algo = env_algo[1]  # .split("_")[0]
    # Process and smooth data.
    assert rewards.shape == steps.shape
    x = steps
    y = rewards
    if args.smooth:
        x, y = smooth_reward_curve(steps, rewards)
    assert x.shape == y.shape
    if env not in data:
        data[env] = {}
    if algo not in data[env]:
        data[env][algo] = []
    data[env][algo].append((x, y))
legend = True
# Per-algorithm line colors (RGBA hex).
# NOTE(review): off_policy = not on_policy, so 'on_policy or off_policy' is
# always True and the else branch below is dead code -- confirm intent.
if on_policy or off_policy:
    color_id = {"mdal_linear": "#296E01FF", "mdal_neural": "#FF6F61FF", "gail": "#135DD8FF",
                "mdal_trpo_linear": "#296E01FF", "mdal_trpo_neural": "#FF6F61FF", "gail_off_policy": "#135DD8FF"}
else:
    color_id = {"mdal_linear": "#4F3466FF", "mdal_neural": "#FF6F61FF", "gail": "#FF0000FF",
                "mdal_trpo_linear": "#296E01FF", "mdal_trpo_neural": "#FF30AAFF", "gail_off_policy": "#135DD8FF"}
# alg_names = {"mdal_linear": "GAL Linear", "mdal_neural": "GAL Neural", "gail": "GAIL",
#              "mdal_trpo_linear": "GAL Linear TRPO", "mdal_trpo_neural": "GAL Neural TRPO", "gail_off_policy": "GAIL MDPO",
#              "Expert": "Expert"}
# Display names used in the shared legend.
alg_names = {"mdal_linear": "OAL Linear", "mdal_neural": "OAL Neural", "gail": "GAIL",
             "mdal_trpo_linear": "OAL Linear", "mdal_trpo_neural": "OAL Neural", "gail_off_policy": "GAIL",
             "Expert": "Expert"}
# Expert episode returns used to normalize each environment's curves.
expert_rewards = {"walker2d": 3464, "hopper": 3053, "halfcheetah": 9052, "humanoid": 6494, "ant": 3238}
# Which subplot column each environment occupies.
axes_order = {"walker2d": 0, "hopper": 1, "halfcheetah": 2, "humanoid": 3, "ant": 4}
uniform_legend = True
if uniform_legend:
    fig, axs = plt.subplots(ncols=5, figsize=(16,3))
else:
    fig, axs = plt.subplots(ncols=5, figsize=(16,6))
for env_id in sorted(data.keys()):
print('exporting {}'.format(env_id))
# plt.clf()
# plt.xlim(-0.1, 3)
legend_entries = []
if env_id == "invertedpendulum":
continue
axes_id = axes_order[env_id]
ax = axs[axes_id]
# ax = axs[axes_id // 2][axes_id % 2]
x_max_total = 0
for algo in sorted(data[env_id].keys()):
legend_entries.append(algo)
xs, ys = zip(*data[env_id][algo])
# makes sure all trajectories are of the same length (comment out if not required)
min_len = np.min([l.shape[0] for l in xs])
xs, ys = [x[:min_len] for x in xs], [y[:min_len] for y in ys]
if on_policy and env_id == "humanoid":
xs, ys = [x[:4882] for x in xs], [y[:4882] for y in ys]
ax.set_xlim([0,3])
if off_policy and env_id == "ant":
xs, ys = [x[:1501] for x in xs], [y[:1501] for y in ys]
ax.set_xlim([0,3])
xs, ys = pad(xs), pad(ys)
assert xs.shape == ys.shape
x_max = np.max(xs) / 1e6
if x_max > x_max_total:
x_max_total = x_max
if on_policy and env_id == "humanoid":
x_max_total = 5
mean = np.mean(ys, axis=0) / expert_rewards[env_id]
std = np.nanstd(ys, axis=0)
nSeeds = ys.shape[0]
ci_coef = 1.96 / (np.sqrt(nSeeds) * expert_rewards[env_id])
# if algo == "mdal_neural" and env_id == "walker2d":
# entry = 630
# xs = xs[:,:entry]
# mean = mean[:entry]
# std = std[:entry]
if algo in color_id.keys():
color = color_id[algo]
else:
color = np.random.rand(3,)
ax.plot(xs[0] / 1e6, mean, label=algo, color=color)
ax.fill_between(xs[0] / 1e6, mean - ci_coef * std, mean + ci_coef * std, alpha=0.2, color=color)
expert_line = ax.hlines(1, 0, x_max_total, colors='k', linestyles='dashed')
ax.text(0.05, 1.01, format(expert_rewards[env_id]), fontsize=11, rotation_mode='anchor')
if not on_policy:
ax.set_title(env_id, fontsize=14)
if not off_policy:
ax.set_xlabel('Timesteps (1e6)', fontsize=11)
if axes_id == 0:
ax.set_ylabel('Mean Episode Reward', fontsize=11)
# ax.set_xticks(fontsize=11)
# ax.set_yticks(fontsize=11)
ax.tick_params(axis="x", labelsize=11)
ax.tick_params(axis="y", labelsize=11)
handles, labels = ax.get_legend_handles_labels()
handles += [expert_line]
labels += ['Expert']
# order = [0, 1, 2, 3, 4]
# legend = plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order], ncol=5)
if not uniform_legend and legend:
legend = ax.legend(handles, labels, ncol=1, bbox_to_anchor=(0.5, -1.3), loc='lower center')
# legend = ax.legend(handles, labels, loc='lower center')
fig.subplots_adjust(bottom=0.5)
# env_fig.axes[0] = ax
# Uncomment for separate graphs
# import pickle
# p = pickle.dumps(ax)
# env_ax = pickle.loads(p)
# env_ax.change_geometry(1,1,1)
#
# env_fig = plt.figure(figsize=(13,8))
# env_fig._axstack.add(env_fig._make_key(env_ax), env_ax)
# env_fig.axes.append(env_ax)
# env_fig.tight_layout()
# plt.gca().set_aspect('equal', adjustable='box', anchor='NW')
# env_ax.legend(handles, labels, ncol=1, bbox_to_anchor=(0.5, -0.9), loc='lower center')
#
# env_fig.subplots_adjust(bottom=0.4)
# env_fig.savefig(os.path.join(args.dir, 'fig_{}.png'.format(env_id)))
if uniform_legend:
labels = [alg_names[label] for label in labels]
if legend:
# legend = ax.legend(handles, labels, ncol=7, bbox_to_anchor=(2.23, -0.5), loc='lower center', fontsize=11)
legend = axs[3].legend(handles, labels, ncol=1, loc='right', bbox_to_anchor=(1, 0.33), fontsize=9)
fig.subplots_adjust(bottom=0.3)
# fig.set_figwidth(14)
fig.savefig(os.path.join(args.dir, method, '{}.png'.format(method)), bbox_inches='tight')
| 35.412245 | 128 | 0.603389 |
9174285d3dd847cfbdbe743405f34de766a938ff | 6,746 | py | Python | tensorspark/example/spark_mnist.py | liangfengsid/tensoronspark | 76f0d7ed4a17879f6dfc1817ba8bd7c58459d803 | [
"Apache-2.0"
] | 21 | 2016-08-16T15:34:44.000Z | 2021-09-17T22:36:46.000Z | tensorspark/example/spark_mnist.py | liangfengsid/tensorspark | 76f0d7ed4a17879f6dfc1817ba8bd7c58459d803 | [
"Apache-2.0"
] | 7 | 2016-08-10T11:06:07.000Z | 2017-07-06T16:01:05.000Z | tensorspark/example/spark_mnist.py | liangfengsid/tensorspark | 76f0d7ed4a17879f6dfc1817ba8bd7c58459d803 | [
"Apache-2.0"
] | 11 | 2016-08-07T09:23:58.000Z | 2019-06-27T15:20:55.000Z | import numpy as np
import struct
import tensorflow as tf
import tensorflow.python.framework.dtypes as dtypes
import pyspark
import random
import tensorspark.core.spark_session as sps
import tensorspark.core.partitioner as par
import tensorspark.core.weight_combiner as comb
def extract_images(sc, filepath, dtype=dtypes.float32, reshape=True):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth].

    Reads MNIST "idx"-format image files as binary blobs through Spark and
    parses them on the executors.

    :param sc: SparkContext used to read the binary file(s).
    :param filepath: path/glob of the MNIST image file(s) (e.g. on HDFS).
    :param dtype: if dtypes.float32, pixel values are rescaled into [0.0, 1.0].
    :param reshape: if True, each image is flattened to a (rows * cols,) row.
    :return: RDD of (file_index, images) pairs, one entry per input file.

    NOTE(review): Python 2 only -- relies on ``buffer`` and ``iterator.next()``.
    """
    def _extract_single_image_file(iterator):
        # Partition-level parser: each element is a (filename, raw bytes)
        # pair as produced by SparkContext.binaryFiles().
        file_image_map = []
        while True:
            try:
                pathData = iterator.next()
                filename = pathData[0]
                # Key used later to join images with their label file; taken
                # from the second-to-last '-'-separated token of the filename.
                fileindex = filename.split('-')[-2]
                data = pathData[1]
                # idx header: magic, #images, rows, cols (big-endian int32).
                image_meta = struct.unpack('>iiii', buffer(data, 0, 16))
                magic = image_meta[0]
                if magic != 2051:
                    raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, filename))
                num_images = image_meta[1]
                rows = image_meta[2]
                cols = image_meta[3]
                buf = buffer(data, 16)
                images = np.frombuffer(buf, dtype=np.uint8)
                images = images.reshape(num_images, rows, cols, 1)
                if reshape == True:
                    # Flatten each image to a single feature vector.
                    images = images.reshape(num_images, rows * cols)
                if dtype == dtypes.float32:
                    # Normalize pixel intensities from [0, 255] to [0.0, 1.0].
                    images = np.multiply(images, 1.0 / 255.0)
                file_image_map.append((fileindex, images))
            except StopIteration:
                break
        return file_image_map
    data_rdd = sc.binaryFiles(filepath)
    image_rdd = data_rdd.mapPartitions(_extract_single_image_file)
    return image_rdd
def extract_labels(sc, filepath, num_class, one_hot=False):
    """Extract MNIST labels into a 1D uint8 array (or a one-hot matrix).

    :param sc: SparkContext used to read the binary file(s).
    :param filepath: path/glob of the MNIST label file(s).
    :param num_class: number of classes (10 for MNIST).
    :param one_hot: if True, convert labels to one-hot vectors.
    :return: RDD of (file_index, labels) pairs, one entry per input file.

    NOTE(review): Python 2 only -- relies on ``buffer`` and ``iterator.next()``.
    """
    def _extract_single_label_file(iterator):
        file_label_map = []
        while True:
            try:
                pathData = iterator.next()
                filename = pathData[0]
                # Join key matching the one produced by extract_images().
                fileindex = filename.split('-')[-2]
                data = pathData[1]
                # idx header: magic, #labels (big-endian int32).
                label_meta = struct.unpack('>ii', buffer(data, 0, 8))
                magic = label_meta[0]
                if magic != 2049:
                    raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, filename))
                num_labels = label_meta[1]
                buf = buffer(data, 8)
                labels = np.frombuffer(buf, dtype=np.uint8)
                if one_hot:
                    labels = dense_to_one_hot(labels, num_class)
                file_label_map.append((fileindex, labels))
            except StopIteration:
                break
        return file_label_map
    data_rdd = sc.binaryFiles(filepath)
    label_rdd = data_rdd.mapPartitions(_extract_single_label_file)
    return label_rdd
def dense_to_one_hot(labels, num_class):
    """Convert class labels from scalars to one-hot vectors.

    :param labels: 1D integer array of class labels.
    :param num_class: total number of classes (width of the output rows).
    :return: float array of shape (len(labels), num_class), with exactly one
        1.0 per row at the column given by the label.
    """
    count = labels.shape[0]
    one_hot = np.zeros((count, num_class))
    # Row i gets a 1 in the column named by labels[i].
    one_hot[np.arange(count), labels.ravel()] = 1
    return one_hot
def flatten_image_label(iteration):
    """Flatten joined (fileindex, (images, labels)) records into a flat list
    of per-sample (image, label) pairs.

    NOTE(review): Python 2 only -- uses ``iteration.next()`` and ``xrange``.
    """
    image_label_list = []
    while True:
        try:
            file_image_label = iteration.next()
            images = file_image_label[1][0]
            labels = file_image_label[1][1]
            # Every image must have a matching label.
            assert (images.shape[0] == labels.shape[0])
            for i in xrange(images.shape[0]):
                image_label_list.append((images[i], labels[i]))
        except StopIteration:
            break
    return image_label_list
# Location of the MNIST "idx" files on HDFS.
# NOTE(review): standard MNIST image files are named *-images-idx3-ubyte,
# not idx1. However, extract_images()/extract_labels() join image and label
# RDDs on the second-to-last '-'-separated filename token ("idx1" here), so
# these names must match the actual files on HDFS *and* keep the join keys
# equal -- confirm before "fixing" to idx3.
data_dir = '/data/mnist/'  # Should be some file on hdfs
train_image_path = data_dir + 'train-images-idx1-ubyte'
train_label_path = data_dir + 'train-labels-idx1-ubyte'
test_image_path = data_dir + 't10k-images-idx1-ubyte'
test_label_path = data_dir + 't10k-labels-idx1-ubyte'
class RandomPartitioner(object):
    """Spark partitioner that assigns each key to a uniformly random
    partition in [0, num_partition).

    :param num_partition: total number of partitions.
    """
    def __init__(self, num_partition):
        self.num_partition = num_partition

    def __eq__(self, other):
        # Two partitioners are interchangeable iff they produce the same
        # partition range.
        return (isinstance(other, RandomPartitioner)
                and self.num_partition == other.num_partition)

    def __call__(self, k):
        # Bug fix: the original referenced an unbound global `num_partition`
        # (NameError at call time); use the instance attribute instead.
        return random.randint(0, self.num_partition - 1)
def train(sc=None, user=None, name='spark_mnist', server_host='localhost', server_port=10080, sync_interval=100, batch_size=100, num_partition=1, num_epoch=1, server_reusable=True):
    """Train a softmax-regression MNIST classifier on Spark via TensorSpark.

    :param sc: existing SparkContext; a private one is created (and torn down)
        when None.
    :param user: user identifier passed to the TensorSpark session.
    :param name: session name.
    :param server_host: parameter-server host.
    :param server_port: parameter-server port.
    :param sync_interval: how often workers sync weights with the server.
    :param batch_size: mini-batch size per training step.
    :param num_partition: number of RDD partitions (parallel workers).
    :param num_epoch: number of passes over the training data.
    :param server_reusable: keep the parameter server alive across epochs.
    """
    is_new_sc = False
    if sc is None:
        sc = pyspark.SparkContext(conf=pyspark.SparkConf())
        is_new_sc = True
    # Join images with labels per source file, then flatten to
    # (image, label) pairs.
    image_rdd = extract_images(sc, train_image_path)
    label_rdd = extract_labels(sc, train_label_path, num_class=10, one_hot=True)
    image_label_rdd = image_rdd.join(label_rdd, numPartitions=num_partition).mapPartitions(flatten_image_label).cache()
    # Softmax regression model: y = softmax(x.W + b).
    # NOTE(review): uses pre-1.0 TF APIs (initialize_all_variables, tf.log).
    x = tf.placeholder(tf.float32, [None, 784])
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    y_ = tf.placeholder(tf.float32, [None, 10])
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    feed_name_list = [x.name, y_.name]
    param_list = [W, b]
    spark_sess = sps.SparkSession(sc, sess, user=user, name=name, server_host=server_host, server_port=server_port, sync_interval=sync_interval, batch_size=batch_size)
    partitioner = par.RandomPartitioner(num_partition)
    combiner = comb.DeltaWeightCombiner()
    for i in range(num_epoch):
        spark_sess.run(train_step, feed_rdd=image_label_rdd, feed_name_list=feed_name_list, param_list=param_list, weight_combiner=combiner, shuffle_within_partition=True, server_reusable=server_reusable)
        # Reshuffle between epochs so each worker sees different samples.
        if i != num_epoch-1:
            temp_image_label_rdd = image_label_rdd.partitionBy(num_partition, partitioner).cache()
            image_label_rdd.unpersist()
            image_label_rdd = temp_image_label_rdd
    # Since the parameter server is reusable in this spark_sess.run() example,
    # one should stop the parameter server manually when it is no longer used.
    if server_reusable:
        spark_sess.stop_param_server()
    if is_new_sc:
        # NOTE(review): SparkContext exposes stop(), not close(); this line
        # likely raises AttributeError -- confirm and switch to sc.stop().
        sc.close()
    # Evaluate accuracy on the locally-downloaded MNIST test set.
    from tensorflow.examples.tutorials.mnist import input_data
    mnist_data = input_data.read_data_sets("MNIST_data/", one_hot=True)
    correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print(sess.run(accuracy, feed_dict={x: mnist_data.test.images, y_: mnist_data.test.labels}))
    spark_sess.close()
| 34.594872 | 198 | 0.728432 |
5e0bdcd3f0d3e2770623e5b9e275a65598fe4f03 | 6,746 | py | Python | demo.py | pranaysy/ETCPy | d08c50ae5e379ee11cc9d9eb076ae4319516314c | [
"Apache-2.0"
] | 10 | 2020-07-30T12:03:24.000Z | 2022-02-14T17:45:29.000Z | demo.py | HarikrishnanNB/ETCPy | afb476629c5cd858fdc8bcbbab23d74efe18c871 | [
"Apache-2.0"
] | null | null | null | demo.py | HarikrishnanNB/ETCPy | afb476629c5cd858fdc8bcbbab23d74efe18c871 | [
"Apache-2.0"
] | 2 | 2021-02-18T07:20:34.000Z | 2021-03-15T04:10:33.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This is a demo script for showcasing this package's functionality in brief.
@author: Pranay S. Yadav
"""
# Import call
import ETC
# ------------------------
# IO & SEQUENCE MANAGEMENT
# ------------------------

# Read data to a list
text = ETC.read(filepath="somefile.txt", delimiter=",")  # Pick any file

# Check validity of input and automatically cast to the right form if valid
ETC.cast(text)

# Recode data to integers in lexicographic order
ETC.recode_lexical("bbacdbedf", case_sensitive=False)

# Partition real-valued data to integer-valued discrete data
ETC.partition([0.1, 0.34, 0.68, -1.9, 25.3], n_bins=2)

# Generate synthetic data from the discrete uniform distribution
ETC.generate(size=1000, partitions=4)

# Reproducibility of random generation can be controlled by passing the same seed value
ETC.generate(size=1000, partitions=4, seed=101)

# Compute Shannon Entropy for a sequence
ETC.entropy(seq=[1, 2, 1, 1, 1, 2, 1])

# ---------------------------------------
# 1D ETC ESTIMATION FOR A SINGLE SEQUENCE
# ---------------------------------------
# Generate a random discrete symbolic sequence
seq = ETC.generate(size=1000, partitions=2, seed=31)

# Simplest way to run
out = ETC.compute_1D(seq)

# The result is a dict of 2 key-value pairs: the raw and normalized ETC estimates
print(out)

# Get whichever is needed by using their respective keys
print(out.get('ETC1D'))
# [Out]: 225

print(out.get('NETC1D'))
# [Out]: 0.22522522522522523

# The normalization is done over one less than the overall length
print(out.get('ETC1D') / (len(seq) - 1))
# [Out]: 0.22522522522522523

# If more details about the trajectory are desired, set verbosity to True
out = ETC.compute_1D(seq, verbose=True)

# The result is now a dict of 3 elements: the 2 ETC estimates and the Trajectory
print(out.get('Trajectory'))  # List of dicts - one dict for each step

# The default behavior is to truncate the iteration process until the sequence gets
# saturated to have all unique pairs occurring just once. This speeds up computation as
# the remaining steps don't need to be computed and ETC reduces to an analytic expression.
# However, the substitution table or features may be of interest and this truncation can
# then be turned off so that the iteration continues till entropy of 0 is reached:
out = ETC.compute_1D(seq, verbose=True, truncate=False)
print(out.get('Trajectory'))  # Last step has length 1 and entropy 0

# This Trajectory can be saved to CSV for later use through a convenience function:
ETC.save(out.get('Trajectory'), filename="ETC_results.csv")

# -------------------------------------------------------------------------------------#
# Additionally, instead of pair-substitution (NSRPS), a window of any size may be
# substituted using the order switch, for example substitute triplets:
out = ETC.compute_1D(seq, order=3, verbose=True, truncate=False)
print(out.get("Trajectory"))

# The default function call ETC.compute_1D(seq) is the same as:
# ETC.compute_1D(seq, order=2, verbose=False, truncate=True)

# --------------------------------------------------------------
# PARALLELIZED 1D ETC ESTIMATION FOR CHUNKS OF A SINGLE SEQUENCE
# --------------------------------------------------------------
# Generate a long sequence
seq = ETC.generate(size=20000, partitions=2)

# Compute ETC on overlapping chunks of 1000 elements, offset by 100, in parallel.
# The __main__ guard is required: multiprocessing re-imports this module in
# worker processes.
if __name__ == "__main__":
    outp = ETC.pcompute_single(seq, size=1000, offset=100)

    # The output is a list of dictionaries with estimates, one dict for each ordered chunk
    print(outp)

# Compute ETC on non-overlapping chunks of 1000 elements (set offset = size), in parallel
if __name__ == "__main__":
    outp = ETC.pcompute_single(seq, size=1000, offset=1000)

    # Similarly,
    print(outp)

# ------------------------------------------------------------------
# PARALLELIZED 1D ETC ESTIMATION FOR MULTIPLE SEQUENCES IN PARALLEL
# ------------------------------------------------------------------
# Generate 10 long sequences
seqs = [ETC.generate(10000, 2) for _ in range(10)]

# Compute ETC estimates for each sequence in parallel
if __name__ == "__main__":
    outp = ETC.pcompute_multiple_seq(seqs)
    print(outp)

# --------------------------------
# WORKS WITH NUMPY OUT OF THE BOX!
# --------------------------------
# Generate a random discrete symbolic sequence and compute 1D ETC on it
import numpy as np

np.random.seed(10)
seq = np.random.randint(1, 3, size=5000)
out = ETC.compute_1D(seq)
print(out)
# {'ETC1D': 884, 'NETC1D': 0.17683536707341468}

# Parallelized ETC estimation - row-wise for 2D numpy arrays
seq = np.random.normal(1, 3, size=[10,5000])  # Each row is a distinct sequence
seq = ETC.partition_numpy(nparr=seq, n_bins=2)
out = ETC.pcompute_numpy(nparr=seq)
print(out)
# One estimate per row

# -----------------------------------------
# 2D ETC ESTIMATION FOR A PAIR OF SEQUENCES
# -----------------------------------------
# Generate two random sequences
seq_x = ETC.generate(size=1000, partitions=2, seed=17)
seq_y = ETC.generate(size=1000, partitions=2, seed=19)

# Compute Effort To Compress using Non-Sequential Recursive Pair Substitution
out = ETC.compute_2D(seq_x, seq_y, order=2, verbose=True, truncate=False)

# View estimates
print(out.get('ETC2D'))
print(out.get('NETC2D'))

# View trajectory
print(out.get('Trajectory'))

# -----------------------------------------
# CAUSALITY TESTING USING THE CCC FRAMEWORK
# -----------------------------------------
# Import call for CCC sub-package
from ETC import CCC

# Compute CCC for the above two sequences
ccc_est = CCC.compute(
    seq_x, seq_y, LEN_past=150, ADD_meas=15, STEP_size=20, n_partitions=False
)
# [Out]: CCC for seq_y -> seq_x = -0.00301035257856264

# See docstrings for more information on CCC estimation
# ?CCC.compute

# Simulate a pair of coupled first-order AR processes
ar = CCC.coupled_AR(length=10000, a=0.8, b=0.9, c=0.8, e=0.01, burn=1000, seed=1)

# ar is a dictionary of two key-value pairs with the following keys:
# "dependent" and "independent", each with their respective values in float arrays
# ?CCC.coupled_AR for more information on sampling from AR processes

# Estimate CCC for the direction independent -> dependent with binning
ccc_ar = CCC.compute(
    seq_x=ar["dependent"],
    seq_y=ar["independent"],
    LEN_past=150,
    ADD_meas=15,
    STEP_size=20,
    n_partitions=2,
)
# [Out]: CCC for seq_y -> seq_x = 0.005755172746030292

# And for the opposite direction
ccc_ar = CCC.compute(
    seq_x=ar["independent"],
    seq_y=ar["dependent"],
    LEN_past=150,
    ADD_meas=15,
    STEP_size=20,
    n_partitions=2,
)
# [Out]: CCC for seq_y -> seq_x = 0.0002971309733327245
| 33.73 | 90 | 0.662763 |
bd426afbb01eb1b5695f78808d1d36acf0c1c3ca | 5,581 | py | Python | lib/app/reportdatasources/report_metrics.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | lib/app/reportdatasources/report_metrics.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | lib/app/reportdatasources/report_metrics.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# ReportMetrics datasource
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import time
from collections import namedtuple, OrderedDict
# NOC modules
from noc.core.clickhouse.connect import connection
from .base import BaseReportColumn
class ReportMetrics(BaseReportColumn):
    """Base datasource extracting per-managed-object metrics from ClickHouse.

    Subclasses must define:
      * TABLE_NAME -- the ClickHouse table to query;
      * SELECT_QUERY_MAP -- {(column#, field, alias): aggregate expression},
        where an empty expression means the default ``avg(field)``;
      * KEY_FIELDS -- GROUP BY / ORDER BY fields, ``managed_object`` first.
    """

    # Managed objects are queried in chunks to keep the IN (...) list bounded.
    CHUNK_SIZE = 4000
    TABLE_NAME = None
    SELECT_QUERY_MAP = {
        # (List#, Name, Alias): TypeNormalizer or (TypeNormalizer, DefaultValue)
    }
    # Extra WHERE/HAVING conditions appended to the generated query.
    CUSTOM_FILTER = {"having": [], "where": []}
    KEY_FIELDS = None

    def __init__(self, mos_ids, f_date, to_date, columns=None):
        """
        :param mos_ids: managed object ids to report on
        :param f_date: interval start (date/datetime)
        :param to_date: interval end (date/datetime)
        :param columns: optional subset (list) or replacement (OrderedDict)
            of the class-level ATTRS column map
        """
        super(ReportMetrics, self).__init__(mos_ids)
        self.from_date = f_date
        self.to_date = to_date
        self.ch_client = connection()
        if not (self.TABLE_NAME and self.SELECT_QUERY_MAP):
            raise NotImplementedError
        if columns and isinstance(columns, list):
            # Keep only the requested columns.
            for c in set(self.ATTRS) - set(columns):
                self.ATTRS.pop(c)
        elif columns and isinstance(columns, OrderedDict):
            self.ATTRS = columns
        self.unknown_value = ([[""] * (len(self.SELECT_QUERY_MAP) + len(self.KEY_FIELDS))],)

    @staticmethod
    def get_mo_filter(ids, use_dictionary=False):
        """Build the WHERE condition restricting rows to the given MO ids."""
        return "managed_object IN (%s)" % ", ".join(str(c) for c in ids)

    def get_custom_conditions(self):
        """Return extra {"where": [...], "having": [...]} query conditions."""
        return self.CUSTOM_FILTER

    def get_query_ch(self, from_date, to_date):
        """Render the SELECT query for the given interval.

        The managed-object filter is left as a '%s' placeholder that
        do_query() fills per chunk.
        """
        ts_from_date = time.mktime(from_date.timetuple())
        ts_to_date = time.mktime(to_date.timetuple())
        custom_conditions = self.get_custom_conditions()
        def_map = {
            "q_select": [],
            "q_where": [
                "%s",  # managed-object filter placeholder
                "(date >= toDate(%d)) AND (ts >= toDateTime(%d) AND ts <= toDateTime(%d))"
                % (ts_from_date, ts_from_date, ts_to_date),
            ]
            + custom_conditions["where"][:],
            "q_group": self.KEY_FIELDS,
            "q_having": custom_conditions["having"][:],
            "q_order_by": self.KEY_FIELDS,
        }
        for num, field, alias in sorted(self.SELECT_QUERY_MAP, key=lambda x: x[0]):
            # An empty mapping value means "use the default avg() aggregate".
            func = self.SELECT_QUERY_MAP[(num, field, alias)] or "avg(%s)" % field
            def_map["q_select"] += ["%s AS %s" % (func, alias or "a_" + field)]
        query = " ".join(
            [
                "SELECT %s" % ",".join(def_map["q_select"]),
                "FROM %s" % self.TABLE_NAME,
                "WHERE %s" % " AND ".join(def_map["q_where"]),
                "GROUP BY %s" % ",".join(def_map["q_group"]),
                "HAVING %s" % " AND ".join(def_map["q_having"]) if def_map["q_having"] else "",
                "ORDER BY %s" % ",".join(def_map["q_order_by"]),
            ]
        )
        return query

    def do_query(self):
        """Execute the query chunk-by-chunk and yield raw result rows."""
        mo_ids = self.sync_ids[:]
        f_date, to_date = self.from_date, self.to_date
        query = self.get_query_ch(f_date, to_date)
        while mo_ids:
            chunk, mo_ids = mo_ids[: self.CHUNK_SIZE], mo_ids[self.CHUNK_SIZE :]
            for row in self.ch_client.execute(query % self.get_mo_filter(chunk)):
                yield row

    def extract(self):
        """Yield (managed_object_id, [Metrics, ...]) grouped by managed object.

        Relies on do_query() returning rows ordered by managed_object (the
        generated query ORDERs BY KEY_FIELDS, which start with it).
        """
        # Record fields: key fields minus the leading managed_object (it is
        # consumed as the grouping key, not part of the payload), followed by
        # the metric columns. Bug fix: the original used x[1] of each field
        # *name*, yielding duplicate one-character namedtuple fields.
        key_names = [k for k in self.KEY_FIELDS if k != "managed_object"]
        Metrics = namedtuple("Metrics", key_names + list(self.ATTRS))
        Metrics.__new__.__defaults__ = ("",) * len(Metrics._fields)
        current_mo, block = None, []
        for row in self.do_query():
            if current_mo and row[0] != current_mo:
                # Managed object changed: flush the *previous* object's block.
                # Bug fix: the original yielded row[0] (the new mo) and also
                # clobbered the accumulated list with `block = row[1:]`.
                yield int(current_mo), block
                block = []
            block += [Metrics(*row[1:])]
            current_mo = row[0]
        if current_mo and block:
            yield int(current_mo), block
class ReportInterfaceMetrics(ReportMetrics):
    """Interface load/packet/error metrics from the ClickHouse 'interface' table."""
    TABLE_NAME = "noc.interface"
    SELECT_QUERY_MAP = {
        # (List#, Name, Alias): aggregate expression; "" means default avg().
        # Column#, db_name, column_alias, query
        (0, "managed_object", None): "",
        # Interface path is stored as an array; join it into one string.
        (1, "path", "iface"): "arrayStringConcat(path)",
        # 90th percentile of input load, rounded to an integer.
        (2, "load_in", "l_in"): "round(quantile(0.90)(load_in), 0)",
        (3, "load_in", "load_in_p"): "",
        (4, "load_out", None): "",
        (5, "load_out", "load_out_p"): "",
        (6, "packets_in", None): "",
        (7, "packets_out", None): "",
        (8, "errors_in", None): "",
        (9, "errors_out", None): "",
        (10, "speed", None): "",
    }
    # Group/order rows by managed object and interface path.
    KEY_FIELDS = ("managed_object", "path")
class ReportCPUMetrics(ReportMetrics):
    """Average CPU usage metrics from the ClickHouse 'cpu' table."""
    TABLE_NAME = "noc.cpu"
    # "" -> default aggregate avg(<field>).
    SELECT_QUERY_MAP = {(0, "managed_object", None): "", (1, "usage", "cpu_usage"): ""}
    KEY_FIELDS = ["managed_object", "path"]
class ReportMemoryMetrics(ReportMetrics):
    """Average memory usage metrics from the ClickHouse 'memory' table."""
    TABLE_NAME = "noc.memory"
    # "" -> default aggregate avg(<field>).
    SELECT_QUERY_MAP = {(0, "managed_object", None): "", (1, "usage", "memory_usage"): ""}
    KEY_FIELDS = ["managed_object", "path"]
class ReportPingMetrics(ReportMetrics):
    """Average round-trip-time metrics from the ClickHouse 'ping' table."""
    TABLE_NAME = "noc.ping"
    # The field must be the bare column name: the base class wraps it in
    # avg() when the mapped expression is empty. The previous value
    # "avg(rtt)" produced the invalid nested aggregate "avg(avg(rtt))".
    SELECT_QUERY_MAP = {(0, "managed_object", None): "", (1, "rtt", "ping_rtt"): ""}
    KEY_FIELDS = ["managed_object"]
| 38.756944 | 95 | 0.555456 |
9f1057eea13f4043fb2cd648eb4988fada76cdf3 | 1,976 | py | Python | wultlibs/OSInfo.py | intel/wult | 578c1714565c85afe90643b55c390b0ec4069252 | [
"Intel",
"BSD-3-Clause"
] | 4 | 2020-10-29T15:27:18.000Z | 2021-11-14T17:11:44.000Z | wultlibs/OSInfo.py | intel/wult | 578c1714565c85afe90643b55c390b0ec4069252 | [
"Intel",
"BSD-3-Clause"
] | null | null | null | wultlibs/OSInfo.py | intel/wult | 578c1714565c85afe90643b55c390b0ec4069252 | [
"Intel",
"BSD-3-Clause"
] | 2 | 2021-01-04T10:11:53.000Z | 2021-11-09T12:37:52.000Z | # -*- coding: utf-8 -*-
# vim: ts=4 sw=4 tw=100 et ai si
#
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
# Author: Antti Laakso <antti.laakso@intel.com>
"""
This module provides API for discovering Operating System information.
"""
import contextlib
from pathlib import Path
from pepclibs.helperlibs.Exceptions import Error
from pepclibs.helperlibs import Procs
# Tool name -> distribution package maps. These packages are the same on all
# supported distributions.
_COMMONPKGS = { "phc2sys" : "linuxptp" , "tc" : "iproute2" }

# Fedora/CentOS-specific packages, extended with the common ones.
_FEDORAPKGS = { "sch_etf.ko" : "kernel-modules-extra" }
_FEDORAPKGS.update(_COMMONPKGS)

# Debian/Ubuntu-specific packages, extended with the common ones.
_DEBIANPKGS = { "sch_etf.ko" : "linux-modules" }
_DEBIANPKGS.update(_COMMONPKGS)

# OS 'NAME' (as reported by os-release) -> package map for that distribution.
_PKGMAP = { "Ubuntu" : _DEBIANPKGS,
            "Debian GNU/Linux" : _DEBIANPKGS,
            "Fedora" : _FEDORAPKGS,
            "CentOS Linux" : _FEDORAPKGS }
def read_os_release(sysroot="/", proc=None):
    """
    Read the 'os-release' file from the host defined by 'proc' and return it
    as a dictionary of KEY -> value pairs.

    :param sysroot: path prefix under which the os-release files live.
    :param proc: process/host object providing 'open()'; a local
        'Procs.Proc()' is created when not given.
    :raises Error: if no os-release file could be read.
    """

    if not proc:
        proc = Procs.Proc()

    paths = ("/usr/lib/os-release", "/etc/os-release")
    paths = [Path(sysroot) / path.lstrip("/") for path in paths]
    osinfo = {}

    for path in paths:
        with contextlib.suppress(proc.Error):
            with proc.open(path, "r") as fobj:
                for line in fobj:
                    line = line.strip()
                    # os-release files may contain blank lines and comments.
                    if not line or line.startswith("#"):
                        continue
                    # Split only on the first '=': values may contain '='.
                    key, val = line.split("=", 1)
                    osinfo[key] = val.strip('"')
            if osinfo:
                break

    if not osinfo:
        # Bug fix: 'paths' holds Path objects; str.join() requires strings.
        files = "\n".join(str(path) for path in paths)
        raise Error(f"cannot discover OS version{proc.hostmsg}, these files were checked:\n{files}")

    return osinfo
def tool_to_package_name(tool, proc=None):
    """
    Get OS package name providing 'tool' on host 'proc'. Returns 'None' if package name is not
    found.
    """

    if not proc:
        proc = Procs.Proc()

    # Look up the distribution name and pick its tool->package map.
    distro_name = read_os_release(proc=proc).get("NAME")
    pkgmap = _PKGMAP.get(distro_name)
    if pkgmap is None:
        return None
    return pkgmap.get(tool)
| 27.444444 | 100 | 0.623482 |
60be9ab68251ba6e1c911ba9989e77a5c988afd9 | 86 | py | Python | seg1/__init__.py | iViolinSolo/2018MathYouKnow | b7027ef874c69c751ddfeac2a0b861de8d49e5b0 | [
"Apache-2.0"
] | null | null | null | seg1/__init__.py | iViolinSolo/2018MathYouKnow | b7027ef874c69c751ddfeac2a0b861de8d49e5b0 | [
"Apache-2.0"
] | null | null | null | seg1/__init__.py | iViolinSolo/2018MathYouKnow | b7027ef874c69c751ddfeac2a0b861de8d49e5b0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: violinsolo
# Created on 15/09/2018 | 21.5 | 23 | 0.639535 |
833558aeb0ab08f0c9ae05882f77a23e2217bc70 | 40,667 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_connection_monitors_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_connection_monitors_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_10_01/aio/operations/_connection_monitors_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional 'cls' response-transform callback that every
# operation accepts: (pipeline response, deserialized body, response headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations:
"""ConnectionMonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer/deserializer translate between models and wire format.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        parameters: "_models.ConnectionMonitor",
        **kwargs
    ) -> "_models.ConnectionMonitorResult":
        """Issue the initial (non-polling) PUT request of the create-or-update
        LRO; polling is handled by begin_create_or_update()."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ConnectionMonitor')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 = updated existing monitor, 201 = created a new one.
        if response.status_code == 200:
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        parameters: "_models.ConnectionMonitor",
        **kwargs
    ) -> AsyncLROPoller["_models.ConnectionMonitorResult"]:
        """Create or update a connection monitor.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :param parameters: Parameters that define the operation to create a connection monitor.
        :type parameters: ~azure.mgmt.network.v2017_10_01.models.ConnectionMonitor
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ConnectionMonitorResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial PUT; its response seeds the poller.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                connection_monitor_name=connection_monitor_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the result model.
            deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-started operation from its saved state.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_watcher_name: str,
        connection_monitor_name: str,
        **kwargs
    ) -> "_models.ConnectionMonitorResult":
        """Gets a connection monitor by name.

        :param resource_group_name: The name of the resource group containing Network Watcher.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param connection_monitor_name: The name of the connection monitor.
        :type connection_monitor_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ConnectionMonitorResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-10-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for a point GET; everything else is mapped to
        # the appropriate azure.core exception.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> None:
    """Send the raw DELETE request that starts deletion of a connection monitor.

    This is the first leg of the long-running operation driven by
    ``begin_delete``; it returns once the service has accepted the request
    (202) or the resource is already gone (204).
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping; caller-supplied entries take precedence.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-10-01"

    # Fill in the URL template registered on this method's metadata.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._delete_initial.metadata['url'], **path_args)  # type: ignore

    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]

    request = self._client.delete(request_url, params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (202, 204):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Deletes the specified connection monitor.

    Long-running operation: issues the initial DELETE via ``_delete_initial``
    and returns a poller that tracks completion.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial DELETE now.  ``cls`` is replaced
        # by an identity lambda so the user callback is applied only once, on
        # the final response inside get_long_running_output below.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These were consumed by the initial request; drop them so the polling
    # pipeline does not see stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE returns no body: only the optional user callback is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling=True -> default ARM LRO polling; False -> no polling; any other
    # value is treated as a caller-supplied polling strategy object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
async def _stop_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> None:
    """Send the raw POST that asks the service to stop a connection monitor.

    First leg of the long-running operation driven by ``begin_stop``.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping; caller-supplied entries take precedence.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-10-01"

    # Fill in the URL template registered on this method's metadata.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._stop_initial.metadata['url'], **path_args)  # type: ignore

    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]

    request = self._client.post(request_url, params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}  # type: ignore
async def begin_stop(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Stops the specified connection monitor.

    Long-running operation: issues the initial POST via ``_stop_initial``
    and returns a poller that tracks completion.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial POST now.  ``cls`` is replaced by
        # an identity lambda so the user callback is applied only once, on the
        # final response inside get_long_running_output below.
        raw_result = await self._stop_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These were consumed by the initial request; drop them so the polling
    # pipeline does not see stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Stop returns no body: only the optional user callback is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling=True -> default ARM LRO polling; False -> no polling; any other
    # value is treated as a caller-supplied polling strategy object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'}  # type: ignore
async def _start_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> None:
    """Send the raw POST that asks the service to start a connection monitor.

    First leg of the long-running operation driven by ``begin_start``.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default ARM error mapping; caller-supplied entries take precedence.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-10-01"

    # Fill in the URL template registered on this method's metadata.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._start_initial.metadata['url'], **path_args)  # type: ignore

    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]

    request = self._client.post(request_url, params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}  # type: ignore
async def begin_start(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> AsyncLROPoller[None]:
    """Starts the specified connection monitor.

    Long-running operation: issues the initial POST via ``_start_initial``
    and returns a poller that tracks completion.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial POST now.  ``cls`` is replaced by
        # an identity lambda so the user callback is applied only once, on the
        # final response inside get_long_running_output below.
        raw_result = await self._start_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These were consumed by the initial request; drop them so the polling
    # pipeline does not see stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Start returns no body: only the optional user callback is invoked.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling=True -> default ARM LRO polling; False -> no polling; any other
    # value is treated as a caller-supplied polling strategy object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'}  # type: ignore
async def _query_initial(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> "_models.ConnectionMonitorQueryResult":
    """Send the raw POST that queries the latest connection states.

    First leg of the long-running operation driven by ``begin_query``.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorQueryResult"]
    # Default ARM error mapping; caller-supplied entries take precedence.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-10-01"
    accept = "application/json, text/json"

    # Fill in the URL template registered on this method's metadata.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    request_url = self._client.format_url(self._query_initial.metadata['url'], **path_args)  # type: ignore

    params = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(request_url, params, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both accepted statuses (200 completed, 202 accepted) carry the same
    # payload shape, so a single deserialization path suffices.
    deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}  # type: ignore
async def begin_query(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    connection_monitor_name: str,
    **kwargs
) -> AsyncLROPoller["_models.ConnectionMonitorQueryResult"]:
    """Query a snapshot of the most recent connection states.

    Long-running operation: issues the initial POST via ``_query_initial``
    and returns a poller whose result is the deserialized query payload.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name given to the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorQueryResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorQueryResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial POST now.  ``cls`` is replaced by
        # an identity lambda so the user callback is applied only once, on the
        # final response inside get_long_running_output below.
        raw_result = await self._query_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These were consumed by the initial request; drop them so the polling
    # pipeline does not see stale values.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response; apply the user callback if given.
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # polling=True -> default ARM LRO polling; False -> no polling; any other
    # value is treated as a caller-supplied polling strategy object.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'}  # type: ignore
def list(
    self,
    resource_group_name: str,
    network_watcher_name: str,
    **kwargs
) -> AsyncIterable["_models.ConnectionMonitorListResult"]:
    """Lists all connection monitors for the specified Network Watcher.

    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2017_10_01.models.ConnectionMonitorListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorListResult"]
    # Default ARM error mapping; caller-supplied entries take precedence.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-10-01"
    accept = "application/json, text/json"

    def prepare_request(next_link=None):
        # Build either the first-page request (from the URL template) or a
        # follow-up request against the service-provided ``next_link``.
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL from the template registered on this method.
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters.
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # ``next_link`` already embeds any query string.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        # First tuple element is the next-page link; this result model
        # carries none, so paging stops after one page.
        return None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'}  # type: ignore
| 52.137179 | 242 | 0.682839 |
fb3d0b42b27658b9b66b9431c5b02e7e25853942 | 4,484 | py | Python | code/mass_spec/20170401_scr_control/processing_massspec_purTscrscr1.py | RPGroup-PBoC/sortseq_belliveau | ca3b0b8092bbe6deaf1b82b2dab67b4bcca679f2 | [
"MIT"
] | 4 | 2018-05-07T00:50:08.000Z | 2021-05-10T12:40:56.000Z | code/mass_spec/20170401_scr_control/processing_massspec_purTscrscr1.py | RPGroup-PBoC/sortseq_belliveau | ca3b0b8092bbe6deaf1b82b2dab67b4bcca679f2 | [
"MIT"
] | null | null | null | code/mass_spec/20170401_scr_control/processing_massspec_purTscrscr1.py | RPGroup-PBoC/sortseq_belliveau | ca3b0b8092bbe6deaf1b82b2dab67b4bcca679f2 | [
"MIT"
] | 1 | 2019-07-29T01:40:40.000Z | 2019-07-29T01:40:40.000Z | # Purpose of script:
# For each DNA affinity purification dataset, this script will extract the
# relavant information from the 'proteingroups.txt' output after running the
# mass spec .raw data on MaxQuant (version 1.5.XX). It will also add in
# the additional experimental details (DNA sequence used, strain used for lysate
# media cells were grown in, etc. that are noted below). Lastely, it will
# calculate median shifted enrichment ratios using all available ratio
# measurements.
# Import dependencies.
import sys
import glob
import numpy as np
import pandas as pd
promoter = 'scr_control'
date = 20170401
WT_sequence = 'CAAAACTGCAG CAGCAATAAAGACAC GTACATGGATAAGCTT TTATACTGCGCGCGG'
control_sequence = 'CAAAACTGCAG CAGCAATAAAGACAC GTACATGGATAAGCTT TTATACTGCGCGCGG'
strain = 'MG1655deltalysA'
media = 'M9glucose'
binding_additions = 'na'
replicate = 1
spec_machine = 'orbitrap_elite'
labeling = 'forward'
df_details = pd.DataFrame([[promoter, date, WT_sequence, control_sequence,
strain, media, binding_additions, replicate, spec_machine, labeling]])
#==============================================================================
# Load in the data files
#==============================================================================
# Load the output of protein groups containing SILAC ratios into a DataFrame
df = pd.read_csv('../../data/mass_spec/20170506_purT_scr_scr_control/proteinGroups.txt',
delimiter=' ')
# remove non-e.coli items detected
df = df[~df['Protein IDs'].str.contains("REV")]
df = df[~df['Protein IDs'].str.contains("CON")]
# from MaxQuant output, I want to collect the following for each replicate:
df = df[['Protein IDs','Gene names','Peptides', 'Intensity scrscr_trial1',
'Intensity L scrscr_trial1', 'Intensity H scrscr_trial1',
'Ratio H/L type', 'Ratio H/L scrscr_trial1']]
# Load the list of proteins with predicted or confirmed DNA binding motifs
# in E. coli
EcoliTF = pd.read_csv('../20150712_EcoCyc_TF_query-results.txt',delimiter=' ')
# compare list of proteins with DNA binding motifs to those detected
df_temp = np.zeros(len(df))
for i, gene in enumerate(df['Gene names']):
test = pd.Series(gene).isin(EcoliTF['Protein'])
if test[0] == True:
df_temp[i] = 1
# add to dataframe
df['TF_check'] = df_temp
df = df.reset_index(drop=True)
# Tile experimental details for number of protein entries in df
# i.e. make an array containing the same experimental details in
# each row, which will be merged with the experimental results.
df_details = pd.DataFrame(pd.np.tile(df_details, (len(df), 1)))
#==============================================================================
# Combine experimental data with experimental details and rename columns.
#==============================================================================
# combine mass spec data with experimental details into tidy format
df = pd.concat([df, df_details], axis=1, ignore_index=True)
# Rename the columns of the data_frame
df.columns = ['ProteinIDs','gene','peptides', 'intensity_total', 'intensity_L',
'intensity_H', 'maxquant_ratio_type', 'maxquant_ratio',
'TF_check', 'promoter', 'date',
'wt_sequence', 'control_sequence', 'strain', 'media',
'binding_additions', 'replicate', 'spec_machine', 'labeling']
#==============================================================================
# Renormalization of SILAC ratio data correction.
# Due to potential experimental variability, the log ratios
# are median shifted so that the entire sample population is centered
# at 1:1 (heavy: light).
#==============================================================================
# Let's median shift the maxquant_ratio
df['maxquant_logratio'] = np.log(df['maxquant_ratio'])
# Calculate the median of log ratios
median_shift = df['maxquant_logratio'].replace([-np.inf,np.inf], np.nan).dropna().median()
# Calculate the median shifted log ratios and non-log ratios for all proteins
# in the sample.
df['maxquant_logratio_medianshift'] = df['maxquant_logratio'] - median_shift
df['maxquant_ratio_medianshift'] = np.exp(df['maxquant_logratio_medianshift'])
#==============================================================================
# Generate the summary file
#==============================================================================
df.to_csv(+ str(date) + '_' + promoter + '_' + str(replicate) +
'_mass_spec_summary.csv', index=False)
| 43.960784 | 90 | 0.632025 |
d9a736cccc603203308a88adaf2e64437b273b51 | 685 | py | Python | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/02_Conditional-Statements/00.Book-Exercise-3.2-02-Pipes-in-Pool.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/02_Conditional-Statements/00.Book-Exercise-3.2-02-Pipes-in-Pool.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/02_Conditional-Statements/00.Book-Exercise-3.2-02-Pipes-in-Pool.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | # тръби в басейн
# Басейн с обем V има две тръби, от които се пълни. Всяка тръба има определен дебит (литрите вода, минаващи през една тръба за един час).
# Работникът пуска тръбите едновременно и излиза за N часа.
# Напишете програма, която изкарва състоянието на басейна, в момента, когато работникът се върне.
from math import trunc
V = int(input())
P1 = int(input())
P2 = int(input())
H = float(input())
V1 = P1 * H
V2 = P2 * H
if V >= (V1 + V2):
print(f'The pool is {trunc(((V1 + V2) / V ) * 100)}% full. Pipe 1: {trunc((V1 / (V1 + V2)) * 100)}%. Pipe 2: {trunc((V2 / (V1 + V2)) * 100)}%.')
else:
print(f'For {H} hours the pool overflows with {(V1 + V2) - V} liters.') | 36.052632 | 148 | 0.642336 |
1b2846845c7c4b93c77fb5ba51da3602fff5f20e | 9,437 | py | Python | pymc/distributions/logprob.py | t-triobox/pymc3 | 8b063f9c3b344753acdf91416cf2bb3c4533f80a | [
"Apache-2.0"
] | null | null | null | pymc/distributions/logprob.py | t-triobox/pymc3 | 8b063f9c3b344753acdf91416cf2bb3c4533f80a | [
"Apache-2.0"
] | null | null | null | pymc/distributions/logprob.py | t-triobox/pymc3 | 8b063f9c3b344753acdf91416cf2bb3c4533f80a | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Mapping
from typing import Dict, List, Optional, Sequence, Union
import aesara
import aesara.tensor as at
import numpy as np
from aeppl import factorized_joint_logprob
from aeppl.logprob import logcdf as logcdf_aeppl
from aeppl.logprob import logprob as logp_aeppl
from aeppl.transforms import TransformValuesOpt
from aesara.graph.basic import graph_inputs, io_toposort
from aesara.tensor.subtensor import (
AdvancedIncSubtensor,
AdvancedIncSubtensor1,
AdvancedSubtensor,
AdvancedSubtensor1,
IncSubtensor,
Subtensor,
)
from aesara.tensor.var import TensorVariable
from pymc.aesaraf import floatX
def _get_scaling(total_size: Optional[Union[int, Sequence[int]]], shape, ndim: int):
    """
    Gets scaling constant for logp.
    Used for minibatch training: the logp of the observed subset is scaled up
    so its expectation matches the logp of the full dataset.
    Parameters
    ----------
    total_size: Optional[int|List[int]]
        size of a fully observed data without minibatching,
        `None` means data is fully observed
    shape: shape
        shape of an observed data
    ndim: int
        ndim hint
    Returns
    -------
    scalar
    """
    # No minibatching requested: leave the logp unscaled.
    if total_size is None:
        coef = 1.0
    elif isinstance(total_size, int):
        # Scalar spec: scale by total_size / (observed length along axis 0).
        if ndim >= 1:
            denom = shape[0]
        else:
            denom = 1
        coef = floatX(total_size) / floatX(denom)
    elif isinstance(total_size, (list, tuple)):
        # Per-axis spec; entries may be int, None (no scaling for that axis)
        # or a single Ellipsis separating leading from trailing axis sizes.
        if not all(isinstance(i, int) for i in total_size if (i is not Ellipsis and i is not None)):
            raise TypeError(
                "Unrecognized `total_size` type, expected "
                "int or list of ints, got %r" % total_size
            )
        if Ellipsis in total_size:
            # Split into sizes for leading axes (`begin`) and trailing axes
            # (`end`); only one Ellipsis is permitted.
            sep = total_size.index(Ellipsis)
            begin = total_size[:sep]
            end = total_size[sep + 1 :]
            if Ellipsis in end:
                raise ValueError(
                    "Double Ellipsis in `total_size` is restricted, got %r" % total_size
                )
        else:
            begin = total_size
            end = []
        if (len(begin) + len(end)) > ndim:
            raise ValueError(
                "Length of `total_size` is too big, "
                "number of scalings is bigger that ndim, got %r" % total_size
            )
        elif (len(begin) + len(end)) == 0:
            coef = 1.0
        # NOTE(review): for an empty spec the `coef = 1.0` just above appears to
        # be overwritten below by `at.prod` of an empty list (which should also
        # be 1) — the elif branch looks effectively dead; confirm.
        if len(end) > 0:
            shp_end = shape[-len(end) :]
        else:
            shp_end = np.asarray([])
        shp_begin = shape[: len(begin)]
        # Per-axis factors total/observed; axes marked None contribute nothing.
        begin_coef = [
            floatX(t) / floatX(shp_begin[i]) for i, t in enumerate(begin) if t is not None
        ]
        end_coef = [floatX(t) / floatX(shp_end[i]) for i, t in enumerate(end) if t is not None]
        coefs = begin_coef + end_coef
        coef = at.prod(coefs)
    else:
        raise TypeError(
            "Unrecognized `total_size` type, expected int or list of ints, got %r" % total_size
        )
    # Return a symbolic tensor in Aesara's configured float dtype.
    return at.as_tensor(coef, dtype=aesara.config.floatX)
# Aesara indexing / index-update `Op` classes grouped for `isinstance` checks.
# NOTE(review): no consumer is visible in this chunk — presumably used by other
# modules when walking graphs; confirm before removing.
subtensor_types = (
    AdvancedIncSubtensor,
    AdvancedIncSubtensor1,
    AdvancedSubtensor,
    AdvancedSubtensor1,
    IncSubtensor,
    Subtensor,
)
def joint_logpt(
    var: Union[TensorVariable, List[TensorVariable]],
    rv_values: Optional[Union[TensorVariable, Dict[TensorVariable, TensorVariable]]] = None,
    *,
    jacobian: bool = True,
    scaling: bool = True,
    transformed: bool = True,
    sum: bool = True,
    **kwargs,
) -> Union[TensorVariable, List[TensorVariable]]:
    """Create a measure-space (i.e. log-likelihood) graph for a random variable
    or a list of random variables at a given point.
    The input `var` determines which log-likelihood graph is used and
    `rv_value` is that graph's input parameter. For example, if `var` is
    the output of a ``NormalRV`` ``Op``, then the output is a graph of the
    density function for `var` set to the value `rv_value`.
    Parameters
    ==========
    var
        The `RandomVariable` output that determines the log-likelihood graph.
        Can also be a list of variables. The final log-likelihood graph will
        be the sum total of all individual log-likelihood graphs of variables
        in the list.
    rv_values
        A variable, or ``dict`` of variables, that represents the value of
        `var` in its log-likelihood. If no `rv_value` is provided,
        ``var.tag.value_var`` will be checked and, when available, used.
    jacobian
        Whether or not to include the Jacobian term.
    scaling
        A scaling term to apply to the generated log-likelihood graph.
    transformed
        Apply transforms.
    sum
        Sum the log-likelihood or return each term as a separate list item.
    """
    # TODO: In future when we drop support for tag.value_var most of the following
    # logic can be removed and logpt can just be a wrapper function that calls aeppl's
    # joint_logprob directly.
    # If var is not a list make it one.
    if not isinstance(var, (list, tuple)):
        var = [var]
    # If logpt isn't provided values it is assumed that the tagged value var or
    # observation is the value variable for that particular RV.
    if rv_values is None:
        rv_values = {}
        for rv in var:
            value_var = getattr(rv.tag, "observations", getattr(rv.tag, "value_var", None))
            if value_var is None:
                raise ValueError(f"No value variable found for var {rv}")
            rv_values[rv] = value_var
    # Else we assume we were given a single rv and respective value
    elif not isinstance(rv_values, Mapping):
        if len(var) == 1:
            rv_values = {var[0]: at.as_tensor_variable(rv_values).astype(var[0].type)}
        else:
            raise ValueError("rv_values must be a dict if more than one var is requested")
    if scaling:
        # Minibatch scaling factors, keyed by value variable (see _get_scaling).
        rv_scalings = {}
        for rv, value_var in rv_values.items():
            rv_scalings[value_var] = _get_scaling(
                getattr(rv.tag, "total_size", None), value_var.shape, value_var.ndim
            )
    # Aeppl needs all rv-values pairs, not just that of the requested var.
    # Hence we iterate through the graph to collect them.
    tmp_rvs_to_values = rv_values.copy()
    for node in io_toposort(graph_inputs(var), var):
        try:
            # Most Ops have a single canonical output; multi-output Ops raise.
            curr_vars = [node.default_output()]
        except ValueError:
            curr_vars = node.outputs
        for curr_var in curr_vars:
            if curr_var in tmp_rvs_to_values:
                continue
            # Check if variable has a value variable
            value_var = getattr(
                curr_var.tag, "observations", getattr(curr_var.tag, "value_var", None)
            )
            if value_var is not None:
                tmp_rvs_to_values[curr_var] = value_var
    # After collecting all necessary rvs and values, we check for any value transforms
    transform_map = {}
    if transformed:
        for rv, value_var in tmp_rvs_to_values.items():
            if hasattr(value_var.tag, "transform"):
                transform_map[value_var] = value_var.tag.transform
            # If the provided value_variable does not have transform information, we
            # check if the original `rv.tag.value_var` does.
            # TODO: This logic should be replaced by an explicit dict of
            # `{value_var: transform}` similar to `rv_values`.
            else:
                original_value_var = getattr(rv.tag, "value_var", None)
                if original_value_var is not None and hasattr(original_value_var.tag, "transform"):
                    transform_map[value_var] = original_value_var.tag.transform
    transform_opt = TransformValuesOpt(transform_map)
    temp_logp_var_dict = factorized_joint_logprob(
        tmp_rvs_to_values, extra_rewrites=transform_opt, use_jacobian=jacobian, **kwargs
    )
    # aeppl returns the logpt for every single value term we provided to it. This includes
    # the extra values we plugged in above, so we filter those we actually wanted in the
    # same order they were given in.
    logp_var_dict = {}
    for value_var in rv_values.values():
        logp_var_dict[value_var] = temp_logp_var_dict[value_var]
    if scaling:
        # Apply the minibatch scaling factors computed above.
        for value_var in logp_var_dict.keys():
            if value_var in rv_scalings:
                logp_var_dict[value_var] *= rv_scalings[value_var]
    if sum:
        logp_var = at.sum([at.sum(factor) for factor in logp_var_dict.values()])
    else:
        logp_var = list(logp_var_dict.values())
    return logp_var
def logp(rv, value):
    """Build and return the log-probability graph of ``rv`` evaluated at ``value``."""
    value_tensor = at.as_tensor_variable(value, dtype=rv.dtype)
    return logp_aeppl(rv, value_tensor)
def logcdf(rv, value):
    """Build and return the log-CDF graph of ``rv`` evaluated at ``value``."""
    value_tensor = at.as_tensor_variable(value, dtype=rv.dtype)
    return logcdf_aeppl(rv, value_tensor)
| 36.577519 | 100 | 0.641623 |
b1c595f888b3ae86072e5c86f6f2759230b16600 | 3,748 | py | Python | musico/settings.py | NimaAram1/music-webApi | 0abe2761e4359d5aa49a21d832aad25cb08f3982 | [
"BSD-3-Clause"
] | null | null | null | musico/settings.py | NimaAram1/music-webApi | 0abe2761e4359d5aa49a21d832aad25cb08f3982 | [
"BSD-3-Clause"
] | null | null | null | musico/settings.py | NimaAram1/music-webApi | 0abe2761e4359d5aa49a21d832aad25cb08f3982 | [
"BSD-3-Clause"
] | null | null | null | """
Django settings for musico project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load it
# from an environment variable (e.g. os.environ['SECRET_KEY']) before deploying.
SECRET_KEY = '0lsy+lp1gemye6%(_-w0(j8urjdss0*bme%ea!z28q=#dyqrs)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is fine for local development; must list served hostnames when DEBUG=False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party: Django REST framework plus its token-auth app.
    'rest_framework',
    'rest_framework.authtoken',
    # Local app.
    'music.apps.MusicConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'musico.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'musico.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# SQLite file in the project root — development default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# NOTE(review): os.path.join('media') with a single argument is just 'media'
# (relative to the process CWD). This probably should be
# os.path.join(BASE_DIR, 'media') — confirm with the deployment setup.
MEDIA_ROOT = os.path.join('media')
# Django REST framework configuration.
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
        # Fixed: 'rest_framework.authentication.session' is not an importable
        # class path and would raise ImportError when DRF loads the settings;
        # the session-based authenticator is SessionAuthentication.
        'rest_framework.authentication.SessionAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated',
    ],
    'DEFAULT_THROTTLE_CLASSES': [
        'rest_framework.throttling.AnonRateThrottle',
        'rest_framework.throttling.UserRateThrottle',
    ],
    # Request-rate limits matching the throttle classes above.
    'DEFAULT_THROTTLE_RATES': {
        'anon': '5/minute',
        'user': '10/minute',
    },
}
80c2aab4da2e59f2fafbb7024d0e2fdc922e8138 | 5,356 | py | Python | bokeh/tests/test_embed.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 1 | 2021-11-07T18:55:59.000Z | 2021-11-07T18:55:59.000Z | bokeh/tests/test_embed.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | null | null | null | bokeh/tests/test_embed.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 1 | 2021-08-01T08:38:53.000Z | 2021-08-01T08:38:53.000Z | from __future__ import absolute_import
import unittest
import bs4
import bokeh.embed as embed
from bokeh.resources import CDN, INLINE, Resources
from bokeh.plotting import figure
from bokeh.session import Session
# Module-level fixture: a minimal plot shared by every test case below.
_embed_test_plot = None
# Called once by unittest before any test in this module runs.
def setUpModule():
    global _embed_test_plot
    # One figure with a single circle glyph is enough to exercise the embed APIs.
    _embed_test_plot = figure()
    _embed_test_plot.circle([1,2], [2,3])
class TestComponents(unittest.TestCase):
    """Tests for ``embed.components`` (script + div embedding)."""

    def test_return_type(self):
        r = embed.components(_embed_test_plot, CDN)
        self.assertEqual(len(r), 2)

    def test_result_attrs(self):
        script, div = embed.components(_embed_test_plot, CDN)
        html = bs4.BeautifulSoup(script)
        scripts = html.findAll(name='script')
        self.assertEqual(len(scripts), 1)
        # Fixed: assertTrue(x, y) treats y as the failure *message* and passes
        # for any truthy x, so the old assertion never compared anything.
        self.assertEqual(scripts[0].attrs, {'type': 'text/javascript'})

    def test_div_attrs(self):
        script, div = embed.components(_embed_test_plot, CDN)
        html = bs4.BeautifulSoup(div)
        divs = html.findAll(name='div')
        self.assertEqual(len(divs), 1)
        div = divs[0]
        # Fixed: same assertTrue-with-message misuse as above.
        self.assertEqual(set(div.attrs), set(['class', 'id']))
        self.assertEqual(div.attrs['class'], ['plotdiv'])
        self.assertEqual(div.text, "")
class TestNotebookDiv(unittest.TestCase):
    """Tests for ``embed.notebook_div``."""

    def test_return_type(self):
        r = embed.notebook_div(_embed_test_plot)
        self.assertIsInstance(r, str)

    def test_result_attrs(self):
        r = embed.notebook_div(_embed_test_plot)
        html = bs4.BeautifulSoup(r)
        scripts = html.findAll(name='script')
        self.assertEqual(len(scripts), 1)
        # Fixed: assertTrue(x, y) treats y as the failure *message* and passes
        # for any truthy x, so the old assertion never compared anything.
        self.assertEqual(scripts[0].attrs, {'type': 'text/javascript'})

    def test_div_attrs(self):
        r = embed.notebook_div(_embed_test_plot)
        html = bs4.BeautifulSoup(r)
        divs = html.findAll(name='div')
        self.assertEqual(len(divs), 1)
        div = divs[0]
        # Fixed: same assertTrue-with-message misuse as above.
        self.assertEqual(set(div.attrs), set(['class', 'id']))
        self.assertEqual(div.attrs['class'], ['plotdiv'])
        self.assertEqual(div.text, "")
class TestFileHTML(unittest.TestCase):
    def test_return_type(self):
        # Minimal stand-in template: file_html should accept any object with a
        # compatible render() method and still return a string.
        class fake_template:
            def render(self, title, plot_resources, plot_script, plot_div):
                return "template result"
        # Default template path.
        r = embed.file_html(_embed_test_plot, CDN, "title")
        self.assertTrue(isinstance(r, str))
        # Custom template path.
        r = embed.file_html(_embed_test_plot, CDN, "title", fake_template())
        self.assertTrue(isinstance(r, str))
class TestAutoloadStatic(unittest.TestCase):
    """Tests for ``embed.autoload_static``."""

    def test_invalid_resources(self):
        # INLINE and all *-dev resource modes are rejected for static autoload.
        self.assertRaises(ValueError, embed.autoload_static, _embed_test_plot, INLINE, "some/path")
        dev_resources = (Resources("absolute-dev"), Resources("server-dev"), Resources("relative-dev"))
        for x in dev_resources:
            self.assertRaises(ValueError, embed.autoload_static, _embed_test_plot, x, "some/path")

    def test_return_type(self):
        r = embed.autoload_static(_embed_test_plot, CDN, "some/path")
        self.assertEqual(len(r), 2)

    def test_script_attrs(self):
        js, tag = embed.autoload_static(_embed_test_plot, CDN, "some/path")
        html = bs4.BeautifulSoup(tag)
        scripts = html.findAll(name='script')
        self.assertEqual(len(scripts), 1)
        attrs = scripts[0].attrs
        # Fixed: assertTrue(x, y) treats y as the failure *message* and passes
        # for any truthy x, so the old assertion never compared anything. If
        # this fails, the expected attribute set below needs updating — the
        # previous assertion silently checked nothing.
        self.assertEqual(set(attrs), set(['src',
                                          'data-bokeh-modeltype',
                                          'data-bokeh-modelid',
                                          'async',
                                          'id',
                                          'data-bokeh-data']))
        self.assertEqual(attrs['async'], 'true')
        self.assertEqual(attrs['data-bokeh-data'], 'static')
        self.assertEqual(attrs['data-bokeh-modeltype'], 'Plot')
        self.assertEqual(attrs['data-bokeh-modelid'], str(_embed_test_plot._id))
        self.assertEqual(attrs['src'], 'some/path')
class TestAutoloadServer(unittest.TestCase):
    """Tests for ``embed.autoload_server``."""

    def setUp(self):
        # A detached session with fixed credentials so attribute values are
        # deterministic in the generated script tag.
        self.sess = Session(load_from_config=False)
        self.sess.docid = 'docid10'
        self.sess.apikey = 'apikey123'
        self.sess.root_url = "http://foo"

    def test_return_type(self):
        r = embed.autoload_server(_embed_test_plot, self.sess)
        self.assertIsInstance(r, str)

    def test_script_attrs(self):
        r = embed.autoload_server(_embed_test_plot, self.sess)
        html = bs4.BeautifulSoup(r)
        scripts = html.findAll(name='script')
        self.assertEqual(len(scripts), 1)
        attrs = scripts[0].attrs
        # Fixed: assertTrue(x, y) treats y as the failure *message* and passes
        # for any truthy x, so the old assertion never compared anything. If
        # this fails, the expected attribute set below needs updating — the
        # previous assertion silently checked nothing.
        self.assertEqual(set(attrs), set([
            'src',
            'data-bokeh-docid',
            'data-bokeh-docapikey',
            'data-bokeh-modeltype',
            'data-bokeh-modelid',
            'data-bokeh-root-url',
            'async',
            'id',
            'data-bokeh-data',
            'data-bokeh-conn-string'
        ]))
        self.assertEqual(attrs['async'], 'true')
        self.assertEqual(attrs['data-bokeh-data'], 'server')
        self.assertEqual(attrs['data-bokeh-docapikey'], 'apikey123')
        self.assertEqual(attrs['data-bokeh-docid'], 'docid10')
        self.assertEqual(attrs['data-bokeh-modelid'], str(_embed_test_plot._id))
        self.assertEqual(attrs['data-bokeh-root-url'], "http://foo/")
        divid = attrs['id']
        self.assertEqual(attrs['src'], "%s/bokeh/autoload.js/%s" % ("http://foo", divid))
if __name__ == "__main__":
unittest.main()
| 34.333333 | 102 | 0.633495 |
76c841a55efce7f6361d93378a4ffcc1ef76e8fe | 696 | py | Python | notebook/pandas_index_select_query.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 174 | 2018-05-30T21:14:50.000Z | 2022-03-25T07:59:37.000Z | notebook/pandas_index_select_query.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 5 | 2019-08-10T03:22:02.000Z | 2021-07-12T20:31:17.000Z | notebook/pandas_index_select_query.py | vhn0912/python-snippets | 80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038 | [
"MIT"
] | 53 | 2018-04-27T05:26:35.000Z | 2022-03-25T07:59:37.000Z | import pandas as pd
df = pd.read_csv('data/src/sample_pandas_normal.csv', index_col=0)
print(df)
# age state point
# name
# Alice 24 NY 64
# Bob 42 CA 92
# Charlie 18 CA 70
# Dave 68 TX 70
# Ellen 24 CA 88
# Frank 30 NY 57
print(df.query('index.str.contains("li")', engine='python'))
# age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
print(df.query('name.str.endswith("e")', engine='python'))
# age state point
# name
# Alice 24 NY 64
# Charlie 18 CA 70
# Dave 68 TX 70
| 26.769231 | 66 | 0.475575 |
76eef53a002c42b231fd9ac14eca2287041c9b82 | 2,343 | py | Python | project-euler/problem-1/problem-1.py | fugitiveinkc/lessons-with-tim | f057856936cfd235bac63b158a109f80ce90b3b6 | [
"CC0-1.0"
] | null | null | null | project-euler/problem-1/problem-1.py | fugitiveinkc/lessons-with-tim | f057856936cfd235bac63b158a109f80ce90b3b6 | [
"CC0-1.0"
] | null | null | null | project-euler/problem-1/problem-1.py | fugitiveinkc/lessons-with-tim | f057856936cfd235bac63b158a109f80ce90b3b6 | [
"CC0-1.0"
] | null | null | null | """
Description: If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000
"""
# Method 1: First thoughts -- go through each value below 1000, check whether it
# is a multiple of 3 or 5, collect it, and sum the collection.
# Fixes: the accumulator was misleadingly named `below_100` even though the
# range runs to 999; also rewritten as a list comprehension.
multiples_below_1000 = [x for x in range(1, 1000) if x % 3 == 0 or x % 5 == 0]
print("Method 1 solution: " + str(sum(multiples_below_1000)))
# Method 2: Shorter? Same computation restyled: build each multiple set with a
# set comprehension and sum their union (the set removes double-counted
# multiples of 15).
print("Method 2 solution: " + str(sum({x for x in range(1, 1000) if x % 5 == 0} | {y for y in range(1, 1000) if y % 3 == 0})))
"""
Notes from Tim:
- Map: Apply some kind of transformation to every element of a list
- Filter: Filter out some values
- Reduce: Combine values in some way
Functional programming: Computations are done by combining functions that take arguments and return a concrete value (or values) as a result. These functions don't modify their input arguments and don't change the program's state. They just provide the result of a given computation.
Since map() is written in C and is highly optimized, its internal implied loop can be more efficient than a regular python for loop.
Second advantage of using map() is related to memory consumption. With a for loop, you need to store the whole list in your system's memory. With map(), you get items on demand, and only one item is in your system's memory at a given time.
In python 2, map() returns a list.
In python 3, map() returns a map object -> You'll need to convert it to a list.
lambda functions are handy when you need to pass an expression-based function to map().
You can actually use multiple iterables with a function in map as long as your function can take two variables
filter() is a built-in function that takes two positional arguments: a function that yields true or false and an iterable.
filter() takes an iterable and filters out values based on a predicate or boolean-valued function.
reduce() is in module functools
reduce() is useful when you need to apply a function to an iterable and reduce it to a single or cumulative value (reduction or folding)
"""
# Method 3: Functional one-liner — reduce(add, ...) over the filtered range.
# (x % 3 and x % 5) is falsy exactly when x is divisible by 3 or by 5.
import functools
import operator
print("Method 3: " + str(functools.reduce(operator.add, filter(lambda n: not (n % 3 and n % 5), range(1, 1000)))))
| 44.207547 | 283 | 0.734102 |
a343add110644e57289fe574ce1e4ae48bfb27a2 | 3,367 | py | Python | ambassador/ambassador/ir/irtracing.py | Pluies/ambassador | 03ed4d8021df9da5634dd5bc1203d5e0fbe802bd | [
"Apache-2.0"
] | null | null | null | ambassador/ambassador/ir/irtracing.py | Pluies/ambassador | 03ed4d8021df9da5634dd5bc1203d5e0fbe802bd | [
"Apache-2.0"
] | null | null | null | ambassador/ambassador/ir/irtracing.py | Pluies/ambassador | 03ed4d8021df9da5634dd5bc1203d5e0fbe802bd | [
"Apache-2.0"
] | 1 | 2020-01-11T00:05:47.000Z | 2020-01-11T00:05:47.000Z | from typing import Optional, TYPE_CHECKING
from ..config import Config
from ..utils import RichStatus
from .irresource import IRResource
from .ircluster import IRCluster
if TYPE_CHECKING:
from .ir import IR
class IRTracing (IRResource):
    # Envoy cluster created for the tracing collector; None until add_mappings.
    cluster: Optional[IRCluster]
    # Address of the tracing collector service.
    service: str
    # Envoy tracing driver name (e.g. "zipkin", "lightstep", "envoy.tracers.datadog").
    driver: str
    # Driver-specific configuration block, passed through to Envoy.
    driver_config: dict
    # Request headers to attach to spans as tags.
    tag_headers: list
    host_rewrite: Optional[str]
    def __init__(self, ir: 'IR', aconf: Config,
                 rkey: str="ir.tracing",
                 kind: str="ir.tracing",
                 name: str="tracing",
                 **kwargs) -> None:
        del kwargs # silence unused-variable warning
        super().__init__(
            ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name
        )
        self.cluster = None
    def setup(self, ir: 'IR', aconf: Config) -> bool:
        # Validate the TracingService config and populate this resource.
        # Returns False (possibly posting an error) when tracing should be
        # disabled, True when the config is usable.
        # Some of the validations might go away if JSON Schema is doing the validations, but need to check on that
        config_info = aconf.get_config('tracing_configs')
        if not config_info:
            ir.logger.debug("IRTracing: no tracing config, bailing")
            # No tracing info. Be done.
            return False
        configs = config_info.values()
        number_configs = len(configs)
        if number_configs != 1:
            self.post_error(
                RichStatus.fromError("exactly one TracingService is supported, got {}".format(number_configs),
                                     module=aconf))
            return False
        config = list(configs)[0]
        service = config.get('service')
        if not service:
            self.post_error(RichStatus.fromError("service field is required in TracingService"))
            return False
        driver = config.get('driver')
        if not driver:
            self.post_error(RichStatus.fromError("driver field is required in TracingService"))
            return False
        grpc = False
        # The lightstep collector is reached over gRPC; the others over HTTP.
        if driver == "lightstep":
            grpc = True
        # Map the user-facing "datadog" name onto Envoy's tracer identifier.
        if driver == "datadog":
            driver = "envoy.tracers.datadog"
        # OK, we have a valid config.
        self.sourced_by(config)
        self.service = service
        self.driver = driver
        self.grpc = grpc
        self.cluster = None
        self.driver_config = config.get("config", {})
        self.tag_headers = config.get('tag_headers', [])
        # XXX host_rewrite actually isn't in the schema right now.
        self.host_rewrite = config.get('host_rewrite', None)
        # Remember that the config references us.
        self.referenced_by(config)
        return True
    def add_mappings(self, ir: 'IR', aconf: Config):
        # Create (or reuse) the cluster that routes spans to the collector.
        cluster = ir.add_cluster(
            IRCluster(
                ir=ir,
                aconf=aconf,
                location=self.location,
                service=self.service,
                host_rewrite=self.get('host_rewrite', None),
                marker='tracing',
                grpc=self.grpc
            )
        )
        cluster.referenced_by(self)
        self.cluster = cluster
        # if not ir.add_to_primary_listener(tracing=True):
        #     raise Exception("Failed to update primary listener with tracing config")
    def finalize(self):
        # Assumes add_mappings already ran, so self.cluster is set.
        self.ir.logger.info("tracing cluster name: %s" % self.cluster.name)
        self.driver_config['collector_cluster'] = self.cluster.name
| 30.609091 | 114 | 0.588358 |
c67c14a3c020d44f5b4dc9b6a8b703bf30b48812 | 4,314 | py | Python | lavalink/node.py | Taarek/Lavalink.py | b306fc72aecb8c4ebf4c196606ad1d641d742e29 | [
"MIT"
] | null | null | null | lavalink/node.py | Taarek/Lavalink.py | b306fc72aecb8c4ebf4c196606ad1d641d742e29 | [
"MIT"
] | null | null | null | lavalink/node.py | Taarek/Lavalink.py | b306fc72aecb8c4ebf4c196606ad1d641d742e29 | [
"MIT"
] | null | null | null | from .events import Event
from .websocket import WebSocket
class Node:
    """
    Represents a Node connection with Lavalink.
    Note
    ----
    Nodes are **NOT** meant to be added manually, but rather with :func:`Client.add_node`.
    Adding them manually can cause invalid caches and many more problems.
    Attributes
    ----------
    host: :class:`str`
        The address of the Lavalink node.
    port: :class:`int`
        The port to use for websocket and REST connections.
    password: :class:`str`
        The password used for authentication.
    region: :class:`str`
        The region to assign this node to.
    name: :class:`str`
        The name the :class:`Node` is identified by.
    stats: :class:`Stats`
        The statistics of how the :class:`Node` is performing.
    """
    def __init__(self, manager, host: str, port: int, password: str,
                 region: str, resume_key: str, resume_timeout: int, name: str = None):
        self._manager = manager
        self._ws = WebSocket(self, host, port, password, resume_key, resume_timeout)
        self.host = host
        self.port = port
        self.password = password
        self.region = region
        # Fall back to an auto-generated identifier, e.g. "eu-1.2.3.4:2333".
        self.name = name or '{}-{}:{}'.format(self.region, self.host, self.port)
        # NOTE(review): presumably updated from stats messages received over
        # the websocket; stays None until the first update — confirm.
        self.stats = None
    @property
    def available(self):
        """ Returns whether the node is available for requests. """
        return self._ws.connected
    @property
    def _original_players(self):
        """ Returns a list of players that were assigned to this node, but were moved due to failover etc. """
        return [p for p in self._manager._lavalink.player_manager.values() if p._original_node == self]
    @property
    def players(self):
        """ Returns a list of all players on this node. """
        return [p for p in self._manager._lavalink.player_manager.values() if p.node == self]
    @property
    def penalty(self):
        """ Returns the load-balancing penalty for this node. """
        if not self.available or not self.stats:
            # Effectively infinite penalty so the balancer never picks an
            # unavailable node or one with no stats yet.
            return 9e30
        return self.stats.penalty.total
    async def get_tracks(self, query: str):
        """|coro|
        Gets all tracks associated with the given query.
        Parameters
        ----------
        query: :class:`str`
            The query to perform a search for.
        Returns
        -------
        :class:`dict`
            A dict representing an AudioTrack.
        """
        return await self._manager._lavalink.get_tracks(query, self)
    async def routeplanner_status(self):
        """|coro|
        Gets the routeplanner status of the target node.
        Returns
        -------
        :class:`dict`
            A dict representing the routeplanner information.
        """
        return await self._manager._lavalink.routeplanner_status(self)
    async def routeplanner_free_address(self, address: str):
        """|coro|
        Frees up the provided IP address in the target node's routeplanner.
        Parameters
        ----------
        address: :class:`str`
            The address to free.
        Returns
        -------
        bool
            True if the address was freed, False otherwise.
        """
        return await self._manager._lavalink.routeplanner_free_address(self, address)
    async def routeplanner_free_all_failing(self):
        """|coro|
        Frees up all IP addresses in the target node's routeplanner that are
        currently marked as failing.
        Returns
        -------
        bool
            True if all failing addresses were freed, False otherwise.
        """
        return await self._manager._lavalink.routeplanner_free_all_failing(self)
    async def _dispatch_event(self, event: Event):
        """|coro|
        Dispatches the given event to all registered hooks.
        Parameters
        ----------
        event: :class:`Event`
            The event to dispatch to the hooks.
        """
        await self._manager._lavalink._dispatch_event(event)
    async def _send(self, **data):
        """|coro|
        Sends the passed data to the node via the websocket connection.
        Parameters
        ----------
        data: :class:`any`
            The dict to send to Lavalink.
        """
        await self._ws._send(**data)
    def __repr__(self):
        return '<Node name={0.name} region={0.region}>'.format(self)
| 30.380282 | 112 | 0.596662 |
cb2190e069522d1f9e066f39639804cb3b0100fb | 111,442 | py | Python | keras/callbacks.py | b-little/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 37,222 | 2017-12-13T00:52:55.000Z | 2022-03-31T22:34:35.000Z | keras/callbacks.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 7,624 | 2017-12-13T01:03:40.000Z | 2022-03-31T23:57:24.000Z | keras/callbacks.py | amirsadafi/keras | f1e9c76675981ee6683f54a3ce569212d551d12d | [
"Apache-2.0"
] | 14,914 | 2017-12-13T02:30:46.000Z | 2022-03-30T14:49:16.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
# pylint: disable=g-classes-have-attributes
"""Callbacks: utilities called at certain points during model training."""
import collections
import copy
import csv
import json
import os
import re
import sys
import time
from keras import backend
from keras.distribute import distributed_file_utils
from keras.distribute import worker_training_state
from keras.optimizer_v2 import learning_rate_schedule
from keras.utils import generic_utils
from keras.utils import io_utils
from keras.utils import tf_utils
from keras.utils import version_utils
from keras.utils.data_utils import Sequence
from keras.utils.generic_utils import Progbar
from keras.utils.mode_keys import ModeKeys
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
try:
import requests
except ImportError:
requests = None
# Note: `configure_callbacks` is only used in TF1.
def configure_callbacks(callbacks,
                        model,
                        do_validation=False,
                        batch_size=None,
                        epochs=None,
                        steps_per_epoch=None,
                        samples=None,
                        verbose=1,
                        count_mode='steps',
                        mode=ModeKeys.TRAIN):
  """Configures callbacks for use in various training loops.

  Only used by the TF1 training loops. Wraps user callbacks in a
  `CallbackList`, adds the implicit `BaseLogger`/`History`/`ProgbarLogger`
  callbacks for training, and pushes loop parameters into every callback.

  Args:
    callbacks: List of Callbacks.
    model: Model being trained.
    do_validation: Whether or not validation loop will be run.
    batch_size: Number of samples per batch.
    epochs: Number of epoch to train.
    steps_per_epoch: Number of batches to run per training epoch.
    samples: Number of training samples.
    verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
    count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
    mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
      Which loop mode to configure callbacks for.
  Returns:
    Instance of CallbackList used to control all Callbacks.
  """
  # Check if callbacks have already been configured.
  if isinstance(callbacks, CallbackList):
    return callbacks
  if not callbacks:
    callbacks = []
  # Add additional callbacks during training.
  if mode == ModeKeys.TRAIN:
    # BaseLogger must run first (it aggregates the per-batch metrics the
    # later callbacks read); History must run last so it sees final values.
    model.history = History()
    callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
    if verbose:
      callbacks.append(ProgbarLogger(count_mode))
  callback_list = CallbackList(callbacks)
  # Set callback model
  callback_model = model._get_callback_model()  # pylint: disable=protected-access
  callback_list.set_model(callback_model)
  set_callback_parameters(
      callback_list,
      model,
      do_validation=do_validation,
      batch_size=batch_size,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      samples=samples,
      verbose=verbose,
      mode=mode)
  callback_list.model.stop_training = False
  return callback_list
def set_callback_parameters(callback_list,
                            model,
                            do_validation=False,
                            batch_size=None,
                            epochs=None,
                            steps_per_epoch=None,
                            samples=None,
                            verbose=1,
                            mode=ModeKeys.TRAIN):
  """Sets callback parameters.
  Args:
    callback_list: CallbackList instance.
    model: Model being trained.
    do_validation: Whether or not validation loop will be run.
    batch_size: Number of samples per batch.
    epochs: Number of epoch to train.
    steps_per_epoch: Number of batches to run per training epoch.
    samples: Number of training samples.
    verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
    mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
      Which loop mode to configure callbacks for.
  """
  metric_names = model.metrics_names
  # Loggers need to know which metrics are already aggregated so they don't
  # average them again.
  for cbk in callback_list:
    if isinstance(cbk, (BaseLogger, ProgbarLogger)):
      cbk.stateful_metrics = metric_names[1:] # Exclude `loss`
  # Set callback parameters
  callback_metrics = []
  # When we have deferred build scenario with iterator input, we will compile
  # when we standardize first batch of data.
  if mode != ModeKeys.PREDICT:
    callback_metrics = copy.copy(metric_names)
    if do_validation:
      # Validation metrics are reported under a `val_` prefix.
      callback_metrics += ['val_' + n for n in metric_names]
  callback_params = {
      'batch_size': batch_size,
      'epochs': epochs,
      'steps': steps_per_epoch,
      'samples': samples,
      'verbose': verbose,
      'do_validation': do_validation,
      'metrics': callback_metrics,
  }
  callback_list.set_params(callback_params)
def _is_generator_like(data):
  """Checks if data is a generator, Sequence, or Iterator."""
  # Generator protocol (Python 3 `__next__` or legacy `next`).
  if hasattr(data, '__next__') or hasattr(data, 'next'):
    return True
  # Keras `Sequence` or a TF dataset iterator (v1 or v2).
  return isinstance(
      data, (Sequence, tf.compat.v1.data.Iterator, tf.data.Iterator))
def make_logs(model, logs, outputs, mode, prefix=''):
  """Computes logs for sending to `on_batch_end` methods."""
  names = model.metrics_names
  if not names or mode not in {ModeKeys.TRAIN, ModeKeys.TEST}:
    # PREDICT mode (or a model without metric names): forward raw outputs.
    logs['outputs'] = outputs
  else:
    # Pair each metric name (optionally prefixed, e.g. 'val_') with its value.
    logs.update(zip((prefix + name for name in names), outputs))
  return logs
@keras_export('keras.callbacks.CallbackList')
class CallbackList:
  """Container abstracting a list of callbacks."""

  def __init__(self,
               callbacks=None,
               add_history=False,
               add_progbar=False,
               model=None,
               **params):
    """Container for `Callback` instances.

    This object wraps a list of `Callback` instances, making it possible
    to call them all at once via a single endpoint
    (e.g. `callback_list.on_epoch_end(...)`).

    Args:
      callbacks: List of `Callback` instances.
      add_history: Whether a `History` callback should be added, if one does not
        already exist in the `callbacks` list.
      add_progbar: Whether a `ProgbarLogger` callback should be added, if one
        does not already exist in the `callbacks` list.
      model: The `Model` these callbacks are used with.
      **params: If provided, parameters will be passed to each `Callback` via
        `Callback.set_params`.
    """
    self.callbacks = tf.nest.flatten(callbacks) if callbacks else []
    self._add_default_callbacks(add_history, add_progbar)

    if model:
      self.set_model(model)
    if params:
      self.set_params(params)

    # Performance optimization: determines if batch hooks need to be called.
    # pylint: disable=protected-access
    # Logs can be passed as raw TF tensors only if *every* callback accepts
    # them; otherwise they must be synced to numpy before dispatch.
    self._supports_tf_logs = all(
        getattr(cb, '_supports_tf_logs', False) for cb in self.callbacks)
    # Same check, restricted to callbacks that actually implement batch-level
    # hooks (batch hooks are the hot path).
    self._batch_hooks_support_tf_logs = all(
        getattr(cb, '_supports_tf_logs', False)
        for cb in self.callbacks
        if cb._implements_train_batch_hooks() or cb
        ._implements_test_batch_hooks() or cb._implements_predict_batch_hooks())

    self._should_call_train_batch_hooks = any(
        cb._implements_train_batch_hooks() for cb in self.callbacks)
    self._should_call_test_batch_hooks = any(
        cb._implements_test_batch_hooks() for cb in self.callbacks)
    self._should_call_predict_batch_hooks = any(
        cb._implements_predict_batch_hooks() for cb in self.callbacks)
    # pylint: enable=protected-access

    self._disallow_batch_hooks_in_ps_strategy()

    # Performance check: Check batch hooks for slowness compared to batch time.
    # Only run check for custom callbacks (i.e. not present in this file).
    self._check_timing = any(
        cbk.__class__.__name__ not in globals() for cbk in self.callbacks)
    # Number of batch timings to collect before comparing hook time vs
    # batch time.
    self._num_batches_for_timing_check = 5
    self._hook_times = {}        # hook name -> list of durations (seconds)
    self._batch_start_time = None
    self._batch_times = []       # per-batch wall-clock durations (seconds)

  def _add_default_callbacks(self, add_history, add_progbar):
    """Adds `Callback`s that are always present."""
    self._progbar = None
    self._history = None

    # Reuse instances already supplied by the caller rather than duplicating.
    for cb in self.callbacks:
      if isinstance(cb, ProgbarLogger):
        self._progbar = cb
      elif isinstance(cb, History):
        self._history = cb

    if self._history is None and add_history:
      self._history = History()
      self.callbacks.append(self._history)

    if self._progbar is None and add_progbar:
      self._progbar = ProgbarLogger(count_mode='steps')
      self.callbacks.append(self._progbar)

  def _process_logs(self, logs, is_batch_hook=False):
    """Turns tensors into numpy arrays or Python scalars if necessary."""
    if logs is None:
      return {}
    # Skip the (potentially expensive, async-blocking) sync when every
    # relevant callback can consume TF tensors directly.
    if self._supports_tf_logs:
      return logs
    if is_batch_hook and self._batch_hooks_support_tf_logs:
      return logs
    return tf_utils.sync_to_numpy_or_python_type(logs)

  def append(self, callback):
    """Appends a `Callback` to the wrapped list."""
    self.callbacks.append(callback)

  def set_params(self, params):
    """Stores `params` and forwards them to every callback."""
    self.params = params
    for callback in self.callbacks:
      callback.set_params(params)

  def set_model(self, model):
    """Stores `model` and forwards it to every callback."""
    self.model = model
    # Keep `model.history` pointing at the managed History callback.
    if self._history:
      model.history = self._history
    for callback in self.callbacks:
      callback.set_model(model)

  def _call_batch_hook(self, mode, hook, batch, logs=None):
    """Helper function for all batch_{begin | end} methods."""
    if not self.callbacks:
      return

    if hook == 'begin':
      self._call_batch_begin_hook(mode, batch, logs)
    elif hook == 'end':
      self._call_batch_end_hook(mode, batch, logs)
    else:
      raise ValueError(
          f'Unrecognized hook: {hook}. Expected values are ["begin", "end"]')

  def _call_batch_begin_hook(self, mode, batch, logs):
    """Helper function for `on_*_batch_begin` methods."""
    hook_name = 'on_{mode}_batch_begin'.format(mode=mode)
    self._call_batch_hook_helper(hook_name, batch, logs)

    if self._check_timing:
      # Timestamp the start of the batch; the matching end hook measures
      # the elapsed batch time from it.
      self._batch_start_time = time.time()

  def _call_batch_end_hook(self, mode, batch, logs):
    """Helper function for `on_*_batch_end` methods."""
    hook_name = 'on_{mode}_batch_end'.format(mode=mode)

    # Batch 0 is excluded from the timing sample (presumably to avoid
    # first-batch one-time costs skewing the average — note batch >= 1).
    if self._check_timing and batch >= 1:
      batch_time = time.time() - self._batch_start_time
      self._batch_times.append(batch_time)

    self._call_batch_hook_helper(hook_name, batch, logs)

    if len(self._batch_times) >= self._num_batches_for_timing_check:
      end_hook_name = hook_name
      begin_hook_name = 'on_{mode}_batch_begin'.format(mode=mode)
      avg_batch_time = sum(self._batch_times) / len(self._batch_times)
      avg_end_hook_time = sum(self._hook_times[end_hook_name]) / len(
          self._hook_times[end_hook_name])
      avg_begin_hook_time = sum(self._hook_times[begin_hook_name]) / len(
          self._hook_times[begin_hook_name])

      # Warn if a hook takes longer, on average, than the batch itself.
      threshold_time = 1.0 * avg_batch_time
      warning_msg = ('Callback method `{hook}` is slow compared to '
                     'the batch time (batch time: {batch_time:.4f}s vs '
                     '`{hook}` time: {hook_time:.4f}s). Check your callbacks.')
      if avg_begin_hook_time > threshold_time:
        logging.warning(warning_msg.format(
            hook=begin_hook_name,
            batch_time=avg_batch_time,
            hook_time=avg_begin_hook_time))
      if avg_end_hook_time > threshold_time:
        logging.warning(warning_msg.format(
            hook=end_hook_name,
            batch_time=avg_batch_time,
            hook_time=avg_end_hook_time))

      # The check runs once; disable timing and drop collected samples.
      self._check_timing = False
      self._batch_start_time = None
      self._batch_times = []
      self._hook_times = {}

  def _call_batch_hook_helper(self, hook_name, batch, logs):
    """Helper function for `on_*_batch_*` methods."""
    if self._check_timing:
      start_time = time.time()

    logs = self._process_logs(logs, is_batch_hook=True)
    for callback in self.callbacks:
      hook = getattr(callback, hook_name)
      hook(batch, logs)

    if self._check_timing:
      if hook_name not in self._hook_times:
        self._hook_times[hook_name] = []
      self._hook_times[hook_name].append(time.time() - start_time)

  def _call_begin_hook(self, mode):
    """Helper function for on_{train|test|predict}_begin methods."""
    if mode == ModeKeys.TRAIN:
      self.on_train_begin()
    elif mode == ModeKeys.TEST:
      self.on_test_begin()
    else:
      self.on_predict_begin()

  def _call_end_hook(self, mode):
    """Helper function for on_{train|test|predict}_end methods."""
    if mode == ModeKeys.TRAIN:
      self.on_train_end()
    elif mode == ModeKeys.TEST:
      self.on_test_end()
    else:
      self.on_predict_end()

  def on_batch_begin(self, batch, logs=None):
    # Legacy alias: treated as a train-batch hook.
    if self._should_call_train_batch_hooks:
      self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)

  def on_batch_end(self, batch, logs=None):
    # Legacy alias: treated as a train-batch hook.
    if self._should_call_train_batch_hooks:
      self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)

  def on_epoch_begin(self, epoch, logs=None):
    """Calls the `on_epoch_begin` methods of its callbacks.

    This function should only be called during TRAIN mode.

    Args:
      epoch: Integer, index of epoch.
      logs: Dict. Currently no data is passed to this argument for this method
        but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_epoch_begin(epoch, logs)

  def on_epoch_end(self, epoch, logs=None):
    """Calls the `on_epoch_end` methods of its callbacks.

    This function should only be called during TRAIN mode.

    Args:
      epoch: Integer, index of epoch.
      logs: Dict, metric results for this training epoch, and for the
        validation epoch if validation is performed. Validation result keys
        are prefixed with `val_`.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_epoch_end(epoch, logs)

  def on_train_batch_begin(self, batch, logs=None):
    """Calls the `on_train_batch_begin` methods of its callbacks.

    Args:
      batch: Integer, index of batch within the current epoch.
      logs: Dict, contains the return value of `model.train_step`. Typically,
        the values of the `Model`'s metrics are returned. Example:
        `{'loss': 0.2, 'accuracy': 0.7}`.
    """
    if self._should_call_train_batch_hooks:
      self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)

  def on_train_batch_end(self, batch, logs=None):
    """Calls the `on_train_batch_end` methods of its callbacks.

    Args:
      batch: Integer, index of batch within the current epoch.
      logs: Dict. Aggregated metric results up until this batch.
    """
    if self._should_call_train_batch_hooks:
      self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)

  def on_test_batch_begin(self, batch, logs=None):
    """Calls the `on_test_batch_begin` methods of its callbacks.

    Args:
      batch: Integer, index of batch within the current epoch.
      logs: Dict, contains the return value of `model.test_step`. Typically,
        the values of the `Model`'s metrics are returned. Example:
        `{'loss': 0.2, 'accuracy': 0.7}`.
    """
    if self._should_call_test_batch_hooks:
      self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)

  def on_test_batch_end(self, batch, logs=None):
    """Calls the `on_test_batch_end` methods of its callbacks.

    Args:
      batch: Integer, index of batch within the current epoch.
      logs: Dict. Aggregated metric results up until this batch.
    """
    if self._should_call_test_batch_hooks:
      self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)

  def on_predict_batch_begin(self, batch, logs=None):
    """Calls the `on_predict_batch_begin` methods of its callbacks.

    Args:
      batch: Integer, index of batch within the current epoch.
      logs: Dict, contains the return value of `model.predict_step`,
        it typically returns a dict with a key 'outputs' containing
        the model's outputs.
    """
    if self._should_call_predict_batch_hooks:
      self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)

  def on_predict_batch_end(self, batch, logs=None):
    """Calls the `on_predict_batch_end` methods of its callbacks.

    Args:
      batch: Integer, index of batch within the current epoch.
      logs: Dict. Aggregated metric results up until this batch.
    """
    if self._should_call_predict_batch_hooks:
      self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)

  def on_train_begin(self, logs=None):
    """Calls the `on_train_begin` methods of its callbacks.

    Args:
      logs: Dict. Currently, no data is passed via this argument
        for this method, but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_train_begin(logs)

  def on_train_end(self, logs=None):
    """Calls the `on_train_end` methods of its callbacks.

    Args:
      logs: Dict. Currently, no data is passed via this argument
        for this method, but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_train_end(logs)

  def on_test_begin(self, logs=None):
    """Calls the `on_test_begin` methods of its callbacks.

    Args:
      logs: Dict. Currently no data is passed to this argument for this method
        but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_test_begin(logs)

  def on_test_end(self, logs=None):
    """Calls the `on_test_end` methods of its callbacks.

    Args:
      logs: Dict. Currently, no data is passed via this argument
        for this method, but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_test_end(logs)

  def on_predict_begin(self, logs=None):
    """Calls the 'on_predict_begin` methods of its callbacks.

    Args:
      logs: Dict. Currently no data is passed to this argument for this method
        but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_predict_begin(logs)

  def on_predict_end(self, logs=None):
    """Calls the `on_predict_end` methods of its callbacks.

    Args:
      logs: Dict. Currently, no data is passed via this argument
        for this method, but that may change in the future.
    """
    logs = self._process_logs(logs)
    for callback in self.callbacks:
      callback.on_predict_end(logs)

  def __iter__(self):
    """Iterates over the wrapped `Callback` instances."""
    return iter(self.callbacks)

  def _disallow_batch_hooks_in_ps_strategy(self):
    """Error out if batch-level callbacks are passed with PSStrategy."""
    # pylint: disable=protected-access
    strategy = tf.distribute.get_strategy()
    if strategy._should_use_with_coordinator:
      unsupported_callbacks = []
      for cb in self.callbacks:
        # These Callbacks can accept RemoteValues directly.
        if getattr(cb, '_supports_tf_logs', False):
          continue
        if (cb._implements_train_batch_hooks() or
            cb._implements_test_batch_hooks() or
            cb._implements_predict_batch_hooks()):
          unsupported_callbacks.append(cb)
      if unsupported_callbacks:
        raise ValueError(
            'Batch-level `Callback`s are not supported with '
            '`ParameterServerStrategy`. Found unsupported '
            f'callbacks: {unsupported_callbacks}')
    # pylint: enable=protected-access
@keras_export('keras.callbacks.Callback')
class Callback:
  """Abstract base class used to build new callbacks.

  Callbacks can be passed to keras methods such as `fit`, `evaluate`, and
  `predict` in order to hook into the various stages of the model training and
  inference lifecycle.

  To create a custom callback, subclass `keras.callbacks.Callback` and override
  the method associated with the stage of interest. See
  https://www.tensorflow.org/guide/keras/custom_callback for more information.

  Example:

  >>> training_finished = False
  >>> class MyCallback(tf.keras.callbacks.Callback):
  ...   def on_train_end(self, logs=None):
  ...     global training_finished
  ...     training_finished = True
  >>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
  >>> model.compile(loss='mean_squared_error')
  >>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
  ...           callbacks=[MyCallback()])
  >>> assert training_finished == True

  If you want to use `Callback` objects in a custom training loop:

  1. You should pack all your callbacks into a single `callbacks.CallbackList`
     so they can all be called together.
  2. You will need to manually call all the `on_*` methods at the appropriate
     locations in your loop. Like this:

  ```
  callbacks = tf.keras.callbacks.CallbackList([...])
  callbacks.append(...)

  callbacks.on_train_begin(...)
  for epoch in range(EPOCHS):
    callbacks.on_epoch_begin(epoch)
    for i, data in dataset.enumerate():
      callbacks.on_train_batch_begin(i)
      batch_logs = model.train_step(data)
      callbacks.on_train_batch_end(i, batch_logs)
    epoch_logs = ...
    callbacks.on_epoch_end(epoch, epoch_logs)
  final_logs=...
  callbacks.on_train_end(final_logs)
  ```

  Attributes:
    params: Dict. Training parameters
      (eg. verbosity, batch size, number of epochs...).
    model: Instance of `keras.models.Model`.
      Reference of the model being trained.

  The `logs` dictionary that callback methods
  take as argument will contain keys for quantities relevant to
  the current batch or epoch (see method-specific docstrings).
  """

  def __init__(self):
    self.validation_data = None  # pylint: disable=g-missing-from-attributes
    self.model = None
    # Whether this Callback should only run on the chief worker in a
    # Multi-Worker setting.
    # TODO(omalleyt): Make this attr public once solution is stable.
    self._chief_worker_only = None
    # When True, `CallbackList` may pass raw TF tensors in `logs` instead of
    # syncing them to numpy first.
    self._supports_tf_logs = False

  def set_params(self, params):
    """Stores the training parameters dict (see `Attributes`)."""
    self.params = params

  def set_model(self, model):
    """Stores a reference to the `Model` this callback is attached to."""
    self.model = model

  # `generic_utils.default` marks the base implementation so that
  # `generic_utils.is_default` (used in `_implements_*_batch_hooks` below)
  # can detect whether a subclass actually overrides the hook.
  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_batch_begin(self, batch, logs=None):
    """A backwards compatibility alias for `on_train_batch_begin`."""

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_batch_end(self, batch, logs=None):
    """A backwards compatibility alias for `on_train_batch_end`."""

  @doc_controls.for_subclass_implementers
  def on_epoch_begin(self, epoch, logs=None):
    """Called at the start of an epoch.

    Subclasses should override for any actions to run. This function should only
    be called during TRAIN mode.

    Args:
        epoch: Integer, index of epoch.
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_epoch_end(self, epoch, logs=None):
    """Called at the end of an epoch.

    Subclasses should override for any actions to run. This function should only
    be called during TRAIN mode.

    Args:
        epoch: Integer, index of epoch.
        logs: Dict, metric results for this training epoch, and for the
          validation epoch if validation is performed. Validation result keys
          are prefixed with `val_`. For training epoch, the values of the
          `Model`'s metrics are returned. Example : `{'loss': 0.2, 'accuracy':
            0.7}`.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_train_batch_begin(self, batch, logs=None):
    """Called at the beginning of a training batch in `fit` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """
    # For backwards compatibility.
    self.on_batch_begin(batch, logs=logs)

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_train_batch_end(self, batch, logs=None):
    """Called at the end of a training batch in `fit` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """
    # For backwards compatibility.
    self.on_batch_end(batch, logs=logs)

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_test_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `evaluate` methods.

    Also called at the beginning of a validation batch in the `fit`
    methods, if validation data is provided.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_test_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `evaluate` methods.

    Also called at the end of a validation batch in the `fit`
    methods, if validation data is provided.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_predict_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `predict` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_predict_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `predict` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """

  @doc_controls.for_subclass_implementers
  def on_train_begin(self, logs=None):
    """Called at the beginning of training.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_train_end(self, logs=None):
    """Called at the end of training.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently the output of the last call to `on_epoch_end()`
          is passed to this argument for this method but that may change in
          the future.
    """

  @doc_controls.for_subclass_implementers
  def on_test_begin(self, logs=None):
    """Called at the beginning of evaluation or validation.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_test_end(self, logs=None):
    """Called at the end of evaluation or validation.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently the output of the last call to
          `on_test_batch_end()` is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_predict_begin(self, logs=None):
    """Called at the beginning of prediction.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_predict_end(self, logs=None):
    """Called at the end of prediction.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  def _implements_train_batch_hooks(self):
    """Determines if this Callback should be called for each train batch."""
    # True when any train-batch hook (or legacy alias) is overridden.
    return (not generic_utils.is_default(self.on_batch_begin) or
            not generic_utils.is_default(self.on_batch_end) or
            not generic_utils.is_default(self.on_train_batch_begin) or
            not generic_utils.is_default(self.on_train_batch_end))

  def _implements_test_batch_hooks(self):
    """Determines if this Callback should be called for each test batch."""
    return (not generic_utils.is_default(self.on_test_batch_begin) or
            not generic_utils.is_default(self.on_test_batch_end))

  def _implements_predict_batch_hooks(self):
    """Determines if this Callback should be called for each predict batch."""
    return (not generic_utils.is_default(self.on_predict_batch_begin) or
            not generic_utils.is_default(self.on_predict_batch_end))
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
  """Callback that accumulates epoch averages of metrics.

  This callback is automatically applied to every Keras model.

  Args:
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is in `on_epoch_end`.
          All others will be averaged in `on_epoch_end`.
  """

  def __init__(self, stateful_metrics=None):
    super(BaseLogger, self).__init__()
    self.stateful_metrics = set(stateful_metrics or [])

  def on_epoch_begin(self, epoch, logs=None):
    # Reset the running totals at the start of every epoch.
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # In case of distribution strategy we can potentially run multiple steps
    # at the same time, we should account for that in the `seen` calculation.
    num_steps = logs.get('num_steps', 1)
    self.seen += batch_size * num_steps

    for key, value in logs.items():
      if key in self.stateful_metrics:
        # Stateful metrics aggregate themselves; keep the latest value.
        self.totals[key] = value
      else:
        # Weight by batch size so the epoch-end average is per-sample.
        self.totals[key] = self.totals.get(key, 0) + value * batch_size

  def on_epoch_end(self, epoch, logs=None):
    if logs is None:
      return
    for key in self.params['metrics']:
      if key not in self.totals:
        continue
      total = self.totals[key]
      # Make value available to next callbacks.
      logs[key] = total if key in self.stateful_metrics else total / self.seen
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
  """Callback that terminates training when a NaN loss is encountered.
  """

  def __init__(self):
    super(TerminateOnNaN, self).__init__()
    # Raw TF tensors are fine here; the loss is synced on demand below.
    self._supports_tf_logs = True

  def on_batch_end(self, batch, logs=None):
    loss = (logs or {}).get('loss')
    if loss is None:
      return
    # Sync only the single scalar we need to inspect.
    loss = tf_utils.sync_to_numpy_or_python_type(loss)
    if np.isnan(loss) or np.isinf(loss):
      io_utils.print_msg(f'Batch {batch}: Invalid loss, terminating training')
      self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Args:
      count_mode: One of `"steps"` or `"samples"`.
          Whether the progress bar should
          count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is.
          All others will be averaged over time (e.g. loss, etc).
          If not provided, defaults to the `Model`'s metrics.

  Raises:
      ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    self._supports_tf_logs = True
    if count_mode == 'samples':
      self.use_steps = False
    elif count_mode == 'steps':
      self.use_steps = True
    else:
      raise ValueError(
          f'Unknown `count_mode`: {count_mode}. '
          'Expected values are ["samples", "steps"]')
    # Defaults to all Model's metrics except for loss.
    self.stateful_metrics = set(stateful_metrics) if stateful_metrics else set()

    self.seen = 0          # Steps or samples seen so far this epoch.
    self.progbar = None    # Lazily created in `_maybe_init_progbar`.
    self.target = None     # Total steps/samples; may be inferred later.
    self.verbose = 1
    self.epochs = 1

    # Model step counters, used to infer `target` when it is unknown.
    self._train_step, self._test_step, self._predict_step = None, None, None
    self._call_batch_hooks = True

    self._called_in_fit = False

  def set_params(self, params):
    self.verbose = params['verbose']
    self.epochs = params['epochs']
    if self.use_steps and 'steps' in params:
      self.target = params['steps']
    elif not self.use_steps and 'samples' in params:
      self.target = params['samples']
    else:
      self.target = None  # Will be inferred at the end of the first epoch.

    # Per-batch updates are only needed for the interactive progbar.
    self._call_batch_hooks = self.verbose == 1
    if self.target is None:
      try:
        self._train_step = self.model._train_counter  # pylint: disable=protected-access
        self._test_step = self.model._test_counter  # pylint: disable=protected-access
        self._predict_step = self.model._predict_counter  # pylint: disable=protected-access
      except AttributeError:
        # No counters available; fall back to counting via batch hooks.
        self._call_batch_hooks = True

  def on_train_begin(self, logs=None):
    # When this logger is called inside `fit`, validation is silent.
    self._called_in_fit = True

  def on_test_begin(self, logs=None):
    if not self._called_in_fit:
      self._reset_progbar()
      self._maybe_init_progbar()

  def on_predict_begin(self, logs=None):
    self._reset_progbar()
    self._maybe_init_progbar()

  def on_epoch_begin(self, epoch, logs=None):
    self._reset_progbar()
    self._maybe_init_progbar()
    if self.verbose and self.epochs > 1:
      io_utils.print_msg(f'Epoch {epoch + 1}/{self.epochs}')

  def on_train_batch_end(self, batch, logs=None):
    self._batch_update_progbar(batch, logs)

  def on_test_batch_end(self, batch, logs=None):
    if not self._called_in_fit:
      self._batch_update_progbar(batch, logs)

  def on_predict_batch_end(self, batch, logs=None):
    # Don't pass prediction results.
    self._batch_update_progbar(batch, None)

  def on_epoch_end(self, epoch, logs=None):
    self._finalize_progbar(logs, self._train_step)

  def on_test_end(self, logs=None):
    if not self._called_in_fit:
      self._finalize_progbar(logs, self._test_step)

  def on_predict_end(self, logs=None):
    self._finalize_progbar(logs, self._predict_step)

  def _reset_progbar(self):
    self.seen = 0
    self.progbar = None

  def _maybe_init_progbar(self):
    """Instantiate a `Progbar` if not yet, and update the stateful metrics."""
    # TODO(rchao): Legacy TF1 code path may use list for
    # `self.stateful_metrics`. Remove "cast to set" when TF1 support is dropped.
    self.stateful_metrics = set(self.stateful_metrics)

    if self.model:
      # Update the existing stateful metrics as `self.model.metrics` may contain
      # updated metrics after `MetricsContainer` is built in the first train
      # step.
      self.stateful_metrics = self.stateful_metrics.union(
          set(m.name for m in self.model.metrics))

    if self.progbar is None:
      self.progbar = Progbar(
          target=self.target,
          verbose=self.verbose,
          stateful_metrics=self.stateful_metrics,
          unit_name='step' if self.use_steps else 'sample')

    self.progbar._update_stateful_metrics(self.stateful_metrics)  # pylint: disable=protected-access

  def _implements_train_batch_hooks(self):
    return self._call_batch_hooks

  def _implements_test_batch_hooks(self):
    return self._call_batch_hooks

  def _implements_predict_batch_hooks(self):
    return self._call_batch_hooks

  def _batch_update_progbar(self, batch, logs=None):
    """Updates the progbar."""
    logs = logs or {}
    self._maybe_init_progbar()
    if self.use_steps:
      self.seen = batch + 1  # One-indexed.
    else:
      # v1 path only.
      logs = copy.copy(logs)
      batch_size = logs.pop('size', 0)
      num_steps = logs.pop('num_steps', 1)
      logs.pop('batch', None)
      add_seen = num_steps * batch_size
      self.seen += add_seen

    if self.verbose == 1:
      # Only block async when verbose = 1.
      logs = tf_utils.sync_to_numpy_or_python_type(logs)
      self.progbar.update(self.seen, list(logs.items()), finalize=False)

  def _finalize_progbar(self, logs, counter):
    logs = tf_utils.sync_to_numpy_or_python_type(logs or {})
    if self.target is None:
      # Infer the target from the model's step counter (or from what was
      # actually seen) so subsequent epochs show a complete bar.
      if counter is not None:
        counter = counter.numpy()
        if not self.use_steps:
          counter *= logs.get('size', 1)
      self.target = counter or self.seen
      self.progbar.target = self.target
    self.progbar.update(self.target, list(logs.items()), finalize=True)
@keras_export('keras.callbacks.History')
class History(Callback):
  """Callback that records events into a `History` object.

  This callback is automatically applied to
  every Keras model. The `History` object
  gets returned by the `fit` method of models.

  Example:

  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=10)
  >>> print(history.params)
  {'verbose': 1, 'epochs': 10, 'steps': 1}
  >>> # check the keys of history object
  >>> print(history.history.keys())
  dict_keys(['loss'])
  """

  def __init__(self):
    super(History, self).__init__()
    self.history = {}

  def on_train_begin(self, logs=None):
    # A fresh list of epoch indices for each call to `fit`.
    self.epoch = []

  def on_epoch_end(self, epoch, logs=None):
    self.epoch.append(epoch)
    for metric, value in (logs or {}).items():
      self.history.setdefault(metric, []).append(value)

    # Set the history attribute on the model after the epoch ends. This will
    # make sure that the state which is set is the latest one.
    self.model.history = self
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
  """Callback to save the Keras model or model weights at some frequency.
  `ModelCheckpoint` callback is used in conjunction with training using
  `model.fit()` to save a model or weights (in a checkpoint file) at some
  interval, so the model or weights can be loaded later to continue the training
  from the state saved.
  A few options this callback provides include:
  - Whether to only keep the model that has achieved the "best performance" so
    far, or whether to save the model at the end of every epoch regardless of
    performance.
  - Definition of 'best'; which quantity to monitor and whether it should be
    maximized or minimized.
  - The frequency it should save at. Currently, the callback supports saving at
    the end of every epoch, or after a fixed number of training batches.
  - Whether only weights are saved, or the whole model is saved.
  Note: If you get `WARNING:tensorflow:Can save best model only with <name>
  available, skipping` see the description of the `monitor` argument for
  details on how to get this right.
  Example:
  ```python
  model.compile(loss=..., optimizer=...,
                metrics=['accuracy'])
  EPOCHS = 10
  checkpoint_filepath = '/tmp/checkpoint'
  model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
      filepath=checkpoint_filepath,
      save_weights_only=True,
      monitor='val_accuracy',
      mode='max',
      save_best_only=True)
  # Model weights are saved at the end of every epoch, if it's the best seen
  # so far.
  model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])
  # The model weights (that are considered the best) are loaded into the model.
  model.load_weights(checkpoint_filepath)
  ```
  Args:
    filepath: string or `PathLike`, path to save the model file. e.g.
      filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
      can contain named formatting options, which will be filled with the value
      of `epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if
      `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model
      checkpoints will be saved with the epoch number and the validation loss
      in the filename. The directory of the filepath should not be reused by
      any other callbacks to avoid conflicts.
    monitor: The metric name to monitor. Typically the metrics are set by the
      `Model.compile` method. Note:
      * Prefix the name with `"val_`" to monitor validation metrics.
      * Use `"loss"` or "`val_loss`" to monitor the model's total loss.
      * If you specify metrics as strings, like `"accuracy"`, pass the same
        string (with or without the `"val_"` prefix).
      * If you pass `metrics.Metric` objects, `monitor` should be set to
        `metric.name`
      * If you're not sure about the metric names you can check the contents
        of the `history.history` dictionary returned by
        `history = model.fit()`
      * Multi-output models set additional prefixes on the metric names.
    verbose: verbosity mode, 0 or 1.
    save_best_only: if `save_best_only=True`, it only saves when the model
      is considered the "best" and the latest best model according to the
      quantity monitored will not be overwritten. If `filepath` doesn't
      contain formatting options like `{epoch}` then `filepath` will be
      overwritten by each new better model.
    mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the
      decision to overwrite the current save file is made based on either
      the maximization or the minimization of the monitored quantity.
      For `val_acc`, this should be `max`, for `val_loss` this should be
      `min`, etc. In `auto` mode, the mode is set to `max` if the quantities
      monitored are 'acc' or start with 'fmeasure' and are set to `min` for
      the rest of the quantities.
    save_weights_only: if True, then only the model's weights will be saved
      (`model.save_weights(filepath)`), else the full model is saved
      (`model.save(filepath)`).
    save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
      the model after each epoch. When using integer, the callback saves the
      model at end of this many batches. If the `Model` is compiled with
      `steps_per_execution=N`, then the saving criteria will be
      checked every Nth batch. Note that if the saving isn't aligned to
      epochs, the monitored metric may potentially be less reliable (it
      could reflect as little as 1 batch, since the metrics get reset every
      epoch). Defaults to `'epoch'`.
    options: Optional `tf.train.CheckpointOptions` object if
      `save_weights_only` is true or optional `tf.saved_model.SaveOptions`
      object if `save_weights_only` is false.
    initial_value_threshold: Floating point initial "best" value of the metric
      to be monitored. Only applies if `save_best_value=True`. Only overwrites
      the model weights already saved if the performance of current
      model is better than this value.
    **kwargs: Additional arguments for backwards compatibility. Possible key
      is `period`.
  """
  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               save_freq='epoch',
               options=None,
               initial_value_threshold=None,
               **kwargs):
    super(ModelCheckpoint, self).__init__()
    self._supports_tf_logs = True
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = io_utils.path_to_string(filepath)
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.save_freq = save_freq
    self.epochs_since_last_save = 0
    self._batches_seen_since_last_saving = 0
    self._last_batch_seen = 0
    # `best` starts at the user-provided threshold (may be None; resolved
    # below once the monitoring direction is known).
    self.best = initial_value_threshold
    if save_weights_only:
      if options is None or isinstance(
          options, tf.train.CheckpointOptions):
        self._options = options or tf.train.CheckpointOptions()
      else:
        raise TypeError(
            'If save_weights_only is True, then `options` must be '
            f'either None or a tf.train.CheckpointOptions. Got {options}.')
    else:
      if options is None or isinstance(options, tf.saved_model.SaveOptions):
        self._options = options or tf.saved_model.SaveOptions()
      else:
        raise TypeError(
            'If save_weights_only is False, then `options` must be '
            f'either None or a tf.saved_model.SaveOptions. Got {options}.')
    # Deprecated field `load_weights_on_restart` is for loading the checkpoint
    # file from `filepath` at the start of `model.fit()`
    # TODO(rchao): Remove the arg during next breaking release.
    if 'load_weights_on_restart' in kwargs:
      self.load_weights_on_restart = kwargs['load_weights_on_restart']
      logging.warning('`load_weights_on_restart` argument is deprecated. '
                      'Please use `model.load_weights()` for loading weights '
                      'before the start of `model.fit()`.')
    else:
      self.load_weights_on_restart = False
    # Deprecated field `period` is for the number of epochs between which
    # the model is saved.
    if 'period' in kwargs:
      self.period = kwargs['period']
      logging.warning('`period` argument is deprecated. Please use `save_freq` '
                      'to specify the frequency in number of batches seen.')
    else:
      self.period = 1
    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'
    if mode == 'min':
      self.monitor_op = np.less
      if self.best is None:
        self.best = np.Inf
    elif mode == 'max':
      self.monitor_op = np.greater
      if self.best is None:
        self.best = -np.Inf
    else:
      # 'auto': infer direction from the metric name.
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        if self.best is None:
          self.best = -np.Inf
      else:
        self.monitor_op = np.less
        if self.best is None:
          self.best = np.Inf
    if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
      raise ValueError(
          f'Unrecognized save_freq: {self.save_freq}. '
          'Expected save_freq are "epoch" or integer')
    # Only the chief worker writes model checkpoints, but all workers
    # restore checkpoint at on_train_begin().
    self._chief_worker_only = False
  def on_train_begin(self, logs=None):
    if self.load_weights_on_restart:
      filepath_to_load = (
          self._get_most_recently_modified_file_matching_pattern(self.filepath))
      if (filepath_to_load is not None and
          self._checkpoint_exists(filepath_to_load)):
        try:
          # `filepath` may contain placeholders such as `{epoch:02d}`, and
          # thus it attempts to load the most recently modified file with file
          # name matching the pattern.
          self.model.load_weights(filepath_to_load)
        except (IOError, ValueError) as e:
          # Chain the cause so the original traceback is preserved.
          raise ValueError(
              f'Error loading file from {filepath_to_load}. Reason: {e}') from e
  def _implements_train_batch_hooks(self):
    # Only call batch hooks when saving on batch
    return self.save_freq != 'epoch'
  def on_train_batch_end(self, batch, logs=None):
    if self._should_save_on_batch(batch):
      self._save_model(epoch=self._current_epoch, batch=batch, logs=logs)
  def on_epoch_begin(self, epoch, logs=None):
    # Remember the epoch so batch-level saves can report it.
    self._current_epoch = epoch
  def on_epoch_end(self, epoch, logs=None):
    self.epochs_since_last_save += 1
    # pylint: disable=protected-access
    if self.save_freq == 'epoch':
      self._save_model(epoch=epoch, batch=None, logs=logs)
  def _should_save_on_batch(self, batch):
    """Handles batch-level saving logic, supports steps_per_execution."""
    if self.save_freq == 'epoch':
      return False
    if batch <= self._last_batch_seen:  # New epoch.
      add_batches = batch + 1  # batches are zero-indexed.
    else:
      add_batches = batch - self._last_batch_seen
    self._batches_seen_since_last_saving += add_batches
    self._last_batch_seen = batch
    if self._batches_seen_since_last_saving >= self.save_freq:
      self._batches_seen_since_last_saving = 0
      return True
    return False
  def _save_model(self, epoch, batch, logs):
    """Saves the model.
    Args:
        epoch: the epoch this iteration is in.
        batch: the batch this iteration is in. `None` if the `save_freq`
          is set to `epoch`.
        logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
    """
    logs = logs or {}
    if isinstance(self.save_freq,
                  int) or self.epochs_since_last_save >= self.period:
      # Block only when saving interval is reached.
      logs = tf_utils.sync_to_numpy_or_python_type(logs)
      self.epochs_since_last_save = 0
      filepath = self._get_file_path(epoch, batch, logs)
      try:
        if self.save_best_only:
          current = logs.get(self.monitor)
          if current is None:
            logging.warning('Can save best model only with %s available, '
                            'skipping.', self.monitor)
          else:
            if self.monitor_op(current, self.best):
              if self.verbose > 0:
                io_utils.print_msg(
                    f'\nEpoch {epoch + 1}: {self.monitor} improved '
                    f'from {self.best:.5f} to {current:.5f}, '
                    f'saving model to {filepath}')
              self.best = current
              if self.save_weights_only:
                self.model.save_weights(
                    filepath, overwrite=True, options=self._options)
              else:
                self.model.save(filepath, overwrite=True, options=self._options)
            else:
              if self.verbose > 0:
                io_utils.print_msg(
                    f'\nEpoch {epoch + 1}: '
                    f'{self.monitor} did not improve from {self.best:.5f}')
        else:
          if self.verbose > 0:
            io_utils.print_msg(
                f'\nEpoch {epoch + 1}: saving model to {filepath}')
          if self.save_weights_only:
            self.model.save_weights(
                filepath, overwrite=True, options=self._options)
          else:
            self.model.save(filepath, overwrite=True, options=self._options)
        self._maybe_remove_file()
      except IsADirectoryError as e:  # h5py 3.x
        raise IOError('Please specify a non-directory filepath for '
                      'ModelCheckpoint. Filepath used is an existing '
                      f'directory: {filepath}') from e
      except IOError as e:  # h5py 2.x
        # `e.errno` appears to be `None` so checking the content of `e.args[0]`.
        if 'is a directory' in str(e.args[0]).lower():
          # Fixed: the message previously contained a stray literal `f`
          # before the path (`directory: f{filepath}`).
          raise IOError('Please specify a non-directory filepath for '
                        'ModelCheckpoint. Filepath used is an existing '
                        f'directory: {filepath}') from e
        # Re-throw the error for any other causes.
        raise e
  def _get_file_path(self, epoch, batch, logs):
    """Returns the file path for checkpoint."""
    # pylint: disable=protected-access
    try:
      # `filepath` may contain placeholders such as `{epoch:02d}`,`{batch:02d}`
      # and `{mape:.2f}`. A mismatch between logged metrics and the path's
      # placeholders can cause formatting to fail.
      if batch is None or 'batch' in logs:
        file_path = self.filepath.format(epoch=epoch + 1, **logs)
      else:
        file_path = self.filepath.format(
            epoch=epoch + 1, batch=batch + 1, **logs)
    except KeyError as e:
      raise KeyError(
          f'Failed to format this callback filepath: "{self.filepath}". '
          f'Reason: {e}') from e
    self._write_filepath = distributed_file_utils.write_filepath(
        file_path, self.model.distribute_strategy)
    return self._write_filepath
  def _maybe_remove_file(self):
    # Remove the checkpoint directory in multi-worker training where this worker
    # should not checkpoint. It is a dummy directory previously saved for sync
    # distributed training.
    distributed_file_utils.remove_temp_dir_with_filepath(
        self._write_filepath, self.model.distribute_strategy)
  def _checkpoint_exists(self, filepath):
    """Returns whether the checkpoint `filepath` refers to exists."""
    if filepath.endswith('.h5'):
      return tf.io.gfile.exists(filepath)
    tf_saved_model_exists = tf.io.gfile.exists(filepath)
    tf_weights_only_checkpoint_exists = tf.io.gfile.exists(
        filepath + '.index')
    return tf_saved_model_exists or tf_weights_only_checkpoint_exists
  def _get_most_recently_modified_file_matching_pattern(self, pattern):
    """Returns the most recently modified filepath matching pattern.
    Pattern may contain python formatting placeholder. If
    `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
    check for most recently modified one that matches the pattern.
    In the rare case where there are more than one pattern-matching file having
    the same modified time that is most recent among all, return the filepath
    that is largest (by `>` operator, lexicographically using the numeric
    equivalents). This provides a tie-breaker when multiple files are most
    recent. Note that a larger `filepath` can sometimes indicate a later time of
    modification (for instance, when epoch/batch is used as formatting option),
    but not necessarily (when accuracy or loss is used). The tie-breaker is
    put in the logic as best effort to return the most recent, and to avoid
    nondeterministic result.
    Modified time of a file is obtained with `os.path.getmtime()`.
    This utility function is best demonstrated via an example:
    ```python
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      # Write something to each of the files
    self.assertEqual(
        _get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])
    ```
    Args:
        pattern: The file pattern that may optionally contain python placeholder
          such as `{epoch:02d}`.
    Returns:
        The most recently modified file's full filepath matching `pattern`. If
        `pattern` does not contain any placeholder, this returns the filepath
        that
        exactly matches `pattern`. Returns `None` if no match is found.
    """
    dir_name = os.path.dirname(pattern)
    base_name = os.path.basename(pattern)
    base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
    # If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
    # use that as it is more robust than `os.path.getmtime()`.
    latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name)
    if latest_tf_checkpoint is not None and re.match(
        base_name_regex, os.path.basename(latest_tf_checkpoint)):
      return latest_tf_checkpoint
    latest_mod_time = 0
    file_path_with_latest_mod_time = None
    n_file_with_latest_mod_time = 0
    file_path_with_largest_file_name = None
    if tf.io.gfile.exists(dir_name):
      for file_name in os.listdir(dir_name):
        # Only consider if `file_name` matches the pattern.
        if re.match(base_name_regex, file_name):
          file_path = os.path.join(dir_name, file_name)
          mod_time = os.path.getmtime(file_path)
          if (file_path_with_largest_file_name is None or
              file_path > file_path_with_largest_file_name):
            file_path_with_largest_file_name = file_path
          if mod_time > latest_mod_time:
            latest_mod_time = mod_time
            file_path_with_latest_mod_time = file_path
            # In the case a file with later modified time is found, reset
            # the counter for the number of files with latest modified time.
            n_file_with_latest_mod_time = 1
          elif mod_time == latest_mod_time:
            # In the case a file has modified time tied with the most recent,
            # increment the counter for the number of files with latest modified
            # time by 1.
            n_file_with_latest_mod_time += 1
    if n_file_with_latest_mod_time == 1:
      # Return the sole file that has most recent modified time.
      return file_path_with_latest_mod_time
    else:
      # If there are more than one file having latest modified time, return
      # the file path with the largest file name.
      return file_path_with_largest_file_name
@keras_export('keras.callbacks.BackupAndRestore', v1=[])
class BackupAndRestore(Callback):
  """Callback to back up and restore the training state.
  `BackupAndRestore` callback is intended to recover training from an
  interruption that has happened in the middle of a `Model.fit` execution, by
  backing up the training states in a temporary checkpoint file (with the help
  of a `tf.train.CheckpointManager`), at the end of each epoch. Each backup
  overwrites the previously written checkpoint file, so at any given time there
  is at most one such checkpoint file for backup/restoring purpose.
  If training restarts before completion, the training state (which includes the
  `Model` weights and epoch number) is restored to the most recently saved state
  at the beginning of a new `Model.fit` run. At the completion of a `Model.fit`
  run, the temporary checkpoint file is deleted.
  Note that the user is responsible to bring jobs back after the interruption.
  This callback is important for the backup and restore mechanism for fault
  tolerance purpose, and the model to be restored from a previous checkpoint is
  expected to be the same as the one used to back up. If user changes arguments
  passed to compile or fit, the checkpoint saved for fault tolerance can become
  invalid.
  Note:
  1. This callback is not compatible with disabling eager execution.
  2. A checkpoint is saved at the end of each epoch. After restoring,
  `Model.fit` redoes any partial work during the unfinished epoch in which the
  training got restarted (so the work done before the interruption doesn't
  affect the final model state).
  3. This works for both single worker and multi-worker modes. When `Model.fit`
  is used with `tf.distribute`, it supports `tf.distribute.MirroredStrategy`,
  `tf.distribute.MultiWorkerMirroredStrategy`, `tf.distribute.TPUStrategy`, and
  `tf.distribute.experimental.ParameterServerStrategy`.
  Example:
  >>> class InterruptingCallback(tf.keras.callbacks.Callback):
  ...   def on_epoch_begin(self, epoch, logs=None):
  ...     if epoch == 4:
  ...       raise RuntimeError('Interrupting!')
  >>> callback = tf.keras.callbacks.experimental.BackupAndRestore(
  ... backup_dir="/tmp/backup")
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> try:
  ...   model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
  ...             batch_size=1, callbacks=[callback, InterruptingCallback()],
  ...             verbose=0)
  ... except:
  ...   pass
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
  ...                     batch_size=1, callbacks=[callback], verbose=0)
  >>> # Only 6 more epochs are run, since first training got interrupted at
  >>> # zero-indexed epoch 4, second training will continue from 4 to 9.
  >>> len(history.history['loss'])
  6
  Args:
      backup_dir: String, path to store the checkpoint.
        e.g. backup_dir = os.path.join(working_dir, 'backup')
        This is the directory in which the system stores temporary files to
        recover the model from jobs terminated unexpectedly. The directory
        cannot be reused elsewhere to store other files, e.g. by
        BackupAndRestore callback of another training, or by another callback
        (ModelCheckpoint) of the same training.
  """
  def __init__(self, backup_dir):
    super(BackupAndRestore, self).__init__()
    self.backup_dir = backup_dir
    self._supports_tf_logs = True
    # Strategies under which backup/restore is known to work; checked in
    # on_train_begin().
    self._supported_strategies = (
        tf.distribute.MirroredStrategy,
        tf.distribute.MultiWorkerMirroredStrategy,
        tf.distribute.experimental.TPUStrategy, tf.distribute.TPUStrategy,
        tf.distribute.experimental.ParameterServerStrategy)
    if not tf.executing_eagerly():
      if tf.inside_function():
        raise ValueError('This Callback\'s method contains Python state and '
                         'should be called outside of `tf.function`s.')
      else:  # Legacy graph mode:
        raise ValueError(
            'BackupAndRestore only supports eager mode. In graph '
            'mode, consider using ModelCheckpoint to manually save '
            'and restore weights with `model.load_weights()` and by '
            'providing `initial_epoch` in `model.fit()` for fault tolerance.')
    # Only the chief worker writes model checkpoints, but all workers
    # restore checkpoint at on_train_begin().
    self._chief_worker_only = False
  def on_train_begin(self, logs=None):
    # TrainingState is used to manage the training state needed for
    # failure-recovery of a worker in training.
    # pylint: disable=protected-access
    if self.model._distribution_strategy and not isinstance(
        self.model.distribute_strategy, self._supported_strategies):
      raise NotImplementedError(
          f'{type(self.model.distribute_strategy)} is not supported yet. '
          'Currently BackupAndRestore callback only supports empty strategy, '
          'MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy.')
    self.model._training_state = (
        worker_training_state.WorkerTrainingState(self.model, self.backup_dir))
    self._training_state = self.model._training_state
    # Restore weights/epoch from the backup checkpoint, if one exists.
    self._training_state.restore()
  def on_train_end(self, logs=None):
    # pylint: disable=protected-access
    # On exit of training, delete the training state backup file that was saved
    # for the purpose of worker recovery.
    self._training_state.delete_backup()
    # Clean up the training state.
    del self._training_state
    del self.model._training_state
  def on_epoch_end(self, epoch, logs=None):
    # Back up the model and current epoch for possible future recovery.
    self._training_state.back_up(epoch)
@keras_export('keras.callbacks.experimental.BackupAndRestore', v1=[])
@deprecation.deprecated_endpoints(
    'keras.callbacks.experimental.BackupAndRestore')
class BackupAndRestoreExperimental(BackupAndRestore):
  """Deprecated. Please use `tf.keras.callbacks.BackupAndRestore` instead.
  Caution: the `tf.keras.callbacks.experimental.BackupAndRestore` endpoint is
  deprecated and will be removed in a future release; use
  `tf.keras.callbacks.BackupAndRestore` instead.
  """
  def __init__(self, *args, **kwargs):
    # Emit the deprecation notice, then defer entirely to the supported
    # implementation.
    logging.warning(
        '`tf.keras.callbacks.experimental.BackupAndRestore` endpoint is '
        'deprecated and will be removed in a future release. Please use '
        '`tf.keras.callbacks.BackupAndRestore`.')
    super().__init__(*args, **kwargs)
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored metric has stopped improving.
  Assuming the goal of a training is to minimize the loss. With this, the
  metric to be monitored would be `'loss'`, and mode would be `'min'`. A
  `model.fit()` training loop will check at end of every epoch whether
  the loss is no longer decreasing, considering the `min_delta` and
  `patience` if applicable. Once it's found no longer decreasing,
  `model.stop_training` is marked True and the training terminates.
  The quantity to be monitored needs to be available in `logs` dict.
  To make it so, pass the loss or metrics at `model.compile()`.
  Args:
    monitor: Quantity to be monitored.
    min_delta: Minimum change in the monitored quantity to qualify as an
      improvement, i.e. an absolute change of less than min_delta, will
      count as no improvement.
    patience: Number of epochs with no improvement after which training
      will be stopped.
    verbose: verbosity mode.
    mode: One of `{"auto", "min", "max"}`. In `min` mode, training will stop
      when the quantity monitored has stopped decreasing; in `"max"` mode it
      will stop when the quantity monitored has stopped increasing; in
      `"auto"` mode, the direction is automatically inferred from the name
      of the monitored quantity.
    baseline: Baseline value for the monitored quantity. Training will stop
      if the model doesn't show improvement over the baseline.
    restore_best_weights: Whether to restore model weights from the epoch
      with the best value of the monitored quantity. If False, the model
      weights obtained at the last step of training are used. An epoch will
      be restored regardless of the performance relative to the `baseline`.
      If no epoch improves on `baseline`, training will run for `patience`
      epochs and restore weights from the best epoch in that set.
  Example:
  >>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
  >>> # This callback will stop the training when there is no improvement in
  >>> # the loss for three consecutive epochs.
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=10, batch_size=1, callbacks=[callback],
  ...                     verbose=0)
  >>> len(history.history['loss'])  # Only 4 epochs are run.
  4
  """
  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None
    if mode not in ('auto', 'min', 'max'):
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'
    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    else:
      # 'auto': infer the direction from the metric name.
      metric_increases = (self.monitor.endswith('acc') or
                          self.monitor.endswith('accuracy') or
                          self.monitor.endswith('auc'))
      self.monitor_op = np.greater if metric_increases else np.less
    # Sign `min_delta` so that `value - min_delta` moves against the
    # improvement direction, making the improvement test strict by min_delta.
    magnitude = abs(min_delta)
    self.min_delta = magnitude if self.monitor_op == np.greater else -magnitude
  def on_train_begin(self, logs=None):
    # Allow instances to be re-used across multiple `fit` calls.
    self.wait = 0
    self.stopped_epoch = 0
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf
    self.best_weights = None
    self.best_epoch = 0
  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.restore_best_weights and self.best_weights is None:
      # Keep the first epoch's weights so there is always something to
      # restore, even if no epoch ever makes progress.
      self.best_weights = self.model.get_weights()
    self.wait += 1
    if self._is_improvement(current, self.best):
      self.best = current
      self.best_epoch = epoch
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
      beats_baseline = (
          self.baseline is None or
          self._is_improvement(current, self.baseline))
      if beats_baseline:
        # Only restart the patience counter when both the previous best and
        # the baseline have been beaten.
        self.wait = 0
    # Only check after the first epoch.
    if self.wait >= self.patience and epoch > 0:
      self.stopped_epoch = epoch
      self.model.stop_training = True
      if self.restore_best_weights and self.best_weights is not None:
        if self.verbose > 0:
          io_utils.print_msg(
              'Restoring model weights from the end of the best epoch: '
              f'{self.best_epoch + 1}.')
        self.model.set_weights(self.best_weights)
  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      io_utils.print_msg(
          f'Epoch {self.stopped_epoch + 1}: early stopping')
  def get_monitor_value(self, logs):
    """Returns the monitored value from `logs`, warning when it is missing."""
    logs = logs or {}
    value = logs.get(self.monitor)
    if value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return value
  def _is_improvement(self, monitor_value, reference_value):
    # Strict comparison offset by the signed `min_delta`.
    return self.monitor_op(monitor_value - self.min_delta, reference_value)
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
  """Callback used to stream events to a server.
  Requires the `requests` library.
  Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
  HTTP POST, with a `data` argument which is a
  JSON-encoded dictionary of event data.
  If `send_as_json=True`, the content type of the request will be
  `"application/json"`.
  Otherwise the serialized JSON will be sent within a form.
  Args:
    root: String; root url of the target server.
    path: String; path relative to `root` to which the events will be sent.
    field: String; JSON field under which the data will be stored.
      The field is used only if the payload is sent within a form
      (i.e. send_as_json is set to False).
    headers: Dictionary; optional custom HTTP headers.
    send_as_json: Boolean; whether the request should be
      sent as `"application/json"`.
  """
  def __init__(self,
               root='http://localhost:9000',
               path='/publish/epoch/end/',
               field='data',
               headers=None,
               send_as_json=False):
    super(RemoteMonitor, self).__init__()
    self.root = root
    self.path = path
    self.field = field
    self.headers = headers
    self.send_as_json = send_as_json
  def on_epoch_end(self, epoch, logs=None):
    if requests is None:
      raise ImportError('RemoteMonitor requires the `requests` library.')
    logs = logs or {}
    # Unwrap numpy scalars/arrays so every value in the payload is
    # JSON-serializable.
    payload = {'epoch': epoch}
    for key, value in logs.items():
      if isinstance(value, (np.ndarray, np.generic)):
        payload[key] = value.item()
      else:
        payload[key] = value
    target_url = self.root + self.path
    try:
      if self.send_as_json:
        requests.post(target_url, json=payload, headers=self.headers)
      else:
        requests.post(target_url, {self.field: json.dumps(payload)},
                      headers=self.headers)
    except requests.exceptions.RequestException:
      # Best-effort monitoring: never let an unreachable server kill training.
      logging.warning('Warning: could not reach RemoteMonitor '
                      'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
  """Learning rate scheduler.
  At the beginning of every epoch, this callback gets the updated learning rate
  value from `schedule` function provided at `__init__`, with the current epoch
  and current learning rate, and applies the updated learning rate
  on the optimizer.
  Args:
    schedule: a function that takes an epoch index (integer, indexed from 0)
      and current learning rate (float) as inputs and returns a new
      learning rate as output (float).
    verbose: int. 0: quiet, 1: update messages.
  Example:
  >>> # This function keeps the initial learning rate for the first ten epochs
  >>> # and decreases it exponentially after that.
  >>> def scheduler(epoch, lr):
  ...   if epoch < 10:
  ...     return lr
  ...   else:
  ...     return lr * tf.math.exp(-0.1)
  >>>
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> round(model.optimizer.lr.numpy(), 5)
  0.01
  >>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=15, callbacks=[callback], verbose=0)
  >>> round(model.optimizer.lr.numpy(), 5)
  0.00607
  """
  def __init__(self, schedule, verbose=0):
    super(LearningRateScheduler, self).__init__()
    self.schedule = schedule
    self.verbose = verbose
  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:
      # New API: the schedule receives (epoch, current_lr).
      current_lr = float(backend.get_value(self.model.optimizer.lr))
      lr = self.schedule(epoch, current_lr)
    except TypeError:
      # Old one-argument API, kept for backward compatibility.
      lr = self.schedule(epoch)
    if not isinstance(lr, (tf.Tensor, float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       f'should be float. Got: {lr}')
    if isinstance(lr, tf.Tensor) and not lr.dtype.is_floating:
      raise ValueError(
          f'The dtype of `lr` Tensor should be float. Got: {lr.dtype}')
    backend.set_value(self.model.optimizer.lr, backend.get_value(lr))
    if self.verbose > 0:
      io_utils.print_msg(
          f'\nEpoch {epoch + 1}: LearningRateScheduler setting learning '
          f'rate to {lr}.')
  def on_epoch_end(self, epoch, logs=None):
    # Surface the effective learning rate in the epoch logs/history.
    logs = logs or {}
    logs['lr'] = backend.get_value(self.model.optimizer.lr)
def keras_model_summary(name, data, step=None):
  """Writes a Keras model's JSON configuration as a Summary.

  The serialized model configuration lets the TensorBoard graph plugin render
  a conceptual graph, as opposed to a graph of ops. If the model fails to
  serialize as JSON, the failure is logged and ignored.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A Keras Model to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  """
  # Serialize first: if this fails there is nothing to write, and a broken
  # model config must never interrupt user code.
  try:
    json_string = data.to_json()
  except Exception as exc:  # pylint: disable=broad-except
    logging.warning('Model failed to serialize as JSON. Ignoring... %s', exc)
    return False
  summary_metadata = tf.compat.v1.SummaryMetadata()
  # Hard coded plugin name; refer to go/tb-plugin-name-hardcode for the
  # rationale.
  summary_metadata.plugin_data.plugin_name = 'graph_keras_model'
  summary_metadata.plugin_data.content = b'1'  # plugin payload version number
  with tf.summary.experimental.summary_scope(
      name, 'graph_keras_model', [data, step]) as (tag, _):
    with tf.device('cpu:0'):
      tensor = tf.constant(json_string, dtype=tf.string)
      return tf.summary.write(
          tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
  # pylint: disable=line-too-long
  """Enable visualizations for TensorBoard.
  TensorBoard is a visualization tool provided with TensorFlow.
  This callback logs events for TensorBoard, including:
  * Metrics summary plots
  * Training graph visualization
  * Activation histograms
  * Sampled profiling
  When used in `Model.evaluate`, in addition to epoch summaries, there will be
  a summary that records evaluation metrics vs `Model.optimizer.iterations`
  written. The metric names will be prepended with `evaluation`, with
  `Model.optimizer.iterations` being the step in the visualized TensorBoard.
  If you have installed TensorFlow with pip, you should be able
  to launch TensorBoard from the command line:
  ```
  tensorboard --logdir=path_to_your_logs
  ```
  You can find more information about TensorBoard
  [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
  Args:
    log_dir: the path of the directory where to save the log files to be
      parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs')
      This directory should not be reused by any other callbacks.
    histogram_freq: frequency (in epochs) at which to compute activation and
      weight histograms for the layers of the model. If set to 0, histograms
      won't be computed. Validation data (or split) must be specified for
      histogram visualizations.
    write_graph: whether to visualize the graph in TensorBoard. The log file
      can become quite large when write_graph is set to True.
    write_images: whether to write model weights to visualize as image in
      TensorBoard.
    write_steps_per_second: whether to log the training steps per second into
      Tensorboard. This supports both epoch and batch frequency logging.
    update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
      writes the losses and metrics to TensorBoard after each batch. The same
      applies for `'epoch'`. If using an integer, let's say `1000`, the
      callback will write the metrics and losses to TensorBoard every 1000
      batches. Note that writing too frequently to TensorBoard can slow down
      your training.
    profile_batch: Profile the batch(es) to sample compute characteristics.
      profile_batch must be a non-negative integer or a tuple of integers.
      A pair of positive integers signify a range of batches to profile.
      By default, profiling is disabled.
    embeddings_freq: frequency (in epochs) at which embedding layers will be
      visualized. If set to 0, embeddings won't be visualized.
    embeddings_metadata: Dictionary which maps embedding layer names to the
      filename of a file in which to save metadata for the embedding layer.
      In case the same metadata file is to be
      used for all embedding layers, a single filename can be passed.
  Examples:
  Basic usage:
  ```python
  tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  # Then run the tensorboard command to view the visualizations.
  ```
  Custom batch-level summaries in a subclassed Model:
  ```python
  class MyModel(tf.keras.Model):
    def build(self, _):
      self.dense = tf.keras.layers.Dense(10)
    def call(self, x):
      outputs = self.dense(x)
      tf.summary.histogram('outputs', outputs)
      return outputs
  model = MyModel()
  model.compile('sgd', 'mse')
  # Make sure to set `update_freq=N` to log a batch-level summary every N batches.
  # In addition to any `tf.summary` contained in `Model.call`, metrics added in
  # `Model.compile` will be logged every N batches.
  tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
  model.fit(x_train, y_train, callbacks=[tb_callback])
  ```
  Custom batch-level summaries in a Functional API Model:
  ```python
  def my_summary(x):
    tf.summary.histogram('x', x)
    return x
  inputs = tf.keras.Input(10)
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Lambda(my_summary)(x)
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', 'mse')
  # Make sure to set `update_freq=N` to log a batch-level summary every N batches.
  # In addition to any `tf.summary` contained in `Model.call`, metrics added in
  # `Model.compile` will be logged every N batches.
  tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
  model.fit(x_train, y_train, callbacks=[tb_callback])
  ```
  Profiling:
  ```python
  # Profile a single batch, e.g. the 5th batch.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir='./logs', profile_batch=5)
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  # Profile a range of batches, e.g. from 10 to 20.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir='./logs', profile_batch=(10,20))
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  ```
  """
  # pylint: enable=line-too-long

  def __init__(self,
               log_dir='logs',
               histogram_freq=0,
               write_graph=True,
               write_images=False,
               write_steps_per_second=False,
               update_freq='epoch',
               profile_batch=0,
               embeddings_freq=0,
               embeddings_metadata=None,
               **kwargs):
    """Initializes the callback. See the class docstring for argument details."""
    super(TensorBoard, self).__init__()
    self._supports_tf_logs = True
    self._validate_kwargs(kwargs)
    self.log_dir = io_utils.path_to_string(log_dir)
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    self.write_steps_per_second = write_steps_per_second
    # 'batch' is normalized to 1 so update_freq is always 'epoch' or an int.
    self.update_freq = 1 if update_freq == 'batch' else update_freq
    self.embeddings_freq = embeddings_freq
    self.embeddings_metadata = embeddings_metadata
    self._init_profile_batch(profile_batch)
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._train_accumulated_time = 0
    self._batch_start_time = 0
    # Lazily initialized in order to avoid creating event files when
    # not needed.
    self._writers = {}
    # Used to restore any existing `SummaryWriter` after training ends.
    self._prev_summary_state = []

  def _validate_kwargs(self, kwargs):
    """Handles arguments that were only supported in the V1 callback."""
    if kwargs.get('write_grads', False):
      logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
                      'for the `TensorBoard` Callback.')
    if kwargs.get('batch_size', False):
      logging.warning('`batch_size` is no longer needed in the '
                      '`TensorBoard` Callback and will be ignored '
                      'in TensorFlow 2.0.')
    if kwargs.get('embeddings_layer_names', False):
      logging.warning('`embeddings_layer_names` is not supported in '
                      'TensorFlow 2.0. Instead, all `Embedding` layers '
                      'will be visualized.')
    if kwargs.get('embeddings_data', False):
      logging.warning('`embeddings_data` is not supported in TensorFlow '
                      '2.0. Instead, all `Embedding` variables will be '
                      'visualized.')
    supported_kwargs = {'write_grads', 'embeddings_layer_names',
                        'embeddings_data', 'batch_size'}
    unrecognized_kwargs = set(kwargs.keys()) - supported_kwargs
    # Only allow kwargs that were supported in V1.
    if unrecognized_kwargs:
      raise ValueError(
          'Unrecognized arguments in `TensorBoard` Callback: '
          f'{unrecognized_kwargs}. Supported kwargs are: {supported_kwargs}')

  def set_model(self, model):
    """Sets Keras model and writes graph if specified."""
    self.model = model
    self._log_write_dir = self._get_log_write_dir()
    self._train_dir = os.path.join(self._log_write_dir, 'train')
    self._train_step = self.model._train_counter  # pylint: disable=protected-access
    self._val_dir = os.path.join(self._log_write_dir, 'validation')
    self._val_step = self.model._test_counter  # pylint: disable=protected-access
    self._writers = {}  # Resets writers.
    self._should_write_train_graph = False
    if self.write_graph:
      self._write_keras_model_summary()
      # The train function graph itself is written lazily on the first
      # train batch, once the function has been traced.
      self._should_write_train_graph = True
    if self.embeddings_freq:
      self._configure_embeddings()

  @property
  def _train_writer(self):
    """Lazily creates and returns the summary writer for the train dir."""
    if 'train' not in self._writers:
      self._writers['train'] = tf.summary.create_file_writer(
          self._train_dir)
    return self._writers['train']

  @property
  def _val_writer(self):
    """Lazily creates and returns the summary writer for the validation dir."""
    if 'val' not in self._writers:
      self._writers['val'] = tf.summary.create_file_writer(self._val_dir)
    return self._writers['val']

  def _get_log_write_dir(self):
    """For multi-worker, only chief should write, others write to '/tmp'."""
    return distributed_file_utils.write_dirpath(self.log_dir,
                                                self.model.distribute_strategy)

  def _delete_tmp_write_dir(self):
    """Deletes tmp write directories for multi-worker."""
    distributed_file_utils.remove_temp_dirpath(self.log_dir,
                                               self.model.distribute_strategy)

  def _write_keras_model_train_graph(self):
    """Writes Keras model train_function graph to TensorBoard."""
    with self._train_writer.as_default():
      with tf.summary.record_if(True):
        train_fn = self.model.train_tf_function
        # If the train_function is a `tf.function`, we can write out a graph
        if hasattr(train_fn, 'function_spec'):
          tf.summary.graph(train_fn._concrete_stateful_fn.graph)  # pylint: disable=protected-access

  def _write_keras_model_summary(self):
    """Writes Keras graph network summary to TensorBoard."""
    with self._train_writer.as_default():
      with tf.summary.record_if(True):
        summary_writable = (
            self.model._is_graph_network or  # pylint: disable=protected-access
            self.model.__class__.__name__ == 'Sequential')  # pylint: disable=protected-access
        if summary_writable:
          keras_model_summary('keras', self.model, step=0)

  def _configure_embeddings(self):
    """Configure the Projector for embeddings."""
    # TODO(omalleyt): Add integration tests.
    from google.protobuf import text_format
    from keras.layers import embeddings
    from keras.protobuf import projector_config_pb2
    config = projector_config_pb2.ProjectorConfig()
    for layer in self.model.layers:
      if isinstance(layer, embeddings.Embedding):
        embedding = config.embeddings.add()
        # Embeddings are always the first layer, so this naming should be
        # consistent in any keras models checkpoints.
        name = 'layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE'
        embedding.tensor_name = name
        if self.embeddings_metadata is not None:
          if isinstance(self.embeddings_metadata, str):
            embedding.metadata_path = self.embeddings_metadata
          else:
            if layer.name in self.embeddings_metadata.keys():
              embedding.metadata_path = self.embeddings_metadata.pop(layer.name)
    # Any leftover dict entries name layers that do not exist in the model.
    if self.embeddings_metadata and not isinstance(self.embeddings_metadata,
                                                   str):
      raise ValueError('Unrecognized `Embedding` layer names passed to '
                       '`keras.callbacks.TensorBoard` `embeddings_metadata` '
                       f'argument: {self.embeddings_metadata.keys()}')
    config_pbtxt = text_format.MessageToString(config)
    path = os.path.join(self._log_write_dir, 'projector_config.pbtxt')
    with tf.io.gfile.GFile(path, 'w') as f:
      f.write(config_pbtxt)

  def _push_writer(self, writer, step):
    """Sets the default writer for custom batch-level summaries."""
    if self.update_freq == 'epoch':
      return
    should_record = lambda: tf.equal(step % self.update_freq, 0)
    # TODO(b/151339474): Fix deadlock when not using .value() here.
    summary_context = (writer.as_default(step.value()),
                       tf.summary.record_if(should_record))
    self._prev_summary_state.append(summary_context)
    summary_context[0].__enter__()
    summary_context[1].__enter__()

  def _pop_writer(self):
    """Pops the current writer."""
    if self.update_freq == 'epoch':
      return
    # See _push_writer for the content of the previous_context, which is pair
    # of context.
    previous_context = self._prev_summary_state.pop()
    # Exit in reverse order of entry.
    previous_context[1].__exit__(*sys.exc_info())
    previous_context[0].__exit__(*sys.exc_info())

  def _close_writers(self):
    """Closes every summary writer that was lazily created."""
    for writer in self._writers.values():
      writer.close()

  def _init_profile_batch(self, profile_batch):
    """Validate profile_batch value and set the range of batches to profile.
    Sets values of _start_batch and _stop_batch attributes,
    specifying the start and stop batch to profile.
    Setting `profile_batch=0` disables profiling.
    Args:
      profile_batch: The range of batches to profile. Should be a non-negative
        integer or a comma separated string of pair of positive integers. A pair
        of positive integers signify a range of batches to profile.
    Raises:
      ValueError: If profile_batch is not an integer or a comma separated pair
        of positive integers.
    """
    profile_batch_error_message = (
        'profile_batch must be a non-negative integer or 2-tuple of positive '
        'integers. A pair of positive integers signifies a range of batches '
        f'to profile. Found: {profile_batch}')
    # Support legacy way of specifying "start,stop" or "start" as str.
    if isinstance(profile_batch, str):
      profile_batch = str(profile_batch).split(',')
      profile_batch = tf.nest.map_structure(int, profile_batch)
    if isinstance(profile_batch, int):
      self._start_batch = profile_batch
      self._stop_batch = profile_batch
    elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:
      self._start_batch, self._stop_batch = profile_batch
    else:
      raise ValueError(profile_batch_error_message)
    if self._start_batch < 0 or self._stop_batch < self._start_batch:
      raise ValueError(profile_batch_error_message)
    # True when the profiler was successfully started by this callback.
    # We track the status here to make sure callbacks do not interfere with
    # each other. The callback will only stop the profiler it started.
    self._profiler_started = False
    if self._start_batch > 0:
      # Warm up and improve the profiling accuracy.
      self._start_profiler(logdir='')
      self._stop_profiler(save=False)
    # True when a trace is running.
    self._is_tracing = False
    # Setting `profile_batch=0` disables profiling.
    self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)

  def on_train_begin(self, logs=None):
    """Resets batch counters and installs the train summary writer."""
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._push_writer(self._train_writer, self._train_step)

  def on_train_end(self, logs=None):
    """Stops tracing if needed, closes writers and cleans up tmp dirs."""
    self._pop_writer()
    if self._is_tracing:
      self._stop_trace()
    self._close_writers()
    self._delete_tmp_write_dir()

  def on_test_begin(self, logs=None):
    """Installs the validation summary writer for batch-level summaries."""
    self._push_writer(self._val_writer, self._val_step)

  def on_test_end(self, logs=None):
    """Writes evaluation metrics vs optimizer iterations, then pops writer."""
    if self.model.optimizer and hasattr(self.model.optimizer, 'iterations'):
      with tf.summary.record_if(True), self._val_writer.as_default():
        # NOTE(review): assumes `logs` is a dict here; a None `logs` would
        # raise — confirm callers always pass evaluation logs.
        for name, value in logs.items():
          tf.summary.scalar(
              'evaluation_' + name + '_vs_iterations',
              value,
              step=self.model.optimizer.iterations.read_value())
    self._pop_writer()

  def _implements_train_batch_hooks(self):
    # Only call batch hooks when tracing or write_steps_per_second are enabled
    return self._should_trace or self.write_steps_per_second

  def on_train_batch_begin(self, batch, logs=None):
    """Starts the profiling trace when the configured start batch is reached."""
    self._global_train_batch += 1
    if self.write_steps_per_second:
      self._batch_start_time = time.time()
    if not self._should_trace:
      return
    if self._global_train_batch == self._start_batch:
      self._start_trace()

  def on_train_batch_end(self, batch, logs=None):
    """Writes per-batch timing and stops tracing past the stop batch."""
    if self._should_write_train_graph:
      self._write_keras_model_train_graph()
      self._should_write_train_graph = False
    if self.write_steps_per_second:
      batch_run_time = time.time() - self._batch_start_time
      tf.summary.scalar(
          'batch_steps_per_second', 1. / batch_run_time, step=self._train_step)
    if not self._should_trace:
      return
    if self._is_tracing and self._global_train_batch >= self._stop_batch:
      self._stop_trace()

  def on_epoch_begin(self, epoch, logs=None):
    # Keeps track of epoch for profiling.
    if self.write_steps_per_second:
      self._previous_epoch_iterations = self.model.optimizer.iterations.numpy()
      self._epoch_start_time = time.time()

  def on_epoch_end(self, epoch, logs=None):
    """Runs metrics and histogram summaries at epoch end."""
    self._log_epoch_metrics(epoch, logs)
    if self.histogram_freq and epoch % self.histogram_freq == 0:
      self._log_weights(epoch)
    if self.embeddings_freq and epoch % self.embeddings_freq == 0:
      self._log_embeddings(epoch)

  def _start_trace(self):
    """Turns on graph tracing and starts the profiler."""
    tf.summary.trace_on(graph=True, profiler=False)
    self._start_profiler(logdir=self.log_dir)
    self._is_tracing = True

  def _stop_trace(self, batch=None):
    """Logs the trace graph to TensorBoard."""
    if batch is None:
      batch = self._stop_batch
    with self._train_writer.as_default():
      with tf.summary.record_if(True):
        # TODO(b/126388999): Remove step info in the summary name.
        tf.summary.trace_export(name='batch_%d' % batch, step=batch)
    self._stop_profiler()
    self._is_tracing = False

  def _collect_learning_rate(self, logs):
    """Adds the current learning rate to `logs` when a schedule is in use."""
    lr_schedule = getattr(self.model.optimizer, 'lr', None)
    if isinstance(lr_schedule, learning_rate_schedule.LearningRateSchedule):
      logs['learning_rate'] = lr_schedule(self.model.optimizer.iterations)
    return logs

  def _compute_steps_per_second(self):
    """Returns average optimizer steps per second since the epoch began."""
    current_iteration = self.model.optimizer.iterations.numpy()
    time_since_epoch_begin = time.time() - self._epoch_start_time
    steps_per_second = ((current_iteration - self._previous_epoch_iterations) /
                        time_since_epoch_begin)
    return steps_per_second

  def _log_epoch_metrics(self, epoch, logs):
    """Writes epoch metrics out as scalar summaries.
    Args:
      epoch: Int. The global step to use for TensorBoard.
      logs: Dict. Keys are scalar summary names, values are scalars.
    """
    if not logs:
      return
    # Keys prefixed with 'val_' go to the validation writer; the rest to train.
    train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
    val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}
    train_logs = self._collect_learning_rate(train_logs)
    if self.write_steps_per_second:
      train_logs['steps_per_second'] = self._compute_steps_per_second()
    with tf.summary.record_if(True):
      if train_logs:
        with self._train_writer.as_default():
          for name, value in train_logs.items():
            tf.summary.scalar('epoch_' + name, value, step=epoch)
      if val_logs:
        with self._val_writer.as_default():
          for name, value in val_logs.items():
            name = name[4:]  # Remove 'val_' prefix.
            tf.summary.scalar('epoch_' + name, value, step=epoch)

  def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    with self._train_writer.as_default():
      with tf.summary.record_if(True):
        for layer in self.model.layers:
          for weight in layer.weights:
            # Colons are not valid in summary names.
            weight_name = weight.name.replace(':', '_')
            tf.summary.histogram(weight_name, weight, step=epoch)
            if self.write_images:
              self._log_weight_as_image(weight, weight_name, epoch)
        self._train_writer.flush()

  def _log_weight_as_image(self, weight, weight_name, epoch):
    """Logs a weight as a TensorBoard image."""
    w_img = tf.squeeze(weight)
    shape = backend.int_shape(w_img)
    if len(shape) == 1:  # Bias case
      w_img = tf.reshape(w_img, [1, shape[0], 1, 1])
    elif len(shape) == 2:  # Dense layer kernel case
      if shape[0] > shape[1]:
        w_img = tf.transpose(w_img)
        shape = backend.int_shape(w_img)
      w_img = tf.reshape(w_img, [1, shape[0], shape[1], 1])
    elif len(shape) == 3:  # ConvNet case
      if backend.image_data_format() == 'channels_last':
        # Switch to channels_first to display every kernel as a separate
        # image.
        w_img = tf.transpose(w_img, perm=[2, 0, 1])
        shape = backend.int_shape(w_img)
      w_img = tf.reshape(w_img, [shape[0], shape[1], shape[2], 1])
    shape = backend.int_shape(w_img)
    # Not possible to handle 3D convnets etc.
    if len(shape) == 4 and shape[-1] in [1, 3, 4]:
      tf.summary.image(weight_name, w_img, step=epoch)

  def _log_embeddings(self, epoch):
    """Saves model weights as the embedding checkpoint for the Projector."""
    embeddings_ckpt = os.path.join(self._log_write_dir, 'train',
                                   'keras_embedding.ckpt-{}'.format(epoch))
    self.model.save_weights(embeddings_ckpt)

  def _start_profiler(self, logdir):
    """Starts the profiler if currently inactive.
    Args:
      logdir: Directory where profiler results will be saved.
    """
    if self._profiler_started:
      return
    try:
      tf.profiler.experimental.start(logdir=logdir)
      self._profiler_started = True
    except tf.errors.AlreadyExistsError as e:
      # Profiler errors should not be fatal.
      logging.error('Failed to start profiler: %s', e.message)

  def _stop_profiler(self, save=True):
    """Stops the profiler if currently active.
    Args:
      save: Whether to save the profiler results to TensorBoard.
    """
    if not self._profiler_started:
      return
    try:
      tf.profiler.experimental.stop(save=save)
    except tf.errors.UnavailableError as e:
      # Profiler errors should not be fatal.
      logging.error('Failed to stop profiler: %s', e.message)
    finally:
      self._profiler_started = False
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
  """Reduce learning rate when a metric has stopped improving.
  Models often benefit from reducing the learning rate by a factor
  of 2-10 once learning stagnates. This callback monitors a
  quantity and if no improvement is seen for a 'patience' number
  of epochs, the learning rate is reduced.
  Example:
  ```python
  reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                patience=5, min_lr=0.001)
  model.fit(X_train, Y_train, callbacks=[reduce_lr])
  ```
  Args:
    monitor: quantity to be monitored.
    factor: factor by which the learning rate will be reduced.
      `new_lr = lr * factor`.
    patience: number of epochs with no improvement after which learning rate
      will be reduced.
    verbose: int. 0: quiet, 1: update messages.
    mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode,
      the learning rate will be reduced when the
      quantity monitored has stopped decreasing; in `'max'` mode it will be
      reduced when the quantity monitored has stopped increasing; in `'auto'`
      mode, the direction is automatically inferred from the name of the
      monitored quantity.
    min_delta: threshold for measuring the new optimum, to only focus on
      significant changes.
    cooldown: number of epochs to wait before resuming normal operation after
      lr has been reduced.
    min_lr: lower bound on the learning rate.
  """

  def __init__(self,
               monitor='val_loss',
               factor=0.1,
               patience=10,
               verbose=0,
               mode='auto',
               min_delta=1e-4,
               cooldown=0,
               min_lr=0,
               **kwargs):
    """Initializes the callback. See the class docstring for argument details."""
    super(ReduceLROnPlateau, self).__init__()
    self.monitor = monitor
    if factor >= 1.0:
      raise ValueError(
          f'ReduceLROnPlateau does not support a factor >= 1.0. Got {factor}')
    if 'epsilon' in kwargs:
      # Legacy alias for min_delta.
      min_delta = kwargs.pop('epsilon')
      logging.warning('`epsilon` argument is deprecated and '
                      'will be removed, use `min_delta` instead.')
    self.factor = factor
    self.min_lr = min_lr
    self.min_delta = min_delta
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.wait = 0
    # `best` and `monitor_op` are placeholders; _reset() below assigns the
    # real initial values based on `mode`.
    self.best = 0
    self.mode = mode
    self.monitor_op = None
    self._reset()

  def _reset(self):
    """Resets wait counter and cooldown counter.
    """
    if self.mode not in ['auto', 'min', 'max']:
      logging.warning('Learning rate reduction mode %s is unknown, '
                      'fallback to auto mode.', self.mode)
      self.mode = 'auto'
    # In 'auto' mode, metrics containing 'acc' are assumed to be maximized;
    # everything else (e.g. losses) is minimized.
    if (self.mode == 'min' or
        (self.mode == 'auto' and 'acc' not in self.monitor)):
      self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
      self.best = np.Inf
    else:
      self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
      self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0

  def on_train_begin(self, logs=None):
    """Resets plateau-tracking state at the start of each training run."""
    self._reset()

  def on_epoch_end(self, epoch, logs=None):
    """Checks the monitored metric and reduces the learning rate on plateau."""
    logs = logs or {}
    logs['lr'] = backend.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Learning rate reduction is conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    else:
      if self.in_cooldown():
        # During cooldown, only count down; no plateau accounting.
        self.cooldown_counter -= 1
        self.wait = 0
      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        self.wait += 1
        if self.wait >= self.patience:
          old_lr = backend.get_value(self.model.optimizer.lr)
          if old_lr > np.float32(self.min_lr):
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            backend.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              io_utils.print_msg(
                  f'\nEpoch {epoch +1}: '
                  f'ReduceLROnPlateau reducing learning rate to {new_lr}.')
            self.cooldown_counter = self.cooldown
            self.wait = 0

  def in_cooldown(self):
    """Returns True while the post-reduction cooldown period is active."""
    return self.cooldown_counter > 0
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
  """Callback that streams epoch results to a CSV file.

  Supports every value that has a string representation, including 1D
  iterables such as `np.ndarray` (written as a quoted, bracketed list).

  Example:

  ```python
  csv_logger = CSVLogger('training.log')
  model.fit(X_train, Y_train, callbacks=[csv_logger])
  ```

  Args:
    filename: Filename of the CSV file, e.g. `'run/log.csv'`.
    separator: String used to separate elements in the CSV file.
    append: Boolean. True: append if file exists (useful for continuing
      training). False: overwrite existing file.
  """

  def __init__(self, filename, separator=',', append=False):
    super(CSVLogger, self).__init__()
    self.sep = separator
    self.filename = io_utils.path_to_string(filename)
    self.append = append
    # Both created lazily: the DictWriter on the first epoch, the column
    # set from the first epoch's log keys.
    self.writer = None
    self.keys = None
    self.append_header = True

  def on_train_begin(self, logs=None):
    # Append to an existing, non-empty file without re-writing the header;
    # otherwise start a fresh file.
    mode = 'w'
    if self.append:
      if tf.io.gfile.exists(self.filename):
        with tf.io.gfile.GFile(self.filename, 'r') as f:
          self.append_header = not bool(len(f.readline()))
      mode = 'a'
    self.csv_file = tf.io.gfile.GFile(self.filename, mode)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}

    def _to_cell(value):
      """Renders a log value as a CSV cell; 1D iterables become "[a, b]"."""
      if isinstance(value, str):
        return value
      zero_dim_array = isinstance(value, np.ndarray) and value.ndim == 0
      if isinstance(value, collections.abc.Iterable) and not zero_dim_array:
        return '"[%s]"' % (', '.join(map(str, value)))
      return value

    if self.keys is None:
      self.keys = sorted(logs.keys())
    if self.model.stop_training:
      # We set NA so that csv parsers do not fail for this last epoch.
      logs = dict((k, logs[k]) if k in logs else (k, 'NA') for k in self.keys)
    if self.writer is None:

      class CustomDialect(csv.excel):
        delimiter = self.sep

      self.writer = csv.DictWriter(
          self.csv_file,
          fieldnames=['epoch'] + self.keys,
          dialect=CustomDialect)
      if self.append_header:
        self.writer.writeheader()
    row_dict = collections.OrderedDict({'epoch': epoch})
    row_dict.update((key, _to_cell(logs[key])) for key in self.keys)
    self.writer.writerow(row_dict)
    self.csv_file.flush()

  def on_train_end(self, logs=None):
    self.csv_file.close()
    self.writer = None
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
  r"""Callback for creating simple, custom callbacks on-the-fly.

  This callback is constructed from anonymous functions that are invoked at
  the appropriate time during `Model.{fit | evaluate | predict}`.
  Note that the callbacks expect positional arguments, as:

  - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
    `epoch`, `logs`
  - `on_batch_begin` and `on_batch_end` expect two positional arguments:
    `batch`, `logs`
  - `on_train_begin` and `on_train_end` expect one positional argument:
    `logs`

  Args:
    on_epoch_begin: called at the beginning of every epoch.
    on_epoch_end: called at the end of every epoch.
    on_batch_begin: called at the beginning of every batch.
    on_batch_end: called at the end of every batch.
    on_train_begin: called at the beginning of model training.
    on_train_end: called at the end of model training.

  Example:

  ```python
  # Print the batch number at the beginning of every batch.
  batch_print_callback = LambdaCallback(
      on_batch_begin=lambda batch,logs: print(batch))

  # Stream the epoch loss to a file in JSON format. The file content
  # is not well-formed JSON but rather has a JSON object per line.
  import json
  json_log = open('loss_log.json', mode='wt', buffering=1)
  json_logging_callback = LambdaCallback(
      on_epoch_end=lambda epoch, logs: json_log.write(
          json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
      on_train_end=lambda logs: json_log.close()
  )

  # Terminate some processes after having finished model training.
  processes = ...
  cleanup_callback = LambdaCallback(
      on_train_end=lambda logs: [
          p.terminate() for p in processes if p.is_alive()])

  model.fit(...,
            callbacks=[batch_print_callback,
                       json_logging_callback,
                       cleanup_callback])
  ```
  """

  def __init__(self,
               on_epoch_begin=None,
               on_epoch_end=None,
               on_batch_begin=None,
               on_batch_end=None,
               on_train_begin=None,
               on_train_end=None,
               **kwargs):
    super(LambdaCallback, self).__init__()
    self.__dict__.update(kwargs)
    # Epoch/batch hooks receive (index, logs); train hooks receive (logs,).
    # Any hook the caller did not supply falls back to a matching-arity no-op.
    two_arg_noop = lambda a, b: None
    one_arg_noop = lambda a: None
    for hook_name, user_fn, noop in (
        ('on_epoch_begin', on_epoch_begin, two_arg_noop),
        ('on_epoch_end', on_epoch_end, two_arg_noop),
        ('on_batch_begin', on_batch_begin, two_arg_noop),
        ('on_batch_end', on_batch_end, two_arg_noop),
        ('on_train_begin', on_train_begin, one_arg_noop),
        ('on_train_end', on_train_end, one_arg_noop)):
      setattr(self, hook_name, user_fn if user_fn is not None else noop)
| 37.957084 | 100 | 0.678093 |
aad07e9bd944982a213544fbd049e4a26820927e | 7,236 | py | Python | desert/desert.py | inconvergent/desert | 377e8d48ca10a6733696943b6b933d7f82979d77 | [
"MIT"
] | 65 | 2017-10-27T20:08:16.000Z | 2022-01-09T10:04:34.000Z | desert/desert.py | inconvergent/desert | 377e8d48ca10a6733696943b6b933d7f82979d77 | [
"MIT"
] | null | null | null | desert/desert.py | inconvergent/desert | 377e8d48ca10a6733696943b6b933d7f82979d77 | [
"MIT"
] | 6 | 2017-10-28T21:15:21.000Z | 2020-01-21T18:54:35.000Z | # -*- coding: utf-8 -*-
from os import getenv
from time import time
import matplotlib.pyplot as plt
import pkg_resources
from PIL import Image
from numpy import arange
from numpy import argsort
from numpy import column_stack
from numpy import concatenate
from numpy import cumsum
from numpy import vstack
from numpy import float32 as npfloat
from numpy import int32 as npint
from numpy import pi
from numpy import repeat
from numpy import reshape
from numpy import row_stack
from numpy import zeros
import pycuda.driver as cuda
from .color import Rgba
from .color import black
from .color import white
from .helpers import load_kernel
from .helpers import unpack
# CUDA threads per block; overridable through the THREADS environment variable.
THREADS = int(getenv('THREADS', 512))
TWOPI = pi*2  # full circle, in radians
def _build_ind_count(counts):
ind_count_reduced = column_stack((arange(counts.shape[0]).astype(npint),
counts))
cm = concatenate(([0], cumsum(ind_count_reduced[:, 1])[:-1])).astype(npint)
return column_stack((ind_count_reduced, cm, cm)).astype(npint)
class Desert():
  def __init__(self, imsize, show=True,
               gsamples=1000000, verbose=False):
    """Creates a square drawing canvas backed by a GPU pixel buffer.

    Args:
      imsize: int, width/height of the (square) canvas in pixels.
      show: bool, if True a matplotlib figure is created for display.
      gsamples: int, sample budget used for generator-based drawing;
        must be at least 5000.
      verbose: controls timing/progress printouts.
    """
    self.imsize = imsize
    self.imsize2 = imsize*imsize
    # RGBA float32 image, flattened to (imsize*imsize, 4), mirrored on the GPU.
    self.img = zeros((self.imsize2, 4), npfloat)
    self._img = cuda.mem_alloc(self.img.nbytes)
    self.gsamples = gsamples
    assert self.gsamples >= 5000, 'you must set gsamples to at least 5000.'
    # Compile the three CUDA kernels: point-to-pixel aggregation, per-bin
    # color grouping, and dot compositing.
    self.cuda_agg = load_kernel(
        pkg_resources.resource_filename('desert', 'cuda/agg.cu'),
        'agg',
        subs={'_THREADS_': THREADS}
        )
    self.cuda_agg_bin = load_kernel(
        pkg_resources.resource_filename('desert', 'cuda/agg_bin.cu'),
        'agg_bin',
        subs={'_THREADS_': THREADS}
        )
    self.cuda_dot = load_kernel(
        pkg_resources.resource_filename('desert', 'cuda/dot.cu'),
        'dot',
        subs={'_THREADS_': THREADS}
        )
    self.fig = None
    if show:
      self.fig = plt.figure()
      self.fig.patch.set_facecolor('gray')
    self._updated = False
    self.verbose = verbose
    # Foreground/background colors are assigned later via init()/set_fg()/set_bg().
    self.fg = None
    self.bg = None
    self._gdraw_reset()
def __enter__(self):
return self
def __exit__(self, _type, val, tb):
return
def _gdraw_reset(self):
self.est = 0
self.tdraw = 0
self.count = 0
self.color_list = []
self.pts = []
self._gupdated = True
def init(self, fg=black(0.01), bg=white()):
self.fg = fg
self.bg = bg
self.clear()
return self
def set_fg(self, c):
assert isinstance(c, Rgba)
self.fg = c
return self
def set_bg(self, c):
assert isinstance(c, Rgba)
self.bg = c
return self
def clear(self, bg=None):
if bg:
self.img[:, :] = bg.rgba
else:
self.img[:, :] = self.bg.rgba
cuda.memcpy_htod(self._img, self.img)
self._updated = True
return self
def _draw(self, pts, colors):
if not pts:
return False
imsize = self.imsize
dt0 = time()
ind_count = zeros(self.imsize2, npint)
colors = row_stack(colors).astype(npfloat)
xy = vstack(pts).astype(npfloat)
inds = zeros(xy.shape[0], npint)
self.cuda_agg(npint(inds.shape[0]),
npint(imsize),
cuda.In(xy),
cuda.InOut(inds),
cuda.InOut(ind_count),
block=(THREADS, 1, 1),
grid=(int(inds.shape[0]//THREADS) + 1, 1))
mask = inds > -1
if not mask.any():
print('-- no dots to draw. time: {:0.4f}'.format(time()-dt0))
return False
# xy = xy[mask, :]
inds = inds[mask]
colors = colors[mask]
ind_count_map = _build_ind_count(ind_count)
_ind_count_map = cuda.mem_alloc(ind_count_map.nbytes)
cuda.memcpy_htod(_ind_count_map, ind_count_map)
sort_colors = zeros((inds.shape[0], 4), npfloat)
_sort_colors = cuda.mem_alloc(sort_colors.nbytes)
cuda.memcpy_htod(_sort_colors, sort_colors)
self.cuda_agg_bin(npint(inds.shape[0]),
_ind_count_map,
cuda.In(colors),
cuda.In(inds),
_sort_colors,
block=(THREADS, 1, 1),
grid=(int(inds.shape[0]//THREADS) + 1, 1))
dotn, _ = ind_count_map.shape
self.cuda_dot(npint(dotn),
self._img,
_ind_count_map,
_sort_colors,
block=(THREADS, 1, 1),
grid=(int(dotn//THREADS) + 1, 1))
if self.verbose is not None:
print('-- drew dots: {:d}. time: {:0.4f}'.format(colors.shape[0],
time()-dt0))
self._updated = True
return True
def draw(self, primitives):
imsize = self.imsize
pts = []
color_list = []
count = 0
est = 0
t0 = time()
sample_verbose = True if self.verbose == 'vv' else None
for p in primitives:
count += p.num
inds = p.sample(imsize, verbose=sample_verbose)
colors = p.color_sample(imsize, self.fg)
est += p.est(imsize)
if inds.shape[0] > 0:
pts.append(inds)
color_list.append(colors)
if self.verbose is not None:
print('-- sampled primitives: {:d} ({:d}). time: {:0.4f}'\
.format(count, est, time()-t0))
self._draw(pts, color_list)
return self
def _gdraw(self, force=False):
if force or self.est > self.gsamples:
if force:
print('.. gsamples force: drawing.')
else:
print('.. hit gsamples limit: drawing.')
self._draw(self.pts, self.color_list)
if self.verbose is not None:
print('-- sampled primitives: {:d} ({:d}). time: {:0.4f}'\
.format(self.count, self.est, self.tdraw))
self._gdraw_reset()
else:
self._gupdated = False
def gforce(self):
self._gdraw(force=True)
return self
def gdraw(self, primitives, force=False):
imsize = self.imsize
sample_verbose = True if self.verbose == 'vv' else None
t0 = time()
for p in primitives:
self.count += p.num
xy = p.sample(imsize, verbose=sample_verbose)
colors = p.color_sample(imsize, self.fg)
self.est += p.est(imsize)
# mask = inds > 0
# inds = inds[mask]
# colors = colors[mask, :]
if xy.shape[0] > 0:
self.pts.append(xy)
self.color_list.append(colors)
self.tdraw += time()-t0
self._gdraw(force=force)
return self
def show(self, pause=0.1, gamma=1):
if not self.fig:
print('-- warn: show is not enabled.')
return
self.fig.clear()
imsize = self.imsize
t0 = time()
if self._updated:
cuda.memcpy_dtoh(self.img, self._img)
self._updated = False
plt.imshow(Image.fromarray(unpack(self.img, imsize, gamma=gamma)))
if self.verbose == 'vv':
print('-- show. time: {:0.4f}'.format(time()-t0))
plt.pause(pause)
return self
def save(self, fn, gamma=1):
self._gdraw(force=True)
imsize = self.imsize
print('-- wrote:', fn, (imsize, imsize))
if self._updated:
cuda.memcpy_dtoh(self.img, self._img)
self._updated = False
Image.fromarray(unpack(self.img, imsize, gamma=gamma)).save(fn)
return self
| 24.612245 | 77 | 0.59508 |
82abc4280d46da87fc0be77cbd7cac94ff8bc569 | 2,365 | py | Python | test/active_learning_strategies/test_bald.py | ansunsujoe/distil | cf6cae2b88ef129d09c159aae0569978190e9f98 | [
"MIT"
] | 83 | 2021-01-06T06:50:30.000Z | 2022-03-31T05:16:32.000Z | test/active_learning_strategies/test_bald.py | ansunsujoe/distil | cf6cae2b88ef129d09c159aae0569978190e9f98 | [
"MIT"
] | 30 | 2021-02-27T06:09:47.000Z | 2021-12-23T11:03:36.000Z | test/active_learning_strategies/test_bald.py | ansunsujoe/distil | cf6cae2b88ef129d09c159aae0569978190e9f98 | [
"MIT"
] | 13 | 2021-03-05T18:26:58.000Z | 2022-03-12T01:53:17.000Z | from distil.utils.models.simple_net import TwoLayerNet
from distil.active_learning_strategies.bayesian_active_learning_disagreement_dropout import BALDDropout
from test.utils import MyLabeledDataset, MyUnlabeledDataset
import unittest
import torch
class TestBALD(unittest.TestCase):
    """Unit tests for the BALDDropout active-learning strategy."""
    def setUp(self):
        """Build a small random classification setup and the strategy under test."""
        # Create model
        self.input_dimension = 50
        self.classes = 10
        self.hidden_units = 20
        mymodel = TwoLayerNet(self.input_dimension, self.classes, self.hidden_units)
        # Create labeled dataset
        self.num_labeled_points = 1000
        rand_data_tensor = torch.randn((self.num_labeled_points, self.input_dimension), requires_grad=True)
        rand_label_tensor = torch.randint(low=0,high=self.classes,size=(self.num_labeled_points,))
        rand_labeled_dataset = MyLabeledDataset(rand_data_tensor, rand_label_tensor)
        # Create unlabeled dataset
        self.num_unlabeled_points = 10000
        rand_data_tensor = torch.randn((self.num_unlabeled_points, self.input_dimension), requires_grad=True)
        rand_unlabeled_dataset = MyUnlabeledDataset(rand_data_tensor)
        # Create args array
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        args = {'batch_size': 1, 'device': device, 'loss': torch.nn.functional.cross_entropy}
        self.strategy = BALDDropout(rand_labeled_dataset, rand_unlabeled_dataset, mymodel, self.classes, args)
    def test_acquire_scores(self):
        """acquire_scores() must yield exactly one score per unlabeled point."""
        # Acquire scores for the entire dataset
        scores = self.strategy.acquire_scores(self.strategy.unlabeled_dataset)
        # Assert that there is a score for each point
        self.assertEqual(len(scores), len(self.strategy.unlabeled_dataset))
    def test_select(self):
        """select() must return `budget` distinct, in-range indices."""
        budget = 10
        idxs = self.strategy.select(budget)
        # Ensure that indices are within the range spanned by the unlabeled dataset
        for idx in idxs:
            self.assertLess(idx, len(self.strategy.unlabeled_dataset))
            self.assertGreaterEqual(idx, 0)
        # Ensure that `budget` idx were returned
        self.assertEqual(budget, len(idxs))
        # Ensure that no point is selected multiple times
        self.assertEqual(len(idxs), len(set(idxs)))
147e48d49153cfa1827238c4188e37320b1f513b | 1,200 | py | Python | neutron/db/models/tag.py | 2020human/neutron | 1e2cfe8c06fcc2df52daa77b2b767ed6ffc2b19f | [
"Apache-2.0"
] | null | null | null | neutron/db/models/tag.py | 2020human/neutron | 1e2cfe8c06fcc2df52daa77b2b767ed6ffc2b19f | [
"Apache-2.0"
] | null | null | null | neutron/db/models/tag.py | 2020human/neutron | 1e2cfe8c06fcc2df52daa77b2b767ed6ffc2b19f | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import standard_attr
class Tag(model_base.BASEV2):
    """A tag attached to a Neutron resource via its standard-attribute row."""
    # FK into standard_attributes; deleting the owning resource cascades to
    # its tags.  Part of the composite primary key together with `tag`.
    standard_attr_id = sa.Column(
        sa.BigInteger().with_variant(sa.Integer(), 'sqlite'),
        sa.ForeignKey(standard_attr.StandardAttribute.id, ondelete="CASCADE"),
        nullable=False, primary_key=True)
    # The tag text itself; (standard_attr_id, tag) pairs are unique by PK.
    tag = sa.Column(sa.String(60), nullable=False, primary_key=True)
    # Eagerly-loaded, read-only backref so resources expose a `.tags` list.
    standard_attr = orm.relationship(
        'StandardAttribute',
        backref=orm.backref('tags', lazy='joined', viewonly=True))
    # NOTE(review): per the attribute name, changes to `standard_attr`
    # appear to trigger a revision bump on the parent — confirm against the
    # standard_attr revision machinery.
    revises_on_change = ('standard_attr', )
99d5b06a50f08c1dde251fe9f1a421a1e7a34428 | 3,951 | py | Python | mitemp-mqtt.py | yolkhovyy/mitemp | d697da783dda64458d4b7d83e0f8e8b5c1fb6176 | [
"MIT"
] | null | null | null | mitemp-mqtt.py | yolkhovyy/mitemp | d697da783dda64458d4b7d83e0f8e8b5c1fb6176 | [
"MIT"
] | null | null | null | mitemp-mqtt.py | yolkhovyy/mitemp | d697da783dda64458d4b7d83e0f8e8b5c1fb6176 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import re
import getmac
import paho.mqtt.publish as publish
from btlewrap import BluepyBackend, GatttoolBackend, PygattBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
MAC_ADDRESS = r'(?i)[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}'
def valid_mac(mac):
    """Validate a colon-separated MAC address (case-insensitive).

    Returns `mac` unchanged when valid; raises argparse.ArgumentTypeError
    otherwise, so it can be used directly as an argparse `type=` callable.

    Fix: use fullmatch() instead of match() — match() only anchors at the
    start, so strings with trailing garbage (e.g. an extra octet) were
    previously accepted.
    """
    regex_mac_address = re.compile(MAC_ADDRESS)
    if regex_mac_address.fullmatch(mac):
        return mac
    raise argparse.ArgumentTypeError('Invalid MAC address {}'.format(mac))
def mac_to_eui64(mac):
    """Convert a MAC address to its modified EUI-64 identifier.

    Strips separators, inserts 'fffe' between the OUI and device halves and
    flips the universal/local bit of the first octet (RFC 4291 App. A).
    Raises argparse.ArgumentTypeError (via valid_mac) for invalid input.
    """
    if not valid_mac(mac):
        return None
    hexdigits = re.sub(r'[.:-]', '', mac).lower()
    # Insert fffe between the upper and lower 24 bits.
    expanded = hexdigits[0:6] + 'fffe' + hexdigits[6:]
    # Flip the universal/local bit (bit 1) of the first octet.
    first_octet = int(expanded[0:2], 16) ^ 2
    return '{:02x}'.format(first_octet) + expanded[2:]
MI_TEMP_V1 = r'(?i)58:2D:34:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}'
MI_TEMP_V2 = r'(?i)4C:65:A8:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}'
def valid_mitemp_mac(mac):
    """Validate that `mac` carries a known MiTemp sensor OUI prefix.

    Accepts the v1 (58:2D:34) and v2 (4C:65:A8) vendor prefixes; raises
    argparse.ArgumentTypeError otherwise (usable as an argparse `type=`).

    Fix: use fullmatch() instead of match() — match() only anchors at the
    start, so a valid prefix followed by trailing garbage was accepted.
    """
    regex_v1 = re.compile(MI_TEMP_V1)
    regex_v2 = re.compile(MI_TEMP_V2)
    if regex_v1.fullmatch(mac) or regex_v2.fullmatch(mac):
        return mac
    raise argparse.ArgumentTypeError('Invalid MiTemp MAC address {}'.format(mac))
BACKEND = None
def get_backend(args):
    """Resolve the parsed --backend choice to a btlewrap backend class."""
    backends = {
        'gatttool': GatttoolBackend,
        'bluepy': BluepyBackend,
        'pygatt': PygattBackend,
    }
    if args.backend not in backends:
        raise Exception('unknown backend: {}'.format(args.backend))
    return backends[args.backend]
# CLI: positional MiTemp sensor MACs plus MQTT server/port, BLE backend and
# flags selecting which report types to publish.
PARSER = argparse.ArgumentParser()
PARSER.add_argument('macs', type=valid_mitemp_mac, nargs="*")
PARSER.add_argument('-s', '--server', default='localhost')
PARSER.add_argument('-p', '--port', default=1883)
PARSER.add_argument('-b', '--backend', choices=['gatttool', 'bluepy', 'pygatt'], default='gatttool')
PARSER.add_argument('-d', '--devinfo', action='store_true')
PARSER.add_argument('-e', '--health', action='store_true')
PARSER.add_argument('-m', '--measurements', action='store_true')
ARGS = PARSER.parse_args()
BACKEND = get_backend(ARGS)
# Gateway identity: derive an EUI-64 from this host's own MAC address.
SELF_MAC = getmac.get_mac_address()
SELF_EUI64 = mac_to_eui64(valid_mac(SELF_MAC))
MQTT_CLIENT_ID = "mitemp-mqtt-" + SELF_EUI64
for mitemp_mac in ARGS.macs:
    mitemp_eui64 = mac_to_eui64(mitemp_mac)
    # Topic layout: OpenCH/Gw/<gateway-eui64>/TeHu/<sensor-eui64>/Evt/<kind>
    topic_device_info = 'OpenCH/Gw/{}/TeHu/{}/Evt/DeviceInfo'.format(SELF_EUI64, mitemp_eui64)
    topic_health = 'OpenCH/Gw/{}/TeHu/{}/Evt/Health'.format(SELF_EUI64, mitemp_eui64)
    topic_measurements = 'OpenCH/Gw/{}/TeHu/{}/Evt/Status'.format(SELF_EUI64, mitemp_eui64)
    poller = MiTempBtPoller(mitemp_mac, BACKEND)
    msgs = []
    try:
        if ARGS.devinfo:
            # JSON payloads are assembled by hand; doubled braces are literal.
            payload = '{{"name":"{}","firmware_version":"{}"}}' \
                .format( \
                    poller.name(), \
                    poller.firmware_version())
            msgs.append({'topic': topic_device_info, 'payload': payload})
        if ARGS.health:
            payload = '{{"measurements":[{{"name":"battery","value":{},"units":"%"}}]}}' \
                .format( \
                    poller.parameter_value(MI_BATTERY))
            msgs.append({'topic': topic_health, 'payload': payload})
        if ARGS.measurements:
            payload = '{{"measurements":[{{"name":"temperature","value":{},"units":"℃"}},{{"name":"humidity","value":{},"units":"%"}}]}}' \
                .format( \
                    poller.parameter_value(MI_TEMPERATURE), \
                    poller.parameter_value(MI_HUMIDITY))
            msgs.append({'topic': topic_measurements, 'payload': payload})
    except Exception as ex:
        # Best-effort sweep: a failing sensor must not abort the others.
        print(mitemp_mac + ' mitemp sensor failure: ' + str(ex))
    if len(msgs) > 0:
        publish.multiple(msgs, hostname=ARGS.server, port=ARGS.port, client_id=MQTT_CLIENT_ID)
8b887e933aec573f427f7c4b88e30e189f1c913d | 1,839 | py | Python | app/admin/views.py | SuYehTarn/CS651-Group8-Feedback_Forum | d1163442aea81214c4dfa8de1d353ec719bfa7ab | [
"MIT"
] | null | null | null | app/admin/views.py | SuYehTarn/CS651-Group8-Feedback_Forum | d1163442aea81214c4dfa8de1d353ec719bfa7ab | [
"MIT"
] | null | null | null | app/admin/views.py | SuYehTarn/CS651-Group8-Feedback_Forum | d1163442aea81214c4dfa8de1d353ec719bfa7ab | [
"MIT"
] | null | null | null | """Module of back stage views
"""
from flask import render_template, flash, redirect, url_for
from flask_login import login_required, current_user
from app import db
from app.admin.form import ReviewFeedbackForm
from app.models.feedback import Feedback
from app.models.review_status import ReviewStatus
from app.admin import admin
@admin.route('/admin')
@admin.route('/admin/')
@login_required
def index():
    """Render the back-stage page listing every submitted feedback."""
    feedback_rows = db.session.query(Feedback).all()
    return render_template('/admin/index.html', feedbacks=feedback_rows)
@admin.route('/admin/<feedback_id>', methods=['GET', 'POST'])
@login_required
def read_feedback(feedback_id):
    """View of reading (and optionally reviewing) a single feedback.

    GET renders the review form pre-filled from the stored feedback; POST
    saves the reviewer's response/status, then re-renders the form.
    """
    feedback = db.session.query(Feedback)\
        .filter_by(id=feedback_id).first()
    if not feedback:
        flash('Wrong feedback ID')
        return redirect(url_for('admin.index'))
    form = ReviewFeedbackForm()
    # set choices of review statuses
    review_statuses = ReviewStatus.query.all()
    choices = [(status.id, status.name)
               for status in review_statuses]
    form.review_status.choices = choices
    if form.validate_on_submit():
        # Persist the review and record who made it.
        feedback.response = form.response.data
        feedback.review_status_id = form.review_status.data
        feedback.reviewer_id = current_user.id
        db.session.commit()
        flash('Modification saved.')
    # set default values
    form.review_status.default = feedback.review_status_id
    form.response.default = feedback.response
    form.id.default = feedback.id
    form.email.default = feedback.email
    form.title.default = feedback.title
    form.content.default = feedback.content
    # process() re-initializes the fields from the defaults above, so the
    # rendered form always reflects the (possibly just-saved) DB state.
    form.process()
    return render_template('/admin/feedback.html',
                           form=form)
9b942bce59938b1407110e307c29b89284df54e6 | 7,110 | py | Python | sdk/python/pulumi_azure/storage/container.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/storage/container.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/storage/container.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Container(pulumi.CustomResource):
    # NOTE: auto-generated Pulumi resource class (see file header); edit the
    # generator, not this class, for behavioral changes.
    container_access_type: pulumi.Output[str]
    """
    The Access Level configured for this Container. Possible values are `blob`, `container` or `private`. Defaults to `private`.
    """
    has_immutability_policy: pulumi.Output[bool]
    """
    Is there an Immutability Policy configured on this Storage Container?
    """
    has_legal_hold: pulumi.Output[bool]
    """
    Is there a Legal Hold configured on this Storage Container?
    """
    metadata: pulumi.Output[dict]
    """
    A mapping of MetaData for this Container.
    """
    name: pulumi.Output[str]
    """
    The name of the Container which should be created within the Storage Account.
    """
    properties: pulumi.Output[dict]
    """
    (**Deprecated**) Key-value definition of additional properties associated to the Storage Container
    """
    resource_group_name: pulumi.Output[str]
    """
    The name of the resource group in which to create the storage container. This field is no longer used and will be removed in 2.0.
    """
    storage_account_name: pulumi.Output[str]
    """
    The name of the Storage Account where the Container should be created.
    """
    def __init__(__self__, resource_name, opts=None, container_access_type=None, metadata=None, name=None, resource_group_name=None, storage_account_name=None, __props__=None, __name__=None, __opts__=None):
        """
        Manages a Container within an Azure Storage Account.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] container_access_type: The Access Level configured for this Container. Possible values are `blob`, `container` or `private`. Defaults to `private`.
        :param pulumi.Input[dict] metadata: A mapping of MetaData for this Container.
        :param pulumi.Input[str] name: The name of the Container which should be created within the Storage Account.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the storage container. This field is no longer used and will be removed in 2.0.
        :param pulumi.Input[str] storage_account_name: The name of the Storage Account where the Container should be created.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/storage_container.html.markdown.
        """
        # Legacy positional-name/opts shims kept for backward compatibility.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        # When no resource id is given we are creating a new resource, so
        # build the input property bag; output-only props start as None.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['container_access_type'] = container_access_type
            __props__['metadata'] = metadata
            __props__['name'] = name
            __props__['resource_group_name'] = resource_group_name
            if storage_account_name is None:
                raise TypeError("Missing required property 'storage_account_name'")
            __props__['storage_account_name'] = storage_account_name
            __props__['has_immutability_policy'] = None
            __props__['has_legal_hold'] = None
            __props__['properties'] = None
        super(Container, __self__).__init__(
            'azure:storage/container:Container',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name, id, opts=None, container_access_type=None, has_immutability_policy=None, has_legal_hold=None, metadata=None, name=None, properties=None, resource_group_name=None, storage_account_name=None):
        """
        Get an existing Container resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] container_access_type: The Access Level configured for this Container. Possible values are `blob`, `container` or `private`. Defaults to `private`.
        :param pulumi.Input[bool] has_immutability_policy: Is there an Immutability Policy configured on this Storage Container?
        :param pulumi.Input[bool] has_legal_hold: Is there a Legal Hold configured on this Storage Container?
        :param pulumi.Input[dict] metadata: A mapping of MetaData for this Container.
        :param pulumi.Input[str] name: The name of the Container which should be created within the Storage Account.
        :param pulumi.Input[dict] properties: (**Deprecated**) Key-value definition of additional properties associated to the Storage Container
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the storage container. This field is no longer used and will be removed in 2.0.
        :param pulumi.Input[str] storage_account_name: The name of the Storage Account where the Container should be created.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/storage_container.html.markdown.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["container_access_type"] = container_access_type
        __props__["has_immutability_policy"] = has_immutability_policy
        __props__["has_legal_hold"] = has_legal_hold
        __props__["metadata"] = metadata
        __props__["name"] = name
        __props__["properties"] = properties
        __props__["resource_group_name"] = resource_group_name
        __props__["storage_account_name"] = storage_account_name
        return Container(resource_name, opts=opts, __props__=__props__)
    def translate_output_property(self, prop):
        # Provider properties are camelCase; SDK attributes are snake_case.
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Inverse mapping of translate_output_property.
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 54.692308 | 217 | 0.704219 |
8827d6eef34cbe21296d6de552dd76275367a847 | 3,304 | py | Python | confidnet/models/segnet_oodconfid.py | luoyan407/predict_trustworthiness_smallscale | b7e1e2a68b0aee9b484228d1b5686f7252919e97 | [
"Apache-2.0"
] | 149 | 2019-10-01T14:04:05.000Z | 2022-03-24T12:25:15.000Z | confidnet/models/segnet_oodconfid.py | luoyan407/predict_trustworthiness_smallscale | b7e1e2a68b0aee9b484228d1b5686f7252919e97 | [
"Apache-2.0"
] | 10 | 2019-12-12T09:45:50.000Z | 2021-12-27T04:45:22.000Z | confidnet/models/segnet_oodconfid.py | luoyan407/predict_trustworthiness_smallscale | b7e1e2a68b0aee9b484228d1b5686f7252919e97 | [
"Apache-2.0"
] | 30 | 2019-12-02T16:25:22.000Z | 2022-02-16T10:48:47.000Z | import torch.nn as nn
import torch.nn.functional as F
from confidnet.models.model import AbstractModel
from confidnet.models.segnet import segnetDown2, segnetDown3, segnetUp2, segnetUp3
class SegNetOODConfid(AbstractModel):
    """SegNet encoder/decoder that also predicts a per-pixel confidence map.

    forward() returns a tuple (segmentation logits, uncertainty map).  The
    uncertainty head branches off the penultimate decoder features (up2),
    unpools them back to input resolution and projects them to one channel.

    Fix vs. previous revision: forward() applied `self.dropout_down3` to the
    down4 and down5 stages instead of the dedicated `dropout_down4` /
    `dropout_down5` modules declared in __init__ (all three use p=0.5, so
    eval-mode behavior is unchanged, but the wiring now matches the intent).
    """
    def __init__(self, config_args, device):
        super().__init__(config_args, device)
        self.in_channels = config_args["data"]["input_channels"]
        self.n_classes = config_args["data"]["num_classes"]
        self.is_unpooling = True
        # Whether dropout is applied around the deeper encoder/decoder stages.
        self.dropout = config_args["model"]["is_dropout"]
        # Encoder (max-pool indices are kept for the decoder's unpooling).
        self.down1 = segnetDown2(self.in_channels, 64)
        self.down2 = segnetDown2(64, 128)
        self.down3 = segnetDown3(128, 256)
        self.dropout_down3 = nn.Dropout(0.5)
        self.down4 = segnetDown3(256, 512)
        self.dropout_down4 = nn.Dropout(0.5)
        self.down5 = segnetDown3(512, 512)
        self.dropout_down5 = nn.Dropout(0.5)
        # Decoder.
        self.up5 = segnetUp3(512, 512)
        self.dropout_up5 = nn.Dropout(0.5)
        self.up4 = segnetUp3(512, 256)
        self.dropout_up4 = nn.Dropout(0.4)
        self.up3 = segnetUp3(256, 128)
        self.dropout_up3 = nn.Dropout(0.3)
        self.up2 = segnetUp2(128, 64)
        self.up1 = segnetUp2(64, self.n_classes)
        # Confidence head.
        self.unpool_uncertainty = nn.MaxUnpool2d(2, 2)
        self.uncertainty = nn.Conv2d(64, 1, 3, 1, 1)
    def forward(self, inputs):
        """Return (class logits, per-pixel uncertainty) for a batch of images.

        When self.mc_dropout is set (presumably managed by AbstractModel —
        confirm), functional dropout is used so it can stay active for
        Monte-Carlo sampling.
        """
        # Encoder.
        down1, indices_1, unpool_shape1 = self.down1(inputs)
        down2, indices_2, unpool_shape2 = self.down2(down1)
        down3, indices_3, unpool_shape3 = self.down3(down2)
        if self.dropout:
            if self.mc_dropout:
                down3 = F.dropout(down3, 0.5, training=self.training)
            else:
                down3 = self.dropout_down3(down3)
        down4, indices_4, unpool_shape4 = self.down4(down3)
        if self.dropout:
            if self.mc_dropout:
                down4 = F.dropout(down4, 0.5, training=self.training)
            else:
                # FIX: previously used self.dropout_down3 here.
                down4 = self.dropout_down4(down4)
        down5, indices_5, unpool_shape5 = self.down5(down4)
        if self.dropout:
            if self.mc_dropout:
                down5 = F.dropout(down5, 0.5, training=self.training)
            else:
                # FIX: previously used self.dropout_down3 here.
                down5 = self.dropout_down5(down5)
        # Decoder.
        up5 = self.up5(down5, indices_5, unpool_shape5)
        if self.dropout:
            if self.mc_dropout:
                up5 = F.dropout(up5, 0.5, training=self.training)
            else:
                up5 = self.dropout_up5(up5)
        up4 = self.up4(up5, indices_4, unpool_shape4)
        if self.dropout:
            if self.mc_dropout:
                # NOTE(review): the MC path uses rate 0.5 although
                # dropout_up4 uses 0.4 — confirm whether intentional.
                up4 = F.dropout(up4, 0.5, training=self.training)
            else:
                up4 = self.dropout_up4(up4)
        up3 = self.up3(up4, indices_3, unpool_shape3)
        if self.dropout:
            if self.mc_dropout:
                # NOTE(review): rate 0.5 here vs dropout_up3's 0.3 — confirm.
                up3 = F.dropout(up3, 0.5, training=self.training)
            else:
                up3 = self.dropout_up3(up3)
        up2 = self.up2(up3, indices_2, unpool_shape2)
        up1 = self.up1(up2, indices_1, unpool_shape1)
        # Confidence branch from the up2 features.
        uncertainty = self.unpool_uncertainty(up2, indices_1, unpool_shape1)
        uncertainty = self.uncertainty(uncertainty)
        return up1, uncertainty
    def print_summary(self, input_size):
        # No summary is implemented for this architecture.
        pass
| 37.545455 | 82 | 0.605327 |
42390a2b3d3c39bff8f25a323fef03d939a4ca89 | 8,286 | py | Python | tests/util/keyring.py | MintNetwork/mint-blockchain | 65ec05a015a07664ed25f83efa736065a17f7d7a | [
"Apache-2.0"
] | 12 | 2021-08-18T20:53:31.000Z | 2022-03-15T21:45:13.000Z | tests/util/keyring.py | MintNetwork/mint-blockchain | 65ec05a015a07664ed25f83efa736065a17f7d7a | [
"Apache-2.0"
] | 34 | 2021-08-18T19:12:11.000Z | 2022-01-06T17:15:34.000Z | tests/util/keyring.py | MintNetwork/mint-blockchain | 65ec05a015a07664ed25f83efa736065a17f7d7a | [
"Apache-2.0"
] | 7 | 2021-08-18T20:53:34.000Z | 2022-03-15T08:37:40.000Z | import os
import shutil
import tempfile
from mint.util.file_keyring import FileKeyring
from mint.util.keychain import Keychain, default_keychain_service, default_keychain_user, get_private_key_user
from mint.util.keyring_wrapper import KeyringWrapper
from functools import wraps
from keyring.util import platform_
from keyrings.cryptfile.cryptfile import CryptFileKeyring # pyright: reportMissingImports=false
from pathlib import Path
from typing import Any, Optional
from unittest.mock import patch
def create_empty_cryptfilekeyring() -> CryptFileKeyring:
    """Create a legacy CryptFileKeyring whose backing file exists but is empty."""
    keyring = CryptFileKeyring()
    # Create (or truncate) the keyring file with owner-only permissions.
    flags = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
    os.close(os.open(keyring.file_path, flags, 0o600))
    assert Path(keyring.file_path).exists()
    return keyring
def add_dummy_key_to_cryptfilekeyring(crypt_file_keyring: CryptFileKeyring):
    """
    Add a fake key to the CryptFileKeyring so it looks like a used legacy
    keyring: unlocks it with a fixed test passphrase and stores a dummy
    private-key entry under key index 0.
    """
    # Setting keyring_key unlocks/encrypts the file with this passphrase.
    crypt_file_keyring.keyring_key = "your keyring password"  # type: ignore
    user: str = get_private_key_user(default_keychain_user(), 0)
    crypt_file_keyring.set_password(default_keychain_service(), user, "abc123")
def setup_mock_file_keyring(mock_configure_backend, temp_file_keyring_dir, populate=False):
    """Point the mocked KeyringWrapper backend at a FileKeyring rooted in
    `temp_file_keyring_dir`, optionally pre-populating it with an encrypted
    empty payload so it reads as an initialized (0-key) keyring.
    """
    if populate:
        # Populate the file keyring with an empty (but encrypted) data set
        file_keyring_path = FileKeyring.keyring_path_from_root(Path(temp_file_keyring_dir))
        os.makedirs(os.path.dirname(file_keyring_path), 0o700, True)
        # os.open is used so the file is created with 0600 permissions.
        with open(
            os.open(
                FileKeyring.keyring_path_from_root(Path(temp_file_keyring_dir)),
                os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
                0o600,
            ),
            "w",
        ) as f:
            f.write(
                # Encrypted using DEFAULT_PASSPHRASE_IF_NO_MASTER_PASSPHRASE. Data holds an empty keyring.
                "data: xtcxYOWtbeO9ruv4Nkwhw1pcTJCNh/fvPSdFxez/L0ysnag=\n"
                "nonce: 17ecac58deb7a392fccef49e\n"
                "salt: b1aa32d5730288d653e82017e4a4057c\n"
                "version: 1"
            )
    # Create the file keyring
    mock_configure_backend.return_value = FileKeyring(keys_root_path=Path(temp_file_keyring_dir))
def using_temp_file_keyring(populate=False):
    """
    Decorator factory: runs the wrapped test method inside a temporary
    directory holding a temporary keyring, cleaned up automatically
    afterwards.  When `populate` is true, the new keyring is pre-loaded
    with an encrypted 0-key payload using the default passphrase.
    """
    def decorator(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            with TempKeyring(populate=populate):
                return method(self, *args, **kwargs)
        return wrapper
    return decorator
def using_temp_file_keyring_and_cryptfilekeyring(populate=False):
    """
    Like `using_temp_file_keyring`, but the temporary directory additionally
    receives an empty legacy CryptFile keyring before the wrapped test
    method runs.
    """
    def decorator(method):
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            with TempKeyring(populate=populate, setup_cryptfilekeyring=True):
                return method(self, *args, **kwargs)
        return wrapper
    return decorator
class TempKeyring:
    """Context manager providing a Keychain backed by a temporary, mocked
    keyring directory.

    On construction it starts several unittest.mock patches (keyring
    capability probes, KeyringWrapper backends, the legacy keyring's data
    root) and builds a Keychain whose FileKeyring lives in a temp dir.
    cleanup() stops the patches and (by default) deletes the directory.
    The patch objects are stashed on the Keychain instance itself so
    cleanup() can stop exactly the patches this instance started.
    """
    def __init__(
        self,
        *,
        user: str = "testing-1.8.0",
        service: str = "testing-mint-1.8.0",
        populate: bool = False,
        setup_cryptfilekeyring: bool = False,
        existing_keyring_path: str = None,
        delete_on_cleanup: bool = True,
        use_os_credential_store: bool = False,
    ):
        self.keychain = self._patch_and_create_keychain(
            user=user,
            service=service,
            populate=populate,
            existing_keyring_path=existing_keyring_path,
            use_os_credential_store=use_os_credential_store,
            setup_cryptfilekeyring=setup_cryptfilekeyring,
        )
        self.delete_on_cleanup = delete_on_cleanup
        self.cleaned_up = False
    def _patch_and_create_keychain(
        self,
        *,
        user: str,
        service: str,
        populate: bool,
        setup_cryptfilekeyring: bool,
        existing_keyring_path: Optional[str],
        use_os_credential_store: bool,
    ):
        """Start the mock patches and return a Keychain rooted in a temp dir
        (or in the directory of `existing_keyring_path` when given)."""
        existing_keyring_dir = Path(existing_keyring_path).parent if existing_keyring_path else None
        temp_dir = existing_keyring_dir or tempfile.mkdtemp(prefix="test_keyring_wrapper")
        mock_supports_keyring_passphrase_patch = patch("mint.util.keychain.supports_keyring_passphrase")
        mock_supports_keyring_passphrase = mock_supports_keyring_passphrase_patch.start()
        # Patch supports_keyring_passphrase() to return True
        mock_supports_keyring_passphrase.return_value = True
        mock_supports_os_passphrase_storage_patch = patch("mint.util.keychain.supports_os_passphrase_storage")
        mock_supports_os_passphrase_storage = mock_supports_os_passphrase_storage_patch.start()
        # Patch supports_os_passphrase_storage() to return use_os_credential_store
        mock_supports_os_passphrase_storage.return_value = use_os_credential_store
        mock_configure_backend_patch = patch.object(KeyringWrapper, "_configure_backend")
        mock_configure_backend = mock_configure_backend_patch.start()
        setup_mock_file_keyring(mock_configure_backend, temp_dir, populate=populate)
        mock_configure_legacy_backend_patch: Any = None
        # Only stub out the legacy backend when no CryptFile keyring is
        # being set up; otherwise the real legacy path is exercised.
        if setup_cryptfilekeyring is False:
            mock_configure_legacy_backend_patch = patch.object(KeyringWrapper, "_configure_legacy_backend")
            mock_configure_legacy_backend = mock_configure_legacy_backend_patch.start()
            mock_configure_legacy_backend.return_value = None
        mock_data_root_patch = patch.object(platform_, "data_root")
        mock_data_root = mock_data_root_patch.start()
        # Mock CryptFileKeyring's file_path indirectly by changing keyring.util.platform_.data_root
        # We don't want CryptFileKeyring finding the real legacy keyring
        mock_data_root.return_value = temp_dir
        if setup_cryptfilekeyring is True:
            crypt_file_keyring = create_empty_cryptfilekeyring()
            add_dummy_key_to_cryptfilekeyring(crypt_file_keyring)
        keychain = Keychain(user=user, service=service)
        keychain.keyring_wrapper = KeyringWrapper(keys_root_path=Path(temp_dir))
        # Stash the temp_dir in the keychain instance
        keychain._temp_dir = temp_dir  # type: ignore
        # Stash the patches in the keychain instance
        keychain._mock_supports_keyring_passphrase_patch = mock_supports_keyring_passphrase_patch  # type: ignore
        keychain._mock_supports_os_passphrase_storage_patch = mock_supports_os_passphrase_storage_patch  # type: ignore
        keychain._mock_configure_backend_patch = mock_configure_backend_patch  # type: ignore
        keychain._mock_configure_legacy_backend_patch = mock_configure_legacy_backend_patch  # type: ignore
        keychain._mock_data_root_patch = mock_data_root_patch  # type: ignore
        return keychain
    def __enter__(self):
        assert not self.cleaned_up
        return self.get_keychain()
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.cleanup()
    def get_keychain(self):
        """Return the temporary Keychain created for this context."""
        return self.keychain
    def cleanup(self):
        """Stop all patches and optionally delete the temp keyring dir.

        Must be called at most once per instance.
        """
        assert not self.cleaned_up
        if self.delete_on_cleanup:
            # Stop the file watcher before removing the directory it watches.
            self.keychain.keyring_wrapper.keyring.cleanup_keyring_file_watcher()
            temp_dir = self.keychain._temp_dir
            print(f"Cleaning up temp keychain in dir: {temp_dir}")
            shutil.rmtree(temp_dir)
        self.keychain._mock_supports_keyring_passphrase_patch.stop()
        self.keychain._mock_supports_os_passphrase_storage_patch.stop()
        self.keychain._mock_configure_backend_patch.stop()
        if self.keychain._mock_configure_legacy_backend_patch is not None:
            self.keychain._mock_configure_legacy_backend_patch.stop()
        self.keychain._mock_data_root_patch.stop()
        self.cleaned_up = True
| 39.645933 | 119 | 0.714699 |
34d6cde141b42bcf0b52ead8387d1ae84536a132 | 3,887 | py | Python | cartoframes/viz/popup.py | manmorjim/cartoframes | 4172e3dcdaedf207c10772a6dffe4f43b1993230 | [
"BSD-3-Clause"
] | null | null | null | cartoframes/viz/popup.py | manmorjim/cartoframes | 4172e3dcdaedf207c10772a6dffe4f43b1993230 | [
"BSD-3-Clause"
] | null | null | null | cartoframes/viz/popup.py | manmorjim/cartoframes | 4172e3dcdaedf207c10772a6dffe4f43b1993230 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from ..utils.utils import gen_variable_name
class Popup(object):
    """Popup

    Args:
        data (dict): The popup definition for a layer. Maps the events
            'click' and/or 'hover' to the attributes shown when the event
            fires, using the `CARTO VL expressions syntax
            <https://carto.com/developers/carto-vl/reference/#cartoexpressions>`.
            Each event accepts a single item or a list of items, where an
            item is either an expression string or a dict with 'title'
            and 'value' keys.

    Example:

        .. code::

            from cartoframes.viz import Popup

            Popup({
                'hover': ['$name'],
                'click': [{'title': 'Pop max', 'value': '$pop_max'}]
            })
    """

    def __init__(self, data=None):
        self._init_popup(data)

    def _init_popup(self, data):
        # Normalize the user-provided definition into two plain lists.
        self._click = []
        self._hover = []
        if data is None:
            return
        if not isinstance(data, dict):
            raise ValueError('Wrong popup input')
        # TODO: error control
        if 'click' in data:
            self._click = self._as_list(data['click'])
        if 'hover' in data:
            self._hover = self._as_list(data['hover'])

    @staticmethod
    def _as_list(value):
        # Accept a single item or a list of items.
        return value if isinstance(value, list) else [value]

    @staticmethod
    def _split_item(item):
        # Return (title, value) for a popup item, which is either a plain
        # expression string or a dict carrying 'title'/'value' keys.
        if isinstance(item, str):
            return item, item
        if isinstance(item, dict) and 'value' in item:
            return item.get('title'), item.get('value')
        raise ValueError('Wrong popup input')

    def get_interactivity(self):
        """Build the interactivity entries for the 'click'/'hover' events."""
        interactivity = []
        for event, items in (('click', self._click), ('hover', self._hover)):
            if items:
                interactivity.append({
                    'event': event,
                    'attrs': self._get_attrs(items)
                })
        return interactivity

    def _get_attrs(self, array):
        output = []
        for item in array:
            if not item:
                continue  # skip falsy entries, as before
            title, value = self._split_item(item)
            output.append({
                'name': gen_variable_name(value),
                'title': title
            })
        return output

    def get_variables(self):
        """Return a name -> expression mapping over every popup value."""
        return self._get_vars(self._click + self._hover)

    def _get_vars(self, array):
        output = {}
        for item in array:
            if not item:
                continue
            _, value = self._split_item(item)
            output[gen_variable_name(value)] = value
        return output
| 29.44697 | 84 | 0.446102 |
ff0ef8d7d6f4502159fbd256a02b261b78739df0 | 5,534 | py | Python | Python3Code/Assignment_2/Assignment_2_1.py | kim66003/ML4QS_group25 | cd7f838e95f1583701892175670d7d0c8da0e1be | [
"MIT"
] | null | null | null | Python3Code/Assignment_2/Assignment_2_1.py | kim66003/ML4QS_group25 | cd7f838e95f1583701892175670d7d0c8da0e1be | [
"MIT"
] | null | null | null | Python3Code/Assignment_2/Assignment_2_1.py | kim66003/ML4QS_group25 | cd7f838e95f1583701892175670d7d0c8da0e1be | [
"MIT"
from Chapter5.DistanceMetrics import InstanceDistanceMetrics
from Chapter5.DistanceMetrics import PersonDistanceMetricsNoOrdering
from Chapter5.DistanceMetrics import PersonDistanceMetricsOrdering
from Chapter5.Clustering import NonHierarchicalClustering
from Chapter5.Clustering import HierarchicalClustering
import util.util as util
from util.VisualizeDataset import VisualizeDataset
import sys
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from datetime import datetime
# datetime object containing current date and time
begin = datetime.now()
# dd/mm/YY H:M:S
dt_string = begin.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string)
# As usual, we set our program constants, read the input file and initialize a visualization object.
# Passing any command-line argument switches from the row subset to the full dataset.
all_data = True if len(sys.argv) > 1 else False
DATA_PATH = Path('./intermediate_datafiles/')
DATASET_FNAME = 'chapter4_result.csv'
RESULT_FNAME = 'chapter5_result.csv'
try:
    dataset = pd.read_csv(DATA_PATH / DATASET_FNAME, index_col=0)
    if not all_data:
        print('subset data')
        # NOTE(review): 14780 looks like a hand-picked subset size — confirm.
        dataset = dataset[:14780]
    dataset.index = pd.to_datetime(dataset.index)
    if all_data:
        print('all data')
except IOError as e:
    print('File not found, try to run previous crowdsignals scripts first!')
    raise e
DataViz = VisualizeDataset(__file__)
# We'll start by applying non-hierarchical clustering.
clusteringNH = NonHierarchicalClustering()
# Let us look at k-means first.
# Sweep k over 2..9 and record the mean silhouette score for each k.
k_values = range(2, 10)
silhouette_values = []
attributes_to_cluster = ['gyr_phone_x','gyr_phone_y','gyr_phone_z']
## Do some initial runs to determine the right number for k
print('===== kmeans clustering =====')
for k in k_values:
    print(f'k = {k}')
    dataset_cluster = clusteringNH.k_means_over_instances(copy.deepcopy(dataset), attributes_to_cluster, k, 'default', 20, 10)
    silhouette_score = dataset_cluster['silhouette'].mean()
    print(f'silhouette = {silhouette_score}')
    silhouette_values.append(silhouette_score)
DataViz.plot_xy(x=[k_values], y=[silhouette_values], xlabel='k', ylabel='silhouette score',
                ylim=[0,1], line_styles=['b-'])
# And run the knn with the highest silhouette score
# k = 6 # todo: replaced with np.argmax call over silhouette scores
k = k_values[np.argmax(silhouette_values)]
print(f'Highest K-Means silhouette score: k = {k}')
# Re-run k-means at the chosen k with more iterations/inits (50, 50).
dataset_knn = clusteringNH.k_means_over_instances(copy.deepcopy(dataset), attributes_to_cluster, k, 'default', 50, 50)
DataViz.plot_clusters_3d(dataset_knn, attributes_to_cluster, 'cluster', ['label'])
DataViz.plot_silhouette(dataset_knn, 'cluster', 'silhouette')
util.print_latex_statistics_clusters(dataset_knn, 'cluster', attributes_to_cluster, 'label')
del dataset_knn['silhouette']
# datetime object containing current date and time
kmeans = datetime.now()
# dd/mm/YY H:M:S
dt_string = kmeans.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string)
diff = kmeans - begin
print('difference time', diff)
# Repeat the same k sweep with k-medoids.
k_values = range(2, 10)
silhouette_values = []
print('===== k medoids clustering =====')
for k in k_values:
    print(f'k = {k}')
    dataset_cluster = clusteringNH.k_medoids_over_instances(copy.deepcopy(dataset), attributes_to_cluster, k, 'default', 20, n_inits=10)
    silhouette_score = dataset_cluster['silhouette'].mean()
    print(f'silhouette = {silhouette_score}')
    silhouette_values.append(silhouette_score)
DataViz.plot_xy(x=[k_values], y=[silhouette_values], xlabel='k', ylabel='silhouette score',
                ylim=[0,1], line_styles=['b-'])
# And run k medoids with the highest silhouette score
# k = 6 # todo: replaced with np.argmax call over silhouette scores
k = k_values[np.argmax(silhouette_values)]
print(f'Highest K-Medoids silhouette score: k = {k}')
dataset_kmed = clusteringNH.k_medoids_over_instances(copy.deepcopy(dataset), attributes_to_cluster, k, 'default', 20, n_inits=50)
DataViz.plot_clusters_3d(dataset_kmed, attributes_to_cluster, 'cluster', ['label'])
DataViz.plot_silhouette(dataset_kmed, 'cluster', 'silhouette')
util.print_latex_statistics_clusters(dataset_kmed, 'cluster', attributes_to_cluster, 'label')
# datetime object containing current date and time
kmedoids = datetime.now()
# dd/mm/YY H:M:S
dt_string = kmedoids.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string)
diff = kmedoids - begin
print('difference time', diff)
# And the hierarchical clustering is the last one we try
clusteringH = HierarchicalClustering()
k_values = range(2, 10)
silhouette_values = []
print('===== agglomerative clustering =====')
for k in k_values:
    print(f'k = {k}')
    dataset_cluster, l = clusteringH.agglomerative_over_instances(copy.deepcopy(dataset), attributes_to_cluster, k, 'euclidean', use_prev_linkage=True, link_function='ward')
    silhouette_score = dataset_cluster['silhouette'].mean()
    print(f'silhouette = {silhouette_score}')
    silhouette_values.append(silhouette_score)
    # Only draw the dendrogram once; the linkage is reused across k values.
    if k == k_values[0]:
        DataViz.plot_dendrogram(dataset_cluster, l)
DataViz.plot_xy(x=[k_values], y=[silhouette_values], xlabel='k', ylabel='silhouette score',
                ylim=[0,1], line_styles=['b-'])
# And we select the outcome dataset of the knn clustering....
dataset_knn.to_csv(DATA_PATH / RESULT_FNAME)
# datetime object containing current date and time
end = datetime.now()
# dd/mm/YY H:M:S
dt_string = end.strftime("%d/%m/%Y %H:%M:%S")
print("date and time =", dt_string)
diff = end - begin
print('difference time', diff)
24f7ec73130adc5f4541b66270c38b13cc10c3d0 | 154 | py | Python | Worker/WebCheck/wafw00f/plugins/nevisproxy.py | p4sschen/Toy4Recon | e4fa6a512b238cca6d6d072ed5c66899de60bb06 | [
"MIT"
] | 1 | 2020-12-16T13:14:05.000Z | 2020-12-16T13:14:05.000Z | Worker/WebCheck/wafw00f/plugins/nevisproxy.py | p4sschen/Toy4Recon | e4fa6a512b238cca6d6d072ed5c66899de60bb06 | [
"MIT"
] | null | null | null | Worker/WebCheck/wafw00f/plugins/nevisproxy.py | p4sschen/Toy4Recon | e4fa6a512b238cca6d6d072ed5c66899de60bb06 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
NAME = 'NevisProxy (AdNovum)'
def is_waf(self):
if self.matchcookie(r'^Navajo(.*)?$'):
return True
return False
| 14 | 42 | 0.61039 |
8eeaffc04e5480830d535362850c1357251541ea | 16,160 | py | Python | plugins/modules/oci_compute_instance_agent_instance_agent_command_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 108 | 2020-05-19T20:46:10.000Z | 2022-03-25T14:10:01.000Z | plugins/modules/oci_compute_instance_agent_instance_agent_command_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
] | 90 | 2020-06-14T22:07:11.000Z | 2022-03-07T05:40:29.000Z | plugins/modules/oci_compute_instance_agent_instance_agent_command_facts.py | slmjy/oci-ansible-collection | 349c91e2868bf4706a6e3d6fb3b47fc622bfe11b | [
"Apache-2.0"
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type  # Python 2 compat: make module classes new-style
# Module metadata consumed by Ansible's documentation tooling.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_compute_instance_agent_instance_agent_command_facts
short_description: Fetches details about one or multiple InstanceAgentCommand resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple InstanceAgentCommand resources in Oracle Cloud Infrastructure
- Lists the Oracle Cloud Agent commands issued in a compartment.
- If I(instance_agent_command_id) is specified, the details of a single InstanceAgentCommand will be returned.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
instance_agent_command_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the command.
- Required to get a specific instance_agent_command.
type: str
aliases: ["id"]
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment.
- Required to list multiple instance_agent_commands.
type: str
sort_by:
description:
- The field to sort by. You can provide one sort order (`sortOrder`). Default order for
`TIMECREATED` is descending.
- "**Note:** In general, some \\"List\\" operations (for example, `ListInstances`) let you
optionally filter by availability domain if the scope of the resource type is within a
single availability domain. If you call one of these \\"List\\" operations without specifying
an availability domain, the resources are grouped by availability domain, then sorted."
type: str
choices:
- "TIMECREATED"
- "DISPLAYNAME"
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`). The `DISPLAYNAME` sort order
is case sensitive.
type: str
choices:
- "ASC"
- "DESC"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_display_name_option ]
"""
EXAMPLES = """
- name: Get a specific instance_agent_command
oci_compute_instance_agent_instance_agent_command_facts:
# required
instance_agent_command_id: "ocid1.instanceagentcommand.oc1..xxxxxxEXAMPLExxxxxx"
- name: List instance_agent_commands
oci_compute_instance_agent_instance_agent_command_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
# optional
sort_by: TIMECREATED
sort_order: ASC
"""
RETURN = """
instance_agent_commands:
description:
- List of InstanceAgentCommand resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the command.
- Returned for get operation
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the compartment containing the command.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- A user-friendly name. Does not have to be unique. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
time_created:
description:
- The date and time the command was created, in the format defined by
L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time the command was last updated, in the format defined by
L(RFC3339,https://tools.ietf.org/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
is_canceled:
description:
- Whether a request was made to cancel the command. Canceling a command is a best-effort attempt.
returned: on success
type: bool
sample: true
execution_time_out_in_seconds:
description:
- The amount of time that Oracle Cloud Agent is given to run the command on the instance before timing
out. The timer starts when Oracle Cloud Agent starts the command. Zero means no timeout.
- Returned for get operation
returned: on success
type: int
sample: 56
target:
description:
- The target instance that the command runs on.
- Returned for get operation
returned: on success
type: complex
contains:
instance_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the target instance.
returned: on success
type: str
sample: "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
content:
description:
- The contents of the command.
- Returned for get operation
returned: on success
type: complex
contains:
source:
description:
- The source of the command.
returned: on success
type: complex
contains:
source_type:
description:
- "The source type for the command. The following values are supported:"
- "- `TEXT` - uses a plain text command that is specified inline with the request.
- `OBJECT_STORAGE_URI` - imports a command from an Object Storage URL.
- `OBJECT_STORAGE_TUPLE` - imports a command from an Object Storage bucket."
- For background information about Object Storage buckets and URLs, see
L(Overview of Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm).
returned: on success
type: str
sample: TEXT
bucket_name:
description:
- The Object Storage bucket for the command.
returned: on success
type: str
sample: bucket_name_example
namespace_name:
description:
- The Object Storage namespace for the command.
returned: on success
type: str
sample: namespace_name_example
object_name:
description:
- The Object Storage object name for the command.
returned: on success
type: str
sample: object_name_example
source_uri:
description:
- The Object Storage URL or pre-authenticated request (PAR) for the command.
returned: on success
type: str
sample: source_uri_example
text:
description:
- The plain text command.
returned: on success
type: str
sample: text_example
text_sha256:
description:
- SHA-256 checksum value of the text content.
returned: on success
type: str
sample: text_sha256_example
output:
description:
- The output destination for the command.
returned: on success
type: complex
contains:
output_type:
description:
- "The output type for the command. The following values are supported:"
- "- `TEXT` - the command output is returned as plain text.
- `OBJECT_STORAGE_URI` - the command output is saved to an Object Storage URL.
- `OBJECT_STORAGE_TUPLE` - the command output is saved to an Object Storage bucket."
- For background information about Object Storage buckets and URLs, see
L(Overview of Object Storage,https://docs.cloud.oracle.com/Content/Object/Concepts/objectstorageoverview.htm).
returned: on success
type: str
sample: TEXT
bucket_name:
description:
- The Object Storage bucket for the command output.
returned: on success
type: str
sample: bucket_name_example
namespace_name:
description:
- The Object Storage namespace for the command output.
returned: on success
type: str
sample: namespace_name_example
object_name:
description:
- The Object Storage object name for the command output.
returned: on success
type: str
sample: object_name_example
output_uri:
description:
- The Object Storage URL or pre-authenticated request (PAR) for the command output.
returned: on success
type: str
sample: output_uri_example
instance_agent_command_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the command.
- Returned for list operation
returned: on success
type: str
sample: "ocid1.instanceagentcommand.oc1..xxxxxxEXAMPLExxxxxx"
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"is_canceled": true,
"execution_time_out_in_seconds": 56,
"target": {
"instance_id": "ocid1.instance.oc1..xxxxxxEXAMPLExxxxxx"
},
"content": {
"source": {
"source_type": "TEXT",
"bucket_name": "bucket_name_example",
"namespace_name": "namespace_name_example",
"object_name": "object_name_example",
"source_uri": "source_uri_example",
"text": "text_example",
"text_sha256": "text_sha256_example"
},
"output": {
"output_type": "TEXT",
"bucket_name": "bucket_name_example",
"namespace_name": "namespace_name_example",
"object_name": "object_name_example",
"output_uri": "output_uri_example"
}
},
"instance_agent_command_id": "ocid1.instanceagentcommand.oc1..xxxxxxEXAMPLExxxxxx"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.compute_instance_agent import ComputeInstanceAgentClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class InstanceAgentCommandFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get, list"""
    # NOTE: this module is generated (see header); fix the generator, not this file.
    def get_required_params_for_get(self):
        # Parameters that must be present to fetch a single command.
        return [
            "instance_agent_command_id",
        ]
    def get_required_params_for_list(self):
        # Parameters that must be present to list commands in a compartment.
        return [
            "compartment_id",
        ]
    def get_resource(self):
        # Fetch one InstanceAgentCommand via the shared call_with_backoff wrapper.
        return oci_common_utils.call_with_backoff(
            self.client.get_instance_agent_command,
            instance_agent_command_id=self.module.params.get(
                "instance_agent_command_id"
            ),
        )
    def list_resources(self):
        # Forward optional filters only when the user actually set them,
        # so the SDK defaults apply otherwise.
        optional_list_method_params = [
            "sort_by",
            "sort_order",
            "display_name",
        ]
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
        )
        # list_all_resources presumably aggregates paginated results — see oci_common_utils.
        return oci_common_utils.list_all_resources(
            self.client.list_instance_agent_commands,
            compartment_id=self.module.params.get("compartment_id"),
            **optional_kwargs
        )
# NOTE(review): get_custom_class presumably returns a no-op base when no
# user customization is registered — confirm in oci_resource_utils.
InstanceAgentCommandFactsHelperCustom = get_custom_class(
    "InstanceAgentCommandFactsHelperCustom"
)
class ResourceFactsHelper(
    InstanceAgentCommandFactsHelperCustom, InstanceAgentCommandFactsHelperGen
):
    # MRO places the custom class first so it can override generated methods.
    pass
def main():
    """Ansible module entry point: build the arg spec and return the facts."""
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            instance_agent_command_id=dict(aliases=["id"], type="str"),
            compartment_id=dict(type="str"),
            sort_by=dict(type="str", choices=["TIMECREATED", "DISPLAYNAME"]),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            display_name=dict(type="str"),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="instance_agent_command",
        service_client_class=ComputeInstanceAgentClient,
        namespace="compute_instance_agent",
    )
    result = []
    # Dispatch on the supplied parameters: a command OCID selects "get one",
    # a compartment OCID selects "list"; anything else is a usage error.
    if resource_facts_helper.is_get():
        result = [resource_facts_helper.get()]
    elif resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()
    module.exit_json(instance_agent_commands=result)
if __name__ == "__main__":
    main()
| 41.119593 | 144 | 0.559158 |
b789b2562a9fa2695275f81df3fda6ca47cd7ffb | 778 | py | Python | scripts/slave/recipes/ndk/ndk_buildbot.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipes/ndk/ndk_buildbot.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
] | null | null | null | scripts/slave/recipes/ndk/ndk_buildbot.py | bopopescu/chromium-build | f8e42c70146c1b668421ee6358dc550a955770a3 | [
"BSD-3-Clause"
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this recipe depends on, resolved by the recipe engine.
DEPS = [
  'chromite',
  'ndk',
  'recipe_engine/properties',
]
# Map master name to 'chromite' configuration name.
_MASTER_CONFIG_MAP = {
  'client.ndk': {
    'master_config': 'chromite_config',
  },
}
def RunSteps(api):
  """Configure chromite from the build properties, then run cbuildbot."""
  api.chromite.configure(api.properties, _MASTER_CONFIG_MAP)
  api.chromite.run_cbuildbot()
def GenTests(api):
  """Yield simulation tests for this recipe."""
  # Compose the test stub with its input properties via '+', preserving
  # the original evaluation order (test stub first, then the properties).
  test_data = api.test('basic')
  props = api.properties.generic(
      mastername='client.ndk',
      branch='master',
      cbb_config='ndk-linux-arm64-v8a',
      # chromite module uses path['root'] which exists only in Buildbot.
      path_config='buildbot',
  )
  yield test_data + props
| 21.611111 | 72 | 0.674807 |
282f7d9b5d5f6efea9271f5faeaac1ecbf5f90a8 | 1,023 | py | Python | sd_utils/logger.py | stephend017/sd_utils | 4c4a6da04fb244ada95c4c40dacf76c6b499e298 | [
"MIT"
] | null | null | null | sd_utils/logger.py | stephend017/sd_utils | 4c4a6da04fb244ada95c4c40dacf76c6b499e298 | [
"MIT"
] | 11 | 2021-03-14T23:53:58.000Z | 2021-07-17T06:12:16.000Z | sd_utils/logger.py | stephend017/sd_utils | 4c4a6da04fb244ada95c4c40dacf76c6b499e298 | [
"MIT"
import logging
from logging.handlers import RotatingFileHandler
def create_logger(path: str, level: int = logging.INFO) -> logging.Logger:
    """
    Creates a logger for a given file path

    Note: `path` should be `__file__` in most cases

    Args:
        path (str): the path of the file (common use is `__file__`)
        level (int): the level of message to use in the logger (see
            logging for constants)

    Returns:
        logging.Logger: The logger created for the given file
    """
    logger = logging.getLogger(path)
    logger.setLevel(level)
    # `logging.getLogger` returns the same logger object for the same
    # `path`, so guard against attaching a second (duplicate) handler
    # when this function is called more than once for one file —
    # previously every call added another handler, duplicating output.
    if not logger.handlers:
        log_formatter = logging.Formatter(
            "[%(asctime)s] [%(levelname)s] [%(filename)s:%(funcName)s(%(lineno)d)]: %(message)s"
        )
        my_handler = RotatingFileHandler(
            f"{path}.log",
            mode="a",
            maxBytes=5 * 1024 * 1024,  # rotate after 5 MiB
            backupCount=2,
            encoding=None,
            delay=0,
        )
        my_handler.setFormatter(log_formatter)
        my_handler.setLevel(level)
        logger.addHandler(my_handler)
    return logger
| 26.921053 | 92 | 0.633431 |
a7a6d2cf1b73eb462360371804003b3e7eae5d87 | 1,099 | py | Python | networking/socket_echo_server_uds.py | scotthuang1989/Python-3-Module-of-the-Week | 5f45f4602f084c899924ebc9c6b0155a6dc76f56 | [
"Apache-2.0"
] | 2 | 2018-09-17T05:52:12.000Z | 2021-11-09T17:19:29.000Z | 04_Unix_Domain_Sockets/01_socket_echo_server_uds.py | lanzhiwang/socket-example | b3bfb17d8a3e35ac54b60e434ddd6eb6615303ec | [
"BSD-3-Clause"
] | null | null | null | 04_Unix_Domain_Sockets/01_socket_echo_server_uds.py | lanzhiwang/socket-example | b3bfb17d8a3e35ac54b60e434ddd6eb6615303ec | [
"BSD-3-Clause"
import socket
import sys
import os
server_address = './uds_socket'
# Make sure the socket does not already exist
try:
    os.unlink(server_address)
except OSError:
    # Only re-raise if the path still exists (i.e. unlink failed for a
    # reason other than the file being absent).
    if os.path.exists(server_address):
        raise
# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Bind the socket to the address
print('starting up on {}'.format(server_address))
sock.bind(server_address)
# Listen for incoming connections (backlog of 1 pending client)
sock.listen(1)
while True:
    # Wait for a connection
    print('waiting for a connection')
    connection, client_address = sock.accept()
    try:
        print('connection from', client_address)
        # Receive the data in small chunks and retransmit it
        while True:
            data = connection.recv(16)
            print('received {!r}'.format(data))
            if data:
                print('sending data back to the client')
                connection.sendall(data)
            else:
                # An empty read means the client closed its end.
                print('no data from', client_address)
                break
    finally:
        # Clean up the connection
        connection.close()
896b01b9f62c29c0933b7abd901942cdf1d842a3 | 2,713 | py | Python | sydent/config/crypto.py | acoby/sydent | efad8295c4457c44665f54169c5e5077cf81e474 | [
"Apache-2.0"
] | null | null | null | sydent/config/crypto.py | acoby/sydent | efad8295c4457c44665f54169c5e5077cf81e474 | [
"Apache-2.0"
] | null | null | null | sydent/config/crypto.py | acoby/sydent | efad8295c4457c44665f54169c5e5077cf81e474 | [
"Apache-2.0"
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from configparser import ConfigParser
import nacl.encoding
import nacl.signing
import signedjson.key
import signedjson.types
from sydent.config._base import BaseConfig
class CryptoConfig(BaseConfig):
    # Holds the server's ed25519 signing key, generating or upgrading it
    # on first parse when necessary.
    def parse_config(self, cfg: "ConfigParser") -> bool:
        """
        Parse the crypto section of the config

        :param cfg: the configuration to be parsed

        :return: True if a signing key was generated or re-encoded and
            written back into ``cfg`` (the caller should persist the
            updated config), False if the configured key was used as-is.
        """
        signing_key_str = cfg.get("crypto", "ed25519.signingkey")
        signing_key_parts = signing_key_str.split(" ")
        save_key = False
        # N.B. `signedjson` expects `nacl.signing.SigningKey` instances which
        # have been monkeypatched to include new `alg` and `version` attributes.
        # This is captured by the `signedjson.types.SigningKey` protocol.
        self.signing_key: signedjson.types.SigningKey
        if signing_key_str == "":
            # No key configured yet: generate a fresh one and flag it to
            # be written back below.
            print(
                "INFO: This server does not yet have an ed25519 signing key. "
                "Creating one and saving it in the config file."
            )
            self.signing_key = signedjson.key.generate_signing_key("0")
            save_key = True
        elif len(signing_key_parts) == 1:
            # old format key: a single hex token with no algorithm/version
            # prefix. Decode it, monkeypatch the attributes signedjson
            # expects, and re-save in the modern format.
            print("INFO: Updating signing key format: brace yourselves")
            self.signing_key = nacl.signing.SigningKey(
                signing_key_str.encode("ascii"), encoder=nacl.encoding.HexEncoder
            )
            self.signing_key.version = "0"
            self.signing_key.alg = signedjson.key.NACL_ED25519
            save_key = True
        else:
            # Modern format: "<algorithm> <version> <base64 key>".
            self.signing_key = signedjson.key.decode_signing_key_base64(
                signing_key_parts[0], signing_key_parts[1], signing_key_parts[2]
            )
        if save_key:
            signing_key_str = "%s %s %s" % (
                self.signing_key.alg,
                self.signing_key.version,
                signedjson.key.encode_signing_key_base64(self.signing_key),
            )
            cfg.set("crypto", "ed25519.signingkey", signing_key_str)
            return True
        else:
            return False
| 35.233766 | 81 | 0.644674 |
ba9308f57b3cbc5465288326ff5d2a94d328b2cc | 322 | py | Python | propane/logging.py | tylerbutler/propane | 6c404285ab8d78865b7175a5c8adf8fae12d6be5 | [
"MIT"
] | 1 | 2017-12-21T18:16:20.000Z | 2017-12-21T18:16:20.000Z | propane/logging.py | tylerbutler/propane | 6c404285ab8d78865b7175a5c8adf8fae12d6be5 | [
"MIT"
] | null | null | null | propane/logging.py | tylerbutler/propane | 6c404285ab8d78865b7175a5c8adf8fae12d6be5 | [
"MIT"
# coding=utf-8
from __future__ import absolute_import, print_function
import pprint
__author__ = 'Tyler Butler <tyler@tylerbutler.com>'

# Module-level pretty-printer; kept public for backward compatibility.
pprinter = pprint.PrettyPrinter()


# noinspection PyBroadException
def log_object(obj):
    """Render ``obj`` for logging.

    Uses :func:`pprint.pformat` for a readable representation and
    deliberately falls back to ``str(obj)`` if pretty-printing fails, so
    that logging never raises because of an unprintable object.
    """
    try:
        formatted = pprint.pformat(obj)
    except Exception:
        formatted = str(obj)
    return formatted
| 16.947368 | 54 | 0.732919 |
a9e637c797cde5fcd33c51b7abef8c287a51b5aa | 785 | py | Python | gpytorch/utils/__init__.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | 2 | 2021-01-30T18:24:18.000Z | 2021-02-16T21:54:11.000Z | gpytorch/utils/__init__.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
] | 1 | 2021-02-24T14:01:43.000Z | 2021-02-24T14:01:43.000Z | gpytorch/utils/__init__.py | lrast/gpytorch | 2e0bbc9f59e4b4b54780c3e55db784c3d2c9a5bf | [
"MIT"
#!/usr/bin/env python3
import functools
import operator

from . import broadcasting, cholesky, grid, interpolation, lanczos, pivoted_cholesky, quadrature, sparse, warnings
from .contour_integral_quad import contour_integral_quad
from .linear_cg import linear_cg
from .memoize import cached
from .minres import minres
from .stochastic_lq import StochasticLQ
def prod(items):
    """Compute the product of the elements of ``items``.

    Args:
        items: An iterable of values supporting multiplication.
            Previously only sequences (supporting ``len``/slicing) were
            accepted; any iterable now works.

    Returns:
        The left-to-right product of all elements, or ``1`` when
        ``items`` is empty.
    """
    items = list(items)
    if not items:
        return 1
    # reduce with no initializer starts from items[0], exactly like the
    # original hand-written loop (no spurious leading `1 *` factor that
    # could change the result type of the first element).
    return functools.reduce(operator.mul, items)
# Public names exported by `gpytorch.utils`; keep in sync with the
# imports at the top of this module.
__all__ = [
    "broadcasting",
    "cached",
    "contour_integral_quad",
    "linear_cg",
    "StochasticLQ",
    "cholesky",
    "grid",
    "interpolation",
    "lanczos",
    "minres",
    "pivoted_cholesky",
    "prod",
    "quadrature",
    "sparse",
    "warnings",
]
| 19.625 | 114 | 0.630573 |
ac93fcc7a0e646db75c9fcd0dfc00ce5d702a2c0 | 4,372 | py | Python | Radioactive Mutate Vampire Bunnies.py | Lyubomir-Dakov/Python-Advanced | 3b3b7181cc2bafc6f60329d6e42873d0f78b972f | [
"MIT"
] | null | null | null | Radioactive Mutate Vampire Bunnies.py | Lyubomir-Dakov/Python-Advanced | 3b3b7181cc2bafc6f60329d6e42873d0f78b972f | [
"MIT"
] | null | null | null | Radioactive Mutate Vampire Bunnies.py | Lyubomir-Dakov/Python-Advanced | 3b3b7181cc2bafc6f60329d6e42873d0f78b972f | [
"MIT"
from collections import deque
# import sys
# from io import StringIO
#
# input_1 = """5 6
# .....P
# ......
# ...B..
# ......
# ......
# ULDDDR
# """
#
# input_2 = """4 5
# .....
# .....
# .B...
# ...P.
# LLLLLLLL
# """
#
# input_3 = """5 8
# .......B
# ...B....
# ....B..B
# ........
# ..P.....
# ULLL
# """
#
# sys.stdin = StringIO(input_2)
def find_current_location(the_matrix: list, the_rows: int, the_cols: int):
    """Locate the player marker 'P'; returns [row, col] or None."""
    for row_index in range(the_rows):
        for col_index in range(the_cols):
            if the_matrix[row_index][col_index] == 'P':
                return [row_index, col_index]
    return None
def check_current_position(the_matrix: list, the_position: list, if_lose: bool, if_win: bool):
    """Resolve the cell the player landed on.

    Landing on a bunny ('B') loses the game; otherwise the cell is marked
    with the player symbol 'P'. `if_win` is accepted for call-site
    symmetry but is not used here.
    """
    row, col = the_position
    if the_matrix[row][col] == 'B':
        return the_matrix, True
    the_matrix[row][col] = 'P'
    return the_matrix, if_lose
def spread_rabbits(the_matrix: list, player_position: list, the_rows: int, the_cols: int, if_lose: bool):
    """Grow every bunny one cell up/down/left/right (within the board).

    Sets the loss flag when a newly spawned bunny lands on the player,
    then writes all new bunnies into the matrix in place.
    """
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))
    spawned = set()
    for row in range(the_rows):
        for col in range(the_cols):
            if the_matrix[row][col] != 'B':
                continue
            for delta_row, delta_col in offsets:
                new_row, new_col = row + delta_row, col + delta_col
                if 0 <= new_row < the_rows and 0 <= new_col < the_cols:
                    spawned.add((new_row, new_col))
    if tuple(player_position) in spawned:
        if_lose = True
    for row, col in spawned:
        the_matrix[row][col] = 'B'
    return the_matrix, if_lose
def left_command(the_matrix: list, the_position: list, if_win: bool):
    """Move the player one cell left, clearing the cell it leaves.

    Stepping past column 0 means the player escapes the field: the win
    flag is set and the position is returned unchanged.
    Returns (matrix, position, win flag).
    """
    row, col = the_position
    the_matrix[row][col] = '.'
    if col > 0:
        the_position = [row, col - 1]
    else:
        if_win = True
    return the_matrix, the_position, if_win
def right_command(the_matrix: list, the_position: list, the_col_size: int, if_win: bool):
    """Move the player one cell right, clearing the cell it leaves.

    Stepping past the last column (``the_col_size`` - 1) means the player
    escapes the field: the win flag is set and the position is returned
    unchanged.  Returns (matrix, position, win flag).
    """
    row, col = the_position
    the_matrix[row][col] = '.'
    if col + 1 < the_col_size:
        the_position = [row, col + 1]
    else:
        if_win = True
    return the_matrix, the_position, if_win
def up_command(the_matrix: list, the_position: list, if_win: bool):
    """Move the player one cell up, clearing the cell it leaves.

    Stepping above row 0 means the player escapes the field: the win flag
    is set and the position is returned unchanged.
    Returns (matrix, position, win flag).
    """
    row, col = the_position
    the_matrix[row][col] = '.'
    if row > 0:
        the_position = [row - 1, col]
    else:
        if_win = True
    return the_matrix, the_position, if_win
def down_command(the_matrix: list, the_position: list, the_row_count: int, if_win: bool):
    """Move the player one cell down, clearing the cell it leaves.

    Stepping past the last row (``the_row_count`` - 1) means the player
    escapes the field: the win flag is set and the position is returned
    unchanged.  Returns (matrix, position, win flag).
    """
    row, col = the_position
    the_matrix[row][col] = '.'
    if row + 1 < the_row_count:
        the_position = [row + 1, col]
    else:
        if_win = True
    return the_matrix, the_position, if_win
# --- Game driver: read the board and the command string from stdin, play
# --- every command (or stop early on win/loss), then print the final board
# --- and the outcome with the player's final coordinates.
rows, cols = [int(x) for x in input().split()]
matrix = [[x for x in input()] for x in range(rows)]
commands = deque(x for x in input())
current_location = find_current_location(matrix, rows, cols)
played_turns = 0  # NOTE(review): never incremented or read afterwards
win_game = False
lose_game = False
while commands:
    if win_game or lose_game:
        break
    current_command = commands.popleft()
    # The player makes a move
    if current_command == 'L':
        matrix, current_location, win_game = left_command(matrix, current_location, win_game)
    elif current_command == 'R':
        matrix, current_location, win_game = right_command(matrix, current_location, cols, win_game)
    elif current_command == 'U':
        matrix, current_location, win_game = up_command(matrix, current_location, win_game)
    elif current_command == 'D':
        matrix, current_location, win_game = down_command(matrix, current_location, rows, win_game)
    # check if player went out of the field (win_game) or step on rabbit (lose_game)
    if not win_game:
        matrix, lose_game = check_current_position(matrix, current_location, lose_game, win_game)
    # spread the rabbits and check if rabbit steps on the player
    # (bunnies still spread on the turn the player escapes or dies)
    matrix, lose_game = spread_rabbits(matrix, current_location, rows, cols, lose_game)
# Print the final board, one row per line.
for row in matrix:
    row = ''.join(row)
    print(row)
# Report the outcome with the player's last recorded coordinates.
row, col = current_location
if win_game:
    print(f"won: {row} {col}")
if lose_game:
    print(f"dead: {row} {col}")
| 28.763158 | 106 | 0.572964 |
0cf26b2040ea1720881ac31748d6f84cc078c2bc | 12,036 | py | Python | code/python/BondsAPIforDigitalPortals/v2/fds/sdk/BondsAPIforDigitalPortals/model/inline_response2003_instrument_issuer.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/BondsAPIforDigitalPortals/v2/fds/sdk/BondsAPIforDigitalPortals/model/inline_response2003_instrument_issuer.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/BondsAPIforDigitalPortals/v2/fds/sdk/BondsAPIforDigitalPortals/model/inline_response2003_instrument_issuer.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
Prime Developer Trial
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.BondsAPIforDigitalPortals.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.BondsAPIforDigitalPortals.exceptions import ApiAttributeError
def lazy_import():
    # Deferred import: generated model modules can reference each other, so
    # the dependency is resolved at call time (not at module import time)
    # and published into this module's globals for openapi_types to use.
    from fds.sdk.BondsAPIforDigitalPortals.model.inline_response2003_instrument_issuer_country import InlineResponse2003InstrumentIssuerCountry
    globals()['InlineResponse2003InstrumentIssuerCountry'] = InlineResponse2003InstrumentIssuerCountry
class InlineResponse2003InstrumentIssuer(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Empty: this model has no enum-constrained attributes.
    allowed_values = {
    }
    # Empty: this model has no length/range/regex validations.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'id': (float,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'country': (InlineResponse2003InstrumentIssuerCountry,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # Not a polymorphic schema: no discriminator field.
        return None
    # Python attribute name -> JSON property name.
    attribute_map = {
        'id': 'id',  # noqa: E501
        'name': 'name',  # noqa: E501
        'country': 'country',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """InlineResponse2003InstrumentIssuer - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            id (float): Identifier of the issuer.. [optional]  # noqa: E501
            name (str): Name of the issuer.. [optional]  # noqa: E501
            country (InlineResponse2003InstrumentIssuerCountry): [optional]  # noqa: E501
        """
        # Pop framework-internal options before treating the rest as fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        # Model fields must be passed by keyword only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """InlineResponse2003InstrumentIssuer - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            id (float): Identifier of the issuer.. [optional]  # noqa: E501
            name (str): Name of the issuer.. [optional]  # noqa: E501
            country (InlineResponse2003InstrumentIssuerCountry): [optional]  # noqa: E501
        """
        # Pop framework-internal options before treating the rest as fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Model fields must be passed by keyword only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 44.577778 | 143 | 0.581422 |
2a58aeff18f10b1d57856488d6d5b7bbb45c56e3 | 881 | py | Python | src/optims/sgd.py | ThomasRanvier/modular_nets | fa572a1bacdf5801156390ff2ef8754cca027fb9 | [
"MIT"
] | null | null | null | src/optims/sgd.py | ThomasRanvier/modular_nets | fa572a1bacdf5801156390ff2ef8754cca027fb9 | [
"MIT"
] | null | null | null | src/optims/sgd.py | ThomasRanvier/modular_nets | fa572a1bacdf5801156390ff2ef8754cca027fb9 | [
"MIT"
] | null | null | null | import numpy as np
class Sgd():
"""
Class that implements a vanilla Stochastic Gradient Descent layer.
This is the simplest form of update, it changes the parameters along the
negative gradient direction.
"""
def __init__(self, learning_rate = 1e-2):
"""
Instantiates a Sgd optimisation.
:param learning_rate: The learning rate to apply.
:type learning_rate: float.
"""
self.learning_rate = learning_rate
def update(self, w, dw):
"""
Performs one Sgd update.
:param w: The weights.
:type w: A numpy array.
:param dw: The gradients of the weights.
:type dw: A numpy array of the same shape.
:return w: The updated weights.
:rtype w: A numpy array of the same shape as w.
"""
w += -self.learning_rate * dw
return w
| 29.366667 | 77 | 0.601589 |
4c5326cdce0b6ab7fff9790ca28cfd9cc5b4f906 | 11,235 | py | Python | clients/hydra/python/ory_hydra_client/model/health_not_ready_status.py | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | clients/hydra/python/ory_hydra_client/model/health_not_ready_status.py | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | clients/hydra/python/ory_hydra_client/model/health_not_ready_status.py | ory/sdk-generator | 958314d130922ad6f20f439b5230141a832231a5 | [
"Apache-2.0"
] | null | null | null | """
ORY Hydra
Welcome to the ORY Hydra HTTP API documentation. You will find documentation for all HTTP APIs here. # noqa: E501
The version of the OpenAPI document: v1.11.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_hydra_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from ory_hydra_client.exceptions import ApiAttributeError
class HealthNotReadyStatus(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # Empty: this model has no enum-constrained attributes.
    allowed_values = {
    }
    # Empty: this model has no length/range/regex validations.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'errors': ({str: (str,)},),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # Not a polymorphic schema: no discriminator field.
        return None
    # Python attribute name -> JSON property name.
    attribute_map = {
        'errors': 'errors',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """HealthNotReadyStatus - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            errors ({str: (str,)}): Errors contains a list of errors that caused the not ready status.. [optional]  # noqa: E501
        """
        # Pop framework-internal options before treating the rest as fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        # Model fields must be passed by keyword only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """HealthNotReadyStatus - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            errors ({str: (str,)}): Errors contains a list of errors that caused the not ready status.. [optional]  # noqa: E501
        """
        # Pop framework-internal options before treating the rest as fields.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Model fields must be passed by keyword only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
de1d0b4d3b925629af91776f6b011d13024ba888 | 2,027 | py | Python | py/bitbox02/bitbox02/__init__.py | jstrnbrg/bitbox02-firmware | 173a1fe340daf1a839b888286a654fb75745102d | [
"Apache-2.0"
] | null | null | null | py/bitbox02/bitbox02/__init__.py | jstrnbrg/bitbox02-firmware | 173a1fe340daf1a839b888286a654fb75745102d | [
"Apache-2.0"
] | null | null | null | py/bitbox02/bitbox02/__init__.py | jstrnbrg/bitbox02-firmware | 173a1fe340daf1a839b888286a654fb75745102d | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Shift Cryptosecurity AG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Library to interact with a BitBox02 device. """
from __future__ import print_function
import sys
# Fail fast on unsupported interpreters with an explicit message, instead
# of dying later with a confusing SyntaxError from 3.6+ language features.
if sys.version_info.major != 3 or sys.version_info.minor < 6:
    print(
        "Python version is {}.{}, but 3.6+ is required by this script.".format(
            sys.version_info.major, sys.version_info.minor
        ),
        file=sys.stderr,
    )
    sys.exit(1)
try:
    import hid
    # Touching hid.device proves we imported the hidapi package (which
    # provides hid.device) and not the unrelated "hid" PyPI package that
    # can shadow it.
    hid.device  # pylint: disable=pointless-statement
except AttributeError:
    print(
        "Unable to reference hid.device; maybe hid package is masking "
        "hidapi? Try:\n\t$ pip3 uninstall hid",
        file=sys.stderr,
    )
    sys.exit(1)
# pylint: disable=wrong-import-position
from .bitbox02 import (
AttestationException,
Backup,
BitBox02,
Bitbox02Exception,
BTCInputType,
BTCOutputExternal,
BTCOutputInternal,
BTCOutputType,
HARDENED,
hww,
UserAbortException,
)
from .bootloader import Bootloader
from .devices import (
DeviceInfo,
get_any_bitbox02,
get_any_bitbox02s,
get_any_bitbox02_bootloader,
get_any_bitbox02_bootloaders,
get_bitbox02multi_bootloader,
get_bitbox02multi_bootloaders,
get_bitbox02multi_device,
get_bitbox02multi_devices,
get_bitbox02btc_bootloader,
get_bitbox02btc_bootloaders,
get_bitbox02btc_device,
get_bitbox02btc_devices,
parse_device_version,
TooManyFoundException,
NoneFoundException,
)
| 27.767123 | 79 | 0.72077 |
73f21bca87fcfd33069a800562d2d3a64a33a840 | 12,888 | py | Python | resources/usr/lib/python2.7/dist-packages/numpy/_import_tools.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/lib/python2.7/dist-packages/numpy/_import_tools.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/lib/python2.7/dist-packages/numpy/_import_tools.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2020-05-28T23:01:44.000Z | 2020-05-28T23:01:44.000Z | import os
import sys
__all__ = ['PackageLoader']
class PackageLoader:
def __init__(self, verbose=False, infunc=False):
""" Manages loading packages.
"""
if infunc:
_level = 2
else:
_level = 1
self.parent_frame = frame = sys._getframe(_level)
self.parent_name = eval('__name__',frame.f_globals,frame.f_locals)
parent_path = eval('__path__',frame.f_globals,frame.f_locals)
if isinstance(parent_path, str):
parent_path = [parent_path]
self.parent_path = parent_path
if '__all__' not in frame.f_locals:
exec('__all__ = []',frame.f_globals,frame.f_locals)
self.parent_export_names = eval('__all__',frame.f_globals,frame.f_locals)
self.info_modules = {}
self.imported_packages = []
self.verbose = None
def _get_info_files(self, package_dir, parent_path, parent_package=None):
""" Return list of (package name,info.py file) from parent_path subdirectories.
"""
from glob import glob
files = glob(os.path.join(parent_path,package_dir,'info.py'))
for info_file in glob(os.path.join(parent_path,package_dir,'info.pyc')):
if info_file[:-1] not in files:
files.append(info_file)
info_files = []
for info_file in files:
package_name = os.path.dirname(info_file[len(parent_path)+1:])\
.replace(os.sep,'.')
if parent_package:
package_name = parent_package + '.' + package_name
info_files.append((package_name,info_file))
info_files.extend(self._get_info_files('*',
os.path.dirname(info_file),
package_name))
return info_files
def _init_info_modules(self, packages=None):
"""Initialize info_modules = {<package_name>: <package info.py module>}.
"""
import imp
info_files = []
info_modules = self.info_modules
if packages is None:
for path in self.parent_path:
info_files.extend(self._get_info_files('*',path))
else:
for package_name in packages:
package_dir = os.path.join(*package_name.split('.'))
for path in self.parent_path:
names_files = self._get_info_files(package_dir, path)
if names_files:
info_files.extend(names_files)
break
else:
try:
exec 'import %s.info as info' % (package_name)
info_modules[package_name] = info
except ImportError, msg:
self.warn('No scipy-style subpackage %r found in %s. '\
'Ignoring: %s'\
% (package_name,':'.join(self.parent_path), msg))
for package_name,info_file in info_files:
if package_name in info_modules:
continue
fullname = self.parent_name +'.'+ package_name
if info_file[-1]=='c':
filedescriptor = ('.pyc','rb',2)
else:
filedescriptor = ('.py','U',1)
try:
info_module = imp.load_module(fullname+'.info',
open(info_file,filedescriptor[1]),
info_file,
filedescriptor)
except Exception,msg:
self.error(msg)
info_module = None
if info_module is None or getattr(info_module,'ignore',False):
info_modules.pop(package_name,None)
else:
self._init_info_modules(getattr(info_module,'depends',[]))
info_modules[package_name] = info_module
return
def _get_sorted_names(self):
""" Return package names sorted in the order as they should be
imported due to dependence relations between packages.
"""
depend_dict = {}
for name,info_module in self.info_modules.items():
depend_dict[name] = getattr(info_module,'depends',[])
package_names = []
for name in depend_dict.keys():
if not depend_dict[name]:
package_names.append(name)
del depend_dict[name]
while depend_dict:
for name, lst in depend_dict.items():
new_lst = [n for n in lst if n in depend_dict]
if not new_lst:
package_names.append(name)
del depend_dict[name]
else:
depend_dict[name] = new_lst
return package_names
def __call__(self,*packages, **options):
"""Load one or more packages into parent package top-level namespace.
This function is intended to shorten the need to import many
subpackages, say of scipy, constantly with statements such as
import scipy.linalg, scipy.fftpack, scipy.etc...
Instead, you can say:
import scipy
scipy.pkgload('linalg','fftpack',...)
or
scipy.pkgload()
to load all of them in one call.
If a name which doesn't exist in scipy's namespace is
given, a warning is shown.
Parameters
----------
*packages : arg-tuple
the names (one or more strings) of all the modules one
wishes to load into the top-level namespace.
verbose= : integer
verbosity level [default: -1].
verbose=-1 will suspend also warnings.
force= : bool
when True, force reloading loaded packages [default: False].
postpone= : bool
when True, don't load packages [default: False]
"""
frame = self.parent_frame
self.info_modules = {}
if options.get('force',False):
self.imported_packages = []
self.verbose = verbose = options.get('verbose',-1)
postpone = options.get('postpone',None)
self._init_info_modules(packages or None)
self.log('Imports to %r namespace\n----------------------------'\
% self.parent_name)
for package_name in self._get_sorted_names():
if package_name in self.imported_packages:
continue
info_module = self.info_modules[package_name]
global_symbols = getattr(info_module,'global_symbols',[])
postpone_import = getattr(info_module,'postpone_import',False)
if (postpone and not global_symbols) \
or (postpone_import and postpone is not None):
continue
old_object = frame.f_locals.get(package_name,None)
cmdstr = 'import '+package_name
if self._execcmd(cmdstr):
continue
self.imported_packages.append(package_name)
if verbose!=-1:
new_object = frame.f_locals.get(package_name)
if old_object is not None and old_object is not new_object:
self.warn('Overwriting %s=%s (was %s)' \
% (package_name,self._obj2repr(new_object),
self._obj2repr(old_object)))
if '.' not in package_name:
self.parent_export_names.append(package_name)
for symbol in global_symbols:
if symbol=='*':
symbols = eval('getattr(%s,"__all__",None)'\
% (package_name),
frame.f_globals,frame.f_locals)
if symbols is None:
symbols = eval('dir(%s)' % (package_name),
frame.f_globals,frame.f_locals)
symbols = filter(lambda s:not s.startswith('_'),symbols)
else:
symbols = [symbol]
if verbose!=-1:
old_objects = {}
for s in symbols:
if s in frame.f_locals:
old_objects[s] = frame.f_locals[s]
cmdstr = 'from '+package_name+' import '+symbol
if self._execcmd(cmdstr):
continue
if verbose!=-1:
for s,old_object in old_objects.items():
new_object = frame.f_locals[s]
if new_object is not old_object:
self.warn('Overwriting %s=%s (was %s)' \
% (s,self._obj2repr(new_object),
self._obj2repr(old_object)))
if symbol=='*':
self.parent_export_names.extend(symbols)
else:
self.parent_export_names.append(symbol)
return
def _execcmd(self,cmdstr):
""" Execute command in parent_frame."""
frame = self.parent_frame
try:
exec (cmdstr, frame.f_globals,frame.f_locals)
except Exception,msg:
self.error('%s -> failed: %s' % (cmdstr,msg))
return True
else:
self.log('%s -> success' % (cmdstr))
return
def _obj2repr(self,obj):
""" Return repr(obj) with"""
module = getattr(obj,'__module__',None)
file = getattr(obj,'__file__',None)
if module is not None:
return repr(obj) + ' from ' + module
if file is not None:
return repr(obj) + ' from ' + file
return repr(obj)
def log(self,mess):
if self.verbose>1:
print >> sys.stderr, str(mess)
def warn(self,mess):
if self.verbose>=0:
print >> sys.stderr, str(mess)
def error(self,mess):
if self.verbose!=-1:
print >> sys.stderr, str(mess)
def _get_doc_title(self, info_module):
""" Get the title from a package info.py file.
"""
title = getattr(info_module,'__doc_title__',None)
if title is not None:
return title
title = getattr(info_module,'__doc__',None)
if title is not None:
title = title.lstrip().split('\n',1)[0]
return title
return '* Not Available *'
def _format_titles(self,titles,colsep='---'):
display_window_width = 70 # How to determine the correct value in runtime??
lengths = [len(name)-name.find('.')-1 for (name,title) in titles]+[0]
max_length = max(lengths)
lines = []
for (name,title) in titles:
name = name[name.find('.')+1:]
w = max_length - len(name)
words = title.split()
line = '%s%s %s' % (name,w*' ',colsep)
tab = len(line) * ' '
while words:
word = words.pop(0)
if len(line)+len(word)>display_window_width:
lines.append(line)
line = tab
line += ' ' + word
else:
lines.append(line)
return '\n'.join(lines)
    def get_pkgdocs(self):
        """ Return documentation summary of subpackages.

        Builds one aligned line per discovered subpackage; packages that
        are not yet imported are flagged with ``[*]``.  A second section
        lists any global symbols the subpackages export.
        """
        import sys
        self.info_modules = {}
        # Discover all subpackage info.py modules (populates self.info_modules).
        self._init_info_modules(None)
        titles = []
        symbols = []
        for package_name, info_module in self.info_modules.items():
            global_symbols = getattr(info_module,'global_symbols',[])
            fullname = self.parent_name +'.'+ package_name
            note = ''
            # Not yet imported -> mark with [*] (explained in the footer).
            if fullname not in sys.modules:
                note = ' [*]'
            titles.append((fullname,self._get_doc_title(info_module) + note))
            if global_symbols:
                symbols.append((package_name,', '.join(global_symbols)))
        retstr = self._format_titles(titles) +\
               '\n  [*] - using a package requires explicit import (see pkgload)'
        if symbols:
            retstr += """\n\nGlobal symbols from subpackages"""\
                      """\n-------------------------------\n""" +\
                      self._format_titles(symbols,'-->')
        return retstr
class PackageLoaderDebug(PackageLoader):
    """PackageLoader variant that traces every executed import command."""
    def _execcmd(self,cmdstr):
        """ Execute command in parent_frame."""
        frame = self.parent_frame
        # Python 2 print-statement and backtick-repr syntax below.
        print 'Executing',`cmdstr`,'...',
        sys.stdout.flush()
        exec (cmdstr, frame.f_globals,frame.f_locals)
        print 'ok'
        sys.stdout.flush()
        return
# Opt into the debug loader via the NUMPY_IMPORT_DEBUG environment variable.
if int(os.environ.get('NUMPY_IMPORT_DEBUG','0')):
    PackageLoader = PackageLoaderDebug
| 37.14121 | 87 | 0.523898 |
e617e507c9cd681433bac54ff72d33fdafb11a0b | 1,094 | py | Python | leaderboard/migrations/0009_auto_20200618_2345.py | AppraiseDev/OCELoT | 9237c1eb1d9feebb1a51966b8c1ef82b381b4b1e | [
"BSD-3-Clause"
] | 6 | 2020-06-25T05:00:45.000Z | 2022-03-30T09:45:11.000Z | leaderboard/migrations/0009_auto_20200618_2345.py | AppraiseDev/OCELoT | 9237c1eb1d9feebb1a51966b8c1ef82b381b4b1e | [
"BSD-3-Clause"
] | 42 | 2020-06-24T08:48:48.000Z | 2021-09-08T14:36:11.000Z | leaderboard/migrations/0009_auto_20200618_2345.py | AppraiseDev/OCELoT | 9237c1eb1d9feebb1a51966b8c1ef82b381b4b1e | [
"BSD-3-Clause"
] | 3 | 2020-05-25T20:34:08.000Z | 2021-03-21T05:10:11.000Z | # pylint: disable=invalid-name,missing-docstring
# Generated by Django 2.2.1 on 2020-06-19 06:45
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [('leaderboard', '0008_team')]
operations = [
migrations.AddField(
model_name='submission',
name='date_created',
field=models.DateTimeField(
auto_now_add=True,
help_text='Creation date of this submission',
null=True,
),
),
migrations.AlterField(
model_name='submission',
name='name',
field=models.CharField(
db_index=True,
help_text='Submission name (max 200 characters)',
max_length=200,
),
),
migrations.AlterField(
model_name='team',
name='token',
field=models.CharField(
blank=True, db_index=True, max_length=10
),
),
]
| 28.789474 | 66 | 0.519196 |
36dbceef459ad86842897f3a8430920bb313884f | 79 | py | Python | 00-simple.py | mtholder/git-branch-tutorial-data | b17606dcb289afe77b1222c16d83290e6b5e5aa5 | [
"BSD-2-Clause"
] | null | null | null | 00-simple.py | mtholder/git-branch-tutorial-data | b17606dcb289afe77b1222c16d83290e6b5e5aa5 | [
"BSD-2-Clause"
] | null | null | null | 00-simple.py | mtholder/git-branch-tutorial-data | b17606dcb289afe77b1222c16d83290e6b5e5aa5 | [
"BSD-2-Clause"
] | null | null | null | #!/bin/env python
from __future__ import print_function
print('Hello World!')
| 15.8 | 37 | 0.772152 |
492dc23a253e0af5c8799a4c1f2c5d533ae8acf5 | 2,745 | py | Python | ooi_instrument_agent/lock.py | oceanobservatories/ooi-instrument-agent | e22e4300079468bb99c543cbbf1cb5c8b4a96897 | [
"Apache-2.0"
] | null | null | null | ooi_instrument_agent/lock.py | oceanobservatories/ooi-instrument-agent | e22e4300079468bb99c543cbbf1cb5c8b4a96897 | [
"Apache-2.0"
] | null | null | null | ooi_instrument_agent/lock.py | oceanobservatories/ooi-instrument-agent | e22e4300079468bb99c543cbbf1cb5c8b4a96897 | [
"Apache-2.0"
] | null | null | null | from collections import MutableMapping
from logging import getLogger
log = getLogger(__name__)
class Locked(Exception):
status_code = 409
def __init__(self, message):
Exception.__init__(self)
self.message = message
class LockManager(MutableMapping):
"""
Class to wrap consul KV access as a "Check and Set" (CAS) atomic operation
This class behaves as a dictionary:
lock_manager = LockManager(consul)
# set a lock
lock_manager[DRIVER_ID] = lock_holder
# check a lock
lock_manager[DRIVER_ID]
# delete a lock
del lock_manager[DRIVER_ID]
For sets, we will fetch the current value and index, verify the lock is not currently set
and then set the lock with the previous fetched index. If two operations attempt to lock
simultaneously, one will fail:
locker 1 - GET(key) - returns 50, None
locker 2 - GET(key) - returns 50, None
locker 1 - SET(key, new_value, cas=50) - returns True (modify_index is incremented)
locker 2 - SET(key, new_value, cas=50) - returns False (modify_index != 50)
"""
def __init__(self, consul, prefix='agent/lock'):
self.consul = consul
self.prefix = prefix
def __getitem__(self, key):
key = '/'.join((self.prefix, key))
return self._get_value(key)
def __setitem__(self, key, value):
key = '/'.join((self.prefix, key))
current = self._get(key)
if current is not None:
value = current.get('Value')
if value is not None:
raise Locked({'locked-by': value})
modify_index = current.get('ModifyIndex')
else:
modify_index = 0
success = self.consul.kv.put(key, value, cas=modify_index)
if success:
return key
else:
return self[key]
def __delitem__(self, key):
key = '/'.join((self.prefix, key))
current = self._get(key)
if current is not None:
modify_index = current.get('ModifyIndex')
self.consul.kv.delete(key, cas=modify_index)
def __len__(self):
index, values = self.consul.kv.get(self.prefix, recurse=True)
if values is None:
return 0
return len(values)
def __iter__(self):
index, values = self.consul.kv.get(self.prefix, recurse=True)
if values is not None:
for value in values:
yield value.get('Key').replace(self.prefix, '').lstrip('/')
def _get(self, item):
index, value = self.consul.kv.get(item)
return value
def _get_value(self, item):
value = self._get(item)
if value is None:
return value
return value.get('Value')
| 29.202128 | 93 | 0.612022 |
a408dec73fecffc96fc04bc9119d17a8daefaf82 | 2,320 | py | Python | Input.py | Zazmuz/cordinate-jumper | 2a9eb32fb13188eb22a8457654212bd9e1c1633a | [
"MIT"
] | null | null | null | Input.py | Zazmuz/cordinate-jumper | 2a9eb32fb13188eb22a8457654212bd9e1c1633a | [
"MIT"
] | null | null | null | Input.py | Zazmuz/cordinate-jumper | 2a9eb32fb13188eb22a8457654212bd9e1c1633a | [
"MIT"
] | null | null | null | from pygame_engine import *
from Function import Function, PolynomialFuntion
def multiply(arr):
res = 1
for a in arr: res *= a
return res
class Input():
def __init__(self):
self.width = 500
self.height = 300
self.input_string = "y = "
self.function = None
def draw(self):
opacity_surface = pg.Surface((WIDTH, HEIGHT))
opacity_surface.set_alpha(180)
opacity_surface.fill((255,255,255))
screen.blit(opacity_surface, (0,0))
x = (WIDTH - self.width) // 2
y = (HEIGHT - self.height) // 2
pg.draw.rect(screen, (255,255,255), (x, HEIGHT - y - self.height, self.width, self.height))
myfont = pg.font.SysFont('Comic Sans MS', 30)
textsurface = myfont.render(self.input_string, False, (0, 0, 0))
screen.blit(textsurface,(x + 50, HEIGHT // 2 - 15))
def get_function(self, string):
print(string)
string = string[4:].split("+")
coefficients = [0]*100
for add in string:
multiply = add.split("*")
degree = 0
k = 1
for i in multiply:
if i == "x":
degree += 1
elif all([j in "1234567890" for j in i]):
k *= int(i)
coefficients[degree] += k
maximum_degree = 0
for i in range(100):
if coefficients[i] != 0:
maximum_degree = i + 1
function = PolynomialFuntion(maximum_degree)
function.coefficient = coefficients[:maximum_degree]
self.function = function
def take_input(self, EVENTS):
for event in EVENTS:
if event.type == pg.KEYDOWN:
if event.key == pg.K_BACKSPACE:
if len(self.input_string) == 4:
continue
self.input_string = self.input_string[:-1]
if event.key == pg.K_RETURN:
if self.get_function(self.input_string):
return self.function
else:
l = event.unicode
if l in "1234567890x+* ":
self.input_string += l
return -1
| 29.74359 | 100 | 0.495259 |
e7c2a439b3d1174fdef9a46ab3bc291f5e082b35 | 1,333 | py | Python | application_materials/Project2/pca_mnist2.py | Xiangzhoujushi/Projects | 492236608888e8f0adadcd7ad708c90da1ab74d5 | [
"MIT"
] | null | null | null | application_materials/Project2/pca_mnist2.py | Xiangzhoujushi/Projects | 492236608888e8f0adadcd7ad708c90da1ab74d5 | [
"MIT"
] | null | null | null | application_materials/Project2/pca_mnist2.py | Xiangzhoujushi/Projects | 492236608888e8f0adadcd7ad708c90da1ab74d5 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
from sklearn.decomposition import PCA
import pandas as pd
# Step 1: load MNIST data (the smaller version)
X = np.load('./mnist_X.npy')
y = np.load('./mnist_y.npy')
# Step 2: project the data to 1-D with PCA (earlier comment said t-SNE,
# but the code below actually uses sklearn PCA with a single component)
X_PCA = PCA(n_components=1).fit_transform(X)
# Python 2 print statement; this script predates Python 3.
print "The {} dims images have be projected to {}D space".format(X.shape[1], X_PCA.shape[1])
# Step 3: visualize with scatterplot (do this in D3)
# One color/label per digit class 0-9.
color = ['black', 'red', 'green', 'blue', 'gold',
        'purple','deeppink', 'orchid', 'teal', 'yellow']
label = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
fig, ax = plt.subplots()
# declare the pandas column names
X_columns = ['x_coord']
# ignore the index
df1 = pd.DataFrame(X_PCA,columns=X_columns)
color_list = []
label_list = []
for i in range(len(label)):
    # each digit has 100 images in the smaller version MNIST (sorted in order)
    # ax.scatter(X_2D[i*100:(i+1)*100,0], X_2D[i*100:(i+1)*100:,1],
    # c=color[i], label=label[i], alpha=0.3, edgecolors='none')
    for j in range(100):
        color_list.append(color[i])
        label_list.append(label[i])
# Attach the per-row color/label columns to the PCA coordinates.
df2 = pd.DataFrame({'color' : color_list, 'label': label_list})
df3 = df1.join(df2)
df3.to_csv('PCA_coords.csv',index = False)
# store all the data into a csv file.
# ax.legend()
# plt.show()
46a81097c71b64a998f41ae86359abdfe09115d3 | 1,936 | py | Python | pgoapi/protos/pogoprotos/networking/requests/messages/fetch_all_news_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 2 | 2018-02-09T07:35:25.000Z | 2019-05-22T17:51:07.000Z | pgoapi/protos/pogoprotos/networking/requests/messages/fetch_all_news_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | null | null | null | pgoapi/protos/pogoprotos/networking/requests/messages/fetch_all_news_message_pb2.py | linherest/pgoapi | e3bdce71b06c099663e9796c8df166883059edd9 | [
"MIT"
] | 15 | 2017-02-24T01:30:23.000Z | 2021-06-27T08:46:43.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/fetch_all_news_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/fetch_all_news_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nDpogoprotos/networking/requests/messages/fetch_all_news_message.proto\x12\'pogoprotos.networking.requests.messages\"\x15\n\x13\x46\x65tchAllNewsMessageb\x06proto3')
)
_FETCHALLNEWSMESSAGE = _descriptor.Descriptor(
name='FetchAllNewsMessage',
full_name='pogoprotos.networking.requests.messages.FetchAllNewsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=134,
)
DESCRIPTOR.message_types_by_name['FetchAllNewsMessage'] = _FETCHALLNEWSMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FetchAllNewsMessage = _reflection.GeneratedProtocolMessageType('FetchAllNewsMessage', (_message.Message,), dict(
DESCRIPTOR = _FETCHALLNEWSMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.fetch_all_news_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.FetchAllNewsMessage)
))
_sym_db.RegisterMessage(FetchAllNewsMessage)
# @@protoc_insertion_point(module_scope)
| 30.730159 | 186 | 0.805785 |
8aaf1c233fb59162abd8f9cfcf52f08f933a2337 | 2,347 | py | Python | mmgen/models/architectures/stylegan/utils.py | HXWAndCL/mmgeneration | 9afb1d740bf56a4ecde5064d5bb2a4e2d777638b | [
"Apache-2.0"
] | 1 | 2021-05-27T13:04:41.000Z | 2021-05-27T13:04:41.000Z | mmgen/models/architectures/stylegan/utils.py | HXWAndCL/mmgeneration | 9afb1d740bf56a4ecde5064d5bb2a4e2d777638b | [
"Apache-2.0"
] | null | null | null | mmgen/models/architectures/stylegan/utils.py | HXWAndCL/mmgeneration | 9afb1d740bf56a4ecde5064d5bb2a4e2d777638b | [
"Apache-2.0"
] | null | null | null | import torch
from ..common import get_module_device
@torch.no_grad()
def get_mean_latent(generator, num_samples=4096, bs_per_repeat=1024):
"""Get mean latent of W space in Style-based GANs.
Args:
generator (nn.Module): Generator of a Style-based GAN.
num_samples (int, optional): Number of sample times. Defaults to 4096.
bs_per_repeat (int, optional): Batch size of noises per sample.
Defaults to 1024.
Returns:
Tensor: Mean latent of this generator.
"""
device = get_module_device(generator)
mean_style = None
n_repeat = num_samples // bs_per_repeat
assert n_repeat * bs_per_repeat == num_samples
for _ in range(n_repeat):
style = generator.style_mapping(
torch.randn(bs_per_repeat,
generator.style_channels).to(device)).mean(
0, keepdim=True)
if mean_style is None:
mean_style = style
else:
mean_style += style
mean_style /= float(n_repeat)
return mean_style
@torch.no_grad()
def style_mixing(generator,
                 n_source,
                 n_target,
                 inject_index=1,
                 truncation_latent=None,
                 truncation=0.7,
                 style_channels=512,
                 **kwargs):
    # Build a style-mixing image grid: a blank (-1) corner tile, one row of
    # source images, then for each target latent its own image followed by
    # the images generated from [target, source] mixed codes.
    # NOTE(review): the exact grid layout assumed by downstream viewers is
    # not visible here — confirm against the caller.
    device = get_module_device(generator)
    source_code = torch.randn(n_source, style_channels).to(device)
    target_code = torch.randn(n_target, style_channels).to(device)
    source_image = generator(
        source_code,
        truncation_latent=truncation_latent,
        truncation=truncation,
        **kwargs)
    # Image spatial size taken from the generated samples.
    h, w = source_image.shape[-2:]
    # Placeholder tile filled with -1 (assumed "blank" in the output range).
    images = [torch.ones(1, 3, h, w).to(device) * -1]
    target_image = generator(
        target_code,
        truncation_latent=truncation_latent,
        truncation=truncation,
        **kwargs)
    images.append(source_image)
    for i in range(n_target):
        # Passing a list of two codes triggers style mixing inside the
        # generator, crossing over at inject_index.
        image = generator(
            [target_code[i].unsqueeze(0).repeat(n_source, 1), source_code],
            truncation_latent=truncation_latent,
            truncation=truncation,
            inject_index=inject_index,
            **kwargs)
        images.append(target_image[i].unsqueeze(0))
        images.append(image)
    images = torch.cat(images, 0)
    return images
| 28.975309 | 78 | 0.613123 |
cd2ea538f7923c50246f78fe6925edf65a1332d5 | 3,133 | py | Python | miepy/vsh/expansion.py | johnaparker/MiePy | 5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be | [
"MIT"
] | 3 | 2016-05-30T06:45:29.000Z | 2017-08-30T19:58:56.000Z | miepy/vsh/expansion.py | johnaparker/MiePy | 5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be | [
"MIT"
] | null | null | null | miepy/vsh/expansion.py | johnaparker/MiePy | 5c5bb5a07c8ab79e9e2a9fc79fb9779e690147be | [
"MIT"
] | 5 | 2016-12-13T02:05:31.000Z | 2018-03-23T07:11:30.000Z | """
Expansion of electric and magnetic fields given expansion coefficients
"""
import numpy as np
from miepy import vsh
#TODO: move k argument to field function for consistency
def expand_E(p, k, mode):
    """Expand VSH coefficients to obtain an electric field function
    Returns E(r,θ,φ) function

    Arguments:
        p[2,rmax]        expansion coefficients
        k                wavenumber
        mode: vsh_mode   type of VSH (outgoing, incident, interior, ingoing)
    """
    lmax = vsh.rmax_to_lmax(p.shape[1])
    # Overall prefactor: +1j for outgoing VSH modes, -1j for all others.
    factor = 1j if mode == vsh.vsh_mode.outgoing else -1j
    #TODO: depends on theta.shape
    def f(rad, theta, phi):
        # Returned closure: evaluates the spherical field components at the
        # given coordinates (arrays are coerced to float ndarrays).
        (rad, theta, phi) = map(lambda A: np.asarray(A, dtype=float), (rad, theta, phi))
        E_sph = np.zeros(shape=(3,) + theta.shape, dtype=complex)
        # Accumulate the contribution of every (n, m) multipole mode.
        for i,n,m in vsh.mode_indices(lmax):
            Nfunc,Mfunc = vsh.VSH(n, m, mode=mode)
            Emn_val = vsh.Emn(m, n)
            N = Nfunc(rad, theta, phi, k)
            M = Mfunc(rad, theta, phi, k)
            E_sph += factor*Emn_val*(p[0,i]*N + p[1,i]*M)
        return E_sph
    return f
def expand_E_far(p_scat, k):
    """Expand VSH scattering coefficients to obtain an electric field function for the far-field
    Returns E(r,θ,φ) function

    Arguments:
        p_scat[2,rmax]   scattering coefficients
        k                wavenumber
    """
    lmax = vsh.rmax_to_lmax(p_scat.shape[1])
    #TODO: depends on theta.shape
    def f(rad, theta, phi):
        (rad, theta, phi) = map(lambda A: np.asarray(A, dtype=float), (rad, theta, phi))
        # Only the theta/phi components are populated below: the radial
        # component E_sph[0] is identically zero in the far field.
        E_sph = np.zeros(shape=(3,) + theta.shape, dtype=complex)
        # Outgoing spherical-wave prefactor e^{ikr}/(kr).
        factor = np.exp(1j*k*rad)/(k*rad)
        for i,n,m in vsh.mode_indices(lmax):
            Emn_val = vsh.Emn(m, n)
            # Angular functions (note: local name `pi` shadows math pi here).
            pi = vsh.special.pi_func(n, m, theta)
            tau = vsh.special.tau_func(n, m, theta)
            E_sph[1] += 1j*factor*Emn_val*(-1j)**(n)*(p_scat[0,i]*tau + p_scat[1,i]*pi)*np.exp(1j*m*phi)
            E_sph[2] += -factor*Emn_val*(-1j)**(n)*(p_scat[0,i]*pi + p_scat[1,i]*tau)*np.exp(1j*m*phi)
        return E_sph
    return f
def expand_H(p, k, mode, eps, mu):
    """Expand VSH coefficients to obtain a magnetic field function.

    Returns H(r,θ,φ) function

    Arguments:
        p[2,rmax]        expansion coefficients
        k                wavenumber
        mode: vsh_mode   type of VSH (outgoing, incident, interior, ingoing)
        eps              medium permittivity
        mu               medium permeability
    """
    # H follows from E by swapping the two coefficient rows and scaling by
    # the medium impedance factor -i*sqrt(eps/mu).
    E_field = expand_E(p[::-1], k, mode)
    impedance_factor = -1j*np.sqrt(eps/mu)

    def H_field(*args):
        return impedance_factor*E_field(*args)

    return H_field
def expand_H_far(p_scat, k, eps, mu):
    """Expand VSH scattering coefficients to obtain a magnetic field function for the far-field.

    Returns H(r,θ,φ) function

    Arguments:
        p_scat[2,rmax]   scattering coefficients
        k                wavenumber
        eps              medium permittivity
        mu               medium permeability
    """
    # Same relation as expand_H: swap coefficient rows, scale by impedance.
    E_field = expand_E_far(p_scat[::-1], k)
    impedance_factor = -1j*np.sqrt(eps/mu)

    def H_field(*args):
        return impedance_factor*E_field(*args)

    return H_field
| 31.33 | 104 | 0.588254 |
75dd08810c7a8ac078456a9c39ab9ff5aef00e9e | 14,347 | py | Python | cli/tests/pcluster/config/defaults.py | ddeidda/aws-parallelcluster | b1f468d2283168dfd2992f791cee79bef3a4920a | [
"Apache-2.0"
] | null | null | null | cli/tests/pcluster/config/defaults.py | ddeidda/aws-parallelcluster | b1f468d2283168dfd2992f791cee79bef3a4920a | [
"Apache-2.0"
] | 14 | 2022-03-11T10:26:58.000Z | 2022-03-28T10:40:43.000Z | cli/tests/pcluster/config/defaults.py | rexcsn/aws-parallelcluster | be5f5fd926e4bd942df3da907351e5de99d49485 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
# ------------------ Default internal representation values ------------------ #
# AWS credentials/region defaults (all unset -> fall back to environment/CLI).
DEFAULT_AWS_DICT = {"aws_access_key_id": None, "aws_secret_access_key": None, "aws_region_name": None}
# Global CLI behaviour defaults.
DEFAULT_GLOBAL_DICT = {"cluster_template": "default", "update_check": True, "sanity_check": True}
# Command aliases; placeholders are substituted at invocation time.
DEFAULT_ALIASES_DICT = {"ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"}
# Scaling defaults (minutes of idle time before scale-down).
DEFAULT_SCALING_DICT = {"scaledown_idletime": 10}
# Networking defaults; None means "not configured".
DEFAULT_VPC_DICT = {
    "vpc_id": None,
    "master_subnet_id": None,
    "ssh_from": "0.0.0.0/0",
    "additional_sg": None,
    "compute_subnet_id": None,
    "compute_subnet_cidr": None,
    "use_public_ips": True,
    "vpc_security_group_id": None,
    "master_availability_zone": None,
    "compute_availability_zone": None,
}
DEFAULT_EBS_DICT = {
"shared_dir": None,
"ebs_snapshot_id": None,
"volume_type": "gp2",
"volume_size": None,
"volume_iops": None,
"encrypted": False,
"ebs_kms_key_id": None,
"ebs_volume_id": None,
"volume_throughput": 125,
}
DEFAULT_EFS_DICT = {
"shared_dir": None,
"efs_fs_id": None,
"performance_mode": "generalPurpose",
"efs_kms_key_id": None,
"provisioned_throughput": None,
"encrypted": False,
"throughput_mode": "bursting",
}
DEFAULT_RAID_DICT = {
"shared_dir": None,
"raid_type": None,
"num_of_raid_volumes": 2,
"volume_type": "gp2",
"volume_size": 20,
"volume_iops": None,
"encrypted": False,
"ebs_kms_key_id": None,
"volume_throughput": 125,
}
DEFAULT_FSX_DICT = {
"shared_dir": None,
"fsx_fs_id": None,
"storage_capacity": None,
"fsx_kms_key_id": None,
"imported_file_chunk_size": None,
"export_path": None,
"import_path": None,
"weekly_maintenance_start_time": None,
"deployment_type": None,
"per_unit_storage_throughput": None,
"daily_automatic_backup_start_time": None,
"automatic_backup_retention_days": None,
"copy_tags_to_backups": None,
"fsx_backup_id": None,
"auto_import_policy": None,
"storage_type": None,
"drive_cache_type": "NONE",
"existing_mount_name": "NONE",
"existing_dns_name": "NONE",
}
DEFAULT_DCV_DICT = {"enable": None, "port": 8443, "access_from": "0.0.0.0/0"}
DEFAULT_CLUSTER_SIT_DICT = {
"key_name": None,
"template_url": None,
"hit_template_url": None,
"cw_dashboard_template_url": None,
"base_os": None, # base_os does not have a default, but this is here to make testing easier
"scheduler": None, # The cluster does not have a default, but this is here to make testing easier
"shared_dir": "/shared",
"placement_group": None,
"placement": "compute",
"master_instance_type": None,
"master_root_volume_size": 35,
"compute_instance_type": None,
"compute_root_volume_size": 35,
"initial_queue_size": 0,
"max_queue_size": 10,
"maintain_initial_size": False,
"min_vcpus": 0,
"desired_vcpus": 4,
"max_vcpus": 10,
"cluster_type": "ondemand",
"spot_price": 0.0,
"spot_bid_percentage": 0,
"proxy_server": None,
"ec2_iam_role": None,
"additional_iam_policies": ["arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"],
"s3_read_resource": None,
"s3_read_write_resource": None,
"enable_efa": None,
"enable_efa_gdr": None,
"ephemeral_dir": "/scratch",
"encrypted_ephemeral": False,
"custom_ami": None,
"pre_install": None,
"pre_install_args": None,
"post_install": None,
"post_install_args": None,
"extra_json": {},
"additional_cfn_template": None,
"tags": {},
"custom_chef_cookbook": None,
"disable_hyperthreading": False,
"enable_intel_hpc_platform": False,
"scaling_settings": "default",
"vpc_settings": "default",
"ebs_settings": None,
"efs_settings": None,
"raid_settings": None,
"fsx_settings": None,
"dcv_settings": None,
"cw_log_settings": None,
"dashboard_settings": None,
"cluster_config_metadata": {"sections": {}},
"architecture": "x86_64",
"network_interfaces_count": ["1", "1"],
"cluster_resource_bucket": None,
"iam_lambda_role": None,
"instance_types_data": {},
}
DEFAULT_CLUSTER_HIT_DICT = {
"key_name": None,
"template_url": None,
"hit_template_url": None,
"cw_dashboard_template_url": None,
"base_os": None, # base_os does not have a default, but this is here to make testing easier
"scheduler": None, # The cluster does not have a default, but this is here to make testing easier
"shared_dir": "/shared",
"master_instance_type": None,
"master_root_volume_size": 35,
"compute_root_volume_size": 35,
"proxy_server": None,
"ec2_iam_role": None,
"additional_iam_policies": ["arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"],
"s3_read_resource": None,
"s3_read_write_resource": None,
"enable_efa": None,
"enable_efa_gdr": None,
"ephemeral_dir": "/scratch",
"encrypted_ephemeral": False,
"custom_ami": None,
"pre_install": None,
"pre_install_args": None,
"post_install": None,
"post_install_args": None,
"extra_json": {},
"additional_cfn_template": None,
"tags": {},
"custom_chef_cookbook": None,
"disable_hyperthreading": None,
"enable_intel_hpc_platform": False,
"disable_cluster_dns": False,
"scaling_settings": "default",
"vpc_settings": "default",
"ebs_settings": None,
"efs_settings": None,
"raid_settings": None,
"fsx_settings": None,
"dcv_settings": None,
"cw_log_settings": None,
"dashboard_settings": None,
"queue_settings": None,
"default_queue": None,
"cluster_config_metadata": {"sections": {}},
"architecture": "x86_64",
"network_interfaces_count": ["1", "1"],
"cluster_resource_bucket": None, # cluster_resource_bucket no default, but this is here to make testing easier
"iam_lambda_role": None,
"instance_types_data": {},
}
# CloudWatch logs: enabled by default with 14-day retention.
DEFAULT_CW_LOG_DICT = {"enable": True, "retention_days": 14}
DEFAULT_DASHBOARD_DICT = {"enable": True}
DEFAULT_PCLUSTER_DICT = {"cluster": DEFAULT_CLUSTER_SIT_DICT}
class DefaultDict(Enum):
    """Utility class to store default values for the internal dictionary representation of PclusterConfig sections."""
    aws = DEFAULT_AWS_DICT
    # Trailing underscore avoids shadowing the `global` keyword.
    global_ = DEFAULT_GLOBAL_DICT
    aliases = DEFAULT_ALIASES_DICT
    cluster_sit = DEFAULT_CLUSTER_SIT_DICT
    cluster_hit = DEFAULT_CLUSTER_HIT_DICT
    scaling = DEFAULT_SCALING_DICT
    vpc = DEFAULT_VPC_DICT
    ebs = DEFAULT_EBS_DICT
    efs = DEFAULT_EFS_DICT
    raid = DEFAULT_RAID_DICT
    fsx = DEFAULT_FSX_DICT
    dcv = DEFAULT_DCV_DICT
    cw_log = DEFAULT_CW_LOG_DICT
    dashboard = DEFAULT_DASHBOARD_DICT
    pcluster = DEFAULT_PCLUSTER_DICT
# ------------------ Default CFN parameters ------------------ #
# number of CFN parameters created by the PclusterConfig object.
CFN_SIT_CONFIG_NUM_OF_PARAMS = 64
CFN_HIT_CONFIG_NUM_OF_PARAMS = 54
# CFN parameters created by the pcluster CLI
CFN_CLI_RESERVED_PARAMS = ["ArtifactS3RootDirectory", "RemoveBucketOnDeletion"]
DEFAULT_SCALING_CFN_PARAMS = {"ScaleDownIdleTime": "10"}
# "NONE" is the CFN sentinel for an unset string parameter.
DEFAULT_VPC_CFN_PARAMS = {
    "VPCId": "NONE",
    "MasterSubnetId": "NONE",
    "AccessFrom": "0.0.0.0/0",
    "AdditionalSG": "NONE",
    "ComputeSubnetId": "NONE",
    "ComputeSubnetCidr": "NONE",
    "UsePublicIps": "true",
    "VPCSecurityGroupId": "NONE",
    "AvailabilityZone": "NONE",
}
# Comma-joined values carry one slot per supported EBS volume (5 max).
# NOTE(review): "VolumeIdThroughput" differs from the "VolumeThroughput"
# key used in the cluster parameter dicts below — looks like a typo;
# confirm against the CloudFormation template.
DEFAULT_EBS_CFN_PARAMS = {
    "SharedDir": "NONE,NONE,NONE,NONE,NONE",
    "EBSSnapshotId": "NONE,NONE,NONE,NONE,NONE",
    "VolumeType": "gp2,gp2,gp2,gp2,gp2",
    "VolumeSize": "NONE,NONE,NONE,NONE,NONE",
    "VolumeIOPS": "NONE,NONE,NONE,NONE,NONE",
    "EBSEncryption": "false,false,false,false,false",
    "EBSKMSKeyId": "NONE,NONE,NONE,NONE,NONE",
    "EBSVolumeId": "NONE,NONE,NONE,NONE,NONE",
    "VolumeIdThroughput": "125,125,125,125,125",
}
DEFAULT_EFS_CFN_PARAMS = {"EFSOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE"}
DEFAULT_RAID_CFN_PARAMS = {"RAIDOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE"}
DEFAULT_FSX_CFN_PARAMS = {"FSXOptions": "{}".format(",".join(["NONE"] * 19))}
DEFAULT_DCV_CFN_PARAMS = {"DCVOptions": "NONE,NONE,NONE"}
DEFAULT_CW_LOG_CFN_PARAMS = {"CWLogOptions": "true,14"}
DEFAULT_CLUSTER_SIT_CFN_PARAMS = {
"KeyName": "NONE",
"BaseOS": "alinux2",
"Scheduler": "slurm",
"SharedDir": "/shared",
"PlacementGroup": "NONE",
"Placement": "compute",
"MasterInstanceType": "t2.micro",
"MasterRootVolumeSize": "35",
"ComputeInstanceType": "t2.micro",
"ComputeRootVolumeSize": "35",
"DesiredSize": "0",
"MaxSize": "10",
"MinSize": "0",
"ClusterType": "ondemand",
"SpotPrice": "0",
"ProxyServer": "NONE",
"EC2IAMRoleName": "NONE",
"EC2IAMPolicies": "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy",
"S3ReadResource": "NONE",
"S3ReadWriteResource": "NONE",
"EFA": "NONE",
"EFAGDR": "NONE",
"EphemeralDir": "/scratch",
"EncryptedEphemeral": "false",
"CustomAMI": "NONE",
"PreInstallScript": "NONE",
"PreInstallArgs": "NONE",
"PostInstallScript": "NONE",
"PostInstallArgs": "NONE",
"ExtraJson": "{}",
"AdditionalCfnTemplate": "NONE",
"CustomChefCookbook": "NONE",
"NumberOfEBSVol": "1",
"Cores": "NONE,NONE,NONE,NONE",
"IntelHPCPlatform": "false",
"ResourcesS3Bucket": "NONE", # parameter added by the CLI
# "ArtifactS3RootDirectory": "NONE", # parameter added by the CLI
# "RemoveBucketOnDeletion": "NONE", # parameter added by the CLI
# scaling
"ScaleDownIdleTime": "10",
# vpc
"VPCId": "NONE",
"MasterSubnetId": "NONE",
"AccessFrom": "0.0.0.0/0",
"AdditionalSG": "NONE",
"ComputeSubnetId": "NONE",
"ComputeSubnetCidr": "NONE",
"UsePublicIps": "true",
"VPCSecurityGroupId": "NONE",
"AvailabilityZone": "NONE",
# ebs
# "SharedDir": "NONE,NONE,NONE,NONE,NONE", # not existing with single ebs volume
"EBSSnapshotId": "NONE,NONE,NONE,NONE,NONE",
"VolumeType": "gp2,gp2,gp2,gp2,gp2",
"VolumeSize": "NONE,NONE,NONE,NONE,NONE",
"VolumeIOPS": "NONE,NONE,NONE,NONE,NONE",
"EBSEncryption": "false,false,false,false,false",
"EBSKMSKeyId": "NONE,NONE,NONE,NONE,NONE",
"EBSVolumeId": "NONE,NONE,NONE,NONE,NONE",
"VolumeThroughput": "125,125,125,125,125",
# efs
"EFSOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE",
# raid
"RAIDOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE",
# fsx
"FSXOptions": "{}".format(",".join(["NONE"] * 19)),
# dcv
"DCVOptions": "NONE,NONE,NONE",
# cw_log_settings
"CWLogOptions": "true,14",
"ClusterConfigMetadata": "{'sections': {}}",
# architecture
"Architecture": "x86_64",
"NetworkInterfacesCount": "1,1",
"IAMLambdaRoleName": "NONE",
"InstanceTypesData": "{}",
}
DEFAULT_CLUSTER_HIT_CFN_PARAMS = {
"KeyName": "NONE",
"BaseOS": "alinux2",
"Scheduler": "slurm",
"SharedDir": "/shared",
"MasterInstanceType": "t2.micro",
"MasterRootVolumeSize": "35",
"ComputeRootVolumeSize": "35",
"ProxyServer": "NONE",
"EC2IAMRoleName": "NONE",
"EC2IAMPolicies": "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy",
"S3ReadResource": "NONE",
"S3ReadWriteResource": "NONE",
"EFA": "NONE",
"EFAGDR": "NONE",
"EphemeralDir": "/scratch",
"EncryptedEphemeral": "false",
"CustomAMI": "NONE",
"PreInstallScript": "NONE",
"PreInstallArgs": "NONE",
"PostInstallScript": "NONE",
"PostInstallArgs": "NONE",
"ExtraJson": "{}",
"AdditionalCfnTemplate": "NONE",
"CustomChefCookbook": "NONE",
"NumberOfEBSVol": "1",
"Cores": "NONE,NONE,NONE,NONE",
"IntelHPCPlatform": "false",
"ResourcesS3Bucket": "NONE", # parameter added by the CLI
# "ArtifactS3RootDirectory": "NONE", # parameter added by the CLI
# "RemoveBucketOnDeletion": "NONE", # parameter added by the CLI
# scaling
"ScaleDownIdleTime": "10",
# vpc
"VPCId": "NONE",
"MasterSubnetId": "NONE",
"AccessFrom": "0.0.0.0/0",
"AdditionalSG": "NONE",
"ComputeSubnetId": "NONE",
"ComputeSubnetCidr": "NONE",
"UsePublicIps": "true",
"VPCSecurityGroupId": "NONE",
"AvailabilityZone": "NONE",
# ebs
# "SharedDir": "NONE,NONE,NONE,NONE,NONE", # not existing with single ebs volume
"EBSSnapshotId": "NONE,NONE,NONE,NONE,NONE",
"VolumeType": "gp2,gp2,gp2,gp2,gp2",
"VolumeSize": "NONE,NONE,NONE,NONE,NONE",
"VolumeIOPS": "NONE,NONE,NONE,NONE,NONE",
"EBSEncryption": "false,false,false,false,false",
"EBSKMSKeyId": "NONE,NONE,NONE,NONE,NONE",
"EBSVolumeId": "NONE,NONE,NONE,NONE,NONE",
"VolumeThroughput": "125,125,125,125,125",
# efs
"EFSOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE",
# raid
"RAIDOptions": "NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE,NONE",
# fsx
"FSXOptions": "{}".format(",".join(["NONE"] * 19)),
# dcv
"DCVOptions": "NONE,NONE,NONE",
# cw_log_settings
"CWLogOptions": "true,14",
"ClusterConfigMetadata": "{'sections': {}}",
# architecture
"Architecture": "x86_64",
"NetworkInterfacesCount": "1,1",
"IAMLambdaRoleName": "NONE",
"InstanceTypesData": "{}",
}
class DefaultCfnParams(Enum):
    """Utility class to store default values for CFN parameters."""
    # Member names mirror the config section names; values are the default
    # CloudFormation parameter dicts defined above.
    scaling = DEFAULT_SCALING_CFN_PARAMS
    vpc = DEFAULT_VPC_CFN_PARAMS
    ebs = DEFAULT_EBS_CFN_PARAMS
    efs = DEFAULT_EFS_CFN_PARAMS
    raid = DEFAULT_RAID_CFN_PARAMS
    fsx = DEFAULT_FSX_CFN_PARAMS
    dcv = DEFAULT_DCV_CFN_PARAMS
    cw_log = DEFAULT_CW_LOG_CFN_PARAMS
    cluster_sit = DEFAULT_CLUSTER_SIT_CFN_PARAMS
    cluster_hit = DEFAULT_CLUSTER_HIT_CFN_PARAMS
| 32.313063 | 119 | 0.661393 |
09d4a8e9194c15f9f1237a706130be72186890ca | 135 | py | Python | flaskvel/Constants/FieldTypes.py | bogdan9898/flaskvel | b3ab27d050870ea5fe6a3cd2ed3833220b18e089 | [
"MIT"
] | 3 | 2020-08-13T19:39:53.000Z | 2022-01-03T18:02:58.000Z | flaskvel/Constants/FieldTypes.py | bogdan9898/flaskvel | b3ab27d050870ea5fe6a3cd2ed3833220b18e089 | [
"MIT"
] | null | null | null | flaskvel/Constants/FieldTypes.py | bogdan9898/flaskvel | b3ab27d050870ea5fe6a3cd2ed3833220b18e089 | [
"MIT"
] | null | null | null |
class FieldTypes():
	"""Symbolic names for the value types a validated field may hold."""
	STRING = 'string'
	NUMERIC = 'numeric'
	ARRAY = 'array'
	JSON = 'json'
	FILE = 'file'
	# NOTE(review): attribute name is a typo of UNKNOWN, but it is part of
	# the public API, so renaming it would break callers -- left as is.
	UNKOWN = 'unknown_type'
ff71e2414cfdb67e495370e276181de08b27065d | 1,163 | py | Python | frappe/tests/test_hooks.py | maheshghadage/frappe-praman | 276df54479ec0bd9a665924ef94120864fa0931b | [
"MIT"
] | 3 | 2017-12-09T22:05:11.000Z | 2019-10-22T12:03:43.000Z | frappe/tests/test_hooks.py | jigartarpara/frappe | aead9aef26138a55f92fcc7bc55988b7e67abd63 | [
"MIT"
] | 5 | 2021-04-28T06:55:26.000Z | 2022-02-10T07:59:06.000Z | frappe/tests/test_hooks.py | jigartarpara/frappe | aead9aef26138a55f92fcc7bc55988b7e67abd63 | [
"MIT"
] | 2 | 2021-05-06T06:14:40.000Z | 2021-05-06T10:05:29.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
from frappe.desk.doctype.todo.todo import ToDo
from frappe.cache_manager import clear_controller_cache
class TestHooks(unittest.TestCase):
	def test_hooks(self):
		# Aggregated hooks should expose app_name as a list and doc_events
		# as a nested mapping of doctype -> event -> handler paths.
		hooks = frappe.get_hooks()
		self.assertTrue(isinstance(hooks.get("app_name"), list))
		self.assertTrue(isinstance(hooks.get("doc_events"), dict))
		self.assertTrue(isinstance(hooks.get("doc_events").get("*"), dict))
		# NOTE(review): duplicate of the previous assertion -- probably
		# intended to check a different key.
		self.assertTrue(isinstance(hooks.get("doc_events").get("*"), dict))
		self.assertTrue("frappe.desk.notifications.clear_doctype_notifications" in
			hooks.get("doc_events").get("*").get("on_update"))
	def test_override_doctype_class(self):
		from frappe import hooks
		# Set hook
		hooks.override_doctype_class = {
			'ToDo': ['frappe.tests.test_hooks.CustomToDo']
		}
		# Clear cache so the override above is picked up on the next get_doc.
		frappe.cache().delete_value('app_hooks')
		clear_controller_cache('ToDo')
		# With the override installed, get_doc must instantiate the subclass.
		todo = frappe.get_doc(doctype='ToDo', description='asdf')
		self.assertTrue(isinstance(todo, CustomToDo))
class CustomToDo(ToDo):
	# Minimal ToDo subclass used as the override target in
	# TestHooks.test_override_doctype_class.
	pass
| 30.605263 | 76 | 0.754944 |
d8dce2ce8ebc3f0c05a47c57b22aac9e1c73068f | 2,488 | py | Python | setup.py | jonhadfield/fval | c6945f6e9542a8a9faf1dda62e6b70c7df32c0e6 | [
"MIT"
] | null | null | null | setup.py | jonhadfield/fval | c6945f6e9542a8a9faf1dda62e6b70c7df32c0e6 | [
"MIT"
] | null | null | null | setup.py | jonhadfield/fval | c6945f6e9542a8a9faf1dda62e6b70c7df32c0e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import re
import sys
from setuptools import (setup, find_packages)
if sys.argv[-1] == 'publish':
    # Shortcut: `python setup.py publish` builds an sdist and uploads to PyPI.
    os.system('python setup.py sdist upload -r pypi')
    sys.exit()
install_requires = ['colorama',
                    'colorlog',
                    'PyYAML>=3.11',
                    'file-magic']
try:
    import concurrent.futures
except ImportError:
    # Python 2 needs the `futures` backport for concurrent.futures.
    install_requires.append('futures')
if sys.version_info < (2, 7):
    exit('Python version 2.7 or above is required.')
test_requirements = ['pytest>=3.0.3', 'pytest-cov>=2.4.0']
# Read the version straight out of the package so there is a single
# source of truth for it.
with open('fval/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)
if not version:
    raise RuntimeError('Cannot find version information')
readme = open('README.rst').read()
long_description = readme
setup(
    name='fval',
    version=version,
    description='A file validator.',
    long_description=long_description,
    author='Jon Hadfield',
    author_email='jon@lessknown.co.uk',
    url='http://github.com/jonhadfield/fval',
    packages=find_packages(),
    # Installs a default config into the invoking user's home directory.
    data_files=[
        ('{0}/.fval'.format(os.path.expanduser('~')),
         ['samples/fval.cfg'])
    ],
    entry_points={
        'console_scripts': [
            'fval = fval:main'
        ],
    },
    include_package_data=True,
    install_requires=install_requires,
    license='MIT',
    scripts=['bin/fval'],
    zip_safe=False,
    classifiers=(
        'Development Status :: 4 - Beta',
        'Intended Audience :: System Administrators',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: BSD :: Linux',
        'Operating System :: POSIX :: BSD :: FreeBSD',
        'Operating System :: POSIX :: BSD :: OpenBSD',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ),
    tests_require=install_requires + test_requirements,
)
| 30.716049 | 70 | 0.600482 |
808c71c59cab27ce293e313b90798899a7953f49 | 6,358 | py | Python | ht16k33_seg.py | hybotics/deshipu-micropython-ht16k33 | 6f98090190474008845ef2874313092b2c585051 | [
"MIT"
] | null | null | null | ht16k33_seg.py | hybotics/deshipu-micropython-ht16k33 | 6f98090190474008845ef2874313092b2c585051 | [
"MIT"
] | null | null | null | ht16k33_seg.py | hybotics/deshipu-micropython-ht16k33 | 6f98090190474008845ef2874313092b2c585051 | [
"MIT"
] | 1 | 2021-07-15T02:38:25.000Z | 2021-07-15T02:38:25.000Z | from ht16k33_matrix import HT16K33
CHARS = (
0b00000000, 0b00000000, #
0b01000000, 0b00000110, # !
0b00000010, 0b00100000, # "
0b00010010, 0b11001110, # #
0b00010010, 0b11101101, # $
0b00001100, 0b00100100, # %
0b00100011, 0b01011101, # &
0b00000100, 0b00000000, # '
0b00100100, 0b00000000, # (
0b00001001, 0b00000000, # )
0b00111111, 0b11000000, # *
0b00010010, 0b11000000, # +
0b00001000, 0b00000000, # ,
0b00000000, 0b11000000, # -
0b00000000, 0b00000000, # .
0b00001100, 0b00000000, # /
0b00001100, 0b00111111, # 0
0b00000000, 0b00000110, # 1
0b00000000, 0b11011011, # 2
0b00000000, 0b10001111, # 3
0b00000000, 0b11100110, # 4
0b00100000, 0b01101001, # 5
0b00000000, 0b11111101, # 6
0b00000000, 0b00000111, # 7
0b00000000, 0b11111111, # 8
0b00000000, 0b11101111, # 9
0b00010010, 0b00000000, # :
0b00001010, 0b00000000, # ;
0b00100100, 0b01000000, # <
0b00000000, 0b11001000, # =
0b00001001, 0b10000000, # >
0b01100000, 0b10100011, # ?
0b00000010, 0b10111011, # @
0b00000000, 0b11110111, # A
0b00010010, 0b10001111, # B
0b00000000, 0b00111001, # C
0b00010010, 0b00001111, # D
0b00000000, 0b11111001, # E
0b00000000, 0b01110001, # F
0b00000000, 0b10111101, # G
0b00000000, 0b11110110, # H
0b00010010, 0b00000000, # I
0b00000000, 0b00011110, # J
0b00100100, 0b01110000, # K
0b00000000, 0b00111000, # L
0b00000101, 0b00110110, # M
0b00100001, 0b00110110, # N
0b00000000, 0b00111111, # O
0b00000000, 0b11110011, # P
0b00100000, 0b00111111, # Q
0b00100000, 0b11110011, # R
0b00000000, 0b11101101, # S
0b00010010, 0b00000001, # T
0b00000000, 0b00111110, # U
0b00001100, 0b00110000, # V
0b00101000, 0b00110110, # W
0b00101101, 0b00000000, # X
0b00010101, 0b00000000, # Y
0b00001100, 0b00001001, # Z
0b00000000, 0b00111001, # [
0b00100001, 0b00000000, # \
0b00000000, 0b00001111, # ]
0b00001100, 0b00000011, # ^
0b00000000, 0b00001000, # _
0b00000001, 0b00000000, # `
0b00010000, 0b01011000, # a
0b00100000, 0b01111000, # b
0b00000000, 0b11011000, # c
0b00001000, 0b10001110, # d
0b00001000, 0b01011000, # e
0b00000000, 0b01110001, # f
0b00000100, 0b10001110, # g
0b00010000, 0b01110000, # h
0b00010000, 0b00000000, # i
0b00000000, 0b00001110, # j
0b00110110, 0b00000000, # k
0b00000000, 0b00110000, # l
0b00010000, 0b11010100, # m
0b00010000, 0b01010000, # n
0b00000000, 0b11011100, # o
0b00000001, 0b01110000, # p
0b00000100, 0b10000110, # q
0b00000000, 0b01010000, # r
0b00100000, 0b10001000, # s
0b00000000, 0b01111000, # t
0b00000000, 0b00011100, # u
0b00100000, 0b00000100, # v
0b00101000, 0b00010100, # w
0b00101000, 0b11000000, # x
0b00100000, 0b00001100, # y
0b00001000, 0b01001000, # z
0b00001001, 0b01001001, # {
0b00010010, 0b00000000, # |
0b00100100, 0b10001001, # }
0b00000101, 0b00100000, # ~
0b00111111, 0b11111111,
)
# 7-segment bit patterns for the hexadecimal digits 0-f plus '-' (index 16),
# used by Seg7x4.put().  Bit 7 is left free: Seg7x4 ORs 0b10000000 into a
# cell for the decimal point.
NUMBERS = (
    0x3F, # 0
    0x06, # 1
    0x5B, # 2
    0x4F, # 3
    0x66, # 4
    0x6D, # 5
    0x7D, # 6
    0x07, # 7
    0x7F, # 8
    0x6F, # 9
    0x77, # a
    0x7C, # b
    0x39, # C
    0x5E, # d
    0x79, # E
    0x71, # F
    0x40, # -
)
class Seg14x4(HT16K33):
    """The alpha-numeric 14-segment display."""
    # NOTE(review): each character cell appears to occupy two consecutive
    # bytes of ``self.buffer`` (cell i at offsets 2*i and 2*i+1); the buffer
    # itself is owned by the HT16K33 base class -- confirm against it.
    def scroll(self, count=1):
        """Scroll the display by specified number of places."""
        # One character is two buffer bytes, so a shift of ``count``
        # characters moves bytes by ``2 * count``.  The copy runs in place,
        # left to right, so for positive counts each read is ahead of the
        # write.  NOTE(review): for negative counts the source index can go
        # negative and Python wraps it to the end of the buffer -- looks
        # suspicious, confirm intended.
        if count >= 0:
            offset = 0
        else:
            offset = 2
        for i in range(6):
            self.buffer[i + offset] = self.buffer[i + 2 * count]
    def put(self, char, index=0):
        """Put a character at the specified place."""
        # Only positions 0-3 and ASCII 32-127 are accepted; anything else
        # is silently ignored.
        if not 0 <= index <= 3:
            return
        if not 32 <= ord(char) <= 127:
            return
        if char == '.':
            # The decimal point is a single segment OR-ed into the existing
            # cell rather than replacing it.
            self.buffer[index * 2 + 1] |= 0b01000000
            return
        # CHARS stores two bytes per glyph starting at ASCII 32, hence the
        # offset of 64 (= 2 * 32).
        c = ord(char) * 2 - 64
        self.buffer[index * 2] = CHARS[1 + c]
        self.buffer[index * 2 + 1] = CHARS[c]
    def push(self, char):
        """Scroll the display and add a character at the end."""
        # A '.' merges into the last cell unless that cell already shows a
        # decimal point, in which case it gets a cell of its own.
        if char != '.' or self.buffer[7] & 0b01000000:
            self.scroll()
            self.put(' ', 3)
        self.put(char, 3)
    def text(self, text):
        """Display the specified text."""
        for c in text:
            self.push(c)
    def number(self, number):
        """Display the specified decimal number."""
        s = "{:f}".format(number)
        if len(s) > 4:
            if s.find('.') > 4:
                # More than four characters before the decimal point cannot
                # fit on a four-cell display.
                raise ValueError("Overflow")
        self.fill(False)
        places = 4
        if '.' in s:
            # The decimal point shares a cell, so one extra character fits.
            places += 1
        self.text(s[:places])
    def hex(self, number):
        """Display the specified hexadecimal number."""
        s = "{:x}".format(number)
        if len(s) > 4:
            raise ValueError("Overflow")
        self.fill(False)
        self.text(s)
class Seg7x4(Seg14x4):
    """The numeric 7-segment display.

    Reuses the buffer handling of :class:`Seg14x4`, but each character
    cell is a single byte (at the offsets listed in ``P``) and the byte
    at offset 4 drives the colon between cells 1 and 2.
    """
    P = [0, 2, 6, 8]  # The positions of characters.

    def scroll(self, count=1):
        """Scroll the display by specified number of places."""
        if count >= 0:
            offset = 0
        else:
            offset = 1
        for i in range(3):
            self.buffer[self.P[i + offset]] = self.buffer[self.P[i + count]]

    def push(self, char):
        """Scroll the display and add a character at the end."""
        # ':' and ';' drive the dedicated colon byte and must not scroll
        # the digit cells.
        if char in ':;':
            self.put(char)
        else:
            super().push(char)

    def put(self, char, index=0):
        """Put a character at the specified place."""
        if not 0 <= index <= 3:
            return
        char = char.lower()
        if char == '.':
            # The decimal point is OR-ed into the cell, not a replacement.
            self.buffer[self.P[index]] |= 0b10000000
            return
        elif char in 'abcdef':
            c = ord(char) - 97 + 10
        elif char == '-':
            c = 16
        elif char in '0123456789':
            c = ord(char) - 48
        elif char == ' ':
            # Blank the cell.  (Bug fix: the previous code set c = 0 and
            # fell through, which wrote NUMBERS[0] == 0x3F and therefore
            # rendered the glyph for '0' instead of clearing the cell.)
            self.buffer[self.P[index]] = 0x00
            return
        elif char == ':':
            self.buffer[4] = 0x02
            return
        elif char == ';':
            self.buffer[4] = 0x00
            return
        else:
            # Unsupported character: silently ignore.
            return
        self.buffer[self.P[index]] = NUMBERS[c]
faaa49b482d0520dfbf320d7560bacf8c805f44b | 2,007 | py | Python | analysis/preliminary_cyclotron.py | prakhub/pi3 | 166e20fb156419a9b8b57e077769ae5e59691aa9 | [
"MIT"
] | null | null | null | analysis/preliminary_cyclotron.py | prakhub/pi3 | 166e20fb156419a9b8b57e077769ae5e59691aa9 | [
"MIT"
] | null | null | null | analysis/preliminary_cyclotron.py | prakhub/pi3 | 166e20fb156419a9b8b57e077769ae5e59691aa9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
""" "Main" script used to analyze invidual measurements"""
__author__ = 'Andreas Gsponer'
__license__ = 'MIT'
import numpy as np
from analyzeImage import analyze_image
from surface import CubicFitRotated
from preprocess import Preprocessor
import smoothing
import plots
P = Preprocessor(min_threshold=45, max_threshold=230, offset=0)
# Drift space focused by the BTL
#path = "../measurements/cyclotron/first_cyclotron_test/focused/3"
#P.import_video(path + "/focused_330_to_0.mkv")
# Changing steering current of the BTL
#path = "../measurements/cyclotron/first_cyclotron_test/changing_steering_magnet/2"
#P.import_video(path + "/changing_steering_magnet_330_to_0_10_steps.mkv")
# MBL magnets enabled
path = "../measurements/cyclotron/first_cyclotron_test/mbl_magnet/2"
P.import_video(path + "/mbl_magnet_330_to_0.mkv")
# m.set_size_inches(12,7)
# m.savefig("../report/images/measurements/cyclotron_pre/focused_center.png",dpi=300,optimize=True)
P.read_metadata(path)
P.preprocess()
Im = P.frames_processed
z_start = P.z_start
z_end = P.z_end
# Per-frame statistics of the beam spot -- presumably mean position and
# 2x2 spatial covariance per frame (see analyzeImage for the exact shape).
mean, cov = analyze_image(Im)
window_size = 10
mean_smoothed = smoothing.mean_moving_average(mean, window_size)
cov_smoothed = smoothing.cov_moving_average(cov, window_size)
plots.plot_mean(mean, z_start=z_start, z_end=z_end).show()
c = CubicFitRotated()
c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)
deviations = True
if deviations:
    # Report how much the smoothed statistics vary over the measurement.
    x = mean.T[0]
    y = mean.T[1]
    print("mean x varied by:" + str(np.max(x) - np.min(x)))
    print("mean y varied by:" + str(np.max(y) - np.min(y)))
    xc = np.sqrt(cov_smoothed.T[0][0])
    yc = np.sqrt(cov_smoothed.T[1][1])
    print("cov x varied by:" + str(np.max(xc) - np.min(xc)))
    print("cov y varied by:" + str(np.max(yc) - np.min(yc)))
    xc = cov_smoothed.T[0][0]
    yc = cov_smoothed.T[1][1]
    print("beta x varied by:" + str(np.max(xc) - np.min(xc)))
    print("beta y varied by:" + str(np.max(yc) - np.min(yc)))
| 27.121622 | 99 | 0.721973 |
22717296eb3510c58ad9960a8aa76c821fb5da30 | 1,153 | py | Python | apps/core/migrations/0007_auto_20211125_2341.py | cmput404F21/CMPUT404-project-socialdistribution | 47f108b43886a4e482c6b6f9c6fdef6dcc005c3f | [
"W3C-20150513"
] | null | null | null | apps/core/migrations/0007_auto_20211125_2341.py | cmput404F21/CMPUT404-project-socialdistribution | 47f108b43886a4e482c6b6f9c6fdef6dcc005c3f | [
"W3C-20150513"
] | 48 | 2021-10-12T21:41:39.000Z | 2021-12-08T19:40:25.000Z | apps/core/migrations/0007_auto_20211125_2341.py | cmput404F21/CMPUT404-project-socialdistribution | 47f108b43886a4e482c6b6f9c6fdef6dcc005c3f | [
"W3C-20150513"
] | 1 | 2022-01-11T04:07:43.000Z | 2022-01-11T04:07:43.000Z | # Generated by Django 3.2.9 on 2021-11-25 23:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace ExternalHost.basic_auth with username/password columns and
    add the User.isServer flag (auto-generated 2021-11-25)."""
    dependencies = [
        ('core', '0006_auto_20211125_2157'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='externalhost',
            name='basic_auth',
        ),
        migrations.AddField(
            model_name='externalhost',
            name='password',
            # NOTE(review): default=123 is an int on a CharField; it is only
            # used to backfill existing rows (preserve_default=False), but
            # confirm it was not meant to be a string.
            field=models.CharField(default=123, max_length=80, verbose_name='password'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='externalhost',
            name='username',
            field=models.CharField(default='name', max_length=80, verbose_name='username'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='user',
            name='isServer',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='author',
            name='isApproved',
            field=models.BooleanField(default=False, verbose_name='isApproved'),
        ),
    ]
f53533e5cdcfab560214b5fafcd2b7451d6cffa8 | 1,127 | py | Python | syngenta_digital_dta/postgres/sql_connector.py | syngenta-digital/dta-python | 2f313964bdc2b733b47c874b18bd72ebf5ed8817 | [
"Apache-2.0"
] | null | null | null | syngenta_digital_dta/postgres/sql_connector.py | syngenta-digital/dta-python | 2f313964bdc2b733b47c874b18bd72ebf5ed8817 | [
"Apache-2.0"
] | 62 | 2020-12-02T02:59:14.000Z | 2021-10-01T01:08:07.000Z | syngenta_digital_dta/postgres/sql_connector.py | syngenta-digital/dta-python | 2f313964bdc2b733b47c874b18bd72ebf5ed8817 | [
"Apache-2.0"
] | null | null | null | from functools import lru_cache
import psycopg2
from psycopg2.extras import RealDictCursor
class SQLConnector:
    """Lazily-connecting wrapper around a psycopg2 PostgreSQL connection.

    Connection parameters are copied from the ``cls`` object passed to
    the constructor (an adapter configured elsewhere in this package).
    """

    def __init__(self, cls):
        self.endpoint = cls.endpoint
        self.database = cls.database
        self.table = cls.table
        self.user = cls.user
        self.password = cls.password
        self.port = cls.port
        self.autocommit = cls.autocommit
        self.connection = None  # created on first connect()

    def connect(self):
        """Return an open connection, creating it on first use.

        Fix: the previous implementation decorated ``connect`` and
        ``cursor`` with ``functools.lru_cache``.  Caching a bound method
        keys the cache on ``self``, keeping every instance alive for the
        life of the process (flake8-bugbear B019); ``self.connection``
        already provides the per-instance memoization, so the decorators
        are dropped.
        """
        try:
            if not self.connection:
                self.connection = psycopg2.connect(
                    dbname=self.database,
                    host=self.endpoint,
                    port=self.port,
                    user=self.user,
                    password=self.password,
                )
                if self.autocommit:
                    self.connection.set_session(autocommit=self.autocommit)
            return self.connection
        except Exception as error:
            print(error)
            # Bare raise preserves the original traceback.
            raise

    def cursor(self):
        """Return a fresh ``RealDictCursor`` on the current connection.

        A new cursor per call replaces the previously cached single
        cursor, which went stale once closed.  ``connect()`` must have
        been called first.
        """
        return self.connection.cursor(cursor_factory=RealDictCursor)
| 28.897436 | 71 | 0.57764 |
caf689e833ec79dd02f78c279b7de8271dda5c67 | 2,979 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}.py | giupo/cookiecutter-py-microservice | e29a5c74e39e0f6533c825c5c05d046122defced | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}.py | giupo/cookiecutter-py-microservice | e29a5c74e39e0f6533c825c5c05d046122defced | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}.py | giupo/cookiecutter-py-microservice | e29a5c74e39e0f6533c825c5c05d046122defced | [
"BSD-3-Clause"
] | null | null | null | """
Main module.
Defines the Controller for {{cookiecutter.project_slug}} resource
"""
import os
import logging
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from {{cookiecutter.project_slug}}.events import (
init_events,
)
from {{cookiecutter.project_slug}}.config import make_config
from {{cookiecutter.project_slug}}.auth import authenticated
from {{cookiecutter.project_slug}} import __version__
from {{cookiecutter.project_slug}}.store import (
store,
)
log = logging.getLogger("{{cookiecutter.project_slug}}.{{cookiecutter.project_slug}}")
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
def build_routes():
    """Return the tornado URL routes for this service.

    No handlers are registered yet; add ``(pattern, handler, init_params)``
    tuples here as endpoints are implemented.  (The unused ``init_params``
    local from the scaffold was removed.)
    """
    return []
async def get_app(config=None):
    """Build and return the tornado Application.

    Args:
        config: parsed service configuration; when omitted, a fresh one is
            loaded via ``make_config()``.

    Fix: the previous signature used ``config=make_config()``, which
    evaluated the default once at *import* time, so configuration changes
    after import were silently ignored.  Resolving the default at call
    time keeps the behaviour for callers while fixing that.
    """
    if config is None:
        config = make_config()
    settings = {
        "cookie_secret": config.get('{{cookiecutter.project_slug}}', 'secret'),
        "debug": config.getboolean('{{cookiecutter.project_slug}}', 'debug')
    }
    app = tornado.web.Application(build_routes(), **settings)
    # Expose config and the event subsystem to handlers via the app object.
    app.config = config
    app.events = await init_events()
    return app
async def on_shutdown(app):
    # Graceful-shutdown hook: closes the event subsystem created in
    # get_app() before the process exits.
    log.info("Shutdown service started")
    await app.events.close()
    log.info("Shutdown completed")
def build_ssl_options(config):
    """Assemble the SSL option dict for tornado's HTTPServer.

    Returns ``None`` when either the certificate or the key file is
    missing on disk, which makes the caller fall back to plain HTTP.
    """
    certfile = config.get('{{cookiecutter.project_slug}}', 'servercert')
    keyfile = config.get('{{cookiecutter.project_slug}}', 'serverkey')
    if not (os.path.isfile(certfile) and os.path.isfile(keyfile)):
        return None
    return {"certfile": certfile, "keyfile": keyfile}
async def start_web_server(app):
    """Bind the HTTP(S) server and start the worker processes.

    Side effects: writes the effective ``protocol`` and ``port`` back
    into ``app.config`` so handlers can build absolute URLs.

    Fixes: removed a dead read of the 'protocol' config value that was
    immediately overwritten, and replaced the deprecated ``log.warn``
    alias with ``log.warning``.
    """
    log.info("Starting web server...")
    addr = app.config.get('{{cookiecutter.project_slug}}', 'address')
    port = app.config.getint('{{cookiecutter.project_slug}}', 'port')
    ssl_options = build_ssl_options(app.config)
    # The scheme is decided purely by certificate availability.
    protocol = "http" if ssl_options is None else "https"
    if protocol != "https":
        log.warning("This service should always be served on HTTPS!")
    app.config.set('{{cookiecutter.project_slug}}', 'protocol', protocol)
    server = tornado.httpserver.HTTPServer(app, ssl_options=ssl_options)
    server.bind(port)
    log.info("{{cookiecutter.project_slug}} at %s://%s:%s", protocol, addr, port)
    app.config.set('{{cookiecutter.project_slug}}', 'port', str(port))
    log.info("Registering services")
    # Fork one process per configured worker; 0 means one per CPU.
    server.start(app.config.getint('{{cookiecutter.project_slug}}', 'nproc'))
    log.info("{{cookiecutter.project_slug}}(%s) started (PID: %s)", __version__, os.getpid())
class BaseHandler(tornado.web.RequestHandler):
    # Common base for this service's handlers: JSON responses with
    # permissive CORS, plus a helper for uniform error/status payloads.
    def set_default_headers(self) -> None:
        super().set_default_headers()
        self.set_header('Content-Type', 'application/json')
        self.set_header('Access-Control-Allow-Origin', '*')
    async def finish_with_status(self, status, message):
        # Send a {"status": ..., "message": ...} body with the given HTTP
        # status code and flush it to the client immediately.
        self.set_status(status)
        self.write({
            "status": status,
            "message": message
        })
        await self.flush()
| 28.92233 | 91 | 0.713998 |
dfe65a24ffc6d368cf24b6ac25bdfcace7279eca | 663 | py | Python | test/unit/grid/test_shapes.py | ignacia-fp/bempp-cl | a65232558826e51e624b1a4f649b6a0ed5a7f551 | [
"MIT"
] | 70 | 2019-09-04T15:15:05.000Z | 2022-03-22T16:54:40.000Z | test/unit/grid/test_shapes.py | ignacia-fp/bempp-cl | a65232558826e51e624b1a4f649b6a0ed5a7f551 | [
"MIT"
] | 66 | 2020-01-16T08:31:00.000Z | 2022-03-25T11:18:59.000Z | test/unit/grid/test_shapes.py | ignacia-fp/bempp-cl | a65232558826e51e624b1a4f649b6a0ed5a7f551 | [
"MIT"
] | 22 | 2019-09-30T08:50:33.000Z | 2022-03-20T19:37:22.000Z | import pytest
import numpy as np
import bempp.api
@pytest.mark.parametrize(
    "gridname, args, kwargs",
    [
        ("almond", [], {}),
        ("cylinders", [], {}),
        ("reentrant_cube", [], {}),
        ("screen", [np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]])], {}),
        ("cube", [], {}),
        ("ellipsoid", [], {}),
        ("reference_triangle", [], {}),
        ("cuboid", [], {}),
        ("multitrace_cube", [], {}),
        ("multitrace_sphere", [], {}),
        ("regular_sphere", [2], {}),
        ("sphere", [], {}),
    ],
)
def test_shape(gridname, args, kwargs):
    # Smoke test: every factory in bempp.api.shapes should build its grid
    # without raising when called with these representative arguments.
    getattr(bempp.api.shapes, gridname)(*args, **kwargs)
| 26.52 | 81 | 0.443439 |
01b53020527cb6a672d7d74883b59bdb03f6c321 | 3,555 | py | Python | train_pku_market.py | fandulu/IHDA | a859729cbb639a3233f3c8c7fce894bac27b19ee | [
"MIT"
] | 4 | 2020-07-11T09:21:43.000Z | 2021-09-08T08:31:09.000Z | train_pku_market.py | fandulu/IHDA | a859729cbb639a3233f3c8c7fce894bac27b19ee | [
"MIT"
] | 1 | 2021-09-08T08:29:36.000Z | 2021-09-08T08:29:36.000Z | train_pku_market.py | fandulu/IHDA | a859729cbb639a3233f3c8c7fce894bac27b19ee | [
"MIT"
] | 1 | 2021-09-08T08:31:09.000Z | 2021-09-08T08:31:09.000Z | from __future__ import print_function
import argparse
import sys
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.utils.data as data
import torchvision
import torchvision.transforms as transforms
from config import Config_market, Config_pku
from utils import *
from datasets import *
from net.model import *
from engine.pku_market_trainer import *
import random
# Fix the RNG so data shuffling/sampling order is reproducible.
random.seed(1234)
###############Init Setting#########################################
args = Config_pku()
checkpoint_path = args.model_path
if not os.path.isdir(checkpoint_path):
    os.makedirs(checkpoint_path)
###############Init Setting##########################################
###############Load Data##############################################
market_args = Config_market()
market_loader, n_market_clc = make_market_data_loader(market_args)
train_set, test_color_set, test_sketch_set = make_data_loader_pku(args)
n_pku_clc = len(np.unique(train_set.train_data['sketch_labels'])) # number of person in training set
n_test_color = len(test_color_set.color_labels) #number of instance in test color set
n_test_sketch = len(test_sketch_set.sketch_labels) #number of instance in test sketch set
pku_train_loader = data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, drop_last=True)
pku_test_color_loader = data.DataLoader(test_color_set, batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers)
pku_test_sketch_loader = data.DataLoader(test_sketch_set, batch_size=args.test_batch, shuffle=False, num_workers=args.num_workers)
test_color_labels = test_color_set.color_labels
test_sketch_labels = test_sketch_set.sketch_labels
###############Load Data##############################################
###############Building Model ##############################################
print('==> Building model..')
print('There are {} pids in the train'.format(n_pku_clc))
# Two embedding branches -- presumably one per modality (photo/sketch) --
# plus classifier, attribute and domain heads; confirm roles in
# engine.pku_market_trainer.
Embed_net_1 = Baseline(pretrain_choice=None)
Embed_net_2 = Baseline(pretrain_choice=None)
Classify_net = C_net(args.low_dim,n_pku_clc,0.7)
A_net = Attribute_net(dim=args.low_dim, n_att=args.num_att)
D_net = Domain_net(dim=args.low_dim)
trainer = create_trainer(args, Embed_net_1, Embed_net_2, Classify_net, A_net, D_net, n_pku_clc)
# training
best_acc = 0 # best test accuracy
start_epoch = 0
switch_point = 30 # epoch at which the training loss mode switches below
print('==> Start Training...')
for epoch in range(start_epoch, 101-start_epoch):
    print('==> Preparing Data Loader...')
    # training
    if epoch<switch_point:
        trainer.do_train(epoch, pku_train_loader, market_loader, 'softmax')
    else:
        trainer.do_train(epoch, pku_train_loader, market_loader, 'triplet_softmax')
    trainer.adjust_learning_rate(epoch)
    if epoch >= 0 and epoch%5 ==0:
        print ('Test Epoch: {}'.format(epoch))
        # testing
        cmc, mAP = trainer.do_test(epoch, args.test_mode, pku_test_color_loader, pku_test_sketch_loader, n_test_color, n_test_sketch, test_color_labels, test_sketch_labels)
        print('FC: top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}| mAP: {:.2%}'.format(
            cmc[0], cmc[4], cmc[9], cmc[19], mAP))
        # save model
        if cmc[0] > best_acc: # not the real best for sysu-mm01
            best_acc = cmc[0]
            trainer.save_model(epoch, cmc, mAP, True)
        # save model every args.save_epoch epochs
        if epoch > 0 and epoch%args.save_epoch ==0:
            trainer.save_model(epoch, cmc, mAP, False)
2d1f659994b285f40478ec009c4f3a6e13c4b520 | 176 | py | Python | OpenGLCffi/GLES3/EXT/ANGLE/translated_shader_source.py | cydenix/OpenGLCffi | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | [
"MIT"
] | null | null | null | OpenGLCffi/GLES3/EXT/ANGLE/translated_shader_source.py | cydenix/OpenGLCffi | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | [
"MIT"
] | null | null | null | OpenGLCffi/GLES3/EXT/ANGLE/translated_shader_source.py | cydenix/OpenGLCffi | c78f51ae5e6b655eb2ea98f072771cf69e2197f3 | [
"MIT"
] | null | null | null | from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['shader', 'bufsize', 'length', 'source'])
def glGetTranslatedShaderSourceANGLE(shader, bufsize, length):
	# Generated cffi stub for GL_ANGLE_translated_shader_source; the
	# @params decorator supplies the actual native call.  NOTE(review):
	# 'source' appears in prms but not in the signature -- presumably the
	# out parameter the wrapper fills in; confirm against params().
	pass
| 25.142857 | 68 | 0.75 |
6fedc13581cde841f536aa7c4aa6c1a5d520bbc5 | 1,083 | py | Python | swipt/people/jaime.py | stoneworksolutions/swipt | 3dfd0f1b6ba9b0f2cdba85c92098483c5d9cdd94 | [
"Unlicense"
] | null | null | null | swipt/people/jaime.py | stoneworksolutions/swipt | 3dfd0f1b6ba9b0f2cdba85c92098483c5d9cdd94 | [
"Unlicense"
] | null | null | null | swipt/people/jaime.py | stoneworksolutions/swipt | 3dfd0f1b6ba9b0f2cdba85c92098483c5d9cdd94 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
def run(delay_scale=1.0):
    """Print the farewell message line by line, pausing between lines.

    Args:
        delay_scale: multiplier applied to every pause.  The default of
            1.0 reproduces the original timing; pass 0 to print the whole
            message at once (useful for testing).

    Fix: the original used Python-2-only ``print`` statements; calling
    ``print(...)`` with a single argument behaves identically on both
    Python 2 and 3.
    """
    # (message, seconds to wait after printing it) -- text kept verbatim.
    script = [
        ('Para las aventuras que puedan acontecerte:', 5),
        ('No guardes todo en el mismo Bucket...', 5),
        ('Recuerda que Buchannon siempre esta vigilando...', 5),
        ('Y MarkFelt tambien...', 5),
        ('Cornelius es un nombre feo desde que David mato a Goliath...', 5),
        ('pero los nombres feos tienen gancho entre desarrolladores XD', 5),
        ('Las cosas claras...', 3),
        ('el chocolate espeso...', 3),
        ('y el ColaCao.. EN CASA!', 5),
        ('Y sobre todo...', 5),
        ('mantente alejado de Ms Neumar.', 5),
        ('Y para despedir, algo mas emotivo:', 5),
        ('Cuando no sea tu mejor dia...', 5),
        ('pon a cocer arroz durante dos horas...', 5),
        ('veras como se te pasa.', 3),
    ]
    for message, seconds in script:
        print(message)
        time.sleep(seconds * delay_scale)
run()
| 27.075 | 73 | 0.613112 |
9cfff74374a53fb8b71b9152b9b9a1568c9bf750 | 6,513 | py | Python | django_localflavor_cn/forms.py | ifanrx/django-localflavor-cn | 688c3df783682d11057d3cc53089a65a89c5416c | [
"BSD-3-Clause"
] | null | null | null | django_localflavor_cn/forms.py | ifanrx/django-localflavor-cn | 688c3df783682d11057d3cc53089a65a89c5416c | [
"BSD-3-Clause"
] | null | null | null | django_localflavor_cn/forms.py | ifanrx/django-localflavor-cn | 688c3df783682d11057d3cc53089a65a89c5416c | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Chinese-specific form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
from django.contrib.localflavor.cn.cn_provinces import CN_PROVINCE_CHOICES
from django.forms import ValidationError
from django.forms.fields import CharField, RegexField, Select
from django.utils.translation import ugettext_lazy as _
__all__ = (
'CNProvinceSelect',
'CNPostCodeField',
'CNIDCardField',
'CNPhoneNumberField',
'CNCellNumberField',
)
# 15 digits (1st generation card) or 18 characters, where the 18th may be
# a digit or x/X (the GB11643-1999 check character).
ID_CARD_RE = r'^\d{15}(\d{2}[0-9xX])?$'
# Six-digit postal code.
POST_CODE_RE = r'^\d{6}$'
# Landline: area code, dash, subscriber number, optional dashed extension.
PHONE_RE = r'^\d{3,4}-\d{7,8}(-\d+)?$'
# Cell number: 11 digits, first digit 1, second digit one of 3/4/5/8.
CELL_RE = r'^1[3458]\d{9}$'
# Valid location code used in id card checking algorithm
CN_LOCATION_CODES = (
11, # Beijing
12, # Tianjin
13, # Hebei
14, # Shanxi
15, # Nei Mongol
21, # Liaoning
22, # Jilin
23, # Heilongjiang
31, # Shanghai
32, # Jiangsu
33, # Zhejiang
34, # Anhui
35, # Fujian
36, # Jiangxi
37, # Shandong
41, # Henan
42, # Hubei
43, # Hunan
44, # Guangdong
45, # Guangxi
46, # Hainan
50, # Chongqing
51, # Sichuan
52, # Guizhou
53, # Yunnan
54, # Xizang
61, # Shaanxi
62, # Gansu
63, # Qinghai
64, # Ningxia
65, # Xinjiang
71, # Taiwan
81, # Hong Kong
91, # Macao
)
class CNProvinceSelect(Select):
    """
    A select widget with the list of Chinese provinces as its choices.
    """
    def __init__(self, attrs=None):
        # Choices come from the localflavor data module imported above.
        super(CNProvinceSelect, self).__init__(
            attrs, choices=CN_PROVINCE_CHOICES,
        )
class CNPostCodeField(RegexField):
    """
    A form field that validates input as a Chinese post code.

    A valid code has the format XXXXXX where every X is a digit.
    """
    default_error_messages = {
        'invalid': _('Enter a post code in the format XXXXXX.'),
    }
    def __init__(self, *args, **kwargs):
        super(CNPostCodeField, self).__init__(POST_CODE_RE, *args, **kwargs)
class CNIDCardField(CharField):
    """
    A form field that validates Chinese Identification Card Numbers.

    The following restrictions are enforced:

    * the length may only be 15 or 18.
    * an 18 character number may end in x or X.
    * the checksum must be valid (18 character numbers only).
    * the embedded birthdate must be a real date.
    * the leading location code must name a known region.

    The checksum algorithm is described in GB11643-1999.
    """
    default_error_messages = {
        'invalid': _('ID Card Number consists of 15 or 18 digits.'),
        'checksum': _('Invalid ID Card Number: Wrong checksum'),
        'birthday': _('Invalid ID Card Number: Wrong birthdate'),
        'location': _('Invalid ID Card Number: Wrong location code'),
    }

    # Digit weights for the first 17 positions, per GB11643-1999.
    _CHECKSUM_WEIGHTS = (7, 9, 10, 5, 8, 4, 2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
    # Check characters indexed by the weighted sum modulo 11.
    _CHECK_CHARS = '10X98765432'

    def __init__(self, max_length=18, min_length=15, *args, **kwargs):
        super(CNIDCardField, self).__init__(max_length, min_length, *args,
                                            **kwargs)

    def clean(self, value):
        """Validate ``value`` as an ID card number and return it uppercased."""
        super(CNIDCardField, self).clean(value)
        if not value:
            return ""
        if not re.match(ID_CARD_RE, value):
            raise ValidationError(self.error_messages['invalid'])
        if not self.has_valid_birthday(value):
            raise ValidationError(self.error_messages['birthday'])
        if not self.has_valid_location(value):
            raise ValidationError(self.error_messages['location'])
        value = value.upper()
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['checksum'])
        return '%s' % value

    def has_valid_birthday(self, value):
        """Check that the embedded birthdate parses as a real date."""
        from datetime import datetime
        if len(value) == 15:
            # 1st generation card: two-digit year at positions 6-11.
            raw, fmt = value[6:12], "%y%m%d"
        else:
            # 2nd generation card: four-digit year at positions 6-13.
            raw, fmt = value[6:14], "%Y%m%d"
        try:
            datetime.strptime(raw, fmt)
        except ValueError:
            # Not a calendar date.
            return False
        return True

    def has_valid_location(self, value):
        """Check that the two leading digits name a known region."""
        return int(value[:2]) in CN_LOCATION_CODES

    def has_valid_checksum(self, value):
        """Check the GB11643-1999 check character (18 character cards only)."""
        if len(value) != 18:
            # 1st generation cards carry no check character.
            return True
        total = sum(weight * int(digit)
                    for weight, digit in zip(self._CHECKSUM_WEIGHTS, value[:17]))
        return self._CHECK_CHARS[total % 11] == value[-1]
class CNPhoneNumberField(RegexField):
    """
    A form field that validates input as a Chinese landline phone number.

    A valid phone number looks like::

        010-55555555

    An optional dashed extension is also accepted::

        010-55555555-35
    """
    default_error_messages = {
        'invalid': _('Enter a valid phone number.'),
    }
    def __init__(self, *args, **kwargs):
        super(CNPhoneNumberField, self).__init__(PHONE_RE, *args, **kwargs)
class CNCellNumberField(RegexField):
    """
    A form field that validates input as a Chinese cell phone number.

    A valid cell number looks like::

        13012345678

    A rough rule is applied (see CELL_RE): the first digit must be 1, the
    second one of 3, 4, 5 or 8, and the total length must be 11 digits.
    """
    default_error_messages = {
        'invalid': _('Enter a valid cell number.'),
    }
    def __init__(self, *args, **kwargs):
        super(CNCellNumberField, self).__init__(CELL_RE, *args, **kwargs)
| 30.293023 | 79 | 0.60172 |
750730b6edd815be590b0f937f64291a43e6b6a0 | 1,613 | py | Python | internal/notes/builtin-SAVE/packages/libuuid/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/libuuid/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/libuuid/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libuuid(AutotoolsPackage):
    """Portable uuid C library"""
    homepage = "http://sourceforge.net/projects/libuuid/"
    # SourceForge download URL; the query parameters pin the mirror redirect.
    url = "http://downloads.sourceforge.net/project/libuuid/libuuid-1.0.3.tar.gz?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Flibuuid%2F&ts=1433881396&use_mirror=iweb"
    # Release 1.0.3 with the md5 checksum of its tarball.
    version('1.0.3', 'd44d866d06286c08ba0846aba1086d68')
| 46.085714 | 171 | 0.686299 |
261ccfe6e136306a1289aa2dd0996f26e6b1b729 | 6,058 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/lti_provider/views.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """
LTI Provider view functions
"""
import logging
from django.conf import settings
from django.http import Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from common.djangoapps.util.views import add_p3p_header
from lms.djangoapps.lti_provider.models import LtiConsumer
from lms.djangoapps.lti_provider.outcomes import store_outcome_parameters
from lms.djangoapps.lti_provider.signature_validator import SignatureValidator
from lms.djangoapps.lti_provider.users import authenticate_lti_user
from openedx.core.lib.url_utils import unquote_slashes
log = logging.getLogger("edx.lti_provider")
# LTI launch parameters that must be present for a successful launch
# Keys every LTI 1.x launch POST must include (the OAuth signature fields plus
# the LTI user/context identifiers); launches missing any of these are rejected
# with HTTP 400 by lti_launch.
REQUIRED_PARAMETERS = [
    'roles', 'context_id', 'oauth_version', 'oauth_consumer_key',
    'oauth_signature', 'oauth_signature_method', 'oauth_timestamp',
    'oauth_nonce', 'user_id'
]
# Keys that are forwarded when present but never cause a launch to fail.
OPTIONAL_PARAMETERS = [
    'context_title', 'context_label', 'lis_result_sourcedid',
    'lis_outcome_service_url', 'tool_consumer_instance_guid'
]
@csrf_exempt
@add_p3p_header
def lti_launch(request, course_id, usage_id):
    """
    Endpoint for all requests to embed edX content via the LTI protocol. This
    endpoint will be called by a POST message that contains the parameters for
    an LTI launch (we support version 1.2 of the LTI specification):
        http://www.imsglobal.org/lti/ltiv1p2/ltiIMGv1p2.html
    An LTI launch is successful if:
        - The launch contains all the required parameters
        - The launch data is correctly signed using a known client key/secret
          pair
    """
    # Feature-gate the whole provider; disabled deployments answer 403.
    if not settings.FEATURES['ENABLE_LTI_PROVIDER']:
        return HttpResponseForbidden()
    # Check the LTI parameters, and return 400 if any required parameters are
    # missing
    params = get_required_parameters(request.POST)
    if not params:
        return HttpResponseBadRequest()
    params.update(get_optional_parameters(request.POST))
    # Get the consumer information from either the instance GUID or the consumer
    # key
    try:
        lti_consumer = LtiConsumer.get_or_supplement(
            params.get('tool_consumer_instance_guid', None),
            params['oauth_consumer_key']
        )
    except LtiConsumer.DoesNotExist:
        return HttpResponseForbidden()
    # Check the OAuth signature on the message
    if not SignatureValidator(lti_consumer).verify(request):
        return HttpResponseForbidden()
    # Add the course and usage keys to the parameters array
    try:
        course_key, usage_key = parse_course_and_usage_keys(course_id, usage_id)
    except InvalidKeyError:
        log.error(
            'Invalid course key %s or usage key %s from request %s',
            course_id,
            usage_id,
            request
        )
        raise Http404()  # lint-amnesty, pylint: disable=raise-missing-from
    params['course_key'] = course_key
    params['usage_key'] = usage_key
    # Create an edX account if the user identifed by the LTI launch doesn't have
    # one already, and log the edX account into the platform.
    authenticate_lti_user(request, params['user_id'], lti_consumer)
    # Store any parameters required by the outcome service in order to report
    # scores back later. We know that the consumer exists, since the record was
    # used earlier to verify the oauth signature.
    store_outcome_parameters(params, request.user, lti_consumer)
    return render_courseware(request, params['usage_key'])
def get_required_parameters(dictionary, additional_params=None):
    """
    Extract all required LTI parameters from a dictionary and verify that none
    are missing.
    :param dictionary: The dictionary that should contain all required parameters
    :param additional_params: Any expected parameters, beyond those required for
        the LTI launch.
    :return: A new dictionary containing all the required parameters from the
        original dictionary and additional parameters, or None if any expected
        parameters are missing.
    """
    expected_keys = REQUIRED_PARAMETERS + (additional_params or [])
    # Reject the whole launch if even one expected key is absent.
    if any(key not in dictionary for key in expected_keys):
        return None
    return {key: dictionary[key] for key in expected_keys}
def get_optional_parameters(dictionary):
    """
    Extract all optional LTI parameters from a dictionary. This method does not
    fail if any parameters are missing.
    :param dictionary: A dictionary containing zero or more optional parameters.
    :return: A new dictionary containing all optional parameters from the
        original dictionary, or an empty dictionary if no optional parameters
        were present.
    """
    found = {}
    for key in OPTIONAL_PARAMETERS:
        if key in dictionary:
            found[key] = dictionary[key]
    return found
def render_courseware(request, usage_key):
    """
    Render the content requested for the LTI launch.
    TODO: This method depends on the current refactoring work on the
    courseware/courseware.html template. It's signature may change depending on
    the requirements for that template once the refactoring is complete.
    Return an HttpResponse object that contains the template and necessary
    context to render the courseware.
    """
    # return an HttpResponse object that contains the template and necessary context to render the courseware.
    # Imported here (not at module level) — presumably to avoid a circular
    # import with the courseware app; TODO confirm before moving it.
    from lms.djangoapps.courseware.views.views import render_xblock
    # check_if_enrolled=False: LTI users are authenticated but not necessarily
    # enrolled in the course.
    return render_xblock(request, str(usage_key), check_if_enrolled=False)
def parse_course_and_usage_keys(course_id, usage_id):
    """
    Convert course and usage ID strings into key objects. Return a tuple of
    (course_key, usage_key), or throw an InvalidKeyError if the translation
    fails.
    """
    course_key = CourseKey.from_string(course_id)
    # usage_id arrives URL-quoted ('/' encoded); restore the slashes first.
    usage_id = unquote_slashes(usage_id)
    # Bind the usage key to the course so both refer to the same run.
    usage_key = UsageKey.from_string(usage_id).map_into_course(course_key)
    return course_key, usage_key
12ebf35dcaa511becfb67d70ee99a886810b2f95 | 8,650 | py | Python | xpark/plan/dataframe/optimized.py | xaviermathew/Xpark | 13f4c2e2f1c323a6baf772b81d5deefb9e3a9ebc | [
"MIT"
] | 1 | 2020-10-30T09:15:49.000Z | 2020-10-30T09:15:49.000Z | xpark/plan/dataframe/optimized.py | xaviermathew/Xpark | 13f4c2e2f1c323a6baf772b81d5deefb9e3a9ebc | [
"MIT"
] | 4 | 2021-06-08T22:54:18.000Z | 2022-03-12T00:52:29.000Z | xpark/plan/dataframe/optimized.py | xaviermathew/Xpark | 13f4c2e2f1c323a6baf772b81d5deefb9e3a9ebc | [
"MIT"
] | null | null | null | from collections import defaultdict
import logging
import networkx as nx
from networkx.classes.function import add_path
from python_cypher import python_cypher
from xpark import settings
from xpark.plan.base import BaseOptimizedPlan
from xpark.utils.iter import take_pairs
_LOG = logging.getLogger(__name__)
PARSER = python_cypher.CypherToNetworkx()
class OptimizationRule(object):
    """Base class for plan-graph rewrite rules.

    Subclasses set ``rule_str`` to a Cypher pattern; ``apply`` matches it
    against the networkx plan graph and replaces each matched path with the
    path returned by ``transform_path``.
    """
    # Cypher pattern (set by subclasses) describing the paths this rule matches.
    rule_str = None
    def __init__(self):
        # Collapse all whitespace before parsing the Cypher query.
        self.rule = PARSER.parse(' '.join(self.rule_str.split()))
    def __repr__(self):
        return '<rule: %s\n%s>' % (self.__class__.__name__, self.rule_str.strip())
    def transform_path(self, path, g):
        # Default: no transformation, empty stats. Subclasses override.
        return path, {}
    def replace_path(self, g, path, new_path):
        # Works on a copy so the caller's graph is never mutated in place.
        g = g.copy()
        paths = list(take_pairs(path))
        g.remove_edges_from(paths)
        add_path(g, new_path)
        # Drop nodes left dangling by the edge removal.
        g.remove_nodes_from(list(nx.isolates(g)))
        return g
    def apply(self, g):
        """Apply this rule once; return (new_graph, has_changed, stats)."""
        paths = list(PARSER.yield_return_values(g, self.rule))
        has_changed = False
        all_path_stats = []
        if paths:
            _LOG.info('[%s] paths matched rule:%s', len(paths), self)
        for path in paths:
            new_path, stats = self.transform_path(path, g)
            if path != new_path:
                has_changed = True
                g = self.replace_path(g, path, new_path)
                all_path_stats.append(stats)
        return g, has_changed, all_path_stats
    @staticmethod
    def apply_all(g):
        """Apply every registered rule to a fixed point; return (graph, stats)."""
        all_rule_stats = defaultdict(list)
        for rule_class in rule_classes:
            i = 0
            # Re-run each rule until it stops changing the graph.
            while True:
                _LOG.info('Checking rule:[%s] iteration:[%s]', rule_class, i)
                g, has_changed, all_path_stats = rule_class().apply(g)
                if has_changed:
                    all_rule_stats[rule_class].append(all_path_stats)
                else:
                    break
                i += 1
        return g, all_rule_stats
class PruneSerialization(OptimizationRule):
    """Remove a serialize->deserialize hop between two pure-compute ops."""
    rule_str = '''
    MATCH (start: {is_pure_compute: "True"})-->(n1:Serializechunkop)-->(n2:Deserializechunkop)-->(end: {is_pure_compute: "True"})
    RETURN start, n1, n2, end
    '''
    def transform_path(self, path, g):
        # Connect the two pure-compute endpoints directly, bypassing the
        # serialize/deserialize pair in the middle.
        source_op, _, _, sink_op = path
        pruned = [source_op, sink_op]
        return pruned, {'old_path': path, 'new_path': pruned}
class PushDownSelect(OptimizationRule):
    """Push an explicit column selection down into the dataset read,
    so the reader only materialises the selected columns."""
    rule_str = '''
    MATCH (first)-->(read:Readdatasetop)-[*]->(select:Selectchunkop)-->(last)
    RETURN first, read, nodes, select, last
    '''
    def transform_path(self, path, g):
        from xpark.plan.dataframe.physical import ReadDatasetOp
        first_op = path[0]
        read_op = path[1]
        select_op = path[-2]
        last_op = path[-1]
        new_schema = {}
        # NOTE(review): new_schema is passed to ReadDatasetOp *before* being
        # populated below — this relies on ReadDatasetOp keeping a reference
        # rather than copying the dict; preserve this ordering.
        new_read_op = ReadDatasetOp(dataset=read_op.dataset, schema=new_schema,
                                    plan=read_op.plan, part_id=read_op.part_id)
        for col in select_op.ac_kwargs['cols']:
            new_schema[col] = read_op.schema[col]
        # Keep the intermediate ops, drop the now-redundant select op.
        new_path = [first_op, new_read_op] + path[2:-2] + [last_op]
        stats = {'old_schema': read_op.schema, 'new_schema': new_schema, 'dataset': read_op.dataset}
        return new_path, stats
class PushDownImplicitSelect(OptimizationRule):
    """Placeholder rule: matches read paths but currently performs no
    transformation (transform_path returns the path unchanged)."""
    rule_str = '''
    MATCH (read:Readdatasetop)-[*]->(last)
    RETURN read, nodes, last
    '''
    # Intended (unsupported) query kept for reference:
    # '''
    # MATCH p=(read:Readdatasetop)-[*]->(last)
    # WHERE any(node in nodes(p) WHERE labels(node) IN ["Filterchunkop"])
    # RETURN nodes(p)
    # '''
    def transform_path(self, path, g):
        # No-op: returning the original path means apply() records no change.
        return path, {}
class PushDownCount(OptimizationRule):
    """Replace read->count with a metadata-only count when the dataset is
    Parquet (row counts are available without scanning the data)."""
    rule_str = '''
    MATCH (start:Physicalstartop)-->(read:Readdatasetop)-->(count:Countchunkop)-->(sum:Sumop)
    RETURN start, read, count, sum
    '''
    # Intended (unsupported) query kept for reference:
    # '''
    # MATCH p=(read:Readdatasetop)-[*]->(count:Countchunkop)-->(sum:Sumop)
    # WHERE none(node in nodes(p) WHERE labels(node) IN ["Filterchunkop"])
    # RETURN start, read, count, sum
    # '''
    def transform_path(self, path, g):
        from xpark.dataset.files import FileList
        from xpark.plan.dataframe.physical import ReadDatasetCountOp
        start_op = path[0]
        read_op = path[1]
        sum_op = path[-1]
        # Only Parquet exposes counts cheaply; other formats are left alone.
        if read_op.dataset.file_list.file_type == FileList.FILE_TYPE_PARQUET:
            new_read_op = ReadDatasetCountOp(dataset=read_op.dataset, schema=read_op.schema,
                                             plan=read_op.plan, part_id=read_op.part_id)
            new_path = [start_op, new_read_op, sum_op]
            stats = {'dataset': read_op.dataset}
            return new_path, stats
        else:
            return path, {}
class PruneChunks(OptimizationRule):
    """Drop whole read branches whose Parquet chunk cannot satisfy the
    combined filters (decided from chunk min/max statistics)."""
    rule_str = '''
    MATCH (first)-->(read:Readdatasetop)-[*]->(last: {is_terminal: "True"})
    RETURN first, read, nodes, last
    '''
    # Intended (unsupported) query kept for reference:
    # '''
    # MATCH p=(read:Readdatasetop)-[*]->(filter:Filterchunkop)-[*]->(last)
    # RETURN nodes(p)
    # '''
    def transform_path(self, path, g):
        from xpark.dataset import FileDataset
        from xpark.dataset.files import FileList
        from xpark.dataset.utils import pq_prune_chunks_min_max
        from xpark.plan.dataframe.physical import FilterChunkOp
        first_op = path[0]
        read_op = path[1]
        dataset = read_op.dataset
        filter_ops = [op for op in path if isinstance(op, FilterChunkOp)]
        combined_filter = None
        # Pruning only applies to Parquet file datasets with at least one filter.
        if filter_ops and isinstance(dataset, FileDataset) and dataset.file_list.file_type == FileList.FILE_TYPE_PARQUET:
            # AND all filters on the path into a single predicate.
            for op in filter_ops:
                op_filter = op.ac_kwargs['expr']
                if combined_filter is None:
                    combined_filter = op_filter
                else:
                    combined_filter = combined_filter & op_filter
            to_keep = set(pq_prune_chunks_min_max(read_op.dataset, combined_filter))
            if read_op.part_id not in to_keep:
                # Collapse the whole branch to just the first op — this chunk
                # can never contribute rows.
                new_path = [first_op]
                stats = {'skip': read_op.part_id}
                return new_path, stats
        return path, {}
class UseIndexForFilter(OptimizationRule):
    """Rewrite a filter over a Table so indexed columns are resolved through
    the index, with any residual predicate applied afterwards."""
    rule_str = '''
    MATCH (first)-->(read:Readdatasetop)-[*]->(last: {is_terminal: "True"})
    RETURN first, read, nodes, last
    '''
    # Intended (unsupported) query kept for reference:
    # '''
    # MATCH p=(read:Readdatasetop)-[*]->(filter:Filterchunkop)-[*]->(last)
    # RETURN nodes(p)
    # '''
    def transform_path(self, path, g):
        from xpark.dataset.tables import Table
        from xpark.plan.dataframe.physical import PostIndexFilterChunkOp, ReadIndexFilterChunkOp, FilterChunkOp
        first_op = path[0]
        read_op = path[1]
        table = read_op.dataset
        filter_ops = [(i, op) for i, op in enumerate(path) if isinstance(op, FilterChunkOp)]
        if filter_ops and isinstance(table, Table):
            # Only the first filter on the path is considered for index use.
            op_idx, filter_op = filter_ops[0]
            expr = filter_op.ac_kwargs['expr']
            # Split the predicate into an index-resolvable part and a remainder.
            indexed_cols, index_expr, extra_cols, extra_expr = table.extract_cols_from_expr(expr, filter_op.schema)
            if indexed_cols:
                new_path = [first_op]
                # Keep the raw read only if non-indexed columns are still needed.
                if extra_cols:
                    new_path.append(read_op)
                new_path.extend(path[2:op_idx])
                new_index_filter_op = ReadIndexFilterChunkOp(
                    plan=filter_op.plan,
                    schema=filter_op.schema,
                    part_id=filter_op.part_id,
                    table=table,
                    expr=index_expr,
                    augment_cols=indexed_cols + extra_cols
                )
                new_path.append(new_index_filter_op)
                # Apply whatever the index could not answer as a post-filter.
                if extra_expr:
                    new_filter_op = PostIndexFilterChunkOp(
                        plan=filter_op.plan, schema=filter_op.schema,
                        part_id=filter_op.part_id, expr=extra_expr
                    )
                    new_path.append(new_filter_op)
                new_path.extend(path[op_idx + 1:])
                stats = {
                    'indexed_cols': indexed_cols,
                    'index_expr': index_expr,
                    'extra_cols': extra_cols,
                    'extra_expr': extra_expr
                }
                return new_path, stats
        return path, {}
# Registry consumed by OptimizationRule.apply_all; rules run in list order.
# Left empty when the optimizer is disabled in settings.
rule_classes = []
if settings.ENABLE_OPTIMIZER:
    rule_classes.extend([
        PruneSerialization,
        PushDownImplicitSelect,
        PushDownSelect,
        PruneChunks,
        UseIndexForFilter,
        PushDownCount,
    ])
class OptimizedPlan(BaseOptimizedPlan):
    """Optimized plan; `stats` holds rule statistics (populated elsewhere)."""
    stats = None
c985df20af8273aadaf980bbce0b0f16ad5b1c66 | 1,924 | py | Python | TubeSleuth/config.py | stevewillows/limnoria-plugins | d2a148e4bce14b042e0bdd92bcdb2ddd27e2d6b5 | [
"MIT"
] | 9 | 2016-05-05T11:58:17.000Z | 2020-05-09T08:43:28.000Z | TubeSleuth/config.py | stevewillows/limnoria-plugins | d2a148e4bce14b042e0bdd92bcdb2ddd27e2d6b5 | [
"MIT"
] | 40 | 2015-09-18T08:12:09.000Z | 2021-12-20T07:11:43.000Z | TubeSleuth/config.py | stevewillows/limnoria-plugins | d2a148e4bce14b042e0bdd92bcdb2ddd27e2d6b5 | [
"MIT"
] | 22 | 2015-10-18T01:31:15.000Z | 2022-03-19T09:28:24.000Z | ###
# Copyright (c) 2015, butterscotchstallion
# All rights reserved.
#
#
###
import supybot.conf as conf
import supybot.registry as registry
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('TubeSleuth')
except:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x: x
def configure(advanced):
    # This will be called by supybot to configure this module. advanced is
    # a bool that specifies whether the user identified themself as an advanced
    # user or not. You should effect your configuration by manipulating the
    # registry as appropriate.
    # NOTE: the questions import is part of supybot's standard configure()
    # template; it is unused here because no interactive questions are asked.
    from supybot.questions import expect, anything, something, yn
    conf.registerPlugin('TubeSleuth', True)
# Register the plugin and its global configuration values with supybot's
# registry. Each registerGlobalValue call defines one `config` variable.
TubeSleuth = conf.registerPlugin('TubeSleuth')
conf.registerGlobalValue(TubeSleuth, 'developerKey',
        registry.String("", _("""Google API key. Required.""")))
conf.registerGlobalValue(TubeSleuth, 'sortOrder',
        registry.String("relevance", _("""The order parameter specifies the method that will be used to order resources in the API response.""")))
conf.registerGlobalValue(TubeSleuth, 'template',
        registry.String("$yt_logo $link :: $title", _("""Template used for search result replies""")))
conf.registerGlobalValue(TubeSleuth, 'useBold',
        registry.Boolean(False, _("""Use bold in replies""")))
conf.registerGlobalValue(TubeSleuth, 'noResultsMessage',
        registry.String("No results for that query", _("""Message reply when there are no results""")))
conf.registerGlobalValue(TubeSleuth, 'safeSearch',
        registry.String("moderate", _("""Safe search filtering: none, moderate, strict""")))
conf.registerGlobalValue(TubeSleuth, 'respondToPrivateMessages',
        registry.Boolean(False, _("""Whether the bot should respond to this command in private messages""")))
| 37.72549 | 143 | 0.735967 |
f3c82b105796f1e64fe7466935a1363466e66a8c | 55 | py | Python | apps/jobs/constants.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 1,470 | 2016-10-21T01:21:45.000Z | 2022-03-30T14:08:29.000Z | apps/jobs/constants.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 2,594 | 2016-11-02T03:36:01.000Z | 2022-03-31T15:30:04.000Z | apps/jobs/constants.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 865 | 2016-11-09T17:46:32.000Z | 2022-03-30T13:06:52.000Z | submission_status_to_exclude = ["failed", "cancelled"]
| 27.5 | 54 | 0.781818 |
1eb601ab4b35c203cf0a3d5902b6b5cce91f3bd2 | 2,973 | py | Python | src/injecta/tag/TaggedArgumentResolverTest.py | DataSentics/injecta | 090eeac6c76c43d40be71df678222a07b0a3c783 | [
"MIT"
] | 3 | 2021-09-27T12:55:00.000Z | 2022-01-31T19:13:23.000Z | src/injecta/tag/TaggedArgumentResolverTest.py | DataSentics/injecta | 090eeac6c76c43d40be71df678222a07b0a3c783 | [
"MIT"
] | null | null | null | src/injecta/tag/TaggedArgumentResolverTest.py | DataSentics/injecta | 090eeac6c76c43d40be71df678222a07b0a3c783 | [
"MIT"
] | 1 | 2021-03-04T09:12:05.000Z | 2021-03-04T09:12:05.000Z | import unittest
from injecta.container.ContainerBuild import ContainerBuild
from injecta.dtype.DType import DType
from injecta.service.Service import Service
from injecta.service.argument.ListArgument import ListArgument
from injecta.service.argument.PrimitiveArgument import PrimitiveArgument
from injecta.service.argument.TaggedServicesArgument import TaggedServicesArgument
from injecta.service.class_.InspectedArgument import InspectedArgument
from injecta.service.resolved.ResolvedArgument import ResolvedArgument
from injecta.tag.TaggedArgumentResolver import TaggedArgumentResolver
class TaggedArgumentResolverTest(unittest.TestCase):
    """Unit tests for TaggedArgumentResolver's handling of !tagged arguments."""
    def setUp(self):
        # Fresh resolver per test; it is stateless but re-created for isolation.
        self.__tagged_argument_resolver = TaggedArgumentResolver()
    def test_no_change(self):
        # A plain primitive argument must pass through the resolver untouched.
        resolved_argument = ResolvedArgument("my_number", PrimitiveArgument(123), InspectedArgument("my_number", DType("builtins", "int")))
        container_build = ContainerBuild({}, [], {}, {}, {})
        new_resolved_argument = self.__tagged_argument_resolver.resolve(resolved_argument, container_build)
        self.assertEqual(resolved_argument, new_resolved_argument)
    def test_basic(self):
        # An unnamed tagged-services argument resolves to a ListArgument of
        # every service registered under the tag.
        resolved_argument = ResolvedArgument(
            "my_number", TaggedServicesArgument("my_service_tag"), InspectedArgument("my_tagged_services", DType("builtins", "list"))
        )
        new_resolved_argument = self.__tagged_argument_resolver.resolve(resolved_argument, self.__create_container_build())
        list_argument = new_resolved_argument.argument
        self.assertIsInstance(list_argument, ListArgument)
        self.assertEqual(None, list_argument.name)
        self.assertEqual("injecta.mocks.Bar", list_argument.items[0].service_name)
        self.assertEqual("injecta.mocks.Bar.new", list_argument.items[1].service_name)
    def test_named_argument(self):
        # When the tagged argument carries a name, the resulting ListArgument
        # keeps that name.
        resolved_argument = ResolvedArgument(
            "my_number",
            TaggedServicesArgument("my_service_tag", "my_tagged_services"),
            InspectedArgument("my_tagged_services", DType("builtins", "list")),
        )
        new_resolved_argument = self.__tagged_argument_resolver.resolve(resolved_argument, self.__create_container_build())
        list_argument = new_resolved_argument.argument
        self.assertIsInstance(list_argument, ListArgument)
        self.assertEqual("my_tagged_services", list_argument.name)
        self.assertEqual("injecta.mocks.Bar", list_argument.items[0].service_name)
        self.assertEqual("injecta.mocks.Bar.new", list_argument.items[1].service_name)
    def __create_container_build(self):
        # Container with two services registered under "my_service_tag".
        tags2_services = {
            "my_service_tag": [
                Service("injecta.mocks.Bar", DType("injecta.mocks.Bar", "Bar")),
                Service("injecta.mocks.Bar.new", DType("injecta.mocks.Bar", "Bar")),
            ]
        }
        return ContainerBuild({}, [], {}, {}, tags2_services)
if __name__ == "__main__":
unittest.main()
| 43.720588 | 139 | 0.736293 |
72d383d5b14f331907545f504250db457ef4f65c | 1,369 | py | Python | linter.py | SublimeLinter/SublimeLinter-pyyaml | 2b0c1aa17a4df224120597fbcb561052492d417e | [
"MIT"
] | 15 | 2015-09-28T11:29:39.000Z | 2021-07-13T20:27:57.000Z | linter.py | SublimeLinter/SublimeLinter-pyyaml | 2b0c1aa17a4df224120597fbcb561052492d417e | [
"MIT"
] | 8 | 2015-03-19T01:11:14.000Z | 2017-06-09T17:49:23.000Z | linter.py | SublimeLinter/SublimeLinter-pyyaml | 2b0c1aa17a4df224120597fbcb561052492d417e | [
"MIT"
] | 4 | 2015-01-20T17:49:11.000Z | 2016-12-20T20:47:11.000Z | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by NotSqrt
# Copyright (c) 2013 NotSqrt
#
# License: MIT
#
"""This module exports the Pyyaml plugin class."""
from SublimeLinter.lint import PythonLinter, persist
class Pyyaml(PythonLinter):
    """Provides an interface to pyyaml."""
    syntax = 'yaml'
    # No external executable: linting happens in-process via `module` below.
    cmd = None
    # Matches the ':line:col: message' lines emitted by check().
    regex = r'^:(?P<line>\d+):(?P<col>\d+): (?P<message>.+)'
    line_col_base = (0, 0) # the lines and columns are 0-based
    module = 'yaml'
    def check(self, code, filename):
        """
        Call directly the yaml module, and handles the exception. Return str.
        Very similar to the SublimeLinter-json linter, except yaml is not in the python core library.
        """
        yaml = self.module
        try:
            # Fully drive the parser; BaseLoader avoids constructing objects.
            list(yaml.parse(code, Loader=yaml.BaseLoader))
        except yaml.error.YAMLError as exc:
            if persist.settings.get('debug'):
                persist.printf('{} - {} : {}'.format(self.name, type(exc), exc))
            message = '{} : {} {}'.format(type(exc).__name__, exc.problem, exc.context)
            # Emit in the ':line:col: message' shape expected by `regex`.
            return ':{}:{}: {}\n'.format(exc.problem_mark.line, exc.problem_mark.column, message)
        except Exception as exc:
            # Non-YAML failure: log it but report no lint errors.
            persist.printf('{} - uncaught exception - {} : {}'.format(self.name, type(exc), exc))
        return ''
0825f764a9e86d4033ff166584c12af7599f5750 | 3,687 | py | Python | ranzen/hydra/utils.py | predictive-analytics-lab/mantra | 6c63d1d1e01745f31dbdc7c34f6c7932bcdccef8 | [
"Apache-2.0"
] | null | null | null | ranzen/hydra/utils.py | predictive-analytics-lab/mantra | 6c63d1d1e01745f31dbdc7c34f6c7932bcdccef8 | [
"Apache-2.0"
] | 4 | 2021-11-03T18:48:36.000Z | 2022-03-16T14:01:45.000Z | ranzen/hydra/utils.py | wearepal/ranzen | e249220026ccb5c05218c7202866690b5447d37e | [
"Apache-2.0"
] | null | null | null | """Functions for dealing with hydra."""
from __future__ import annotations
from collections.abc import MutableMapping
from contextlib import contextmanager
from dataclasses import asdict
from enum import Enum
import shlex
from typing import Any, Iterator, Sequence
from hydra.core.config_store import ConfigStore
from hydra.core.hydra_config import HydraConfig
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
__all__ = [
"GroupRegistration",
"SchemaRegistration",
"as_pretty_dict",
"reconstruct_cmd",
"recursively_instantiate",
]
def _clean_up_dict(obj: Any) -> Any:
    """Recursively convert enums to their names and drop ``_target_`` keys."""
    # Mappings: recurse into values, filtering out hydra's _target_ marker.
    if isinstance(obj, MutableMapping):
        return {key: _clean_up_dict(value) for key, value in obj.items() if key != "_target_"}
    # Enums collapse to their member name.
    if isinstance(obj, Enum):
        return str(f"{obj.name}")
    # hydra stores lists as omegaconf.ListConfig, so we convert here
    if OmegaConf.is_config(obj):
        return OmegaConf.to_container(obj, resolve=True, enum_to_str=True)
    return obj
def as_pretty_dict(data_class: object) -> dict:
    """Convert dataclass to a pretty dictionary.

    Delegates to ``_clean_up_dict`` so enums become names and ``_target_``
    keys are stripped at every nesting level.
    """
    return _clean_up_dict(asdict(data_class))
def reconstruct_cmd() -> str:
    """Reconstruct the python command that was used to start this program."""
    # HydraConfig holds the job name and the CLI overrides hydra captured.
    internal_config = HydraConfig.get()
    program = internal_config.job.name + ".py"
    args = internal_config.overrides.task
    # Overrides are an omegaconf list; convert before shell-quoting.
    return _join([program] + OmegaConf.to_container(args))  # type: ignore[operator]
def _join(split_command: list[str]) -> str:
"""Concatenate the tokens of the list split_command and return a string."""
return " ".join(shlex.quote(arg) for arg in split_command)
def recursively_instantiate(
    hydra_config: DictConfig, *, keys_to_exclude: Sequence[str] = ()
) -> dict[str, Any]:
    """Instantiate every entry of a hydra config into a plain dict.

    ``_target_`` and any ``keys_to_exclude`` are skipped; each remaining value
    is passed to ``hydra.utils.instantiate`` with ``_convert_="partial"``.
    """
    return {
        str(k): instantiate(v, _convert_="partial")
        for k, v in hydra_config.items()
        if k not in ("_target_",) + tuple(keys_to_exclude)
    }
class SchemaRegistration:
    """Register hydra schemas.
    :example:
        >>> sr = SchemaRegistration()
        >>> sr.register(Config, path="experiment_schema")
        >>> sr.register(TrainerConf, path="trainer/trainer_schema")
        >>>
        >>> with sr.new_group("schema/data", target_path="data") as group:
        >>>     group.add_option(CelebaDataConf, name="celeba")
        >>>     group.add_option(WaterbirdsDataConf, name="waterbirds")
    """
    def __init__(self) -> None:
        # ConfigStore is a hydra-managed singleton; all registrations share it.
        self._cs = ConfigStore.instance()
    def register(self, config_class: type, *, path: str) -> None:
        """Register a schema."""
        if "." in path:
            raise ValueError(f"Separate path with '/' and not '.': {path}")
        # Last path segment is the schema name; the rest becomes the package.
        parts = path.split("/")
        name = parts[-1]
        package = ".".join(parts[:-1])
        self._cs.store(name=name, node=config_class, package=package)
    @contextmanager
    def new_group(self, group_name: str, *, target_path: str) -> Iterator[GroupRegistration]:
        """Return a context manager for a new group."""
        # hydra packages are dot-separated; callers pass slash-separated paths.
        package = target_path.replace("/", ".")
        yield GroupRegistration(self._cs, group_name=group_name, package=package)
class GroupRegistration:
    """Helper for registering a group in hydra."""
    def __init__(self, cs: ConfigStore, *, group_name: str, package: str):
        # Shared ConfigStore plus the group/package this helper writes into.
        self._cs = cs
        self._group_name = group_name
        self._package = package
    def add_option(self, config_class: type, *, name: str) -> None:
        """Register a schema as an option for this group."""
        self._cs.store(group=self._group_name, name=name, node=config_class, package=self._package)
| 34.138889 | 100 | 0.679957 |
e054815906126eef55059eb469a8eeb1266603cf | 1,473 | py | Python | Probability-of-the-Loan-Defaulters/code.py | pankhilmaru/dsmp-pre-work | 0023be0f44f95e7d5bbb68854ee28e61a8ca1a85 | [
"MIT"
] | null | null | null | Probability-of-the-Loan-Defaulters/code.py | pankhilmaru/dsmp-pre-work | 0023be0f44f95e7d5bbb68854ee28e61a8ca1a85 | [
"MIT"
] | null | null | null | Probability-of-the-Loan-Defaulters/code.py | pankhilmaru/dsmp-pre-work | 0023be0f44f95e7d5bbb68854ee28e61a8ca1a85 | [
"MIT"
] | null | null | null | # --------------
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# code starts here
# `path` is supplied by the hosting platform (greyatom exercise harness).
df = pd.read_csv(path)
# P(fico > 700) and P(purpose == debt_consolidation).
p_a = len(df[df['fico']>700])/len(df)
p_b = len(df[df['purpose']=='debt_consolidation'])/len(df)
df1 = df[df['purpose']=='debt_consolidation']
# NOTE(review): the conditionals below use fico == 700 while p_a uses
# fico > 700 — looks inconsistent, but is kept as-is to match the exercise.
p_a_b = (len(df[(df['fico']==700) & (df['purpose']=='debt_consolidation')])/len(df))/p_a
p_b_a = (len(df[(df['fico']==700) & (df['purpose']=='debt_consolidation')])/len(df))/p_b
# Independence check: P(B|A) == P(B)?
result = p_b_a==p_b
print(result)
# code ends here
# --------------
# code starts here
# Bayes' theorem: P(paid back | credit policy) from the joint and marginals.
prob_lp = df[df['paid.back.loan']=='Yes'].shape[0]/df.shape[0]
prob_cs = df[df['credit.policy']=='Yes'].shape[0]/df.shape[0]
new_df = df[df['paid.back.loan']=='Yes']
prob_pd_cs_df = df[(df['paid.back.loan'] == 'Yes') & (df['credit.policy'] == 'Yes')]
p_num = prob_pd_cs_df.shape[0]/df.shape[0]
prob_pd_cs = p_num/prob_lp
bayes = prob_pd_cs*prob_lp/prob_cs
print(bayes)
# code ends here
# --------------
# code starts here
# Bar charts of loan purpose, overall and for defaulters only.
vc = df['purpose'].value_counts()
plt.bar(x=vc.index,height=vc.values,align='center')
plt.show()
df1 = df[df['paid.back.loan']=='No']
vc_df1 = df1['purpose'].value_counts()
plt.bar(x=vc_df1.index,height=vc_df1.values,align='center')
plt.show()
# code ends here
# --------------
# code starts here
# Distribution of installment amounts and log annual income.
inst_median = df['installment'].median()
inst_mean = df['installment'].mean()
plt.hist(x=df['installment'])
plt.show()
plt.hist(x=df['log.annual.inc'])
plt.show()
# code ends here
| 18.185185 | 88 | 0.636117 |
c3fec55a9e0ecff7bbb78c55cd826f2b926e18b0 | 14,140 | py | Python | payments/payment_gateways/doctype/paypal_settings/paypal_settings.py | phot0n/payments | a6a3940365a160669a66b23d7717fd306f8e5b80 | [
"MIT"
] | null | null | null | payments/payment_gateways/doctype/paypal_settings/paypal_settings.py | phot0n/payments | a6a3940365a160669a66b23d7717fd306f8e5b80 | [
"MIT"
] | 1 | 2022-03-09T10:26:04.000Z | 2022-03-09T10:26:04.000Z | payments/payment_gateways/doctype/paypal_settings/paypal_settings.py | phot0n/frappe_payments | a6a3940365a160669a66b23d7717fd306f8e5b80 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# License: MIT. See LICENSE
"""
# Integrating PayPal
### 1. Validate Currency Support
Example:
from frappe.integrations.utils import get_payment_gateway_controller
controller = get_payment_gateway_controller("PayPal")
controller().validate_transaction_currency(currency)
### 2. Redirect for payment
Example:
payment_details = {
"amount": 600,
"title": "Payment for bill : 111",
"description": "payment via cart",
"reference_doctype": "Payment Request",
"reference_docname": "PR0001",
"payer_email": "NuranVerkleij@example.com",
"payer_name": "Nuran Verkleij",
"order_id": "111",
"currency": "USD",
"payment_gateway": "Razorpay",
"subscription_details": {
"plan_id": "plan_12313", # if Required
"start_date": "2018-08-30",
"billing_period": "Month" #(Day, Week, SemiMonth, Month, Year),
"billing_frequency": 1,
"customer_notify": 1,
"upfront_amount": 1000
}
}
# redirect the user to this url
url = controller().get_payment_url(**payment_details)
### 3. On Completion of Payment
Write a method for `on_payment_authorized` in the reference doctype
Example:
def on_payment_authorized(payment_status):
# your code to handle callback
##### Note:
payment_status - payment gateway will put payment status on callback.
For paypal payment status parameter is one from: [Completed, Cancelled, Failed]
More Details:
<div class="small">For details on how to get your API credentials, follow this link: <a href="https://developer.paypal.com/docs/classic/api/apiCredentials/" target="_blank">https://developer.paypal.com/docs/classic/api/apiCredentials/</a></div>
"""
import frappe
import json
import pytz
from frappe import _
from urllib.parse import urlencode
from frappe.model.document import Document
from payments.utils import create_request_log, create_payment_gateway
from frappe.integrations.utils import make_post_request
from frappe.utils import get_url, call_hook_method, cint, get_datetime
api_path = '/api/method/payments.payment_gateways.doctype.paypal_settings.paypal_settings'
class PayPalSettings(Document):
    """Payment Gateway controller for PayPal's classic NVP Express Checkout API."""

    # Currencies PayPal can process; validate_transaction_currency() rejects anything else.
    supported_currencies = ["AUD", "BRL", "CAD", "CZK", "DKK", "EUR", "HKD", "HUF", "ILS", "JPY", "MYR", "MXN",
        "TWD", "NZD", "NOK", "PHP", "PLN", "GBP", "RUB", "SGD", "SEK", "CHF", "THB", "TRY", "USD"]

    def __setup__(self):
        # Default to the live (non-sandbox) environment when the doc is instantiated.
        setattr(self, "use_sandbox", 0)

    def setup_sandbox_env(self, token):
        # Restore the sandbox flag that was stored in the Integration Request
        # identified by *token* when the payment flow was initiated.
        data = json.loads(frappe.db.get_value("Integration Request", token, "data"))
        setattr(self, "use_sandbox", cint(frappe._dict(data).use_sandbox) or 0)

    def validate(self):
        create_payment_gateway("PayPal")
        call_hook_method('payment_gateway_enabled', gateway="PayPal")
        # flags.ignore_mandatory skips the live credential check (e.g. during setup).
        if not self.flags.ignore_mandatory:
            self.validate_paypal_credentails()

    def on_update(self):
        pass

    def validate_transaction_currency(self, currency):
        # Abort with a user-facing message for currencies PayPal does not support.
        if currency not in self.supported_currencies:
            frappe.throw(_("Please select another payment method. PayPal does not support transactions in currency '{0}'").format(currency))

    def get_paypal_params_and_url(self):
        """Return (params, api_url): base NVP credential parameters and the API
        endpoint, switching to sandbox credentials/endpoint when sandbox mode is on."""
        params = {
            "USER": self.api_username,
            "PWD": self.get_password(fieldname="api_password", raise_exception=False),
            "SIGNATURE": self.signature,
            "VERSION": "98",
            "METHOD": "GetPalDetails"
        }
        if hasattr(self, "use_sandbox") and self.use_sandbox:
            # Sandbox credentials come from site config, not from this doctype's fields.
            params.update({
                "USER": frappe.conf.sandbox_api_username,
                "PWD": frappe.conf.sandbox_api_password,
                "SIGNATURE": frappe.conf.sandbox_signature
            })
        api_url = "https://api-3t.sandbox.paypal.com/nvp" if (self.paypal_sandbox or self.use_sandbox) else "https://api-3t.paypal.com/nvp"
        return params, api_url

    def validate_paypal_credentails(self):
        # NOTE(review): method name keeps its historical misspelling ("credentails")
        # because external callers may depend on it.
        params, url = self.get_paypal_params_and_url()
        params = urlencode(params)
        try:
            res = make_post_request(url=url, data=params.encode("utf-8"))
            if res["ACK"][0] == "Failure":
                raise Exception
        except Exception:
            frappe.throw(_("Invalid payment gateway credentials"))

    def get_payment_url(self, **kwargs):
        """Start an Express Checkout session, log it as an Integration Request,
        and return the PayPal URL to redirect the payer to."""
        setattr(self, "use_sandbox", cint(kwargs.get("use_sandbox", 0)))

        response = self.execute_set_express_checkout(**kwargs)

        if self.paypal_sandbox or self.use_sandbox:
            return_url = "https://www.sandbox.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}"
        else:
            return_url = "https://www.paypal.com/cgi-bin/webscr?cmd=_express-checkout&token={0}"

        # TOKEN identifies the checkout session; it also keys the Integration Request.
        kwargs.update({
            "token": response.get("TOKEN")[0],
            "correlation_id": response.get("CORRELATIONID")[0]
        })

        self.integration_request = create_request_log(kwargs, "Remote", "PayPal", response.get("TOKEN")[0])

        return return_url.format(kwargs["token"])

    def execute_set_express_checkout(self, **kwargs):
        # SetExpressCheckout call; the TOKEN in its response starts the session.
        params, url = self.get_paypal_params_and_url()
        params.update({
            "METHOD": "SetExpressCheckout",
            "returnUrl": get_url("{0}.get_express_checkout_details".format(api_path)),
            "cancelUrl": get_url("/payment-cancel"),
            "PAYMENTREQUEST_0_PAYMENTACTION": "SALE",
            "PAYMENTREQUEST_0_AMT": kwargs['amount'],
            "PAYMENTREQUEST_0_CURRENCYCODE": kwargs['currency'].upper()
        })

        if kwargs.get('subscription_details'):
            self.configure_recurring_payments(params, kwargs)

        params = urlencode(params)

        response = make_post_request(url, data=params.encode("utf-8"))

        if response.get("ACK")[0] != "Success":
            frappe.throw(_("Looks like something is wrong with this site's Paypal configuration."))

        return response

    def configure_recurring_payments(self, params, kwargs):
        # remove the one-off payment params, since we set up recurring payments instead
        for param in ('PAYMENTREQUEST_0_PAYMENTACTION', 'PAYMENTREQUEST_0_AMT',
            'PAYMENTREQUEST_0_CURRENCYCODE'):
            del params[param]

        params.update({
            "L_BILLINGTYPE0": "RecurringPayments",  # the type of billing agreement
            "L_BILLINGAGREEMENTDESCRIPTION0": kwargs['description']
        })
def get_paypal_and_transaction_details(token):
    """Load the sandbox-aware PayPal Settings doc plus the transaction payload
    stored in the Integration Request identified by *token*.

    Returns a (data, params, url) tuple: the stored request payload, the NVP
    credential parameters and the NVP endpoint URL.
    """
    settings = frappe.get_doc("PayPal Settings")
    settings.setup_sandbox_env(token)
    params, url = settings.get_paypal_params_and_url()
    request_data = json.loads(frappe.get_doc("Integration Request", token).data)
    return request_data, params, url
def setup_redirect(data, redirect_url, custom_redirect_to=None, redirect=True):
    """Append redirect query parameters taken from *data* to *redirect_url*
    and, unless *redirect* is False, install an HTTP redirect response.

    *custom_redirect_to* (e.g. the value returned by an `on_payment_authorized`
    hook) takes precedence over the `redirect_to` stored in *data*.
    """
    # custom value wins when truthy, otherwise fall back to the stored one
    redirect_to = custom_redirect_to or data.get('redirect_to') or None
    redirect_message = data.get('redirect_message') or None

    if redirect_to:
        redirect_url = redirect_url + '&' + urlencode({'redirect_to': redirect_to})
    if redirect_message:
        redirect_url = redirect_url + '&' + urlencode({'redirect_message': redirect_message})

    # done last so that functions called via hooks had a chance to update flags.redirect_to
    if redirect:
        frappe.local.response["type"] = "redirect"
        frappe.local.response["location"] = get_url(redirect_url)
@frappe.whitelist(allow_guest=True, xss_safe=True)
def get_express_checkout_details(token):
    """Callback endpoint PayPal redirects the payer to after checkout approval.

    Fetches payer details via GetExpressCheckoutDetails, marks the Integration
    Request as Authorized, then redirects the payer to the next step (one-off
    payment confirmation or recurring-profile creation).
    """
    try:
        doc = frappe.get_doc("PayPal Settings")
        doc.setup_sandbox_env(token)

        params, url = doc.get_paypal_params_and_url()
        params.update({
            "METHOD": "GetExpressCheckoutDetails",
            "TOKEN": token
        })

        response = make_post_request(url, data=params)

        if response.get("ACK")[0] != "Success":
            # Payment is not confirmed yet, so inform the payer and stop here.
            frappe.respond_as_web_page(_("Something went wrong"),
                _("Looks like something went wrong during the transaction. Since we haven't confirmed the payment, Paypal will automatically refund you this amount. If it doesn't, please send us an email and mention the Correlation ID: {0}.").format(response.get("CORRELATIONID", [None])[0]),
                indicator_color='red',
                http_status_code=frappe.ValidationError.http_status_code)

            return

        doc = frappe.get_doc("Integration Request", token)
        update_integration_request_status(token, {
            "payerid": response.get("PAYERID")[0],
            "payer_email": response.get("EMAIL")[0]
        }, "Authorized", doc=doc)

        frappe.local.response["type"] = "redirect"
        frappe.local.response["location"] = get_redirect_uri(doc, token, response.get("PAYERID")[0])

    except Exception:
        # Guest-facing endpoint: log instead of surfacing a traceback.
        frappe.log_error(frappe.get_traceback())
@frappe.whitelist(allow_guest=True, xss_safe=True)
def confirm_payment(token):
    """Execute the approved one-off payment (DoExpressCheckoutPayment).

    On success the Integration Request is marked Completed, the reference
    document's `on_payment_authorized` hook runs, and the payer is redirected
    to the success page; otherwise they are sent to /payment-failed.
    """
    try:
        custom_redirect_to = None
        data, params, url = get_paypal_and_transaction_details(token)

        params.update({
            "METHOD": "DoExpressCheckoutPayment",
            "PAYERID": data.get("payerid"),
            "TOKEN": token,
            "PAYMENTREQUEST_0_PAYMENTACTION": "SALE",
            "PAYMENTREQUEST_0_AMT": data.get("amount"),
            "PAYMENTREQUEST_0_CURRENCYCODE": data.get("currency").upper()
        })

        response = make_post_request(url, data=params)

        if response.get("ACK")[0] == "Success":
            update_integration_request_status(token, {
                "transaction_id": response.get("PAYMENTINFO_0_TRANSACTIONID")[0],
                "correlation_id": response.get("CORRELATIONID")[0]
            }, "Completed")

            if data.get("reference_doctype") and data.get("reference_docname"):
                # The hook may return a custom redirect target.
                custom_redirect_to = frappe.get_doc(data.get("reference_doctype"),
                    data.get("reference_docname")).run_method("on_payment_authorized", "Completed")
                frappe.db.commit()

            redirect_url = '/payment-success?doctype={0}&docname={1}'.format(data.get("reference_doctype"), data.get("reference_docname"))
        else:
            redirect_url = "/payment-failed"

        setup_redirect(data, redirect_url, custom_redirect_to)

    except Exception:
        frappe.log_error(frappe.get_traceback())
@frappe.whitelist(allow_guest=True, xss_safe=True)
def create_recurring_profile(token, payerid):
    """Create (or re-create) a PayPal recurring payments profile for a subscription.

    If the stored data already carries a `subscription_id` the existing profile
    is cancelled first; with add-ons present this counts as an update, which
    changes the status passed to the `on_payment_authorized` hook.
    """
    try:
        custom_redirect_to = None
        updating = False
        data, params, url = get_paypal_and_transaction_details(token)

        addons = data.get("addons")
        subscription_details = data.get("subscription_details")

        if data.get('subscription_id'):
            if addons:
                updating = True
            manage_recurring_payment_profile_status(data['subscription_id'], 'Cancel', params, url)

        params.update({
            "METHOD": "CreateRecurringPaymentsProfile",
            "PAYERID": payerid,
            "TOKEN": token,
            "DESC": data.get("description"),
            "BILLINGPERIOD": subscription_details.get("billing_period"),
            "BILLINGFREQUENCY": subscription_details.get("billing_frequency"),
            "AMT": data.get("amount") if data.get("subscription_amount") == data.get("amount") else data.get("subscription_amount"),
            "CURRENCYCODE": data.get("currency").upper(),
            "INITAMT": data.get("upfront_amount")
        })

        status_changed_to = 'Completed' if data.get("starting_immediately") or updating else 'Verified'

        # Convert the local start date to UTC before sending it to PayPal.
        starts_at = get_datetime(subscription_details.get("start_date")) or frappe.utils.now_datetime()
        starts_at = starts_at.replace(tzinfo=pytz.timezone(frappe.utils.get_time_zone())).astimezone(pytz.utc)

        #"PROFILESTARTDATE": datetime.utcfromtimestamp(get_timestamp(starts_at)).isoformat()
        params.update({
            "PROFILESTARTDATE": starts_at.isoformat()
        })

        response = make_post_request(url, data=params)

        if response.get("ACK")[0] == "Success":
            update_integration_request_status(token, {
                "profile_id": response.get("PROFILEID")[0],
            }, "Completed")

            if data.get("reference_doctype") and data.get("reference_docname"):
                data['subscription_id'] = response.get("PROFILEID")[0]
                # Expose the enriched data to the hook via frappe.flags.
                frappe.flags.data = data
                custom_redirect_to = frappe.get_doc(data.get("reference_doctype"),
                    data.get("reference_docname")).run_method("on_payment_authorized", status_changed_to)
                frappe.db.commit()

            redirect_url = '/payment-success?doctype={0}&docname={1}'.format(data.get("reference_doctype"), data.get("reference_docname"))
        else:
            redirect_url = "/payment-failed"

        setup_redirect(data, redirect_url, custom_redirect_to)

    except Exception:
        frappe.log_error(frappe.get_traceback())
def update_integration_request_status(token, data, status, error=False, doc=None):
    """Update the status of the Integration Request identified by *token*.

    A pre-loaded *doc* is used directly when given; otherwise the document is
    fetched. (*error* is accepted for API compatibility but currently unused.)
    """
    request_doc = doc or frappe.get_doc("Integration Request", token)
    request_doc.update_status(data, status)
def get_redirect_uri(doc, token, payerid):
    """Return the endpoint the payer is redirected to after authorization:
    the recurring-profile endpoint for subscriptions, the one-off payment
    confirmation endpoint otherwise."""
    data = json.loads(doc.data)
    is_subscription = bool(data.get("subscription_details") or data.get("subscription_id"))
    if is_subscription:
        endpoint = "{0}.create_recurring_profile?token={1}&payerid={2}".format(api_path, token, payerid)
    else:
        endpoint = "{0}.confirm_payment?token={1}".format(api_path, token)
    return get_url(endpoint)
def manage_recurring_payment_profile_status(profile_id, action, args, url):
    """Change the status of a PayPal recurring payments profile.

    *action* is one of PayPal's ManageRecurringPaymentsProfileStatus actions
    (e.g. 'Cancel'). Raises via frappe.throw() if PayPal reports a failure.
    """
    args.update({
        "METHOD": "ManageRecurringPaymentsProfileStatus",
        "PROFILEID": profile_id,
        "ACTION": action
    })

    response = make_post_request(url, data=args)

    # error code 11556 indicates profile is not in active state(or already cancelled)
    # thus could not cancel the subscription.
    # thus raise an exception only if the error code is not equal to 11556
    # BUGFIX: default to [None] instead of [] so a missing L_ERRORCODE0 key does
    # not raise IndexError here (matches the CORRELATIONID handling elsewhere).
    if response.get("ACK")[0] != "Success" and response.get("L_ERRORCODE0", [None])[0] != '11556':
        frappe.throw(_("Failed while amending subscription"))
@frappe.whitelist(allow_guest=True)
def ipn_handler():
    """Endpoint for PayPal IPN (Instant Payment Notification) callbacks.

    Validates the notification against PayPal, records it as a queued
    Integration Request, and enqueues background processing of the
    subscription notification.
    """
    try:
        data = frappe.local.form_dict

        validate_ipn_request(data)

        data.update({
            "payment_gateway": "PayPal"
        })

        doc = frappe.get_doc({
            "data": json.dumps(frappe.local.form_dict),
            "doctype": "Integration Request",
            "integration_type": "Subscription Notification",
            "status": "Queued"
        }).insert(ignore_permissions=True)
        frappe.db.commit()

        frappe.enqueue(method='payments.payment_gateways.doctype.paypal_settings.paypal_settings.handle_subscription_notification',
            queue='long', timeout=600, is_async=True, **{"doctype": "Integration Request", "docname": doc.name})

    except frappe.InvalidStatusError:
        # Unverifiable IPN requests are deliberately dropped without a response body.
        pass
    except Exception as e:
        # NOTE(review): frappe.log(frappe.log_error(title=e)) looks odd -
        # log_error already records the traceback; verify the outer frappe.log
        # wrapper is intentional.
        frappe.log(frappe.log_error(title=e))
def validate_ipn_request(data):
    """Verify an incoming IPN payload against PayPal before processing it.

    Raises frappe.InvalidStatusError when the payload lacks a
    `recurring_payment_id` or when PayPal does not recognise the profile.
    """
    def _throw():
        # BUGFIX: corrected message typo ("In Valid Request" -> "Invalid Request").
        frappe.throw(_("Invalid Request"), exc=frappe.InvalidStatusError)

    if not data.get("recurring_payment_id"):
        _throw()

    doc = frappe.get_doc("PayPal Settings")
    params, url = doc.get_paypal_params_and_url()
    params.update({
        "METHOD": "GetRecurringPaymentsProfileDetails",
        "PROFILEID": data.get("recurring_payment_id")
    })

    params = urlencode(params)
    res = make_post_request(url=url, data=params.encode("utf-8"))

    if res['ACK'][0] != 'Success':
        _throw()
def handle_subscription_notification(doctype, docname):
    """Background job: dispatch a queued IPN to apps via the
    `handle_subscription_notification` hook."""
    call_hook_method("handle_subscription_notification", doctype=doctype, docname=docname)
| 33.114754 | 280 | 0.737199 |
fa6e01e2559de73f57b381a202105214299177fd | 7,036 | py | Python | backend/news_29341/settings.py | crowdbotics-apps/news-29341 | 9581a5b0d691753d2d28003c0f4e4c9baf16d93a | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/news_29341/settings.py | crowdbotics-apps/news-29341 | 9581a5b0d691753d2d28003c0f4e4c9baf16d93a | [
"FTL",
"AML",
"RSA-MD"
] | 26 | 2021-08-02T20:17:22.000Z | 2022-01-23T13:45:59.000Z | backend/news_29341/settings.py | crowdbotics-apps/news-29341 | 9581a5b0d691753d2d28003c0f4e4c9baf16d93a | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for news_29341 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")

ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1

# Trust the X-Forwarded-Proto header set by the hosting proxy so Django knows
# the original request was HTTPS; optional forced redirect to HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    # generated domain apps:
    "task_category",
    "task",
    "wallet",
    "task_profile",
    "location",
    "tasker_business",
]
LOCAL_APPS = [
    "home",
    "users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    "storages",
]
# Apps contributed by optional modules (see modules.manifest.get_modules).
MODULES_APPS = get_modules()

INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "news_29341.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        # web_build holds the compiled frontend build output.
        "DIRS": [os.path.join(BASE_DIR, "web_build")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "news_29341.wsgi.application"

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

# SQLite by default; replaced below when DATABASE_URL is set in the environment.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}

if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
    },
    {
        "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = "/static/"

# WhiteNoise serves collected static files directly from the app process.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]

AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)

STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static"),
    os.path.join(BASE_DIR, "web_build/static"),
]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
# allauth: e-mail-only login, optional verification, registration toggles via env.
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)

REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}

# Custom user model
AUTH_USER_MODEL = "users.User"

# Outgoing mail via SendGrid SMTP; credentials are read from the environment.
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")

# Media files go to S3 only when every AWS credential is configured.
USE_S3 = (
    AWS_ACCESS_KEY_ID
    and AWS_SECRET_ACCESS_KEY
    and AWS_STORAGE_BUCKET_NAME
    and AWS_STORAGE_REGION
)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )

# Local media fallback (used when USE_S3 is false).
MEDIA_URL = "/mediafiles/"
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning(
            "You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
        )
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 28.954733 | 99 | 0.72527 |
1b2762e71f360c33a9be078a7c7efd3cba9810e4 | 328 | py | Python | lattedb/status/migrations/0003_remove_baryon2pt_type.py | callat-qcd/lattedb | 75c06748f3d59332a84ec1b5794c215c5974a46f | [
"BSD-3-Clause"
] | 1 | 2019-12-11T02:33:23.000Z | 2019-12-11T02:33:23.000Z | lattedb/status/migrations/0003_remove_baryon2pt_type.py | callat-qcd/lattedb | 75c06748f3d59332a84ec1b5794c215c5974a46f | [
"BSD-3-Clause"
] | 10 | 2020-01-29T17:06:01.000Z | 2021-05-31T14:41:19.000Z | lattedb/status/migrations/0003_remove_baryon2pt_type.py | callat-qcd/lattedb | 75c06748f3d59332a84ec1b5794c215c5974a46f | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.2.8 on 2020-01-30 17:33
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete `type` field from the `baryon2pt` model in the status app."""

    dependencies = [
        ('status', '0002_auto_20191107_0016'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='baryon2pt',
            name='type',
        ),
    ]
9222b35a14769fb82f9f5fd949ae5493a0dd16d6 | 512 | py | Python | env/lib/python3.8/site-packages/plotly/validators/scatter3d/marker/colorbar/_ticklen.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/scatter3d/marker/colorbar/_ticklen.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/scatter3d/marker/colorbar/_ticklen.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `ticklen` attribute of `scatter3d.marker.colorbar`."""

    def __init__(
        self, plotly_name="ticklen", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        # Pull validator options out of kwargs, falling back to the defaults
        # generated for this attribute; remaining kwargs pass through unchanged.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(TicklenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
708d78b18b92673a95939409a278a2dffc504c8b | 98,093 | py | Python | src/borg/archive.py | axapaxa/borg | 18f0729ed83bfa2c9b29023dadae6503035991d6 | [
"BSD-3-Clause"
] | null | null | null | src/borg/archive.py | axapaxa/borg | 18f0729ed83bfa2c9b29023dadae6503035991d6 | [
"BSD-3-Clause"
] | null | null | null | src/borg/archive.py | axapaxa/borg | 18f0729ed83bfa2c9b29023dadae6503035991d6 | [
"BSD-3-Clause"
] | null | null | null | import json
import os
import socket
import stat
import sys
import time
from collections import OrderedDict
from contextlib import contextmanager
from datetime import datetime, timezone, timedelta
from functools import partial
from getpass import getuser
from io import BytesIO
from itertools import groupby, zip_longest
from shutil import get_terminal_size
from .platformflags import is_win32, is_linux, is_freebsd, is_darwin
from .logger import create_logger
logger = create_logger()
from . import xattr
from .chunker import get_chunker, max_chunk_size
from .cache import ChunkListEntry
from .crypto.key import key_factory
from .compress import Compressor, CompressionSpec
from .constants import * # NOQA
from .crypto.low_level import IntegrityError as IntegrityErrorBase
from .hashindex import ChunkIndex, ChunkIndexEntry, CacheSynchronizer
from .helpers import Manifest
from .helpers import hardlinkable
from .helpers import ChunkIteratorFileWrapper, open_item
from .helpers import Error, IntegrityError, set_ec
from .platform import uid2user, user2uid, gid2group, group2gid
from .helpers import parse_timestamp, to_localtime
from .helpers import OutputTimestamp, format_timedelta, format_file_size, file_status, FileSize
from .helpers import safe_encode, safe_decode, make_path_safe, remove_surrogates
from .helpers import StableDict
from .helpers import bin_to_hex
from .helpers import safe_ns
from .helpers import ellipsis_truncate, ProgressIndicatorPercent, log_multi
from .helpers import os_open, flags_normal, flags_dir
from .helpers import msgpack
from .helpers import sig_int
from .patterns import PathPrefixPattern, FnmatchPattern, IECommand
from .item import Item, ArchiveItem, ItemDiff
from .platform import acl_get, acl_set, set_flags, get_flags, swidth, hostname
from .remote import cache_if_remote
from .repository import Repository, LIST_SCAN_LIMIT
has_lchmod = hasattr(os, 'lchmod')
has_link = hasattr(os, 'link')
class Statistics:
    """Accumulates per-archive counters and renders progress output.

    osize/csize/usize are the original, compressed and deduplicated byte
    counts; the *_parts counters track checkpoint part files separately.
    """

    def __init__(self, output_json=False):
        # output_json selects JSON progress lines vs. terminal-width text lines.
        self.output_json = output_json
        self.osize = self.csize = self.usize = self.nfiles = 0
        self.osize_parts = self.csize_parts = self.usize_parts = self.nfiles_parts = 0
        self.last_progress = 0  # timestamp when last progress was shown

    def update(self, size, csize, unique, part=False):
        """Account one chunk: *unique* chunks (newly stored) also count
        towards the deduplicated size; *part* routes to the part-file counters."""
        if not part:
            self.osize += size
            self.csize += csize
            if unique:
                self.usize += csize
        else:
            self.osize_parts += size
            self.csize_parts += csize
            if unique:
                self.usize_parts += csize

    def __add__(self, other):
        """Return a new Statistics holding the element-wise sum of both counters."""
        if not isinstance(other, Statistics):
            raise TypeError('can only add Statistics objects')
        stats = Statistics(self.output_json)
        stats.osize = self.osize + other.osize
        stats.csize = self.csize + other.csize
        stats.usize = self.usize + other.usize
        stats.nfiles = self.nfiles + other.nfiles
        stats.osize_parts = self.osize_parts + other.osize_parts
        stats.csize_parts = self.csize_parts + other.csize_parts
        stats.usize_parts = self.usize_parts + other.usize_parts
        stats.nfiles_parts = self.nfiles_parts + other.nfiles_parts
        return stats

    # Template used by __str__ for the "This archive:" summary row.
    summary = "{label:15} {stats.osize_fmt:>20s} {stats.csize_fmt:>20s} {stats.usize_fmt:>20s}"

    def __str__(self):
        return self.summary.format(stats=self, label='This archive:')

    def __repr__(self):
        return "<{cls} object at {hash:#x} ({self.osize}, {self.csize}, {self.usize})>".format(
            cls=type(self).__name__, hash=id(self), self=self)

    def as_dict(self):
        """Return the main counters in a JSON-serializable dict."""
        return {
            'original_size': FileSize(self.osize),
            'compressed_size': FileSize(self.csize),
            'deduplicated_size': FileSize(self.usize),
            'nfiles': self.nfiles,
        }

    @property
    def osize_fmt(self):
        return format_file_size(self.osize)

    @property
    def usize_fmt(self):
        return format_file_size(self.usize)

    @property
    def csize_fmt(self):
        return format_file_size(self.csize)

    def show_progress(self, item=None, final=False, stream=None, dt=None):
        """Print a progress line (JSON or terminal text) to *stream* (default
        stderr), throttled to at most one update per *dt* seconds."""
        now = time.monotonic()
        if dt is None or now - self.last_progress > dt:
            self.last_progress = now
            if self.output_json:
                data = self.as_dict()
                data.update({
                    'time': time.time(),
                    'type': 'archive_progress',
                    'path': remove_surrogates(item.path if item else ''),
                })
                msg = json.dumps(data)
                end = '\n'
            else:
                columns, lines = get_terminal_size()
                if not final:
                    msg = '{0.osize_fmt} O {0.csize_fmt} C {0.usize_fmt} D {0.nfiles} N '.format(self)
                    path = remove_surrogates(item.path) if item else ''
                    # Fit the path into the remaining terminal width, dropping
                    # the stats prefix entirely on very narrow terminals.
                    space = columns - swidth(msg)
                    if space < 12:
                        msg = ''
                        space = columns - swidth(msg)
                    if space >= 8:
                        msg += ellipsis_truncate(path, space)
                else:
                    # final update: blank the progress line
                    msg = ' ' * columns
                end = '\r'
            print(msg, end=end, file=stream or sys.stderr, flush=True)
def is_special(mode):
    """Return True for file types that get special treatment in --read-special
    mode: block devices, character devices and FIFOs."""
    return any(check(mode) for check in (stat.S_ISBLK, stat.S_ISCHR, stat.S_ISFIFO))
class BackupError(Exception):
    """
    Exception raised for non-OSError-based exceptions while accessing backup files.

    (Used e.g. for race-condition detection in stat_update_check.)
    """
class BackupOSError(Exception):
    """
    Wrapper for OSError raised while accessing backup files.

    Borg does different kinds of IO, and IO failures have different consequences.
    This wrapper represents failures of input file or extraction IO.
    These are non-critical and are only reported (exit code = 1, warning).

    Any unwrapped IO error is critical and aborts execution (for example repository IO failure).
    """

    def __init__(self, op, os_error):
        # Keep the wrapped error and mirror its common attributes for convenience.
        self.op = op
        self.os_error = os_error
        self.errno = os_error.errno
        self.strerror = os_error.strerror
        self.filename = os_error.filename

    def __str__(self):
        # Prefix with the operation name ('open', 'read', ...) when one was recorded.
        return '%s: %s' % (self.op, self.os_error) if self.op else str(self.os_error)
class BackupIO:
    """Reusable context manager that converts OSError raised inside its body
    into BackupOSError, tagged with the current operation name."""

    op = ''

    def __call__(self, op=''):
        # Record the operation name and return self, so usage reads
        # ``with backup_io('read'): ...``.
        self.op = op
        return self

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only OS-level failures are wrapped; everything else propagates as-is.
        if exc_type is not None and issubclass(exc_type, OSError):
            raise BackupOSError(self.op, exc_val) from exc_val
backup_io = BackupIO()
def backup_io_iter(iterator):
    """Yield items from *iterator*, wrapping any OSError raised while advancing
    it as BackupOSError('read', ...) via the module-level backup_io manager."""
    backup_io.op = 'read'
    _exhausted = object()
    while True:
        with backup_io:
            # next() with a default absorbs StopIteration; OSError still
            # propagates through the backup_io wrapper.
            item = next(iterator, _exhausted)
        if item is _exhausted:
            return
        yield item
def stat_update_check(st_old, st_curr):
    """Guard against races between the initial filename-based stat() and the
    later fd-based fstat() done by the file-type handler.

    If the file type or the inode changed in between (live filesystem churn at
    an unfortunate moment, or a deliberate attack), we would rather skip the
    file than be tricked into a security problem, so BackupError is raised.

    Note: assuming a file type change implies an inode change (and that inode
    numbers are not reused within such a short timeframe), the type check is
    technically covered by the inode check - it is kept for a clearer message.

    Returns *st_curr* when both results still refer to the same thing.
    """
    if stat.S_IFMT(st_old.st_mode) != stat.S_IFMT(st_curr.st_mode):
        # wrong handler was dispatched - abort
        raise BackupError('file type changed (race condition), skipping file')
    if st_old.st_ino != st_curr.st_ino:
        # the hardlinks-related code in create_helper would use the wrong inode - abort
        raise BackupError('file inode changed (race condition), skipping file')
    return st_curr
@contextmanager
def OsOpen(*, flags, path=None, parent_fd=None, name=None, noatime=False, op='open'):
    """Context manager around os_open() that closes the fd on exit and wraps
    OSErrors raised while opening as BackupOSError(*op*, ...).

    Yields the file descriptor; see the note below about None on Windows.
    """
    with backup_io(op):
        fd = os_open(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=noatime)
    try:
        yield fd
    finally:
        # On windows fd is None for directories.
        if fd is not None:
            os.close(fd)
class DownloadPipeline:
    """Decrypts item-metadata streams from the repository and optionally
    preloads the data chunks of the items that will be extracted."""

    def __init__(self, repository, key):
        self.repository = repository
        self.key = key

    def unpack_many(self, ids, filter=None, partial_extract=False, preload=False, hardlink_masters=None):
        """
        Return iterator of items.

        *ids* is a chunk ID list of an item stream. *filter* is a callable
        to decide whether an item will be yielded. *preload* preloads the data chunks of every yielded item.

        Warning: if *preload* is True then all data chunks of every yielded item have to be retrieved,
        otherwise preloaded chunks will accumulate in RemoteRepository and create a memory leak.
        """
        def _preload(chunks):
            self.repository.preload([c.id for c in chunks])

        masters_preloaded = set()
        unpacker = msgpack.Unpacker(use_list=False)
        for data in self.fetch_many(ids):
            unpacker.feed(data)
            items = [Item(internal_dict=item) for item in unpacker]
            for item in items:
                if 'chunks' in item:
                    item.chunks = [ChunkListEntry(*e) for e in item.chunks]

            if filter:
                items = [item for item in items if filter(item)]

            if preload:
                if filter and partial_extract:
                    # if we do only a partial extraction, it gets a bit
                    # complicated with computing the preload items: if a hardlink master item is not
                    # selected (== not extracted), we will still need to preload its chunks if a
                    # corresponding hardlink slave is selected (== is extracted).
                    # due to a side effect of the filter() call, we now have hardlink_masters dict populated.
                    for item in items:
                        if 'chunks' in item:  # regular file, maybe a hardlink master
                            _preload(item.chunks)
                            # if this is a hardlink master, remember that we already preloaded it:
                            if 'source' not in item and hardlinkable(item.mode) and item.get('hardlink_master', True):
                                masters_preloaded.add(item.path)
                        elif 'source' in item and hardlinkable(item.mode):  # hardlink slave
                            source = item.source
                            if source not in masters_preloaded:
                                # we only need to preload *once* (for the 1st selected slave)
                                chunks, _ = hardlink_masters[source]
                                if chunks is not None:
                                    _preload(chunks)
                                masters_preloaded.add(source)
                else:
                    # easy: we do not have a filter, thus all items are selected, thus we need to preload all chunks.
                    for item in items:
                        if 'chunks' in item:
                            _preload(item.chunks)

            for item in items:
                yield item

    def fetch_many(self, ids, is_preloaded=False):
        # Yield the decrypted payload for each chunk id, in order.
        for id_, data in zip(ids, self.repository.get_many(ids, is_preloaded=is_preloaded)):
            yield self.key.decrypt(id_, data)
class ChunkBuffer:
    """Packs items (msgpack) into an in-memory buffer and chunks the buffer;
    subclasses implement write_chunk() to store the produced chunks."""

    BUFFER_SIZE = 8 * 1024 * 1024  # flush threshold, see is_full()

    def __init__(self, key, chunker_params=ITEMS_CHUNKER_PARAMS):
        self.buffer = BytesIO()
        self.packer = msgpack.Packer()
        self.chunks = []  # ids returned by write_chunk(), in order
        self.key = key
        self.chunker = get_chunker(*chunker_params, seed=self.key.chunk_seed)

    def add(self, item):
        # Serialize the item into the buffer; flush once the buffer is full.
        self.buffer.write(self.packer.pack(item.as_dict()))
        if self.is_full():
            self.flush()

    def write_chunk(self, chunk):
        """Store *chunk* and return its id (implemented by subclasses)."""
        raise NotImplementedError

    def flush(self, flush=False):
        """Chunk the buffered data and store the chunks.

        Unless *flush* is True, the last (partial) chunk is kept in the buffer
        so subsequent add() calls can continue it at a natural chunk boundary.
        """
        if self.buffer.tell() == 0:
            return
        self.buffer.seek(0)
        # The chunker returns a memoryview to its internal buffer,
        # thus a copy is needed before resuming the chunker iterator.
        chunks = list(bytes(s) for s in self.chunker.chunkify(self.buffer))
        self.buffer.seek(0)
        self.buffer.truncate(0)
        # Leave the last partial chunk in the buffer unless flush is True
        end = None if flush or len(chunks) == 1 else -1
        for chunk in chunks[:end]:
            self.chunks.append(self.write_chunk(chunk))
        if end == -1:
            self.buffer.write(chunks[-1])

    def is_full(self):
        return self.buffer.tell() > self.BUFFER_SIZE
class CacheChunkBuffer(ChunkBuffer):
    """ChunkBuffer that stores its chunks through a Cache, accounting to *stats*."""

    def __init__(self, cache, key, stats, chunker_params=ITEMS_CHUNKER_PARAMS):
        super().__init__(key, chunker_params)
        self.cache = cache
        self.stats = stats

    def write_chunk(self, chunk):
        """Add *chunk* to the cache without blocking and return its id."""
        chunk_id, _, _ = self.cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats, wait=False)
        # drain any already-available async repository results, but do not block
        self.cache.repository.async_response(wait=False)
        return chunk_id
class Archive:
    """One named archive in the repository: creation, loading, extraction,
    deletion, renaming and statistics of a single archive."""

    # NOTE: the docstrings of the nested Error classes are used as the
    # user-visible error message templates ({} is filled in by Error) -
    # do not change their wording casually.
    class DoesNotExist(Error):
        """Archive {} does not exist"""

    class AlreadyExists(Error):
        """Archive {} already exists"""

    class IncompatibleFilesystemEncodingError(Error):
        """Failed to encode filename "{}" into file system encoding "{}". Consider configuring the LANG environment variable."""
def __init__(self, repository, key, manifest, name, cache=None, create=False,
             checkpoint_interval=1800, numeric_owner=False, noatime=False, noctime=False, noflags=False,
             progress=False, chunker_params=CHUNKER_PARAMS, start=None, start_monotonic=None, end=None,
             consider_part_files=False, log_json=False):
    """Open the existing archive *name*, or (with create=True) prepare a new one.

    :param repository: repository holding the archive's chunks
    :param key: key object used for decryption/encryption and id hashing
    :param manifest: repository manifest (directory of archives)
    :param name: archive name
    :param cache: chunk cache; needed for creating and for statistics
    :param create: True to set up a brand-new archive (raises AlreadyExists
                   if *name* is taken), False to load an existing one
                   (raises DoesNotExist if it is missing)
    :param checkpoint_interval: seconds between checkpoint archives while creating
    :param start, start_monotonic: creation start time; must be given together
    :param end: creation end time (defaults to now)
    :param consider_part_files: treat checkpoint part files like regular items
    :param log_json: emit progress/statistics as JSON
    """
    self.cwd = os.getcwd()
    self.key = key
    self.repository = repository
    self.cache = cache
    self.manifest = manifest
    self.hard_links = {}
    self.stats = Statistics(output_json=log_json)
    self.show_progress = progress
    self.name = name  # overwritten later with name from archive metadata
    self.name_in_manifest = name  # can differ from .name later (if borg check fixed duplicate archive names)
    self.comment = None
    self.checkpoint_interval = checkpoint_interval
    self.numeric_owner = numeric_owner
    self.noatime = noatime
    self.noctime = noctime
    self.noflags = noflags
    assert (start is None) == (start_monotonic is None), 'Logic error: if start is given, start_monotonic must be given as well and vice versa.'
    if start is None:
        start = datetime.utcnow()
        start_monotonic = time.monotonic()
    self.chunker_params = chunker_params
    self.start = start
    self.start_monotonic = start_monotonic
    if end is None:
        end = datetime.utcnow()
    self.end = end
    self.consider_part_files = consider_part_files
    self.pipeline = DownloadPipeline(self.repository, self.key)
    self.create = create
    if self.create:
        # creating: buffer item metadata and reserve a free checkpoint name
        self.items_buffer = CacheChunkBuffer(self.cache, self.key, self.stats)
        if name in manifest.archives:
            raise self.AlreadyExists(name)
        i = 0
        while True:
            # "name.checkpoint", "name.checkpoint.1", "name.checkpoint.2", ...
            self.checkpoint_name = '%s.checkpoint%s' % (name, i and ('.%d' % i) or '')
            if self.checkpoint_name not in manifest.archives:
                break
            i += 1
    else:
        # loading: resolve the name via the manifest and fetch the metadata
        info = self.manifest.archives.get(name)
        if info is None:
            raise self.DoesNotExist(name)
        self.load(info.id)
    self.zeros = None  # lazily allocated all-zero buffer for sparse extraction
def _load_meta(self, id):
    """Fetch, decrypt and unpack the archive metadata stored under chunk *id*."""
    metadata_blob = self.key.decrypt(id, self.repository.get(id))
    archive_item = ArchiveItem(internal_dict=msgpack.unpackb(metadata_blob))
    if archive_item.version != 1:
        raise Exception('Unknown archive metadata version')
    return archive_item
def load(self, id):
    """Initialize this instance from the archive metadata stored under chunk *id*."""
    self.id = id
    self.metadata = self._load_meta(self.id)
    # cmdline entries may contain surrogate escapes; normalize to str
    self.metadata.cmdline = [safe_decode(argument) for argument in self.metadata.cmdline]
    self.name = self.metadata.name
    self.comment = self.metadata.get('comment', '')
@property
def ts(self):
    """Timestamp of archive creation (start) in UTC"""
    return parse_timestamp(self.metadata.time)
@property
def ts_end(self):
    """Timestamp of archive creation (end) in UTC"""
    # archives made by old borg versions only stored the start time ('time')
    timestamp = self.metadata.get('time_end') or self.metadata.time
    return parse_timestamp(timestamp)
@property
def fpr(self):
    """Archive fingerprint: the archive id as a hex string."""
    return bin_to_hex(self.id)
@property
def duration(self):
    """Human-readable wall-clock duration of this run (from self.start/self.end)."""
    return format_timedelta(self.end - self.start)
@property
def duration_from_meta(self):
    """Human-readable duration computed from the stored archive metadata timestamps."""
    return format_timedelta(self.ts_end - self.ts)
def info(self):
    """Return a dict describing this archive (used for 'borg info' / JSON output).

    While creating (self.create is True), the live statistics and the recorded
    wall-clock start/end times are used; otherwise statistics are computed from
    the cache and the timestamps come from the stored archive metadata.
    """
    if self.create:
        stats = self.stats
        start = self.start.replace(tzinfo=timezone.utc)
        end = self.end.replace(tzinfo=timezone.utc)
    else:
        stats = self.calc_stats(self.cache)
        start = self.ts
        end = self.ts_end
    info = {
        'name': self.name,
        'id': self.fpr,
        'start': OutputTimestamp(start),
        'end': OutputTimestamp(end),
        'duration': (end - start).total_seconds(),
        'stats': stats.as_dict(),
        'limits': {
            # fraction of the maximum archive metadata size already used
            'max_archive_size': self.cache.chunks[self.id].csize / MAX_DATA_SIZE,
        },
    }
    if self.create:
        info['command_line'] = sys.argv
    else:
        # metadata recorded when the archive was created
        info.update({
            'command_line': self.metadata.cmdline,
            'hostname': self.metadata.hostname,
            'username': self.metadata.username,
            'comment': self.metadata.get('comment', ''),
            'chunker_params': self.metadata.get('chunker_params', ''),
        })
    return info
def __str__(self):
    """Human-readable multi-line summary of the archive (shown by e.g. --stats)."""
    return '''\
Repository: {location}
Archive name: {0.name}
Archive fingerprint: {0.fpr}
Time (start): {start}
Time (end): {end}
Duration: {0.duration}
Number of files: {0.stats.nfiles}
Utilization of max. archive size: {csize_max:.0%}
'''.format(
        self,
        start=OutputTimestamp(self.start.replace(tzinfo=timezone.utc)),
        end=OutputTimestamp(self.end.replace(tzinfo=timezone.utc)),
        csize_max=self.cache.chunks[self.id].csize / MAX_DATA_SIZE,
        location=self.repository._location.canonical_path()
    )
def __repr__(self):
    # unambiguous developer representation, e.g. Archive('docs-2020-01-01')
    return 'Archive(%r)' % self.name
def item_filter(self, item, filter=None):
    """Return True if *item* should be processed, applying the part-file policy
    first and the optional user-supplied *filter* second."""
    if 'part' in item and not self.consider_part_files:
        # this is a part(ial) file, we usually don't want to consider it.
        return False
    if filter:
        return filter(item)
    return True
def iter_items(self, filter=None, partial_extract=False, preload=False, hardlink_masters=None):
    """Yield the archive's items, filtered and (optionally) with chunk preloading."""
    # note: when calling this with preload=True, later fetch_many() must be called with
    # is_preloaded=True or the RemoteRepository code will leak memory!
    assert not (filter and partial_extract and preload) or hardlink_masters is not None

    def combined_filter(item):
        # part-file policy first, then the caller's filter
        return self.item_filter(item, filter)

    yield from self.pipeline.unpack_many(self.metadata.items,
                                         partial_extract=partial_extract,
                                         preload=preload,
                                         hardlink_masters=hardlink_masters,
                                         filter=combined_filter)
def add_item(self, item, show_progress=True, stats=None):
    """Queue *item* for the archive's item metadata stream, updating progress display."""
    if show_progress and self.show_progress:
        progress_stats = self.stats if stats is None else stats
        progress_stats.show_progress(item=item, dt=0.2)
    self.items_buffer.add(item)
def write_checkpoint(self):
    """Save a checkpoint archive, then immediately remove its manifest entry
    and drop the reference to its metadata chunk (save() set self.id to it)."""
    self.save(self.checkpoint_name)
    del self.manifest.archives[self.checkpoint_name]
    self.cache.chunk_decref(self.id, self.stats)
def save(self, name=None, comment=None, timestamp=None, stats=None, additional_metadata=None):
    """Flush buffered items, write the archive metadata chunk, register the
    archive in the manifest and commit repository and cache.

    :param name: archive name (defaults to self.name); must not exist yet
    :param comment: archive comment, stored in the metadata
    :param timestamp: forced start time; end is computed as timestamp + elapsed
    :param stats: Statistics instance; when given, size/csize/nfiles counters
                  (incl. the *_parts variants) are stored in the metadata
    :param additional_metadata: extra key/value pairs merged into the metadata
    :raises AlreadyExists: if *name* is already in the manifest
    """
    name = name or self.name
    if name in self.manifest.archives:
        raise self.AlreadyExists(name)
    self.items_buffer.flush(flush=True)
    duration = timedelta(seconds=time.monotonic() - self.start_monotonic)
    if timestamp is None:
        end = datetime.utcnow()
        start = end - duration
    else:
        end = timestamp + duration
        start = timestamp
    self.start = start
    self.end = end
    metadata = {
        'version': 1,
        'name': name,
        'comment': comment or '',
        'items': self.items_buffer.chunks,
        'cmdline': sys.argv,
        'hostname': hostname,
        'username': getuser(),
        'time': start.strftime(ISO_FORMAT),
        'time_end': end.strftime(ISO_FORMAT),
        'chunker_params': self.chunker_params,
    }
    if stats is not None:
        metadata.update({
            'size': stats.osize,
            'csize': stats.csize,
            'nfiles': stats.nfiles,
            'size_parts': stats.osize_parts,
            'csize_parts': stats.csize_parts,
            'nfiles_parts': stats.nfiles_parts})
    metadata.update(additional_metadata or {})
    metadata = ArchiveItem(metadata)
    data = self.key.pack_and_authenticate_metadata(metadata.as_dict(), context=b'archive')
    self.id = self.key.id_hash(data)
    try:
        self.cache.add_chunk(self.id, data, self.stats)
    except IntegrityError as err:
        err_msg = str(err)
        # hack to avoid changing the RPC protocol by introducing new (more specific) exception class
        if 'More than allowed put data' in err_msg:
            raise Error('%s - archive too big (issue #1473)!' % err_msg)
        else:
            raise
    # wait for all outstanding async repository operations before committing
    while self.repository.async_response(wait=True) is not None:
        pass
    self.manifest.archives[name] = (self.id, metadata.time)
    self.manifest.write()
    self.repository.commit(compact=False)
    self.cache.commit()
def calc_stats(self, cache, want_unique=True):
    """Compute Statistics (sizes, file counts, deduplicated size) for this archive.

    Archives with borg >= 1.2 metadata store the counters directly; otherwise
    the item metadata stream is replayed through a CacheSynchronizer.
    """
    have_borg12_meta = self.metadata.get('nfiles') is not None
    if have_borg12_meta and not want_unique:
        unique_csize = 0
    else:
        def add(id):
            # account chunk *id* once (refcount 1) in the per-archive index
            entry = cache.chunks[id]
            archive_index.add(id, 1, entry.size, entry.csize)
        archive_index = ChunkIndex()
        sync = CacheSynchronizer(archive_index)
        add(self.id)
        pi = ProgressIndicatorPercent(total=len(self.metadata.items), msg='Calculating statistics... %3d%%',
                                      msgid='archive.calc_stats')
        for id, chunk in zip(self.metadata.items, self.repository.get_many(self.metadata.items)):
            pi.show(increase=1)
            add(id)
            data = self.key.decrypt(id, chunk)
            sync.feed(data)
        # index 3 of the stats tuple is the unique compressed size
        unique_csize = archive_index.stats_against(cache.chunks)[3]
        pi.finish()
    stats = Statistics()
    stats.usize = unique_csize  # the part files use same chunks as the full file
    if not have_borg12_meta:
        # old metadata: use the totals accumulated by the synchronizer
        if self.consider_part_files:
            stats.nfiles = sync.num_files_totals
            stats.osize = sync.size_totals
            stats.csize = sync.csize_totals
        else:
            stats.nfiles = sync.num_files_totals - sync.num_files_parts
            stats.osize = sync.size_totals - sync.size_parts
            stats.csize = sync.csize_totals - sync.csize_parts
    else:
        # borg 1.2 metadata: counters are stored in the archive metadata
        if self.consider_part_files:
            stats.nfiles = self.metadata.nfiles_parts + self.metadata.nfiles
            stats.osize = self.metadata.size_parts + self.metadata.size
            stats.csize = self.metadata.csize_parts + self.metadata.csize
        else:
            stats.nfiles = self.metadata.nfiles
            stats.osize = self.metadata.size
            stats.csize = self.metadata.csize
    return stats
@contextmanager
def extract_helper(self, dest, item, path, stripped_components, original_path, hardlink_masters):
    """Context manager handling hardlink logic around the extraction of one item.

    Yields hardlink_set: True if the item was realized as a hard link to an
    already-extracted master (the caller should then skip data extraction).
    On exit, records this item as hardlink master target when appropriate.
    """
    hardlink_set = False
    # Hard link?
    if 'source' in item:
        # rebuild the (stripped) link target path below *dest*
        source = os.path.join(dest, *item.source.split(os.sep)[stripped_components:])
        chunks, link_target = hardlink_masters.get(item.source, (None, source))
        if link_target and has_link:
            # Hard link was extracted previously, just link
            with backup_io('link'):
                os.link(link_target, path)
            hardlink_set = True
        elif chunks is not None:
            # assign chunks to this item, since the item which had the chunks was not extracted
            item.chunks = chunks
    yield hardlink_set
    if not hardlink_set and hardlink_masters:
        if has_link:
            # Update master entry with extracted item path, so that following hardlinks don't extract twice.
            # We have hardlinking support, so we will hardlink not extract.
            hardlink_masters[item.get('source') or original_path] = (None, path)
        else:
            # Broken platform with no hardlinking support.
            # In this case, we *want* to extract twice, because there is no other way.
            pass
def extract_item(self, item, restore_attrs=True, dry_run=False, stdout=False, sparse=False,
                 hardlink_masters=None, stripped_components=0, original_path=None, pi=None):
    """
    Extract archive item.

    :param item: the item to extract
    :param restore_attrs: restore file attributes
    :param dry_run: do not write any data
    :param stdout: write extracted data to stdout
    :param sparse: write sparse files (chunk-granularity, independent of the original being sparse)
    :param hardlink_masters: maps paths to (chunks, link_target) for extracting subtrees with hardlinks correctly
    :param stripped_components: stripped leading path components to correct hard link extraction
    :param original_path: 'path' key as stored in archive
    :param pi: ProgressIndicatorPercent (or similar) for file extraction progress (in bytes)
    """
    hardlink_masters = hardlink_masters or {}
    has_damaged_chunks = 'chunks_healthy' in item
    if dry_run or stdout:
        # no filesystem writes: just fetch (and maybe print) the data and verify sizes
        if 'chunks' in item:
            item_chunks_size = 0
            for data in self.pipeline.fetch_many([c.id for c in item.chunks], is_preloaded=True):
                if pi:
                    pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                if stdout:
                    sys.stdout.buffer.write(data)
                item_chunks_size += len(data)
            if stdout:
                sys.stdout.buffer.flush()
            if 'size' in item:
                item_size = item.size
                if item_size != item_chunks_size:
                    raise BackupError('Size inconsistency detected: size {}, chunks size {}'.format(
                        item_size, item_chunks_size))
            if has_damaged_chunks:
                raise BackupError('File has damaged (all-zero) chunks. Try running borg check --repair.')
        return
    original_path = original_path or item.path
    dest = self.cwd
    if item.path.startswith(('/', '../')):
        raise Exception('Path should be relative and local')
    path = os.path.join(dest, item.path)
    # Attempt to remove existing files, ignore errors on failure
    try:
        st = os.stat(path, follow_symlinks=False)
        if stat.S_ISDIR(st.st_mode):
            os.rmdir(path)
        else:
            os.unlink(path)
    except UnicodeEncodeError:
        raise self.IncompatibleFilesystemEncodingError(path, sys.getfilesystemencoding()) from None
    except OSError:
        pass

    def make_parent(path):
        # create missing parent directories of *path*
        parent_dir = os.path.dirname(path)
        if not os.path.exists(parent_dir):
            os.makedirs(parent_dir)

    mode = item.mode
    if stat.S_ISREG(mode):
        with backup_io('makedirs'):
            make_parent(path)
        with self.extract_helper(dest, item, path, stripped_components, original_path,
                                 hardlink_masters) as hardlink_set:
            if hardlink_set:
                # realized as a hard link to an already extracted file - done
                return
            if sparse and self.zeros is None:
                # lazily allocate the all-zero compare buffer for hole detection
                self.zeros = b'\0' * max_chunk_size(*self.chunker_params)
            with backup_io('open'):
                fd = open(path, 'wb')
            with fd:
                ids = [c.id for c in item.chunks]
                for data in self.pipeline.fetch_many(ids, is_preloaded=True):
                    if pi:
                        pi.show(increase=len(data), info=[remove_surrogates(item.path)])
                    with backup_io('write'):
                        if sparse and self.zeros.startswith(data):
                            # all-zero chunk: create a hole in a sparse file
                            fd.seek(len(data), 1)
                        else:
                            fd.write(data)
                with backup_io('truncate_and_attrs'):
                    pos = item_chunks_size = fd.tell()
                    fd.truncate(pos)
                    fd.flush()
                    self.restore_attrs(path, item, fd=fd.fileno())
            if 'size' in item:
                item_size = item.size
                if item_size != item_chunks_size:
                    raise BackupError('Size inconsistency detected: size {}, chunks size {}'.format(
                        item_size, item_chunks_size))
            if has_damaged_chunks:
                raise BackupError('File has damaged (all-zero) chunks. Try running borg check --repair.')
        return
    with backup_io:
        # No repository access beyond this point.
        if stat.S_ISDIR(mode):
            make_parent(path)
            if not os.path.exists(path):
                os.mkdir(path)
            if restore_attrs:
                self.restore_attrs(path, item)
        elif stat.S_ISLNK(mode):
            make_parent(path)
            source = item.source
            try:
                os.symlink(source, path)
            except UnicodeEncodeError:
                raise self.IncompatibleFilesystemEncodingError(source, sys.getfilesystemencoding()) from None
            self.restore_attrs(path, item, symlink=True)
        elif stat.S_ISFIFO(mode):
            make_parent(path)
            with self.extract_helper(dest, item, path, stripped_components, original_path,
                                     hardlink_masters) as hardlink_set:
                if hardlink_set:
                    return
                os.mkfifo(path)
                self.restore_attrs(path, item)
        elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode):
            make_parent(path)
            with self.extract_helper(dest, item, path, stripped_components, original_path,
                                     hardlink_masters) as hardlink_set:
                if hardlink_set:
                    return
                os.mknod(path, item.mode, item.rdev)
                self.restore_attrs(path, item)
        else:
            raise Exception('Unknown archive item type %r' % item.mode)
def restore_attrs(self, path, item, symlink=False, fd=None):
    """
    Restore filesystem attributes on *path* (*fd*) from *item*.

    Does not access the repository.
    """
    backup_io.op = 'attrs'
    uid = gid = None
    if not self.numeric_owner:
        # prefer name-based lookup; fall back to the stored numeric ids below
        uid = user2uid(item.user)
        gid = group2gid(item.group)
    uid = item.uid if uid is None else uid
    gid = item.gid if gid is None else gid
    # This code is a bit of a mess due to os specific differences
    # NOTE(review): on win32 all of the below is skipped - attribute
    # restoration is presumably handled elsewhere on that platform; confirm.
    if not is_win32:
        try:
            if fd:
                os.fchown(fd, uid, gid)
            else:
                os.chown(path, uid, gid, follow_symlinks=False)
        except OSError:
            # best effort: chown commonly fails for non-root users
            pass
        if fd:
            os.fchmod(fd, item.mode)
        elif not symlink:
            os.chmod(path, item.mode)
        elif has_lchmod:  # Not available on Linux
            os.lchmod(path, item.mode)
        mtime = item.mtime
        if 'atime' in item:
            atime = item.atime
        else:
            # old archives only had mtime in item metadata
            atime = mtime
        if 'birthtime' in item:
            birthtime = item.birthtime
            try:
                # This should work on FreeBSD, NetBSD, and Darwin and be harmless on other platforms.
                # See utimes(2) on either of the BSDs for details.
                if fd:
                    os.utime(fd, None, ns=(atime, birthtime))
                else:
                    os.utime(path, None, ns=(atime, birthtime), follow_symlinks=False)
            except OSError:
                # some systems don't support calling utime on a symlink
                pass
        try:
            if fd:
                os.utime(fd, None, ns=(atime, mtime))
            else:
                os.utime(path, None, ns=(atime, mtime), follow_symlinks=False)
        except OSError:
            # some systems don't support calling utime on a symlink
            pass
        acl_set(path, item, self.numeric_owner, fd=fd)
        # chown removes Linux capabilities, so set the extended attributes at the end, after chown, since they include
        # the Linux capabilities in the "security.capability" attribute.
        warning = xattr.set_all(fd or path, item.get('xattrs', {}), follow_symlinks=False)
        if warning:
            set_ec(EXIT_WARNING)
        # bsdflags include the immutable flag and need to be set last:
        if not self.noflags and 'bsdflags' in item:
            try:
                set_flags(path, item.bsdflags, fd=fd)
            except OSError:
                pass
def set_meta(self, key, value):
    """Replace one attribute of the archive metadata and re-store it.

    The metadata gets a new chunk id; the manifest entry is updated and the
    old metadata chunk is dereferenced.
    """
    metadata = self._load_meta(self.id)
    setattr(metadata, key, value)
    data = msgpack.packb(metadata.as_dict())
    new_id = self.key.id_hash(data)
    self.cache.add_chunk(new_id, data, self.stats)
    self.manifest.archives[self.name] = (new_id, metadata.time)
    self.cache.chunk_decref(self.id, self.stats)
    self.id = new_id
def rename(self, name):
    """Rename this archive to *name*; raises AlreadyExists if taken."""
    if name in self.manifest.archives:
        raise self.AlreadyExists(name)
    previous_name = self.name
    self.name = name
    # rewrite the metadata first; only then drop the old manifest entry
    self.set_meta('name', name)
    del self.manifest.archives[previous_name]
def delete(self, stats, progress=False, forced=False):
    """Delete this archive: decref every chunk it references, then remove its
    manifest entry.

    :param stats: Statistics instance that accumulates the freed amounts
    :param progress: show a percentage progress indicator
    :param forced: 0 = raise on any corruption; non-zero = keep going and at
                   least remove the manifest entry and archive metadata chunk
    """
    class ChunksIndexError(Error):
        """Chunk ID {} missing from chunks index, corrupted chunks index - aborting transaction."""

    exception_ignored = object()

    def fetch_async_response(wait=True):
        # collect results (== exceptions) of earlier async repo operations
        try:
            return self.repository.async_response(wait=wait)
        except Repository.ObjectNotFound:
            nonlocal error
            # object not in repo - strange, but we wanted to delete it anyway.
            if forced == 0:
                raise
            error = True
            return exception_ignored  # must not return None here

    def chunk_decref(id, stats, part=False):
        try:
            self.cache.chunk_decref(id, stats, wait=False, part=part)
        except KeyError:
            cid = bin_to_hex(id)
            raise ChunksIndexError(cid)
        else:
            fetch_async_response(wait=False)

    error = False
    try:
        unpacker = msgpack.Unpacker(use_list=False)
        items_ids = self.metadata.items
        pi = ProgressIndicatorPercent(total=len(items_ids), msg="Decrementing references %3.0f%%", msgid='archive.delete')
        for (i, (items_id, data)) in enumerate(zip(items_ids, self.repository.get_many(items_ids))):
            if progress:
                pi.show(i)
            data = self.key.decrypt(items_id, data)
            unpacker.feed(data)
            chunk_decref(items_id, stats)
            try:
                for item in unpacker:
                    item = Item(internal_dict=item)
                    if 'chunks' in item:
                        part = not self.consider_part_files and 'part' in item
                        for chunk_id, size, csize in item.chunks:
                            chunk_decref(chunk_id, stats, part=part)
            except (TypeError, ValueError):
                # if items metadata spans multiple chunks and one chunk got dropped somehow,
                # it could be that unpacker yields bad types
                if forced == 0:
                    raise
                error = True
        if progress:
            pi.finish()
    except (msgpack.UnpackException, Repository.ObjectNotFound):
        # items metadata corrupted
        if forced == 0:
            raise
        error = True
    # in forced delete mode, we try hard to delete at least the manifest entry,
    # if possible also the archive superblock, even if processing the items raises
    # some harmless exception.
    chunk_decref(self.id, stats)
    del self.manifest.archives[self.name]
    while fetch_async_response(wait=True) is not None:
        # we did async deletes, process outstanding results (== exceptions),
        # so there is nothing pending when we return and our caller wants to commit.
        pass
    if error:
        logger.warning('forced deletion succeeded, but the deleted archive was corrupted.')
        logger.warning('borg check --repair is required to free all space.')
@staticmethod
def compare_archives_iter(archive1, archive2, matcher=None, can_compare_chunk_ids=False):
    """
    Yields tuples with a path and an ItemDiff instance describing changes/indicating equality.

    :param matcher: PatternMatcher class to restrict results to only matching paths.
    :param can_compare_chunk_ids: Whether --chunker-params are the same for both archives.
    """
    def hardlink_master_seen(item):
        # True if *item* needs no deferral: it is not a hardlink slave, or its master is known
        return 'source' not in item or not hardlinkable(item.mode) or item.source in hardlink_masters

    def is_hardlink_master(item):
        return item.get('hardlink_master', True) and 'source' not in item

    def update_hardlink_masters(item1, item2):
        if is_hardlink_master(item1) or is_hardlink_master(item2):
            hardlink_masters[item1.path] = (item1, item2)

    def has_hardlink_master(item, hardlink_masters):
        return hardlinkable(item.mode) and item.get('source') in hardlink_masters

    def compare_items(item1, item2):
        # hardlink slaves are compared through their master's content
        if has_hardlink_master(item1, hardlink_masters):
            item1 = hardlink_masters[item1.source][0]
        if has_hardlink_master(item2, hardlink_masters):
            item2 = hardlink_masters[item2.source][1]
        return ItemDiff(item1, item2,
                        archive1.pipeline.fetch_many([c.id for c in item1.get('chunks', [])]),
                        archive2.pipeline.fetch_many([c.id for c in item2.get('chunks', [])]),
                        can_compare_chunk_ids=can_compare_chunk_ids)

    def defer_if_necessary(item1, item2):
        """Adds item tuple to deferred if necessary and returns True, if items were deferred"""
        update_hardlink_masters(item1, item2)
        defer = not hardlink_master_seen(item1) or not hardlink_master_seen(item2)
        if defer:
            deferred.append((item1, item2))
        return defer

    orphans_archive1 = OrderedDict()
    orphans_archive2 = OrderedDict()
    deferred = []
    hardlink_masters = {}

    for item1, item2 in zip_longest(
            archive1.iter_items(lambda item: matcher.match(item.path)),
            archive2.iter_items(lambda item: matcher.match(item.path)),
    ):
        if item1 and item2 and item1.path == item2.path:
            if not defer_if_necessary(item1, item2):
                yield (item1.path, compare_items(item1, item2))
            continue
        if item1:
            matching_orphan = orphans_archive2.pop(item1.path, None)
            if matching_orphan:
                if not defer_if_necessary(item1, matching_orphan):
                    yield (item1.path, compare_items(item1, matching_orphan))
            else:
                orphans_archive1[item1.path] = item1
        if item2:
            matching_orphan = orphans_archive1.pop(item2.path, None)
            if matching_orphan:
                if not defer_if_necessary(matching_orphan, item2):
                    yield (matching_orphan.path, compare_items(matching_orphan, item2))
            else:
                orphans_archive2[item2.path] = item2
    # At this point orphans_* contain items that had no matching partner in the other archive
    for added in orphans_archive2.values():
        path = added.path
        deleted_item = Item.create_deleted(path)
        update_hardlink_masters(deleted_item, added)
        yield (path, compare_items(deleted_item, added))
    for deleted in orphans_archive1.values():
        path = deleted.path
        deleted_item = Item.create_deleted(path)
        update_hardlink_masters(deleted, deleted_item)
        yield (path, compare_items(deleted, deleted_item))
    for item1, item2 in deferred:
        assert hardlink_master_seen(item1)
        assert hardlink_master_seen(item2)
        # bug fix: the original yielded `path` here, which is stale from the orphan
        # loops above (or unbound when there were no orphans at all). Deferred
        # pairs always share one path, so use item1.path.
        yield (item1.path, compare_items(item1, item2))
class MetadataCollector:
    """Collects item metadata (stat fields, xattrs, ACLs, bsdflags) for files being archived."""

    def __init__(self, *, noatime, noctime, numeric_owner, noflags, nobirthtime):
        self.noatime = noatime
        self.noctime = noctime
        self.numeric_owner = numeric_owner
        self.noflags = noflags
        self.nobirthtime = nobirthtime

    def stat_simple_attrs(self, st):
        """Return the basic attribute dict (mode, owner, timestamps) derived from *st*."""
        attrs = {
            'mode': st.st_mode,
            'uid': st.st_uid,
            'gid': st.st_gid,
            'mtime': safe_ns(st.st_mtime_ns),
        }
        # borg can work with archives only having mtime (older attic archives do not have
        # atime/ctime). it can be useful to omit atime/ctime, if they change without the
        # file content changing - e.g. to get better metadata deduplication.
        if not self.noatime:
            attrs['atime'] = safe_ns(st.st_atime_ns)
        if not self.noctime:
            attrs['ctime'] = safe_ns(st.st_ctime_ns)
        if not self.nobirthtime and hasattr(st, 'st_birthtime'):
            # sadly, there's no stat_result.st_birthtime_ns
            attrs['birthtime'] = safe_ns(int(st.st_birthtime * 10 ** 9))
        if self.numeric_owner:
            attrs['user'] = attrs['group'] = None
        else:
            attrs['user'] = uid2user(st.st_uid)
            attrs['group'] = gid2group(st.st_gid)
        return attrs

    def stat_ext_attrs(self, st, path, fd=None):
        """Return extended attributes (xattrs, ACLs, bsdflags) for *path* / *fd*."""
        attrs = {}
        bsdflags = 0
        with backup_io('extended stat'):
            if not self.noflags:
                bsdflags = get_flags(path, st, fd=fd)
            extended = xattr.get_all(fd or path, follow_symlinks=False)
            acl_get(path, attrs, st, self.numeric_owner, fd=fd)
        if extended:
            attrs['xattrs'] = StableDict(extended)
        if bsdflags:
            attrs['bsdflags'] = bsdflags
        return attrs

    def stat_attrs(self, st, path, fd=None):
        """Return the complete attribute dict: simple attrs merged with extended attrs."""
        combined = self.stat_simple_attrs(st)
        combined.update(self.stat_ext_attrs(st, path, fd=fd))
        return combined
class ChunksProcessor:
    # Processes an iterator of chunks for an Item

    def __init__(self, *, key, cache,
                 add_item, write_checkpoint,
                 checkpoint_interval, rechunkify):
        self.key = key
        self.cache = cache
        self.add_item = add_item
        self.write_checkpoint = write_checkpoint
        self.checkpoint_interval = checkpoint_interval
        self.last_checkpoint = time.monotonic()
        self.rechunkify = rechunkify

    def write_part_file(self, item, from_chunk, number):
        """Write a '.borg_part_N' checkpoint item covering item.chunks[from_chunk:]
        and a checkpoint archive; returns (new_from_chunk, next_part_number)."""
        item = Item(internal_dict=item.as_dict())
        length = len(item.chunks)
        # the item should only have the *additional* chunks we processed after the last partial item:
        item.chunks = item.chunks[from_chunk:]
        # for borg recreate, we already have a size member in the source item (giving the total file size),
        # but we consider only a part of the file here, thus we must recompute the size from the chunks:
        item.get_size(memorize=True, from_chunks=True)
        item.path += '.borg_part_%d' % number
        item.part = number
        number += 1
        self.add_item(item, show_progress=False)
        self.write_checkpoint()
        return length, number

    def maybe_checkpoint(self, item, from_chunk, part_number, forced=False):
        """Write a checkpoint part file if forced, requested via SIGINT, or
        checkpoint_interval seconds elapsed since the last checkpoint."""
        sig_int_triggered = sig_int and sig_int.action_triggered()
        if forced or sig_int_triggered or \
                self.checkpoint_interval and time.monotonic() - self.last_checkpoint > self.checkpoint_interval:
            if sig_int_triggered:
                logger.info('checkpoint requested: starting checkpoint creation...')
            from_chunk, part_number = self.write_part_file(item, from_chunk, part_number)
            self.last_checkpoint = time.monotonic()
            if sig_int_triggered:
                sig_int.action_completed()
                logger.info('checkpoint requested: finished checkpoint creation!')
        return from_chunk, part_number

    def process_file_chunks(self, item, cache, stats, show_progress, chunk_iter, chunk_processor=None):
        """Store all chunks from *chunk_iter* and attach the resulting chunk list
        to item.chunks, writing checkpoint part files along the way."""
        if not chunk_processor:
            def chunk_processor(data):
                # default: add the chunk to the cache/repository without blocking
                chunk_entry = cache.add_chunk(self.key.id_hash(data), data, stats, wait=False)
                self.cache.repository.async_response(wait=False)
                return chunk_entry

        item.chunks = []
        # if we rechunkify, we'll get a fundamentally different chunks list, thus we need
        # to get rid of .chunks_healthy, as it might not correspond to .chunks any more.
        if self.rechunkify and 'chunks_healthy' in item:
            del item.chunks_healthy
        from_chunk = 0
        part_number = 1
        for data in chunk_iter:
            item.chunks.append(chunk_processor(data))
            if show_progress:
                stats.show_progress(item=item, dt=0.2)
            from_chunk, part_number = self.maybe_checkpoint(item, from_chunk, part_number, forced=False)
        else:
            # for/else: runs when the iterator is exhausted (no break occurs above)
            if part_number > 1:
                if item.chunks[from_chunk:]:
                    # if we already have created a part item inside this file, we want to put the final
                    # chunks (if any) into a part item also (so all parts can be concatenated to get
                    # the complete file):
                    from_chunk, part_number = self.maybe_checkpoint(item, from_chunk, part_number, forced=True)
                # if we created part files, we have referenced all chunks from the part files,
                # but we also will reference the same chunks also from the final, complete file:
                for chunk in item.chunks:
                    cache.chunk_incref(chunk.id, stats, size=chunk.size, part=True)
                stats.nfiles_parts += part_number - 1
class FilesystemObjectProcessors:
    """Turns filesystem objects (dirs, files, fifos, devices, symlinks, pipes)
    into archive items during 'borg create'."""
    # When ported to threading, then this doesn't need chunker, cache, key any more.
    # write_checkpoint should then be in the item buffer,
    # and process_file becomes a callback passed to __init__.

    def __init__(self, *, metadata_collector, cache, key,
                 add_item, process_file_chunks,
                 chunker_params, show_progress):
        self.metadata_collector = metadata_collector
        self.cache = cache
        self.key = key
        self.add_item = add_item
        self.process_file_chunks = process_file_chunks
        self.show_progress = show_progress
        self.hard_links = {}
        self.stats = Statistics()  # threading: done by cache (including progress)
        self.cwd = os.getcwd()
        self.chunker = get_chunker(*chunker_params, seed=key.chunk_seed)
@contextmanager
def create_helper(self, path, st, status=None, hardlinkable=True):
    """Create an Item for *path* and handle hardlink bookkeeping around its processing.

    Yields (item, status, hardlinked, hardlink_master). On clean exit of the
    with-block the item is added to the archive; if it is a hardlink master
    its path is remembered so later links to the same inode reference it.
    """
    safe_path = make_path_safe(path)
    item = Item(path=safe_path)
    hardlink_master = False
    hardlinked = hardlinkable and st.st_nlink > 1
    if hardlinked:
        source = self.hard_links.get((st.st_ino, st.st_dev))
        if source is not None:
            item.source = source
            status = 'h'  # hardlink (to already seen inodes)
        else:
            hardlink_master = True
    yield item, status, hardlinked, hardlink_master
    # if we get here, "with"-block worked ok without error/exception, the item was processed ok...
    self.add_item(item, stats=self.stats)
    # ... and added to the archive, so we can remember it to refer to it later in the archive:
    if hardlink_master:
        self.hard_links[(st.st_ino, st.st_dev)] = safe_path
def process_dir_with_fd(self, *, path, fd, st):
    """Archive a directory for which an open *fd* already exists."""
    helper = self.create_helper(path, st, 'd', hardlinkable=False)
    with helper as (item, status, _, _):
        item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
        return status
def process_dir(self, *, path, parent_fd, name, st):
    """Archive directory *name* below *parent_fd*, re-stat'ing after open to
    detect a race; returns the single-char status code."""
    with self.create_helper(path, st, 'd', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
        with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_dir,
                    noatime=True, op='dir_open') as fd:
            # fd is None for directories on windows, in that case a race condition check is not possible.
            if fd is not None:
                with backup_io('fstat'):
                    st = stat_update_check(st, os.fstat(fd))
            item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
            return status
def process_fifo(self, *, path, parent_fd, name, st):
    """Archive a FIFO (named pipe); returns the single-char status code."""
    with self.create_helper(path, st, 'f') as (item, status, hardlinked, hardlink_master):  # fifo
        with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags_normal, noatime=True) as fd:
            with backup_io('fstat'):
                # re-stat via the open fd to detect a race since the initial stat
                st = stat_update_check(st, os.fstat(fd))
            item.update(self.metadata_collector.stat_attrs(st, path, fd=fd))
            return status
def process_dev(self, *, path, parent_fd, name, st, dev_type):
    """Archive a character/block device node (*dev_type* is its status letter)."""
    with self.create_helper(path, st, dev_type) as (item, status, hardlinked, hardlink_master):  # char/block device
        # looks like we can not work fd-based here without causing issues when trying to open/close the device
        with backup_io('stat'):
            st = stat_update_check(st, os.stat(name, dir_fd=parent_fd, follow_symlinks=False))
        item.rdev = st.st_rdev  # device number, needed to recreate the node on extract
        item.update(self.metadata_collector.stat_attrs(st, path))
        return status
def process_symlink(self, *, path, parent_fd, name, st):
    """Archive a symlink, storing its target in item.source."""
    # note: using hardlinkable=False because we can not support hardlinked symlinks,
    # due to the dual-use of item.source, see issue #2343:
    # hardlinked symlinks will be archived [and extracted] as non-hardlinked symlinks.
    with self.create_helper(path, st, 's', hardlinkable=False) as (item, status, hardlinked, hardlink_master):
        if name is not None and parent_fd is not None:
            link_name = name
        else:
            link_name = path
        with backup_io('readlink'):
            target = os.readlink(link_name, dir_fd=parent_fd)
        item.source = target
        item.update(self.metadata_collector.stat_attrs(st, path))  # can't use FD here?
        return status
def process_pipe(self, *, path, cache, fd, mode, user, group):
    """Archive data read from a pipe/stdin *fd* as a regular file owned by
    *user*/*group*; returns status 'i' (stdin).

    :raises Error: if *user* or *group* cannot be resolved to uid/gid
    """
    uid = user2uid(user)
    if uid is None:
        raise Error("no such user: %s" % user)
    gid = group2gid(group)
    if gid is None:
        raise Error("no such group: %s" % group)
    t = int(time.time()) * 1000000000  # current time in nanoseconds
    item = Item(
        path=path,
        mode=mode & 0o107777 | 0o100000,  # forcing regular file mode
        uid=uid, user=user,
        gid=gid, group=group,
        mtime=t, atime=t, ctime=t,
    )
    self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(fd)))
    item.get_size(memorize=True)
    self.stats.nfiles += 1
    self.add_item(item, stats=self.stats)
    return 'i'  # stdin
def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal):
    """Archive a regular file, using the files cache to skip unchanged files.

    Status letters returned: 'A' added, 'M' modified, 'U' unchanged (all chunks
    reused from cache), 'C' changed while being backed up.
    """
    with self.create_helper(path, st, None) as (item, status, hardlinked, hardlink_master):  # no status yet
        with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=True) as fd:
            with backup_io('fstat'):
                st = stat_update_check(st, os.fstat(fd))
            item.update(self.metadata_collector.stat_simple_attrs(st))
            is_special_file = is_special(st.st_mode)
            if is_special_file:
                # we process a special file like a regular file. reflect that in mode,
                # so it can be extracted / accessed in FUSE mount like a regular file.
                # this needs to be done early, so that part files also get the patched mode.
                item.mode = stat.S_IFREG | stat.S_IMODE(item.mode)
            # only the hardlink master (or a non-hardlinked file) carries chunks
            if not hardlinked or hardlink_master:
                if not is_special_file:
                    # files cache lookup key is the hash of the full (cwd-joined) path
                    path_hash = self.key.id_hash(safe_encode(os.path.join(self.cwd, path)))
                    known, ids = cache.file_known_and_unchanged(path_hash, st)
                else:
                    # in --read-special mode, we may be called for special files.
                    # there should be no information in the cache about special files processed in
                    # read-special mode, but we better play safe as this was wrong in the past:
                    path_hash = None
                    known, ids = False, None
                chunks = None
                if ids is not None:
                    # Make sure all ids are available
                    for id_ in ids:
                        if not cache.seen_chunk(id_):
                            status = 'M'  # cache said it is unmodified, but we lost a chunk: process file like modified
                            break
                    else:
                        chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids]
                        status = 'U'  # regular file, unchanged
                else:
                    status = 'M' if known else 'A'  # regular file, modified or added
                item.hardlink_master = hardlinked
                # Only chunkify the file if needed
                if chunks is not None:
                    item.chunks = chunks
                else:
                    with backup_io('read'):
                        self.process_file_chunks(item, cache, self.stats, self.show_progress, backup_io_iter(self.chunker.chunkify(None, fd)))
                    if is_win32:
                        changed_while_backup = False  # TODO
                    else:
                        with backup_io('fstat2'):
                            st2 = os.fstat(fd)
                        # special files:
                        # - fifos change naturally, because they are fed from the other side. no problem.
                        # - blk/chr devices don't change ctime anyway.
                        changed_while_backup = not is_special_file and st.st_ctime_ns != st2.st_ctime_ns
                    if changed_while_backup:
                        status = 'C'  # regular file changed while we backed it up, might be inconsistent/corrupt!
                    if not is_special_file and not changed_while_backup:
                        # we must not memorize special files, because the contents of e.g. a
                        # block or char device will change without its mtime/size/inode changing.
                        # also, we must not memorize a potentially inconsistent/corrupt file that
                        # changed while we backed it up.
                        cache.memorize_file(path_hash, st, [c.id for c in item.chunks])
                self.stats.nfiles += 1
            item.update(self.metadata_collector.stat_ext_attrs(st, path, fd=fd))
            item.get_size(memorize=True)
            return status
def valid_msgpacked_dict(d, keys_serialized):
    """Heuristically check whether byte string *d* starts with a msgpack-serialized
    dict whose first key is one of the (already msgpack-serialized) *keys_serialized*.
    """
    if not d:
        return False
    # how many header bytes precede the first key?
    marker = d[0]
    if marker & 0xf0 == 0x80:       # fixmap (up to 15 elements)
        header_len = 1
    elif marker == 0xde:            # map16 (up to 2^16-1 elements)
        header_len = 3
    else:
        # not a map (dict); note: we must not have dicts with > 2^16-1 elements
        return False
    if len(d) <= header_len:
        return False
    # the first key must be a msgpack string: fixstr (up to 31 chars) or str8/str16/str32
    key_marker = d[header_len]
    if not (key_marker & 0xe0 == 0xa0 or key_marker in (0xd9, 0xda, 0xdb)):
        return False
    # finally: is the key one of the expected serialized key names?
    tail = d[header_len:]
    return any(tail.startswith(serialized_key) for serialized_key in keys_serialized)
class RobustUnpacker:
    """A restartable/robust version of the streaming msgpack unpacker.

    After resync() is called, fed data is buffered and scanned byte-by-byte
    for the next spot that looks like (and validates as) an item dict, so a
    corrupted stream can be skipped over instead of aborting iteration.
    """
    def __init__(self, validator, item_keys):
        super().__init__()
        # serialized key names used to cheaply detect a plausible item dict start
        self.item_keys = [msgpack.packb(name.encode()) for name in item_keys]
        self.validator = validator
        self._buffered_data = []
        self._resync = False
        self._unpacker = msgpack.Unpacker(object_hook=StableDict)

    def resync(self):
        # enter resync mode: discard previous buffer, start scanning on next feed
        self._buffered_data = []
        self._resync = True

    def feed(self, data):
        if self._resync:
            self._buffered_data.append(data)
        else:
            self._unpacker.feed(data)

    def __iter__(self):
        return self

    def __next__(self):
        if self._resync:
            data = b''.join(self._buffered_data)
            # advance one byte at a time until a validating item can be unpacked
            while self._resync:
                if not data:
                    raise StopIteration
                # Abort early if the data does not look like a serialized item dict
                if not valid_msgpacked_dict(data, self.item_keys):
                    data = data[1:]
                    continue
                # fresh unpacker: previous one may hold poisoned internal state
                self._unpacker = msgpack.Unpacker(object_hook=StableDict)
                self._unpacker.feed(data)
                try:
                    item = next(self._unpacker)
                except (msgpack.UnpackException, StopIteration):
                    # as long as we are resyncing, we also ignore StopIteration
                    pass
                else:
                    if self.validator(item):
                        self._resync = False
                        return item
                data = data[1:]
        else:
            return next(self._unpacker)
class ArchiveChecker:
    """Verifies the consistency of archives in a repository and optionally repairs them."""

    def __init__(self):
        # set to True as soon as any inconsistency is detected
        self.error_found = False
        # chunk ids that may have become unreferenced while rewriting archive metadata
        self.possibly_superseded = set()
def check(self, repository, repair=False, archive=None, first=0, last=0, sort_by='', glob=None,
          verify_data=False, save_space=False):
    """Perform a set of checks on 'repository'

    :param repair: enable repair mode, write updated or corrected data into repository
    :param archive: only check this archive
    :param first/last/sort_by: only check this number of first/last archives ordered by sort_by
    :param glob: only check archives matching this glob
    :param verify_data: integrity verification of data referenced by archives
    :param save_space: Repository.commit(save_space)
    :return: True if no problems were found (or repair mode is on), else False
    """
    logger.info('Starting archive consistency check...')
    # the orphan chunks check is only meaningful when *all* archives are checked
    self.check_all = archive is None and not any((first, last, glob))
    self.repair = repair
    self.repository = repository
    self.init_chunks()
    if not self.chunks:
        logger.error('Repository contains no apparent data at all, cannot continue check/repair.')
        return False
    self.key = self.identify_key(repository)
    if verify_data:
        self.verify_data()
    if Manifest.MANIFEST_ID not in self.chunks:
        logger.error("Repository manifest not found!")
        self.error_found = True
        self.manifest = self.rebuild_manifest()
    else:
        try:
            self.manifest, _ = Manifest.load(repository, (Manifest.Operation.CHECK,), key=self.key)
        except IntegrityErrorBase as exc:
            logger.error('Repository manifest is corrupted: %s', exc)
            self.error_found = True
            # drop the corrupt manifest chunk and reconstruct from archive metadata
            del self.chunks[Manifest.MANIFEST_ID]
            self.manifest = self.rebuild_manifest()
    self.rebuild_refcounts(archive=archive, first=first, last=last, sort_by=sort_by, glob=glob)
    self.orphan_chunks_check()
    self.finish(save_space=save_space)
    if self.error_found:
        logger.error('Archive consistency check complete, problems found.')
    else:
        logger.info('Archive consistency check complete, no problems found.')
    return self.repair or not self.error_found
def init_chunks(self):
    """Fetch a list of all object keys from repository
    """
    # Explicitly set the initial usable hash table capacity to avoid performance issues
    # due to hash table "resonance".
    # Since reconstruction of archive items can add some new chunks, add 10 % headroom.
    self.chunks = ChunkIndex(usable=len(self.repository) * 1.1)
    marker = None
    # paginate through the repository key list
    while True:
        result = self.repository.list(limit=LIST_SCAN_LIMIT, marker=marker)
        if not result:
            break
        marker = result[-1]
        # all chunks start at refcount 0; rebuild_refcounts increments them later
        init_entry = ChunkIndexEntry(refcount=0, size=0, csize=0)
        for id_ in result:
            self.chunks[id_] = init_entry
def identify_key(self, repository):
    """Determine the encryption key type by decoding an arbitrary chunk.

    :return: a key instance, or None if the repository holds no chunks at all
    """
    try:
        some_chunkid, _ = next(self.chunks.iteritems())
    except StopIteration:
        # repo is completely empty, no chunks
        return None
    cdata = repository.get(some_chunkid)
    return key_factory(repository, cdata)
def verify_data(self):
    """Cryptographically verify every chunk in the repository.

    Reads all chunks in on-disk (segment) order, decrypts/authenticates each one,
    and records defect chunks. In repair mode, chunks that fail twice are deleted
    so that subsequent repair steps can replace them with all-zero chunks.
    """
    logger.info('Starting cryptographic data integrity verification...')
    chunks_count_index = len(self.chunks)
    chunks_count_segments = 0
    errors = 0
    defect_chunks = []
    pi = ProgressIndicatorPercent(total=chunks_count_index, msg="Verifying data %6.2f%%", step=0.01,
                                  msgid='check.verify_data')
    marker = None
    while True:
        # scan() yields chunk ids in on-disk order, which is much faster to read
        chunk_ids = self.repository.scan(limit=100, marker=marker)
        if not chunk_ids:
            break
        chunks_count_segments += len(chunk_ids)
        marker = chunk_ids[-1]
        chunk_data_iter = self.repository.get_many(chunk_ids)
        chunk_ids_revd = list(reversed(chunk_ids))
        while chunk_ids_revd:
            pi.show()
            chunk_id = chunk_ids_revd.pop(-1)  # better efficiency
            try:
                encrypted_data = next(chunk_data_iter)
            except (Repository.ObjectNotFound, IntegrityErrorBase) as err:
                self.error_found = True
                errors += 1
                logger.error('chunk %s: %s', bin_to_hex(chunk_id), err)
                if isinstance(err, IntegrityErrorBase):
                    defect_chunks.append(chunk_id)
                # as the exception killed our generator, make a new one for remaining chunks:
                if chunk_ids_revd:
                    chunk_ids = list(reversed(chunk_ids_revd))
                    chunk_data_iter = self.repository.get_many(chunk_ids)
            else:
                # the manifest chunk is keyed specially (id None) for decryption
                _chunk_id = None if chunk_id == Manifest.MANIFEST_ID else chunk_id
                try:
                    self.key.decrypt(_chunk_id, encrypted_data)
                except IntegrityErrorBase as integrity_error:
                    self.error_found = True
                    errors += 1
                    logger.error('chunk %s, integrity error: %s', bin_to_hex(chunk_id), integrity_error)
                    defect_chunks.append(chunk_id)
    pi.finish()
    if chunks_count_index != chunks_count_segments:
        logger.error('Repo/Chunks index object count vs. segment files object count mismatch.')
        logger.error('Repo/Chunks index: %d objects != segment files: %d objects',
                     chunks_count_index, chunks_count_segments)
    if defect_chunks:
        if self.repair:
            # if we kill the defect chunk here, subsequent actions within this "borg check"
            # run will find missing chunks and replace them with all-zero replacement
            # chunks and flag the files as "repaired".
            # if another backup is done later and the missing chunks get backupped again,
            # a "borg check" afterwards can heal all files where this chunk was missing.
            logger.warning('Found defect chunks. They will be deleted now, so affected files can '
                           'get repaired now and maybe healed later.')
            for defect_chunk in defect_chunks:
                # remote repo (ssh): retry might help for strange network / NIC / RAM errors
                # as the chunk will be retransmitted from remote server.
                # local repo (fs): as chunks.iteritems loop usually pumps a lot of data through,
                # a defect chunk is likely not in the fs cache any more and really gets re-read
                # from the underlying media.
                try:
                    encrypted_data = self.repository.get(defect_chunk)
                    _chunk_id = None if defect_chunk == Manifest.MANIFEST_ID else defect_chunk
                    self.key.decrypt(_chunk_id, encrypted_data)
                except IntegrityErrorBase:
                    # failed twice -> get rid of this chunk
                    del self.chunks[defect_chunk]
                    self.repository.delete(defect_chunk)
                    logger.debug('chunk %s deleted.', bin_to_hex(defect_chunk))
                else:
                    # bugfix: the %s placeholder previously had no argument, so the
                    # literal '%s' was logged instead of the chunk id.
                    logger.warning('chunk %s not deleted, did not consistently fail.',
                                   bin_to_hex(defect_chunk))
        else:
            logger.warning('Found defect chunks. With --repair, they would get deleted, so affected '
                           'files could get repaired then and maybe healed later.')
            for defect_chunk in defect_chunks:
                logger.debug('chunk %s is defect.', bin_to_hex(defect_chunk))
    log = logger.error if errors else logger.info
    log('Finished cryptographic data integrity verification, verified %d chunks with %d integrity errors.',
        chunks_count_segments, errors)
def rebuild_manifest(self):
    """Rebuild the manifest object if it is missing

    Iterates through all objects in the repository looking for archive metadata blocks.

    :return: the reconstructed Manifest instance
    """
    required_archive_keys = frozenset(key.encode() for key in REQUIRED_ARCHIVE_KEYS)

    def valid_archive(obj):
        # an archive metadata dict must at least contain all required keys
        if not isinstance(obj, dict):
            return False
        keys = set(obj)
        return required_archive_keys.issubset(keys)

    logger.info('Rebuilding missing manifest, this might take some time...')
    # as we have lost the manifest, we do not know any more what valid item keys we had.
    # collecting any key we encounter in a damaged repo seems unwise, thus we just use
    # the hardcoded list from the source code. thus, it is not recommended to rebuild a
    # lost manifest on a older borg version than the most recent one that was ever used
    # within this repository (assuming that newer borg versions support more item keys).
    manifest = Manifest(self.key, self.repository)
    archive_keys_serialized = [msgpack.packb(name.encode()) for name in ARCHIVE_KEYS]
    pi = ProgressIndicatorPercent(total=len(self.chunks), msg="Rebuilding manifest %6.2f%%", step=0.01,
                                  msgid='check.rebuild_manifest')
    for chunk_id, _ in self.chunks.iteritems():
        pi.show()
        cdata = self.repository.get(chunk_id)
        try:
            data = self.key.decrypt(chunk_id, cdata)
        except IntegrityErrorBase as exc:
            logger.error('Skipping corrupted chunk: %s', exc)
            self.error_found = True
            continue
        # cheap pre-filters before attempting a full msgpack unpack:
        if not valid_msgpacked_dict(data, archive_keys_serialized):
            continue
        if b'cmdline' not in data or b'\xa7version\x01' not in data:
            continue
        try:
            archive = msgpack.unpackb(data)
        # Ignore exceptions that might be raised when feeding msgpack with invalid data
        except msgpack.UnpackException:
            continue
        if valid_archive(archive):
            archive = ArchiveItem(internal_dict=archive)
            name = archive.name
            logger.info('Found archive %s', name)
            if name in manifest.archives:
                # disambiguate duplicate names by appending a numeric suffix
                i = 1
                while True:
                    new_name = '%s.%d' % (name, i)
                    if new_name not in manifest.archives:
                        break
                    i += 1
                logger.warning('Duplicate archive name %s, storing as %s', name, new_name)
                name = new_name
            manifest.archives[name] = (chunk_id, archive.time)
    pi.finish()
    logger.info('Manifest rebuild complete.')
    return manifest
def rebuild_refcounts(self, archive=None, first=0, last=0, sort_by='', glob=None):
    """Rebuild object reference counts by walking the metadata

    Missing and/or incorrect data is repaired when detected
    """
    # Exclude the manifest from chunks (manifest entry might be already deleted from self.chunks)
    self.chunks.pop(Manifest.MANIFEST_ID, None)

    def mark_as_possibly_superseded(id_):
        # remember ids whose refcount is (still) zero; they may become orphans
        if self.chunks.get(id_, ChunkIndexEntry(0, 0, 0)).refcount == 0:
            self.possibly_superseded.add(id_)

    def add_callback(chunk):
        # used as ChunkBuffer.write_chunk: store a freshly built metadata chunk
        id_ = self.key.id_hash(chunk)
        cdata = self.key.encrypt(chunk)
        add_reference(id_, len(chunk), len(cdata), cdata)
        return id_

    def add_reference(id_, size, csize, cdata=None):
        # increment refcount; if the chunk is unknown, register it and (in repair
        # mode) also write the supplied cdata into the repository
        try:
            self.chunks.incref(id_)
        except KeyError:
            assert cdata is not None
            self.chunks[id_] = ChunkIndexEntry(refcount=1, size=size, csize=csize)
            if self.repair:
                self.repository.put(id_, cdata)

    def verify_file_chunks(archive_name, item):
        """Verifies that all file chunks are present.

        Missing file chunks will be replaced with new chunks of the same length containing all zeros.
        If a previously missing file chunk re-appears, the replacement chunk is replaced by the correct one.
        """
        def replacement_chunk(size):
            # an all-zero chunk of the same length as the missing one
            data = bytes(size)
            chunk_id = self.key.id_hash(data)
            cdata = self.key.encrypt(data)
            csize = len(cdata)
            return chunk_id, size, csize, cdata

        offset = 0
        chunk_list = []
        chunks_replaced = False
        has_chunks_healthy = 'chunks_healthy' in item
        chunks_current = item.chunks
        chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current
        if has_chunks_healthy and len(chunks_current) != len(chunks_healthy):
            # should never happen, but there was issue #3218.
            logger.warning('{}: {}: Invalid chunks_healthy metadata removed!'.format(archive_name, item.path))
            del item.chunks_healthy
            has_chunks_healthy = False
            chunks_healthy = chunks_current
        for chunk_current, chunk_healthy in zip(chunks_current, chunks_healthy):
            chunk_id, size, csize = chunk_healthy
            if chunk_id not in self.chunks:
                # a chunk of the healthy list is missing
                if chunk_current == chunk_healthy:
                    logger.error('{}: {}: New missing file chunk detected (Byte {}-{}). '
                                 'Replacing with all-zero chunk.'.format(
                                 archive_name, item.path, offset, offset + size))
                    self.error_found = chunks_replaced = True
                    chunk_id, size, csize, cdata = replacement_chunk(size)
                    add_reference(chunk_id, size, csize, cdata)
                else:
                    logger.info('{}: {}: Previously missing file chunk is still missing (Byte {}-{}). It has a '
                                'all-zero replacement chunk already.'.format(
                                archive_name, item.path, offset, offset + size))
                    chunk_id, size, csize = chunk_current
                    if chunk_id in self.chunks:
                        add_reference(chunk_id, size, csize)
                    else:
                        logger.warning('{}: {}: Missing all-zero replacement chunk detected (Byte {}-{}). '
                                       'Generating new replacement chunk.'.format(
                                       archive_name, item.path, offset, offset + size))
                        self.error_found = chunks_replaced = True
                        chunk_id, size, csize, cdata = replacement_chunk(size)
                        add_reference(chunk_id, size, csize, cdata)
            else:
                if chunk_current == chunk_healthy:
                    # normal case, all fine.
                    add_reference(chunk_id, size, csize)
                else:
                    logger.info('{}: {}: Healed previously missing file chunk! '
                                '(Byte {}-{}).'.format(archive_name, item.path, offset, offset + size))
                    add_reference(chunk_id, size, csize)
                    mark_as_possibly_superseded(chunk_current[0])  # maybe orphaned the all-zero replacement chunk
            chunk_list.append([chunk_id, size, csize])  # list-typed element as chunks_healthy is list-of-lists
            offset += size
        if chunks_replaced and not has_chunks_healthy:
            # if this is first repair, remember the correct chunk IDs, so we can maybe heal the file later
            item.chunks_healthy = item.chunks
        if has_chunks_healthy and chunk_list == chunks_healthy:
            logger.info('{}: {}: Completely healed previously damaged file!'.format(archive_name, item.path))
            del item.chunks_healthy
        item.chunks = chunk_list
        if 'size' in item:
            item_size = item.size
            item_chunks_size = item.get_size(compressed=False, from_chunks=True)
            if item_size != item_chunks_size:
                # just warn, but keep the inconsistency, so that borg extract can warn about it.
                logger.warning('{}: {}: size inconsistency detected: size {}, chunks size {}'.format(
                    archive_name, item.path, item_size, item_chunks_size))

    def robust_iterator(archive):
        """Iterates through all archive items

        Missing item chunks will be skipped and the msgpack stream will be restarted
        """
        item_keys = frozenset(key.encode() for key in self.manifest.item_keys)
        required_item_keys = frozenset(key.encode() for key in REQUIRED_ITEM_KEYS)
        unpacker = RobustUnpacker(lambda item: isinstance(item, StableDict) and b'path' in item,
                                  self.manifest.item_keys)
        _state = 0

        def missing_chunk_detector(chunk_id):
            # groupby key: increments whenever presence/absence of chunks flips,
            # so odd states are runs of missing chunks, even states are present runs
            nonlocal _state
            if _state % 2 != int(chunk_id not in self.chunks):
                _state += 1
            return _state

        def report(msg, chunk_id, chunk_no):
            cid = bin_to_hex(chunk_id)
            msg += ' [chunk: %06d_%s]' % (chunk_no, cid)  # see "debug dump-archive-items"
            self.error_found = True
            logger.error(msg)

        def list_keys_safe(keys):
            return ', '.join((k.decode(errors='replace') if isinstance(k, bytes) else str(k) for k in keys))

        def valid_item(obj):
            # returns (valid, reason) - reason only meaningful when not valid
            if not isinstance(obj, StableDict):
                return False, 'not a dictionary'
            # A bug in Attic up to and including release 0.13 added a (meaningless) b'acl' key to every item.
            # We ignore it here, should it exist. See test_attic013_acl_bug for details.
            obj.pop(b'acl', None)
            keys = set(obj)
            if not required_item_keys.issubset(keys):
                return False, 'missing required keys: ' + list_keys_safe(required_item_keys - keys)
            if not keys.issubset(item_keys):
                return False, 'invalid keys: ' + list_keys_safe(keys - item_keys)
            return True, ''

        i = 0
        for state, items in groupby(archive.items, missing_chunk_detector):
            items = list(items)
            if state % 2:
                # this run of metadata chunks is missing - report and skip it
                for chunk_id in items:
                    report('item metadata chunk missing', chunk_id, i)
                    i += 1
                continue
            if state > 0:
                # we skipped over missing chunks, so resync the msgpack stream
                unpacker.resync()
            for chunk_id, cdata in zip(items, repository.get_many(items)):
                data = self.key.decrypt(chunk_id, cdata)
                unpacker.feed(data)
                try:
                    for item in unpacker:
                        valid, reason = valid_item(item)
                        if valid:
                            yield Item(internal_dict=item)
                        else:
                            report('Did not get expected metadata dict when unpacking item metadata (%s)' % reason, chunk_id, i)
                except msgpack.UnpackException:
                    report('Unpacker crashed while unpacking item metadata, trying to resync...', chunk_id, i)
                    unpacker.resync()
                except Exception:
                    report('Exception while unpacking item metadata', chunk_id, i)
                    raise
                i += 1

    # select the archives to check
    if archive is None:
        sort_by = sort_by.split(',')
        if any((first, last, glob)):
            archive_infos = self.manifest.archives.list(sort_by=sort_by, glob=glob, first=first, last=last)
            if glob and not archive_infos:
                logger.warning('--glob-archives %s does not match any archives', glob)
            if first and len(archive_infos) < first:
                logger.warning('--first %d archives: only found %d archives', first, len(archive_infos))
            if last and len(archive_infos) < last:
                logger.warning('--last %d archives: only found %d archives', last, len(archive_infos))
        else:
            archive_infos = self.manifest.archives.list(sort_by=sort_by)
    else:
        # we only want one specific archive
        try:
            archive_infos = [self.manifest.archives[archive]]
        except KeyError:
            logger.error("Archive '%s' not found.", archive)
            self.error_found = True
            return
    num_archives = len(archive_infos)

    with cache_if_remote(self.repository) as repository:
        for i, info in enumerate(archive_infos):
            logger.info('Analyzing archive {} ({}/{})'.format(info.name, i + 1, num_archives))
            archive_id = info.id
            if archive_id not in self.chunks:
                logger.error('Archive metadata block is missing!')
                self.error_found = True
                del self.manifest.archives[info.name]
                continue
            mark_as_possibly_superseded(archive_id)
            cdata = self.repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            archive = ArchiveItem(internal_dict=msgpack.unpackb(data))
            if archive.version != 1:
                raise Exception('Unknown archive metadata version')
            archive.cmdline = [safe_decode(arg) for arg in archive.cmdline]
            # rewrite the item metadata stream through a new ChunkBuffer, so that
            # repaired items replace the old ones and refcounts get established
            items_buffer = ChunkBuffer(self.key)
            items_buffer.write_chunk = add_callback
            for item in robust_iterator(archive):
                if 'chunks' in item:
                    verify_file_chunks(info.name, item)
                items_buffer.add(item)
            items_buffer.flush(flush=True)
            for previous_item_id in archive.items:
                mark_as_possibly_superseded(previous_item_id)
            archive.items = items_buffer.chunks
            data = msgpack.packb(archive.as_dict())
            new_archive_id = self.key.id_hash(data)
            cdata = self.key.encrypt(data)
            add_reference(new_archive_id, len(data), len(cdata), cdata)
            self.manifest.archives[info.name] = (new_archive_id, info.ts)
def orphan_chunks_check(self):
    """Report (and in repair mode delete) chunks that no archive references.

    Only meaningful when all archives were checked (self.check_all), otherwise
    a chunk might be referenced by an archive we did not look at.
    """
    if self.check_all:
        unused = {id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0}
        orphaned = unused - self.possibly_superseded
        if orphaned:
            logger.error('{} orphaned objects found!'.format(len(orphaned)))
            self.error_found = True
        if self.repair and unused:
            logger.info('Deleting %d orphaned and %d superseded objects...' % (
                len(orphaned), len(self.possibly_superseded)))
            for id_ in unused:
                self.repository.delete(id_)
            logger.info('Finished deleting orphaned/superseded objects.')
    else:
        logger.info('Orphaned objects check skipped (needs all archives checked).')
def finish(self, save_space=False):
    """In repair mode, persist the updated manifest and commit the repository."""
    if self.repair:
        logger.info('Writing Manifest.')
        self.manifest.write()
        logger.info('Committing repo.')
        self.repository.commit(compact=False, save_space=save_space)
class ArchiveRecreater:
    """Recreate archives with changed filters / chunker params / compression."""

    class Interrupted(Exception):
        # raised to abort recreation; carries checkpoint metadata for resuming
        def __init__(self, metadata=None):
            self.metadata = metadata or {}

    @staticmethod
    def is_temporary_archive(archive_name):
        # temporary target archives are named '<original>.recreate'
        return archive_name.endswith('.recreate')
def __init__(self, repository, manifest, key, cache, matcher,
             exclude_caches=False, exclude_if_present=None, keep_exclude_tags=False,
             chunker_params=None, compression=None, recompress=False, always_recompress=False,
             dry_run=False, stats=False, progress=False, file_status_printer=None,
             timestamp=None, checkpoint_interval=1800):
    """Set up recreation options.

    :param matcher: pattern matcher deciding which items are kept
    :param chunker_params: if given, items get re-chunked with these params
    :param recompress: re-compress chunks whose stored compression differs
    :param always_recompress: re-compress even when the compression matches
    :param file_status_printer: callable(status, path) for progress output, optional
    """
    self.repository = repository
    self.key = key
    self.manifest = manifest
    self.cache = cache
    self.matcher = matcher
    self.exclude_caches = exclude_caches
    self.exclude_if_present = exclude_if_present or []
    self.keep_exclude_tags = keep_exclude_tags
    # re-chunking is requested by passing explicit chunker params
    self.rechunkify = chunker_params is not None
    if self.rechunkify:
        logger.debug('Rechunking archives to %s', chunker_params)
    self.chunker_params = chunker_params or CHUNKER_PARAMS
    self.recompress = recompress
    self.always_recompress = always_recompress
    self.compression = compression or CompressionSpec('none')
    # chunk ids already written to the target (avoids duplicate work)
    self.seen_chunks = set()
    self.timestamp = timestamp
    self.dry_run = dry_run
    self.stats = stats
    self.progress = progress
    self.print_file_status = file_status_printer or (lambda *args: None)
    # no checkpointing in dry-run mode (nothing is written)
    self.checkpoint_interval = None if dry_run else checkpoint_interval
def recreate(self, archive_name, comment=None, target_name=None):
    """Recreate archive *archive_name* into a new target archive.

    :return: False if there is nothing to do (no filters, no recompress,
             no re-chunking, no comment change), True otherwise
    """
    assert not self.is_temporary_archive(archive_name)
    archive = self.open_archive(archive_name)
    target = self.create_target(archive, target_name)
    if self.exclude_if_present or self.exclude_caches:
        self.matcher_add_tagged_dirs(archive)
    if self.matcher.empty() and not self.recompress and not target.recreate_rechunkify and comment is None:
        # no-op: nothing would change in the recreated archive
        return False
    self.process_items(archive, target)
    # without an explicit target name, the recreated archive replaces the original
    replace_original = target_name is None
    self.save(archive, target, comment, replace_original=replace_original)
    return True
def process_items(self, archive, target):
    """Copy all matching items from *archive* into *target*.

    When only a subset of items is kept, hardlink masters that fall outside
    the subset are tracked so their chunks can be attached to the first
    surviving hardlink slave (which then becomes the new master).
    """
    matcher = self.matcher
    target_is_subset = not matcher.empty()
    # path -> (chunks, chunks_healthy, new_source) for masters excluded from the subset
    hardlink_masters = {} if target_is_subset else None

    def item_is_hardlink_master(item):
        return (target_is_subset and
                hardlinkable(item.mode) and
                item.get('hardlink_master', True) and
                'source' not in item)

    for item in archive.iter_items():
        if not matcher.match(item.path):
            self.print_file_status('x', item.path)
            if item_is_hardlink_master(item):
                # remember the excluded master's chunks for its surviving slaves
                hardlink_masters[item.path] = (item.get('chunks'), item.get('chunks_healthy'), None)
            continue
        if target_is_subset and hardlinkable(item.mode) and item.get('source') in hardlink_masters:
            # master of this hard link is outside the target subset
            chunks, chunks_healthy, new_source = hardlink_masters[item.source]
            if new_source is None:
                # First item to use this master, move the chunks
                item.chunks = chunks
                if chunks_healthy is not None:
                    item.chunks_healthy = chunks_healthy
                hardlink_masters[item.source] = (None, None, item.path)
                del item.source
            else:
                # Master was already moved, only update this item's source
                item.source = new_source
        if self.dry_run:
            self.print_file_status('-', item.path)
        else:
            self.process_item(archive, target, item)
    if self.progress:
        target.stats.show_progress(final=True)
def process_item(self, archive, target, item):
    """Add one item to the target archive, processing its chunks if it has any."""
    if 'chunks' in item:
        self.process_chunks(archive, target, item)
        target.stats.nfiles += 1
    target.add_item(item, stats=target.stats)
    self.print_file_status(file_status(item.mode), item.path)
def process_chunks(self, archive, target, item):
    """Transfer an item's chunks into the target archive.

    Fast path: neither recompression nor re-chunking requested - just bump
    refcounts on the existing chunks. Otherwise the data is pulled through
    iter_chunks/chunk_processor.

    NOTE(review): the slow path returns None while the fast path returns
    item.chunks - callers appear to ignore the return value.
    """
    if not self.recompress and not target.recreate_rechunkify:
        for chunk_id, size, csize in item.chunks:
            self.cache.chunk_incref(chunk_id, target.stats)
        return item.chunks
    chunk_iterator = self.iter_chunks(archive, target, list(item.chunks))
    chunk_processor = partial(self.chunk_processor, target)
    target.process_file_chunks(item, self.cache, target.stats, self.progress, chunk_iterator, chunk_processor)
def chunk_processor(self, target, data):
    """Store one chunk of data in the repository/cache, deduplicating against
    chunks already written during this recreate run.

    :return: the cache's chunk list entry for the stored chunk
    """
    chunk_id = self.key.id_hash(data)
    if chunk_id in self.seen_chunks:
        # already written during this run - just reference it again
        return self.cache.chunk_incref(chunk_id, target.stats)
    overwrite = self.recompress
    if self.recompress and not self.always_recompress and chunk_id in self.cache.chunks:
        # Check if this chunk is already compressed the way we want it
        old_chunk = self.key.decrypt(None, self.repository.get(chunk_id), decompress=False)
        if Compressor.detect(old_chunk).name == self.key.compressor.decide(data).name:
            # Stored chunk has the same compression we wanted
            overwrite = False
    chunk_entry = self.cache.add_chunk(chunk_id, data, target.stats, overwrite=overwrite, wait=False)
    self.cache.repository.async_response(wait=False)
    self.seen_chunks.add(chunk_entry.id)
    return chunk_entry
def iter_chunks(self, archive, target, chunks):
    """Yield the data of *chunks*, re-chunked when the target uses different
    chunker params, otherwise passed through unchanged.
    """
    fetched = archive.pipeline.fetch_many([chunk_id for chunk_id, _, _ in chunks])
    if not target.recreate_rechunkify:
        yield from fetched
        return
    # The target.chunker will read the file contents through ChunkIteratorFileWrapper
    # chunk-by-chunk (does not load the entire file into memory)
    yield from target.chunker.chunkify(ChunkIteratorFileWrapper(fetched))
def save(self, archive, target, comment=None, replace_original=True):
    """Finalize the target archive and optionally replace the original.

    :param comment: new archive comment; None keeps the original's comment
    :param replace_original: delete the source archive and rename the target to its name
    """
    if self.dry_run:
        return
    if comment is None:
        comment = archive.metadata.get('comment', '')

    # Keep for the statistics if necessary
    if self.stats:
        _start = target.start

    if self.timestamp is None:
        # no explicit timestamp: carry over the original archive's timestamps
        additional_metadata = {
            'time': archive.metadata.time,
            'time_end': archive.metadata.get('time_end') or archive.metadata.time,
            'cmdline': archive.metadata.cmdline,
            # but also remember recreate metadata:
            'recreate_cmdline': sys.argv,
        }
    else:
        additional_metadata = {
            'cmdline': archive.metadata.cmdline,
            # but also remember recreate metadata:
            'recreate_cmdline': sys.argv,
        }

    target.save(comment=comment, timestamp=self.timestamp,
                stats=target.stats, additional_metadata=additional_metadata)
    if replace_original:
        archive.delete(Statistics(), progress=self.progress)
        target.rename(archive.name)
    if self.stats:
        target.start = _start
        target.end = datetime.utcnow()
        log_multi(DASHES,
                  str(target),
                  DASHES,
                  str(target.stats),
                  str(self.cache),
                  DASHES)
def matcher_add_tagged_dirs(self, archive):
    """Add excludes to the matcher created by exclude_cache and exclude_if_present."""
    def exclude(dir, tag_item):
        # either keep the tag file itself while excluding the rest of the dir,
        # or exclude the whole dir including the tag file
        if self.keep_exclude_tags:
            tag_files.append(PathPrefixPattern(tag_item.path, recurse_dir=False))
            tagged_dirs.append(FnmatchPattern(dir + '/', recurse_dir=False))
        else:
            tagged_dirs.append(PathPrefixPattern(dir, recurse_dir=False))

    matcher = self.matcher
    tag_files = []
    tagged_dirs = []

    # to support reading hard-linked CACHEDIR.TAGs (aka CACHE_TAG_NAME), similar to hardlink_masters:
    cachedir_masters = {}

    if self.exclude_caches:
        # sadly, due to how CACHEDIR.TAG works (filename AND file [header] contents) and
        # how borg deals with hardlinks (slave hardlinks referring back to master hardlinks),
        # we need to pass over the archive collecting hardlink master paths.
        # as seen in issue #4911, the master paths can have an arbitrary filenames,
        # not just CACHEDIR.TAG.
        for item in archive.iter_items(filter=lambda item: os.path.basename(item.path) == CACHE_TAG_NAME):
            if stat.S_ISREG(item.mode) and 'chunks' not in item and 'source' in item:
                # this is a hardlink slave, referring back to its hardlink master (via item.source)
                cachedir_masters[item.source] = None  # we know the key (path), but not the value (item) yet

    for item in archive.iter_items(
            filter=lambda item: os.path.basename(item.path) == CACHE_TAG_NAME or matcher.match(item.path)):
        if self.exclude_caches and item.path in cachedir_masters:
            cachedir_masters[item.path] = item
        dir, tag_file = os.path.split(item.path)
        if tag_file in self.exclude_if_present:
            exclude(dir, item)
        elif self.exclude_caches and tag_file == CACHE_TAG_NAME and stat.S_ISREG(item.mode):
            # read the file contents (via its master if it is a hardlink slave)
            # to confirm it really is a CACHEDIR.TAG
            content_item = item if 'chunks' in item else cachedir_masters[item.source]
            file = open_item(archive, content_item)
            if file.read(len(CACHE_TAG_CONTENTS)) == CACHE_TAG_CONTENTS:
                exclude(dir, item)
    matcher.add(tag_files, IECommand.Include)
    matcher.add(tagged_dirs, IECommand.ExcludeNoRecurse)
def create_target(self, archive, target_name=None):
    """Create target archive."""
    target_name = target_name or archive.name + '.recreate'
    target = self.create_target_archive(target_name)
    # If the archives use the same chunker params, then don't rechunkify
    source_chunker_params = tuple(archive.metadata.get('chunker_params', []))
    if len(source_chunker_params) == 4 and isinstance(source_chunker_params[0], int):
        # this is a borg < 1.2 chunker_params tuple, no chunker algo specified, but we only had buzhash:
        source_chunker_params = (CH_BUZHASH, ) + source_chunker_params
    target.recreate_rechunkify = self.rechunkify and source_chunker_params != target.chunker_params
    if target.recreate_rechunkify:
        logger.debug('Rechunking archive from %s to %s', source_chunker_params or '(unknown)', target.chunker_params)
        # re-chunking needs its own chunk processor and chunker on the target
        target.process_file_chunks = ChunksProcessor(
            cache=self.cache, key=self.key,
            add_item=target.add_item, write_checkpoint=target.write_checkpoint,
            checkpoint_interval=self.checkpoint_interval, rechunkify=target.recreate_rechunkify).process_file_chunks
        target.chunker = get_chunker(*target.chunker_params, seed=self.key.chunk_seed)
    return target
def create_target_archive(self, name):
    """Instantiate the new (empty) target archive *name*."""
    return Archive(self.repository, self.key, self.manifest, name, create=True,
                   progress=self.progress, chunker_params=self.chunker_params, cache=self.cache,
                   checkpoint_interval=self.checkpoint_interval)
def open_archive(self, name, **kwargs):
    """Open the existing archive *name*, sharing this recreater's cache."""
    return Archive(self.repository, self.key, self.manifest, name,
                   cache=self.cache, **kwargs)
| 46.248468 | 148 | 0.588758 |
a89fc49f5b4f04ba18ec4efaac1056af4f62e450 | 2,213 | py | Python | Chapt1-Movies.py | jarrodolson/PythonForDataAnlysis | 0d3ea8055366c44791ed377ac409b6dc8f4043b6 | [
"MIT"
] | 1 | 2018-03-17T08:28:41.000Z | 2018-03-17T08:28:41.000Z | Chapt1-Movies.py | jarrodolson/PythonForDataAnlysis | 0d3ea8055366c44791ed377ac409b6dc8f4043b6 | [
"MIT"
] | null | null | null | Chapt1-Movies.py | jarrodolson/PythonForDataAnlysis | 0d3ea8055366c44791ed377ac409b6dc8f4043b6 | [
"MIT"
] | 1 | 2021-06-27T21:46:35.000Z | 2021-06-27T21:46:35.000Z | #=======================================
# Movielens examples from Python for Data Analysis
import pandas as pd
unames = ['user_id','gender','age','occupation','zip']
users = pd.read_table('ml-1m/users.dat',sep='::',header=None,names=unames)
rnames = ['user_id','movie_id','rating','timestamp']
ratings = pd.read_table('ml-1m/ratings.dat',sep="::",header=None,names=rnames)
mnames = ['movie_id','title','genres']
movies = pd.read_table('ml-1m/movies.dat',sep="::",header=None,names=mnames)
##Merge
## should go automatically and detect common column names, but not working
## so i specified it, which seems to be causing problems
data = pd.merge(pd.merge(ratings,users),movies)
##data = pd.merge(pd.merge(ratings,
## users,
## left_index="user_id",
## right_index="user_id",how="left"),
## movies,
## left_index="movie_id",
## right_index="movie_id",
## how="left")
##Look at column names
data.ix[0]
##Loook at ratings by movie and gender
mean_ratings = data.pivot_table('rating',rows='title',cols='gender',aggfunc='mean')
mean_ratings[:5]
##Find number of ratings per moview, regardless of gender
ratings_by_title = data.groupby('title').size()
ratings_by_title[:10]
##Find movies that have received >=250 reviews
active_titles = ratings_by_title.index[ratings_by_title>=250]
>>> active_titles
##Just use movies with >=250 reviews
mean_ratings = mean_ratings.ix[active_titles]
mean_ratings
##Find top female reviews
top_female_ratings = mean_ratings.sort_index(by='F',ascending=False)
top_female_ratings[:10]
########################
##Measuring disagreement
##Create variable
mean_ratings['diff'] = mean_ratings['M']-mean_ratings['F']
##See biggest difference (things women like more than men)
sorted_by_diff = mean_ratings.sort_index(by="diff")
sorted_by_diff[:15]
##Now see biggest difference in reverse order
sorted_by_diff[::-1][:15]
##Standard deviation of ratings, grouped by title
rating_std_by_title = data.groupby('title')['rating'].std()
rating_std_by_title = rating_std_by_title.ix[active_titles]
print(rating_std_by_title.order(ascending=False)[:10])
| 33.029851 | 83 | 0.68188 |
a815f033e60bcd1eb2369d2ca112d323524bdf4f | 57,511 | py | Python | Lib/logging/handlers.py | yungyu/cpython | 97588f439d35a918c95f6785c0fa17d3fbc40c31 | [
"PSF-2.0"
] | 1 | 2021-07-12T23:56:40.000Z | 2021-07-12T23:56:40.000Z | Lib/logging/handlers.py | yungyu/cpython | 97588f439d35a918c95f6785c0fa17d3fbc40c31 | [
"PSF-2.0"
] | null | null | null | Lib/logging/handlers.py | yungyu/cpython | 97588f439d35a918c95f6785c0fa17d3fbc40c31 | [
"PSF-2.0"
] | null | null | null | # Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020     # default port a SocketHandler receiver listens on
DEFAULT_UDP_LOGGING_PORT = 9021     # default port a DatagramHandler receiver listens on
DEFAULT_HTTP_LOGGING_PORT = 9022    # default port for an HTTP-based log receiver
DEFAULT_SOAP_LOGGING_PORT = 9023    # default port for a SOAP-based log receiver
SYSLOG_UDP_PORT = 514               # well-known syslog port (UDP)
SYSLOG_TCP_PORT = 514               # well-known syslog port (TCP)

_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.

    Subclasses must provide shouldRollover() and doRollover().
    """
    def __init__(self, filename, mode, encoding=None, delay=False):
        """
        Use the specified filename for streamed logging
        """
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding
        # User-settable hooks: 'namer' maps the default rotated filename to
        # a custom one; 'rotator' performs the rotation itself.  When left
        # as None, rotation_filename()/rotate() fall back to the defaults.
        self.namer = None
        self.rotator = None

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            # Roll over first so the record lands in the fresh file.
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except Exception:
            self.handleError(record)

    def rotation_filename(self, default_name):
        """
        Modify the filename of a log file when rotating.

        This is provided so that a custom filename can be provided.

        The default implementation calls the 'namer' attribute of the
        handler, if it's callable, passing the default name to
        it. If the attribute isn't callable (the default is None), the name
        is returned unchanged.

        :param default_name: The default name for the log file.
        """
        if not callable(self.namer):
            result = default_name
        else:
            result = self.namer(default_name)
        return result

    def rotate(self, source, dest):
        """
        When rotating, rotate the current log.

        The default implementation calls the 'rotator' attribute of the
        handler, if it's callable, passing the source and dest arguments to
        it. If the attribute isn't callable (the default is None), the source
        is simply renamed to the destination.

        :param source: The source filename. This is normally the base
                       filename, e.g. 'test.log'
        :param dest:   The destination filename. This is normally
                       what the source is rotated to, e.g. 'test.log.1'.
        """
        if not callable(self.rotator):
            # Issue 18940: A file may not have been created if delay is True.
            if os.path.exists(source):
                os.rename(source, dest)
        else:
            self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            # Shift backups upward (.1 -> .2, ...), starting from the highest
            # index so nothing is overwritten; the oldest backup is dropped.
            for i in range(self.backupCount - 1, 0, -1):
                sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
                dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
                                                        i + 1))
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.rotation_filename(self.baseFilename + ".1")
            if os.path.exists(dfn):
                os.remove(dfn)
            self.rotate(self.baseFilename, dfn)
        if not self.delay:
            self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.  Returns 1 (truthy) to roll over, else 0.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None):
        """
        Open the file and schedule the first rollover.

        'when' selects the rollover unit (see table in the body, case is
        ignored), 'interval' the number of units between rollovers, 'utc'
        whether rollover times are computed in UTC rather than local time,
        and 'atTime' (a datetime.time) pins the time of day for midnight /
        weekly rollovers.
        """
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        self.atTime = atTime
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers. Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        # extMatch recognises rotated filenames so getFilesToDelete() only
        # ever touches files this handler produced.
        self.extMatch = re.compile(self.extMatch, re.ASCII)
        self.interval = self.interval * interval # multiply by units requested
        # The following line added because the filename passed in could be a
        # path object (see Issue #27493), but self.baseFilename will be a string
        filename = self.baseFilename
        if os.path.exists(filename):
            # Anchor the schedule to the existing file's mtime so restarts
            # don't reset the rollover clock.
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            currentDay = t[6]
            # r is the number of seconds left between now and the next rotation
            if self.atTime is None:
                rotate_ts = _MIDNIGHT
            else:
                rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
                    self.atTime.second)

            r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
                currentSecond)
            if r < 0:
                # Rotate time is before the current time (for example when
                # self.rotateAt is 13:45 and it now 14:15), rotation is
                # tomorrow.
                r += _MIDNIGHT
                currentDay = (currentDay + 1) % 7
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = currentDay # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                # Only files whose suffix matches our rotation pattern are
                # candidates; unrelated files sharing the prefix are kept.
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            # If DST flipped between interval start and now, shift the stamp
            # so the rotated filename reflects the interval's wall-clock start.
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, timeTuple))
        if os.path.exists(dfn):
            os.remove(dfn)
        self.rotate(self.baseFilename, dfn)
        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=False):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        # (device, inode) identity of the open stream; (-1, -1) until a
        # stream exists, which guarantees the first comparison reopens.
        self.dev, self.ino = -1, -1
        self._statstream()

    def _statstream(self):
        # Capture the identity of the file behind the current stream so a
        # later rename/rotation of the path can be detected by comparison.
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def reopenIfNeeded(self):
        """
        Reopen log file if needed.

        Checks if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except FileNotFoundError:
            sres = None
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                self.stream = None  # See Issue #21742: _open () might fail.
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()

    def emit(self, record):
        """
        Emit a record.

        If underlying file has changed, reopen the file before emitting the
        record to it.
        """
        self.reopenIfNeeded()
        logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        When the attribute *closeOnError* is set to True - if a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        # port of None means 'host' is a Unix domain socket path.
        if port is None:
            self.address = host
        else:
            self.address = (host, port)
        self.sock = None
        self.closeOnError = False
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        if self.port is not None:
            result = socket.create_connection(self.address, timeout=timeout)
        else:
            result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            result.settimeout(timeout)
            try:
                result.connect(self.address)
            except OSError:
                result.close()  # Issue 19182
                raise
        return result

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = True
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except OSError:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                self.sock.sendall(s)
            except OSError: #pragma: no cover
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        d['exc_info'] = None
        # Issue #25685: delete 'message' if present: redundant with 'msg'
        d.pop('message', None)
        # Protocol 1 keeps the payload readable by very old unpicklers.
        s = pickle.dumps(d, 1)
        slen = struct.pack(">L", len(s))
        return slen + s

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except Exception:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        # Hold the handler lock so emit() cannot race with the close.
        self.acquire()
        try:
            sock = self.sock
            if sock:
                self.sock = None
                sock.close()
            logging.Handler.close(self)
        finally:
            self.release()
class DatagramHandler(SocketHandler):
    """
    A handler which emits pickled LogRecord attribute dictionaries
    (__dict__) as UDP datagrams, so the receiver can rebuild each record
    with makeLogRecord without needing this process's logging setup.
    """

    def __init__(self, host, port):
        """
        Initialize the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = False

    def makeSocket(self):
        """
        Override SocketHandler's stream-socket factory to produce a
        datagram socket (SOCK_DGRAM).  A port of None means *host* is a
        Unix domain socket path.
        """
        family = socket.AF_UNIX if self.port is None else socket.AF_INET
        return socket.socket(family, socket.SOCK_DGRAM)

    def send(self, s):
        """
        Send the pickled byte string as one datagram.

        Unlike the stream version there is no partial-send handling: UDP
        guarantees neither delivery nor ordering, so the payload goes out
        in a single sendto call.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
# Moreover, we ignore these errors while logging, so it not worse
# to ignore it also here.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
    """
    Emit a record.

    The record is formatted, and then sent to the syslog server. If
    exception information is present, it is NOT sent to the server.
    """
    try:
        msg = self.format(record)
        if self.ident:
            msg = self.ident + msg
        if self.append_nul:
            msg += '\000'
        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        prio = prio.encode('utf-8')
        # Message is a string. Convert to bytes as required by RFC 5424
        msg = msg.encode('utf-8')
        msg = prio + msg
        if self.unixsocket:
            try:
                self.socket.send(msg)
            except OSError:
                # The local daemon may have restarted; reconnect once and retry.
                self.socket.close()
                self._connect_unixsocket(self.address)
                self.socket.send(msg)
        elif self.socktype == socket.SOCK_DGRAM:
            self.socket.sendto(msg, self.address)
        else:
            self.socket.sendall(msg)
    except Exception:
        self.handleError(record)
class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None, timeout=5.0):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, (list, tuple)):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, (list, tuple)):
            self.username, self.password = credentials
        else:
            # NOTE: self.password is deliberately left unset here; emit()
            # only reads it when self.username is truthy, i.e. when
            # credentials were supplied.
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]  # accept a single address as a convenience
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self.timeout = timeout

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.message import EmailMessage
            import email.utils
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
            msg = EmailMessage()
            msg['From'] = self.fromaddr
            msg['To'] = ','.join(self.toaddrs)
            msg['Subject'] = self.getSubject(record)
            msg['Date'] = email.utils.localtime()
            msg.set_content(self.format(record))
            if self.username:
                if self.secure is not None:
                    # starttls requires an EHLO both before and after the upgrade
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.send_message(msg)
            smtp.quit()
        except Exception:
            self.handleError(record)
class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                # Default to win32service.pyd, located two directories up
                # from the win32evtlogutil module's own file.
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            # Level -> NT event type; unmapped levels fall back to
            # self.deftype in getEventType().
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                  "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except Exception:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)
class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET", secure=False, credentials=None,
                 context=None):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST").

        Raises ValueError for an unknown method, or if an SSL context is
        supplied without secure=True.
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        if not secure and context is not None:
            raise ValueError("context parameter only makes sense "
                             "with secure=True")
        self.host = host
        self.url = url
        self.method = method
        self.secure = secure
        self.credentials = credentials
        self.context = context

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Overwrite in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import http.client, urllib.parse
            host = self.host
            if self.secure:
                h = http.client.HTTPSConnection(host, context=self.context)
            else:
                h = http.client.HTTPConnection(host)
            url = self.url
            data = urllib.parse.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                # For GET, the record data rides in the query string.
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            if self.credentials:
                import base64
                # HTTP Basic auth from the (username, password) tuple.
                s = ('%s:%s' % self.credentials).encode('utf-8')
                s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
                h.putheader('Authorization', s)
            h.endheaders()
            if self.method == "POST":
                h.send(data.encode('utf-8'))
            h.getresponse()    #can't do anything with the result
        except Exception:
            self.handleError(record)
class BufferingHandler(logging.Handler):
    """
    A handler that collects logging records in an in-memory buffer.

    After every record is appended, shouldFlush() is consulted; when it
    answers True, flush() is called to deal with the buffered records.
    """
    def __init__(self, capacity):
        """
        Create the handler with room for *capacity* records.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Return True once the buffer has reached capacity.

        Subclasses may override this to implement other flushing policies.
        """
        return len(self.buffer) >= self.capacity

    def emit(self, record):
        """
        Buffer *record*; flush when shouldFlush() says the time has come.
        """
        self.buffer.append(record)
        if not self.shouldFlush(record):
            return
        self.flush()

    def flush(self):
        """
        Discard the buffered records (override for real behaviour).

        The handler lock is held while the buffer is replaced.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush whatever is left, then close via the base class.
        """
        try:
            self.flush()
        finally:
            logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
    """
    A buffering handler that periodically forwards its records to a
    target handler.  A flush happens whenever the buffer fills up, or as
    soon as a record at flushLevel (or above) arrives.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
                 flushOnClose=True):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!

        The ``flushOnClose`` argument is ``True`` for backward compatibility
        reasons - the old behaviour is that when the handler is closed, the
        buffer is flushed, even if the flush level hasn't been exceeded nor the
        capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
        # See Issue #26559 for why this has been added
        self.flushOnClose = flushOnClose

    def shouldFlush(self, record):
        """
        Flush when the buffer is full, or when *record* is severe enough.
        """
        if len(self.buffer) >= self.capacity:
            return True
        return record.levelno >= self.flushLevel

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        Send every buffered record to the target, if one is set, and in
        that case clear the buffer.  Without a target the records are
        retained for a later flush.
        """
        self.acquire()
        try:
            if self.target:
                for buffered in self.buffer:
                    self.target.handle(buffered)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush if so configured, then detach the target and close.
        """
        try:
            if self.flushOnClose:
                self.flush()
        finally:
            self.acquire()
            try:
                self.target = None
                BufferingHandler.close(self)
            finally:
                self.release()
class QueueHandler(logging.Handler):
    """
    A handler that ships every record to a queue.

    Typically paired with a multiprocessing queue so a single process
    performs the actual file I/O, avoiding write contention between
    processes.  New in Python 3.2, but the class can be copy-pasted into
    user code for earlier versions.
    """
    def __init__(self, queue):
        """
        Initialise an instance, using the passed queue.
        """
        logging.Handler.__init__(self)
        self.queue = queue

    def enqueue(self, record):
        """
        Place *record* on the queue without blocking.

        Override to use blocking puts, timeouts or a custom queue
        implementation.
        """
        self.queue.put_nowait(record)

    def prepare(self, record):
        """
        Make *record* safe to pickle before it is enqueued; the returned
        object is what gets queued.

        Formatting merges msg and args (and renders any traceback into
        record.exc_text), after which the potentially-unpicklable args
        and exc_info can be dropped in place.  Override to send e.g. a
        dict or JSON string, or a copy that leaves the original intact.
        """
        formatted = self.format(record)
        record.message = formatted
        record.msg = formatted
        record.args = None
        record.exc_info = None
        return record

    def emit(self, record):
        """
        Prepare the record for pickling and enqueue it, routing failures
        through handleError().
        """
        try:
            self.enqueue(self.prepare(record))
        except Exception:
            self.handleError(record)
class QueueListener(object):
    """
    Watches a queue from an internal background thread, removing each
    LogRecord that arrives and offering it to a list of handlers.
    """
    _sentinel = None  # placed on the queue to tell the monitor thread to exit

    def __init__(self, queue, *handlers, respect_handler_level=False):
        """
        Initialise an instance with the specified queue and handlers.
        """
        self.queue = queue
        self.handlers = handlers
        self._thread = None
        self.respect_handler_level = respect_handler_level

    def dequeue(self, block):
        """
        Remove and return the next record, optionally blocking.

        Override to use timeouts or a custom queue implementation.
        """
        return self.queue.get(block)

    def start(self):
        """
        Start the listener: launch the daemon thread that drains the
        queue of LogRecords to process.
        """
        monitor = threading.Thread(target=self._monitor)
        monitor.daemon = True
        self._thread = monitor
        monitor.start()

    def prepare(self , record):
        """
        Hook for marshalling/manipulating a record before handling;
        this base version returns it unchanged.
        """
        return record

    def handle(self, record):
        """
        Offer *record* to every handler, honouring each handler's level
        when respect_handler_level is set.
        """
        record = self.prepare(record)
        for handler in self.handlers:
            if self.respect_handler_level and record.levelno < handler.level:
                continue
            handler.handle(record)

    def _monitor(self):
        """
        Thread body: pull records off the queue and dispatch them until
        the sentinel object (or queue.Empty) appears.
        """
        pending = self.queue
        supports_task_done = hasattr(pending, 'task_done')
        while True:
            try:
                record = self.dequeue(True)
                if record is self._sentinel:
                    break
                self.handle(record)
                if supports_task_done:
                    pending.task_done()
            except queue.Empty:
                break

    def enqueue_sentinel(self):
        """
        Put the sentinel on the queue (non-blocking) to stop the monitor.

        Override to use timeouts or a custom queue implementation.
        """
        self.queue.put_nowait(self._sentinel)

    def stop(self):
        """
        Stop the listener: ask the thread to terminate and wait for it.

        Records still on the queue when the application exits without
        calling this will not be processed.
        """
        self.enqueue_sentinel()
        self._thread.join()
        self._thread = None
| 38.289614 | 122 | 0.582028 |
8499be98cfc03cd67d7a7efc241bcfcb05f154d1 | 2,586 | py | Python | others/old/train_RNN.py | YuRui8879/CPSC2021_python | bfa4c565ec3113528e73b064041082863cd228b4 | [
"MIT"
] | 4 | 2021-12-20T12:52:02.000Z | 2021-12-29T09:34:42.000Z | others/old/train_RNN.py | YuRui8879/CPSC2021_python | bfa4c565ec3113528e73b064041082863cd228b4 | [
"MIT"
] | null | null | null | others/old/train_RNN.py | YuRui8879/CPSC2021_python | bfa4c565ec3113528e73b064041082863cd228b4 | [
"MIT"
] | 1 | 2021-11-20T12:20:55.000Z | 2021-11-20T12:20:55.000Z | import torch
import torch
import torch.nn as nn
from read_code import *
from DataAdapter import RNNAdapter
import torch.utils.data as Data
from torch.optim.lr_scheduler import CosineAnnealingLR,MultiStepLR
from model import RNN
import time
from batch import cal_rnn_batch
import torch.optim as optim
from EarlyStopping import EarlyStopping
from Regularization import Regularization

# Training configuration.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
data_path = r'C:\Users\yurui\Desktop\item\cpsc\data\all_data'
batch_size = 64
epochs = 80
learning_rate = 0.001
patience = 10  # early-stopping patience, in epochs without improvement

# Load the signals and split them into train / validation / test samples.
res = get_signal(data_path, 0)
train_samp, valid_samp, test_samp = gen_sample(res)
train_set = RNNAdapter(train_samp)
valid_set = RNNAdapter(valid_samp)
# Bug fix: the test split was never wrapped/loaded, so `test_loader`
# below raised a NameError after training completed.
test_set = RNNAdapter(test_samp)
train_loader = Data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0)
valid_loader = Data.DataLoader(valid_set, batch_size=batch_size, shuffle=False, num_workers=0)
test_loader = Data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0)

model = RNN()
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
early_stopping = EarlyStopping(patience, verbose=False)
# clr = CosineAnnealingLR(optimizer,T_max = 32)
clr = MultiStepLR(optimizer, [20, 50], gamma=0.1)
reg_loss = Regularization(model, 0.001)

best_loss = 100
for epoch in range(1, epochs + 1):
    start_time = time.time()
    # Train for one epoch.
    train_res = cal_rnn_batch(train_loader, model, criterion, device, optimizer, reg_loss, True)
    clr.step()
    # Evaluate on the validation set.
    valid_res = cal_rnn_batch(valid_loader, model, criterion, device, optimizer, reg_loss, False)
    time_all = time.time() - start_time
    # Report training and validation metrics for this epoch.
    print('- Epoch: %d - Train_loss: %.5f - Train_mean_acc: %.5f - Train_F1: %.5f - Val_loss: %.5f - Val_mean_acc: %5f - Val_F1: %.5f - T_Time: %.3f' \
        %(epoch,train_res['loss'],train_res['acc'],train_res['F1'],valid_res['loss'],valid_res['acc'],valid_res['F1'],time_all))
    print('当前学习率:%f' %optimizer.state_dict()['param_groups'][0]['lr'])
    # Keep a checkpoint of the best model seen so far (lowest validation loss).
    if valid_res['loss'] < best_loss:
        best_loss = valid_res['loss']
        print('Find better model in Epoch {0}, saving model.'.format(epoch))
        torch.save(model.state_dict(), r'.\model\RNN_best_model.pt')
    early_stopping(valid_res['loss'], model)
    # Stop once validation loss has not improved for `patience` epochs.
    if early_stopping.early_stop:
        print("Early stopping")
        break
print('RNN Training Finished')

# Final evaluation on the held-out test set.
result = cal_rnn_batch(test_loader, model, criterion, device, optimizer, reg_loss, False)
print('confusion_matrix:', result['confusion_matrix'])
print('acc:', result['acc'])
151a7f895ba4b0870c97b35b4e903e1065b4f55c | 12,550 | py | Python | src/queens8/Queens8.py | DanLBates/queens8 | 396967557c1ecb24fba1ef82530cb5c74b407a8b | [
"MIT"
] | null | null | null | src/queens8/Queens8.py | DanLBates/queens8 | 396967557c1ecb24fba1ef82530cb5c74b407a8b | [
"MIT"
] | null | null | null | src/queens8/Queens8.py | DanLBates/queens8 | 396967557c1ecb24fba1ef82530cb5c74b407a8b | [
"MIT"
] | null | null | null | #!python
''' queens8.py Places 8 queens on chessboard so that they are mutually
unthreatening. Allows user to place the 1st queen anywhere on the
1st row; then the program places the rest of the queens by a
recursive backtracking algorithm. Prints out all the moves,
including the moves that may be "taken back" as well as the moves
that "take back". The algorithm, due to Dijkstra and Wirth, is
justly famous for its elegant method of keeping track of
threatened squares.
28-Jan-2022 2:47:48 pm added queens8all and selection diaglog
'''
# tkinter used to show boards.
import sys
import tkinter as tk
roots = list()
def Queens8 (all : bool, sym=False, equiv=0):
    """Solve the 8-queens puzzle by Dijkstra/Wirth backtracking.

    :param all: enumerate every solution (menu option B) instead of one.
    :param sym: after finding one solution, display all its symmetries.
    :param equiv: if > 0, place the first queen on this row and return
        the column-indexed solution board (used by Queens8Equivalence);
        0 means prompt the user interactively for the first row.
    """
    global roots, AllBoards
    roots = list()
    # Verbose move-by-move printing only in the plain interactive mode.
    out = not(all or sym or (equiv > 0))

    def Initialize ():
        # the following variables belong to the scope of Queens8 but
        # are initialized here. Often with an unused element index 0.
        # It is easier and more intuitive to waste an element than
        # to program around it.
        nonlocal Rows, Cols, UpDiagSafe, DownDiagSafe, RowSafe
        nonlocal Ctr, QueensPlaced
        Ctr = 0
        QueensPlaced = 0
        RowSafe = [False] + [True] * 8
        UpDiagSafe = [True] * 17         # indexed by R+C {1..16}
        DownDiagSafe = UpDiagSafe.copy() # indexed by R-C {-7..7}
        Rows = [None] + [0] * 8
        Cols = Rows.copy()

    def PlaceQueenOn (r: int, c: int):
        # RowSafe, UpDiagSafe, DownDiagSafe are all True.
        # So, we place a queen here and make all the above False.
        nonlocal Rows, Cols, UpDiagSafe, DownDiagSafe, RowSafe
        nonlocal Ctr, QueensPlaced
        Ctr += 1
        if out:
            print(f'{Ctr:3}: Place Queen {c:1} on Row {r:1}')
        QueensPlaced += 1
        RowSafe[r] = False
        UpDiagSafe[r + c] = False
        DownDiagSafe[r - c] = False
        Rows[r] = c
        Cols[c] = r

    def TakeQueen (r, c):
        # Can't proceed so we take back a Queen and relocate it.
        nonlocal Rows, Cols, UpDiagSafe, DownDiagSafe, RowSafe
        nonlocal Ctr, QueensPlaced
        if out:
            print(f'{Ctr:3}: Take Queen {c:1} from row {r:1}')
        Ctr += 1
        QueensPlaced -= 1
        RowSafe[r] = True
        UpDiagSafe[r + c] = True
        DownDiagSafe[r - c] = True
        Rows[r] = 0
        Cols[c] = 0

    def PlaceRestOfQueens ():
        # Recursive backtracking over columns; the column to fill is
        # always QueensPlaced + 1.
        global AllBoards
        nonlocal Ctr, QueensPlaced, NumberSolutions
        c = QueensPlaced + 1
        if all:
            # Enumerating mode: keep backtracking past complete boards
            # so every solution is visited; record the distinct ones.
            for r in range(1, 9):
                if RowSafe[r] and UpDiagSafe[r + c] and DownDiagSafe[r - c]:
                    PlaceQueenOn(r, c)
                    if QueensPlaced < 8: # More to go. Call ourself to complete.
                        PlaceRestOfQueens()
                    if QueensPlaced == 8: # Done! So return.
                        if Cols not in AllBoards:
                            for i in range(1, 9):
                                if out:
                                    print(Cols[i], end="")
                            print()
                            LogAllSymmetries(Cols)
                            NumberSolutions += 1
                            if display_queens:
                                DoDisplayBoard(Cols)
                    TakeQueen(r, c) # must backtrack
        else:
            # Single-solution mode: stop as soon as all 8 are placed.
            for r in range(1, 9):
                if RowSafe[r] and UpDiagSafe[r + c] and DownDiagSafe[r - c]:
                    PlaceQueenOn(r, c)
                    if QueensPlaced < 8: # More to go. Call ourself to complete.
                        PlaceRestOfQueens()
                    if QueensPlaced == 8: # Done! So return.
                        return
                    TakeQueen(r, c) # must backtrack
            return

    # Let these variables exist in the Queens8 scope. Any attempt to
    # reference one while it still is None, will be futile.
    Rows = None
    Cols = None
    UpDiagSafe = None
    DownDiagSafe = None
    RowSafe = None
    QueensPlaced = None
    Ctr = None
    NumberSolutions = 0
    if all:
        AllBoards = list()
        display_queens = input("Display Queens Y or N?").upper()[0] == 'Y'
        # Try each first-row placement in turn to cover all solutions.
        for first in range(1, 9):
            Initialize()
            PlaceQueenOn(first, 1)
            PlaceRestOfQueens()
            TakeQueen(first, 1)
        print('Number of Distinct Solutions = ', end='')
        print(f'{NumberSolutions} Total Solutions = {len(AllBoards)}')
        input("Enter to Proceed!")
        if display_queens:
            KillBoards()
    else:
        while True:
            Initialize()
            if equiv > 0:
                first=equiv
            else:
                print('On which row do you want the first Queen?')
                print('valid entries an in range 1..8')
                print('enter any other integer to halt.')
                while True:
                    first = input('?:')
                    if first.isnumeric():
                        first = int(first)
                        break
                    print("Not numeric. Try Again.")
            if first in range(1, 9):
                PlaceQueenOn(first, 1)
                PlaceRestOfQueens()
                if QueensPlaced == 8:
                    if sym:
                        DoColSym(Cols)
                    elif equiv > 0:
                        return Cols
                    else:
                        for i in range(1, 9):
                            print(Rows[i], end=' ')
                        print()
                        DoDisplayBoard(Cols)
                        input("Enter to Proceed")
                        KillBoards()
            else:
                return
# A global so the symmetry functions, especially Log1State, have access.
AllBoards = list()
def KillBoards ():
    """Destroy every board window created so far and empty the roots list."""
    global roots
    while roots:
        window = roots.pop()
        window.destroy()
        # NOTE(review): calling update() on an already-destroyed root may
        # raise TclError in some tkinter versions -- confirm ordering.
        window.update()
BLACKQUEEN = '\u265b'  # U+265B BLACK CHESS QUEEN glyph
WHITEQUEEN = '\u2655'  # U+2655 WHITE CHESS QUEEN glyph
WHITESPACE = '\u3000'  # This white space big as Queen. Ascii ' ' not so.
# Tkinter windows interface to display chess board
# Tkinter will be discussed in Volume 2
# until then this is Windows magic.
# each row it's own frame. Each column a label in all
# frames at the same index.
def DoDisplayBoard (cols):
    """Open a tkinter window showing the board described by *cols*.

    *cols* maps column (1..8, index 0 unused) to the row of its queen.
    The window is appended to the global ``roots`` list so KillBoards()
    can tear it down later.
    """
    global roots
    root = tk.Tk()
    roots.append(root)
    root.title('Chess Board')
    # Board[i][j] will hold the Label for row i, column j (index 0 unused).
    ROW = [None] * 9
    Board = [ROW[:] for i in range(9)]
    del ROW
    # One Frame per row; packed top-down so row 8 appears at the top.
    F = [None] * 9
    F[8] = tk.Frame(root)
    F[8].pack()
    for i in range(7, 0, -1):
        F[i] = tk.Frame(root)
        F[i].pack(side='top')
    # Fill every square with a wide-space Label, alternating colours.
    for i in range(1, 9):
        for j in range(1, 9):
            Board[i][j] = tk.Label(F[i], text=WHITESPACE,
                                   fg=['white', 'black'][(i + j) % 2],
                                   bg=['black', 'white'][(i + j) % 2],
                                   font=('Times', 24))
            Board[i][j].pack(side='left')
    # Drop a queen glyph on each occupied square, matching square parity.
    for i in range(1, 9):
        Board[i][cols[i]].config(text=[BLACKQUEEN, WHITEQUEEN][(i+cols[i])%2]) # noqa
    root.update()
def DoColSym (cols):
    """Display *cols* plus every board reachable from it by symmetry.

    Rebuilds the global AllBoards starting from *cols*, expanding it to
    a fixed point (each recorded board is itself expanded), then shows
    one window per distinct board.
    """
    global AllBoards, roots
    AllBoards = list()
    Log1State(cols)
    # Expand until no new boards are appended: LogAllSymmetries may grow
    # AllBoards while we walk it by index.
    index = 0
    while index < len(AllBoards):
        board = AllBoards[index]
        LogAllSymmetries(board)
        index += 1
    for board in AllBoards:
        DoDisplayBoard(board)
    input("Enter to Continue")
    KillBoards()
def Queens8All ():
    """Menu wrapper: enumerate every solution (Queens8 in all-solutions mode)."""
    Queens8(True)
def Queens8Simple ():
    """Menu wrapper: the original interactive single-solution mode."""
    Queens8(False)
def RowCol (row : list) -> list:
    """Invert a board representation.

    A board is a 9-element list (index 0 unused) mapping column -> row,
    or equivalently row -> column.  Because the mapping is a permutation
    of 1..8, inverting it converts between the two representations, and
    applying RowCol twice returns the original.
    """
    inverse = [None] * 9
    for idx, val in enumerate(row[1:9], start=1):
        inverse[val] = idx
    return inverse
def Reflect_x (row : list) -> list:
    """Mirror a column-indexed board across the horizontal centre line.

    The axis lies between rows 4 and 5, so each queen's row r becomes
    9 - r; index 0 remains the unused None slot.
    """
    return [None] + [9 - row[i] for i in range(1, 9)]
def Reflect_y (row : list) -> list:
    """Mirror a column-indexed board across the vertical centre line.

    Implemented by converting to the row-indexed view, flipping it with
    Reflect_x, and converting back with RowCol.
    """
    return RowCol(Reflect_x(RowCol(row)))
def Rot90 (row : list) -> list:
    """Rotate a column-indexed board 90 degrees counter-clockwise.

    Replace every queen's row r by 9 - r, then invert the mapping with
    RowCol to obtain the rotated, column-indexed board.
    """
    flipped = [None] + [9 - row[i] for i in range(1, 9)]
    return RowCol(flipped)
def RotDiagUp (row : list) -> list:
    """Reflect the board along the rising diagonal (king's rook 1 to
    queen's rook 8): an x-axis reflection followed by a 90-degree CCW
    rotation.
    """
    return Rot90(Reflect_x(row))
def RotDiagDn (row : list) -> list:
    """Reflect the board along the falling diagonal (king's rook 8 to
    queen's rook 1): a 90-degree CCW rotation followed by an x-axis
    reflection.
    """
    return Reflect_x(Rot90(row))
def LogAllRotations (board : list):
    """Record *board* and its three 90-degree rotations in AllBoards."""
    work = board[:]
    work[0] = None  # normalise the unused slot so equality tests compare cleanly
    for _ in range(4):
        Log1State(work)
        work = Rot90(work)
def Log1State (board : list):
    """Append a copy of *board* to the global AllBoards unless an equal
    board is already recorded."""
    global AllBoards
    snapshot = board[:]
    if snapshot not in AllBoards:
        AllBoards.append(snapshot)
def LogAllSymmetries (board : list):
    """Record every rotation of *board* and of each of its four
    reflections (x-axis, y-axis, and both diagonals) in AllBoards."""
    work = board[:]
    for variant in (work, Reflect_x(work), Reflect_y(work),
                    RotDiagUp(work), RotDiagDn(work)):
        LogAllRotations(variant)
def Queens8Sym ():
    """Menu wrapper: solve interactively, then display all symmetries."""
    Queens8(False, sym=True)
def SetOfBoards (board : list) -> set:
    """Return the set of all boards equivalent to *board* under symmetry.

    Rebuilds the global AllBoards with *board* plus every rotation and
    reflection of it, then converts each 9-element board (index 0
    unused) into a hashable 8-tuple of its queen rows.

    Bug fix: the slice previously read ``brd[1:8]``, which silently
    dropped the column-8 queen from every tuple; ``brd[1:9]`` covers
    all eight columns.
    """
    global AllBoards
    AllBoards = list()
    AllBoards.append(board)
    LogAllSymmetries(board)
    aSetOfBoards = set()
    for brd in AllBoards:
        aSetOfBoards.add(tuple(brd[1:9]))  # columns 1..8 inclusive
    return aSetOfBoards
def print_answer (a : list):
    """Print one equivalence class as a brace-delimited set, e.g. {1, 4, 5, 8}.

    A value of None means the class was merged into an earlier one, and
    nothing is printed.  (The previous hand-rolled counter printed an
    unclosed "{" with no trailing newline for an empty list; join-based
    formatting prints "{}" instead.)
    """
    if a is None:
        return
    print("{" + ", ".join(str(member) for member in a) + "}")
def Queens8Equivalence ():
    """Partition the 8 first-row solutions into symmetry-equivalence classes.

    Runs Queens8 once per starting row 1..8, expands each returned board
    into its full symmetry set, then merges starting rows whose symmetry
    sets intersect.  One set of starting rows is printed per class
    (no graphics).

    Bug fix: the result of ``Set[i].union(Set[j])`` was previously
    discarded, so a merged class never absorbed its members' boards and
    transitive overlaps could be missed; ``update`` mutates Set[i] in
    place as intended.
    """
    BaseBoards = [None]
    for i in range(1, 9):
        BaseBoards.append(Queens8(False, equiv=i))
    Set = [None]
    for i in range(1, 9):
        Set.append(SetOfBoards(BaseBoards[i]))
    # answers[i] lists the starting rows merged into class i; rows that
    # were absorbed into an earlier class are marked None.
    answers = [None, [1], [2], [3], [4], [5], [6], [7], [8]]
    for i in range(1, 8):
        if answers[i] is None:
            continue
        for j in range(i + 1, 9):
            if Set[i].isdisjoint(Set[j]):
                continue
            answers[i].append(j)
            answers[j] = None
            Set[i].update(Set[j])  # merge in place (union() returned a discarded copy)
            Set[j] = set()
    for i in range(1, 9):
        print_answer(answers[i])
def NoEntry ():
    """Fallback action used by the menu for an unrecognized selection."""
    print("Not an Option, Try again")
# Menu table: single-letter choice -> (callable, description shown to the user).
selection = {'A' : (Queens8Simple, "The orginal Dijkstra and Wirth solution to the 8 queens problem."),
             'B' : (Queens8All, "Do all solutions with no symmetries"),
             'C' : (Queens8Sym, "Like Queens8 only show the symmetries too"),
             'E' : (Queens8Equivalence, "Of the 8 returned by Queens8 as equivalence sets (no graphics)"),
             'Q' : (sys.exit, "To Quit "),
             'X' : (sys.exit, "Same as Q"),
            }
def Go ():
    """Top-level menu loop: list the options, read a choice, dispatch it.

    Unknown choices fall through to NoEntry; 'Q'/'X' exit via sys.exit.
    """
    while True:
        for k, i in selection.items():
            print(k, " :", i[1])
        reply = input("select:").upper()
        func = selection.get(reply, (NoEntry, ))[0]
        func()
if __name__ == "__main__":
    Go()  # run the interactive menu when executed as a script
| 31.218905 | 108 | 0.536733 |
671cde28e9b51b343092f5c1e4b37c656ba4b8b5 | 129 | bzl | Python | tensorflow-yolo-ios/dependencies/tensorflow/core/platform/default/build_config.bzl | initialz/tensorflow-yolo-face-ios | ba74cf39168d0128e91318e65a1b88ce4d65a167 | [
"MIT"
] | 27 | 2017-06-07T19:07:32.000Z | 2020-10-15T10:09:12.000Z | tensorflow-yolo-ios/dependencies/tensorflow/core/platform/default/build_config.bzl | initialz/tensorflow-yolo-face-ios | ba74cf39168d0128e91318e65a1b88ce4d65a167 | [
"MIT"
] | 3 | 2017-08-25T17:39:46.000Z | 2017-11-18T03:40:55.000Z | tensorflow-yolo-ios/dependencies/tensorflow/core/platform/default/build_config.bzl | initialz/tensorflow-yolo-face-ios | ba74cf39168d0128e91318e65a1b88ce4d65a167 | [
"MIT"
] | 10 | 2017-06-16T18:04:45.000Z | 2018-07-05T17:33:01.000Z | version https://git-lfs.github.com/spec/v1
oid sha256:c307af26b4642242493ffcbe32bd060c7f5789fe2299a864159c967f096131e6
size 7038
| 32.25 | 75 | 0.883721 |
49c381b8a3aa751b365c3c603915789ee251b662 | 1,086 | py | Python | 6 kyu - Find the Mine!.py | ricardopizzimenti/Codewars | bbebc3f62642e22109d6a5ac0cdd638bca02e6ca | [
"MIT"
] | null | null | null | 6 kyu - Find the Mine!.py | ricardopizzimenti/Codewars | bbebc3f62642e22109d6a5ac0cdd638bca02e6ca | [
"MIT"
] | null | null | null | 6 kyu - Find the Mine!.py | ricardopizzimenti/Codewars | bbebc3f62642e22109d6a5ac0cdd638bca02e6ca | [
"MIT"
] | null | null | null | """
You've just discovered a square (NxN) field and you notice a warning sign.
The sign states that there's a single bomb in the 2D grid-like field in front of you.
Write a function mineLocation/MineLocation that accepts a 2D array, and returns the location of the mine.
The mine is represented as the integer 1 in the 2D array.
Areas in the 2D array that are not the mine will be represented as 0s.
The location returned should be an array (Tuple<int, int> in C#) where the first element is the row index,
and the second element is the column index of the bomb location (both should be 0 based).
All 2D arrays passed into your function will be square (NxN), and there will only be one mine in the array.
Examples:
mineLocation( [ [1, 0, 0], [0, 0, 0], [0, 0, 0] ] ) => returns [0, 0]
mineLocation( [ [0, 0, 0], [0, 1, 0], [0, 0, 0] ] ) => returns [1, 1]
mineLocation( [ [0, 0, 0], [0, 0, 0], [0, 1, 0] ] ) => returns [2, 1]
"""
def mineLocation(field):
    """Return [row, col] of the single mine (the value 1) in a square 2D grid.

    Uses enumerate instead of ``field.index(row)`` so the answer does not
    rely on the containing row being unique within the grid, and avoids
    the extra O(n) re-scan per row.
    """
    for row_idx, row in enumerate(field):
        if 1 in row:
            return [row_idx, row.index(1)]

print(mineLocation([[1, 0], [0, 0]]))
| 40.222222 | 107 | 0.667587 |
48587ba40b3b94bff0f662bfc0869014b61107b8 | 46,117 | py | Python | certbot/storage.py | cikupamart/certbot | 8f1939c08a318fd64395bc1368eab541904d8d53 | [
"Apache-2.0"
] | 1 | 2021-06-16T19:10:05.000Z | 2021-06-16T19:10:05.000Z | certbot/storage.py | kaduk/certbot | 944d0e05c854e51c0de6bdd44bc00af39e8db672 | [
"Apache-2.0"
] | null | null | null | certbot/storage.py | kaduk/certbot | 944d0e05c854e51c0de6bdd44bc00af39e8db672 | [
"Apache-2.0"
] | null | null | null | """Renewable certificates storage."""
import datetime
import glob
import logging
import os
import re
import shutil
import stat
import configobj
import parsedatetime
import pytz
import six
import certbot
from certbot import cli
from certbot import constants
from certbot import crypto_util
from certbot import error_handler
from certbot import errors
from certbot import util
from certbot.compat import misc
from certbot.plugins import common as plugins_common
from certbot.plugins import disco as plugins_disco
logger = logging.getLogger(__name__)
# The four items (symlinks) that together make up one certificate lineage.
ALL_FOUR = ("cert", "privkey", "chain", "fullchain")
# Filename of the informational README written into live directories.
README = "README"
# Strict version of the running Certbot; compared against the "version"
# field of renewal config files to warn about forward-incompatibility.
CURRENT_VERSION = util.get_strict_version(certbot.__version__)
# 0o600 = owner read/write only; presumably applied to new private key
# files -- the constant is not referenced within this portion of the module.
BASE_PRIVKEY_MODE = 0o600
def renewal_conf_files(config):
    """Collect every renewal configuration file, sorted by path.

    :param certbot.interfaces.IConfig config: Configuration object

    :returns: list of renewal configuration files
    :rtype: `list` of `str`

    """
    pattern = os.path.join(config.renewal_configs_dir, "*.conf")
    return sorted(glob.glob(pattern))
def renewal_file_for_certname(config, certname):
    """Return /path/to/certname.conf in the renewal conf directory"""
    conf_path = os.path.join(
        config.renewal_configs_dir, "{0}.conf".format(certname))
    if os.path.exists(conf_path):
        return conf_path
    # No config on disk for this name: surface it as a storage error.
    raise errors.CertStorageError("No certificate found with name {0} (expected "
                                  "{1}).".format(certname, conf_path))
def cert_path_for_cert_name(config, cert_name):
    """ If `--cert-name` was specified, but you need a value for `--cert-path`.

    :param `configuration.NamespaceConfig` config: parsed command line arguments
    :param str cert_name: cert name.

    :returns: tuple of (path to the fullchain file, its PEM contents)
    :rtype: `tuple` of `str`
    """
    # Resolve the cert's renewal config, then read the fullchain path from it.
    cert_name_implied_conf = renewal_file_for_certname(config, cert_name)
    fullchain_path = configobj.ConfigObj(cert_name_implied_conf)["fullchain"]
    with open(fullchain_path) as f:
        cert_path = (fullchain_path, f.read())
    return cert_path
def config_with_defaults(config=None):
    """Merge supplied config, if provided, on top of builtin defaults.

    :param configobj.ConfigObj config: renewal options to overlay
        (may be None); the input object is not modified
    :returns: a fresh ConfigObj with defaults filled in
    :rtype: configobj.ConfigObj
    """
    defaults_copy = configobj.ConfigObj(constants.RENEWER_DEFAULTS)
    defaults_copy.merge(config if config is not None else configobj.ConfigObj())
    return defaults_copy
def add_time_interval(base_time, interval, textparser=parsedatetime.Calendar()):
    """Parse the time specified time interval, and add it to the base_time

    The interval can be in the English-language format understood by
    parsedatetime, e.g., '10 days', '3 weeks', '6 months', '9 hours', or
    a sequence of such intervals like '6 months 1 week' or '3 days 12
    hours'. If an integer is found with no associated unit, it is
    interpreted by default as a number of days.

    :param datetime.datetime base_time: The time to be added with the interval.
    :param str interval: The time interval to parse.
    :param parsedatetime.Calendar textparser: parser to use; the default
        argument is evaluated once at import time, deliberately sharing a
        single Calendar instance across calls.

    :returns: The base_time plus the interpretation of the time interval.
    :rtype: :class:`datetime.datetime`"""
    # A bare number means days; parsedatetime would not assume a unit.
    if interval.strip().isdigit():
        interval += " days"

    # try to use the same timezone, but fallback to UTC
    tzinfo = base_time.tzinfo or pytz.UTC

    return textparser.parseDT(interval, base_time, tzinfo=tzinfo)[0]
def write_renewal_config(o_filename, n_filename, archive_dir, target, relevant_data):
    """Writes a renewal config file with the specified name and values.

    :param str o_filename: Absolute path to the previous version of config file
    :param str n_filename: Absolute path to the new destination of config file
    :param str archive_dir: Absolute path to the archive directory
    :param dict target: Maps ALL_FOUR to their symlink paths
    :param dict relevant_data: Renewal configuration options to save

    :returns: Configuration object for the new config file
    :rtype: configobj.ConfigObj

    """
    # Start from the old file (if any) so unrelated entries are preserved.
    config = configobj.ConfigObj(o_filename)
    config["version"] = certbot.__version__
    config["archive_dir"] = archive_dir
    for kind in ALL_FOUR:
        config[kind] = target[kind]

    if "renewalparams" not in config:
        config["renewalparams"] = {}
        config.comments["renewalparams"] = ["",
                                            "Options used in "
                                            "the renewal process"]

    config["renewalparams"].update(relevant_data)

    # Drop stale renewal params that are no longer relevant. Iterate over
    # a snapshot of the keys: deleting while iterating the live keys()
    # view raises RuntimeError on Python 3.
    for k in list(config["renewalparams"]):
        if k not in relevant_data:
            del config["renewalparams"][k]

    if "renew_before_expiry" not in config:
        default_interval = constants.RENEWER_DEFAULTS["renew_before_expiry"]
        config.initial_comment = ["renew_before_expiry = " + default_interval]

    # TODO: add human-readable comments explaining other available
    #       parameters
    logger.debug("Writing new config %s.", n_filename)

    # Ensure that the file exists
    open(n_filename, 'a').close()

    # Copy permissions from the old version of the file, if it exists.
    if os.path.exists(o_filename):
        current_permissions = stat.S_IMODE(os.lstat(o_filename).st_mode)
        os.chmod(n_filename, current_permissions)

    with open(n_filename, "wb") as f:
        config.write(outfile=f)
    return config
def rename_renewal_config(prev_name, new_name, cli_config):
    """Renames cli_config.certname's config to cli_config.new_certname.

    :param str prev_name: existing certificate lineage name
    :param str new_name: new certificate lineage name
    :param .NamespaceConfig cli_config: parsed command line
        arguments

    :raises errors.ConfigurationError: if the new name is already in use
        or the rename fails (e.g. invalid filename).
    """
    prev_filename = renewal_filename_for_lineagename(cli_config, prev_name)
    new_filename = renewal_filename_for_lineagename(cli_config, new_name)
    if os.path.exists(new_filename):
        raise errors.ConfigurationError("The new certificate name "
                                        "is already in use.")
    try:
        # Use misc.os_rename (as update_configuration does) for consistent
        # cross-platform rename semantics instead of bare os.rename, which
        # fails on Windows when the destination exists.
        misc.os_rename(prev_filename, new_filename)
    except OSError:
        raise errors.ConfigurationError("Please specify a valid filename "
                                        "for the new certificate name.")
def update_configuration(lineagename, archive_dir, target, cli_config):
    """Modifies lineagename's config to contain the specified values.

    :param str lineagename: Name of the lineage being modified
    :param str archive_dir: Absolute path to the archive directory
    :param dict target: Maps ALL_FOUR to their symlink paths
    :param .NamespaceConfig cli_config: parsed command line
        arguments

    :returns: Configuration object for the updated config file
    :rtype: configobj.ConfigObj

    """
    config_filename = renewal_filename_for_lineagename(cli_config, lineagename)
    temp_filename = config_filename + ".new"

    # If an existing tempfile exists, delete it
    if os.path.exists(temp_filename):
        os.unlink(temp_filename)

    # Save only the config items that are relevant to renewal
    values = relevant_values(vars(cli_config.namespace))
    # Write to a temp file first, then rename over the original, so a crash
    # mid-write cannot leave a truncated renewal config behind.
    write_renewal_config(config_filename, temp_filename, archive_dir, target, values)
    misc.os_rename(temp_filename, config_filename)

    return configobj.ConfigObj(config_filename)
def get_link_target(link):
    """Resolve a symbolic link to an absolute path.

    :param str link: Path to a symbolic link

    :returns: Absolute path to the target of link
    :rtype: str

    :raises .CertStorageError: If link does not exists.

    """
    try:
        destination = os.readlink(link)
    except OSError:
        raise errors.CertStorageError(
            "Expected {0} to be a symlink".format(link))
    # A relative link target is interpreted relative to the link's own
    # directory before being normalized to an absolute path.
    if os.path.isabs(destination):
        return os.path.abspath(destination)
    return os.path.abspath(os.path.join(os.path.dirname(link), destination))
def _write_live_readme_to(readme_path, is_base_dir=False):
    # Write the informational README into a live directory. When
    # is_base_dir=True (the top-level live/ directory), file references
    # are prefixed with "[cert name]/" since the files live one level down.
    prefix = ""
    if is_base_dir:
        prefix = "[cert name]/"
    with open(readme_path, "w") as f:
        logger.debug("Writing README to %s.", readme_path)
        f.write("This directory contains your keys and certificates.\n\n"
                "`{prefix}privkey.pem` : the private key for your certificate.\n"
                "`{prefix}fullchain.pem`: the certificate file used in most server software.\n"
                "`{prefix}chain.pem` : used for OCSP stapling in Nginx >=1.3.7.\n"
                "`{prefix}cert.pem` : will break many server configurations, and "
                "should not be used\n"
                " without reading further documentation (see link below).\n\n"
                "WARNING: DO NOT MOVE OR RENAME THESE FILES!\n"
                " Certbot expects these files to remain in this location in order\n"
                " to function properly!\n\n"
                "We recommend not moving these files. For more information, see the Certbot\n"
                "User Guide at https://certbot.eff.org/docs/using.html#where-are-my-"
                "certificates.\n".format(prefix=prefix))
def _relevant(namespaces, option):
    """
    Is this option one that could be restored for future renewal purposes?

    :param namespaces: plugin namespaces for configuration options
    :type namespaces: `list` of `str`
    :param str option: the name of the option

    :rtype: bool
    """
    # Imported here (not at module level) to avoid a circular import.
    from certbot import renewal

    if option in renewal.CONFIG_ITEMS:
        return True
    return any(option.startswith(namespace) for namespace in namespaces)
def relevant_values(all_values):
    """Return a new dict containing only items relevant for renewal.

    :param dict all_values: The original values.

    :returns: A new dictionary containing items that can be used in renewal.
    :rtype dict:

    """
    plugins = plugins_disco.PluginsRegistry.find_all()
    namespaces = [plugins_common.dest_namespace(plugin) for plugin in plugins]
    # Keep only options that matter for renewal AND that were explicitly
    # set on the command line (defaults are not persisted).
    rv = dict(
        (option, value)
        for option, value in six.iteritems(all_values)
        if _relevant(namespaces, option) and cli.option_was_set(option, value))
    # We always save the server value to help with forward compatibility
    # and behavioral consistency when versions of Certbot with different
    # server defaults are used.
    rv["server"] = all_values["server"]
    return rv
def lineagename_for_filename(config_filename):
    """Derive the lineage name from a renewal config file path.

    Strips the directory part and the trailing ".conf" suffix.

    :raises .CertStorageError: if the filename does not end in ".conf"
    """
    suffix = ".conf"
    if not config_filename.endswith(suffix):
        raise errors.CertStorageError(
            "renewal config file name must end in .conf")
    return os.path.basename(config_filename[:-len(suffix)])
def renewal_filename_for_lineagename(config, lineagename):
    """Returns the renewal configuration file path for a lineagename.

    The result is ``<renewal_configs_dir>/<lineagename>.conf``; the file
    is not guaranteed to exist. (The previous docstring described the
    inverse mapping; see lineagename_for_filename for that.)
    """
    return os.path.join(config.renewal_configs_dir, lineagename) + ".conf"
def _relpath_from_file(archive_dir, from_file):
"""Path to a directory from a file"""
return os.path.relpath(archive_dir, os.path.dirname(from_file))
def full_archive_path(config_obj, cli_config, lineagename):
    """Returns the full archive path for a lineagename

    Uses cli_config to determine archive path if not available from config_obj.

    :param configobj.ConfigObj config_obj: Renewal conf file contents (can be None)
    :param configuration.NamespaceConfig cli_config: Main config file
    :param str lineagename: Certificate name
    """
    # Fall back to the default archive location unless the renewal config
    # explicitly records an archive_dir.
    if not config_obj or "archive_dir" not in config_obj:
        return os.path.join(cli_config.default_archive_dir, lineagename)
    return config_obj["archive_dir"]
def _full_live_path(cli_config, lineagename):
    """Returns the full default live path for a lineagename"""
    # <live_dir>/<lineagename>, e.g. /etc/letsencrypt/live/example.com
    return os.path.join(cli_config.live_dir, lineagename)
def delete_files(config, certname):
    """Delete all files related to the certificate.

    If some files are not found, ignore them and continue.

    :param configuration.NamespaceConfig config: parsed command line arguments
    :param str certname: the certificate lineage to delete

    :raises .CertStorageError: if the renewal config file is missing, or
        cannot be parsed (the config file is still removed in that case).
    """
    renewal_filename = renewal_file_for_certname(config, certname)
    # file exists
    full_default_archive_dir = full_archive_path(None, config, certname)
    full_default_live_dir = _full_live_path(config, certname)
    try:
        renewal_config = configobj.ConfigObj(renewal_filename)
    except configobj.ConfigObjError:
        # config is corrupted
        logger.warning("Could not parse %s. You may wish to manually "
                       "delete the contents of %s and %s.", renewal_filename,
                       full_default_live_dir, full_default_archive_dir)
        raise errors.CertStorageError(
            "error parsing {0}".format(renewal_filename))
    finally:
        # The finally block runs on success too: the renewal config file
        # is always removed, whether or not it parsed.
        os.remove(renewal_filename)
        logger.debug("Removed %s", renewal_filename)

    # cert files and (hopefully) live directory
    # it's not guaranteed that the files are in our default storage
    # structure. so, first delete the cert files.
    directory_names = set()
    for kind in ALL_FOUR:
        link = renewal_config.get(kind)
        try:
            os.remove(link)
            logger.debug("Removed %s", link)
        except OSError:
            logger.debug("Unable to delete %s", link)
        directory = os.path.dirname(link)
        directory_names.add(directory)

    # if all four were in the same directory, and the only thing left
    # is the README file (or nothing), delete that directory.
    # this will be wrong in very few but some cases.
    if len(directory_names) == 1:
        # delete the README file
        directory = directory_names.pop()
        readme_path = os.path.join(directory, README)
        try:
            os.remove(readme_path)
            logger.debug("Removed %s", readme_path)
        except OSError:
            logger.debug("Unable to delete %s", readme_path)
        # if it's now empty, delete the directory
        try:
            os.rmdir(directory)  # only removes empty directories
            logger.debug("Removed %s", directory)
        except OSError:
            logger.debug("Unable to remove %s; may not be empty.", directory)

    # archive directory
    try:
        archive_path = full_archive_path(renewal_config, config, certname)
        shutil.rmtree(archive_path)
        logger.debug("Removed %s", archive_path)
    except OSError:
        logger.debug("Unable to remove %s", archive_path)
class RenewableCert(object):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Renewable certificate.
Represents a lineage of certificates that is under the management of
Certbot, indicated by the existence of an associated renewal
configuration file.
Note that the notion of "current version" for a lineage is
maintained on disk in the structure of symbolic links, and is not
explicitly stored in any instance variable in this object. The
RenewableCert object is able to determine information about the
current (or other) version by accessing data on disk, but does not
inherently know any of this information except by examining the
symbolic links as needed. The instance variables mentioned below
point to symlinks that reflect the notion of "current version" of
each managed object, and it is these paths that should be used when
configuring servers to use the certificate managed in a lineage.
These paths are normally within the "live" directory, and their
symlink targets -- the actual cert files -- are normally found
within the "archive" directory.
:ivar str cert: The path to the symlink representing the current
version of the certificate managed by this lineage.
:ivar str privkey: The path to the symlink representing the current
version of the private key managed by this lineage.
:ivar str chain: The path to the symlink representing the current version
of the chain managed by this lineage.
:ivar str fullchain: The path to the symlink representing the
current version of the fullchain (combined chain and cert)
managed by this lineage.
:ivar configobj.ConfigObj configuration: The renewal configuration
options associated with this lineage, obtained from parsing the
renewal configuration file and/or systemwide defaults.
"""
    def __init__(self, config_filename, cli_config, update_symlinks=False):
        """Instantiate a RenewableCert object from an existing lineage.

        :param str config_filename: the path to the renewal config file
            that defines this lineage.
        :param .NamespaceConfig: parsed command line arguments
        :param bool update_symlinks: if True, re-point the live symlinks
            into the configured archive directory before validating them.

        :raises .CertStorageError: if the configuration file's name didn't end
            in ".conf", or the file is missing or broken.

        """
        self.cli_config = cli_config
        self.lineagename = lineagename_for_filename(config_filename)

        # self.configuration should be used to read parameters that
        # may have been chosen based on default values from the
        # systemwide renewal configuration; self.configfile should be
        # used to make and save changes.
        try:
            self.configfile = configobj.ConfigObj(config_filename)
        except configobj.ConfigObjError:
            raise errors.CertStorageError(
                "error parsing {0}".format(config_filename))
        # TODO: Do we actually use anything from defaults and do we want to
        #       read further defaults from the systemwide renewal configuration
        #       file at this stage?
        self.configuration = config_with_defaults(self.configfile)

        # All four symlink entries must be present in the renewal config.
        if not all(x in self.configuration for x in ALL_FOUR):
            raise errors.CertStorageError(
                "renewal config file {0} is missing a required "
                "file reference".format(self.configfile))

        # Warn (but proceed) if the config was written by a newer Certbot.
        conf_version = self.configuration.get("version")
        if (conf_version is not None and
                util.get_strict_version(conf_version) > CURRENT_VERSION):
            logger.info(
                "Attempting to parse the version %s renewal configuration "
                "file found at %s with version %s of Certbot. This might not "
                "work.", conf_version, config_filename, certbot.__version__)

        self.cert = self.configuration["cert"]
        self.privkey = self.configuration["privkey"]
        self.chain = self.configuration["chain"]
        self.fullchain = self.configuration["fullchain"]
        self.live_dir = os.path.dirname(self.cert)

        # Recover from any interrupted version update, then validate.
        self._fix_symlinks()
        if update_symlinks:
            self._update_symlinks()
        self._check_symlinks()
    @property
    def key_path(self):
        """Duck type for self.privkey

        :returns: path to the current private key symlink
        :rtype: str
        """
        return self.privkey
    @property
    def cert_path(self):
        """Duck type for self.cert

        :returns: path to the current certificate symlink
        :rtype: str
        """
        return self.cert
    @property
    def chain_path(self):
        """Duck type for self.chain

        :returns: path to the current chain symlink
        :rtype: str
        """
        return self.chain
    @property
    def fullchain_path(self):
        """Duck type for self.fullchain

        :returns: path to the current fullchain symlink
        :rtype: str
        """
        return self.fullchain
    @property
    def target_expiry(self):
        """The current target certificate's expiration datetime

        Reads the certificate file currently pointed to by the "cert"
        symlink.

        :returns: Expiration datetime of the current target certificate
        :rtype: :class:`datetime.datetime`
        """
        return crypto_util.notAfter(self.current_target("cert"))
    @property
    def archive_dir(self):
        """Returns the default or specified archive directory

        :rtype: str
        """
        return full_archive_path(self.configuration,
                                 self.cli_config, self.lineagename)
    def relative_archive_dir(self, from_file):
        """Returns the default or specified archive directory as a relative path

        Used for creating symbolic links.

        :param str from_file: path the result should be relative to
        :rtype: str
        """
        return _relpath_from_file(self.archive_dir, from_file)
@property
def is_test_cert(self):
"""Returns true if this is a test cert from a staging server."""
server = self.configuration["renewalparams"].get("server", None)
if server:
return util.is_staging(server)
else:
return False
    def _check_symlinks(self):
        """Raises an exception if a symlink doesn't exist.

        Verifies that all four lineage items are symlinks and that each
        symlink's target exists on disk.

        :raises .CertStorageError: if any item is not a symlink or its
            target is missing.
        """
        for kind in ALL_FOUR:
            link = getattr(self, kind)
            if not os.path.islink(link):
                raise errors.CertStorageError(
                    "expected {0} to be a symlink".format(link))
            target = get_link_target(link)
            if not os.path.exists(target):
                raise errors.CertStorageError("target {0} of symlink {1} does "
                                              "not exist".format(target, link))
    def _update_symlinks(self):
        """Updates symlinks to use archive_dir

        Re-points each of the four live symlinks at the same-named file
        inside the configured archive directory, using a relative target.
        """
        for kind in ALL_FOUR:
            link = getattr(self, kind)
            previous_link = get_link_target(link)
            new_link = os.path.join(self.relative_archive_dir(link),
                                    os.path.basename(previous_link))

            os.unlink(link)
            os.symlink(new_link, link)
    def _consistent(self):
        """Are the files associated with this lineage self-consistent?

        :returns: Whether the files stored in connection with this
            lineage appear to be correct and consistent with one
            another.
        :rtype: bool

        """
        # Each element must be referenced with an absolute path
        for x in (self.cert, self.privkey, self.chain, self.fullchain):
            if not os.path.isabs(x):
                logger.debug("Element %s is not referenced with an "
                             "absolute path.", x)
                return False

        # Each element must exist and be a symbolic link
        for x in (self.cert, self.privkey, self.chain, self.fullchain):
            if not os.path.islink(x):
                logger.debug("Element %s is not a symbolic link.", x)
                return False
        for kind in ALL_FOUR:
            link = getattr(self, kind)
            # NOTE(review): get_link_target can raise CertStorageError if the
            # link vanishes between the islink check above and here.
            target = get_link_target(link)

            # Each element's link must point within the cert lineage's
            # directory within the official archive directory
            if not os.path.samefile(os.path.dirname(target), self.archive_dir):
                logger.debug("Element's link does not point within the "
                             "cert lineage's directory within the "
                             "official archive directory. Link: %s, "
                             "target directory: %s, "
                             "archive directory: %s. If you've specified "
                             "the archive directory in the renewal configuration "
                             "file, you may need to update links by running "
                             "certbot update_symlinks.",
                             link, os.path.dirname(target), self.archive_dir)
                return False

            # The link must point to a file that exists
            if not os.path.exists(target):
                logger.debug("Link %s points to file %s that does not exist.",
                             link, target)
                return False

            # The link must point to a file that follows the archive
            # naming convention
            pattern = re.compile(r"^{0}([0-9]+)\.pem$".format(kind))
            if not pattern.match(os.path.basename(target)):
                logger.debug("%s does not follow the archive naming "
                             "convention.", target)
                return False

            # It is NOT required that the link's target be a regular
            # file (it may itself be a symlink). But we should probably
            # do a recursive check that ultimately the target does
            # exist?
        # XXX: Additional possible consistency checks (e.g.
        #      cryptographic validation of the chain being a chain,
        #      the chain matching the cert, and the cert matching
        #      the subject key)
        # XXX: All four of the targets are in the same directory
        #      (This check is redundant with the check that they
        #      are all in the desired directory!)
        #      len(set(os.path.basename(self.current_target(x)
        #      for x in ALL_FOUR))) == 1
        return True
    def _fix(self):
        """Attempt to fix defects or inconsistencies in this lineage.

        .. todo:: Currently unimplemented.

        """
        # TODO: Figure out what kinds of fixes are possible.  For
        #       example, checking if there is a valid version that
        #       we can update the symlinks to.  (Maybe involve
        #       parsing keys and certs to see if they exist and
        #       if a key corresponds to the subject key of a cert?)

    # TODO: In general, the symlink-reading functions below are not
    #       cautious enough about the possibility that links or their
    #       targets may not exist.  (This shouldn't happen, but might
    #       happen as a result of random tampering by a sysadmin, or
    #       filesystem errors, or crashes.)
def _previous_symlinks(self):
"""Returns the kind and path of all symlinks used in recovery.
:returns: list of (kind, symlink) tuples
:rtype: list
"""
previous_symlinks = []
for kind in ALL_FOUR:
link_dir = os.path.dirname(getattr(self, kind))
link_base = "previous_{0}.pem".format(kind)
previous_symlinks.append((kind, os.path.join(link_dir, link_base)))
return previous_symlinks
    def _fix_symlinks(self):
        """Fixes symlinks in the event of an incomplete version update.

        If there is no problem with the current symlinks, this function
        has no effect.

        """
        previous_symlinks = self._previous_symlinks()
        # Only roll back when ALL four recovery links survive -- otherwise
        # the recovery data is incomplete and must not be trusted.
        if all(os.path.exists(link[1]) for link in previous_symlinks):
            for kind, previous_link in previous_symlinks:
                current_link = getattr(self, kind)
                if os.path.lexists(current_link):
                    os.unlink(current_link)
                os.symlink(os.readlink(previous_link), current_link)

        # Recovery links are removed whether or not a rollback happened.
        for _, link in previous_symlinks:
            if os.path.exists(link):
                os.unlink(link)
    def current_target(self, kind):
        """Returns full path to which the specified item currently points.

        :param str kind: the lineage member item ("cert", "privkey",
            "chain", or "fullchain")

        :returns: The path to the current version of the specified
            member, or None if the symlink is missing.
        :rtype: str or None

        :raises .CertStorageError: if kind is not one of ALL_FOUR.
        """
        if kind not in ALL_FOUR:
            raise errors.CertStorageError("unknown kind of item")
        link = getattr(self, kind)
        if not os.path.exists(link):
            logger.debug("Expected symlink %s for %s does not exist.",
                         link, kind)
            return None
        return get_link_target(link)
    def current_version(self, kind):
        """Returns numerical version of the specified item.

        For example, if kind is "chain" and the current chain link
        points to a file named "chain7.pem", returns the integer 7.

        :param str kind: the lineage member item ("cert", "privkey",
            "chain", or "fullchain")

        :returns: the current version of the specified member, or None
            if the target does not follow the archive naming convention.
        :rtype: int

        """
        if kind not in ALL_FOUR:
            raise errors.CertStorageError("unknown kind of item")
        pattern = re.compile(r"^{0}([0-9]+)\.pem$".format(kind))
        target = self.current_target(kind)
        if target is None or not os.path.exists(target):
            logger.debug("Current-version target for %s "
                         "does not exist at %s.", kind, target)
            # Empty string cannot match the pattern, so we fall through
            # to the "no match" branch below and return None.
            target = ""
        matches = pattern.match(os.path.basename(target))
        if matches:
            return int(matches.groups()[0])
        else:
            logger.debug("No matches for target %s.", kind)
            return None
    def version(self, kind, version):
        """The filename that corresponds to the specified version and kind.

        .. warning:: The specified version may not exist in this
           lineage. There is no guarantee that the file path returned
           by this method actually exists.

        :param str kind: the lineage member item ("cert", "privkey",
            "chain", or "fullchain")
        :param int version: the desired version

        :returns: The path to the specified version of the specified member.
        :rtype: str

        """
        if kind not in ALL_FOUR:
            raise errors.CertStorageError("unknown kind of item")
        # The versioned file lives in the same directory as the current one.
        where = os.path.dirname(self.current_target(kind))
        return os.path.join(where, "{0}{1}.pem".format(kind, version))
    def available_versions(self, kind):
        """Which alternative versions of the specified kind of item exist?

        The archive directory where the current version is stored is
        consulted to obtain the list of alternatives.

        :param str kind: the lineage member item (
            ``cert``, ``privkey``, ``chain``, or ``fullchain``)

        :returns: all of the version numbers that currently exist
        :rtype: `list` of `int`

        """
        if kind not in ALL_FOUR:
            raise errors.CertStorageError("unknown kind of item")
        where = os.path.dirname(self.current_target(kind))
        files = os.listdir(where)
        # Extract the numeric suffix from e.g. "cert12.pem".
        pattern = re.compile(r"^{0}([0-9]+)\.pem$".format(kind))
        matches = [pattern.match(f) for f in files]
        return sorted([int(m.groups()[0]) for m in matches if m])
    def newest_available_version(self, kind):
        """Newest available version of the specified kind of item?

        :param str kind: the lineage member item (``cert``,
            ``privkey``, ``chain``, or ``fullchain``)

        :returns: the newest available version of this member
        :rtype: int

        """
        return max(self.available_versions(kind))
    def latest_common_version(self):
        """Newest version for which all items are available?

        :returns: the newest available version for which all members
            (``cert, ``privkey``, ``chain``, and ``fullchain``) exist
        :rtype: int

        """
        # TODO: this can raise CertStorageError if there is no version overlap
        #       (it should probably return None instead)
        # TODO: this can raise a spurious AttributeError if the current
        #       link for any kind is missing (it should probably return None)
        versions = [self.available_versions(x) for x in ALL_FOUR]
        return max(n for n in versions[0] if all(n in v for v in versions[1:]))
    def next_free_version(self):
        """Smallest version newer than all full or partial versions?

        :returns: the smallest version number that is larger than any
            version of any item currently stored in this lineage
        :rtype: int

        """
        # TODO: consider locking/mutual exclusion between updating processes
        # This isn't self.latest_common_version() + 1 because we don't want
        # collide with a version that might exist for one file type but not
        # for the others.
        return max(self.newest_available_version(x) for x in ALL_FOUR) + 1
    def ensure_deployed(self):
        """Make sure we've deployed the latest version.

        :returns: False if a change was needed, True otherwise
        :rtype: bool

        May need to recover from rare interrupted / crashed states."""

        if self.has_pending_deployment():
            logger.warning("Found a new cert /archive/ that was not linked to in /live/; "
                           "fixing...")
            self.update_all_links_to(self.latest_common_version())
            return False
        return True
    def has_pending_deployment(self):
        """Is there a later version of all of the managed items?

        :returns: ``True`` if there is a complete version of this
            lineage with a larger version number than the current
            version, and ``False`` otherwise
        :rtype: bool

        """
        # TODO: consider whether to assume consistency or treat
        #       inconsistent/consistent versions differently
        # NOTE(review): current_version can return None (broken/missing
        # link), which would make min() fail here -- confirm callers only
        # invoke this on a consistent lineage.
        smallest_current = min(self.current_version(x) for x in ALL_FOUR)
        return smallest_current < self.latest_common_version()
    def _update_link_to(self, kind, version):
        """Make the specified item point at the specified version.

        (Note that this method doesn't verify that the specified version
        exists.)

        :param str kind: the lineage member item ("cert", "privkey",
            "chain", or "fullchain")
        :param int version: the desired version

        """
        if kind not in ALL_FOUR:
            raise errors.CertStorageError("unknown kind of item")
        link = getattr(self, kind)
        filename = "{0}{1}.pem".format(kind, version)
        # Relative rather than absolute target directory
        target_directory = os.path.dirname(os.readlink(link))
        # TODO: it could be safer to make the link first under a temporary
        #       filename, then unlink the old link, then rename the new link
        #       to the old link; this ensures that this process is able to
        #       create symlinks.
        # TODO: we might also want to check consistency of related links
        #       for the other corresponding items
        os.unlink(link)
        os.symlink(os.path.join(target_directory, filename), link)
    def update_all_links_to(self, version):
        """Change all member objects to point to the specified version.

        :param int version: the desired version

        """
        with error_handler.ErrorHandler(self._fix_symlinks):
            # Create "previous_<kind>.pem" recovery links first so that
            # _fix_symlinks can roll back if we crash mid-update.
            previous_links = self._previous_symlinks()
            for kind, link in previous_links:
                os.symlink(self.current_target(kind), link)

            for kind in ALL_FOUR:
                self._update_link_to(kind, version)

            # Success: discard the recovery links.
            for _, link in previous_links:
                os.unlink(link)
    def names(self, version=None):
        """What are the subject names of this certificate?

        (If no version is specified, use the current version.)

        :param int version: the desired version number
        :returns: the subject names
        :rtype: `list` of `str`
        :raises .CertStorageError: if could not find cert file.

        """
        if version is None:
            target = self.current_target("cert")
        else:
            target = self.version("cert", version)
        if target is None:
            raise errors.CertStorageError("could not find cert file")
        with open(target) as f:
            return crypto_util.get_names_from_cert(f.read())
    def ocsp_revoked(self, version=None):
        # pylint: disable=no-self-use,unused-argument
        """Is the specified cert version revoked according to OCSP?

        Also returns True if the cert version is declared as intended
        to be revoked according to Let's Encrypt OCSP extensions.
        (If no version is specified, uses the current version.)

        This method is not yet implemented and currently always returns
        False.

        :param int version: the desired version number

        :returns: whether the certificate is or will be revoked
        :rtype: bool

        """
        # XXX: This query and its associated network service aren't
        # implemented yet, so we currently return False (indicating that the
        # certificate is not revoked).
        return False
def autorenewal_is_enabled(self):
"""Is automatic renewal enabled for this cert?
If autorenew is not specified, defaults to True.
:returns: True if automatic renewal is enabled
:rtype: bool
"""
return ("autorenew" not in self.configuration["renewalparams"] or
self.configuration["renewalparams"].as_bool("autorenew"))
    def should_autorenew(self):
        """Should we now try to autorenew the most recent cert version?

        This is a policy question and does not only depend on whether
        the cert is expired. (This considers whether autorenewal is
        enabled, whether the cert is revoked, and whether the time
        interval for autorenewal has been reached.)

        Note that this examines the numerically most recent cert version,
        not the currently deployed version.

        :returns: whether an attempt should now be made to autorenew the
            most current cert version in this lineage
        :rtype: bool

        """
        if self.autorenewal_is_enabled():
            # Consider whether to attempt to autorenew this cert now

            # Renewals on the basis of revocation
            # (ocsp_revoked is currently a stub that always returns False.)
            if self.ocsp_revoked(self.latest_common_version()):
                logger.debug("Should renew, certificate is revoked.")
                return True

            # Renews some period before expiry time
            default_interval = constants.RENEWER_DEFAULTS["renew_before_expiry"]
            interval = self.configuration.get("renew_before_expiry", default_interval)
            expiry = crypto_util.notAfter(self.version(
                "cert", self.latest_common_version()))
            # Compare timezone-aware datetimes: "now" is made UTC-aware.
            now = pytz.UTC.fromutc(datetime.datetime.utcnow())
            if expiry < add_time_interval(now, interval):
                logger.debug("Should renew, less than %s before certificate "
                             "expiry %s.", interval,
                             expiry.strftime("%Y-%m-%d %H:%M:%S %Z"))
                return True
        return False
@classmethod
def new_lineage(cls, lineagename, cert, privkey, chain, cli_config):
    # pylint: disable=too-many-locals
    """Create a new certificate lineage.

    Attempts to create a certificate lineage -- enrolled for
    potential future renewal -- with the (suggested) lineage name
    lineagename, and the associated cert, privkey, and chain (the
    associated fullchain will be created automatically). Optional
    configurator and renewalparams record the configuration that was
    originally used to obtain this cert, so that it can be reused
    later during automated renewal.

    Returns a new RenewableCert object referring to the created
    lineage. (The actual lineage name, as well as all the relevant
    file paths, will be available within this object.)

    :param str lineagename: the suggested name for this lineage
        (normally the current cert's first subject DNS name)
    :param str cert: the initial certificate version in PEM format
    :param str privkey: the private key in PEM format
    :param str chain: the certificate chain in PEM format
    :param .NamespaceConfig cli_config: parsed command line
        arguments

    :returns: the newly-created RenewalCert object
    :rtype: :class:`storage.renewableCert`
    """
    # Examine the configuration and find the new lineage's name.
    # Top-level directories are created with restrictive 0700 permissions.
    for i in (cli_config.renewal_configs_dir, cli_config.default_archive_dir,
              cli_config.live_dir):
        if not os.path.exists(i):
            os.makedirs(i, 0o700)
            logger.debug("Creating directory %s.", i)
    # unique_lineage_name may alter the name to avoid collisions; it also
    # opens (and thereby reserves) the renewal config file.
    config_file, config_filename = util.unique_lineage_name(
        cli_config.renewal_configs_dir, lineagename)
    base_readme_path = os.path.join(cli_config.live_dir, README)
    if not os.path.exists(base_readme_path):
        _write_live_readme_to(base_readme_path, is_base_dir=True)

    # Determine where on disk everything will go
    # lineagename will now potentially be modified based on which
    # renewal configuration file could actually be created
    lineagename = lineagename_for_filename(config_filename)
    archive = full_archive_path(None, cli_config, lineagename)
    live_dir = _full_live_path(cli_config, lineagename)
    # Refuse to clobber an existing lineage; close the reserved config
    # file before raising so it is not leaked.
    if os.path.exists(archive):
        config_file.close()
        raise errors.CertStorageError(
            "archive directory exists for " + lineagename)
    if os.path.exists(live_dir):
        config_file.close()
        raise errors.CertStorageError(
            "live directory exists for " + lineagename)
    os.mkdir(archive)
    os.mkdir(live_dir)
    logger.debug("Archive directory %s and live "
                 "directory %s created.", archive, live_dir)

    # Put the data into the appropriate files on disk: live/ holds
    # symlinks pointing at version-1 files in archive/.
    target = dict([(kind, os.path.join(live_dir, kind + ".pem"))
                   for kind in ALL_FOUR])
    archive_target = dict([(kind, os.path.join(archive, kind + "1.pem"))
                           for kind in ALL_FOUR])
    for kind in ALL_FOUR:
        os.symlink(_relpath_from_file(archive_target[kind], target[kind]), target[kind])
    with open(target["cert"], "wb") as f:
        logger.debug("Writing certificate to %s.", target["cert"])
        f.write(cert)
    # The private key is written through safe_open so it is created with
    # restricted permissions from the start.
    with util.safe_open(archive_target["privkey"], "wb", chmod=BASE_PRIVKEY_MODE) as f:
        logger.debug("Writing private key to %s.", target["privkey"])
        f.write(privkey)
        # XXX: Let's make sure to get the file permissions right here
    with open(target["chain"], "wb") as f:
        logger.debug("Writing chain to %s.", target["chain"])
        f.write(chain)
    with open(target["fullchain"], "wb") as f:
        # assumes that OpenSSL.crypto.dump_certificate includes
        # ending newline character
        logger.debug("Writing full chain to %s.", target["fullchain"])
        f.write(cert + chain)

    # Write a README file to the live directory
    readme_path = os.path.join(live_dir, README)
    _write_live_readme_to(readme_path)

    # Document what we've done in a new renewal config file
    config_file.close()

    # Save only the config items that are relevant to renewal
    values = relevant_values(vars(cli_config.namespace))

    new_config = write_renewal_config(config_filename, config_filename, archive,
                                      target, values)
    return cls(new_config.filename, cli_config)
def save_successor(self, prior_version, new_cert,
                   new_privkey, new_chain, cli_config):
    """Save new cert and chain as a successor of a prior version.

    Returns the new version number that was created.

    .. note:: this function does NOT update links to deploy this
        version

    :param int prior_version: the old version to which this version
        is regarded as a successor (used to choose a privkey, if the
        key has not changed, but otherwise this information is not
        permanently recorded anywhere)
    :param bytes new_cert: the new certificate, in PEM format
    :param bytes new_privkey: the new private key, in PEM format,
        or ``None``, if the private key has not changed
    :param bytes new_chain: the new chain, in PEM format
    :param .NamespaceConfig cli_config: parsed command line
        arguments

    :returns: the new version number that was created
    :rtype: int
    """
    # XXX: assumes official archive location rather than examining links
    # XXX: consider using os.open for availability of os.O_EXCL
    # XXX: ensure file permissions are correct; also create directories
    #      if needed (ensuring their permissions are correct)

    # Figure out what the new version is and hence where to save things
    self.cli_config = cli_config
    target_version = self.next_free_version()
    target = dict(
        [(kind,
          os.path.join(self.archive_dir, "{0}{1}.pem".format(kind, target_version)))
         for kind in ALL_FOUR])

    old_privkey = os.path.join(
        self.archive_dir, "privkey{0}.pem".format(prior_version))

    # Distinguish the cases where the privkey has changed and where it
    # has not changed (in the latter case, making an appropriate symlink
    # to an earlier privkey version)
    if new_privkey is None:
        # The behavior below keeps the prior key by creating a new
        # symlink to the old key or the target of the old key symlink.
        if os.path.islink(old_privkey):
            old_privkey = os.readlink(old_privkey)
        else:
            old_privkey = "privkey{0}.pem".format(prior_version)
        logger.debug("Writing symlink to old private key, %s.", old_privkey)
        os.symlink(old_privkey, target["privkey"])
    else:
        # New key: write it with restrictive base permissions first ...
        with util.safe_open(target["privkey"], "wb", chmod=BASE_PRIVKEY_MODE) as f:
            logger.debug("Writing new private key to %s.", target["privkey"])
            f.write(new_privkey)
        # ... then preserve gid and (mode & 074) from previous privkey
        # in this lineage, so group read access carries over.
        old_mode = stat.S_IMODE(os.stat(old_privkey).st_mode) & \
                   (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | \
                    stat.S_IROTH)
        mode = BASE_PRIVKEY_MODE | old_mode
        os.chown(target["privkey"], -1, os.stat(old_privkey).st_gid)
        os.chmod(target["privkey"], mode)

    # Save everything else
    with open(target["cert"], "wb") as f:
        logger.debug("Writing certificate to %s.", target["cert"])
        f.write(new_cert)
    with open(target["chain"], "wb") as f:
        logger.debug("Writing chain to %s.", target["chain"])
        f.write(new_chain)
    with open(target["fullchain"], "wb") as f:
        logger.debug("Writing full chain to %s.", target["fullchain"])
        f.write(new_cert + new_chain)

    symlinks = dict((kind, self.configuration[kind]) for kind in ALL_FOUR)
    # Update renewal config file and re-read it so self.configuration
    # reflects what was just written.
    self.configfile = update_configuration(
        self.lineagename, self.archive_dir, symlinks, cli_config)
    self.configuration = config_with_defaults(self.configfile)

    return target_version
| 40.595951 | 95 | 0.643299 |
38ec501b5f7faa8c9b5ad354bfa085947bf20809 | 43 | py | Python | src/alchemical/__init__.py | mindflayer/alchemical | c269030b123d870f8da767ff699b5e5e2d28f9d6 | [
"MIT"
] | 47 | 2021-05-31T00:18:10.000Z | 2022-03-23T01:34:30.000Z | src/alchemical/__init__.py | mindflayer/alchemical | c269030b123d870f8da767ff699b5e5e2d28f9d6 | [
"MIT"
] | 4 | 2021-05-31T05:07:12.000Z | 2022-03-01T17:27:46.000Z | src/alchemical/__init__.py | mindflayer/alchemical | c269030b123d870f8da767ff699b5e5e2d28f9d6 | [
"MIT"
] | 1 | 2022-02-24T11:35:02.000Z | 2022-02-24T11:35:02.000Z | from .core import Alchemical # noqa: F401
| 21.5 | 42 | 0.744186 |
ea6ebe0d6337c60a377e5268a295a31b1acf4dbb | 16,097 | py | Python | gammapy/estimators/points/tests/test_sed.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 1 | 2017-11-22T17:07:56.000Z | 2017-11-22T17:07:56.000Z | gammapy/estimators/points/tests/test_sed.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | null | null | null | gammapy/estimators/points/tests/test_sed.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 1 | 2019-09-04T14:03:33.000Z | 2019-09-04T14:03:33.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.coordinates import SkyCoord
from gammapy.data import Observation
from gammapy.datasets import MapDataset, SpectrumDatasetOnOff
from gammapy.estimators import FluxPoints, FluxPointsEstimator
from gammapy.irf import EDispKernelMap, EffectiveAreaTable2D, load_cta_irfs
from gammapy.makers import MapDatasetMaker
from gammapy.makers.utils import make_map_exposure_true_energy
from gammapy.maps import MapAxis, RegionGeom, RegionNDMap, WcsGeom
from gammapy.modeling import Fit
from gammapy.modeling.models import (
ExpCutoffPowerLawSpectralModel,
FoVBackgroundModel,
GaussianSpatialModel,
PowerLawSpectralModel,
SkyModel,
)
from gammapy.utils.testing import requires_data, requires_dependency
# TODO: use pre-generated data instead
def simulate_spectrum_dataset(model, random_state=0):
    """Simulate a 1D on/off spectrum dataset containing *model* plus a
    frozen power-law background, with counts faked from *random_state*."""
    energy_edges = np.logspace(-0.5, 1.5, 21) * u.TeV
    energy_axis = MapAxis.from_edges(energy_edges, interp="log", name="energy")
    energy_axis_true = energy_axis.copy(name="energy_true")

    aeff = EffectiveAreaTable2D.from_parametrization(energy_axis_true=energy_axis_true)

    # Background spectrum is fully frozen so fits only adjust the source model.
    bkg_model = SkyModel(
        spectral_model=PowerLawSpectralModel(
            index=2.5, amplitude="1e-12 cm-2 s-1 TeV-1"
        ),
        name="background",
    )
    bkg_model.spectral_model.amplitude.frozen = True
    bkg_model.spectral_model.index.frozen = True

    geom = RegionGeom.create(region="icrs;circle(0, 0, 0.1)", axes=[energy_axis])
    acceptance = RegionNDMap.from_geom(geom=geom, data=1)
    edisp = EDispKernelMap.from_diagonal_response(
        energy_axis=energy_axis,
        energy_axis_true=energy_axis_true,
        geom=geom,
    )

    geom_true = RegionGeom.create(
        region="icrs;circle(0, 0, 0.1)", axes=[energy_axis_true]
    )
    exposure = make_map_exposure_true_energy(
        pointing=SkyCoord("0d", "0d"), aeff=aeff, livetime=100 * u.h, geom=geom_true
    )

    mask_safe = RegionNDMap.from_geom(geom=geom, dtype=bool)
    mask_safe.data += True

    acceptance_off = RegionNDMap.from_geom(geom=geom, data=5)
    dataset = SpectrumDatasetOnOff(
        name="test_onoff",
        exposure=exposure,
        acceptance=acceptance,
        acceptance_off=acceptance_off,
        edisp=edisp,
        mask_safe=mask_safe,
    )
    # Predict background counts first, then fake the dataset with the
    # actual source model on top of that prediction.
    dataset.models = bkg_model
    bkg_npred = dataset.npred_signal()

    dataset.models = model
    dataset.fake(
        random_state=random_state,
        npred_background=bkg_npred,
    )
    return dataset
def create_fpe(model):
    """Wrap *model* in a SkyModel named "source", simulate a spectrum
    dataset for it and return ([dataset], FluxPointsEstimator)."""
    model = SkyModel(spectral_model=model, name="source")
    dataset = simulate_spectrum_dataset(model)
    energy_edges = [0.1, 1, 10, 100] * u.TeV
    dataset.models = model
    fpe = FluxPointsEstimator(
        energy_edges=energy_edges,
        norm_n_values=11,
        source="source",
        selection_optional="all",
        fit=Fit(backend="minuit", optimize_opts=dict(tol=0.2, strategy=1)),
    )
    datasets = [dataset]
    return datasets, fpe
def simulate_map_dataset(random_state=0, name=None):
    """Simulate a 3D map dataset with a Gaussian source at the Galactic
    center using CTA 1DC IRFs (requires $GAMMAPY_DATA)."""
    irfs = load_cta_irfs(
        "$GAMMAPY_DATA/cta-1dc/caldb/data/cta/1dc/bcf/South_z20_50h/irf_file.fits"
    )

    skydir = SkyCoord("0 deg", "0 deg", frame="galactic")
    energy_edges = np.logspace(-1, 2, 15) * u.TeV
    energy_axis = MapAxis.from_edges(edges=energy_edges, name="energy", interp="log")

    geom = WcsGeom.create(
        skydir=skydir, width=(4, 4), binsz=0.1, axes=[energy_axis], frame="galactic"
    )

    gauss = GaussianSpatialModel(
        lon_0="0 deg", lat_0="0 deg", sigma="0.4 deg", frame="galactic"
    )
    pwl = PowerLawSpectralModel(amplitude="1e-11 cm-2 s-1 TeV-1")
    skymodel = SkyModel(spatial_model=gauss, spectral_model=pwl, name="source")

    obs = Observation.create(pointing=skydir, livetime=1 * u.h, irfs=irfs)
    empty = MapDataset.create(geom, name=name)
    maker = MapDatasetMaker(selection=["exposure", "background", "psf", "edisp"])
    dataset = maker.run(empty, obs)

    bkg_model = FoVBackgroundModel(dataset_name=dataset.name)
    dataset.models = [bkg_model, skymodel]
    dataset.fake(random_state=random_state)
    return dataset
@pytest.fixture(scope="session")
def fpe_map_pwl():
    """Two map datasets sharing one model (the second fully masked out)
    plus a FluxPointsEstimator over three energy bins."""
    dataset_1 = simulate_map_dataset(name="test-map-pwl")
    dataset_2 = dataset_1.copy(name="test-map-pwl-2")
    dataset_2.models = dataset_1.models

    # All-False safe mask: dataset_2 contributes no likelihood.
    dataset_2.mask_safe = RegionNDMap.from_geom(dataset_2.counts.geom, dtype=bool)

    energy_edges = [0.1, 1, 10, 100] * u.TeV
    datasets = [dataset_1, dataset_2]
    fpe = FluxPointsEstimator(
        energy_edges=energy_edges,
        norm_n_values=3,
        source="source",
        selection_optional="all",
    )
    return datasets, fpe
@pytest.fixture(scope="session")
def fpe_map_pwl_reoptimize():
    """Map dataset with frozen spatial parameters plus an estimator that
    reoptimizes nuisance parameters over a fixed norm scan."""
    dataset = simulate_map_dataset()
    energy_edges = [1, 10] * u.TeV
    # Freeze the spatial model so only spectral parameters are refit.
    dataset.models.parameters["lon_0"].frozen = True
    dataset.models.parameters["lat_0"].frozen = True
    dataset.models.parameters["sigma"].frozen = True
    datasets = [dataset]
    fpe = FluxPointsEstimator(
        energy_edges=energy_edges,
        norm_values=[0.8, 1, 1.2],
        reoptimize=True,
        source="source",
    )
    return datasets, fpe
@pytest.fixture(scope="session")
def fpe_pwl():
    """Datasets + estimator for a plain power-law source."""
    return create_fpe(PowerLawSpectralModel())
@pytest.fixture(scope="session")
def fpe_ecpl():
    """Datasets + estimator for an exponential-cutoff power-law source."""
    return create_fpe(ExpCutoffPowerLawSpectralModel(lambda_="1 TeV-1"))
def test_str(fpe_pwl):
    """The estimator's string representation names its class."""
    datasets, fpe = fpe_pwl
    assert "FluxPointsEstimator" in str(fpe)
@requires_dependency("iminuit")
def test_run_pwl(fpe_pwl, tmpdir):
    """Regression test: flux-point values for the power-law case, plus
    GADF round-trip I/O. Reference numbers come from a seeded simulation."""
    datasets, fpe = fpe_pwl
    fp = fpe.run(datasets)
    table = fp.to_table()

    actual = table["e_min"].data
    assert_allclose(actual, [0.316228, 1.0, 10.0], rtol=1e-5)

    actual = table["e_max"].data
    assert_allclose(actual, [1.0, 10.0, 31.622777], rtol=1e-5)

    actual = table["e_ref"].data
    assert_allclose(actual, [0.562341, 3.162278, 17.782794], rtol=1e-3)

    actual = table["ref_flux"].quantity
    desired = [2.162278e-12, 9.000000e-13, 6.837722e-14] * u.Unit("1 / (cm2 s)")
    assert_allclose(actual, desired, rtol=1e-3)

    actual = table["ref_dnde"].quantity
    desired = [3.162278e-12, 1.000000e-13, 3.162278e-15] * u.Unit("1 / (cm2 s TeV)")
    assert_allclose(actual, desired, rtol=1e-3)

    actual = table["ref_eflux"].quantity
    desired = [1.151293e-12, 2.302585e-12, 1.151293e-12] * u.Unit("TeV / (cm2 s)")
    assert_allclose(actual, desired, rtol=1e-3)

    actual = table["norm"].data
    assert_allclose(actual, [1.081434, 0.91077, 0.922176], rtol=1e-3)

    actual = table["norm_err"].data
    assert_allclose(actual, [0.066374, 0.061025, 0.179729], rtol=1e-2)

    actual = table["norm_errn"].data
    assert_allclose(actual, [0.065803, 0.060403, 0.171376], rtol=1e-2)

    actual = table["norm_errp"].data
    assert_allclose(actual, [0.06695, 0.061652, 0.18839], rtol=1e-2)

    actual = table["counts"].data.squeeze()
    assert_allclose(actual, [1490, 748, 43])

    actual = table["norm_ul"].data
    assert_allclose(actual, [1.216227, 1.035472, 1.316878], rtol=1e-2)

    actual = table["sqrt_ts"].data
    assert_allclose(actual, [18.568429, 18.054651, 7.057121], rtol=1e-2)

    actual = table["norm_scan"][0][[0, 5, -1]]
    assert_allclose(actual, [0.2, 1.0, 5.0])

    actual = table["stat_scan"][0][[0, 5, -1]]
    assert_allclose(actual, [220.369, 4.301, 1881.626], rtol=1e-2)

    actual = table["npred"].data
    assert_allclose(actual, [[1492.966], [749.459], [43.105]], rtol=1e-3)

    actual = table["npred_excess"].data
    assert_allclose(actual, [[660.5625], [421.5402], [34.3258]], rtol=1e-3)

    actual = table.meta["UL_CONF"]
    assert_allclose(actual, 0.9544997)

    npred_excess_err = fp.npred_excess_err.data.squeeze()
    assert_allclose(npred_excess_err, [40.541334, 28.244024, 6.690005], rtol=1e-3)

    npred_excess_errp = fp.npred_excess_errp.data.squeeze()
    assert_allclose(npred_excess_errp, [40.838806, 28.549508, 7.013377], rtol=1e-3)

    npred_excess_errn = fp.npred_excess_errn.data.squeeze()
    assert_allclose(npred_excess_errn, [40.247313, 27.932033, 6.378465], rtol=1e-3)

    npred_excess_ul = fp.npred_excess_ul.data.squeeze()
    assert_allclose(npred_excess_ul, [742.87486, 479.169719, 49.019125], rtol=1e-3)

    # test GADF I/O
    fp.write(tmpdir / "test.fits", format="gadf-sed")
    fp_new = FluxPoints.read(tmpdir / "test.fits")
    assert fp_new.meta["sed_type_init"] == "likelihood"
@requires_dependency("iminuit")
def test_run_ecpl(fpe_ecpl, tmpdir):
    """Regression test: flux-point values for the exponential-cutoff
    power-law case, plus GADF round-trip I/O."""
    datasets, fpe = fpe_ecpl

    fp = fpe.run(datasets)

    table = fp.to_table()

    actual = table["ref_flux"].quantity
    desired = [9.024362e-13, 1.781341e-13, 1.260298e-18] * u.Unit("1 / (cm2 s)")
    assert_allclose(actual, desired, rtol=1e-3)

    actual = table["ref_dnde"].quantity
    desired = [1.351382e-12, 7.527318e-15, 2.523659e-22] * u.Unit("1 / (cm2 s TeV)")
    assert_allclose(actual, desired, rtol=1e-3)

    actual = table["ref_eflux"].quantity
    desired = [4.770557e-13, 2.787695e-13, 1.371963e-17] * u.Unit("TeV / (cm2 s)")
    assert_allclose(actual, desired, rtol=1e-3)

    actual = table["norm"].data
    assert_allclose(actual, [1.001683, 1.061821, 1.237512e03], rtol=1e-3)

    actual = table["norm_err"].data
    assert_allclose(actual, [1.386091e-01, 2.394241e-01, 3.259756e03], rtol=1e-2)

    actual = table["norm_errn"].data
    assert_allclose(actual, [1.374962e-01, 2.361246e-01, 2.888978e03], rtol=1e-2)

    actual = table["norm_errp"].data
    assert_allclose(actual, [1.397358e-01, 2.428481e-01, 3.716550e03], rtol=1e-2)

    actual = table["norm_ul"].data
    assert_allclose(actual, [1.283433e00, 1.555117e00, 9.698645e03], rtol=1e-2)

    actual = table["sqrt_ts"].data
    assert_allclose(actual, [7.678454, 4.735691, 0.399243], rtol=1e-2)

    # test GADF I/O
    fp.write(tmpdir / "test.fits", format="gadf-sed")
    fp_new = FluxPoints.read(tmpdir / "test.fits")
    assert fp_new.meta["sed_type_init"] == "likelihood"
@requires_dependency("iminuit")
@requires_data()
def test_run_map_pwl(fpe_map_pwl, tmpdir):
    """Regression test for the 3D map case; the fully-masked second
    dataset is expected to contribute zero counts."""
    datasets, fpe = fpe_map_pwl
    fp = fpe.run(datasets)

    table = fp.to_table()

    actual = table["e_min"].data
    assert_allclose(actual, [0.1, 1.178769, 8.48342], rtol=1e-5)

    actual = table["e_max"].data
    assert_allclose(actual, [1.178769, 8.483429, 100.0], rtol=1e-5)

    actual = table["e_ref"].data
    assert_allclose(actual, [0.343332, 3.162278, 29.126327], rtol=1e-5)

    actual = table["norm"].data
    assert_allclose(actual, [0.974726, 0.96342, 0.994251], rtol=1e-2)

    actual = table["norm_err"].data
    assert_allclose(actual, [0.067637, 0.052022, 0.087059], rtol=3e-2)

    actual = table["counts"].data
    assert_allclose(actual, [[44611, 0], [1923, 0], [282, 0]])

    actual = table["norm_ul"].data
    assert_allclose(actual, [1.111852, 1.07004, 1.17829], rtol=1e-2)

    actual = table["sqrt_ts"].data
    assert_allclose(actual, [16.681221, 28.408676, 21.91912], rtol=1e-2)

    actual = table["norm_scan"][0]
    assert_allclose(actual, [0.2, 1.0, 5])

    actual = table["stat_scan"][0] - table["stat"][0]
    assert_allclose(actual, [1.628398e02, 1.452456e-01, 2.008018e03], rtol=1e-2)

    # test GADF I/O
    fp.write(tmpdir / "test.fits", format="gadf-sed")
    fp_new = FluxPoints.read(tmpdir / "test.fits")
    assert fp_new.meta["sed_type_init"] == "likelihood"
@requires_dependency("iminuit")
@requires_data()
def test_run_map_pwl_reoptimize(fpe_map_pwl_reoptimize):
    """Regression test with nuisance-parameter reoptimization; the
    fixture estimator is copied so the session fixture stays untouched."""
    datasets, fpe = fpe_map_pwl_reoptimize
    fpe = fpe.copy()
    fpe.selection_optional = ["scan"]

    fp = fpe.run(datasets)
    table = fp.to_table()

    actual = table["norm"].data
    assert_allclose(actual, 0.962368, rtol=1e-2)

    actual = table["norm_err"].data
    assert_allclose(actual, 0.053878, rtol=1e-2)

    actual = table["sqrt_ts"].data
    assert_allclose(actual, 25.196585, rtol=1e-2)

    actual = table["norm_scan"][0]
    assert_allclose(actual, [0.8, 1, 1.2])

    actual = table["stat_scan"][0] - table["stat"][0]
    assert_allclose(actual, [9.788123, 0.486066, 17.603708], rtol=1e-2)
@requires_dependency("iminuit")
@requires_data()
def test_flux_points_estimator_no_norm_scan(fpe_pwl, tmpdir):
    """With no optional selection, the result carries no stat_scan and the
    custom fit tolerance is propagated to minuit."""
    datasets, fpe = fpe_pwl
    fpe.selection_optional = None

    fp = fpe.run(datasets)

    assert_allclose(fpe.fit.optimize_opts["tol"], 0.2)
    assert_allclose(fpe.fit.minuit.tol, 0.2)

    assert fp.sed_type_init == "likelihood"
    assert "stat_scan" not in fp._data

    # test GADF I/O
    fp.write(tmpdir / "test.fits", format="gadf-sed")
    fp_new = FluxPoints.read(tmpdir / "test.fits")
    assert fp_new.meta["sed_type_init"] == "likelihood"
def test_no_likelihood_contribution():
    """When no dataset contributes to the likelihood in the requested
    energy range, norms are NaN and counts are zero."""
    dataset = simulate_spectrum_dataset(
        SkyModel(spectral_model=PowerLawSpectralModel(), name="source")
    )

    # Second dataset covers only the lowest bins; the first is fully masked.
    dataset_2 = dataset.slice_by_idx(slices={"energy": slice(0, 5)})

    dataset.mask_safe = RegionNDMap.from_geom(dataset.counts.geom, dtype=bool)

    fpe = FluxPointsEstimator(energy_edges=[1.0, 3.0, 10.0] * u.TeV, source="source")
    table = fpe.run([dataset, dataset_2]).to_table()

    assert np.isnan(table["norm"]).all()
    assert np.isnan(table["norm_err"]).all()
    assert_allclose(table["counts"], 0)
def test_mask_shape():
    """Datasets with different spatial geometries can be combined; the
    empty datasets yield zero counts."""
    axis = MapAxis.from_edges([1.0, 3.0, 10.0], unit="TeV", interp="log", name="energy")
    geom_1 = WcsGeom.create(binsz=1, width=3, axes=[axis])
    geom_2 = WcsGeom.create(binsz=1, width=5, axes=[axis])

    dataset_1 = MapDataset.create(geom_1)
    dataset_2 = MapDataset.create(geom_2)
    # Strip IRFs/GTIs so only counts geometry differs between datasets.
    dataset_1.gti = None
    dataset_2.gti = None
    dataset_1.psf = None
    dataset_2.psf = None
    dataset_1.edisp = None
    dataset_2.edisp = None

    model = SkyModel(
        spectral_model=PowerLawSpectralModel(),
        spatial_model=GaussianSpatialModel(),
        name="source",
    )

    dataset_1.models = model
    dataset_2.models = model
    fpe = FluxPointsEstimator(energy_edges=[1, 10] * u.TeV, source="source")

    fp = fpe.run([dataset_2, dataset_1])
    table = fp.to_table()

    assert_allclose(table["counts"], 0)
@requires_dependency("iminuit")
def test_run_pwl_parameter_range(fpe_pwl):
    """Parameter bounds clip the fitted norms of a near-zero-amplitude
    source to zero, vs. negative norms without bounds.

    NOTE(review): the fpe_pwl fixture argument is unused here — the test
    builds its own datasets via create_fpe; confirm whether it is needed.
    """
    pl = PowerLawSpectralModel(amplitude="1e-16 cm-2s-1TeV-1")

    datasets, fpe = create_fpe(pl)

    fp = fpe.run(datasets)
    table_no_bounds = fp.to_table()

    pl.amplitude.min = 0
    pl.amplitude.max = 1e-12

    fp = fpe.run(datasets)
    table_with_bounds = fp.to_table()

    actual = table_with_bounds["norm"].data
    assert_allclose(actual, [0.0, 0.0, 0.0], atol=1e-2)

    actual = table_with_bounds["norm_errp"].data
    assert_allclose(actual, [212.593368, 298.383045, 449.951747], rtol=1e-2)

    actual = table_with_bounds["norm_ul"].data
    assert_allclose(actual, [640.067576, 722.571371, 1414.22209], rtol=1e-2)

    actual = table_with_bounds["sqrt_ts"].data
    assert_allclose(actual, [0.0, 0.0, 0.0], atol=1e-2)

    actual = table_no_bounds["norm"].data
    assert_allclose(actual, [-511.76675, -155.75408, -853.547117], rtol=1e-3)

    actual = table_no_bounds["norm_err"].data
    assert_allclose(actual, [504.601499, 416.69248, 851.223077], rtol=1e-2)

    actual = table_no_bounds["norm_ul"].data
    assert_allclose(actual, [514.957128, 707.888477, 1167.105962], rtol=1e-2)

    actual = table_no_bounds["sqrt_ts"].data
    assert_allclose(actual, [-1.006081, -0.364848, -0.927819], rtol=1e-2)
@requires_dependency("iminuit")
def test_flux_points_estimator_small_edges():
    """Energy edges narrower than the dataset binning yield NaN for the
    bins that cannot be resolved."""
    pl = PowerLawSpectralModel(amplitude="1e-11 cm-2s-1TeV-1")

    datasets, fpe = create_fpe(pl)

    # Upsampled edges are finer than the counts axis binning.
    fpe.energy_edges = datasets[0].counts.geom.axes["energy"].upsample(3).edges[1:4]
    fpe.selection_optional = []

    fp = fpe.run(datasets)

    assert_allclose(fp.ts.data[0, 0, 0], 2156.96959291)
    assert np.isnan(fp.ts.data[1, 0, 0])
    assert np.isnan(fp.npred.data[1, 0, 0])
| 32.453629 | 88 | 0.682301 |
4f65515ba5645397026de526c9d8bd32869cdb57 | 4,728 | py | Python | vmchecker/kvm_executor.py | cojocar/vmchecker | cf151435100c04a56e6dbfa02f7e28e5c801e889 | [
"MIT"
] | 1 | 2017-11-27T04:01:45.000Z | 2017-11-27T04:01:45.000Z | vmchecker/kvm_executor.py | alexukf/vmchecker | 948df54ec130d38a206f2730a19beb25815f06e6 | [
"MIT"
] | null | null | null | vmchecker/kvm_executor.py | alexukf/vmchecker | 948df54ec130d38a206f2730a19beb25815f06e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A script that starts a vm, reverts it to a known snapshot, tests a
submission bundle (submission + tests), and closes the vm"""
from __future__ import with_statement
# Use simplejson or Python 2.6 json, prefer simplejson.
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import time
import logging
import signal
import ConfigParser
from threading import Thread
from subprocess import Popen
import serial
from subprocess import Popen, PIPE, STDOUT
from vmchecker.config import VmwareMachineConfig, CourseConfig, VmwareConfig
from vmchecker.generic_executor import VM, Host
_logger = logging.getLogger('vm_executor')
class kvmHost(Host):
    # Factory: a Host that produces KVM-backed guest objects.
    def getVM(self, bundle_dir, vmcfg, assignment):
        """Return a kvmVM bound to this host for the given bundle/config."""
        return kvmVM(self, bundle_dir, vmcfg, assignment)
class kvmVM(VM):
    """A KVM guest managed through libvirt's ``virsh`` CLI on the remote host.

    Commands inside the guest run over ssh and files move over scp, so the
    guest must obtain an IP (discovered via the host's ARP table) and accept
    key-based logins for ``self.username``.
    """

    # Default libvirt domain name; overwritten in __init__ from the machine config.
    hostname = 'kvm2'

    def __init__(self, host, bundle_dir, vmcfg, assignment):
        VM.__init__(self, host, bundle_dir, vmcfg, assignment)
        # The machine config's "vmx path" field is reused as the libvirt
        # domain name for this backend.
        self.hostname = self.machinecfg.get_vmx_path()
        self.path = self.getPath()
        print(self.path)

    def executeCommand(self, cmd):
        """Run *cmd* inside the guest over ssh and return its output."""
        _logger.info("executeCommand: %s" % cmd)
        return self.host.executeCommand("ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "+self.username+"@"+self.IP+" "+cmd)

    def power_on_kvm(self):
        """Start the libvirt domain (runs in a worker thread).

        Fix: use the configured domain name instead of the hard-coded
        "kvm2" — stop() and revert() already operate on self.hostname.
        """
        o = self.host.executeCommand("virsh start " + self.hostname)
        if "started" in o:
            # virsh reports "Domain ... started" on success; SystemExit here
            # only terminates the worker thread — TODO confirm this is intended.
            print("Exit")
            sys.exit()

    def start(self):
        """Power on the VM and block until it shows up in the ARP table."""
        power_thd = Thread(target=self.power_on_kvm)
        power_thd.start()
        power_thd.join()
        self.IP = self.getIP()

    def stop(self):
        """Force-off the domain (hard power off, no guest shutdown)."""
        self.host.executeCommand("virsh destroy "+self.hostname)

    def revert(self, number=None):
        """Reset the working disk (run.qcow2) from the pristine base image.

        :param number: unused; kept for interface compatibility with other
            VM backends that revert to numbered snapshots.
        """
        self.stop()  # just in case it's on
        self.host.executeCommand("rm -f "+os.path.join(self.path, "run.qcow2"))
        self.host.executeCommand("cp "+os.path.join(self.path, "image.qcow2")+" "+os.path.join(self.path, "run.qcow2"))

    def copyTo(self, sourceDir, targetDir, files):
        """Copy files from host(source) to guest(target); aborts on the
        first missing host file."""
        for f in files:
            host_path = os.path.join(sourceDir, f)
            guest_path = os.path.join(targetDir, f)
            if not os.path.exists(host_path):
                _logger.error('host file (to send) "%s" does not exist' % host_path)
                return
            _logger.info('copy file %s from host to guest at %s' % (host_path, guest_path))
            self.host.executeCommand("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r "+host_path+" "+self.username+"@"+self.IP+":"+guest_path)

    def copyFrom(self, sourceDir, targetDir, files):
        """Copy files from guest(source) to host(target); logs an error for
        each file that did not arrive."""
        for f in files:
            host_path = os.path.join(targetDir, f)
            guest_path = os.path.join(sourceDir, f)
            _logger.info('copy file %s from guest to host at %s' % (guest_path, host_path))
            self.host.executeCommand("scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -r "+self.username+"@"+self.IP+":"+guest_path+" "+host_path)
            if not os.path.exists(host_path):
                _logger.error('host file (received) "%s" does not exist' % host_path)

    def run(self, shell, executable_file, timeout):
        """Run *executable_file* in the guest, waiting up to *timeout* seconds
        (or forever when None). Returns True if it is still running afterwards.
        """
        self.executeCommand("chmod +x " + executable_file)
        # Fix: %s instead of %d — timeout may be None (handled below), and
        # "%d" % None raised TypeError before the command even ran.
        _logger.info('executing on the remote: prog=%s args=[%s] timeout=%s' % (shell, executable_file, timeout))
        thd = Thread(target=self.executeCommand, args=(executable_file,))
        thd.start()
        if timeout is None:
            thd.join()
        else:
            thd.join(timeout)
        return thd.isAlive()

    def getMac(self):
        """Parse the domain's MAC address out of ``virsh dumpxml`` output."""
        mac = self.host.executeCommand("virsh dumpxml "+self.hostname)
        mac = mac[mac.find("<mac address=")+14:]
        mac = mac[:mac.find("'/>")]
        return mac.strip()

    def getPath(self):
        """Return the directory of the domain's first disk image, parsed
        from the ``<source file='...'/>`` element of ``virsh dumpxml``."""
        path = self.host.executeCommand("virsh dumpxml "+self.hostname)
        path = path[path.find("<source file='")+14:]
        path = path[:path.find("'/>")]
        return os.path.dirname(path)

    def getIP(self):
        """Poll the host's ARP table until the guest's MAC appears; return
        the associated IP. Blocks forever if the guest never gets one."""
        mac = self.getMac()
        while True:
            arps = self.host.executeCommand("arp -a").split("\n")
            time.sleep(1)
            for arp in arps:
                if mac in arp:
                    IP = arp[arp.find("(")+1:arp.find(")")]
                    _logger.info("IP: %s" % IP)
                    return IP

    def getIPfromIfconfig(self, string):
        """Extract the first 'inet addr:' IP from ifconfig-style output."""
        s = string[string.find("inet addr:")+10:]
        s = s[0:s.find(" ")]
        return s
| 36.651163 | 163 | 0.614848 |
77e71eb21ec9abcc63f8e0cd9d6ba4d5bdae7f12 | 1,387 | py | Python | kTsnn/src/experiments/test_synth.py | dkesada/kTsnn | 5f7c2b9d2ae2cd5cb0e32c68da2cca1ffb87b69d | [
"MIT"
] | 1 | 2020-06-30T17:19:23.000Z | 2020-06-30T17:19:23.000Z | kTsnn/src/experiments/test_synth.py | dkesada/kTsnn | 5f7c2b9d2ae2cd5cb0e32c68da2cca1ffb87b69d | [
"MIT"
] | null | null | null | kTsnn/src/experiments/test_synth.py | dkesada/kTsnn | 5f7c2b9d2ae2cd5cb0e32c68da2cca1ffb87b69d | [
"MIT"
] | null | null | null | from kTsnn.src.utils import *
import tensorflow as tf
import os
import random as rn
import numpy as np
# Synthetic data experiments
DT_FILE = 'dt_synth_unfolded.csv'
INFO_FILE = 'exec_info_synth.txt'
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(42)
rn.seed(4242)
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
tf.random.set_seed(424242)
if __name__ == '__main__':
dt = load_dt(DT_FILE)
info = load_info(INFO_FILE)
res = []
# Settings
# learning_rate=0.01 In the LSTM file
out_steps = 90
units = 32
input_width = 10
ini = 0
length = 90
max_epochs = 300
patience = 10
model_arch = None
num_features = dt.shape[1]-1
model_arch = tf.keras.Sequential([
tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),
tf.keras.layers.LSTM(units),
tf.keras.layers.Dense(out_steps * num_features,
kernel_initializer=tf.initializers.zeros),
tf.keras.layers.Reshape([out_steps, num_features])])
for cv in info['cv']:
cv_res, _ = main_pipeline_synth(dt, cv, info['idx_cyc'], info['obj_var'], ini, length,
out_steps, units, input_width, max_epochs, patience, model_arch, mode=2)
res.append(cv_res.mean())
print(np.mean(res))
| 22.370968 | 112 | 0.6323 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.