max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
pitchfork/serializers.py
|
lemoswilson/NewsAggregator
| 0
|
6626051
|
<gh_stars>0
from rest_framework import serializers
from .models import Pitchfork_model
class PitchforkSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing scraped Pitchfork articles via the API."""

    class Meta:
        # Backing model and the subset of its fields included in payloads.
        model = Pitchfork_model
        fields = ("link", "headline", "description", "date", "tags")
|
from rest_framework import serializers
from .models import Pitchfork_model
class PitchforkSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer exposing scraped Pitchfork articles via the API."""

    class Meta:
        # Backing model and the subset of its fields included in payloads.
        model = Pitchfork_model
        fields = ("link", "headline", "description", "date", "tags")
|
none
| 1
| 2.087067
| 2
|
|
sahara/plugins/hdp/versions/versionhandlerfactory.py
|
esikachev/sahara-backup
| 0
|
6626052
|
<reponame>esikachev/sahara-backup
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sahara.utils import general
class VersionHandlerFactory(object):
    """Factory that discovers per-version HDP handler modules on disk.

    Sibling directories named 'version_<x_y>' are discovered once, their
    'versionhandler.VersionHandler' classes are imported and instantiated,
    and the instances are cached in class-level attributes shared by every
    factory object.
    """

    # Class-level (shared) cache, populated once by get_instance().
    versions = None      # sorted list of version strings, e.g. ['1.3.2']
    modules = None       # maps version string -> VersionHandler instance
    initialized = False  # True after the one-time discovery has run

    @staticmethod
    def get_instance():
        """Return a factory, running version discovery on the first call.

        Note: a new VersionHandlerFactory object is created on every call;
        only the discovered state is shared (it lives on the class).
        """
        if not VersionHandlerFactory.initialized:
            src_dir = os.path.join(os.path.dirname(__file__), '')
            # 'version_1_3_2' -> '1.3.2': strip the 'version_' prefix (8
            # chars) and turn underscores into dots.
            versions = [name[8:].replace('_', '.')
                        for name in os.listdir(src_dir)
                        if os.path.isdir(os.path.join(src_dir, name))
                        and name.startswith('version_')]
            versions.sort(key=general.natural_sort_key)
            VersionHandlerFactory.versions = versions
            VersionHandlerFactory.modules = {}
            for version in VersionHandlerFactory.versions:
                # Re-derive the directory-style module name for the import.
                module_name = ('sahara.plugins.hdp.versions.version_{0}.'
                               'versionhandler'.format(
                                   version.replace('.', '_')))
                module_class = getattr(
                    __import__(module_name, fromlist=['sahara']),
                    'VersionHandler')
                module = module_class()
                # would prefer to use __init__ or some constructor, but keep
                # getting exceptions...
                module._set_version(version)
                # NOTE(review): `version` already uses dots at this point, so
                # this replace() is a no-op kept from an earlier scheme.
                key = version.replace('_', '.')
                VersionHandlerFactory.modules[key] = module
            VersionHandlerFactory.initialized = True
        return VersionHandlerFactory()

    def get_versions(self):
        """Return the sorted list of discovered version strings."""
        return VersionHandlerFactory.versions

    def get_version_handler(self, version):
        """Return the cached handler for `version` (KeyError if unknown)."""
        return VersionHandlerFactory.modules[version]
|
# Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sahara.utils import general
class VersionHandlerFactory(object):
versions = None
modules = None
initialized = False
@staticmethod
def get_instance():
if not VersionHandlerFactory.initialized:
src_dir = os.path.join(os.path.dirname(__file__), '')
versions = [name[8:].replace('_', '.')
for name in os.listdir(src_dir)
if os.path.isdir(os.path.join(src_dir, name))
and name.startswith('version_')]
versions.sort(key=general.natural_sort_key)
VersionHandlerFactory.versions = versions
VersionHandlerFactory.modules = {}
for version in VersionHandlerFactory.versions:
module_name = ('sahara.plugins.hdp.versions.version_{0}.'
'versionhandler'.format(
version.replace('.', '_')))
module_class = getattr(
__import__(module_name, fromlist=['sahara']),
'VersionHandler')
module = module_class()
# would prefer to use __init__ or some constructor, but keep
# getting exceptions...
module._set_version(version)
key = version.replace('_', '.')
VersionHandlerFactory.modules[key] = module
VersionHandlerFactory.initialized = True
return VersionHandlerFactory()
def get_versions(self):
return VersionHandlerFactory.versions
def get_version_handler(self, version):
return VersionHandlerFactory.modules[version]
|
en
| 0.86972
|
# Copyright (c) 2013 Hortonworks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # would prefer to use __init__ or some constructor, but keep # getting exceptions...
| 1.86255
| 2
|
igibson/external/motion/__init__.py
|
mamadbiabon/iGibson
| 360
|
6626053
|
"""
Developed by <NAME> in pybullet-planning repository (https://github.com/caelan/pybullet-planning)
and adapted by iGibson team.
"""
|
"""
Developed by <NAME> in pybullet-planning repository (https://github.com/caelan/pybullet-planning)
and adapted by iGibson team.
"""
|
en
| 0.875557
|
Developed by <NAME> in pybullet-planning repository (https://github.com/caelan/pybullet-planning) and adapted by iGibson team.
| 1.205642
| 1
|
Pyron/Engine/Saving/SaveScene.py
|
Dmunch04/PyronEngine
| 1
|
6626054
|
import os
def SaveScene(_Scene, _Path=''):
    """
    Saves the given window/scene into a folder called 'Build'.

    Files in the folder:
        Scene.eve
        Models
            - Model1.egg
            - Model2.egg
            - etc.

    :param _Scene: scene object providing `Title`, `Version` and
        `ModelMeshes` (an iterable of writable mesh strings).
    :param _Path: optional parent directory for the 'Build' folder; ''/' '
        or a trailing '/' place 'Build' directly at that location.
    """
    # Normalise the target so it always ends in .../Build (the two original
    # duplicate branches are merged into one condition).
    if _Path.endswith('/') or _Path == '' or _Path == ' ':
        Path = _Path + 'Build'
    else:
        Path = _Path + '/Build'
    if not os.path.exists(Path):
        os.makedirs(Path)

    ModelPath = Path + '/Models'
    if not os.path.exists(ModelPath):
        os.makedirs(ModelPath)

    # Write each mesh to Models/Model<N>.egg and collect the quoted paths.
    Models = []
    for Index, Model in enumerate(_Scene.ModelMeshes, start=1):
        ModelSavePath = ModelPath + f'/Model{Index}.egg'
        with open(ModelSavePath, 'w+') as File:
            File.write(Model)
        Models.append(f"'{ModelSavePath}'")
    Models = ', '.join(Models)

    # BUG FIX: the original template referenced positional field '{4}' while
    # passing only three arguments, raising IndexError at runtime.
    Data = "[\n 'Title' :: '{0}'\n 'Version' :: '{1}'\n 'Models' :: ({2})\n];".format(
        _Scene.Title,
        _Scene.Version,
        Models
    )
    Path += '/Scene.eve'
    with open(Path, 'w+') as File:
        File.write(Data)
|
import os
def SaveScene(_Scene, _Path=''):
    """
    Saves the given window/scene into a folder called 'Build'.

    Files in the folder:
        Scene.eve
        Models
            - Model1.egg
            - Model2.egg
            - etc.

    :param _Scene: scene object providing `Title`, `Version` and
        `ModelMeshes` (an iterable of writable mesh strings).
    :param _Path: optional parent directory for the 'Build' folder; ''/' '
        or a trailing '/' place 'Build' directly at that location.
    """
    # Normalise the target so it always ends in .../Build (the two original
    # duplicate branches are merged into one condition).
    if _Path.endswith('/') or _Path == '' or _Path == ' ':
        Path = _Path + 'Build'
    else:
        Path = _Path + '/Build'
    if not os.path.exists(Path):
        os.makedirs(Path)

    ModelPath = Path + '/Models'
    if not os.path.exists(ModelPath):
        os.makedirs(ModelPath)

    # Write each mesh to Models/Model<N>.egg and collect the quoted paths.
    Models = []
    for Index, Model in enumerate(_Scene.ModelMeshes, start=1):
        ModelSavePath = ModelPath + f'/Model{Index}.egg'
        with open(ModelSavePath, 'w+') as File:
            File.write(Model)
        Models.append(f"'{ModelSavePath}'")
    Models = ', '.join(Models)

    # BUG FIX: the original template referenced positional field '{4}' while
    # passing only three arguments, raising IndexError at runtime.
    Data = "[\n 'Title' :: '{0}'\n 'Version' :: '{1}'\n 'Models' :: ({2})\n];".format(
        _Scene.Title,
        _Scene.Version,
        Models
    )
    Path += '/Scene.eve'
    with open(Path, 'w+') as File:
        File.write(Data)
|
en
| 0.463232
|
Saves the given window/scene into a folder called 'Build' Files in the folder: Scene.eve Models - Model1.obj - Model2.obj - etc. Camera = [ f"'{_Scene.Camera.znear}'", f"'{_Scene.Camera.zfar}'", f"'{_Scene.Camera.name}'", f"'{_Scene.Camera.yfov}'", f"'{_Scene.Camera.aspectRatio}'" ] Camera = ', '.join (Camera) Light = [ f"'{_Scene.Light.name}'", #f'{_Scene.Light.color}', f"'{_Scene.Light.intensity}'", f"'{_Scene.Light._shadow_camera}'", f"'{_Scene.Light._shadow_texture}'" ] Light = ', '.join (Light) Data = "[\n 'Title' :: '{0}'\n 'Version' :: '{1}'\n 'Camera' :: ({2})\n 'Lighting' :: ({3})\n 'Models' :: ({4})\n];".format ( _Scene.Title, _Scene.Version, Camera, Light, Models )
| 3.265284
| 3
|
official/modeling/tf_utils.py
|
akshit-protonn/models
| 8
|
6626055
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common TF utilities."""
import six
import tensorflow as tf
from tensorflow.python.util import deprecation
from official.modeling import activations
@deprecation.deprecated(
    None,
    "tf.keras.layers.Layer supports multiple positional args and kwargs as "
    "input tensors. pack/unpack inputs to override __call__ is no longer "
    "needed.")
def pack_inputs(inputs):
  """Packs a list of `inputs` tensors into a tuple.

  Args:
    inputs: a list of tensors.

  Returns:
    A tuple of tensors. Any `None` entry is replaced by a special scalar
    int32 constant tensor (recognized later by `is_special_none_tensor`).
  """
  packed = []
  for tensor in tf.nest.flatten(inputs):
    # None cannot travel through tuple-based call signatures, so encode it
    # as a scalar int32 zero that unpack_inputs can recognize and undo.
    packed.append(tensor if tensor is not None
                  else tf.constant(0, shape=[], dtype=tf.int32))
  return tuple(packed)
@deprecation.deprecated(
    None,
    "tf.keras.layers.Layer supports multiple positional args and kwargs as "
    "input tensors. pack/unpack inputs to override __call__ is no longer "
    "needed.")
def unpack_inputs(inputs):
  """Unpacks a tuple of `inputs` tensors to a tuple.

  Args:
    inputs: a list of tensors.

  Returns:
    A tuple of tensors, with each special constant tensor (see
    `is_special_none_tensor`) replaced by None. A single-element result is
    returned bare rather than as a 1-tuple.
  """
  restored = [None if is_special_none_tensor(tensor) else tensor
              for tensor in tf.nest.flatten(inputs)]
  # Returning the bare element keeps pylint's 'unbalanced-tuple-unpacking'
  # check from triggering at call sites.
  if len(restored) == 1:
    return restored[0]
  return tuple(restored)
def is_special_none_tensor(tensor):
  """Checks if a tensor is the special scalar int32 stand-in for None."""
  return tensor.dtype == tf.int32 and tensor.shape.ndims == 0
def get_activation(identifier, use_keras_layer=False):
  """Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.

  It checks string first and if it is one of customized activation not in TF,
  the corresponding activation will be returned. For non-customized activation
  names and callable identifiers, always fallback to tf.keras.activations.get.

  Prefers using keras layers when use_keras_layer=True. Now it only supports
  'relu', 'linear', 'identity', 'swish'.

  Args:
    identifier: String name of the activation function or callable.
    use_keras_layer: If True, use keras layer if identifier is allow-listed.

  Returns:
    A Python function corresponding to the activation function or a keras
    activation layer when use_keras_layer=True.
  """
  if isinstance(identifier, six.string_types):
    # Lower-case once; the lowered name is also what the final fallback sees.
    identifier = str(identifier).lower()
    if use_keras_layer:
      # Names that may be served directly as a tf.keras Activation layer.
      # Values are either Keras activation name strings or a callable
      # (tf.nn.relu6 has no string alias).
      keras_layer_allowlist = {
          "relu": "relu",
          "linear": "linear",
          "identity": "linear",
          "swish": "swish",
          "relu6": tf.nn.relu6,
      }
      if identifier in keras_layer_allowlist:
        return tf.keras.layers.Activation(keras_layer_allowlist[identifier])
    # Customized activations implemented in this project rather than in TF.
    name_to_fn = {
        "gelu": activations.gelu,
        "simple_swish": activations.simple_swish,
        "hard_swish": activations.hard_swish,
        "relu6": activations.relu6,
        "hard_sigmoid": activations.hard_sigmoid,
        "identity": activations.identity,
    }
    if identifier in name_to_fn:
      return tf.keras.activations.get(name_to_fn[identifier])
  # Fallback: let Keras resolve built-in names and callable identifiers.
  return tf.keras.activations.get(identifier)
def get_shape_list(tensor, expected_rank=None, name=None):
  """Returns a list of the shape of tensor, preferring static dimensions.

  Args:
    tensor: A tf.Tensor object to find the shape of.
    expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, and exception will be
      thrown.
    name: Optional name of the tensor for the error message.

  Returns:
    A list of dimensions of the shape of tensor. All static dimensions will
    be returned as python integers, and dynamic dimensions will be returned
    as tf.Tensor scalars.
  """
  if expected_rank is not None:
    assert_rank(tensor, expected_rank, name)

  static_shape = tensor.shape.as_list()
  dynamic_indices = [i for i, dim in enumerate(static_shape) if dim is None]
  # Fully static shape: no graph ops needed.
  if not dynamic_indices:
    return static_shape
  # Patch the unknown dimensions with runtime scalars from tf.shape.
  runtime_shape = tf.shape(tensor)
  for i in dynamic_indices:
    static_shape[i] = runtime_shape[i]
  return static_shape
def assert_rank(tensor, expected_rank, name=None):
  """Raises an exception if the tensor rank is not of the expected rank.

  Args:
    tensor: A tf.Tensor to check the rank of.
    expected_rank: Python integer or list of integers, expected rank.
    name: Optional name of the tensor for the error message.

  Raises:
    ValueError: If the expected shape doesn't match the actual shape.
  """
  # Accept a single int or any iterable of ints.
  if isinstance(expected_rank, six.integer_types):
    allowed_ranks = {expected_rank}
  else:
    allowed_ranks = set(expected_rank)
  actual_rank = tensor.shape.ndims
  if actual_rank not in allowed_ranks:
    raise ValueError(
        "For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
        "equal to the expected tensor rank `%s`" %
        (name, actual_rank, str(tensor.shape), str(expected_rank)))
def safe_mean(losses):
  """Computes a safe mean of the losses.

  Args:
    losses: `Tensor` whose elements contain individual loss measurements.

  Returns:
    A scalar representing the mean of `losses`. If `num_present` is zero,
    then zero is returned.
  """
  loss_sum = tf.reduce_sum(losses)
  element_count = tf.cast(tf.size(losses), dtype=losses.dtype)
  # divide_no_nan yields 0 (not NaN/Inf) when `losses` is empty.
  return tf.math.divide_no_nan(loss_sum, element_count)
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common TF utilities."""
import six
import tensorflow as tf
from tensorflow.python.util import deprecation
from official.modeling import activations
@deprecation.deprecated(
None,
"tf.keras.layers.Layer supports multiple positional args and kwargs as "
"input tensors. pack/unpack inputs to override __call__ is no longer "
"needed.")
def pack_inputs(inputs):
"""Pack a list of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
a tuple of tensors. if any input is None, replace it with a special constant
tensor.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if x is None:
outputs.append(tf.constant(0, shape=[], dtype=tf.int32))
else:
outputs.append(x)
return tuple(outputs)
@deprecation.deprecated(
None,
"tf.keras.layers.Layer supports multiple positional args and kwargs as "
"input tensors. pack/unpack inputs to override __call__ is no longer "
"needed.")
def unpack_inputs(inputs):
"""unpack a tuple of `inputs` tensors to a tuple.
Args:
inputs: a list of tensors.
Returns:
a tuple of tensors. if any input is a special constant tensor, replace it
with None.
"""
inputs = tf.nest.flatten(inputs)
outputs = []
for x in inputs:
if is_special_none_tensor(x):
outputs.append(None)
else:
outputs.append(x)
x = tuple(outputs)
# To trick the very pointless 'unbalanced-tuple-unpacking' pylint check
# from triggering.
if len(x) == 1:
return x[0]
return tuple(outputs)
def is_special_none_tensor(tensor):
"""Checks if a tensor is a special None Tensor."""
return tensor.shape.ndims == 0 and tensor.dtype == tf.int32
def get_activation(identifier, use_keras_layer=False):
"""Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
It checks string first and if it is one of customized activation not in TF,
the corresponding activation will be returned. For non-customized activation
names and callable identifiers, always fallback to tf.keras.activations.get.
Prefers using keras layers when use_keras_layer=True. Now it only supports
'relu', 'linear', 'identity', 'swish'.
Args:
identifier: String name of the activation function or callable.
use_keras_layer: If True, use keras layer if identifier is allow-listed.
Returns:
A Python function corresponding to the activation function or a keras
activation layer when use_keras_layer=True.
"""
if isinstance(identifier, six.string_types):
identifier = str(identifier).lower()
if use_keras_layer:
keras_layer_allowlist = {
"relu": "relu",
"linear": "linear",
"identity": "linear",
"swish": "swish",
"relu6": tf.nn.relu6,
}
if identifier in keras_layer_allowlist:
return tf.keras.layers.Activation(keras_layer_allowlist[identifier])
name_to_fn = {
"gelu": activations.gelu,
"simple_swish": activations.simple_swish,
"hard_swish": activations.hard_swish,
"relu6": activations.relu6,
"hard_sigmoid": activations.hard_sigmoid,
"identity": activations.identity,
}
if identifier in name_to_fn:
return tf.keras.activations.get(name_to_fn[identifier])
return tf.keras.activations.get(identifier)
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, and exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
raise ValueError(
"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not "
"equal to the expected tensor rank `%s`" %
(name, actual_rank, str(tensor.shape), str(expected_rank)))
def safe_mean(losses):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total = tf.reduce_sum(losses)
num_elements = tf.cast(tf.size(losses), dtype=losses.dtype)
return tf.math.divide_no_nan(total, num_elements)
|
en
| 0.744561
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Common TF utilities. Pack a list of `inputs` tensors to a tuple. Args: inputs: a list of tensors. Returns: a tuple of tensors. if any input is None, replace it with a special constant tensor. unpack a tuple of `inputs` tensors to a tuple. Args: inputs: a list of tensors. Returns: a tuple of tensors. if any input is a special constant tensor, replace it with None. # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check # from triggering. Checks if a tensor is a special None Tensor. Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`. It checks string first and if it is one of customized activation not in TF, the corresponding activation will be returned. For non-customized activation names and callable identifiers, always fallback to tf.keras.activations.get. Prefers using keras layers when use_keras_layer=True. Now it only supports 'relu', 'linear', 'identity', 'swish'. Args: identifier: String name of the activation function or callable. use_keras_layer: If True, use keras layer if identifier is allow-listed. Returns: A Python function corresponding to the activation function or a keras activation layer when use_keras_layer=True. Returns a list of the shape of tensor, preferring static dimensions. Args: tensor: A tf.Tensor object to find the shape of. expected_rank: (optional) int. The expected rank of `tensor`. 
If this is specified and the `tensor` has a different rank, and exception will be thrown. name: Optional name of the tensor for the error message. Returns: A list of dimensions of the shape of tensor. All static dimensions will be returned as python integers, and dynamic dimensions will be returned as tf.Tensor scalars. Raises an exception if the tensor rank is not of the expected rank. Args: tensor: A tf.Tensor to check the rank of. expected_rank: Python integer or list of integers, expected rank. name: Optional name of the tensor for the error message. Raises: ValueError: If the expected shape doesn't match the actual shape. Computes a safe mean of the losses. Args: losses: `Tensor` whose elements contain individual loss measurements. Returns: A scalar representing the mean of `losses`. If `num_present` is zero, then zero is returned.
| 2.451432
| 2
|
userbot/modules/qoroskop.py
|
im-bb/CyberUserBot
| 2
|
6626056
|
<reponame>im-bb/CyberUserBot<gh_stars>1-10
# Copyright (C) 2021 CyberUserBot.
#
# All rights reserved.
import pyaztro
from userbot.events import register
from userbot.cmdhelp import CmdHelp
# Fallback zodiac sign used when `.qoroskop` is issued with no argument.
# Empty by default, which makes the no-argument path report "not found".
CYBER = ""

@register(outgoing=True, disable_errors=True, pattern=r"^\.qoroskop (.*)")
async def cyber(e):
    """Handle `.qoroskop <sign>`: edit the message with horoscope details.

    Fetches today's horoscope for the given sign via pyaztro.
    NOTE(review): pyaztro appears to expect English zodiac sign names — the
    help text below says so; confirm against the pyaztro API.
    """
    # Placeholder while the (network-bound) lookup runs.
    await e.edit("Məlumatlar hazırlanır..\nBu biraz vaxt apara bilər.")
    if not e.pattern_match.group(1):
        # No sign supplied: fall back to the module-level default.
        x = CYBER
        if not x:
            await e.edit("Bağışlayın, heçnə tapa bilmədim.")
            return
    else:
        x = e.pattern_match.group(1)
    # Query pyaztro for today's horoscope of the requested sign.
    horoscope = pyaztro.Aztro(sign=x)
    mood = horoscope.mood
    lt = horoscope.lucky_time
    desc = horoscope.description
    col = horoscope.color
    com = horoscope.compatibility
    ln = horoscope.lucky_number
    # Assemble the (Azerbaijani) reply shown to the user.
    result = (
        f"**`{x}`** üçün məlumat:\n"
        f"**Mood :** `{mood}`\n"
        f"**Şanslı vaxt :** `{lt}`\n"
        f"**Şanslı rəng :** `{col}`\n"
        f"**Şanslı rəqəm :** `{ln}`\n"
        f"**Uyğunluq :** `{com}`\n"
        f"**Haqqında :** `{desc}`\n"
    )
    await e.edit(result)

# Register the command in the bot's help system.
Help = CmdHelp('qoroskop')
Help.add_command('qoroskop', None, 'Yazdığınız bürc haqqında məlumat verər.')
Help.add_info('Qeyd: Bürc adları İngilis dilində yazılmalıdır. Bürclərin siyahısı: https://t.me/TheCyberSupport/11400')
Help.add()
|
# Copyright (C) 2021 CyberUserBot.
#
# All rights reserved.
import pyaztro
from userbot.events import register
from userbot.cmdhelp import CmdHelp
CYBER = ""
@register(outgoing=True, disable_errors=True, pattern=r"^\.qoroskop (.*)")
async def cyber(e):
await e.edit("Məlumatlar hazırlanır..\nBu biraz vaxt apara bilər.")
if not e.pattern_match.group(1):
x = CYBER
if not x:
await e.edit("Bağışlayın, heçnə tapa bilmədim.")
return
else:
x = e.pattern_match.group(1)
horoscope = pyaztro.Aztro(sign=x)
mood = horoscope.mood
lt = horoscope.lucky_time
desc = horoscope.description
col = horoscope.color
com = horoscope.compatibility
ln = horoscope.lucky_number
result = (
f"**`{x}`** üçün məlumat:\n"
f"**Mood :** `{mood}`\n"
f"**Şanslı vaxt :** `{lt}`\n"
f"**Şanslı rəng :** `{col}`\n"
f"**Şanslı rəqəm :** `{ln}`\n"
f"**Uyğunluq :** `{com}`\n"
f"**Haqqında :** `{desc}`\n"
)
await e.edit(result)
Help = CmdHelp('qoroskop')
Help.add_command('qoroskop', None, 'Yazdığınız bürc haqqında məlumat verər.')
Help.add_info('Qeyd: Bürc adları İngilis dilində yazılmalıdır. Bürclərin siyahısı: https://t.me/TheCyberSupport/11400')
Help.add()
|
en
| 0.74155
|
# Copyright (C) 2021 CyberUserBot. # # All rights reserved.
| 2.238956
| 2
|
Lab 2/coding_class_1/coding_class_1.py
|
BrendonPurchase2002-CS/Projects-
| 0
|
6626057
|
<filename>Lab 2/coding_class_1/coding_class_1.py
# author: <NAME>
# Student Number: 119473576
def all_pairs(s1, s2):
    """
    :param s1: = list/string
    :param s2: = list/string
    :return: = (False, combo) where combo is a list with all pairs of
        elements from s1 and s2 respectively, with the second element
        varying more rapidly than the first
    """
    # BUG FIX: the original never reset the inner index `j` between outer
    # iterations, so only pairs for the first element of s1 were produced.
    # Nested for-loops give every pairing and cannot skip the reset.
    combo = []
    for first in s1:
        for second in s2:
            combo.append(str(first) + str(second))
    # The leading False is kept for interface compatibility with existing
    # callers that unpack a (flag, list) tuple.
    return False, combo
print(all_pairs([1, 2], "abc"))
|
<filename>Lab 2/coding_class_1/coding_class_1.py
# author: <NAME>
# Student Number: 119473576
def all_pairs(s1, s2):
    """
    :param s1: = list/string
    :param s2: = list/string
    :return: = (False, combo) where combo is a list with all pairs of
        elements from s1 and s2 respectively, with the second element
        varying more rapidly than the first
    """
    # BUG FIX: the original never reset the inner index `j` between outer
    # iterations, so only pairs for the first element of s1 were produced.
    # Nested for-loops give every pairing and cannot skip the reset.
    combo = []
    for first in s1:
        for second in s2:
            combo.append(str(first) + str(second))
    # The leading False is kept for interface compatibility with existing
    # callers that unpack a (flag, list) tuple.
    return False, combo
print(all_pairs([1, 2], "abc"))
|
en
| 0.804111
|
# author: <NAME> # Student Number: 119473576 :param s1: = list/string :param s2: = list/string :return: = a list with all pairs of elements from list s1 and s2 respectively, with the second element varying more rapidly than the first
| 3.670386
| 4
|
Mundo 2/ex036.py
|
Alef-Martins/Exercicios_python_mundo-2
| 0
|
6626058
|
# Write a program to approve a bank loan for buying a house. Ask for the house
# price, the buyer's salary and over how many years it will be paid. The
# monthly installment may not exceed 30% of the buyer's salary, or the loan is
# denied. (Original exercise statement translated from Portuguese.)
valor = float(input('Informe o valor da casa: '))    # house price
salario = float(input('Informe o salário: '))        # buyer's monthly salary
anos = int(input('Em quantos anos deseja pagar? '))  # payment term in years
# Monthly installment: total price spread evenly over anos * 12 months.
prestaçao = valor / anos / 12
# Approve only when the installment is at most 30% of the salary.
if prestaçao <= salario * 30 / 100:
    print(f'Empréstimo aprovado! \nO valor das parcelas será de: {prestaçao:.2f}')
else:
    print('Empréstimo negado! \nO valor das parcela é maior que 30% do salário informado')
print(f'{prestaçao:.2f}')
|
# Write a program to approve a bank loan for buying a house. Ask for the house
# price, the buyer's salary and over how many years it will be paid. The
# monthly installment may not exceed 30% of the buyer's salary, or the loan is
# denied. (Original exercise statement translated from Portuguese.)
valor = float(input('Informe o valor da casa: '))    # house price
salario = float(input('Informe o salário: '))        # buyer's monthly salary
anos = int(input('Em quantos anos deseja pagar? '))  # payment term in years
# Monthly installment: total price spread evenly over anos * 12 months.
prestaçao = valor / anos / 12
# Approve only when the installment is at most 30% of the salary.
if prestaçao <= salario * 30 / 100:
    print(f'Empréstimo aprovado! \nO valor das parcelas será de: {prestaçao:.2f}')
else:
    print('Empréstimo negado! \nO valor das parcela é maior que 30% do salário informado')
print(f'{prestaçao:.2f}')
|
pt
| 0.9954
|
#Escreva um programa para aprovar o empréstimo bancário para a compra de uma casa. Pergunte o valor da casa, o salário do comprador e em quantos anos ele vai pagar. A prestação mensal não pode exeder 30% do salário do comprador ou o empréstimo será negado.
| 4.151836
| 4
|
code/processing/growth_rates/2021-07-25_r1_SingleKO_glucose/processing.py
|
cremerlab/useless_expression
| 0
|
6626059
|
#%%
# Processing pipeline for the 2021-07-25 single-knockout growth-rate plate
# run: tidies the plate-reader export, blank-subtracts, truncates to the
# exponential-phase OD window, and saves measurements plus a raw-trace figure.
import numpy as np
import pandas as pd
import futileprot.io
import futileprot.viz
import altair as alt
import altair_saver
colors, palette = futileprot.viz.altair_style()

# Define experiment parameters
DATE = '2021-07-25'
STRAINS = 'SingleKO'
MEDIUM = 'glucose'
RUN_NO = 1
ROOT = '../../../..'
SKIPROWS = 28  # header rows in the plate-reader CSV before the data table
OD_BOUNDS = [0.03, 0.35]  # OD600 window treated as exponential growth

# Add the well identifiers (strain id -> its triplicate wells on the plate)
MAP = {'GC030': ['C3', 'D3', 'E3'],
       'GC029': ['C4', 'D4', 'E4'],
       'GC050': ['C5', 'D5', 'E5'],
       'GC048': ['C6', 'D6', 'E6'],
       'GC053': ['C7', 'D7', 'E7'],
       'GC032': ['C8', 'D8', 'E8'],
       'GC049': ['C9', 'D9', 'E9'],
       'GC052': ['C10', 'D10' ,'E10'],
       'GC055': ['F3', 'F4', 'F5'],
       'GC047': ['F6', 'F7', 'F8'],
       'GC001': ['F9', 'F10', 'F11']}

# Generate a list of all valid wells (A1..H12 of a 96-well plate)
wells = [f'{letter}{number}' for letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] for number in np.arange(1,13)]

# Load the data
data = pd.read_csv(f'{ROOT}/data/growth_rates/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}/{DATE}_r{RUN_NO}.csv',
                   skiprows=SKIPROWS)

# Melt and drop unnecessary stuff
melted = data.melt(id_vars=['Time'], var_name='well', value_name='od_600nm')
melted = melted.loc[melted['well'].isin(wells)]
melted.dropna(inplace=True)

# Add strain identifier and replicates; wells absent from MAP stay 'blank'
melted['strain'] = 'blank'
melted['replicate'] = 0
# NOTE(review): this loop rebinds `wells` (the full-plate list above). The
# list is not used again afterwards so behavior is unchanged, but renaming
# the loop variable would avoid the shadowing.
for strain, wells in MAP.items():
    for idx, well in enumerate(wells):
        melted.loc[melted['well']==well, 'strain'] = strain
        melted.loc[melted['well']==well, 'replicate'] = idx + 1

# Add information regarding date and growth medium
melted['growth_medium'] = MEDIUM
melted['date'] = DATE
melted['run_number'] = RUN_NO

# Convert time to elapsed time (hours since the first reading)
melted['time_sec'] = pd.to_timedelta(melted['Time'].values)
melted['time_sec'] = melted['time_sec'].dt.total_seconds()
melted['elapsed_time_hr'] = (melted['time_sec'] - melted['time_sec'].min())/3600

# Drop unnecessary Time columns
melted.drop(columns=['Time', 'time_sec'], inplace=True)

# Reformat blank value as the average blank-well entry per time point
measurement = []
for g, d in melted.groupby(['elapsed_time_hr']):
    d = d.copy()
    avg_blank = d[d['strain']=='blank']
    meas = d[d['strain']!='blank']
    # NOTE(review): assigning into a slice of `d`; since `d` is already a
    # copy the result is correct, though pandas may emit a
    # SettingWithCopyWarning here.
    meas['avg_blank_value'] = avg_blank['od_600nm'].mean()
    measurement.append(meas)
measurement = pd.concat(measurement, sort=False)
measurement.rename(columns={'strain':'identifier'}, inplace=True)

# Add shorthand strain information and class identifier
strain_shorthand, _, strain_class = futileprot.io.standardize_strains(measurement['identifier'].values)
measurement['strain'] = strain_shorthand
measurement['class'] = strain_class

# Save to disk
measurement.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_measurements.csv', index=False)

#%%
# Perform the blank subtraction
measurement['od_600nm_subtracted'] = measurement['od_600nm'].values - measurement['avg_blank_value'].values

# Truncate to the exponential OD window, re-zero elapsed time, and save
trunc = []
for g, d in measurement.groupby(['strain', 'replicate']):
    d = d.copy()
    d = d[(d['od_600nm_subtracted'] >= OD_BOUNDS[0]) &
          (d['od_600nm_subtracted'] <= OD_BOUNDS[1])]
    # Each replicate's trace restarts at 0 hr after truncation.
    d['elapsed_time_hr'] -= d['elapsed_time_hr'].min()
    trunc.append(d)
trunc = pd.concat(trunc, sort=False)
trunc = trunc[['strain', 'elapsed_time_hr',
               'od_600nm_subtracted', 'replicate', 'growth_medium',
               'date', 'run_number', 'identifier', 'class']]
trunc.rename(columns={'od_600nm_subtracted':'od_600nm',
                      'replicate':'technical_replicate'}, inplace=True)
trunc.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv', index=False)

# %%
# Generate a figure of all of the raw traces
raw_traces = alt.Chart(
    data=measurement,
    width=400,
    height=200
).mark_line(
    point=True,
    opacity=0.75
).encode(
    x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
    y=alt.Y('od_600nm:Q', title='optical density [a.u.]'),
    color=alt.Color('replicate:N', title='technical replicate')
).facet(
    row='strain'
)
altair_saver.save(raw_traces, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_raw_traces.png',
                  scale_factor=2)
# %%
|
#%%
import numpy as np
import pandas as pd
import futileprot.io
import futileprot.viz
import altair as alt
import altair_saver
# Apply the project-wide Altair styling; returns the shared color objects.
colors, palette = futileprot.viz.altair_style()
# Define experiment parameters
DATE = '2021-07-25'  # experiment date; used in input and output file names
STRAINS = 'SingleKO'  # strain-collection label for file names
MEDIUM = 'glucose'  # growth-medium label
RUN_NO = 1  # plate-reader run number
ROOT = '../../../..'  # path from this script to the repository root
SKIPROWS = 28  # plate-reader header rows to skip when reading the raw CSV
OD_BOUNDS = [0.03, 0.35]  # OD600 window treated as exponential phase
# Add the well identifiers: each strain ID maps to its technical-replicate wells.
MAP = {'GC030': ['C3', 'D3', 'E3'],
       'GC029': ['C4', 'D4', 'E4'],
       'GC050': ['C5', 'D5', 'E5'],
       'GC048': ['C6', 'D6', 'E6'],
       'GC053': ['C7', 'D7', 'E7'],
       'GC032': ['C8', 'D8', 'E8'],
       'GC049': ['C9', 'D9', 'E9'],
       'GC052': ['C10', 'D10' ,'E10'],
       'GC055': ['F3', 'F4', 'F5'],
       'GC047': ['F6', 'F7', 'F8'],
       'GC001': ['F9', 'F10', 'F11']}
# Generate a list of all valid wells (A1..H12 on a 96-well plate)
wells = [f'{letter}{number}' for letter in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] for number in np.arange(1,13)]
# Load the data
data = pd.read_csv(f'{ROOT}/data/growth_rates/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}/{DATE}_r{RUN_NO}.csv',
                   skiprows=SKIPROWS)
# Melt to long form (one row per well per timepoint) and drop everything else
melted = data.melt(id_vars=['Time'], var_name='well', value_name='od_600nm')
melted = melted.loc[melted['well'].isin(wells)]
melted.dropna(inplace=True)
# Add strain identifier and replicates; wells not listed in MAP stay 'blank'.
melted['strain'] = 'blank'
melted['replicate'] = 0
# NOTE(review): this loop rebinds the module-level `wells` list. Harmless
# because `wells` is not used after this point, but consider renaming.
for strain, wells in MAP.items():
    for idx, well in enumerate(wells):
        melted.loc[melted['well']==well, 'strain'] = strain
        melted.loc[melted['well']==well, 'replicate'] = idx + 1
# Add information regarding date and growth medium
melted['growth_medium'] = MEDIUM
melted['date'] = DATE
melted['run_number'] = RUN_NO
# Convert time to elapsed time (hours since the first measurement)
melted['time_sec'] = pd.to_timedelta(melted['Time'].values)
melted['time_sec'] = melted['time_sec'].dt.total_seconds()
melted['elapsed_time_hr'] = (melted['time_sec'] - melted['time_sec'].min())/3600
# Drop unnecessary Time columns
melted.drop(columns=['Time', 'time_sec'], inplace=True)
# Attach, per timepoint, the average of the blank wells as a new column
measurement = []
for g, d in melted.groupby(['elapsed_time_hr']):
    d = d.copy()
    avg_blank = d[d['strain']=='blank']
    meas = d[d['strain']!='blank']
    # NOTE(review): `meas` is a slice of `d`; this assignment can trigger
    # pandas' SettingWithCopyWarning. It works here, but copying `meas`
    # first would be cleaner.
    meas['avg_blank_value'] = avg_blank['od_600nm'].mean()
    measurement.append(meas)
measurement = pd.concat(measurement, sort=False)
measurement.rename(columns={'strain':'identifier'}, inplace=True)
# Add shorthand strain information and class identifier
strain_shorthand, _, strain_class = futileprot.io.standardize_strains(measurement['identifier'].values)
measurement['strain'] = strain_shorthand
measurement['class'] = strain_class
# Save the blank-annotated measurements to disk
measurement.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_measurements.csv', index=False)
#%%
# Perform the blank subtraction
measurement['od_600nm_subtracted'] = measurement['od_600nm'].values - measurement['avg_blank_value'].values
# Keep only the exponential-phase OD window; after truncation, re-zero the
# elapsed time per (strain, replicate) and save the truncated data.
trunc = []
for g, d in measurement.groupby(['strain', 'replicate']):
    d = d.copy()
    d = d[(d['od_600nm_subtracted'] >= OD_BOUNDS[0]) &
          (d['od_600nm_subtracted'] <= OD_BOUNDS[1])]
    d['elapsed_time_hr'] -= d['elapsed_time_hr'].min()
    trunc.append(d)
trunc = pd.concat(trunc, sort=False)
trunc = trunc[['strain', 'elapsed_time_hr',
               'od_600nm_subtracted', 'replicate', 'growth_medium',
               'date', 'run_number', 'identifier', 'class']]
trunc.rename(columns={'od_600nm_subtracted':'od_600nm',
                      'replicate':'technical_replicate'}, inplace=True)
trunc.to_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_exponential_phase.csv', index=False)
# %%
# Generate a figure of all of the raw traces, one facet per strain
raw_traces = alt.Chart(
    data=measurement,
    width=400,
    height=200
).mark_line(
    point=True,
    opacity=0.75
).encode(
    x=alt.X('elapsed_time_hr:Q', title='elapsed time [hr]'),
    y=alt.Y('od_600nm:Q', title='optical density [a.u.]'),
    color=alt.Color('replicate:N', title='technical replicate')
).facet(
    row='strain'
)
altair_saver.save(raw_traces, f'output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_raw_traces.png',
                  scale_factor=2)
# %%
|
en
| 0.576668
|
#%% # Define experiment parameters # Add the well identifiers # Generate a list of all valid wells # Load the data # Melt and drop unnecessary stuff # Add strain identifier and replicates # Add information regarding date and growth medium # Convert time to elapsed time # Drop unnecessary Time columns # Reformat blank value as average eentry per time # Add shorthand strain information and class identifier # Save to disk #%% # Perform the blank subtraction # Given truncation, recalculated elapsed time and save truncated data # %% # Generate a figure of all of the raw traces # %%
| 2.261472
| 2
|
ample/util/logging_util.py
|
fsimkovic/ample
| 6
|
6626060
|
from enum import Enum
import json
import logging.config
import os
import sys
from ample.constants import AMPLE_LOGGER_CONFIG
class LogColors(Enum):
    """ANSI color codes used to colorize log messages by severity level."""
    CRITICAL = 31  # red (shares the color of ERROR)
    DEBUG = 34  # blue
    DEFAULT = 0  # reset / no color
    ERROR = 31  # red
    WARNING = 33  # yellow
class LogColorFormatter(logging.Formatter):
    """Formatter that wraps each line of a log message in ANSI color codes.

    The color is picked from :class:`LogColors` by the record's level name;
    records whose level has no entry pass through uncolored.
    """
    def format(self, record):
        # Colorize only levels that have a dedicated color defined.
        if record.levelname in LogColors.__members__:
            color_on = '\033[1;{}m'.format(LogColors[record.levelname].value)
            color_off = '\033[{}m'.format(LogColors["DEFAULT"].value)
            colored = [
                color_on + line + color_off
                for line in str(record.msg).splitlines()
            ]
            record.msg = os.linesep.join(colored)
        return logging.Formatter.format(self, record)
def setup_logging(argso):
    """Configure the root logger from the AMPLE JSON logging config.

    Also sets the path to the AMPLE logfile in the ``argso`` dictionary
    (required for pyrvapi): ``argso['ample_log']`` becomes the absolute path
    of the config's ``file_handler`` logfile, or ``None`` when the config
    defines no such handler.

    Parameters
    ----------
    argso : dict
        AMPLE options dictionary, mutated in place as described above.

    Returns
    -------
    logging.Logger
        The configured root logger.

    Raises
    ------
    RuntimeError
        If the ``AMPLE_LOGGER_CONFIG`` JSON file does not exist.
    """
    if not os.path.isfile(AMPLE_LOGGER_CONFIG):
        raise RuntimeError("Cannot find AMPLE_LOGGER_CONFIG file: {}".format(AMPLE_LOGGER_CONFIG))
    with open(AMPLE_LOGGER_CONFIG, 'rt') as f:
        config = json.load(f)
    logging.config.dictConfig(config)
    try:
        argso['ample_log'] = os.path.abspath(config['handlers']['file_handler']['filename'])
    except KeyError:
        # Config has no file handler; record that there is no logfile.
        argso['ample_log'] = None
    return logging.getLogger()
def setup_console_logging(level=logging.INFO, formatstr='%(message)s\n'):
    """
    Set up logging to the console - required for the individual modules.

    Parameters
    ----------
    level : int
        Sets the threshold for the console output to level.
    formatstr : str
        The string used to format the log messages

    Returns
    -------
    logger : :obj:logging.logger
        The root logger
    """
    logger = logging.getLogger()
    # Root logger passes everything; the handler below does the filtering.
    logger.setLevel(logging.DEBUG)
    # The old try/except guarding the Python 2.6 -> 2.7 StreamHandler API
    # change was dead code (both branches were identical) and was removed.
    cl = logging.StreamHandler(stream=sys.stdout)
    cl.setLevel(level)
    cl.setFormatter(logging.Formatter(formatstr))
    logger.addHandler(cl)
    return logger
def setup_file_logging(logfile, level=logging.DEBUG, formatstr='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
"""
Set up logging to a file - required for the individual modules.
Parameters
----------
logfile : str
The path to the logfile that output will be written to.
level : int
Sets the threshold for the console output to level.
formatstr : str
The string used to format the log messages
Returns
-------
logger : :obj:logging.logger
The root logger
"""
logger = logging.getLogger()
fl = logging.FileHandler(logfile)
fl.setLevel(level)
formatter = logging.Formatter(formatstr)
fl.setFormatter(formatter)
logger.addHandler(fl)
return logger
|
from enum import Enum
import json
import logging.config
import os
import sys
from ample.constants import AMPLE_LOGGER_CONFIG
class LogColors(Enum):
"""Color container for log messages"""
CRITICAL = 31
DEBUG = 34
DEFAULT = 0
ERROR = 31
WARNING = 33
class LogColorFormatter(logging.Formatter):
"""Formatter for log messages"""
def format(self, record):
if record.levelname in LogColors.__members__:
prefix = '\033[1;{}m'.format(LogColors[record.levelname].value)
postfix = '\033[{}m'.format(LogColors["DEFAULT"].value)
record.msg = os.linesep.join([prefix + msg + postfix for msg in str(record.msg).splitlines()])
return logging.Formatter.format(self, record)
def setup_logging(argso):
"""Read JSON config for logger and return root logger
Also sets the path to the AMPLE logfile in the dictionary (required for pyrvapi)"""
if not os.path.isfile(AMPLE_LOGGER_CONFIG):
raise RuntimeError("Cannot find AMPLE_LOGGER_CONFIG file: {}".format(AMPLE_LOGGER_CONFIG))
with open(AMPLE_LOGGER_CONFIG, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
try:
argso['ample_log'] = os.path.abspath(config['handlers']['file_handler']['filename'])
except KeyError:
argso['ample_log'] = None
return logging.getLogger()
def setup_console_logging(level=logging.INFO, formatstr='%(message)s\n'):
"""
Set up logging to the console - required for the individual modules.
Parameters
----------
level : int
Sets the threshold for the console output to level.
formatstr : str
The string used to format the log messages
Returns
-------
logger : :obj:logging.logger
The root logger
"""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Seems they changed the api in python 2.6->2.7
try:
cl = logging.StreamHandler(stream=sys.stdout)
except TypeError:
cl = logging.StreamHandler(stream=sys.stdout)
cl.setLevel(level)
formatter = logging.Formatter(formatstr)
cl.setFormatter(formatter)
logger.addHandler(cl)
return logger
def setup_file_logging(logfile, level=logging.DEBUG, formatstr='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
"""
Set up logging to a file - required for the individual modules.
Parameters
----------
logfile : str
The path to the logfile that output will be written to.
level : int
Sets the threshold for the console output to level.
formatstr : str
The string used to format the log messages
Returns
-------
logger : :obj:logging.logger
The root logger
"""
logger = logging.getLogger()
fl = logging.FileHandler(logfile)
fl.setLevel(level)
formatter = logging.Formatter(formatstr)
fl.setFormatter(formatter)
logger.addHandler(fl)
return logger
|
en
| 0.553172
|
Color container for log messages Formatter for log messages Read JSON config for logger and return root logger Also sets the path to the AMPLE logfile in the dictionary (required for pyrvapi) Set up logging to the console - required for the individual modules. Parameters ---------- level : int Sets the threshold for the console output to level. formatstr : str The string used to format the log messages Returns ------- logger : :obj:logging.logger The root logger # Seems they changed the api in python 2.6->2.7 Set up logging to a file - required for the individual modules. Parameters ---------- logfile : str The path to the logfile that output will be written to. level : int Sets the threshold for the console output to level. formatstr : str The string used to format the log messages Returns ------- logger : :obj:logging.logger The root logger
| 2.596921
| 3
|
setup.py
|
johncollins/metric-learn
| 4
|
6626061
|
<gh_stars>1-10
# setup.py
try:
    from setuptools import setup
except ImportError:  # fall back to the stdlib installer when setuptools is absent
    from distutils.core import setup

# Read the long description up front so the file handle is closed promptly.
# (The original passed an open() result directly into the dict, leaking it.)
with open('README.md', 'r') as readme:
    long_description = readme.read()

# Create the setup dict
setup_args = {
    'name' : 'metric-learn',
    'version' : '0.0.0',
    'author' : '<NAME>',
    'author_email' : '<EMAIL>',
    'packages' : ['metric_learn', 'metric_learn.test'],
    'scripts' :[],
    'url' : 'github.com/johncollins/metric-learn',
    'license' :'LICENSE.txt',
    'description' : 'Learn mahalanobis style metrics parameterized by some learned matrix A',
    'long_description' : long_description,
    'requires' : ['numpy']
}
setup(**setup_args)
|
# setup.py
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Create the setup dict
setup_args = {
'name' : 'metric-learn',
'version' : '0.0.0',
'author' : '<NAME>',
'author_email' : '<EMAIL>',
'packages' : ['metric_learn', 'metric_learn.test'],
'scripts' :[],
'url' : 'github.com/johncollins/metric-learn',
'license' :'LICENSE.txt',
'description' : 'Learn mahalanobis style metrics parameterized by some learned matrix A',
'long_description' : open('README.md', 'r').read(),
'requires' : ['numpy']
}
setup(**setup_args)
|
en
| 0.658495
|
# setup.py # Create the setup dict
| 1.404083
| 1
|
torch_points_kernels/gridding.py
|
maskjp/torch-points-kernels
| 0
|
6626062
|
import torch
from torch.cuda.amp import custom_bwd,custom_fwd
if torch.cuda.is_available():
import torch_points_kernels.points_cuda as tpcuda
class GriddingFunction(torch.autograd.Function):
    """Autograd wrapper around the CUDA gridding kernel.

    Forward rasterizes a point cloud onto a cubic 3D grid via trilinear
    weights; backward scatters the grid gradient back to the points.
    CUDA only -- there is no CPU implementation.
    """
    @staticmethod
    @custom_fwd(cast_inputs=torch.half)
    def forward(ctx, ptcloud, scale):
        if not torch.cuda.is_available():
            # NOTE(review): the message mentions "Chamfer Distance" -- it looks
            # copied from another op; the operation here is gridding.
            raise NotImplementedError("CPU version is not available for Chamfer Distance")
        # The kernel receives the grid bounds [-scale, scale-1] on each axis.
        grid, grid_pt_weights, grid_pt_indexes = tpcuda.gridding(
            -scale, scale - 1, -scale, scale - 1, -scale, scale - 1, ptcloud
        )
        # print(grid.size()) # torch.Size(batch_size, n_grid_vertices)
        # print(grid_pt_weights.size()) # torch.Size(batch_size, n_pts, 8, 3)
        # print(grid_pt_indexes.size()) # torch.Size(batch_size, n_pts, 8)
        # Weights/indexes are needed to scatter gradients back in backward().
        ctx.save_for_backward(grid_pt_weights, grid_pt_indexes)
        return grid
    @staticmethod
    @custom_bwd
    def backward(ctx, grad_grid):
        grid_pt_weights, grid_pt_indexes = ctx.saved_tensors
        grad_ptcloud = tpcuda.gridding_grad(grid_pt_weights, grid_pt_indexes, grad_grid)
        # print(grad_ptcloud.size()) # torch.Size(batch_size, n_pts, 3)
        # `scale` receives no gradient, hence the trailing None.
        return grad_ptcloud, None
def gridding(ptcloud, scale):
    r"""
    Convert the input point clouds into 3D grids by trilinear interpolation.
    Please refer to https://arxiv.org/pdf/2006.03761 for more information

    Parameters
    ----------
    ptcloud : torch.Tensor (dtype=torch.float32)
        (B, n_pts, 3) B point clouds containing n_pts points
    scale : Int
        the resolution of the 3D grid

    Returns
    -------
    grid: torch.Tensor
        (B, scale, scale, scale): the grid of the resolution of scale * scale * scale
    """
    if ptcloud.dim() != 3 or ptcloud.size(2) != 3:
        raise ValueError("The input point cloud should be of size (B, n_pts, 3)")
    scaled = ptcloud * scale
    grids = []
    # Process one cloud at a time so zero-padded points can be dropped
    # independently per batch element.
    for cloud in torch.split(scaled, 1, dim=0):
        keep = torch.sum(cloud, dim=2).ne(0)
        cloud = cloud[keep].unsqueeze(dim=0)
        grids.append(GriddingFunction.apply(cloud, scale))
    return torch.cat(grids, dim=0).contiguous()
|
import torch
from torch.cuda.amp import custom_bwd,custom_fwd
if torch.cuda.is_available():
import torch_points_kernels.points_cuda as tpcuda
class GriddingFunction(torch.autograd.Function):
@staticmethod
@custom_fwd(cast_inputs=torch.half)
def forward(ctx, ptcloud, scale):
if not torch.cuda.is_available():
raise NotImplementedError("CPU version is not available for Chamfer Distance")
grid, grid_pt_weights, grid_pt_indexes = tpcuda.gridding(
-scale, scale - 1, -scale, scale - 1, -scale, scale - 1, ptcloud
)
# print(grid.size()) # torch.Size(batch_size, n_grid_vertices)
# print(grid_pt_weights.size()) # torch.Size(batch_size, n_pts, 8, 3)
# print(grid_pt_indexes.size()) # torch.Size(batch_size, n_pts, 8)
ctx.save_for_backward(grid_pt_weights, grid_pt_indexes)
return grid
@staticmethod
@custom_bwd
def backward(ctx, grad_grid):
grid_pt_weights, grid_pt_indexes = ctx.saved_tensors
grad_ptcloud = tpcuda.gridding_grad(grid_pt_weights, grid_pt_indexes, grad_grid)
# print(grad_ptcloud.size()) # torch.Size(batch_size, n_pts, 3)
return grad_ptcloud, None
def gridding(ptcloud, scale):
r"""
Converts the input point clouds into 3D grids by trilinear interpolcation.
Please refer to https://arxiv.org/pdf/2006.03761 for more information
Parameters
----------
ptcloud : torch.Tensor (dtype=torch.float32)
(B, n_pts, 3) B point clouds containing n_pts points
scale : Int
the resolution of the 3D grid
Returns
-------
grid: torch.Tensor
(B, scale, scale, scale): the grid of the resolution of scale * scale * scale
"""
if len(ptcloud.shape) != 3 or ptcloud.size(2) != 3:
raise ValueError("The input point cloud should be of size (B, n_pts, 3)")
ptcloud = ptcloud * scale
_ptcloud = torch.split(ptcloud, 1, dim=0)
grids = []
for p in _ptcloud:
non_zeros = torch.sum(p, dim=2).ne(0)
p = p[non_zeros].unsqueeze(dim=0)
grids.append(GriddingFunction.apply(p, scale))
return torch.cat(grids, dim=0).contiguous()
|
en
| 0.70657
|
# print(grid.size()) # torch.Size(batch_size, n_grid_vertices) # print(grid_pt_weights.size()) # torch.Size(batch_size, n_pts, 8, 3) # print(grid_pt_indexes.size()) # torch.Size(batch_size, n_pts, 8) # print(grad_ptcloud.size()) # torch.Size(batch_size, n_pts, 3) Converts the input point clouds into 3D grids by trilinear interpolcation. Please refer to https://arxiv.org/pdf/2006.03761 for more information Parameters ---------- ptcloud : torch.Tensor (dtype=torch.float32) (B, n_pts, 3) B point clouds containing n_pts points scale : Int the resolution of the 3D grid Returns ------- grid: torch.Tensor (B, scale, scale, scale): the grid of the resolution of scale * scale * scale
| 2.613104
| 3
|
sdk/python/pulumi_azure_native/appconfiguration/v20200601/get_configuration_store.py
|
sebtelko/pulumi-azure-native
| 0
|
6626063
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConfigurationStoreResult',
'AwaitableGetConfigurationStoreResult',
'get_configuration_store',
]
@pulumi.output_type
class GetConfigurationStoreResult:
"""
The configuration store along with all resource properties. The Configuration Store will have all information to begin utilizing it.
"""
def __init__(__self__, creation_date=None, encryption=None, endpoint=None, id=None, identity=None, location=None, name=None, private_endpoint_connections=None, provisioning_state=None, public_network_access=None, sku=None, tags=None, type=None):
if creation_date and not isinstance(creation_date, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", creation_date)
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if endpoint and not isinstance(endpoint, str):
raise TypeError("Expected argument 'endpoint' to be a str")
pulumi.set(__self__, "endpoint", endpoint)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_network_access and not isinstance(public_network_access, str):
raise TypeError("Expected argument 'public_network_access' to be a str")
pulumi.set(__self__, "public_network_access", public_network_access)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> str:
"""
The creation date of configuration store.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.EncryptionPropertiesResponse']:
"""
The encryption settings of the configuration store.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The DNS endpoint where the configuration store API will be available.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ResourceIdentityResponse']:
"""
The managed identity information, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionReferenceResponse']:
"""
The list of private endpoint connections that are set up for this resource.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the configuration store.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Control permission for data plane traffic coming from public networks while private endpoint is enabled.
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
The sku of the configuration store.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetConfigurationStoreResult(GetConfigurationStoreResult):
    """Awaitable wrapper around :class:`GetConfigurationStoreResult`.

    ``await``-ing an instance returns a plain ``GetConfigurationStoreResult``
    copy, so the same object can be used from sync and async code paths.
    """
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` marks this method as a generator so it
        # satisfies the awaitable protocol; the return value below is what
        # ``await`` produces.
        if False:
            yield self
        return GetConfigurationStoreResult(
            creation_date=self.creation_date,
            encryption=self.encryption,
            endpoint=self.endpoint,
            id=self.id,
            identity=self.identity,
            location=self.location,
            name=self.name,
            private_endpoint_connections=self.private_endpoint_connections,
            provisioning_state=self.provisioning_state,
            public_network_access=self.public_network_access,
            sku=self.sku,
            tags=self.tags,
            type=self.type)
def get_configuration_store(config_store_name: Optional[str] = None,
                            resource_group_name: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationStoreResult:
    """
    The configuration store along with all resource properties. The Configuration Store will have all information to begin utilizing it.

    :param str config_store_name: The name of the configuration store.
    :param str resource_group_name: The name of the resource group to which the container registry belongs.
    :param pulumi.InvokeOptions opts: Options for the invoke; a missing version is filled in from the SDK.
    :return: An awaitable result carrying the configuration store's properties.
    """
    __args__ = dict()
    __args__['configStoreName'] = config_store_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider and map the raw result onto the awaitable wrapper.
    __ret__ = pulumi.runtime.invoke('azure-native:appconfiguration/v20200601:getConfigurationStore', __args__, opts=opts, typ=GetConfigurationStoreResult).value
    return AwaitableGetConfigurationStoreResult(
        creation_date=__ret__.creation_date,
        encryption=__ret__.encryption,
        endpoint=__ret__.endpoint,
        id=__ret__.id,
        identity=__ret__.identity,
        location=__ret__.location,
        name=__ret__.name,
        private_endpoint_connections=__ret__.private_endpoint_connections,
        provisioning_state=__ret__.provisioning_state,
        public_network_access=__ret__.public_network_access,
        sku=__ret__.sku,
        tags=__ret__.tags,
        type=__ret__.type)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetConfigurationStoreResult',
'AwaitableGetConfigurationStoreResult',
'get_configuration_store',
]
@pulumi.output_type
class GetConfigurationStoreResult:
"""
The configuration store along with all resource properties. The Configuration Store will have all information to begin utilizing it.
"""
def __init__(__self__, creation_date=None, encryption=None, endpoint=None, id=None, identity=None, location=None, name=None, private_endpoint_connections=None, provisioning_state=None, public_network_access=None, sku=None, tags=None, type=None):
if creation_date and not isinstance(creation_date, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", creation_date)
if encryption and not isinstance(encryption, dict):
raise TypeError("Expected argument 'encryption' to be a dict")
pulumi.set(__self__, "encryption", encryption)
if endpoint and not isinstance(endpoint, str):
raise TypeError("Expected argument 'endpoint' to be a str")
pulumi.set(__self__, "endpoint", endpoint)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_network_access and not isinstance(public_network_access, str):
raise TypeError("Expected argument 'public_network_access' to be a str")
pulumi.set(__self__, "public_network_access", public_network_access)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> str:
"""
The creation date of configuration store.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def encryption(self) -> Optional['outputs.EncryptionPropertiesResponse']:
"""
The encryption settings of the configuration store.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The DNS endpoint where the configuration store API will be available.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ResourceIdentityResponse']:
"""
The managed identity information, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionReferenceResponse']:
"""
The list of private endpoint connections that are set up for this resource.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the configuration store.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[str]:
"""
Control permission for data plane traffic coming from public networks while private endpoint is enabled.
"""
return pulumi.get(self, "public_network_access")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
The sku of the configuration store.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetConfigurationStoreResult(GetConfigurationStoreResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConfigurationStoreResult(
creation_date=self.creation_date,
encryption=self.encryption,
endpoint=self.endpoint,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
private_endpoint_connections=self.private_endpoint_connections,
provisioning_state=self.provisioning_state,
public_network_access=self.public_network_access,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_configuration_store(config_store_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationStoreResult:
"""
The configuration store along with all resource properties. The Configuration Store will have all information to begin utilizing it.
:param str config_store_name: The name of the configuration store.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
"""
__args__ = dict()
__args__['configStoreName'] = config_store_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:appconfiguration/v20200601:getConfigurationStore', __args__, opts=opts, typ=GetConfigurationStoreResult).value
return AwaitableGetConfigurationStoreResult(
creation_date=__ret__.creation_date,
encryption=__ret__.encryption,
endpoint=__ret__.endpoint,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
private_endpoint_connections=__ret__.private_endpoint_connections,
provisioning_state=__ret__.provisioning_state,
public_network_access=__ret__.public_network_access,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
|
en
| 0.853487
|
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The configuration store along with all resource properties. The Configuration Store will have all information to begin utilizing it. The creation date of configuration store. The encryption settings of the configuration store. The DNS endpoint where the configuration store API will be available. The resource ID. The managed identity information, if configured. The location of the resource. This cannot be changed after the resource is created. The name of the resource. The list of private endpoint connections that are set up for this resource. The provisioning state of the configuration store. Control permission for data plane traffic coming from public networks while private endpoint is enabled. The sku of the configuration store. The tags of the resource. The type of the resource. # pylint: disable=using-constant-test The configuration store along with all resource properties. The Configuration Store will have all information to begin utilizing it. :param str config_store_name: The name of the configuration store. :param str resource_group_name: The name of the resource group to which the container registry belongs.
| 1.731057
| 2
|
lexicon/tests/providers/test_transip.py
|
nextgens/lexicon
| 0
|
6626064
|
<filename>lexicon/tests/providers/test_transip.py
"""Integration tests for Transip"""
import re
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import (
IntegrationTestsV2,
vcr_integration_test,
)
FAKE_KEY = """
-----<KEY>
"""
# Currently TransipProviderTests class is configured to use a fake key so that the CI system does
# not need an actual key when integration tests are run with the recorded cassettes.
# If you want to run again manually the integration tests against the live API, and so use a valid
# a real key, please modify the following elements in this file:
# - comment out the setUp function,
# - comment out the tearDown function,
# - remove auth_api_key entry from the dict returned by _test_parameters_overrides function.
class TransipProviderTests(TestCase, IntegrationTestsV2):
    """Integration test suite for the Transip DNS provider."""

    provider_name = "transip"
    domain = "nuvius.nl"

    @vcr_integration_test
    def test_provider_when_calling_create_record_for_CNAME_with_valid_name_and_content(
        self,
    ):
        # TransIP requires external CNAME targets to be FQDNs with a trailing dot.
        authenticated = self._construct_authenticated_provider()
        assert authenticated.create_record("CNAME", "docs", "docs.example.com.")

    @pytest.fixture(autouse=True)
    def _generate_fake_key(self, tmp_path):
        # Materialize the placeholder key in a per-test temp file and remember it.
        key_file = tmp_path / "key.pem"
        key_file.write_text(FAKE_KEY)
        self._fake_key = key_file

    def _filter_headers(self):
        # Credential-bearing headers stripped from recorded cassettes.
        return ["Signature", "Authorization"]

    def _filter_post_data_parameters(self):
        return ["login"]

    def _filter_response(self, response):
        # Redact short-lived secrets embedded in recorded response bodies.
        redactions = (
            (rb'"token":"[\w.-]+"', b'"token":"TOKEN"'),
            (rb'"authCode":"[\w.-]+"', b'"authCode":"AUTH_CODE"'),
        )
        body = response["body"]["string"]
        for pattern, replacement in redactions:
            body = re.sub(pattern, replacement, body)
        response["body"]["string"] = body
        return response

    def _test_parameters_overrides(self):
        return {"auth_api_key": str(self._fake_key), "auth_key_is_global": True}
|
<filename>lexicon/tests/providers/test_transip.py
"""Integration tests for Transip"""
import re
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import (
IntegrationTestsV2,
vcr_integration_test,
)
FAKE_KEY = """
-----<KEY>
"""
# Currently TransipProviderTests class is configured to use a fake key so that the CI system does
# not need an actual key when integration tests are run with the recorded cassettes.
# If you want to run again manually the integration tests against the live API, and so use a valid
# a real key, please modify the following elements in this file:
# - comment out the setUp function,
# - comment out the tearDown function,
# - remove auth_api_key entry from the dict returned by _test_parameters_overrides function.
class TransipProviderTests(TestCase, IntegrationTestsV2):
"""TestCase for Transip"""
provider_name = "transip"
domain = "nuvius.nl"
@vcr_integration_test
def test_provider_when_calling_create_record_for_CNAME_with_valid_name_and_content(
self,
):
provider = self._construct_authenticated_provider()
# TransIP CNAME records values must be a FQDN with trailing dot for external domains.
assert provider.create_record("CNAME", "docs", "docs.example.com.")
@pytest.fixture(autouse=True)
def _generate_fake_key(self, tmp_path):
self._fake_key = tmp_path / "key.pem"
self._fake_key.write_text(FAKE_KEY)
def _filter_headers(self):
return ["Signature", "Authorization"]
def _filter_post_data_parameters(self):
return ["login"]
def _filter_response(self, response):
response["body"]["string"] = re.sub(
rb'"token":"[\w.-]+"',
b'"token":"TOKEN"',
response["body"]["string"],
)
response["body"]["string"] = re.sub(
rb'"authCode":"[\w.-]+"',
b'"authCode":"AUTH_CODE"',
response["body"]["string"],
)
return response
def _test_parameters_overrides(self):
return {"auth_api_key": str(self._fake_key), "auth_key_is_global": True}
|
en
| 0.793803
|
Integration tests for Transip -----<KEY> # Currently TransipProviderTests class is configured to use a fake key so that the CI system does # not need an actual key when integration tests are run with the recorded cassettes. # If you want to run again manually the integration tests against the live API, and so use a valid # a real key, please modify the following elements in this file: # - comment out the setUp function, # - comment out the tearDown function, # - remove auth_api_key entry from the dict returned by _test_parameters_overrides function. TestCase for Transip # TransIP CNAME records values must be a FQDN with trailing dot for external domains.
| 2.218884
| 2
|
app_widget.py
|
project-k-0-1/project-k
| 1
|
6626065
|
<gh_stars>1-10
""" App widget functionalities """
from app_head import get_head
from app_body import get_body
from app_page import set_page
from app_loading import get_loading_head, get_loading_body
from app_ogp import set_ogp
from app_title import get_title
from app_metatags import get_metatags
from bootstrap import get_bootstrap
from font_awesome import get_font_awesome
from app_navbar import navbar
from googleanalytics import get_googleanalytics
from app_stylesheet import get_stylesheet
from app_cookie import get_sa_theme
from sa_func import redirect_if_not_logged_in
#-------------------------------------------------------------------------------
# Insert here module of the widget to load
#-------------------------------------------------------------------------------
from tradingview_chart import get_tradingview_chart
from tradingview_ecocal import get_tradingview_ecocal
from tradingview_fxcross import get_tradingview_fxcross
from tradingview_fxheatmap import get_tradingview_fxheatmap
from tradingview_screener import get_tradingview_screener
from tradingview_watchlist import get_tradingview_watchlist
from trades_tab import get_trades_box
from news_feed import get_newsfeed
#-------------------------------------------------------------------------------
def get_widget_content(burl, nonavbar, funcname, noflexheight):
    """Render the HTML wrapper around a single widget.

    The widget markup itself comes from evaluating *funcname*.
    SECURITY NOTE(review): ``eval`` executes arbitrary Python, so *funcname*
    must never be derived from untrusted input.
    """
    if nonavbar is None:
        wrapper_class = 'box-top'
        wrapper_style = 'height:89vh;'
    else:
        wrapper_class = 'box'
        wrapper_style = ('height:100vh;width:100vw;margin-left:-15px;'
                         'overflow-x:hidden;overflow-y:hidden;')
    if noflexheight is not None:
        wrapper_style = ''
    # Force a full reload whenever the window is resized or first shown.
    reload_script = (
        '<script>'
        'window.onresize = function(){ location.reload(); };'
        'if(!window.location.hash) {'
        ' window.location = window.location + "#";'
        ' window.location.reload();'
        '}'
        '</script>'
    )
    return (reload_script +
            '<div class="' + wrapper_class + '"></div>' +
            ' <div style="' + wrapper_style + '">' +
            eval(funcname) +
            ' </div>')
def get_widget_page(appname,
                    burl,
                    nonavbar,
                    funcname,
                    refresh_in_second,
                    noflexheight,
                    terminal):
    """Assemble the complete HTML page hosting a single widget."""
    # Navbar is rendered unless the caller explicitly suppresses it.
    nav_html = navbar(burl, 0, terminal) if nonavbar is None else ''
    refresh_meta = ''
    if refresh_in_second is not None:
        # Optional meta-refresh tag for auto-reloading widgets.
        refresh_meta = ('<meta http-equiv="refresh" content="' +
                        str(refresh_in_second) + '">')
    head_html = get_head(get_loading_head() +
                         get_googleanalytics() +
                         get_title(appname) +
                         refresh_meta +
                         get_metatags(burl) +
                         redirect_if_not_logged_in(burl, '') +
                         set_ogp(burl, 1, '', '') +
                         get_bootstrap(get_sa_theme(), burl) +
                         get_font_awesome() +
                         get_stylesheet(burl))
    body_html = get_body(get_loading_body(),
                         nav_html + get_widget_content(burl,
                                                       nonavbar,
                                                       funcname,
                                                       noflexheight), '')
    return set_page(head_html + body_html)
|
""" App widget functionalities """
from app_head import get_head
from app_body import get_body
from app_page import set_page
from app_loading import get_loading_head, get_loading_body
from app_ogp import set_ogp
from app_title import get_title
from app_metatags import get_metatags
from bootstrap import get_bootstrap
from font_awesome import get_font_awesome
from app_navbar import navbar
from googleanalytics import get_googleanalytics
from app_stylesheet import get_stylesheet
from app_cookie import get_sa_theme
from sa_func import redirect_if_not_logged_in
#-------------------------------------------------------------------------------
# Insert here module of the widget to load
#-------------------------------------------------------------------------------
from tradingview_chart import get_tradingview_chart
from tradingview_ecocal import get_tradingview_ecocal
from tradingview_fxcross import get_tradingview_fxcross
from tradingview_fxheatmap import get_tradingview_fxheatmap
from tradingview_screener import get_tradingview_screener
from tradingview_watchlist import get_tradingview_watchlist
from trades_tab import get_trades_box
from news_feed import get_newsfeed
#-------------------------------------------------------------------------------
def get_widget_content(burl, nonavbar, funcname, noflexheight):
""" xxx """
box_content = ''
box_class = 'box'
box_vh = 'height:100vh;width:100vw;margin-left:-15px;'+\
'overflow-x:hidden;overflow-y:hidden;'
if nonavbar is None:
box_class = 'box-top'
box_vh = 'height:89vh;'
if noflexheight is not None:
box_vh = ''
refresh_once = ''+\
'<script>'+\
'window.onresize = function(){ location.reload(); };'+\
'if(!window.location.hash) {'+\
' window.location = window.location + "#";'+\
' window.location.reload();'+\
'}'+\
'</script>'
box_content = refresh_once +\
'<div class="'+ box_class +'"></div>' +\
' <div style="'+ box_vh +'">'+\
eval(funcname)+\
' </div>'
return box_content
def get_widget_page(appname,
burl,
nonavbar,
funcname,
refresh_in_second,
noflexheight,
terminal):
""" xxx """
return_data = ''
navbarcontent = ''
metarefresh = ''
if nonavbar is None:
navbarcontent = navbar(burl, 0, terminal)
if refresh_in_second is not None:
metarefresh = '<meta http-equiv="refresh" content="'+ str(refresh_in_second) +'">'
return_data = get_head(get_loading_head() +\
get_googleanalytics() +\
get_title(appname) +\
metarefresh +\
get_metatags(burl) +\
redirect_if_not_logged_in(burl, '') +\
set_ogp(burl, 1, '', '') +\
get_bootstrap(get_sa_theme(), burl) +\
get_font_awesome() +\
get_stylesheet(burl))
return_data = return_data + get_body(get_loading_body(), navbarcontent +\
get_widget_content(burl,
nonavbar,
funcname,
noflexheight),'')
return_data = set_page(return_data)
return return_data
|
en
| 0.102377
|
App widget functionalities #------------------------------------------------------------------------------- # Insert here module of the widget to load #------------------------------------------------------------------------------- #------------------------------------------------------------------------------- xxx xxx
| 1.368214
| 1
|
run_predictions.py
|
MingshuLiang/caltech-ee148-spring2020-hw01
| 0
|
6626066
|
import os
import numpy as np
import json
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
def detect_red_light(I, kernel):
    '''
    Locate red lights in image ``I`` via per-channel normalized
    cross-correlation against ``kernel``, with greedy non-maximum
    suppression, and return one bounding box per detection.

    Parameters
    ----------
    I : numpy array of shape (rows, cols, 3)
        PIL loads images in RGB order, so I[:,:,0] is red, I[:,:,1] green,
        I[:,:,2] blue.
    kernel : numpy array of shape (k_rows, k_cols, 3)
        Template of a red light; smaller than ``I`` in both dimensions.

    Returns
    -------
    list of [tl_col, tl_row, br_col, br_row]
        Column-first corner coordinates of each detection.
        NOTE(review): the original docstring promised row-first order, but
        the code has always emitted column-first; the actual behavior is
        kept and documented here.
    '''
    bounding_boxes = []  # each entry is [tl_col, tl_row, br_col, br_row]

    (n_rows_k, n_cols_k, n_channels_k) = np.shape(kernel)  # "k" for "kernel"
    (n_rows, n_cols, n_channels) = np.shape(I)
    conv_map = np.zeros((n_rows - n_rows_k + 1, n_cols - n_cols_k + 1, n_channels))
    kernel_n = np.zeros((n_rows_k, n_cols_k, n_channels_k))
    threshold = 0.92  # minimum weighted correlation for a detection

    # Normalize each kernel channel to unit L2 norm (done once, outside the scan).
    for ch in range(n_channels_k):
        norm_k = np.linalg.norm(kernel[:, :, ch])
        if norm_k > 0:
            kernel_n[:, :, ch] = kernel[:, :, ch] / norm_k

    # Slide the kernel over every position and correlate channel by channel.
    for i in range(n_rows - n_rows_k + 1):
        for j in range(n_cols - n_cols_k + 1):
            for ch in range(n_channels):
                patch = I[i:i + n_rows_k, j:j + n_cols_k, ch]
                norm_I = np.linalg.norm(patch)
                # Guard all-zero patches: the original divided by zero here,
                # producing NaNs (and runtime warnings); a zero response is
                # equivalent since NaN never passed the threshold anyway.
                if norm_I > 0:
                    conv_map[i, j, ch] = np.sum(kernel_n[:, :, ch] * (patch / norm_I))

    # Red-weighted combination of the per-channel correlation maps.
    conv_map_rgb = 0.8 * conv_map[:, :, 0] + 0.1 * conv_map[:, :, 1] + 0.1 * conv_map[:, :, 2]
    # Zero out everything below the detection threshold.
    conv_map_rgb_t = np.where(conv_map_rgb > threshold, conv_map_rgb, 0)

    # Greedy non-maximum suppression: repeatedly take the strongest response
    # and clear a kernel-sized neighborhood around it.
    while np.any(conv_map_rgb_t != 0):
        idx = np.where(conv_map_rgb_t == np.amax(conv_map_rgb_t))
        # Take the first maximum. The original called int() on the whole
        # index array, which raises when several positions tie for the max.
        tl_row = int(idx[0][0])
        tl_col = int(idx[1][0])
        br_row = tl_row + n_rows_k
        br_col = tl_col + n_cols_k
        bounding_boxes.append([tl_col, tl_row, br_col, br_row])
        top = max(tl_row - n_rows_k, 0)
        bottom = min(tl_row + n_rows_k, n_rows - n_rows_k + 1)
        left = max(tl_col - n_cols_k, 0)
        right = min(tl_col + n_cols_k, n_cols - n_cols_k + 1)
        conv_map_rgb_t[top:bottom, left:right] = 0

    for box in bounding_boxes:
        assert len(box) == 4
    return bounding_boxes
# set the path to the downloaded data:
data_path = '../data/RedLights2011_Medium'
# set a path for saving predictions:
preds_path = '../data/hw01_preds'
os.makedirs(preds_path,exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
preds = {}
for i in range(len(file_names)):
    # read image using PIL:
    I = Image.open(os.path.join(data_path,file_names[i]))
    # convert to numpy array:
    I = np.asarray(I)
    # BUG(review): detect_red_light() requires a second ``kernel`` argument,
    # so this call raises TypeError as written. A red-light template kernel
    # must be built or loaded and passed here -- confirm the intended kernel
    # source with the author before fixing.
    preds[file_names[i]] = detect_red_light(I)
# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path,'preds.json'),'w') as f:
    json.dump(preds,f)
|
import os
import numpy as np
import json
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
def detect_red_light(I, kernel):
'''
This function takes a numpy array <I> and a numpy array <kernel> and returns a list <bounding_boxes>.
The list <bounding_boxes> should have one element for each red light in the
image. Each element of <bounding_boxes> should itself be a list, containing
four integers that specify a bounding box: the row and column index of the
top left corner and the row and column index of the bottom right corner (in
that order). See the code below for an example.
Note that PIL loads images in RGB order, so:
I[:,:,0] is the red channel
I[:,:,1] is the green channel
I[:,:,2] is the blue channel
'''
bounding_boxes = [] # This should be a list of lists, each of length 4. See format example below.
'''
BEGIN YOUR CODE
'''
(n_rows_k,n_cols_k,n_channels_k) = np.shape(kernel) # "k" for "kernel"
(n_rows,n_cols,n_channels) = np.shape(I)
conv_map = np.zeros((n_rows-n_rows_k+1,n_cols-n_cols_k+1,n_channels))
kernel_n = np.zeros((n_rows_k,n_cols_k,n_channels_k))
threshold = 0.92
# normalize kernel
# "ch" for channel
for ch in range(n_channels_k):
norm_k = np.linalg.norm(kernel[:,:,ch])
kernel_n[:,:,ch] = kernel[:,:,ch]/norm_k
for i in range(n_rows-n_rows_k+1):
for j in range(n_cols-n_cols_k+1):
for ch in range(n_channels):
# normalize cropped image
norm_I = np.linalg.norm(I[i:i+n_rows_k,j:j+n_cols_k,ch])
I_cropped_n = I[i:i+n_rows_k,j:j+n_cols_k,ch]/norm_I
conv_map[i][j][ch] = np.sum(kernel_n[:,:,ch]*I_cropped_n)
# Weighted combinatioin of RGB channels
conv_map_rgb = 0.8*conv_map[:,:,0]+0.1*conv_map[:,:,1]+0.1*conv_map[:,:,2]
# Apply threshold
conv_map_rgb_t = np.where(conv_map_rgb > threshold, conv_map_rgb ,0)
# Find local maximums
while np.any(conv_map_rgb_t != 0):
idx = np.where(conv_map_rgb_t == np.amax(conv_map_rgb_t))
tl_row = int(idx[0])
tl_col = int(idx[1])
#print(tl_row,tl_col)
br_row = tl_row + n_rows_k
br_col = tl_col + n_cols_k
bounding_boxes.append([tl_col,tl_row,br_col,br_row])
top = np.max([tl_row-n_rows_k,0])
bottom = np.min([tl_row+n_rows_k,n_rows-n_rows_k+1])
left = np.max([tl_col-n_cols_k,0])
right = np.min([tl_col+n_cols_k,n_cols-n_cols_k+1])
conv_map_rgb_t[top:bottom,left:right] = 0
'''
END YOUR CODE
'''
for i in range(len(bounding_boxes)):
assert len(bounding_boxes[i]) == 4
return bounding_boxes
# set the path to the downloaded data:
data_path = '../data/RedLights2011_Medium'
# set a path for saving predictions:
preds_path = '../data/hw01_preds'
os.makedirs(preds_path,exist_ok=True) # create directory if needed
# get sorted list of files:
file_names = sorted(os.listdir(data_path))
# remove any non-JPEG files:
file_names = [f for f in file_names if '.jpg' in f]
preds = {}
for i in range(len(file_names)):
# read image using PIL:
I = Image.open(os.path.join(data_path,file_names[i]))
# convert to numpy array:
I = np.asarray(I)
preds[file_names[i]] = detect_red_light(I)
# save preds (overwrites any previous predictions!)
with open(os.path.join(preds_path,'preds.json'),'w') as f:
json.dump(preds,f)
|
en
| 0.676291
|
This function takes a numpy array <I> and a numpy array <kernel> and returns a list <bounding_boxes>. The list <bounding_boxes> should have one element for each red light in the image. Each element of <bounding_boxes> should itself be a list, containing four integers that specify a bounding box: the row and column index of the top left corner and the row and column index of the bottom right corner (in that order). See the code below for an example. Note that PIL loads images in RGB order, so: I[:,:,0] is the red channel I[:,:,1] is the green channel I[:,:,2] is the blue channel # This should be a list of lists, each of length 4. See format example below. BEGIN YOUR CODE # "k" for "kernel" # normalize kernel # "ch" for channel # normalize cropped image # Weighted combinatioin of RGB channels # Apply threshold # Find local maximums #print(tl_row,tl_col) END YOUR CODE # set the path to the downloaded data: # set a path for saving predictions: # create directory if needed # get sorted list of files: # remove any non-JPEG files: # read image using PIL: # convert to numpy array: # save preds (overwrites any previous predictions!)
| 3.314515
| 3
|
Wrapping/Generators/Python/itk/support/itkBase.py
|
okaerin/ITK
| 0
|
6626067
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import os
import sys
from sys import stderr as system_error_stream
# Required to work around weird import error with xarray
from typing import Dict, Any, List, Optional, Sequence, Union
import pkg_resources
import itkConfig
from itk.support.itkTemplate import itkTemplate
def create_itk_module(name: str):
    """Build (without executing) the module object for ``itk.<name>Python``.

    The module is created from the SWIG-generated ``<name>Python.py`` file
    located one directory above this one; the caller is responsible for
    executing it.
    """
    from importlib.util import module_from_spec as ilu_module_from_spec
    from importlib.util import spec_from_file_location as ilu_spec_from_file_location

    swig_module_name: str = f"itk.{name}Python"
    source_path = os.path.join(os.path.dirname(__file__), "..", f"{name}Python.py")
    module_spec = ilu_spec_from_file_location(swig_module_name, source_path)
    return ilu_module_from_spec(module_spec)
def itk_load_swig_module(name: str, namespace=None):
    """This function causes a SWIG module to be loaded into memory after its
    dependencies are satisfied. Information about the templates defined therein
    is looked up from a config file, and PyTemplate instances for each are
    created. These template_feature instances are placed in a module with the given
    name that is either looked up from sys.modules or created and placed there
    if it does not already exist.
    Optionally, a 'namespace' parameter can be provided. If it is provided,
    this namespace will be updated with the new template_feature instantiations.
    The raw classes loaded from the named module's SWIG interface are placed in
    a 'swig' sub-module. If the namespace parameter is provided, this
    information will be placed in a sub-module named 'swig' therein as well.
    This later submodule will be created if it does not already exist.

    Args:
        name: short ITK module name (e.g. 'ITKCommon'); the SWIG module
            loaded is 'itk.<name>Python'.
        namespace: optional dict-like mapping that receives the public
            symbols plus a 'swig' sub-dict of raw SWIG classes.
    """
    swig_module_name: str = f"itk.{name}Python"
    # find the module's name in sys.modules, or create a new module so named
    this_module = sys.modules.setdefault(swig_module_name, create_itk_module(name))
    # if this library and it's template_feature instantiations have already been loaded
    # into sys.modules, bail out after loading the defined symbols into
    # 'namespace'
    if hasattr(this_module, "__templates_loaded"):
        if namespace is not None:
            swig = namespace.setdefault("swig", {})
            if hasattr(this_module, "swig"):
                swig.update(this_module.swig)
            # don't worry about overwriting the symbols in namespace -- any
            # common symbols should be of type itkTemplate, which is a
            # singleton type. That is, they are all identical, so replacing one
            # with the other isn't a problem.
            for k, v in this_module.__dict__.items():
                if not (k.startswith("_") or k.startswith("itk") or k == "swig"):
                    namespace[k] = v
        return
    # We're definitely going to load the templates. We set templates_loaded
    # here instead of at the end of the file to protect against cyclical
    # dependencies that could kill the recursive lookup below.
    this_module.__templates_loaded = True
    # Now, we definitely need to load the template_feature instantiations from the
    # named module, and possibly also load the underlying SWIG module. Before
    # we can load the template_feature instantiations of this module, we need to load
    # those of the modules on which this one depends. Ditto for the SWIG
    # modules.
    # So, we recursively satisfy the dependencies of named module and create
    # the template_feature instantiations.
    # Dependencies are looked up from the auto-generated configuration files,
    # via the itk_base_global_module_data instance defined at the bottom of this file, which
    # knows how to find those configuration files.
    l_data = itk_base_global_module_data[name]
    if l_data:
        deps = l_data.get_module_dependencies()
        for dep in deps:
            itk_load_swig_module(dep, namespace)
    # Notify the optional progress callback that loading of 'name' started (0).
    if itkConfig.ImportCallback:
        itkConfig.ImportCallback(name, 0)
    # SWIG-generated modules have 'Python' appended. Only load the SWIG module
    # if we haven't already.
    loader = LibraryLoader()
    l_module = loader.load(swig_module_name)
    # OK, now the modules on which this one depends are loaded and
    # template_feature-instantiated, and the SWIG module for this one is also loaded.
    # We're going to put the things we load and create in two places: the
    # optional 'namespace' parameter, and the this_module variable's namespace.
    # Populate the 'swig' sub-module namespace for this_module. Also look up or create a
    # different 'swig' namespace for 'namespace'. Since 'namespace' may be used to
    # collect symbols from multiple different ITK modules, we don't want to
    # stomp on an existing 'swig' namespace, nor do we want to share 'swig'
    # namespaces between this_module and namespace.
    if namespace is None:
        for k, v in l_module.__dict__.items():
            if not (k.startswith("__") or k.startswith("itk")):
                this_module.swig[k] = v
    else:
        swig = namespace.setdefault("swig", {})
        for k, v in l_module.__dict__.items():
            if not (k.startswith("__") or k.startswith("itk")):
                this_module.swig[k] = v
                swig[k] = v
    l_data: ITKModuleInfo = itk_base_global_module_data[name]
    for template_feature in l_data.get_all_template_features():
        if template_feature.is_itk_class():
            # Get the attribute associated with the class name if it exists,
            # otherwise make a new templated class
            # template_container = this_module.'py_class_name'
            template_container = getattr(
                this_module,
                template_feature.get_python_class_name(),
                # Create a new template_container if not already found
                itkTemplate(template_feature.get_cpp_class_name()),
            )
            try:
                template_container.__add__(
                    template_feature.get_template_parameters(),
                    getattr(l_module, template_feature.get_swig_class_name()),
                )
                # Now set the updated template_container to this_module
                setattr(
                    this_module,
                    template_feature.get_python_class_name(),
                    template_container,
                )
                if namespace is not None:
                    current_value = namespace.get(
                        template_feature.get_python_class_name()
                    )
                    if (
                        current_value is not None
                        and current_value != template_container
                    ):
                        debug_print_error(
                            f"Namespace already has a value for "
                            f"{template_feature.get_python_class_name()}, which is not an itkTemplate "
                            f"instance for class {template_feature.get_cpp_class_name()}. "
                            f"Overwriting old value."
                        )
                    namespace[
                        template_feature.get_python_class_name()
                    ] = template_container
            except Exception as e:
                debug_print_error(
                    f"{template_feature.get_swig_class_name()} not loaded from module {name} because of "
                    f"exception:\n {e}"
                )
                pass
        else:
            # this is a description of a non-templated class
            try:
                swig_class = getattr(l_module, template_feature.get_swig_class_name())
                itkTemplate.registerNoTpl(
                    template_feature.get_cpp_class_name(), swig_class
                )
                setattr(
                    this_module,
                    template_feature.get_python_class_name(),
                    swig_class,
                )
                if namespace is not None:
                    current_value = namespace.get(
                        template_feature.get_python_class_name()
                    )
                    if current_value is not None and current_value != swig_class:
                        debug_print_error(
                            f"Namespace already has a value for"
                            f" {template_feature.get_python_class_name()}, which is not class {template_feature.get_cpp_class_name()}. "
                            f"Overwriting old value."
                        )
                    namespace[template_feature.get_python_class_name()] = swig_class
            except Exception as e:
                debug_print_error(
                    f"{template_feature.get_swig_class_name()} not found in module {name} because of "
                    f"exception:\n {e}"
                )
    # NOTE(review): this loop indexes into 'namespace' unconditionally, so it
    # assumes a namespace is always supplied when snake-case functions exist
    # for the module -- confirm with callers.
    for snakeCaseFunction in l_data.get_snake_case_functions():
        namespace[snakeCaseFunction] = getattr(l_module, snakeCaseFunction)
        init_name = snakeCaseFunction + "_init_docstring"
        init_function = getattr(l_module, init_name)
        try:
            # Docstring initializers are optional; absent ones are ignored.
            init_function()
        except AttributeError:
            pass
    # Notify the optional progress callback that loading of 'name' finished (1).
    if itkConfig.ImportCallback:
        itkConfig.ImportCallback(name, 1)
def debug_print_error(error):
    """Surface *error* according to the configured itkConfig.DebugLevel.

    WARN prints the message to stderr; ERROR raises RuntimeError; any other
    level silently ignores the error.
    """
    level = itkConfig.DebugLevel
    if level == itkConfig.WARN:
        print(error, file=system_error_stream)
        return
    if level == itkConfig.ERROR:
        raise RuntimeError(error)
class LibraryLoader(object):
    """Do all the work to set up the environment so that a SWIG-generated
    library can be properly loaded. This involves setting paths defined in
    itkConfig."""

    def __init__(self) -> None:
        # Snapshot the current search path and working directory so that
        # cleanup() can restore them after a load.
        self.old_path = sys.path
        self.old_cwd = os.getcwd()

    def setup(self):
        """Point the process at the SWIG library directories before importing."""
        self.old_cwd = os.getcwd()
        try:
            os.chdir(itkConfig.swig_lib)
        except OSError:
            # silently pass to avoid the case where the dir is not there
            pass
        self.old_path = sys.path
        # Prepend the SWIG directories so they win over anything already on the path.
        sys.path = [itkConfig.swig_lib, itkConfig.swig_py] + itkConfig.path + sys.path

    def load(self, name: str):
        """Import and return module *name* with the SWIG paths active.

        The original path and working directory are always restored, even
        when the import fails.
        """
        self.setup()
        try:
            import importlib

            l_module = importlib.import_module(name)
            # since version 3.4: Use importlib.util.find_spec() instead.
            # NOTE(review): import_module above already executed the module;
            # exec_module() below runs its top level a second time. Looks
            # deliberate (SWIG re-initialization?) -- confirm before changing.
            l_spec = importlib.util.find_spec(name)
            l_spec.loader.exec_module(l_module)  # pytype: disable=attribute-error
            return l_module
        finally:
            self.cleanup()

    def cleanup(self):
        """Restore the search path and working directory saved by setup()."""
        os.chdir(self.old_cwd)
        sys.path = self.old_path
class ITKTemplateFeatures:
    """
    Holds one 'template' feature tuple read from the generated ``*Config.py``
    files produced during swig configuration.

    Tuples come in two shapes:
      (py_class_name, cpp_class_name, swig_class_name, class_in_module, template_parameters)
          e.g. ('Image', 'itk::Image', 'itkImageSS2', True, 'signed short,2')
          for templated ITK classes (exactly 5 elements), and
      (py_class_name, cpp_class_name, swig_class_name[, class_in_module])
          for plain (non-templated) swig classes.
    """

    def __init__(self, feature_tuple: Sequence[Union[str, bool]]) -> None:
        feature_length: int = len(feature_tuple)
        # ITK classes have exactly 5 elements in the tuple, otherwise they are swig classes
        self._is_itk_class: bool = feature_length == 5
        if not 3 <= feature_length <= 5:
            # Typo fixed relative to the original message ("Ivalid").
            raise Exception(
                f"ERROR: Invalid number of features specified (3 <= {feature_length} <= 5): {feature_tuple}."
            )
        self._py_class_name: str = feature_tuple[0]
        self._cpp_class_name: str = feature_tuple[1]
        self._swig_class_name: str = feature_tuple[2]
        # Optional 4th element; absent means the class is not in the module.
        self._class_in_module: bool = feature_tuple[3] if feature_length >= 4 else False
        # Optional 5th element; only present for templated classes.
        self._template_parameters: Optional[str] = (
            feature_tuple[4] if feature_length == 5 else None
        )

    def is_itk_class(self) -> bool:
        """True when the tuple describes a templated ITK class (5 elements)."""
        return self._is_itk_class

    def get_python_class_name(self) -> str:
        return self._py_class_name

    def get_cpp_class_name(self) -> str:
        return self._cpp_class_name

    def get_swig_class_name(self) -> str:
        return self._swig_class_name

    def get_class_in_module(self) -> bool:
        return self._class_in_module

    def get_template_parameters(self) -> Optional[str]:
        """Template arguments string, or None for non-templated classes."""
        return self._template_parameters
class ITKModuleInfo:
    """
    Parsed contents of one generated ``*Config.py`` file (plus its optional
    ``*_snake_case.py`` companion). These generated files name the swig
    wrapped classes so that convenience dispatch factories can be built
    from the itkTemplate base class.
    """

    def __init__(self, path: str, snake_path: str) -> None:
        # Paths retained only for debugging convenience; nothing else reads them.
        self._module_config_path: str = path
        self._module_snake_path: str = snake_path
        config_globals: Dict[str, Any] = {}
        with open(path, "rb") as config_file:
            exec(config_file.read(), config_globals)
        # Sorted names of the modules this module depends on.
        self._depends = sorted(config_globals.get("depends", tuple()))
        # One parsed feature object per raw template tuple.
        self._template_feature_tuples: List[ITKTemplateFeatures] = [
            ITKTemplateFeatures(raw)
            for raw in config_globals.get("templates", tuple())
        ]
        snake_globals: Dict[str, Any] = {}
        if os.path.exists(snake_path):
            with open(snake_path, "rb") as snake_file:
                exec(snake_file.read(), snake_globals)
        self._snake_case_functions: Sequence[str] = snake_globals.get(
            "snake_case_functions", []
        )

    def get_module_dependencies(self) -> Sequence[str]:
        return self._depends

    def get_all_template_features(self) -> Sequence[ITKTemplateFeatures]:
        return self._template_feature_tuples

    def get_snake_case_functions(self) -> Sequence[str]:
        return self._snake_case_functions
def _initialize(l_module_data):
    """Populate *l_module_data* with one ITKModuleInfo per discovered module.

    Scans every directory on itkConfig.path for a 'Configuration'
    subdirectory holding the generated ``*Config.py`` files.
    Raises RuntimeError when a path lacks that subdirectory.
    """
    # Make a list of all know modules (described in *Config.py files in the
    # config_py directory) and load the information described in those Config.py
    # files.
    dirs = [p for p in itkConfig.path if os.path.isdir(p)]
    for d in dirs:
        # NOT USED OR NEEDED candidate_lib_path: str = os.path.join(os.path.dirname(d), "lib")
        # NOT USED OR NEEDED if not os.path.isdir(candidate_lib_path):
        # NOT USED OR NEEDED     print(f"WARNING: Invalid directory for python lib files specified: {candidate_lib_path}")
        # NOT USED OR NEEDED     raise RuntimeError(f"WARNING: Invalid directory for python lib files specified: {candidate_lib_path}")
        # NOT USED OR NEEDED sys.path.append(candidate_lib_path)
        candidate_config_path: str = os.path.join(d, "Configuration")
        if not os.path.isdir(candidate_config_path):
            error_message: str = f"WARNING: Invalid configuration directory requested: {candidate_config_path}"
            raise RuntimeError(error_message)
        sys.path.append(d)
        files = os.listdir(os.path.join(d, "Configuration"))
        # Module name is the file name with the 9-character 'Config.py' suffix removed.
        known_modules: List[str] = sorted(
            [f[:-9] for f in files if f.endswith("Config.py")]
        )
        for module in known_modules:
            conf: str = f"{module}Config.py"
            path: str = os.path.join(d, "Configuration", conf)
            snake_conf = f"{module}_snake_case.py"
            snake_path = os.path.join(d, "Configuration", snake_conf)
            # Parsing happens eagerly here, at import time.
            l_module_data[module] = ITKModuleInfo(path, snake_path)
# Reserved for attributes resolved lazily on first access.
itk_base_global_lazy_attributes: Dict[str, Any] = {}
# Module name -> parsed ITKModuleInfo, filled once at import time below.
itk_base_global_module_data: Dict[str, ITKModuleInfo] = {}
_initialize(itk_base_global_module_data)
# _initialize is import-time-only; drop it from the module namespace.
del _initialize
|
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import os
import sys
from sys import stderr as system_error_stream
# Required to work around weird import error with xarray
from typing import Dict, Any, List, Optional, Sequence, Union
import pkg_resources
import itkConfig
from itk.support.itkTemplate import itkTemplate
def create_itk_module(name: str):
    """Create (but do not execute) the module object for ``itk.<name>Python``.

    The module is built from a spec pointing at the SWIG-generated
    ``<name>Python.py`` file next to this package; the caller is responsible
    for executing it.
    """
    import importlib.util

    qualified_name = f"itk.{name}Python"
    source_path = os.path.join(os.path.dirname(__file__), "..", f"{name}Python.py")
    module_spec = importlib.util.spec_from_file_location(qualified_name, source_path)
    return importlib.util.module_from_spec(module_spec)
def itk_load_swig_module(name: str, namespace=None):
    """This function causes a SWIG module to be loaded into memory after its
    dependencies are satisfied. Information about the templates defined therein
    is looked up from a config file, and PyTemplate instances for each are
    created. These template_feature instances are placed in a module with the given
    name that is either looked up from sys.modules or created and placed there
    if it does not already exist.
    Optionally, a 'namespace' parameter can be provided. If it is provided,
    this namespace will be updated with the new template_feature instantiations.
    The raw classes loaded from the named module's SWIG interface are placed in
    a 'swig' sub-module. If the namespace parameter is provided, this
    information will be placed in a sub-module named 'swig' therein as well.
    This latter submodule will be created if it does not already exist."""
    swig_module_name: str = f"itk.{name}Python"
    # find the module's name in sys.modules, or create a new module so named
    this_module = sys.modules.setdefault(swig_module_name, create_itk_module(name))
    # if this library and its template_feature instantiations have already been loaded
    # into sys.modules, bail out after loading the defined symbols into
    # 'namespace'
    if hasattr(this_module, "__templates_loaded"):
        if namespace is not None:
            swig = namespace.setdefault("swig", {})
            if hasattr(this_module, "swig"):
                swig.update(this_module.swig)
            # don't worry about overwriting the symbols in namespace -- any
            # common symbols should be of type itkTemplate, which is a
            # singleton type. That is, they are all identical, so replacing one
            # with the other isn't a problem.
            for k, v in this_module.__dict__.items():
                if not (k.startswith("_") or k.startswith("itk") or k == "swig"):
                    namespace[k] = v
        return
    # We're definitely going to load the templates. We set templates_loaded
    # here instead of at the end of the file to protect against cyclical
    # dependencies that could kill the recursive lookup below.
    this_module.__templates_loaded = True
    # Now, we definitely need to load the template_feature instantiations from the
    # named module, and possibly also load the underlying SWIG module. Before
    # we can load the template_feature instantiations of this module, we need to load
    # those of the modules on which this one depends. Ditto for the SWIG
    # modules.
    # So, we recursively satisfy the dependencies of named module and create
    # the template_feature instantiations.
    # Dependencies are looked up from the auto-generated configuration files,
    # via the itk_base_global_module_data instance defined at the bottom of this file, which
    # knows how to find those configuration files.
    l_data = itk_base_global_module_data[name]
    if l_data:
        deps = l_data.get_module_dependencies()
        for dep in deps:
            itk_load_swig_module(dep, namespace)
    if itkConfig.ImportCallback:
        # Signal "import of `name` started" (second argument 0).
        itkConfig.ImportCallback(name, 0)
    # SWIG-generated modules have 'Python' appended. Only load the SWIG module
    # if we haven't already.
    loader = LibraryLoader()
    l_module = loader.load(swig_module_name)
    # OK, now the modules on which this one depends are loaded and
    # template_feature-instantiated, and the SWIG module for this one is also loaded.
    # We're going to put the things we load and create in two places: the
    # optional 'namespace' parameter, and the this_module variable's namespace.
    # Populate the 'swig' sub-module namespace for this_module. Also look up or create a
    # different 'swig' namespace for 'namespace'. Since 'namespace' may be used to
    # collect symbols from multiple different ITK modules, we don't want to
    # stomp on an existing 'swig' namespace, nor do we want to share 'swig'
    # namespaces between this_module and namespace.
    if namespace is None:
        for k, v in l_module.__dict__.items():
            if not (k.startswith("__") or k.startswith("itk")):
                this_module.swig[k] = v
    else:
        swig = namespace.setdefault("swig", {})
        for k, v in l_module.__dict__.items():
            if not (k.startswith("__") or k.startswith("itk")):
                this_module.swig[k] = v
                swig[k] = v
    l_data: ITKModuleInfo = itk_base_global_module_data[name]
    for template_feature in l_data.get_all_template_features():
        if template_feature.is_itk_class():
            # Get the attribute associated with the class name if it exists,
            # otherwise make a new templated class
            # template_container = this_module.'py_class_name'
            template_container = getattr(
                this_module,
                template_feature.get_python_class_name(),
                # Create a new template_container if not already found
                itkTemplate(template_feature.get_cpp_class_name()),
            )
            try:
                template_container.__add__(
                    template_feature.get_template_parameters(),
                    getattr(l_module, template_feature.get_swig_class_name()),
                )
                # Now set the updated template_container to this_module
                setattr(
                    this_module,
                    template_feature.get_python_class_name(),
                    template_container,
                )
                if namespace is not None:
                    current_value = namespace.get(
                        template_feature.get_python_class_name()
                    )
                    if (
                        current_value is not None
                        and current_value != template_container
                    ):
                        debug_print_error(
                            f"Namespace already has a value for "
                            f"{template_feature.get_python_class_name()}, which is not an itkTemplate "
                            f"instance for class {template_feature.get_cpp_class_name()}. "
                            f"Overwriting old value."
                        )
                    namespace[
                        template_feature.get_python_class_name()
                    ] = template_container
            except Exception as e:
                debug_print_error(
                    f"{template_feature.get_swig_class_name()} not loaded from module {name} because of "
                    f"exception:\n {e}"
                )
                pass
        else:
            # this is a description of a non-templated class
            try:
                swig_class = getattr(l_module, template_feature.get_swig_class_name())
                itkTemplate.registerNoTpl(
                    template_feature.get_cpp_class_name(), swig_class
                )
                setattr(
                    this_module,
                    template_feature.get_python_class_name(),
                    swig_class,
                )
                if namespace is not None:
                    current_value = namespace.get(
                        template_feature.get_python_class_name()
                    )
                    if current_value is not None and current_value != swig_class:
                        debug_print_error(
                            f"Namespace already has a value for"
                            f" {template_feature.get_python_class_name()}, which is not class {template_feature.get_cpp_class_name()}. "
                            f"Overwriting old value."
                        )
                    namespace[template_feature.get_python_class_name()] = swig_class
            except Exception as e:
                debug_print_error(
                    f"{template_feature.get_swig_class_name()} not found in module {name} because of "
                    f"exception:\n {e}"
                )
    # Export snake-case convenience functions and run their docstring
    # initializers.
    # NOTE(review): this loop indexes `namespace` unconditionally; if the
    # module exports snake-case functions and `namespace` is None this would
    # raise TypeError -- confirm callers always pass a namespace in that case.
    for snakeCaseFunction in l_data.get_snake_case_functions():
        namespace[snakeCaseFunction] = getattr(l_module, snakeCaseFunction)
        init_name = snakeCaseFunction + "_init_docstring"
        init_function = getattr(l_module, init_name)
        try:
            init_function()
        except AttributeError:
            pass
    if itkConfig.ImportCallback:
        # Signal "import of `name` finished" (second argument 1).
        itkConfig.ImportCallback(name, 1)
def debug_print_error(error):
    """Report *error* according to itkConfig.DebugLevel.

    At WARN level the message is printed to stderr; at ERROR level a
    RuntimeError is raised; at any other level the error is ignored.
    """
    level = itkConfig.DebugLevel
    if level == itkConfig.WARN:
        print(error, file=system_error_stream)
        return
    if level == itkConfig.ERROR:
        raise RuntimeError(error)
class LibraryLoader(object):
    """Prepare the environment (working directory and sys.path, as defined in
    itkConfig) so that a SWIG-generated library can be loaded, and restore it
    afterwards."""

    def __init__(self) -> None:
        # Snapshots restored by cleanup(); refreshed again in setup().
        self.old_path = sys.path
        self.old_cwd = os.getcwd()

    def setup(self):
        """Record the current state and switch into the SWIG library layout."""
        self.old_cwd = os.getcwd()
        try:
            os.chdir(itkConfig.swig_lib)
        except OSError:
            # The directory may legitimately be absent; silently continue.
            pass
        self.old_path = sys.path
        extra_paths = [itkConfig.swig_lib, itkConfig.swig_py] + itkConfig.path
        sys.path = extra_paths + sys.path

    def load(self, name: str):
        """Import and execute module *name* under the adjusted environment."""
        self.setup()
        try:
            import importlib

            loaded = importlib.import_module(name)
            # since version 3.4: Use importlib.util.find_spec() instead.
            module_spec = importlib.util.find_spec(name)
            module_spec.loader.exec_module(loaded)  # pytype: disable=attribute-error
            return loaded
        finally:
            self.cleanup()

    def cleanup(self):
        """Restore the working directory and sys.path saved by setup()."""
        os.chdir(self.old_cwd)
        sys.path = self.old_path
class ITKTemplateFeatures:
    """
    Objects to hold the 'template' features specified in the '*Config.py'
    files generated during swig configuration.

    Feature tuples have 3 to 5 elements:
        (py_class_name, cpp_class_name, swig_class_name[, class_in_module[, template_parameters]])
    e.g. ('Image', 'itk::Image', 'itkImageSS2', True, 'signed short,2')
    """

    def __init__(self, feature_tuple: Sequence[Union[str, bool]]) -> None:
        """Parse one feature tuple.

        :param feature_tuple: tuple of 3-5 elements as described on the class.
        :raises Exception: when the tuple has fewer than 3 or more than 5
            elements.
        """
        feature_length: int = len(feature_tuple)
        # ITK template classes have exactly 5 elements in the tuple;
        # shorter tuples describe plain swig classes.
        self._is_itk_class: bool = feature_length == 5
        if feature_length < 3 or feature_length > 5:
            # Fixed typo in the original message ("Ivalid" -> "Invalid").
            raise Exception(
                f"ERROR: Invalid number of features specified (3 <= {feature_length} <= 5): {feature_tuple}."
            )
        self._py_class_name: str = feature_tuple[0]
        self._cpp_class_name: str = feature_tuple[1]
        self._swig_class_name: str = feature_tuple[2]
        # Optional elements default to False / None when absent.
        self._class_in_module: bool = feature_tuple[3] if feature_length >= 4 else False
        self._template_parameters: Optional[str] = (
            feature_tuple[4] if feature_length == 5 else None
        )

    def is_itk_class(self) -> bool:
        """True when the feature describes a templated ITK class."""
        return self._is_itk_class

    def get_python_class_name(self) -> str:
        """Python-side class name (e.g. 'Image')."""
        return self._py_class_name

    def get_cpp_class_name(self) -> str:
        """Fully qualified C++ class name (e.g. 'itk::Image')."""
        return self._cpp_class_name

    def get_swig_class_name(self) -> str:
        """SWIG wrapper class name (e.g. 'itkImageSS2')."""
        return self._swig_class_name

    def get_class_in_module(self) -> bool:
        """Whether the class is declared in the module (False when absent)."""
        return self._class_in_module

    def get_template_parameters(self) -> Optional[str]:
        """Template parameter string, or None for non-templated classes."""
        return self._template_parameters
class ITKModuleInfo:
    """
    A structure to hold information loaded from the *Config.py
    files generated during swig wrapping. The *Config.py
    files define actual names of the swig wrapped classes
    so that they may be used to build convenience dispatch
    factories from the itkTemplate base class.
    """

    def __init__(self, path: str, snake_path: str) -> None:
        # Kept only to ease debugging; nothing outside this method reads them.
        self._module_config_path: str = path
        self._module_snake_path: str = snake_path

        # Execute the config file and harvest its module-level names.
        config_namespace: Dict[str, Any] = {}
        with open(path, "rb") as config_file:
            exec(config_file.read(), config_namespace)

        # Sorted dependencies of this module on other modules.
        self._depends = sorted(config_namespace.get("depends", tuple()))
        # One ITKTemplateFeatures per template definition in the config.
        self._template_feature_tuples: List[ITKTemplateFeatures] = [
            ITKTemplateFeatures(raw)
            for raw in config_namespace.get("templates", tuple())
        ]

        # The snake-case companion file is optional.
        snake_namespace: Dict[str, Any] = {}
        if os.path.exists(snake_path):
            with open(snake_path, "rb") as snake_file:
                exec(snake_file.read(), snake_namespace)
        self._snake_case_functions: Sequence[str] = snake_namespace.get(
            "snake_case_functions", []
        )

    def get_module_dependencies(self) -> Sequence[str]:
        """Sorted names of the modules this one depends on."""
        return self._depends

    def get_all_template_features(self) -> Sequence[ITKTemplateFeatures]:
        """All template definitions parsed from the config file."""
        return self._template_feature_tuples

    def get_snake_case_functions(self) -> Sequence[str]:
        """Snake-case convenience functions exported by this module."""
        return self._snake_case_functions
def _initialize(l_module_data):
    """Populate *l_module_data* with one ITKModuleInfo per known module.

    Known modules are described by ``*Config.py`` files in the
    ``Configuration`` sub-directory of every existing directory listed on
    ``itkConfig.path``; each directory is also appended to ``sys.path``.

    :param l_module_data: dict mapping module name -> ITKModuleInfo (mutated).
    :raises RuntimeError: when a directory on itkConfig.path has no
        ``Configuration`` sub-directory.
    """
    # Removed the long-dead "# NOT USED OR NEEDED" commented-out lib-path code.
    dirs = [p for p in itkConfig.path if os.path.isdir(p)]
    for d in dirs:
        candidate_config_path: str = os.path.join(d, "Configuration")
        if not os.path.isdir(candidate_config_path):
            error_message: str = f"WARNING: Invalid configuration directory requested: {candidate_config_path}"
            raise RuntimeError(error_message)
        sys.path.append(d)
        files = os.listdir(candidate_config_path)
        # "SomeModuleConfig.py" -> "SomeModule"
        known_modules: List[str] = sorted(
            f[:-9] for f in files if f.endswith("Config.py")
        )
        for module in known_modules:
            path: str = os.path.join(candidate_config_path, f"{module}Config.py")
            snake_path = os.path.join(candidate_config_path, f"{module}_snake_case.py")
            l_module_data[module] = ITKModuleInfo(path, snake_path)
# Lazily-resolved attributes for the top-level ``itk`` package.
itk_base_global_lazy_attributes: Dict[str, Any] = {}
# Per-module configuration, keyed by module name; filled once at import time.
itk_base_global_module_data: Dict[str, ITKModuleInfo] = {}
_initialize(itk_base_global_module_data)
# One-shot initializer; drop it from the module namespace once it has run.
del _initialize
|
en
| 0.744796
|
# ========================================================================== # # Copyright NumFOCUS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ==========================================================================*/ # Required to work around weird import error with xarray This function causes a SWIG module to be loaded into memory after its dependencies are satisfied. Information about the templates defined therein is looked up from a config file, and PyTemplate instances for each are created. These template_feature instances are placed in a module with the given name that is either looked up from sys.modules or created and placed there if it does not already exist. Optionally, a 'namespace' parameter can be provided. If it is provided, this namespace will be updated with the new template_feature instantiations. The raw classes loaded from the named module's SWIG interface are placed in a 'swig' sub-module. If the namespace parameter is provided, this information will be placed in a sub-module named 'swig' therein as well. This later submodule will be created if it does not already exist. # find the module's name in sys.modules, or create a new module so named # if this library and it's template_feature instantiations have already been loaded # into sys.modules, bail out after loading the defined symbols into # 'namespace' # don't worry about overwriting the symbols in namespace -- any # common symbols should be of type itkTemplate, which is a # singleton type. 
That is, they are all identical, so replacing one # with the other isn't a problem. # We're definitely going to load the templates. We set templates_loaded # here instead of at the end of the file to protect against cyclical # dependencies that could kill the recursive lookup below. # Now, we definitely need to load the template_feature instantiations from the # named module, and possibly also load the underlying SWIG module. Before # we can load the template_feature instantiations of this module, we need to load # those of the modules on which this one depends. Ditto for the SWIG # modules. # So, we recursively satisfy the dependencies of named module and create # the template_feature instantiations. # Dependencies are looked up from the auto-generated configuration files, # via the itk_base_global_module_data instance defined at the bottom of this file, which # knows how to find those configuration files. # SWIG-generated modules have 'Python' appended. Only load the SWIG module # if we haven't already. # OK, now the modules on which this one depends are loaded and # template_feature-instantiated, and the SWIG module for this one is also loaded. # We're going to put the things we load and create in two places: the # optional 'namespace' parameter, and the this_module variable's namespace. # Populate the 'swig' sub-module namespace for this_module. Also look up or create a # different 'swig' namespace for 'namespace'. Since 'namespace' may be used to # collect symbols from multiple different ITK modules, we don't want to # stomp on an existing 'swig' namespace, nor do we want to share 'swig' # namespaces between this_module and namespace. 
# Get the attribute associated with the class name if it exists, # otherwise make a new templated class # template_container = this_module.'py_class_name' # Create a new template_container if not already found # Now set the updated template_container to this_module # this is a description of a non-templated class Do all the work to set up the environment so that a SWIG-generated library can be properly loaded. This involves setting paths defined in itkConfig. # silently pass to avoid the case where the dir is not there # since version 3.4: Use importlib.util.find_spec() instead. # pytype: disable=attribute-error Objects to hold the 'template' features specified in the '*Config.py' files generated during swig configuration. (py_class_name, cpp_class_name, swig_class_name, class_in_module, template_parameters) ('Image', 'itk::Image', 'itkImageSS2', True, 'signed short,2'), # ITK classes have exactly 5 elements in the tuple, otherwise they are swig classes A structure to hold information loaded from the *Config.py files generated during swig wrapping. The *Config.py files define actual names of the swig wrapped classes so that they may be used to build convenience dispatch factories from the itkTemplate base class. # Store paths for debugging ease, not used in the code outside this function # Template Definitions # The sorted dependencies of this module on other modules # Make a list of all know modules (described in *Config.py files in the # config_py directory) and load the information described in those Config.py # files. # NOT USED OR NEEDED candidate_lib_path: str = os.path.join(os.path.dirname(d), "lib") # NOT USED OR NEEDED if not os.path.isdir(candidate_lib_path): # NOT USED OR NEEDED print(f"WARNING: Invalid directory for python lib files specified: {candidate_lib_path}") # NOT USED OR NEEDED raise RuntimeError(f"WARNING: Invalid directory for python lib files specified: {candidate_lib_path}") # NOT USED OR NEEDED sys.path.append(candidate_lib_path)
| 1.942755
| 2
|
models/ClassicNetwork/blocks/SK_block.py
|
Dou-Yu-xuan/deep-learning-visal
| 150
|
6626068
|
import torch.nn as nn
from functools import reduce
class SKConv(nn.Module):
    """Selective-Kernel convolution: split the input through M dilated
    branches, fuse them by summation, and recombine with a softmax channel
    attention over the branches (SKNet)."""

    def __init__(self, in_channels, out_channels, stride=1, M=2, r=16, L=32):
        super(SKConv, self).__init__()
        hidden = max(in_channels // r, L)  # bottleneck width "d"
        self.M = M
        self.out_channels = out_channels
        # One 3x3 group-conv branch per scale; dilation grows with the index.
        self.conv = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 3, stride,
                          padding=1 + branch, dilation=1 + branch,
                          groups=32, bias=False),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            )
            for branch in range(M)
        )
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Sequential(
            nn.Conv2d(out_channels, hidden, 1, bias=False),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
        )
        self.fc2 = nn.Conv2d(hidden, out_channels * M, 1, 1, bias=False)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, input):
        batch_size = input.size(0)
        # Split: run every branch on the same input.
        branch_outputs = [branch(input) for branch in self.conv]
        # Fuse: element-wise sum, global pool, bottleneck MLP.
        fused = reduce(lambda a, b: a + b, branch_outputs)
        descriptor = self.fc1(self.global_pool(fused))
        logits = self.fc2(descriptor).reshape(
            batch_size, self.M, self.out_channels, -1
        )
        attention = self.softmax(logits)
        # Select: one per-channel weight tensor per branch.
        branch_weights = [
            w.reshape(batch_size, self.out_channels, 1, 1)
            for w in attention.chunk(self.M, dim=1)
        ]
        weighted = map(lambda feat, w: feat * w, branch_outputs, branch_weights)
        return reduce(lambda a, b: a + b, weighted)
import torch
class SKConv(nn.Module):
    """Selective-Kernel convolution (alternative implementation).

    Splits the input through M branch convolutions with growing dilation,
    fuses the branches by summation, and recombines them with a softmax
    channel attention over the branch axis.
    NOTE(review): this second definition shadows the SKConv class defined
    earlier in this module.
    """

    def __init__(self, features, M=2, G=32, r=16, stride=1, L=32):
        """ Constructor
        Args:
            features: input channel dimensionality.
            M: the number of branchs.
            G: num of convolution groups.
            r: the ratio for compute d, the length of z.
            stride: stride, default 1.
            L: the minimum dim of the vector z in paper, default 32.
        """
        super(SKConv, self).__init__()
        d = max(int(features / r), L)  # bottleneck width of the fuse layer
        self.M = M
        self.features = features
        self.convs = nn.ModuleList([])
        for i in range(M):
            # Branch i: 3x3 conv, dilation 1+i with matching padding so the
            # spatial size is preserved.
            self.convs.append(nn.Sequential(
                nn.Conv2d(features, features, kernel_size=3, stride=stride, padding=1 + i, dilation=1 + i, groups=G,
                          bias=False),
                nn.BatchNorm2d(features),
                nn.ReLU(inplace=False)
            ))
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Sequential(nn.Conv2d(features, d, kernel_size=1, stride=1, bias=False),
                                nn.BatchNorm2d(d),
                                nn.ReLU(inplace=False))
        # One 1x1 conv per branch produces that branch's attention logits.
        self.fcs = nn.ModuleList([])
        for i in range(M):
            self.fcs.append(
                nn.Conv2d(d, features, kernel_size=1, stride=1)
            )
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return the attention-weighted fusion of the M branch outputs."""
        batch_size = x.shape[0]
        # Split: stack branch outputs along a new branch axis -> [B, M, C, H, W].
        feats = [conv(x) for conv in self.convs]
        feats = torch.cat(feats, dim=1)
        feats = feats.view(batch_size, self.M, self.features, feats.shape[2], feats.shape[3])
        # Fuse: sum over branches, global pool, bottleneck.
        feats_U = torch.sum(feats, dim=1)
        feats_S = self.gap(feats_U)
        feats_Z = self.fc(feats_S)
        # Select: softmax over the branch axis gives per-channel branch weights.
        attention_vectors = [fc(feats_Z) for fc in self.fcs]
        attention_vectors = torch.cat(attention_vectors, dim=1)
        attention_vectors = attention_vectors.view(batch_size, self.M, self.features, 1, 1)
        attention_vectors = self.softmax(attention_vectors)
        feats_V = torch.sum(feats * attention_vectors, dim=1)
        return feats_V
class ECA(nn.Module):
    """Efficient Channel Attention gate.

    Computes a per-channel weight in [0, 1] from a 1-D convolution over the
    globally pooled channel descriptor.

    Args:
        channel: Number of channels of the input feature map (not used by the
            layers themselves; kept for interface compatibility).
        k_size: Adaptive selection of kernel size.
    """

    def __init__(self, channel, k_size=3):
        super(ECA, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(
            1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: [b, c, h, w]; the unpack also asserts the input is 4-D.
        b, c, h, w = x.size()
        # Per-channel descriptor [b, c, 1, 1] from global average pooling.
        pooled = self.avg_pool(x)
        # Treat the channel axis as a 1-D sequence and convolve across it.
        as_sequence = pooled.squeeze(-1).transpose(-1, -2)
        mixed = self.conv(as_sequence).transpose(-1, -2).unsqueeze(-1)
        # Gate in [0, 1]; the caller applies it (this returns y, not x * y).
        return self.sigmoid(mixed)
class SF(nn.Module):
    """Multi-scale split-fuse attention block.

    Runs M parallel group convolutions with growing kernel size (3, 5, ...),
    sums the branch outputs, and returns the ECA channel gate computed on the
    fused result.

    Improvements over the original: removed the unused ``batch_size`` local in
    ``forward`` and the commented-out alternative conv configurations.
    """

    def __init__(self, channels, M=2):
        super(SF, self).__init__()
        # Branch i uses a (3 + 2*i) kernel with padding 1 + i so every branch
        # preserves the spatial size.
        self.convs = nn.ModuleList()
        for i in range(M):
            self.convs.append(nn.Sequential(
                nn.Conv2d(channels, channels, kernel_size=3 + 2 * i, stride=1,
                          padding=1 + i, dilation=1, groups=32, bias=False),
                nn.BatchNorm2d(channels),
                nn.ReLU(inplace=False)
            ))
        self.eca = ECA(channels)

    def forward(self, input):
        # Split: one output per branch.
        output = [conv(input) for conv in self.convs]
        # Fuse: element-wise sum, then channel attention on the fusion.
        U = reduce(lambda x, y: x + y, output)
        return self.eca(U)
|
import torch.nn as nn
from functools import reduce
class SKConv(nn.Module):
def __init__(self,in_channels,out_channels,stride=1,M=2,r=16,L=32):
super(SKConv,self).__init__()
d=max(in_channels//r,L)
self.M=M
self.out_channels=out_channels
self.conv=nn.ModuleList()
for i in range(M):
self.conv.append(nn.Sequential(nn.Conv2d(in_channels,out_channels,3,stride,padding=1+i,dilation=1+i,groups=32,bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)))
self.global_pool=nn.AdaptiveAvgPool2d(1)
self.fc1=nn.Sequential(nn.Conv2d(out_channels,d,1,bias=False),
nn.BatchNorm2d(d),
nn.ReLU(inplace=True))
self.fc2=nn.Conv2d(d,out_channels*M,1,1,bias=False)
self.softmax=nn.Softmax(dim=1)
def forward(self, input):
batch_size=input.size(0)
output=[]
#the part of split
for i,conv in enumerate(self.conv):
#print(i,conv(input).size())
output.append(conv(input))
#the part of fusion
U=reduce(lambda x,y:x+y,output)
s=self.global_pool(U)
z=self.fc1(s)
a_b=self.fc2(z)
a_b=a_b.reshape(batch_size,self.M,self.out_channels,-1)
a_b=self.softmax(a_b)
#the part of selection
a_b=list(a_b.chunk(self.M,dim=1))#split to a and b
a_b=list(map(lambda x:x.reshape(batch_size,self.out_channels,1,1),a_b))
V=list(map(lambda x,y:x*y,output,a_b))
V=reduce(lambda x,y:x+y,V)
return V
import torch
class SKConv(nn.Module):
def __init__(self, features, M=2, G=32, r=16, stride=1, L=32):
""" Constructor
Args:
features: input channel dimensionality.
M: the number of branchs.
G: num of convolution groups.
r: the ratio for compute d, the length of z.
stride: stride, default 1.
L: the minimum dim of the vector z in paper, default 32.
"""
super(SKConv, self).__init__()
d = max(int(features / r), L)
self.M = M
self.features = features
self.convs = nn.ModuleList([])
for i in range(M):
self.convs.append(nn.Sequential(
nn.Conv2d(features, features, kernel_size=3, stride=stride, padding=1 + i, dilation=1 + i, groups=G,
bias=False),
nn.BatchNorm2d(features),
nn.ReLU(inplace=False)
))
self.gap = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Sequential(nn.Conv2d(features, d, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(d),
nn.ReLU(inplace=False))
self.fcs = nn.ModuleList([])
for i in range(M):
self.fcs.append(
nn.Conv2d(d, features, kernel_size=1, stride=1)
)
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
batch_size = x.shape[0]
feats = [conv(x) for conv in self.convs]
feats = torch.cat(feats, dim=1)
feats = feats.view(batch_size, self.M, self.features, feats.shape[2], feats.shape[3])
feats_U = torch.sum(feats, dim=1)
feats_S = self.gap(feats_U)
feats_Z = self.fc(feats_S)
attention_vectors = [fc(feats_Z) for fc in self.fcs]
attention_vectors = torch.cat(attention_vectors, dim=1)
attention_vectors = attention_vectors.view(batch_size, self.M, self.features, 1, 1)
attention_vectors = self.softmax(attention_vectors)
feats_V = torch.sum(feats * attention_vectors, dim=1)
return feats_V
class ECA(nn.Module):
"""Constructs a ECA module.
Args:
channel: Number of channels of the input feature map
k_size: Adaptive selection of kernel size
"""
def __init__(self, channel, k_size=3):
super(ECA, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
# x: input features with shape [b, c, h, w]
b, c, h, w = x.size()
# feature descriptor on the global spatial information
y = self.avg_pool(x)
# Two different branches of ECA module
y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
# Multi-scale information fusion
y = self.sigmoid(y)
return y#x * y.expand_as(x)
class SF(nn.Module):
def __init__(self,channels,M=2):
super(SF,self).__init__()
# self.M=M
self.convs=nn.ModuleList()
for i in range(M):
self.convs.append(nn.Sequential(
# nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1 + i, dilation=1 + i, groups=32,bias=False),
# nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=32,bias=False),
nn.Conv2d(channels, channels, kernel_size=3 + 2 * i, stride=1, padding=1 + i, dilation=1, groups=32,bias=False),
nn.BatchNorm2d(channels),
nn.ReLU(inplace=False)
))
self.eca=ECA(channels)
def forward(self, input):
batch_size=input.size(0)
#the part of split
output = [conv(input) for conv in self.convs]
#the part of fusion
U=reduce(lambda x,y:x+y,output)
# print(U.size())
return self.eca(U)
|
en
| 0.659338
|
#the part of split #print(i,conv(input).size()) #the part of fusion #the part of selection #split to a and b Constructor
Args:
features: input channel dimensionality.
M: the number of branchs.
G: num of convolution groups.
r: the ratio for compute d, the length of z.
stride: stride, default 1.
L: the minimum dim of the vector z in paper, default 32. Constructs a ECA module.
Args:
channel: Number of channels of the input feature map
k_size: Adaptive selection of kernel size # x: input features with shape [b, c, h, w] # feature descriptor on the global spatial information # Two different branches of ECA module # Multi-scale information fusion #x * y.expand_as(x) # self.M=M # nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1 + i, dilation=1 + i, groups=32,bias=False), # nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, dilation=1, groups=32,bias=False), #the part of split #the part of fusion # print(U.size())
| 2.641925
| 3
|
resolwe/flow/executors/startup_communication_container.py
|
gregorjerse/resolwe
| 0
|
6626069
|
"""Communication container startup script."""
import asyncio
import functools
import logging
import os
import shutil
import signal
import sys
from contextlib import suppress
from distutils.util import strtobool
from pathlib import Path
from typing import Optional
import zmq
import zmq.asyncio
from executors.collect import collect_files
from executors.socket_utils import (
BaseCommunicator,
BaseProtocol,
Message,
PeerIdentity,
Response,
SocketCommunicator,
)
from executors.transfer import transfer_data
from executors.zeromq_utils import ZMQCommunicator
# Socket used to connect with the processing container.
SOCKETS_PATH = Path(os.getenv("SOCKETS_VOLUME", "/sockets"))
PROCESSING_SOCKET = SOCKETS_PATH / os.getenv(
"COMMUNICATION_PROCESSING_SOCKET", "_socket1.s"
)
PROCESSING_CONTAINER_TIMEOUT = int(os.getenv("CONTAINER_TIMEOUT", 300))
# Listener IP and port are read from environment.
LISTENER_IP = os.getenv("LISTENER_IP", "127.0.0.1")
LISTENER_PORT = os.getenv("LISTENER_PORT", "53893")
LISTENER_PROTOCOL = os.getenv("LISTENER_PROTOCOL", "tcp")
DATA_ID = int(os.getenv("DATA_ID", "-1"))
KEEP_DATA = bool(strtobool(os.environ.get("FLOW_MANAGER_KEEP_DATA", "False")))
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
logger.info("Starting communication container for data with id %d.", DATA_ID)
def purge_secrets():
    """Purge the content of the secrets directory.

    The directory itself is mounted as a volume, so it can not be deleted.
    """

    def _retry_after_chmod(func, path, exc_info):
        """On PermissionError, widen permissions and retry the removal."""
        if isinstance(exc_info[1], PermissionError):
            os.chmod(path, 0o700)
            shutil.rmtree(path)

    secrets_root = os.environ.get("SECRETS_DIR", "/secrets")
    try:
        for current_dir, subdirs, filenames in os.walk(secrets_root):
            # Files first: make sure they are removable, then unlink.
            for filename in filenames:
                file_path = os.path.join(current_dir, filename)
                os.chmod(file_path, 0o700)
                os.unlink(file_path)
            # Remove each sub-tree; permission failures trigger the retry hook.
            for subdir in subdirs:
                shutil.rmtree(
                    os.path.join(current_dir, subdir), onerror=_retry_after_chmod
                )
    except OSError:
        logger.exception("Manager exception while removing data runtime directory.")
class ListenerProtocol(BaseProtocol):
    """Protocol for communicating with the listener service.

    Also forwards ``terminate`` requests from the listener on to the
    processing container.
    """

    def __init__(
        self, communicator: BaseCommunicator, processing_communicator: BaseCommunicator
    ):
        """Initialization.

        :param communicator: communicator connected to the listener.
        :param processing_communicator: communicator connected to the
            processing container, used to forward terminate requests.
        """
        super().__init__(communicator, logger)
        self.processing_communicator = processing_communicator

    async def get_script(self) -> str:
        """Get the script from the listener."""
        response = await self.communicator.send_command(
            Message.command("get_script", "")
        )
        return response.message_data

    async def finish(self, return_code: int):
        """Send finish command with the given process return code."""
        await self.communicator.send_command(
            Message.command("finish", {"rc": return_code})
        )

    async def handle_terminate(
        self, message: Message, identity: PeerIdentity
    ) -> Response[str]:
        """Handle terminate command by forwarding it to the processing container."""
        response = await self.processing_communicator.send_command(
            Message.command("terminate", "")
        )
        # Answer the original request: reuse its uuid on the forwarded response.
        response.uuid = message.uuid
        return response
class ProcessingProtocol(BaseProtocol):
    """Protocol for communicating with the processing container.

    Commands without a dedicated handler are proxied through to the listener.
    """

    def __init__(
        self, communicator: BaseCommunicator, listener_communicator: BaseCommunicator
    ):
        """Initialization.

        :param communicator: communicator connected to the processing container.
        :param listener_communicator: communicator used to proxy commands to
            the listener.
        """
        super().__init__(communicator, logger)
        self.listener_communicator = listener_communicator

    async def default_command_handler(
        self, message: Message, identity: PeerIdentity
    ) -> Response:
        """Proxy command to the listener."""
        return await self.listener_communicator.send_command(message, identity)

    async def process_script(self, script: str) -> int:
        """Send the script to the processing container.

        This method can be very long running as it waits for the return code
        of the processing container.

        :returns: return code of the process running the script.
        """
        try:
            response = await self.communicator.send_command(
                Message.command("process_script", script), response_timeout=None
            )
            return response.message_data
        except asyncio.CancelledError:
            # Treat cancellation as a failed script run.
            return 1

    async def terminate(self):
        """Terminate the processing container."""
        await self.communicator.send_command(Message.command("terminate", ""))
class Manager:
    """Main orchestrator of the communication container.

    Accepts the connection from the processing container, connects to the
    listener service, transfers input data, runs the script and collects
    the produced files.
    """

    def __init__(self):
        """Initialize communicator placeholders and synchronization state."""
        # Set once the corresponding peer connects.
        self.processing_communicator: Optional[BaseCommunicator] = None
        self.listener_communicator: Optional[BaseCommunicator] = None
        # Signalled when the processing container opens its socket.
        self.processing_container_connected = asyncio.Event()
        # Currently running 'process_script' task, if any (used for cancel).
        self._process_script_task: Optional[asyncio.Task] = None

    async def _handle_processing_container_connection(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ):
        """Handle incoming connection from the processing container.

        The Python process starts a single connection while Resolwe runtime
        utils start a new connection for every request.
        """
        logger.debug("Processing container connected")
        # Notify the processing container that the connection is ready.
        writer.write(b"PING\n")
        await writer.drain()
        self.processing_container_connected.set()
        self.processing_communicator = SocketCommunicator(
            reader, writer, "(self <-> processing)", logger
        )

    async def start_processing_socket(self):
        """Start listening on the processing Unix socket.

        The socket is used by the processing container to communicate with
        us.
        """
        await asyncio.start_unix_server(
            self._handle_processing_container_connection, os.fspath(PROCESSING_SOCKET)
        )
        logger.debug("Started listening on %s.", PROCESSING_SOCKET)

    async def open_listener_connection(self) -> ZMQCommunicator:
        """Connect to the listener service.

        The data id is used as socket identity, which implies that only one
        process per data object can be running at any given point in time.
        """
        zmq_context = zmq.asyncio.Context.instance()
        zmq_socket = zmq_context.socket(zmq.DEALER)
        zmq_socket.setsockopt(zmq.IDENTITY, str(DATA_ID).encode())
        connect_string = f"{LISTENER_PROTOCOL}://{LISTENER_IP}:{LISTENER_PORT}"
        logger.debug("Opening listener connection to %s", connect_string)
        zmq_socket.connect(connect_string)
        return ZMQCommunicator(zmq_socket, "worker <-> listener", logger)

    async def transfer_missing_data(self):
        """Transfer missing input data.

        On failure a best-effort error is reported to the listener and the
        exception is re-raised.

        :raises: RuntimeError on failure.
        """
        try:
            await transfer_data(self.listener_communicator)
        except RuntimeError:
            with suppress(Exception):
                await self.listener_communicator.send_command(
                    Message.command(
                        "process_log", {"error": ["Error transfering missing data."]}
                    )
                )
            raise

    async def collect_produced_files(self):
        """Collect files produced by the worker.

        Only files referenced in the data model are kept.  On failure a
        best-effort error is reported to the listener and ``False`` is
        returned instead of raising.

        :returns: True on success, False on failure.
        """
        try:
            logger.debug("Collecting files")
            await collect_files(self.listener_communicator)
            logger.debug("Collected files")
            return True
        except RuntimeError:
            with suppress(Exception):
                await self.listener_communicator.send_command(
                    Message.command(
                        "process_log",
                        {"error": ["Error collecting produced files."]},
                    )
                )
            return False

    def _communicator_stopped(self, future: asyncio.Future):
        """Cancel the running script when a communicator stops."""
        if self._process_script_task:
            logger.debug("Communicator closed, cancelling script processing.")
            self._process_script_task.cancel()

    async def start(self) -> int:
        """Run the whole processing workflow.

        :returns: the return code of the processed script (1 on error).
        """
        try:
            return_code = 1
            await self.start_processing_socket()
            self.listener_communicator = await self.open_listener_connection()
            try:
                logger.debug("Waiting for the processing container to connect")
                await asyncio.wait_for(
                    self.processing_container_connected.wait(),
                    PROCESSING_CONTAINER_TIMEOUT,
                )
            except asyncio.TimeoutError:
                message = "Unable to connect to the processing container."
                logger.critical(message)
                with suppress(Exception):
                    await self.listener_communicator.send_command(
                        Message.command("process_log", {"error": [message]})
                    )
                sys.exit(1)
            logger.debug("Connected to the processing container.")
            listener = ListenerProtocol(
                self.listener_communicator, self.processing_communicator
            )
            processing = ProcessingProtocol(
                self.processing_communicator, self.listener_communicator
            )
            try:
                # Start listening for messages from the communication and the
                # processing container.
                listener_task = asyncio.ensure_future(listener.communicate())
                processing_task = asyncio.ensure_future(processing.communicate())
                # If either communicator stops, cancel the running script.
                listener_task.add_done_callback(self._communicator_stopped)
                processing_task.add_done_callback(self._communicator_stopped)
                await self.listener_communicator.send_command(
                    Message.command("update_status", "PR")
                )
                await self.transfer_missing_data()
                script = await listener.get_script()
                self._process_script_task = asyncio.create_task(
                    processing.process_script(script)
                )
                return_code = await self._process_script_task
                self._process_script_task = None
            except RuntimeError:
                logger.exception("Error processing script.")
                with suppress(Exception):
                    await self.listener_communicator.send_command(
                        Message.command(
                            "process_log",
                            {"error": ["Runtime error in communication container."]},
                        )
                    )
        except Exception:
            logger.exception("While running communication container")
        finally:
            # NOTE(review): 'listener', 'listener_task' etc. may be unbound
            # here if setup failed before they were assigned — confirm.
            if not KEEP_DATA:
                purge_secrets()
            if not await self.collect_produced_files():
                # Collection failure turns an apparent success into failure.
                if return_code == 0:
                    return_code = 1
            # Notify listener that the processing is finished.
            with suppress(Exception):
                await listener.finish(return_code)
            listener.stop_communicate()
            processing.stop_communicate()
            # Wait for up to 10 seconds to close the tasks.
            with suppress(asyncio.TimeoutError):
                await asyncio.wait_for(
                    asyncio.gather(listener_task, processing_task), timeout=10
                )
        return return_code
def sig_term_handler(manager_task: asyncio.Task):
    # type: (asyncio.Task) -> None
    """Gracefully terminate the run by cancelling the manager task."""
    logger.debug("SIG_INT received, shutting down.")
    manager_task.cancel()
async def start_communication_container() -> int:
    """Start the processing manager and set the SIGINT handler.

    :returns: the return code produced by the manager task.
    """
    manager = Manager()
    manager_task = asyncio.create_task(manager.start())
    # get_running_loop() is the supported way to obtain the loop inside a
    # coroutine; get_event_loop() is deprecated here since Python 3.10.
    asyncio.get_running_loop().add_signal_handler(
        signal.SIGINT, functools.partial(sig_term_handler, manager_task)
    )
    # Wait for the manager task to finish.
    return await manager_task


if __name__ == "__main__":
    sys.exit(asyncio.run(start_communication_container()))
|
"""Communication container startup script."""
import asyncio
import functools
import logging
import os
import shutil
import signal
import sys
from contextlib import suppress
from distutils.util import strtobool
from pathlib import Path
from typing import Optional
import zmq
import zmq.asyncio
from executors.collect import collect_files
from executors.socket_utils import (
BaseCommunicator,
BaseProtocol,
Message,
PeerIdentity,
Response,
SocketCommunicator,
)
from executors.transfer import transfer_data
from executors.zeromq_utils import ZMQCommunicator
# Socket used to connect with the processing container.
SOCKETS_PATH = Path(os.getenv("SOCKETS_VOLUME", "/sockets"))
PROCESSING_SOCKET = SOCKETS_PATH / os.getenv(
    "COMMUNICATION_PROCESSING_SOCKET", "_socket1.s"
)
# Seconds to wait for the processing container to connect before aborting.
PROCESSING_CONTAINER_TIMEOUT = int(os.getenv("CONTAINER_TIMEOUT", 300))

# Listener IP and port are read from environment.
LISTENER_IP = os.getenv("LISTENER_IP", "127.0.0.1")
LISTENER_PORT = os.getenv("LISTENER_PORT", "53893")
LISTENER_PROTOCOL = os.getenv("LISTENER_PROTOCOL", "tcp")

# Id of the data object processed by this container (-1 when unset).
DATA_ID = int(os.getenv("DATA_ID", "-1"))
# When true, secrets are kept on exit instead of being purged.
KEEP_DATA = bool(strtobool(os.environ.get("FLOW_MANAGER_KEEP_DATA", "False")))

logging.basicConfig(
    stream=sys.stdout,
    level=logging.DEBUG,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
logger.info("Starting communication container for data with id %d.", DATA_ID)
def purge_secrets():
    """Purge the content of the secrets directory.

    The directory itself is mounted as a volume, so it can not be deleted;
    only its contents are removed.  Removal is best effort: failures are
    logged, not raised.
    """

    def handle_error(func, path, exc_info):
        """Retry removal after fixing permissions on a PermissionError."""
        if isinstance(exc_info[1], PermissionError):
            os.chmod(path, 0o700)
            # Best effort: do not loop forever on persistent failures.
            shutil.rmtree(path, ignore_errors=True)

    secrets_dir = os.environ.get("SECRETS_DIR", "/secrets")
    try:
        # Iterate only the top level: nested entries are removed as part of
        # their top-level directory.  The original walked the whole tree
        # with os.walk while deleting directories out from under it.
        for entry in os.scandir(secrets_dir):
            if entry.is_dir(follow_symlinks=False):
                shutil.rmtree(entry.path, onerror=handle_error)
            else:
                # Ensure the file is writable before unlinking.
                os.chmod(entry.path, 0o700)
                os.unlink(entry.path)
    except OSError:
        logger.exception("Manager exception while removing data runtime directory.")
class ListenerProtocol(BaseProtocol):
    """Protocol for talking to the listener service.

    Besides issuing commands to the listener, it forwards ``terminate``
    requests from the listener to the processing container.
    """

    def __init__(
        self, communicator: BaseCommunicator, processing_communicator: BaseCommunicator
    ):
        """Initialize with the listener and processing communicators."""
        super().__init__(communicator, logger)
        # Kept so 'terminate' commands can be proxied to the processing side.
        self.processing_communicator = processing_communicator

    async def get_script(self) -> str:
        """Request the script to execute from the listener."""
        response = await self.communicator.send_command(
            Message.command("get_script", "")
        )
        return response.message_data

    async def finish(self, return_code: int):
        """Notify the listener that processing finished with ``return_code``."""
        await self.communicator.send_command(
            Message.command("finish", {"rc": return_code})
        )

    async def handle_terminate(
        self, message: Message, identity: PeerIdentity
    ) -> Response[str]:
        """Forward a 'terminate' command to the processing container.

        The response UUID is rewritten so the listener can match the reply
        to its original request.
        """
        response = await self.processing_communicator.send_command(
            Message.command("terminate", "")
        )
        response.uuid = message.uuid
        return response
class ProcessingProtocol(BaseProtocol):
    """Protocol for talking to the processing container.

    Commands not handled here are proxied to the listener service.
    """

    def __init__(
        self, communicator: BaseCommunicator, listener_communicator: BaseCommunicator
    ):
        """Initialize with the processing and listener communicators."""
        super().__init__(communicator, logger)
        # Used to proxy commands this protocol does not handle itself.
        self.listener_communicator = listener_communicator

    async def default_command_handler(
        self, message: Message, identity: PeerIdentity
    ) -> Response:
        """Proxy any unhandled command to the listener."""
        return await self.listener_communicator.send_command(message, identity)

    async def process_script(self, script: str) -> int:
        """Send the script to the processing container and await its result.

        This method can be very long running: it blocks with no timeout
        until the processing container reports the return code of the
        process running the script.

        :returns: return code of the process running the script, or 1 when
            the wait is cancelled.
        """
        try:
            response = await self.communicator.send_command(
                Message.command("process_script", script), response_timeout=None
            )
            return response.message_data
        except asyncio.CancelledError:
            # Treat cancellation (e.g. communicator shutdown) as failure.
            return 1

    async def terminate(self):
        """Ask the processing container to terminate."""
        await self.communicator.send_command(Message.command("terminate", ""))
class Manager:
    """Main orchestrator of the communication container.

    Accepts the connection from the processing container, connects to the
    listener service, transfers input data, runs the script and collects
    the produced files.
    """

    def __init__(self):
        """Initialize communicator placeholders and synchronization state."""
        # Set once the corresponding peer connects.
        self.processing_communicator: Optional[BaseCommunicator] = None
        self.listener_communicator: Optional[BaseCommunicator] = None
        # Signalled when the processing container opens its socket.
        self.processing_container_connected = asyncio.Event()
        # Currently running 'process_script' task, if any (used for cancel).
        self._process_script_task: Optional[asyncio.Task] = None

    async def _handle_processing_container_connection(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ):
        """Handle incoming connection from the processing container.

        The Python process starts a single connection while Resolwe runtime
        utils start a new connection for every request.
        """
        logger.debug("Processing container connected")
        # Notify the processing container that the connection is ready.
        writer.write(b"PING\n")
        await writer.drain()
        self.processing_container_connected.set()
        self.processing_communicator = SocketCommunicator(
            reader, writer, "(self <-> processing)", logger
        )

    async def start_processing_socket(self):
        """Start listening on the processing Unix socket.

        The socket is used by the processing container to communicate with
        us.
        """
        await asyncio.start_unix_server(
            self._handle_processing_container_connection, os.fspath(PROCESSING_SOCKET)
        )
        logger.debug("Started listening on %s.", PROCESSING_SOCKET)

    async def open_listener_connection(self) -> ZMQCommunicator:
        """Connect to the listener service.

        The data id is used as socket identity, which implies that only one
        process per data object can be running at any given point in time.
        """
        zmq_context = zmq.asyncio.Context.instance()
        zmq_socket = zmq_context.socket(zmq.DEALER)
        zmq_socket.setsockopt(zmq.IDENTITY, str(DATA_ID).encode())
        connect_string = f"{LISTENER_PROTOCOL}://{LISTENER_IP}:{LISTENER_PORT}"
        logger.debug("Opening listener connection to %s", connect_string)
        zmq_socket.connect(connect_string)
        return ZMQCommunicator(zmq_socket, "worker <-> listener", logger)

    async def transfer_missing_data(self):
        """Transfer missing input data.

        On failure a best-effort error is reported to the listener and the
        exception is re-raised.

        :raises: RuntimeError on failure.
        """
        try:
            await transfer_data(self.listener_communicator)
        except RuntimeError:
            with suppress(Exception):
                await self.listener_communicator.send_command(
                    Message.command(
                        "process_log", {"error": ["Error transfering missing data."]}
                    )
                )
            raise

    async def collect_produced_files(self):
        """Collect files produced by the worker.

        Only files referenced in the data model are kept.  On failure a
        best-effort error is reported to the listener and ``False`` is
        returned instead of raising.

        :returns: True on success, False on failure.
        """
        try:
            logger.debug("Collecting files")
            await collect_files(self.listener_communicator)
            logger.debug("Collected files")
            return True
        except RuntimeError:
            with suppress(Exception):
                await self.listener_communicator.send_command(
                    Message.command(
                        "process_log",
                        {"error": ["Error collecting produced files."]},
                    )
                )
            return False

    def _communicator_stopped(self, future: asyncio.Future):
        """Cancel the running script when a communicator stops."""
        if self._process_script_task:
            logger.debug("Communicator closed, cancelling script processing.")
            self._process_script_task.cancel()

    async def start(self) -> int:
        """Run the whole processing workflow.

        :returns: the return code of the processed script (1 on error).
        """
        try:
            return_code = 1
            await self.start_processing_socket()
            self.listener_communicator = await self.open_listener_connection()
            try:
                logger.debug("Waiting for the processing container to connect")
                await asyncio.wait_for(
                    self.processing_container_connected.wait(),
                    PROCESSING_CONTAINER_TIMEOUT,
                )
            except asyncio.TimeoutError:
                message = "Unable to connect to the processing container."
                logger.critical(message)
                with suppress(Exception):
                    await self.listener_communicator.send_command(
                        Message.command("process_log", {"error": [message]})
                    )
                sys.exit(1)
            logger.debug("Connected to the processing container.")
            listener = ListenerProtocol(
                self.listener_communicator, self.processing_communicator
            )
            processing = ProcessingProtocol(
                self.processing_communicator, self.listener_communicator
            )
            try:
                # Start listening for messages from the communication and the
                # processing container.
                listener_task = asyncio.ensure_future(listener.communicate())
                processing_task = asyncio.ensure_future(processing.communicate())
                # If either communicator stops, cancel the running script.
                listener_task.add_done_callback(self._communicator_stopped)
                processing_task.add_done_callback(self._communicator_stopped)
                await self.listener_communicator.send_command(
                    Message.command("update_status", "PR")
                )
                await self.transfer_missing_data()
                script = await listener.get_script()
                self._process_script_task = asyncio.create_task(
                    processing.process_script(script)
                )
                return_code = await self._process_script_task
                self._process_script_task = None
            except RuntimeError:
                logger.exception("Error processing script.")
                with suppress(Exception):
                    await self.listener_communicator.send_command(
                        Message.command(
                            "process_log",
                            {"error": ["Runtime error in communication container."]},
                        )
                    )
        except Exception:
            logger.exception("While running communication container")
        finally:
            # NOTE(review): 'listener', 'listener_task' etc. may be unbound
            # here if setup failed before they were assigned — confirm.
            if not KEEP_DATA:
                purge_secrets()
            if not await self.collect_produced_files():
                # Collection failure turns an apparent success into failure.
                if return_code == 0:
                    return_code = 1
            # Notify listener that the processing is finished.
            with suppress(Exception):
                await listener.finish(return_code)
            listener.stop_communicate()
            processing.stop_communicate()
            # Wait for up to 10 seconds to close the tasks.
            with suppress(asyncio.TimeoutError):
                await asyncio.wait_for(
                    asyncio.gather(listener_task, processing_task), timeout=10
                )
        return return_code
def sig_term_handler(manager_task: asyncio.Task):
    # type: (asyncio.Task) -> None
    """Gracefully terminate the run by cancelling the manager task."""
    logger.debug("SIG_INT received, shutting down.")
    manager_task.cancel()
async def start_communication_container() -> int:
    """Start the processing manager and set the SIGINT handler.

    :returns: the return code produced by the manager task.
    """
    manager = Manager()
    manager_task = asyncio.create_task(manager.start())
    # get_running_loop() is the supported way to obtain the loop inside a
    # coroutine; get_event_loop() is deprecated here since Python 3.10.
    asyncio.get_running_loop().add_signal_handler(
        signal.SIGINT, functools.partial(sig_term_handler, manager_task)
    )
    # Wait for the manager task to finish.
    return await manager_task


if __name__ == "__main__":
    sys.exit(asyncio.run(start_communication_container()))
|
en
| 0.852417
|
Communication container startup script. # Socket used to connect with the processing container. # Listener IP and port are read from environment. Purge the content of the secrets directory. The directory itself is mounted as volume, so it can not be deleted. Handle permission errors while removing data directories. Listener protocol. Initialization. Get the script from the listener. Send finish command. Handle terminate command. Processing protocol. Initialization. Proxy command to the listener. Send the script to the processing container. This method can be very long running as it waits for the return code the processing container. :returns: return code of the process running the script. Terminate the processing container. Main class. Communicate with the listener and with the processing container. Initialization. Handle incoming connection from the processing container. Python process starts a single connection while Resolwe runtime utils starts a new connection for every request. # Notify the processing container that the connection is ready. Start listening on the processing file socket. The socket is used by the processing container to communicatite with us. Connect to the listener service. We are using data id as identity. This implies only one process per data object at any given point in time can be running. Transfer missing data. Log error re-raise exception on failure. :raises: RuntimeError on failure. Collect files produced by the worker. Keep only files that are referenced in the data model. Log error re-raise exception on failure. :raises: RuntimeError on failure. Stop processing if necessary. Start the main program. # Start listening for messages from the communication and the # processing container. # Notify listener that the processing is finished. # Wait for up to 10 seconds to close the tasks. # type: (asyncio.Future) -> None Gracefully terminate the running process. # type: () -> None Start the processing manager and set SIGINT handler. 
# Wait for the manager task to finish.
| 2.25125
| 2
|
slack_sdk/oauth/installation_store/async_cacheable_installation_store.py
|
timgates42/python-slack-sdk
| 0
|
6626070
|
from logging import Logger
from typing import Optional, Dict
from slack_sdk.oauth.installation_store import Bot, Installation
from slack_sdk.oauth.installation_store.async_installation_store import (
AsyncInstallationStore,
)
class AsyncCacheableInstallationStore(AsyncInstallationStore):
    """A simple in-memory cache wrapper for any async installation store."""

    underlying: AsyncInstallationStore
    cached_bots: Dict[str, Bot]

    def __init__(self, installation_store: AsyncInstallationStore):
        """A simple memory cache wrapper for any installation stores.

        :param installation_store: the installation store to wrap
        """
        self.underlying = installation_store
        self.cached_bots = {}

    @property
    def logger(self) -> Logger:
        """Logger of the wrapped installation store."""
        return self.underlying.logger

    async def async_save(self, installation: Installation):
        """Save the installation and drop any stale cached bot.

        Without the invalidation, a re-installation (e.g. a rotated bot
        token) would keep serving the previously cached Bot forever.
        """
        key = f"{installation.enterprise_id}-{installation.team_id}"
        self.cached_bots.pop(key, None)
        return await self.underlying.async_save(installation)

    async def async_find_bot(
        self, *, enterprise_id: Optional[str], team_id: Optional[str]
    ) -> Optional[Bot]:
        """Find a bot, serving repeated lookups from the in-memory cache."""
        key = f"{enterprise_id}-{team_id}"
        if key in self.cached_bots:
            return self.cached_bots[key]
        bot = await self.underlying.async_find_bot(
            enterprise_id=enterprise_id, team_id=team_id
        )
        if bot:
            self.cached_bots[key] = bot
        return bot
|
from logging import Logger
from typing import Optional, Dict
from slack_sdk.oauth.installation_store import Bot, Installation
from slack_sdk.oauth.installation_store.async_installation_store import (
AsyncInstallationStore,
)
class AsyncCacheableInstallationStore(AsyncInstallationStore):
    """A simple in-memory cache wrapper for any async installation store."""

    underlying: AsyncInstallationStore
    cached_bots: Dict[str, Bot]

    def __init__(self, installation_store: AsyncInstallationStore):
        """A simple memory cache wrapper for any installation stores.

        :param installation_store: the installation store to wrap
        """
        self.underlying = installation_store
        self.cached_bots = {}

    @property
    def logger(self) -> Logger:
        """Logger of the wrapped installation store."""
        return self.underlying.logger

    async def async_save(self, installation: Installation):
        """Save the installation and drop any stale cached bot.

        Without the invalidation, a re-installation (e.g. a rotated bot
        token) would keep serving the previously cached Bot forever.
        """
        key = f"{installation.enterprise_id}-{installation.team_id}"
        self.cached_bots.pop(key, None)
        return await self.underlying.async_save(installation)

    async def async_find_bot(
        self, *, enterprise_id: Optional[str], team_id: Optional[str]
    ) -> Optional[Bot]:
        """Find a bot, serving repeated lookups from the in-memory cache."""
        key = f"{enterprise_id}-{team_id}"
        if key in self.cached_bots:
            return self.cached_bots[key]
        bot = await self.underlying.async_find_bot(
            enterprise_id=enterprise_id, team_id=team_id
        )
        if bot:
            self.cached_bots[key] = bot
        return bot
|
en
| 0.470335
|
A simple memory cache wrapper for any installation stores. :param installation_store: the installation store to wrap
| 2.226038
| 2
|
lesson-07/pwd_strength_v3.0.py
|
hemiaoio/pylearning
| 1
|
6626071
|
"""
3.0:存储密码到文件
"""
def check_number_exists(str):
    """Return True if the string contains at least one numeric character.

    NOTE(review): the parameter name shadows the builtin ``str``; kept
    unchanged for backward compatibility with keyword callers.
    """
    return any(c.isnumeric() for c in str)
def check_letter_exists(str):
    """Return True if the string contains at least one alphabetic character.

    NOTE(review): the parameter name shadows the builtin ``str``; kept
    unchanged for backward compatibility with keyword callers.
    """
    return any(c.isalpha() for c in str)
def main():
    """Interactively read passwords, score their strength and log results.

    The user gets up to five tries; every violated rule costs one try.
    Each attempt is appended to ``password.txt`` with its strength score.
    """
    try_times = 5
    while try_times > 0:
        password_str = input("请输入密码:")
        strength_level = 0
        # 规则一:密码长度大于8
        if len(password_str) >= 8:
            strength_level += 1
        else:
            print('密码长度要求至少8位!')
            try_times -= 1
        # 规则二:包含数字
        if check_number_exists(password_str):
            strength_level += 1
        else:
            print('密码中必须包含数字!')
            try_times -= 1
        # 规则三:包含字母
        if check_letter_exists(password_str):
            strength_level += 1
        else:
            print('密码中必须包含字母!')
            try_times -= 1
        # Context manager closes the log even if a later statement raises;
        # explicit utf-8 keeps the Chinese text portable across locales.
        with open('password.txt', 'a', encoding='utf-8') as f:
            f.write('密码:{} 强度:{}\n'.format(password_str, strength_level))
        if strength_level == 3:
            print('恭喜,您的密码符合规范!')
            break
    if try_times <= 0:
        print('尝试次数过多!')


if __name__ == '__main__':
    main()
|
"""
3.0:存储密码到文件
"""
def check_number_exists(str):
    """Return True if the string contains at least one numeric character.

    NOTE(review): the parameter name shadows the builtin ``str``; kept
    unchanged for backward compatibility with keyword callers.
    """
    return any(c.isnumeric() for c in str)
def check_letter_exists(str):
    """Return True if the string contains at least one alphabetic character.

    NOTE(review): the parameter name shadows the builtin ``str``; kept
    unchanged for backward compatibility with keyword callers.
    """
    return any(c.isalpha() for c in str)
def main():
    """Interactively read passwords, score their strength and log results.

    The user gets up to five tries; every violated rule costs one try.
    Each attempt is appended to ``password.txt`` with its strength score.
    """
    try_times = 5
    while try_times > 0:
        password_str = input("请输入密码:")
        strength_level = 0
        # 规则一:密码长度大于8
        if len(password_str) >= 8:
            strength_level += 1
        else:
            print('密码长度要求至少8位!')
            try_times -= 1
        # 规则二:包含数字
        if check_number_exists(password_str):
            strength_level += 1
        else:
            print('密码中必须包含数字!')
            try_times -= 1
        # 规则三:包含字母
        if check_letter_exists(password_str):
            strength_level += 1
        else:
            print('密码中必须包含字母!')
            try_times -= 1
        # Context manager closes the log even if a later statement raises;
        # explicit utf-8 keeps the Chinese text portable across locales.
        with open('password.txt', 'a', encoding='utf-8') as f:
            f.write('密码:{} 强度:{}\n'.format(password_str, strength_level))
        if strength_level == 3:
            print('恭喜,您的密码符合规范!')
            break
    if try_times <= 0:
        print('尝试次数过多!')


if __name__ == '__main__':
    main()
|
zh
| 0.931002
|
3.0:存储密码到文件 # 规则一:密码长度大于8 # 规则二:包含数字 # 规则三:包含字母
| 3.906351
| 4
|
venv/Lib/site-packages/jupyter_client/tests/test_public_api.py
|
ajayiagbebaku/NFL-Model
| 7
|
6626072
|
<gh_stars>1-10
"""Test the jupyter_client public API
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import jupyter_client
from jupyter_client import connect
from jupyter_client import launcher
def test_kms():
    """All KernelManager variants are exported at the package top level."""
    for prefix in ("", "Async", "Multi"):
        assert f"{prefix}KernelManager" in dir(jupyter_client)


def test_kcs():
    """All KernelClient variants are exported at the package top level."""
    for prefix in ("", "Blocking", "Async"):
        assert f"{prefix}KernelClient" in dir(jupyter_client)


def test_launcher():
    """Every public launcher name is re-exported by the package."""
    exported = dir(jupyter_client)
    for name in launcher.__all__:
        assert name in exported


def test_connect():
    """Every public connect name is re-exported by the package."""
    exported = dir(jupyter_client)
    for name in connect.__all__:
        assert name in exported
|
"""Test the jupyter_client public API
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import jupyter_client
from jupyter_client import connect
from jupyter_client import launcher
def test_kms():
    """All KernelManager variants are exported at the package top level."""
    for prefix in ("", "Async", "Multi"):
        assert f"{prefix}KernelManager" in dir(jupyter_client)


def test_kcs():
    """All KernelClient variants are exported at the package top level."""
    for prefix in ("", "Blocking", "Async"):
        assert f"{prefix}KernelClient" in dir(jupyter_client)


def test_launcher():
    """Every public launcher name is re-exported by the package."""
    exported = dir(jupyter_client)
    for name in launcher.__all__:
        assert name in exported


def test_connect():
    """Every public connect name is re-exported by the package."""
    exported = dir(jupyter_client)
    for name in connect.__all__:
        assert name in exported
|
en
| 0.800516
|
Test the jupyter_client public API # Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License.
| 2.336477
| 2
|
setup.py
|
DocWinter/THSF2017
| 0
|
6626073
|
from setuptools import setup

setup(
    name='THSF Radio',
    version='1.0.0',
    author='<NAME>',
    author_email='<EMAIL>',
    # Fixed missing space between the two halves of the split literal
    # (previously rendered as "RDS datato the RPI").
    description=("A simple webapp to quickly send RDS data "
                 "to the RPI."),
    license="BSD",
    # 'packages' must list this project's own packages; third-party
    # dependencies belong in 'install_requires'.
    install_requires=['flask', 'flask-sqlalchemy', 'flask-wtf'],
)
|
from setuptools import setup

setup(
    name='THSF Radio',
    version='1.0.0',
    author='<NAME>',
    author_email='<EMAIL>',
    # Fixed missing space between the two halves of the split literal
    # (previously rendered as "RDS datato the RPI").
    description=("A simple webapp to quickly send RDS data "
                 "to the RPI."),
    license="BSD",
    # 'packages' must list this project's own packages; third-party
    # dependencies belong in 'install_requires'.
    install_requires=['flask', 'flask-sqlalchemy', 'flask-wtf'],
)
|
none
| 1
| 1.066096
| 1
|
|
tests/python/pants_test/backend/jvm/tasks/jvm_compile/test_jvm_compile.py
|
anthonyjpratti/pants
| 0
|
6626074
|
<reponame>anthonyjpratti/pants<gh_stars>0
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.jvm.subsystems.zinc import Zinc
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_compile import BaseZincCompile
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.base.build_environment import get_buildroot
from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase
class DummyJvmCompile(JvmCompile):
    """Minimal concrete JvmCompile subclass used as the task under test."""
    pass
class JvmCompileTest(NailgunTaskTestBase):
    """Tests for the generic JvmCompile task."""

    DEFAULT_CONF = 'default'

    @classmethod
    def task_type(cls):
        # The test framework instantiates this task type for each test.
        return DummyJvmCompile

    def test_if_runtime_classpath_exists(self):
        """A pre-initialized runtime classpath entry must survive and
        precede the entry copied over from the compile classpath."""
        target = self.make_target(
            'java/classpath:java_lib',
            target_type=JavaLibrary,
            sources=['com/foo/Bar.java'],
        )
        context = self.context(target_roots=[target])
        compile_classpath = context.products.get_data('compile_classpath', ClasspathProducts.init_func(self.pants_workdir))
        compile_entry = os.path.join(self.pants_workdir, 'compile-entry')
        pre_init_runtime_entry = os.path.join(self.pants_workdir, 'pre-inited-runtime-entry')
        compile_classpath.add_for_targets([target], [('default', compile_entry)])
        runtime_classpath = context.products.get_data('runtime_classpath', ClasspathProducts.init_func(self.pants_workdir))
        runtime_classpath.add_for_targets([target], [('default', pre_init_runtime_entry)])
        task = self.create_task(context)
        resulting_classpath = task.create_runtime_classpath()
        # Pre-inited entry first, then the compile entry appended by the task.
        self.assertEqual([('default', pre_init_runtime_entry), ('default', compile_entry)],
                         resulting_classpath.get_for_target(target))
class BaseZincCompileJDKTest(NailgunTaskTestBase):
    """Tests of how zinc resolves the JDK home per execution strategy."""

    DEFAULT_CONF = 'default'
    # Remember the original working directory so tearDown can restore it;
    # setUp changes into the build root for zinc's relative JDK layout.
    old_cwd = os.getcwd()

    @classmethod
    def task_type(cls):
        return BaseZincCompile

    def setUp(self):
        # Must chdir before super().setUp() so framework setup sees the root.
        os.chdir(get_buildroot())
        super().setUp()

    def tearDown(self):
        os.chdir(self.old_cwd)
        super().tearDown()

    def test_subprocess_compile_jdk_being_symlink(self):
        """Subprocess execution uses the symlinked JDK home."""
        context = self.context(target_roots=[])
        zinc = Zinc.Factory.global_instance().create(
            context.products, NailgunTaskBase.ExecutionStrategy.subprocess
        )
        self.assertTrue(os.path.islink(zinc.dist.home))

    def test_hermetic_jdk_being_underlying_dist(self):
        """Hermetic execution uses the real underlying JDK dist, not a link."""
        context = self.context(target_roots=[])
        zinc = Zinc.Factory.global_instance().create(
            context.products, NailgunTaskBase.ExecutionStrategy.hermetic
        )
        self.assertFalse(
            os.path.islink(zinc.dist.home),
            f"Expected {zinc.dist.home} to not be a link, it was."
        )
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.backend.jvm.subsystems.zinc import Zinc
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.jvm_compile.jvm_compile import JvmCompile
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_compile import BaseZincCompile
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.base.build_environment import get_buildroot
from pants.testutil.jvm.nailgun_task_test_base import NailgunTaskTestBase
class DummyJvmCompile(JvmCompile):
    """Minimal concrete JvmCompile subclass used as the task under test."""
    pass
class JvmCompileTest(NailgunTaskTestBase):
    """Tests for the generic JvmCompile task."""

    DEFAULT_CONF = 'default'

    @classmethod
    def task_type(cls):
        # The test framework instantiates this task type for each test.
        return DummyJvmCompile

    def test_if_runtime_classpath_exists(self):
        """A pre-initialized runtime classpath entry must survive and
        precede the entry copied over from the compile classpath."""
        target = self.make_target(
            'java/classpath:java_lib',
            target_type=JavaLibrary,
            sources=['com/foo/Bar.java'],
        )
        context = self.context(target_roots=[target])
        compile_classpath = context.products.get_data('compile_classpath', ClasspathProducts.init_func(self.pants_workdir))
        compile_entry = os.path.join(self.pants_workdir, 'compile-entry')
        pre_init_runtime_entry = os.path.join(self.pants_workdir, 'pre-inited-runtime-entry')
        compile_classpath.add_for_targets([target], [('default', compile_entry)])
        runtime_classpath = context.products.get_data('runtime_classpath', ClasspathProducts.init_func(self.pants_workdir))
        runtime_classpath.add_for_targets([target], [('default', pre_init_runtime_entry)])
        task = self.create_task(context)
        resulting_classpath = task.create_runtime_classpath()
        # Pre-inited entry first, then the compile entry appended by the task.
        self.assertEqual([('default', pre_init_runtime_entry), ('default', compile_entry)],
                         resulting_classpath.get_for_target(target))
class BaseZincCompileJDKTest(NailgunTaskTestBase):
    """Tests of how zinc resolves the JDK home per execution strategy."""

    DEFAULT_CONF = 'default'
    # Remember the original working directory so tearDown can restore it;
    # setUp changes into the build root for zinc's relative JDK layout.
    old_cwd = os.getcwd()

    @classmethod
    def task_type(cls):
        return BaseZincCompile

    def setUp(self):
        # Must chdir before super().setUp() so framework setup sees the root.
        os.chdir(get_buildroot())
        super().setUp()

    def tearDown(self):
        os.chdir(self.old_cwd)
        super().tearDown()

    def test_subprocess_compile_jdk_being_symlink(self):
        """Subprocess execution uses the symlinked JDK home."""
        context = self.context(target_roots=[])
        zinc = Zinc.Factory.global_instance().create(
            context.products, NailgunTaskBase.ExecutionStrategy.subprocess
        )
        self.assertTrue(os.path.islink(zinc.dist.home))

    def test_hermetic_jdk_being_underlying_dist(self):
        """Hermetic execution uses the real underlying JDK dist, not a link."""
        context = self.context(target_roots=[])
        zinc = Zinc.Factory.global_instance().create(
            context.products, NailgunTaskBase.ExecutionStrategy.hermetic
        )
        self.assertFalse(
            os.path.islink(zinc.dist.home),
            f"Expected {zinc.dist.home} to not be a link, it was."
        )
|
en
| 0.528453
|
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE).
| 1.875303
| 2
|
PycharmProjects/PythonExercicios/ex047.py
|
RodrigoMASRamos/Projects.py
| 0
|
6626075
|
<gh_stars>0
# Exercício Python #047 - Contagem de pares
#
# Crie um programa que mostre na tela TODOS OS NÚMEROS PARES que estão no intervalo entre 1 e 50.
# Start at 2: 0 is even but lies outside the requested interval [1, 50].
for num in range(2, 51, 2):
    print(num, end=' ')
print('\033[1;30mFoi?\033[m')
|
# Exercício Python #047 - Contagem de pares
#
# Crie um programa que mostre na tela TODOS OS NÚMEROS PARES que estão no intervalo entre 1 e 50.
# Start at 2: 0 is even but lies outside the requested interval [1, 50].
for num in range(2, 51, 2):
    print(num, end=' ')
print('\033[1;30mFoi?\033[m')
|
pt
| 0.822812
|
# Exercício Python #047 - Contagem de pares # # Crie um programa que mostre na tela TODOS OS NÚMEROS PARES que estão no intervalo entre 1 e 50.
| 3.861029
| 4
|
colorList.py
|
handsomeguyu/get_image_color
| 0
|
6626076
|
<filename>colorList.py
import numpy as np
import collections
# 定义字典存放颜色分量上下限
# 例如:{颜色 [min分量, max分量]}
# {'red' [array([160, 43, 46]), array([179, 255, 255])]}
def getColorList():
    """Return a mapping from color name to its HSV detection range.

    Each value is a two-element list ``[lower, upper]`` of ``np.array``
    integer triples giving inclusive HSV bounds (OpenCV convention:
    H in 0..180, S and V in 0..255).  The result is a
    ``collections.defaultdict(list)`` to match the original behaviour:
    a missing key yields an empty list instead of raising ``KeyError``.
    """
    # (name, lower-HSV, upper-HSV) — one row per detectable color.
    # Red needs two rows ('red' and 'red2') because its hue wraps around 0.
    hsv_ranges = [
        ('black',  (0, 0, 0),     (180, 255, 46)),
        # 'gray' ((0, 0, 46)..(180, 43, 220)) is intentionally disabled,
        # exactly as in the original implementation.
        ('white',  (0, 0, 221),   (180, 30, 255)),
        ('red',    (156, 43, 46), (180, 255, 255)),
        ('red2',   (0, 43, 46),   (10, 255, 255)),
        ('orange', (11, 43, 46),  (25, 255, 255)),
        ('yellow', (26, 43, 46),  (34, 255, 255)),
        ('green',  (35, 43, 46),  (77, 255, 255)),
        ('cyan',   (78, 43, 46),  (99, 255, 255)),
        ('blue',   (100, 43, 46), (124, 255, 255)),
        ('purple', (125, 43, 46), (155, 255, 255)),
    ]
    # Renamed from `dict`, which shadowed the builtin of the same name.
    color_dict = collections.defaultdict(list)
    for name, lower, upper in hsv_ranges:
        color_dict[name] = [np.array(lower), np.array(upper)]
    return color_dict
if __name__ == '__main__':
    # Smoke test: dump the whole table, its size, and each upper bound.
    ranges = getColorList()
    print(ranges)
    print('num=', len(ranges))
    for name in ranges:
        print('key=', name)
        print('value=', ranges[name][1])
|
<filename>colorList.py
import numpy as np
import collections
# 定义字典存放颜色分量上下限
# 例如:{颜色 [min分量, max分量]}
# {'red' [array([160, 43, 46]), array([179, 255, 255])]}
def getColorList():
dict = collections.defaultdict(list)
# 黑色
lower_black = np.array([0, 0, 0])
upper_black = np.array([180, 255, 46])
color_list = []
color_list.append(lower_black)
color_list.append(upper_black)
dict['black'] = color_list
# #灰色
# lower_gray = np.array([0, 0, 46])
# upper_gray = np.array([180, 43, 220])
# color_list = []
# color_list.append(lower_gray)
# color_list.append(upper_gray)
# dict['gray']=color_list
# 白色
lower_white = np.array([0, 0, 221])
upper_white = np.array([180, 30, 255])
color_list = []
color_list.append(lower_white)
color_list.append(upper_white)
dict['white'] = color_list
# 红色
lower_red = np.array([156, 43, 46])
upper_red = np.array([180, 255, 255])
color_list = []
color_list.append(lower_red)
color_list.append(upper_red)
dict['red'] = color_list
# 红色2
lower_red = np.array([0, 43, 46])
upper_red = np.array([10, 255, 255])
color_list = []
color_list.append(lower_red)
color_list.append(upper_red)
dict['red2'] = color_list
# 橙色
lower_orange = np.array([11, 43, 46])
upper_orange = np.array([25, 255, 255])
color_list = []
color_list.append(lower_orange)
color_list.append(upper_orange)
dict['orange'] = color_list
# 黄色
lower_yellow = np.array([26, 43, 46])
upper_yellow = np.array([34, 255, 255])
color_list = []
color_list.append(lower_yellow)
color_list.append(upper_yellow)
dict['yellow'] = color_list
# 绿色
lower_green = np.array([35, 43, 46])
upper_green = np.array([77, 255, 255])
color_list = []
color_list.append(lower_green)
color_list.append(upper_green)
dict['green'] = color_list
# 青色
lower_cyan = np.array([78, 43, 46])
upper_cyan = np.array([99, 255, 255])
color_list = []
color_list.append(lower_cyan)
color_list.append(upper_cyan)
dict['cyan'] = color_list
# 蓝色
lower_blue = np.array([100, 43, 46])
upper_blue = np.array([124, 255, 255])
color_list = []
color_list.append(lower_blue)
color_list.append(upper_blue)
dict['blue'] = color_list
# 紫色
lower_purple = np.array([125, 43, 46])
upper_purple = np.array([155, 255, 255])
color_list = []
color_list.append(lower_purple)
color_list.append(upper_purple)
dict['purple'] = color_list
return dict
if __name__ == '__main__':
color_dict = getColorList()
print(color_dict)
num = len(color_dict)
print('num=', num)
for d in color_dict:
print('key=', d)
print('value=', color_dict[d][1])
|
zh
| 0.642157
|
# 定义字典存放颜色分量上下限 # 例如:{颜色 [min分量, max分量]} # {'red' [array([160, 43, 46]), array([179, 255, 255])]} # 黑色 # #灰色 # lower_gray = np.array([0, 0, 46]) # upper_gray = np.array([180, 43, 220]) # color_list = [] # color_list.append(lower_gray) # color_list.append(upper_gray) # dict['gray']=color_list # 白色 # 红色 # 红色2 # 橙色 # 黄色 # 绿色 # 青色 # 蓝色 # 紫色
| 3.315881
| 3
|
setup.py
|
hackingmaterials/flows
| 2
|
6626077
|
<filename>setup.py
from pathlib import Path
from setuptools import find_packages, setup
# Resolve paths relative to this setup.py so installs work from any CWD.
module_dir = Path(__file__).resolve().parent
# Reuse the README as the long description shown on PyPI.
with open(module_dir / "README.md") as f:
    long_desc = f.read()
setup(
    name="jobflow",
    description="jobflow is library for writing computational workflows",
    long_description=long_desc,
    # Version is derived from git tags via setuptools_scm.
    use_scm_version={"version_scheme": "python-simplified-semver"},
    setup_requires=["setuptools_scm"],
    long_description_content_type="text/markdown",
    url="https://materialsproject.github.io/jobflow",
    author="<NAME>",
    author_email="<EMAIL>",
    license="modified BSD",
    # Source lives under src/ (src-layout).
    packages=find_packages("src"),
    package_dir={"": "src"},
    # Ship the py.typed marker so type checkers use the inline annotations.
    package_data={"jobflow": ["py.typed"]},
    zip_safe=False,
    include_package_data=True,
    install_requires=[
        "setuptools",
        "monty>=2021.5.9",
        "pydash",
        "networkx",
        "maggma>=0.38.1",
        "pydantic",
        "PyYAML",
    ],
    # Optional dependency groups: e.g. `pip install jobflow[docs]`.
    extras_require={
        "docs": [
            "sphinx==4.3.2",
            "furo==2022.1.2",
            "m2r2==0.3.2",
            "ipython==7.31.0",
            "nbsphinx==0.8.8",
            "nbsphinx-link==1.3.0",
            "FireWorks==1.9.8",
            "autodoc_pydantic==1.6.0",
        ],
        "tests": [
            "pytest==6.2.5",
            "pytest-cov==3.0.0",
            "FireWorks==1.9.8",
            "matplotlib==3.5.1",
            "pydot==1.4.2",
        ],
        "dev": ["pre-commit>=2.12.1"],
        "vis": ["matplotlib", "pydot"],
        "fireworks": ["fireworks"],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "Intended Audience :: Information Technology",
        "Operating System :: OS Independent",
        "Topic :: Other/Nonlisted Topic",
        "Topic :: Database :: Front-Ends",
        "Topic :: Scientific/Engineering",
    ],
    python_requires=">=3.7",
    tests_require=["pytest"],
)
|
<filename>setup.py
from pathlib import Path
from setuptools import find_packages, setup
module_dir = Path(__file__).resolve().parent
with open(module_dir / "README.md") as f:
long_desc = f.read()
setup(
name="jobflow",
description="jobflow is library for writing computational workflows",
long_description=long_desc,
use_scm_version={"version_scheme": "python-simplified-semver"},
setup_requires=["setuptools_scm"],
long_description_content_type="text/markdown",
url="https://materialsproject.github.io/jobflow",
author="<NAME>",
author_email="<EMAIL>",
license="modified BSD",
packages=find_packages("src"),
package_dir={"": "src"},
package_data={"jobflow": ["py.typed"]},
zip_safe=False,
include_package_data=True,
install_requires=[
"setuptools",
"monty>=2021.5.9",
"pydash",
"networkx",
"maggma>=0.38.1",
"pydantic",
"PyYAML",
],
extras_require={
"docs": [
"sphinx==4.3.2",
"furo==2022.1.2",
"m2r2==0.3.2",
"ipython==7.31.0",
"nbsphinx==0.8.8",
"nbsphinx-link==1.3.0",
"FireWorks==1.9.8",
"autodoc_pydantic==1.6.0",
],
"tests": [
"pytest==6.2.5",
"pytest-cov==3.0.0",
"FireWorks==1.9.8",
"matplotlib==3.5.1",
"pydot==1.4.2",
],
"dev": ["pre-commit>=2.12.1"],
"vis": ["matplotlib", "pydot"],
"fireworks": ["fireworks"],
},
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: System Administrators",
"Intended Audience :: Information Technology",
"Operating System :: OS Independent",
"Topic :: Other/Nonlisted Topic",
"Topic :: Database :: Front-Ends",
"Topic :: Scientific/Engineering",
],
python_requires=">=3.7",
tests_require=["pytest"],
)
|
none
| 1
| 1.422599
| 1
|
|
apps/shortener_app/migrations/0010_auto_20190123_1908.py
|
escrichov/shortener
| 6
|
6626078
|
# Generated by Django 2.1.5 on 2019-01-23 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make ``ShortUrl.url_active_last_checked`` optional."""
    dependencies = [
        ('shortener_app', '0009_auto_20190123_1903'),
    ]
    operations = [
        # Allow the timestamp to be blank in forms and NULL in the database.
        migrations.AlterField(
            model_name='shorturl',
            name='url_active_last_checked',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
# Generated by Django 2.1.5 on 2019-01-23 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortener_app', '0009_auto_20190123_1903'),
]
operations = [
migrations.AlterField(
model_name='shorturl',
name='url_active_last_checked',
field=models.DateTimeField(blank=True, null=True),
),
]
|
en
| 0.769091
|
# Generated by Django 2.1.5 on 2019-01-23 19:08
| 1.438601
| 1
|
sardine/resolvers/manifest/base.py
|
JavierLuna/sardine
| 0
|
6626079
|
from abc import ABCMeta, abstractmethod
from typing import Optional, Dict
from sardine.lang.manifest.builder import StackManifestBuilder
from sardine.lang.parser.objects import StackDeclaration
from sardine.lang.parser.parser import Parser
from sardine.lang.tokenizer.tokenizer import Tokenizer
from sardine.types import STACK_DECLARATIONS_TYPE
_cached_manifest: Optional[Dict[str, StackDeclaration]] = None
class BaseManifestResolver(metaclass=ABCMeta):
    """Abstract base for manifest resolvers.

    Subclasses decide where the raw manifest text lives (e.g. the file
    system) by implementing the abstract hooks; this base class owns the
    tokenize -> parse -> build pipeline and caches the result in the
    module-level ``_cached_manifest`` so the manifest is processed at most
    once per process.
    """

    @classmethod
    @abstractmethod
    def manifest_exists(cls) -> bool:
        """Return whether a manifest is available to this resolver."""
        raise NotImplementedError

    @classmethod
    def load_manifest(cls) -> STACK_DECLARATIONS_TYPE:
        """Load, parse and cache the stack declarations from the manifest."""
        global _cached_manifest
        if _cached_manifest is None:
            # Full pipeline: raw text -> tokens -> parsed declarations -> stacks.
            unparsed_manifest = cls._load_manifest()
            tokenized_manifest = Tokenizer.tokenize(unparsed_manifest)
            parsed_declarations = Parser(tokenized_manifest).parse()
            stack_declarations = StackManifestBuilder.build(parsed_declarations)
            _cached_manifest = stack_declarations
        return _cached_manifest

    @classmethod
    @abstractmethod
    def create_directories(cls) -> None:
        """Create any directories this resolver needs."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def _load_manifest(cls) -> str:
        """Return the raw, unparsed manifest contents."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def _manifest_path(cls) -> str:
        """Return the path where the manifest is stored."""
        raise NotImplementedError
|
from abc import ABCMeta, abstractmethod
from typing import Optional, Dict
from sardine.lang.manifest.builder import StackManifestBuilder
from sardine.lang.parser.objects import StackDeclaration
from sardine.lang.parser.parser import Parser
from sardine.lang.tokenizer.tokenizer import Tokenizer
from sardine.types import STACK_DECLARATIONS_TYPE
_cached_manifest: Optional[Dict[str, StackDeclaration]] = None
class BaseManifestResolver(metaclass=ABCMeta):
@classmethod
@abstractmethod
def manifest_exists(cls) -> bool:
raise NotImplementedError
@classmethod
def load_manifest(cls) -> STACK_DECLARATIONS_TYPE:
global _cached_manifest
if _cached_manifest is None:
unparsed_manifest = cls._load_manifest()
tokenized_manifest = Tokenizer.tokenize(unparsed_manifest)
parsed_declarations = Parser(tokenized_manifest).parse()
stack_declarations = StackManifestBuilder.build(parsed_declarations)
_cached_manifest = stack_declarations
return _cached_manifest
@classmethod
@abstractmethod
def create_directories(cls) -> None:
raise NotImplementedError
@classmethod
@abstractmethod
def _load_manifest(cls) -> str:
raise NotImplementedError
@classmethod
@abstractmethod
def _manifest_path(cls) -> str:
raise NotImplementedError
|
none
| 1
| 2.187362
| 2
|
|
tensorflow_federated/python/core/utils/test.py
|
federated-learning-experiments/federated
| 0
|
6626080
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General purpose test utilities for TFF."""
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl import executor_stacks
from tensorflow_federated.python.core.impl.wrappers import set_default_executor
tf.compat.v1.enable_v2_behavior() # Required to create a local executor.
def tf1_and_tf2(fn):
  """A decorator that executes a test under both TF computation decorators.

  Args:
    fn: A test function to be decorated. It must accept two arguments: self (a
      `TestCase`), and tf_computation (either a `tff.tf_computation` or
      `tff.tf2_computation`).

  Returns:
    A decorated function, which executes `fn` using both decorators.
  """
  def run_with_both_decorators(self):
    # TF2 variant first, then TF1, logging which one is active each time.
    logging.info('Testing under tff.tf2_computation')
    fn(self, computations.tf2_computation)
    logging.info('Testing under tff.tf_computation')
    fn(self, computations.tf_computation)
  return run_with_both_decorators
def tf1(fn):
  """A decorator for testing the `tff.tf_computation` decorator."""
  def run_with_tf1(self):
    fn(self, computations.tf_computation)
  return run_with_tf1
def tf2(fn):
  """A decorator for testing the `tff.tf2_computation` decorator."""
  def run_with_tf2(self):
    fn(self, computations.tf2_computation)
  return run_with_tf2
def executors(*args):
  """A decorator for creating tests parameterized by executors.

  NOTE: To use this decorator your test is required to inherit from
  `parameterized.TestCase`.

  1. The decorator can be specified without arguments:

  ```
  @executors
  def foo(self):
    ...
  ```

  2. The decorator can be called with arguments:

  ```
  @executors(
      ('label', executor),
      ...
  )
  def foo(self):
    ...
  ```

  If the decorator is specified without arguments or is called with no
  arguments, it will parameterize the test by the following executors:

  *   reference executor
  *   local executor

  If the decorator is called with arguments, the arguments must be in a form
  that is accepted by `parameterized.named_parameters`.

  Args:
    *args: Either a test function to be decorated or named executors for the
      decorated method, either a single iterable, or a list of tuples or dicts.

  Returns:
    A test generator to be handled by `parameterized.TestGeneratorMetaclass`.
  """
  def executor_decorator(fn):
    # Wrap the test so the parameterized executor is installed as the
    # default before the test body runs.
    def wrapped_fn(self, executor):
      set_default_executor.set_default_executor(executor)
      fn(self)
    return wrapped_fn
  def decorator(fn, named_executors=None):
    if not named_executors:
      # Default executor set: reference (None) and a local executor stack.
      named_executors = [
          ('reference', None),
          ('local', executor_stacks.create_local_executor()),
      ]
    named_parameters_decorator = parameterized.named_parameters(named_executors)
    fn = executor_decorator(fn)
    fn = named_parameters_decorator(fn)
    return fn
  if len(args) == 1 and callable(args[0]):
    # Used as a bare decorator: `@executors`.
    return decorator(args[0])
  else:
    # Used with arguments: `@executors(('label', executor), ...)`.
    return lambda x: decorator(x, *args)
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General purpose test utilities for TFF."""
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl import executor_stacks
from tensorflow_federated.python.core.impl.wrappers import set_default_executor
tf.compat.v1.enable_v2_behavior() # Required to create a local executor.
def tf1_and_tf2(fn):
"""A decorator for creating test parameterized by TF computation decorators.
Args:
fn: A test function to be decorated. It must accept two arguments: self (a
`TestCase`), and tf_computation (either a `tff.tf_computation` or
`tff.tf2_computation`).
Returns:
A decorated function, which executes `fn` using both decorators.
"""
def wrapped_fn(self):
logging.info('Testing under tff.tf2_computation')
fn(self, computations.tf2_computation)
logging.info('Testing under tff.tf_computation')
fn(self, computations.tf_computation)
return wrapped_fn
def tf1(fn):
"""A decorator for testing the `tff.tf_computation` decorator."""
def wrapped_fn(self):
fn(self, computations.tf_computation)
return wrapped_fn
def tf2(fn):
"""A decorator for testing the `tff.tf2_computation` decorator."""
def wrapped_fn(self):
fn(self, computations.tf2_computation)
return wrapped_fn
def executors(*args):
"""A decorator for creating tests parameterized by executors.
NOTE: To use this decorator your test is required to inherit from
`parameterized.TestCase`.
1. The decorator can be specified without arguments:
```
@executors
def foo(self):
...
```
2. The decorator can be called with arguments:
```
@executors(
('label', executor),
...
)
def foo(self):
...
```
If the decorator is specified without arguments or is called with no
arguments, the default this decorator with parameterize the test by the
following executors:
* reference executor
* local executor
If the decorator is called with arguments the arguments must be in a form that
is accpeted by `parameterized.named_parameters`.
Args:
*args: Either a test function to be decorated or named executors for the
decorated method, either a single iterable, or a list of tuples or dicts.
Returns:
A test generator to be handled by `parameterized.TestGeneratorMetaclass`.
"""
def executor_decorator(fn):
def wrapped_fn(self, executor):
set_default_executor.set_default_executor(executor)
fn(self)
return wrapped_fn
def decorator(fn, named_executors=None):
if not named_executors:
named_executors = [
('reference', None),
('local', executor_stacks.create_local_executor()),
]
named_parameters_decorator = parameterized.named_parameters(named_executors)
fn = executor_decorator(fn)
fn = named_parameters_decorator(fn)
return fn
if len(args) == 1 and callable(args[0]):
return decorator(args[0])
else:
return lambda x: decorator(x, *args)
|
en
| 0.765746
|
# Lint as: python3 # Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. General purpose test utilities for TFF. # Required to create a local executor. A decorator for creating test parameterized by TF computation decorators. Args: fn: A test function to be decorated. It must accept two arguments: self (a `TestCase`), and tf_computation (either a `tff.tf_computation` or `tff.tf2_computation`). Returns: A decorated function, which executes `fn` using both decorators. A decorator for testing the `tff.tf_computation` decorator. A decorator for testing the `tff.tf2_computation` decorator. A decorator for creating tests parameterized by executors. NOTE: To use this decorator your test is required to inherit from `parameterized.TestCase`. 1. The decorator can be specified without arguments: ``` @executors def foo(self): ... ``` 2. The decorator can be called with arguments: ``` @executors( ('label', executor), ... ) def foo(self): ... ``` If the decorator is specified without arguments or is called with no arguments, the default this decorator with parameterize the test by the following executors: * reference executor * local executor If the decorator is called with arguments the arguments must be in a form that is accpeted by `parameterized.named_parameters`. Args: *args: Either a test function to be decorated or named executors for the decorated method, either a single iterable, or a list of tuples or dicts. 
Returns: A test generator to be handled by `parameterized.TestGeneratorMetaclass`.
| 2.375416
| 2
|
fluent_contents/plugins/googledocsviewer/models.py
|
vinnyrose/django-fluent-contents
| 0
|
6626081
|
<filename>fluent_contents/plugins/googledocsviewer/models.py
from future.utils import python_2_unicode_compatible
from django.db import models
from django.utils.translation import ugettext_lazy as _
from fluent_contents.models import ContentItem
from fluent_contents.utils import validate_html_size
@python_2_unicode_compatible
class GoogleDocsViewerItem(ContentItem):
    """
    A Google Docs viewer that is displayed at the page.

    Stores the URL of an online document plus the width/height of the
    embedded viewer; both sizes are validated by ``validate_html_size``.
    """
    # URL of the document to embed (e.g. a PDF or DOCX file).
    url = models.URLField(_("File URL"), help_text=_("Specify the URL of an online document, for example a PDF or DOCX file."))
    # Viewer width: pixels or a percentage of the container (default "100%").
    width = models.CharField(_("Width"), max_length=10, validators=[validate_html_size], default="100%", help_text=_("Specify the size in pixels, or a percentage of the container area size."))
    # Viewer height in pixels (default "600").
    height = models.CharField(_("Height"), max_length=10, validators=[validate_html_size], default="600", help_text=_("Specify the size in pixels."))

    class Meta:
        verbose_name = _("Embedded document")
        # NOTE(review): plural label is identical to the singular — confirm
        # this is intentional.
        verbose_name_plural = _("Embedded document")

    def __str__(self):
        # Identify the item by the document it embeds.
        return self.url
|
<filename>fluent_contents/plugins/googledocsviewer/models.py
from future.utils import python_2_unicode_compatible
from django.db import models
from django.utils.translation import ugettext_lazy as _
from fluent_contents.models import ContentItem
from fluent_contents.utils import validate_html_size
@python_2_unicode_compatible
class GoogleDocsViewerItem(ContentItem):
"""
A Google Docs viewer that is displayed at the page.
"""
url = models.URLField(_("File URL"), help_text=_("Specify the URL of an online document, for example a PDF or DOCX file."))
width = models.CharField(_("Width"), max_length=10, validators=[validate_html_size], default="100%", help_text=_("Specify the size in pixels, or a percentage of the container area size."))
height = models.CharField(_("Height"), max_length=10, validators=[validate_html_size], default="600", help_text=_("Specify the size in pixels."))
class Meta:
verbose_name = _("Embedded document")
verbose_name_plural = _("Embedded document")
def __str__(self):
return self.url
|
en
| 0.767079
|
A Google Docs viewer that is displayed at the page.
| 2.144134
| 2
|
model/roi_module.py
|
mahavird/vehicle_detection
| 274
|
6626082
|
from collections import namedtuple
from string import Template
import cupy, torch
import cupy as cp
import torch as t
from torch.autograd import Function
from model.utils.roi_cupy import kernel_backward, kernel_forward
Stream = namedtuple('Stream', ['ptr'])
@cupy.util.memoize(for_each_device=True)
def load_kernel(kernel_name, code, **kwargs):
    """Compile a templated CUDA kernel via cupy and return the named function.

    `code` is a `string.Template` source; `kwargs` are substituted into it.
    Results are memoized per device by the decorator, so each kernel is
    compiled at most once per GPU.
    """
    # NOTE(review): freeing pointer 0 looks like a workaround/no-op — confirm
    # why it is needed before removing.
    cp.cuda.runtime.free(0)
    code = Template(code).substitute(**kwargs)
    kernel_code = cupy.cuda.compile_with_cache(code)
    return kernel_code.get_function(kernel_name)
# Threads launched per CUDA block.
CUDA_NUM_THREADS = 1024
def GET_BLOCKS(N, K=CUDA_NUM_THREADS):
    """Return how many K-sized blocks are needed to cover N items: ceil(N/K)."""
    return -(-N // K)  # integer ceiling division
class RoI(Function):
    """
    RoI pooling as a torch autograd ``Function`` backed by hand-written
    CUDA kernels (sources in ``model.utils.roi_cupy``, compiled via cupy).

    NOTE: only CUDA-compatible — all tensors must live on the GPU.
    """
    def __init__(self, outh, outw, spatial_scale):
        # Compile (or fetch the memoized) forward/backward kernels.
        self.forward_fn = load_kernel('roi_forward', kernel_forward)
        self.backward_fn = load_kernel('roi_backward', kernel_backward)
        self.outh, self.outw, self.spatial_scale = outh, outw, spatial_scale
    def forward(self, x, rois):
        # NOTE: MAKE SURE input is contiguous too
        # (the kernels index raw data pointers, so layouts must be dense).
        x = x.contiguous()
        rois = rois.contiguous()
        self.in_size = B, C, H, W = x.size()  # saved for backward
        self.N = N = rois.size(0)
        output = t.zeros(N, C, self.outh, self.outw).cuda()
        # Per-output-cell winner index buffer; presumably used by the backward
        # kernel to route gradients (see roi_cupy kernel source) — confirm.
        self.argmax_data = t.zeros(N, C, self.outh, self.outw).int().cuda()
        self.rois = rois
        args = [x.data_ptr(), rois.data_ptr(),
                output.data_ptr(),
                self.argmax_data.data_ptr(),
                self.spatial_scale, C, H, W,
                self.outh, self.outw,
                output.numel()]
        stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
        # Launch enough threads to cover every output element, on the
        # current torch CUDA stream.
        self.forward_fn(args=args,
                        block=(CUDA_NUM_THREADS, 1, 1),
                        grid=(GET_BLOCKS(output.numel()), 1, 1),
                        stream=stream)
        return output
    def backward(self, grad_output):
        ##NOTE: IMPORTANT CONTIGUOUS
        # TODO: input
        grad_output = grad_output.contiguous()
        B, C, H, W = self.in_size
        grad_input = t.zeros(self.in_size).cuda()
        stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
        args = [grad_output.data_ptr(),
                self.argmax_data.data_ptr(),
                self.rois.data_ptr(),
                grad_input.data_ptr(),
                self.N, self.spatial_scale, C, H, W, self.outh, self.outw,
                grad_input.numel()]
        self.backward_fn(args=args,
                         block=(CUDA_NUM_THREADS, 1, 1),
                         grid=(GET_BLOCKS(grad_input.numel()), 1, 1),
                         stream=stream
                         )
        # No gradient is produced for the `rois` argument.
        return grad_input, None
class RoIPooling2D(t.nn.Module):
    """`nn.Module` wrapper that delegates to the CUDA `RoI` pooling Function."""
    def __init__(self, outh, outw, spatial_scale):
        super(RoIPooling2D, self).__init__()
        # Keep the Function instance so `forward` can reuse its compiled kernels.
        self.RoI = RoI(outh, outw, spatial_scale)
    def forward(self, x, rois):
        # Pure delegation: (features, rois) -> pooled output.
        return self.RoI(x, rois)
def test_roi_module():
    """Self-test: compare this CUDA RoI pooling against chainer's reference.

    Requires a CUDA device and, for the reference path, the `chainer`
    package (``pip install chainer``).
    """
    ## fake data###
    B, N, C, H, W, PH, PW = 2, 8, 4, 32, 32, 7, 7
    bottom_data = t.randn(B, C, H, W).cuda()
    bottom_rois = t.randn(N, 5)
    # Column 0 holds the batch index (first half -> image 0, second -> image 1);
    # columns 1..4 are random box coordinates.
    bottom_rois[:int(N / 2), 0] = 0
    bottom_rois[int(N / 2):, 0] = 1
    bottom_rois[:, 1:] = (t.rand(N, 4) * 100).float()
    bottom_rois = bottom_rois.cuda()
    spatial_scale = 1. / 16
    outh, outw = PH, PW
    # pytorch version
    module = RoIPooling2D(outh, outw, spatial_scale)
    x = t.autograd.Variable(bottom_data, requires_grad=True)
    rois = t.autograd.Variable(bottom_rois)
    output = module(x, rois)
    output.sum().backward()
    def t2c(variable):
        # torch Variable -> cupy array, via a host numpy round-trip.
        npa = variable.data.cpu().numpy()
        return cp.array(npa)
    def test_eq(variable, array, info):
        # Assert exact element-wise equality between torch and cupy results.
        cc = cp.asnumpy(array)
        neq = (cc != variable.data.cpu().numpy())
        assert neq.sum() == 0, 'test failed: %s' % info
    # chainer version,if you're going to run this
    # pip install chainer
    import chainer.functions as F
    from chainer import Variable
    x_cn = Variable(t2c(x))
    o_cn = F.roi_pooling_2d(x_cn, t2c(rois), outh, outw, spatial_scale)
    test_eq(output, o_cn.array, 'forward')
    F.sum(o_cn).backward()
    test_eq(x.grad, x_cn.grad, 'backward')
    print('test pass')
|
from collections import namedtuple
from string import Template
import cupy, torch
import cupy as cp
import torch as t
from torch.autograd import Function
from model.utils.roi_cupy import kernel_backward, kernel_forward
Stream = namedtuple('Stream', ['ptr'])
@cupy.util.memoize(for_each_device=True)
def load_kernel(kernel_name, code, **kwargs):
cp.cuda.runtime.free(0)
code = Template(code).substitute(**kwargs)
kernel_code = cupy.cuda.compile_with_cache(code)
return kernel_code.get_function(kernel_name)
CUDA_NUM_THREADS = 1024
def GET_BLOCKS(N, K=CUDA_NUM_THREADS):
return (N + K - 1) // K
class RoI(Function):
"""
NOTE:only CUDA-compatible
"""
def __init__(self, outh, outw, spatial_scale):
self.forward_fn = load_kernel('roi_forward', kernel_forward)
self.backward_fn = load_kernel('roi_backward', kernel_backward)
self.outh, self.outw, self.spatial_scale = outh, outw, spatial_scale
def forward(self, x, rois):
# NOTE: MAKE SURE input is contiguous too
x = x.contiguous()
rois = rois.contiguous()
self.in_size = B, C, H, W = x.size()
self.N = N = rois.size(0)
output = t.zeros(N, C, self.outh, self.outw).cuda()
self.argmax_data = t.zeros(N, C, self.outh, self.outw).int().cuda()
self.rois = rois
args = [x.data_ptr(), rois.data_ptr(),
output.data_ptr(),
self.argmax_data.data_ptr(),
self.spatial_scale, C, H, W,
self.outh, self.outw,
output.numel()]
stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
self.forward_fn(args=args,
block=(CUDA_NUM_THREADS, 1, 1),
grid=(GET_BLOCKS(output.numel()), 1, 1),
stream=stream)
return output
def backward(self, grad_output):
##NOTE: IMPORTANT CONTIGUOUS
# TODO: input
grad_output = grad_output.contiguous()
B, C, H, W = self.in_size
grad_input = t.zeros(self.in_size).cuda()
stream = Stream(ptr=torch.cuda.current_stream().cuda_stream)
args = [grad_output.data_ptr(),
self.argmax_data.data_ptr(),
self.rois.data_ptr(),
grad_input.data_ptr(),
self.N, self.spatial_scale, C, H, W, self.outh, self.outw,
grad_input.numel()]
self.backward_fn(args=args,
block=(CUDA_NUM_THREADS, 1, 1),
grid=(GET_BLOCKS(grad_input.numel()), 1, 1),
stream=stream
)
return grad_input, None
class RoIPooling2D(t.nn.Module):
def __init__(self, outh, outw, spatial_scale):
super(RoIPooling2D, self).__init__()
self.RoI = RoI(outh, outw, spatial_scale)
def forward(self, x, rois):
return self.RoI(x, rois)
def test_roi_module():
## fake data###
B, N, C, H, W, PH, PW = 2, 8, 4, 32, 32, 7, 7
bottom_data = t.randn(B, C, H, W).cuda()
bottom_rois = t.randn(N, 5)
bottom_rois[:int(N / 2), 0] = 0
bottom_rois[int(N / 2):, 0] = 1
bottom_rois[:, 1:] = (t.rand(N, 4) * 100).float()
bottom_rois = bottom_rois.cuda()
spatial_scale = 1. / 16
outh, outw = PH, PW
# pytorch version
module = RoIPooling2D(outh, outw, spatial_scale)
x = t.autograd.Variable(bottom_data, requires_grad=True)
rois = t.autograd.Variable(bottom_rois)
output = module(x, rois)
output.sum().backward()
def t2c(variable):
npa = variable.data.cpu().numpy()
return cp.array(npa)
def test_eq(variable, array, info):
cc = cp.asnumpy(array)
neq = (cc != variable.data.cpu().numpy())
assert neq.sum() == 0, 'test failed: %s' % info
# chainer version,if you're going to run this
# pip install chainer
import chainer.functions as F
from chainer import Variable
x_cn = Variable(t2c(x))
o_cn = F.roi_pooling_2d(x_cn, t2c(rois), outh, outw, spatial_scale)
test_eq(output, o_cn.array, 'forward')
F.sum(o_cn).backward()
test_eq(x.grad, x_cn.grad, 'backward')
print('test pass')
|
en
| 0.477735
|
NOTE:only CUDA-compatible # NOTE: MAKE SURE input is contiguous too ##NOTE: IMPORTANT CONTIGUOUS # TODO: input ## fake data### # pytorch version # chainer version,if you're going to run this # pip install chainer
| 2.482296
| 2
|
EFT_Auto_Examine.py
|
xvorost/Escape-from-Tarkov-Auto-Examine
| 1
|
6626083
|
<filename>EFT_Auto_Examine.py
import os,random,string
randtitle=str(''.join(random.choice('_'+string.hexdigits) for i in range(random.randint(12,28))))
os.system('title '+randtitle)
import keyboard,sys,threading
from loguru import logger as log
from functools import partialmethod
from pyautogui import center, click, moveTo, press, locateOnScreen, click, press, scroll, move
from tinyWinToast.tinyWinToast import Toast, Button
from time import sleep as s
def output(level,type,color,mytext):
    """Emit *mytext* through loguru at a custom level named *type*.

    level: numeric loguru level (``no=``); call sites pass ``None`` when the
        level was registered by an earlier call — TODO confirm loguru reuses
        the existing level in that case.
    type: name of the log level; also patched onto the logger class as a
        method of the same name so it can be invoked below.
    color: loguru color markup for the level tag (e.g. ``'<light-yellow>'``).
    mytext: message to print.
    """
    # NOTE(review): `type` shadows the builtin — rename if this is reworked.
    log.level(type, no=level, color=color)
    # Dynamically add a `log.type(...)` method bound to this level.
    log.__class__.type = partialmethod(log.__class__.log, type)
    # Reinstall the stdout sink with the custom format, filtered to this level.
    log.remove()
    log.add(sys.stdout, format="<level>{level}</level> | <white>{time:HH:mm}</white> <level>|</level><light-white>| {message}</light-white>", level=type)
    log.type(mytext)
output(1, 'Info','<light-yellow>','Developed by Avnsx on GitHub: https://github.com/Avnsx/Escape-from-Tarkov-Auto-Examine\n')
output(None, 'Info','<light-yellow>','Starting... make sure a EFT Traders showcase is visible on your screen!')
output(None, 'Info','<light-yellow>','You can press and hold Key F2 at any time to end.\n')
s(random.uniform(3,5))
def error_exit(reason):
    """Log *reason* as an error, pause so the user can read it, then exit."""
    output(2, 'Error','<light-red>', reason)
    s(15)  # keep the console window open long enough to read the message
    os._exit(0)  # hard exit: also terminates the threading.Timer watcher
def threaded():
threading.Timer(1, threaded).start()
if keyboard.is_pressed('f2') == True:
toast = Toast()
toast.setAppID(randtitle)
toast.addButton(Button(content='Visit on GitHub', activationType='protocol', arguments='https://github.com/Avnsx/Escape-from-Tarkov-Auto-Examine', pendingUpdate=False))
toast.setIcon(src=os.path.dirname(os.path.abspath(__file__))+'\\img_samples\\icon.ico', crop='round') #filepath fix
toast.setDuration('short')
toast.setTitle('Please leave Feedback!')
toast.setMessage('Liked it? Have suggestions for improvements? Or maybe would like to support by giving a Star?')
toast.show()
os._exit(0)
else:return False
loopn=0
runtime=0
try:
def examine_item():
for image in ['img_samples/blackgrey.png','img_samples/questionmark.png']:
imagecords=locateOnScreen(image, confidence=0.9) # edit here; if issues with image detection 1
if imagecords != None:
click(imagecords, button='middle')
move(random.uniform(180,220), random.uniform(-180,-220), random.uniform(.23,.65)) # avoids overlapping
s(random.uniform(1,2)) # avoids examine errors & timeouts
output(None, 'Info','<light-green>','examined item')
return True
else:return False
while True:
if loopn == 0:
threaded()
loopn+=1
if examine_item() == False:
showcase=locateOnScreen('img_samples/showcase.png', confidence=0.7) # edit here; if issues with image detection 2
if showcase != None:
scroll_length=random.randint(10,11) # edit here; if too much or too less scrolling
for __ in range(scroll_length):
barcoords=center(showcase)
moveTo(barcoords[0]+random.uniform(150,190), barcoords[1]+random.uniform(30,40))
for __ in range(7):scroll(-10)
examine_item()
runtime+=1
output(None, 'Info','<light-magenta>','scroll number: '+str(runtime))
if runtime == scroll_length:
press('f5')
output(None, 'Info','<light-cyan>','refreshed trader\n')
for image in ['img_samples/1.png','img_samples/topall.png']:
try:
click(center(locateOnScreen(image, confidence=0.7))) # edit here; if issues with image detection 3
s(random.uniform(2,3)) # tabs load up time
except Exception as e:
print(e)
error_exit('ERROR: '+image+' not found. Read the additional knowledge paragraph on the GitHub repo.\n')
runtime=0
elif showcase == None:error_exit('ERROR: showcase.png not found. Read the additional knowledge paragraph on the GitHub repo.\n')
except Exception as e: #not proud on exception handling
if 'Pillow' in str(e):
os.system('pip install pillow --upgrade')
error_exit('Dependency ERROR: Pillow was missing; attempted installing Pillow. Please re-launch!')
if 'OpenCV' in str(e):
os.system('pip install opencv-python')
error_exit('Dependency ERROR: OpenCV was missing; attempted installing OpenCV. Please re-launch!')
else:
print(e)
error_exit('Unexpected ERROR: If you do not know what this was caused by open an Issue ticket in the GitHub repository.')
|
<filename>EFT_Auto_Examine.py
import os,random,string
# Randomize the console window title (hex chars/underscores, 12-28 long).
randtitle=str(''.join(random.choice('_'+string.hexdigits) for i in range(random.randint(12,28))))
os.system('title '+randtitle)
import keyboard,sys,threading
from loguru import logger as log
from functools import partialmethod
from pyautogui import center, click, moveTo, press, locateOnScreen, click, press, scroll, move
from tinyWinToast.tinyWinToast import Toast, Button
from time import sleep as s
def output(level,type,color,mytext):
    """Log `mytext` under a custom loguru level named `type`, colored `color`.

    NOTE(review): the parameter `type` shadows the builtin; `level` is the
    numeric loguru level id (None after the first registration).
    """
    log.level(type, no=level, color=color)
    log.__class__.type = partialmethod(log.__class__.log, type)
    log.remove()
    log.add(sys.stdout, format="<level>{level}</level> | <white>{time:HH:mm}</white> <level>|</level><light-white>| {message}</light-white>", level=type)
    log.type(mytext)
output(1, 'Info','<light-yellow>','Developed by Avnsx on GitHub: https://github.com/Avnsx/Escape-from-Tarkov-Auto-Examine\n')
output(None, 'Info','<light-yellow>','Starting... make sure a EFT Traders showcase is visible on your screen!')
output(None, 'Info','<light-yellow>','You can press and hold Key F2 at any time to end.\n')
s(random.uniform(3,5))
def error_exit(reason):
    """Log `reason` as an error, give the user 15s to read it, then hard-exit."""
    output(2, 'Error','<light-red>', reason)
    s(15)
    os._exit(0)
def threaded():
    """Poll the F2 kill switch once per second (re-arms itself via Timer).

    When F2 is held: show a Windows toast asking for feedback and exit the
    whole process.
    """
    threading.Timer(1, threaded).start()
    if keyboard.is_pressed('f2') == True:
        toast = Toast()
        toast.setAppID(randtitle)
        toast.addButton(Button(content='Visit on GitHub', activationType='protocol', arguments='https://github.com/Avnsx/Escape-from-Tarkov-Auto-Examine', pendingUpdate=False))
        toast.setIcon(src=os.path.dirname(os.path.abspath(__file__))+'\\img_samples\\icon.ico', crop='round') #filepath fix
        toast.setDuration('short')
        toast.setTitle('Please leave Feedback!')
        toast.setMessage('Liked it? Have suggestions for improvements? Or maybe would like to support by giving a Star?')
        toast.show()
        os._exit(0)
    else:return False
loopn=0 # guards that the F2 watcher is started exactly once
runtime=0 # scroll counter within the current trader page
try:
    def examine_item():
        """Middle-click the first unexamined-item template found on screen.

        Returns True after examining one item, False if nothing matched.
        NOTE(review): the `else: return False` fires on the FIRST template
        miss, so 'questionmark.png' is never tried when 'blackgrey.png'
        misses -- likely should fall through to the next image instead.
        """
        for image in ['img_samples/blackgrey.png','img_samples/questionmark.png']:
            imagecords=locateOnScreen(image, confidence=0.9) # edit here; if issues with image detection 1
            if imagecords != None:
                click(imagecords, button='middle')
                move(random.uniform(180,220), random.uniform(-180,-220), random.uniform(.23,.65)) # avoids overlapping
                s(random.uniform(1,2)) # avoids examine errors & timeouts
                output(None, 'Info','<light-green>','examined item')
                return True
            else:return False
    # Main loop: examine what's visible, otherwise scroll the trader
    # showcase, and refresh (F5) + re-open the first tab after a full pass.
    while True:
        if loopn == 0:
            threaded()
            loopn+=1
        if examine_item() == False:
            showcase=locateOnScreen('img_samples/showcase.png', confidence=0.7) # edit here; if issues with image detection 2
            if showcase != None:
                scroll_length=random.randint(10,11) # edit here; if too much or too less scrolling
                for __ in range(scroll_length):
                    barcoords=center(showcase)
                    moveTo(barcoords[0]+random.uniform(150,190), barcoords[1]+random.uniform(30,40))
                    for __ in range(7):scroll(-10)
                    examine_item()
                    runtime+=1
                    output(None, 'Info','<light-magenta>','scroll number: '+str(runtime))
                    if runtime == scroll_length:
                        press('f5')
                        output(None, 'Info','<light-cyan>','refreshed trader\n')
                        for image in ['img_samples/1.png','img_samples/topall.png']:
                            try:
                                click(center(locateOnScreen(image, confidence=0.7))) # edit here; if issues with image detection 3
                                s(random.uniform(2,3)) # tabs load up time
                            except Exception as e:
                                print(e)
                                error_exit('ERROR: '+image+' not found. Read the additional knowledge paragraph on the GitHub repo.\n')
                        runtime=0
            elif showcase == None:error_exit('ERROR: showcase.png not found. Read the additional knowledge paragraph on the GitHub repo.\n')
except Exception as e: #not proud on exception handling
    # Best-effort self-repair: install missing optional deps of pyautogui.
    if 'Pillow' in str(e):
        os.system('pip install pillow --upgrade')
        error_exit('Dependency ERROR: Pillow was missing; attempted installing Pillow. Please re-launch!')
    if 'OpenCV' in str(e):
        os.system('pip install opencv-python')
        error_exit('Dependency ERROR: OpenCV was missing; attempted installing OpenCV. Please re-launch!')
    else:
        print(e)
        error_exit('Unexpected ERROR: If you do not know what this was caused by open an Issue ticket in the GitHub repository.')
|
en
| 0.786741
|
#filepath fix # edit here; if issues with image detection 1 # avoids overlapping # avoids examine errors & timeouts # edit here; if issues with image detection 2 # edit here; if too much or too less scrolling # edit here; if issues with image detection 3 # tabs load up time #not proud on exception handling
| 2.47931
| 2
|
constants.py
|
tatarinovst2/2021-2-level-ctlr
| 0
|
6626084
|
<reponame>tatarinovst2/2021-2-level-ctlr<filename>constants.py
"""
Useful constant variables
"""
from pathlib import Path
PROJECT_ROOT = Path(__file__).parent
ASSETS_PATH = PROJECT_ROOT / 'tmp' / 'articles'
CRAWLER_CONFIG_PATH = PROJECT_ROOT / 'scrapper_config.json'
ROOT_URL = "http://www.vestnik.unn.ru/"
RUSSIAN_ROOT_URL = ROOT_URL + 'ru'
DOMAIN_URL = RUSSIAN_ROOT_URL + "/nomera"
|
"""
Useful constant variables
"""
from pathlib import Path
PROJECT_ROOT = Path(__file__).parent
ASSETS_PATH = PROJECT_ROOT / 'tmp' / 'articles'
CRAWLER_CONFIG_PATH = PROJECT_ROOT / 'scrapper_config.json'
ROOT_URL = "http://www.vestnik.unn.ru/"
RUSSIAN_ROOT_URL = ROOT_URL + 'ru'
DOMAIN_URL = RUSSIAN_ROOT_URL + "/nomera"
|
en
| 0.553361
|
Useful constant variables
| 1.823529
| 2
|
commentary/admin.py
|
mangadventure/django-user-comments
| 0
|
6626085
|
<filename>commentary/admin.py
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _, ngettext
from commentary import get_model
from commentary.views.moderation import (
perform_flag, perform_approve, perform_delete
)
# ORM lookup path to the comment author's username field, e.g. "user__username".
USERNAME_FIELD = 'user__' + get_user_model().USERNAME_FIELD
class CommentsAdmin(admin.ModelAdmin):
    """Admin configuration for moderating comments.

    Exposes bulk flag/approve/remove actions and hides destructive or
    moderation-only actions from users lacking the matching permissions.
    """

    list_display = (
        'user', 'content_type', 'object_pk', 'parent',
        'submit_date', 'is_public', 'is_removed'
    )
    list_filter = (
        'submit_date', 'user', 'content_type',
        'is_public', 'is_removed'
    )
    date_hierarchy = 'submit_date'
    ordering = ('-submit_date',)
    raw_id_fields = ('user', 'parent')
    search_fields = ('body', USERNAME_FIELD)
    actions = ('flag_comments', 'approve_comments', 'remove_comments')

    def get_actions(self, request):
        """Restrict the available bulk actions to the requester's permissions."""
        actions = super().get_actions(request)
        # Only superusers should be able to delete the comments from the DB.
        if not request.user.is_superuser:
            actions.pop('delete_selected', None)
        if not request.user.has_perm('commentary.can_moderate'):
            actions.pop('approve_comments', None)
            actions.pop('remove_comments', None)
        return actions

    def flag_comments(self, request, queryset):
        """Bulk action: flag the selected comments."""
        self._bulk_flag(
            request, queryset, perform_flag,
            lambda n: ngettext('flagged', 'flagged', n)
        )
    flag_comments.short_description = _('Flag selected comments')

    def approve_comments(self, request, queryset):
        """Bulk action: approve the selected comments."""
        self._bulk_flag(
            request, queryset, perform_approve,
            lambda n: ngettext('approved', 'approved', n)
        )
    approve_comments.short_description = _('Approve selected comments')

    def remove_comments(self, request, queryset):
        """Bulk action: remove (soft-delete) the selected comments."""
        self._bulk_flag(
            request, queryset, perform_delete,
            lambda n: ngettext('removed', 'removed', n)
        )
    remove_comments.short_description = _('Remove selected comments')

    def _bulk_flag(self, request, queryset, action, done_message):
        """
        Flag, approve, or remove some comments from an admin action.
        Actually calls the `action` argument to perform the heavy lifting.
        """
        n_comments = 0
        for comment in queryset:
            action(request, comment)
            n_comments += 1
        msg = ngettext(
            '%(count)s comment was successfully %(action)s.',
            '%(count)s comments were successfully %(action)s.', n_comments
        )
        self.message_user(request, msg % {
            'count': n_comments, 'action': done_message(n_comments)
        })
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app).
# NOTE(review): a custom comment app is presumably expected to register its
# own ModelAdmin -- confirm against the commentary app's docs.
Klass = get_model()
if Klass._meta.app_label == 'commentary':
    admin.site.register(Klass, CommentsAdmin)
|
<filename>commentary/admin.py
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _, ngettext
from commentary import get_model
from commentary.views.moderation import (
perform_flag, perform_approve, perform_delete
)
# ORM lookup path to the comment author's username field, e.g. "user__username".
USERNAME_FIELD = 'user__' + get_user_model().USERNAME_FIELD
class CommentsAdmin(admin.ModelAdmin):
    """Admin configuration for moderating comments.

    Exposes bulk flag/approve/remove actions and hides destructive or
    moderation-only actions from users lacking the matching permissions.
    NOTE(review): `super(CommentsAdmin, self)` below could be plain
    `super()` on Python 3.
    """
    list_display = (
        'user', 'content_type', 'object_pk', 'parent',
        'submit_date', 'is_public', 'is_removed'
    )
    list_filter = (
        'submit_date', 'user', 'content_type',
        'is_public', 'is_removed'
    )
    date_hierarchy = 'submit_date'
    ordering = ('-submit_date',)
    raw_id_fields = ('user', 'parent')
    search_fields = ('body', USERNAME_FIELD)
    actions = ('flag_comments', 'approve_comments', 'remove_comments')
    def get_actions(self, request):
        """Restrict the available bulk actions to the requester's permissions."""
        actions = super(CommentsAdmin, self).get_actions(request)
        # Only superusers should be able to delete the comments from the DB.
        if not request.user.is_superuser:
            actions.pop('delete_selected', None)
        if not request.user.has_perm('commentary.can_moderate'):
            actions.pop('approve_comments', None)
            actions.pop('remove_comments', None)
        return actions
    def flag_comments(self, request, queryset):
        """Bulk action: flag the selected comments."""
        self._bulk_flag(
            request, queryset, perform_flag,
            lambda n: ngettext('flagged', 'flagged', n)
        )
    flag_comments.short_description = _('Flag selected comments')
    def approve_comments(self, request, queryset):
        """Bulk action: approve the selected comments."""
        self._bulk_flag(
            request, queryset, perform_approve,
            lambda n: ngettext('approved', 'approved', n)
        )
    approve_comments.short_description = _('Approve selected comments')
    def remove_comments(self, request, queryset):
        """Bulk action: remove (soft-delete) the selected comments."""
        self._bulk_flag(
            request, queryset, perform_delete,
            lambda n: ngettext('removed', 'removed', n)
        )
    remove_comments.short_description = _('Remove selected comments')
    def _bulk_flag(self, request, queryset, action, done_message):
        """
        Flag, approve, or remove some comments from an admin action.
        Actually calls the `action` argument to perform the heavy lifting.
        """
        n_comments = 0
        for comment in queryset:
            action(request, comment)
            n_comments += 1
        msg = ngettext(
            '%(count)s comment was successfully %(action)s.',
            '%(count)s comments were successfully %(action)s.', n_comments
        )
        self.message_user(request, msg % {
            'count': n_comments, 'action': done_message(n_comments)
        })
# Only register the default admin if the model is the built-in comment model
# (this won't be true if there's a custom comment app).
Klass = get_model()
if Klass._meta.app_label == 'commentary':
    admin.site.register(Klass, CommentsAdmin)
|
en
| 0.875428
|
# Only superusers should be able to delete the comments from the DB. Flag, approve, or remove some comments from an admin action. Actually calls the `action` argument to perform the heavy lifting. # Only register the default admin if the model is the built-in comment model # (this won't be true if there's a custom comment app).
| 2.057128
| 2
|
pic/model/register.py
|
hankyul2/pytorch-image-classification
| 0
|
6626086
|
import sys
from inspect import signature, _empty
from typing import Iterable

# Registry mapping model name -> (constructor fn, resolved default kwargs, etc-config).
_name_to_model = {}
# Accumulated (model_name, argument_list) pairs used to build argparse CLIs.
_argument_of_model = []


def register_model(fn):
    """Register every model described by ``model_config`` in *fn*'s module.

    The defining module must expose a ``model_config`` dict mapping a model
    name to ``{'parameter': {...}, 'etc': {...}}``.  Each entry is validated
    against *fn*'s signature and recorded both for ``create_model`` and for
    the argument parser.  Returns *fn* so this works as a decorator.
    """
    # 1. load config dict from the module that defines fn
    module = sys.modules[fn.__module__]
    config = getattr(module, 'model_config', None)
    if config is None:
        raise ValueError(f"please define model_config dictionary for {fn.__name__}")
    for model_name, model_config in config.items():
        if model_name in _name_to_model:
            raise ValueError(f"please change {model_name} to another name, it already exists in model_list")
        # 2. parse for argument parser
        parse_for_argparser(fn, model_config, model_name)
        # 3. parse for create_model
        parse_for_creator(fn, model_config, model_name)
    return fn


def parse_for_creator(fn, model_config, model_name):
    """Resolve the full default-kwargs dict for *model_name* and register it."""
    model_parameter = {k: v.default for k, v in signature(fn).parameters.items()}
    parameter, etc = model_config['parameter'], model_config['etc']
    for k, v in parameter.items():
        if k not in model_parameter:
            raise ValueError(f"{k} does not appear in {fn.__name__}, please update signature of model")
        model_parameter[k] = v
    # BUG FIX: check the *values* -- _empty marks a parameter that still has
    # no default; the previous check on the keys could never fire.
    assert _empty not in model_parameter.values(), \
        f"some required argument in {fn.__name__} does not in config"
    _name_to_model[model_name] = (fn, model_parameter, etc)


def parse_for_argparser(fn, model_config, model_name):
    """Derive an argparse-friendly (name, default, type, nargs) list for the model."""
    model_parameter = {k: (v.default, v.annotation) for k, v in signature(fn).parameters.items()}
    parameter = model_config['parameter']
    argument_list = []
    for name, (default, val_type) in model_parameter.items():
        if val_type in [int, float, str]:
            parse_option = (val_type, None)
        # BUG FIX: the old code tested the builtin `type` instead of
        # `val_type`, so Iterable annotations were silently skipped.
        elif val_type in [Iterable[int], Iterable[float], Iterable[str]]:
            index = [Iterable[int], Iterable[float], Iterable[str]].index(val_type)
            val_type = [int, float, str][index]
            parse_option = (val_type, '+')  # nargs='+' for list-valued options
        # Todo: support for boolean type
        else:
            parse_option = None  # unsupported annotation: skip this argument
        if parse_option:
            default = parameter.get(name, default)
            argument_list.append((name, default) + parse_option)
    _argument_of_model.append((model_name, argument_list))


def get_argument_of_model():
    """Return the accumulated (model_name, argument_list) pairs."""
    return _argument_of_model


def create_model(model_name, **kwargs):
    """Instantiate the registered model *model_name*.

    Keyword arguments override the registered defaults, but only for keys
    that exist in the model's signature.  Raises ValueError for an
    unregistered name.
    """
    # BUG FIX: the default must be a 3-tuple to match the unpacking; the old
    # 2-tuple default raised an unpacking ValueError instead of the
    # intended "not found" error below.
    creator, parameter, etc = _name_to_model.get(model_name, (None, None, None))
    if creator is None:
        raise ValueError(f"{model_name} is not found in list of models")
    # 2. update parameter by kwargs only if it appears in model config
    parameter = {k: kwargs.get(k, v) for k, v in parameter.items()}
    # Todo: Add pretrain handler
    return creator(**parameter)
|
import sys
from inspect import signature, _empty
from typing import Iterable

# Registry mapping model name -> (constructor fn, resolved default kwargs, etc-config).
_name_to_model = {}
# Accumulated (model_name, argument_list) pairs used to build argparse CLIs.
_argument_of_model = []


def register_model(fn):
    """Register every model described by ``model_config`` in *fn*'s module.

    The defining module must expose a ``model_config`` dict mapping a model
    name to ``{'parameter': {...}, 'etc': {...}}``.  Each entry is validated
    against *fn*'s signature and recorded both for ``create_model`` and for
    the argument parser.  Returns *fn* so this works as a decorator.
    """
    # 1. load config dict from the module that defines fn
    module = sys.modules[fn.__module__]
    config = getattr(module, 'model_config', None)
    if config is None:
        raise ValueError(f"please define model_config dictionary for {fn.__name__}")
    for model_name, model_config in config.items():
        if model_name in _name_to_model:
            raise ValueError(f"please change {model_name} to another name, it already exists in model_list")
        # 2. parse for argument parser
        parse_for_argparser(fn, model_config, model_name)
        # 3. parse for create_model
        parse_for_creator(fn, model_config, model_name)
    return fn


def parse_for_creator(fn, model_config, model_name):
    """Resolve the full default-kwargs dict for *model_name* and register it."""
    model_parameter = {k: v.default for k, v in signature(fn).parameters.items()}
    parameter, etc = model_config['parameter'], model_config['etc']
    for k, v in parameter.items():
        if k not in model_parameter:
            raise ValueError(f"{k} does not appear in {fn.__name__}, please update signature of model")
        model_parameter[k] = v
    # BUG FIX: check the *values* -- _empty marks a parameter that still has
    # no default; the previous check on the keys could never fire.
    assert _empty not in model_parameter.values(), \
        f"some required argument in {fn.__name__} does not in config"
    _name_to_model[model_name] = (fn, model_parameter, etc)


def parse_for_argparser(fn, model_config, model_name):
    """Derive an argparse-friendly (name, default, type, nargs) list for the model."""
    model_parameter = {k: (v.default, v.annotation) for k, v in signature(fn).parameters.items()}
    parameter = model_config['parameter']
    argument_list = []
    for name, (default, val_type) in model_parameter.items():
        if val_type in [int, float, str]:
            parse_option = (val_type, None)
        # BUG FIX: the old code tested the builtin `type` instead of
        # `val_type`, so Iterable annotations were silently skipped.
        elif val_type in [Iterable[int], Iterable[float], Iterable[str]]:
            index = [Iterable[int], Iterable[float], Iterable[str]].index(val_type)
            val_type = [int, float, str][index]
            parse_option = (val_type, '+')  # nargs='+' for list-valued options
        # Todo: support for boolean type
        else:
            parse_option = None  # unsupported annotation: skip this argument
        if parse_option:
            default = parameter.get(name, default)
            argument_list.append((name, default) + parse_option)
    _argument_of_model.append((model_name, argument_list))


def get_argument_of_model():
    """Return the accumulated (model_name, argument_list) pairs."""
    return _argument_of_model


def create_model(model_name, **kwargs):
    """Instantiate the registered model *model_name*.

    Keyword arguments override the registered defaults, but only for keys
    that exist in the model's signature.  Raises ValueError for an
    unregistered name.
    """
    # BUG FIX: the default must be a 3-tuple to match the unpacking; the old
    # 2-tuple default raised an unpacking ValueError instead of the
    # intended "not found" error below.
    creator, parameter, etc = _name_to_model.get(model_name, (None, None, None))
    if creator is None:
        raise ValueError(f"{model_name} is not found in list of models")
    # 2. update parameter by kwargs only if it appears in model config
    parameter = {k: kwargs.get(k, v) for k, v in parameter.items()}
    # Todo: Add pretrain handler
    return creator(**parameter)
|
en
| 0.341263
|
# 1. load config dict # 2. parse for argument parser # 3. parse for create_model # Todo: support for boolean type # 1. load model and config # 2. update parameter by kwargs only if it appears in model config # Todo: Add pretrain handler
| 2.735039
| 3
|
atlssncli/issuehandler.py
|
bkryza/atlssncli
| 1
|
6626087
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import click
import logging as LOG
import json
from humanfriendly.tables import format_pretty_table, format_robust_table
from requests.auth import HTTPBasicAuth
from .config import Config
from .commandhandler import CommandHandler
from .querybuilder import QueryBuilder
from .rest.jiraclient import JiraClient
from . import util
class IssueHandler(CommandHandler):
    """Handle JIRA issue subcommands: list issue types, show and assign issues."""

    def __init__(self, config):
        super(IssueHandler, self).__init__(config)
        self.client = JiraClient(config.get_endpoint("jira"))
        self.client._set_auth(HTTPBasicAuth(*config.get_auth()))

    def get_issue_types(self, project_id=None):
        """Show issue types for *project_id* (defaults to the configured project)."""
        if not project_id:
            project_id = self.config.get_project()
        if not project_id:
            LOG.error("Cannot list issue types without project_id")
            # BUG FIX: raising a plain string is a TypeError on Python 3;
            # raise a proper exception instead.
            raise ValueError("Cannot list issue types without project_id")
        LOG.debug("Getting issue types for project: %s", project_id)
        res = self.client.get_issue_types(project_id)
        self._render_issue_types(res)

    def get_issue(self, issue_id):
        """Show a single issue."""
        LOG.debug("Getting issue: %s", issue_id)
        res = self.client.get_issue(issue_id)
        self._render_issues([res])

    def assign_issue(self, issue_id, assignee):
        """Assign *issue_id* to *assignee*."""
        # BUG FIX: the format string only had one placeholder for two
        # arguments, which makes the logging call itself raise an error.
        LOG.debug("Assigning issue: %s to %s", issue_id, assignee)
        self.client.assign_issue(issue_id, {"name": assignee})

    def _render_issue_types(self, issuetypes):
        """Render issue types as a table."""
        column_names = ["ID", "Name", "Description"]
        values = []
        for it in issuetypes:
            values.append([str(it["id"]), it["name"], it["description"]])
        click.echo(format_pretty_table(values, column_names))

    def _render_issues(self, issues):
        """Render issues; use a robust (vertical) layout for a single issue."""
        column_names = [
            "ID",
            "Key",
            "Summary",
            "Status",
            "Assignee",
            "Progress",
        ]
        values = []
        for issue in issues:
            values.append(
                [
                    str(issue["id"]),
                    str(issue["key"]),
                    str(util.get(issue, "-", "fields", "summary")),
                    str(util.get(issue, "-", "fields", "status", "name")),
                    str(
                        util.get(
                            issue, "Unassigned", "fields", "assignee", "key"
                        )
                    ),
                    str(util.get(issue, "-", "fields", "progress", "progress")),
                ]
            )
        if len(values) > 1:
            click.echo(format_pretty_table(values, column_names))
        else:
            click.echo(format_robust_table(values, column_names))
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import click
import logging as LOG
import json
from humanfriendly.tables import format_pretty_table, format_robust_table
from requests.auth import HTTPBasicAuth
from .config import Config
from .commandhandler import CommandHandler
from .querybuilder import QueryBuilder
from .rest.jiraclient import JiraClient
from . import util
class IssueHandler(CommandHandler):
    """Handle JIRA issue subcommands: list issue types, show and assign issues."""
    def __init__(self, config):
        super(IssueHandler, self).__init__(config)
        self.client = JiraClient(config.get_endpoint("jira"))
        self.client._set_auth(HTTPBasicAuth(*config.get_auth()))
    def get_issue_types(self, project_id=None):
        """Show issue types."""
        if not project_id:
            project_id = self.config.get_project()
        if not project_id:
            LOG.error("Cannot list issue types without project_id")
            # BUG(review): raising a plain string is a TypeError on
            # Python 3 -- should raise ValueError instead.
            raise "Cannot list issue types without project_id"
        LOG.debug("Getting issue types for project: %s", project_id)
        res = self.client.get_issue_types(project_id)
        self._render_issue_types(res)
    def get_issue(self, issue_id):
        """Show issue."""
        LOG.debug("Getting issue: %s", issue_id)
        res = self.client.get_issue(issue_id)
        self._render_issues([res])
    def assign_issue(self, issue_id, assignee):
        """Assign issue."""
        # BUG(review): one %s placeholder but two arguments -- the logging
        # call itself will raise a formatting error.
        LOG.debug("Assigning issue: %s", issue_id, assignee)
        self.client.assign_issue(issue_id, {"name": assignee})
    def _render_issue_types(self, issuetypes):
        """Render issue types as a table."""
        column_names = ["ID", "Name", "Description"]
        values = []
        for it in issuetypes:
            values.append([str(it["id"]), it["name"], it["description"]])
        click.echo(format_pretty_table(values, column_names))
    def _render_issues(self, issues):
        """Render issues; a robust (vertical) layout is used for a single issue."""
        column_names = [
            "ID",
            "Key",
            "Summary",
            "Status",
            "Assignee",
            "Progress",
        ]
        values = []
        for issue in issues:
            values.append(
                [
                    str(issue["id"]),
                    str(issue["key"]),
                    str(util.get(issue, "-", "fields", "summary")),
                    str(util.get(issue, "-", "fields", "status", "name")),
                    str(
                        util.get(
                            issue, "Unassigned", "fields", "assignee", "key"
                        )
                    ),
                    str(util.get(issue, "-", "fields", "progress", "progress")),
                ]
            )
        if len(values) > 1:
            click.echo(format_pretty_table(values, column_names))
        else:
            click.echo(format_robust_table(values, column_names))
|
en
| 0.841704
|
# -*- coding: utf-8 -*- # # Copyright 2019 <NAME> <<EMAIL>> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Show issue types. Show issue. Assign issue. Render issue types. Render sprint issues.
| 2.153785
| 2
|
Data Processing.py
|
biof309/group-project-ajk
| 0
|
6626088
|
<gh_stars>0
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import excel file and create dataframe called NCHS
# (despite the comment, the file read is a CSV)
NCHS = pd.read_csv('NCHS_-_Leading_Causes_of_Death__United_States.csv')
# examine the data
NCHS.info()
'''
Select the column names of interest.
Group this new dataset by year and by cause name to create the x-axis and different types in each bar in bar plot
Sum this new group to create the y-axis in bar plot
'''
year = NCHS[['Cause Name', 'Year', 'Deaths']]
year_group = year.groupby(['Year','Cause Name'])
year_group_total = year_group.sum()
"""
print(year_group_total)
print(year_group_total.unstack()) -> Print this code to see what the individual statistics that is being summarized in graph
"""
"""
Create a bar graph of the group of deaths due to cause names in each year
"""
# Stacked bars: one bar per year, one colored segment per cause of death.
# NOTE(review): the legend labels are hard-coded; confirm they match the
# alphabetical column order produced by unstack().
barplot = year_group_total.unstack().plot(kind='bar',stacked=True,title='Number of Deaths by Cause Name each year (1999-2016)',figsize=(9,7))
barplot.set_xlabel('Year')
barplot.set_ylabel('Deaths')
barplot.set_ylim((0,10000000))
barplot.legend(["All causes","Alzheimer's disease","CLRD","Cancer", "Diabetes","Heart disease", "Influenza and pneumonia", "Kidney disease", "Stroke", "Suicide", "Unintentional injuries"])
plt.show()
|
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import excel file and create dataframe called NCHS
# (despite the comment, the file read is a CSV)
NCHS = pd.read_csv('NCHS_-_Leading_Causes_of_Death__United_States.csv')
# examine the data
NCHS.info()
'''
Select the column names of interest.
Group this new dataset by year and by cause name to create the x-axis and different types in each bar in bar plot
Sum this new group to create the y-axis in bar plot
'''
year = NCHS[['Cause Name', 'Year', 'Deaths']]
year_group = year.groupby(['Year','Cause Name'])
year_group_total = year_group.sum()
"""
print(year_group_total)
print(year_group_total.unstack()) -> Print this code to see what the individual statistics that is being summarized in graph
"""
"""
Create a bar graph of the group of deaths due to cause names in each year
"""
# Stacked bars: one bar per year, one colored segment per cause of death.
# NOTE(review): the legend labels are hard-coded; confirm they match the
# alphabetical column order produced by unstack().
barplot = year_group_total.unstack().plot(kind='bar',stacked=True,title='Number of Deaths by Cause Name each year (1999-2016)',figsize=(9,7))
barplot.set_xlabel('Year')
barplot.set_ylabel('Deaths')
barplot.set_ylim((0,10000000))
barplot.legend(["All causes","Alzheimer's disease","CLRD","Cancer", "Diabetes","Heart disease", "Influenza and pneumonia", "Kidney disease", "Stroke", "Suicide", "Unintentional injuries"])
plt.show()
|
en
| 0.844558
|
# import packages # import excel file and create dataframe called NCHS # examine the data Select the column names of interest. Group this new dataset by year and by cause name to create the x-axis and different types in each bar in bar plot Sum this new group to create the y-axis in bar plot print(year_group_total) print(year_group_total.unstack()) -> Print this code to see what the individual statistics that is being summarized in graph Create a bar graph of the group of deaths due to cause names in each year
| 3.887026
| 4
|
VAE_fit.py
|
BrianNingUT/ArbFreeIV-VAE
| 2
|
6626089
|
<filename>VAE_fit.py
import torch
from torch import nn
from torch.nn import functional as F
import random
from typing import List
from torch import Tensor
import numpy as np
class VAE(nn.Module):
    """
    The main VAE class used to fit the VAE model, class parameters include:
    beta: The beta value used in the beta VAE, constant across epochs
    latent_dim: The number of latent dimensions (z)
    in_channels: The dimension of input parameters (x)
    norm_mean: normalized inputs mean, used in decoding only during evaluation mode
    norm_std: normalized inputs standard deviation, used in decoding only during evaluation mode
    """
    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 beta: float = 1.0,
                 hidden_dims: List = None,
                 norm_mean = None,
                 norm_std = None,
                 ) -> None:
        super().__init__()
        self.beta = beta
        self.latent_dim = latent_dim
        self.in_channels = in_channels
        self.norm_mean = norm_mean
        self.norm_std = norm_std
        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Build Encoder: a stack of Linear+LeakyReLU layers; the local
        # `in_channels` is rebound layer by layer (self.in_channels keeps
        # the original input width).
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Linear(in_channels, h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)
        self.fc_mu = nn.Linear(hidden_dims[-1], latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1], latent_dim)
        # Build Decoder (mirror of the encoder).
        # NOTE(review): reverse() mutates the caller's hidden_dims list
        # in place -- a side effect worth confirming is intended.
        modules = []
        hidden_dims.reverse()
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[0])
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.Linear(hidden_dims[i], hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)
        self.final_layer = nn.Linear(hidden_dims[-1], self.in_channels)
    def encode(self, input) -> List[Tensor]:
        """
        Encodes the input by passing through the encoder network
        and returns the latent values
        """
        # NOTE(review): this casts the encoder weights to float32 on every
        # call; presumably a dtype workaround -- confirm it is still needed.
        self.encoder = self.encoder.float()
        result = self.encoder(input)
        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)
        return [mu, log_var]
    def decode(self, z) -> Tensor:
        """
        Maps the given latent values onto the parameter space, if in eval mode renormalize
        """
        result = self.decoder_input(z)
        result = self.decoder(result)
        result = self.final_layer(result)
        # Un-normalize if decoding in eval mode
        if not self.training:
            if self.norm_mean is None or self.norm_std is None:
                raise Exception('Did not set norm constants before eval')
            else:
                result = self.norm_std * result + self.norm_mean
        return result
    def reparameterize(self, mu, logvar) -> Tensor:
        """
        Reparameterization trick to sample from N(mu, var) from
        N(0,1).
        """
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu
    def forward(self, input, **kwargs) -> List[Tensor]:
        # Returns [reconstruction, original input, mu, log_var] so the loss
        # can be computed by the caller.
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]
    def loss_function(self,
                      recons,
                      input,
                      mu,
                      log_var,
                      ) -> dict:
        """
        Computes the VAE loss function.
        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}
        """
        # MSE is averaged over all elements, so scale by in_channels to get
        # a per-sample reconstruction term.
        recons_loss = F.mse_loss(recons, input)*self.in_channels
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
        loss = recons_loss + self.beta * kld_loss
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}
    def sample(self, num_samples) -> Tensor:
        """
        Samples from the latent space and return the corresponding parameter map
        """
        z = torch.randn(num_samples, self.latent_dim)
        samples = self.decode(z)
        return samples
    def generate(self, z) -> Tensor:
        """
        Given an latent code z, returns the reconstructed parameter set
        """
        return self.decode(z)
def fit_VAE(full_data, epochs, latent_dim, hidden_dims, batch_size, lr, beta, weight_decay):
"""
Function used to fit a VAE based on provided parameters:
full_data: historical data used for training
epochs: number of epochs to run
latent_dim: number of latent dimensions
hidden_dims: list of hidden dimensions of encoder/decoder networks
batch_size: size of random batch to be sampled from historical data
lr: learning rate
beta: fixed beta vlue
weight_decay: Decay factor used in optimizer (AdamW)
"""
running_losses = []
losses = []
vae = VAE(full_data.size(1), latent_dim, beta, hidden_dims)
optimizer = torch.optim.AdamW(vae.parameters(), lr=lr, weight_decay = weight_decay)
for epoch in range(epochs):
batch = torch.stack(random.sample(list(full_data), min(batch_size, full_data.size(0))), axis = 0)
optimizer.zero_grad()
recon, input, mu, log_var = vae(batch)
loss = vae.loss_function(recon, input, mu, log_var)
loss['loss'].backward()
optimizer.step()
running_losses.append(loss['loss'].item())
losses.append({'loss': loss['loss'].item(), 'Reconstruction_Loss':loss['Reconstruction_Loss'].item(), 'KLD':loss['KLD'].item()})
return losses, vae
|
<filename>VAE_fit.py
import torch
from torch import nn
from torch.nn import functional as F
import random
from typing import List
from torch import Tensor
import numpy as np
class VAE(nn.Module):
    """Beta-VAE used to model a distribution over parameter vectors.

    Attributes:
        beta: weight of the KL-divergence term (constant across epochs).
        latent_dim: number of latent dimensions (z).
        in_channels: dimension of the input parameter vector (x).
        norm_mean: mean used to un-normalize decoder output (eval mode only).
        norm_std: std used to un-normalize decoder output (eval mode only).
    """
    def __init__(self,
                 in_channels: int,
                 latent_dim: int,
                 beta: float = 1.0,
                 hidden_dims: List = None,
                 norm_mean = None,
                 norm_std = None,
                 ) -> None:
        super().__init__()
        self.beta = beta
        self.latent_dim = latent_dim
        self.in_channels = in_channels
        self.norm_mean = norm_mean
        self.norm_std = norm_std
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Fixed: work on a copy -- the original reversed the caller's list
        # in place below, silently mutating the `hidden_dims` argument.
        hidden_dims = list(hidden_dims)
        # Build Encoder: a stack of Linear + LeakyReLU layers.
        modules = []
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Linear(in_channels, h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)
        self.fc_mu = nn.Linear(hidden_dims[-1], latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1], latent_dim)
        # Build Decoder: the mirror image of the encoder.
        modules = []
        hidden_dims.reverse()
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[0])
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.Linear(hidden_dims[i], hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)
        self.final_layer = nn.Linear(hidden_dims[-1], self.in_channels)
    def encode(self, input) -> List[Tensor]:
        """Encode `input` and return the latent Gaussian's [mu, log_var]."""
        # Kept from the original: force float32 weights before encoding.
        self.encoder = self.encoder.float()
        result = self.encoder(input)
        # Split the result into mu and var components
        # of the latent Gaussian distribution
        mu = self.fc_mu(result)
        log_var = self.fc_var(result)
        return [mu, log_var]
    def decode(self, z) -> Tensor:
        """Map latent values `z` onto parameter space.

        In eval mode the output is un-normalized with the stored norm
        constants; raises if they were never set.
        """
        result = self.decoder_input(z)
        result = self.decoder(result)
        result = self.final_layer(result)
        # Un-normalize if decoding in eval mode
        if not self.training:
            if self.norm_mean is None or self.norm_std is None:
                raise Exception('Did not set norm constants before eval')
            else:
                result = self.norm_std * result + self.norm_mean
        return result
    def reparameterize(self, mu, logvar) -> Tensor:
        """Reparameterization trick: sample N(mu, var) via eps ~ N(0, 1)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return eps * std + mu
    def forward(self, input, **kwargs) -> List[Tensor]:
        """Full pass: returns [reconstruction, input, mu, log_var]."""
        mu, log_var = self.encode(input)
        z = self.reparameterize(mu, log_var)
        return [self.decode(z), input, mu, log_var]
    def loss_function(self,
                      recons,
                      input,
                      mu,
                      log_var,
                      ) -> dict:
        r"""Compute the beta-VAE loss (raw docstring: the original's
        non-raw ``\mu``/``\sigma`` escapes were invalid escape sequences).

        KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2}

        Returns a dict with 'loss', 'Reconstruction_Loss' and 'KLD'
        (the KLD entry is negated, matching the original convention).
        """
        recons_loss = F.mse_loss(recons, input)*self.in_channels
        kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0)
        loss = recons_loss + self.beta * kld_loss
        return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':-kld_loss}
    def sample(self, num_samples) -> Tensor:
        """Draw `num_samples` latent vectors and decode them."""
        z = torch.randn(num_samples, self.latent_dim)
        samples = self.decode(z)
        return samples
    def generate(self, z) -> Tensor:
        """Given a latent code `z`, return the reconstructed parameter set."""
        return self.decode(z)
def fit_VAE(full_data, epochs, latent_dim, hidden_dims, batch_size, lr, beta, weight_decay):
    """Fit a VAE on historical data and return (per-epoch losses, model).

    Args:
        full_data: historical data used for training, shape (N, in_channels).
        epochs: number of epochs to run.
        latent_dim: number of latent dimensions.
        hidden_dims: list of hidden dimensions of the encoder/decoder networks.
        batch_size: size of the random batch sampled from historical data.
        lr: learning rate.
        beta: fixed beta value.
        weight_decay: decay factor used in the optimizer (AdamW).

    Returns:
        losses: one dict per epoch with 'loss', 'Reconstruction_Loss', 'KLD'.
        vae: the trained VAE model.
    """
    losses = []
    vae = VAE(full_data.size(1), latent_dim, beta, hidden_dims)
    optimizer = torch.optim.AdamW(vae.parameters(), lr=lr, weight_decay=weight_decay)
    for epoch in range(epochs):
        # Sample a random mini-batch (without replacement) each epoch;
        # capped at the dataset size so small datasets still work.
        batch = torch.stack(random.sample(list(full_data), min(batch_size, full_data.size(0))), dim=0)
        optimizer.zero_grad()
        recon, inputs, mu, log_var = vae(batch)
        loss = vae.loss_function(recon, inputs, mu, log_var)
        loss['loss'].backward()
        optimizer.step()
        # Record scalar copies so the graph is not kept alive per epoch.
        losses.append({'loss': loss['loss'].item(),
                       'Reconstruction_Loss': loss['Reconstruction_Loss'].item(),
                       'KLD': loss['KLD'].item()})
    return losses, vae
|
en
| 0.576717
|
The main VAE class used to fit the VAE model, class parameters include: beta: The beta value used in the beta VAE, constant across epochs latent_dim: The number of latent dimensions (z) in_channels: The dimension of input parameters (x) norm_mean: normalized inputs mean, used in decoding only during evaluation mode norm_std: normalized inputs standard deviation, used in decoding only during evaluation mode # Build Encoder # Build Decoder Encodes the input by passing through the encoder network and returns the latent values # Split the result into mu and var components # of the latent Gaussian distribution Maps the given latent values onto the parameter space, if in eval mode renormalize # Un-normalize if decoding in eval mode Reparameterization trick to sample from N(mu, var) from N(0,1). Computes the VAE loss function. KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2} Samples from the latent space and return the corresponding parameter map Given an latent code z, returns the reconstructed parameter set Function used to fit a VAE based on provided parameters: full_data: historical data used for training epochs: number of epochs to run latent_dim: number of latent dimensions hidden_dims: list of hidden dimensions of encoder/decoder networks batch_size: size of random batch to be sampled from historical data lr: learning rate beta: fixed beta vlue weight_decay: Decay factor used in optimizer (AdamW)
| 2.910861
| 3
|
study/python/OtherTutorials/decorator_example.py
|
YoungsAppWorkshop/dev_environment_setup
| 0
|
6626090
|
# Decorators
from functools import wraps
def my_logger(orig_func):
    """Decorator: log every call's positional/keyword arguments to
    a file named '<funcname>.log'."""
    import logging
    log_file = '{}.log'.format(orig_func.__name__)
    logging.basicConfig(filename=log_file, level=logging.INFO)
    @wraps(orig_func)
    def logged(*args, **kwargs):
        message = 'Ran with args: {}, and kwargs: {}'.format(args, kwargs)
        logging.info(message)
        return orig_func(*args, **kwargs)
    return logged
def my_timer(orig_func):
    """Decorator: print how long each call of *orig_func* takes."""
    import time
    @wraps(orig_func)
    def timed(*args, **kwargs):
        started = time.time()
        result = orig_func(*args, **kwargs)
        elapsed = time.time() - started
        print('{} ran in: {} sec'.format(orig_func.__name__, elapsed))
        return result
    return timed
import time
# Stacked decorators apply bottom-up: my_timer wraps display_info first,
# then my_logger wraps the timed version; @wraps preserves the original
# __name__, so the log file is still named 'display_info.log'.
@my_logger
@my_timer
def display_info(name, age):
    # Demo function: sleep one second so the timer has something to measure,
    # then echo the arguments.
    time.sleep(1)
    print('display_info ran with arguments ({}, {})'.format(name, age))
# Module-level demo call: runs (and sleeps ~1 s) on import.
display_info('Tom', 22)
|
# Decorators
from functools import wraps
def my_logger(orig_func):
import logging
logging.basicConfig(filename='{}.log'.format(orig_func.__name__),
level=logging.INFO)
@wraps(orig_func)
def wrapper(*args, **kwargs):
logging.info(
'Ran with args: {}, and kwargs: {}'.format(args, kwargs))
return orig_func(*args, **kwargs)
return wrapper
def my_timer(orig_func):
import time
@wraps(orig_func)
def wrapper(*args, **kwargs):
t1 = time.time()
result = orig_func(*args, **kwargs)
t2 = time.time() - t1
print('{} ran in: {} sec'.format(orig_func.__name__, t2))
return result
return wrapper
import time
@my_logger
@my_timer
def display_info(name, age):
time.sleep(1)
print('display_info ran with arguments ({}, {})'.format(name, age))
display_info('Tom', 22)
|
en
| 0.649361
|
# Decorators
| 3.22953
| 3
|
Python_Aulas_3,6+/07_Formatação _de_strings.py
|
PASSINP/Python_3-Django
| 1
|
6626091
|
<reponame>PASSINP/Python_3-Django
nome = 'Paulo'
idade = 21
altura = 1.89
# There are several ways to interpolate values into a string.  One of them,
# added in Python 3.6, is the f-string: prefix the quotes with `f` and put
# the variable you want to interpolate between braces, {like_this}.
print(f' Ola {nome}, me disseram que você tem {idade} anos e {altura} metros de altura')
x = 3.14159265359
g = 8001
# Format specs inside the braces can also shorten long numbers (or strings):
# `:.2f` keeps two decimal places, `:.2s` keeps the first two characters.
print(f'{x:.2f}')
print(f'{nome:.2s}')
# On Python versions older than 3.6 the same interpolation can be done with
# str.format: put empty braces in the text and pass the variables, in order,
# to .format().
# NOTE(review): the original author was unsure whether this also applies to
# Python 2 -- verify if Python 2 support matters.
print('{} tem {} anos e tem {} de altura'.format(nome, idade, altura))
# With .format() you can also reference arguments by position inside the
# braces; positions start at 0, not 1.
print('{0} {2} {2} tem {1} {0} anos e tem {2} de altura'.format(nome, idade, altura))
|
nome = 'Paulo'
idade = 21
altura = 1.89
# Exsitem algumas manerieas de se fazer concatenação. Uma delas, que foi
# implementada na Versão 3.6 do Python, se chama f string.
# Para se fazer isso, se começa colocando f antes das aspas. Em seguida você
# coloca entre chaves a variavel que deseja utilizar {assim}
print(f' Ola {nome}, me disseram que você tem {idade} anos e {altura} metros de altura')
x = 3.14159265359
g = 8001
# Outra coisa que podemos utilizar na formatação é dimuir numeros(ou palavras)
# muito longos. Utiliza junto a f string.
print(f'{x:.2f}')
print(f'{nome:.2s}')
# Caso seja necessaria uma concatenação em uma versão mais antiga do Python da
# para se fazer este tipo de concatenação. Aviso, não sei se vale para Python 2.
# Para fazer isso iremos colocar chaves dentro do texto, e com o auxilio da função
# ajudante .format iremos definir as variaveisa serem utilizadas naquele espaço
# e sua ordem
print('{} tem {} anos e tem {} de altura'.format(nome, idade, altura))
# Outra coisa que pode ser feita caso se utilize este tipo de concatenação é
# colocar a posição do que esta dentro do .format
# E vale ressaltar que posição começa em 0 e não 1
print('{0} {2} {2} tem {1} {0} anos e tem {2} de altura'.format(nome, idade, altura))
|
pt
| 0.997477
|
# Exsitem algumas manerieas de se fazer concatenação. Uma delas, que foi # implementada na Versão 3.6 do Python, se chama f string. # Para se fazer isso, se começa colocando f antes das aspas. Em seguida você # coloca entre chaves a variavel que deseja utilizar {assim} # Outra coisa que podemos utilizar na formatação é dimuir numeros(ou palavras) # muito longos. Utiliza junto a f string. # Caso seja necessaria uma concatenação em uma versão mais antiga do Python da # para se fazer este tipo de concatenação. Aviso, não sei se vale para Python 2. # Para fazer isso iremos colocar chaves dentro do texto, e com o auxilio da função # ajudante .format iremos definir as variaveisa serem utilizadas naquele espaço # e sua ordem # Outra coisa que pode ser feita caso se utilize este tipo de concatenação é # colocar a posição do que esta dentro do .format # E vale ressaltar que posição começa em 0 e não 1
| 4.262618
| 4
|
src/ramstk/models/dbtables/commondb_stakeholders_table.py
|
weibullguy/ramstk
| 4
|
6626092
|
# -*- coding: utf-8 -*-
#
# ramstk.models.commondb.stakeholders.table.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Stakeholders Table Model."""
# Standard Library Imports
from typing import Type
# RAMSTK Local Imports
from ..dbrecords import RAMSTKStakeholdersRecord
from .basetable import RAMSTKBaseTable
class RAMSTKStakeholdersTable(RAMSTKBaseTable):
    """Contain the attributes and methods of the Stakeholders data manager."""
    # Define private dict class attributes.
    # Define private list class attributes.
    # Define private scalar class attributes.
    # DB column/table names and the select message for this table.
    _db_id_colname = "fld_stakeholders_id"
    _db_tablename = "ramstk_stakeholders"
    _select_msg = "request_get_stakeholders"
    _tag = "stakeholders"
    # Define public dict class attributes.
    # Define public list class attributes.
    # Define public scalar class attributes.
    def __init__(self, **kwargs) -> None:
        """Initialize a Stakeholders data manager instance."""
        RAMSTKBaseTable.__init__(self, **kwargs)
        # Initialize private dictionary attributes.
        # Initialize private list attributes.
        self._lst_id_columns = [
            "stakeholders_id",
        ]
        # Initialize private scalar attributes.
        # Record class used to create/read rows of this table.
        self._record: Type[RAMSTKStakeholdersRecord] = RAMSTKStakeholdersRecord
        # Initialize public dictionary attributes.
        # Initialize public list attributes.
        # Initialize public scalar attributes.
        self.pkey = "stakeholders_id"
        # Subscribe to PyPubSub messages.
|
# -*- coding: utf-8 -*-
#
# ramstk.models.commondb.stakeholders.table.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com
"""Stakeholders Table Model."""
# Standard Library Imports
from typing import Type
# RAMSTK Local Imports
from ..dbrecords import RAMSTKStakeholdersRecord
from .basetable import RAMSTKBaseTable
class RAMSTKStakeholdersTable(RAMSTKBaseTable):
"""Contain the attributes and methods of the Option data manager."""
# Define private dict class attributes.
# Define private list class attributes.
# Define private scalar class attributes.
_db_id_colname = "fld_stakeholders_id"
_db_tablename = "ramstk_stakeholders"
_select_msg = "request_get_stakeholders"
_tag = "stakeholders"
# Define public dict class attributes.
# Define public list class attributes.
# Define public scalar class attributes.
def __init__(self, **kwargs) -> None:
"""Initialize a Options data manager instance."""
RAMSTKBaseTable.__init__(self, **kwargs)
# Initialize private dictionary attributes.
# Initialize private list attributes.
self._lst_id_columns = [
"stakeholders_id",
]
# Initialize private scalar attributes.
self._record: Type[RAMSTKStakeholdersRecord] = RAMSTKStakeholdersRecord
# Initialize public dictionary attributes.
# Initialize public list attributes.
# Initialize public scalar attributes.
self.pkey = "stakeholders_id"
# Subscribe to PyPubSub messages.
|
en
| 0.599126
|
# -*- coding: utf-8 -*- # # ramstk.models.commondb.stakeholders.table.py is part of The RAMSTK Project # # All rights reserved. # Copyright since 2007 Doyle "weibullguy" Rowland doyle.rowland <AT> reliaqual <DOT> com Stakeholders Table Model. # Standard Library Imports # RAMSTK Local Imports Contain the attributes and methods of the Option data manager. # Define private dict class attributes. # Define private list class attributes. # Define private scalar class attributes. # Define public dict class attributes. # Define public list class attributes. # Define public scalar class attributes. Initialize a Options data manager instance. # Initialize private dictionary attributes. # Initialize private list attributes. # Initialize private scalar attributes. # Initialize public dictionary attributes. # Initialize public list attributes. # Initialize public scalar attributes. # Subscribe to PyPubSub messages.
| 2.040912
| 2
|
src/insulaudit/console/__init__.py
|
bewest/insulaudit
| 22
|
6626093
|
from command import Command
from subcommand import Subcommand
from application import Application
import utils
import device
# Public API of the console package.
# Fixed: __all__ previously listed 'Device', but only the module `device`
# is imported here, so `from ... import *` raised AttributeError.
# NOTE(review): if a Device *class* was intended, import it explicitly
# (e.g. `from device import Device`) and restore the capitalized name.
__all__ = [ 'Command', 'Subcommand'
          , 'Application'
          , 'device'
          , 'utils' ]
#####
# EOF
|
from command import Command
from subcommand import Subcommand
from application import Application
import utils
import device
__all__ = [ 'Command', 'Subcommand'
, 'Application'
, 'Device'
, 'utils' ]
#####
# EOF
|
de
| 0.735042
|
##### # EOF
| 1.562139
| 2
|
main.py
|
serge-v/soilsensor
| 0
|
6626094
|
import machine
import time
import esp32
import tinypico as TinyPICO
from machine import RTC, WDT
from micropython import const
import blink, wifi, soilsensor
# Hardware watchdog: reboot if the script hangs for more than 30 s.
wdt = WDT(timeout=30000)
wdt.feed()
print("blink.init")
blink.init()
# Signal the reset cause on the LED: magenta for expected wake-ups
# (deep sleep / watchdog), red blinks with the raw code otherwise.
reset = machine.reset_cause()
if reset == machine.DEEPSLEEP_RESET:
    blink.blink(1, blink.MAGENTA)
    print("deepsleep reset")
elif reset == machine.WDT_RESET:
    blink.blink(3, blink.MAGENTA)
    print("watchdog reset")
else:
    blink.blink(reset, blink.RED)
    print("reset cause:", machine.reset_cause())
# Read the soil moisture before bringing up WiFi.
moisture = soilsensor.get_moisture()
print("wifi.connect")
wifi.connect()
blink.blink(3, blink.GREEN)
time.sleep(2)
rtc = RTC()
ts = rtc.datetime()
volts = TinyPICO.get_battery_voltage()
# Build a '+'-separated telemetry record: timestamp, reset cause, raw chip
# temperature, hall sensor, battery voltage and soil moisture.
# NOTE(review): ts[3] is skipped -- presumably the weekday field of
# RTC.datetime(); confirm against the MicroPython port in use.
msg = '{:04}-{:02}-{:02}+{:02}:{:02}:{:02}'.format(ts[0], ts[1], ts[2], ts[4], ts[5], ts[6])
msg += "+reset:{}+t:{}F+hall:{}".format(reset, esp32.raw_temperature(), esp32.hall_sensor())
msg += "+batv:{:.2f}V".format(volts)
msg += "+moisture:{}".format(moisture)
print("sending:", msg)
wifi.send_msg(msg)
# Deep-sleep 60 s; waking from deep sleep resets the board, so this whole
# script re-runs on every wake-up.
TinyPICO.go_deepsleep(60000)
|
import machine
import time
import esp32
import tinypico as TinyPICO
from machine import RTC, WDT
from micropython import const
import blink, wifi, soilsensor
wdt = WDT(timeout=30000)
wdt.feed()
print("blink.init")
blink.init()
reset = machine.reset_cause()
if reset == machine.DEEPSLEEP_RESET:
blink.blink(1, blink.MAGENTA)
print("deepsleep reset")
elif reset == machine.WDT_RESET:
blink.blink(3, blink.MAGENTA)
print("watchdog reset")
else:
blink.blink(reset, blink.RED)
print("reset cause:", machine.reset_cause())
moisture = soilsensor.get_moisture()
print("wifi.connect")
wifi.connect()
blink.blink(3, blink.GREEN)
time.sleep(2)
rtc = RTC()
ts = rtc.datetime()
volts = TinyPICO.get_battery_voltage()
msg = '{:04}-{:02}-{:02}+{:02}:{:02}:{:02}'.format(ts[0], ts[1], ts[2], ts[4], ts[5], ts[6])
msg += "+reset:{}+t:{}F+hall:{}".format(reset, esp32.raw_temperature(), esp32.hall_sensor())
msg += "+batv:{:.2f}V".format(volts)
msg += "+moisture:{}".format(moisture)
print("sending:", msg)
wifi.send_msg(msg)
TinyPICO.go_deepsleep(60000)
|
none
| 1
| 2.53793
| 3
|
|
server/test.py
|
tbor8080/pyprog
| 0
|
6626095
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from wsgiref import simple_server,util
from wsgiref.handlers import CGIHandler
from wsgiref.handlers import SimpleHandler
def app(environs, start_response):
    """Minimal WSGI application: always respond 200 with the body b'test'.

    Args:
        environs: the WSGI environment dict (unused).
        start_response: WSGI callable used to send status and headers.

    Returns:
        An iterable containing a single bytes chunk.
    """
    status = '200 OK'
    # Fixed: the charset parameter must be separated from the media type by
    # ';' -- the original header value lacked the separator.
    headers = [('Content-type', 'application/x-httpd-cgi; charset=utf-8')]
    body = 'test'.encode('utf-8')
    start_response(status, headers)
    return [body]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from wsgiref import simple_server,util
from wsgiref.handlers import CGIHandler
from wsgiref.handlers import SimpleHandler
def app(environs, start_response):
status='200 OK'
headers=[('Content-type', 'application/x-httpd-cgi charset=utf-8')]
body='test'.encode('utf-8')
start_response(status, headers)
return [body]
|
en
| 0.308914
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*-
| 2.087944
| 2
|
corehq/apps/couch_sql_migration/tests/test_lrudict.py
|
dimagilg/commcare-hq
| 1
|
6626096
|
from nose.plugins.skip import SkipTest
from testil import assert_raises, eq
from ..lrudict import LRUDict
def test_getitem():
    """__getitem__ refreshes recency: key 0 is touched each iteration, so it
    survives eviction at capacity 3; missing keys raise KeyError."""
    lru = LRUDict(3)
    for x in range(4):
        lru[x] = x * 2
        eq(lru[0], 0, "x: %s" % x)
    with assert_raises(KeyError):
        lru[-1]
    assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def test_get():
    """get() refreshes recency like __getitem__ and returns the supplied
    default for missing keys."""
    lru = LRUDict(3)
    for x in range(4):
        lru[x] = x * 2
        eq(lru.get(0), 0, "x: %s" % x)
    eq(lru.get(-1, -2), -2)
    assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def test_setitem():
    """Assigning an existing key updates its value and moves it to the
    most-recently-used position."""
    lru = LRUDict(3)
    for x in range(4):
        lru[x] = x * 2
    lru[2] = 5
    assert_items_equal(lru, [(1, 2), (3, 6), (2, 5)])
def test_setdefault():
    """Exercise setdefault() around capacity eviction.

    NOTE(review): the expected value (0, 0) for key 0 after
    ``setdefault(0, 1000)`` encodes LRUDict-specific behavior -- confirm
    against the implementation if this test starts failing.
    """
    lru = LRUDict(3)
    for x in range(4):
        lru.setdefault(x, x * 2)
    lru.setdefault(0, 1000)
    assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def test_update():
    """update() inserts pairs in iteration order, evicting the least
    recently used entries down to capacity."""
    lru = LRUDict(3)
    lru.update((x, x * 2) for x in range(4))
    assert_items_equal(lru, [(1, 2), (2, 4), (3, 6)])
def test_update_with_duplicate_keys():
    """Documents a known-broken edge case: update() with a repeated key.
    Everything after the SkipTest raise is intentionally unreachable."""
    raise SkipTest("broken edge case")
    lru = LRUDict(3)
    lru.update((x, x) for x in [0, 1, 2, 3, 0])
    assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def assert_items_equal(lru, items):
    """Assert every view of `lru` (items, iteration, keys, values, and the
    Python-2 iter* methods when present) matches `items`, in order."""
    keys = [k for k, v in items]
    values = [v for k, v in items]
    eq(list(lru.items()), items)
    eq(list(lru), keys)
    eq(list(lru.keys()), keys)
    eq(list(lru.values()), values)
    if hasattr(lru, "iterkeys"):
        eq(list(lru.iterkeys()), keys)
        eq(list(lru.itervalues()), values)
|
from nose.plugins.skip import SkipTest
from testil import assert_raises, eq
from ..lrudict import LRUDict
def test_getitem():
lru = LRUDict(3)
for x in range(4):
lru[x] = x * 2
eq(lru[0], 0, "x: %s" % x)
with assert_raises(KeyError):
lru[-1]
assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def test_get():
lru = LRUDict(3)
for x in range(4):
lru[x] = x * 2
eq(lru.get(0), 0, "x: %s" % x)
eq(lru.get(-1, -2), -2)
assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def test_setitem():
lru = LRUDict(3)
for x in range(4):
lru[x] = x * 2
lru[2] = 5
assert_items_equal(lru, [(1, 2), (3, 6), (2, 5)])
def test_setdefault():
lru = LRUDict(3)
for x in range(4):
lru.setdefault(x, x * 2)
lru.setdefault(0, 1000)
assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def test_update():
lru = LRUDict(3)
lru.update((x, x * 2) for x in range(4))
assert_items_equal(lru, [(1, 2), (2, 4), (3, 6)])
def test_update_with_duplicate_keys():
raise SkipTest("broken edge case")
lru = LRUDict(3)
lru.update((x, x) for x in [0, 1, 2, 3, 0])
assert_items_equal(lru, [(2, 4), (3, 6), (0, 0)])
def assert_items_equal(lru, items):
keys = [k for k, v in items]
values = [v for k, v in items]
eq(list(lru.items()), items)
eq(list(lru), keys)
eq(list(lru.keys()), keys)
eq(list(lru.values()), values)
if hasattr(lru, "iterkeys"):
eq(list(lru.iterkeys()), keys)
eq(list(lru.itervalues()), values)
|
none
| 1
| 2.197047
| 2
|
|
__main_v2__.py
|
iomegak12/pythondockertry
| 0
|
6626097
|
<reponame>iomegak12/pythondockertry
from injector import Injector, Inject
import crmsystem
def configure(binder):
    """Injector bindings: wire each CRM service to a pre-built instance whose
    completion callback pretty-prints the fetched records."""
    binder.bind(crmsystem.CustomerService,
                to=crmsystem.CustomerService(lambda customers:
                                             print(crmsystem.PrettyTableGenerator.get_customer_table(customers))))
    binder.bind(crmsystem.OrderService,
                to=crmsystem.OrderService(lambda orders:
                                          print(crmsystem.PrettyTableGenerator.get_order_table(orders))))
def main():
    """Resolve the DataController from the injector and run processing,
    printing a completion message when both services finish."""
    injector = Injector([configure])
    dataController = injector.get(crmsystem.DataController)
    dataController.process(
        lambda: print('Both Customer and Order Services Completed Processing!'))
# Script entry point.
if __name__ == "__main__":
    main()
|
from injector import Injector, Inject
import crmsystem
def configure(binder):
binder.bind(crmsystem.CustomerService,
to=crmsystem.CustomerService(lambda customers:
print(crmsystem.PrettyTableGenerator.get_customer_table(customers))))
binder.bind(crmsystem.OrderService,
to=crmsystem.OrderService(lambda orders:
print(crmsystem.PrettyTableGenerator.get_order_table(orders))))
def main():
injector = Injector([configure])
dataController = injector.get(crmsystem.DataController)
dataController.process(
lambda: print('Both Customer and Order Services Completed Processing!'))
if __name__ == "__main__":
main()
|
none
| 1
| 2.354097
| 2
|
|
html_parsing/exclusive_games/xbox_360__gematsu_com.py
|
DazEB2/SimplePyScripts
| 117
|
6626098
|
<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from common import get_game_list_from
# Scrape Gematsu's Xbox 360 exclusives page, collecting every title tagged
# as a full, platform or console exclusive.
url = 'https://gematsu.com/exclusives/xbox-360'
exclusive_games = get_game_list_from(url, 'fullexclusive', 'platformexclusive', 'consoleexclusive')
# Print a numbered list of the collected games.
print(f'Games ({len(exclusive_games)}):')
for i, game in enumerate(exclusive_games, 1):
    print(f'    {i:3}. {game}')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from common import get_game_list_from
url = 'https://gematsu.com/exclusives/xbox-360'
exclusive_games = get_game_list_from(url, 'fullexclusive', 'platformexclusive', 'consoleexclusive')
print(f'Games ({len(exclusive_games)}):')
for i, game in enumerate(exclusive_games, 1):
print(f' {i:3}. {game}')
|
en
| 0.308914
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*-
| 3.023313
| 3
|
settings/kobo_conf.py
|
philipsales/couchbase-python-elasticsearch
| 0
|
6626099
|
# SERVER Configuration
# NOTE(review): credentials are kept in source control here; prefer loading
# secrets from environment variables or a secrets store.
KoboENV = "production"
# Per-environment connection settings for the KoBo server; the active entry
# is selected by KoboENV above.
KoboConfig = {
    'local': {
        'USERNAME': '',
        'PASSWORD': '',
        'PROTOCOL': 'http',
        'SCHEME': 'kobo',
        'IP': '127.0.0.1',
        'HOST': '',
        'PORT': '',
        'VERSION':'v1',
        'TIMEOUT': 7200,
        'FORM': ''
    },
    'development': {
        'USERNAME': 'admin',
        'PASSWORD': '<PASSWORD>',
        'PROTOCOL': 'https',
        'SCHEME': 'kobo',
        'IP': '',
        'HOST': 'kc.aqm.space',
        'PORT': '',
        'VERSION':'v1',
        'TIMEOUT': 7200,
        'FORM': '12'
    },
    'uat': {
        'USERNAME': '',
        'PASSWORD': '',
        'PROTOCOL': 'http',
        'SCHEME': 'kobo',
        'IP': '',
        'HOST': '',
        'PORT': '',
        'VERSION':'v1',
        'TIMEOUT': 7200,
        'FORM': ''
    },
    'production': {
        'USERNAME': 'admin',
        'PASSWORD': '<PASSWORD>',
        'PROTOCOL': 'https',
        'SCHEME': 'kobo',
        'IP': '',
        'HOST': 'kc.aqm.space',
        'PORT': '',
        'VERSION':'v1',
        'TIMEOUT': 7200,
        'FORM': '13'
    }
}
# FORM: 33 for testing 2 set
# FORM: 12 for actual testing
# FORM: 13 for production  (comment typo fixed: was "FROM")
|
#SERVER Configuration
KoboENV = "production"
KoboConfig = {
'local': {
'USERNAME': '',
'PASSWORD': '',
'PROTOCOL': 'http',
'SCHEME': 'kobo',
'IP': '127.0.0.1',
'HOST': '',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': ''
},
'development': {
'USERNAME': 'admin',
'PASSWORD': '<PASSWORD>',
'PROTOCOL': 'https',
'SCHEME': 'kobo',
'IP': '',
'HOST': 'kc.aqm.space',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': '12'
},
'uat': {
'USERNAME': '',
'PASSWORD': '',
'PROTOCOL': 'http',
'SCHEME': 'kobo',
'IP': '',
'HOST': '',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': ''
},
'production': {
'USERNAME': 'admin',
'PASSWORD': '<PASSWORD>',
'PROTOCOL': 'https',
'SCHEME': 'kobo',
'IP': '',
'HOST': 'kc.aqm.space',
'PORT': '',
'VERSION':'v1',
'TIMEOUT': 7200,
'FORM': '13'
}
}
# FORM: 33 for testing 2 set
# FORM: 12 for actual testing
# FROM: 13 for production
|
en
| 0.522845
|
#SERVER Configuration # FORM: 33 for testing 2 set # FORM: 12 for actual testing # FROM: 13 for production
| 1.453563
| 1
|
trains/utilities/check_updates.py
|
doliveralg/trains
| 0
|
6626100
|
<filename>trains/utilities/check_updates.py<gh_stars>0
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
import requests
import six
if six.PY3:
from math import inf
else:
inf = float('inf')
class InvalidVersion(ValueError):
    """
    Raised when a version string does not conform to PEP 440.
    """
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class Version(_BaseVersion):
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_local_version_separators = re.compile(r"[\._-]")
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=self._parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=self._parse_letter_version(
match.group("post_l") or '', match.group("post_n1") or match.group("post_n2") or ''
),
dev=self._parse_letter_version(match.group("dev_l") or '', match.group("dev_n") or ''),
local=self._parse_local_version(match.group("local") or ''),
)
# Generate a key which will be used for sorting
self._key = self._cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(".post{0}".format(self.post))
# Development release
if self.dev is not None:
parts.append(".dev{0}".format(self.dev))
# Local version segment
if self.local is not None:
parts.append("+{0}".format(self.local))
return "".join(parts)
@property
def epoch(self):
return self._version.epoch
@property
def release(self):
return self._version.release
@property
def pre(self):
return self._version.pre
@property
def post(self):
return self._version.post[1] if self._version.post else None
@property
def dev(self):
return self._version.dev[1] if self._version.dev else None
@property
def local(self):
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self):
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self):
return self.post is not None
@property
def is_devrelease(self):
return self.dev is not None
@staticmethod
def _parse_letter_version(letter, number):
if not letter and not number:
return None
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
    @classmethod
    def _parse_local_version(cls, local):
        """
        Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
        """
        if local is not None:
            # Split on ./_/-; lower-case alpha parts, int-convert numeric ones.
            local = tuple(
                part.lower() if not part.isdigit() else int(part)
                for part in cls._local_version_separators.split(local)
            )
            # '' splits into ('',): treat an empty/blank local segment as absent.
            if not local or not local[0]:
                return None
            return local
        return None
    @staticmethod
    def _cmpkey(epoch, release, pre, post, dev, local):
        """Build the tuple used as this version's sort key (PEP 440 order)."""
        # When we compare a release version, we want to compare it with all of the
        # trailing zeros removed. So we'll use a reverse the list, drop all the now
        # leading zeros until we come to something non zero, then take the rest
        # re-reverse it back into the correct order and make it a tuple and use
        # that for our sorting key.
        # release = tuple(
        #     reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
        # )
        # NOTE(review): the trailing-zero stripping above is disabled, so unlike
        # upstream `packaging`, "1.0" and "1.0.0" do NOT compare equal here.
        # Versions without a pre-release (except as noted above) should sort after
        # those with one.
        if not pre:
            pre = inf
        elif pre:
            # Only the numeric part of (letter, number) is compared.
            pre = pre[1]
        # Versions without a post segment should sort before those with one.
        if not post:
            post = -inf
        else:
            post = post[1]
        # Versions without a development segment should sort after those with one.
        if not dev:
            dev = inf
        else:
            dev = dev[1]
        if not local:
            # Versions without a local segment should sort before those with one.
            local = inf
        else:
            # Versions with a local segment need that segment parsed to implement
            # the sorting rules in PEP440.
            # - Alpha numeric segments sort before numeric segments
            # - Alpha numeric segments sort lexicographically
            # - Numeric segments sort numerically
            # - Shorter versions sort before longer versions when the prefixes
            #   match exactly
            # NOTE(review): this keeps only the SECOND local segment and raises
            # IndexError for a single-segment local like "1.0+ubuntu"; upstream
            # `packaging` transforms every segment instead — TODO confirm intent.
            local = local[1]
        return epoch, release, pre, post, dev, local
    class CheckPackageUpdates(object):
        """One-shot helper that asks PyPI whether a newer `trains` release exists."""

        # Set after the first check so `only_once=True` callers skip the network hit.
        _package_version_checked = False

        @classmethod
        def check_new_package_available(cls, only_once=False):
            """Query PyPI for a newer `trains` package than the installed one.

            :param only_once: if True, return immediately after the first call
                made in this process, whatever its outcome.
            :return: ``(latest_version_str, is_not_patch_upgrade)`` when a newer
                release exists, otherwise ``None`` (also on any network/parse error).
            """
            if only_once and cls._package_version_checked:
                return None
            # noinspection PyBroadException
            try:
                cls._package_version_checked = True
                releases = requests.get('https://pypi.python.org/pypi/trains/json').json()['releases'].keys()
                releases = [Version(r) for r in releases]
                latest_version = sorted(releases)
                from ..version import __version__
                cur_version = Version(__version__)
                # A stable install only considers stable releases; dev/pre installs
                # consider everything.
                if not cur_version.is_devrelease and not cur_version.is_prerelease:
                    latest_version = [r for r in latest_version if not r.is_devrelease and not r.is_prerelease]
                if cur_version >= latest_version[-1]:
                    return None
                # True when major or minor changed (i.e. more than a patch bump).
                not_patch_upgrade = latest_version[-1].release[:2] != cur_version.release[:2]
                return str(latest_version[-1]), not_patch_upgrade
            except Exception:
                # Best-effort: swallow network/JSON/comparison errors.
                return None
|
<filename>trains/utilities/check_updates.py<gh_stars>0
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
import requests
import six
if six.PY3:
from math import inf
else:
inf = float('inf')
# Raised by Version() for strings that do not match the PEP 440 grammar.
class InvalidVersion(ValueError):
    """
    An invalid version was found, users should refer to PEP 440.
    """
# Parsed, normalized pieces of a version string: `release` is a tuple of ints;
# `pre`/`post`/`dev` are (letter, number) tuples or None; `local` is a tuple
# of str/int segments or None.
_Version = collections.namedtuple(
    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
class _BaseVersion(object):
    """Mixin supplying hashing and rich comparisons via ``self._key``.

    Subclasses must assign ``_key`` (a comparable tuple); every operator
    below delegates to a plain tuple comparison of the two keys.
    """

    def __hash__(self):
        return hash(self._key)

    def _compare(self, other, method):
        # Refuse comparisons against unrelated types.
        if not isinstance(other, _BaseVersion):
            return NotImplemented
        return method(self._key, other._key)

    def __eq__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs == rhs)

    def __ne__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs != rhs)

    def __lt__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs < rhs)

    def __le__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs <= rhs)

    def __gt__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs > rhs)

    def __ge__(self, other):
        return self._compare(other, lambda lhs, rhs: lhs >= rhs)
class Version(_BaseVersion):
    """A parsed PEP 440 version: normalization, rendering and ordering.

    Trimmed-down vendored variant of ``packaging.version.Version``.
    Construction raises `InvalidVersion` for strings that do not match the
    PEP 440 grammar.
    """

    VERSION_PATTERN = r"""
        v?
        (?:
            (?:(?P<epoch>[0-9]+)!)?                           # epoch
            (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
            (?P<pre>                                          # pre-release
                [-_\.]?
                (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
                [-_\.]?
                (?P<pre_n>[0-9]+)?
            )?
            (?P<post>                                         # post release
                (?:-(?P<post_n1>[0-9]+))
                |
                (?:
                    [-_\.]?
                    (?P<post_l>post|rev|r)
                    [-_\.]?
                    (?P<post_n2>[0-9]+)?
                )
            )?
            (?P<dev>                                          # dev release
                [-_\.]?
                (?P<dev_l>dev)
                [-_\.]?
                (?P<dev_n>[0-9]+)?
            )?
        )
        (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
    """

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _local_version_separators = re.compile(r"[\._-]")

    def __init__(self, version):
        """Parse `version` (a string) or raise `InvalidVersion`."""
        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion("Invalid version: '{0}'".format(version))

        # Store the parsed out pieces of the version. Note that the post/dev
        # groups are passed as '' (not None) when absent.
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=self._parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=self._parse_letter_version(
                match.group("post_l") or '', match.group("post_n1") or match.group("post_n2") or ''
            ),
            dev=self._parse_letter_version(match.group("dev_l") or '', match.group("dev_n") or ''),
            local=self._parse_local_version(match.group("local") or ''),
        )

        # Generate a key which will be used for sorting
        self._key = self._cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self):
        return "<Version({0})>".format(repr(str(self)))

    def __str__(self):
        """Render the canonical PEP 440 string for this version."""
        parts = []
        # Epoch
        if self.epoch != 0:
            parts.append("{0}!".format(self.epoch))
        # Release segment
        parts.append(".".join(str(x) for x in self.release))
        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))
        # Post-release
        if self.post is not None:
            parts.append(".post{0}".format(self.post))
        # Development release
        if self.dev is not None:
            parts.append(".dev{0}".format(self.dev))
        # Local version segment
        if self.local is not None:
            parts.append("+{0}".format(self.local))
        return "".join(parts)

    @property
    def epoch(self):
        # Integer epoch segment; 0 when the version string had none.
        return self._version.epoch

    @property
    def release(self):
        # Tuple of ints for the release segment, e.g. (1, 2, 3).
        return self._version.release

    @property
    def pre(self):
        # (letter, number) pre-release pair, or None if not a pre-release.
        return self._version.pre

    @property
    def post(self):
        # Numeric part of the post-release segment, or None.
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self):
        # Numeric part of the dev-release segment, or None.
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self):
        # Local segment re-joined with dots (e.g. "ubuntu.1"), or None.
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self):
        # Version string with any "+local" suffix stripped.
        return str(self).split("+", 1)[0]

    @property
    def base_version(self):
        # Epoch + release only: drops pre/post/dev/local segments.
        parts = []
        # Epoch
        if self.epoch != 0:
            parts.append("{0}!".format(self.epoch))
        # Release segment
        parts.append(".".join(str(x) for x in self.release))
        return "".join(parts)

    @property
    def is_prerelease(self):
        # A dev release also counts as a pre-release here.
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self):
        return self.post is not None

    @property
    def is_devrelease(self):
        return self.dev is not None

    @staticmethod
    def _parse_letter_version(letter, number):
        """Normalize a (letter, number) pre/post/dev segment pair.

        Returns ``(canonical_letter, int(number))`` or ``None`` when the
        segment is absent.
        """
        if not letter and not number:
            return None
        if letter:
            # __init__ passes '' (not None) for a missing numeral group, so
            # treat any falsy number as the implicit 0 of e.g. "1.0.post".
            # (Testing `number is None` here would crash on int('').)
            if not number:
                number = 0
            # We normalize any letters to their lower case form
            letter = letter.lower()
            # We consider some words to be alternate spellings of other words and
            # in those cases we want to normalize the spellings to our preferred
            # spelling.
            if letter == "alpha":
                letter = "a"
            elif letter == "beta":
                letter = "b"
            elif letter in ["c", "pre", "preview"]:
                letter = "rc"
            elif letter in ["rev", "r"]:
                letter = "post"
            return letter, int(number)
        if not letter and number:
            # We assume if we are given a number, but we are not given a letter
            # then this is using the implicit post release syntax (e.g. 1.0-1)
            letter = "post"
            return letter, int(number)

    @classmethod
    def _parse_local_version(cls, local):
        """
        Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
        """
        if local is not None:
            local = tuple(
                part.lower() if not part.isdigit() else int(part)
                for part in cls._local_version_separators.split(local)
            )
            # '' splits into ('',): treat an empty/blank local segment as absent.
            if not local or not local[0]:
                return None
            return local
        return None

    @staticmethod
    def _cmpkey(epoch, release, pre, post, dev, local):
        """Build the tuple used as this version's sort key (PEP 440 order)."""
        # NOTE: unlike upstream `packaging`, trailing zeros in the release
        # tuple are deliberately NOT stripped here, so "1.0" != "1.0.0".
        # Versions without a pre-release (except as noted above) should sort after
        # those with one.
        if not pre:
            pre = inf
        elif pre:
            # Only the numeric part of (letter, number) is compared.
            pre = pre[1]
        # Versions without a post segment should sort before those with one.
        if not post:
            post = -inf
        else:
            post = post[1]
        # Versions without a development segment should sort after those with one.
        if not dev:
            dev = inf
        else:
            dev = dev[1]
        if not local:
            # Versions without a local segment should sort before those with one.
            local = inf
        else:
            # NOTE(review): this keeps only the SECOND local segment and raises
            # IndexError for a single-segment local like "1.0+ubuntu"; upstream
            # `packaging` transforms every segment per PEP 440 — TODO confirm
            # intent (local segments never occur for the PyPI releases this
            # module actually compares).
            local = local[1]
        return epoch, release, pre, post, dev, local
class CheckPackageUpdates(object):
    """One-shot helper that asks PyPI whether a newer `trains` release exists."""

    # Set after the first check so `only_once=True` callers skip the network hit.
    _package_version_checked = False

    @classmethod
    def check_new_package_available(cls, only_once=False):
        """Query PyPI for a newer `trains` package than the installed one.

        :param only_once: if True, return immediately after the first call
            made in this process, whatever its outcome.
        :return: ``(latest_version_str, is_not_patch_upgrade)`` when a newer
            release exists, otherwise ``None`` (also on any network/parse error).
        """
        if only_once and cls._package_version_checked:
            return None
        # noinspection PyBroadException
        try:
            cls._package_version_checked = True
            releases = requests.get('https://pypi.python.org/pypi/trains/json').json()['releases'].keys()
            releases = [Version(r) for r in releases]
            latest_version = sorted(releases)
            from ..version import __version__
            cur_version = Version(__version__)
            # A stable install only considers stable releases; dev/pre installs
            # consider everything.
            if not cur_version.is_devrelease and not cur_version.is_prerelease:
                latest_version = [r for r in latest_version if not r.is_devrelease and not r.is_prerelease]
            if cur_version >= latest_version[-1]:
                return None
            # True when major or minor changed (i.e. more than a patch bump).
            not_patch_upgrade = latest_version[-1].release[:2] != cur_version.release[:2]
            return str(latest_version[-1]), not_patch_upgrade
        except Exception:
            # Best-effort: swallow network/JSON/comparison errors.
            return None
|
en
| 0.827823
|
An invalid version was found, users should refer to PEP 440. v? (?: (?:(?P<epoch>[0-9]+)!)? # epoch (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment (?P<pre> # pre-release [-_\.]? (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) [-_\.]? (?P<pre_n>[0-9]+)? )? (?P<post> # post release (?:-(?P<post_n1>[0-9]+)) | (?: [-_\.]? (?P<post_l>post|rev|r) [-_\.]? (?P<post_n2>[0-9]+)? ) )? (?P<dev> # dev release [-_\.]? (?P<dev_l>dev) [-_\.]? (?P<dev_n>[0-9]+)? )? ) (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version # Validate the version and parse it into pieces # Store the parsed out pieces of the version # Generate a key which will be used for sorting # Epoch # Release segment # Pre-release # Post-release # Development release # Local version segment # Epoch # Release segment # We consider there to be an implicit 0 in a pre-release if there is # not a numeral associated with it. # We normalize any letters to their lower case form # We consider some words to be alternate spellings of other words and # in those cases we want to normalize the spellings to our preferred # spelling. # We assume if we are given a number, but we are not given a letter # then this is using the implicit post release syntax (e.g. 1.0-1) Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). # When we compare a release version, we want to compare it with all of the # trailing zeros removed. So we'll use a reverse the list, drop all the now # leading zeros until we come to something non zero, then take the rest # re-reverse it back into the correct order and make it a tuple and use # that for our sorting key. # release = tuple( # reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))) # ) # Versions without a pre-release (except as noted above) should sort after # those with one. # Versions without a post segment should sort before those with one. # Versions without a development segment should sort after those with one. 
# Versions without a local segment should sort before those with one. # Versions with a local segment need that segment parsed to implement # the sorting rules in PEP440. # - Alpha numeric segments sort before numeric segments # - Alpha numeric segments sort lexicographically # - Numeric segments sort numerically # - Shorter versions sort before longer versions when the prefixes # match exactly :return: true if there is a newer package in PyPI # noinspection PyBroadException
| 2.345894
| 2
|
tensorflow_federated/python/core/impl/executors/executor_service.py
|
j35tor/federated
| 1
|
6626101
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A service wrapper around an executor that makes it accessible over gRPC."""
import asyncio
import functools
import threading
import traceback
import uuid
import weakref
from absl import logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.impl.executors import executor_factory
from tensorflow_federated.python.core.impl.executors import executor_serialization
def _set_invalid_arg_err(context: grpc.ServicerContext, err):
    """Log the current traceback and fail `context` with INVALID_ARGUMENT."""
    logging.error(traceback.format_exc())
    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
    context.set_details(str(err))
def _set_unknown_err(context: grpc.ServicerContext, err):
    """Log the current traceback and fail `context` with UNKNOWN."""
    logging.error(traceback.format_exc())
    context.set_code(grpc.StatusCode.UNKNOWN)
    context.set_details(str(err))
class ExecutorService(executor_pb2_grpc.ExecutorServicer):
    """A wrapper around a target executor that makes it into a gRPC service.

    Embedded values are keyed by UUID strings in `self._values`; each entry
    is a `concurrent.futures.Future` resolving to an executor value. All
    executor coroutines run on a dedicated background event-loop thread.
    """

    def __init__(self, ex_factory: executor_factory.ExecutorFactory, *args,
                 **kwargs):
        py_typecheck.check_type(ex_factory, executor_factory.ExecutorFactory)
        super().__init__(*args, **kwargs)
        self._ex_factory = ex_factory
        # Concrete executor; created lazily by `SetCardinalities`.
        self._executor = None
        # Guards `self._values`: gRPC may service requests concurrently.
        self._lock = threading.Lock()
        # The keys in this dictionary are value ids (the same as what we return
        # in the gRPC responses), and the values are `concurrent.futures.Future`
        # instances (this may, and probably will change as we flesh out the rest
        # of this implementation).
        self._values = {}

        def run_loop(loop):
            loop.run_forever()
            loop.close()

        # Dedicated event loop on a daemon thread; all executor coroutines are
        # scheduled onto it via `_run_coro_threadsafe_with_tracing`.
        self._event_loop = asyncio.new_event_loop()
        self._event_loop.set_task_factory(
            tracing.propagate_trace_context_task_factory)
        self._thread = threading.Thread(
            target=functools.partial(run_loop, self._event_loop), daemon=True)
        self._thread.start()

        def finalize(loop, thread):
            loop.call_soon_threadsafe(loop.stop)
            thread.join()

        # Stop the loop and join the thread when this service is collected.
        weakref.finalize(self, finalize, self._event_loop, self._thread)

    def _run_coro_threadsafe_with_tracing(self, coro):
        """Runs `coro` on `self._event_loop` inside the current trace spans."""
        with tracing.with_trace_context_from_rpc():
            return asyncio.run_coroutine_threadsafe(
                tracing.wrap_coroutine_in_current_trace_context(coro),
                self._event_loop)

    @property
    def executor(self):
        # Fails fast if `SetCardinalities` has not been called yet.
        if self._executor is None:
            raise RuntimeError('The executor service has not yet been configured '
                               'with cardinalities and cannot execute any '
                               'concrete requests.')
        return self._executor

    def SetCardinalities(
        self,
        request: executor_pb2.SetCardinalitiesRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.SetCardinalitiesResponse:
        """Sets the cardinality for the executor service."""
        py_typecheck.check_type(request, executor_pb2.SetCardinalitiesRequest)
        try:
            cardinalities_dict = executor_serialization.deserialize_cardinalities(
                request.cardinalities)
            self._executor = self._ex_factory.create_executor(cardinalities_dict)
            return executor_pb2.SetCardinalitiesResponse()
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.SetCardinalitiesResponse()

    def ClearExecutor(
        self,
        request: executor_pb2.ClearExecutorRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.ClearExecutorResponse:
        """Clears the service Executor-related state."""
        py_typecheck.check_type(request, executor_pb2.ClearExecutorRequest)
        self._executor = None
        self._ex_factory.clean_up_executors()
        return executor_pb2.ClearExecutorResponse()

    def CreateValue(
        self,
        request: executor_pb2.CreateValueRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateValueResponse:
        """Creates a value embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateValueRequest)
        try:
            with tracing.span('ExecutorService.CreateValue', 'deserialize_value'):
                value, value_type = (
                    executor_serialization.deserialize_value(request.value))
            value_id = str(uuid.uuid4())
            # Embedding happens asynchronously; the id is returned immediately.
            coro = self.executor.create_value(value, value_type)
            future_val = self._run_coro_threadsafe_with_tracing(coro)
            with self._lock:
                self._values[value_id] = future_val
            return executor_pb2.CreateValueResponse(
                value_ref=executor_pb2.ValueRef(id=value_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateValueResponse()

    def CreateCall(
        self,
        request: executor_pb2.CreateCallRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateCallResponse:
        """Creates a call embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateCallRequest)
        try:
            function_id = str(request.function_ref.id)
            # An absent argument serializes as an empty id string.
            argument_id = str(request.argument_ref.id)
            with self._lock:
                function_val = self._values[function_id]
                argument_val = self._values[argument_id] if argument_id else None

            async def _processing():
                # Await the (possibly still-embedding) operands, then call.
                function = await asyncio.wrap_future(function_val)
                argument = await asyncio.wrap_future(
                    argument_val) if argument_val is not None else None
                return await self.executor.create_call(function, argument)

            coro = _processing()
            result_fut = self._run_coro_threadsafe_with_tracing(coro)
            result_id = str(uuid.uuid4())
            with self._lock:
                self._values[result_id] = result_fut
            return executor_pb2.CreateCallResponse(
                value_ref=executor_pb2.ValueRef(id=result_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateCallResponse()

    def CreateStruct(
        self,
        request: executor_pb2.CreateStructRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateStructResponse:
        """Creates a struct embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateStructRequest)
        try:
            with self._lock:
                elem_futures = [self._values[e.value_ref.id] for e in request.element]
            # Unnamed elements carry an empty name string -> None.
            elem_names = [
                str(elem.name) if elem.name else None for elem in request.element
            ]

            async def _processing():
                elem_values = await asyncio.gather(
                    *[asyncio.wrap_future(v) for v in elem_futures])
                elements = list(zip(elem_names, elem_values))
                struct = structure.Struct(elements)
                return await self.executor.create_struct(struct)

            result_fut = self._run_coro_threadsafe_with_tracing(_processing())
            result_id = str(uuid.uuid4())
            with self._lock:
                self._values[result_id] = result_fut
            return executor_pb2.CreateStructResponse(
                value_ref=executor_pb2.ValueRef(id=result_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateStructResponse()

    def CreateSelection(
        self,
        request: executor_pb2.CreateSelectionRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateSelectionResponse:
        """Creates a selection embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateSelectionRequest)
        try:
            with self._lock:
                source_fut = self._values[request.source_ref.id]

            async def _processing():
                source = await asyncio.wrap_future(source_fut)
                return await self.executor.create_selection(source, request.index)

            result_fut = self._run_coro_threadsafe_with_tracing(_processing())
            result_id = str(uuid.uuid4())
            with self._lock:
                self._values[result_id] = result_fut
            return executor_pb2.CreateSelectionResponse(
                value_ref=executor_pb2.ValueRef(id=result_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateSelectionResponse()

    def Compute(
        self,
        request: executor_pb2.ComputeRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.ComputeResponse:
        """Computes a value embedded in the executor."""
        # Blocks the servicing thread (no timeout) until the value resolves.
        return self._run_coro_threadsafe_with_tracing(
            self._Compute(request, context)).result()

    async def _Compute(
        self,
        request: executor_pb2.ComputeRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.ComputeResponse:
        """Asynchronous implementation of `Compute`."""
        py_typecheck.check_type(request, executor_pb2.ComputeRequest)
        try:
            value_id = str(request.value_ref.id)
            with self._lock:
                future_val = asyncio.wrap_future(self._values[value_id])
            val = await future_val
            result_val = await val.compute()
            val_type = val.type_signature
            value_proto, _ = executor_serialization.serialize_value(
                result_val, val_type)
            return executor_pb2.ComputeResponse(value=value_proto)
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.ComputeResponse()

    def Dispose(
        self,
        request: executor_pb2.DisposeRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.DisposeResponse:
        """Disposes of a value, making it no longer available for future calls."""
        py_typecheck.check_type(request, executor_pb2.DisposeRequest)
        try:
            with self._lock:
                # NOTE: an unknown id aborts the loop, leaving earlier refs in
                # this request deleted but later ones untouched.
                for value_ref in request.value_ref:
                    del self._values[value_ref.id]
        except KeyError as err:
            _set_invalid_arg_err(context, err)
        return executor_pb2.DisposeResponse()
|
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A service wrapper around an executor that makes it accessible over gRPC."""
import asyncio
import functools
import threading
import traceback
import uuid
import weakref
from absl import logging
import grpc
from tensorflow_federated.proto.v0 import executor_pb2
from tensorflow_federated.proto.v0 import executor_pb2_grpc
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.common_libs import tracing
from tensorflow_federated.python.core.impl.executors import executor_factory
from tensorflow_federated.python.core.impl.executors import executor_serialization
def _set_invalid_arg_err(context: grpc.ServicerContext, err):
    """Log the current traceback and fail `context` with INVALID_ARGUMENT."""
    logging.error(traceback.format_exc())
    context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
    context.set_details(str(err))
def _set_unknown_err(context: grpc.ServicerContext, err):
    """Log the current traceback and fail `context` with UNKNOWN."""
    logging.error(traceback.format_exc())
    context.set_code(grpc.StatusCode.UNKNOWN)
    context.set_details(str(err))
class ExecutorService(executor_pb2_grpc.ExecutorServicer):
    """A wrapper around a target executor that makes it into a gRPC service.

    Embedded values are keyed by UUID strings in `self._values`; each entry
    is a `concurrent.futures.Future` resolving to an executor value. All
    executor coroutines run on a dedicated background event-loop thread.
    """

    def __init__(self, ex_factory: executor_factory.ExecutorFactory, *args,
                 **kwargs):
        py_typecheck.check_type(ex_factory, executor_factory.ExecutorFactory)
        super().__init__(*args, **kwargs)
        self._ex_factory = ex_factory
        # Concrete executor; created lazily by `SetCardinalities`.
        self._executor = None
        # Guards `self._values`: gRPC may service requests concurrently.
        self._lock = threading.Lock()
        # The keys in this dictionary are value ids (the same as what we return
        # in the gRPC responses), and the values are `concurrent.futures.Future`
        # instances (this may, and probably will change as we flesh out the rest
        # of this implementation).
        self._values = {}

        def run_loop(loop):
            loop.run_forever()
            loop.close()

        # Dedicated event loop on a daemon thread; all executor coroutines are
        # scheduled onto it via `_run_coro_threadsafe_with_tracing`.
        self._event_loop = asyncio.new_event_loop()
        self._event_loop.set_task_factory(
            tracing.propagate_trace_context_task_factory)
        self._thread = threading.Thread(
            target=functools.partial(run_loop, self._event_loop), daemon=True)
        self._thread.start()

        def finalize(loop, thread):
            loop.call_soon_threadsafe(loop.stop)
            thread.join()

        # Stop the loop and join the thread when this service is collected.
        weakref.finalize(self, finalize, self._event_loop, self._thread)

    def _run_coro_threadsafe_with_tracing(self, coro):
        """Runs `coro` on `self._event_loop` inside the current trace spans."""
        with tracing.with_trace_context_from_rpc():
            return asyncio.run_coroutine_threadsafe(
                tracing.wrap_coroutine_in_current_trace_context(coro),
                self._event_loop)

    @property
    def executor(self):
        # Fails fast if `SetCardinalities` has not been called yet.
        if self._executor is None:
            raise RuntimeError('The executor service has not yet been configured '
                               'with cardinalities and cannot execute any '
                               'concrete requests.')
        return self._executor

    def SetCardinalities(
        self,
        request: executor_pb2.SetCardinalitiesRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.SetCardinalitiesResponse:
        """Sets the cardinality for the executor service."""
        py_typecheck.check_type(request, executor_pb2.SetCardinalitiesRequest)
        try:
            cardinalities_dict = executor_serialization.deserialize_cardinalities(
                request.cardinalities)
            self._executor = self._ex_factory.create_executor(cardinalities_dict)
            return executor_pb2.SetCardinalitiesResponse()
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.SetCardinalitiesResponse()

    def ClearExecutor(
        self,
        request: executor_pb2.ClearExecutorRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.ClearExecutorResponse:
        """Clears the service Executor-related state."""
        py_typecheck.check_type(request, executor_pb2.ClearExecutorRequest)
        self._executor = None
        self._ex_factory.clean_up_executors()
        return executor_pb2.ClearExecutorResponse()

    def CreateValue(
        self,
        request: executor_pb2.CreateValueRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateValueResponse:
        """Creates a value embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateValueRequest)
        try:
            with tracing.span('ExecutorService.CreateValue', 'deserialize_value'):
                value, value_type = (
                    executor_serialization.deserialize_value(request.value))
            value_id = str(uuid.uuid4())
            # Embedding happens asynchronously; the id is returned immediately.
            coro = self.executor.create_value(value, value_type)
            future_val = self._run_coro_threadsafe_with_tracing(coro)
            with self._lock:
                self._values[value_id] = future_val
            return executor_pb2.CreateValueResponse(
                value_ref=executor_pb2.ValueRef(id=value_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateValueResponse()

    def CreateCall(
        self,
        request: executor_pb2.CreateCallRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateCallResponse:
        """Creates a call embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateCallRequest)
        try:
            function_id = str(request.function_ref.id)
            # An absent argument serializes as an empty id string.
            argument_id = str(request.argument_ref.id)
            with self._lock:
                function_val = self._values[function_id]
                argument_val = self._values[argument_id] if argument_id else None

            async def _processing():
                # Await the (possibly still-embedding) operands, then call.
                function = await asyncio.wrap_future(function_val)
                argument = await asyncio.wrap_future(
                    argument_val) if argument_val is not None else None
                return await self.executor.create_call(function, argument)

            coro = _processing()
            result_fut = self._run_coro_threadsafe_with_tracing(coro)
            result_id = str(uuid.uuid4())
            with self._lock:
                self._values[result_id] = result_fut
            return executor_pb2.CreateCallResponse(
                value_ref=executor_pb2.ValueRef(id=result_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateCallResponse()

    def CreateStruct(
        self,
        request: executor_pb2.CreateStructRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateStructResponse:
        """Creates a struct embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateStructRequest)
        try:
            with self._lock:
                elem_futures = [self._values[e.value_ref.id] for e in request.element]
            # Unnamed elements carry an empty name string -> None.
            elem_names = [
                str(elem.name) if elem.name else None for elem in request.element
            ]

            async def _processing():
                elem_values = await asyncio.gather(
                    *[asyncio.wrap_future(v) for v in elem_futures])
                elements = list(zip(elem_names, elem_values))
                struct = structure.Struct(elements)
                return await self.executor.create_struct(struct)

            result_fut = self._run_coro_threadsafe_with_tracing(_processing())
            result_id = str(uuid.uuid4())
            with self._lock:
                self._values[result_id] = result_fut
            return executor_pb2.CreateStructResponse(
                value_ref=executor_pb2.ValueRef(id=result_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateStructResponse()

    def CreateSelection(
        self,
        request: executor_pb2.CreateSelectionRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.CreateSelectionResponse:
        """Creates a selection embedded in the executor."""
        py_typecheck.check_type(request, executor_pb2.CreateSelectionRequest)
        try:
            with self._lock:
                source_fut = self._values[request.source_ref.id]

            async def _processing():
                source = await asyncio.wrap_future(source_fut)
                return await self.executor.create_selection(source, request.index)

            result_fut = self._run_coro_threadsafe_with_tracing(_processing())
            result_id = str(uuid.uuid4())
            with self._lock:
                self._values[result_id] = result_fut
            return executor_pb2.CreateSelectionResponse(
                value_ref=executor_pb2.ValueRef(id=result_id))
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.CreateSelectionResponse()

    def Compute(
        self,
        request: executor_pb2.ComputeRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.ComputeResponse:
        """Computes a value embedded in the executor."""
        # Blocks the servicing thread (no timeout) until the value resolves.
        return self._run_coro_threadsafe_with_tracing(
            self._Compute(request, context)).result()

    async def _Compute(
        self,
        request: executor_pb2.ComputeRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.ComputeResponse:
        """Asynchronous implementation of `Compute`."""
        py_typecheck.check_type(request, executor_pb2.ComputeRequest)
        try:
            value_id = str(request.value_ref.id)
            with self._lock:
                future_val = asyncio.wrap_future(self._values[value_id])
            val = await future_val
            result_val = await val.compute()
            val_type = val.type_signature
            value_proto, _ = executor_serialization.serialize_value(
                result_val, val_type)
            return executor_pb2.ComputeResponse(value=value_proto)
        except (ValueError, TypeError) as err:
            _set_invalid_arg_err(context, err)
            return executor_pb2.ComputeResponse()

    def Dispose(
        self,
        request: executor_pb2.DisposeRequest,
        context: grpc.ServicerContext,
    ) -> executor_pb2.DisposeResponse:
        """Disposes of a value, making it no longer available for future calls."""
        py_typecheck.check_type(request, executor_pb2.DisposeRequest)
        try:
            with self._lock:
                # NOTE: an unknown id aborts the loop, leaving earlier refs in
                # this request deleted but later ones untouched.
                for value_ref in request.value_ref:
                    del self._values[value_ref.id]
        except KeyError as err:
            _set_invalid_arg_err(context, err)
        return executor_pb2.DisposeResponse()
|
en
| 0.854227
|
# Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. A service wrapper around an executor that makes it accessible over gRPC. A wrapper around a target executor that makes it into a gRPC service. # The keys in this dictionary are value ids (the same as what we return # in the gRPC responses), and the values are `concurrent.futures.Future` # instances (this may, and probably will change as we flesh out the rest # of this implementation). Runs `coro` on `self._event_loop` inside the current trace spans. Sets the cartinality for the executor service. Clears the service Executor-related state. Creates a value embedded in the executor. Creates a call embedded in the executor. Creates a struct embedded in the executor. Creates a selection embedded in the executor. Computes a value embedded in the executor. Asynchronous implemention of `Compute`. Disposes of a value, making it no longer available for future calls.
| 1.599204
| 2
|
src/tests/api/conftest.py
|
n0emis/pretix
| 0
|
6626102
|
<filename>src/tests/api/conftest.py
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from datetime import datetime
import pytest
from django.test import utils
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pytz import UTC
from rest_framework.test import APIClient
from pretix.base.models import Device, Event, Organizer, Team, User
from pretix.base.models.devices import generate_api_token
@pytest.fixture
def client():
    """Unauthenticated DRF ``APIClient`` for exercising the REST API."""
    return APIClient()


@pytest.fixture
@scopes_disabled()
def organizer():
    """A single ``Organizer`` record that most other fixtures hang off."""
    return Organizer.objects.create(name='Dummy', slug='dummy')


@pytest.fixture
@scopes_disabled()
def meta_prop(organizer):
    """An organizer-level event meta property with a default value."""
    return organizer.meta_properties.create(name="type", default="Concert")
@pytest.fixture
@scopes_disabled()
def event(organizer, meta_prop):
    """A public event with a meta value, an item meta property and a timezone."""
    e = Event.objects.create(
        organizer=organizer, name='Dummy', slug='dummy',
        date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC),
        plugins='pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf',
        is_public=True
    )
    e.meta_values.create(property=meta_prop, value="Conference")
    e.item_meta_properties.create(name="day", default="Monday")
    e.settings.timezone = 'Europe/Berlin'
    return e


@pytest.fixture
@scopes_disabled()
def event2(organizer, meta_prop):
    """A second event under the same organizer (``is_public`` left at default)."""
    e = Event.objects.create(
        organizer=organizer, name='Dummy2', slug='dummy2',
        date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC),
        plugins='pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf'
    )
    e.meta_values.create(property=meta_prop, value="Conference")
    return e


@pytest.fixture
@scopes_disabled()
def event3(organizer, meta_prop):
    """A third event, useful for list/filter test cases."""
    e = Event.objects.create(
        organizer=organizer, name='Dummy3', slug='dummy3',
        date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC),
        plugins='pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf'
    )
    e.meta_values.create(property=meta_prop, value="Conference")
    return e
@pytest.fixture
@scopes_disabled()
def team(organizer):
    """A team with a broad set of change/manage permissions.

    Note: view permissions and event access are adjusted per-fixture in
    ``user_client`` / ``token_client`` below.
    """
    return Team.objects.create(
        organizer=organizer,
        name="Test-Team",
        can_change_teams=True,
        can_manage_gift_cards=True,
        can_change_items=True,
        can_create_events=True,
        can_change_event_settings=True,
        can_change_vouchers=True,
        can_view_vouchers=True,
        can_change_orders=True,
        can_manage_customers=True,
        can_change_organizer_settings=True
    )


@pytest.fixture
@scopes_disabled()
def device(organizer):
    """An initialized device with access to all of the organizer's events."""
    return Device.objects.create(
        organizer=organizer,
        all_events=True,
        name='Foo',
        initialized=now(),
        api_token=generate_api_token()
    )
@pytest.fixture
def user():
    """A plain user account for session-authenticated API access."""
    # NOTE(review): '<EMAIL>' looks like a redaction placeholder from this
    # dump; the upstream fixture presumably uses a real dummy address.
    return User.objects.create_user('<EMAIL>', 'dummy')


@pytest.fixture
@scopes_disabled()
def user_client(client, team, user):
    """``client`` session-authenticated as ``user`` via an all-events team."""
    team.can_view_orders = True
    team.can_view_vouchers = True
    team.all_events = True
    team.save()
    team.members.add(user)
    client.force_authenticate(user=user)
    return client


@pytest.fixture
@scopes_disabled()
def token_client(client, team):
    """``client`` authenticated with a team API token."""
    team.can_view_orders = True
    team.can_view_vouchers = True
    team.all_events = True
    team.save()
    t = team.tokens.create(name='Foo')
    client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
    return client


@pytest.fixture
def device_client(client, device):
    """``client`` authenticated with a device API token."""
    client.credentials(HTTP_AUTHORIZATION='Device ' + device.api_token)
    return client
@pytest.fixture
@scopes_disabled()
def subevent(event, meta_prop):
    """Turns ``event`` into an event series and returns one date of it."""
    event.has_subevents = True
    event.save()
    se = event.subevents.create(name="Foobar", date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC))
    se.meta_values.create(property=meta_prop, value="Workshop")
    return se


@pytest.fixture
@scopes_disabled()
def subevent2(event2, meta_prop):
    """Same as ``subevent``, but attached to ``event2``."""
    event2.has_subevents = True
    event2.save()
    se = event2.subevents.create(name="Foobar", date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC))
    se.meta_values.create(property=meta_prop, value="Workshop")
    return se
@pytest.fixture
@scopes_disabled()
def taxrule(event):
    """A 19% VAT rule on ``event``."""
    return event.tax_rules.create(name="VAT", rate=19)


@pytest.fixture
@scopes_disabled()
def taxrule0(event):
    """A 0% VAT rule on ``event``."""
    return event.tax_rules.create(name="VAT", rate=0)


@pytest.fixture
@scopes_disabled()
def taxrule2(event2):
    """A 25% VAT rule on ``event2``."""
    return event2.tax_rules.create(name="VAT", rate=25)


@pytest.fixture
@scopes_disabled()
def item(event):
    """A simple product on ``event``."""
    return event.items.create(name='foo', default_price=3)


@pytest.fixture
@scopes_disabled()
def membership_type(organizer):
    """A membership type at the organizer level."""
    return organizer.membership_types.create(name='foo')
utils.setup_databases = scopes_disabled()(utils.setup_databases)
|
<filename>src/tests/api/conftest.py
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
from datetime import datetime
import pytest
from django.test import utils
from django.utils.timezone import now
from django_scopes import scopes_disabled
from pytz import UTC
from rest_framework.test import APIClient
from pretix.base.models import Device, Event, Organizer, Team, User
from pretix.base.models.devices import generate_api_token
@pytest.fixture
def client():
return APIClient()
@pytest.fixture
@scopes_disabled()
def organizer():
return Organizer.objects.create(name='Dummy', slug='dummy')
@pytest.fixture
@scopes_disabled()
def meta_prop(organizer):
return organizer.meta_properties.create(name="type", default="Concert")
@pytest.fixture
@scopes_disabled()
def event(organizer, meta_prop):
e = Event.objects.create(
organizer=organizer, name='Dummy', slug='dummy',
date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC),
plugins='pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf',
is_public=True
)
e.meta_values.create(property=meta_prop, value="Conference")
e.item_meta_properties.create(name="day", default="Monday")
e.settings.timezone = 'Europe/Berlin'
return e
@pytest.fixture
@scopes_disabled()
def event2(organizer, meta_prop):
e = Event.objects.create(
organizer=organizer, name='Dummy2', slug='dummy2',
date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC),
plugins='pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf'
)
e.meta_values.create(property=meta_prop, value="Conference")
return e
@pytest.fixture
@scopes_disabled()
def event3(organizer, meta_prop):
e = Event.objects.create(
organizer=organizer, name='Dummy3', slug='dummy3',
date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC),
plugins='pretix.plugins.banktransfer,pretix.plugins.ticketoutputpdf'
)
e.meta_values.create(property=meta_prop, value="Conference")
return e
@pytest.fixture
@scopes_disabled()
def team(organizer):
return Team.objects.create(
organizer=organizer,
name="Test-Team",
can_change_teams=True,
can_manage_gift_cards=True,
can_change_items=True,
can_create_events=True,
can_change_event_settings=True,
can_change_vouchers=True,
can_view_vouchers=True,
can_change_orders=True,
can_manage_customers=True,
can_change_organizer_settings=True
)
@pytest.fixture
@scopes_disabled()
def device(organizer):
return Device.objects.create(
organizer=organizer,
all_events=True,
name='Foo',
initialized=now(),
api_token=generate_api_token()
)
@pytest.fixture
def user():
return User.objects.create_user('<EMAIL>', 'dummy')
@pytest.fixture
@scopes_disabled()
def user_client(client, team, user):
team.can_view_orders = True
team.can_view_vouchers = True
team.all_events = True
team.save()
team.members.add(user)
client.force_authenticate(user=user)
return client
@pytest.fixture
@scopes_disabled()
def token_client(client, team):
team.can_view_orders = True
team.can_view_vouchers = True
team.all_events = True
team.save()
t = team.tokens.create(name='Foo')
client.credentials(HTTP_AUTHORIZATION='Token ' + t.token)
return client
@pytest.fixture
def device_client(client, device):
client.credentials(HTTP_AUTHORIZATION='Device ' + device.api_token)
return client
@pytest.fixture
@scopes_disabled()
def subevent(event, meta_prop):
event.has_subevents = True
event.save()
se = event.subevents.create(name="Foobar", date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC))
se.meta_values.create(property=meta_prop, value="Workshop")
return se
@pytest.fixture
@scopes_disabled()
def subevent2(event2, meta_prop):
event2.has_subevents = True
event2.save()
se = event2.subevents.create(name="Foobar", date_from=datetime(2017, 12, 27, 10, 0, 0, tzinfo=UTC))
se.meta_values.create(property=meta_prop, value="Workshop")
return se
@pytest.fixture
@scopes_disabled()
def taxrule(event):
return event.tax_rules.create(name="VAT", rate=19)
@pytest.fixture
@scopes_disabled()
def taxrule0(event):
return event.tax_rules.create(name="VAT", rate=0)
@pytest.fixture
@scopes_disabled()
def taxrule2(event2):
return event2.tax_rules.create(name="VAT", rate=25)
@pytest.fixture
@scopes_disabled()
def item(event):
return event.items.create(name='foo', default_price=3)
@pytest.fixture
@scopes_disabled()
def membership_type(organizer):
return organizer.membership_types.create(name='foo')
utils.setup_databases = scopes_disabled()(utils.setup_databases)
|
en
| 0.902868
|
# # This file is part of pretix (Community Edition). # # Copyright (C) 2014-2020 <NAME> and contributors # Copyright (C) 2020-2021 rami.io GmbH and contributors # # This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General # Public License as published by the Free Software Foundation in version 3 of the License. # # ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are # applicable granting you additional permissions and placing additional restrictions on your usage of this software. # Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive # this file, see <https://pretix.eu/about/en/license>. # # This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied # warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License along with this program. If not, see # <https://www.gnu.org/licenses/>. # # This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of # the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>. # # This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A # full history of changes and contributors is available at <https://github.com/pretix/pretix>. # # This file contains Apache-licensed contributions copyrighted by: <NAME> # # Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under the License.
| 1.436081
| 1
|
pythonProject/desafio082.py
|
maisalobao/Python
| 1
|
6626103
|
# Reads integers until the user answers 'N', then splits them into
# even and odd lists.
num = list()
pares = list()
impares = list()
while True:
    num.append(int(input('Digite um valor:')))
    escolha = str(input('Deseja continuar?')).upper().strip()
    # Bug fix: the original indexed [0] before checking, so pressing Enter
    # (empty answer) crashed with IndexError. Treat an empty answer as "yes".
    if escolha and escolha[0] == 'N':
        break
print(f'Os valores da lista são: {num}')
# The index from enumerate() was never used; iterate values directly.
for v in num:
    if v % 2 == 0:
        pares.append(v)
    else:
        impares.append(v)
print(f'Apenas os pares: {pares}')
print(f'Apenas os ímpares: {impares}')
|
num = list()
pares = list()
impares = list()
while True:
num.append(int(input('Digite um valor:')))
escolha = str(input('Deseja continuar?')).upper().strip()[0]
if escolha in 'N':
break
print(f'Os valores da lista são: {num}')
for i,v in enumerate(num):
if v % 2 == 0:
pares.append(v)
elif v % 2 == 1:
impares.append(v)
print(f'Apenas os pares: {pares}')
print(f'Apenas os ímpares: {impares}')
|
none
| 1
| 3.864744
| 4
|
|
exoctk/tests/test_atmospheric_retrievals.py
|
bourque/exoctk
| 21
|
6626104
|
<reponame>bourque/exoctk
#! /usr/bin/env python
"""Tests for the ``atmopshric_retrievals`` package.
Authors
-------
<NAME>
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to stdout):
::
pytest -s test_atmospheric_retrievals.py
"""
import numpy as np
import os
from platon.constants import R_sun, R_jup, M_jup
import pytest
from ..atmospheric_retrievals.aws_tools import get_config
from ..atmospheric_retrievals.platon_wrapper import _apply_factors
from ..atmospheric_retrievals.platon_wrapper import PlatonWrapper
ON_GITHUB_ACTIONS = os.path.expanduser('~') in ['/home/runner', '/Users/runner']
def initialize_platon_wrapper_object():
    """Return a ``PlatonWrapper`` object for use by the tests within
    this module.

    The ``PlatonWrapper`` object contains basic attributes for a simple
    example to test with.

    Returns
    -------
    pw : obj
        The ``PlatonWrapper`` object
    """
    # Define the fit parameters
    # Stellar/planetary values for a hot-Jupiter-like example system; units
    # follow platon conventions (Rs in R_sun, Mp in M_jup, Rp in R_jup, T in K).
    params = {
        'Rs': 1.19,  # Required
        'Mp': 0.73,  # Required
        'Rp': 1.4,  # Required
        'T': 1200.0,  # Required
        'logZ': 0,  # Optional
        'CO_ratio': 0.53,  # Optional
        'log_cloudtop_P': 4,  # Optional
        'log_scatt_factor': 0,  # Optional
        'scatt_slope': 4,  # Optional
        'error_multiple': 1,  # Optional
        'T_star': 6091}  # Optional
    R_guess = 1.4 * R_jup
    T_guess = 1200
    # Initialize the object and set the parameters
    pw = PlatonWrapper()
    pw.set_parameters(params)
    # Priors: Gaussian on the well-constrained stellar values, uniform
    # (bracketing the guess) on the planetary/retrieved quantities.
    pw.fit_info.add_gaussian_fit_param('Rs', 0.02*R_sun)
    pw.fit_info.add_gaussian_fit_param('Mp', 0.04*M_jup)
    pw.fit_info.add_uniform_fit_param('Rp', 0.9*R_guess, 1.1*R_guess)
    pw.fit_info.add_uniform_fit_param('T', 0.5*T_guess, 1.5*T_guess)
    pw.fit_info.add_uniform_fit_param("log_scatt_factor", 0, 1)
    pw.fit_info.add_uniform_fit_param("logZ", -1, 3)
    pw.fit_info.add_uniform_fit_param("log_cloudtop_P", -0.99, 5)
    pw.fit_info.add_uniform_fit_param("error_multiple", 0.5, 5)
    # Define bins, depths, and errors
    # Two wavelength bins (in meters) with transit depths/errors in ppm,
    # scaled to fractions — a deliberately tiny dataset so tests run fast.
    pw.wavelengths = 1e-6*np.array([1.119, 1.1387])
    pw.bins = [[w-0.0095e-6, w+0.0095e-6] for w in pw.wavelengths]
    pw.depths = 1e-6 * np.array([14512.7, 14546.5])
    pw.errors = 1e-6 * np.array([50.6, 35.5])
    return pw
def test_apply_factors():
    """Test the ``_apply_factors()`` function in ``platon_wrapper``
    module.
    """
    raw_params = {'Rs': 1.19, 'Mp': 0.73, 'Rp': 1.4}
    converted = _apply_factors(raw_params)
    assert isinstance(converted, dict)
    # Expected values after scaling by the solar/Jovian physical constants.
    expected = {'Rs': 827883000.0, 'Mp': 1.3856787e+27, 'Rp': 100088800.0}
    for key, value in expected.items():
        assert converted[key] == value
def test_get_config():
    """Tests the ``get_config`` function in ``aws_tools`` module."""
    settings = get_config()
    assert isinstance(settings, dict)
    # Both AWS connection settings must be present in the config.
    for required_key in ('ec2_id', 'ssh_file'):
        assert required_key in settings
@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Test takes too long on Travis server. Try testing locally.')
def test_retrieve_emcee():
    """Test that the ``emcee`` method of ``platon_wrapper``
    produces results for a small example.
    """
    # NOTE(review): the skip reason still says "Travis" although the guard
    # checks for GitHub Actions runners — wording is stale, behavior is fine.
    pw = initialize_platon_wrapper_object()
    pw.retrieve('emcee')
    assert pw.result
@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Test takes too long on Travis server. Try testing locally.')
def test_retrieve_multinest():
    """Test that the ``multinest`` method of ``platon_wrapper``
    produces results for a small example.
    """
    # NOTE(review): skip reason mentions "Travis" but the guard detects
    # GitHub Actions — stale wording only.
    pw = initialize_platon_wrapper_object()
    pw.retrieve('multinest')
    assert pw.result
|
#! /usr/bin/env python
"""Tests for the ``atmopshric_retrievals`` package.
Authors
-------
<NAME>
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to stdout):
::
pytest -s test_atmospheric_retrievals.py
"""
import numpy as np
import os
from platon.constants import R_sun, R_jup, M_jup
import pytest
from ..atmospheric_retrievals.aws_tools import get_config
from ..atmospheric_retrievals.platon_wrapper import _apply_factors
from ..atmospheric_retrievals.platon_wrapper import PlatonWrapper
ON_GITHUB_ACTIONS = os.path.expanduser('~') in ['/home/runner', '/Users/runner']
def initialize_platon_wrapper_object():
"""Return a ``PlatonWrapper`` object for use by the tests within
this module.
The ``PlatonWrapper`` object contains basic attributes for a simple
example to test with.
Returns
-------
pw : obj
The ``PlatonWrapper`` object
"""
# Define the fit parameters
params = {
'Rs': 1.19, # Required
'Mp': 0.73, # Required
'Rp': 1.4, # Required
'T': 1200.0, # Required
'logZ': 0, # Optional
'CO_ratio': 0.53, # Optional
'log_cloudtop_P': 4, # Optional
'log_scatt_factor': 0, # Optional
'scatt_slope': 4, # Optional
'error_multiple': 1, # Optional
'T_star': 6091} # Optional
R_guess = 1.4 * R_jup
T_guess = 1200
# Initialize the object and set the parameters
pw = PlatonWrapper()
pw.set_parameters(params)
pw.fit_info.add_gaussian_fit_param('Rs', 0.02*R_sun)
pw.fit_info.add_gaussian_fit_param('Mp', 0.04*M_jup)
pw.fit_info.add_uniform_fit_param('Rp', 0.9*R_guess, 1.1*R_guess)
pw.fit_info.add_uniform_fit_param('T', 0.5*T_guess, 1.5*T_guess)
pw.fit_info.add_uniform_fit_param("log_scatt_factor", 0, 1)
pw.fit_info.add_uniform_fit_param("logZ", -1, 3)
pw.fit_info.add_uniform_fit_param("log_cloudtop_P", -0.99, 5)
pw.fit_info.add_uniform_fit_param("error_multiple", 0.5, 5)
# Define bins, depths, and errors
pw.wavelengths = 1e-6*np.array([1.119, 1.1387])
pw.bins = [[w-0.0095e-6, w+0.0095e-6] for w in pw.wavelengths]
pw.depths = 1e-6 * np.array([14512.7, 14546.5])
pw.errors = 1e-6 * np.array([50.6, 35.5])
return pw
def test_apply_factors():
"""Test the ``_apply_factors()`` function in ``platon_wrapper``
module.
"""
params = {'Rs': 1.19, 'Mp': 0.73, 'Rp': 1.4}
params = _apply_factors(params)
assert isinstance(params, dict)
assert params['Rs'] == 827883000.0
assert params['Mp'] == 1.3856787e+27
assert params['Rp'] == 100088800.0
def test_get_config():
"""Tests the ``get_config`` function in ``aws_tools`` module."""
settings = get_config()
assert isinstance(settings, dict)
assert 'ec2_id' in settings
assert 'ssh_file' in settings
@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Test takes too long on Travis server. Try testing locally.')
def test_retrieve_emcee():
"""Test that the ``emcee`` method of ``platon_wrapper``
produces results for a small example.
"""
pw = initialize_platon_wrapper_object()
pw.retrieve('emcee')
assert pw.result
@pytest.mark.skipif(ON_GITHUB_ACTIONS, reason='Test takes too long on Travis server. Try testing locally.')
def test_retrieve_multinest():
"""Test that the ``multinest`` method of ``platon_wrapper``
produces results for a small example.
"""
pw = initialize_platon_wrapper_object()
pw.retrieve('multinest')
assert pw.result
|
en
| 0.517606
|
#! /usr/bin/env python Tests for the ``atmopshric_retrievals`` package. Authors ------- <NAME> Use --- These tests can be run via the command line (omit the ``-s`` to suppress verbose output to stdout): :: pytest -s test_atmospheric_retrievals.py Return a ``PlatonWrapper`` object for use by the tests within this module. The ``PlatonWrapper`` object contains basic attributes for a simple example to test with. Returns ------- pw : obj The ``PlatonWrapper`` object # Define the fit parameters # Required # Required # Required # Required # Optional # Optional # Optional # Optional # Optional # Optional # Optional # Initialize the object and set the parameters # Define bins, depths, and errors Test the ``_apply_factors()`` function in ``platon_wrapper`` module. Tests the ``get_config`` function in ``aws_tools`` module. Test that the ``emcee`` method of ``platon_wrapper`` produces results for a small example. Test that the ``multinest`` method of ``platon_wrapper`` produces results for a small example.
| 2.271282
| 2
|
clairvoyante/callVarBam.py
|
strixy16/Clairvoyante
| 171
|
6626105
|
<gh_stars>100-1000
import os
import sys
import argparse
import param
import shlex
import subprocess
import multiprocessing
import signal
import random
import time
class InstancesClass(object):
    """Holds handles to the three pipeline subprocesses so the SIGALRM
    watchdog can poll them as a unit."""

    def __init__(self):
        # Populated by Run() once the subprocesses have been spawned.
        self.EVCInstance = None
        self.CTInstance = None
        self.CVInstance = None

    def poll(self):
        """Refresh the returncode of every child process."""
        for child in (self.EVCInstance, self.CTInstance, self.CVInstance):
            child.poll()
c = InstancesClass();
def CheckRtCode(signum, frame):
    """SIGALRM handler acting as a watchdog over the pipeline subprocesses.

    Polls all three children; if any has exited with a non-zero status,
    kills the survivors and aborts the whole program. While at least one is
    still running (returncode is None), re-arms the alarm for another check.
    """
    c.poll()
    #print >> sys.stderr, c.EVCInstance.returncode, c.CTInstance.returncode, c.CVInstance.returncode
    if c.EVCInstance.returncode != None and c.EVCInstance.returncode != 0:
        c.CTInstance.kill(); c.CVInstance.kill()
        sys.exit("ExtractVariantCandidates.py or GetTruth.py exited with exceptions. Exiting...");
    if c.CTInstance.returncode != None and c.CTInstance.returncode != 0:
        c.EVCInstance.kill(); c.CVInstance.kill()
        sys.exit("CreateTensors.py exited with exceptions. Exiting...");
    if c.CVInstance.returncode != None and c.CVInstance.returncode != 0:
        c.EVCInstance.kill(); c.CTInstance.kill()
        sys.exit("callVar.py exited with exceptions. Exiting...");
    if c.EVCInstance.returncode == None or c.CTInstance.returncode == None or c.CVInstance.returncode == None:
        # Re-arm the watchdog while any child is still running.
        signal.alarm(5)
def CheckFileExist(fn, sfx=""):
    """Abort with an error message unless ``fn + sfx`` is an existing file;
    return the absolute path of ``fn`` (without the suffix) otherwise.

    ``sfx`` lets callers probe companion files, e.g. a TensorFlow
    checkpoint's ``.meta`` file while returning the checkpoint prefix.
    """
    target = fn + sfx
    if os.path.isfile(target):
        return os.path.abspath(fn)
    sys.exit("Error: %s not found" % (target))
def CheckCmdExist(cmd):
    """Abort unless *cmd* resolves to an executable on PATH.

    Returns the command name unchanged so calls can be used inline, e.g.
    ``samtoolsBin = CheckCmdExist(args.samtools)``.
    """
    try:
        subprocess.check_output("which %s" % (cmd), shell=True)
    except subprocess.CalledProcessError:
        # `which` signals "not found" via a non-zero exit status. The
        # original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit, which could mask a user abort.
        sys.exit("Error: %s executable not found" % (cmd))
    return cmd
def Run(args):
    """Wire up and supervise the three-stage variant-calling pipeline.

    Stage 1 (candidates): ExtractVariantCandidates.py — or GetTruth.py when
    a candidate VCF is supplied — writes candidate sites to stdout.
    Stage 2 (tensors):    CreateTensor.py turns candidates into tensors.
    Stage 3 (calling):    callVar.py runs the Clairvoyante model.
    The stages are connected by OS pipes and watched via a SIGALRM handler.
    """
    basedir = os.path.dirname(__file__)
    # Resolve all helper scripts and external binaries up front so we fail
    # fast before spawning anything.
    EVCBin = CheckFileExist(basedir + "/../dataPrepScripts/ExtractVariantCandidates.py")
    GTBin = CheckFileExist(basedir + "/../dataPrepScripts/GetTruth.py")
    CTBin = CheckFileExist(basedir + "/../dataPrepScripts/CreateTensor.py")
    CVBin = CheckFileExist(basedir + "/callVar.py")
    pypyBin = CheckCmdExist(args.pypy)
    samtoolsBin = CheckCmdExist(args.samtools)
    chkpnt_fn = CheckFileExist(args.chkpnt_fn, sfx=".meta")
    bam_fn = CheckFileExist(args.bam_fn)
    ref_fn = CheckFileExist(args.ref_fn)
    # Optional arguments are turned into ready-to-splice CLI fragments
    # (empty string when absent).
    if args.bed_fn == None:
        bed_fn = ""
    else:
        bed_fn = CheckFileExist(args.bed_fn)
        bed_fn = "--bed_fn %s" % (bed_fn)
    vcf_fn = None
    if args.vcf_fn != None:
        vcf_fn = CheckFileExist(args.vcf_fn)
    call_fn = args.call_fn
    threshold = args.threshold
    minCoverage = args.minCoverage
    sampleName = args.sampleName
    ctgName = args.ctgName
    if ctgName == None:
        sys.exit("--ctgName must be specified. You can call variants on multiple chromosomes simultaneously.")
    if args.considerleftedge:
        considerleftedge = "--considerleftedge"
    else:
        considerleftedge = ""
    if args.qual:
        qual = "--qual %d" % (args.qual)
    else:
        qual = ""
    if args.ctgStart != None and args.ctgEnd != None and int(args.ctgStart) <= int(args.ctgEnd):
        ctgRange = "--ctgStart %s --ctgEnd %s" % (args.ctgStart, args.ctgEnd)
    else:
        ctgRange = ""
    dcov = args.dcov
    # Pin callVar.py to a random subset of CPUs via taskset (when available)
    # to keep TensorFlow from oversubscribing the machine.
    maxCpus = multiprocessing.cpu_count()
    if args.threads == None: numCpus = multiprocessing.cpu_count()
    else: numCpus = args.threads if args.threads < multiprocessing.cpu_count() else multiprocessing.cpu_count()
    cpuSet = ",".join(str(x) for x in random.sample(xrange(0, maxCpus), numCpus))
    taskSet = "taskset -c %s" % cpuSet
    try:
        subprocess.check_output("which %s" % ("taskset"), shell=True)
    except:
        # taskset is unavailable (e.g. macOS); run unpinned.
        taskSet = ""
    if args.delay > 0:
        # Random start delay so simultaneously launched jobs do not all
        # initialize TensorFlow (and its thread pool) at the same moment.
        delay = random.randrange(0, args.delay)
        print >> sys.stderr, "Delay %d seconds before starting variant calling ..." % (delay)
        time.sleep(delay)
    try:
        if vcf_fn == None:
            # No candidate VCF: scan the BAM for candidate variant sites.
            c.EVCInstance = subprocess.Popen(\
                shlex.split("%s %s --bam_fn %s --ref_fn %s %s --ctgName %s %s --threshold %s --minCoverage %s --samtools %s" %\
                (pypyBin, EVCBin, bam_fn, ref_fn, bed_fn, ctgName, ctgRange, threshold, minCoverage, samtoolsBin) ),\
                stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=8388608)
        else:
            # Candidate VCF given: call variants exactly at those sites.
            c.EVCInstance = subprocess.Popen(\
                shlex.split("%s %s --vcf_fn %s --ctgName %s %s" %\
                (pypyBin, GTBin, vcf_fn, ctgName, ctgRange) ),\
                stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=8388608)
        c.CTInstance = subprocess.Popen(\
            shlex.split("%s %s --bam_fn %s --ref_fn %s --ctgName %s %s %s --samtools %s --dcov %d" %\
            (pypyBin, CTBin, bam_fn, ref_fn, ctgName, ctgRange, considerleftedge, samtoolsBin, dcov) ),\
            stdin=c.EVCInstance.stdout, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=8388608)
        c.CVInstance = subprocess.Popen(\
            shlex.split("%s python %s --chkpnt_fn %s --call_fn %s --sampleName %s --threads %d --ref_fn %s %s" %\
            (taskSet, CVBin, chkpnt_fn, call_fn, sampleName, numCpus, ref_fn, qual) ),\
            stdin=c.CTInstance.stdout, stdout=sys.stderr, stderr=sys.stderr, bufsize=8388608)
    except Exception as e:
        print >> sys.stderr, e
        sys.exit("Failed to start required processes. Exiting...")
    # Watchdog: check child return codes every few seconds via SIGALRM.
    signal.signal(signal.SIGALRM, CheckRtCode)
    signal.alarm(2)
    # Wait for the final stage, then close our pipe read ends so the
    # upstream processes see EOF/SIGPIPE and can exit.
    c.CVInstance.wait()
    c.CTInstance.stdout.close()
    c.CTInstance.wait()
    c.EVCInstance.stdout.close()
    c.EVCInstance.wait()
def main():
    """Parse the command line and launch the variant-calling pipeline."""
    parser = argparse.ArgumentParser(
        description="Call variants using a trained Clairvoyante model and a BAM file" )
    # Model and input files.
    parser.add_argument('--chkpnt_fn', type=str, default = None,
            help="Input a Clairvoyante model")
    parser.add_argument('--ref_fn', type=str, default="ref.fa",
            help="Reference fasta file input, default: %(default)s")
    parser.add_argument('--bed_fn', type=str, default=None,
            help="Call variant only in these regions, works in intersection with ctgName, ctgStart and ctgEnd, optional, default: as defined by ctgName, ctgStart and ctgEnd")
    parser.add_argument('--bam_fn', type=str, default="bam.bam",
            help="BAM file input, default: %(default)s")
    parser.add_argument('--call_fn', type=str, default = None,
            help="Output variant predictions")
    parser.add_argument('--vcf_fn', type=str, default=None,
            help="Candidate sites VCF file input, if provided, variants will only be called at the sites in the VCF file, default: %(default)s")
    # Candidate-selection thresholds.
    parser.add_argument('--threshold', type=float, default=0.125,
            help="Minimum allele frequence of the 1st non-reference allele for a site to be considered as a condidate site, default: %(default)f")
    parser.add_argument('--minCoverage', type=float, default=4,
            help="Minimum coverage required to call a variant, default: %(default)d")
    parser.add_argument('--qual', type=int, default = None,
            help="If set, variant with equal or higher quality will be marked PASS, or LowQual otherwise, optional")
    parser.add_argument('--sampleName', type=str, default = "SAMPLE",
            help="Define the sample name to be shown in the VCF file")
    # Region selection (a single contig, optionally a sub-range).
    # NOTE(review): "1-bsae" below is a typo for "1-base" in the upstream
    # help text; left unchanged here since help strings are runtime output.
    parser.add_argument('--ctgName', type=str, default=None,
            help="The name of sequence to be processed, default: %(default)s")
    parser.add_argument('--ctgStart', type=int, default=None,
            help="The 1-bsae starting position of the sequence to be processed")
    parser.add_argument('--ctgEnd', type=int, default=None,
            help="The inclusive ending position of the sequence to be processed")
    parser.add_argument('--considerleftedge', type=param.str2bool, nargs='?', const=True, default=True,
            help="Count the left-most base-pairs of a read for coverage even if the starting position of a read is after the starting position of a tensor, default: %(default)s")
    parser.add_argument('--dcov', type=int, default=250,
            help="Cap depth per position at %(default)s")
    # External executables.
    parser.add_argument('--samtools', type=str, default="samtools",
            help="Path to the 'samtools', default: %(default)s")
    parser.add_argument('--pypy', type=str, default="pypy",
            help="Path to the 'pypy', default: %(default)s")
    # Model-architecture selection and resource tuning.
    parser.add_argument('--v3', type=param.str2bool, nargs='?', const=True, default = True,
            help="Use Clairvoyante version 3")
    parser.add_argument('--v2', type=param.str2bool, nargs='?', const=True, default = False,
            help="Use Clairvoyante version 2")
    parser.add_argument('--slim', type=param.str2bool, nargs='?', const=True, default = False,
            help="Train using the slim version of Clairvoyante, optional")
    parser.add_argument('--threads', type=int, default = None,
            help="Number of threads, optional")
    parser.add_argument('--delay', type=int, default = 10,
            help="Wait a short while for no more than %(default)s to start the job. This is to avoid starting multiple jobs simultaneously that might use up the maximum number of threads allowed, because Tensorflow will create more threads than needed at the beginning of running the program.")
    args = parser.parse_args()
    # With no arguments at all, print usage instead of running with defaults.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        sys.exit(1)
    Run(args)


if __name__ == "__main__":
    main()
|
import os
import sys
import argparse
import param
import shlex
import subprocess
import multiprocessing
import signal
import random
import time
class InstancesClass(object):
    """Container for the three pipeline subprocess handles.

    EVCInstance: candidate extractor (or truth getter), CTInstance: tensor
    creator, CVInstance: variant caller.  poll() refreshes every child's
    returncode in one call.
    """
    def __init__(self):
        self.EVCInstance = None
        self.CTInstance = None
        self.CVInstance = None
    def poll(self):
        # Refresh .returncode on each child, in pipeline order.
        for ChildProcess in (self.EVCInstance, self.CTInstance, self.CVInstance):
            ChildProcess.poll()
c = InstancesClass()
def CheckRtCode(signum, frame):
    """SIGALRM handler: poll the three pipeline children and abort the whole
    pipeline if any of them exited with a non-zero status.

    signum/frame are the standard signal-handler arguments (unused here).
    Re-arms the alarm for 5 seconds while at least one child is running.
    Relies on the module-global `c` (InstancesClass).
    """
    c.poll()
    if c.EVCInstance.returncode is not None and c.EVCInstance.returncode != 0:
        # Candidate extraction failed: tear down the downstream consumers too.
        c.CTInstance.kill(); c.CVInstance.kill()
        sys.exit("ExtractVariantCandidates.py or GetTruth.py exited with exceptions. Exiting...")
    if c.CTInstance.returncode is not None and c.CTInstance.returncode != 0:
        c.EVCInstance.kill(); c.CVInstance.kill()
        sys.exit("CreateTensors.py exited with exceptions. Exiting...")
    if c.CVInstance.returncode is not None and c.CVInstance.returncode != 0:
        c.EVCInstance.kill(); c.CTInstance.kill()
        sys.exit("callVar.py exited with exceptions. Exiting...")
    if c.EVCInstance.returncode is None or c.CTInstance.returncode is None or c.CVInstance.returncode is None:
        # At least one child still running: check again in 5 seconds.
        signal.alarm(5)
def CheckFileExist(fn, sfx=""):
    """Abort with an error message unless fn+sfx is an existing regular file;
    otherwise return the absolute path of fn (without the suffix)."""
    if os.path.isfile(fn + sfx):
        return os.path.abspath(fn)
    sys.exit("Error: %s not found" % (fn + sfx))
def CheckCmdExist(cmd):
    """Abort unless `cmd` resolves on PATH (checked via `which`);
    return cmd unchanged on success.

    Only the expected failure (`which` exiting non-zero) is treated as
    "not found"; the original bare `except` also swallowed unrelated
    exceptions such as KeyboardInterrupt.
    """
    try:
        subprocess.check_output("which %s" % (cmd), shell=True)
    except subprocess.CalledProcessError:
        sys.exit("Error: %s executable not found" % (cmd))
    return cmd
def Run(args):
    # Assemble and supervise the three-stage calling pipeline:
    #   ExtractVariantCandidates.py (or GetTruth.py when --vcf_fn is given)
    #     | CreateTensor.py | callVar.py
    # Children are chained via OS pipes; a SIGALRM poller (CheckRtCode)
    # aborts everything if any stage dies with a non-zero status.
    basedir = os.path.dirname(__file__)
    # Resolve helper scripts relative to this file and verify they exist.
    EVCBin = CheckFileExist(basedir + "/../dataPrepScripts/ExtractVariantCandidates.py")
    GTBin = CheckFileExist(basedir + "/../dataPrepScripts/GetTruth.py")
    CTBin = CheckFileExist(basedir + "/../dataPrepScripts/CreateTensor.py")
    CVBin = CheckFileExist(basedir + "/callVar.py")
    pypyBin = CheckCmdExist(args.pypy)
    samtoolsBin = CheckCmdExist(args.samtools)
    # Model checkpoints are identified by their ".meta" sidecar file.
    chkpnt_fn = CheckFileExist(args.chkpnt_fn, sfx=".meta")
    bam_fn = CheckFileExist(args.bam_fn)
    ref_fn = CheckFileExist(args.ref_fn)
    if args.bed_fn == None:
        bed_fn = ""
    else:
        bed_fn = CheckFileExist(args.bed_fn)
        bed_fn = "--bed_fn %s" % (bed_fn)
    vcf_fn = None
    if args.vcf_fn != None:
        vcf_fn = CheckFileExist(args.vcf_fn)
    call_fn = args.call_fn
    threshold = args.threshold
    minCoverage = args.minCoverage
    sampleName = args.sampleName
    ctgName = args.ctgName
    if ctgName == None:
        sys.exit("--ctgName must be specified. You can call variants on multiple chromosomes simultaneously.")
    # Translate boolean/optional options into command-line fragments
    # (empty string when the option is off/unset).
    if args.considerleftedge:
        considerleftedge = "--considerleftedge"
    else:
        considerleftedge = ""
    if args.qual:
        qual = "--qual %d" % (args.qual)
    else:
        qual = ""
    if args.ctgStart != None and args.ctgEnd != None and int(args.ctgStart) <= int(args.ctgEnd):
        ctgRange = "--ctgStart %s --ctgEnd %s" % (args.ctgStart, args.ctgEnd)
    else:
        ctgRange = ""
    dcov = args.dcov
    # Pin the caller to a random subset of CPUs via taskset (if available)
    # so concurrent jobs do not oversubscribe the machine.
    maxCpus = multiprocessing.cpu_count()
    if args.threads == None: numCpus = multiprocessing.cpu_count()
    else: numCpus = args.threads if args.threads < multiprocessing.cpu_count() else multiprocessing.cpu_count()
    cpuSet = ",".join(str(x) for x in random.sample(xrange(0, maxCpus), numCpus))
    taskSet = "taskset -c %s" % cpuSet
    try:
        subprocess.check_output("which %s" % ("taskset"), shell=True)
    except:
        # No taskset on this host: run the caller unpinned.
        taskSet = ""
    # Optional random start delay to stagger simultaneously launched jobs.
    if args.delay > 0:
        delay = random.randrange(0, args.delay)
        print >> sys.stderr, "Delay %d seconds before starting variant calling ..." % (delay)
        time.sleep(delay)
    try:
        if vcf_fn == None:
            # Stage 1a: scan the BAM for candidate variant sites.
            c.EVCInstance = subprocess.Popen(\
                shlex.split("%s %s --bam_fn %s --ref_fn %s %s --ctgName %s %s --threshold %s --minCoverage %s --samtools %s" %\
                            (pypyBin, EVCBin, bam_fn, ref_fn, bed_fn, ctgName, ctgRange, threshold, minCoverage, samtoolsBin) ),\
                            stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=8388608)
        else:
            # Stage 1b: take candidate sites from the provided VCF instead.
            c.EVCInstance = subprocess.Popen(\
                shlex.split("%s %s --vcf_fn %s --ctgName %s %s" %\
                            (pypyBin, GTBin, vcf_fn, ctgName, ctgRange) ),\
                            stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=8388608)
        # Stage 2: turn each candidate site into an input tensor.
        c.CTInstance = subprocess.Popen(\
            shlex.split("%s %s --bam_fn %s --ref_fn %s --ctgName %s %s %s --samtools %s --dcov %d" %\
                        (pypyBin, CTBin, bam_fn, ref_fn, ctgName, ctgRange, considerleftedge, samtoolsBin, dcov) ),\
                        stdin=c.EVCInstance.stdout, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=8388608)
        # Stage 3: run the network and emit variant calls.
        c.CVInstance = subprocess.Popen(\
            shlex.split("%s python %s --chkpnt_fn %s --call_fn %s --sampleName %s --threads %d --ref_fn %s %s" %\
                        (taskSet, CVBin, chkpnt_fn, call_fn, sampleName, numCpus, ref_fn, qual) ),\
                        stdin=c.CTInstance.stdout, stdout=sys.stderr, stderr=sys.stderr, bufsize=8388608)
    except Exception as e:
        print >> sys.stderr, e
        sys.exit("Failed to start required processes. Exiting...")
    # Poll the children shortly after launch; CheckRtCode re-arms the alarm
    # every 5 seconds while any child is still running.
    signal.signal(signal.SIGALRM, CheckRtCode)
    signal.alarm(2)
    # Wait for the sink first, then close the intermediate pipe ends so the
    # upstream producers see EOF/SIGPIPE and can terminate.
    c.CVInstance.wait()
    c.CTInstance.stdout.close()
    c.CTInstance.wait()
    c.EVCInstance.stdout.close()
    c.EVCInstance.wait()
def main():
    """Build the command-line interface and dispatch to Run().

    Prints the usage text and exits with status 1 when invoked without
    any argument.
    """
    parser = argparse.ArgumentParser(
            description="Call variants using a trained Clairvoyante model and a BAM file" )
    parser.add_argument('--chkpnt_fn', type=str, default = None,
            help="Input a Clairvoyante model")
    parser.add_argument('--ref_fn', type=str, default="ref.fa",
            help="Reference fasta file input, default: %(default)s")
    parser.add_argument('--bed_fn', type=str, default=None,
            help="Call variant only in these regions, works in intersection with ctgName, ctgStart and ctgEnd, optional, default: as defined by ctgName, ctgStart and ctgEnd")
    parser.add_argument('--bam_fn', type=str, default="bam.bam",
            help="BAM file input, default: %(default)s")
    parser.add_argument('--call_fn', type=str, default = None,
            help="Output variant predictions")
    parser.add_argument('--vcf_fn', type=str, default=None,
            help="Candidate sites VCF file input, if provided, variants will only be called at the sites in the VCF file, default: %(default)s")
    parser.add_argument('--threshold', type=float, default=0.125,
            help="Minimum allele frequence of the 1st non-reference allele for a site to be considered as a condidate site, default: %(default)f")
    parser.add_argument('--minCoverage', type=float, default=4,
            help="Minimum coverage required to call a variant, default: %(default)d")
    parser.add_argument('--qual', type=int, default = None,
            help="If set, variant with equal or higher quality will be marked PASS, or LowQual otherwise, optional")
    parser.add_argument('--sampleName', type=str, default = "SAMPLE",
            help="Define the sample name to be shown in the VCF file")
    parser.add_argument('--ctgName', type=str, default=None,
            help="The name of sequence to be processed, default: %(default)s")
    parser.add_argument('--ctgStart', type=int, default=None,
            help="The 1-bsae starting position of the sequence to be processed")
    parser.add_argument('--ctgEnd', type=int, default=None,
            help="The inclusive ending position of the sequence to be processed")
    parser.add_argument('--considerleftedge', type=param.str2bool, nargs='?', const=True, default=True,
            help="Count the left-most base-pairs of a read for coverage even if the starting position of a read is after the starting position of a tensor, default: %(default)s")
    parser.add_argument('--dcov', type=int, default=250,
            help="Cap depth per position at %(default)s")
    parser.add_argument('--samtools', type=str, default="samtools",
            help="Path to the 'samtools', default: %(default)s")
    parser.add_argument('--pypy', type=str, default="pypy",
            help="Path to the 'pypy', default: %(default)s")
    parser.add_argument('--v3', type=param.str2bool, nargs='?', const=True, default = True,
            help="Use Clairvoyante version 3")
    parser.add_argument('--v2', type=param.str2bool, nargs='?', const=True, default = False,
            help="Use Clairvoyante version 2")
    parser.add_argument('--slim', type=param.str2bool, nargs='?', const=True, default = False,
            help="Train using the slim version of Clairvoyante, optional")
    parser.add_argument('--threads', type=int, default = None,
            help="Number of threads, optional")
    parser.add_argument('--delay', type=int, default = 10,
            help="Wait a short while for no more than %(default)s to start the job. This is to avoid starting multiple jobs simultaneously that might use up the maximum number of threads allowed, because Tensorflow will create more threads than needed at the beginning of running the program.")
    args = parser.parse_args()
    # No arguments at all: show the help text instead of running.
    if len(sys.argv[1:]) == 0:
        parser.print_help()
        sys.exit(1)
    Run(args)
# Script entry point: parse options and run the pipeline.
if __name__ == "__main__":
    main()
|
en
| 0.206371
|
#print >> sys.stderr, c.EVCInstance.returncode, c.CTInstance.returncode, c.CVInstance.returncode
| 2.315008
| 2
|
edk2/BaseTools/Source/Python/UPT/Library/ParserValidate.py
|
awwiniot/Aw1689UEFI
| 21
|
6626106
|
<reponame>awwiniot/Aw1689UEFI<filename>edk2/BaseTools/Source/Python/UPT/Library/ParserValidate.py
## @file ParserValidate.py
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
PaserValidate
'''
import os.path
import re
import platform
from Library.DataType import MODULE_LIST
from Library.DataType import COMPONENT_TYPE_LIST
from Library.DataType import PCD_USAGE_TYPE_LIST_OF_MODULE
from Library.DataType import TAB_SPACE_SPLIT
from Library.String import GetSplitValueList
from Library.ExpressionValidate import IsValidBareCString
from Library.ExpressionValidate import IsValidFeatureFlagExp
## __HexDigit() method
#
# Tell whether a single character is a hexadecimal digit.
#
# @param TempChar: The char to test
#
def __HexDigit(TempChar):
    return ('0' <= TempChar <= '9') or \
           ('a' <= TempChar <= 'f') or \
           ('A' <= TempChar <= 'F')
## IsValidHex() method
#
# Tell whether a string is a "0x"/"0X"-prefixed hexadecimal literal.
# An empty digit part (bare "0x") is accepted, matching the original behavior.
#
# @param HexStr: The string to test
#
def IsValidHex(HexStr):
    if not HexStr.upper().startswith("0X"):
        return False
    for Digit in HexStr[2:]:
        if not __HexDigit(Digit):
            return False
    return True
## Judge the input string is valid bool type or not.
#
# <TRUE>  ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"}
# <FALSE> ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"}
# <BoolType> ::= {<TRUE>} {<FALSE>}
#
# @param BoolString: A string contained the value need to be judged.
#
def IsValidBoolType(BoolString):
    # Membership test over the closed set of accepted spellings.
    return BoolString in ('TRUE', 'True', 'true', '0x1', '0x01',
                          'FALSE', 'False', 'false', '0x0', '0x00')
## Is Valid Module Type List or not
#
# Every entry of the list must be a valid module type.
#
# @param ModuleTypeList: A list contain ModuleType strings need to be
#                        judged.
#
def IsValidInfMoudleTypeList(ModuleTypeList):
    # Bug fix: the original returned after checking only the FIRST entry
    # (and returned None for an empty list); now every entry must validate.
    # An empty list is treated as valid.
    for ModuleType in ModuleTypeList:
        if not IsValidInfMoudleType(ModuleType):
            return False
    return True
## Is Valid Module Type or not
#
# @param ModuleType: A string contain ModuleType need to be judged.
#
def IsValidInfMoudleType(ModuleType):
    return ModuleType in MODULE_LIST
## Is Valid Component Type or not
#
# Comparison is case-insensitive.
#
# @param ComponentType: A string contain ComponentType need to be judged.
#
def IsValidInfComponentType(ComponentType):
    return ComponentType.upper() in COMPONENT_TYPE_LIST
## Is valid Tool Family or not
#
# @param ToolFamily: A string contain Tool Family need to be judged.
# Famlily := [A-Z]([a-zA-Z0-9])*
#
def IsValidToolFamily(ToolFamily):
    # One or more leading capitals followed by any alphanumerics.
    return re.match(r"^[A-Z]+[A-Za-z0-9]{0,}$", ToolFamily, re.DOTALL) is not None
## Is valid Tool TagName or not
#
# The TagName sample is MYTOOLS and VS2005.
#
# @param TagName: A string contain Tool TagName need to be judged.
#
def IsValidToolTagName(TagName):
    Stripped = TagName.strip()
    # Empty and the wildcard "*" are always acceptable.
    if Stripped == '' or Stripped == '*':
        return True
    # Otherwise the (unstripped) tag must be a valid word.
    return IsValidWord(TagName)
## Is valid arch or not
#
# <OA>   ::= (a-zA-Z)(A-Za-z0-9){0,}
# <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>} {"common"}
#
# @param Arch: Input arch
#
def IsValidArch(Arch):
    if Arch == 'common':
        return True
    return re.match(r"^[a-zA-Z]+[a-zA-Z0-9]{0,}$", Arch, re.DOTALL) is not None
## Is valid family or not
#
# <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"}
# <Usr> ::= [A-Z][A-Za-z0-9]{0,}
#
# @param Family: The family string need to be validated
#
def IsValidFamily(Family):
    Family = Family.strip()
    # Wildcard and empty are both accepted verbatim.
    if Family in ('*', ''):
        return True
    return re.match(r"^[A-Z]+[A-Za-z0-9]{0,}$", Family, re.DOTALL) is not None
## Is valid build option name or not
#
# Expected shape: TARGET_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE
# (five '_'-separated fields; fields may be the wildcard "*").
#
# @param BuildOptionName: The BuildOptionName string need to be validated
#
def IsValidBuildOptionName(BuildOptionName):
    if not BuildOptionName:
        return False
    # Split into at most five '_'-separated fields (presumably maxsplit=4 —
    # confirm against Library.String.GetSplitValueList).
    ToolOptionList = GetSplitValueList(BuildOptionName, '_', 4)
    if len(ToolOptionList) != 5:
        return False
    # NOTE(review): the alternation here is not grouped, so the patterns read
    # as (^\s*\*) OR ([A-Z][a-zA-Z0-9]*$); the intent was probably
    # ^\s*((\*)|([A-Z][a-zA-Z0-9]*))$ — confirm before changing.
    ReIsValidBuildOption1 = re.compile(r"^\s*(\*)|([A-Z][a-zA-Z0-9]*)$")
    ReIsValidBuildOption2 = re.compile(r"^\s*(\*)|([a-zA-Z][a-zA-Z0-9]*)$")
    if ReIsValidBuildOption1.match(ToolOptionList[0]) == None:
        return False
    if ReIsValidBuildOption1.match(ToolOptionList[1]) == None:
        return False
    if ReIsValidBuildOption2.match(ToolOptionList[2]) == None:
        return False
    # A wildcard command type may only carry these reserved attributes.
    if ToolOptionList[3] == "*" and ToolOptionList[4] not in ['FAMILY', 'DLL', 'DPATH']:
        return False
    return True
## IsValidToken
#
# Check that the pattern matches the token from its very start to its very end.
#
# @param ReString: regular string
# @param Token: Token to be matched
#
def IsValidToken(ReString, Token):
    Result = re.compile(ReString).match(Token)
    # Truthy only when the match spans the complete token.
    return Result and Result.start() == 0 and Result.end() == len(Token)
## IsValidPath
#
# Check that Path names an existing entry under Root and that its relative
# form contains no illegal separators or components.
#
# @param Path: Absolute path or relative path to be checked
# @param Root: Root path
#
def IsValidPath(Path, Root):
    Path = Path.strip()
    # Keep the caller's (forward-slashed) spelling for the syntax checks
    # below; normalization would hide things like "./" or "//".
    OrigPath = Path.replace('\\', '/')
    Path = os.path.normpath(Path).replace('\\', '/')
    Root = os.path.normpath(Root).replace('\\', '/')
    FullPath = os.path.normpath(os.path.join(Root, Path)).replace('\\', '/')
    if not os.path.exists(FullPath):
        return False
    #
    # If Path is absolute path.
    # It should be in Root.
    #
    if os.path.isabs(Path):
        if not Path.startswith(Root):
            return False
        return True
    #
    # Check illegal character
    #
    for Rel in ['/', './', '../']:
        if OrigPath.startswith(Rel):
            return False
    for Rel in ['//', '/./', '/../']:
        if Rel in OrigPath:
            return False
    for Rel in ['/.', '/..', '/']:
        if OrigPath.endswith(Rel):
            return False
    Path = Path.rstrip('/')
    #
    # Check relative path
    #
    # Every component of the normalized relative path must be a valid word.
    for Word in Path.split('/'):
        if not IsValidWord(Word):
            return False
    return True
## IsValidInstallPath
#
# Check if an install path valid or not.
#
# Absolute path or path starts with '.' or path contains '..' are invalid.
#
# @param Path: path to be checked
#
def IsValidInstallPath(Path):
    OnWindows = platform.platform().find("Windows") >= 0
    if OnWindows:
        if os.path.isabs(Path):
            return False
    else:
        # On POSIX hosts also reject DOS-style drive prefixes such as "C:".
        if Path[1:2] == ':' or os.path.isabs(Path):
            return False
    return not (Path.startswith('.') or Path.find('..') != -1)
## IsValidCFormatGuid
#
# Check if GUID format has the from of {8,4,4,{2,2,2,2,2,2,2,2}}
#
# Implemented as a token scanner: `List` alternates the separator string
# expected before a token with the maximum token length of the next
# "0x..." field (10 = "0x" + 8 hex digits, 6 = "0x" + 4, 4 = "0x" + 2).
#
# @param Guid: Guid to be checked
#
def IsValidCFormatGuid(Guid):
    #
    # Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }}
    # Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }} 0x123
    # Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }}
    #
    List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
            ',', 4, ',', 4, ',', 4, ',', 4, ',', 4, '}}']
    Index = 0
    Value = ''
    SepValue = ''
    for Char in Guid:
        # Accumulate token characters; braces/commas/whitespace end a token.
        if Char not in '{},\t ':
            Value += Char
            continue
        if Value:
            try:
                #
                # Index may out of bound
                #
                if not SepValue or SepValue != List[Index]:
                    return False
                Index += 1
                SepValue = ''
                if not Value.startswith('0x') and not Value.startswith('0X'):
                    return False
                #
                # Index may out of bound
                #
                if type(List[Index]) != type(1) or \
                   len(Value) > List[Index] or len(Value) < 3:
                    return False
                #
                # Check if string can be converted to integer
                # Throw exception if not
                #
                int(Value, 16)
            except BaseException:
                #
                # Exception caught means invalid format
                #
                return False
            Value = ''
            Index += 1
        if Char in '{},':
            SepValue += Char
    # Valid only when the final separator is the closing "}}" and no token
    # remains unconsumed.
    return SepValue == '}}' and Value == ''
## IsValidPcdType
#
# Check whether the PCD type is valid
#
# @param PcdTypeString: The PcdType string need to be checked.
#
def IsValidPcdType(PcdTypeString):
    # Case-insensitive membership in the module PCD usage types.
    return PcdTypeString.upper() in PCD_USAGE_TYPE_LIST_OF_MODULE
## IsValidWord
#
# Check whether the word is valid.
# <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,}  Alphanumeric characters with
#            optional dash "-" and/or underscore "_" characters.
#            No whitespace characters are permitted.
#            (The implementation additionally allows '.', but not "..".)
#
# @param Word: The word string need to be checked.
#
def IsValidWord(Word):
    if not Word:
        return False
    First = Word[0]
    # First character: alphanumeric, underscore, or digit.
    if not (First.isalnum() or First == '_' or First.isdigit()):
        return False
    Previous = ''
    for Current in Word[1:]:
        if not (Current.isalpha() or Current.isdigit() or Current in '-_.'):
            return False
        # Two consecutive periods are not allowed.
        if Current == '.' and Previous == '.':
            return False
        Previous = Current
    return True
## IsValidSimpleWord
#
# Check whether the SimpleWord is valid.
# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
# A word that cannot contain a period character.
#
# @param Word: The word string need to be checked.
#
def IsValidSimpleWord(Word):
    Word = Word.strip()
    if not Word:
        return False
    return re.match(r"^[0-9A-Za-z][0-9A-Za-z\-_]*$", Word, re.DOTALL) is not None
## IsValidDecVersion
#
# Check whether the decimal version is valid.
# <DecVersion> ::= (0-9){1,} ["." (0-9){1,}]
#
# @param Word: The word string need to be checked.
#
def IsValidDecVersion(Word):
    # With a dot the shape is <digits>.<digits>; without, plain digits.
    if '.' in Word:
        Pattern = r"[0-9]+\.?[0-9]+$"
    else:
        Pattern = r"[0-9]+$"
    return re.match(Pattern, Word) is not None
## IsValidHexVersion
#
# Check whether the hex version is valid.
# <HexVersion> ::= "0x" <Major> <Minor>
# <Major> ::= <HexDigit>{4}
# <Minor> ::= <HexDigit>{4}
#
# @param Word: The word string need to be checked.
#
def IsValidHexVersion(Word):
    return re.match(r"[0][xX][0-9A-Fa-f]{8}$", Word, re.DOTALL) is not None
## IsValidBuildNumber
#
# Check whether the BUILD_NUMBER is valid.
# ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>]
#
# @param Word: The BUILD_NUMBER string need to be checked.
#
def IsValidBuildNumber(Word):
    return re.match(r"[0-9]{1,4}$", Word, re.DOTALL) is not None
## IsValidDepex
#
# Check whether the Depex is valid.
#
# @param Word: The Depex string need to be checked.
#
def IsValidDepex(Word):
    Index = Word.upper().find("PUSH")
    if Index > -1:
        # Whatever follows PUSH must be a C-format GUID.
        return IsValidCFormatGuid(Word[Index+4:].strip())
    return re.match(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", Word, re.DOTALL) is not None
## IsValidNormalizedString
#
# Check
# <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote>
# <Space> ::= 0x20
#
# @param String: string to be checked
#
def IsValidNormalizedString(String):
    if String == '':
        return True
    # Tab characters are never allowed.
    if '\t' in String:
        return False
    for Item in GetSplitValueList(String, TAB_SPACE_SPLIT):
        if Item and not IsValidWord(Item):
            return False
    return True
## IsValidIdString
#
# Check whether the IdString is valid: either a simple word, or a quoted
# (possibly empty) normalized string.
#
# @param String: The IdString need to be checked.
#
def IsValidIdString(String):
    if IsValidSimpleWord(String.strip()):
        return True
    Stripped = String.strip()
    if Stripped.startswith('"') and Stripped.endswith('"'):
        # Note: the quotes are sliced off the ORIGINAL (unstripped) string,
        # matching the historical behavior.
        Inner = String[1:-1]
        if Inner.strip() == "":
            return True
        if IsValidNormalizedString(Inner):
            return True
    return False
## IsValidVersionString
#
# Check whether the VersionString is valid.
# <AsciiString> ::= [ [<WhiteSpace>]{0,} [<AsciiChars>]{0,} ] {0,}
# <WhiteSpace> ::= {<Tab>} {<Space>}
# <Tab> ::= 0x09
# <Space> ::= 0x20
# <AsciiChars> ::= (0x21 - 0x7E)
#
# @param VersionString: The VersionString need to be checked.
#
def IsValidVersionString(VersionString):
    # Bug fix: the original compared each character (a str) against the
    # integers 0x21/0x7E, which can never both hold, so every non-empty
    # string was rejected.  Compare code points instead, and allow the
    # tab/space whitespace that the documented grammar permits.
    VersionString = VersionString.strip()
    for Char in VersionString:
        if Char not in '\t ' and not (0x21 <= ord(Char) <= 0x7E):
            return False
    return True
## IsValidPcdValue
#
# Check whether the PcdValue is valid: a boolean feature-flag expression,
# an integer/hex number, a C or unicode string, a C-format GUID, or a
# byte array / byte list.
#
# @param PcdValue: The PcdValue need to be checked.
#
def IsValidPcdValue(PcdValue):
    # Embedded control characters are rejected outright.
    for Char in PcdValue:
        if Char == '\n' or Char == '\t' or Char == '\f':
            return False
    #
    # <Boolean>
    #
    if IsValidFeatureFlagExp(PcdValue, True)[0]:
        return True
    #
    # <Number> ::= {<Integer>} {<HexNumber>}
    # <Integer> ::= {(0-9)} {(1-9)(0-9){1,}}
    # <HexNumber> ::= "0x" <HexDigit>{1,}
    # <HexDigit> ::= (a-fA-F0-9)
    #
    if IsValidHex(PcdValue):
        return True
    ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
    if ReIsValidIntegerSingle.match(PcdValue) != None:
        return True
    ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
    if ReIsValidIntegerMulti.match(PcdValue) != None:
        return True
    #
    # <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
    # <StringType> ::= {<UnicodeString>} {<CString>}
    #
    ReIsValidStringType = re.compile(r"^\s*[\"L].*[\"]\s*$")
    if ReIsValidStringType.match(PcdValue):
        IsTrue = False
        # NOTE(review): lstrip('L\"') strips ANY run of 'L'/'"' characters,
        # not just the literal prefix — confirm this quirk is intended.
        if PcdValue.strip().startswith('L\"'):
            StringValue = PcdValue.strip().lstrip('L\"').rstrip('\"')
            if IsValidBareCString(StringValue):
                IsTrue = True
        elif PcdValue.strip().startswith('\"'):
            StringValue = PcdValue.strip().lstrip('\"').rstrip('\"')
            if IsValidBareCString(StringValue):
                IsTrue = True
        if IsTrue:
            return IsTrue
    #
    # <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>}
    # <CArray> ::= "{" [<NList>] <CArray>{0,} "}"
    # <NList> ::= <HexByte> ["," <HexByte>]{0,}
    # <HexDigit> ::= (a-fA-F0-9)
    # <HexByte> ::= "0x" <HexDigit>{1,2}
    #
    if IsValidCFormatGuid(PcdValue):
        return True
    ReIsValidByteHex = re.compile(r"^\s*0x[0-9a-fA-F]{1,2}\s*$", re.DOTALL)
    if PcdValue.strip().startswith('{') and PcdValue.strip().endswith('}') :
        # Brace-wrapped byte array: each comma-separated item is a hex byte.
        StringValue = PcdValue.strip().lstrip('{').rstrip('}')
        ValueList = StringValue.split(',')
        AllValidFlag = True
        for ValueItem in ValueList:
            if not ReIsValidByteHex.match(ValueItem.strip()):
                AllValidFlag = False
        if AllValidFlag:
            return True
    #
    # NList
    #
    # Bare comma-separated hex-byte list without braces.
    AllValidFlag = True
    ValueList = PcdValue.split(',')
    for ValueItem in ValueList:
        if not ReIsValidByteHex.match(ValueItem.strip()):
            AllValidFlag = False
    if AllValidFlag:
        return True
    return False
## IsValidCVariableName
#
# Check whether a string is a valid C variable name:
# a leading letter/underscore followed by letters, digits or underscores.
#
# @param CName: The name need to be checked.
#
def IsValidCVariableName(CName):
    return re.match(r"^[A-Za-z_][0-9A-Za-z_]*$", CName, re.DOTALL) is not None
## IsValidIdentifier
#
# <Identifier> ::= <NonDigit> <Chars>{0,}
# <Chars> ::= (a-zA-Z0-9_)
# <NonDigit> ::= (a-zA-Z_)
#
# @param Ident: identifier to be checked
#
def IsValidIdentifier(Ident):
    return re.match(r"^[A-Za-z_][0-9A-Za-z_]*$", Ident, re.DOTALL) is not None
## IsValidDecVersionVal
#
# {(0-9){1,} "." (0-99)}
#
# @param Ver: version to be checked
#
def IsValidDecVersionVal(Ver):
    # Digits, a mandatory dot, then one or two digits.
    return re.match(r"[0-9]+(\.[0-9]{1,2})$", Ver) is not None
## IsValidLibName
#
# (A-Z)(a-zA-Z0-9){0,} and could not be "NULL"
#
def IsValidLibName(LibName):
    if LibName == 'NULL':
        return False
    return re.match("^[A-Z]+[a-zA-Z0-9]*$", LibName) is not None
# IsValidUserId
#
# <UserId> ::= (a-zA-Z)(a-zA-Z0-9_.){0,}
# Words that contain period "." must be encapsulated in double quotation marks.
#
def IsValidUserId(UserId):
    UserId = UserId.strip()
    Quoted = UserId.startswith('"') and UserId.endswith('"')
    if Quoted:
        UserId = UserId[1:-1]
    if not UserId or not UserId[0].isalpha():
        return False
    for Char in UserId[1:]:
        if not Char.isalnum() and Char not in '_.':
            return False
        # An unquoted id may not contain a period.
        if Char == '.' and not Quoted:
            return False
    return True
|
## @file ParserValidate.py
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
PaserValidate
'''
import os.path
import re
import platform
from Library.DataType import MODULE_LIST
from Library.DataType import COMPONENT_TYPE_LIST
from Library.DataType import PCD_USAGE_TYPE_LIST_OF_MODULE
from Library.DataType import TAB_SPACE_SPLIT
from Library.String import GetSplitValueList
from Library.ExpressionValidate import IsValidBareCString
from Library.ExpressionValidate import IsValidFeatureFlagExp
## __HexDigit() method
#
# Tell whether a single character is a hexadecimal digit.
#
# @param TempChar: The char to test
#
def __HexDigit(TempChar):
    return ('0' <= TempChar <= '9') or \
           ('a' <= TempChar <= 'f') or \
           ('A' <= TempChar <= 'F')
## IsValidHex() method
#
# Tell whether a string is a "0x"/"0X"-prefixed hexadecimal literal.
# An empty digit part (bare "0x") is accepted, matching the original behavior.
#
# @param HexStr: The string to test
#
def IsValidHex(HexStr):
    if not HexStr.upper().startswith("0X"):
        return False
    for Digit in HexStr[2:]:
        if not __HexDigit(Digit):
            return False
    return True
## Judge the input string is valid bool type or not.
#
# <TRUE>  ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"}
# <FALSE> ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"}
# <BoolType> ::= {<TRUE>} {<FALSE>}
#
# @param BoolString: A string contained the value need to be judged.
#
def IsValidBoolType(BoolString):
    # Membership test over the closed set of accepted spellings.
    return BoolString in ('TRUE', 'True', 'true', '0x1', '0x01',
                          'FALSE', 'False', 'false', '0x0', '0x00')
## Is Valid Module Type List or not
#
# Every entry of the list must be a valid module type.
#
# @param ModuleTypeList: A list contain ModuleType strings need to be
#                        judged.
#
def IsValidInfMoudleTypeList(ModuleTypeList):
    # Bug fix: the original returned after checking only the FIRST entry
    # (and returned None for an empty list); now every entry must validate.
    # An empty list is treated as valid.
    for ModuleType in ModuleTypeList:
        if not IsValidInfMoudleType(ModuleType):
            return False
    return True
## Is Valid Module Type or not
#
# @param ModuleType: A string contain ModuleType need to be judged.
#
def IsValidInfMoudleType(ModuleType):
    return ModuleType in MODULE_LIST
## Is Valid Component Type or not
#
# Comparison is case-insensitive.
#
# @param ComponentType: A string contain ComponentType need to be judged.
#
def IsValidInfComponentType(ComponentType):
    return ComponentType.upper() in COMPONENT_TYPE_LIST
## Is valid Tool Family or not
#
# @param ToolFamily: A string contain Tool Family need to be judged.
# Famlily := [A-Z]([a-zA-Z0-9])*
#
def IsValidToolFamily(ToolFamily):
    # One or more leading capitals followed by any alphanumerics.
    return re.match(r"^[A-Z]+[A-Za-z0-9]{0,}$", ToolFamily, re.DOTALL) is not None
## Is valid Tool TagName or not
#
# The TagName sample is MYTOOLS and VS2005.
#
# @param TagName: A string contain Tool TagName need to be judged.
#
def IsValidToolTagName(TagName):
    Stripped = TagName.strip()
    # Empty and the wildcard "*" are always acceptable.
    if Stripped == '' or Stripped == '*':
        return True
    # Otherwise the (unstripped) tag must be a valid word.
    return IsValidWord(TagName)
## Is valid arch or not
#
# <OA>   ::= (a-zA-Z)(A-Za-z0-9){0,}
# <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>} {"common"}
#
# @param Arch: Input arch
#
def IsValidArch(Arch):
    if Arch == 'common':
        return True
    return re.match(r"^[a-zA-Z]+[a-zA-Z0-9]{0,}$", Arch, re.DOTALL) is not None
## Is valid family or not
#
# <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"}
# <Usr> ::= [A-Z][A-Za-z0-9]{0,}
#
# @param Family: The family string need to be validated
#
def IsValidFamily(Family):
    Family = Family.strip()
    # Wildcard and empty are both accepted verbatim.
    if Family in ('*', ''):
        return True
    return re.match(r"^[A-Z]+[A-Za-z0-9]{0,}$", Family, re.DOTALL) is not None
## Is valid build option name or not
#
# Expected shape: TARGET_TOOLCHAIN_ARCH_COMMANDTYPE_ATTRIBUTE
# (five '_'-separated fields; fields may be the wildcard "*").
#
# @param BuildOptionName: The BuildOptionName string need to be validated
#
def IsValidBuildOptionName(BuildOptionName):
    if not BuildOptionName:
        return False
    # Split into at most five '_'-separated fields (presumably maxsplit=4 —
    # confirm against Library.String.GetSplitValueList).
    ToolOptionList = GetSplitValueList(BuildOptionName, '_', 4)
    if len(ToolOptionList) != 5:
        return False
    # NOTE(review): the alternation here is not grouped, so the patterns read
    # as (^\s*\*) OR ([A-Z][a-zA-Z0-9]*$); the intent was probably
    # ^\s*((\*)|([A-Z][a-zA-Z0-9]*))$ — confirm before changing.
    ReIsValidBuildOption1 = re.compile(r"^\s*(\*)|([A-Z][a-zA-Z0-9]*)$")
    ReIsValidBuildOption2 = re.compile(r"^\s*(\*)|([a-zA-Z][a-zA-Z0-9]*)$")
    if ReIsValidBuildOption1.match(ToolOptionList[0]) == None:
        return False
    if ReIsValidBuildOption1.match(ToolOptionList[1]) == None:
        return False
    if ReIsValidBuildOption2.match(ToolOptionList[2]) == None:
        return False
    # A wildcard command type may only carry these reserved attributes.
    if ToolOptionList[3] == "*" and ToolOptionList[4] not in ['FAMILY', 'DLL', 'DPATH']:
        return False
    return True
## IsValidToken
#
# Check that the pattern matches the token from its very start to its very end.
#
# @param ReString: regular string
# @param Token: Token to be matched
#
def IsValidToken(ReString, Token):
    Result = re.compile(ReString).match(Token)
    # Truthy only when the match spans the complete token.
    return Result and Result.start() == 0 and Result.end() == len(Token)
## IsValidPath
#
# Check that Path names an existing entry under Root and that its relative
# form contains no illegal separators or components.
#
# @param Path: Absolute path or relative path to be checked
# @param Root: Root path
#
def IsValidPath(Path, Root):
    Path = Path.strip()
    # Keep the caller's (forward-slashed) spelling for the syntax checks
    # below; normalization would hide things like "./" or "//".
    OrigPath = Path.replace('\\', '/')
    Path = os.path.normpath(Path).replace('\\', '/')
    Root = os.path.normpath(Root).replace('\\', '/')
    FullPath = os.path.normpath(os.path.join(Root, Path)).replace('\\', '/')
    if not os.path.exists(FullPath):
        return False
    #
    # If Path is absolute path.
    # It should be in Root.
    #
    if os.path.isabs(Path):
        if not Path.startswith(Root):
            return False
        return True
    #
    # Check illegal character
    #
    for Rel in ['/', './', '../']:
        if OrigPath.startswith(Rel):
            return False
    for Rel in ['//', '/./', '/../']:
        if Rel in OrigPath:
            return False
    for Rel in ['/.', '/..', '/']:
        if OrigPath.endswith(Rel):
            return False
    Path = Path.rstrip('/')
    #
    # Check relative path
    #
    # Every component of the normalized relative path must be a valid word.
    for Word in Path.split('/'):
        if not IsValidWord(Word):
            return False
    return True
## IsValidInstallPath
#
# Check if an install path valid or not.
#
# Absolute path or path starts with '.' or path contains '..' are invalid.
#
# @param Path: path to be checked
#
def IsValidInstallPath(Path):
    OnWindows = platform.platform().find("Windows") >= 0
    if OnWindows:
        if os.path.isabs(Path):
            return False
    else:
        # On POSIX hosts also reject DOS-style drive prefixes such as "C:".
        if Path[1:2] == ':' or os.path.isabs(Path):
            return False
    return not (Path.startswith('.') or Path.find('..') != -1)
## IsValidCFormatGuid
#
# Check if GUID format has the from of {8,4,4,{2,2,2,2,2,2,2,2}}
#
# Implemented as a token scanner: `List` alternates the separator string
# expected before a token with the maximum token length of the next
# "0x..." field (10 = "0x" + 8 hex digits, 6 = "0x" + 4, 4 = "0x" + 2).
#
# @param Guid: Guid to be checked
#
def IsValidCFormatGuid(Guid):
    #
    # Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }}
    # Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }} 0x123
    # Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38,
    # 0xaf, 0x48, 0xce }}
    #
    List = ['{', 10, ',', 6, ',', 6, ',{', 4, ',', 4, ',', 4,
            ',', 4, ',', 4, ',', 4, ',', 4, ',', 4, '}}']
    Index = 0
    Value = ''
    SepValue = ''
    for Char in Guid:
        # Accumulate token characters; braces/commas/whitespace end a token.
        if Char not in '{},\t ':
            Value += Char
            continue
        if Value:
            try:
                #
                # Index may out of bound
                #
                if not SepValue or SepValue != List[Index]:
                    return False
                Index += 1
                SepValue = ''
                if not Value.startswith('0x') and not Value.startswith('0X'):
                    return False
                #
                # Index may out of bound
                #
                if type(List[Index]) != type(1) or \
                   len(Value) > List[Index] or len(Value) < 3:
                    return False
                #
                # Check if string can be converted to integer
                # Throw exception if not
                #
                int(Value, 16)
            except BaseException:
                #
                # Exception caught means invalid format
                #
                return False
            Value = ''
            Index += 1
        if Char in '{},':
            SepValue += Char
    # Valid only when the final separator is the closing "}}" and no token
    # remains unconsumed.
    return SepValue == '}}' and Value == ''
## IsValidPcdType
#
# Check whether the PCD usage type (case-insensitive) is one of the
# types allowed for a module.
#
# @param PcdTypeString: The PcdType string need to be checked.
#
def IsValidPcdType(PcdTypeString):
    return PcdTypeString.upper() in PCD_USAGE_TYPE_LIST_OF_MODULE
## IsValidWord
#
# Check whether the word is valid.
# The first character must be alphanumeric or '_'; the rest may be
# letters, digits, '-', '_' or '.', with no two consecutive periods.
# No whitespace characters are permitted.
#
# @param Word: The word string need to be checked.
#
def IsValidWord(Word):
    if not Word:
        return False
    #
    # The leading character must be alphanumeric or an underscore.
    #
    if not (Word[0].isalnum() or Word[0] == '_' or Word[0].isdigit()):
        return False
    Previous = ''
    for Current in Word[1:]:
        if not (Current.isalpha() or Current.isdigit() or Current in '-_.'):
            return False
        #
        # Consecutive periods are not allowed.
        #
        if Previous == '.' and Current == '.':
            return False
        Previous = Current
    return True
## IsValidSimpleWord
#
# Check whether the SimpleWord is valid.
# <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,}
# A word that cannot contain a period character.
#
# @param Word: The word string need to be checked.
#
def IsValidSimpleWord(Word):
    Word = Word.strip()
    if not Word:
        return False
    return re.match(r"^[0-9A-Za-z][0-9A-Za-z\-_]*$", Word, re.DOTALL) is not None
## IsValidDecVersion
#
# Check whether the decimal version is valid.
# <DecVersion> ::= (0-9){1,} ["." (0-9){1,}]
#
# @param Word: The word string need to be checked.
#
def IsValidDecVersion(Word):
    Pattern = r"[0-9]+\.?[0-9]+$" if '.' in Word else r"[0-9]+$"
    return re.match(Pattern, Word) is not None
## IsValidHexVersion
#
# Check whether the hex version is valid: "0x" followed by exactly
# eight hex digits (4-digit major + 4-digit minor).
#
# @param Word: The word string need to be checked.
#
def IsValidHexVersion(Word):
    return re.match(r"[0][xX][0-9A-Fa-f]{8}$", Word, re.DOTALL) is not None
## IsValidBuildNumber
#
# Check whether the BUILD_NUMBER is valid: one to four decimal digits.
# ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>]
#
# @param Word: The BUILD_NUMBER string need to be checked.
#
def IsValidBuildNumber(Word):
    return re.match(r"[0-9]{1,4}$", Word, re.DOTALL) is not None
## IsValidDepex
#
# Check whether the Depex expression is valid: either a PUSH of a
# C-format GUID, or a whitespace/period-separated list of C names.
#
# @param Word: The Depex string need to be checked.
#
def IsValidDepex(Word):
    PushPos = Word.upper().find("PUSH")
    if PushPos != -1:
        # Everything after "PUSH" must be a C-format GUID literal.
        return IsValidCFormatGuid(Word[PushPos + 4:].strip())
    return re.match(r"^[A-Za-z_][0-9A-Za-z_\s\.]*$", Word, re.DOTALL) is not None
## IsValidNormalizedString
#
# Check
# <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote>
# <Space> ::= 0x20
#
# @param String: string to be checked
#
def IsValidNormalizedString(String):
    if String == '':
        return True
    # Tabs are never allowed inside a normalized string.
    if '\t' in String:
        return False
    for Item in GetSplitValueList(String, TAB_SPACE_SPLIT):
        if Item and not IsValidWord(Item):
            return False
    return True
## IsValidIdString
#
# Check whether the IdString is valid: either a simple word, or a
# (possibly double-quoted) normalized string.
#
# @param String: The IdString need to be checked.
#
def IsValidIdString(String):
    if IsValidSimpleWord(String.strip()):
        return True
    Trimmed = String.strip()
    if Trimmed.startswith('"') and Trimmed.endswith('"'):
        # Drop the first and last character of the *original* string,
        # matching the historical behavior of this validator.
        String = String[1:-1]
    if not String.strip():
        return True
    return IsValidNormalizedString(String)
## IsValidVersionString
#
# Check whether the VersionString is valid.
# <AsciiString> ::= [ [<WhiteSpace>]{0,} [<AsciiChars>]{0,} ] {0,}
# <WhiteSpace>  ::= {<Tab>} {<Space>}
# <Tab>         ::= 0x09
# <Space>       ::= 0x20
# <AsciiChars>  ::= (0x21 - 0x7E)
#
# @param VersionString: The VersionString need to be checked.
#
def IsValidVersionString(VersionString):
    """Return True if every character is printable ASCII or tab/space.

    Fixes the original comparison ``Char >= 0x21`` which compared a
    one-character string against an int (TypeError on Python 3 and
    vacuously true on Python 2); the grammar above permits interior
    whitespace, so tab and space are accepted explicitly.
    """
    VersionString = VersionString.strip()
    for Char in VersionString:
        if not (Char in '\t ' or 0x21 <= ord(Char) <= 0x7E):
            return False
    return True
## IsValidPcdValue
#
# Check whether the PcdValue is valid.
#
# @param PcdValue: The PcdValue need to be checked.
#
def IsValidPcdValue(PcdValue):
    """Return True if PcdValue is a valid PCD value in any accepted form.

    Accepted forms, tried in order: boolean feature-flag expression,
    hex/decimal integer, quoted C or Unicode string, C-format GUID,
    byte array in braces, or a comma-separated list of hex bytes.
    Relies on IsValidFeatureFlagExp / IsValidHex / IsValidBareCString
    defined elsewhere in this module.
    """
    # Newlines, tabs and form feeds are never allowed anywhere.
    for Char in PcdValue:
        if Char == '\n' or Char == '\t' or Char == '\f':
            return False
    #
    # <Boolean>
    #
    if IsValidFeatureFlagExp(PcdValue, True)[0]:
        return True
    #
    # <Number> ::= {<Integer>} {<HexNumber>}
    # <Integer> ::= {(0-9)} {(1-9)(0-9){1,}}
    # <HexNumber> ::= "0x" <HexDigit>{1,}
    # <HexDigit> ::= (a-fA-F0-9)
    #
    if IsValidHex(PcdValue):
        return True
    ReIsValidIntegerSingle = re.compile(r"^\s*[0-9]\s*$", re.DOTALL)
    if ReIsValidIntegerSingle.match(PcdValue) != None:
        return True
    # Multi-digit integers must not have a leading zero.
    ReIsValidIntegerMulti = re.compile(r"^\s*[1-9][0-9]+\s*$", re.DOTALL)
    if ReIsValidIntegerMulti.match(PcdValue) != None:
        return True
    #
    # <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"}
    # <StringType> ::= {<UnicodeString>} {<CString>}
    #
    ReIsValidStringType = re.compile(r"^\s*[\"L].*[\"]\s*$")
    if ReIsValidStringType.match(PcdValue):
        IsTrue = False
        # NOTE(review): lstrip/rstrip strip character *sets* ('L' and '"'),
        # not exact prefixes -- verify this matches callers' expectations.
        if PcdValue.strip().startswith('L\"'):
            StringValue = PcdValue.strip().lstrip('L\"').rstrip('\"')
            if IsValidBareCString(StringValue):
                IsTrue = True
        elif PcdValue.strip().startswith('\"'):
            StringValue = PcdValue.strip().lstrip('\"').rstrip('\"')
            if IsValidBareCString(StringValue):
                IsTrue = True
        if IsTrue:
            return IsTrue
    #
    # <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>}
    # <CArray> ::= "{" [<NList>] <CArray>{0,} "}"
    # <NList> ::= <HexByte> ["," <HexByte>]{0,}
    # <HexDigit> ::= (a-fA-F0-9)
    # <HexByte> ::= "0x" <HexDigit>{1,2}
    #
    if IsValidCFormatGuid(PcdValue):
        return True
    ReIsValidByteHex = re.compile(r"^\s*0x[0-9a-fA-F]{1,2}\s*$", re.DOTALL)
    if PcdValue.strip().startswith('{') and PcdValue.strip().endswith('}') :
        StringValue = PcdValue.strip().lstrip('{').rstrip('}')
        ValueList = StringValue.split(',')
        AllValidFlag = True
        for ValueItem in ValueList:
            if not ReIsValidByteHex.match(ValueItem.strip()):
                AllValidFlag = False
        if AllValidFlag:
            return True
    #
    # NList
    #
    AllValidFlag = True
    ValueList = PcdValue.split(',')
    for ValueItem in ValueList:
        if not ReIsValidByteHex.match(ValueItem.strip()):
            AllValidFlag = False
    if AllValidFlag:
        return True
    return False
## IsValidCVariableName
#
# Check whether CName is a valid C variable name:
# a letter or underscore followed by letters, digits or underscores.
#
# @param CName: The C variable name need to be checked.
#
def IsValidCVariableName(CName):
    return re.match(r"^[A-Za-z_][0-9A-Za-z_]*$", CName, re.DOTALL) is not None
## IsValidIdentifier
#
# <Identifier> ::= <NonDigit> <Chars>{0,}
# <Chars> ::= (a-zA-Z0-9_)
# <NonDigit> ::= (a-zA-Z_)
#
# @param Ident: identifier to be checked
#
def IsValidIdentifier(Ident):
    return re.match(r"^[A-Za-z_][0-9A-Za-z_]*$", Ident, re.DOTALL) is not None
## IsValidDecVersionVal
#
# {(0-9){1,} "." (0-99)}
# One or more digits, a period, then one or two digits.
#
# @param Ver: version to be checked
#
def IsValidDecVersionVal(Ver):
    return re.match(r"[0-9]+(\.[0-9]{1,2})$", Ver) is not None
## IsValidLibName
#
# (A-Z)(a-zA-Z0-9){0,} and could not be "NULL"
#
def IsValidLibName(LibName):
    if LibName == 'NULL':
        return False
    return re.match("^[A-Z]+[a-zA-Z0-9]*$", LibName) is not None
# IsValidUserId
#
# <UserId> ::= (a-zA-Z)(a-zA-Z0-9_.){0,}
# Words that contain period "." must be encapsulated in double quotation marks.
#
def IsValidUserId(UserId):
    UserId = UserId.strip()
    Quoted = UserId.startswith('"') and UserId.endswith('"')
    if Quoted:
        UserId = UserId[1:-1]
    # Must be non-empty and begin with a letter.
    if not UserId or not UserId[0].isalpha():
        return False
    for Ch in UserId[1:]:
        if not (Ch.isalnum() or Ch in '_.'):
            return False
        # Periods are only legal inside a quoted user id.
        if Ch == '.' and not Quoted:
            return False
    return True
|
en
| 0.431366
|
## @file ParserValidate.py # # Copyright (c) 2011, Intel Corporation. All rights reserved.<BR> # # This program and the accompanying materials are licensed and made available # under the terms and conditions of the BSD License which accompanies this # distribution. The full text of the license may be found at # http://opensource.org/licenses/bsd-license.php # # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. # PaserValidate ## __HexDigit() method # # Whether char input is a Hex data bit # # @param TempChar: The char to test # ## IsValidHex() method # # Whether char input is a Hex data. # # @param TempChar: The char to test # ## Judge the input string is valid bool type or not. # # <TRUE> ::= {"TRUE"} {"true"} {"True"} {"0x1"} {"0x01"} # <FALSE> ::= {"FALSE"} {"false"} {"False"} {"0x0"} {"0x00"} # <BoolType> ::= {<TRUE>} {<FALSE>} # # @param BoolString: A string contained the value need to be judged. # # # Valid Ture # # # Valid False # # # Invalid bool type # ## Is Valid Module Type List or not # # @param ModuleTypeList: A list contain ModuleType strings need to be # judged. # ## Is Valid Module Type or not # # @param ModuleType: A string contain ModuleType need to be judged. # ## Is Valid Component Type or not # # @param ComponentType: A string contain ComponentType need to be judged. # ## Is valid Tool Family or not # # @param ToolFamily: A string contain Tool Family need to be judged. # Famlily := [A-Z]([a-zA-Z0-9])* # ## Is valid Tool TagName or not # # The TagName sample is MYTOOLS and VS2005. # # @param TagName: A string contain Tool TagName need to be judged. 
# ## Is valid arch or not # # @param Arch The arch string need to be validated # <OA> ::= (a-zA-Z)(A-Za-z0-9){0,} # <arch> ::= {"IA32"} {"X64"} {"IPF"} {"EBC"} {<OA>} # {"common"} # @param Arch: Input arch # ## Is valid family or not # # <Family> ::= {"MSFT"} {"GCC"} {"INTEL"} {<Usr>} {"*"} # <Usr> ::= [A-Z][A-Za-z0-9]{0,} # # @param family: The family string need to be validated # ## Is valid build option name or not # # @param BuildOptionName: The BuildOptionName string need to be validated # ## IsValidToken # # Check if pattern string matches total token # # @param ReString: regular string # @param Token: Token to be matched # ## IsValidPath # # Check if path exist # # @param Path: Absolute path or relative path to be checked # @param Root: Root path # # # If Path is absolute path. # It should be in Root. # # # Check illegal character # # # Check relative path # ## IsValidInstallPath # # Check if an install path valid or not. # # Absolute path or path starts with '.' or path contains '..' are invalid. # # @param Path: path to be checked # ## IsValidCFormatGuid # # Check if GUID format has the from of {8,4,4,{2,2,2,2,2,2,2,2}} # # @param Guid: Guid to be checked # # # Valid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38, # 0xaf, 0x48, 0xce }} # Invalid: { 0xf0b11735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38, # 0xaf, 0x48, 0xce }} 0x123 # Invalid: { 0xf0b1 1735, 0x87a0, 0x4193, {0xb2, 0x66, 0x53, 0x8c, 0x38, # 0xaf, 0x48, 0xce }} # # # Index may out of bound # # # Index may out of bound # # # Check if string can be converted to integer # Throw exception if not # # # Exception caught means invalid format # ## IsValidPcdType # # Check whether the PCD type is valid # # @param PcdTypeString: The PcdType string need to be checked. # ## IsValidWord # # Check whether the word is valid. # <Word> ::= (a-zA-Z0-9_)(a-zA-Z0-9_-){0,} Alphanumeric characters with # optional # dash "-" and/or underscore "_" characters. No whitespace # characters are permitted. 
# # @param Word: The word string need to be checked. # # # The first char should be alpha, _ or Digit. # ## IsValidSimpleWord # # Check whether the SimpleWord is valid. # <SimpleWord> ::= (a-zA-Z0-9)(a-zA-Z0-9_-){0,} # A word that cannot contain a period character. # # @param Word: The word string need to be checked. # ## IsValidDecVersion # # Check whether the decimal version is valid. # <DecVersion> ::= (0-9){1,} ["." (0-9){1,}] # # @param Word: The word string need to be checked. # ## IsValidHexVersion # # Check whether the hex version is valid. # <HexVersion> ::= "0x" <Major> <Minor> # <Major> ::= <HexDigit>{4} # <Minor> ::= <HexDigit>{4} # # @param Word: The word string need to be checked. # ## IsValidBuildNumber # # Check whether the BUILD_NUMBER is valid. # ["BUILD_NUMBER" "=" <Integer>{1,4} <EOL>] # # @param Word: The BUILD_NUMBER string need to be checked. # ## IsValidDepex # # Check whether the Depex is valid. # # @param Word: The Depex string need to be checked. # ## IsValidNormalizedString # # Check # <NormalizedString> ::= <DblQuote> [{<Word>} {<Space>}]{1,} <DblQuote> # <Space> ::= 0x20 # # @param String: string to be checked # ## IsValidIdString # # Check whether the IdString is valid. # # @param IdString: The IdString need to be checked. # ## IsValidVersionString # # Check whether the VersionString is valid. # <AsciiString> ::= [ [<WhiteSpace>]{0,} [<AsciiChars>]{0,} ] {0,} # <WhiteSpace> ::= {<Tab>} {<Space>} # <Tab> ::= 0x09 # <Space> ::= 0x20 # <AsciiChars> ::= (0x21 - 0x7E) # # @param VersionString: The VersionString need to be checked. # ## IsValidPcdValue # # Check whether the PcdValue is valid. # # @param VersionString: The PcdValue need to be checked. 
# # # <Boolean> # # # <Number> ::= {<Integer>} {<HexNumber>} # <Integer> ::= {(0-9)} {(1-9)(0-9){1,}} # <HexNumber> ::= "0x" <HexDigit>{1,} # <HexDigit> ::= (a-fA-F0-9) # # # <StringVal> ::= {<StringType>} {<Array>} {"$(" <MACRO> ")"} # <StringType> ::= {<UnicodeString>} {<CString>} # # # <Array> ::= {<CArray>} {<NList>} {<CFormatGUID>} # <CArray> ::= "{" [<NList>] <CArray>{0,} "}" # <NList> ::= <HexByte> ["," <HexByte>]{0,} # <HexDigit> ::= (a-fA-F0-9) # <HexByte> ::= "0x" <HexDigit>{1,2} # # # NList # ## IsValidCVariableName # # Check whether the PcdValue is valid. # # @param VersionString: The PcdValue need to be checked. # ## IsValidIdentifier # # <Identifier> ::= <NonDigit> <Chars>{0,} # <Chars> ::= (a-zA-Z0-9_) # <NonDigit> ::= (a-zA-Z_) # # @param Ident: identifier to be checked # ## IsValidDecVersionVal # # {(0-9){1,} "." (0-99)} # # @param Ver: version to be checked # ## IsValidLibName # # (A-Z)(a-zA-Z0-9){0,} and could not be "NULL" # # IsValidUserId # # <UserId> ::= (a-zA-Z)(a-zA-Z0-9_.){0,} # Words that contain period "." must be encapsulated in double quotation marks. #
| 3.072659
| 3
|
cloud_ml_sdk/cloud_ml_sdk/models/quota.py
|
NanCSN/cloud-ml-sdk
| 32
|
6626107
|
<filename>cloud_ml_sdk/cloud_ml_sdk/models/quota.py
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from . import constant
class Quota(object):
  """Resource quota configuration for one org.

  Only ``org_id`` is required; every other quota is optional and is only
  included in :meth:`get_json_data` output when set.  Memory quotas are
  strings of a number plus a unit from ``constant.CLOUDML_MEMORY_UNITS``
  (e.g. ``"4G"``), cpu quotas are numeric strings (e.g. ``"8"``), and
  gpu/count/tensorboard quotas are positive integers.

  The original implementation repeated the same three validation routines
  fifteen times; they are factored into ``_validated_memory``,
  ``_validated_number_string`` and ``_validated_positive_int`` (also
  fixing the "postive" typo in the error messages).
  """

  # Optional attributes in the exact order get_json_data() emits them.
  _OPTIONAL_FIELDS = (
      "org_name",
      "train_memory_quota", "train_cpu_quota", "train_gpu_quota",
      "train_count_quota",
      "model_memory_quota", "model_cpu_quota", "model_gpu_quota",
      "model_count_quota",
      "dev_memory_quota", "dev_cpu_quota", "dev_gpu_quota",
      "dev_count_quota",
      "tensorboard_quota",
      "total_memory_quota", "total_cpu_quota", "total_gpu_quota")

  def __init__(self,
               org_id,
               org_name=None,
               train_memory_quota=None,
               train_cpu_quota=None,
               train_gpu_quota=None,
               train_count_quota=None,
               model_memory_quota=None,
               model_cpu_quota=None,
               model_gpu_quota=None,
               model_count_quota=None,
               dev_memory_quota=None,
               dev_cpu_quota=None,
               dev_gpu_quota=None,
               dev_count_quota=None,
               tensorboard_quota=None,
               total_memory_quota=None,
               total_cpu_quota=None,
               total_gpu_quota=None):
    self.org_id = org_id
    self.org_name = org_name
    self.train_memory_quota = train_memory_quota
    self.train_cpu_quota = train_cpu_quota
    self.train_gpu_quota = train_gpu_quota
    self.train_count_quota = train_count_quota
    self.model_memory_quota = model_memory_quota
    self.model_cpu_quota = model_cpu_quota
    self.model_gpu_quota = model_gpu_quota
    self.model_count_quota = model_count_quota
    self.dev_memory_quota = dev_memory_quota
    self.dev_cpu_quota = dev_cpu_quota
    self.dev_gpu_quota = dev_gpu_quota
    self.dev_count_quota = dev_count_quota
    self.tensorboard_quota = tensorboard_quota
    self.total_memory_quota = total_memory_quota
    self.total_cpu_quota = total_cpu_quota
    self.total_gpu_quota = total_gpu_quota

  @staticmethod
  def _validated_memory(name, value):
    """Validate a memory quota string like "4G"; None passes through.

    Raises:
      ValueError: if value is not a string, has an unknown unit suffix,
        or its numeric part is not a (possibly fractional) number.
    """
    if value is not None:
      if not isinstance(value, str):
        raise ValueError("%s must be a string" % name)
      unit = value[-1:]
      amount = value[:-1]
      if unit not in constant.CLOUDML_MEMORY_UNITS:
        raise ValueError("%s unit must be one of %s!" %
                         (name, constant.CLOUDML_MEMORY_UNITS))
      # Allow at most one decimal point in the numeric part.
      if not amount.replace(".", "", 1).isdigit():
        raise ValueError("%s must be a number!" % name)
    return value

  @staticmethod
  def _validated_number_string(name, value):
    """Validate a numeric-string quota like "8" or "0.5"; None passes.

    Raises:
      ValueError: if value is not a string or not a number.
    """
    if value is not None:
      if not isinstance(value, str):
        raise ValueError("%s must be a string!" % name)
      if not value.replace(".", "", 1).isdigit():
        raise ValueError("%s must be a number!" % name)
    return value

  @staticmethod
  def _validated_positive_int(name, value):
    """Validate a positive-integer quota; None passes through.

    Raises:
      ValueError: if value is not a positive int.
    """
    if value is not None:
      if not (isinstance(value, int) and value > 0):
        raise ValueError("%s must be a positive integer!" % name)
    return value

  @property
  def org_id(self):
    return self._org_id

  @org_id.setter
  def org_id(self, value):
    # org_id is the only mandatory field: always a string, never None.
    if not isinstance(value, str):
      raise ValueError("org_id must be a string")
    self._org_id = value

  @property
  def org_name(self):
    return self._org_name

  @org_name.setter
  def org_name(self, value):
    if value is not None and not isinstance(value, str):
      raise ValueError("Org_name must be a string")
    self._org_name = value

  @property
  def train_memory_quota(self):
    return self._train_memory_quota

  @train_memory_quota.setter
  def train_memory_quota(self, value):
    self._train_memory_quota = self._validated_memory(
        "train_memory_quota", value)

  @property
  def train_cpu_quota(self):
    return self._train_cpu_quota

  @train_cpu_quota.setter
  def train_cpu_quota(self, value):
    self._train_cpu_quota = self._validated_number_string(
        "train_cpu_quota", value)

  @property
  def train_gpu_quota(self):
    return self._train_gpu_quota

  @train_gpu_quota.setter
  def train_gpu_quota(self, value):
    self._train_gpu_quota = self._validated_positive_int(
        "train_gpu_quota", value)

  @property
  def train_count_quota(self):
    return self._train_count_quota

  @train_count_quota.setter
  def train_count_quota(self, value):
    self._train_count_quota = self._validated_positive_int(
        "train_count_quota", value)

  @property
  def model_memory_quota(self):
    return self._model_memory_quota

  @model_memory_quota.setter
  def model_memory_quota(self, value):
    self._model_memory_quota = self._validated_memory(
        "model_memory_quota", value)

  @property
  def model_cpu_quota(self):
    return self._model_cpu_quota

  @model_cpu_quota.setter
  def model_cpu_quota(self, value):
    self._model_cpu_quota = self._validated_number_string(
        "model_cpu_quota", value)

  @property
  def model_gpu_quota(self):
    return self._model_gpu_quota

  @model_gpu_quota.setter
  def model_gpu_quota(self, value):
    self._model_gpu_quota = self._validated_positive_int(
        "model_gpu_quota", value)

  @property
  def model_count_quota(self):
    return self._model_count_quota

  @model_count_quota.setter
  def model_count_quota(self, value):
    self._model_count_quota = self._validated_positive_int(
        "model_count_quota", value)

  @property
  def dev_memory_quota(self):
    return self._dev_memory_quota

  @dev_memory_quota.setter
  def dev_memory_quota(self, value):
    self._dev_memory_quota = self._validated_memory(
        "dev_memory_quota", value)

  @property
  def dev_cpu_quota(self):
    return self._dev_cpu_quota

  @dev_cpu_quota.setter
  def dev_cpu_quota(self, value):
    self._dev_cpu_quota = self._validated_number_string(
        "dev_cpu_quota", value)

  @property
  def dev_gpu_quota(self):
    return self._dev_gpu_quota

  @dev_gpu_quota.setter
  def dev_gpu_quota(self, value):
    self._dev_gpu_quota = self._validated_positive_int(
        "dev_gpu_quota", value)

  @property
  def dev_count_quota(self):
    return self._dev_count_quota

  @dev_count_quota.setter
  def dev_count_quota(self, value):
    self._dev_count_quota = self._validated_positive_int(
        "dev_count_quota", value)

  @property
  def total_memory_quota(self):
    return self._total_memory_quota

  @total_memory_quota.setter
  def total_memory_quota(self, value):
    self._total_memory_quota = self._validated_memory(
        "total_memory_quota", value)

  @property
  def total_cpu_quota(self):
    return self._total_cpu_quota

  @total_cpu_quota.setter
  def total_cpu_quota(self, value):
    self._total_cpu_quota = self._validated_number_string(
        "total_cpu_quota", value)

  @property
  def total_gpu_quota(self):
    return self._total_gpu_quota

  @total_gpu_quota.setter
  def total_gpu_quota(self, value):
    self._total_gpu_quota = self._validated_positive_int(
        "total_gpu_quota", value)

  @property
  def tensorboard_quota(self):
    return self._tensorboard_quota

  @tensorboard_quota.setter
  def tensorboard_quota(self, value):
    self._tensorboard_quota = self._validated_positive_int(
        "tensorboard_quota", value)

  def get_json_data(self):
    """Serialize the quota as a JSON string, omitting unset fields.

    Returns:
      A JSON object string with org_id first, then every optional field
      that is not None, in the order of _OPTIONAL_FIELDS.
    """
    data = {"org_id": self._org_id}
    for name in self._OPTIONAL_FIELDS:
      value = getattr(self, "_" + name)
      if value is not None:
        data[name] = value
    return json.dumps(data)
|
<filename>cloud_ml_sdk/cloud_ml_sdk/models/quota.py
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from . import constant
class Quota(object):
def __init__(self,
org_id,
org_name=None,
train_memory_quota=None,
train_cpu_quota=None,
train_gpu_quota=None,
train_count_quota=None,
model_memory_quota=None,
model_cpu_quota=None,
model_gpu_quota=None,
model_count_quota=None,
dev_memory_quota=None,
dev_cpu_quota=None,
dev_gpu_quota=None,
dev_count_quota=None,
tensorboard_quota=None,
total_memory_quota=None,
total_cpu_quota=None,
total_gpu_quota=None):
self.org_id = org_id
self.org_name = org_name
self.train_memory_quota = train_memory_quota
self.train_cpu_quota = train_cpu_quota
self.train_gpu_quota = train_gpu_quota
self.train_count_quota = train_count_quota
self.model_memory_quota = model_memory_quota
self.model_cpu_quota = model_cpu_quota
self.model_gpu_quota = model_gpu_quota
self.model_count_quota = model_count_quota
self.dev_memory_quota = dev_memory_quota
self.dev_cpu_quota = dev_cpu_quota
self.dev_gpu_quota = dev_gpu_quota
self.dev_count_quota = dev_count_quota
self.tensorboard_quota = tensorboard_quota
self.total_memory_quota = total_memory_quota
self.total_cpu_quota = total_cpu_quota
self.total_gpu_quota = total_gpu_quota
@property
def org_id(self):
return self._org_id
@org_id.setter
def org_id(self, value):
"""Function for setting org_id.
Args:
value: Org id.
Raises:
ValueError: Org_id must be a string.
"""
if isinstance(value, str):
self._org_id = value
else:
raise ValueError("org_id must be a string")
@property
def org_name(self):
return self._org_name
@org_name.setter
def org_name(self, value):
"""Function for setting org_name.
Args:
value: Org name.
Raises:
ValueError: Org_name must be a string.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("Org_name must be a string")
self._org_name = value
@property
def train_memory_quota(self):
return self._train_memory_quota
@train_memory_quota.setter
def train_memory_quota(self, value):
"""Function for setting train_memory_quota.
Args:
value: Train memory quota.
Raises:
ValueError: Doesn't end with K, M or G.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("train_memory_quota must be a string")
unit = value[-1:]
float_value = value[:-1]
if unit not in constant.CLOUDML_MEMORY_UNITS:
raise ValueError("train_memory_quota unit must be one of %s!" %
constant.CLOUDML_MEMORY_UNITS)
if not float_value.replace(".", "", 1).isdigit():
raise ValueError("train_memory_quota must be a number!")
self._train_memory_quota = value
@property
def train_cpu_quota(self):
return self._train_cpu_quota
@train_cpu_quota.setter
def train_cpu_quota(self, value):
"""Function for setting train_cpu_quota.
Args:
value: Train cpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("train_cpu_quota must be a string!")
if not value.replace(".", "", 1).isdigit():
raise ValueError("train_cpu_quota must be a number!")
self._train_cpu_quota = value
@property
def train_gpu_quota(self):
return self._train_gpu_quota
@train_gpu_quota.setter
def train_gpu_quota(self, value):
"""Function for setting train_gpu_quota.
Args:
value: Train gpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("train_gpu_quota must be a postive integer!")
self._train_gpu_quota = value
@property
def train_count_quota(self):
return self._train_count_quota
@train_count_quota.setter
def train_count_quota(self, value):
"""Function for setting train_count_quota.
Args:
value: Train count quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("train_count_quota must be a postive integer!")
self._train_count_quota = value
@property
def model_memory_quota(self):
return self._model_memory_quota
@model_memory_quota.setter
def model_memory_quota(self, value):
"""Function for setting model_memory_quota.
Args:
value: Model memory quota.
Raises:
ValueError: Doesn't end with K, M or G.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("model_memory_quota must be a string")
unit = value[-1:]
float_value = value[:-1]
if unit not in constant.CLOUDML_MEMORY_UNITS:
raise ValueError("model_memory_quota unit must be one of %s!" %
constant.CLOUDML_MEMORY_UNITS)
if not float_value.replace(".", "", 1).isdigit():
raise ValueError("model_memory_quota must be a number!")
self._model_memory_quota = value
@property
def model_cpu_quota(self):
return self._model_cpu_quota
@model_cpu_quota.setter
def model_cpu_quota(self, value):
"""Function for setting model_cpu_quota.
Args:
value: Model cpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("model_cpu_quota must be a string!")
if not value.replace(".", "", 1).isdigit():
raise ValueError("model_cpu_quota must be a number!")
self._model_cpu_quota = value
@property
def model_gpu_quota(self):
return self._model_gpu_quota
@model_gpu_quota.setter
def model_gpu_quota(self, value):
"""Function for setting model_gpu_quota.
Args:
value: Model gpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("model_gpu_quota must be a postive integer!")
self._model_gpu_quota = value
@property
def model_count_quota(self):
return self._model_count_quota
@model_count_quota.setter
def model_count_quota(self, value):
"""Function for setting model_count_quota.
Args:
value: Model count quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("model_count_quota must be a postive integer!")
self._model_count_quota = value
@property
def dev_memory_quota(self):
return self._dev_memory_quota
@dev_memory_quota.setter
def dev_memory_quota(self, value):
"""Function for setting dev_memory_quota.
Args:
value: Dev memory quota.
Raises:
ValueError: Doesn't end with K, M or G.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("dev_memory_quota must be a string")
unit = value[-1:]
float_value = value[:-1]
if unit not in constant.CLOUDML_MEMORY_UNITS:
raise ValueError("dev_memory_quota unit must be one of %s!" %
constant.CLOUDML_MEMORY_UNITS)
if not float_value.replace(".", "", 1).isdigit():
raise ValueError("dev_memory_quota must be a number!")
self._dev_memory_quota = value
@property
def dev_cpu_quota(self):
return self._dev_cpu_quota
@dev_cpu_quota.setter
def dev_cpu_quota(self, value):
"""Function for setting dev_cpu_quota.
Args:
value: Dev cpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("dev_cpu_quota must be a string!")
if not value.replace(".", "", 1).isdigit():
raise ValueError("dev_cpu_quota must be a number!")
self._dev_cpu_quota = value
@property
def dev_gpu_quota(self):
return self._dev_gpu_quota
@dev_gpu_quota.setter
def dev_gpu_quota(self, value):
"""Function for setting dev_gpu_quota.
Args:
value: Dev gpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("dev_gpu_quota must be a postive integer!")
self._dev_gpu_quota = value
@property
def dev_count_quota(self):
return self._dev_count_quota
@dev_count_quota.setter
def dev_count_quota(self, value):
"""Function for setting dev_count_quota.
Args:
value: Dev count quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("dev_count_quota must be a postive integer!")
self._dev_count_quota = value
@property
def total_memory_quota(self):
return self._total_memory_quota
@total_memory_quota.setter
def total_memory_quota(self, value):
"""Function for setting total_memory_quota.
Args:
value: Total memory quota.
Raises:
ValueError: Doesn't end with K, M or G.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("total_memory_quota must be a string")
unit = value[-1:]
float_value = value[:-1]
if unit not in constant.CLOUDML_MEMORY_UNITS:
raise ValueError("total_memory_quota unit must be one of %s!" %
constant.CLOUDML_MEMORY_UNITS)
if not float_value.replace(".", "", 1).isdigit():
raise ValueError("total_memory_quota must be a number!")
self._total_memory_quota = value
@property
def total_cpu_quota(self):
return self._total_cpu_quota
@total_cpu_quota.setter
def total_cpu_quota(self, value):
"""Function for setting total_cpu_quota.
Args:
value: Total cpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not isinstance(value, str):
raise ValueError("total_cpu_quota must be a string!")
if not value.replace(".", "", 1).isdigit():
raise ValueError("total_cpu_quota must be a number!")
self._total_cpu_quota = value
@property
def total_gpu_quota(self):
return self._total_gpu_quota
@total_gpu_quota.setter
def total_gpu_quota(self, value):
"""Function for setting total_gpu_quota.
Args:
value: Total gpu quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("total_gpu_quota must be a postive integer!")
self._total_gpu_quota = value
@property
def tensorboard_quota(self):
return self._tensorboard_quota
@tensorboard_quota.setter
def tensorboard_quota(self, value):
"""Function for setting tensorboard_quota.
Args:
value: Tensorboard quota.
Raises:
ValueError: If value is not a positive number.
"""
if value != None:
if not (isinstance(value, int) and value > 0):
raise ValueError("tensorboard_quota must be a postive integer!")
self._tensorboard_quota = value
def get_json_data(self):
"""Get the needed quota data after setting necessary varibles.
Returns:
data: The json data which is necessary for the quota.
"""
data = {"org_id": self._org_id}
if self._org_name is not None:
data["org_name"] = self._org_name
if self._train_memory_quota is not None:
data["train_memory_quota"] = self._train_memory_quota
if self._train_cpu_quota is not None:
data["train_cpu_quota"] = self._train_cpu_quota
if self._train_gpu_quota is not None:
data["train_gpu_quota"] = self._train_gpu_quota
if self._train_count_quota is not None:
data["train_count_quota"] = self._train_count_quota
if self._model_memory_quota is not None:
data["model_memory_quota"] = self._model_memory_quota
if self._model_cpu_quota is not None:
data["model_cpu_quota"] = self._model_cpu_quota
if self._model_gpu_quota is not None:
data["model_gpu_quota"] = self._model_gpu_quota
if self._model_count_quota is not None:
data["model_count_quota"] = self._model_count_quota
if self._dev_memory_quota is not None:
data["dev_memory_quota"] = self._dev_memory_quota
if self._dev_cpu_quota is not None:
data["dev_cpu_quota"] = self._dev_cpu_quota
if self._dev_gpu_quota is not None:
data["dev_gpu_quota"] = self._dev_gpu_quota
if self._dev_count_quota is not None:
data["dev_count_quota"] = self._dev_count_quota
if self._tensorboard_quota is not None:
data["tensorboard_quota"] = self.tensorboard_quota
if self._total_memory_quota is not None:
data["total_memory_quota"] = self._total_memory_quota
if self._total_cpu_quota is not None:
data["total_cpu_quota"] = self._total_cpu_quota
if self._total_gpu_quota is not None:
data["total_gpu_quota"] = self._total_gpu_quota
return json.dumps(data)
|
en
| 0.635303
|
# Copyright 2017 Xiaomi, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Function for setting org_id. Args: value: Org id. Raises: ValueError: Org_id must be a string. Function for setting org_name. Args: value: Org name. Raises: ValueError: Org_name must be a string. Function for setting train_memory_quota. Args: value: Train memory quota. Raises: ValueError: Doesn't end with K, M or G. Function for setting train_cpu_quota. Args: value: Train cpu quota. Raises: ValueError: If value is not a positive number. Function for setting train_gpu_quota. Args: value: Train gpu quota. Raises: ValueError: If value is not a positive number. Function for setting train_count_quota. Args: value: Train count quota. Raises: ValueError: If value is not a positive number. Function for setting model_memory_quota. Args: value: Model memory quota. Raises: ValueError: Doesn't end with K, M or G. Function for setting model_cpu_quota. Args: value: Model cpu quota. Raises: ValueError: If value is not a positive number. Function for setting model_gpu_quota. Args: value: Model gpu quota. Raises: ValueError: If value is not a positive number. Function for setting model_count_quota. Args: value: Model count quota. Raises: ValueError: If value is not a positive number. Function for setting dev_memory_quota. Args: value: Dev memory quota. Raises: ValueError: Doesn't end with K, M or G. Function for setting dev_cpu_quota. Args: value: Dev cpu quota. Raises: ValueError: If value is not a positive number. 
Function for setting dev_gpu_quota. Args: value: Dev gpu quota. Raises: ValueError: If value is not a positive number. Function for setting dev_count_quota. Args: value: Dev count quota. Raises: ValueError: If value is not a positive number. Function for setting total_memory_quota. Args: value: Total memory quota. Raises: ValueError: Doesn't end with K, M or G. Function for setting total_cpu_quota. Args: value: Total cpu quota. Raises: ValueError: If value is not a positive number. Function for setting total_gpu_quota. Args: value: Total gpu quota. Raises: ValueError: If value is not a positive number. Function for setting tensorboard_quota. Args: value: Tensorboard quota. Raises: ValueError: If value is not a positive number. Get the needed quota data after setting necessary varibles. Returns: data: The json data which is necessary for the quota.
| 2.16979
| 2
|
Rico/RicoFormat.py
|
victor-luis/Clear-Corretora
| 1
|
6626108
|
from pprint import pprint
from db import model
from db.model import register_transaction, clear_transactions
from helpers import format_currency, parse_transaction, convert_date
provider_id = 12  # internal id of the Rico brokerage provider


def format_json(json):
    """Sync account balances and the statement from a Rico brokerage export.

    Args:
        json: Parsed export payload with keys 'applications', 'date',
            'in_account' and 'movements'.
    """
    applications = json['applications']
    date = json['date']
    in_account = format_currency(json['in_account'])

    # Update the checking-account balance.
    # Initialized to None so the statement loop below cannot hit a NameError
    # when no checking-account row exists (the original left it unbound).
    current_account_id = None
    account = model.get_application_id(provider_id, 0)
    if account:
        current_account_id = account['application_id']
        model.update_balance(account['current_account_id'], current_account_id,
                             in_account, date)

    # Update the balance of each application (treasury, funds, COE, ...).
    for application in applications:
        db_application = model.get_application_id(
            provider_id, application['type_id'], application['description'],
            convert_date(application['buy_date']))
        balance = format_currency(application['balance'])
        if db_application:
            model.update_balance(
                db_application['current_account_id'],
                db_application['application_id'],
                balance,
                date
            )

    # Rebuild the statement from scratch.
    transactions = json['movements']
    clear_transactions(provider_id)
    for transaction in transactions:
        date = convert_date(transaction['date_liquidation'])
        description = transaction["description"]
        movement_type, amount, _application_type, to_account, from_account, _proceed = \
            parse_transaction(description, transaction["amount"], provider_id,
                              date)
        # Account id 0 is a placeholder meaning "the checking account".
        if to_account == 0:
            to_account = current_account_id
        if from_account == 0:
            from_account = current_account_id
        register_transaction(amount, from_account, to_account, date,
                             description, movement_type, provider_id)
|
from pprint import pprint
from db import model
from db.model import register_transaction, clear_transactions
from helpers import format_currency, parse_transaction, convert_date
provider_id = 12


def format_json(json):
    """Sync balances and the transaction statement from a Rico export.

    Args:
        json: Parsed export payload ('applications', 'date', 'in_account',
            'movements').
    """
    export_date = json['date']
    checking_balance = format_currency(json['in_account'])

    # Refresh the checking-account balance.
    checking_row = model.get_application_id(provider_id, 0)
    if checking_row:
        current_account_id = checking_row['application_id']
        model.update_balance(checking_row['current_account_id'],
                             current_account_id, checking_balance, export_date)

    # Refresh every application balance (treasury, funds, COE, ...).
    for app in json['applications']:
        row = model.get_application_id(provider_id, app['type_id'],
                                       app['description'],
                                       convert_date(app['buy_date']))
        app_balance = format_currency(app['balance'])
        if row:
            model.update_balance(row['current_account_id'],
                                 row['application_id'], app_balance,
                                 export_date)

    # Rebuild the statement.
    clear_transactions(provider_id)
    for movement in json['movements']:
        liq_date = convert_date(movement['date_liquidation'])
        text = movement["description"]
        movement_type, amount, application_type, to_account, from_account, proceed = \
            parse_transaction(text, movement["amount"], provider_id, liq_date)
        # Id 0 stands for the checking account.
        if to_account == 0:
            to_account = current_account_id
        if from_account == 0:
            from_account = current_account_id
        register_transaction(amount, from_account, to_account, liq_date, text,
                             movement_type, provider_id)
|
pt
| 0.892469
|
# atualiza valor da conta corrente # atualiza valor das aplicações (tesouro, fundo, COE...) # atualiza extrato # pprint(transaction) # print(to_account)
| 2.367941
| 2
|
api/python/indigo/ml/mpp/run.py
|
alkaberna/Indigo
| 0
|
6626109
|
<reponame>alkaberna/Indigo
import click
import config as config
import torch # type: ignore
from datasets import MolDataset, load_data
from eval import evaluate
from models import MPNNRegressor
from tqdm import trange # type: ignore
@click.command()
@click.argument("filename", type=click.Path(exists=True))
@click.argument("smiles", type=str)
@click.argument("target", type=str)
def main(filename: str, smiles: str, target: str):
    """Simple property prediction: train an MPNN regressor and evaluate it.

    Args:
        filename: Path to the input data file.
        smiles: Name of the SMILES column.
        target: Name of the target-property column.
    """
    config.file_name = filename
    config.smiles = smiles
    config.target = target
    dataset = MolDataset()
    train_loader, val_loader, test_loader = load_data(dataset)
    model = MPNNRegressor(
        dataset.dim_nfeats, dataset.dim_efeats, **config.MPNN_params
    )
    optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)
    loss_fcn = torch.nn.SmoothL1Loss()
    for epoch in trange(config.NUM_EPOCH):
        losses = []
        for batched_graph, labels in train_loader:
            node_feats = batched_graph.ndata["atomic"].float()
            edge_feats = batched_graph.edata["ord"].float()
            prediction = model(batched_graph, node_feats, edge_feats)
            loss = loss_fcn(prediction, labels)
            losses.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Report the mean loss over the whole epoch; the original collected
        # `losses` but never used it and printed only the last batch's loss.
        epoch_loss = sum(losses) / max(len(losses), 1)
        print(
            "\nEpoch: {}/{}.............".format(epoch, config.NUM_EPOCH),
            end=" ",
        )
        print("Loss: {:.4f}".format(epoch_loss))
    evaluate(model, test_loader)


if __name__ == "__main__":
    main()
|
import click
import config as config
import torch # type: ignore
from datasets import MolDataset, load_data
from eval import evaluate
from models import MPNNRegressor
from tqdm import trange # type: ignore
@click.command()
@click.argument("filename", type=click.Path(exists=True))
@click.argument("smiles", type=str)
@click.argument("target", type=str)
def main(filename: str, smiles: str, target: str):
    """Simple property prediction."""
    config.file_name = filename
    config.smiles = smiles
    config.target = target
    mol_dataset = MolDataset()
    train_loader, val_loader, test_loader = load_data(mol_dataset)
    regressor = MPNNRegressor(
        mol_dataset.dim_nfeats, mol_dataset.dim_efeats, **config.MPNN_params
    )
    optimizer = torch.optim.Adam(regressor.parameters(),
                                 lr=config.LEARNING_RATE)
    criterion = torch.nn.SmoothL1Loss()
    for epoch in trange(config.NUM_EPOCH):
        batch_losses = list()
        for graph, labels in train_loader:
            atom_feats = graph.ndata["atomic"].float()
            bond_feats = graph.edata["ord"].float()
            loss = criterion(regressor(graph, atom_feats, bond_feats), labels)
            batch_losses.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Mirrors the original output: the *last* batch's loss is printed.
        print("\nEpoch: {}/{}.............".format(epoch, config.NUM_EPOCH),
              end=" ")
        print("Loss: {:.4f}".format(loss.mean()))
    evaluate(regressor, test_loader)


if __name__ == "__main__":
    main()
|
en
| 0.249627
|
# type: ignore # type: ignore Simpel property prediction
| 2.439299
| 2
|
src/eval_settings/imagenet_c.py
|
saarimrahman/imagenet-testbed
| 69
|
6626110
|
<reponame>saarimrahman/imagenet-testbed
from registry import registry
from eval_settings.eval_setting_base import EvalSetting, StandardDataset
from eval_settings.image_utils import *
from eval_settings.eval_setting_subsample import idx_subsample_list_50k_10percent
from eval_settings.image_utils import corruption_dict
# Corruption types that have pre-rendered ImageNet-C datasets on disk.
_ON_DISK_CORRUPTION_TYPES = [
    'brightness', 'contrast', 'defocus_blur', 'elastic_transform', 'fog',
    'frost', 'gaussian_blur', 'gaussian_noise', 'impulse_noise',
    'jpeg_compression', 'motion_blur', 'pixelate', 'saturate', 'shot_noise',
    'snow', 'spatter', 'speckle_noise', 'zoom_blur',
]

# Every on-disk corruption at each of its five severity levels, in
# corruption-major / severity-minor order.
on_disk_corruptions = [
    'imagenet-c.{}.{}_on-disk'.format(corruption, severity)
    for corruption in _ON_DISK_CORRUPTION_TYPES
    for severity in range(1, 6)
]
# GPU-applied in-memory corruptions: maps each eval-setting name to its
# corruption type and zero-based severity (name uses one-based severity).
in_memory_corruptions_gpu = {
    'imagenet-c.{}.{}_in-memory'.format(corruption, severity + 1): {
        'corruption': corruption, 'gpu': True, 'severity': severity}
    for corruption in ['defocus_blur', 'elastic_transform', 'motion_blur',
                       'snow']
    for severity in range(5)
}
def _make_corr_fn(corruption, severity):
    """Build a single-image corruption function.

    Args:
        corruption: key into `corruption_dict`.
        severity: zero-based severity level (0-4).

    Returns:
        A function mapping an image to its corrupted version.
    """
    def corr_fn(image):
        return corruption_dict[corruption](image, severity)
    return corr_fn


# Corruption types applied on the CPU (the GPU ones live in
# in_memory_corruptions_gpu above).
_CPU_CORRUPTION_TYPES = [
    'brightness', 'contrast', 'fog', 'frost', 'gaussian_blur',
    'gaussian_noise', 'impulse_noise', 'jpeg_compression', 'pixelate',
    'saturate', 'shot_noise', 'spatter', 'speckle_noise', 'zoom_blur',
]

# Generate the corr_<type>_sev_<1..5> module-level functions. This replaces
# 70 hand-written one-line wrappers with identical generated equivalents
# (severity in the name is one-based; corruption_dict takes it zero-based).
for _corruption in _CPU_CORRUPTION_TYPES:
    for _severity in range(5):
        globals()['corr_{}_sev_{}'.format(_corruption, _severity + 1)] = \
            _make_corr_fn(_corruption, _severity)
# CPU-applied in-memory corruptions: maps each eval-setting name to its
# corr_<type>_sev_<severity> function, built from the same <type, severity>
# grid the function names were generated from.
in_memory_corruptions_cpu = {
    'imagenet-c.{}.{}_in-memory'.format(corruption, severity):
        globals()['corr_{}_sev_{}'.format(corruption, severity)]
    for corruption in ['brightness', 'contrast', 'fog', 'frost',
                       'gaussian_blur', 'gaussian_noise', 'impulse_noise',
                       'jpeg_compression', 'pixelate', 'saturate',
                       'shot_noise', 'spatter', 'speckle_noise', 'zoom_blur']
    for severity in range(1, 6)
}
# Register every on-disk corruption twice: the full 50k-image eval setting
# and a fixed 10% (5k-image) subsample linked back to its parent setting.
for disk_name in on_disk_corruptions:
    base_name = disk_name.replace('_on-disk', '')
    registry.add_eval_setting(
        EvalSetting(
            name=disk_name,
            dataset=StandardDataset(name=base_name),
            size=50000,
        )
    )
    registry.add_eval_setting(
        EvalSetting(
            name=disk_name + '_10percent',
            dataset=StandardDataset(name=base_name),
            size=5000,
            idx_subsample_list=idx_subsample_list_50k_10percent,
            parent_eval_setting=disk_name,
        )
    )
def gen_corrupt_batch_gpu(corruption, severity):
def corrupt_batch_gpu(images, model):
for i in range(images.size(0)):
corr_func = corruption_dict[corruption]
images[i] = corr_func(images[i], severity, gpu=True)
return images
return corrupt_batch_gpu
for corruption_name, d in in_memory_corruptions_gpu.items():
registry.add_eval_setting(
EvalSetting(
name = corruption_name,
dataset = StandardDataset(name='val'),
size = 50000,
perturbation_fn_gpu = gen_corrupt_batch_gpu(d['corruption'], d['severity']),
)
)
registry.add_eval_setting(
EvalSetting(
name = corruption_name+'_10percent',
dataset = StandardDataset(name='val'),
size = 5000,
perturbation_fn_gpu = gen_corrupt_batch_gpu(d['corruption'], d['severity']),
idx_subsample_list = idx_subsample_list_50k_10percent,
parent_eval_setting = corruption_name,
)
)
for corruption_name, func in in_memory_corruptions_cpu.items():
registry.add_eval_setting(
EvalSetting(
name = corruption_name,
dataset = StandardDataset(name='val'),
size = 50000,
perturbation_fn_cpu = func,
)
)
registry.add_eval_setting(
EvalSetting(
name = corruption_name+'_10percent',
dataset = StandardDataset(name='val'),
size = 5000,
perturbation_fn_cpu = func,
idx_subsample_list = idx_subsample_list_50k_10percent,
parent_eval_setting = corruption_name,
)
)
|
from registry import registry
from eval_settings.eval_setting_base import EvalSetting, StandardDataset
from eval_settings.image_utils import *
from eval_settings.eval_setting_subsample import idx_subsample_list_50k_10percent
from eval_settings.image_utils import corruption_dict
on_disk_corruptions = [
'imagenet-c.brightness.1_on-disk',
'imagenet-c.brightness.2_on-disk',
'imagenet-c.brightness.3_on-disk',
'imagenet-c.brightness.4_on-disk',
'imagenet-c.brightness.5_on-disk',
'imagenet-c.contrast.1_on-disk',
'imagenet-c.contrast.2_on-disk',
'imagenet-c.contrast.3_on-disk',
'imagenet-c.contrast.4_on-disk',
'imagenet-c.contrast.5_on-disk',
'imagenet-c.defocus_blur.1_on-disk',
'imagenet-c.defocus_blur.2_on-disk',
'imagenet-c.defocus_blur.3_on-disk',
'imagenet-c.defocus_blur.4_on-disk',
'imagenet-c.defocus_blur.5_on-disk',
'imagenet-c.elastic_transform.1_on-disk',
'imagenet-c.elastic_transform.2_on-disk',
'imagenet-c.elastic_transform.3_on-disk',
'imagenet-c.elastic_transform.4_on-disk',
'imagenet-c.elastic_transform.5_on-disk',
'imagenet-c.fog.1_on-disk',
'imagenet-c.fog.2_on-disk',
'imagenet-c.fog.3_on-disk',
'imagenet-c.fog.4_on-disk',
'imagenet-c.fog.5_on-disk',
'imagenet-c.frost.1_on-disk',
'imagenet-c.frost.2_on-disk',
'imagenet-c.frost.3_on-disk',
'imagenet-c.frost.4_on-disk',
'imagenet-c.frost.5_on-disk',
'imagenet-c.gaussian_blur.1_on-disk',
'imagenet-c.gaussian_blur.2_on-disk',
'imagenet-c.gaussian_blur.3_on-disk',
'imagenet-c.gaussian_blur.4_on-disk',
'imagenet-c.gaussian_blur.5_on-disk',
'imagenet-c.gaussian_noise.1_on-disk',
'imagenet-c.gaussian_noise.2_on-disk',
'imagenet-c.gaussian_noise.3_on-disk',
'imagenet-c.gaussian_noise.4_on-disk',
'imagenet-c.gaussian_noise.5_on-disk',
'imagenet-c.glass_blur.1_on-disk',
'imagenet-c.glass_blur.2_on-disk',
'imagenet-c.glass_blur.3_on-disk',
'imagenet-c.glass_blur.4_on-disk',
'imagenet-c.glass_blur.5_on-disk',
'imagenet-c.impulse_noise.1_on-disk',
'imagenet-c.impulse_noise.2_on-disk',
'imagenet-c.impulse_noise.3_on-disk',
'imagenet-c.impulse_noise.4_on-disk',
'imagenet-c.impulse_noise.5_on-disk',
'imagenet-c.jpeg_compression.1_on-disk',
'imagenet-c.jpeg_compression.2_on-disk',
'imagenet-c.jpeg_compression.3_on-disk',
'imagenet-c.jpeg_compression.4_on-disk',
'imagenet-c.jpeg_compression.5_on-disk',
'imagenet-c.motion_blur.1_on-disk',
'imagenet-c.motion_blur.2_on-disk',
'imagenet-c.motion_blur.3_on-disk',
'imagenet-c.motion_blur.4_on-disk',
'imagenet-c.motion_blur.5_on-disk',
'imagenet-c.pixelate.1_on-disk',
'imagenet-c.pixelate.2_on-disk',
'imagenet-c.pixelate.3_on-disk',
'imagenet-c.pixelate.4_on-disk',
'imagenet-c.pixelate.5_on-disk',
'imagenet-c.saturate.1_on-disk',
'imagenet-c.saturate.2_on-disk',
'imagenet-c.saturate.3_on-disk',
'imagenet-c.saturate.4_on-disk',
'imagenet-c.saturate.5_on-disk',
'imagenet-c.shot_noise.1_on-disk',
'imagenet-c.shot_noise.2_on-disk',
'imagenet-c.shot_noise.3_on-disk',
'imagenet-c.shot_noise.4_on-disk',
'imagenet-c.shot_noise.5_on-disk',
'imagenet-c.snow.1_on-disk',
'imagenet-c.snow.2_on-disk',
'imagenet-c.snow.3_on-disk',
'imagenet-c.snow.4_on-disk',
'imagenet-c.snow.5_on-disk',
'imagenet-c.spatter.1_on-disk',
'imagenet-c.spatter.2_on-disk',
'imagenet-c.spatter.3_on-disk',
'imagenet-c.spatter.4_on-disk',
'imagenet-c.spatter.5_on-disk',
'imagenet-c.speckle_noise.1_on-disk',
'imagenet-c.speckle_noise.2_on-disk',
'imagenet-c.speckle_noise.3_on-disk',
'imagenet-c.speckle_noise.4_on-disk',
'imagenet-c.speckle_noise.5_on-disk',
'imagenet-c.zoom_blur.1_on-disk',
'imagenet-c.zoom_blur.2_on-disk',
'imagenet-c.zoom_blur.3_on-disk',
'imagenet-c.zoom_blur.4_on-disk',
'imagenet-c.zoom_blur.5_on-disk']
in_memory_corruptions_gpu = {
'imagenet-c.defocus_blur.1_in-memory': {'corruption': 'defocus_blur', 'gpu': True, 'severity': 0},
'imagenet-c.defocus_blur.2_in-memory': {'corruption': 'defocus_blur', 'gpu': True, 'severity': 1},
'imagenet-c.defocus_blur.3_in-memory': {'corruption': 'defocus_blur', 'gpu': True, 'severity': 2},
'imagenet-c.defocus_blur.4_in-memory': {'corruption': 'defocus_blur', 'gpu': True, 'severity': 3},
'imagenet-c.defocus_blur.5_in-memory': {'corruption': 'defocus_blur', 'gpu': True, 'severity': 4},
'imagenet-c.elastic_transform.1_in-memory': {'corruption': 'elastic_transform', 'gpu': True, 'severity': 0},
'imagenet-c.elastic_transform.2_in-memory': {'corruption': 'elastic_transform', 'gpu': True, 'severity': 1},
'imagenet-c.elastic_transform.3_in-memory': {'corruption': 'elastic_transform', 'gpu': True, 'severity': 2},
'imagenet-c.elastic_transform.4_in-memory': {'corruption': 'elastic_transform', 'gpu': True, 'severity': 3},
'imagenet-c.elastic_transform.5_in-memory': {'corruption': 'elastic_transform', 'gpu': True, 'severity': 4},
'imagenet-c.motion_blur.1_in-memory': {'corruption': 'motion_blur', 'gpu': True, 'severity': 0},
'imagenet-c.motion_blur.2_in-memory': {'corruption': 'motion_blur', 'gpu': True, 'severity': 1},
'imagenet-c.motion_blur.3_in-memory': {'corruption': 'motion_blur', 'gpu': True, 'severity': 2},
'imagenet-c.motion_blur.4_in-memory': {'corruption': 'motion_blur', 'gpu': True, 'severity': 3},
'imagenet-c.motion_blur.5_in-memory': {'corruption': 'motion_blur', 'gpu': True, 'severity': 4},
'imagenet-c.snow.1_in-memory': {'corruption': 'snow', 'gpu': True, 'severity': 0},
'imagenet-c.snow.2_in-memory': {'corruption': 'snow', 'gpu': True, 'severity': 1},
'imagenet-c.snow.3_in-memory': {'corruption': 'snow', 'gpu': True, 'severity': 2},
'imagenet-c.snow.4_in-memory': {'corruption': 'snow', 'gpu': True, 'severity': 3},
'imagenet-c.snow.5_in-memory': {'corruption': 'snow', 'gpu': True, 'severity': 4}}
def corr_brightness_sev_1(image):
return corruption_dict['brightness'](image, 0)
def corr_brightness_sev_2(image):
return corruption_dict['brightness'](image, 1)
def corr_brightness_sev_3(image):
return corruption_dict['brightness'](image, 2)
def corr_brightness_sev_4(image):
return corruption_dict['brightness'](image, 3)
def corr_brightness_sev_5(image):
return corruption_dict['brightness'](image, 4)
def corr_contrast_sev_1(image):
return corruption_dict['contrast'](image, 0)
def corr_contrast_sev_2(image):
return corruption_dict['contrast'](image, 1)
def corr_contrast_sev_3(image):
return corruption_dict['contrast'](image, 2)
def corr_contrast_sev_4(image):
return corruption_dict['contrast'](image, 3)
def corr_contrast_sev_5(image):
return corruption_dict['contrast'](image, 4)
def corr_fog_sev_1(image):
return corruption_dict['fog'](image, 0)
def corr_fog_sev_2(image):
return corruption_dict['fog'](image, 1)
def corr_fog_sev_3(image):
return corruption_dict['fog'](image, 2)
def corr_fog_sev_4(image):
return corruption_dict['fog'](image, 3)
def corr_fog_sev_5(image):
return corruption_dict['fog'](image, 4)
def corr_frost_sev_1(image):
return corruption_dict['frost'](image, 0)
def corr_frost_sev_2(image):
return corruption_dict['frost'](image, 1)
def corr_frost_sev_3(image):
return corruption_dict['frost'](image, 2)
def corr_frost_sev_4(image):
return corruption_dict['frost'](image, 3)
def corr_frost_sev_5(image):
return corruption_dict['frost'](image, 4)
def corr_gaussian_blur_sev_1(image):
return corruption_dict['gaussian_blur'](image, 0)
def corr_gaussian_blur_sev_2(image):
return corruption_dict['gaussian_blur'](image, 1)
def corr_gaussian_blur_sev_3(image):
return corruption_dict['gaussian_blur'](image, 2)
def corr_gaussian_blur_sev_4(image):
return corruption_dict['gaussian_blur'](image, 3)
def corr_gaussian_blur_sev_5(image):
return corruption_dict['gaussian_blur'](image, 4)
def corr_gaussian_noise_sev_1(image):
return corruption_dict['gaussian_noise'](image, 0)
def corr_gaussian_noise_sev_2(image):
return corruption_dict['gaussian_noise'](image, 1)
def corr_gaussian_noise_sev_3(image):
return corruption_dict['gaussian_noise'](image, 2)
def corr_gaussian_noise_sev_4(image):
return corruption_dict['gaussian_noise'](image, 3)
def corr_gaussian_noise_sev_5(image):
return corruption_dict['gaussian_noise'](image, 4)
def corr_impulse_noise_sev_1(image):
return corruption_dict['impulse_noise'](image, 0)
def corr_impulse_noise_sev_2(image):
return corruption_dict['impulse_noise'](image, 1)
def corr_impulse_noise_sev_3(image):
return corruption_dict['impulse_noise'](image, 2)
def corr_impulse_noise_sev_4(image):
return corruption_dict['impulse_noise'](image, 3)
def corr_impulse_noise_sev_5(image):
return corruption_dict['impulse_noise'](image, 4)
def corr_jpeg_compression_sev_1(image):
return corruption_dict['jpeg_compression'](image, 0)
def corr_jpeg_compression_sev_2(image):
return corruption_dict['jpeg_compression'](image, 1)
def corr_jpeg_compression_sev_3(image):
return corruption_dict['jpeg_compression'](image, 2)
def corr_jpeg_compression_sev_4(image):
return corruption_dict['jpeg_compression'](image, 3)
def corr_jpeg_compression_sev_5(image):
return corruption_dict['jpeg_compression'](image, 4)
def corr_pixelate_sev_1(image):
return corruption_dict['pixelate'](image, 0)
def corr_pixelate_sev_2(image):
return corruption_dict['pixelate'](image, 1)
def corr_pixelate_sev_3(image):
return corruption_dict['pixelate'](image, 2)
def corr_pixelate_sev_4(image):
return corruption_dict['pixelate'](image, 3)
def corr_pixelate_sev_5(image):
return corruption_dict['pixelate'](image, 4)
def corr_saturate_sev_1(image):
return corruption_dict['saturate'](image, 0)
def corr_saturate_sev_2(image):
return corruption_dict['saturate'](image, 1)
def corr_saturate_sev_3(image):
return corruption_dict['saturate'](image, 2)
def corr_saturate_sev_4(image):
return corruption_dict['saturate'](image, 3)
def corr_saturate_sev_5(image):
return corruption_dict['saturate'](image, 4)
def corr_shot_noise_sev_1(image):
return corruption_dict['shot_noise'](image, 0)
def corr_shot_noise_sev_2(image):
return corruption_dict['shot_noise'](image, 1)
def corr_shot_noise_sev_3(image):
return corruption_dict['shot_noise'](image, 2)
def corr_shot_noise_sev_4(image):
return corruption_dict['shot_noise'](image, 3)
def corr_shot_noise_sev_5(image):
return corruption_dict['shot_noise'](image, 4)
def corr_spatter_sev_1(image):
return corruption_dict['spatter'](image, 0)
def corr_spatter_sev_2(image):
return corruption_dict['spatter'](image, 1)
def corr_spatter_sev_3(image):
return corruption_dict['spatter'](image, 2)
def corr_spatter_sev_4(image):
return corruption_dict['spatter'](image, 3)
def corr_spatter_sev_5(image):
return corruption_dict['spatter'](image, 4)
def corr_speckle_noise_sev_1(image):
return corruption_dict['speckle_noise'](image, 0)
def corr_speckle_noise_sev_2(image):
return corruption_dict['speckle_noise'](image, 1)
def corr_speckle_noise_sev_3(image):
return corruption_dict['speckle_noise'](image, 2)
def corr_speckle_noise_sev_4(image):
return corruption_dict['speckle_noise'](image, 3)
def corr_speckle_noise_sev_5(image):
return corruption_dict['speckle_noise'](image, 4)
def corr_zoom_blur_sev_1(image):
return corruption_dict['zoom_blur'](image, 0)
def corr_zoom_blur_sev_2(image):
return corruption_dict['zoom_blur'](image, 1)
def corr_zoom_blur_sev_3(image):
return corruption_dict['zoom_blur'](image, 2)
def corr_zoom_blur_sev_4(image):
return corruption_dict['zoom_blur'](image, 3)
def corr_zoom_blur_sev_5(image):
return corruption_dict['zoom_blur'](image, 4)
in_memory_corruptions_cpu = {
'imagenet-c.brightness.1_in-memory': corr_brightness_sev_1,
'imagenet-c.brightness.2_in-memory': corr_brightness_sev_2,
'imagenet-c.brightness.3_in-memory': corr_brightness_sev_3,
'imagenet-c.brightness.4_in-memory': corr_brightness_sev_4,
'imagenet-c.brightness.5_in-memory': corr_brightness_sev_5,
'imagenet-c.contrast.1_in-memory': corr_contrast_sev_1,
'imagenet-c.contrast.2_in-memory': corr_contrast_sev_2,
'imagenet-c.contrast.3_in-memory': corr_contrast_sev_3,
'imagenet-c.contrast.4_in-memory': corr_contrast_sev_4,
'imagenet-c.contrast.5_in-memory': corr_contrast_sev_5,
'imagenet-c.fog.1_in-memory': corr_fog_sev_1,
'imagenet-c.fog.2_in-memory': corr_fog_sev_2,
'imagenet-c.fog.3_in-memory': corr_fog_sev_3,
'imagenet-c.fog.4_in-memory': corr_fog_sev_4,
'imagenet-c.fog.5_in-memory': corr_fog_sev_5,
'imagenet-c.frost.1_in-memory': corr_frost_sev_1,
'imagenet-c.frost.2_in-memory': corr_frost_sev_2,
'imagenet-c.frost.3_in-memory': corr_frost_sev_3,
'imagenet-c.frost.4_in-memory': corr_frost_sev_4,
'imagenet-c.frost.5_in-memory': corr_frost_sev_5,
'imagenet-c.gaussian_blur.1_in-memory': corr_gaussian_blur_sev_1,
'imagenet-c.gaussian_blur.2_in-memory': corr_gaussian_blur_sev_2,
'imagenet-c.gaussian_blur.3_in-memory': corr_gaussian_blur_sev_3,
'imagenet-c.gaussian_blur.4_in-memory': corr_gaussian_blur_sev_4,
'imagenet-c.gaussian_blur.5_in-memory': corr_gaussian_blur_sev_5,
'imagenet-c.gaussian_noise.1_in-memory': corr_gaussian_noise_sev_1,
'imagenet-c.gaussian_noise.2_in-memory': corr_gaussian_noise_sev_2,
'imagenet-c.gaussian_noise.3_in-memory': corr_gaussian_noise_sev_3,
'imagenet-c.gaussian_noise.4_in-memory': corr_gaussian_noise_sev_4,
'imagenet-c.gaussian_noise.5_in-memory': corr_gaussian_noise_sev_5,
'imagenet-c.impulse_noise.1_in-memory': corr_impulse_noise_sev_1,
'imagenet-c.impulse_noise.2_in-memory': corr_impulse_noise_sev_2,
'imagenet-c.impulse_noise.3_in-memory': corr_impulse_noise_sev_3,
'imagenet-c.impulse_noise.4_in-memory': corr_impulse_noise_sev_4,
'imagenet-c.impulse_noise.5_in-memory': corr_impulse_noise_sev_5,
'imagenet-c.jpeg_compression.1_in-memory': corr_jpeg_compression_sev_1,
'imagenet-c.jpeg_compression.2_in-memory': corr_jpeg_compression_sev_2,
'imagenet-c.jpeg_compression.3_in-memory': corr_jpeg_compression_sev_3,
'imagenet-c.jpeg_compression.4_in-memory': corr_jpeg_compression_sev_4,
'imagenet-c.jpeg_compression.5_in-memory': corr_jpeg_compression_sev_5,
'imagenet-c.pixelate.1_in-memory': corr_pixelate_sev_1,
'imagenet-c.pixelate.2_in-memory': corr_pixelate_sev_2,
'imagenet-c.pixelate.3_in-memory': corr_pixelate_sev_3,
'imagenet-c.pixelate.4_in-memory': corr_pixelate_sev_4,
'imagenet-c.pixelate.5_in-memory': corr_pixelate_sev_5,
'imagenet-c.saturate.1_in-memory': corr_saturate_sev_1,
'imagenet-c.saturate.2_in-memory': corr_saturate_sev_2,
'imagenet-c.saturate.3_in-memory': corr_saturate_sev_3,
'imagenet-c.saturate.4_in-memory': corr_saturate_sev_4,
'imagenet-c.saturate.5_in-memory': corr_saturate_sev_5,
'imagenet-c.shot_noise.1_in-memory': corr_shot_noise_sev_1,
'imagenet-c.shot_noise.2_in-memory': corr_shot_noise_sev_2,
'imagenet-c.shot_noise.3_in-memory': corr_shot_noise_sev_3,
'imagenet-c.shot_noise.4_in-memory': corr_shot_noise_sev_4,
'imagenet-c.shot_noise.5_in-memory': corr_shot_noise_sev_5,
'imagenet-c.spatter.1_in-memory': corr_spatter_sev_1,
'imagenet-c.spatter.2_in-memory': corr_spatter_sev_2,
'imagenet-c.spatter.3_in-memory': corr_spatter_sev_3,
'imagenet-c.spatter.4_in-memory': corr_spatter_sev_4,
'imagenet-c.spatter.5_in-memory': corr_spatter_sev_5,
'imagenet-c.speckle_noise.1_in-memory': corr_speckle_noise_sev_1,
'imagenet-c.speckle_noise.2_in-memory': corr_speckle_noise_sev_2,
'imagenet-c.speckle_noise.3_in-memory': corr_speckle_noise_sev_3,
'imagenet-c.speckle_noise.4_in-memory': corr_speckle_noise_sev_4,
'imagenet-c.speckle_noise.5_in-memory': corr_speckle_noise_sev_5,
'imagenet-c.zoom_blur.1_in-memory': corr_zoom_blur_sev_1,
'imagenet-c.zoom_blur.2_in-memory': corr_zoom_blur_sev_2,
'imagenet-c.zoom_blur.3_in-memory': corr_zoom_blur_sev_3,
'imagenet-c.zoom_blur.4_in-memory': corr_zoom_blur_sev_4,
'imagenet-c.zoom_blur.5_in-memory': corr_zoom_blur_sev_5}
for on_disk_corruption in on_disk_corruptions:
registry.add_eval_setting(
EvalSetting(
name = on_disk_corruption,
dataset = StandardDataset(name=on_disk_corruption.replace('_on-disk', '')),
size = 50000,
)
)
registry.add_eval_setting(
EvalSetting(
name = on_disk_corruption+'_10percent',
dataset = StandardDataset(name=on_disk_corruption.replace('_on-disk', '')),
size = 5000,
idx_subsample_list = idx_subsample_list_50k_10percent,
parent_eval_setting = on_disk_corruption,
)
)
def gen_corrupt_batch_gpu(corruption, severity):
def corrupt_batch_gpu(images, model):
for i in range(images.size(0)):
corr_func = corruption_dict[corruption]
images[i] = corr_func(images[i], severity, gpu=True)
return images
return corrupt_batch_gpu
for corruption_name, d in in_memory_corruptions_gpu.items():
registry.add_eval_setting(
EvalSetting(
name = corruption_name,
dataset = StandardDataset(name='val'),
size = 50000,
perturbation_fn_gpu = gen_corrupt_batch_gpu(d['corruption'], d['severity']),
)
)
registry.add_eval_setting(
EvalSetting(
name = corruption_name+'_10percent',
dataset = StandardDataset(name='val'),
size = 5000,
perturbation_fn_gpu = gen_corrupt_batch_gpu(d['corruption'], d['severity']),
idx_subsample_list = idx_subsample_list_50k_10percent,
parent_eval_setting = corruption_name,
)
)
for corruption_name, func in in_memory_corruptions_cpu.items():
registry.add_eval_setting(
EvalSetting(
name = corruption_name,
dataset = StandardDataset(name='val'),
size = 50000,
perturbation_fn_cpu = func,
)
)
registry.add_eval_setting(
EvalSetting(
name = corruption_name+'_10percent',
dataset = StandardDataset(name='val'),
size = 5000,
perturbation_fn_cpu = func,
idx_subsample_list = idx_subsample_list_50k_10percent,
parent_eval_setting = corruption_name,
)
)
|
none
| 1
| 1.55431
| 2
|
|
addons/Sprytile-6b68d00/rx/linq/observable/manyselect.py
|
trisadmeslek/V-Sekai-Blender-tools
| 733
|
6626111
|
from rx.core import Observable
from rx.core import ObservableBase
from rx.internal.basic import noop
from rx.subjects import AsyncSubject
from rx.disposables import CompositeDisposable
from rx.concurrency import immediate_scheduler, current_thread_scheduler
from rx.internal import extensionmethod
class ChainObservable(ObservableBase):
def _subscribe_core(self, observer):
g = CompositeDisposable()
def action(scheduler, state):
observer.on_next(self.head)
g.add(self.tail.merge_observable().subscribe(observer))
g.add(current_thread_scheduler.schedule(action))
return g
def __init__(self, head):
super(ChainObservable, self).__init__()
self.head = head
self.tail = AsyncSubject()
def on_completed(self):
self.on_next(Observable.empty())
def on_error(self, e):
self.on_next(Observable.throw_exception(e))
def on_next(self, v):
self.tail.on_next(v)
self.tail.on_completed()
@extensionmethod(Observable)
def many_select(self, selector, scheduler=None):
"""Comonadic bind operator. Internally projects a new observable for each
value, and it pushes each observable into the user-defined selector function
that projects/queries each observable into some result.
Keyword arguments:
selector -- {Function} A transform function to apply to each element.
scheduler -- {Object} [Optional] Scheduler used to execute the
operation. If not specified, defaults to the ImmediateScheduler.
Returns {Observable} An observable sequence which results from the
comonadic bind operation.
"""
scheduler = scheduler or immediate_scheduler
source = self
def factory():
chain = [None]
def mapper(x):
curr = ChainObservable(x)
chain[0] and chain[0].on_next(x)
chain[0] = curr
return curr
def on_error(e):
if chain[0]:
chain[0].on_error(e)
def on_completed():
if chain[0]:
chain[0].on_completed()
return source.map(
mapper
).tap(
noop, on_error, on_completed
).observe_on(
scheduler
).map(
selector
)
return Observable.defer(factory)
|
from rx.core import Observable
from rx.core import ObservableBase
from rx.internal.basic import noop
from rx.subjects import AsyncSubject
from rx.disposables import CompositeDisposable
from rx.concurrency import immediate_scheduler, current_thread_scheduler
from rx.internal import extensionmethod
class ChainObservable(ObservableBase):
def _subscribe_core(self, observer):
g = CompositeDisposable()
def action(scheduler, state):
observer.on_next(self.head)
g.add(self.tail.merge_observable().subscribe(observer))
g.add(current_thread_scheduler.schedule(action))
return g
def __init__(self, head):
super(ChainObservable, self).__init__()
self.head = head
self.tail = AsyncSubject()
def on_completed(self):
self.on_next(Observable.empty())
def on_error(self, e):
self.on_next(Observable.throw_exception(e))
def on_next(self, v):
self.tail.on_next(v)
self.tail.on_completed()
@extensionmethod(Observable)
def many_select(self, selector, scheduler=None):
"""Comonadic bind operator. Internally projects a new observable for each
value, and it pushes each observable into the user-defined selector function
that projects/queries each observable into some result.
Keyword arguments:
selector -- {Function} A transform function to apply to each element.
scheduler -- {Object} [Optional] Scheduler used to execute the
operation. If not specified, defaults to the ImmediateScheduler.
Returns {Observable} An observable sequence which results from the
comonadic bind operation.
"""
scheduler = scheduler or immediate_scheduler
source = self
def factory():
chain = [None]
def mapper(x):
curr = ChainObservable(x)
chain[0] and chain[0].on_next(x)
chain[0] = curr
return curr
def on_error(e):
if chain[0]:
chain[0].on_error(e)
def on_completed():
if chain[0]:
chain[0].on_completed()
return source.map(
mapper
).tap(
noop, on_error, on_completed
).observe_on(
scheduler
).map(
selector
)
return Observable.defer(factory)
|
en
| 0.710308
|
Comonadic bind operator. Internally projects a new observable for each value, and it pushes each observable into the user-defined selector function that projects/queries each observable into some result. Keyword arguments: selector -- {Function} A transform function to apply to each element. scheduler -- {Object} [Optional] Scheduler used to execute the operation. If not specified, defaults to the ImmediateScheduler. Returns {Observable} An observable sequence which results from the comonadic bind operation.
| 2.33538
| 2
|
company_house_ua/urls.py
|
dchaplinsky/ragoogle
| 3
|
6626112
|
<gh_stars>1-10
from django.urls import path
from django.conf.urls import url
from .views import CompanyHouseUaDetailsView
urlpatterns = [
path('<pk>', CompanyHouseUaDetailsView.as_view(), name="company_house_ua>details"),
]
|
from django.urls import path
from django.conf.urls import url
from .views import CompanyHouseUaDetailsView
urlpatterns = [
path('<pk>', CompanyHouseUaDetailsView.as_view(), name="company_house_ua>details"),
]
|
none
| 1
| 1.665691
| 2
|
|
api/users/serializers.py
|
admariner/flagsmith
| 0
|
6626113
|
<filename>api/users/serializers.py
from djoser.serializers import UserSerializer as DjoserUserSerializer
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from organisations.invites.models import Invite
from organisations.models import Organisation
from organisations.serializers import UserOrganisationSerializer
from .models import FFAdminUser, UserPermissionGroup
class UserIdSerializer(serializers.Serializer):
id = serializers.IntegerField()
def update(self, instance, validated_data):
pass
def create(self, validated_data):
organisation = Organisation.objects.get(pk=self.context.get("organisation"))
user = self._get_user(validated_data)
if user and organisation in user.organisations.all():
user.remove_organisation(organisation)
user.permission_groups.remove(
*UserPermissionGroup.objects.filter(organisation=organisation)
)
return user
def validate(self, attrs):
if not FFAdminUser.objects.filter(pk=attrs.get("id")).exists():
message = "User with id %d does not exist" % attrs.get("id")
raise ValidationError({"id": message})
return attrs
def _get_user(self, validated_data):
try:
return FFAdminUser.objects.get(pk=validated_data.get("id"))
except FFAdminUser.DoesNotExist:
return None
class UserFullSerializer(serializers.ModelSerializer):
organisations = UserOrganisationSerializer(source="userorganisation_set", many=True)
class Meta:
model = FFAdminUser
fields = ("id", "email", "first_name", "last_name", "organisations")
class UserLoginSerializer(serializers.ModelSerializer):
class Meta:
model = FFAdminUser
fields = ("email", "password")
class UserListSerializer(serializers.ModelSerializer):
role = serializers.SerializerMethodField(read_only=True)
join_date = serializers.SerializerMethodField(read_only=True)
default_fields = ("id", "email", "first_name", "last_name")
organisation_users_fields = ("role", "date_joined")
class Meta:
model = FFAdminUser
def get_field_names(self, declared_fields, info):
fields = self.default_fields
if self.context.get("organisation"):
fields += self.organisation_users_fields
return fields
def get_role(self, instance):
return instance.get_organisation_role(self.context.get("organisation"))
def get_join_date(self, instance):
return instance.get_organisation_join_date(self.context.get("organisation"))
class InviteSerializer(serializers.ModelSerializer):
class Meta:
model = Invite
fields = (
"email",
"organisation",
"frontend_base_url",
"invited_by",
"date_created",
)
class InviteListSerializer(serializers.ModelSerializer):
invited_by = UserListSerializer()
class Meta:
model = Invite
fields = ("id", "email", "date_created", "invited_by")
class UserIdsSerializer(serializers.Serializer):
user_ids = serializers.ListField(child=serializers.IntegerField())
def validate(self, data):
if not FFAdminUser.objects.filter(id__in=data["user_ids"]).count() == len(
data["user_ids"]
):
raise serializers.ValidationError(f"Some users not found")
return data
class UserPermissionGroupSerializerList(serializers.ModelSerializer):
class Meta:
model = UserPermissionGroup
fields = ("id", "name", "users")
read_only_fields = ("id",)
class UserPermissionGroupSerializerDetail(UserPermissionGroupSerializerList):
# TODO: remove users from here and just add a summary of number of users
users = UserListSerializer(many=True, read_only=True)
class CustomCurrentUserSerializer(DjoserUserSerializer):
auth_type = serializers.CharField(read_only=True)
is_superuser = serializers.BooleanField(read_only=True)
class Meta(DjoserUserSerializer.Meta):
fields = DjoserUserSerializer.Meta.fields + ("auth_type", "is_superuser")
|
<filename>api/users/serializers.py
from djoser.serializers import UserSerializer as DjoserUserSerializer
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from organisations.invites.models import Invite
from organisations.models import Organisation
from organisations.serializers import UserOrganisationSerializer
from .models import FFAdminUser, UserPermissionGroup
class UserIdSerializer(serializers.Serializer):
id = serializers.IntegerField()
def update(self, instance, validated_data):
pass
def create(self, validated_data):
organisation = Organisation.objects.get(pk=self.context.get("organisation"))
user = self._get_user(validated_data)
if user and organisation in user.organisations.all():
user.remove_organisation(organisation)
user.permission_groups.remove(
*UserPermissionGroup.objects.filter(organisation=organisation)
)
return user
def validate(self, attrs):
if not FFAdminUser.objects.filter(pk=attrs.get("id")).exists():
message = "User with id %d does not exist" % attrs.get("id")
raise ValidationError({"id": message})
return attrs
def _get_user(self, validated_data):
try:
return FFAdminUser.objects.get(pk=validated_data.get("id"))
except FFAdminUser.DoesNotExist:
return None
class UserFullSerializer(serializers.ModelSerializer):
organisations = UserOrganisationSerializer(source="userorganisation_set", many=True)
class Meta:
model = FFAdminUser
fields = ("id", "email", "first_name", "last_name", "organisations")
class UserLoginSerializer(serializers.ModelSerializer):
class Meta:
model = FFAdminUser
fields = ("email", "password")
class UserListSerializer(serializers.ModelSerializer):
role = serializers.SerializerMethodField(read_only=True)
join_date = serializers.SerializerMethodField(read_only=True)
default_fields = ("id", "email", "first_name", "last_name")
organisation_users_fields = ("role", "date_joined")
class Meta:
model = FFAdminUser
def get_field_names(self, declared_fields, info):
fields = self.default_fields
if self.context.get("organisation"):
fields += self.organisation_users_fields
return fields
def get_role(self, instance):
return instance.get_organisation_role(self.context.get("organisation"))
def get_join_date(self, instance):
return instance.get_organisation_join_date(self.context.get("organisation"))
class InviteSerializer(serializers.ModelSerializer):
class Meta:
model = Invite
fields = (
"email",
"organisation",
"frontend_base_url",
"invited_by",
"date_created",
)
class InviteListSerializer(serializers.ModelSerializer):
invited_by = UserListSerializer()
class Meta:
model = Invite
fields = ("id", "email", "date_created", "invited_by")
class UserIdsSerializer(serializers.Serializer):
user_ids = serializers.ListField(child=serializers.IntegerField())
def validate(self, data):
if not FFAdminUser.objects.filter(id__in=data["user_ids"]).count() == len(
data["user_ids"]
):
raise serializers.ValidationError(f"Some users not found")
return data
class UserPermissionGroupSerializerList(serializers.ModelSerializer):
class Meta:
model = UserPermissionGroup
fields = ("id", "name", "users")
read_only_fields = ("id",)
class UserPermissionGroupSerializerDetail(UserPermissionGroupSerializerList):
# TODO: remove users from here and just add a summary of number of users
users = UserListSerializer(many=True, read_only=True)
class CustomCurrentUserSerializer(DjoserUserSerializer):
auth_type = serializers.CharField(read_only=True)
is_superuser = serializers.BooleanField(read_only=True)
class Meta(DjoserUserSerializer.Meta):
fields = DjoserUserSerializer.Meta.fields + ("auth_type", "is_superuser")
|
en
| 0.612821
|
# TODO: remove users from here and just add a summary of number of users
| 2.187282
| 2
|
src/multimodal_ensemble/multimodal_main.py
|
tahleen-rahman/all2friends
| 0
|
6626114
|
<reponame>tahleen-rahman/all2friends
# Created by rahman at 17:20 2020-03-10 using PyCharm
import sys
from multimodal_ensemble.multimodal_utils import makeHCI, recalculate_missingHCI, write_posteriors, \
unite_posteriors, score_avg5probs, score_subsets_weighted, split_train_test_cv, split_train_test
from network_attacks.friend2vec_main import attack_network
from shared_tools.utils import DATAPATH, city
i = sys.argv[1]
cap_file, ht_file, im_file, loc_file = "extra_cap_dataset.csv", "extra_ht_dataset.csv", "extra_im_dataset.csv", "loc_dataset.csv"
# get the file suffixes for each cross val iteration subgraph of friends in the training set
# to use for node2vec for the network attack as well as for the multimodal attack later
friends_train_file = split_train_test_cv(DATAPATH, i)
network_file = attack_network(friends_train_file, i)
write_posteriors(cap_file, ht_file, im_file, loc_file, network_file, DATAPATH, i)
unite_posteriors(DATAPATH, i)
score_avg5probs(DATAPATH, i)
score_subsets_weighted(DATAPATH, i)
|
# Created by rahman at 17:20 2020-03-10 using PyCharm
import sys
from multimodal_ensemble.multimodal_utils import makeHCI, recalculate_missingHCI, write_posteriors, \
unite_posteriors, score_avg5probs, score_subsets_weighted, split_train_test_cv, split_train_test
from network_attacks.friend2vec_main import attack_network
from shared_tools.utils import DATAPATH, city
i = sys.argv[1]
cap_file, ht_file, im_file, loc_file = "extra_cap_dataset.csv", "extra_ht_dataset.csv", "extra_im_dataset.csv", "loc_dataset.csv"
# get the file suffixes for each cross val iteration subgraph of friends in the training set
# to use for node2vec for the network attack as well as for the multimodal attack later
friends_train_file = split_train_test_cv(DATAPATH, i)
network_file = attack_network(friends_train_file, i)
write_posteriors(cap_file, ht_file, im_file, loc_file, network_file, DATAPATH, i)
unite_posteriors(DATAPATH, i)
score_avg5probs(DATAPATH, i)
score_subsets_weighted(DATAPATH, i)
|
en
| 0.936927
|
# Created by rahman at 17:20 2020-03-10 using PyCharm # get the file suffixes for each cross val iteration subgraph of friends in the training set # to use for node2vec for the network attack as well as for the multimodal attack later
| 2.347625
| 2
|
sources/algorithms/sweepln/regionsweepdebug.py
|
tipech/OverlapGraph
| 0
|
6626115
|
<reponame>tipech/OverlapGraph
#!/usr/bin/env python
"""
Debugging for One-pass Sweep-line Algorithm over Regions
Implements the RegionSweepDebug class that prints a block of debugging output
for every Event broadcasted from the one-pass sweep-line algorithm, through a
subscription to RegionSweep.
Classes:
- RegionSweepDebug
"""
from typing import Tuple
from sources.abstract import Event, Subscriber
from sources.core import RegionEvent, RegionGrp
from .regionsweep import RegionSweepEvtKind
class RegionSweepDebug(Subscriber[RegionGrp]):
"""
Debugging for One-pass Sweep-line over Regions
For every Event broadcasted from the one-pass sweep-line algorithm,
prints a block of debugging output, through a subscription
to RegionSweep.
Extends:
Subscriber[RegionGrp]
Attributes:
counter: The Event sequence number.
The number of Events previously seen.
"""
counter: int
def __init__(self):
"""
Initialize this class to prints a block of debugging output for
every Event broadcasted from the one-pass sweep-line algorithm.
Sets the events as RegionSweepEvtKind.
"""
Subscriber.__init__(self, RegionSweepEvtKind)
self.counter = 0
### Methods: Event Handlers
def on_next(self, event: Event[RegionGrp]):
"""
Print Events for sweep-line algorithm.
Overrides:
Subscriber.on_next
Args:
event:
The next Event.
"""
print()
print(f'{self.counter}:')
print(f'\tkind: {event.kind.name}')
print(f'\tdepth: {event.depth}')
print(f'\tactives: {[k[0:8] for k in event.actives.keys()]}')
if isinstance(event, RegionEvent):
print(f'\tdimension: {event.dimension}, ' +
f'when: {event.when}, ' +
f'order: {event.order}')
print(f'\tcontext: {event.context.id[0:8]}, ' +
f'lower: {event.context.lower}, ' +
f'upper: {event.context.upper}')
if isinstance(event.context, Tuple):
print(f'\tcontext:')
print(f'\t\t0: {event.context[0].id[0:8]}, ' +
f'lower: {event.context[0].lower}, ' +
f'upper: {event.context[0].upper}')
print(f'\t\t0: {event.context[1].id[0:8]}, ' +
f'lower: {event.context[1].lower}, ' +
f'upper: {event.context[1].upper}')
self.counter += 1
|
#!/usr/bin/env python
"""
Debugging for One-pass Sweep-line Algorithm over Regions
Implements the RegionSweepDebug class that prints a block of debugging output
for every Event broadcasted from the one-pass sweep-line algorithm, through a
subscription to RegionSweep.
Classes:
- RegionSweepDebug
"""
from typing import Tuple
from sources.abstract import Event, Subscriber
from sources.core import RegionEvent, RegionGrp
from .regionsweep import RegionSweepEvtKind
class RegionSweepDebug(Subscriber[RegionGrp]):
"""
Debugging for One-pass Sweep-line over Regions
For every Event broadcasted from the one-pass sweep-line algorithm,
prints a block of debugging output, through a subscription
to RegionSweep.
Extends:
Subscriber[RegionGrp]
Attributes:
counter: The Event sequence number.
The number of Events previously seen.
"""
counter: int
def __init__(self):
"""
Initialize this class to prints a block of debugging output for
every Event broadcasted from the one-pass sweep-line algorithm.
Sets the events as RegionSweepEvtKind.
"""
Subscriber.__init__(self, RegionSweepEvtKind)
self.counter = 0
### Methods: Event Handlers
def on_next(self, event: Event[RegionGrp]):
"""
Print Events for sweep-line algorithm.
Overrides:
Subscriber.on_next
Args:
event:
The next Event.
"""
print()
print(f'{self.counter}:')
print(f'\tkind: {event.kind.name}')
print(f'\tdepth: {event.depth}')
print(f'\tactives: {[k[0:8] for k in event.actives.keys()]}')
if isinstance(event, RegionEvent):
print(f'\tdimension: {event.dimension}, ' +
f'when: {event.when}, ' +
f'order: {event.order}')
print(f'\tcontext: {event.context.id[0:8]}, ' +
f'lower: {event.context.lower}, ' +
f'upper: {event.context.upper}')
if isinstance(event.context, Tuple):
print(f'\tcontext:')
print(f'\t\t0: {event.context[0].id[0:8]}, ' +
f'lower: {event.context[0].lower}, ' +
f'upper: {event.context[0].upper}')
print(f'\t\t0: {event.context[1].id[0:8]}, ' +
f'lower: {event.context[1].lower}, ' +
f'upper: {event.context[1].upper}')
self.counter += 1
|
en
| 0.812444
|
#!/usr/bin/env python Debugging for One-pass Sweep-line Algorithm over Regions Implements the RegionSweepDebug class that prints a block of debugging output for every Event broadcasted from the one-pass sweep-line algorithm, through a subscription to RegionSweep. Classes: - RegionSweepDebug Debugging for One-pass Sweep-line over Regions For every Event broadcasted from the one-pass sweep-line algorithm, prints a block of debugging output, through a subscription to RegionSweep. Extends: Subscriber[RegionGrp] Attributes: counter: The Event sequence number. The number of Events previously seen. Initialize this class to prints a block of debugging output for every Event broadcasted from the one-pass sweep-line algorithm. Sets the events as RegionSweepEvtKind. ### Methods: Event Handlers Print Events for sweep-line algorithm. Overrides: Subscriber.on_next Args: event: The next Event.
| 2.723573
| 3
|
extraction/FlattenData.py
|
Neyzoter/Nelearn
| 0
|
6626116
|
import numpy as np
def getFlatData(X):
'''
输入:(x,...)的矩阵,比如图片,x张图片、像素m*n、y个通道
输出:(特征个数,x)的扁平化矩阵,即每一列是一个样本的工程
'''
# 让X转化为样本数*所有特征向量化.T
X_flatten = X.reshape(X.shape[0],-1).T
return X_flatten
|
import numpy as np
def getFlatData(X):
'''
输入:(x,...)的矩阵,比如图片,x张图片、像素m*n、y个通道
输出:(特征个数,x)的扁平化矩阵,即每一列是一个样本的工程
'''
# 让X转化为样本数*所有特征向量化.T
X_flatten = X.reshape(X.shape[0],-1).T
return X_flatten
|
zh
| 0.949001
|
输入:(x,...)的矩阵,比如图片,x张图片、像素m*n、y个通道 输出:(特征个数,x)的扁平化矩阵,即每一列是一个样本的工程 # 让X转化为样本数*所有特征向量化.T
| 3.454455
| 3
|
main.py
|
Yilmaz4/Encrypt-n-Decrypt
| 8
|
6626117
|
<reponame>Yilmaz4/Encrypt-n-Decrypt<gh_stars>1-10
from cryptpy import *
cipher = AES(key = generateKey(32))
print(cipher.encryptData(data="hi everybody"))
|
from cryptpy import *
cipher = AES(key = generateKey(32))
print(cipher.encryptData(data="hi everybody"))
|
none
| 1
| 2.765224
| 3
|
|
google/appengine/ext/datastore_admin/backup_handler.py
|
luduvigo/app-blog-code
| 0
|
6626118
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handler for data backup operation.
Generic datastore admin console transfers control to ConfirmBackupHandler
after selection of entities. The ConfirmBackupHandler confirms with user
his choice, enters a backup name and transfers control to
DoBackupHandler. DoBackupHandler starts backup mappers and displays confirmation
page.
This module also contains actual mapper code for backing data over.
"""
from __future__ import with_statement
import cStringIO
import datetime
import itertools
import logging
import os
import re
import time
import urllib
import xml.dom.minidom
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import app_identity
from google.appengine.api import blobstore as blobstore_api
from google.appengine.api import capabilities
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api.files import records
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import backup_pb2
from google.appengine.ext.datastore_admin import utils
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import operation as op
from google.appengine.ext.mapreduce import output_writers
XSRF_ACTION = 'backup'
BUCKET_PATTERN = (r'^([a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*)'
r'(\.([a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*))*$')
MAX_BUCKET_LEN = 222
MIN_BUCKET_LEN = 3
MAX_BUCKET_SEGMENT_LEN = 63
NUM_KINDS_DEFERRED_THRESHOLD = 10
MAX_BLOBS_PER_DELETE = 500
MEANING_TO_PRIMITIVE_TYPE = {
entity_pb.Property.GD_WHEN: backup_pb2.EntitySchema.DATE_TIME,
entity_pb.Property.GD_RATING: backup_pb2.EntitySchema.RATING,
entity_pb.Property.ATOM_LINK: backup_pb2.EntitySchema.LINK,
entity_pb.Property.ATOM_CATEGORY: backup_pb2.EntitySchema.CATEGORY,
entity_pb.Property.GD_PHONENUMBER: backup_pb2.EntitySchema.PHONE_NUMBER,
entity_pb.Property.GD_POSTALADDRESS: backup_pb2.EntitySchema.POSTAL_ADDRESS,
entity_pb.Property.GD_EMAIL: backup_pb2.EntitySchema.EMAIL,
entity_pb.Property.GD_IM: backup_pb2.EntitySchema.IM_HANDLE,
entity_pb.Property.BLOBKEY: backup_pb2.EntitySchema.BLOB_KEY,
entity_pb.Property.TEXT: backup_pb2.EntitySchema.TEXT,
entity_pb.Property.BLOB: backup_pb2.EntitySchema.BLOB,
entity_pb.Property.BYTESTRING: backup_pb2.EntitySchema.SHORT_BLOB
}
class ConfirmBackupHandler(webapp.RequestHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'confirm_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
namespace = handler.request.get('namespace', None)
has_namespace = namespace is not None
kinds = handler.request.get_all('kind')
sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
blob_warning = bool(blobstore.BlobInfo.all().count(1))
template_params = {
'form_target': DoBackupHandler.SUFFIX,
'kind_list': kinds,
'remainder': remainder,
'sizes_known': sizes_known,
'size_total': size_total,
'queues': None,
'cancel_url': handler.request.get('cancel_url'),
'has_namespace': has_namespace,
'namespace': namespace,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'blob_warning': blob_warning,
'backup_name': 'datastore_backup_%s' % time.strftime('%Y_%m_%d')
}
utils.RenderToResponse(handler, 'confirm_backup.html', template_params)
class ConfirmDeleteBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to delete a backup copy."""
SUFFIX = 'confirm_delete_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
gs_warning = False
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
gs_warning |= backup.filesystem == files.GS_FILESYSTEM
template_params = {
'form_target': DoBackupDeleteHandler.SUFFIX,
'cancel_url': handler.request.get('cancel_url'),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'gs_warning': gs_warning
}
utils.RenderToResponse(handler, 'confirm_delete_backup.html',
template_params)
class ConfirmAbortBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to abort a backup copy."""
SUFFIX = 'confirm_abort_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
template_params = {
'form_target': DoBackupAbortHandler.SUFFIX,
'cancel_url': handler.request.get('cancel_url'),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION)
}
utils.RenderToResponse(handler, 'confirm_abort_backup.html',
template_params)
class ConfirmRestoreFromBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to restore from backup."""
SUFFIX = 'confirm_restore_from_backup'
@classmethod
def Render(cls, handler, default_backup_id=None,
default_delete_backup_after_restore=False):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
default_backup_id: default value for handler.request
default_delete_backup_after_restore: default value for handler.request
"""
backup_id = handler.request.get('backup_id', default_backup_id)
backup = db.get(backup_id) if backup_id else None
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
original_app_warning = backup.original_app
if os.getenv('APPLICATION_ID') == original_app_warning:
original_app_warning = None
template_params = {
'form_target': DoBackupRestoreHandler.SUFFIX,
'queues': None,
'cancel_url': handler.request.get('cancel_url'),
'backup': backup,
'delete_backup_after_restore': handler.request.get(
'delete_backup_after_restore', default_delete_backup_after_restore),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'original_app_warning': original_app_warning
}
utils.RenderToResponse(handler, 'confirm_restore_from_backup.html',
template_params)
class ConfirmBackupImportHandler(webapp.RequestHandler):
"""Handler to import backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
gs_handle = handler.request.get('gs_handle')
error = None if gs_handle else 'Google Cloud Storage path is missing'
other_backup_info_files = []
selected_backup_info_file = None
backup_info_specified = False
if not error:
try:
gs_handle = gs_handle.rstrip()
bucket_name, prefix = parse_gs_handle(gs_handle)
validate_gs_bucket_name(bucket_name)
if not is_accessible_bucket_name(bucket_name):
raise BackupValidationException(
'Bucket "%s" is not accessible' % bucket_name)
if prefix.endswith('.backup_info'):
prefix = prefix[0:prefix.rfind('/')]
backup_info_specified = True
elif prefix and not prefix.endswith('/'):
prefix += '/'
for backup_info_file in list_bucket_files(bucket_name, prefix):
if backup_info_file.endswith('.backup_info'):
backup_info_file = '/gs/%s/%s' % (bucket_name, backup_info_file)
if backup_info_specified and backup_info_file == gs_handle:
selected_backup_info_file = backup_info_file
else:
other_backup_info_files.append(backup_info_file)
except Exception, ex:
error = 'Failed to read bucket: %s' % ex
template_params = {
'error': error,
'form_target': DoBackupImportHandler.SUFFIX,
'cancel_url': handler.request.get('cancel_url'),
'selected_backup_info_file': selected_backup_info_file,
'other_backup_info_files': other_backup_info_files,
'backup_info_specified': backup_info_specified,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION)
}
utils.RenderToResponse(handler, 'confirm_backup_import.html',
template_params)
class BackupInformationHandler(webapp.RequestHandler):
"""Handler to display backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
backup_ids = handler.request.get_all('backup_id')
template_params = {
'backups': db.get(backup_ids),
'back_target': handler.request.get('cancel_url'),
}
utils.RenderToResponse(handler, 'backup_information.html', template_params)
class BaseDoHandler(webapp.RequestHandler):
"""Base class for all Do*Handlers."""
MAPREDUCE_DETAIL = utils.config.MAPREDUCE_PATH + '/detail?mapreduce_id='
def get(self):
"""Handler for get requests to datastore_admin backup operations.
Status of executed jobs is displayed.
"""
jobs = self.request.get_all('job')
tasks = self.request.get_all('task')
error = self.request.get('error', '')
xsrf_error = self.request.get('xsrf_error', '')
template_params = {
'job_list': jobs,
'task_list': tasks,
'mapreduce_detail': self.MAPREDUCE_DETAIL,
'error': error,
'xsrf_error': xsrf_error,
'datastore_admin_home': utils.config.BASE_PATH,
}
utils.RenderToResponse(self, self._get_html_page, template_params)
@property
def _get_html_page(self):
"""Return the name of the HTML page for HTTP/GET requests."""
raise NotImplementedError
@property
def _get_post_html_page(self):
"""Return the name of the HTML page for HTTP/POST requests."""
raise NotImplementedError
def _ProcessPostRequest(self):
"""Process the HTTP/POST request and return the result as parametrs."""
raise NotImplementedError
def _GetBasicMapperParams(self):
return {'namespace': self.request.get('namespace', None)}
def post(self):
"""Handler for post requests to datastore_admin/backup.do.
Redirects to the get handler after processing the request.
"""
token = self.request.get('xsrf_token')
if not utils.ValidateXsrfToken(token, XSRF_ACTION):
parameters = [('xsrf_error', '1')]
else:
try:
parameters = self._ProcessPostRequest()
except Exception, e:
error = self._HandleException(e)
parameters = [('error', error)]
query = urllib.urlencode(parameters)
self.redirect('%s/%s?%s' % (utils.config.BASE_PATH,
self._get_post_html_page,
query))
def _HandleException(self, e):
"""Make exception handling overrideable by tests.
Args:
e: The exception to handle.
Returns:
The exception error string.
"""
return '%s: %s' % (type(e), e)
class BackupValidationException(Exception):
pass
def _perform_backup(kinds,
filesystem, gs_bucket_name, backup,
queue, mapper_params, max_jobs):
"""Triggers backup mapper jobs.
Args:
kinds: a sequence of kind names
filesystem: files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM
or None to default to blobstore
gs_bucket_name: the GS file system bucket in which to store the backup
when using the GS file system, and otherwise ignored
backup: the backup name
queue: the task queue for the backup task
mapper_params: the mapper parameters
max_jobs: if backup needs more jobs than this, defer them
Returns:
The job or task ids.
Raises:
BackupValidationException: On validation error.
Exception: On other error.
"""
BACKUP_COMPLETE_HANDLER = __name__ + '.BackupCompleteHandler'
BACKUP_HANDLER = __name__ + '.BackupEntity.map'
INPUT_READER = __name__ + '.DatastoreEntityProtoInputReader'
OUTPUT_WRITER = output_writers.__name__ + '.FileRecordsOutputWriter'
if not filesystem:
filesystem = files.BLOBSTORE_FILESYSTEM
if filesystem == files.GS_FILESYSTEM:
if not gs_bucket_name:
raise BackupValidationException('Bucket name missing.')
bucket_name, path = parse_gs_handle(gs_bucket_name)
gs_bucket_name = ('%s/%s' % (bucket_name, path)).rstrip('/')
validate_gs_bucket_name(bucket_name)
if not is_accessible_bucket_name(bucket_name):
raise BackupValidationException(
'Bucket "%s" is not accessible' % bucket_name)
elif filesystem == files.BLOBSTORE_FILESYSTEM:
pass
else:
raise BackupValidationException('Unknown filesystem "%s".' % filesystem)
job_name = 'datastore_backup_%s_%%(kind)s' % re.sub(r'[^\w]', '_', backup)
try:
job_operation = utils.StartOperation('Backup: %s' % backup)
backup_info = BackupInformation(parent=job_operation)
backup_info.filesystem = filesystem
backup_info.name = backup
backup_info.kinds = kinds
backup_info.put(force_writes=True)
mapreduce_params = {
'done_callback_handler': BACKUP_COMPLETE_HANDLER,
'backup_info_pk': str(backup_info.key()),
'force_ops_writes': True,
}
mapper_params = dict(mapper_params)
mapper_params['filesystem'] = filesystem
if filesystem == files.GS_FILESYSTEM:
mapper_params['gs_bucket_name'] = gs_bucket_name
if len(kinds) <= max_jobs:
return [('job', job) for job in _run_map_jobs(
job_operation.key(), backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER, OUTPUT_WRITER,
mapper_params, mapreduce_params, queue)]
else:
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=1)
deferred_task = deferred.defer(_run_map_jobs, job_operation.key(),
backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER,
OUTPUT_WRITER,
mapper_params,
mapreduce_params,
queue, _queue=queue,
_url=utils.ConfigDefaults.DEFERRED_PATH,
_retry_options=retry_options)
return [('task', deferred_task.name)]
except Exception:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
job_name)
if backup_info:
delete_backup_info(backup_info)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
class BackupLinkHandler(webapp.RequestHandler):
"""Handler to deal with requests to the backup link to backup data."""
SUFFIX = 'backup.create'
def get(self):
"""Handler for get requests to datastore_admin/backup.create."""
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup.create."""
try:
backup_prefix = self.request.get('name')
if not backup_prefix:
if self.request.headers.get('X-AppEngine-Cron'):
backup_prefix = 'cron-'
else:
backup_prefix = 'link-'
backup_prefix_with_date = backup_prefix + time.strftime('%Y_%m_%d')
backup_name = backup_prefix_with_date
backup_suffix_counter = 1
while BackupInformation.name_exists(backup_name):
backup_suffix_counter += 1
backup_name = backup_prefix_with_date + '-' + str(backup_suffix_counter)
kinds = self.request.get_all('kind')
if not kinds:
self.errorResponse('Backup must include at least one kind.')
return
for kind in kinds:
if not utils.IsKindNameVisible(kind):
self.errorResponse('Invalid kind %s.' % kind)
return
mapper_params = {'namespace': None}
_perform_backup(kinds,
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup_name,
self.request.get('queue'),
mapper_params,
1000000)
except Exception, e:
self.errorResponse(e.message)
def errorResponse(self, message):
logging.error('Could not create backup via link: %s', message)
self.response.set_status(400, message)
class DatastoreEntityProtoInputReader(input_readers.DatastoreEntityInputReader):
"""An input reader which yields datastore entity proto for a kind."""
def _iter_key_range(self, k_range):
raw_entity_kind = self._get_raw_entity_kind(self._entity_kind)
query = k_range.make_ascending_datastore_query(raw_entity_kind,
self._filters)
connection = datastore_rpc.Connection()
query_options = datastore_query.QueryOptions(batch_size=self._batch_size)
for batch in query.GetQuery().run(connection, query_options):
for entity_proto in batch.results:
key = datastore_types.Key._FromPb(entity_proto.key())
yield key, entity_proto
class DoBackupHandler(BaseDoHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'backup.do'
_get_html_page = 'do_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup mapper jobs and returns their ids."""
try:
backup = self.request.get('backup_name').strip()
if not backup:
raise BackupValidationException('Unspecified backup name.')
if BackupInformation.name_exists(backup):
raise BackupValidationException('Backup "%s" already exists.' % backup)
mapper_params = self._GetBasicMapperParams()
backup_result = _perform_backup(self.request.get_all('kind'),
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup,
self.request.get('queue'),
mapper_params,
10)
return backup_result
except BackupValidationException, e:
return [('error', e.message)]
def _run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue):
"""Creates backup/restore MR jobs for the given operation.
Args:
job_operation_key: a key of utils.DatastoreAdminOperation entity.
backup_info_key: a key of BackupInformation entity.
kinds: a list of kinds to run the M/R for.
job_name: the M/R job name prefix.
backup_handler: M/R job completion handler.
input_reader: M/R input reader.
output_writer: M/R output writer.
mapper_params: custom parameters to pass to mapper.
mapreduce_params: dictionary parameters relevant to the whole job.
queue: the name of the queue that will be used by the M/R.
Returns:
Ids of all started mapper jobs as list of strings.
"""
backup_info = BackupInformation.get(backup_info_key)
if not backup_info:
return []
jobs = utils.RunMapForKinds(
job_operation_key,
kinds,
job_name,
backup_handler,
input_reader,
output_writer,
mapper_params,
mapreduce_params,
queue_name=queue)
backup_info.active_jobs = jobs
backup_info.put(force_writes=True)
return jobs
def get_backup_files(backup_info, selected_kinds=None):
"""Returns the backup filenames for selected kinds or all if None/Empty."""
if backup_info.blob_files:
return backup_info.blob_files
else:
kinds_backup_files = backup_info.get_kind_backup_files(selected_kinds)
return list(itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
def delete_backup_files(filesystem, backup_files):
if backup_files:
if filesystem == files.BLOBSTORE_FILESYSTEM:
blob_keys = []
for fname in backup_files:
blob_key = files.blobstore.get_blob_key(fname)
if blob_key:
blob_keys.append(blob_key)
if len(blob_keys) == MAX_BLOBS_PER_DELETE:
blobstore_api.delete(blob_keys)
blob_keys = []
if blob_keys:
blobstore_api.delete(blob_keys)
def delete_backup_info(backup_info, delete_files=True):
"""Deletes a backup including its associated files and other metadata."""
if backup_info.blob_files:
delete_backup_files(backup_info.filesystem, backup_info.blob_files)
backup_info.delete(force_writes=True)
else:
kinds_backup_files = tuple(backup_info.get_kind_backup_files())
if delete_files:
delete_backup_files(backup_info.filesystem, itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
db.delete(kinds_backup_files + (backup_info,), force_writes=True)
class DoBackupDeleteHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to delete backup data."""
SUFFIX = 'backup_delete.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_delete.do.
Deletes are executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
error = None
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to delete datastore backup.')
error = str(e)
if error:
query = urllib.urlencode([('error', error)])
self.redirect('%s?%s' % (utils.config.BASE_PATH, query))
else:
self.redirect(utils.config.BASE_PATH)
class DoBackupAbortHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to abort pending backups."""
SUFFIX = 'backup_abort.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_abort.do.
Abort is executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
error = None
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
utils.AbortAdminOperation(backup_info.parent_key())
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to abort pending datastore backup.')
error = str(e)
if error:
self.redirect(utils.config.BASE_PATH + '?error=%s' % error)
else:
self.redirect(utils.config.BASE_PATH)
class DoBackupRestoreHandler(BaseDoHandler):
"""Handler to restore backup data.
Deals with requests from the admin console.
"""
SUFFIX = 'backup_restore.do'
BACKUP_RESTORE_HANDLER = __name__ + '.RestoreEntity.map'
RESTORE_COMPLETE_HANDLER = __name__ + '.RestoreCompleteHandler'
INPUT_READER = input_readers.__name__ + '.RecordsReader'
_get_html_page = 'do_restore_from_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup restore mapper jobs and returns their ids."""
backup_id = self.request.get('backup_id')
if not backup_id:
return [('error', 'Unspecified Backup.')]
backup = db.get(db.Key(backup_id))
if not backup:
return [('error', 'Invalid Backup id.')]
queue = self.request.get('queue')
job_name = 'datastore_backup_restore_%s' % re.sub(r'[^\w]', '_',
backup.name)
job_operation = None
kinds = set(self.request.get_all('kind'))
if not (backup.blob_files or kinds):
return [('error', 'No kinds were selected')]
backup_kinds = set(backup.kinds)
difference = kinds.difference(backup_kinds)
if difference:
return [('error', 'Backup does not have kind[s] %s' %
', '.join(difference))]
kinds = list(kinds) if len(backup_kinds) != len(kinds) else []
try:
operation_name = 'Restoring %s from backup: %s' % (
', '.join(kinds) if kinds else 'all', backup.name)
job_operation = utils.StartOperation(operation_name)
mapper_params = self._GetBasicMapperParams()
mapper_params['files'] = get_backup_files(backup, kinds)
mapper_params['kind_filter'] = kinds
mapper_params['original_app'] = backup.original_app
mapreduce_params = {
'backup_name': backup.name,
'force_ops_writes': True
}
shard_count = min(max(utils.MAPREDUCE_MIN_SHARDS,
len(mapper_params['files'])),
utils.MAPREDUCE_MAX_SHARDS)
job = utils.StartMap(job_operation.key(), job_name,
self.BACKUP_RESTORE_HANDLER, self.INPUT_READER, None,
mapper_params, mapreduce_params, queue_name=queue,
shard_count=shard_count)
return [('job', job)]
except Exception:
logging.exception('Failed to start a restore from backup job "%s".',
job_name)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
finally:
if self.request.get('delete_backup_after_restore', '').lower() == 'true':
delete_backup_info(backup, delete_files=False)
class DoBackupImportHandler(BaseDoHandler):
  """Handler to deal with datastore admin requests to import backup info."""
  SUFFIX = 'import_backup.do'
  def get(self):
    # GET is handled identically to POST so direct links work too.
    self.post()
  def post(self):
    """Handler for post requests to datastore_admin/import_backup.do.
    Import is executed and user is redirected to the base-path handler.
    """
    gs_handle = self.request.get('gs_handle')
    token = self.request.get('xsrf_token')
    error = None
    if gs_handle and utils.ValidateXsrfToken(token, XSRF_ACTION):
      try:
        bucket_name, path = parse_gs_handle(gs_handle)
        file_content = get_gs_object(bucket_name, path)
        entities = parse_backup_info_file(file_content)
        # The first record of a backup_info file is the BackupInformation
        # entity itself; the remaining records are per-kind file listings.
        original_backup_info = entities.next()
        entity = datastore.Entity(BackupInformation.kind())
        entity.update(original_backup_info)
        backup_info = BackupInformation.from_entity(entity)
        if original_backup_info.key().app() != os.getenv('APPLICATION_ID'):
          # Remember the source app so a later restore can fix up keys.
          backup_info.original_app = original_backup_info.key().app()
        def tx():
          backup_info.put(force_writes=True)
          kind_files_models = []
          for entity in entities:
            kind_files = backup_info.create_kind_backup_files(
                entity.key().name(), entity['files'])
            kind_files_models.append(kind_files)
          db.put(kind_files_models, force_writes=True)
        db.run_in_transaction(tx)
        backup_id = str(backup_info.key())
      except Exception, e:
        logging.exception('Failed to Import datastore backup information.')
        error = str(e)
    if error:
      query = urllib.urlencode([('error', error)])
      self.redirect('%s?%s' % (utils.config.BASE_PATH, query))
    elif self.request.get('Restore'):
      # NOTE(review): if the XSRF check above fails while 'Restore' is set,
      # backup_id is unbound here and this raises NameError -- confirm.
      ConfirmRestoreFromBackupHandler.Render(
          self, default_backup_id=backup_id,
          default_delete_backup_after_restore=True)
    else:
      self.redirect(utils.config.BASE_PATH)
class BackupInformation(db.Model):
  """An entity to keep information on successful backup operations."""
  # User-supplied backup name (uniqueness is checked via name_exists).
  name = db.StringProperty()
  # Entity kinds included in this backup.
  kinds = db.StringListProperty()
  # Either files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM.
  filesystem = db.StringProperty(default=files.BLOBSTORE_FILESYSTEM)
  start_time = db.DateTimeProperty(auto_now_add=True)
  # Mapreduce job ids still running / already finished for this backup.
  active_jobs = db.StringListProperty()
  completed_jobs = db.StringListProperty()
  # None until every backup job has completed.
  complete_time = db.DateTimeProperty(default=None)
  # Legacy layout: output filenames stored directly on this entity.
  blob_files = db.StringListProperty()
  # App id of the source application for imported backups; None otherwise.
  original_app = db.StringProperty(default=None)
  # Path of the written .backup_info metadata file, if any.
  gs_handle = db.TextProperty(default=None)
  @classmethod
  def kind(cls):
    return utils.BACKUP_INFORMATION_KIND
  @classmethod
  def name_exists(cls, backup_name):
    # Keys-only query: only existence matters, not the entity contents.
    query = BackupInformation.all(keys_only=True)
    query.filter('name =', backup_name)
    return query.get() is not None
  def create_kind_backup_files_key(self, kind):
    # KindBackupFiles entities are children of their BackupInformation,
    # keyed by the kind name.
    return db.Key.from_path(KindBackupFiles.kind(), kind, parent=self.key())
  def create_kind_backup_files(self, kind, kind_files):
    return KindBackupFiles(key=self.create_kind_backup_files_key(kind),
                           files=kind_files)
  def get_kind_backup_files(self, kinds=None):
    # Batch-get for an explicit kind list, ancestor query otherwise.
    if kinds:
      return db.get([self.create_kind_backup_files_key(kind) for kind in kinds])
    else:
      return KindBackupFiles.all().ancestor(self).run()
class KindBackupFiles(db.Model):
  """An entity to keep files information per kind for a backup.
  A key for this model should be created using kind as a name and the
  associated BackupInformation as a parent.
  """
  # Output filenames produced by the backup mapper for this kind.
  files = db.StringListProperty(indexed=False)
  @property
  def backup_kind(self):
    # The entity kind is encoded as this entity's key name.
    return self.key().name()
  @classmethod
  def kind(cls):
    return utils.BACKUP_INFORMATION_FILES_KIND
@db.transactional
def BackupCompleteHandler(operation, job_id, mapreduce_state):
  """Updates BackupInformation record for a completed mapper job."""
  mapreduce_spec = mapreduce_state.mapreduce_spec
  kind = mapreduce_spec.mapper.params['entity_kind']
  backup_info = BackupInformation.get(mapreduce_spec.params['backup_info_pk'])
  if backup_info:
    # Move the finished job from the active list to the completed list.
    if job_id in backup_info.active_jobs:
      backup_info.active_jobs.remove(job_id)
      backup_info.completed_jobs = list(
          set(backup_info.completed_jobs + [job_id]))
    filenames = mapreduce_state.writer_state['filenames']
    if backup_info.filesystem == files.BLOBSTORE_FILESYSTEM:
      # Zero-length blobstore output files carry no data; delete them.
      filenames = drop_empty_files(filenames)
    if backup_info.blob_files:
      # Legacy layout: filenames stored directly on BackupInformation.
      backup_info.blob_files = list(set(backup_info.blob_files + filenames))
      backup_info.put(force_writes=True)
    else:
      # Current layout: filenames stored per kind in KindBackupFiles.
      kind_backup_files = backup_info.get_kind_backup_files([kind])[0]
      if kind_backup_files:
        kind_backup_files.files = list(set(kind_backup_files.files + filenames))
      else:
        kind_backup_files = backup_info.create_kind_backup_files(kind,
                                                                 filenames)
      db.put((backup_info, kind_backup_files), force_writes=True)
    if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:
      # Whole operation finished: finalize via a transactional deferred task.
      deferred.defer(finalize_backup_info, backup_info.key(),
                     mapreduce_spec.mapper.params,
                     _url=utils.ConfigDefaults.DEFERRED_PATH,
                     _transactional=True)
  else:
    logging.warn('BackupInfo was not found for %s',
                 mapreduce_spec.params['backup_info_pk'])
def finalize_backup_info(backup_info_pk, mapper_params):
  """Finalize the state of BackupInformation and creates info file for GS."""
  def tx():
    backup_info = BackupInformation.get(backup_info_pk)
    if backup_info:
      backup_info.complete_time = datetime.datetime.now()
      if backup_info.filesystem == files.GS_FILESYSTEM:
        # Write the .backup_info metadata files next to the backup data.
        gs_bucket = mapper_params['gs_bucket_name']
        BackupInfoWriter(gs_bucket).write(backup_info)
      backup_info.put(force_writes=True)
      logging.info('Backup %s completed', backup_info.name)
    else:
      logging.warn('Backup %s could not be found', backup_info_pk)
  db.run_in_transaction(tx)
def parse_backup_info_file(content):
  """Returns entities iterator from a backup_info file content."""
  record_reader = records.RecordsReader(cStringIO.StringIO(content))
  # The first record is a format-version marker; only version '1' is known.
  file_version = record_reader.read()
  if file_version != '1':
    raise IOError('Unsupported version')
  return (datastore.Entity.FromPb(raw_record) for raw_record in record_reader)
@db.non_transactional
def drop_empty_files(filenames):
  """Deletes empty files and returns filenames minus the deleted ones."""
  non_empty_filenames = []
  empty_file_keys = []
  blobs_info = blobstore.BlobInfo.get(
      [files.blobstore.get_blob_key(fn) for fn in filenames])
  for filename, blob_info in itertools.izip(filenames, blobs_info):
    if blob_info:
      if blob_info.size > 0:
        non_empty_filenames.append(filename)
      else:
        empty_file_keys.append(blob_info.key())
  # Filenames with no matching BlobInfo are silently dropped as well.
  blobstore_api.delete(empty_file_keys)
  return non_empty_filenames
class BackupInfoWriter(object):
  """A class for writing Datastore backup metadata files."""
  def __init__(self, gs_bucket):
    """Construct a BackupInfoWriter.
    Args:
      gs_bucket: Required string for the target GS bucket.
    """
    self.__gs_bucket = gs_bucket
  def write(self, backup_info):
    """Write the metadata files for the given backup_info.
    Args:
      backup_info: Required BackupInformation.
    Returns:
      A list with Backup info filename followed by Kind info filenames.
    """
    fn = self._write_backup_info(backup_info)
    return [fn] + self._write_kind_info(backup_info)
  def _generate_filename(self, backup_info, suffix):
    # '/' in the encoded key would split the GS object name, so replace it.
    key_str = str(backup_info.key()).replace('/', '_')
    return '/gs/%s/%s%s' % (self.__gs_bucket, key_str, suffix)
  def _write_backup_info(self, backup_info):
    """Writes a backup_info_file.
    Args:
      backup_info: Required BackupInformation.
    Returns:
      Backup info filename.
    """
    filename = self._generate_filename(backup_info, '.backup_info')
    backup_info.gs_handle = filename
    info_file = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
    try:
      with records.RecordsWriter(info_file) as writer:
        # Record layout: version marker, BackupInformation proto, then one
        # record per KindBackupFiles proto (mirrors parse_backup_info_file).
        writer.write('1')
        writer.write(db.model_to_protobuf(backup_info).SerializeToString())
        for kind_files in backup_info.get_kind_backup_files():
          writer.write(db.model_to_protobuf(kind_files).SerializeToString())
    finally:
      info_file.close(finalize=True)
    return filename
  def _write_kind_info(self, backup_info):
    """Writes type information schema for each kind in backup_info.
    Args:
      backup_info: Required BackupInformation.
    Returns:
      A list with all created filenames.
    """
    filenames = []
    for kind_backup_files in backup_info.get_kind_backup_files():
      backup = self._create_kind_backup(backup_info, kind_backup_files)
      filename = self._generate_filename(
          backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)
      self._write_kind_backup_info_file(filename, backup)
      filenames.append(filename)
    return filenames
  def _create_kind_backup(self, backup_info, kind_backup_files):
    """Creates and populate a backup_pb2.Backup."""
    backup = backup_pb2.Backup()
    backup.backup_info.backup_name = backup_info.name
    backup.backup_info.start_timestamp = datastore_types.DatetimeToTimestamp(
        backup_info.start_time)
    backup.backup_info.end_timestamp = datastore_types.DatetimeToTimestamp(
        backup_info.complete_time)
    kind = kind_backup_files.backup_kind
    kind_info = backup.kind_info.add()
    kind_info.kind = kind
    kind_info.entity_schema.kind = kind
    kind_info.file.extend(kind_backup_files.files)
    # Merge the per-shard aggregated schema for this kind into a single
    # EntityTypeInfo before emitting the entity schema.
    entity_type_info = EntityTypeInfo(kind=kind)
    for sharded_aggregation in SchemaAggregationResult.load(
        backup_info.key(), kind):
      if sharded_aggregation.entity_type_info:
        entity_type_info.merge(sharded_aggregation.entity_type_info)
    entity_type_info.populate_entity_schema(kind_info.entity_schema)
    return backup
  @classmethod
  def _write_kind_backup_info_file(cls, filename, backup):
    """Writes a kind backup_info.
    Args:
      filename: The name of the file to be created as string.
      backup: apphosting.ext.datastore_admin.Backup proto.
    """
    f = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
    try:
      f.write(backup.SerializeToString())
    finally:
      f.close(finalize=True)
class PropertyTypeInfo(model.JsonMixin):
  """Type information for an entity property."""
  def __init__(self, name, is_repeated=False, primitive_types=None,
               embedded_entities=None):
    """Construct a PropertyTypeInfo instance.
    Args:
      name: The name of the property as a string.
      is_repeated: A boolean that indicates if the property is repeated.
      primitive_types: Optional list of PrimitiveType integer values.
      embedded_entities: Optional list of EntityTypeInfo.
    """
    self.__name = name
    self.__is_repeated = is_repeated
    self.__primitive_types = set(primitive_types) if primitive_types else set()
    # Embedded entities are keyed by kind; duplicates are merged.
    self.__embedded_entities = {}
    for entity in embedded_entities or ():
      if entity.kind in self.__embedded_entities:
        self.__embedded_entities[entity.kind].merge(entity)
      else:
        self.__embedded_entities[entity.kind] = entity
  @property
  def name(self):
    return self.__name
  @property
  def is_repeated(self):
    return self.__is_repeated
  @property
  def primitive_types(self):
    return self.__primitive_types
  def embedded_entities_kind_iter(self):
    # Iterator over the kinds of all embedded entities.
    return self.__embedded_entities.iterkeys()
  def get_embedded_entity(self, kind):
    return self.__embedded_entities.get(kind)
  def merge(self, other):
    """Merge a PropertyTypeInfo with this instance.
    Args:
      other: Required PropertyTypeInfo to merge.
    Returns:
      True if anything was changed. False otherwise.
    Raises:
      ValueError: if property names do not match.
      TypeError: if other is not instance of PropertyTypeInfo.
    """
    if not isinstance(other, PropertyTypeInfo):
      raise TypeError('Expected PropertyTypeInfo, was %r' % (other,))
    if other.__name != self.__name:
      raise ValueError('Property names mismatch (%s, %s)' %
                       (self.__name, other.__name))
    changed = False
    if other.__is_repeated and not self.__is_repeated:
      self.__is_repeated = True
      changed = True
    if not other.__primitive_types.issubset(self.__primitive_types):
      self.__primitive_types = self.__primitive_types.union(
          other.__primitive_types)
      changed = True
    for kind, other_embedded_entity in other.__embedded_entities.iteritems():
      embedded_entity = self.__embedded_entities.get(kind)
      if embedded_entity:
        changed = embedded_entity.merge(other_embedded_entity) or changed
      else:
        self.__embedded_entities[kind] = other_embedded_entity
        changed = True
    return changed
  def populate_entity_schema_field(self, entity_schema):
    """Add an populate a Field to the given entity_schema.
    Args:
      entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
    """
    # Nothing is emitted for a property with no known type information.
    if not (self.__primitive_types or self.__embedded_entities):
      return
    field = entity_schema.field.add()
    field.name = self.__name
    field_type = field.type.add()
    field_type.is_list = self.__is_repeated
    field_type.primitive_type.extend(self.__primitive_types)
    for embedded_entity in self.__embedded_entities.itervalues():
      embedded_entity_schema = field_type.embedded_schema.add()
      embedded_entity.populate_entity_schema(embedded_entity_schema)
  def to_json(self):
    """Returns a JSON-serializable dict representation of this instance."""
    json = dict()
    json['name'] = self.__name
    json['is_repeated'] = self.__is_repeated
    json['primitive_types'] = list(self.__primitive_types)
    json['embedded_entities'] = [e.to_json() for e in
                                 self.__embedded_entities.itervalues()]
    return json
  @classmethod
  def from_json(cls, json):
    """Reconstructs a PropertyTypeInfo from its to_json representation."""
    # Both optional keys are treated as optional: previously a missing
    # 'embedded_entities' key raised TypeError (iterating None) while a
    # missing 'primitive_types' key was tolerated.
    return cls(json['name'], json['is_repeated'], json.get('primitive_types'),
               [EntityTypeInfo.from_json(entity_json) for entity_json
                in json.get('embedded_entities') or ()])
class EntityTypeInfo(model.JsonMixin):
  """Type information for an entity."""
  def __init__(self, kind=None, properties=None):
    """Construct an EntityTypeInfo instance.
    Args:
      kind: An optional kind name as string.
      properties: An optional list of PropertyTypeInfo.
    """
    self.__kind = kind
    # Properties are keyed by name; duplicates are merged, not overwritten.
    self.__properties = {}
    for property_type_info in properties or ():
      if property_type_info.name in self.__properties:
        self.__properties[property_type_info.name].merge(property_type_info)
      else:
        self.__properties[property_type_info.name] = property_type_info
  @property
  def kind(self):
    return self.__kind
  def properties_name_iter(self):
    # Iterator over the names of all known properties.
    return self.__properties.iterkeys()
  def get_property(self, name):
    return self.__properties.get(name)
  def merge(self, other):
    """Merge an EntityTypeInfo with this instance.
    Args:
      other: Required EntityTypeInfo to merge.
    Returns:
      True if anything was changed. False otherwise.
    Raises:
      ValueError: if kinds do not match.
      TypeError: if other is not instance of EntityTypeInfo.
    """
    if not isinstance(other, EntityTypeInfo):
      raise TypeError('Expected EntityTypeInfo, was %r' % (other,))
    if other.__kind != self.__kind:
      raise ValueError('Kinds mismatch (%s, %s)' % (self.__kind, other.__kind))
    changed = False
    for name, other_property in other.__properties.iteritems():
      self_property = self.__properties.get(name)
      if self_property:
        changed = self_property.merge(other_property) or changed
      else:
        self.__properties[name] = other_property
        changed = True
    return changed
  def populate_entity_schema(self, entity_schema):
    """Populates the given entity_schema with values from this instance.
    Args:
      entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
    """
    if self.__kind:
      entity_schema.kind = self.__kind
    for property_type_info in self.__properties.itervalues():
      property_type_info.populate_entity_schema_field(entity_schema)
  def to_json(self):
    """Returns a JSON-serializable dict representation of this instance."""
    return {
        'kind': self.__kind,
        'properties': [p.to_json() for p in self.__properties.itervalues()]
    }
  @classmethod
  def from_json(cls, json):
    """Reconstructs an EntityTypeInfo from its to_json representation."""
    kind = json.get('kind')
    properties_json = json.get('properties')
    if properties_json:
      return cls(kind, [PropertyTypeInfo.from_json(p) for p in properties_json])
    else:
      return cls(kind)
  @classmethod
  def create_from_entity_proto(cls, entity_proto):
    """Creates and populates an EntityTypeInfo from an EntityProto."""
    # Both regular and raw (unindexed) properties contribute to the schema.
    properties = [cls.__get_property_type_info(property_proto) for
                  property_proto in itertools.chain(
                      entity_proto.property_list(),
                      entity_proto.raw_property_list())]
    kind = utils.get_kind_from_entity_pb(entity_proto)
    return cls(kind, properties)
  @classmethod
  def __get_property_type_info(cls, property_proto):
    """Returns the type mapping for the provided property."""
    name = property_proto.name()
    is_repeated = bool(property_proto.multiple())
    primitive_type = None
    entity_type = None
    # A known meaning (e.g. GD_WHEN, TEXT) maps directly to a schema type.
    if property_proto.has_meaning():
      primitive_type = MEANING_TO_PRIMITIVE_TYPE.get(property_proto.meaning())
    if primitive_type is None:
      # Otherwise infer the type from which value field is populated.
      value = property_proto.value()
      if value.has_int64value():
        primitive_type = backup_pb2.EntitySchema.INTEGER
      elif value.has_booleanvalue():
        primitive_type = backup_pb2.EntitySchema.BOOLEAN
      elif value.has_stringvalue():
        if property_proto.meaning() == entity_pb.Property.ENTITY_PROTO:
          entity_proto = entity_pb.EntityProto()
          try:
            # Best effort: an unparsable embedded entity is simply skipped.
            entity_proto.ParsePartialFromString(value.stringvalue())
          except Exception:
            pass
          else:
            entity_type = EntityTypeInfo.create_from_entity_proto(entity_proto)
        else:
          primitive_type = backup_pb2.EntitySchema.STRING
      elif value.has_doublevalue():
        primitive_type = backup_pb2.EntitySchema.FLOAT
      elif value.has_pointvalue():
        primitive_type = backup_pb2.EntitySchema.GEO_POINT
      elif value.has_uservalue():
        primitive_type = backup_pb2.EntitySchema.USER
      elif value.has_referencevalue():
        primitive_type = backup_pb2.EntitySchema.REFERENCE
    return PropertyTypeInfo(
        name, is_repeated,
        (primitive_type,) if primitive_type is not None else None,
        (entity_type,) if entity_type else None)
class SchemaAggregationResult(db.Model):
  """Persistent aggregated type information for a kind.
  An instance can be retrieved via the load method or created
  using the create method. An instance aggregates all type information
  for all seen embedded_entities via the merge method and is persisted when
  needed using the model put method.
  """
  entity_type_info = model.JsonProperty(
      EntityTypeInfo, default=EntityTypeInfo(), indexed=False)
  def merge(self, other):
    """Merge a SchemaAggregationResult or an EntityTypeInfo with this instance.
    Args:
      other: Required SchemaAggregationResult or EntityTypeInfo to merge.
    Returns:
      True if anything was changed. False otherwise.
    """
    if isinstance(other, SchemaAggregationResult):
      other = other.entity_type_info
    return self.entity_type_info.merge(other)
  @classmethod
  def _get_parent_key(cls, backup_id, kind_name):
    # Results are grouped under a synthetic 'Kind' parent per backup.
    return datastore_types.Key.from_path('Kind', kind_name, parent=backup_id)
  @classmethod
  def create(cls, backup_id, kind_name, shard_id):
    """Create SchemaAggregationResult instance.
    Args:
      backup_id: Required BackupInformation Key.
      kind_name: Required kind name as string.
      shard_id: Required shard id as string.
    Returns:
      A new SchemaAggregationResult instance.
    """
    parent = cls._get_parent_key(backup_id, kind_name)
    return SchemaAggregationResult(
        key_name=shard_id, parent=parent,
        entity_type_info=EntityTypeInfo(kind=kind_name))
  @classmethod
  def load(cls, backup_id, kind_name, shard_id=None):
    """Retrieve SchemaAggregationResult from the Datastore.
    Args:
      backup_id: Required BackupInformation Key.
      kind_name: Required kind name as string.
      shard_id: Optional shard id as string.
    Returns:
      SchemaAggregationResult iterator or an entity if shard_id not None.
    """
    parent = cls._get_parent_key(backup_id, kind_name)
    if shard_id:
      key = datastore_types.Key.from_path(cls.kind(), shard_id, parent=parent)
      return SchemaAggregationResult.get(key)
    else:
      return db.Query(cls).ancestor(parent).run()
  @classmethod
  def kind(cls):
    return utils.BACKUP_INFORMATION_KIND_TYPE_INFO
class SchemaAggregationPool(object):
  """An MR pool to aggregate type information per kind."""
  def __init__(self, backup_id, kind, shard_id):
    """Construct SchemaAggregationPool instance.
    Args:
      backup_id: Required BackupInformation Key.
      kind: Required kind name as string.
      shard_id: Required shard id as string.
    """
    self.__backup_id = backup_id
    self.__kind = kind
    self.__shard_id = shard_id
    # Resume from a previously persisted aggregation if one exists.
    self.__aggregation = SchemaAggregationResult.load(backup_id, kind, shard_id)
    if not self.__aggregation:
      self.__aggregation = SchemaAggregationResult.create(backup_id, kind,
                                                          shard_id)
      self.__needs_save = True
    else:
      self.__needs_save = False
  def merge(self, entity_type_info):
    """Merge EntityTypeInfo into aggregated type information."""
    if self.__aggregation.merge(entity_type_info):
      self.__needs_save = True
  def flush(self):
    """Save aggregated type information to the datastore if changed."""
    if self.__needs_save:
      def tx():
        # Re-load inside the transaction and merge into the stored entity to
        # avoid clobbering a concurrent update for the same shard.
        aggregation = SchemaAggregationResult.load(
            self.__backup_id, self.__kind, self.__shard_id)
        if aggregation:
          if aggregation.merge(self.__aggregation):
            aggregation.put(force_writes=True)
          self.__aggregation = aggregation
        else:
          self.__aggregation.put(force_writes=True)
      db.run_in_transaction(tx)
      self.__needs_save = False
class AggregateSchema(op.Operation):
  """An MR Operation to aggregate type information for a kind.
  This operation will register an MR pool, SchemaAggregationPool, if
  one is not already registered and will invoke the pool's merge operation
  per entity. The pool is responsible for keeping a persistent state of
  type aggregation using the sharded db model, SchemaAggregationResult.
  """
  def __init__(self, entity_proto):
    self.__entity_info = EntityTypeInfo.create_from_entity_proto(entity_proto)
  def __call__(self, ctx):
    # Lazily create and register one pool per shard context.
    pool = ctx.get_pool('schema_aggregation_pool')
    if not pool:
      backup_id = datastore_types.Key(
          context.get().mapreduce_spec.params['backup_info_pk'])
      pool = SchemaAggregationPool(
          backup_id, self.__entity_info.kind, ctx.shard_id)
      ctx.register_pool('schema_aggregation_pool', pool)
    pool.merge(self.__entity_info)
class BackupEntity(object):
  """A class which dumps the entity to the writer."""
  def map(self, entity_proto):
    """Backup entity map handler.
    Args:
      entity_proto: An instance of entity_pb.EntityProto.
    Yields:
      A serialized entity_pb.EntityProto as a string
    """
    yield entity_proto.SerializeToString()
    # Also feed the entity into the schema aggregation side-operation.
    yield AggregateSchema(entity_proto)
class RestoreEntity(object):
  """A class which restores the entity to datastore."""
  def __init__(self):
    self.initialized = False
    # Set of kinds to restore; None means restore everything.
    self.kind_filter = None
    # Target app id when restoring a backup imported from another app.
    self.app_id = None
  def initialize(self):
    # Lazily pull mapper params once per mapper instance.
    if self.initialized:
      return
    mapper_params = context.get().mapreduce_spec.mapper.params
    kind_filter = mapper_params.get('kind_filter')
    self.kind_filter = set(kind_filter) if kind_filter else None
    original_app = mapper_params.get('original_app')
    if original_app and os.getenv('APPLICATION_ID') != original_app:
      # Restoring into a different app: keys need to be rewritten.
      self.app_id = os.getenv('APPLICATION_ID')
    self.initialized = True
  def map(self, record):
    """Restore entity map handler.
    Args:
      record: A serialized entity_pb.EntityProto.
    Yields:
      A operation.db.Put for the mapped entity
    """
    self.initialize()
    pb = entity_pb.EntityProto(contents=record)
    if self.app_id:
      utils.FixKeys(pb, self.app_id)
    entity = datastore.Entity.FromPb(pb)
    if not self.kind_filter or entity.kind() in self.kind_filter:
      yield op.db.Put(entity)
      if self.app_id:
        # Keep the id allocator ahead of restored numeric ids.
        yield utils.AllocateMaxId(entity.key(), self.app_id)
def validate_gs_bucket_name(bucket_name):
  """Validate the format of the given bucket_name.
  Validation rules are based:
  https://developers.google.com/storage/docs/bucketnaming#requirements
  Args:
    bucket_name: The bucket name to validate.
  Raises:
    BackupValidationException: If the bucket name is invalid.
  """
  name_length = len(bucket_name)
  if name_length > MAX_BUCKET_LEN:
    raise BackupValidationException(
        'Bucket name length should not be longer than %d' % MAX_BUCKET_LEN)
  if name_length < MIN_BUCKET_LEN:
    raise BackupValidationException(
        'Bucket name length should be longer than %d' % MIN_BUCKET_LEN)
  if bucket_name.lower().startswith('goog'):
    raise BackupValidationException(
        'Bucket name should not start with a "goog" prefix')
  # Every dot-separated segment has its own length limit.
  if any(len(segment) > MAX_BUCKET_SEGMENT_LEN
         for segment in bucket_name.split('.')):
    raise BackupValidationException(
        'Segment length of bucket name should not be longer than %d' %
        MAX_BUCKET_SEGMENT_LEN)
  if not re.match(BUCKET_PATTERN, bucket_name):
    raise BackupValidationException('Invalid bucket name "%s"' % bucket_name)
def is_accessible_bucket_name(bucket_name):
  """Returns True if the application has access to the specified bucket."""
  scope = 'https://www.googleapis.com/auth/devstorage.read_write'
  bucket_url = 'https://%s.commondatastorage.googleapis.com/' % bucket_name
  auth_token, _ = app_identity.get_access_token(scope)
  request_headers = {
      'Authorization': 'OAuth %s' % auth_token,
      'x-goog-api-version': '2'
  }
  # A HEAD request on the bucket root answers 200 only when accessible.
  result = urlfetch.fetch(bucket_url, method=urlfetch.HEAD,
                          headers=request_headers)
  return result and result.status_code == 200
def parse_gs_handle(gs_handle):
  """Splits [/gs/]?bucket_name[/folder]*[/file]? to (bucket_name, path | '')."""
  if gs_handle.startswith('/'):
    # A leading slash means an explicit filesystem prefix; only 'gs' is valid.
    filesystem, _, remainder = gs_handle[1:].partition('/')
    if filesystem != 'gs':
      raise BackupValidationException('Unsupported filesystem: %s' % filesystem)
    gs_handle = remainder
  bucket_name, _, path = gs_handle.partition('/')
  return (bucket_name, path)
def list_bucket_files(bucket_name, prefix, max_keys=1000):
  """Returns a listing of a bucket that matches the given prefix."""
  scope = 'https://www.googleapis.com/auth/devstorage.read_only'
  url = 'https://%s.commondatastorage.googleapis.com/?' % bucket_name
  query = [('max-keys', max_keys)]
  if prefix:
    query.append(('prefix', prefix))
  url += urllib.urlencode(query)
  auth_token, _ = app_identity.get_access_token(scope)
  result = urlfetch.fetch(url, method=urlfetch.GET, headers={
      'Authorization': 'OAuth %s' % auth_token,
      'x-goog-api-version': '2'})
  if result and result.status_code == 200:
    # Extract the object names from the XML bucket listing.
    doc = xml.dom.minidom.parseString(result.content)
    return [node.childNodes[0].data for node in doc.getElementsByTagName('Key')]
  raise BackupValidationException('Request to Google Cloud Storage failed')
def get_gs_object(bucket_name, path):
  """Returns the content of the object at path in the given bucket."""
  scope = 'https://www.googleapis.com/auth/devstorage.read_only'
  url = 'https://%s.commondatastorage.googleapis.com/%s' % (bucket_name, path)
  auth_token, _ = app_identity.get_access_token(scope)
  result = urlfetch.fetch(url, method=urlfetch.GET, headers={
      'Authorization': 'OAuth %s' % auth_token,
      'x-goog-api-version': '2'})
  if result and result.status_code == 200:
    return result.content
  raise BackupValidationException('Requested path was not found')
def get_queue_names(app_id=None, max_rows=100):
  """Returns a list with all non-special queue names for app_id."""
  rpc = apiproxy_stub_map.UserRPC('taskqueue')
  request = taskqueue_service_pb.TaskQueueFetchQueuesRequest()
  response = taskqueue_service_pb.TaskQueueFetchQueuesResponse()
  if app_id:
    request.set_app_id(app_id)
  request.set_max_rows(max_rows)
  # 'default' always comes first; special queues (prefixed '__') are skipped.
  queues = ['default']
  try:
    rpc.make_call('FetchQueues', request, response)
    rpc.check_success()
    for queue in response.queue_list():
      if (queue.mode() == taskqueue_service_pb.TaskQueueMode.PUSH and
          not queue.queue_name().startswith('__') and
          queue.queue_name() != 'default'):
        queues.append(queue.queue_name())
  except Exception:
    # Best effort: fall back to just the default queue on RPC failure.
    logging.exception('Failed to get queue names.')
  return queues
def handlers_list(base_path):
return [
(r'%s/%s' % (base_path, BackupLinkHandler.SUFFIX),
BackupLinkHandler),
(r'%s/%s' % (base_path, ConfirmBackupHandler.SUFFIX),
ConfirmBackupHandler),
(r'%s/%s' % (base_path, DoBackupHandler.SUFFIX), DoBackupHandler),
(r'%s/%s' % (base_path, DoBackupRestoreHandler.SUFFIX),
DoBackupRestoreHandler),
(r'%s/%s' % (base_path, DoBackupDeleteHandler.SUFFIX),
DoBackupDeleteHandler),
(r'%s/%s' % (base_path, DoBackupAbortHandler.SUFFIX),
DoBackupAbortHandler),
(r'%s/%s' % (base_path, DoBackupImportHandler.SUFFIX),
DoBackupImportHandler),
]
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handler for data backup operation.
Generic datastore admin console transfers control to ConfirmBackupHandler
after selection of entities. The ConfirmBackupHandler asks the user to
confirm the choice and to enter a backup name, then transfers control to
DoBackupHandler. DoBackupHandler starts the backup mappers and displays a
confirmation page.
This module also contains the actual mapper code for backing up the data.
"""
from __future__ import with_statement
import cStringIO
import datetime
import itertools
import logging
import os
import re
import time
import urllib
import xml.dom.minidom
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import app_identity
from google.appengine.api import blobstore as blobstore_api
from google.appengine.api import capabilities
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api.files import records
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import backup_pb2
from google.appengine.ext.datastore_admin import utils
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import model
from google.appengine.ext.mapreduce import operation as op
from google.appengine.ext.mapreduce import output_writers
# XSRF token action shared by all backup handlers in this module.
XSRF_ACTION = 'backup'
# GS bucket names: dot-separated groups of alphanumerics and hyphens.
BUCKET_PATTERN = (r'^([a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*)'
                  r'(\.([a-zA-Z0-9]+(\-[a-zA-Z0-9]+)*))*$')
# Bucket-name length limits used by validate_gs_bucket_name.
MAX_BUCKET_LEN = 222
MIN_BUCKET_LEN = 3
MAX_BUCKET_SEGMENT_LEN = 63
NUM_KINDS_DEFERRED_THRESHOLD = 10
MAX_BLOBS_PER_DELETE = 500
# Maps entity_pb property meanings to backup_pb2 schema primitive types.
MEANING_TO_PRIMITIVE_TYPE = {
    entity_pb.Property.GD_WHEN: backup_pb2.EntitySchema.DATE_TIME,
    entity_pb.Property.GD_RATING: backup_pb2.EntitySchema.RATING,
    entity_pb.Property.ATOM_LINK: backup_pb2.EntitySchema.LINK,
    entity_pb.Property.ATOM_CATEGORY: backup_pb2.EntitySchema.CATEGORY,
    entity_pb.Property.GD_PHONENUMBER: backup_pb2.EntitySchema.PHONE_NUMBER,
    entity_pb.Property.GD_POSTALADDRESS: backup_pb2.EntitySchema.POSTAL_ADDRESS,
    entity_pb.Property.GD_EMAIL: backup_pb2.EntitySchema.EMAIL,
    entity_pb.Property.GD_IM: backup_pb2.EntitySchema.IM_HANDLE,
    entity_pb.Property.BLOBKEY: backup_pb2.EntitySchema.BLOB_KEY,
    entity_pb.Property.TEXT: backup_pb2.EntitySchema.TEXT,
    entity_pb.Property.BLOB: backup_pb2.EntitySchema.BLOB,
    entity_pb.Property.BYTESTRING: backup_pb2.EntitySchema.SHORT_BLOB
}
class ConfirmBackupHandler(webapp.RequestHandler):
  """Handler to deal with requests from the admin console to backup data."""
  SUFFIX = 'confirm_backup'
  @classmethod
  def Render(cls, handler):
    """Rendering method that can be called by main.py.
    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    namespace = handler.request.get('namespace', None)
    has_namespace = namespace is not None
    kinds = handler.request.get_all('kind')
    sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
    # Flags passed to the template: whether datastore writes are enabled and
    # whether any blob data exists.
    notreadonly_warning = capabilities.CapabilitySet(
        'datastore_v3', capabilities=['write']).is_enabled()
    blob_warning = bool(blobstore.BlobInfo.all().count(1))
    template_params = {
        'form_target': DoBackupHandler.SUFFIX,
        'kind_list': kinds,
        'remainder': remainder,
        'sizes_known': sizes_known,
        'size_total': size_total,
        'queues': None,
        'cancel_url': handler.request.get('cancel_url'),
        'has_namespace': has_namespace,
        'namespace': namespace,
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
        'notreadonly_warning': notreadonly_warning,
        'blob_warning': blob_warning,
        # Suggested default name includes the current date.
        'backup_name': 'datastore_backup_%s' % time.strftime('%Y_%m_%d')
    }
    utils.RenderToResponse(handler, 'confirm_backup.html', template_params)
class ConfirmDeleteBackupHandler(webapp.RequestHandler):
  """Handler to confirm admin console requests to delete a backup copy."""
  SUFFIX = 'confirm_delete_backup'
  @classmethod
  def Render(cls, handler):
    """Rendering method that can be called by main.py.
    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    requested_backup_ids = handler.request.get_all('backup_id')
    backups = []
    # Flag set when any selected backup is stored in Google Cloud Storage,
    # so the template can show a GS-specific warning.
    gs_warning = False
    if requested_backup_ids:
      for backup in db.get(requested_backup_ids):
        if backup:
          backups.append(backup)
          gs_warning |= backup.filesystem == files.GS_FILESYSTEM
    template_params = {
        'form_target': DoBackupDeleteHandler.SUFFIX,
        'cancel_url': handler.request.get('cancel_url'),
        'backups': backups,
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
        'gs_warning': gs_warning
    }
    utils.RenderToResponse(handler, 'confirm_delete_backup.html',
                           template_params)
class ConfirmAbortBackupHandler(webapp.RequestHandler):
  """Handler to confirm admin console requests to abort a backup copy."""
  SUFFIX = 'confirm_abort_backup'
  @classmethod
  def Render(cls, handler):
    """Rendering method that can be called by main.py.
    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    requested_backup_ids = handler.request.get_all('backup_id')
    # Resolve the requested ids, dropping any that no longer exist.
    if requested_backup_ids:
      backups = [backup for backup in db.get(requested_backup_ids) if backup]
    else:
      backups = []
    template_params = {
        'form_target': DoBackupAbortHandler.SUFFIX,
        'cancel_url': handler.request.get('cancel_url'),
        'backups': backups,
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION)
    }
    utils.RenderToResponse(handler, 'confirm_abort_backup.html',
                           template_params)
class ConfirmRestoreFromBackupHandler(webapp.RequestHandler):
  """Handler to confirm admin console requests to restore from backup."""
  SUFFIX = 'confirm_restore_from_backup'
  @classmethod
  def Render(cls, handler, default_backup_id=None,
             default_delete_backup_after_restore=False):
    """Rendering method that can be called by main.py.
    Args:
      handler: the webapp.RequestHandler invoking the method
      default_backup_id: default value for handler.request
      default_delete_backup_after_restore: default value for handler.request
    """
    backup_id = handler.request.get('backup_id', default_backup_id)
    backup = db.get(backup_id) if backup_id else None
    notreadonly_warning = capabilities.CapabilitySet(
        'datastore_v3', capabilities=['write']).is_enabled()
    # Guard against a missing backup: the previous code dereferenced
    # backup.original_app unconditionally and raised AttributeError when no
    # backup_id was supplied (backup is None in that case).
    original_app_warning = backup.original_app if backup else None
    if os.getenv('APPLICATION_ID') == original_app_warning:
      # No warning needed when restoring into the same application.
      original_app_warning = None
    template_params = {
        'form_target': DoBackupRestoreHandler.SUFFIX,
        'queues': None,
        'cancel_url': handler.request.get('cancel_url'),
        'backup': backup,
        'delete_backup_after_restore': handler.request.get(
            'delete_backup_after_restore', default_delete_backup_after_restore),
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
        'notreadonly_warning': notreadonly_warning,
        'original_app_warning': original_app_warning
    }
    utils.RenderToResponse(handler, 'confirm_restore_from_backup.html',
                           template_params)
class ConfirmBackupImportHandler(webapp.RequestHandler):
  """Handler to import backup information."""
  # NOTE(review): this SUFFIX equals BackupInformationHandler.SUFFIX —
  # confirm the URL routing distinguishes the two handlers.
  SUFFIX = 'backup_information'
  @classmethod
  def Render(cls, handler):
    """Rendering method that can be called by main.py.

    Lists the .backup_info files found under a user-supplied Google Cloud
    Storage path so one can be selected for import.

    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    gs_handle = handler.request.get('gs_handle')
    error = None if gs_handle else 'Google Cloud Storage path is missing'
    other_backup_info_files = []
    selected_backup_info_file = None
    backup_info_specified = False
    if not error:
      try:
        gs_handle = gs_handle.rstrip()
        bucket_name, prefix = parse_gs_handle(gs_handle)
        validate_gs_bucket_name(bucket_name)
        if not is_accessible_bucket_name(bucket_name):
          raise BackupValidationException(
              'Bucket "%s" is not accessible' % bucket_name)
        if prefix.endswith('.backup_info'):
          # A specific backup_info file was given: list its containing
          # "directory" and remember to pre-select the given file below.
          prefix = prefix[0:prefix.rfind('/')]
          backup_info_specified = True
        elif prefix and not prefix.endswith('/'):
          prefix += '/'
        for backup_info_file in list_bucket_files(bucket_name, prefix):
          if backup_info_file.endswith('.backup_info'):
            backup_info_file = '/gs/%s/%s' % (bucket_name, backup_info_file)
            if backup_info_specified and backup_info_file == gs_handle:
              selected_backup_info_file = backup_info_file
            else:
              other_backup_info_files.append(backup_info_file)
      except Exception, ex:
        # Any listing/validation failure is surfaced to the template as text.
        error = 'Failed to read bucket: %s' % ex
    template_params = {
        'error': error,
        'form_target': DoBackupImportHandler.SUFFIX,
        'cancel_url': handler.request.get('cancel_url'),
        'selected_backup_info_file': selected_backup_info_file,
        'other_backup_info_files': other_backup_info_files,
        'backup_info_specified': backup_info_specified,
        'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION)
    }
    utils.RenderToResponse(handler, 'confirm_backup_import.html',
                           template_params)
class BackupInformationHandler(webapp.RequestHandler):
  """Handler to display backup information."""

  SUFFIX = 'backup_information'

  @classmethod
  def Render(cls, handler):
    """Rendering method that can be called by main.py.

    Args:
      handler: the webapp.RequestHandler invoking the method
    """
    requested_ids = handler.request.get_all('backup_id')
    utils.RenderToResponse(handler, 'backup_information.html', {
        'backups': db.get(requested_ids),
        'back_target': handler.request.get('cancel_url'),
    })
class BaseDoHandler(webapp.RequestHandler):
  """Base class for all Do*Handlers."""
  # URL prefix used to render per-job detail links on the status page.
  MAPREDUCE_DETAIL = utils.config.MAPREDUCE_PATH + '/detail?mapreduce_id='
  def get(self):
    """Handler for get requests to datastore_admin backup operations.

    Status of executed jobs is displayed.
    """
    jobs = self.request.get_all('job')
    tasks = self.request.get_all('task')
    error = self.request.get('error', '')
    xsrf_error = self.request.get('xsrf_error', '')
    template_params = {
        'job_list': jobs,
        'task_list': tasks,
        'mapreduce_detail': self.MAPREDUCE_DETAIL,
        'error': error,
        'xsrf_error': xsrf_error,
        'datastore_admin_home': utils.config.BASE_PATH,
    }
    utils.RenderToResponse(self, self._get_html_page, template_params)
  @property
  def _get_html_page(self):
    """Return the name of the HTML page for HTTP/GET requests."""
    raise NotImplementedError
  @property
  def _get_post_html_page(self):
    """Return the name of the HTML page for HTTP/POST requests."""
    raise NotImplementedError
  def _ProcessPostRequest(self):
    """Process the HTTP/POST request and return the result as parameters."""
    raise NotImplementedError
  def _GetBasicMapperParams(self):
    # Shared mapper params for subclasses; namespace defaults to None.
    return {'namespace': self.request.get('namespace', None)}
  def post(self):
    """Handler for post requests to datastore_admin/backup.do.

    Redirects to the get handler after processing the request.
    """
    token = self.request.get('xsrf_token')
    if not utils.ValidateXsrfToken(token, XSRF_ACTION):
      # Invalid or missing XSRF token: report via redirect params instead of
      # processing the request.
      parameters = [('xsrf_error', '1')]
    else:
      try:
        parameters = self._ProcessPostRequest()
      except Exception, e:
        error = self._HandleException(e)
        parameters = [('error', error)]
    query = urllib.urlencode(parameters)
    self.redirect('%s/%s?%s' % (utils.config.BASE_PATH,
                                self._get_post_html_page,
                                query))
  def _HandleException(self, e):
    """Make exception handling overrideable by tests.

    Args:
      e: The exception to handle.

    Returns:
      The exception error string.
    """
    return '%s: %s' % (type(e), e)
class BackupValidationException(Exception):
  """Raised when backup request parameters (name, filesystem, bucket) are invalid."""
  pass
def _perform_backup(kinds,
                    filesystem, gs_bucket_name, backup,
                    queue, mapper_params, max_jobs):
  """Triggers backup mapper jobs.

  Args:
    kinds: a sequence of kind names
    filesystem: files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM
        or None to default to blobstore
    gs_bucket_name: the GS file system bucket in which to store the backup
        when using the GS file system, and otherwise ignored
    backup: the backup name
    queue: the task queue for the backup task
    mapper_params: the mapper parameters
    max_jobs: if backup needs more jobs than this, defer them

  Returns:
    The job or task ids.

  Raises:
    BackupValidationException: On validation error.
    Exception: On other error.
  """
  BACKUP_COMPLETE_HANDLER = __name__ + '.BackupCompleteHandler'
  BACKUP_HANDLER = __name__ + '.BackupEntity.map'
  INPUT_READER = __name__ + '.DatastoreEntityProtoInputReader'
  OUTPUT_WRITER = output_writers.__name__ + '.FileRecordsOutputWriter'
  if not filesystem:
    filesystem = files.BLOBSTORE_FILESYSTEM
  if filesystem == files.GS_FILESYSTEM:
    if not gs_bucket_name:
      raise BackupValidationException('Bucket name missing.')
    # Normalize "bucket/path" and validate the bucket before starting any job.
    bucket_name, path = parse_gs_handle(gs_bucket_name)
    gs_bucket_name = ('%s/%s' % (bucket_name, path)).rstrip('/')
    validate_gs_bucket_name(bucket_name)
    if not is_accessible_bucket_name(bucket_name):
      raise BackupValidationException(
          'Bucket "%s" is not accessible' % bucket_name)
  elif filesystem == files.BLOBSTORE_FILESYSTEM:
    pass
  else:
    raise BackupValidationException('Unknown filesystem "%s".' % filesystem)
  job_name = 'datastore_backup_%s_%%(kind)s' % re.sub(r'[^\w]', '_', backup)
  # Initialize before the try block: the cleanup code in the except handler
  # reads both names and previously raised NameError if utils.StartOperation
  # itself failed.
  job_operation = None
  backup_info = None
  try:
    job_operation = utils.StartOperation('Backup: %s' % backup)
    backup_info = BackupInformation(parent=job_operation)
    backup_info.filesystem = filesystem
    backup_info.name = backup
    backup_info.kinds = kinds
    backup_info.put(force_writes=True)
    mapreduce_params = {
        'done_callback_handler': BACKUP_COMPLETE_HANDLER,
        'backup_info_pk': str(backup_info.key()),
        'force_ops_writes': True,
    }
    mapper_params = dict(mapper_params)
    mapper_params['filesystem'] = filesystem
    if filesystem == files.GS_FILESYSTEM:
      mapper_params['gs_bucket_name'] = gs_bucket_name
    if len(kinds) <= max_jobs:
      # Few enough kinds: start all M/R jobs inline in this request.
      return [('job', job) for job in _run_map_jobs(
          job_operation.key(), backup_info.key(), kinds, job_name,
          BACKUP_HANDLER, INPUT_READER, OUTPUT_WRITER,
          mapper_params, mapreduce_params, queue)]
    else:
      # Too many kinds to start inline: defer job creation to a task.
      retry_options = taskqueue.TaskRetryOptions(task_retry_limit=1)
      deferred_task = deferred.defer(_run_map_jobs, job_operation.key(),
                                     backup_info.key(), kinds, job_name,
                                     BACKUP_HANDLER, INPUT_READER,
                                     OUTPUT_WRITER,
                                     mapper_params,
                                     mapreduce_params,
                                     queue, _queue=queue,
                                     _url=utils.ConfigDefaults.DEFERRED_PATH,
                                     _retry_options=retry_options)
      return [('task', deferred_task.name)]
  except Exception:
    # Best-effort cleanup of partially-created records, then re-raise so the
    # caller can surface the failure.
    logging.exception('Failed to start a datastore backup job[s] for "%s".',
                      job_name)
    if backup_info:
      delete_backup_info(backup_info)
    if job_operation:
      job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
      job_operation.put(force_writes=True)
    raise
class BackupLinkHandler(webapp.RequestHandler):
  """Handler to deal with requests to the backup link to backup data."""
  SUFFIX = 'backup.create'
  def get(self):
    """Handler for get requests to datastore_admin/backup.create."""
    self.post()
  def post(self):
    """Handler for post requests to datastore_admin/backup.create."""
    try:
      backup_prefix = self.request.get('name')
      if not backup_prefix:
        # Default the name by caller type: cron-triggered vs. plain link.
        if self.request.headers.get('X-AppEngine-Cron'):
          backup_prefix = 'cron-'
        else:
          backup_prefix = 'link-'
      backup_prefix_with_date = backup_prefix + time.strftime('%Y_%m_%d')
      backup_name = backup_prefix_with_date
      backup_suffix_counter = 1
      # Append "-2", "-3", ... until the name is unique.
      while BackupInformation.name_exists(backup_name):
        backup_suffix_counter += 1
        backup_name = backup_prefix_with_date + '-' + str(backup_suffix_counter)
      kinds = self.request.get_all('kind')
      if not kinds:
        self.errorResponse('Backup must include at least one kind.')
        return
      for kind in kinds:
        if not utils.IsKindNameVisible(kind):
          self.errorResponse('Invalid kind %s.' % kind)
          return
      mapper_params = {'namespace': None}
      # max_jobs=1000000 means the inline (non-deferred) path in
      # _perform_backup is effectively always taken.
      _perform_backup(kinds,
                      self.request.get('filesystem'),
                      self.request.get('gs_bucket_name'),
                      backup_name,
                      self.request.get('queue'),
                      mapper_params,
                      1000000)
    except Exception, e:
      self.errorResponse(e.message)
  def errorResponse(self, message):
    # Log and reply 400 with the message as the HTTP status text.
    logging.error('Could not create backup via link: %s', message)
    self.response.set_status(400, message)
class DatastoreEntityProtoInputReader(input_readers.DatastoreEntityInputReader):
  """An input reader which yields datastore entity proto for a kind."""
  def _iter_key_range(self, k_range):
    """Yields (key, entity_proto) pairs for entities in the given key range."""
    raw_entity_kind = self._get_raw_entity_kind(self._entity_kind)
    query = k_range.make_ascending_datastore_query(raw_entity_kind,
                                                   self._filters)
    # Run via a low-level connection so batch.results stays raw EntityProtos
    # rather than deserialized model instances.
    connection = datastore_rpc.Connection()
    query_options = datastore_query.QueryOptions(batch_size=self._batch_size)
    for batch in query.GetQuery().run(connection, query_options):
      for entity_proto in batch.results:
        key = datastore_types.Key._FromPb(entity_proto.key())
        yield key, entity_proto
class DoBackupHandler(BaseDoHandler):
  """Handler to deal with requests from the admin console to backup data."""
  SUFFIX = 'backup.do'
  _get_html_page = 'do_backup.html'
  _get_post_html_page = SUFFIX
  def _ProcessPostRequest(self):
    """Triggers backup mapper jobs and returns their ids."""
    try:
      backup = self.request.get('backup_name').strip()
      if not backup:
        raise BackupValidationException('Unspecified backup name.')
      if BackupInformation.name_exists(backup):
        raise BackupValidationException('Backup "%s" already exists.' % backup)
      mapper_params = self._GetBasicMapperParams()
      # max_jobs=10: with more than 10 kinds, _perform_backup defers job
      # creation to a task instead of starting all jobs inline.
      backup_result = _perform_backup(self.request.get_all('kind'),
                                      self.request.get('filesystem'),
                                      self.request.get('gs_bucket_name'),
                                      backup,
                                      self.request.get('queue'),
                                      mapper_params,
                                      10)
      return backup_result
    except BackupValidationException, e:
      # Validation failures become an ('error', ...) redirect parameter.
      return [('error', e.message)]
def _run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
                  backup_handler, input_reader, output_writer, mapper_params,
                  mapreduce_params, queue):
  """Creates backup/restore MR jobs for the given operation.

  Args:
    job_operation_key: a key of utils.DatastoreAdminOperation entity.
    backup_info_key: a key of BackupInformation entity.
    kinds: a list of kinds to run the M/R for.
    job_name: the M/R job name prefix.
    backup_handler: M/R job completion handler.
    input_reader: M/R input reader.
    output_writer: M/R output writer.
    mapper_params: custom parameters to pass to mapper.
    mapreduce_params: dictionary parameters relevant to the whole job.
    queue: the name of the queue that will be used by the M/R.

  Returns:
    Ids of all started mapper jobs as list of strings.
  """
  backup_info = BackupInformation.get(backup_info_key)
  if not backup_info:
    # The backup record is gone (e.g. deleted/aborted); start nothing.
    return []
  started_job_ids = utils.RunMapForKinds(
      job_operation_key,
      kinds,
      job_name,
      backup_handler,
      input_reader,
      output_writer,
      mapper_params,
      mapreduce_params,
      queue_name=queue)
  # Record the started job ids so completion handlers can track progress.
  backup_info.active_jobs = started_job_ids
  backup_info.put(force_writes=True)
  return started_job_ids
def get_backup_files(backup_info, selected_kinds=None):
  """Returns the backup filenames for selected kinds or all if None/Empty."""
  legacy_files = backup_info.blob_files
  if legacy_files:
    # Older backups keep all filenames directly on BackupInformation.
    return legacy_files
  all_files = []
  for kind_backup_files in backup_info.get_kind_backup_files(selected_kinds):
    all_files.extend(kind_backup_files.files)
  return all_files
def delete_backup_files(filesystem, backup_files):
  """Deletes blobstore-backed backup files in batches of MAX_BLOBS_PER_DELETE.

  Files on other filesystems (e.g. GS) are left untouched.
  """
  if not backup_files:
    return
  if filesystem != files.BLOBSTORE_FILESYSTEM:
    return
  pending_keys = []
  for backup_file in backup_files:
    key = files.blobstore.get_blob_key(backup_file)
    if key:
      pending_keys.append(key)
      if len(pending_keys) == MAX_BLOBS_PER_DELETE:
        blobstore_api.delete(pending_keys)
        pending_keys = []
  # Flush any remainder smaller than a full batch.
  if pending_keys:
    blobstore_api.delete(pending_keys)
def delete_backup_info(backup_info, delete_files=True):
  """Deletes a backup including its associated files and other metadata."""
  if backup_info.blob_files:
    # Older layout: filenames live directly on BackupInformation.
    delete_backup_files(backup_info.filesystem, backup_info.blob_files)
    backup_info.delete(force_writes=True)
    return
  per_kind_entities = tuple(backup_info.get_kind_backup_files())
  if delete_files:
    all_files = itertools.chain.from_iterable(
        entity.files for entity in per_kind_entities)
    delete_backup_files(backup_info.filesystem, all_files)
  # Remove the per-kind file entities together with the backup record.
  db.delete(per_kind_entities + (backup_info,), force_writes=True)
class DoBackupDeleteHandler(BaseDoHandler):
  """Handler to deal with datastore admin requests to delete backup data."""
  SUFFIX = 'backup_delete.do'
  def get(self):
    # GET is treated the same as POST for this endpoint.
    self.post()
  def post(self):
    """Handler for post requests to datastore_admin/backup_delete.do.

    Deletes are executed and user is redirected to the base-path handler.
    """
    backup_ids = self.request.get_all('backup_id')
    token = self.request.get('xsrf_token')
    error = None
    # Silently no-op when there are no ids or the XSRF token is invalid.
    if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
      try:
        for backup_info in db.get(backup_ids):
          if backup_info:
            delete_backup_info(backup_info)
      except Exception, e:
        logging.exception('Failed to delete datastore backup.')
        error = str(e)
    if error:
      query = urllib.urlencode([('error', error)])
      self.redirect('%s?%s' % (utils.config.BASE_PATH, query))
    else:
      self.redirect(utils.config.BASE_PATH)
class DoBackupAbortHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to abort pending backups."""
SUFFIX = 'backup_abort.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_abort.do.
Abort is executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
error = None
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
utils.AbortAdminOperation(backup_info.parent_key())
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to abort pending datastore backup.')
error = str(e)
if error:
self.redirect(utils.config.BASE_PATH + '?error=%s' % error)
else:
self.redirect(utils.config.BASE_PATH)
class DoBackupRestoreHandler(BaseDoHandler):
  """Handler to restore backup data.

  Deals with requests from the admin console.
  """
  SUFFIX = 'backup_restore.do'
  BACKUP_RESTORE_HANDLER = __name__ + '.RestoreEntity.map'
  RESTORE_COMPLETE_HANDLER = __name__ + '.RestoreCompleteHandler'
  INPUT_READER = input_readers.__name__ + '.RecordsReader'
  _get_html_page = 'do_restore_from_backup.html'
  _get_post_html_page = SUFFIX
  def _ProcessPostRequest(self):
    """Triggers backup restore mapper jobs and returns their ids."""
    backup_id = self.request.get('backup_id')
    if not backup_id:
      return [('error', 'Unspecified Backup.')]
    backup = db.get(db.Key(backup_id))
    if not backup:
      return [('error', 'Invalid Backup id.')]
    queue = self.request.get('queue')
    job_name = 'datastore_backup_restore_%s' % re.sub(r'[^\w]', '_',
                                                      backup.name)
    job_operation = None
    kinds = set(self.request.get_all('kind'))
    if not (backup.blob_files or kinds):
      return [('error', 'No kinds were selected')]
    backup_kinds = set(backup.kinds)
    difference = kinds.difference(backup_kinds)
    if difference:
      return [('error', 'Backup does not have kind[s] %s' %
               ', '.join(difference))]
    # An empty kinds list means "restore everything"; only filter when a
    # strict subset of the backup's kinds was selected.
    kinds = list(kinds) if len(backup_kinds) != len(kinds) else []
    try:
      operation_name = 'Restoring %s from backup: %s' % (
          ', '.join(kinds) if kinds else 'all', backup.name)
      job_operation = utils.StartOperation(operation_name)
      mapper_params = self._GetBasicMapperParams()
      mapper_params['files'] = get_backup_files(backup, kinds)
      mapper_params['kind_filter'] = kinds
      mapper_params['original_app'] = backup.original_app
      mapreduce_params = {
          'backup_name': backup.name,
          'force_ops_writes': True
      }
      # One shard per backup file, clamped to the configured min/max.
      shard_count = min(max(utils.MAPREDUCE_MIN_SHARDS,
                            len(mapper_params['files'])),
                        utils.MAPREDUCE_MAX_SHARDS)
      job = utils.StartMap(job_operation.key(), job_name,
                           self.BACKUP_RESTORE_HANDLER, self.INPUT_READER, None,
                           mapper_params, mapreduce_params, queue_name=queue,
                           shard_count=shard_count)
      return [('job', job)]
    except Exception:
      logging.exception('Failed to start a restore from backup job "%s".',
                        job_name)
      if job_operation:
        job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
        job_operation.put(force_writes=True)
      raise
    finally:
      # Optionally drop the backup metadata (keeping its files, which the
      # restore job still reads) once the job has been started.
      if self.request.get('delete_backup_after_restore', '').lower() == 'true':
        delete_backup_info(backup, delete_files=False)
class DoBackupImportHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to import backup info."""
SUFFIX = 'import_backup.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/import_backup.do.
Import is executed and user is redirected to the base-path handler.
"""
gs_handle = self.request.get('gs_handle')
token = self.request.get('xsrf_token')
error = None
if gs_handle and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
bucket_name, path = parse_gs_handle(gs_handle)
file_content = get_gs_object(bucket_name, path)
entities = parse_backup_info_file(file_content)
original_backup_info = entities.next()
entity = datastore.Entity(BackupInformation.kind())
entity.update(original_backup_info)
backup_info = BackupInformation.from_entity(entity)
if original_backup_info.key().app() != os.getenv('APPLICATION_ID'):
backup_info.original_app = original_backup_info.key().app()
def tx():
backup_info.put(force_writes=True)
kind_files_models = []
for entity in entities:
kind_files = backup_info.create_kind_backup_files(
entity.key().name(), entity['files'])
kind_files_models.append(kind_files)
db.put(kind_files_models, force_writes=True)
db.run_in_transaction(tx)
backup_id = str(backup_info.key())
except Exception, e:
logging.exception('Failed to Import datastore backup information.')
error = str(e)
if error:
query = urllib.urlencode([('error', error)])
self.redirect('%s?%s' % (utils.config.BASE_PATH, query))
elif self.request.get('Restore'):
ConfirmRestoreFromBackupHandler.Render(
self, default_backup_id=backup_id,
default_delete_backup_after_restore=True)
else:
self.redirect(utils.config.BASE_PATH)
class BackupInformation(db.Model):
  """An entity to keep information on successful backup operations."""

  name = db.StringProperty()
  kinds = db.StringListProperty()
  filesystem = db.StringProperty(default=files.BLOBSTORE_FILESYSTEM)
  start_time = db.DateTimeProperty(auto_now_add=True)
  active_jobs = db.StringListProperty()
  completed_jobs = db.StringListProperty()
  complete_time = db.DateTimeProperty(default=None)
  blob_files = db.StringListProperty()
  original_app = db.StringProperty(default=None)
  gs_handle = db.TextProperty(default=None)

  @classmethod
  def kind(cls):
    # Datastore kind name comes from the shared utils constant.
    return utils.BACKUP_INFORMATION_KIND

  @classmethod
  def name_exists(cls, backup_name):
    """Returns True when a backup with the given name is already recorded."""
    keys_only_query = BackupInformation.all(keys_only=True)
    keys_only_query.filter('name =', backup_name)
    return keys_only_query.get() is not None

  def create_kind_backup_files_key(self, kind):
    """Key of the KindBackupFiles child entity holding files for a kind."""
    return db.Key.from_path(KindBackupFiles.kind(), kind, parent=self.key())

  def create_kind_backup_files(self, kind, kind_files):
    """Builds (without saving) the KindBackupFiles entity for a kind."""
    return KindBackupFiles(key=self.create_kind_backup_files_key(kind),
                           files=kind_files)

  def get_kind_backup_files(self, kinds=None):
    """Fetches KindBackupFiles for the given kinds, or iterates all of them."""
    if not kinds:
      return KindBackupFiles.all().ancestor(self).run()
    return db.get(
        [self.create_kind_backup_files_key(kind) for kind in kinds])
class KindBackupFiles(db.Model):
  """Per-kind list of backup files.

  Keyed by the backed-up kind's name, with the owning BackupInformation
  entity as parent.
  """

  files = db.StringListProperty(indexed=False)

  @property
  def backup_kind(self):
    # The entity's key name is the datastore kind this file list belongs to.
    return self.key().name()

  @classmethod
  def kind(cls):
    # Datastore kind name comes from the shared utils constant.
    return utils.BACKUP_INFORMATION_FILES_KIND
@db.transactional
def BackupCompleteHandler(operation, job_id, mapreduce_state):
  """Updates BackupInformation record for a completed mapper job."""
  mapreduce_spec = mapreduce_state.mapreduce_spec
  kind = mapreduce_spec.mapper.params['entity_kind']
  backup_info = BackupInformation.get(mapreduce_spec.params['backup_info_pk'])
  if backup_info:
    # Move this job from active to completed (deduplicated via set()).
    if job_id in backup_info.active_jobs:
      backup_info.active_jobs.remove(job_id)
      backup_info.completed_jobs = list(
          set(backup_info.completed_jobs + [job_id]))
    filenames = mapreduce_state.writer_state['filenames']
    if backup_info.filesystem == files.BLOBSTORE_FILESYSTEM:
      # Blobstore output may contain zero-length files; delete and drop them.
      filenames = drop_empty_files(filenames)
    if backup_info.blob_files:
      # Filenames stored directly on BackupInformation — presumably the
      # legacy layout; see get_backup_files/delete_backup_info for the same
      # branch.
      backup_info.blob_files = list(set(backup_info.blob_files + filenames))
      backup_info.put(force_writes=True)
    else:
      # Filenames stored per-kind in KindBackupFiles child entities.
      kind_backup_files = backup_info.get_kind_backup_files([kind])[0]
      if kind_backup_files:
        kind_backup_files.files = list(set(kind_backup_files.files + filenames))
      else:
        kind_backup_files = backup_info.create_kind_backup_files(kind,
                                                                 filenames)
      db.put((backup_info, kind_backup_files), force_writes=True)
    if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:
      # Whole operation done: finalize in a deferred task enqueued within
      # this same transaction.
      deferred.defer(finalize_backup_info, backup_info.key(),
                     mapreduce_spec.mapper.params,
                     _url=utils.ConfigDefaults.DEFERRED_PATH,
                     _transactional=True)
  else:
    logging.warn('BackupInfo was not found for %s',
                 mapreduce_spec.params['backup_info_pk'])
def finalize_backup_info(backup_info_pk, mapper_params):
  """Finalize the state of BackupInformation and creates info file for GS."""
  def _txn():
    info = BackupInformation.get(backup_info_pk)
    if not info:
      logging.warn('Backup %s could not be found', backup_info_pk)
      return
    info.complete_time = datetime.datetime.now()
    if info.filesystem == files.GS_FILESYSTEM:
      # GS backups additionally get metadata files written to the bucket.
      BackupInfoWriter(mapper_params['gs_bucket_name']).write(info)
    info.put(force_writes=True)
    logging.info('Backup %s completed', info.name)
  db.run_in_transaction(_txn)
def parse_backup_info_file(content):
  """Returns entities iterator from a backup_info file content."""
  record_reader = records.RecordsReader(cStringIO.StringIO(content))
  # The first record is the format version.
  if record_reader.read() != '1':
    raise IOError('Unsupported version')
  return (datastore.Entity.FromPb(record) for record in record_reader)
@db.non_transactional
def drop_empty_files(filenames):
  """Deletes empty files and returns filenames minus the deleted ones."""
  non_empty_filenames = []
  empty_file_keys = []
  blobs_info = blobstore.BlobInfo.get(
      [files.blobstore.get_blob_key(fn) for fn in filenames])
  for filename, blob_info in itertools.izip(filenames, blobs_info):
    if blob_info:
      if blob_info.size > 0:
        non_empty_filenames.append(filename)
      else:
        empty_file_keys.append(blob_info.key())
  # NOTE(review): filenames with no resolvable BlobInfo are silently dropped
  # (neither returned nor deleted).
  blobstore_api.delete(empty_file_keys)
  return non_empty_filenames
class BackupInfoWriter(object):
  """A class for writing Datastore backup metadata files."""
  def __init__(self, gs_bucket):
    """Construct a BackupInfoWriter.

    Args:
      gs_bucket: Required string for the target GS bucket.
    """
    self.__gs_bucket = gs_bucket
  def write(self, backup_info):
    """Write the metadata files for the given backup_info.

    Args:
      backup_info: Required BackupInformation.

    Returns:
      A list with Backup info filename followed by Kind info filenames.
    """
    fn = self._write_backup_info(backup_info)
    return [fn] + self._write_kind_info(backup_info)
  def _generate_filename(self, backup_info, suffix):
    # Flatten any '/' in the key string so it stays a single GS object name.
    key_str = str(backup_info.key()).replace('/', '_')
    return '/gs/%s/%s%s' % (self.__gs_bucket, key_str, suffix)
  def _write_backup_info(self, backup_info):
    """Writes a backup_info_file.

    Args:
      backup_info: Required BackupInformation.

    Returns:
      Backup info filename.
    """
    filename = self._generate_filename(backup_info, '.backup_info')
    backup_info.gs_handle = filename
    info_file = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
    try:
      with records.RecordsWriter(info_file) as writer:
        # Record 1: format version '1' (checked by parse_backup_info_file);
        # record 2: the BackupInformation proto; remaining records: one
        # KindBackupFiles proto per kind.
        writer.write('1')
        writer.write(db.model_to_protobuf(backup_info).SerializeToString())
        for kind_files in backup_info.get_kind_backup_files():
          writer.write(db.model_to_protobuf(kind_files).SerializeToString())
    finally:
      # finalize=True makes the GS object permanent/readable.
      info_file.close(finalize=True)
    return filename
  def _write_kind_info(self, backup_info):
    """Writes type information schema for each kind in backup_info.

    Args:
      backup_info: Required BackupInformation.

    Returns:
      A list with all created filenames.
    """
    filenames = []
    for kind_backup_files in backup_info.get_kind_backup_files():
      backup = self._create_kind_backup(backup_info, kind_backup_files)
      filename = self._generate_filename(
          backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)
      self._write_kind_backup_info_file(filename, backup)
      filenames.append(filename)
    return filenames
  def _create_kind_backup(self, backup_info, kind_backup_files):
    """Creates and populate a backup_pb2.Backup."""
    backup = backup_pb2.Backup()
    backup.backup_info.backup_name = backup_info.name
    backup.backup_info.start_timestamp = datastore_types.DatetimeToTimestamp(
        backup_info.start_time)
    backup.backup_info.end_timestamp = datastore_types.DatetimeToTimestamp(
        backup_info.complete_time)
    kind = kind_backup_files.backup_kind
    kind_info = backup.kind_info.add()
    kind_info.kind = kind
    kind_info.entity_schema.kind = kind
    kind_info.file.extend(kind_backup_files.files)
    entity_type_info = EntityTypeInfo(kind=kind)
    # Merge the schema fragments aggregated by each shard into one schema.
    for sharded_aggregation in SchemaAggregationResult.load(
        backup_info.key(), kind):
      if sharded_aggregation.entity_type_info:
        entity_type_info.merge(sharded_aggregation.entity_type_info)
    entity_type_info.populate_entity_schema(kind_info.entity_schema)
    return backup
  @classmethod
  def _write_kind_backup_info_file(cls, filename, backup):
    """Writes a kind backup_info.

    Args:
      filename: The name of the file to be created as string.
      backup: apphosting.ext.datastore_admin.Backup proto.
    """
    f = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
    try:
      f.write(backup.SerializeToString())
    finally:
      f.close(finalize=True)
class PropertyTypeInfo(model.JsonMixin):
  """Type information for an entity property."""
  def __init__(self, name, is_repeated=False, primitive_types=None,
               embedded_entities=None):
    """Construct a PropertyTypeInfo instance.

    Args:
      name: The name of the property as a string.
      is_repeated: A boolean that indicates if the property is repeated.
      primitive_types: Optional list of PrimitiveType integer values.
      embedded_entities: Optional list of EntityTypeInfo.
    """
    self.__name = name
    self.__is_repeated = is_repeated
    self.__primitive_types = set(primitive_types) if primitive_types else set()
    self.__embedded_entities = {}
    # Deduplicate embedded entities by kind, merging duplicates.
    for entity in embedded_entities or ():
      if entity.kind in self.__embedded_entities:
        self.__embedded_entities[entity.kind].merge(entity)
      else:
        self.__embedded_entities[entity.kind] = entity
  @property
  def name(self):
    return self.__name
  @property
  def is_repeated(self):
    return self.__is_repeated
  @property
  def primitive_types(self):
    return self.__primitive_types
  def embedded_entities_kind_iter(self):
    return self.__embedded_entities.iterkeys()
  def get_embedded_entity(self, kind):
    return self.__embedded_entities.get(kind)
  def merge(self, other):
    """Merge a PropertyTypeInfo with this instance.

    Args:
      other: Required PropertyTypeInfo to merge.

    Returns:
      True if anything was changed. False otherwise.

    Raises:
      ValueError: if property names do not match.
      TypeError: if other is not instance of PropertyTypeInfo.
    """
    if not isinstance(other, PropertyTypeInfo):
      raise TypeError('Expected PropertyTypeInfo, was %r' % (other,))
    if other.__name != self.__name:
      raise ValueError('Property names mismatch (%s, %s)' %
                       (self.__name, other.__name))
    changed = False
    # Repeated-ness only widens: once repeated, always repeated.
    if other.__is_repeated and not self.__is_repeated:
      self.__is_repeated = True
      changed = True
    if not other.__primitive_types.issubset(self.__primitive_types):
      self.__primitive_types = self.__primitive_types.union(
          other.__primitive_types)
      changed = True
    for kind, other_embedded_entity in other.__embedded_entities.iteritems():
      embedded_entity = self.__embedded_entities.get(kind)
      if embedded_entity:
        changed = embedded_entity.merge(other_embedded_entity) or changed
      else:
        self.__embedded_entities[kind] = other_embedded_entity
        changed = True
    return changed
  def populate_entity_schema_field(self, entity_schema):
    """Add and populate a Field in the given entity_schema.

    Args:
      entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
    """
    # Skip properties for which no type was ever observed.
    if not (self.__primitive_types or self.__embedded_entities):
      return
    field = entity_schema.field.add()
    field.name = self.__name
    field_type = field.type.add()
    field_type.is_list = self.__is_repeated
    field_type.primitive_type.extend(self.__primitive_types)
    for embedded_entity in self.__embedded_entities.itervalues():
      embedded_entity_schema = field_type.embedded_schema.add()
      embedded_entity.populate_entity_schema(embedded_entity_schema)
  def to_json(self):
    json = dict()
    json['name'] = self.__name
    json['is_repeated'] = self.__is_repeated
    json['primitive_types'] = list(self.__primitive_types)
    json['embedded_entities'] = [e.to_json() for e in
                                 self.__embedded_entities.itervalues()]
    return json
  @classmethod
  def from_json(cls, json):
    return cls(json['name'], json['is_repeated'], json.get('primitive_types'),
               [EntityTypeInfo.from_json(entity_json) for entity_json
                in json.get('embedded_entities')])
class EntityTypeInfo(model.JsonMixin):
  """Type information for an entity."""
  def __init__(self, kind=None, properties=None):
    """Construct an EntityTypeInfo instance.

    Args:
      kind: An optional kind name as string.
      properties: An optional list of PropertyTypeInfo.
    """
    self.__kind = kind
    self.__properties = {}
    # Deduplicate properties by name, merging duplicates.
    for property_type_info in properties or ():
      if property_type_info.name in self.__properties:
        self.__properties[property_type_info.name].merge(property_type_info)
      else:
        self.__properties[property_type_info.name] = property_type_info
  @property
  def kind(self):
    return self.__kind
  def properties_name_iter(self):
    return self.__properties.iterkeys()
  def get_property(self, name):
    return self.__properties.get(name)
  def merge(self, other):
    """Merge an EntityTypeInfo with this instance.

    Args:
      other: Required EntityTypeInfo to merge.

    Returns:
      True if anything was changed. False otherwise.

    Raises:
      ValueError: if kinds do not match.
      TypeError: if other is not instance of EntityTypeInfo.
    """
    if not isinstance(other, EntityTypeInfo):
      raise TypeError('Expected EntityTypeInfo, was %r' % (other,))
    if other.__kind != self.__kind:
      raise ValueError('Kinds mismatch (%s, %s)' % (self.__kind, other.__kind))
    changed = False
    for name, other_property in other.__properties.iteritems():
      self_property = self.__properties.get(name)
      if self_property:
        changed = self_property.merge(other_property) or changed
      else:
        self.__properties[name] = other_property
        changed = True
    return changed
  def populate_entity_schema(self, entity_schema):
    """Populates the given entity_schema with values from this instance.

    Args:
      entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
    """
    if self.__kind:
      entity_schema.kind = self.__kind
    for property_type_info in self.__properties.itervalues():
      property_type_info.populate_entity_schema_field(entity_schema)
  def to_json(self):
    return {
        'kind': self.__kind,
        'properties': [p.to_json() for p in self.__properties.itervalues()]
    }
  @classmethod
  def from_json(cls, json):
    kind = json.get('kind')
    properties_json = json.get('properties')
    if properties_json:
      return cls(kind, [PropertyTypeInfo.from_json(p) for p in properties_json])
    else:
      return cls(kind)
  @classmethod
  def create_from_entity_proto(cls, entity_proto):
    """Creates and populates an EntityTypeInfo from an EntityProto."""
    properties = [cls.__get_property_type_info(property_proto) for
                  property_proto in itertools.chain(
                      entity_proto.property_list(),
                      entity_proto.raw_property_list())]
    kind = utils.get_kind_from_entity_pb(entity_proto)
    return cls(kind, properties)
  @classmethod
  def __get_property_type_info(cls, property_proto):
    """Returns the type mapping for the provided property."""
    name = property_proto.name()
    is_repeated = bool(property_proto.multiple())
    primitive_type = None
    entity_type = None
    if property_proto.has_meaning():
      primitive_type = MEANING_TO_PRIMITIVE_TYPE.get(property_proto.meaning())
    if primitive_type is None:
      # No meaning-based mapping; infer the schema type from the value union.
      value = property_proto.value()
      if value.has_int64value():
        primitive_type = backup_pb2.EntitySchema.INTEGER
      elif value.has_booleanvalue():
        primitive_type = backup_pb2.EntitySchema.BOOLEAN
      elif value.has_stringvalue():
        if property_proto.meaning() == entity_pb.Property.ENTITY_PROTO:
          # The string holds a serialized embedded entity; recurse into it.
          entity_proto = entity_pb.EntityProto()
          try:
            entity_proto.ParsePartialFromString(value.stringvalue())
          except Exception:
            # Unparsable embedded entity: leave both types unset.
            pass
          else:
            entity_type = EntityTypeInfo.create_from_entity_proto(entity_proto)
        else:
          primitive_type = backup_pb2.EntitySchema.STRING
      elif value.has_doublevalue():
        primitive_type = backup_pb2.EntitySchema.FLOAT
      elif value.has_pointvalue():
        primitive_type = backup_pb2.EntitySchema.GEO_POINT
      elif value.has_uservalue():
        primitive_type = backup_pb2.EntitySchema.USER
      elif value.has_referencevalue():
        primitive_type = backup_pb2.EntitySchema.REFERENCE
    return PropertyTypeInfo(
        name, is_repeated,
        (primitive_type,) if primitive_type is not None else None,
        (entity_type,) if entity_type else None)
class SchemaAggregationResult(db.Model):
    """Persistent aggregated type information for a kind.

    An instance can be retrieved via the load method or created
    using the create method. An instance aggregates all type information
    for all seen embedded_entities via the merge method and persisted when
    needed using the model put method.
    """

    # JSON-serialized EntityTypeInfo; unindexed since it is only read back
    # by key/ancestor queries.
    entity_type_info = model.JsonProperty(
        EntityTypeInfo, default=EntityTypeInfo(), indexed=False)

    def merge(self, other):
        """Merge a SchemaAggregationResult or an EntityTypeInfo with this instance.

        Args:
          other: Required SchemaAggregationResult or EntityTypeInfo to merge.

        Returns:
          True if anything was changed. False otherwise.
        """
        if isinstance(other, SchemaAggregationResult):
            other = other.entity_type_info
        return self.entity_type_info.merge(other)

    @classmethod
    def _get_parent_key(cls, backup_id, kind_name):
        # Parent key scopes results to a (backup, kind) pair so all shards of
        # one kind share a common ancestor.
        return datastore_types.Key.from_path('Kind', kind_name, parent=backup_id)

    @classmethod
    def create(cls, backup_id, kind_name, shard_id):
        """Create SchemaAggregationResult instance.

        Args:
          backup_id: Required BackupInformation Key.
          kind_name: Required kind name as string.
          shard_id: Required shard id as string.

        Returns:
          A new SchemaAggregationResult instance.
        """
        parent = cls._get_parent_key(backup_id, kind_name)
        return SchemaAggregationResult(
            key_name=shard_id, parent=parent,
            entity_type_info=EntityTypeInfo(kind=kind_name))

    @classmethod
    def load(cls, backup_id, kind_name, shard_id=None):
        """Retrieve SchemaAggregationResult from the Datastore.

        Args:
          backup_id: Required BackupInformation Key.
          kind_name: Required kind name as string.
          shard_id: Optional shard id as string.

        Returns:
          SchemaAggregationResult iterator or an entity if shard_id not None.
        """
        parent = cls._get_parent_key(backup_id, kind_name)
        if shard_id:
            key = datastore_types.Key.from_path(cls.kind(), shard_id, parent=parent)
            return SchemaAggregationResult.get(key)
        else:
            return db.Query(cls).ancestor(parent).run()

    @classmethod
    def kind(cls):
        # Overrides db.Model.kind() so entities are stored under the shared
        # backup-information type-info kind name.
        return utils.BACKUP_INFORMATION_KIND_TYPE_INFO
class SchemaAggregationPool(object):
    """An MR pool to aggregation type information per kind."""

    def __init__(self, backup_id, kind, shard_id):
        """Construct SchemaAggregationPool instance.

        Args:
          backup_id: Required BackupInformation Key.
          kind: Required kind name as string.
          shard_id: Required shard id as string.
        """
        self.__backup_id = backup_id
        self.__kind = kind
        self.__shard_id = shard_id
        # Resume from a previously persisted aggregation when one exists;
        # otherwise create a fresh record that must be saved at least once.
        self.__aggregation = SchemaAggregationResult.load(backup_id, kind, shard_id)
        if not self.__aggregation:
            self.__aggregation = SchemaAggregationResult.create(backup_id, kind,
                                                                shard_id)
            self.__needs_save = True
        else:
            self.__needs_save = False

    def merge(self, entity_type_info):
        """Merge EntityTypeInfo into aggregated type information."""
        if self.__aggregation.merge(entity_type_info):
            self.__needs_save = True

    def flush(self):
        """Save aggregated type information to the datastore if changed."""
        if self.__needs_save:

            def tx():
                # Re-read inside the transaction and merge into the stored
                # copy so a concurrent write to the same shard record is not
                # silently overwritten.
                aggregation = SchemaAggregationResult.load(
                    self.__backup_id, self.__kind, self.__shard_id)
                if aggregation:
                    if aggregation.merge(self.__aggregation):
                        aggregation.put(force_writes=True)
                    self.__aggregation = aggregation
                else:
                    self.__aggregation.put(force_writes=True)

            db.run_in_transaction(tx)
            self.__needs_save = False
class AggregateSchema(op.Operation):
    """An MR Operation to aggregation type information for a kind.

    This operation will register an MR pool, SchemaAggregationPool, if
    one is not already registered and will invoke the pool's merge operation
    per entity. The pool is responsible for keeping a persistent state of
    type aggregation using the sharded db model, SchemaAggregationResult.
    """

    def __init__(self, entity_proto):
        # Extract this entity's type info up front; the proto itself is not
        # retained.
        self.__entity_info = EntityTypeInfo.create_from_entity_proto(entity_proto)

    def __call__(self, ctx):
        # One pool per mapreduce shard, created lazily on the first entity.
        pool = ctx.get_pool('schema_aggregation_pool')
        if not pool:
            backup_id = datastore_types.Key(
                context.get().mapreduce_spec.params['backup_info_pk'])
            pool = SchemaAggregationPool(
                backup_id, self.__entity_info.kind, ctx.shard_id)
            ctx.register_pool('schema_aggregation_pool', pool)
        pool.merge(self.__entity_info)
class BackupEntity(object):
    """A class which dumps the entity to the writer."""

    def map(self, entity_proto):
        """Backup entity map handler.

        Args:
          entity_proto: An instance of entity_pb.EntityProto.

        Yields:
          A serialized entity_pb.EntityProto as a string
        """
        # Emit the raw entity bytes for the output writer, then an
        # AggregateSchema operation so type info is collected as a side effect
        # of the same pass.
        yield entity_proto.SerializeToString()
        yield AggregateSchema(entity_proto)
class RestoreEntity(object):
    """A class which restore the entity to datastore."""

    def __init__(self):
        # Lazily initialized on the first map() call so the mapreduce context
        # (mapper params) is available by then.
        self.initialized = False
        self.kind_filter = None
        self.app_id = None

    def initialize(self):
        # Reads kind_filter/original_app from the mapper params exactly once.
        if self.initialized:
            return
        mapper_params = context.get().mapreduce_spec.mapper.params
        kind_filter = mapper_params.get('kind_filter')
        self.kind_filter = set(kind_filter) if kind_filter else None
        original_app = mapper_params.get('original_app')
        # Restoring into a different app than the backup came from requires
        # key rewriting; remember the destination app id for that case.
        if original_app and os.getenv('APPLICATION_ID') != original_app:
            self.app_id = os.getenv('APPLICATION_ID')
        self.initialized = True

    def map(self, record):
        """Restore entity map handler.

        Args:
          record: A serialized entity_pb.EntityProto.

        Yields:
          A operation.db.Put for the mapped entity
        """
        self.initialize()
        pb = entity_pb.EntityProto(contents=record)
        if self.app_id:
            # Rewrite keys to the destination app (see utils.FixKeys).
            utils.FixKeys(pb, self.app_id)
        entity = datastore.Entity.FromPb(pb)
        if not self.kind_filter or entity.kind() in self.kind_filter:
            yield op.db.Put(entity)
            if self.app_id:
                # NOTE(review): presumably reserves id space so future
                # allocations don't collide with restored keys -- see
                # utils.AllocateMaxId for the actual contract.
                yield utils.AllocateMaxId(entity.key(), self.app_id)
def validate_gs_bucket_name(bucket_name):
    """Validate the format of the given bucket_name.

    Validation rules are based:
    https://developers.google.com/storage/docs/bucketnaming#requirements

    Args:
      bucket_name: The bucket name to validate.

    Raises:
      BackupValidationException: If the bucket name is invalid.
    """
    name_length = len(bucket_name)
    if name_length > MAX_BUCKET_LEN:
        raise BackupValidationException(
            'Bucket name length should not be longer than %d' % MAX_BUCKET_LEN)
    if name_length < MIN_BUCKET_LEN:
        raise BackupValidationException(
            'Bucket name length should be longer than %d' % MIN_BUCKET_LEN)
    # Reserved prefix check is case-insensitive.
    if bucket_name.lower().startswith('goog'):
        raise BackupValidationException(
            'Bucket name should not start with a "goog" prefix')
    # Dotted bucket names are validated segment by segment.
    for segment in bucket_name.split('.'):
        if len(segment) > MAX_BUCKET_SEGMENT_LEN:
            raise BackupValidationException(
                'Segment length of bucket name should not be longer than %d' %
                MAX_BUCKET_SEGMENT_LEN)
    # Final character-class/shape check against the module-level pattern.
    if not re.match(BUCKET_PATTERN, bucket_name):
        raise BackupValidationException('Invalid bucket name "%s"' % bucket_name)
def is_accessible_bucket_name(bucket_name):
    """Returns True if the application has access to the specified bucket."""
    scope = 'https://www.googleapis.com/auth/devstorage.read_write'
    url = 'https://%s.commondatastorage.googleapis.com/' % bucket_name
    auth_token, _ = app_identity.get_access_token(scope)
    # HEAD request against the bucket root; a 200 response means the app's
    # identity can reach the bucket under the requested scope.
    result = urlfetch.fetch(url, method=urlfetch.HEAD, headers={
        'Authorization': 'OAuth %s' % auth_token,
        'x-goog-api-version': '2'})
    return result and result.status_code == 200
def parse_gs_handle(gs_handle):
    """Splits [/gs/]?bucket_name[/folder]*[/file]? to (bucket_name, path | '')."""
    path = gs_handle
    if path.startswith('/'):
        # A leading slash means an explicit filesystem prefix; only /gs is
        # accepted, anything else is rejected.
        filesystem = path[1:].split('/', 1)[0]
        if filesystem != 'gs':
            raise BackupValidationException('Unsupported filesystem: %s' % filesystem)
        path = path[4:]
    # First path component is the bucket; the remainder (possibly empty) is
    # the object path.
    bucket, separator, rest = path.partition('/')
    return (bucket, rest) if separator else (bucket, '')
def list_bucket_files(bucket_name, prefix, max_keys=1000):
    """Returns a listing of a bucket that matches the given prefix.

    Raises:
      BackupValidationException: If the listing request fails.
    """
    scope = 'https://www.googleapis.com/auth/devstorage.read_only'
    url = 'https://%s.commondatastorage.googleapis.com/?' % bucket_name
    query = [('max-keys', max_keys)]
    if prefix:
        query.append(('prefix', prefix))
    url += urllib.urlencode(query)
    auth_token, _ = app_identity.get_access_token(scope)
    result = urlfetch.fetch(url, method=urlfetch.GET, headers={
        'Authorization': 'OAuth %s' % auth_token,
        'x-goog-api-version': '2'})
    if result and result.status_code == 200:
        # Parse the XML bucket listing and pull out the object key names.
        doc = xml.dom.minidom.parseString(result.content)
        return [node.childNodes[0].data for node in doc.getElementsByTagName('Key')]
    raise BackupValidationException('Request to Google Cloud Storage failed')
def get_gs_object(bucket_name, path):
    """Returns the content of the object at the given path in the bucket.

    Raises:
      BackupValidationException: If the object cannot be fetched.
    """
    scope = 'https://www.googleapis.com/auth/devstorage.read_only'
    url = 'https://%s.commondatastorage.googleapis.com/%s' % (bucket_name, path)
    auth_token, _ = app_identity.get_access_token(scope)
    result = urlfetch.fetch(url, method=urlfetch.GET, headers={
        'Authorization': 'OAuth %s' % auth_token,
        'x-goog-api-version': '2'})
    if result and result.status_code == 200:
        return result.content
    raise BackupValidationException('Requested path was not found')
def get_queue_names(app_id=None, max_rows=100):
    """Returns a list with all non-special queue names for app_id."""
    rpc = apiproxy_stub_map.UserRPC('taskqueue')
    request = taskqueue_service_pb.TaskQueueFetchQueuesRequest()
    response = taskqueue_service_pb.TaskQueueFetchQueuesResponse()
    if app_id:
        request.set_app_id(app_id)
    request.set_max_rows(max_rows)
    # 'default' is always first, even if the fetch below fails.
    queues = ['default']
    try:
        rpc.make_call('FetchQueues', request, response)
        rpc.check_success()

        for queue in response.queue_list():
            # Skip pull queues, internal '__...' queues, and the default queue
            # (already included above).
            if (queue.mode() == taskqueue_service_pb.TaskQueueMode.PUSH and
                    not queue.queue_name().startswith('__') and
                    queue.queue_name() != 'default'):
                queues.append(queue.queue_name())
    except Exception:
        # Best effort: fall back to just the default queue on RPC failure.
        logging.exception('Failed to get queue names.')
    return queues
def handlers_list(base_path):
    """Returns the (url_pattern, handler_class) routes for backup operations.

    Args:
      base_path: URL path prefix under which the handlers are mounted.
    """
    # Each handler class exposes its URL suffix as a SUFFIX class attribute;
    # route order mirrors the original hand-written list.
    route_handlers = (
        BackupLinkHandler,
        ConfirmBackupHandler,
        DoBackupHandler,
        DoBackupRestoreHandler,
        DoBackupDeleteHandler,
        DoBackupAbortHandler,
        DoBackupImportHandler,
    )
    return [(r'%s/%s' % (base_path, handler.SUFFIX), handler)
            for handler in route_handlers]
|
en
| 0.769499
|
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Handler for data backup operation. Generic datastore admin console transfers control to ConfirmBackupHandler after selection of entities. The ConfirmBackupHandler confirms with user his choice, enters a backup name and transfers control to DoBackupHandler. DoBackupHandler starts backup mappers and displays confirmation page. This module also contains actual mapper code for backing data over. Handler to deal with requests from the admin console to backup data. Rendering method that can be called by main.py. Args: handler: the webapp.RequestHandler invoking the method Handler to confirm admin console requests to delete a backup copy. Rendering method that can be called by main.py. Args: handler: the webapp.RequestHandler invoking the method Handler to confirm admin console requests to abort a backup copy. Rendering method that can be called by main.py. Args: handler: the webapp.RequestHandler invoking the method Handler to confirm admin console requests to restore from backup. Rendering method that can be called by main.py. Args: handler: the webapp.RequestHandler invoking the method default_backup_id: default value for handler.request default_delete_backup_after_restore: default value for handler.request Handler to import backup information. Rendering method that can be called by main.py. 
Args: handler: the webapp.RequestHandler invoking the method Handler to display backup information. Rendering method that can be called by main.py. Args: handler: the webapp.RequestHandler invoking the method Base class for all Do*Handlers. Handler for get requests to datastore_admin backup operations. Status of executed jobs is displayed. Return the name of the HTML page for HTTP/GET requests. Return the name of the HTML page for HTTP/POST requests. Process the HTTP/POST request and return the result as parametrs. Handler for post requests to datastore_admin/backup.do. Redirects to the get handler after processing the request. Make exception handling overrideable by tests. Args: e: The exception to handle. Returns: The exception error string. Triggers backup mapper jobs. Args: kinds: a sequence of kind names filesystem: files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM or None to default to blobstore gs_bucket_name: the GS file system bucket in which to store the backup when using the GS file system, and otherwise ignored backup: the backup name queue: the task queue for the backup task mapper_params: the mapper parameters max_jobs: if backup needs more jobs than this, defer them Returns: The job or task ids. Raises: BackupValidationException: On validation error. Exception: On other error. Handler to deal with requests to the backup link to backup data. Handler for get requests to datastore_admin/backup.create. Handler for post requests to datastore_admin/backup.create. An input reader which yields datastore entity proto for a kind. Handler to deal with requests from the admin console to backup data. Triggers backup mapper jobs and returns their ids. Creates backup/restore MR jobs for the given operation. Args: job_operation_key: a key of utils.DatastoreAdminOperation entity. backup_info_key: a key of BackupInformation entity. kinds: a list of kinds to run the M/R for. job_name: the M/R job name prefix. backup_handler: M/R job completion handler. 
input_reader: M/R input reader. output_writer: M/R output writer. mapper_params: custom parameters to pass to mapper. mapreduce_params: dictionary parameters relevant to the whole job. queue: the name of the queue that will be used by the M/R. Returns: Ids of all started mapper jobs as list of strings. Returns the backup filenames for selected kinds or all if None/Empty. Deletes a backup including its associated files and other metadata. Handler to deal with datastore admin requests to delete backup data. Handler for post requests to datastore_admin/backup_delete.do. Deletes are executed and user is redirected to the base-path handler. Handler to deal with datastore admin requests to abort pending backups. Handler for post requests to datastore_admin/backup_abort.do. Abort is executed and user is redirected to the base-path handler. Handler to restore backup data. Deals with requests from the admin console. Triggers backup restore mapper jobs and returns their ids. Handler to deal with datastore admin requests to import backup info. Handler for post requests to datastore_admin/import_backup.do. Import is executed and user is redirected to the base-path handler. An entity to keep information on successful backup operations. An entity to keep files information per kind for a backup. A key for this model should created using kind as a name and the associated BackupInformation as a parent. Updates BackupInformation record for a completed mapper job. Finalize the state of BackupInformation and creates info file for GS. Returns entities iterator from a backup_info file content. Deletes empty files and returns filenames minus the deleted ones. A class for writing Datastore backup metadata files. Construct a BackupInfoWriter. Args: gs_bucket: Required string for the target GS bucket. Write the metadata files for the given backup_info. Args: backup_info: Required BackupInformation. Returns: A list with Backup info filename followed by Kind info filenames. 
Writes a backup_info_file. Args: backup_info: Required BackupInformation. Returns: Backup info filename. Writes type information schema for each kind in backup_info. Args: backup_info: Required BackupInformation. Returns: A list with all created filenames. Creates and populate a backup_pb2.Backup. Writes a kind backup_info. Args: filename: The name of the file to be created as string. backup: apphosting.ext.datastore_admin.Backup proto. Type information for an entity property. Construct a PropertyTypeInfo instance. Args: name: The name of the property as a string. is_repeated: A boolean that indicates if the property is repeated. primitive_types: Optional list of PrimitiveType integer values. embedded_entities: Optional list of EntityTypeInfo. Merge a PropertyTypeInfo with this instance. Args: other: Required PropertyTypeInfo to merge. Returns: True if anything was changed. False otherwise. Raises: ValueError: if property names do not match. TypeError: if other is not instance of PropertyTypeInfo. Add an populate a Field to the given entity_schema. Args: entity_schema: apphosting.ext.datastore_admin.EntitySchema proto. Type information for an entity. Construct an EntityTypeInfo instance. Args: kind: An optional kind name as string. properties: An optional list of PropertyTypeInfo. Merge an EntityTypeInfo with this instance. Args: other: Required EntityTypeInfo to merge. Returns: True if anything was changed. False otherwise. Raises: ValueError: if kinds do not match. TypeError: if other is not instance of EntityTypeInfo. Populates the given entity_schema with values from this instance. Args: entity_schema: apphosting.ext.datastore_admin.EntitySchema proto. Creates and populates an EntityTypeInfo from an EntityProto. Returns the type mapping for the provided property. Persistent aggregated type information for a kind. An instance can be retrieved via the load method or created using the create method. 
An instance aggregates all type information for all seen embedded_entities via the merge method and persisted when needed using the model put method. Merge a SchemaAggregationResult or an EntityTypeInfo with this instance. Args: other: Required SchemaAggregationResult or EntityTypeInfo to merge. Returns: True if anything was changed. False otherwise. Create SchemaAggregationResult instance. Args: backup_id: Required BackupInformation Key. kind_name: Required kind name as string. shard_id: Required shard id as string. Returns: A new SchemaAggregationResult instance. Retrieve SchemaAggregationResult from the Datastore. Args: backup_id: Required BackupInformation Key. kind_name: Required kind name as string. shard_id: Optional shard id as string. Returns: SchemaAggregationResult iterator or an entity if shard_id not None. An MR pool to aggregation type information per kind. Construct SchemaAggregationPool instance. Args: backup_id: Required BackupInformation Key. kind: Required kind name as string. shard_id: Required shard id as string. Merge EntityTypeInfo into aggregated type information. Save aggregated type information to the datastore if changed. An MR Operation to aggregation type information for a kind. This operation will register an MR pool, SchemaAggregationPool, if one is not already registered and will invoke the pool's merge operation per entity. The pool is responsible for keeping a persistent state of type aggregation using the sharded db model, SchemaAggregationResult. A class which dumps the entity to the writer. Backup entity map handler. Args: entity_proto: An instance of entity_pb.EntityProto. Yields: A serialized entity_pb.EntityProto as a string A class which restore the entity to datastore. Restore entity map handler. Args: record: A serialized entity_pb.EntityProto. Yields: A operation.db.Put for the mapped entity Validate the format of the given bucket_name. 
Validation rules are based: https://developers.google.com/storage/docs/bucketnaming#requirements Args: bucket_name: The bucket name to validate. Raises: BackupValidationException: If the bucket name is invalid. Returns True if the application has access to the specified bucket. Splits [/gs/]?bucket_name[/folder]*[/file]? to (bucket_name, path | ''). Returns a listing of of a bucket that matches the given prefix. Returns a listing of of a bucket that matches the given prefix. Returns a list with all non-special queue names for app_id.
| 1.682728
| 2
|
Text_Sentiment_Analysis_Web_Multinomial_Naive_Bayes/env/Lib/site-packages/pandas/tests/extension/base/getitem.py
|
ee2110/Machine_Learning_based_web_applications
| 6
|
6626119
|
<reponame>ee2110/Machine_Learning_based_web_applications
import numpy as np
import pytest
import pandas as pd
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.iloc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_iloc_frame(self, data):
df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.iloc[:4, [0]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.iloc[[0, 1, 2, 3], [0]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name="A")
# slice -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
# sequence -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
def test_loc_series(self, data):
ser = pd.Series(data)
result = ser.loc[:3]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.loc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_loc_frame(self, data):
df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.loc[:3, ["A"]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.loc[[0, 1, 2, 3], ["A"]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name="A")
# slice -> series
result = df.loc[:3, "A"]
self.assert_series_equal(result, expected)
# sequence -> series
result = df.loc[:3, "A"]
self.assert_series_equal(result, expected)
def test_loc_iloc_frame_single_dtype(self, data):
# GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly
# return a scalar
df = pd.DataFrame({"A": data})
expected = pd.Series([data[2]], index=["A"], name=2, dtype=data.dtype)
result = df.loc[2]
self.assert_series_equal(result, expected)
expected = pd.Series(
[data[-1]], index=["A"], name=len(data) - 1, dtype=data.dtype
)
result = df.iloc[-1]
self.assert_series_equal(result, expected)
def test_getitem_scalar(self, data):
result = data[0]
assert isinstance(result, data.dtype.type)
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
def test_getitem_empty(self, data):
# Indexing with empty list
result = data[[]]
assert len(result) == 0
assert isinstance(result, type(data))
expected = data[np.array([], dtype="int64")]
self.assert_extension_array_equal(result, expected)
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
# Empty mask, in series
mask = np.zeros(len(data), dtype=bool)
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
# non-empty mask, raw array
mask[0] = True
result = data[mask]
assert len(result) == 1
assert isinstance(result, type(data))
# non-empty mask, in series
result = pd.Series(data)[mask]
assert len(result) == 1
assert result.dtype == data.dtype
def test_getitem_mask_raises(self, data):
mask = np.array([True, False])
with pytest.raises(IndexError):
data[mask]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError):
data[mask]
def test_getitem_boolean_array_mask(self, data):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
mask[:5] = True
expected = data.take([0, 1, 2, 3, 4])
result = data[mask]
self.assert_extension_array_equal(result, expected)
expected = pd.Series(expected)
result = pd.Series(data)[mask]
self.assert_series_equal(result, expected)
def test_getitem_boolean_array_mask_raises(self, data):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:2] = pd.NA
msg = (
"Cannot mask with a boolean indexer containing NA values|"
"cannot mask with array containing NA / NaN values"
)
with pytest.raises(ValueError, match=msg):
data[mask]
s = pd.Series(data)
with pytest.raises(ValueError):
s[mask]
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_getitem_integer_array(self, data, idx):
result = data[idx]
assert len(result) == 3
assert isinstance(result, type(data))
expected = data.take([0, 1, 2])
self.assert_extension_array_equal(result, expected)
expected = pd.Series(expected)
result = pd.Series(data)[idx]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"idx",
[[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
ids=["list", "integer-array"],
)
def test_getitem_integer_with_missing_raises(self, data, idx):
msg = "Cannot index with an integer indexer containing NA values"
with pytest.raises(ValueError, match=msg):
data[idx]
# TODO this raises KeyError about labels not found (it tries label-based)
# import pandas._testing as tm
# s = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
# with pytest.raises(ValueError, match=msg):
# s[idx]
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
assert isinstance(result, type(data))
result = data[slice(1)] # scalar
assert isinstance(result, type(data))
def test_get(self, data):
# GH 20882
s = pd.Series(data, index=[2 * i for i in range(len(data))])
assert s.get(4) == s.iloc[2]
result = s.get([4, 6])
expected = s.iloc[[2, 3]]
self.assert_series_equal(result, expected)
result = s.get(slice(2))
expected = s.iloc[[0, 1]]
self.assert_series_equal(result, expected)
assert s.get(-1) is None
assert s.get(s.index.max() + 1) is None
s = pd.Series(data[:6], index=list("abcdef"))
assert s.get("c") == s.iloc[2]
result = s.get(slice("b", "d"))
expected = s.iloc[[1, 2, 3]]
self.assert_series_equal(result, expected)
result = s.get("Z")
assert result is None
assert s.get(4) == s.iloc[4]
assert s.get(-1) == s.iloc[-1]
assert s.get(len(s)) is None
# GH 21257
s = pd.Series(data)
s2 = s[::2]
assert s2.get(1) is None
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
assert result.iloc[1] == data[1]
assert result.iloc[2] == data[3]
def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
assert result[1] == data[-1]
result = data.take([0, -1], allow_fill=True, fill_value=na_value)
assert result[0] == data[0]
assert na_cmp(result[1], na_value)
with pytest.raises(IndexError, match="out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
result = empty.take([-1], allow_fill=True)
assert na_cmp(result[0], na_value)
with pytest.raises(IndexError):
empty.take([-1])
with pytest.raises(IndexError, match="cannot do a non-empty take"):
empty.take([0, 1])
def test_take_negative(self, data):
# https://github.com/pandas-dev/pandas/issues/20640
n = len(data)
result = data.take([0, -n, n - 1, -1])
expected = data.take([0, 0, n - 1, n - 1])
self.assert_extension_array_equal(result, expected)
def test_take_non_na_fill_value(self, data_missing):
fill_value = data_missing[1] # valid
na = data_missing[0]
array = data_missing._from_sequence(
[na, fill_value, na], dtype=data_missing.dtype
)
result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
expected = array.take([1, 1])
self.assert_extension_array_equal(result, expected)
def test_take_pandas_style_negative_raises(self, data, na_value):
with pytest.raises(ValueError):
data.take([0, -2], fill_value=na_value, allow_fill=True)
@pytest.mark.parametrize("allow_fill", [True, False])
def test_take_out_of_bounds_raises(self, data, allow_fill):
arr = data[:3]
with pytest.raises(IndexError):
arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
expected = pd.Series(
data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype),
index=[0, len(data) - 1],
)
self.assert_series_equal(result, expected)
def test_reindex(self, data, na_value):
s = pd.Series(data)
result = s.reindex([0, 1, 3])
expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
self.assert_series_equal(result, expected)
n = len(data)
result = s.reindex([-1, 0, n])
expected = pd.Series(
data._from_sequence([na_value, data[0], na_value], dtype=s.dtype),
index=[-1, 0, n],
)
self.assert_series_equal(result, expected)
result = s.reindex([n, n + 1])
expected = pd.Series(
data._from_sequence([na_value, na_value], dtype=s.dtype), index=[n, n + 1]
)
self.assert_series_equal(result, expected)
def test_reindex_non_na_fill_value(self, data_missing):
valid = data_missing[1]
na = data_missing[0]
array = data_missing._from_sequence([na, valid], dtype=data_missing.dtype)
ser = pd.Series(array)
result = ser.reindex([0, 1, 2], fill_value=valid)
expected = pd.Series(
data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype)
)
self.assert_series_equal(result, expected)
def test_loc_len1(self, data):
# see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim
df = pd.DataFrame({"A": data})
res = df.loc[[0], "A"]
assert res._data._block.ndim == 1
    def test_item(self, data):
        # https://github.com/pandas-dev/pandas/pull/30175
        s = pd.Series(data)
        result = s[:1].item()
        assert result == data[0]

        # .item() must reject both empty and length > 1 Series with the
        # same numpy-style error message.
        msg = "can only convert an array of size 1 to a Python scalar"
        with pytest.raises(ValueError, match=msg):
            s[:0].item()

        with pytest.raises(ValueError, match=msg):
            s.item()
|
import numpy as np
import pytest
import pandas as pd
from .base import BaseExtensionTests
class BaseGetitemTests(BaseExtensionTests):
"""Tests for ExtensionArray.__getitem__."""
def test_iloc_series(self, data):
ser = pd.Series(data)
result = ser.iloc[:4]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.iloc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_iloc_frame(self, data):
df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.iloc[:4, [0]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.iloc[[0, 1, 2, 3], [0]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name="A")
# slice -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
# sequence -> series
result = df.iloc[:4, 0]
self.assert_series_equal(result, expected)
def test_loc_series(self, data):
ser = pd.Series(data)
result = ser.loc[:3]
expected = pd.Series(data[:4])
self.assert_series_equal(result, expected)
result = ser.loc[[0, 1, 2, 3]]
self.assert_series_equal(result, expected)
def test_loc_frame(self, data):
df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
expected = pd.DataFrame({"A": data[:4]})
# slice -> frame
result = df.loc[:3, ["A"]]
self.assert_frame_equal(result, expected)
# sequence -> frame
result = df.loc[[0, 1, 2, 3], ["A"]]
self.assert_frame_equal(result, expected)
expected = pd.Series(data[:4], name="A")
# slice -> series
result = df.loc[:3, "A"]
self.assert_series_equal(result, expected)
# sequence -> series
result = df.loc[:3, "A"]
self.assert_series_equal(result, expected)
def test_loc_iloc_frame_single_dtype(self, data):
# GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly
# return a scalar
df = pd.DataFrame({"A": data})
expected = pd.Series([data[2]], index=["A"], name=2, dtype=data.dtype)
result = df.loc[2]
self.assert_series_equal(result, expected)
expected = pd.Series(
[data[-1]], index=["A"], name=len(data) - 1, dtype=data.dtype
)
result = df.iloc[-1]
self.assert_series_equal(result, expected)
def test_getitem_scalar(self, data):
result = data[0]
assert isinstance(result, data.dtype.type)
result = pd.Series(data)[0]
assert isinstance(result, data.dtype.type)
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
result = data_missing[0]
assert na_cmp(result, na_value)
def test_getitem_empty(self, data):
# Indexing with empty list
result = data[[]]
assert len(result) == 0
assert isinstance(result, type(data))
expected = data[np.array([], dtype="int64")]
self.assert_extension_array_equal(result, expected)
def test_getitem_mask(self, data):
# Empty mask, raw array
mask = np.zeros(len(data), dtype=bool)
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
# Empty mask, in series
mask = np.zeros(len(data), dtype=bool)
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
# non-empty mask, raw array
mask[0] = True
result = data[mask]
assert len(result) == 1
assert isinstance(result, type(data))
# non-empty mask, in series
result = pd.Series(data)[mask]
assert len(result) == 1
assert result.dtype == data.dtype
def test_getitem_mask_raises(self, data):
mask = np.array([True, False])
with pytest.raises(IndexError):
data[mask]
mask = pd.array(mask, dtype="boolean")
with pytest.raises(IndexError):
data[mask]
def test_getitem_boolean_array_mask(self, data):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
result = data[mask]
assert len(result) == 0
assert isinstance(result, type(data))
result = pd.Series(data)[mask]
assert len(result) == 0
assert result.dtype == data.dtype
mask[:5] = True
expected = data.take([0, 1, 2, 3, 4])
result = data[mask]
self.assert_extension_array_equal(result, expected)
expected = pd.Series(expected)
result = pd.Series(data)[mask]
self.assert_series_equal(result, expected)
def test_getitem_boolean_array_mask_raises(self, data):
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
mask[:2] = pd.NA
msg = (
"Cannot mask with a boolean indexer containing NA values|"
"cannot mask with array containing NA / NaN values"
)
with pytest.raises(ValueError, match=msg):
data[mask]
s = pd.Series(data)
with pytest.raises(ValueError):
s[mask]
@pytest.mark.parametrize(
"idx",
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
ids=["list", "integer-array", "numpy-array"],
)
def test_getitem_integer_array(self, data, idx):
result = data[idx]
assert len(result) == 3
assert isinstance(result, type(data))
expected = data.take([0, 1, 2])
self.assert_extension_array_equal(result, expected)
expected = pd.Series(expected)
result = pd.Series(data)[idx]
self.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"idx",
[[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
ids=["list", "integer-array"],
)
def test_getitem_integer_with_missing_raises(self, data, idx):
msg = "Cannot index with an integer indexer containing NA values"
with pytest.raises(ValueError, match=msg):
data[idx]
# TODO this raises KeyError about labels not found (it tries label-based)
# import pandas._testing as tm
# s = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))])
# with pytest.raises(ValueError, match=msg):
# s[idx]
def test_getitem_slice(self, data):
# getitem[slice] should return an array
result = data[slice(0)] # empty
assert isinstance(result, type(data))
result = data[slice(1)] # scalar
assert isinstance(result, type(data))
def test_get(self, data):
# GH 20882
s = pd.Series(data, index=[2 * i for i in range(len(data))])
assert s.get(4) == s.iloc[2]
result = s.get([4, 6])
expected = s.iloc[[2, 3]]
self.assert_series_equal(result, expected)
result = s.get(slice(2))
expected = s.iloc[[0, 1]]
self.assert_series_equal(result, expected)
assert s.get(-1) is None
assert s.get(s.index.max() + 1) is None
s = pd.Series(data[:6], index=list("abcdef"))
assert s.get("c") == s.iloc[2]
result = s.get(slice("b", "d"))
expected = s.iloc[[1, 2, 3]]
self.assert_series_equal(result, expected)
result = s.get("Z")
assert result is None
assert s.get(4) == s.iloc[4]
assert s.get(-1) == s.iloc[-1]
assert s.get(len(s)) is None
# GH 21257
s = pd.Series(data)
s2 = s[::2]
assert s2.get(1) is None
def test_take_sequence(self, data):
result = pd.Series(data)[[0, 1, 3]]
assert result.iloc[0] == data[0]
assert result.iloc[1] == data[1]
assert result.iloc[2] == data[3]
def test_take(self, data, na_value, na_cmp):
result = data.take([0, -1])
assert result.dtype == data.dtype
assert result[0] == data[0]
assert result[1] == data[-1]
result = data.take([0, -1], allow_fill=True, fill_value=na_value)
assert result[0] == data[0]
assert na_cmp(result[1], na_value)
with pytest.raises(IndexError, match="out of bounds"):
data.take([len(data) + 1])
def test_take_empty(self, data, na_value, na_cmp):
empty = data[:0]
result = empty.take([-1], allow_fill=True)
assert na_cmp(result[0], na_value)
with pytest.raises(IndexError):
empty.take([-1])
with pytest.raises(IndexError, match="cannot do a non-empty take"):
empty.take([0, 1])
def test_take_negative(self, data):
# https://github.com/pandas-dev/pandas/issues/20640
n = len(data)
result = data.take([0, -n, n - 1, -1])
expected = data.take([0, 0, n - 1, n - 1])
self.assert_extension_array_equal(result, expected)
def test_take_non_na_fill_value(self, data_missing):
fill_value = data_missing[1] # valid
na = data_missing[0]
array = data_missing._from_sequence(
[na, fill_value, na], dtype=data_missing.dtype
)
result = array.take([-1, 1], fill_value=fill_value, allow_fill=True)
expected = array.take([1, 1])
self.assert_extension_array_equal(result, expected)
def test_take_pandas_style_negative_raises(self, data, na_value):
with pytest.raises(ValueError):
data.take([0, -2], fill_value=na_value, allow_fill=True)
@pytest.mark.parametrize("allow_fill", [True, False])
def test_take_out_of_bounds_raises(self, data, allow_fill):
arr = data[:3]
with pytest.raises(IndexError):
arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
def test_take_series(self, data):
s = pd.Series(data)
result = s.take([0, -1])
expected = pd.Series(
data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype),
index=[0, len(data) - 1],
)
self.assert_series_equal(result, expected)
def test_reindex(self, data, na_value):
s = pd.Series(data)
result = s.reindex([0, 1, 3])
expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
self.assert_series_equal(result, expected)
n = len(data)
result = s.reindex([-1, 0, n])
expected = pd.Series(
data._from_sequence([na_value, data[0], na_value], dtype=s.dtype),
index=[-1, 0, n],
)
self.assert_series_equal(result, expected)
result = s.reindex([n, n + 1])
expected = pd.Series(
data._from_sequence([na_value, na_value], dtype=s.dtype), index=[n, n + 1]
)
self.assert_series_equal(result, expected)
def test_reindex_non_na_fill_value(self, data_missing):
valid = data_missing[1]
na = data_missing[0]
array = data_missing._from_sequence([na, valid], dtype=data_missing.dtype)
ser = pd.Series(array)
result = ser.reindex([0, 1, 2], fill_value=valid)
expected = pd.Series(
data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype)
)
self.assert_series_equal(result, expected)
def test_loc_len1(self, data):
# see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim
df = pd.DataFrame({"A": data})
res = df.loc[[0], "A"]
assert res._data._block.ndim == 1
def test_item(self, data):
# https://github.com/pandas-dev/pandas/pull/30175
s = pd.Series(data)
result = s[:1].item()
assert result == data[0]
msg = "can only convert an array of size 1 to a Python scalar"
with pytest.raises(ValueError, match=msg):
s[:0].item()
with pytest.raises(ValueError, match=msg):
s.item()
|
en
| 0.658485
|
Tests for ExtensionArray.__getitem__. # slice -> frame # sequence -> frame # slice -> series # sequence -> series # slice -> frame # sequence -> frame # slice -> series # sequence -> series # GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly # return a scalar # Indexing with empty list # Empty mask, raw array # Empty mask, in series # non-empty mask, raw array # non-empty mask, in series # TODO this raises KeyError about labels not found (it tries label-based) # import pandas._testing as tm # s = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))]) # with pytest.raises(ValueError, match=msg): # s[idx] # getitem[slice] should return an array # empty # scalar # GH 20882 # GH 21257 # https://github.com/pandas-dev/pandas/issues/20640 # valid # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim # https://github.com/pandas-dev/pandas/pull/30175
| 2.388879
| 2
|
app/dao/service_email_reply_to_dao.py
|
tlwr/notifications-api
| 10
|
6626120
|
from sqlalchemy import desc
from app import db
from app.dao.dao_utils import transactional
from app.errors import InvalidRequest
from app.exceptions import ArchiveValidationError
from app.models import ServiceEmailReplyTo
def dao_get_reply_to_by_service_id(service_id):
    """Return all non-archived reply-to email rows for a service.

    Rows are ordered with the default address first, then newest first.
    Returns an empty list when the service has no reply-to addresses.
    """
    reply_to = db.session.query(
        ServiceEmailReplyTo
    ).filter(
        ServiceEmailReplyTo.service_id == service_id,
        ServiceEmailReplyTo.archived == False  # noqa
    ).order_by(desc(ServiceEmailReplyTo.is_default), desc(ServiceEmailReplyTo.created_at)).all()
    return reply_to
def dao_get_reply_to_by_id(service_id, reply_to_id):
    """Return a single non-archived reply-to row for the given service/id.

    Uses Query.one(), so a missing (or archived) row raises
    sqlalchemy NoResultFound rather than returning None.
    """
    reply_to = db.session.query(
        ServiceEmailReplyTo
    ).filter(
        ServiceEmailReplyTo.service_id == service_id,
        ServiceEmailReplyTo.id == reply_to_id,
        ServiceEmailReplyTo.archived == False  # noqa
    ).order_by(ServiceEmailReplyTo.created_at).one()
    return reply_to
@transactional
def add_reply_to_email_address_for_service(service_id, email_address, is_default):
    """Create a new reply-to email address for a service.

    If the new address is to be the default, any existing default is
    demoted first; otherwise the service must already have a default
    (enforced by _raise_when_no_default, which raises InvalidRequest).
    """
    old_default = _get_existing_default(service_id)
    if is_default:
        _reset_old_default_to_false(old_default)
    else:
        _raise_when_no_default(old_default)
    new_reply_to = ServiceEmailReplyTo(service_id=service_id, email_address=email_address, is_default=is_default)
    db.session.add(new_reply_to)
    return new_reply_to
@transactional
def update_reply_to_email_address(service_id, reply_to_id, email_address, is_default):
    """Update an existing reply-to email address for a service.

    If the updated address becomes the default, the previous default is
    demoted. A service must always keep at least one default, so demoting
    the current default is rejected with InvalidRequest (400).
    """
    old_default = _get_existing_default(service_id)
    if is_default:
        _reset_old_default_to_false(old_default)
    else:
        # Reject demoting the only default. Also guard the (unexpected)
        # case where no default exists at all: previously that raised an
        # AttributeError on `old_default.id` (a 500) instead of the 400
        # the add path returns via _raise_when_no_default.
        if old_default is None or old_default.id == reply_to_id:
            raise InvalidRequest("You must have at least one reply to email address as the default.", 400)
    reply_to_update = ServiceEmailReplyTo.query.get(reply_to_id)
    reply_to_update.email_address = email_address
    reply_to_update.is_default = is_default
    db.session.add(reply_to_update)
    return reply_to_update
@transactional
def archive_reply_to_email_address(service_id, reply_to_id):
    """Soft-delete a reply-to address by marking it archived.

    The default address can never be archived, since that would leave the
    service without a default; ArchiveValidationError is raised instead.
    """
    reply_to_archive = ServiceEmailReplyTo.query.filter_by(
        id=reply_to_id,
        service_id=service_id
    ).one()
    if reply_to_archive.is_default:
        raise ArchiveValidationError("You cannot delete a default email reply to address")
    reply_to_archive.archived = True
    db.session.add(reply_to_archive)
    return reply_to_archive
def _get_existing_default(service_id):
    """Return the service's current default reply-to row.

    Returns None when the service has no reply-to addresses at all.
    Raises if the one-default-per-service invariant is broken.
    """
    rows = dao_get_reply_to_by_service_id(service_id=service_id)
    if not rows:
        return None
    defaults = [row for row in rows if row.is_default]
    if len(defaults) != 1:
        raise Exception(
            "There should only be one default reply to email for each service. Service {} has {}".format(
                service_id, len(defaults)))
    return defaults[0]
def _reset_old_default_to_false(old_default):
    """Demote the previous default row; no-op when the service had none."""
    if old_default:
        old_default.is_default = False
        db.session.add(old_default)
def _raise_when_no_default(old_default):
    """Raise InvalidRequest (400) when the service has no default address."""
    # check that the update is not updating the only default to false
    if not old_default:
        raise InvalidRequest("You must have at least one reply to email address as the default.", 400)
|
from sqlalchemy import desc
from app import db
from app.dao.dao_utils import transactional
from app.errors import InvalidRequest
from app.exceptions import ArchiveValidationError
from app.models import ServiceEmailReplyTo
def dao_get_reply_to_by_service_id(service_id):
reply_to = db.session.query(
ServiceEmailReplyTo
).filter(
ServiceEmailReplyTo.service_id == service_id,
ServiceEmailReplyTo.archived == False # noqa
).order_by(desc(ServiceEmailReplyTo.is_default), desc(ServiceEmailReplyTo.created_at)).all()
return reply_to
def dao_get_reply_to_by_id(service_id, reply_to_id):
reply_to = db.session.query(
ServiceEmailReplyTo
).filter(
ServiceEmailReplyTo.service_id == service_id,
ServiceEmailReplyTo.id == reply_to_id,
ServiceEmailReplyTo.archived == False # noqa
).order_by(ServiceEmailReplyTo.created_at).one()
return reply_to
@transactional
def add_reply_to_email_address_for_service(service_id, email_address, is_default):
old_default = _get_existing_default(service_id)
if is_default:
_reset_old_default_to_false(old_default)
else:
_raise_when_no_default(old_default)
new_reply_to = ServiceEmailReplyTo(service_id=service_id, email_address=email_address, is_default=is_default)
db.session.add(new_reply_to)
return new_reply_to
@transactional
def update_reply_to_email_address(service_id, reply_to_id, email_address, is_default):
old_default = _get_existing_default(service_id)
if is_default:
_reset_old_default_to_false(old_default)
else:
if old_default.id == reply_to_id:
raise InvalidRequest("You must have at least one reply to email address as the default.", 400)
reply_to_update = ServiceEmailReplyTo.query.get(reply_to_id)
reply_to_update.email_address = email_address
reply_to_update.is_default = is_default
db.session.add(reply_to_update)
return reply_to_update
@transactional
def archive_reply_to_email_address(service_id, reply_to_id):
reply_to_archive = ServiceEmailReplyTo.query.filter_by(
id=reply_to_id,
service_id=service_id
).one()
if reply_to_archive.is_default:
raise ArchiveValidationError("You cannot delete a default email reply to address")
reply_to_archive.archived = True
db.session.add(reply_to_archive)
return reply_to_archive
def _get_existing_default(service_id):
existing_reply_to = dao_get_reply_to_by_service_id(service_id=service_id)
if existing_reply_to:
old_default = [x for x in existing_reply_to if x.is_default]
if len(old_default) == 1:
return old_default[0]
else:
raise Exception(
"There should only be one default reply to email for each service. Service {} has {}".format(
service_id, len(old_default)))
return None
def _reset_old_default_to_false(old_default):
if old_default:
old_default.is_default = False
db.session.add(old_default)
def _raise_when_no_default(old_default):
# check that the update is not updating the only default to false
if not old_default:
raise InvalidRequest("You must have at least one reply to email address as the default.", 400)
|
en
| 0.530851
|
# noqa # noqa # check that the update is not updating the only default to false
| 2.054982
| 2
|
Mundo 2/exercicio054.py
|
samudoki/Python-Curso-em-Video
| 1
|
6626121
|
<reponame>samudoki/Python-Curso-em-Video
# Exercise 54: read six birth years and count adults vs. minors.
from datetime import date
total_maior = 0  # count of people aged 18 or over
total_menor = 0  # count of people under 18
ano_atual = date.today().year  # current year, used to derive each age
for n in range(1, 7):
    # Prompt (Portuguese): "In which year was the Nth person born?"
    ano = int(input(f'Em que ano nasceu a {n}° pessoa? '))
    idade = ano_atual - ano  # age in years, ignoring month/day of birth
    if idade >= 18:
        total_maior += 1
    else:
        total_menor += 1
print(f'Existem {total_maior} pessoas maior de idade aqui \nExistem {total_menor} pessoas menor de idade')
|
#Exercicio 54
from datetime import date
total_maior = 0
total_menor = 0
ano_atual = date.today().year
for n in range(1, 7):
ano = int(input(f'Em que ano nasceu a {n}° pessoa? '))
idade = ano_atual - ano
if idade >= 18:
total_maior += 1
else:
total_menor += 1
print(f'Existem {total_maior} pessoas maior de idade aqui \nExistem {total_menor} pessoas menor de idade')
|
nl
| 0.292496
|
#Exercicio 54
| 3.742688
| 4
|
examples/retries_with_mapping.py
|
jamestwebber/prefect
| 0
|
6626122
|
"""
This example showcases a few basic Prefect concepts:
- the ability to "map" tasks across the dynamic output of upstream tasks; additionally, it is not required that the
upstream task actually passes data to the downstream task, as this example demonstrates
- task retries: `flow.run()` will perform retries, on schedule, for all tasks which require it,
including individual mapped tasks
This flow first generates a list of random length, and them maps over that list to spawn a dynamic number of
downstream tasks which randomly fail. The takeaway here is that we don't have to know a-priori how many mapped tasks
will be created prior to execution! Additionally, each failed mapped task will retry on its own.
"""
import random
from datetime import timedelta
from prefect import task, Flow
@task
def generate_random_list():
    """Return a list of 15-25 integers; its length is decided at run time."""
    n = random.randint(15, 25)
    return list(range(n))
@task(max_retries=3, retry_delay=timedelta(seconds=0))
def randomly_fail():
    """Fail roughly 30% of the time; Prefect retries each failed mapped run."""
    x = random.random()
    if x > 0.7:
        raise ValueError("x is too large")
with Flow("random-mapping") as f:
final = randomly_fail.map(upstream_tasks=[generate_random_list])
# should see logs suggesting that some tasks are failing and retrying
f.run()
|
"""
This example showcases a few basic Prefect concepts:
- the ability to "map" tasks across the dynamic output of upstream tasks; additionally, it is not required that the
upstream task actually passes data to the downstream task, as this example demonstrates
- task retries: `flow.run()` will perform retries, on schedule, for all tasks which require it,
including individual mapped tasks
This flow first generates a list of random length, and them maps over that list to spawn a dynamic number of
downstream tasks which randomly fail. The takeaway here is that we don't have to know a-priori how many mapped tasks
will be created prior to execution! Additionally, each failed mapped task will retry on its own.
"""
import random
from datetime import timedelta
from prefect import task, Flow
@task
def generate_random_list():
n = random.randint(15, 25)
return list(range(n))
@task(max_retries=3, retry_delay=timedelta(seconds=0))
def randomly_fail():
x = random.random()
if x > 0.7:
raise ValueError("x is too large")
with Flow("random-mapping") as f:
final = randomly_fail.map(upstream_tasks=[generate_random_list])
# should see logs suggesting that some tasks are failing and retrying
f.run()
|
en
| 0.958913
|
This example showcases a few basic Prefect concepts: - the ability to "map" tasks across the dynamic output of upstream tasks; additionally, it is not required that the upstream task actually passes data to the downstream task, as this example demonstrates - task retries: `flow.run()` will perform retries, on schedule, for all tasks which require it, including individual mapped tasks This flow first generates a list of random length, and them maps over that list to spawn a dynamic number of downstream tasks which randomly fail. The takeaway here is that we don't have to know a-priori how many mapped tasks will be created prior to execution! Additionally, each failed mapped task will retry on its own. # should see logs suggesting that some tasks are failing and retrying
| 3.426869
| 3
|
setup.py
|
aldegoeij/aws-shell
| 0
|
6626123
|
<filename>setup.py
#!/usr/bin/env python
"""Packaging script for the aws-shell CLI (setuptools-based)."""
import re
import ast
from setuptools import setup, find_packages
# Runtime dependencies, pinned below the next incompatible major version.
requires = [
    'awscli>=1.16.87,<2.0.0',
    'prompt-toolkit>=1.0.15,<2.0.0',
    'boto3>=1.9.77,<3.0.0',
    'configobj>=5.0.6,<6.0.0',
    'Pygments>=2.1.3,<3.0.0',
]
# Read __version__ from awsshell/__init__.py without importing the package,
# so building the sdist does not require the runtime dependencies.
with open('awsshell/__init__.py', 'r') as f:
    version = str(
        ast.literal_eval(
            re.search(
                r'__version__\s+=\s+(.*)',
                f.read()).group(1)))
setup(
    name='aws-shell',
    version=version,
    description='AWS Shell',
    long_description=open('README.rst').read(),
    author='<NAME>',
    url='https://github.com/awslabs/aws-shell',
    packages=find_packages(exclude=['tests*']),
    include_package_data=True,
    # Ship the bundled completion-index JSON files and the default rc file.
    package_data={'awsshell': ['data/*/*.json',
                               'awsshellrc']},
    install_requires=requires,
    entry_points={
        'console_scripts': [
            'aws-shell = awsshell:main',
            'aws-shell-mkindex = awsshell.makeindex:main',
        ]
    },
    license="Apache License 2.0",
    classifiers=(
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ),
)
|
<filename>setup.py
#!/usr/bin/env python
import re
import ast
from setuptools import setup, find_packages
requires = [
'awscli>=1.16.87,<2.0.0',
'prompt-toolkit>=1.0.15,<2.0.0',
'boto3>=1.9.77,<3.0.0',
'configobj>=5.0.6,<6.0.0',
'Pygments>=2.1.3,<3.0.0',
]
with open('awsshell/__init__.py', 'r') as f:
version = str(
ast.literal_eval(
re.search(
r'__version__\s+=\s+(.*)',
f.read()).group(1)))
setup(
name='aws-shell',
version=version,
description='AWS Shell',
long_description=open('README.rst').read(),
author='<NAME>',
url='https://github.com/awslabs/aws-shell',
packages=find_packages(exclude=['tests*']),
include_package_data=True,
package_data={'awsshell': ['data/*/*.json',
'awsshellrc']},
install_requires=requires,
entry_points={
'console_scripts': [
'aws-shell = awsshell:main',
'aws-shell-mkindex = awsshell.makeindex:main',
]
},
license="Apache License 2.0",
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
),
)
|
ru
| 0.26433
|
#!/usr/bin/env python
| 1.42295
| 1
|
azure-mgmt-batchai/azure/mgmt/batchai/models/node_state_counts_py3.py
|
JonathanGailliez/azure-sdk-for-python
| 1
|
6626124
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeStateCounts(Model):
    """Counts of various compute node states on the cluster.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar idle_node_count: Number of compute nodes in idle state.
    :vartype idle_node_count: int
    :ivar running_node_count: Number of compute nodes which are running jobs.
    :vartype running_node_count: int
    :ivar preparing_node_count: Number of compute nodes which are being
     prepared.
    :vartype preparing_node_count: int
    :ivar unusable_node_count: Number of compute nodes which are unusable.
    :vartype unusable_node_count: int
    :ivar leaving_node_count: Number of compute nodes which are leaving the
     cluster.
    :vartype leaving_node_count: int
    """

    # Every counter is server-populated, hence marked read-only for msrest.
    _validation = {
        'idle_node_count': {'readonly': True},
        'running_node_count': {'readonly': True},
        'preparing_node_count': {'readonly': True},
        'unusable_node_count': {'readonly': True},
        'leaving_node_count': {'readonly': True},
    }

    # Maps Python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'idle_node_count': {'key': 'idleNodeCount', 'type': 'int'},
        'running_node_count': {'key': 'runningNodeCount', 'type': 'int'},
        'preparing_node_count': {'key': 'preparingNodeCount', 'type': 'int'},
        'unusable_node_count': {'key': 'unusableNodeCount', 'type': 'int'},
        'leaving_node_count': {'key': 'leavingNodeCount', 'type': 'int'},
    }

    def __init__(self, **kwargs) -> None:
        super(NodeStateCounts, self).__init__(**kwargs)
        # Counters start as None; the service fills them in on responses.
        self.idle_node_count = None
        self.running_node_count = None
        self.preparing_node_count = None
        self.unusable_node_count = None
        self.leaving_node_count = None
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeStateCounts(Model):
"""Counts of various compute node states on the cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar idle_node_count: Number of compute nodes in idle state.
:vartype idle_node_count: int
:ivar running_node_count: Number of compute nodes which are running jobs.
:vartype running_node_count: int
:ivar preparing_node_count: Number of compute nodes which are being
prepared.
:vartype preparing_node_count: int
:ivar unusable_node_count: Number of compute nodes which are unusable.
:vartype unusable_node_count: int
:ivar leaving_node_count: Number of compute nodes which are leaving the
cluster.
:vartype leaving_node_count: int
"""
_validation = {
'idle_node_count': {'readonly': True},
'running_node_count': {'readonly': True},
'preparing_node_count': {'readonly': True},
'unusable_node_count': {'readonly': True},
'leaving_node_count': {'readonly': True},
}
_attribute_map = {
'idle_node_count': {'key': 'idleNodeCount', 'type': 'int'},
'running_node_count': {'key': 'runningNodeCount', 'type': 'int'},
'preparing_node_count': {'key': 'preparingNodeCount', 'type': 'int'},
'unusable_node_count': {'key': 'unusableNodeCount', 'type': 'int'},
'leaving_node_count': {'key': 'leavingNodeCount', 'type': 'int'},
}
def __init__(self, **kwargs) -> None:
super(NodeStateCounts, self).__init__(**kwargs)
self.idle_node_count = None
self.running_node_count = None
self.preparing_node_count = None
self.unusable_node_count = None
self.leaving_node_count = None
|
en
| 0.706599
|
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- Counts of various compute node states on the cluster. Variables are only populated by the server, and will be ignored when sending a request. :ivar idle_node_count: Number of compute nodes in idle state. :vartype idle_node_count: int :ivar running_node_count: Number of compute nodes which are running jobs. :vartype running_node_count: int :ivar preparing_node_count: Number of compute nodes which are being prepared. :vartype preparing_node_count: int :ivar unusable_node_count: Number of compute nodes which are unusable. :vartype unusable_node_count: int :ivar leaving_node_count: Number of compute nodes which are leaving the cluster. :vartype leaving_node_count: int
| 1.857788
| 2
|
holoviews/plotting/bokeh/tabular.py
|
fedario/holoviews
| 0
|
6626125
|
<filename>holoviews/plotting/bokeh/tabular.py
from __future__ import absolute_import, division, unicode_literals
import param
from bokeh.models import Column
from bokeh.models.widgets import (
DataTable, TableColumn, NumberEditor, NumberFormatter, DateFormatter,
DateEditor, StringFormatter, StringEditor, IntEditor
)
from ...core import Dataset, Dimension
from ...element import ItemTable
from ...streams import Buffer
from ...core.util import dimension_sanitizer, isdatetime
from ..plot import GenericElementPlot
from .plot import BokehPlot
class TablePlot(BokehPlot, GenericElementPlot):
finalize_hooks = param.HookList(default=[], doc="""
Deprecated; use hooks options instead.""")
hooks = param.HookList(default=[], doc="""
Optional list of hooks called when finalizing a plot. The
hook is passed the plot object and the displayed element, and
other plotting handles can be accessed via plot.handles.""")
height = param.Number(default=300)
width = param.Number(default=400)
style_opts = ['row_headers', 'selectable', 'editable',
'sortable', 'fit_columns', 'scroll_to_selection',
'index_position']
_stream_data = True
    def __init__(self, element, plot=None, **params):
        """Initialize table-plot state from the supplied element/HoloMap."""
        super(TablePlot, self).__init__(element, **params)
        # NOTE(review): when `plot` is supplied this reads self.handles['plot'],
        # which presumes the superclass __init__ already populated handles -
        # confirm, otherwise this line would raise before assignment.
        self.handles = {} if plot is None else self.handles['plot']
        element_ids = self.hmap.traverse(lambda x: id(x), [Dataset, ItemTable])
        # Static if every frame is literally the same object (one unique id)
        # and a frame exists for every key.
        self.static = len(set(element_ids)) == 1 and len(self.keys) == len(self.hmap)
        self.callbacks = self._construct_callbacks()
        # Buffer streams drive incremental (streaming) source updates.
        self.streaming = [s for s in self.streams if isinstance(s, Buffer)]
        self.static_source = False
def get_data(self, element, ranges, style):
return ({dimension_sanitizer(d.name): element.dimension_values(d)
for d in element.dimensions()}, {}, style)
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
element = self.hmap.last
key = self.keys[-1]
self.current_frame = element
self.current_key = key
style = self.lookup_options(element, 'style')[self.cyclic_index]
data, _, style = self.get_data(element, ranges, style)
if source is None:
source = self._init_datasource(data)
self.handles['source'] = self.handles['cds'] = source
self.handles['selected'] = source.selected
columns = self._get_columns(element, data)
style['reorderable'] = False
table = DataTable(source=source, columns=columns, height=self.height,
width=self.width, **style)
self.handles['table'] = table
self.handles['glyph_renderer'] = table
self._execute_hooks(element)
self.drawn = True
title = self._get_title_div(self.keys[-1], '10pt')
if title:
plot = Column(title, table)
self.handles['title'] = title
else:
plot = table
self.handles['plot'] = plot
for cb in self.callbacks:
cb.initialize()
return plot
def _get_columns(self, element, data):
columns = []
for d in element.dimensions():
col = dimension_sanitizer(d.name)
kind = data[col].dtype.kind
if kind == 'i':
formatter = NumberFormatter()
editor = IntEditor()
elif kind == 'f':
formatter = NumberFormatter(format='0,0.0[00000]')
editor = NumberEditor()
elif isdatetime(data[col]):
dimtype = element.get_dimension_type(col)
dformat = Dimension.type_formatters.get(dimtype, '%Y-%m-%d %H:%M:%S')
formatter = DateFormatter(format=dformat)
editor = DateEditor()
else:
formatter = StringFormatter()
editor = StringEditor()
column = TableColumn(field=dimension_sanitizer(d.name), title=d.pprint_label,
editor=editor, formatter=formatter)
columns.append(column)
return columns
def update_frame(self, key, ranges=None, plot=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
element = self._get_frame(key)
self._get_title_div(key, '12pt')
# Cache frame object id to skip updating data if unchanged
previous_id = self.handles.get('previous_id', None)
current_id = element._plot_id
self.handles['previous_id'] = current_id
self.static_source = (self.dynamic and (current_id == previous_id))
if (element is None or (not self.dynamic and self.static) or
(self.streaming and self.streaming[0].data is self.current_frame.data
and not self.streaming[0]._triggering) or self.static_source):
return
source = self.handles['source']
style = self.lookup_options(element, 'style')[self.cyclic_index]
data, _, style = self.get_data(element, ranges, style)
columns = self._get_columns(element, data)
self.handles['table'].columns = columns
self._update_datasource(source, data)
|
<filename>holoviews/plotting/bokeh/tabular.py
from __future__ import absolute_import, division, unicode_literals
import param
from bokeh.models import Column
from bokeh.models.widgets import (
DataTable, TableColumn, NumberEditor, NumberFormatter, DateFormatter,
DateEditor, StringFormatter, StringEditor, IntEditor
)
from ...core import Dataset, Dimension
from ...element import ItemTable
from ...streams import Buffer
from ...core.util import dimension_sanitizer, isdatetime
from ..plot import GenericElementPlot
from .plot import BokehPlot
class TablePlot(BokehPlot, GenericElementPlot):
    """Renders tabular elements (Dataset/ItemTable) as a Bokeh DataTable widget."""

    finalize_hooks = param.HookList(default=[], doc="""
        Deprecated; use hooks options instead.""")

    hooks = param.HookList(default=[], doc="""
        Optional list of hooks called when finalizing a plot. The
        hook is passed the plot object and the displayed element, and
        other plotting handles can be accessed via plot.handles.""")

    # Widget dimensions in pixels.
    height = param.Number(default=300)

    width = param.Number(default=400)

    # Keyword options forwarded verbatim to the Bokeh DataTable model.
    style_opts = ['row_headers', 'selectable', 'editable',
                  'sortable', 'fit_columns', 'scroll_to_selection',
                  'index_position']

    # Signals that streamed data should be pushed into the ColumnDataSource.
    _stream_data = True

    def __init__(self, element, plot=None, **params):
        super(TablePlot, self).__init__(element, **params)
        # NOTE(review): when a plot is supplied this reads self.handles['plot']
        # rather than storing the given plot -- relies on the superclass having
        # populated self.handles already; confirm intent upstream.
        self.handles = {} if plot is None else self.handles['plot']
        element_ids = self.hmap.traverse(lambda x: id(x), [Dataset, ItemTable])
        # Static when every frame is the identical object and all keys exist.
        self.static = len(set(element_ids)) == 1 and len(self.keys) == len(self.hmap)
        self.callbacks = self._construct_callbacks()
        # Buffer streams support efficient incremental updates in update_frame.
        self.streaming = [s for s in self.streams if isinstance(s, Buffer)]
        self.static_source = False

    def get_data(self, element, ranges, style):
        """Return (column dict keyed by sanitized dimension name, mapping, style)."""
        return ({dimension_sanitizer(d.name): element.dimension_values(d)
                 for d in element.dimensions()}, {}, style)

    def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
        """
        Initializes a new plot object with the last available frame.
        """
        # Get element key and ranges for frame
        element = self.hmap.last
        key = self.keys[-1]
        self.current_frame = element
        self.current_key = key
        style = self.lookup_options(element, 'style')[self.cyclic_index]
        data, _, style = self.get_data(element, ranges, style)
        if source is None:
            source = self._init_datasource(data)
        self.handles['source'] = self.handles['cds'] = source
        self.handles['selected'] = source.selected
        columns = self._get_columns(element, data)
        # Disable column drag-reordering for a deterministic layout.
        style['reorderable'] = False
        table = DataTable(source=source, columns=columns, height=self.height,
                          width=self.width, **style)
        self.handles['table'] = table
        self.handles['glyph_renderer'] = table
        self._execute_hooks(element)
        self.drawn = True
        title = self._get_title_div(self.keys[-1], '10pt')
        if title:
            # Stack the title div above the table in a bokeh layout Column.
            plot = Column(title, table)
            self.handles['title'] = title
        else:
            plot = table
        self.handles['plot'] = plot
        for cb in self.callbacks:
            cb.initialize()
        return plot

    def _get_columns(self, element, data):
        # Build one TableColumn per dimension, picking formatter/editor by dtype kind.
        columns = []
        for d in element.dimensions():
            col = dimension_sanitizer(d.name)
            kind = data[col].dtype.kind
            if kind == 'i':
                formatter = NumberFormatter()
                editor = IntEditor()
            elif kind == 'f':
                formatter = NumberFormatter(format='0,0.0[00000]')
                editor = NumberEditor()
            elif isdatetime(data[col]):
                # Use the dimension-type specific format if one is registered.
                dimtype = element.get_dimension_type(col)
                dformat = Dimension.type_formatters.get(dimtype, '%Y-%m-%d %H:%M:%S')
                formatter = DateFormatter(format=dformat)
                editor = DateEditor()
            else:
                formatter = StringFormatter()
                editor = StringEditor()
            column = TableColumn(field=dimension_sanitizer(d.name), title=d.pprint_label,
                                 editor=editor, formatter=formatter)
            columns.append(column)
        return columns

    def update_frame(self, key, ranges=None, plot=None):
        """
        Updates an existing plot with data corresponding
        to the key.
        """
        element = self._get_frame(key)
        self._get_title_div(key, '12pt')
        # Cache frame object id to skip updating data if unchanged
        # NOTE(review): element._plot_id is read before the `element is None`
        # check below -- presumably _get_frame never returns None here; verify.
        previous_id = self.handles.get('previous_id', None)
        current_id = element._plot_id
        self.handles['previous_id'] = current_id
        self.static_source = (self.dynamic and (current_id == previous_id))
        # Skip the update when the mapping is fully static or a Buffer stream
        # holds unchanged data that is not currently triggering.
        if (element is None or (not self.dynamic and self.static) or
            (self.streaming and self.streaming[0].data is self.current_frame.data
             and not self.streaming[0]._triggering) or self.static_source):
            return
        source = self.handles['source']
        style = self.lookup_options(element, 'style')[self.cyclic_index]
        data, _, style = self.get_data(element, ranges, style)
        columns = self._get_columns(element, data)
        self.handles['table'].columns = columns
        self._update_datasource(source, data)
|
en
| 0.69499
|
Deprecated; use hooks options instead. Optional list of hooks called when finalizing a plot. The hook is passed the plot object and the displayed element, and other plotting handles can be accessed via plot.handles. Initializes a new plot object with the last available frame. # Get element key and ranges for frame Updates an existing plot with data corresponding to the key. # Cache frame object id to skip updating data if unchanged
| 2.6094
| 3
|
surveys/models/surveys.py
|
jerivas/mezzanine-surveys
| 0
|
6626126
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import uuid
from builtins import range
from django.db import models
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import RichText, TimeStamped
from mezzanine.pages.models import Page
from ..managers import SurveyPurchaseQuerySet
class SurveyPage(Page, RichText):
    """
    Survey that's available for purchase.
    """
    instructions = RichTextField(_("Instructions"))
    # A cost of 0 makes the survey free (see get_requires_payment).
    cost = models.DecimalField(_("Cost"), max_digits=7, decimal_places=2, default=0)
    purchase_response = RichTextField(_("Purchase response"))
    completed_message = RichTextField(
        _("Completed message"),
        help_text=_("Message shown to users after completing the survey"))
    max_rating = models.PositiveSmallIntegerField(
        _("Maximum rating"), default=5,
        validators=[MinValueValidator(2), MaxValueValidator(10)],
        help_text=_("For rating questions. Must be a number between 2 and 10"))
    report_explanation = RichTextField(
        _("Explanation"),
        help_text=_("Helping content shown before the results' detail"))

    def get_questions(self):
        """
        Collect all questions related to this survey.
        """
        # Imported lazily to avoid a circular import with .questions.
        from .questions import Question
        return Question.objects.filter(subcategory__category__survey=self)

    def get_rating_choices(self):
        """Return the valid rating values 1..max_rating (inclusive)."""
        return range(1, self.max_rating + 1)

    def get_requires_payment(self):
        """Return True when this survey must be paid for (non-zero cost)."""
        return self.cost > 0

    class Meta:
        verbose_name = _("survey page")
        verbose_name_plural = _("survey pages")
@python_2_unicode_compatible
class SurveyPurchaseCode(models.Model):
    """
    Code to gain access to a Survey without paying.
    """
    survey = models.ForeignKey(SurveyPage, related_name="purchase_codes")
    code = models.CharField(
        _("Code"), max_length=20, blank=True,
        help_text=_("If left blank it will be automatically generated"))
    # Decremented elsewhere as codes are redeemed; 0 means exhausted.
    uses_remaining = models.PositiveIntegerField(_("Remaining uses"), default=0)

    class Meta:
        verbose_name = _("purchase code")
        verbose_name_plural = _("purchase codes")
        unique_together = ("survey", "code")

    def __str__(self):
        return self.code

    def save(self, *args, **kwargs):
        """
        Generate a UUID if the code hasn't been defined
        """
        if not self.code:
            # NOTE(review): str.strip("-") only removes leading/trailing
            # hyphens -- a no-op on a UUID string -- so the 19-char slice can
            # still contain hyphens. If hyphen-free codes were intended this
            # should be .replace("-", ""); confirm before changing, since
            # existing stored codes depend on current behavior.
            self.code = str(uuid.uuid4()).strip("-")[4:23]
        super(SurveyPurchaseCode, self).save(*args, **kwargs)
@python_2_unicode_compatible
class SurveyPurchase(TimeStamped):
    """
    A record of a user purchasing a Survey.
    """
    survey = models.ForeignKey(SurveyPage, on_delete=models.CASCADE, related_name="purchases")
    purchaser = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="survey_purchases")
    # Public-facing identifier used in URLs instead of the numeric pk.
    public_id = models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)
    transaction_id = models.CharField(_("Transaction ID"), max_length=200, blank=True)
    payment_method = models.CharField(_("Payment method"), max_length=100, blank=True)
    amount = models.DecimalField(
        _("Amount"), max_digits=8, decimal_places=2, blank=True, null=True)
    notes = models.TextField(_("Notes"), blank=True)
    # Timestamp of the last generate_report() run; report_cache holds its JSON.
    report_generated = models.DateTimeField(_("Report generated"), blank=True, null=True)
    report_cache = models.TextField(_("Report (cached)"), default="[]")

    objects = SurveyPurchaseQuerySet.as_manager()

    class Meta:
        verbose_name = _("purchase")
        verbose_name_plural = _("purchases")

    def __str__(self):
        return str(self.survey)

    def get_absolute_url(self):
        return reverse("surveys:purchase_detail", args=[self.public_id])

    def get_response_create_url(self):
        return reverse("surveys:response_create", args=[self.public_id])

    def get_complete_url(self):
        return reverse("surveys:response_complete", args=[self.public_id])

    def get_report_url(self):
        return reverse("surveys:purchase_report", args=[self.public_id])

    def generate_report(self):
        """
        Generate a report of all responses related to this purchase.
        A cached copy will be stored in self.report_cache.
        The report includes nested data in the shape of Category / Subcategory / Question.
        """
        # Imported lazily to avoid a circular import with .questions.
        from .questions import Question, QuestionResponse
        rating_responses = QuestionResponse.objects.filter(
            response__purchase=self, question__field_type=Question.RATING_FIELD)
        # Group free-text answers under their question prompt.
        text_questions = []
        for question in self.survey.get_questions().filter(field_type=Question.TEXT_FIELD):
            responses = question.responses.filter(response__purchase=self)
            text_questions.append({
                "id": question.pk,
                "prompt": question.prompt,
                "responses": list(responses.values_list("text_response", flat=True)),
            })
        report = {
            "rating": {
                "count": rating_responses.count(),
                "average": rating_responses.get_average(),
                "frequencies": rating_responses.get_frequencies(self.survey.get_rating_choices()),
            },
            "categories": self.survey.categories.get_rating_data(purchase=self),
            "text_questions": text_questions,
        }
        # Cache the serialized report and stamp the generation time.
        self.report_cache = json.dumps(report)
        self.report_generated = now()
        self.save()
        return report

    def get_report_as_json(self):
        """
        Load the cached report as JSON.
        """
        return json.loads(self.report_cache)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import uuid
from builtins import range
from django.db import models
from django.core.urlresolvers import reverse
from django.core.validators import MinValueValidator, MaxValueValidator
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.models import RichText, TimeStamped
from mezzanine.pages.models import Page
from ..managers import SurveyPurchaseQuerySet
class SurveyPage(Page, RichText):
    """
    Survey that's available for purchase.
    """
    instructions = RichTextField(_("Instructions"))
    # A cost of 0 makes the survey free (see get_requires_payment).
    cost = models.DecimalField(_("Cost"), max_digits=7, decimal_places=2, default=0)
    purchase_response = RichTextField(_("Purchase response"))
    completed_message = RichTextField(
        _("Completed message"),
        help_text=_("Message shown to users after completing the survey"))
    max_rating = models.PositiveSmallIntegerField(
        _("Maximum rating"), default=5,
        validators=[MinValueValidator(2), MaxValueValidator(10)],
        help_text=_("For rating questions. Must be a number between 2 and 10"))
    report_explanation = RichTextField(
        _("Explanation"),
        help_text=_("Helping content shown before the results' detail"))

    def get_questions(self):
        """
        Collect all questions related to this survey.
        """
        # Imported lazily to avoid a circular import with .questions.
        from .questions import Question
        return Question.objects.filter(subcategory__category__survey=self)

    def get_rating_choices(self):
        """Return the valid rating values 1..max_rating (inclusive)."""
        return range(1, self.max_rating + 1)

    def get_requires_payment(self):
        """Return True when this survey must be paid for (non-zero cost)."""
        return self.cost > 0

    class Meta:
        verbose_name = _("survey page")
        verbose_name_plural = _("survey pages")
@python_2_unicode_compatible
class SurveyPurchaseCode(models.Model):
    """
    Code to gain access to a Survey without paying.
    """
    survey = models.ForeignKey(SurveyPage, related_name="purchase_codes")
    code = models.CharField(
        _("Code"), max_length=20, blank=True,
        help_text=_("If left blank it will be automatically generated"))
    # Decremented elsewhere as codes are redeemed; 0 means exhausted.
    uses_remaining = models.PositiveIntegerField(_("Remaining uses"), default=0)

    class Meta:
        verbose_name = _("purchase code")
        verbose_name_plural = _("purchase codes")
        unique_together = ("survey", "code")

    def __str__(self):
        return self.code

    def save(self, *args, **kwargs):
        """
        Generate a UUID if the code hasn't been defined
        """
        if not self.code:
            # NOTE(review): str.strip("-") only removes leading/trailing
            # hyphens -- a no-op on a UUID string -- so the 19-char slice can
            # still contain hyphens. If hyphen-free codes were intended this
            # should be .replace("-", ""); confirm before changing, since
            # existing stored codes depend on current behavior.
            self.code = str(uuid.uuid4()).strip("-")[4:23]
        super(SurveyPurchaseCode, self).save(*args, **kwargs)
@python_2_unicode_compatible
class SurveyPurchase(TimeStamped):
    """
    A record of a user purchasing a Survey.
    """
    survey = models.ForeignKey(SurveyPage, on_delete=models.CASCADE, related_name="purchases")
    purchaser = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="survey_purchases")
    # Public-facing identifier used in URLs instead of the numeric pk.
    public_id = models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)
    transaction_id = models.CharField(_("Transaction ID"), max_length=200, blank=True)
    payment_method = models.CharField(_("Payment method"), max_length=100, blank=True)
    amount = models.DecimalField(
        _("Amount"), max_digits=8, decimal_places=2, blank=True, null=True)
    notes = models.TextField(_("Notes"), blank=True)
    # Timestamp of the last generate_report() run; report_cache holds its JSON.
    report_generated = models.DateTimeField(_("Report generated"), blank=True, null=True)
    report_cache = models.TextField(_("Report (cached)"), default="[]")

    objects = SurveyPurchaseQuerySet.as_manager()

    class Meta:
        verbose_name = _("purchase")
        verbose_name_plural = _("purchases")

    def __str__(self):
        return str(self.survey)

    def get_absolute_url(self):
        return reverse("surveys:purchase_detail", args=[self.public_id])

    def get_response_create_url(self):
        return reverse("surveys:response_create", args=[self.public_id])

    def get_complete_url(self):
        return reverse("surveys:response_complete", args=[self.public_id])

    def get_report_url(self):
        return reverse("surveys:purchase_report", args=[self.public_id])

    def generate_report(self):
        """
        Generate a report of all responses related to this purchase.
        A cached copy will be stored in self.report_cache.
        The report includes nested data in the shape of Category / Subcategory / Question.
        """
        # Imported lazily to avoid a circular import with .questions.
        from .questions import Question, QuestionResponse
        rating_responses = QuestionResponse.objects.filter(
            response__purchase=self, question__field_type=Question.RATING_FIELD)
        # Group free-text answers under their question prompt.
        text_questions = []
        for question in self.survey.get_questions().filter(field_type=Question.TEXT_FIELD):
            responses = question.responses.filter(response__purchase=self)
            text_questions.append({
                "id": question.pk,
                "prompt": question.prompt,
                "responses": list(responses.values_list("text_response", flat=True)),
            })
        report = {
            "rating": {
                "count": rating_responses.count(),
                "average": rating_responses.get_average(),
                "frequencies": rating_responses.get_frequencies(self.survey.get_rating_choices()),
            },
            "categories": self.survey.categories.get_rating_data(purchase=self),
            "text_questions": text_questions,
        }
        # Cache the serialized report and stamp the generation time.
        self.report_cache = json.dumps(report)
        self.report_generated = now()
        self.save()
        return report

    def get_report_as_json(self):
        """
        Load the cached report as JSON.
        """
        return json.loads(self.report_cache)
|
en
| 0.895917
|
# -*- coding: utf-8 -*- Survey that's available for purchase. Collect all questions related to this survey. Code to gain access to a Survey without paying. Generate a UUID if the code hasn't been defined A record of a user purchasing a Survey. Generate a report of all responses related to this purchase. A cached copy will be stored in self.report_cache. The report includes nested data in the shape of Category / Subcategory / Question. Load the cached report as JSON.
| 2.249979
| 2
|
reports/models.py
|
termtegrity/django-libreport
| 0
|
6626127
|
<filename>reports/models.py<gh_stars>0
import json
import logging
from copy import deepcopy
from datetime import datetime, time, timedelta
from importlib import import_module
from pkgutil import walk_packages
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import Q
from django.dispatch import Signal
from django.utils import timezone
from django_celery_beat.models import PeriodicTask, CrontabSchedule
from django.contrib.postgres.fields import JSONField
from .base import BaseReport
from .conf import ORG_MODEL, REPORT_PACKAGES, TYPE_CHOICES
from .utils import hashed_upload_to
logger = logging.getLogger(__name__)

# Fired after a Report's document has been generated and saved.
report_generated = Signal(providing_args=["report"])

# Registry of report id -> BaseReport subclass, populated at import time.
REPORTS = {}
# Dynamically load reports
for pkg in REPORT_PACKAGES:
    path = import_module(pkg).__path__
    for loader, name, ispkg in walk_packages(path):
        mod = import_module(".".join([pkg, name]))
        for (name, cls) in mod.__dict__.items():
            # Register every concrete BaseReport subclass found in the module.
            if isinstance(cls, type) and issubclass(cls, BaseReport) \
                    and cls != BaseReport:
                report_id = cls.id.strip()
                # Skip reports that declare no id.
                if report_id in [""]:
                    continue
                # First registration wins; duplicates are logged and ignored.
                if report_id in REPORTS.keys():
                    msg = "Report with id \"{0}\" already registered." \
                        .format(report_id)
                    logger.error(msg)
                    continue
                REPORTS[report_id] = cls
def report_upload_to(instance, filename):
    """FileField upload_to callback: hashed path under the 'reports' prefix."""
    return hashed_upload_to('reports', instance.document, filename)
class BaseReportModel(models.Model):
    """
    Abstract Base Report Model for Report and ReportSchedule fields.
    Contains common columns.
    """
    # Choices built from the report classes discovered at import time.
    REPORT_CHOICES = [(r.id, r.name) for r in REPORTS.values()]

    name = models.CharField(max_length=64, blank=True)
    report = models.CharField(max_length=64, choices=REPORT_CHOICES)
    typ = models.CharField(max_length=32, choices=TYPE_CHOICES)
    organization = models.ForeignKey(ORG_MODEL, on_delete=models.CASCADE)
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, blank=True, null=True,
        on_delete=models.SET_NULL
    )
    created_at = models.DateTimeField(default=timezone.now, editable=False)
    # Arbitrary per-report configuration forwarded to the report implementation.
    config = JSONField(blank=True, default=dict)
    # Optional recipient addresses for the generated report.
    emails = ArrayField(models.EmailField(max_length=255), blank=True,
                        null=True)

    class Meta:
        abstract = True
class ReportManager(models.Manager):
    """Manager exposing a filter for reports whose generation failed."""

    def failed(self):
        """Return reports with no attached document (empty string or NULL)."""
        return self.get_queryset().filter(Q(document='') | Q(document=None))
class Report(BaseReportModel):
    """A single generated (or pending) report over a datetime range."""

    start_datetime = models.DateTimeField()
    end_datetime = models.DateTimeField()
    # Empty/NULL until generate_document() attaches the rendered file.
    document = models.FileField(upload_to=report_upload_to, blank=True,
                                null=True, max_length=1024)

    class Meta(object):
        verbose_name = "Report"
        verbose_name_plural = "Reports"

    objects = ReportManager()

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        return u'{0} ({1})'.format(self.name, self.get_report_display())

    @property
    def generated(self):
        """
        Indicates that the document is generated
        """
        return bool(self.document)

    def save(self, *args, **kwargs):
        # Default the display name from the report implementation when empty.
        if not self.name:
            self.name = self._run_instance_method('get_report_name')
        super(Report, self).save(*args, **kwargs)

    def schedule_document_generation(self):
        """
        Schedules a task to generate the document
        """
        # Imported lazily to avoid a circular import with .tasks.
        from .tasks import generate_document
        if not self.generated:
            kwargs = {'report_id': self.pk}
            # NOTE(review): countdown presumably gives the enclosing
            # transaction time to commit before the worker runs -- confirm.
            generate_document.apply_async(kwargs=kwargs, countdown=10)

    def generate_document(self):
        """
        Generate and save the document
        """
        content = self._run_instance_method('generate')
        name = self._run_instance_method('get_report_filename')
        # Setting save to false to avoid hashed_upload_to raising an exception
        # because of document not having an attached file.
        self.document.save(name, content, save=False)
        self.save()
        report_generated.send(sender=self.__class__, report=self)

    def _run_instance_method(self, method):
        # Instantiate the registered report class and invoke `method`,
        # passing this row's fields plus the stored config as kwargs.
        kwargs = deepcopy(self.config)
        if not isinstance(kwargs, dict):
            # Config may be stored as a JSON string; normalize to a dict.
            kwargs = json.loads(kwargs)
        kwargs.update({
            'typ': self.typ,
            'start_datetime': self.start_datetime,
            'end_datetime': self.end_datetime,
            'created_by': self.created_by,
            'created_at': self.created_at,
            'organization': self.organization,
            'config': self.config,
        })
        instance = REPORTS[self.report]()
        return getattr(instance, method)(**kwargs)
class ReportSchedule(BaseReportModel):
    """
    Recurring schedule that creates and generates `Report` instances
    via a django-celery-beat PeriodicTask.
    """
    PERIOD_DAILY = 'daily'
    PERIOD_WEEKLY = 'weekly'
    PERIOD_MONTHLY = 'monthly'
    PERIOD_QUARTERLY = 'quarterly'
    PERIOD_YEARLY = 'yearly'
    PERIOD_CHOICES = (
        (PERIOD_DAILY, PERIOD_DAILY.title()),
        (PERIOD_WEEKLY, PERIOD_WEEKLY.title()),
        (PERIOD_MONTHLY, PERIOD_MONTHLY.title()),
        (PERIOD_QUARTERLY, PERIOD_QUARTERLY.title()),
        (PERIOD_YEARLY, PERIOD_YEARLY.title()),
    )
    # Nullable: set lazily by set_periodic_task() and nulled when the task
    # is deleted (on_delete=SET_NULL).
    periodic_task = models.ForeignKey(
        PeriodicTask, null=True, blank=True, on_delete=models.SET_NULL
    )
    # Crontab kwargs (minute/hour/day_of_week/...) built by set_schedule().
    schedule = JSONField(blank=True, default=dict)
    period = models.CharField(max_length=32, choices=PERIOD_CHOICES,
                              default=PERIOD_WEEKLY)
    # Optional anchor datetime; when set, schedules run at its time-of-day
    # and report windows end at that time.
    report_datetime = models.DateTimeField(null=True, blank=True)

    def __unicode__(self):
        if self.name:
            return '{} ({})'.format(self.name, self.organization.name)
        return '{}-{} ({})'.format(self.report, self.pk, self.organization.name)

    def __str__(self):
        return self.__unicode__()

    def delete(self, *args, **kwargs):
        # Clean up after ourselves when deleting a report.
        # Fix: periodic_task is nullable (null=True, SET_NULL), so guard
        # against None to avoid an AttributeError on delete.
        if self.periodic_task is not None:
            self.periodic_task.delete()
        super(ReportSchedule, self).delete(*args, **kwargs)

    @classmethod
    def available_periods(cls):
        """
        Simple helper for getting available periods.

        Returns a list (previously a one-shot ``map`` iterator on Python 3).
        """
        return [choice[0] for choice in cls.PERIOD_CHOICES]

    def set_periodic_task(self):
        """
        Removes existing periodic_task if exists and sets the new one.
        Creates the corresponding CrontabSchedule as well if needed.
        """
        if self.periodic_task:
            self.periodic_task.delete()
        schedule, __ = CrontabSchedule.objects.get_or_create(**self.schedule)
        kwargs = json.dumps({'report_schedule_id': self.pk})
        task = 'reports.tasks.schedule_task'
        data = {
            'name': '{}_{}'.format(task, self.pk),
            'task': task,
            'enabled': True,
            'crontab': schedule,
            'kwargs': kwargs
        }
        self.periodic_task, __ = PeriodicTask.objects.get_or_create(**data)
        self.save()

    def datetimes_by_period(self):
        """
        Constructs start_datetime and end_datetime based on a self.period
        :return: start_datetime, end_datetime (or (None, None) for an
        unknown period)
        """
        if self.report_datetime:
            # Anchored mode: the window ends today at the anchor's
            # time-of-day and spans one period back.
            end_time = self.report_datetime.time()
            end_datetime = datetime.combine(timezone.now().date(), end_time)
            if self.period == self.PERIOD_DAILY:
                # Yesterday
                start_datetime = end_datetime - timedelta(days=1)
            elif self.period == self.PERIOD_WEEKLY:
                # Last week starting from monday
                start_datetime = end_datetime - timedelta(days=7)
            elif self.period == self.PERIOD_MONTHLY:
                # Last Months start and end date
                start_datetime = end_datetime - relativedelta(months=1)
            elif self.period == self.PERIOD_QUARTERLY:
                # Getting start and end date of the last quarter
                start_datetime = end_datetime - relativedelta(months=3)
            elif self.period == self.PERIOD_YEARLY:
                # Last year's start and end date
                start_datetime = end_datetime - relativedelta(years=1)
            else:
                return None, None
            # Exclusive lower bound: shift by one second to avoid overlap
            # with the previous window.
            start_datetime = start_datetime + timedelta(seconds=1)
            return start_datetime, end_datetime
        # Calendar-aligned mode: full previous day/week/month/quarter/year.
        today = datetime.combine(timezone.now().date(), time(0, 0, 0))
        if self.period == self.PERIOD_DAILY:
            # Yesterday
            start_datetime = today - timedelta(days=1)
            end_datetime = datetime.combine(start_datetime.date(),
                                            time(23, 59, 59))
        elif self.period == self.PERIOD_WEEKLY:
            # Last week starting from monday
            start_datetime = today - timedelta(days=7 + today.weekday())
            end_datetime = datetime.combine(
                (start_datetime + timedelta(days=6)).date(),
                time(23, 59, 59))
        elif self.period == self.PERIOD_MONTHLY:
            # Last Months start and end date
            current_month_start = today.replace(day=1)
            start_datetime = current_month_start - relativedelta(months=1)
            end_datetime = datetime.combine(
                (current_month_start - timedelta(days=1)).date(),
                time(23, 59, 59)
            )
        elif self.period == self.PERIOD_QUARTERLY:
            # Last quarter's start and end date
            year = today.year
            # Fix: use floor division -- true division yields a float on
            # Python 3, and datetime() rejects non-integer month arguments.
            last_quarter = (today.month - 1) // 3
            if last_quarter == 0:
                # in this case it should be last year's Q4
                last_quarter = 4
                year -= 1
            # Getting start and end date of the last quarter
            start_date = datetime(year, 3 * last_quarter - 2, 1)
            end_date = datetime(year, 3 * last_quarter, 1) + \
                relativedelta(months=1) - timedelta(days=1)
            start_datetime = datetime.combine(start_date.date(), time(0, 0, 0))
            end_datetime = datetime.combine(end_date.date(), time(23, 59, 59))
        elif self.period == self.PERIOD_YEARLY:
            # Last year's start and end date
            last_year = today.year - 1
            start_datetime = datetime(last_year, 1, 1, 0, 0, 0)
            end_datetime = datetime(last_year, 12, 31, 23, 59, 59)
        else:
            return None, None
        return start_datetime, end_datetime

    def set_schedule(self):
        """
        Constructs crontab format schedule based on a period and stores
        it on schedule field
        """
        if self.report_datetime:
            minute = str(self.report_datetime.minute)
            hour = str(self.report_datetime.hour)
            # Celery cron is Sunday=0, Saturday=6
            # isoweekday() is Sunday=1, Saturday=7
            day_of_week = str(self.report_datetime.isoweekday() - 1)
            day_of_month = str(self.report_datetime.day)
            month_of_year = str(self.report_datetime.month)
        else:
            minute = '0'
            hour = '6'
            day_of_week = '1'
            day_of_month = '1'
            month_of_year = '1'
        self.schedule = {
            'minute': minute,
            'hour': hour
        }
        if self.period == self.PERIOD_DAILY:
            # Runs every day at 6am
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': '*',
                'month_of_year': '*'
            })
        elif self.period == self.PERIOD_WEEKLY:
            # Runs every Monday at 6am
            self.schedule.update({
                'day_of_week': day_of_week,
                'day_of_month': '*',
                'month_of_year': '*'
            })
        elif self.period == self.PERIOD_MONTHLY:
            # Runs every 1st day of a Month at 6am
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': day_of_month,
                'month_of_year': '*'
            })
        elif self.period == self.PERIOD_QUARTERLY:
            # Runs every 1st day of a quarter at 6am
            # Fix: month_of_year is a string, so `month_of_year % 3`
            # triggered string formatting and raised TypeError. Compute the
            # crontab step offset from the integer month instead.
            month_offset = int(month_of_year) % 3
            if not month_offset:
                month_of_year = '*'
            else:
                month_of_year = str(month_offset)
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': day_of_month,
                'month_of_year': '{}/3'.format(month_of_year)
            })
        elif self.period == self.PERIOD_YEARLY:
            # Runs every 1st day of a year at 6am
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': day_of_month,
                'month_of_year': month_of_year
            })
        self.save()

    def schedule_report(self):
        """
        Creates `Report` instance and schedules it.
        """
        start_datetime, end_datetime = self.datetimes_by_period()
        data = {
            'report': self.report,
            'typ': self.typ,
            'organization': self.organization,
            'created_by': self.created_by,
            'start_datetime': start_datetime,
            'end_datetime': end_datetime,
            'config': self.config,
            'emails': self.emails,
        }
        if self.name:
            data['name'] = self.name
        report = Report.objects.create(**data)
        report.schedule_document_generation()
|
<filename>reports/models.py<gh_stars>0
import json
import logging
from copy import deepcopy
from datetime import datetime, time, timedelta
from importlib import import_module
from pkgutil import walk_packages
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
from django.db import models
from django.db.models import Q
from django.dispatch import Signal
from django.utils import timezone
from django_celery_beat.models import PeriodicTask, CrontabSchedule
from django.contrib.postgres.fields import JSONField
from .base import BaseReport
from .conf import ORG_MODEL, REPORT_PACKAGES, TYPE_CHOICES
from .utils import hashed_upload_to
logger = logging.getLogger(__name__)

# Fired after a Report's document has been generated and saved.
report_generated = Signal(providing_args=["report"])

# Registry of report id -> BaseReport subclass, populated at import time.
REPORTS = {}
# Dynamically load reports
for pkg in REPORT_PACKAGES:
    path = import_module(pkg).__path__
    for loader, name, ispkg in walk_packages(path):
        mod = import_module(".".join([pkg, name]))
        for (name, cls) in mod.__dict__.items():
            # Register every concrete BaseReport subclass found in the module.
            if isinstance(cls, type) and issubclass(cls, BaseReport) \
                    and cls != BaseReport:
                report_id = cls.id.strip()
                # Skip reports that declare no id.
                if report_id in [""]:
                    continue
                # First registration wins; duplicates are logged and ignored.
                if report_id in REPORTS.keys():
                    msg = "Report with id \"{0}\" already registered." \
                        .format(report_id)
                    logger.error(msg)
                    continue
                REPORTS[report_id] = cls
def report_upload_to(instance, filename):
    """FileField upload_to callback: hashed path under the 'reports' prefix."""
    return hashed_upload_to('reports', instance.document, filename)
class BaseReportModel(models.Model):
    """
    Abstract Base Report Model for Report and ReportSchedule fields.
    Contains common columns.
    """
    # Choices built from the report classes discovered at import time.
    REPORT_CHOICES = [(r.id, r.name) for r in REPORTS.values()]

    name = models.CharField(max_length=64, blank=True)
    report = models.CharField(max_length=64, choices=REPORT_CHOICES)
    typ = models.CharField(max_length=32, choices=TYPE_CHOICES)
    organization = models.ForeignKey(ORG_MODEL, on_delete=models.CASCADE)
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, blank=True, null=True,
        on_delete=models.SET_NULL
    )
    created_at = models.DateTimeField(default=timezone.now, editable=False)
    # Arbitrary per-report configuration forwarded to the report implementation.
    config = JSONField(blank=True, default=dict)
    # Optional recipient addresses for the generated report.
    emails = ArrayField(models.EmailField(max_length=255), blank=True,
                        null=True)

    class Meta:
        abstract = True
class ReportManager(models.Manager):
    """Manager exposing a filter for reports whose generation failed."""

    def failed(self):
        # Reports with no attached document (empty string or NULL).
        qs = self.get_queryset()
        return qs.filter(Q(document='') | Q(document=None))
class Report(BaseReportModel):
    """A single generated (or pending) report over a datetime range."""

    start_datetime = models.DateTimeField()
    end_datetime = models.DateTimeField()
    # Empty/NULL until generate_document() attaches the rendered file.
    document = models.FileField(upload_to=report_upload_to, blank=True,
                                null=True, max_length=1024)

    class Meta(object):
        verbose_name = "Report"
        verbose_name_plural = "Reports"

    objects = ReportManager()

    def __str__(self):
        return self.__unicode__()

    def __unicode__(self):
        return u'{0} ({1})'.format(self.name, self.get_report_display())

    @property
    def generated(self):
        """
        Indicates that the document is generated
        """
        return bool(self.document)

    def save(self, *args, **kwargs):
        # Default the display name from the report implementation when empty.
        if not self.name:
            self.name = self._run_instance_method('get_report_name')
        super(Report, self).save(*args, **kwargs)

    def schedule_document_generation(self):
        """
        Schedules a task to generate the document
        """
        # Imported lazily to avoid a circular import with .tasks.
        from .tasks import generate_document
        if not self.generated:
            kwargs = {'report_id': self.pk}
            # NOTE(review): countdown presumably gives the enclosing
            # transaction time to commit before the worker runs -- confirm.
            generate_document.apply_async(kwargs=kwargs, countdown=10)

    def generate_document(self):
        """
        Generate and save the document
        """
        content = self._run_instance_method('generate')
        name = self._run_instance_method('get_report_filename')
        # Setting save to false to avoid hashed_upload_to raising an exception
        # because of document not having an attached file.
        self.document.save(name, content, save=False)
        self.save()
        report_generated.send(sender=self.__class__, report=self)

    def _run_instance_method(self, method):
        # Instantiate the registered report class and invoke `method`,
        # passing this row's fields plus the stored config as kwargs.
        kwargs = deepcopy(self.config)
        if not isinstance(kwargs, dict):
            # Config may be stored as a JSON string; normalize to a dict.
            kwargs = json.loads(kwargs)
        kwargs.update({
            'typ': self.typ,
            'start_datetime': self.start_datetime,
            'end_datetime': self.end_datetime,
            'created_by': self.created_by,
            'created_at': self.created_at,
            'organization': self.organization,
            'config': self.config,
        })
        instance = REPORTS[self.report]()
        return getattr(instance, method)(**kwargs)
class ReportSchedule(BaseReportModel):
    """
    Recurring report definition.

    Owns a celery-beat ``PeriodicTask`` (built from ``period`` /
    ``schedule``) which periodically creates a concrete ``Report``
    covering the previous day, week, month, quarter or year.
    """
    PERIOD_DAILY = 'daily'
    PERIOD_WEEKLY = 'weekly'
    PERIOD_MONTHLY = 'monthly'
    PERIOD_QUARTERLY = 'quarterly'
    PERIOD_YEARLY = 'yearly'
    PERIOD_CHOICES = (
        (PERIOD_DAILY, PERIOD_DAILY.title()),
        (PERIOD_WEEKLY, PERIOD_WEEKLY.title()),
        (PERIOD_MONTHLY, PERIOD_MONTHLY.title()),
        (PERIOD_QUARTERLY, PERIOD_QUARTERLY.title()),
        (PERIOD_YEARLY, PERIOD_YEARLY.title()),
    )

    # Celery-beat task driving this schedule; absent until
    # set_periodic_task() has been called (and nulled if the task is
    # deleted out from under us).
    periodic_task = models.ForeignKey(
        PeriodicTask, null=True, blank=True, on_delete=models.SET_NULL
    )
    # Crontab kwargs (minute/hour/day_of_week/...) used to build the task.
    schedule = JSONField(blank=True, default=dict)
    period = models.CharField(max_length=32, choices=PERIOD_CHOICES,
                              default=PERIOD_WEEKLY)
    # Optional anchor datetime: when set, runs happen at this time of day
    # (and day-of-week/month for longer periods) instead of the defaults.
    report_datetime = models.DateTimeField(null=True, blank=True)

    def __unicode__(self):
        if self.name:
            return '{} ({})'.format(self.name, self.organization.name)
        return '{}-{} ({})'.format(self.report, self.pk, self.organization.name)

    def __str__(self):
        return self.__unicode__()

    def delete(self, *args, **kwargs):
        # Clean up after ourselves when deleting a report schedule.
        # BUGFIX: periodic_task is nullable (and SET_NULL on task removal),
        # so guard against AttributeError on ``None``.
        if self.periodic_task is not None:
            self.periodic_task.delete()
        super(ReportSchedule, self).delete(*args, **kwargs)

    @classmethod
    def available_periods(cls):
        """
        Simple helper for getting available periods.

        BUGFIX: returns a list instead of a one-shot ``map`` iterator so the
        result can be iterated more than once under Python 3.
        """
        return [choice[0] for choice in cls.PERIOD_CHOICES]

    def set_periodic_task(self):
        """
        Removes existing periodic_task if exists and sets the new one.
        Creates the corresponding CrontabSchedule as well if needed.
        """
        if self.periodic_task:
            self.periodic_task.delete()
        schedule, __ = CrontabSchedule.objects.get_or_create(**self.schedule)
        kwargs = json.dumps({'report_schedule_id': self.pk})
        task = 'reports.tasks.schedule_task'
        data = {
            'name': '{}_{}'.format(task, self.pk),
            'task': task,
            'enabled': True,
            'crontab': schedule,
            'kwargs': kwargs
        }
        self.periodic_task, __ = PeriodicTask.objects.get_or_create(**data)
        self.save()

    def datetimes_by_period(self):
        """
        Constructs start_datetime and end_datetime based on a self.period
        :return: start_datetime, end_datetime (both None for unknown period)
        """
        if self.report_datetime:
            # Anchor the window's end at report_datetime's time-of-day today,
            # and reach back one period for the start.
            end_time = self.report_datetime.time()
            end_datetime = datetime.combine(timezone.now().date(), end_time)
            if self.period == self.PERIOD_DAILY:
                start_datetime = end_datetime - timedelta(days=1)
            elif self.period == self.PERIOD_WEEKLY:
                start_datetime = end_datetime - timedelta(days=7)
            elif self.period == self.PERIOD_MONTHLY:
                start_datetime = end_datetime - relativedelta(months=1)
            elif self.period == self.PERIOD_QUARTERLY:
                start_datetime = end_datetime - relativedelta(months=3)
            elif self.period == self.PERIOD_YEARLY:
                start_datetime = end_datetime - relativedelta(years=1)
            else:
                return None, None
            # Nudge forward one second so consecutive windows do not overlap.
            start_datetime = start_datetime + timedelta(seconds=1)
            return start_datetime, end_datetime

        today = datetime.combine(timezone.now().date(), time(0, 0, 0))
        if self.period == self.PERIOD_DAILY:
            # Yesterday, midnight to 23:59:59.
            start_datetime = today - timedelta(days=1)
            end_datetime = datetime.combine(start_datetime.date(),
                                            time(23, 59, 59))
        elif self.period == self.PERIOD_WEEKLY:
            # Last calendar week, starting from Monday.
            start_datetime = today - timedelta(days=7 + today.weekday())
            end_datetime = datetime.combine(
                (start_datetime + timedelta(days=6)).date(),
                time(23, 59, 59))
        elif self.period == self.PERIOD_MONTHLY:
            # Last calendar month's start and end date.
            current_month_start = today.replace(day=1)
            start_datetime = current_month_start - relativedelta(months=1)
            end_datetime = datetime.combine(
                (current_month_start - timedelta(days=1)).date(),
                time(23, 59, 59)
            )
        elif self.period == self.PERIOD_QUARTERLY:
            # Last quarter's start and end date.
            year = today.year
            # BUGFIX: integer division -- true division yields a float under
            # Python 3, which breaks datetime(year, 3 * last_quarter - 2, 1).
            last_quarter = (today.month - 1) // 3
            if last_quarter == 0:
                # In Q1 the "last quarter" is the previous year's Q4.
                last_quarter = 4
                year -= 1
            start_date = datetime(year, 3 * last_quarter - 2, 1)
            end_date = datetime(year, 3 * last_quarter, 1) + \
                relativedelta(months=1) - timedelta(days=1)
            start_datetime = datetime.combine(start_date.date(), time(0, 0, 0))
            end_datetime = datetime.combine(end_date.date(), time(23, 59, 59))
        elif self.period == self.PERIOD_YEARLY:
            # Last year's start and end date.
            last_year = today.year - 1
            start_datetime = datetime(last_year, 1, 1, 0, 0, 0)
            end_datetime = datetime(last_year, 12, 31, 23, 59, 59)
        else:
            return None, None
        return start_datetime, end_datetime

    def set_schedule(self):
        """
        Constructs crontab format schedule based on a period and stores
        it on schedule field
        """
        if self.report_datetime:
            minute = str(self.report_datetime.minute)
            hour = str(self.report_datetime.hour)
            # Celery crontab uses Sunday=0 .. Saturday=6, while
            # isoweekday() is Monday=1 .. Sunday=7.
            # BUGFIX: ``isoweekday() - 1`` mapped Monday to 0 (celery's
            # Sunday), shifting every weekly run one day early; ``% 7``
            # maps Sunday(7)->0 and Monday(1)->1 as celery expects.
            day_of_week = str(self.report_datetime.isoweekday() % 7)
            day_of_month = str(self.report_datetime.day)
            month_of_year = str(self.report_datetime.month)
        else:
            # Defaults: 06:00, Monday, the 1st, January.
            minute = '0'
            hour = '6'
            day_of_week = '1'
            day_of_month = '1'
            month_of_year = '1'
        self.schedule = {
            'minute': minute,
            'hour': hour
        }
        if self.period == self.PERIOD_DAILY:
            # Runs every day at the given time.
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': '*',
                'month_of_year': '*'
            })
        elif self.period == self.PERIOD_WEEKLY:
            # Runs once a week on the anchor weekday.
            self.schedule.update({
                'day_of_week': day_of_week,
                'day_of_month': '*',
                'month_of_year': '*'
            })
        elif self.period == self.PERIOD_MONTHLY:
            # Runs once a month on the anchor day-of-month.
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': day_of_month,
                'month_of_year': '*'
            })
        elif self.period == self.PERIOD_QUARTERLY:
            # Runs on the anchor day of every third month.
            # BUGFIX: ``month_of_year`` is a string here; the old code applied
            # ``% 3`` to it, which is the string-formatting operator and
            # raises TypeError. Compute the month offset numerically.
            month_offset = int(month_of_year) % 3
            start_month = '*' if month_offset == 0 else str(month_offset)
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': day_of_month,
                'month_of_year': '{}/3'.format(start_month)
            })
        elif self.period == self.PERIOD_YEARLY:
            # Runs once a year on the anchor day and month.
            self.schedule.update({
                'day_of_week': '*',
                'day_of_month': day_of_month,
                'month_of_year': month_of_year
            })
        self.save()

    def schedule_report(self):
        """
        Creates `Report` instance and schedules it.
        """
        start_datetime, end_datetime = self.datetimes_by_period()
        data = {
            'report': self.report,
            'typ': self.typ,
            'organization': self.organization,
            'created_by': self.created_by,
            'start_datetime': start_datetime,
            'end_datetime': end_datetime,
            'config': self.config,
            'emails': self.emails,
        }
        if self.name:
            data['name'] = self.name
        report = Report.objects.create(**data)
        report.schedule_document_generation()
|
en
| 0.858095
|
# Dynamically load reports Abstract Base Report Model for Report and ReportSchedule fields. Contains common columns. Indicates that the document is generated Schedules a task to generate the document Generate and save the document # Setting save to false to avoid hashed_upload_to raising an exception # because of document not having an attached file. # Clean up after ourselves when deleting a report Simple helper for getting available periods Removes existing periodic_task if exists and sets the new one. Creates the corresponding CrontabSchedule as well if needed. Constructs start_datetime and end_datetime based on a self.period :return: start_datetime, end_datetime # Yesterday # Last week starting from monday # Last Months start and end date # Getting start and end date of the last quarter # Last year's start and end date # Yesterday # Last week starting from monday # Last Months start and end date # Last quarter's start and end date # in this case it should be last year's Q4 # Getting start and end date of the last quarter # Last year's start and end date Constructs crontab format schedule based on a period and stores it on schedule field # Celery cron is Sunday=0, Saturday=6 # isoweekday() is Sunday=1, Saturday=7 # Runs every day at 6am # Runs every Monday at 6am # Runs every 1st day of a Month at 6am # Runs every 1st day of a quarter at 6am # Runs every 1st day of a year at 6am Creates `Report` instance and schedules it.
| 2.104957
| 2
|
pdfappend.py
|
hwarpefelt/pdfappend
| 0
|
6626128
|
<gh_stars>0
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
'''
File name: pdfappend.py
Author: <NAME>
Date created: 2016-08-25
Date last modified: 2016-08-25
Python Version: 2.7.9
'''
from PyPDF2 import PdfFileMerger
import argparse
class PDFAppend:
"""
PDFAppend does the gruntwork. Basically wraps PyPDF2.
"""
def __init__(self, L, o, v=False):
"""
Construct a new 'PDFAppend' object.
:param L: A list of strings describing PDF file names
:param o: A string describing the output file name
:param v: A boolean toggle for verbose mode (optional)
:return: returns nothing
"""
self.pdfs = L
self.outname = o
self.verbose = v
self.merger = PdfFileMerger()
if self.verbose:
print "+++ PDFAppend set up to create " + str(self.outname) + " from :" + str(self.pdfs)
def process(self):
"""
Processes the input files.
:return: True if successful, False if not.
"""
if self.verbose:
print "+++ Processing initiated"
if (len(self.pdfs) > 0 and len(self.outname) > 0):
if self.verbose:
print "+++ Data valid. Continuing..."
for pdf in self.pdfs:
try:
if self.verbose:
print "+++ Appending " + str(pdf)
f = open(pdf, "rb")
self.merger.append(f)
except IOError:
print "!!! I/O Error: Cannot open file " + str(pdf)
return False
if self.verbose:
print "+++ All files appended. Proceeding to write file " + str(self.outname)
try:
fout = open(self.outname, "wb")
self.merger.write(fout)
if self.verbose:
print "+++ File " + str(self.outname) + " written."
except IOError:
print "!!! I/O Error: Cannot write to file " + str(self.outname)
return False
return True
else:
if self.verbose:
print "!!! Failed to create PDF file."
if len(self.pdfs <= 0):
print "!!! No files to add"
if len(self.outname <= 0):
print "!!! Output name not specified"
return False
def set_verbose(self):
"""
Sets the PDFAppend object to verbose mode.
"""
self.verbose = True
def main():
"""
Main class. Processes command line arguments and sets up a PDFAppend object.
"""
parser = argparse.ArgumentParser(description="pdfappend is a utility for appending PDF files.")
parser.add_argument('-o', metavar='output', type=str, default='output.pdf', help='The name of the output file')
parser.add_argument('-v', action='store_const', const=True, help='Produce verbose input')
parser.add_argument('pdf', metavar="pdf", type=str, nargs='+', help='A PDF file to be appended')
args = parser.parse_args()
p = PDFAppend(args.pdf, args.o)
if (args.v):
verbose = True
print "+++ Setting verbose mode"
p.set_verbose()
if not p.process():
print "!!! Failed to append PDF files"
if __name__ == "__main__": main()
|
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
'''
File name: pdfappend.py
Author: <NAME>
Date created: 2016-08-25
Date last modified: 2016-08-25
Python Version: 2.7.9
'''
from PyPDF2 import PdfFileMerger
import argparse
class PDFAppend:
"""
PDFAppend does the gruntwork. Basically wraps PyPDF2.
"""
def __init__(self, L, o, v=False):
"""
Construct a new 'PDFAppend' object.
:param L: A list of strings describing PDF file names
:param o: A string describing the output file name
:param v: A boolean toggle for verbose mode (optional)
:return: returns nothing
"""
self.pdfs = L
self.outname = o
self.verbose = v
self.merger = PdfFileMerger()
if self.verbose:
print "+++ PDFAppend set up to create " + str(self.outname) + " from :" + str(self.pdfs)
def process(self):
"""
Processes the input files.
:return: True if successful, False if not.
"""
if self.verbose:
print "+++ Processing initiated"
if (len(self.pdfs) > 0 and len(self.outname) > 0):
if self.verbose:
print "+++ Data valid. Continuing..."
for pdf in self.pdfs:
try:
if self.verbose:
print "+++ Appending " + str(pdf)
f = open(pdf, "rb")
self.merger.append(f)
except IOError:
print "!!! I/O Error: Cannot open file " + str(pdf)
return False
if self.verbose:
print "+++ All files appended. Proceeding to write file " + str(self.outname)
try:
fout = open(self.outname, "wb")
self.merger.write(fout)
if self.verbose:
print "+++ File " + str(self.outname) + " written."
except IOError:
print "!!! I/O Error: Cannot write to file " + str(self.outname)
return False
return True
else:
if self.verbose:
print "!!! Failed to create PDF file."
if len(self.pdfs <= 0):
print "!!! No files to add"
if len(self.outname <= 0):
print "!!! Output name not specified"
return False
def set_verbose(self):
"""
Sets the PDFAppend object to verbose mode.
"""
self.verbose = True
def main():
"""
Main class. Processes command line arguments and sets up a PDFAppend object.
"""
parser = argparse.ArgumentParser(description="pdfappend is a utility for appending PDF files.")
parser.add_argument('-o', metavar='output', type=str, default='output.pdf', help='The name of the output file')
parser.add_argument('-v', action='store_const', const=True, help='Produce verbose input')
parser.add_argument('pdf', metavar="pdf", type=str, nargs='+', help='A PDF file to be appended')
args = parser.parse_args()
p = PDFAppend(args.pdf, args.o)
if (args.v):
verbose = True
print "+++ Setting verbose mode"
p.set_verbose()
if not p.process():
print "!!! Failed to append PDF files"
if __name__ == "__main__": main()
|
en
| 0.680801
|
# Copyright (c) 2016 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File name: pdfappend.py
Author: <NAME>
Date created: 2016-08-25
Date last modified: 2016-08-25
Python Version: 2.7.9 PDFAppend does the gruntwork. Basically wraps PyPDF2. Construct a new 'PDFAppend' object.
:param L: A list of strings describing PDF file names
:param o: A string describing the output file name
:param v: A boolean toggle for verbose mode (optional)
:return: returns nothing Processes the input files.
:return: True if successful, False if not. Sets the PDFAppend object to verbose mode. Main class. Processes command line arguments and sets up a PDFAppend object.
| 2.536924
| 3
|
dataset/consistent/related_process_title.py
|
open-contracting/pelican-backend
| 1
|
6626129
|
import random
from tools.checks import get_empty_result_dataset
from tools.getter import get_values
version = 1.0
examples_cap = 100
def add_item(scope, item, item_id):
    """
    Accumulate one release (``item``) into ``scope``.

    Tracks, per OCID, the tender title of the "original" process and every
    relatedProcess entry (scheme == "ocid") pointing at it, so quoted titles
    can be compared against the originals. Pairs whose original has already
    been seen are scored immediately via ``pick_examples``; the rest are
    parked in ``pending_related_processes`` until the original arrives.
    """
    if not scope:
        # First call: lazily build the accumulator structure.
        scope = {
            "original_ocid": dict(),
            "related_processes": dict(),
            "meta": {
                "total_processed": 0,
                "total_passed": 0,
                "total_failed": 0,
                "passed_examples": [],
                "failed_examples": [],
            },
        }

    ocid = item["ocid"]
    values = get_values(item, "tender.title")
    tender_title = values[0]["value"] if values else None

    if ocid and tender_title:
        if ocid in scope["original_ocid"]:
            # Earlier releases already referenced this OCID; record the real
            # title and resolve every pending reference against it.
            scope["original_ocid"][ocid]["found"] = True
            scope["original_ocid"][ocid]["title"] = tender_title
            for key in scope["original_ocid"][ocid]["pending_related_processes"]:
                scope = pick_examples(
                    scope,
                    key,
                    scope["related_processes"][key]["related_title"] == scope["original_ocid"][ocid]["title"],
                )
                # NOTE(review): if the same (ocid, identifier) pair were
                # queued twice, the second ``del`` would raise KeyError --
                # confirm duplicates cannot occur upstream.
                del scope["related_processes"][key]
            scope["original_ocid"][ocid]["pending_related_processes"].clear()
        else:
            scope["original_ocid"][ocid] = {"pending_related_processes": [], "found": True, "title": tender_title}

    # Collect relatedProcesses both at release level and per contract.
    related_processes = []
    related_processes.extend(get_values(item, "relatedProcesses"))
    related_processes.extend(get_values(item, "contracts.relatedProcesses"))
    for related_process in related_processes:
        # checking if all required fields are set
        if "scheme" not in related_process["value"] or related_process["value"]["scheme"] != "ocid":
            continue
        if "identifier" not in related_process["value"] or related_process["value"]["identifier"] is None:
            continue
        if "title" not in related_process["value"] or related_process["value"]["title"] is None:
            continue

        key = (ocid, related_process["value"]["identifier"])
        scope["related_processes"][key] = {
            "ocid": ocid,
            "related_ocid": related_process["value"]["identifier"],
            "related_title": related_process["value"]["title"],
            "related_path": related_process["path"],
        }
        if scope["related_processes"][key]["related_ocid"] in scope["original_ocid"]:
            if scope["original_ocid"][scope["related_processes"][key]["related_ocid"]]["found"]:
                # Original already known: score this pair right away.
                scope = pick_examples(
                    scope,
                    key,
                    scope["related_processes"][key]["related_title"]
                    == scope["original_ocid"][scope["related_processes"][key]["related_ocid"]]["title"],
                )
                del scope["related_processes"][key]
            else:
                # Original referenced before but not yet seen: queue for later.
                scope["original_ocid"][scope["related_processes"][key]["related_ocid"]][
                    "pending_related_processes"
                ].append(key)
        else:
            # First reference to this OCID: create a placeholder entry.
            scope["original_ocid"][scope["related_processes"][key]["related_ocid"]] = {
                "pending_related_processes": [key],
                "found": False,
                "title": None,
            }

    return scope
def get_result(scope):
    """Aggregate the collected comparisons into the dataset-level result."""
    result = get_empty_result_dataset(version)

    # Score the still-unresolved pairs whose original release did show up.
    for key, related in scope["related_processes"].items():
        original = scope["original_ocid"][related["related_ocid"]]
        if not original["found"]:
            continue
        scope = pick_examples(
            scope, key, related["related_title"] == original["title"]
        )

    total = scope["meta"]["total_processed"]
    if total == 0:
        result["meta"] = {"reason": "there are no pairs of related processes with check-specific properties"}
    else:
        passed = scope["meta"]["total_passed"]
        result["result"] = passed == total
        result["value"] = 100 * (passed / total)
        result["meta"] = scope["meta"]
    return result
def pick_examples(scope, related_process_key, result):
    """Record one pass/fail example, reservoir-sampled up to ``examples_cap``."""
    related = scope["related_processes"][related_process_key]
    example = {
        "original_process": {
            "ocid": related["related_ocid"],
            "title": scope["original_ocid"][related["related_ocid"]]["title"],
        },
        "related_process": related,
        "result": result,
    }

    meta = scope["meta"]
    if result:
        bucket, counter = "passed_examples", "total_passed"
    else:
        bucket, counter = "failed_examples", "total_failed"

    if meta[counter] < examples_cap:
        # Reservoir not yet full: keep unconditionally.
        meta[bucket].append(example)
    else:
        # Classic reservoir sampling: replace a random slot with
        # probability examples_cap / (count + 1).
        slot = random.randint(0, meta[counter])
        if slot < examples_cap:
            meta[bucket][slot] = example
    meta[counter] += 1
    meta["total_processed"] += 1
    return scope
|
import random
from tools.checks import get_empty_result_dataset
from tools.getter import get_values
version = 1.0
examples_cap = 100
def add_item(scope, item, item_id):
if not scope:
scope = {
"original_ocid": dict(),
"related_processes": dict(),
"meta": {
"total_processed": 0,
"total_passed": 0,
"total_failed": 0,
"passed_examples": [],
"failed_examples": [],
},
}
ocid = item["ocid"]
values = get_values(item, "tender.title")
tender_title = values[0]["value"] if values else None
if ocid and tender_title:
if ocid in scope["original_ocid"]:
scope["original_ocid"][ocid]["found"] = True
scope["original_ocid"][ocid]["title"] = tender_title
for key in scope["original_ocid"][ocid]["pending_related_processes"]:
scope = pick_examples(
scope,
key,
scope["related_processes"][key]["related_title"] == scope["original_ocid"][ocid]["title"],
)
del scope["related_processes"][key]
scope["original_ocid"][ocid]["pending_related_processes"].clear()
else:
scope["original_ocid"][ocid] = {"pending_related_processes": [], "found": True, "title": tender_title}
related_processes = []
related_processes.extend(get_values(item, "relatedProcesses"))
related_processes.extend(get_values(item, "contracts.relatedProcesses"))
for related_process in related_processes:
# checking if all required fields are set
if "scheme" not in related_process["value"] or related_process["value"]["scheme"] != "ocid":
continue
if "identifier" not in related_process["value"] or related_process["value"]["identifier"] is None:
continue
if "title" not in related_process["value"] or related_process["value"]["title"] is None:
continue
key = (ocid, related_process["value"]["identifier"])
scope["related_processes"][key] = {
"ocid": ocid,
"related_ocid": related_process["value"]["identifier"],
"related_title": related_process["value"]["title"],
"related_path": related_process["path"],
}
if scope["related_processes"][key]["related_ocid"] in scope["original_ocid"]:
if scope["original_ocid"][scope["related_processes"][key]["related_ocid"]]["found"]:
scope = pick_examples(
scope,
key,
scope["related_processes"][key]["related_title"]
== scope["original_ocid"][scope["related_processes"][key]["related_ocid"]]["title"],
)
del scope["related_processes"][key]
else:
scope["original_ocid"][scope["related_processes"][key]["related_ocid"]][
"pending_related_processes"
].append(key)
else:
scope["original_ocid"][scope["related_processes"][key]["related_ocid"]] = {
"pending_related_processes": [key],
"found": False,
"title": None,
}
return scope
def get_result(scope):
result = get_empty_result_dataset(version)
for key in scope["related_processes"]:
if not scope["original_ocid"][scope["related_processes"][key]["related_ocid"]]["found"]:
continue
scope = pick_examples(
scope,
key,
scope["related_processes"][key]["related_title"]
== scope["original_ocid"][scope["related_processes"][key]["related_ocid"]]["title"],
)
if scope["meta"]["total_processed"] == 0:
result["meta"] = {"reason": "there are no pairs of related processes with check-specific properties"}
else:
result["result"] = scope["meta"]["total_passed"] == scope["meta"]["total_processed"]
result["value"] = 100 * (scope["meta"]["total_passed"] / scope["meta"]["total_processed"])
result["meta"] = scope["meta"]
return result
def pick_examples(scope, related_process_key, result):
original_process = {
"ocid": scope["related_processes"][related_process_key]["related_ocid"],
"title": scope["original_ocid"][scope["related_processes"][related_process_key]["related_ocid"]]["title"],
}
example = {
"original_process": original_process,
"related_process": scope["related_processes"][related_process_key],
"result": result,
}
if result:
if scope["meta"]["total_passed"] < examples_cap:
scope["meta"]["passed_examples"].append(example)
else:
r = random.randint(0, scope["meta"]["total_passed"])
if r < examples_cap:
scope["meta"]["passed_examples"][r] = example
scope["meta"]["total_passed"] += 1
else:
if scope["meta"]["total_failed"] < examples_cap:
scope["meta"]["failed_examples"].append(example)
else:
r = random.randint(0, scope["meta"]["total_failed"])
if r < examples_cap:
scope["meta"]["failed_examples"][r] = example
scope["meta"]["total_failed"] += 1
scope["meta"]["total_processed"] += 1
return scope
|
en
| 0.631349
|
# checking if all required fields are set
| 2.293822
| 2
|
great_expectations/expectations/metrics/column_aggregate_metrics/column_max.py
|
vanderGoes/great_expectations
| 2
|
6626130
|
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric import (
ColumnMetricProvider,
column_aggregate_partial,
column_aggregate_value,
)
from great_expectations.expectations.metrics.import_manager import F, sa
class ColumnMax(ColumnMetricProvider):
    """Column-aggregate metric provider computing the maximum value of a column."""

    metric_name = "column.max"

    @column_aggregate_value(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        # pandas Series.max over the column values.
        return column.max()

    @column_aggregate_partial(engine=SqlAlchemyExecutionEngine)
    def _sqlalchemy(cls, column, **kwargs):
        # SQL MAX() aggregate expression, executed by the SQLAlchemy engine.
        return sa.func.max(column)

    @column_aggregate_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, **kwargs):
        # Spark SQL max() aggregate function.
        return F.max(column)
|
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.column_aggregate_metric import (
ColumnMetricProvider,
column_aggregate_partial,
column_aggregate_value,
)
from great_expectations.expectations.metrics.import_manager import F, sa
class ColumnMax(ColumnMetricProvider):
metric_name = "column.max"
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.max()
@column_aggregate_partial(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(cls, column, **kwargs):
return sa.func.max(column)
@column_aggregate_partial(engine=SparkDFExecutionEngine)
def _spark(cls, column, **kwargs):
return F.max(column)
|
none
| 1
| 2.244062
| 2
|
|
lambdata_gagansingh23/__init__.py
|
gagansingh23/lambdata
| 0
|
6626131
|
"""
a collection of data science helper functions
"""
import pandas as pd
import numpy as np
# Sample data: a single-column DataFrame of ten 1.0s and one of fifty 0.0s.
ONES = pd.DataFrame(np.ones(10))
ZEROS = pd.DataFrame(np.zeros(50))
|
"""
a collection of data science helper functions
"""
import pandas as pd
import numpy as np
#sample code
ONES = pd.DataFrame(np.ones(10))
ZEROS = pd.DataFrame(np.zeros(50))
|
en
| 0.456765
|
a collection of data science helper functions #sample code
| 2.912612
| 3
|
tests/parsers/bencode_plugins/transmission.py
|
pyllyukko/plaso
| 2
|
6626132
|
<reponame>pyllyukko/plaso
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the bencode parser plugin for Transmission BitTorrent files."""
import unittest
from plaso.lib import definitions
from plaso.parsers import bencode_parser
from tests.parsers.bencode_plugins import test_lib
class TransmissionPluginTest(test_lib.BencodePluginTestCase):
  """Tests for bencode parser plugin for Transmission BitTorrent files."""

  def testProcess(self):
    """Tests the Process function."""
    parser = bencode_parser.BencodeParser()
    storage_writer = self._ParseFile(['bencode', 'transmission'], parser)

    # The fixture is expected to parse cleanly into exactly three events.
    self.assertEqual(storage_writer.number_of_warnings, 0)
    self.assertEqual(storage_writer.number_of_events, 3)

    # The order in which BencodeParser generates events is nondeterministic
    # hence we sort the events.
    events = list(storage_writer.GetSortedEvents())

    # First (earliest) event: the torrent's "added" timestamp.
    expected_event_values = {
        'data_type': 'p2p:bittorrent:transmission',
        'destination': '/Users/brian/Downloads',
        'seedtime': 4,
        'timestamp': '2013-11-08 15:31:20.000000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}

    self.CheckEventValues(storage_writer, events[0], expected_event_values)

    # Test on second event of first torrent.
    expected_event_values = {
        'data_type': 'p2p:bittorrent:transmission',
        'destination': '/Users/brian/Downloads',
        'seedtime': 4,
        'timestamp': '2013-11-08 18:24:24.000000',
        'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED}

    self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the bencode parser plugin for Transmission BitTorrent files."""
import unittest
from plaso.lib import definitions
from plaso.parsers import bencode_parser
from tests.parsers.bencode_plugins import test_lib
class TransmissionPluginTest(test_lib.BencodePluginTestCase):
"""Tests for bencode parser plugin for Transmission BitTorrent files."""
def testProcess(self):
"""Tests the Process function."""
parser = bencode_parser.BencodeParser()
storage_writer = self._ParseFile(['bencode', 'transmission'], parser)
self.assertEqual(storage_writer.number_of_warnings, 0)
self.assertEqual(storage_writer.number_of_events, 3)
# The order in which BencodeParser generates events is nondeterministic
# hence we sort the events.
events = list(storage_writer.GetSortedEvents())
expected_event_values = {
'data_type': 'p2p:bittorrent:transmission',
'destination': '/Users/brian/Downloads',
'seedtime': 4,
'timestamp': '2013-11-08 15:31:20.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_ADDED}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Test on second event of first torrent.
expected_event_values = {
'data_type': 'p2p:bittorrent:transmission',
'destination': '/Users/brian/Downloads',
'seedtime': 4,
'timestamp': '2013-11-08 18:24:24.000000',
'timestamp_desc': definitions.TIME_DESCRIPTION_FILE_DOWNLOADED}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
en
| 0.735488
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Tests for the bencode parser plugin for Transmission BitTorrent files. Tests for bencode parser plugin for Transmission BitTorrent files. Tests the Process function. # The order in which BencodeParser generates events is nondeterministic # hence we sort the events. # Test on second event of first torrent.
| 2.432962
| 2
|
setup.py
|
aroth-arsoft/trac-advancedworkflow
| 0
|
6626133
|
<filename>setup.py
#!/usr/bin/env python
#
# Copyright (C) 2008-2014 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from setuptools import setup, find_packages
extra = {}
try:
import babel
del babel
extra['message_extractors'] = {
'advancedworkflow': [
('**.py', 'python', None),
('**.html', 'genshi', None),
],
}
from trac.util.dist import get_l10n_cmdclass
extra['cmdclass'] = get_l10n_cmdclass()
except ImportError:
pass
setup(
name='TracAdvancedTicketWorkflow',
version='1.2.2',
author='<NAME>',
author_email='<EMAIL>',
license='3-Clause BSD',
description='Advanced workflow operations Trac plugin',
long_description='Provides more advanced workflow operations for Trac 1.2',
url='https://trac-hacks.org/wiki/AdvancedTicketWorkflowPlugin',
classifiers=['Framework :: Trac'],
packages=find_packages(),
package_data={
'advancedworkflow': [
'locale/*/LC_MESSAGES/*.mo',
],
},
test_suite='advancedworkflow.tests.test_suite',
entry_points={'trac.plugins': [
'advancedworkflow.controller = advancedworkflow.controller'
]},
install_requires=['Trac'],
# zip_safe = False,
**extra)
|
<filename>setup.py
#!/usr/bin/env python
#
# Copyright (C) 2008-2014 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from setuptools import setup, find_packages
extra = {}
try:
import babel
del babel
extra['message_extractors'] = {
'advancedworkflow': [
('**.py', 'python', None),
('**.html', 'genshi', None),
],
}
from trac.util.dist import get_l10n_cmdclass
extra['cmdclass'] = get_l10n_cmdclass()
except ImportError:
pass
setup(
name='TracAdvancedTicketWorkflow',
version='1.2.2',
author='<NAME>',
author_email='<EMAIL>',
license='3-Clause BSD',
description='Advanced workflow operations Trac plugin',
long_description='Provides more advanced workflow operations for Trac 1.2',
url='https://trac-hacks.org/wiki/AdvancedTicketWorkflowPlugin',
classifiers=['Framework :: Trac'],
packages=find_packages(),
package_data={
'advancedworkflow': [
'locale/*/LC_MESSAGES/*.mo',
],
},
test_suite='advancedworkflow.tests.test_suite',
entry_points={'trac.plugins': [
'advancedworkflow.controller = advancedworkflow.controller'
]},
install_requires=['Trac'],
# zip_safe = False,
**extra)
|
en
| 0.961805
|
#!/usr/bin/env python # # Copyright (C) 2008-2014 <NAME> <<EMAIL>> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. # zip_safe = False,
| 1.38104
| 1
|
config.py
|
soolstafir/Applying-3D-U-Net-Architecture-to-the-Task-of-Multi-Organ-Segmentation-in-Computed-Tomography
| 17
|
6626134
|
import json
import os
from common import ModelType
def save_config(path_dir, config):
with open(os.path.join(path_dir, 'config.json'), 'w') as outfile:
json.dump(config, outfile)
def load_config(path_dir):
with open(path_dir, 'r') as json_file:
data = json.load(json_file)
return data
def get_config(model_type):
if ModelType.CNN == model_type:
return default_config_cnn()
elif ModelType.RNN == model_type:
return default_config_rnn()
else:
raise Exception('unkown model type:' + str(model_type))
def default_config_rnn():
return {'batch_size': 64,
'batch_size_val': 10,
'bptt': 35,
'n_epochs': 310,
'n_blocks': 2,
'n_nodes': 12,
'n_channels': 200,
'clip': 0.25,
'generation_size': 20,
'population_size': 20,
'keep_size': 0,
'mutation_p': 0.02,
'p_cross_over': 1.0,
'cross_over_type': 'Block',
'learning_rate': 20.0,
'weight_decay': 0.0001,
'dropout': 0.2,
'LRType': 'ExponentialLR',
'gamma': 0.96}
def default_config_cnn():
return {'batch_size': 128,
'batch_size_val': 1000,
'n_epochs': 310,
'n_blocks': 2,
'n_block_type': 3,
'n_nodes': 5,
'n_channels': 20,
'generation_size': 20,
'generation_per_epoch': 2,
'full_dataset': False,
'population_size': 20,
'keep_size': 0,
'mutation_p': 0.02,
'p_cross_over': 1.0,
'cross_over_type': 'Block',
'learning_rate': 0.1,
'lr_min': 0.0001,
'weight_decay': 0.0001,
'dropout': 0.2,
'drop_path_keep_prob': 1.0,
'drop_path_start_epoch': 50,
'cutout': True,
'n_holes': 1,
'length': 16,
'LRType': 'MultiStepLR',
'num_class': 10,
'momentum': 0.9,
'aux_loss': False,
'aux_scale': 0.4}
|
import json
import os
from common import ModelType
def save_config(path_dir, config):
with open(os.path.join(path_dir, 'config.json'), 'w') as outfile:
json.dump(config, outfile)
def load_config(path_dir):
with open(path_dir, 'r') as json_file:
data = json.load(json_file)
return data
def get_config(model_type):
if ModelType.CNN == model_type:
return default_config_cnn()
elif ModelType.RNN == model_type:
return default_config_rnn()
else:
raise Exception('unkown model type:' + str(model_type))
def default_config_rnn():
return {'batch_size': 64,
'batch_size_val': 10,
'bptt': 35,
'n_epochs': 310,
'n_blocks': 2,
'n_nodes': 12,
'n_channels': 200,
'clip': 0.25,
'generation_size': 20,
'population_size': 20,
'keep_size': 0,
'mutation_p': 0.02,
'p_cross_over': 1.0,
'cross_over_type': 'Block',
'learning_rate': 20.0,
'weight_decay': 0.0001,
'dropout': 0.2,
'LRType': 'ExponentialLR',
'gamma': 0.96}
def default_config_cnn():
return {'batch_size': 128,
'batch_size_val': 1000,
'n_epochs': 310,
'n_blocks': 2,
'n_block_type': 3,
'n_nodes': 5,
'n_channels': 20,
'generation_size': 20,
'generation_per_epoch': 2,
'full_dataset': False,
'population_size': 20,
'keep_size': 0,
'mutation_p': 0.02,
'p_cross_over': 1.0,
'cross_over_type': 'Block',
'learning_rate': 0.1,
'lr_min': 0.0001,
'weight_decay': 0.0001,
'dropout': 0.2,
'drop_path_keep_prob': 1.0,
'drop_path_start_epoch': 50,
'cutout': True,
'n_holes': 1,
'length': 16,
'LRType': 'MultiStepLR',
'num_class': 10,
'momentum': 0.9,
'aux_loss': False,
'aux_scale': 0.4}
|
none
| 1
| 2.357023
| 2
|
|
scraper.py
|
jovannypcg/python_scheduler
| 0
|
6626135
|
#!/usr/bin/env python
import requests
from BeautifulSoup import BeautifulSoup
from celery import Celery
HOST = 'amqp://guest@localhost'
QUEUE = 'celery_pages'
app = Celery(QUEUE, broker=HOST)
@app.task
def scrape(url):
print "-> Starting: [%s]" % url
r = requests.get(url)
soup = BeautifulSoup(r.text)
print "-> Extracted: %s" % soup.html.head.title
print "-> Done: [%s]" % url
|
#!/usr/bin/env python
import requests
from BeautifulSoup import BeautifulSoup
from celery import Celery
HOST = 'amqp://guest@localhost'
QUEUE = 'celery_pages'
app = Celery(QUEUE, broker=HOST)
@app.task
def scrape(url):
print "-> Starting: [%s]" % url
r = requests.get(url)
soup = BeautifulSoup(r.text)
print "-> Extracted: %s" % soup.html.head.title
print "-> Done: [%s]" % url
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.963649
| 3
|
AceAi.py
|
LeFatCat001/AceAi
| 2
|
6626136
|
############################################## _
#(_) _ __ ___ _ __ ___ _ __ | |_ ___
#| || '_ ` _ \ | '_ \ / _ \ | '__|| __|/ __|
#| || | | | | || |_) || (_) || | | |_ \__ \
#|_||_| |_| |_|| .__/ \___/ |_| \__||___/
# |_|
##############################################
from Package.StrooAi import *
from playsound import playsound
import os
import time
import webbrowser as web
##############################################
# / _|(_)| | ___ _ __ __ _ | |_ | |__
#| |_ | || | / _ \ | '_ \ / _` || __|| '_ \
#| _|| || || __/ | |_) || (_| || |_ | | | |
#|_| |_||_| \___| | .__/ \__,_| \__||_| |_|
# |_|
##############################################
dir_path = os.path.dirname(os.path.realpath(__file__))
beep = dir_path + '\sounds\!beep.mp3'
escaped = dir_path + '\escape.txt'
#############################################
# / \ ___ ___ / \ (_)
# / _ \ / __| / _ \ / _ \ | |
# / ___ \ | (__ | __/ / ___ \ | |
#/_/ \_\ \___| \___|/_/ \_\|_|
#############################################
while True:
res = obj.mic_input()
if re.search('ace', res): # when someone says ace ai it will activate the bot
playsound(beep) # and then playes a beep noice
bot = 'on'
while bot == 'on': # after a command is done it will stop the bot and you will have to say ace again
res = obj.mic_input()
#-HELLO-AND-HOW-ARE-YOU-CODE#################
if re.search('hello', res): # when someone says hello ai it will ask how are you
StrooSpeek1d('Hello how are you')
for i in range (1):
res = obj.mic_input()
if re.search('happy', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('excited', res):
StrooSpeek1d('great')
bot = 'off'
if re.search('satisfied', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('relaxed', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('gloomy', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('disappointed', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('hopeless', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('unhappy', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('sad', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('not bad', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('okay', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('alaight', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('marvellous', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('fine', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('ok', res):
StrooSpeek1d('thats good')
bot = 'off'
#-GAMES-ESCAPE-THE-ROOM-#####################
if re.search('games', res):
StrooSpeek1d('i have a couple games you can play')
StrooSpeek1d('1. ascape the room')
StrooSpeek1d('more comming soon or later i dont know')
for i in range (1):
res = obj.mic_input()
if re.search('escape the room', res):
StrooSpeek1d('welcome to ascape the room')
StrooSpeek1d('say control when you need them')
StrooSpeek1d('just say play when you are ready')
for i in range (1):
res = obj.mic_input()
if re.search('play', res):
StrooSpeek1d('on this game your progress dose not save just say room to get the contents of the room')
# The Game
while bot == 'on':
res = obj.mic_input()
if re.search('control', res):
StrooSpeek1d('controles (left to move left) (right to move right) (door to move to the door) (unlock to unlock an object)')
if re.search('room', res):
StrooSpeek1d('there is a garage door if you move foward there is a work bench if you move left and there is bits and pices if you move right')
if re.search('left', res):
StrooSpeek1d('on the work bench there is a safe hammer garden pot and some bits and peices')
for i in range (1):
res = obj.mic_input()
if re.search('unlock', res):
StrooSpeek1d('there is a key on the safe enter it or say no')
for i in range (1):
res = obj.mic_input()
if re.search('no', res):
StrooSpeek1d('your back in the center of the room')
if re.search('nice', res):
StrooSpeek1d('the safe has unlocked and it has a key in it you have picked up the key')
f = open('escape.txt', 'w')
f.write('yes')
f.close()
if re.search('right', res):
StrooSpeek1d('there is a note which says nice')
if re.search('door', res):
try:
f = open(escaped, 'r')
lines = f.readlines()
letters = []
for letter in lines:
print(letter)
letters.append(letter)
if letters[0] == 'yes':
StrooSpeek1d('the key has unlocked the door and you are free')
StrooSpeek1d('thanks for playing')
time.sleep(5)
bot = 'off'
break
except:
StrooSpeek1d('this is not a place of intrest yet')
if re.search('cmd', res):
cmd = input('Ace\Cmd> ')
web.open('https://stackoverflow.com/questions/4719438/' + cmd)
##################################################################################################################################################
|
############################################## _
#(_) _ __ ___ _ __ ___ _ __ | |_ ___
#| || '_ ` _ \ | '_ \ / _ \ | '__|| __|/ __|
#| || | | | | || |_) || (_) || | | |_ \__ \
#|_||_| |_| |_|| .__/ \___/ |_| \__||___/
# |_|
##############################################
from Package.StrooAi import *
from playsound import playsound
import os
import time
import webbrowser as web
##############################################
# / _|(_)| | ___ _ __ __ _ | |_ | |__
#| |_ | || | / _ \ | '_ \ / _` || __|| '_ \
#| _|| || || __/ | |_) || (_| || |_ | | | |
#|_| |_||_| \___| | .__/ \__,_| \__||_| |_|
# |_|
##############################################
dir_path = os.path.dirname(os.path.realpath(__file__))
beep = dir_path + '\sounds\!beep.mp3'
escaped = dir_path + '\escape.txt'
#############################################
# / \ ___ ___ / \ (_)
# / _ \ / __| / _ \ / _ \ | |
# / ___ \ | (__ | __/ / ___ \ | |
#/_/ \_\ \___| \___|/_/ \_\|_|
#############################################
while True:
res = obj.mic_input()
if re.search('ace', res): # when someone says ace ai it will activate the bot
playsound(beep) # and then playes a beep noice
bot = 'on'
while bot == 'on': # after a command is done it will stop the bot and you will have to say ace again
res = obj.mic_input()
#-HELLO-AND-HOW-ARE-YOU-CODE#################
if re.search('hello', res): # when someone says hello ai it will ask how are you
StrooSpeek1d('Hello how are you')
for i in range (1):
res = obj.mic_input()
if re.search('happy', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('excited', res):
StrooSpeek1d('great')
bot = 'off'
if re.search('satisfied', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('relaxed', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('gloomy', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('disappointed', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('hopeless', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('unhappy', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('sad', res):
StrooSpeek1d('thats not verry good')
bot = 'off'
if re.search('not bad', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('okay', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('alaight', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('marvellous', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('fine', res):
StrooSpeek1d('thats good')
bot = 'off'
if re.search('ok', res):
StrooSpeek1d('thats good')
bot = 'off'
#-GAMES-ESCAPE-THE-ROOM-#####################
if re.search('games', res):
StrooSpeek1d('i have a couple games you can play')
StrooSpeek1d('1. ascape the room')
StrooSpeek1d('more comming soon or later i dont know')
for i in range (1):
res = obj.mic_input()
if re.search('escape the room', res):
StrooSpeek1d('welcome to ascape the room')
StrooSpeek1d('say control when you need them')
StrooSpeek1d('just say play when you are ready')
for i in range (1):
res = obj.mic_input()
if re.search('play', res):
StrooSpeek1d('on this game your progress dose not save just say room to get the contents of the room')
# The Game
while bot == 'on':
res = obj.mic_input()
if re.search('control', res):
StrooSpeek1d('controles (left to move left) (right to move right) (door to move to the door) (unlock to unlock an object)')
if re.search('room', res):
StrooSpeek1d('there is a garage door if you move foward there is a work bench if you move left and there is bits and pices if you move right')
if re.search('left', res):
StrooSpeek1d('on the work bench there is a safe hammer garden pot and some bits and peices')
for i in range (1):
res = obj.mic_input()
if re.search('unlock', res):
StrooSpeek1d('there is a key on the safe enter it or say no')
for i in range (1):
res = obj.mic_input()
if re.search('no', res):
StrooSpeek1d('your back in the center of the room')
if re.search('nice', res):
StrooSpeek1d('the safe has unlocked and it has a key in it you have picked up the key')
f = open('escape.txt', 'w')
f.write('yes')
f.close()
if re.search('right', res):
StrooSpeek1d('there is a note which says nice')
if re.search('door', res):
try:
f = open(escaped, 'r')
lines = f.readlines()
letters = []
for letter in lines:
print(letter)
letters.append(letter)
if letters[0] == 'yes':
StrooSpeek1d('the key has unlocked the door and you are free')
StrooSpeek1d('thanks for playing')
time.sleep(5)
bot = 'off'
break
except:
StrooSpeek1d('this is not a place of intrest yet')
if re.search('cmd', res):
cmd = input('Ace\Cmd> ')
web.open('https://stackoverflow.com/questions/4719438/' + cmd)
##################################################################################################################################################
|
de
| 0.514872
|
############################################## _ #(_) _ __ ___ _ __ ___ _ __ | |_ ___ #| || '_ ` _ \ | '_ \ / _ \ | '__|| __|/ __| #| || | | | | || |_) || (_) || | | |_ \__ \ #|_||_| |_| |_|| .__/ \___/ |_| \__||___/ # |_| ############################################## ############################################## # / _|(_)| | ___ _ __ __ _ | |_ | |__ #| |_ | || | / _ \ | '_ \ / _` || __|| '_ \ #| _|| || || __/ | |_) || (_| || |_ | | | | #|_| |_||_| \___| | .__/ \__,_| \__||_| |_| # |_| ############################################## ############################################# # / \ ___ ___ / \ (_) # / _ \ / __| / _ \ / _ \ | | # / ___ \ | (__ | __/ / ___ \ | | #/_/ \_\ \___| \___|/_/ \_\|_| ############################################# # when someone says ace ai it will activate the bot # and then playes a beep noice # after a command is done it will stop the bot and you will have to say ace again #-HELLO-AND-HOW-ARE-YOU-CODE################# # when someone says hello ai it will ask how are you #-GAMES-ESCAPE-THE-ROOM-##################### # The Game ##################################################################################################################################################
| 2.163832
| 2
|
CarOperation.py
|
circleacid/car_sale
| 0
|
6626137
|
<gh_stars>0
import wx
import wx.grid
from mydb import Sql_operation
#跳转至管理界面
class CarOperation(wx.Frame):
'''
操作界面
'''
def __init__(self,*args,**kw):
# ensure the parent's __init__ is called
super(CarOperation,self).__init__(*args, **kw)
#设置窗口屏幕居中
self.Center()
#创建窗口
self.pnl = wx.Panel(self)
#调用操作界面函数
self.OperationInterface()
def OperationInterface(self):
#创建垂直方向box布局管理器
self.vbox = wx.BoxSizer(wx.VERTICAL)
#################################################################################
#创建logo静态文本,设置字体属性
logo = wx.StaticText(self.pnl,label="车辆信息管理")
font = logo.GetFont()
font.PointSize += 30
font = font.Bold()
logo.SetFont(font)
#添加logo静态文本到vbox布局管理中
self.vbox.Add(logo,proportion=0,flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,border=5)
#################################################################################
#创建左侧的静态框
text_frame = wx.StaticBox(self.pnl,label="选择操作")
#创建垂直方向box布局管理器
vbox_button = wx.StaticBoxSizer(text_frame,wx.VERTICAL)
#创建操作按钮、绑定事件处理
check_button = wx.Button(self.pnl,id=10,label="查看车辆信息",size=(150,50))
add_button = wx.Button(self.pnl,id=11,label="添加车辆信息",size=(150,50))
delete_button = wx.Button(self.pnl,id=12,label="删除车辆信息",size=(150,50))
quit_button = wx.Button(self.pnl,id=13,label="退出系统",size=(150,50))
update_button=wx.Button(self.pnl,id=14,label="修改车辆信息",size=(150,50))
self.Bind(wx.EVT_BUTTON,self.ClickButton,id=10,id2=14)
#添加操作按钮到vbox布局管理器
vbox_button.Add(check_button,0,wx.EXPAND | wx.BOTTOM,40)
vbox_button.Add(add_button,0,wx.EXPAND | wx.BOTTOM,40)
vbox_button.Add(delete_button,0,wx.EXPAND | wx.BOTTOM,40)
vbox_button.Add(update_button,0,wx.EXPAND | wx.BOTTOM,40)
vbox_button.Add(quit_button,0,wx.EXPAND | wx.BOTTOM,200)
#创建右侧静态框
right_showop = wx.StaticBox(self.pnl,label="显示/操作窗口",size=(800,500))
#创建垂直方向box布局管理器
self.vbox_showop = wx.StaticBoxSizer(right_showop,wx.VERTICAL)
#创建水平方向box布局管理器
hbox = wx.BoxSizer()
hbox.Add(vbox_button,0,wx.EXPAND | wx.BOTTOM,5)
hbox.Add(self.vbox_showop,0,wx.EXPAND | wx.BOTTOM,5)
#将hbox添加到垂直box
self.vbox.Add(hbox,proportion=0,flag=wx.CENTER)
#################################################################################
self.pnl.SetSizer(self.vbox)
#通过对应的按钮进行事件的跳转
def ClickButton(self,event):
Bid = event.GetId()
if Bid == 10:
print("查询操作!")
inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
inquire_button.Show()
self.Close(True)
elif Bid == 11:
print("添加操作!")
add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
add_button.Show()
self.Close(True)
elif Bid == 12:
print("删除操作!")
del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
del_button.Show()
self.Close(True)
elif Bid == 13:
self.Close(True)
elif Bid == 14:
print("修改操作!")
update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
update_button.Show()
self.Close(True)
#继承CarOperation类,实现初始化操作界面
"""
进行数据库的查询操作
"""
class InquireOp(CarOperation):
def __init__(self,*args,**kw):
# ensure the parent's __init__ is called
super(InquireOp,self).__init__(*args, **kw)
#创建学生信息网格
self.cgrid = self.CreateGrid()
self.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,self.OnLabelleftClick)
#添加到vbox_showop布局管理器
self.vbox_showop.Add(self.cgrid,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,30)
#此处的ClickButton用于事件之间的跳转
def ClickButton(self,event):
Bid = event.GetId()
if Bid == 10:
pass
elif Bid == 11:
print("添加操作!")
add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
add_button.Show()
self.Close(True)
elif Bid == 12:
print("删除操作!")
del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
del_button.Show()
self.Close(True)
elif Bid == 13:
self.Close(True)
elif Bid == 14:
print("修改操作!")
update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
update_button.Show()
self.Close(True)
#创建用于显示数据的表格
def CreateGrid(self):
#连接car_sale数据库
op = Sql_operation("car_sale")
#获取car表中的学生信息,返回为二维元组
np = op.FindAll("car")
column_names = ("车辆编号","型号","颜色","生产厂商","出厂日期","价格")
cgrid = wx.grid.Grid(self.pnl)
#CreateGrid(行数,列数)
cgrid.CreateGrid(len(np),len(np[0])-1)
for row in range(len(np)):
#表格横向为对应表中的属性,纵向为首个属性的数据
cgrid.SetRowLabelValue(row,str(np[row][0]))
for col in range(1,len(np[row])):
cgrid.SetColLabelValue(col-1,column_names[col])
cgrid.SetCellValue(row,col-1,str(np[row][col]))
cgrid.AutoSize()
return cgrid
def OnLabelleftClick(self,event):
#连接car_sale数据库
op = Sql_operation("car_sale")
np = op.FindAll("car")
print("RowIdx: {0}".format(event.GetRow()))
print("ColIdx: {0}".format(event.GetRow()))
print(np[event.GetRow()])
event.Skip()
#继承CarOperation类,实现初始化操作界面
"""
数据库插入操作
"""
class AddOp(CarOperation):
def __init__(self,*args,**kw):
super(AddOp,self).__init__(*args, **kw)
#创建表中属性文本框
self.car_no = wx.TextCtrl(self.pnl,size = (210,25))
self.car_type = wx.TextCtrl(self.pnl,size = (210,25))
self.car_color = wx.TextCtrl(self.pnl,size = (210,25))
self.car_maner = wx.TextCtrl(self.pnl,size = (210,25))
self.car_date = wx.TextCtrl(self.pnl,size = (210,25))
self.car_price=wx.TextCtrl(self.pnl,size=(210,25))
self.add_affirm = wx.Button(self.pnl,label="添加",size=(80,25))
#为添加按钮组件绑定事件处理
self.add_affirm.Bind(wx.EVT_BUTTON,self.AddAffirm)
#创建静态框
text_no = wx.StaticBox(self.pnl,label="车辆编号")
text_type = wx.StaticBox(self.pnl,label="型 号")
text_color = wx.StaticBox(self.pnl,label="颜 色")
text_maner = wx.StaticBox(self.pnl,label="生产厂商")
text_date = wx.StaticBox(self.pnl,label="出厂日期")
text_price=wx.StaticBox(self.pnl,label="价 格")
#创建水平方向box布局管理器
hbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)
hbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)
hbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)
hbox_maner = wx.StaticBoxSizer(text_maner,wx.HORIZONTAL)
hbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)
hbox_price=wx.StaticBoxSizer(text_price,wx.HORIZONTAL)
#添加到hsbox布局管理器
hbox_no.Add(self.car_no,0,wx.EXPAND | wx.BOTTOM,5)
hbox_type.Add(self.car_type,0,wx.EXPAND | wx.BOTTOM,5)
hbox_color.Add(self.car_color,0,wx.EXPAND | wx.BOTTOM,5)
hbox_maner.Add(self.car_maner,0,wx.EXPAND | wx.BOTTOM,5)
hbox_date.Add(self.car_date,0,wx.EXPAND | wx.BOTTOM,5)
hbox_price.Add(self.car_price,0,wx.EXPAND | wx.BOTTOM,5)
#################################################################################
#添加到vbox_showop布局管理器
self.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_maner,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_price,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(self.add_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
def ClickButton(self,event):
Bid = event.GetId()
if Bid == 10:
print("查询操作!")
inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
inquire_button.Show()
self.Close(True)
elif Bid == 11:
pass
elif Bid == 12:
print("删除操作!")
del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
del_button.Show()
self.Close(True)
elif Bid == 13:
self.Close(True)
elif Bid == 14:
print("修改操作!")
update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
update_button.Show()
self.Close(True)
def AddAffirm(self,event):
#连接car_sale数据库
op = Sql_operation("car_sale")
#向stu_information表添加学生信息
car_no = self.car_no.GetValue()
print(car_no)
car_type = self.car_type.GetValue()
print(car_type)
car_color = self.car_color.GetValue()
print(car_color)
car_maner = self.car_maner.GetValue()
print(car_maner)
car_date = self.car_date.GetValue()
print(car_date)
car_price=self.car_price.GetValue()
print(car_price)
np = op.CarInsert(car_no,car_type,car_color,car_maner,car_date,car_price)
#继承InquireOp类,实现初始化操作界面
class DelOp(InquireOp):
def __init__(self,*args,**kw):
# ensure the parent's __init__ is called
super(DelOp,self).__init__(*args, **kw)
#创建删除车辆输入框、删除按钮
self.del_id = wx.TextCtrl(self.pnl,pos = (407,400),size = (210,25))
self.del_affirm = wx.Button(self.pnl,label="删除",pos=(625,400),size=(80,25))
#为删除按钮组件绑定事件处理
self.del_affirm.Bind(wx.EVT_BUTTON,self.DelAffirm)
#################################################################################
#创建静态框
text_del = wx.StaticBox(self.pnl,label="请选择需要删除的车辆编号")
#创建水平方向box布局管理器
hbox_del = wx.StaticBoxSizer(text_del,wx.HORIZONTAL)
#添加到hbox_name布局管理器
hbox_del.Add(self.del_id,0,wx.EXPAND | wx.BOTTOM,5)
#添加到vbox_showop布局管理器
self.vbox_showop.Add(hbox_del,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(self.del_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
def ClickButton(self,event):
Bid = event.GetId()
if Bid == 10:
print("查询操作!")
inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
inquire_button.Show()
self.Close(True)
elif Bid == 11:
print("添加操作!")
add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
add_button.Show()
self.Close(True)
elif Bid == 12:
pass
elif Bid == 13:
self.Close(True)
elif Bid == 14:
print("修改操作!")
update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
update_button.Show()
self.Close(True)
def DelAffirm(self,event):
#连接car_sale数据库
op = Sql_operation("car_sale")
del_id = self.del_id.GetValue()
print(del_id)
np = op.CarDel(int(del_id))
del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
del_button.Show()
self.Close(True)
class UpdateOp(CarOperation):
def __init__(self,*args,**kw):
super(UpdateOp,self).__init__(*args, **kw)
#创建表中属性文本框
self.car_no = wx.TextCtrl(self.pnl,size = (210,25))
self.car_type = wx.TextCtrl(self.pnl,size = (210,25))
self.car_color = wx.TextCtrl(self.pnl,size = (210,25))
self.car_maner = wx.TextCtrl(self.pnl,size = (210,25))
self.car_date = wx.TextCtrl(self.pnl,size = (210,25))
self.car_price=wx.TextCtrl(self.pnl,size=(210,25))
self.update_affirm = wx.Button(self.pnl,label="修改",size=(80,25))
#为添加按钮组件绑定事件处理
self.update_affirm.Bind(wx.EVT_BUTTON,self.UpdateAffirm)
#创建静态框
text_no = wx.StaticBox(self.pnl,label="车辆编号")
text_type = wx.StaticBox(self.pnl,label="型 号")
text_color = wx.StaticBox(self.pnl,label="颜 色")
text_maner = wx.StaticBox(self.pnl,label="生产厂商")
text_date = wx.StaticBox(self.pnl,label="出厂日期")
text_price=wx.StaticBox(self.pnl,label="价 格")
#创建水平方向box布局管理器
hbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)
hbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)
hbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)
hbox_maner = wx.StaticBoxSizer(text_maner,wx.HORIZONTAL)
hbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)
hbox_price=wx.StaticBoxSizer(text_price,wx.HORIZONTAL)
#添加到hsbox布局管理器
hbox_no.Add(self.car_no,0,wx.EXPAND | wx.BOTTOM,5)
hbox_type.Add(self.car_type,0,wx.EXPAND | wx.BOTTOM,5)
hbox_color.Add(self.car_color,0,wx.EXPAND | wx.BOTTOM,5)
hbox_maner.Add(self.car_maner,0,wx.EXPAND | wx.BOTTOM,5)
hbox_date.Add(self.car_date,0,wx.EXPAND | wx.BOTTOM,5)
hbox_price.Add(self.car_price,0,wx.EXPAND | wx.BOTTOM,5)
#################################################################################
#添加到vbox_showop布局管理器
self.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_maner,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(hbox_price,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
self.vbox_showop.Add(self.update_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
def ClickButton(self,event):
Bid = event.GetId()
if Bid == 10:
print("查询操作!")
inquire_button = InquireOp(None,title="用户管理系统",size=(1024,668))
inquire_button.Show()
self.Close(True)
elif Bid == 11:
print("添加操作!")
add_button=AddOp(None,title="用户管理系统",size=(1024,668))
add_button.Show()
self.Close(True)
elif Bid == 12:
print("删除操作!")
del_button = DelOp(None,title="用户管理系统",size=(1024,668))
del_button.Show()
self.Close(True)
elif Bid == 13:
self.Close(True)
elif Bid == 14:
pass
def UpdateAffirm(self,event):
#连接car_sale数据库
op = Sql_operation("car_sale")
#向stu_information表添加学生信息
car_no = self.car_no.GetValue()
print(car_no)
car_type = self.car_type.GetValue()
print(car_type)
car_color = self.car_color.GetValue()
print(car_color)
car_maner = self.car_maner.GetValue()
print(car_maner)
car_date = self.car_date.GetValue()
print(car_date)
car_price=self.car_price.GetValue()
print(car_price)
np = op.CarUpdate(car_no,car_type,car_color,car_maner,car_date,car_price)
"""
if __name__ == '__main__':
app = wx.App()
login = CarOperation(None,title="车辆管理系统",size=(1024,720))
login.Show()
app.MainLoop()
"""
|
import wx
import wx.grid
from mydb import Sql_operation
#跳转至管理界面
class CarOperation(wx.Frame):
    '''
    Main window of the car management system.

    Builds a title banner, a left-hand column of operation buttons
    (ids 10-14) and a right-hand display/operation panel
    (``self.vbox_showop``) that subclasses populate with their widgets.
    '''
    def __init__(self,*args,**kw):
        # ensure the parent's __init__ is called
        super(CarOperation,self).__init__(*args, **kw)
        # Center the window on the screen.
        self.Center()
        # Create the main panel that hosts all widgets.
        self.pnl = wx.Panel(self)
        # Build the widget tree and layout.
        self.OperationInterface()
    def OperationInterface(self):
        """Create the banner, the button column and the display panel."""
        # Vertical box sizer for the whole frame.
        self.vbox = wx.BoxSizer(wx.VERTICAL)
        #################################################################################
        # Title banner ("车辆信息管理") with an enlarged bold font.
        logo = wx.StaticText(self.pnl,label="车辆信息管理")
        font = logo.GetFont()
        font.PointSize += 30
        font = font.Bold()
        logo.SetFont(font)
        # Add the banner to the frame sizer.
        self.vbox.Add(logo,proportion=0,flag=wx.FIXED_MINSIZE | wx.TOP | wx.CENTER,border=5)
        #################################################################################
        # Left-hand static box holding the operation buttons.
        text_frame = wx.StaticBox(self.pnl,label="选择操作")
        # Vertical sizer for the buttons.
        vbox_button = wx.StaticBoxSizer(text_frame,wx.VERTICAL)
        # Operation buttons; ids 10-14 are dispatched in ClickButton.
        check_button = wx.Button(self.pnl,id=10,label="查看车辆信息",size=(150,50))
        add_button = wx.Button(self.pnl,id=11,label="添加车辆信息",size=(150,50))
        delete_button = wx.Button(self.pnl,id=12,label="删除车辆信息",size=(150,50))
        quit_button = wx.Button(self.pnl,id=13,label="退出系统",size=(150,50))
        update_button=wx.Button(self.pnl,id=14,label="修改车辆信息",size=(150,50))
        # One binding covers the whole id range 10-14.
        self.Bind(wx.EVT_BUTTON,self.ClickButton,id=10,id2=14)
        # Add the buttons to the button-column sizer.
        vbox_button.Add(check_button,0,wx.EXPAND | wx.BOTTOM,40)
        vbox_button.Add(add_button,0,wx.EXPAND | wx.BOTTOM,40)
        vbox_button.Add(delete_button,0,wx.EXPAND | wx.BOTTOM,40)
        vbox_button.Add(update_button,0,wx.EXPAND | wx.BOTTOM,40)
        vbox_button.Add(quit_button,0,wx.EXPAND | wx.BOTTOM,200)
        # Right-hand static box: display/operation area filled by subclasses.
        right_showop = wx.StaticBox(self.pnl,label="显示/操作窗口",size=(800,500))
        # Vertical sizer subclasses append their widgets to.
        self.vbox_showop = wx.StaticBoxSizer(right_showop,wx.VERTICAL)
        # Horizontal sizer combining the two columns.
        hbox = wx.BoxSizer()
        hbox.Add(vbox_button,0,wx.EXPAND | wx.BOTTOM,5)
        hbox.Add(self.vbox_showop,0,wx.EXPAND | wx.BOTTOM,5)
        # Add the combined row to the frame sizer.
        self.vbox.Add(hbox,proportion=0,flag=wx.CENTER)
        #################################################################################
        self.pnl.SetSizer(self.vbox)
    # Dispatch button clicks to the matching operation window.
    def ClickButton(self,event):
        """Open the window for the clicked operation and close this one.

        Ids: 10=query, 11=add, 12=delete, 13=quit, 14=update.
        """
        Bid = event.GetId()
        if Bid == 10:
            print("查询操作!")
            inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
            inquire_button.Show()
            self.Close(True)
        elif Bid == 11:
            print("添加操作!")
            add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
            add_button.Show()
            self.Close(True)
        elif Bid == 12:
            print("删除操作!")
            del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
            del_button.Show()
            self.Close(True)
        elif Bid == 13:
            self.Close(True)
        elif Bid == 14:
            print("修改操作!")
            # NOTE(review): title/size differ from every other branch in
            # this file ("用户管理系统", 1024x668 vs "车辆管理系统",
            # 1024x720) — looks like a copy-paste from another project;
            # confirm intent.
            update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
            update_button.Show()
            self.Close(True)
#继承CarOperation类,实现初始化操作界面
"""
进行数据库的查询操作
"""
class InquireOp(CarOperation):
    """Query window: displays every row of the ``car`` table in a grid."""
    def __init__(self,*args,**kw):
        # ensure the parent's __init__ is called
        super(InquireOp,self).__init__(*args, **kw)
        # Build the grid of car records.
        self.cgrid = self.CreateGrid()
        self.cgrid.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK,self.OnLabelleftClick)
        # Attach the grid to the display panel.
        self.vbox_showop.Add(self.cgrid,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,30)
    def ClickButton(self,event):
        """Navigate to the other operation windows (id 10 is this window)."""
        Bid = event.GetId()
        if Bid == 10:
            # Already on the query window.
            pass
        elif Bid == 11:
            print("添加操作!")
            add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
            add_button.Show()
            self.Close(True)
        elif Bid == 12:
            print("删除操作!")
            del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
            del_button.Show()
            self.Close(True)
        elif Bid == 13:
            self.Close(True)
        elif Bid == 14:
            print("修改操作!")
            # Consistency fix: use the same title/size as the rest of
            # this module (was "用户管理系统", 1024x668).
            update_button = UpdateOp(None,title="车辆管理系统",size=(1024,720))
            update_button.Show()
            self.Close(True)
    def CreateGrid(self):
        """Build and return a wx grid filled with every row of ``car``.

        The first DB column becomes the row label; the remaining columns
        are labelled with the attribute names.
        """
        # Connect to the car_sale database.
        op = Sql_operation("car_sale")
        # Fetch all rows of the car table (2-D tuple).
        np = op.FindAll("car")
        column_names = ("车辆编号","型号","颜色","生产厂商","出厂日期","价格")
        cgrid = wx.grid.Grid(self.pnl)
        # CreateGrid(rows, cols); one column less because column 0 is the label.
        cgrid.CreateGrid(len(np),len(np[0])-1)
        for row in range(len(np)):
            cgrid.SetRowLabelValue(row,str(np[row][0]))
            for col in range(1,len(np[row])):
                cgrid.SetColLabelValue(col-1,column_names[col])
                cgrid.SetCellValue(row,col-1,str(np[row][col]))
        cgrid.AutoSize()
        return cgrid
    def OnLabelleftClick(self,event):
        """Log the clicked label position and the matching record."""
        # Connect to the car_sale database.
        op = Sql_operation("car_sale")
        np = op.FindAll("car")
        print("RowIdx: {0}".format(event.GetRow()))
        # Bug fix: previously printed GetRow() under the "ColIdx" label.
        print("ColIdx: {0}".format(event.GetCol()))
        # NOTE(review): GetRow() is -1 when a *column* label is clicked,
        # which makes this print the last record — confirm intent.
        print(np[event.GetRow()])
        event.Skip()
#继承CarOperation类,实现初始化操作界面
"""
数据库插入操作
"""
class AddOp(CarOperation):
    """Add window: one text control per car attribute plus a confirm
    button that inserts the record via ``Sql_operation.CarInsert``."""
    def __init__(self,*args,**kw):
        super(AddOp,self).__init__(*args, **kw)
        # One text control per car attribute.
        self.car_no = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_type = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_color = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_maner = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_date = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_price=wx.TextCtrl(self.pnl,size=(210,25))
        self.add_affirm = wx.Button(self.pnl,label="添加",size=(80,25))
        # Confirm button triggers the database insert.
        self.add_affirm.Bind(wx.EVT_BUTTON,self.AddAffirm)
        # Labelled static box around each input.
        text_no = wx.StaticBox(self.pnl,label="车辆编号")
        text_type = wx.StaticBox(self.pnl,label="型  号")
        text_color = wx.StaticBox(self.pnl,label="颜  色")
        text_maner = wx.StaticBox(self.pnl,label="生产厂商")
        text_date = wx.StaticBox(self.pnl,label="出厂日期")
        text_price=wx.StaticBox(self.pnl,label="价  格")
        # Horizontal sizer for each labelled input.
        hbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)
        hbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)
        hbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)
        hbox_maner = wx.StaticBoxSizer(text_maner,wx.HORIZONTAL)
        hbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)
        hbox_price=wx.StaticBoxSizer(text_price,wx.HORIZONTAL)
        # Put each text control inside its labelled sizer.
        hbox_no.Add(self.car_no,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_type.Add(self.car_type,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_color.Add(self.car_color,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_maner.Add(self.car_maner,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_date.Add(self.car_date,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_price.Add(self.car_price,0,wx.EXPAND | wx.BOTTOM,5)
        #################################################################################
        # Stack the inputs and the confirm button on the display panel.
        self.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_maner,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_price,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(self.add_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
    def ClickButton(self,event):
        """Navigate to the other operation windows (id 11 is this window)."""
        Bid = event.GetId()
        if Bid == 10:
            print("查询操作!")
            inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
            inquire_button.Show()
            self.Close(True)
        elif Bid == 11:
            # Already on the add window.
            pass
        elif Bid == 12:
            print("删除操作!")
            del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
            del_button.Show()
            self.Close(True)
        elif Bid == 13:
            self.Close(True)
        elif Bid == 14:
            print("修改操作!")
            # NOTE(review): title/size differ from the other branches
            # ("用户管理系统", 1024x668) — confirm intent.
            update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
            update_button.Show()
            self.Close(True)
    def AddAffirm(self,event):
        """Collect the six car fields and insert them via CarInsert.

        NOTE(review): values are echoed with print() and not validated.
        """
        # Connect to the car_sale database.
        op = Sql_operation("car_sale")
        # Collect every field from its text control.
        car_no = self.car_no.GetValue()
        print(car_no)
        car_type = self.car_type.GetValue()
        print(car_type)
        car_color = self.car_color.GetValue()
        print(car_color)
        car_maner = self.car_maner.GetValue()
        print(car_maner)
        car_date = self.car_date.GetValue()
        print(car_date)
        car_price=self.car_price.GetValue()
        print(car_price)
        # Persist the insert; the return value is not used.
        np = op.CarInsert(car_no,car_type,car_color,car_maner,car_date,car_price)
#继承InquireOp类,实现初始化操作界面
class DelOp(InquireOp):
    """Delete window: inherits the record grid from InquireOp and adds a
    car-id input plus a confirm button that calls CarDel."""
    def __init__(self,*args,**kw):
        # ensure the parent's __init__ is called
        super(DelOp,self).__init__(*args, **kw)
        # Car-id input and delete button, placed below the grid.
        self.del_id = wx.TextCtrl(self.pnl,pos = (407,400),size = (210,25))
        self.del_affirm = wx.Button(self.pnl,label="删除",pos=(625,400),size=(80,25))
        # Confirm button triggers the database delete.
        self.del_affirm.Bind(wx.EVT_BUTTON,self.DelAffirm)
        #################################################################################
        # Labelled static box around the id input.
        text_del = wx.StaticBox(self.pnl,label="请选择需要删除的车辆编号")
        # Horizontal sizer for the labelled input.
        hbox_del = wx.StaticBoxSizer(text_del,wx.HORIZONTAL)
        hbox_del.Add(self.del_id,0,wx.EXPAND | wx.BOTTOM,5)
        # Stack the input and the button on the display panel.
        self.vbox_showop.Add(hbox_del,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(self.del_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
    def ClickButton(self,event):
        """Navigate to the other operation windows (id 12 is this window)."""
        Bid = event.GetId()
        if Bid == 10:
            print("查询操作!")
            inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
            inquire_button.Show()
            self.Close(True)
        elif Bid == 11:
            print("添加操作!")
            add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
            add_button.Show()
            self.Close(True)
        elif Bid == 12:
            # Already on the delete window.
            pass
        elif Bid == 13:
            self.Close(True)
        elif Bid == 14:
            print("修改操作!")
            # NOTE(review): title/size differ from the other branches
            # ("用户管理系统", 1024x668) — confirm intent.
            update_button=UpdateOp(None,title="用户管理系统",size=(1024,668))
            update_button.Show()
            self.Close(True)
    def DelAffirm(self,event):
        """Delete the record whose id was typed, then reopen the window
        so the refreshed grid is shown.

        NOTE(review): ``int(del_id)`` raises ValueError on non-numeric
        input — there is no validation or error dialog here.
        """
        # Connect to the car_sale database.
        op = Sql_operation("car_sale")
        del_id = self.del_id.GetValue()
        print(del_id)
        np = op.CarDel(int(del_id))
        # Recreate the window to refresh the grid, then close this one.
        del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
        del_button.Show()
        self.Close(True)
class UpdateOp(CarOperation):
    """Update window: one text control per car attribute plus a confirm
    button that persists the change via ``Sql_operation.CarUpdate``."""
    def __init__(self,*args,**kw):
        super(UpdateOp,self).__init__(*args, **kw)
        # One text control per car attribute.
        self.car_no = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_type = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_color = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_maner = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_date = wx.TextCtrl(self.pnl,size = (210,25))
        self.car_price = wx.TextCtrl(self.pnl,size=(210,25))
        self.update_affirm = wx.Button(self.pnl,label="修改",size=(80,25))
        # Confirm button triggers the database update.
        self.update_affirm.Bind(wx.EVT_BUTTON,self.UpdateAffirm)
        # Labelled static box around each input.
        text_no = wx.StaticBox(self.pnl,label="车辆编号")
        text_type = wx.StaticBox(self.pnl,label="型  号")
        text_color = wx.StaticBox(self.pnl,label="颜  色")
        text_maner = wx.StaticBox(self.pnl,label="生产厂商")
        text_date = wx.StaticBox(self.pnl,label="出厂日期")
        text_price = wx.StaticBox(self.pnl,label="价  格")
        # Horizontal sizer for each labelled input.
        hbox_no = wx.StaticBoxSizer(text_no,wx.HORIZONTAL)
        hbox_type = wx.StaticBoxSizer(text_type,wx.HORIZONTAL)
        hbox_color = wx.StaticBoxSizer(text_color,wx.HORIZONTAL)
        hbox_maner = wx.StaticBoxSizer(text_maner,wx.HORIZONTAL)
        hbox_date = wx.StaticBoxSizer(text_date,wx.HORIZONTAL)
        hbox_price = wx.StaticBoxSizer(text_price,wx.HORIZONTAL)
        # Put each text control inside its labelled sizer.
        hbox_no.Add(self.car_no,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_type.Add(self.car_type,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_color.Add(self.car_color,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_maner.Add(self.car_maner,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_date.Add(self.car_date,0,wx.EXPAND | wx.BOTTOM,5)
        hbox_price.Add(self.car_price,0,wx.EXPAND | wx.BOTTOM,5)
        #################################################################################
        # Stack the inputs and the confirm button on the display panel.
        self.vbox_showop.Add(hbox_no,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_type,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_color,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_maner,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_date,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(hbox_price,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
        self.vbox_showop.Add(self.update_affirm,0,wx.CENTER | wx.TOP | wx.FIXED_MINSIZE,5)
    def ClickButton(self,event):
        """Navigate to the other operation windows (id 14 is this window).

        Consistency fix: the sibling windows are opened with the same
        title/size used everywhere else in this module
        ("车辆管理系统", 1024x720); previously these branches used
        "用户管理系统" and 1024x668, copied from another project.
        """
        Bid = event.GetId()
        if Bid == 10:
            print("查询操作!")
            inquire_button = InquireOp(None,title="车辆管理系统",size=(1024,720))
            inquire_button.Show()
            self.Close(True)
        elif Bid == 11:
            print("添加操作!")
            add_button = AddOp(None,title="车辆管理系统",size=(1024,720))
            add_button.Show()
            self.Close(True)
        elif Bid == 12:
            print("删除操作!")
            del_button = DelOp(None,title="车辆管理系统",size=(1024,720))
            del_button.Show()
            self.Close(True)
        elif Bid == 13:
            self.Close(True)
        elif Bid == 14:
            # Already on the update window.
            pass
    def UpdateAffirm(self,event):
        """Collect the six car fields and persist them via CarUpdate.

        NOTE(review): values are echoed with print() and not validated.
        """
        # Connect to the car_sale database.
        op = Sql_operation("car_sale")
        # Collect every field from its text control.
        car_no = self.car_no.GetValue()
        print(car_no)
        car_type = self.car_type.GetValue()
        print(car_type)
        car_color = self.car_color.GetValue()
        print(car_color)
        car_maner = self.car_maner.GetValue()
        print(car_maner)
        car_date = self.car_date.GetValue()
        print(car_date)
        car_price = self.car_price.GetValue()
        print(car_price)
        # Persist the update; the return value is not used.
        np = op.CarUpdate(car_no,car_type,car_color,car_maner,car_date,car_price)
"""
if __name__ == '__main__':
app = wx.App()
login = CarOperation(None,title="车辆管理系统",size=(1024,720))
login.Show()
app.MainLoop()
"""
|
zh
| 0.799155
|
#跳转至管理界面 操作界面 # ensure the parent's __init__ is called #设置窗口屏幕居中 #创建窗口 #调用操作界面函数 #创建垂直方向box布局管理器 ################################################################################# #创建logo静态文本,设置字体属性 #添加logo静态文本到vbox布局管理中 ################################################################################# #创建左侧的静态框 #创建垂直方向box布局管理器 #创建操作按钮、绑定事件处理 #添加操作按钮到vbox布局管理器 #创建右侧静态框 #创建垂直方向box布局管理器 #创建水平方向box布局管理器 #将hbox添加到垂直box ################################################################################# #通过对应的按钮进行事件的跳转 #继承CarOperation类,实现初始化操作界面 进行数据库的查询操作 # ensure the parent's __init__ is called #创建学生信息网格 #添加到vbox_showop布局管理器 #此处的ClickButton用于事件之间的跳转 #创建用于显示数据的表格 #连接car_sale数据库 #获取car表中的学生信息,返回为二维元组 #CreateGrid(行数,列数) #表格横向为对应表中的属性,纵向为首个属性的数据 #连接car_sale数据库 #继承CarOperation类,实现初始化操作界面 数据库插入操作 #创建表中属性文本框 #为添加按钮组件绑定事件处理 #创建静态框 #创建水平方向box布局管理器 #添加到hsbox布局管理器 ################################################################################# #添加到vbox_showop布局管理器 #连接car_sale数据库 #向stu_information表添加学生信息 #继承InquireOp类,实现初始化操作界面 # ensure the parent's __init__ is called #创建删除车辆输入框、删除按钮 #为删除按钮组件绑定事件处理 ################################################################################# #创建静态框 #创建水平方向box布局管理器 #添加到hbox_name布局管理器 #添加到vbox_showop布局管理器 #连接car_sale数据库 #创建表中属性文本框 #为添加按钮组件绑定事件处理 #创建静态框 #创建水平方向box布局管理器 #添加到hsbox布局管理器 ################################################################################# #添加到vbox_showop布局管理器 #连接car_sale数据库 #向stu_information表添加学生信息 if __name__ == '__main__':
app = wx.App()
login = CarOperation(None,title="车辆管理系统",size=(1024,720))
login.Show()
app.MainLoop()
| 2.532583
| 3
|
wntr/network/controls.py
|
algchyhao/WNTR
| 0
|
6626138
|
<filename>wntr/network/controls.py
"""
The wntr.network.controls module includes methods to define network controls
and control actions. These controls modify parameters in the network during
simulation.
.. rubric:: Contents
.. autosummary::
Subject
Observer
Comparison
ControlPriority
ControlCondition
TimeOfDayCondition
SimTimeCondition
ValueCondition
TankLevelCondition
RelativeCondition
OrCondition
AndCondition
BaseControlAction
ControlAction
ControlBase
Control
ControlManager
"""
import math
import enum
import numpy as np
import logging
import six
from .elements import LinkStatus
import abc
from wntr.utils.ordered_set import OrderedSet
from collections import OrderedDict, Iterable
from .elements import Tank, Junction, Valve, Pump, Reservoir, Pipe
from wntr.utils.doc_inheritor import DocInheritor
import warnings
logger = logging.getLogger(__name__)
# Control Priorities:
# 0 is the lowest
# 3 is the highest
#
# 0:
# Open check valves/pumps if flow would be forward
# Open links for time controls
# Open links for conditional controls
# Open links connected to tanks if the tank head is larger than the minimum head plus a tolerance
# Open links connected to tanks if the tank head is smaller than the maximum head minus a tolerance
# Open pumps if power comes back up
# Start/stop leaks
# 1:
# Close links connected to tanks if the tank head is less than the minimum head (except check valves and pumps than
# only allow flow in).
# Close links connected to tanks if the tank head is larger than the maximum head (exept check valves and pumps that
# only allow flow out).
# 2:
# Open links connected to tanks if the level is low but flow would be in
# Open links connected to tanks if the level is high but flow would be out
# Close links connected to tanks if the level is low and flow would be out
# Close links connected to tanks if the level is high and flow would be in
# 3:
# Close links for time controls
# Close links for conditional controls
# Close check valves/pumps for negative flow
# Close pumps without power
class Subject(object):
    """
    Observable base class for the observer design pattern.

    Observers registered through :meth:`subscribe` have their ``update``
    method invoked, with this subject as the argument, each time
    :meth:`notify` is called.
    """
    def __init__(self):
        # Insertion-ordered so observers are notified in subscription order.
        self._observers = OrderedSet()

    def subscribe(self, observer):
        """
        Register *observer* so that its update method is called whenever
        notify is called on this subject.

        Parameters
        ----------
        observer: Observer
        """
        self._observers.add(observer)

    def unsubscribe(self, observer):
        """
        Remove *observer* so it no longer receives notifications.

        Parameters
        ----------
        observer: Observer
        """
        self._observers.remove(observer)

    def notify(self):
        """
        Invoke ``update(self)`` on every subscribed observer.
        """
        for watcher in self._observers:
            watcher.update(self)
# six.with_metaclass keeps the ABC declaration compatible with both
# Python 2 and Python 3.
class Observer(six.with_metaclass(abc.ABCMeta, object)):
    """
    A base class for observers in the observer design pattern.

    Concrete observers implement :meth:`update`, which a
    :class:`Subject` calls on every registered observer from its
    ``notify`` method.
    """
    @abc.abstractmethod
    def update(self, subject):
        """
        This method is called when the subject being observed calls notify.

        Parameters
        ----------
        subject: Subject
            The subject that called notify.
        """
        pass
class Comparison(enum.Enum):
    """
    An enum class for comparison operators.

    .. rubric:: Enum Members

    =========== ==============================================
    :attr:`~gt` greater than
    :attr:`~ge` greater than or equal to
    :attr:`~lt` less than
    :attr:`~le` less than or equal to
    :attr:`~eq` equal to
    :attr:`~ne` not equal to
    =========== ==============================================

    Each member's value is ``(ordinal, numpy comparison ufunc)``.
    """
    gt = (1, np.greater)
    ge = (2, np.greater_equal)
    lt = (3, np.less)
    le = (4, np.less_equal)
    eq = (5, np.equal)
    ne = (6, np.not_equal)

    def __str__(self):
        # EPANET-style flag form, e.g. '-eq'.
        return '-' + self.name

    @property
    def func(self):
        """The function call to use for this comparison"""
        return self.value[1]
    __call__ = func

    @property
    def symbol(self):
        """Operator symbol, e.g. '>' or '<>'."""
        symbols = {Comparison.eq: '=', Comparison.ne: '<>',
                   Comparison.gt: '>', Comparison.ge: '>=',
                   Comparison.lt: '<', Comparison.le: '<='}
        if self in symbols:
            return symbols[self]
        raise ValueError('Unknown Enum: Comparison.%s'%self)

    @property
    def text(self):
        """EPANET rule keyword for this comparison (where one exists)."""
        keywords = {Comparison.eq: 'Is', Comparison.ne: 'Not',
                    Comparison.gt: 'Above', Comparison.ge: '>=',
                    Comparison.lt: 'Below', Comparison.le: '<='}
        if self in keywords:
            return keywords[self]
        raise ValueError('Unknown Enum: Comparison.%s'%self)

    @classmethod
    def parse(cls, func):
        """Parse a member from a string alias, numpy ufunc, or member."""
        if isinstance(func, six.string_types):
            func = func.lower().strip()
        elif isinstance(func, cls):
            func = func.func
        aliases = [
            (cls.eq, [np.equal, '=', 'eq', '-eq', '==', 'is', 'equal', 'equal to']),
            (cls.ne, [np.not_equal, '<>', 'ne', '-ne', '!=', 'not', 'not_equal', 'not equal to']),
            (cls.gt, [np.greater, '>', 'gt', '-gt', 'above', 'after', 'greater', 'greater than']),
            (cls.lt, [np.less, '<', 'lt', '-lt', 'below', 'before', 'less', 'less than']),
            (cls.ge, [np.greater_equal, '>=', 'ge', '-ge', 'greater_equal', 'greater than or equal to']),
            (cls.le, [np.less_equal, '<=', 'le', '-le', 'less_equal', 'less than or equal to']),
        ]
        for member, names in aliases:
            if func in names:
                return member
        raise ValueError('Invalid Comparison name: %s'%func)
#
# Control Condition classes
#
class ControlPriority(enum.IntEnum):
    """
    An enum class for control priorities.

    IntEnum: members compare as plain integers, with larger values
    meaning higher priority.

    .. rubric:: Enum Members

    ==================== =====================================================
    :attr:`~very_low`    very low priority
    :attr:`~low`         low priority
    :attr:`~medium_low`  medium low priority
    :attr:`~medium`      medium priority
    :attr:`~medium_high` medium high priority
    :attr:`~high`        high priority
    :attr:`~very_high`   very high priority
    ==================== =====================================================
    """
    very_low = 0
    low = 1
    medium_low = 2
    medium = 3
    medium_high = 4
    high = 5
    very_high = 6
class _ControlType(enum.Enum):
    """Internal enum: when a control is evaluated within a simulation step
    (before the solve, after the solve, as a rule, or both)."""
    presolve = 0
    postsolve = 1
    rule = 2
    pre_and_postsolve = 3
class ControlCondition(six.with_metaclass(abc.ABCMeta, object)):
    """A base class for control conditions.

    Subclasses implement :meth:`requires` and :meth:`evaluate`; the truth
    value of a condition object is the result of ``evaluate()``.
    """
    def __init__(self):
        # Seconds the simulation should back up after a triggering step.
        self._backtrack = 0

    @abc.abstractmethod
    def requires(self):
        """
        Returns a set of objects required to evaluate this condition

        Returns
        -------
        required_objects: OrderedSet of object
        """
        return OrderedSet()

    @property
    def name(self):
        """
        Returns the string representation of the condition.

        Returns
        -------
        name: str
        """
        return str(self)

    @property
    def backtrack(self):
        """
        The amount of time by which the simulation should be backed up.
        Should be updated by the :class:`~wntr.network.controls.ControlCondition.evaluate` method if appropriate.

        Returns
        -------
        backtrack: int
        """
        return self._backtrack

    @abc.abstractmethod
    def evaluate(self):
        """
        Check if the condition is satisfied.

        Returns
        -------
        check: bool
        """
        pass

    def __bool__(self):
        """
        Check if the condition is satisfied.

        Returns
        -------
        check: bool
        """
        return self.evaluate()
    __nonzero__ = __bool__  # Python 2 truth-value protocol

    @classmethod
    def _parse_value(cls, value):
        """Convert *value* to a number of seconds or a status code.

        Accepts a plain number; the strings CLOSED (0), OPEN (1) or
        ACTIVE (NaN); a 24-hour ``hh[:mm[:ss]]`` clock string; or a
        12-hour clock string with an AM/PM suffix.
        """
        try:
            v = float(value)
            return v
        except ValueError:
            value = value.upper()
            if value == 'CLOSED':
                return 0
            if value == 'OPEN':
                return 1
            if value == 'ACTIVE':
                return np.nan
            words = value.split()
            hms = words[0].split(':')
            v = 0
            if len(hms) > 2:
                v += int(hms[2])
            if len(hms) > 1:
                v += int(hms[1])*60
            hour = int(hms[0])
            v += hour*3600
            if len(words) > 1 and hour <= 12:
                # 12-hour clock handling. Bug fix: "12:xx AM" is midnight
                # (shift back 12 h) and "12:xx PM" is noon (no shift);
                # previously both were shifted forward by 12 h, so e.g.
                # "12:00:00 PM" parsed as 86400 s instead of 43200 s.
                # This makes _parse_value the inverse of _sec_to_clock.
                if words[1] == 'PM' and hour != 12:
                    v += 43200
                elif words[1] == 'AM' and hour == 12:
                    v -= 43200
            return v

    def _repr_value(self, attr, value):
        # Render integral status values by their LinkStatus name.
        if attr.lower() in ['status'] and int(value) == value:
            return LinkStatus(int(value)).name
        return value

    @classmethod
    def _sec_to_hours_min_sec(cls, value):
        """Format seconds as 'HH:MM:SS'."""
        sec = float(value)
        hours = int(sec/3600.)
        sec -= hours*3600
        mm = int(sec/60.)
        sec -= mm*60
        return '{:02d}:{:02d}:{:02d}'.format(hours, mm, int(sec))

    @classmethod
    def _sec_to_days_hours_min_sec(cls, value):
        """Format seconds as '[D-]HH:MM:SS' (day prefix only when > 0)."""
        sec = float(value)
        days = int(sec/86400.)
        sec -= days*86400
        hours = int(sec/3600.)
        sec -= hours*3600
        mm = int(sec/60.)
        sec -= mm*60
        if days > 0:
            return '{}-{:02d}:{:02d}:{:02d}'.format(days, hours, mm, int(sec))
        else:
            return '{:02d}:{:02d}:{:02d}'.format(hours, mm, int(sec))

    @classmethod
    def _sec_to_clock(cls, value):
        """Format seconds-past-midnight as a 12-hour 'H:MM:SS AM/PM'
        clock string."""
        sec = float(value)
        hours = int(sec/3600.)
        sec -= hours*3600
        mm = int(sec/60.)
        sec -= mm*60
        if hours >= 12:
            pm = 'PM'
            if hours > 12:
                hours -= 12
        elif hours == 0:
            pm = 'AM'
            hours = 12
        else:
            pm = 'AM'
        return '{}:{:02d}:{:02d} {}'.format(hours, mm, int(sec), pm)
@DocInheritor({'requires', 'evaluate', 'name'})
class TimeOfDayCondition(ControlCondition):
    """Time-of-day or "clocktime" based condition statement.
    Resets automatically at 12 AM in clock time (shifted time) every day simulated. Evaluated
    from 12 AM the first day of the simulation, even if this is prior to simulation start.
    Unlike the :class:`~wntr.network.controls.SimTimeCondition`, greater-than and less-than
    relationships make sense, and reset at midnight.

    Parameters
    ----------
    model : WaterNetworkModel
        The model that the time is being compared against
    relation : str or None
        String options are 'at', 'after' or 'before'. The 'at' and None are equivalent, and only
        evaluate as True during the simulation step the time occurs. `after` evaluates as True
        from the time specified until midnight, `before` evaluates as True from midnight until
        the specified time.
    threshold : float or str
        The time (a ``float`` in decimal hours since 12 AM) used in the condition; if provided as a
        string in 'hh:mm[:ss] [am|pm]' format, the time will be parsed from the string
    repeat : bool, optional
        True by default; if False, allows for a single, timed trigger, and probably needs an
        entry for `first_day`; in this case a relation of `after` becomes True from the time until
        the end of the simulation, and `before` is True from the beginning of the simulation until
        the time specified.
    first_day : float, default=0
        Start rule on day `first_day`, with the first day of simulation as day 0
    """
    def __init__(self, model, relation, threshold, repeat=True, first_day=0):
        self._model = model
        # A bare number (no ':') is interpreted as decimal hours.
        if isinstance(threshold, str) and not ':' in threshold:
            self._threshold = float(threshold) * 3600.
        else:
            self._threshold = self._parse_value(threshold)
        if relation is None:
            self._relation = Comparison.eq
        else:
            self._relation = Comparison.parse(relation)
        self._first_day = first_day
        self._repeat = repeat
        self._backtrack = 0
        # A one-shot rule scheduled before the simulation's starting
        # clocktime cannot fire on day 0, so push it to day 1.
        if model is not None and not self._repeat and self._threshold < model._start_clocktime and first_day < 1:
            self._first_day = 1

    def _compare(self, other):
        """
        Field-by-field equality check against another condition.

        Parameters
        ----------
        other: TimeOfDayCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._relation != other._relation:
            return False
        if self._first_day != other._first_day:
            return False
        if self._repeat != other._repeat:
            return False
        return True

    @property
    def name(self):
        if not self._repeat:
            rep = '/Once'
        else:
            rep = '/Daily'
        if self._first_day > 0:
            start = '/FirstDay/{}'.format(self._first_day)
        else:
            start = '/'
        return 'ClockTime/{}/{}{}{}'.format(self._relation.text,
                                            self._sec_to_hours_min_sec(self._threshold),
                                            rep, start)

    def requires(self):
        return OrderedSet()

    def __repr__(self):
        fmt = '<TimeOfDayCondition: model, {}, {}, {}, {}>'
        return fmt.format(repr(self._relation.text), repr(self._sec_to_clock(self._threshold)),
                          repr(self._repeat), repr(self._first_day))

    def __str__(self):
        fmt = 'clock_time {:s} "{}"'.format(self._relation.symbol,
                                            self._sec_to_clock(self._threshold))
        # Bug fix: the clock_day qualifier previously *replaced* fmt,
        # discarding the clock_time comparison built above; it is now
        # appended so both clauses appear in the rendered expression.
        if not self._repeat:
            fmt = '( ' + fmt + ' && clock_day == {} )'.format(self._first_day)
        elif self._first_day > 0:
            fmt = '( ' + fmt + ' && clock_day >= {} )'.format(self._first_day)
        return fmt

    def evaluate(self):
        # Clock (shifted) times for the current and previous steps.
        cur_time = self._model._shifted_time
        prev_time = self._model._prev_shifted_time
        day = np.floor(cur_time/86400)
        if day < self._first_day:
            # NOTE(review): None here departs from the documented int
            # backtrack contract — confirm downstream handling.
            self._backtrack = None
            return False
        if self._repeat:
            # Wrap both times to seconds-since-threshold modulo one day.
            cur_time = int(cur_time - self._threshold) % 86400
            prev_time = int(prev_time - self._threshold) % 86400
        else:
            cur_time = cur_time - self._first_day * 86400.
            prev_time = prev_time - self._first_day * 86400.
        # NOTE(review): after the wrapping above, the comparisons below
        # still use self._threshold — verify this is the intended
        # semantics for the gt/lt branches.
        if self._relation is Comparison.eq and (prev_time < self._threshold and self._threshold <= cur_time):
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.gt and cur_time >= self._threshold and prev_time < self._threshold:
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.gt and cur_time >= self._threshold and prev_time >= self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.lt and cur_time >= self._threshold and prev_time < self._threshold:
            # Crossing the threshold: back up to the exact crossing time.
            self._backtrack = int(cur_time - self._threshold)
            return False
        elif self._relation is Comparison.lt and cur_time >= self._threshold and prev_time >= self._threshold:
            self._backtrack = 0
            return False
        else:
            self._backtrack = 0
            return False
@DocInheritor({'requires', 'evaluate', 'name'})
class SimTimeCondition(ControlCondition):
    """Condition based on time since start of the simulation.
    Generally, the relation should be ``None`` (converted to "at") --
    then it is *only* evaluated "at" specific times. Using greater-than or less-than type
    relationships should be reserved for complex, multi-condition statements and
    should not be used for simple controls. If ``repeat`` is used, the relationship will
    automatically be changed to an "at time" evaluation, and a warning will be raised.

    Parameters
    ----------
    model : WaterNetworkModel
        The model that the time threshold is being compared against
    relation : str or None
        String options are 'at', 'after' or 'before'. The 'at' and None are equivalent, and only
        evaluate as True during the simulation step the time occurs. After evaluates as True
        from the time specified until the end of simulation, before evaluates as True from
        start of simulation until the specified time.
    threshold : float or str
        The time (a ``float`` in decimal hours) used in the condition; if provided as a string in
        '[dd-]hh:mm[:ss]' format, then the time will be parsed from the string;
    repeat : bool or float, default=False
        If True, then repeat every 24-hours; if non-zero float, reset the
        condition every `repeat` seconds after the first_time.
    first_time : float, default=0
        Start rule at `first_time`, using that time as 0 for the condition evaluation
    """
    def __init__(self, model, relation, threshold, repeat=False, first_time=0):
        self._model = model
        # A bare number (no ':') is interpreted as decimal hours.
        if isinstance(threshold, str) and not ':' in threshold:
            self._threshold = float(threshold) * 3600.
        else:
            self._threshold = self._parse_value(threshold)
        if relation is None:
            self._relation = Comparison.eq
        else:
            self._relation = Comparison.parse(relation)
        self._repeat = repeat
        # Normalize boolean True to a daily (86400 s) repeat period.
        if repeat is True:
            self._repeat = 86400
        self._backtrack = 0
        self._first_time = first_time

    def _compare(self, other):
        """
        Field-by-field equality check against another condition.

        Parameters
        ----------
        other: SimTimeCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._repeat != other._repeat:
            return False
        if self._first_time != other._first_time:
            return False
        if self._relation != other._relation:
            return False
        return True

    @property
    def name(self):
        if not self._repeat:
            rep = ''
        else:
            rep = '%Every{}sec'.format(self._repeat)
        if self._first_time > 0:
            start = '#Start@{}sec'.format((self._first_time))
        else:
            start = ''
        return 'SimTime{}{}{}{}'.format(self._relation.symbol,
                                        (self._threshold),
                                        rep, start)

    def __repr__(self):
        fmt = '<SimTimeCondition: model, {}, {}, {}, {}>'
        return fmt.format(repr(self._relation.text), repr(self._sec_to_days_hours_min_sec(self._threshold)),
                          repr(self._repeat), repr(self._first_time))

    def __str__(self):
        fmt = '{} {} sec'.format(self._relation.symbol, self._threshold)
        # NOTE(review): __init__ converts repeat=True to 86400, so the
        # `is True` branch below appears unreachable — confirm.
        if self._repeat is True:
            fmt = '% 86400.0 ' + fmt
        elif self._repeat > 0:
            fmt = '% {:.1f} '.format(int(self._repeat)) + fmt
        if self._first_time > 0:
            fmt = '(sim_time - {:d}) '.format(int(self._first_time)) + fmt
        else:
            fmt = 'sim_time ' + fmt
        return fmt

    def requires(self):
        return OrderedSet()

    def evaluate(self):
        # Simulation times for the current and previous steps.
        cur_time = self._model.sim_time
        prev_time = self._model._prev_sim_time
        if self._repeat and cur_time > self._threshold:
            # Wrap both times into the repeat period past the threshold.
            cur_time = (cur_time - self._threshold) % self._repeat
            prev_time = (prev_time - self._threshold) % self._repeat
        if self._relation is Comparison.eq and (prev_time < self._threshold and self._threshold <= cur_time):
            # Threshold was crossed this step: back up to the exact time.
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.gt and cur_time > self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.ge and cur_time >= self._threshold and prev_time < self._threshold:
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.ge and cur_time >= self._threshold and prev_time >= self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.lt and cur_time < self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.le and cur_time <= self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.le and prev_time < self._threshold:
            # Just passed the threshold: still True, but back up to it.
            self._backtrack = int(cur_time - self._threshold)
            return True
        else:
            self._backtrack = 0
            return False
@DocInheritor({'requires', 'evaluate', 'name'})
class ValueCondition(ControlCondition):
    """Compare a network element attribute to a set value.

    Parameters
    ----------
    source_obj : object
        The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison
    source_attr : str
        The attribute of the object (such as level, pressure, setting, etc.) to
        compare against the threshold
    operation : function or str
        A two-parameter comparison function (e.g., numpy.greater, numpy.less_equal), or a
        string describing the comparison (e.g., '=', 'below', 'is', '>=', etc.)
        Words, such as 'below', are only accepted from the EPANET rules conditions list (see ...)
    threshold : float
        A value to compare the source object attribute against
    """
    def __new__(cls, source_obj, source_attr, relation, threshold):
        # Route tank level/pressure/head comparisons to the specialized
        # TankLevelCondition subclass; everything else stays generic.
        if isinstance(source_obj, Tank) and source_attr in {'level', 'pressure', 'head'}:
            return object.__new__(TankLevelCondition)
        else:
            return object.__new__(ValueCondition)

    def __getnewargs__(self):
        # Pickle support: __new__ needs the original arguments so that
        # unpickling re-dispatches to the correct subclass.
        return self._source_obj, self._source_attr, self._relation, self._threshold

    def __init__(self, source_obj, source_attr, relation, threshold):
        self._source_obj = source_obj
        self._source_attr = source_attr
        self._relation = Comparison.parse(relation)
        self._threshold = ControlCondition._parse_value(threshold)
        self._backtrack = 0

    def _compare(self, other):
        """
        Field-by-field equality check against another condition.

        Parameters
        ----------
        other: ValueCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if not self._source_obj._compare(other._source_obj):
            return False
        if self._source_attr != other._source_attr:
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._relation != other._relation:
            return False
        return True

    def requires(self):
        return OrderedSet([self._source_obj])

    @property
    def name(self):
        if hasattr(self._source_obj, 'name'):
            obj = self._source_obj.name
        else:
            obj = str(self._source_obj)
        return '{}:{}{}{}'.format(obj, self._source_attr,
                                  self._relation.symbol, self._threshold)

    def __repr__(self):
        return "<ValueCondition: {}, {}, {}, {}>".format(str(self._source_obj),
                                                         str(self._source_attr),
                                                         str(self._relation.symbol),
                                                         str(self._threshold))

    def __str__(self):
        typ = self._source_obj.__class__.__name__
        obj = str(self._source_obj)
        if hasattr(self._source_obj, 'name'):
            obj = self._source_obj.name
        att = self._source_attr
        rel = self._relation.symbol
        val = self._repr_value(att, self._threshold)
        return "{}('{}').{} {} {}".format(typ, obj, att, rel, val)

    def evaluate(self):
        cur_value = getattr(self._source_obj, self._source_attr)
        thresh_value = self._threshold
        relation = self._relation.func
        # NaN threshold is the 'ACTIVE' sentinel from _parse_value;
        # it is evaluated as "attribute > 0" instead.
        if np.isnan(self._threshold):
            relation = np.greater
            thresh_value = 0.0
        state = relation(cur_value, thresh_value)
        return bool(state)
@DocInheritor({'requires', 'evaluate'})
class TankLevelCondition(ValueCondition):
    """
    A special type of ValueCondition for tank levels/heads/pressures.

    In addition to evaluating the comparison, this condition computes a
    ``backtrack`` value: when the tank attribute crosses the threshold between
    two hydraulic steps, the simulator can rewind to approximately the time of
    the crossing.
    """
    def __init__(self, source_obj, source_attr, relation, threshold):
        """
        Parameters
        ----------
        source_obj : Tank
            The tank whose attribute is compared.
        source_attr : str
            One of 'level', 'pressure', or 'head'.
        relation : Comparison or str
            Must parse to one of <, <=, >, >= (equality is not supported).
        threshold : float
            The value to compare the tank attribute against.
        """
        relation = Comparison.parse(relation)
        if relation not in {Comparison.ge, Comparison.le, Comparison.gt, Comparison.lt}:
            # Bug fix: the previous message claimed only <= and >= were
            # supported, although the check above also accepts < and >.
            raise ValueError('TankLevelConditions only support <, <=, >, and >= relations.')
        super(TankLevelCondition, self).__init__(source_obj, source_attr, relation, threshold)
        assert source_attr in {'level', 'pressure', 'head'}
        self._last_value = getattr(self._source_obj, self._source_attr)  # this is used to see if backtracking is needed
    def _compare(self, other):
        """Return True if *other* describes the same tank-level comparison.

        Parameters
        ----------
        other: TankLevelCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if not self._source_obj._compare(other._source_obj):
            return False
        if self._source_attr != other._source_attr:
            return False
        if abs(self._threshold - other._threshold) > 1e-10:  # float tolerance
            return False
        if self._relation != other._relation:
            return False
        return True
    def evaluate(self):
        """Return True if the tank attribute satisfies the comparison.

        Side effect: updates ``self._backtrack`` with the number of seconds the
        simulation should rewind when the threshold was crossed this step.
        """
        self._backtrack = 0  # no backtracking is needed unless specified in the if statement below
        cur_value = getattr(self._source_obj, self._source_attr)  # get the current tank level
        thresh_value = self._threshold
        relation = self._relation
        # Strict inequalities are widened so a value exactly at the threshold
        # still counts as satisfying the condition.
        if relation is Comparison.gt:
            relation = Comparison.ge
        if relation is Comparison.lt:
            relation = Comparison.le
        if np.isnan(self._threshold):
            # NOTE(review): a NaN threshold falls back to "value > 0" -- confirm intent.
            relation = np.greater
            thresh_value = 0.0
        # NOTE(review): the Comparison member is called directly here, whereas
        # ValueCondition.evaluate uses ``relation.func`` -- confirm Comparison
        # is callable.
        state = relation(cur_value, thresh_value)  # determine if the condition is satisfied
        if state and not relation(self._last_value, thresh_value):
            # The threshold was crossed during the last step; estimate the
            # rewind time from the excess volume over the tank demand.
            # math.floor keeps the backtrack slightly short of the exact
            # crossing so the level lands just past the threshold; this makes
            # relation(self._last_value, thresh_value) True on the next call
            # and avoids repeatedly computing tiny backtracks.
            if self._source_obj.demand != 0:
                self._backtrack = int(math.floor((cur_value - thresh_value)*math.pi/4.0*self._source_obj.diameter**2/self._source_obj.demand))
        self._last_value = cur_value  # update the last value
        return bool(state)
@DocInheritor({'requires', 'evaluate', 'name'})
class RelativeCondition(ControlCondition):
    """Compare attributes of two different objects (e.g., levels from tanks 1 and 2).

    This type of condition does not work with the EpanetSimulator, only the
    WNTRSimulator.

    Parameters
    ----------
    source_obj : object
        The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison
    source_attr : str
        The attribute of the object (such as level, pressure, setting, etc.) to
        compare against the threshold
    relation : function
        A numpy or other comparison method that takes two values and returns a bool
        (e.g., numpy.greater, numpy.less_equal)
    threshold_obj : object
        The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison of attributes
    threshold_attr : str
        The attribute to used in the comparison evaluation
    """
    def __init__(self, source_obj, source_attr, relation, threshold_obj, threshold_attr):
        self._source_obj = source_obj
        self._source_attr = source_attr
        self._relation = Comparison.parse(relation)
        self._threshold_obj = threshold_obj
        self._threshold_attr = threshold_attr
        self._backtrack = 0
    @staticmethod
    def _display(obj):
        # An object's registered name if it has one, else its string form.
        return obj.name if hasattr(obj, 'name') else str(obj)
    def _compare(self, other):
        """Return True if *other* describes the same relative comparison.

        Parameters
        ----------
        other: RelativeCondition

        Returns
        -------
        bool
        """
        return (type(self) == type(other)
                and self._source_obj._compare(other._source_obj)
                and self._source_attr == other._source_attr
                and self._relation == other._relation
                and self._threshold_obj._compare(other._threshold_obj)
                and self._threshold_attr == other._threshold_attr)
    @property
    def name(self):
        return '{}:{}_{}_{}:{}'.format(self._display(self._source_obj), self._source_attr,
                                       self._relation.symbol,
                                       self._display(self._threshold_obj), self._threshold_attr)
    def requires(self):
        return OrderedSet([self._source_obj, self._threshold_obj])
    def __repr__(self):
        return 'RelativeCondition({}, {}, {}, {}, {})'.format(
            str(self._source_obj), str(self._source_attr), str(self._relation),
            str(self._threshold_obj), str(self._threshold_attr))
    def __str__(self):
        fmt = "{}('{}').{} {} {}('{}').{}"
        return fmt.format(self._source_obj.__class__.__name__,
                          self._display(self._source_obj),
                          self._source_attr,
                          self._relation.symbol,
                          self._threshold_obj.__class__.__name__,
                          self._display(self._threshold_obj),
                          self._threshold_attr)
    def evaluate(self):
        """Return True if source_obj.source_attr relates to threshold_obj.threshold_attr."""
        lhs = getattr(self._source_obj, self._source_attr)
        rhs = getattr(self._threshold_obj, self._threshold_attr)
        return bool(self._relation.func(lhs, rhs))
@DocInheritor({'requires', 'evaluate', 'backtrack'})
class OrCondition(ControlCondition):
    """Combine two WNTR Conditions with an OR.

    Parameters
    ----------
    cond1 : ControlCondition
        The first condition
    cond2 : ControlCondition
        The second condition
    """
    def __init__(self, cond1, cond2):
        self._condition_1 = cond1
        self._condition_2 = cond2
        # Warn about equality relations, which are unlikely to ever be hit
        # exactly during a simulation.
        for cond in (cond1, cond2):
            if isinstance(cond, (TimeOfDayCondition, SimTimeCondition, ValueCondition,
                                 TankLevelCondition, RelativeCondition)):
                if cond._relation is Comparison.eq:
                    msg = 'Using Comparison.eq with {0} will probably not work!'.format(type(cond))
                    logger.warning(msg)
                    warnings.warn(msg)
    def _compare(self, other):
        """Return True if *other* combines equivalent conditions with OR.

        Parameters
        ----------
        other: OrCondition

        Returns
        -------
        bool
        """
        return (type(self) == type(other)
                and self._condition_1._compare(other._condition_1)
                and self._condition_2._compare(other._condition_2))
    def __str__(self):
        return "( " + str(self._condition_1) + " || " + str(self._condition_2) + " )"
    def __repr__(self):
        return 'Or({}, {})'.format(repr(self._condition_1), repr(self._condition_2))
    def evaluate(self):
        return bool(self._condition_1) or bool(self._condition_2)
    @property
    def backtrack(self):
        # Rewind to the earlier of the two crossing times (the larger backtrack).
        return np.max([self._condition_1.backtrack, self._condition_2.backtrack])
    def requires(self):
        needed = self._condition_1.requires()
        needed.update(self._condition_2.requires())
        return needed
@DocInheritor({'requires', 'evaluate', 'backtrack'})
class AndCondition(ControlCondition):
    """Combine two WNTR Conditions with an AND.

    Parameters
    ----------
    cond1 : ControlCondition
        The first condition
    cond2 : ControlCondition
        The second condition
    """
    def __init__(self, cond1, cond2):
        self._condition_1 = cond1
        self._condition_2 = cond2
        self._warn_if_eq(cond1)
        self._warn_if_eq(cond2)
    @staticmethod
    def _warn_if_eq(cond):
        # Equality relations are unlikely to ever be hit exactly in a simulation.
        if isinstance(cond, (TimeOfDayCondition, SimTimeCondition, ValueCondition,
                             TankLevelCondition, RelativeCondition)):
            if cond._relation is Comparison.eq:
                message = 'Using Comparison.eq with {0} will probably not work!'.format(type(cond))
                logger.warning(message)
                warnings.warn(message)
    def _compare(self, other):
        """Return True if *other* combines equivalent conditions with AND.

        Parameters
        ----------
        other: AndCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        return (self._condition_1._compare(other._condition_1)
                and self._condition_2._compare(other._condition_2))
    def __str__(self):
        return "( " + str(self._condition_1) + " && " + str(self._condition_2) + " )"
    def __repr__(self):
        return 'And({}, {})'.format(repr(self._condition_1), repr(self._condition_2))
    def evaluate(self):
        return bool(self._condition_1) and bool(self._condition_2)
    @property
    def backtrack(self):
        # Rewind only as far as the later crossing (the smaller backtrack).
        return np.min([self._condition_1.backtrack, self._condition_2.backtrack])
    def requires(self):
        needed = self._condition_1.requires()
        needed.update(self._condition_2.requires())
        return needed
class _CloseCVCondition(ControlCondition):
    """Internal condition: True when a check valve must be closed."""
    Htol = 0.0001524   # head tolerance (m)
    Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    def __init__(self, wn, cv):
        self._cv = cv
        self._start_node = wn.get_node(cv.start_node)
        self._end_node = wn.get_node(cv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._cv, self._start_node, self._end_node])
    def evaluate(self):
        """
        If True is returned, the cv needs to be closed
        """
        dh = self._start_node.head - self._end_node.head
        # Close on significant adverse head across the valve, or on reverse
        # flow (equivalent to the original nested head/flow branches).
        return dh < -self.Htol or self._cv.flow < -self.Qtol
    def __str__(self):
        return '{0} head - {1} head < -{2} or {3} flow < {4}'.format(
            self._start_node.name, self._end_node.name, self.Htol,
            self._cv.name, -self.Qtol)
class _OpenCVCondition(ControlCondition):
    """Internal condition: True when a check valve can be reopened."""
    Htol = 0.0001524   # head tolerance (m)
    Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    def __init__(self, wn, cv):
        self._cv = cv
        self._start_node = wn.get_node(cv.start_node)
        self._end_node = wn.get_node(cv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._cv, self._start_node, self._end_node])
    def evaluate(self):
        """
        If True is returned, the cv needs to be opened
        """
        dh = self._start_node.head - self._end_node.head
        # Open only with clearly favorable head AND no reverse flow
        # (equivalent to the original nested head/flow branches).
        return dh > self.Htol and self._cv.flow >= -self.Qtol
    def __str__(self):
        return '{0} head - {1} head > {2} and {3} flow >= {4}'.format(
            self._start_node.name, self._end_node.name, self.Htol,
            self._cv.name, -self.Qtol)
class _ClosePowerPumpCondition(ControlCondition):
    """
    Prevents reverse flow in pumps.
    """
    Htol = 0.0001524   # head tolerance (m)
    Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    Hmax = 1e10        # effective maximum head gain for a power pump
    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._pump, self._start_node, self._end_node])
    def evaluate(self):
        """
        If True is returned, the pump needs to be closed
        """
        head_gain = self._end_node.head - self._start_node.head
        return head_gain > self.Hmax + self.Htol
    def __str__(self):
        return '{0} head - {1} head > {2:.4f}'.format(
            self._end_node.name, self._start_node.name, self.Hmax + self.Htol)
class _OpenPowerPumpCondition(ControlCondition):
    """Internal condition: True when a power pump can be reopened."""
    Htol = 0.0001524   # head tolerance (m)
    Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    Hmax = 1e10        # effective maximum head gain for a power pump
    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._pump, self._start_node, self._end_node])
    def evaluate(self):
        """
        If True is returned, the pump needs to be opened
        """
        head_gain = self._end_node.head - self._start_node.head
        return head_gain <= self.Hmax + self.Htol
    def __str__(self):
        return '{0} head - {1} head <= {2:.4f}'.format(
            self._end_node.name, self._start_node.name, self.Hmax + self.Htol)
class _CloseHeadPumpCondition(ControlCondition):
    """
    Prevents reverse flow in pumps.
    """
    _Htol = 0.0001524  # head tolerance (m)
    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0
        self._wn = wn
    def requires(self):
        return OrderedSet([self._pump, self._start_node, self._end_node])
    def _shutoff_head(self):
        # The head-curve coefficient 'a' is the pump head at zero flow
        # (shutoff head). Extracted so evaluate() and __str__() share one
        # implementation instead of duplicating it.
        a, b, c = self._pump.get_head_curve_coefficients()
        if self._pump.speed_timeseries(self._wn.sim_time) != 1.0:
            raise NotImplementedError('Pump speeds other than 1.0 are not yet supported.')
        return a
    def evaluate(self):
        """
        If True is returned, the pump needs to be closed
        """
        Hmax = self._shutoff_head()
        dh = self._end_node.head - self._start_node.head
        return dh > Hmax + self._Htol
    def __str__(self):
        Hmax = self._shutoff_head()
        return '{0} head - {1} head > {2:.4f}'.format(self._end_node.name, self._start_node.name,
                                                      Hmax + self._Htol)
class _OpenHeadPumpCondition(ControlCondition):
    """
    Allows a head pump to reopen once its head gain falls back within range.
    """
    _Htol = 0.0001524  # head tolerance (m)
    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0
        self._wn = wn
    def requires(self):
        return OrderedSet([self._pump, self._start_node, self._end_node])
    def _shutoff_head(self):
        # The head-curve coefficient 'a' is the pump head at zero flow
        # (shutoff head). Extracted so evaluate() and __str__() share one
        # implementation instead of duplicating it.
        a, b, c = self._pump.get_head_curve_coefficients()
        if self._pump.speed_timeseries(self._wn.sim_time) != 1.0:
            raise NotImplementedError('Pump speeds other than 1.0 are not yet supported.')
        return a
    def evaluate(self):
        """
        If True is returned, the pump needs to be opened
        (the original docstring incorrectly said "closed").
        """
        Hmax = self._shutoff_head()
        dh = self._end_node.head - self._start_node.head
        return dh <= Hmax + self._Htol
    def __str__(self):
        Hmax = self._shutoff_head()
        return '{0} head - {1} head <= {2:.4f}'.format(self._end_node.name, self._start_node.name,
                                                       Hmax + self._Htol)
class _ClosePRVCondition(ControlCondition):
    """Internal condition: True when a pressure reducing valve must be closed."""
    _Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    def __init__(self, wn, prv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        prv: wntr.network.Valve
        """
        super(_ClosePRVCondition, self).__init__()
        self._prv = prv
        self._start_node = wn.get_node(self._prv.start_node)
        self._end_node = wn.get_node(self._prv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._prv])
    def evaluate(self):
        status = self._prv._internal_status
        if status in (LinkStatus.Active, LinkStatus.Open):
            # An active or open PRV with reverse flow must be closed.
            return self._prv.flow < -self._Qtol
        if status == LinkStatus.Closed:
            return False
        raise RuntimeError('Unexpected PRV _internal_status for valve {0}: {1}.'.format(self._prv,
                                                                                        status))
    def __str__(self):
        return 'prv {0} needs to be closed'.format(self._prv.name)
class _OpenPRVCondition(ControlCondition):
    # Internal condition: True when a pressure reducing valve should
    # transition to fully open.
    _Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    _Htol = 0.0001524   # head tolerance (m)
    def __init__(self, wn, prv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        prv: wntr.network.Valve
        """
        super(_OpenPRVCondition, self).__init__()
        self._prv = prv
        self._start_node = wn.get_node(self._prv.start_node)
        self._end_node = wn.get_node(self._prv.end_node)
        self._backtrack = 0
        # NOTE(review): appears to be a minor-loss resistance coefficient for
        # the open valve (0.0826 * K / D^4 with K = 0.04) -- confirm units.
        self._r = 0.0826 * 0.02 * self._prv.diameter ** (-4) * 2.0
    def requires(self):
        return OrderedSet([self._prv, self._start_node, self._end_node])
    def evaluate(self):
        # Returns True if the PRV should move to the fully-open state.
        if self._prv._internal_status == LinkStatus.Active:
            # Open an active valve when upstream head (less the open-valve
            # head loss) cannot sustain the setting.
            if self._prv.flow < -self._Qtol:
                return False
            elif self._start_node.head < self._prv.setting + self._end_node.elevation + self._r * abs(self._prv.flow)**2 - self._Htol:
                return True
            return False
        elif self._prv._internal_status == LinkStatus.Open:
            return False
        elif self._prv._internal_status == LinkStatus.Closed:
            # Reopen a closed valve only when upstream head is below the
            # setting but still exceeds downstream head.
            if self._start_node.head >= self._prv.setting + self._end_node.elevation + self._Htol and self._end_node.head < self._prv.setting + self._end_node.elevation - self._Htol:
                return False
            elif self._start_node.head < self._prv.setting + self._end_node.elevation - self._Htol and self._start_node.head > self._end_node.head + self._Htol:
                return True
            return False
        else:
            raise RuntimeError('Unexpected PRV _internal_status for valve {0}: {1}.'.format(self._prv,
                                                                                            self._prv._internal_status))
    def __str__(self):
        s = 'prv {0} needs to be open'.format(self._prv.name)
        return s
class _ActivePRVCondition(ControlCondition):
    # Internal condition: True when a pressure reducing valve should
    # transition to the active (regulating) state.
    _Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    _Htol = 0.0001524   # head tolerance (m)
    def __init__(self, wn, prv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        prv: wntr.network.Valve
        """
        # NOTE(review): unlike _ClosePRVCondition/_OpenPRVCondition, this
        # __init__ does not call super().__init__() -- confirm intentional.
        self._prv = prv
        self._start_node = wn.get_node(self._prv.start_node)
        self._end_node = wn.get_node(self._prv.end_node)
        self._backtrack = 0
        # NOTE(review): appears to be a minor-loss resistance coefficient for
        # the open valve -- confirm units; it is unused in evaluate() here.
        self._r = 0.0826 * 0.02 * self._prv.diameter ** (-4) * 2.0
    def requires(self):
        return OrderedSet([self._prv, self._start_node, self._end_node])
    def evaluate(self):
        # Returns True if the PRV should move to the active state.
        if self._prv._internal_status == LinkStatus.Active:
            return False
        elif self._prv._internal_status == LinkStatus.Open:
            # An open valve starts regulating once downstream head exceeds
            # the setting (and flow is not reversed).
            if self._prv.flow < -self._Qtol:
                return False
            elif (self._end_node.head >= self._prv.setting + self._end_node.elevation + self._Htol):
                return True
            return False
        elif self._prv._internal_status == LinkStatus.Closed:
            # A closed valve activates when upstream head exceeds the setting
            # while downstream head is still below it.
            if ((self._start_node.head >= self._prv.setting + self._end_node.elevation + self._Htol) and
                    (self._end_node.head < self._prv.setting + self._end_node.elevation - self._Htol)):
                return True
            return False
        else:
            raise RuntimeError('Unexpected PRV _internal_status for valve {0}: {1}.'.format(self._prv,
                                                                                            self._prv._internal_status))
    def __str__(self):
        s = 'prv {0} needs to be active'.format(self._prv.name)
        return s
class _OpenFCVCondition(ControlCondition):
    """Internal condition: True when a flow control valve should be fully open."""
    _Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    _Htol = 0.0001524   # head tolerance (m)
    def __init__(self, wn, fcv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        fcv: wntr.network.Valve
        """
        self._fcv = fcv
        self._start_node = wn.get_node(self._fcv.start_node)
        self._end_node = wn.get_node(self._fcv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._fcv, self._start_node, self._end_node])
    def evaluate(self):
        # Open fully on adverse head across the valve or on reverse flow
        # (equivalent to the original if/elif/else chain).
        dh = self._start_node.head - self._end_node.head
        return dh < -self._Htol or self._fcv.flow < -self._Qtol
class _ActiveFCVCondition(ControlCondition):
    """Internal condition: True when a flow control valve should start regulating."""
    _Qtol = 2.83168e-6  # flow tolerance (m^3/s)
    _Htol = 0.0001524   # head tolerance (m)
    def __init__(self, wn, fcv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        fcv: wntr.network.Valve
        """
        self._fcv = fcv
        self._start_node = wn.get_node(self._fcv.start_node)
        self._end_node = wn.get_node(self._fcv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._fcv, self._start_node, self._end_node])
    def evaluate(self):
        # Guard clauses: adverse head or reverse flow rules out activation.
        if self._start_node.head - self._end_node.head < -self._Htol:
            return False
        if self._fcv.flow < -self._Qtol:
            return False
        # Activate only when an open valve exceeds its flow setting.
        return (self._fcv._internal_status == LinkStatus.Open
                and self._fcv.flow >= self._fcv.setting)
class BaseControlAction(six.with_metaclass(abc.ABCMeta, Subject)):
    """
    A base class for deriving new control actions. The control action is run by calling run_control_action.
    This class is not meant to be used directly. Derived classes must implement the run_control_action, requires,
    and target methods.
    """
    def __init__(self):
        super(BaseControlAction, self).__init__()
    @abc.abstractmethod
    def run_control_action(self):
        """
        This method is called to run the corresponding control action.
        """
        pass
    @abc.abstractmethod
    def requires(self):
        """
        Returns a set of objects used to evaluate the control

        Returns
        -------
        req: OrderedSet
            The objects required to run the control action.
        """
        pass
    @abc.abstractmethod
    def target(self):
        """
        Returns a tuple (object, attribute) containing the object and attribute that the control action may change

        Returns
        -------
        target: tuple
            A tuple containing the target object and the attribute to be changed (target, attr).
        """
        pass
    def _compare(self, other):
        """Return True if *other* performs an equivalent action.

        NOTE(review): relies on subclasses defining ``self._value``.

        Parameters
        ----------
        other: BaseControlAction

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        target1, attr1 = self.target()
        target2, attr2 = other.target()
        val1 = self._value
        val2 = other._value
        if not target1._compare(target2):
            return False
        if attr1 != attr2:
            return False
        # NOTE(review): only val1's type selects the tolerance path; a float
        # compared against an int val1 takes exact equality -- confirm intended.
        if type(val1) == float:
            if abs(val1 - val2) > 1e-10:
                return False
        else:
            if val1 != val2:
                return False
        return True
@DocInheritor({'requires', 'target', 'run_control_action'})
class ControlAction(BaseControlAction):
    """
    A general class for specifying a control action that simply modifies the attribute of an object (target).

    Parameters
    ----------
    target_obj : object
        The object whose attribute will be changed when the control runs.
    attribute : string
        The attribute that will be changed on the target_obj when the control runs.
    value : any
        The new value for target_obj.attribute when the control runs.
    """
    def __init__(self, target_obj, attribute, value):
        super(ControlAction, self).__init__()
        if target_obj is None:
            raise ValueError('target_obj is None in ControlAction::__init__. A valid target_obj is needed.')
        if not hasattr(target_obj, attribute):
            raise ValueError('attribute given in ControlAction::__init__ is not valid for target_obj')
        self._target_obj = target_obj
        self._attribute = attribute
        self._value = value
    def requires(self):
        return OrderedSet([self._target_obj])
    def __repr__(self):
        return '<ControlAction: {}, {}, {}>'.format(self._target_obj, self._attribute, self._repr_value())
    def __str__(self):
        return "set {}('{}').{} to {}".format(self._target_obj.__class__.__name__,
                                              self._target_obj.name,
                                              self._attribute,
                                              self._repr_value())
    def _repr_value(self):
        # Render status values by their symbolic LinkStatus name.
        if self._attribute.lower() == 'status':
            return LinkStatus(int(self._value)).name
        return self._value
    def run_control_action(self):
        # Apply the new value, then notify observers of this Subject.
        setattr(self._target_obj, self._attribute, self._value)
        self.notify()
    def target(self):
        return self._target_obj, self._attribute
class _InternalControlAction(BaseControlAction):
    """
    A control action class that modifies a private attribute in order to change a property on an object.
    For example, a valve has a status property, but the control action must act on the _internal_status.

    Parameters
    ----------
    target_obj: object
        The object for which an attribute is being changed.
    internal_attribute: str
        The attribute being modified (e.g., _internal_status)
    value: any
        The new value for the internal_attribute
    property_attribute: str
        The attribute to be checked for an actual change (e.g., status)
    """
    def __init__(self, target_obj, internal_attribute, value, property_attribute):
        super(_InternalControlAction, self).__init__()
        # Both the private attribute we write and the public property we
        # report must already exist on the target.
        for attr in (internal_attribute, property_attribute):
            if not hasattr(target_obj, attr):
                raise AttributeError('{0} does not have attribute {1}'.format(target_obj, attr))
        self._target_obj = target_obj
        self._internal_attr = internal_attribute
        self._value = value
        self._property_attr = property_attribute
    def requires(self):
        """
        Return a list of objects required by the control action.

        Returns
        -------
        required_objects: list of object
        """
        return OrderedSet([self._target_obj])
    def run_control_action(self):
        """
        Activate the control action.
        """
        if self._target_obj is None:
            raise ValueError('target is None inside _InternalControlAction::RunControlAction.' +
                             'This may be because a target_obj was added, but later the object itself was deleted.')
        setattr(self._target_obj, self._internal_attr, self._value)
        self.notify()
    def target(self):
        """
        Returns a tuple containing the target object and the attribute to check for modification.

        Returns
        -------
        target: tuple
        """
        return self._target_obj, self._property_attr
    def __repr__(self):
        return '<_InternalControlAction: {}, {}, {}>'.format(self._target_obj,
                                                             self._internal_attr,
                                                             self._value)
    def __str__(self):
        return "set {}('{}').{} to {}".format(self._target_obj.__class__.__name__,
                                              self._target_obj.name,
                                              self._internal_attr,
                                              self._value)
#
# Control classes
#
class ControlBase(six.with_metaclass(abc.ABCMeta, object)):
    """
    This is the base class for all control objects. Control objects are used to check the conditions under which a
    ControlAction should be run. For example, if a pump is supposed to be turned on when the simulation time
    reaches 6 AM, the ControlAction would be "turn the pump on", and the ControlCondition would be "when the simulation
    reaches 6 AM".
    """
    @abc.abstractmethod
    def is_control_action_required(self):
        """
        This method is called to see if any action is required by this control object. This method returns a tuple
        that indicates if action is required (a bool) and a recommended time for the simulation to backup (in seconds
        as a positive int).

        Returns
        -------
        req: tuple
            A tuple (bool, int) indicating if an action should be run and how far to back up the simulation.
        """
        pass
    @abc.abstractmethod
    def run_control_action(self):
        """
        This method is called to run the control action after a call to IsControlActionRequired indicates that an
        action is required.
        """
        pass
    @abc.abstractmethod
    def requires(self):
        """
        Returns a set of objects required for this control.

        Returns
        -------
        required_objects: OrderedSet of object
        """
        return OrderedSet()
    @abc.abstractmethod
    def actions(self):
        """
        Returns a list of all actions used by this control.

        Returns
        -------
        act: list of BaseControlAction
        """
        pass
    def _control_type_str(self):
        # Human-readable kind; subclasses set self._control_type.
        if self._control_type is _ControlType.rule:
            return 'Rule'
        else:
            return 'Control'
    @property
    def condition(self):
        # The ControlCondition evaluated by this control.
        return self._condition
    @property
    def priority(self):
        # The ControlPriority used to order competing controls.
        return self._priority
    def _compare(self, other):
        """Return True if *other* is an equivalent control.

        NOTE(review): on mismatch the reason is printed via ``print`` (not the
        module logger) and the accumulated ``msg`` pieces are concatenated
        without separators -- confirm this debugging output is intentional.

        Parameters
        ----------
        other: ControlBase

        Returns
        -------
        bool
        """
        ret = True
        msg = '_compare failed in ControlBase because '
        if self.priority != other.priority:
            ret = False
            msg += 'priorities were not equal'
        if self._control_type_str() != other._control_type_str():
            ret = False
            msg += '_control_type_strs were not equal'
        if not self.condition._compare(other.condition):
            ret = False
            msg += 'conditions were not equal'
        # Pairwise action comparison; zip stops at the shorter list, so a
        # differing action count is not detected here.
        for action1, action2 in zip(self.actions(), other.actions()):
            if not action1._compare(action2):
                ret = False
                msg += 'actions were not equal'
                break
        if ret is False:
            print(msg)
        return ret
@DocInheritor({'is_control_action_required', 'run_control_action', 'requires', 'actions'})
class Rule(ControlBase):
"""
A very general and flexible class for defining both controls rules.
"""
def __init__(self, condition, then_actions, else_actions=None, priority=ControlPriority.medium, name=None):
"""
Parameters
----------
condition: ControlCondition
The condition that should be used to determine when the actions need to be activated. When the condition
evaluates to True, the then_actions are activated. When the condition evaluates to False, the else_actions
are activated.
then_actions: Iterable of ControlAction
The actions that should be activated when the condition evaluates to True.
else_actions: Iterable of ControlAction
The actions that should be activated when the condition evaluates to False.
priority: ControlPriority
The priority of the control. Default is ControlPriority.medium
name: str
The name of the control
"""
if not isinstance(condition, ControlCondition):
raise ValueError('The conditions argument must be a ControlCondition instance')
self._condition = condition
if isinstance(then_actions, Iterable):
self._then_actions = list(then_actions)
elif then_actions is not None:
self._then_actions = [then_actions]
else:
self._then_actions = []
if isinstance(else_actions, Iterable):
self._else_actions = list(else_actions)
elif else_actions is not None:
self._else_actions = [else_actions]
else:
self._else_actions = []
self._which = None
self._priority = priority
self._name = name
if self._name is None:
self._name = ''
self._control_type = _ControlType.rule
if isinstance(condition, (TimeOfDayCondition, SimTimeCondition, ValueCondition, TankLevelCondition,
RelativeCondition)):
if condition._relation is Comparison.eq:
logger.warning('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
warnings.warn('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
@property
def epanet_control_type(self):
"""
The control type. Note that presolve and postsolve controls are both simple controls in Epanet.
Returns
-------
control_type: _ControlType
"""
return self._control_type
def requires(self):
req = self._condition.requires()
for action in self._then_actions:
req.update(action.requires())
for action in self._else_actions:
req.update(action.requires())
return req
def actions(self):
return self._then_actions + self._else_actions
@property
def name(self):
"""
A string representation of the Control.
"""
if self._name is not None:
return self._name
else:
return '/'.join(str(self).split())
def __repr__(self):
fmt = "<Control: '{}', {}, {}, {}, priority={}>"
return fmt.format(self._name, repr(self._condition), repr(self._then_actions), repr(self._else_actions), self._priority)
def __str__(self):
text = '{} {} := if {}'.format(self._control_type_str(), self._name, self._condition)
if self._then_actions is not None and len(self._then_actions) > 0:
then_text = ' then '
for ct, act in enumerate(self._then_actions):
if ct == 0:
then_text += str(act)
else:
then_text += ' and {}'.format(str(act))
text += then_text
if self._else_actions is not None and len(self._else_actions) > 0:
else_text = ' else '
for ct, act in enumerate(self._else_actions):
if ct == 0:
else_text += str(act)
else:
else_text += ' and {}'.format(str(act))
text += else_text
if self._priority is not None and self._priority >= 0:
text += ' with priority {}'.format(self._priority)
return text
def is_control_action_required(self):
do = self._condition.evaluate()
back = self._condition.backtrack
if do:
self._which = 'then'
return True, back
elif not do and self._else_actions is not None and len(self._else_actions) > 0:
self._which = 'else'
return True, back
else:
return False, None
def run_control_action(self):
    """
    Run the branch selected by the last call to is_control_action_required.

    Raises
    ------
    RuntimeError
        If no branch was selected (i.e., the rule was not triggered).
    """
    if self._which == 'then':
        selected = self._then_actions
    elif self._which == 'else':
        selected = self._else_actions
    else:
        raise RuntimeError('control actions called even though if-then statement was False')
    for act in selected:
        act.run_control_action()
class Control(Rule):
    """
    A class for simple controls.

    A Control is a :class:`Rule` with exactly one "then" action and no "else"
    actions. The control type is selected from the condition type: tank-level
    conditions are checked both before and after the solve, time-based
    conditions before it, and everything else after it.
    """
    def __init__(self, condition, then_action, priority=ControlPriority.medium, name=None):
        """
        Parameters
        ----------
        condition: ControlCondition
            The condition that should be used to determine when the action needs to be activated. When the
            condition evaluates to True, then_action is activated.
        then_action: ControlAction
            The action that should be activated when the condition evaluates to True.
        priority: ControlPriority
            The priority of the control. Default is ControlPriority.medium
        name: str
            The name of the control
        """
        if isinstance(condition, (ValueCondition, TankLevelCondition, RelativeCondition)):
            if condition._relation is Comparison.eq:
                # Equality against a continuously varying quantity is unlikely
                # to ever be satisfied exactly; warn, but allow it.
                logger.warning('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
                warnings.warn('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
        self._condition = condition
        self._then_actions = [then_action]
        self._else_actions = []
        self._which = None
        self._priority = priority
        self._name = name
        if self._name is None:
            self._name = ''
        if isinstance(condition, TankLevelCondition):
            self._control_type = _ControlType.pre_and_postsolve
        elif isinstance(condition, (TimeOfDayCondition, SimTimeCondition)):
            self._control_type = _ControlType.presolve
        else:
            self._control_type = _ControlType.postsolve

    @classmethod
    def _time_control(cls, wnm, run_at_time, time_flag, daily_flag, control_action, name=None):
        """
        A class method for creating simple time controls.

        Parameters
        ----------
        wnm: wntr.network.WaterNetworkModel
            The WaterNetworkModel instance this control will be added to.
        run_at_time: int
            The time to activate the control action.
        time_flag: str
            Options are 'SIM_TIME' and 'CLOCK_TIME'. SIM_TIME indicates that run_at_time is the time since the start
            of the simulation. CLOCK_TIME indicates that run_at_time is the time of day.
        daily_flag: bool
            If True, then the control will repeat every day.
        control_action: BaseControlAction
            The control action that should occur at run_at_time.
        name: str
            An optional name for the control.

        Returns
        -------
        ctrl: Control
        """
        if time_flag.upper() == 'SIM_TIME':
            condition = SimTimeCondition(model=wnm, relation=Comparison.eq, threshold=run_at_time, repeat=daily_flag,
                                         first_time=0)
        elif time_flag.upper() == 'CLOCK_TIME':
            condition = TimeOfDayCondition(model=wnm, relation=Comparison.eq, threshold=run_at_time, repeat=daily_flag,
                                           first_day=0)
        else:
            raise ValueError("time_flag not recognized; expected either 'sim_time' or 'clock_time'")
        # Bug fix: the optional name used to be accepted but silently dropped.
        return cls(condition=condition, then_action=control_action, name=name)

    @classmethod
    def _conditional_control(cls, source_obj, source_attr, operation, threshold, control_action, name=None):
        """
        A class method for creating simple conditional controls.

        Parameters
        ----------
        source_obj: object
            The object whose source_attr attribute will be compared to threshold to determine if control_action
            needs activated.
        source_attr: str
            The attribute of source_obj to compare to threshold.
        operation: Comparison
            The comparison function used to compare the source_attr attribute of source_obj to threshold.
        threshold: any
            The threshold used in the comparison.
        control_action: ControlAction
            The control action that should occur when operation(getattr(source_obj, source_attr), threshold) is True.
        name: str
            An optional name for the control

        Returns
        -------
        ctrl: Control
        """
        condition = ValueCondition(source_obj=source_obj, source_attr=source_attr, relation=operation,
                                   threshold=threshold)
        # Bug fix: the optional name used to be accepted but silently dropped.
        return cls(condition=condition, then_action=control_action, name=name)
class ControlManager(Observer):
    """
    Tracks a collection of controls and records which (object, attribute)
    targets have been modified by their control actions.
    """
    def __init__(self):
        self._controls = OrderedSet()  # registered ControlBase objects
        self._previous_values = OrderedDict()  # {(obj, attr): value snapshot}
        self._changed = OrderedSet()  # {(obj, attr)} differing from the snapshot

    def __iter__(self):
        return iter(self._controls)

    def update(self, subject):
        """
        Observer callback; invoked when a control action (the subject) runs.

        Parameters
        -----------
        subject: BaseControlAction
        """
        obj, attr = subject.target()
        key = (obj, attr)
        if getattr(obj, attr) == self._previous_values[key]:
            self._changed.discard(key)
        else:
            self._changed.add(key)

    def register_control(self, control):
        """
        Register a control and subscribe to all of its actions.

        Parameters
        ----------
        control: ControlBase
        """
        self._controls.add(control)
        for act in control.actions():
            act.subscribe(self)
            obj, attr = act.target()
            self._previous_values[(obj, attr)] = getattr(obj, attr)

    def reset(self):
        """
        Snapshot the current value of every registered target and clear the
        change log. Call this before activating any control actions so that
        changes made by those actions can be tracked.
        """
        self._changed = OrderedSet()
        self._previous_values = OrderedDict()
        for ctrl in self._controls:
            for act in ctrl.actions():
                obj, attr = act.target()
                self._previous_values[(obj, attr)] = getattr(obj, attr)

    def changes_made(self):
        """
        Return True if any registered target differs from its snapshot.

        Returns
        -------
        changes: bool
        """
        return len(self._changed) > 0

    def get_changes(self):
        """
        Yield each (object, attr) pair changed since the last reset.

        Returns
        -------
        changes: tuple
            (object, attr)
        """
        for obj, attr in self._changed:
            yield obj, attr

    def deregister(self, control):
        """
        Remove a control, unsubscribe from its actions, and drop the
        bookkeeping for its targets.

        Parameters
        ----------
        control: ControlBase
        """
        self._controls.remove(control)
        for act in control.actions():
            act.unsubscribe(self)
            obj, attr = act.target()
            self._previous_values.pop((obj, attr))
            self._changed.discard((obj, attr))

    def check(self):
        """
        Evaluate every registered control.

        Returns
        -------
        controls_to_run: list of tuple
            Each tuple is (ControlBase, backtrack).
        """
        to_run = []
        for ctrl in self._controls:
            needed, backtrack = ctrl.is_control_action_required()
            if needed:
                to_run.append((ctrl, backtrack))
        return to_run
|
# wntr/network/controls.py
"""
The wntr.network.controls module includes methods to define network controls
and control actions. These controls modify parameters in the network during
simulation.
.. rubric:: Contents
.. autosummary::
Subject
Observer
Comparison
ControlPriority
ControlCondition
TimeOfDayCondition
SimTimeCondition
ValueCondition
TankLevelCondition
RelativeCondition
OrCondition
AndCondition
BaseControlAction
ControlAction
ControlBase
Control
ControlManager
"""
import math
import enum
import numpy as np
import logging
import six
from .elements import LinkStatus
import abc
from wntr.utils.ordered_set import OrderedSet
from collections import OrderedDict, Iterable
from .elements import Tank, Junction, Valve, Pump, Reservoir, Pipe
from wntr.utils.doc_inheritor import DocInheritor
import warnings
logger = logging.getLogger(__name__)
# Control Priorities:
# 0 is the lowest
# 3 is the highest
#
# 0:
# Open check valves/pumps if flow would be forward
# Open links for time controls
# Open links for conditional controls
# Open links connected to tanks if the tank head is larger than the minimum head plus a tolerance
# Open links connected to tanks if the tank head is smaller than the maximum head minus a tolerance
# Open pumps if power comes back up
# Start/stop leaks
# 1:
# Close links connected to tanks if the tank head is less than the minimum head (except check valves and pumps that
# only allow flow in).
# Close links connected to tanks if the tank head is larger than the maximum head (except check valves and pumps that
# only allow flow out).
# 2:
# Open links connected to tanks if the level is low but flow would be in
# Open links connected to tanks if the level is high but flow would be out
# Close links connected to tanks if the level is low and flow would be out
# Close links connected to tanks if the level is high and flow would be in
# 3:
# Close links for time controls
# Close links for conditional controls
# Close check valves/pumps for negative flow
# Close pumps without power
class Subject(object):
    """
    Subject half of the observer design pattern.

    Observers registered via :meth:`subscribe` have their ``update`` method
    invoked whenever :meth:`notify` is called on this subject.
    """
    def __init__(self):
        self._observers = OrderedSet()

    def subscribe(self, observer):
        """
        Register an observer; its update method will be called whenever
        notify is called on this subject.

        Parameters
        ----------
        observer: Observer
        """
        self._observers.add(observer)

    def unsubscribe(self, observer):
        """
        Remove a previously registered observer.

        Parameters
        ----------
        observer: Observer
        """
        self._observers.remove(observer)

    def notify(self):
        """Invoke ``update(self)`` on every subscribed observer."""
        for watcher in self._observers:
            watcher.update(self)
class Observer(six.with_metaclass(abc.ABCMeta, object)):
    """
    A base class for observers in the observer design pattern.

    Subclasses must implement :meth:`update`, which :meth:`Subject.notify`
    invokes for every subscribed observer.
    """
    # six.with_metaclass keeps the ABC machinery compatible across Python 2/3.
    @abc.abstractmethod
    def update(self, subject):
        """
        This method is called when the subject being observed calls notify.

        Parameters
        ----------
        subject: Subject
            The subject that called notify.
        """
        pass
class Comparison(enum.Enum):
    """
    An enum class for comparison operators.

    .. rubric:: Enum Members

    =========== ==============================================
    :attr:`~gt` greater than
    :attr:`~ge` greater than or equal to
    :attr:`~lt` less than
    :attr:`~le` less than or equal to
    :attr:`~eq` equal to
    :attr:`~ne` not equal to
    =========== ==============================================
    """
    gt = (1, np.greater)
    ge = (2, np.greater_equal)
    lt = (3, np.less)
    le = (4, np.less_equal)
    eq = (5, np.equal)
    ne = (6, np.not_equal)

    def __str__(self):
        return '-{}'.format(self.name)

    @property
    def func(self):
        """The function call to use for this comparison"""
        return self.value[1]
    __call__ = func

    @property
    def symbol(self):
        """The operator symbol used in rule/control text (e.g. '>=')."""
        lookup = {'eq': '=', 'ne': '<>', 'gt': '>', 'ge': '>=', 'lt': '<', 'le': '<='}
        try:
            return lookup[self.name]
        except KeyError:
            raise ValueError('Unknown Enum: Comparison.%s' % self)

    @property
    def text(self):
        """EPANET keyword form ('Is', 'Not', 'Above', 'Below'); ge/le keep their symbols."""
        lookup = {'eq': 'Is', 'ne': 'Not', 'gt': 'Above', 'ge': '>=', 'lt': 'Below', 'le': '<='}
        try:
            return lookup[self.name]
        except KeyError:
            raise ValueError('Unknown Enum: Comparison.%s' % self)

    @classmethod
    def parse(cls, func):
        """Parse a string, numpy comparison function, or Comparison member."""
        if isinstance(func, six.string_types):
            func = func.lower().strip()
        elif isinstance(func, cls):
            func = func.func
        # Alias table; checked in the same order as the historical if/elif chain.
        aliases = (
            (cls.eq, (np.equal, '=', 'eq', '-eq', '==', 'is', 'equal', 'equal to')),
            (cls.ne, (np.not_equal, '<>', 'ne', '-ne', '!=', 'not', 'not_equal', 'not equal to')),
            (cls.gt, (np.greater, '>', 'gt', '-gt', 'above', 'after', 'greater', 'greater than')),
            (cls.lt, (np.less, '<', 'lt', '-lt', 'below', 'before', 'less', 'less than')),
            (cls.ge, (np.greater_equal, '>=', 'ge', '-ge', 'greater_equal', 'greater than or equal to')),
            (cls.le, (np.less_equal, '<=', 'le', '-le', 'less_equal', 'less than or equal to')),
        )
        for member, names in aliases:
            if func in names:
                return member
        raise ValueError('Invalid Comparison name: %s' % func)
#
# Control Condition classes
#
class ControlPriority(enum.IntEnum):
    """
    An enum class for control priorities.

    Higher numeric values correspond to higher priorities; IntEnum allows
    priorities to be compared numerically.

    .. rubric:: Enum Members

    ==================== =====================================================
    :attr:`~very_low`    very low priority
    :attr:`~low`         low priority
    :attr:`~medium_low`  medium low priority
    :attr:`~medium`      medium priority
    :attr:`~medium_high` medium high priority
    :attr:`~high`        high priority
    :attr:`~very_high`   very high priority
    ==================== =====================================================
    """
    very_low = 0
    low = 1
    medium_low = 2
    medium = 3
    medium_high = 4
    high = 5
    very_high = 6
class _ControlType(enum.Enum):
    # Internal classification of when a control must be evaluated by the
    # simulator: before the solve (presolve), after it (postsolve), both,
    # or as an EPANET-style rule.
    presolve = 0
    postsolve = 1
    rule = 2
    pre_and_postsolve = 3
class ControlCondition(six.with_metaclass(abc.ABCMeta, object)):
    """A base class for control conditions"""
    def __init__(self):
        # Seconds the simulation should back up when the condition is detected
        # between time steps; subclasses update this in evaluate().
        self._backtrack = 0

    @abc.abstractmethod
    def requires(self):
        """
        Returns a set of objects required to evaluate this condition

        Returns
        -------
        required_objects: OrderedSet of object
        """
        return OrderedSet()

    @property
    def name(self):
        """
        Returns the string representation of the condition.

        Returns
        -------
        name: str
        """
        return str(self)

    @property
    def backtrack(self):
        """
        The amount of time by which the simulation should be backed up.
        Should be updated by the :class:`~wntr.network.controls.ControlCondition.evaluate` method if appropriate.

        Returns
        -------
        backtrack: int
        """
        return self._backtrack

    @abc.abstractmethod
    def evaluate(self):
        """
        Check if the condition is satisfied.

        Returns
        -------
        check: bool
        """
        pass

    def __bool__(self):
        """
        Check if the condition is satisfied.

        Returns
        -------
        check: bool
        """
        return self.evaluate()
    # Python 2 compatibility: truthiness uses __nonzero__ there.
    __nonzero__ = __bool__

    @classmethod
    def _parse_value(cls, value):
        # Convert a raw threshold into a number:
        #   * numeric values/strings        -> float
        #   * link status keywords          -> 0 (CLOSED), 1 (OPEN), NaN (ACTIVE)
        #   * clock strings 'hh:mm[:ss] [AM|PM]' -> seconds past midnight
        try:
            v = float(value)
            return v
        except ValueError:
            value = value.upper()
            if value == 'CLOSED':
                return 0
            if value == 'OPEN':
                return 1
            if value == 'ACTIVE':
                return np.nan
            PM = 0
            words = value.split()
            if len(words) > 1:
                if words[1] == 'PM':
                    PM = 86400 / 2
            hms = words[0].split(':')
            v = 0
            if len(hms) > 2:
                v += int(hms[2])
            if len(hms) > 1:
                v += int(hms[1])*60
            if len(hms) > 0:
                v += int(hms[0])*3600
                if int(hms[0]) <= 12:
                    v += PM
            # NOTE(review): '12:xx PM' gets the half-day offset added on top of
            # the 12-hour value, wrapping past noon -- confirm this matches
            # EPANET's clock parsing before relying on 12 o'clock thresholds.
            return v

    def _repr_value(self, attr, value):
        # Render integral status values by their LinkStatus name; everything else as-is.
        if attr.lower() in ['status'] and int(value) == value:
            return LinkStatus(int(value)).name
        return value

    @classmethod
    def _sec_to_hours_min_sec(cls, value):
        """Format a seconds value as 'HH:MM:SS'."""
        sec = float(value)
        hours = int(sec/3600.)
        sec -= hours*3600
        mm = int(sec/60.)
        sec -= mm*60
        return '{:02d}:{:02d}:{:02d}'.format(hours, mm, int(sec))

    @classmethod
    def _sec_to_days_hours_min_sec(cls, value):
        """Format a seconds value as '[D-]HH:MM:SS' (day prefix only when nonzero)."""
        sec = float(value)
        days = int(sec/86400.)
        sec -= days*86400
        hours = int(sec/3600.)
        sec -= hours*3600
        mm = int(sec/60.)
        sec -= mm*60
        if days > 0:
            return '{}-{:02d}:{:02d}:{:02d}'.format(days, hours, mm, int(sec))
        else:
            return '{:02d}:{:02d}:{:02d}'.format(hours, mm, int(sec))

    @classmethod
    def _sec_to_clock(cls, value):
        """Format seconds-past-midnight as a 12-hour clock string 'H:MM:SS AM/PM'."""
        sec = float(value)
        hours = int(sec/3600.)
        sec -= hours*3600
        mm = int(sec/60.)
        sec -= mm*60
        if hours >= 12:
            pm = 'PM'
            if hours > 12:
                hours -= 12
        elif hours == 0:
            # Midnight is rendered as 12:xx:xx AM.
            pm = 'AM'
            hours = 12
        else:
            pm = 'AM'
        return '{}:{:02d}:{:02d} {}'.format(hours, mm, int(sec), pm)
@DocInheritor({'requires', 'evaluate', 'name'})
class TimeOfDayCondition(ControlCondition):
    """Time-of-day or "clocktime" based condition statement.

    Resets automatically at 12 AM in clock time (shifted time) every day simulated. Evaluated
    from 12 AM the first day of the simulation, even if this is prior to simulation start.
    Unlike the :class:`~wntr.network.controls.SimTimeCondition`, greater-than and less-than
    relationships make sense, and reset at midnight.

    Parameters
    ----------
    model : WaterNetworkModel
        The model that the time is being compared against
    relation : str or None
        String options are 'at', 'after' or 'before'. The 'at' and None are equivalent, and only
        evaluate as True during the simulation step the time occurs. `after` evaluates as True
        from the time specified until midnight, `before` evaluates as True from midnight until
        the specified time.
    threshold : float or str
        The time (a ``float`` in decimal hours since 12 AM) used in the condition; if provided as a
        string in 'hh:mm[:ss] [am|pm]' format, the time will be parsed from the string
    repeat : bool, optional
        True by default; if False, allows for a single, timed trigger, and probably needs an
        entry for `first_day`; in this case a relation of `after` becomes True from the time until
        the end of the simulation, and `before` is True from the beginning of the simulation until
        the time specified.
    first_day : float, default=0
        Start rule on day `first_day`, with the first day of simulation as day 0
    """
    def __init__(self, model, relation, threshold, repeat=True, first_day=0):
        self._model = model
        if isinstance(threshold, str) and not ':' in threshold:
            # A bare numeric string is interpreted as decimal hours.
            self._threshold = float(threshold) * 3600.
        else:
            self._threshold = self._parse_value(threshold)
        if relation is None:
            # None means "at": only true during the step the time occurs.
            self._relation = Comparison.eq
        else:
            self._relation = Comparison.parse(relation)
        self._first_day = first_day
        self._repeat = repeat
        self._backtrack = 0
        if model is not None and not self._repeat and self._threshold < model._start_clocktime and first_day < 1:
            # A one-shot trigger earlier than the start clocktime cannot fire
            # on day 0, so push it to day 1.
            self._first_day = 1

    def _compare(self, other):
        """
        Deep comparison with another TimeOfDayCondition.

        Parameters
        ----------
        other: TimeOfDayCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._relation != other._relation:
            return False
        if self._first_day != other._first_day:
            return False
        if self._repeat != other._repeat:
            return False
        return True

    @property
    def name(self):
        if not self._repeat:
            rep = '/Once'
        else:
            rep = '/Daily'
        if self._first_day > 0:
            start = '/FirstDay/{}'.format(self._first_day)
        else:
            start = '/'
        return 'ClockTime/{}/{}{}{}'.format(self._relation.text,
                                            self._sec_to_hours_min_sec(self._threshold),
                                            rep, start)

    def requires(self):
        return OrderedSet()

    def __repr__(self):
        fmt = '<TimeOfDayCondition: model, {}, {}, {}, {}>'
        return fmt.format(repr(self._relation.text), repr(self._sec_to_clock(self._threshold)),
                          repr(self._repeat), repr(self._first_day))

    def __str__(self):
        fmt = 'clock_time {:s} "{}"'.format(self._relation.symbol,
                                            self._sec_to_clock(self._threshold))
        # Bug fix: both branches previously dropped the clock_time comparison
        # (fmt was overwritten rather than wrapped), producing e.g.
        # '(  && clock_day == 0 )'.
        if not self._repeat:
            fmt = '( ' + fmt + ' && clock_day == {} )'.format(self._first_day)
        elif self._first_day > 0:
            fmt = '( ' + fmt + ' && clock_day >= {} )'.format(self._first_day)
        return fmt

    def evaluate(self):
        cur_time = self._model._shifted_time
        prev_time = self._model._prev_shifted_time
        day = np.floor(cur_time/86400)
        if day < self._first_day:
            self._backtrack = None
            return False
        if self._repeat:
            # Normalize both times relative to the threshold inside one day.
            cur_time = int(cur_time - self._threshold) % 86400
            prev_time = int(prev_time - self._threshold) % 86400
        else:
            cur_time = cur_time - self._first_day * 86400.
            prev_time = prev_time - self._first_day * 86400.
        # NOTE(review): for Comparison.lt the branches below never return True
        # (including when cur_time < threshold) -- kept as-is for parity with
        # existing behavior; confirm intended semantics before changing.
        if self._relation is Comparison.eq and (prev_time < self._threshold and self._threshold <= cur_time):
            # The threshold was crossed during this step; back up to land on it.
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.gt and cur_time >= self._threshold and prev_time < self._threshold:
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.gt and cur_time >= self._threshold and prev_time >= self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.lt and cur_time >= self._threshold and prev_time < self._threshold:
            self._backtrack = int(cur_time - self._threshold)
            return False
        elif self._relation is Comparison.lt and cur_time >= self._threshold and prev_time >= self._threshold:
            self._backtrack = 0
            return False
        else:
            self._backtrack = 0
            return False
@DocInheritor({'requires', 'evaluate', 'name'})
class SimTimeCondition(ControlCondition):
    """Condition based on time since start of the simulation.

    Generally, the relation should be ``None`` (converted to "at") --
    then it is *only* evaluated "at" specific times. Using greater-than or less-than type
    relationships should be reserved for complex, multi-condition statements and
    should not be used for simple controls. If ``repeat`` is used, the relationship will
    automatically be changed to an "at time" evaluation, and a warning will be raised.

    Parameters
    ----------
    model : WaterNetworkModel
        The model that the time threshold is being compared against
    relation : str or None
        String options are 'at', 'after' or 'before'. The 'at' and None are equivalent, and only
        evaluate as True during the simulation step the time occurs. After evaluates as True
        from the time specified until the end of simulation, before evaluates as True from
        start of simulation until the specified time.
    threshold : float or str
        The time (a ``float`` in decimal hours) used in the condition; if provided as a string in
        '[dd-]hh:mm[:ss]' format, then the time will be parsed from the string;
    repeat : bool or float, default=False
        If True, then repeat every 24-hours; if non-zero float, reset the
        condition every `repeat` seconds after the first_time.
    first_time : float, default=0
        Start rule at `first_time`, using that time as 0 for the condition evaluation
    """
    def __init__(self, model, relation, threshold, repeat=False, first_time=0):
        self._model = model
        if isinstance(threshold, str) and not ':' in threshold:
            # A bare numeric string is interpreted as decimal hours.
            self._threshold = float(threshold) * 3600.
        else:
            self._threshold = self._parse_value(threshold)
        if relation is None:
            # None means "at": only true during the step the time occurs.
            self._relation = Comparison.eq
        else:
            self._relation = Comparison.parse(relation)
        self._repeat = repeat
        if repeat is True:
            # Boolean True means "repeat daily" (24 h in seconds).
            self._repeat = 86400
        self._backtrack = 0
        self._first_time = first_time

    def _compare(self, other):
        """
        Deep comparison with another SimTimeCondition.

        Parameters
        ----------
        other: SimTimeCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._repeat != other._repeat:
            return False
        if self._first_time != other._first_time:
            return False
        if self._relation != other._relation:
            return False
        return True

    @property
    def name(self):
        if not self._repeat:
            rep = ''
        else:
            rep = '%Every{}sec'.format(self._repeat)
        if self._first_time > 0:
            start = '#Start@{}sec'.format((self._first_time))
        else:
            start = ''
        return 'SimTime{}{}{}{}'.format(self._relation.symbol,
                                        (self._threshold),
                                        rep, start)

    def __repr__(self):
        fmt = '<SimTimeCondition: model, {}, {}, {}, {}>'
        return fmt.format(repr(self._relation.text), repr(self._sec_to_days_hours_min_sec(self._threshold)),
                          repr(self._repeat), repr(self._first_time))

    def __str__(self):
        fmt = '{} {} sec'.format(self._relation.symbol, self._threshold)
        # NOTE(review): after __init__, _repeat is never literally True (it is
        # converted to 86400), so this first branch appears unreachable -- confirm.
        if self._repeat is True:
            fmt = '% 86400.0 ' + fmt
        elif self._repeat > 0:
            fmt = '% {:.1f} '.format(int(self._repeat)) + fmt
        if self._first_time > 0:
            fmt = '(sim_time - {:d}) '.format(int(self._first_time)) + fmt
        else:
            fmt = 'sim_time ' + fmt
        return fmt

    def requires(self):
        return OrderedSet()

    def evaluate(self):
        cur_time = self._model.sim_time
        prev_time = self._model._prev_sim_time
        if self._repeat and cur_time > self._threshold:
            # Fold both times into the repeating window anchored at the threshold.
            cur_time = (cur_time - self._threshold) % self._repeat
            prev_time = (prev_time - self._threshold) % self._repeat
        if self._relation is Comparison.eq and (prev_time < self._threshold and self._threshold <= cur_time):
            # The threshold was crossed during this step; back up to land on it.
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.gt and cur_time > self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.ge and cur_time >= self._threshold and prev_time < self._threshold:
            self._backtrack = int(cur_time - self._threshold)
            return True
        elif self._relation is Comparison.ge and cur_time >= self._threshold and prev_time >= self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.lt and cur_time < self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.le and cur_time <= self._threshold:
            self._backtrack = 0
            return True
        elif self._relation is Comparison.le and prev_time < self._threshold:
            # cur_time passed the threshold this step; back up to honor the boundary.
            self._backtrack = int(cur_time - self._threshold)
            return True
        else:
            self._backtrack = 0
            return False
@DocInheritor({'requires', 'evaluate', 'name'})
class ValueCondition(ControlCondition):
    """Compare a network element attribute to a set value.

    Parameters
    ----------
    source_obj : object
        The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison
    source_attr : str
        The attribute of the object (such as level, pressure, setting, etc.) to
        compare against the threshold
    relation : function or str
        A two-parameter comparison function (e.g., numpy.greater, numpy.less_equal), or a
        string describing the comparison (e.g., '=', 'below', 'is', '>=', etc.)
        Words, such as 'below', are only accepted from the EPANET rules conditions list (see ...)
    threshold : float
        A value to compare the source object attribute against
    """
    def __new__(cls, source_obj, source_attr, relation, threshold):
        # Tank level/pressure/head comparisons need backtracking support, so
        # construction is dispatched to TankLevelCondition for those cases.
        if isinstance(source_obj, Tank) and source_attr in {'level', 'pressure', 'head'}:
            return object.__new__(TankLevelCondition)
        else:
            return object.__new__(ValueCondition)

    def __getnewargs__(self):
        # Required for pickling because __new__ takes extra arguments.
        return self._source_obj, self._source_attr, self._relation, self._threshold

    def __init__(self, source_obj, source_attr, relation, threshold):
        self._source_obj = source_obj
        self._source_attr = source_attr
        self._relation = Comparison.parse(relation)
        self._threshold = ControlCondition._parse_value(threshold)
        self._backtrack = 0

    def _compare(self, other):
        """
        Deep comparison with another ValueCondition.

        Parameters
        ----------
        other: ValueCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if not self._source_obj._compare(other._source_obj):
            return False
        if self._source_attr != other._source_attr:
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._relation != other._relation:
            return False
        return True

    def requires(self):
        return OrderedSet([self._source_obj])

    @property
    def name(self):
        if hasattr(self._source_obj, 'name'):
            obj = self._source_obj.name
        else:
            obj = str(self._source_obj)
        return '{}:{}{}{}'.format(obj, self._source_attr,
                                  self._relation.symbol, self._threshold)

    def __repr__(self):
        return "<ValueCondition: {}, {}, {}, {}>".format(str(self._source_obj),
                                                         str(self._source_attr),
                                                         str(self._relation.symbol),
                                                         str(self._threshold))

    def __str__(self):
        typ = self._source_obj.__class__.__name__
        obj = str(self._source_obj)
        if hasattr(self._source_obj, 'name'):
            obj = self._source_obj.name
        att = self._source_attr
        rel = self._relation.symbol
        val = self._repr_value(att, self._threshold)
        return "{}('{}').{} {} {}".format(typ, obj, att, rel, val)

    def evaluate(self):
        cur_value = getattr(self._source_obj, self._source_attr)
        thresh_value = self._threshold
        relation = self._relation.func
        if np.isnan(self._threshold):
            # A NaN threshold comes from an 'ACTIVE' status value; treat it as "> 0".
            relation = np.greater
            thresh_value = 0.0
        state = relation(cur_value, thresh_value)
        return bool(state)
@DocInheritor({'requires', 'evaluate'})
class TankLevelCondition(ValueCondition):
    """
    A special type of ValueCondition for tank levels/heads/pressures.

    Besides evaluating the comparison, this condition computes a backtrack
    time so the simulation can land just past the instant the tank level
    crosses the threshold.
    """
    def __init__(self, source_obj, source_attr, relation, threshold):
        relation = Comparison.parse(relation)
        # NOTE(review): gt/lt are accepted even though the message mentions
        # only <= and >=; gt/lt are widened to ge/le in evaluate().
        if relation not in {Comparison.ge, Comparison.le, Comparison.gt, Comparison.lt}:
            raise ValueError('TankLevelConditions only support <= and >= relations.')
        super(TankLevelCondition, self).__init__(source_obj, source_attr, relation, threshold)
        assert source_attr in {'level', 'pressure', 'head'}
        self._last_value = getattr(self._source_obj, self._source_attr)  # this is used to see if backtracking is needed

    def _compare(self, other):
        """
        Deep comparison with another TankLevelCondition.

        Parameters
        ----------
        other: TankLevelCondition

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        if not self._source_obj._compare(other._source_obj):
            return False
        if self._source_attr != other._source_attr:
            return False
        if abs(self._threshold - other._threshold) > 1e-10:
            return False
        if self._relation != other._relation:
            return False
        return True

    def evaluate(self):
        self._backtrack = 0  # no backtracking is needed unless specified in the if statement below
        cur_value = getattr(self._source_obj, self._source_attr)  # get the current tank level
        thresh_value = self._threshold
        relation = self._relation
        # Strict comparisons are widened so the crossing instant itself counts.
        if relation is Comparison.gt:
            relation = Comparison.ge
        if relation is Comparison.lt:
            relation = Comparison.le
        if np.isnan(self._threshold):
            # A NaN threshold comes from an 'ACTIVE' status value; treat it as "> 0".
            relation = np.greater
            thresh_value = 0.0
        state = relation(cur_value, thresh_value)  # determine if the condition is satisfied
        if state and not relation(self._last_value, thresh_value):
            # if the condition is satisfied and the last value did not satisfy the condition, then backtracking
            # is needed.
            # The math.floor is not actually needed, but I leave it here for clarity. We want the backtrack value to be
            # slightly lower than what the floating point computation would give. This ensures the next time step will
            # be slightly later than when the tank level hits the threshold. This ensures the tank level will go
            # slightly beyond the threshold. This ensures that relation(self._last_value, thresh_value) will be True
            # next time. This prevents us from computing very small backtrack values over and over.
            if self._source_obj.demand != 0:
                # seconds = (level past threshold) * cross-sectional area (pi/4 * d^2) / flow rate
                self._backtrack = int(math.floor((cur_value - thresh_value)*math.pi/4.0*self._source_obj.diameter**2/self._source_obj.demand))
        self._last_value = cur_value  # update the last value
        return bool(state)
@DocInheritor({'requires', 'evaluate', 'name'})
class RelativeCondition(ControlCondition):
    """Compare attributes of two different objects (e.g., levels from tanks 1 and 2)

    This type of condition does not work with the EpanetSimulator, only the WNTRSimulator.

    Parameters
    ----------
    source_obj : object
        The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison
    source_attr : str
        The attribute of the object (such as level, pressure, setting, etc.) to
        compare against the threshold
    relation : function
        A numpy or other comparison method that takes two values and returns a bool
        (e.g., numpy.greater, numpy.less_equal)
    threshold_obj : object
        The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison of attributes
    threshold_attr : str
        The attribute to used in the comparison evaluation
    """
    def __init__(self, source_obj, source_attr, relation, threshold_obj, threshold_attr):
        self._source_obj = source_obj
        self._source_attr = source_attr
        self._relation = Comparison.parse(relation)
        self._threshold_obj = threshold_obj
        self._threshold_attr = threshold_attr
        self._backtrack = 0

    def _compare(self, other):
        """
        Deep comparison with another RelativeCondition.

        Parameters
        ----------
        other: RelativeCondition

        Returns
        -------
        bool
        """
        return bool(type(self) == type(other)
                    and self._source_obj._compare(other._source_obj)
                    and self._source_attr == other._source_attr
                    and self._relation == other._relation
                    and self._threshold_obj._compare(other._threshold_obj)
                    and self._threshold_attr == other._threshold_attr)

    @property
    def name(self):
        src = self._source_obj.name if hasattr(self._source_obj, 'name') else str(self._source_obj)
        thr = self._threshold_obj.name if hasattr(self._threshold_obj, 'name') else str(self._threshold_obj)
        return '{}:{}_{}_{}:{}'.format(src, self._source_attr,
                                       self._relation.symbol,
                                       thr, self._threshold_attr)

    def requires(self):
        return OrderedSet([self._source_obj, self._threshold_obj])

    def __repr__(self):
        return "RelativeCondition({}, {}, {}, {}, {})".format(
            str(self._source_obj), str(self._source_attr), str(self._relation),
            str(self._threshold_obj), str(self._threshold_attr))

    def __str__(self):
        src_name = self._source_obj.name if hasattr(self._source_obj, 'name') else str(self._source_obj)
        thr_name = self._threshold_obj.name if hasattr(self._threshold_obj, 'name') else str(self._threshold_obj)
        return "{}('{}').{} {} {}('{}').{}".format(
            self._source_obj.__class__.__name__, src_name, self._source_attr,
            self._relation.symbol,
            self._threshold_obj.__class__.__name__, thr_name, self._threshold_attr)

    def evaluate(self):
        lhs = getattr(self._source_obj, self._source_attr)
        rhs = getattr(self._threshold_obj, self._threshold_attr)
        return bool(self._relation.func(lhs, rhs))
@DocInheritor({'requires', 'evaluate', 'backtrack'})
class OrCondition(ControlCondition):
    """Combine two WNTR Conditions with an OR.

    Parameters
    ----------
    cond1 : ControlCondition
        The first condition
    cond2 : ControlCondition
        The second condition
    """
    def __init__(self, cond1, cond2):
        self._condition_1 = cond1
        self._condition_2 = cond2
        # Warn about equality comparisons on continuously varying conditions;
        # they are unlikely ever to be satisfied exactly.
        for cond in (cond1, cond2):
            if isinstance(cond, (TimeOfDayCondition, SimTimeCondition, ValueCondition, TankLevelCondition,
                                 RelativeCondition)):
                if cond._relation is Comparison.eq:
                    logger.warning('Using Comparison.eq with {0} will probably not work!'.format(type(cond)))
                    warnings.warn('Using Comparison.eq with {0} will probably not work!'.format(type(cond)))

    def _compare(self, other):
        """
        Deep comparison with another OrCondition.

        Parameters
        ----------
        other: OrCondition

        Returns
        -------
        bool
        """
        return bool(type(self) == type(other)
                    and self._condition_1._compare(other._condition_1)
                    and self._condition_2._compare(other._condition_2))

    def __str__(self):
        return "( " + str(self._condition_1) + " || " + str(self._condition_2) + " )"

    def __repr__(self):
        return 'Or({}, {})'.format(repr(self._condition_1), repr(self._condition_2))

    def evaluate(self):
        return bool(self._condition_1) or bool(self._condition_2)

    @property
    def backtrack(self):
        return np.max([self._condition_1.backtrack, self._condition_2.backtrack])

    def requires(self):
        needed = self._condition_1.requires()
        needed.update(self._condition_2.requires())
        return needed
@DocInheritor({'requires', 'evaluate', 'backtrack'})
class AndCondition(ControlCondition):
    """Combine two WNTR Conditions with an AND

    Parameters
    ----------
    cond1 : ControlCondition
        The first condition
    cond2 : ControlCondition
        The second condition
    """
    # Condition types whose relation compares against a continuously varying
    # quantity; exact equality is unlikely to be hit at a discrete timestep.
    _EQ_SENSITIVE = (TimeOfDayCondition, SimTimeCondition, ValueCondition,
                     TankLevelCondition, RelativeCondition)

    def __init__(self, cond1, cond2):
        self._condition_1 = cond1
        self._condition_2 = cond2
        for cond in (cond1, cond2):
            if isinstance(cond, self._EQ_SENSITIVE) and cond._relation is Comparison.eq:
                msg = 'Using Comparison.eq with {0} will probably not work!'.format(type(cond))
                logger.warning(msg)
                warnings.warn(msg)

    def _compare(self, other):
        """
        Deep equality with another AndCondition.

        Parameters
        ----------
        other: AndCondition

        Returns
        -------
        bool
        """
        return (type(self) == type(other)
                and self._condition_1._compare(other._condition_1)
                and self._condition_2._compare(other._condition_2))

    def __str__(self):
        return "( %s && %s )" % (self._condition_1, self._condition_2)

    def __repr__(self):
        return 'And(%r, %r)' % (self._condition_1, self._condition_2)

    def evaluate(self):
        return all(bool(cond) for cond in (self._condition_1, self._condition_2))

    @property
    def backtrack(self):
        # np.min kept (not builtin min) so the return type stays a numpy scalar.
        backtracks = [self._condition_1.backtrack, self._condition_2.backtrack]
        return np.min(backtracks)

    def requires(self):
        needed = self._condition_1.requires()
        needed.update(self._condition_2.requires())
        return needed
class _CloseCVCondition(ControlCondition):
    """Internal condition: True when a check valve must be closed."""
    Htol = 0.0001524   # head tolerance
    Qtol = 2.83168e-6  # flow tolerance

    def __init__(self, wn, cv):
        self._cv = cv
        self._start_node = wn.get_node(cv.start_node)
        self._end_node = wn.get_node(cv.end_node)
        self._backtrack = 0

    def requires(self):
        return OrderedSet((self._cv, self._start_node, self._end_node))

    def evaluate(self):
        """
        If True is returned, the cv needs to be closed
        """
        # Close on a significant adverse head difference, or on reverse flow.
        head_drop = self._start_node.head - self._end_node.head
        if head_drop < -self.Htol:
            return True
        if self._cv.flow < -self.Qtol:
            return True
        return False

    def __str__(self):
        return '%s head - %s head < -%s or %s flow < %s' % (
            self._start_node.name, self._end_node.name, self.Htol,
            self._cv.name, -self.Qtol)
class _OpenCVCondition(ControlCondition):
    """Internal condition: True when a check valve can be (re)opened."""
    Htol = 0.0001524   # head tolerance
    Qtol = 2.83168e-6  # flow tolerance

    def __init__(self, wn, cv):
        self._cv = cv
        self._start_node = wn.get_node(cv.start_node)
        self._end_node = wn.get_node(cv.end_node)
        self._backtrack = 0

    def requires(self):
        return OrderedSet((self._cv, self._start_node, self._end_node))

    def evaluate(self):
        """
        If True is returned, the cv needs to be opened
        """
        # Open only when the head difference clearly favors forward flow and
        # the flow is not reversed.
        head_drop = self._start_node.head - self._end_node.head
        if head_drop > self.Htol and self._cv.flow >= -self.Qtol:
            return True
        return False

    def __str__(self):
        return '%s head - %s head > %s and %s flow >= %s' % (
            self._start_node.name, self._end_node.name, self.Htol,
            self._cv.name, -self.Qtol)
class _ClosePowerPumpCondition(ControlCondition):
    """
    Prevents reverse flow in pumps.
    """
    Htol = 0.0001524   # head tolerance
    Qtol = 2.83168e-6  # flow tolerance
    Hmax = 1e10        # effectively unbounded head limit for power pumps

    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0

    def requires(self):
        return OrderedSet((self._pump, self._start_node, self._end_node))

    def evaluate(self):
        """
        If True is returned, the pump needs to be closed
        """
        head_gain = self._end_node.head - self._start_node.head
        if head_gain > self.Hmax + self.Htol:
            return True
        return False

    def __str__(self):
        return '%s head - %s head > %.4f' % (self._end_node.name,
                                             self._start_node.name,
                                             self.Hmax + self.Htol)
class _OpenPowerPumpCondition(ControlCondition):
    """Internal condition: True when a power pump can be reopened."""
    Htol = 0.0001524   # head tolerance
    Qtol = 2.83168e-6  # flow tolerance
    Hmax = 1e10        # effectively unbounded head limit for power pumps

    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0

    def requires(self):
        return OrderedSet((self._pump, self._start_node, self._end_node))

    def evaluate(self):
        """
        If True is returned, the pump needs to be opened
        """
        head_gain = self._end_node.head - self._start_node.head
        if head_gain <= self.Hmax + self.Htol:
            return True
        return False

    def __str__(self):
        return '%s head - %s head <= %.4f' % (self._end_node.name,
                                              self._start_node.name,
                                              self.Hmax + self.Htol)
class _CloseHeadPumpCondition(ControlCondition):
    """
    Prevents reverse flow in pumps.
    """
    _Htol = 0.0001524  # head tolerance

    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0
        self._wn = wn

    def requires(self):
        return OrderedSet((self._pump, self._start_node, self._end_node))

    def _shutoff_head(self):
        # Head delivered at zero flow (curve coefficient 'a'); only unit
        # pump speed is supported.
        a, b, c = self._pump.get_head_curve_coefficients()
        if self._pump.speed_timeseries(self._wn.sim_time) != 1.0:
            raise NotImplementedError('Pump speeds other than 1.0 are not yet supported.')
        return a

    def evaluate(self):
        """
        If True is returned, the pump needs to be closed
        """
        head_gain = self._end_node.head - self._start_node.head
        if head_gain > self._shutoff_head() + self._Htol:
            return True
        return False

    def __str__(self):
        return '%s head - %s head > %.4f' % (self._end_node.name,
                                             self._start_node.name,
                                             self._shutoff_head() + self._Htol)
class _OpenHeadPumpCondition(ControlCondition):
    """
    Prevents reverse flow in pumps.
    """
    _Htol = 0.0001524  # head tolerance

    def __init__(self, wn, pump):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        pump: wntr.network.Pump
        """
        self._pump = pump
        self._start_node = wn.get_node(pump.start_node)
        self._end_node = wn.get_node(pump.end_node)
        self._backtrack = 0
        self._wn = wn

    def requires(self):
        return OrderedSet((self._pump, self._start_node, self._end_node))

    def _shutoff_head(self):
        # Head delivered at zero flow (curve coefficient 'a'); only unit
        # pump speed is supported.
        a, b, c = self._pump.get_head_curve_coefficients()
        if self._pump.speed_timeseries(self._wn.sim_time) != 1.0:
            raise NotImplementedError('Pump speeds other than 1.0 are not yet supported.')
        return a

    def evaluate(self):
        """
        If True is returned, the pump needs to be opened
        """
        head_gain = self._end_node.head - self._start_node.head
        if head_gain <= self._shutoff_head() + self._Htol:
            return True
        return False

    def __str__(self):
        return '%s head - %s head <= %.4f' % (self._end_node.name,
                                              self._start_node.name,
                                              self._shutoff_head() + self._Htol)
class _ClosePRVCondition(ControlCondition):
    # Internal simulator condition: True when a pressure reducing valve (PRV)
    # must transition to fully closed (reverse flow through the valve).
    _Qtol = 2.83168e-6  # flow tolerance
    def __init__(self, wn, prv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        prv: wntr.network.Valve
        """
        super(_ClosePRVCondition, self).__init__()
        self._prv = prv
        self._start_node = wn.get_node(self._prv.start_node)
        self._end_node = wn.get_node(self._prv.end_node)
        self._backtrack = 0
    def requires(self):
        # NOTE(review): only the valve is returned here, while the sibling
        # _OpenPRVCondition/_ActivePRVCondition also include both end nodes --
        # confirm this asymmetry is intentional.
        return OrderedSet([self._prv])
    def evaluate(self):
        # Close when an Active or Open valve carries reverse flow beyond the
        # tolerance; a Closed valve stays closed.
        if self._prv._internal_status == LinkStatus.Active:
            if self._prv.flow < -self._Qtol:
                return True
            return False
        elif self._prv._internal_status == LinkStatus.Open:
            if self._prv.flow < -self._Qtol:
                return True
            return False
        elif self._prv._internal_status == LinkStatus.Closed:
            return False
        else:
            raise RuntimeError('Unexpected PRV _internal_status for valve {0}: {1}.'.format(self._prv,
                                                                                            self._prv._internal_status))
    def __str__(self):
        s = 'prv {0} needs to be closed'.format(self._prv.name)
        return s
class _OpenPRVCondition(ControlCondition):
    # Internal simulator condition: True when a pressure reducing valve (PRV)
    # should transition to fully open.
    _Qtol = 2.83168e-6  # flow tolerance
    _Htol = 0.0001524  # head tolerance
    def __init__(self, wn, prv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        prv: wntr.network.Valve
        """
        super(_OpenPRVCondition, self).__init__()
        self._prv = prv
        self._start_node = wn.get_node(self._prv.start_node)
        self._end_node = wn.get_node(self._prv.end_node)
        self._backtrack = 0
        # Resistance coefficient used to estimate head loss across the open
        # valve from flow squared (0.0826 * 0.02 / D^4 * 2);
        # NOTE(review): presumably an SI minor-loss form -- confirm against
        # the hydraulic model before changing.
        self._r = 0.0826 * 0.02 * self._prv.diameter ** (-4) * 2.0
    def requires(self):
        return OrderedSet([self._prv, self._start_node, self._end_node])
    def evaluate(self):
        if self._prv._internal_status == LinkStatus.Active:
            # Active -> Open when the upstream head cannot sustain the setting
            # (accounting for the open-valve head loss), unless flow reversed.
            if self._prv.flow < -self._Qtol:
                return False
            elif self._start_node.head < self._prv.setting + self._end_node.elevation + self._r * abs(self._prv.flow)**2 - self._Htol:
                return True
            return False
        elif self._prv._internal_status == LinkStatus.Open:
            return False
        elif self._prv._internal_status == LinkStatus.Closed:
            # Closed -> Open only when the upstream head is below the setting
            # but still above the downstream head (forward-flow potential).
            if self._start_node.head >= self._prv.setting + self._end_node.elevation + self._Htol and self._end_node.head < self._prv.setting + self._end_node.elevation - self._Htol:
                return False
            elif self._start_node.head < self._prv.setting + self._end_node.elevation - self._Htol and self._start_node.head > self._end_node.head + self._Htol:
                return True
            return False
        else:
            raise RuntimeError('Unexpected PRV _internal_status for valve {0}: {1}.'.format(self._prv,
                                                                                            self._prv._internal_status))
    def __str__(self):
        s = 'prv {0} needs to be open'.format(self._prv.name)
        return s
class _ActivePRVCondition(ControlCondition):
    # Internal simulator condition: True when a pressure reducing valve (PRV)
    # should transition to Active (regulating) status.
    _Qtol = 2.83168e-6  # flow tolerance
    _Htol = 0.0001524  # head tolerance
    def __init__(self, wn, prv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        prv: wntr.network.Valve
        """
        # NOTE(review): unlike _ClosePRVCondition/_OpenPRVCondition there is
        # no super().__init__() call here -- confirm whether that is intended.
        self._prv = prv
        self._start_node = wn.get_node(self._prv.start_node)
        self._end_node = wn.get_node(self._prv.end_node)
        self._backtrack = 0
        # Resistance coefficient for the open-valve head-loss estimate; same
        # form as in _OpenPRVCondition (unused in evaluate below).
        self._r = 0.0826 * 0.02 * self._prv.diameter ** (-4) * 2.0
    def requires(self):
        return OrderedSet([self._prv, self._start_node, self._end_node])
    def evaluate(self):
        if self._prv._internal_status == LinkStatus.Active:
            return False
        elif self._prv._internal_status == LinkStatus.Open:
            # Open -> Active when the downstream head exceeds the setting and
            # flow has not reversed.
            if self._prv.flow < -self._Qtol:
                return False
            elif (self._end_node.head >= self._prv.setting + self._end_node.elevation + self._Htol):
                return True
            return False
        elif self._prv._internal_status == LinkStatus.Closed:
            # Closed -> Active when the upstream head can sustain the setting
            # and the downstream head is below it.
            if ((self._start_node.head >= self._prv.setting + self._end_node.elevation + self._Htol) and
                    (self._end_node.head < self._prv.setting + self._end_node.elevation - self._Htol)):
                return True
            return False
        else:
            raise RuntimeError('Unexpected PRV _internal_status for valve {0}: {1}.'.format(self._prv,
                                                                                            self._prv._internal_status))
    def __str__(self):
        s = 'prv {0} needs to be active'.format(self._prv.name)
        return s
class _OpenFCVCondition(ControlCondition):
    # Internal simulator condition: True when a flow control valve (FCV)
    # should be treated as fully open.
    # NOTE(review): unlike the PRV conditions above, no __str__ is defined.
    _Qtol = 2.83168e-6  # flow tolerance
    _Htol = 0.0001524  # head tolerance
    def __init__(self, wn, fcv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        fcv: wntr.network.Valve
        """
        self._fcv = fcv
        self._start_node = wn.get_node(self._fcv.start_node)
        self._end_node = wn.get_node(self._fcv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._fcv, self._start_node, self._end_node])
    def evaluate(self):
        # Open when the head difference across the valve is adverse beyond
        # tolerance, or the flow has reversed.
        if self._start_node.head - self._end_node.head < -self._Htol:
            return True
        elif self._fcv.flow < -self._Qtol:
            return True
        else:
            return False
class _ActiveFCVCondition(ControlCondition):
    # Internal simulator condition: True when a flow control valve (FCV)
    # should transition to Active (flow-limiting) status.
    _Qtol = 2.83168e-6  # flow tolerance
    _Htol = 0.0001524  # head tolerance
    def __init__(self, wn, fcv):
        """
        Parameters
        ----------
        wn: wntr.network.WaterNetworkModel
        fcv: wntr.network.Valve
        """
        self._fcv = fcv
        self._start_node = wn.get_node(self._fcv.start_node)
        self._end_node = wn.get_node(self._fcv.end_node)
        self._backtrack = 0
    def requires(self):
        return OrderedSet([self._fcv, self._start_node, self._end_node])
    def evaluate(self):
        # Become active only when the valve is currently Open, the head
        # difference and flow direction are normal, and the flow has reached
        # the valve's setting.
        if self._start_node.head - self._end_node.head < -self._Htol:
            return False
        elif self._fcv.flow < -self._Qtol:
            return False
        elif self._fcv._internal_status == LinkStatus.Open and self._fcv.flow >= self._fcv.setting:
            return True
        else:
            return False
class BaseControlAction(six.with_metaclass(abc.ABCMeta, Subject)):
    """
    A base class for deriving new control actions. The control action is run by calling run_control_action.
    This class is not meant to be used directly. Derived classes must implement the run_control_action, requires,
    and target methods.
    """
    def __init__(self):
        super(BaseControlAction, self).__init__()
    @abc.abstractmethod
    def run_control_action(self):
        """
        This method is called to run the corresponding control action.
        """
        pass
    @abc.abstractmethod
    def requires(self):
        """
        Returns a set of objects used to evaluate the control

        Returns
        -------
        req: OrderedSet
            The objects required to run the control action.
        """
        pass
    @abc.abstractmethod
    def target(self):
        """
        Returns a tuple (object, attribute) containing the object and attribute that the control action may change

        Returns
        -------
        target: tuple
            A tuple containing the target object and the attribute to be changed (target, attr).
        """
        pass
    def _compare(self, other):
        """
        Deep equality check with another control action.

        NOTE: relies on subclasses storing the new value in ``self._value``
        (both ControlAction and _InternalControlAction below do).

        Parameters
        ----------
        other: BaseControlAction

        Returns
        -------
        bool
        """
        if type(self) != type(other):
            return False
        target1, attr1 = self.target()
        target2, attr2 = other.target()
        val1 = self._value
        val2 = other._value
        if not target1._compare(target2):
            return False
        if attr1 != attr2:
            return False
        if type(val1) == float:
            # floats are compared with an absolute tolerance instead of ==
            if abs(val1 - val2) > 1e-10:
                return False
        else:
            if val1 != val2:
                return False
        return True
@DocInheritor({'requires', 'target', 'run_control_action'})
class ControlAction(BaseControlAction):
    """
    A general class for specifying a control action that simply modifies the attribute of an object (target).

    Parameters
    ----------
    target_obj : object
        The object whose attribute will be changed when the control runs.
    attribute : string
        The attribute that will be changed on the target_obj when the control runs.
    value : any
        The new value for target_obj.attribute when the control runs.
    """
    def __init__(self, target_obj, attribute, value):
        super(ControlAction, self).__init__()
        # Validate eagerly so bad actions fail at construction, not at run time.
        if target_obj is None:
            raise ValueError('target_obj is None in ControlAction::__init__. A valid target_obj is needed.')
        if not hasattr(target_obj, attribute):
            raise ValueError('attribute given in ControlAction::__init__ is not valid for target_obj')
        # Attribute names are shared with BaseControlAction._compare (self._value).
        self._target_obj = target_obj
        self._attribute = attribute
        self._value = value

    def requires(self):
        return OrderedSet((self._target_obj,))

    def __repr__(self):
        return '<ControlAction: %s, %s, %s>' % (self._target_obj, self._attribute, self._repr_value())

    def __str__(self):
        return "set %s('%s').%s to %s" % (type(self._target_obj).__name__,
                                          self._target_obj.name,
                                          self._attribute,
                                          self._repr_value())

    def _repr_value(self):
        # Status values are stored numerically; display the symbolic name.
        if self._attribute.lower() == 'status':
            return LinkStatus(int(self._value)).name
        return self._value

    def run_control_action(self):
        setattr(self._target_obj, self._attribute, self._value)
        self.notify()  # let observers (e.g. ControlManager) record the change

    def target(self):
        return self._target_obj, self._attribute
class _InternalControlAction(BaseControlAction):
    """
    A control action class that modifies a private attribute in order to change a property on an object. For example,
    a valve has a status property, but the control action must act on the _internal_status.

    Parameters
    ----------
    target_obj: object
        The object for which an attribute is being changed.
    internal_attribute: str
        The attribute being modified (e.g., _internal_status)
    value: any
        The new value for the internal_attribute
    property_attribute: str
        The attribute to be checked for an actual change (e.g., status)
    """
    def __init__(self, target_obj, internal_attribute, value, property_attribute):
        super(_InternalControlAction, self).__init__()
        # Validate both the written attribute and the observed attribute.
        for attr in (internal_attribute, property_attribute):
            if not hasattr(target_obj, attr):
                raise AttributeError('{0} does not have attribute {1}'.format(target_obj, attr))
        # Attribute name _value is shared with BaseControlAction._compare.
        self._target_obj = target_obj
        self._internal_attr = internal_attribute
        self._value = value
        self._property_attr = property_attribute

    def requires(self):
        """
        Return a list of objects required by the control action.

        Returns
        -------
        required_objects: list of object
        """
        return OrderedSet((self._target_obj,))

    def run_control_action(self):
        """
        Activate the control action.
        """
        if self._target_obj is None:
            raise ValueError('target is None inside _InternalControlAction::RunControlAction.'
                             'This may be because a target_obj was added, but later the object itself was deleted.')
        setattr(self._target_obj, self._internal_attr, self._value)
        self.notify()  # let observers (e.g. ControlManager) record the change

    def target(self):
        """
        Returns a tuple containing the target object and the attribute to check for modification.

        Returns
        -------
        target: tuple
        """
        return self._target_obj, self._property_attr

    def __repr__(self):
        return '<_InternalControlAction: %s, %s, %s>' % (self._target_obj, self._internal_attr, self._value)

    def __str__(self):
        return "set %s('%s').%s to %s" % (type(self._target_obj).__name__,
                                          self._target_obj.name,
                                          self._internal_attr,
                                          self._value)
#
# Control classes
#
class ControlBase(six.with_metaclass(abc.ABCMeta, object)):
    """
    This is the base class for all control objects. Control objects are used to check the conditions under which a
    ControlAction should be run. For example, if a pump is supposed to be turned on when the simulation time
    reaches 6 AM, the ControlAction would be "turn the pump on", and the ControlCondition would be "when the simulation
    reaches 6 AM".

    Subclasses are expected to set ``_condition``, ``_priority`` and
    ``_control_type`` (see Rule/Control below).
    """
    @abc.abstractmethod
    def is_control_action_required(self):
        """
        This method is called to see if any action is required by this control object. This method returns a tuple
        that indicates if action is required (a bool) and a recommended time for the simulation to backup (in seconds
        as a positive int).

        Returns
        -------
        req: tuple
            A tuple (bool, int) indicating if an action should be run and how far to back up the simulation.
        """
        pass
    @abc.abstractmethod
    def run_control_action(self):
        """
        This method is called to run the control action after a call to IsControlActionRequired indicates that an
        action is required.
        """
        pass
    @abc.abstractmethod
    def requires(self):
        """
        Returns a set of objects required for this control.

        Returns
        -------
        required_objects: OrderedSet of object
        """
        return OrderedSet()
    @abc.abstractmethod
    def actions(self):
        """
        Returns a list of all actions used by this control.

        Returns
        -------
        act: list of BaseControlAction
        """
        pass
    def _control_type_str(self):
        # Rules and simple controls print with different keywords.
        if self._control_type is _ControlType.rule:
            return 'Rule'
        else:
            return 'Control'
    @property
    def condition(self):
        """ControlCondition: the condition guarding this control's actions."""
        return self._condition
    @property
    def priority(self):
        """ControlPriority: the priority of this control."""
        return self._priority
    def _compare(self, other):
        """
        Deep equality check with another control.

        Parameters
        ----------
        other: ControlBase

        Returns
        -------
        bool
        """
        reasons = []
        if self.priority != other.priority:
            reasons.append('priorities were not equal')
        if self._control_type_str() != other._control_type_str():
            reasons.append('_control_type_strs were not equal')
        if not self.condition._compare(other.condition):
            reasons.append('conditions were not equal')
        for action1, action2 in zip(self.actions(), other.actions()):
            if not action1._compare(action2):
                reasons.append('actions were not equal')
                break
        if reasons:
            # Diagnostic output belongs on the logger, not stdout (the
            # original implementation used print()).
            logger.debug('_compare failed in ControlBase because %s', '; '.join(reasons))
            return False
        return True
@DocInheritor({'is_control_action_required', 'run_control_action', 'requires', 'actions'})
class Rule(ControlBase):
    """
    A very general and flexible class for defining both controls and rules.
    """
    def __init__(self, condition, then_actions, else_actions=None, priority=ControlPriority.medium, name=None):
        """
        Parameters
        ----------
        condition: ControlCondition
            The condition that should be used to determine when the actions need to be activated. When the condition
            evaluates to True, the then_actions are activated. When the condition evaluates to False, the else_actions
            are activated.
        then_actions: Iterable of ControlAction
            The actions that should be activated when the condition evaluates to True.
        else_actions: Iterable of ControlAction
            The actions that should be activated when the condition evaluates to False.
        priority: ControlPriority
            The priority of the control. Default is ControlPriority.medium
        name: str
            The name of the control
        """
        if not isinstance(condition, ControlCondition):
            raise ValueError('The conditions argument must be a ControlCondition instance')
        self._condition = condition
        # Normalize then/else actions to lists; a single action or None is accepted.
        if isinstance(then_actions, Iterable):
            self._then_actions = list(then_actions)
        elif then_actions is not None:
            self._then_actions = [then_actions]
        else:
            self._then_actions = []
        if isinstance(else_actions, Iterable):
            self._else_actions = list(else_actions)
        elif else_actions is not None:
            self._else_actions = [else_actions]
        else:
            self._else_actions = []
        self._which = None  # set by is_control_action_required: 'then' or 'else'
        self._priority = priority
        self._name = name
        if self._name is None:
            self._name = ''
        self._control_type = _ControlType.rule
        # Exact-equality conditions are unlikely to fire because rules are
        # only evaluated at discrete timesteps.
        if isinstance(condition, (TimeOfDayCondition, SimTimeCondition, ValueCondition, TankLevelCondition,
                                  RelativeCondition)):
            if condition._relation is Comparison.eq:
                logger.warning('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
                warnings.warn('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
    @property
    def epanet_control_type(self):
        """
        The control type. Note that presolve and postsolve controls are both simple controls in Epanet.

        Returns
        -------
        control_type: _ControlType
        """
        return self._control_type
    def requires(self):
        # Union of everything required by the condition and by all actions.
        req = self._condition.requires()
        for action in self._then_actions:
            req.update(action.requires())
        for action in self._else_actions:
            req.update(action.requires())
        return req
    def actions(self):
        return self._then_actions + self._else_actions
    @property
    def name(self):
        """
        A string representation of the Control.
        """
        # __init__ replaces None with '', so the fallback below is only
        # reached if _name is later set to None explicitly.
        if self._name is not None:
            return self._name
        else:
            return '/'.join(str(self).split())
    def __repr__(self):
        fmt = "<Control: '{}', {}, {}, {}, priority={}>"
        return fmt.format(self._name, repr(self._condition), repr(self._then_actions), repr(self._else_actions), self._priority)
    def __str__(self):
        # Build an EPANET-like rule string:
        #   "Rule <name> := if <cond> then <a1> and <a2> else ... with priority <p>"
        text = '{} {} := if {}'.format(self._control_type_str(), self._name, self._condition)
        if self._then_actions is not None and len(self._then_actions) > 0:
            then_text = ' then '
            for ct, act in enumerate(self._then_actions):
                if ct == 0:
                    then_text += str(act)
                else:
                    then_text += ' and {}'.format(str(act))
            text += then_text
        if self._else_actions is not None and len(self._else_actions) > 0:
            else_text = ' else '
            for ct, act in enumerate(self._else_actions):
                if ct == 0:
                    else_text += str(act)
                else:
                    else_text += ' and {}'.format(str(act))
            text += else_text
        if self._priority is not None and self._priority >= 0:
            text += ' with priority {}'.format(self._priority)
        return text
    def is_control_action_required(self):
        # Evaluate the condition once; remember which branch to run so that
        # run_control_action does not re-evaluate it.
        do = self._condition.evaluate()
        back = self._condition.backtrack
        if do:
            self._which = 'then'
            return True, back
        elif not do and self._else_actions is not None and len(self._else_actions) > 0:
            self._which = 'else'
            return True, back
        else:
            return False, None
    def run_control_action(self):
        if self._which == 'then':
            for control_action in self._then_actions:
                control_action.run_control_action()
        elif self._which == 'else':
            for control_action in self._else_actions:
                control_action.run_control_action()
        else:
            # run_control_action must only be called after
            # is_control_action_required returned True.
            raise RuntimeError('control actions called even though if-then statement was False')
class Control(Rule):
    """
    A class for simple controls: a single condition with a single 'then'
    action and no 'else' actions.
    """
    def __init__(self, condition, then_action, priority=ControlPriority.medium, name=None):
        """
        Parameters
        ----------
        condition: ControlCondition
            The condition that should be used to determine when the action needs to be activated. When the condition
            evaluates to True, the then_action is activated.
        then_action: ControlAction
            The action that should be activated when the condition evaluates to True.
        priority: ControlPriority
            The priority of the control. Default is ControlPriority.medium
        name: str
            The name of the control
        """
        # Exact-equality conditions rarely fire at discrete timesteps.
        if isinstance(condition, (ValueCondition, TankLevelCondition, RelativeCondition)):
            if condition._relation is Comparison.eq:
                logger.warning('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
                warnings.warn('Using Comparison.eq with {0} will probably not work!'.format(type(condition)))
        self._condition = condition
        self._then_actions = [then_action]
        self._else_actions = []
        self._which = None
        self._priority = priority
        self._name = name
        if self._name is None:
            self._name = ''
        # Classify for the simulator: tank-level conditions are checked both
        # before and after each solve; time conditions before; others after.
        if isinstance(condition, TankLevelCondition):
            self._control_type = _ControlType.pre_and_postsolve
        elif isinstance(condition, (TimeOfDayCondition, SimTimeCondition)):
            self._control_type = _ControlType.presolve
        else:
            self._control_type = _ControlType.postsolve
    @classmethod
    def _time_control(cls, wnm, run_at_time, time_flag, daily_flag, control_action, name=None):
        """
        This is a class method for creating simple time controls.

        Parameters
        ----------
        wnm: wntr.network.WaterNetworkModel
            The WaterNetworkModel instance this control will be added to.
        run_at_time: int
            The time to activate the control action.
        time_flag: str
            Options are 'SIM_TIME' and 'CLOCK_TIME'. SIM_TIME indicates that run_at_time is the time since the start
            of the simulation. CLOCK_TIME indicates that run_at_time is the time of day.
        daily_flag: bool
            If True, then the control will repeat every day.
        control_action: BaseControlAction
            The control action that should occur at run_at_time.
        name: str
            An optional name for the control.

        Returns
        -------
        ctrl: Control
        """
        if time_flag.upper() == 'SIM_TIME':
            condition = SimTimeCondition(model=wnm, relation=Comparison.eq, threshold=run_at_time, repeat=daily_flag,
                                         first_time=0)
        elif time_flag.upper() == 'CLOCK_TIME':
            condition = TimeOfDayCondition(model=wnm, relation=Comparison.eq, threshold=run_at_time, repeat=daily_flag,
                                           first_day=0)
        else:
            raise ValueError("time_flag not recognized; expected either 'sim_time' or 'clock_time'")
        # Pass the name through (it was previously accepted but silently
        # dropped); cls is used so subclasses construct their own type.
        control = cls(condition=condition, then_action=control_action, name=name)
        return control
    @classmethod
    def _conditional_control(cls, source_obj, source_attr, operation, threshold, control_action, name=None):
        """
        This is a class method for creating simple conditional controls.

        Parameters
        ----------
        source_obj: object
            The object whose source_attr attribute will be compared to threshold to determine if control_action
            needs activated.
        source_attr: str
            The attribute of source_obj to compare to threshold.
        operation: Comparison
            The comparison function used to compare the source_attr attribute of source_obj to threshold.
        threshold: any
            The threshold used in the comparison.
        control_action: ControlAction
            The control action that should occur when operation(getattr(source_obj, source_attr), threshold) is True.
        name: str
            An optional name for the control

        Returns
        -------
        ctrl: Control
        """
        condition = ValueCondition(source_obj=source_obj, source_attr=source_attr, relation=operation,
                                   threshold=threshold)
        # Pass the name through (it was previously accepted but silently dropped).
        control = cls(condition=condition, then_action=control_action, name=name)
        return control
class ControlManager(Observer):
    """
    Tracks registered controls and records which (object, attribute) pairs
    their actions have changed since the last reset.
    """
    def __init__(self):
        self._controls = OrderedSet()          # registered ControlBase objects
        self._previous_values = OrderedDict()  # {(obj, attr): baseline value}
        self._changed = OrderedSet()           # (obj, attr) pairs differing from baseline

    def __iter__(self):
        return iter(self._controls)

    def update(self, subject):
        """
        The update method gets called when a subject (control action) is activated.

        Parameters
        ----------
        subject: BaseControlAction
        """
        key = subject.target()
        # A pair counts as changed only while its current value differs from
        # the recorded baseline; reverting removes it from the changed set.
        if getattr(*key) == self._previous_values[key]:
            self._changed.discard(key)
        else:
            self._changed.add(key)

    def register_control(self, control):
        """
        Register a control with the ControlManager

        Parameters
        ----------
        control: ControlBase
        """
        self._controls.add(control)
        for action in control.actions():
            action.subscribe(self)
            key = action.target()
            self._previous_values[key] = getattr(*key)

    def reset(self):
        """
        Reset the baseline values. This should be called before activating any
        control actions so that changes made by the actions can be tracked.
        """
        self._changed = OrderedSet()
        self._previous_values = OrderedDict()
        for control in self._controls:
            for action in control.actions():
                key = action.target()
                self._previous_values[key] = getattr(*key)

    def changes_made(self):
        """
        Specifies if changes were made.

        Returns
        -------
        changes: bool
        """
        return bool(self._changed)

    def get_changes(self):
        """
        A generator for iterating over the (object, attribute) pairs that were changed.

        Returns
        -------
        changes: tuple
            (object, attr)
        """
        for pair in self._changed:
            yield pair

    def deregister(self, control):
        """
        Deregister a control with the ControlManager

        Parameters
        ----------
        control: ControlBase
        """
        self._controls.remove(control)
        for action in control.actions():
            action.unsubscribe(self)
            key = action.target()
            self._previous_values.pop(key)
            self._changed.discard(key)

    def check(self):
        """
        Check which controls have actions that need activated.

        Returns
        -------
        controls_to_run: list of tuple
            The tuple is (ControlBase, backtrack)
        """
        controls_to_run = []
        for control in self._controls:
            needed, backtrack = control.is_control_action_required()
            if needed:
                controls_to_run.append((control, backtrack))
        return controls_to_run
|
en
| 0.79994
|
The wntr.network.controls module includes methods to define network controls and control actions. These controls modify parameters in the network during simulation. .. rubric:: Contents .. autosummary:: Subject Observer Comparison ControlPriority ControlCondition TimeOfDayCondition SimTimeCondition ValueCondition TankLevelCondition RelativeCondition OrCondition AndCondition BaseControlAction ControlAction ControlBase Control ControlManager # Control Priorities: # 0 is the lowest # 3 is the highest # # 0: # Open check valves/pumps if flow would be forward # Open links for time controls # Open links for conditional controls # Open links connected to tanks if the tank head is larger than the minimum head plus a tolerance # Open links connected to tanks if the tank head is smaller than the maximum head minus a tolerance # Open pumps if power comes back up # Start/stop leaks # 1: # Close links connected to tanks if the tank head is less than the minimum head (except check valves and pumps than # only allow flow in). # Close links connected to tanks if the tank head is larger than the maximum head (exept check valves and pumps that # only allow flow out). # 2: # Open links connected to tanks if the level is low but flow would be in # Open links connected to tanks if the level is high but flow would be out # Close links connected to tanks if the level is low and flow would be out # Close links connected to tanks if the level is high and flow would be in # 3: # Close links for time controls # Close links for conditional controls # Close check valves/pumps for negative flow # Close pumps without power A subject base class for the observer design pattern Subscribe observer to this subject. The update method of any observers of this subject will be called when notify is called on this subject. Parameters ---------- observer: Observer Unsubscribe observer from this subject. Parameters ---------- observer: Observer Call the update method for all observers of this subject. 
A base class for observers in the observer design pattern. This method is called when the subject being observed calls notify. Parameters ---------- subject: Subject The subject that called notify. An enum class for comparison operators. .. rubric:: Enum Members =========== ============================================== :attr:`~gt` greater than :attr:`~ge` greater than or equal to :attr:`~lt` less than :attr:`~le` less than or equal to :attr:`~eq` equal to :attr:`~ne` not equal to =========== ============================================== The function call to use for this comparison # # Control Condition classes # An enum class for control priorities. .. rubric:: Enum Members ==================== ===================================================== :attr:`~very_low` very low priority :attr:`~low` low priority :attr:`~medium_low` medium low priority :attr:`~medium` medium priority :attr:`~medium_high` medium high priority :attr:`~high` high priority :attr:`~very_high` very high priority ==================== ===================================================== A base class for control conditions Returns a set of objects required to evaluate this condition Returns ------- required_objects: OrderedSet of object Returns the string representation of the condition. Returns ------- name: str The amount of time by which the simulation should be backed up. Should be updated by the :class:`~wntr.network.controls.ControlCondition.evaluate` method if appropriate. Returns ------- backtrack: int Check if the condition is satisfied. Returns ------- check: bool Check if the condition is satisfied. Returns ------- check: bool Time-of-day or "clocktime" based condition statement. Resets automatically at 12 AM in clock time (shifted time) every day simulated. Evaluated from 12 AM the first day of the simulation, even if this is prior to simulation start. 
Unlike the :class:`~wntr.network.controls.SimTimeCondition`, greater-than and less-than relationships make sense, and reset at midnight. Parameters ---------- model : WaterNetworkModel The model that the time is being compared against relation : str or None String options are 'at', 'after' or 'before'. The 'at' and None are equivalent, and only evaluate as True during the simulation step the time occurs. `after` evaluates as True from the time specified until midnight, `before` evaluates as True from midnight until the specified time. threshold : float or str The time (a ``float`` in decimal hours since 12 AM) used in the condition; if provided as a string in 'hh:mm[:ss] [am|pm]' format, the time will be parsed from the string repeat : bool, optional True by default; if False, allows for a single, timed trigger, and probably needs an entry for `first_day`; in this case a relation of `after` becomes True from the time until the end of the simulation, and `before` is True from the beginning of the simulation until the time specified. first_day : float, default=0 Start rule on day `first_day`, with the first day of simulation as day 0 Parameters ---------- other: TimeOfDayCondition Returns ------- bool Condition based on time since start of the simulation. Generally, the relation should be ``None`` (converted to "at") -- then it is *only* evaluated "at" specific times. Using greater-than or less-than type relationships should be reserved for complex, multi-condition statements and should not be used for simple controls. If ``repeat`` is used, the relationship will automatically be changed to an "at time" evaluation, and a warning will be raised. Parameters ---------- model : WaterNetworkModel The model that the time threshold is being compared against relation : str or None String options are 'at', 'after' or 'before'. The 'at' and None are equivalent, and only evaluate as True during the simulation step the time occurs. 
After evaluates as True from the time specified until the end of simulation, before evaluates as True from start of simulation until the specified time. threshold : float or str The time (a ``float`` in decimal hours) used in the condition; if provided as a string in '[dd-]hh:mm[:ss]' format, then the time will be parsed from the string; repeat : bool or float, default=False If True, then repeat every 24-hours; if non-zero float, reset the condition every `repeat` seconds after the first_time. first_time : float, default=0 Start rule at `first_time`, using that time as 0 for the condition evaluation Parameters ---------- other: SimTimeCondition Returns ------- bool Compare a network element attribute to a set value. Parameters ---------- source_obj : object The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison source_attr : str The attribute of the object (such as level, pressure, setting, etc.) to compare against the threshold operation : function or str A two-parameter comparison function (e.g., numpy.greater, numpy.less_equal), or a string describing the comparison (e.g., '=', 'below', 'is', '>=', etc.) Words, such as 'below', are only accepted from the EPANET rules conditions list (see ...) threshold : float A value to compare the source object attribute against Parameters ---------- other: ValueCondition Returns ------- bool A special type of ValueCondition for tank levels/heads/pressures. # this is used to see if backtracking is needed Parameters ---------- other: TankLevelCondition Returns ------- bool # no backtracking is needed unless specified in the if statement below # get the current tank level # what is this doing? # determine if the condition is satisfied # if the condition is satisfied and the last value did not satisfy the condition, then backtracking # is needed. # The math.floor is not actually needed, but I leave it here for clarity. 
We want the backtrack value to be # slightly lower than what the floating point computation would give. This ensures the next time step will # be slightly later than when the tank level hits the threshold. This ensures the tank level will go # slightly beyond the threshold. This ensures that relation(self._last_value, thresh_value) will be True # next time. This prevents us from computing very small backtrack values over and over. # update the last value Compare attributes of two different objects (e.g., levels from tanks 1 and 2) This type of condition does not work with the EpanetSimulator, only the WNTRSimulator. Parameters ---------- source_obj : object The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison source_attr : str The attribute of the object (such as level, pressure, setting, etc.) to compare against the threshold relation : function A numpy or other comparison method that takes two values and returns a bool (e.g., numpy.greater, numpy.less_equal) threshold_obj : object The object (such as a Junction, Tank, Pipe, etc.) to use in the comparison of attributes threshold_attr : str The attribute to used in the comparison evaluation Parameters ---------- other: RelativeCondition Returns ------- bool Combine two WNTR Conditions with an OR. Parameters ---------- cond1 : ControlCondition The first condition cond2 : ControlCondition The second condition Parameters ---------- other: OrCondition Returns ------- bool Combine two WNTR Conditions with an AND Parameters ---------- cond1 : ControlCondition The first condition cond2 : ControlCondition The second condition Parameters ---------- other: OrCondition Returns ------- bool If True is returned, the cv needs to be closed If True is returned, the cv needs to be closed Prevents reverse flow in pumps. 
Parameters ---------- wn: wntr.network.WaterNetworkModel pump: wntr.network.Pump If True is returned, the pump needs to be closed Parameters ---------- wn: wntr.network.WaterNetworkModel pump: wntr.network.Pump If True is returned, the pump needs to be opened Prevents reverse flow in pumps. Parameters ---------- wn: wntr.network.WaterNetworkModel pump: wntr.network.Pump If True is returned, the pump needs to be closed Prevents reverse flow in pumps. Parameters ---------- wn: wntr.network.WaterNetworkModel pump: wntr.network.Pump If True is returned, the pump needs to be closed Parameters ---------- wn: wntr.network.WaterNetworkModel prv: wntr.network.Valve Parameters ---------- wn: wntr.network.WaterNetworkModel prv: wntr.network.Valve Parameters ---------- wn: wntr.network.WaterNetworkModel prv: wntr.network.Valve Parameters ---------- wn: wntr.network.WaterNetworkModel fcv: wntr.network.Valve Parameters ---------- wn: wntr.network.WaterNetworkModel fcv: wntr.network.Valve A base class for deriving new control actions. The control action is run by calling run_control_action. This class is not meant to be used directly. Derived classes must implement the run_control_action, requires, and target methods. This method is called to run the corresponding control action. Returns a set of objects used to evaluate the control Returns ------- req: OrderedSet The objects required to run the control action. Returns a tuple (object, attribute) containing the object and attribute that the control action may change Returns ------- target: tuple A tuple containing the target object and the attribute to be changed (target, attr). Parameters ---------- other: BaseControlAction Returns ------- bool A general class for specifying a control action that simply modifies the attribute of an object (target). Parameters ---------- target_obj : object The object whose attribute will be changed when the control runs. 
attribute : string The attribute that will be changed on the target_obj when the control runs. value : any The new value for target_obj.attribute when the control runs. A control action class that modifies a private attribute in order to change a property on an object. For example, a valve has a status property, but the control action must act on the _internal_status. Parameters ---------- target_obj: object The object for which an attribute is being changed. internal_attribute: str The attribute being modified (e.g., _internal_stats) value: any The new value for the internal_attribute property_attribute: str The attribute to be checked for an actual change (e.g., status) Return a list of objects required by the control action. Returns ------- required_objects: list of object Activate the control action. Returns a tuple containing the target object and the attribute to check for modification. Returns ------- target: tuple # # Control classes # This is the base class for all control objects. Control objects are used to check the conditions under which a ControlAction should be run. For example, if a pump is supposed to be turned on when the simulation time reaches 6 AM, the ControlAction would be "turn the pump on", and the ControlCondition would be "when the simulation reaches 6 AM". This method is called to see if any action is required by this control object. This method returns a tuple that indicates if action is required (a bool) and a recommended time for the simulation to backup (in seconds as a positive int). Returns ------- req: tuple A tuple (bool, int) indicating if an action should be run and how far to back up the simulation. This method is called to run the control action after a call to IsControlActionRequired indicates that an action is required. Returns a set of objects required for this control. Returns ------- required_objects: OrderedSet of object Returns a list of all actions used by this control. 
Returns ------- act: list of BaseControlAction Parameters ---------- other: ControlBase Returns ------- bool A very general and flexible class for defining both controls rules. Parameters ---------- condition: ControlCondition The condition that should be used to determine when the actions need to be activated. When the condition evaluates to True, the then_actions are activated. When the condition evaluates to False, the else_actions are activated. then_actions: Iterable of ControlAction The actions that should be activated when the condition evaluates to True. else_actions: Iterable of ControlAction The actions that should be activated when the condition evaluates to False. priority: ControlPriority The priority of the control. Default is ControlPriority.medium name: str The name of the control The control type. Note that presolve and postsolve controls are both simple controls in Epanet. Returns ------- control_type: _ControlType A string representation of the Control. A class for controls. Parameters ---------- condition: ControlCondition The condition that should be used to determine when the actions need to be activated. When the condition evaluates to True, the then_actions are activated. When the condition evaluates to False, the else_actions are activated. then_action: ControlAction The action that should be activated when the condition evaluates to True. priority: ControlPriority The priority of the control. Default is ControlPriority.medium name: str The name of the control # if isinstance(condition, (TimeOfDayCondition, SimTimeCondition)): # if condition._relation is not Comparison.eq: # raise ValueError('SimTimeConditions and TimeOfDayConditions used with Control must have a relation of ' # 'Comparison.eq. Otherwise use Rule.') This is a class method for creating simple time controls. Parameters ---------- wnm: wntr.network.WaterNetworkModel The WaterNetworkModel instance this control will be added to. 
run_at_time: int The time to activate the control action. time_flag: str Options are 'SIM_TIME' and 'CLOCK_TIME'. SIM_TIME indicates that run_at_time is the time since the start of the simulation. CLOCK_TIME indicates that run_at_time is the time of day. daily_flag: bool If True, then the control will repeat every day. control_action: BaseControlAction The control action that should occur at run_at_time. name: str An optional name for the control. Returns ------- ctrl: Control This is a class method for creating simple conditional controls. Parameters ---------- source_obj: object The object whose source_attr attribute will be compared to threshold to determine if control_action needs activated. source_attr: str The attribute of source_obj to compare to threshold. operation: Comparison The comparison function used to compare the source_attr attribute of source_obj to threshold. threshold: any The threshold used in the comparison. control_action: ControlAction The control action that should occur when operation(getattr(source_obj, source_attr), threshold) is True. name: str An optional name for the control Returns ------- ctrl: Control A class for managing controls and identifying changes made by those controls. OrderedSet of ControlBase # {(obj, attr): value} # set of (obj, attr) that has been changed from _previous_values The update method gets called when a subject (control action) is activated. Parameters ----------- subject: BaseControlAction Register a control with the ControlManager Parameters ---------- control: ControlBase Reset the _previous_values. This should be called before activating any control actions so that changes made by the control actions can be tracked. Specifies if changes were made. Returns ------- changes: bool A generator for iterating over the objects, attributes that were changed. 
Returns ------- changes: tuple (object, attr) Deregister a control with the ControlManager Parameters ---------- control: ControlBase Check which controls have actions that need activated. Returns ------- controls_to_run: list of tuple The tuple is (ControlBase, backtrack)
| 2.870396
| 3
|
exercicios/ex025.py
|
Siqueira-Vinicius/Python
| 0
|
6626139
|
# Ask for a full name and report whether it contains 'SILVA'.
print('É da familia Silva?')
# input() already returns a str, so the original str(...) wrapper was redundant.
nome = input('Qual é o seu nome completo?: ')
# Normalize: drop surrounding whitespace and upper-case before searching.
silva = 'SILVA' in nome.strip().upper()
print('Você pertence a familia Silva?')
print('R:{}'.format(silva))
|
# Ask for a full name and report whether it contains 'SILVA'.
print('É da familia Silva?')
nome = str(input('Qual é o seu nome completo?: '))
# Normalized form: whitespace trimmed, upper-cased for the substring test.
normalizado = nome.strip().upper()
silva = 'SILVA' in normalizado
print('Você pertence a familia Silva?')
print('R:{}'.format(silva))
|
none
| 1
| 4.067384
| 4
|
|
tests/tools/emr/test_terminate_idle_clusters.py
|
timgates42/mrjob
| 4
|
6626140
|
# Copyright 2009-2012 Yelp
# Copyright 2013 Lyft
# Copyright 2014 <NAME>
# Copyright 2015-2018 Yelp
# Copyright 2019 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the idle cluster terminator"""
import sys
from datetime import timedelta
from mrjob.aws import _boto3_now
from mrjob.fs.s3 import S3Filesystem
from mrjob.pool import _pool_hash_and_name
from mrjob.py2 import StringIO
from mrjob.tools.emr.terminate_idle_clusters import _maybe_terminate_clusters
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_bootstrapping
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_done
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_running
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_starting
from mrjob.tools.emr.terminate_idle_clusters import _cluster_has_pending_steps
from mrjob.tools.emr.terminate_idle_clusters import _time_last_active
from tests.mock_boto3 import MockBoto3TestCase
from tests.py2 import patch
from tests.sandbox import SandboxedTestCase
class ClusterTerminationTestCase(MockBoto3TestCase):
maxDiff = None
_DEFAULT_STEP_ARGS = ['-mapper', 'my_job.py --mapper',
'-reducer', 'my_job.py --reducer']
    def setUp(self):
        """Initialize the mock boto3 backend, then populate it with the
        fixture clusters used by every test in this case."""
        super(ClusterTerminationTestCase, self).setUp()
        self.create_fake_clusters()
def create_fake_clusters(self):
self.now = _boto3_now().replace(microsecond=0)
self.add_mock_s3_data({'my_bucket': {}})
# create a timestamp the given number of *hours*, *minutes*, etc.
# in the past
def ago(**kwargs):
return self.now - timedelta(**kwargs)
# Build a step object easily
# also make it respond to .args()
def step(jar='/home/hadoop/contrib/streaming/hadoop-streaming.jar',
args=self._DEFAULT_STEP_ARGS,
state='COMPLETED',
created=None,
started=None,
ended=None,
name='Streaming Step',
action_on_failure='TERMINATE_CLUSTER',
**kwargs):
timeline = dict()
if created:
timeline['CreationDateTime'] = created
if started:
timeline['StartDateTime'] = started
if ended:
timeline['EndDateTime'] = ended
return dict(
Config=dict(
ActionOnFailure=action_on_failure,
Args=args,
Jar=jar,
),
Status=dict(
State=state,
Timeline=timeline,
)
)
# empty job
self.add_mock_emr_cluster(
dict(
Id='j-EMPTY',
TerminationProtected=False,
Status=dict(
State='STARTING',
Timeline=dict(
CreationDateTime=ago(hours=10)
),
),
)
)
# job that's bootstrapping
self.add_mock_emr_cluster(dict(
Id='j-BOOTSTRAPPING',
TerminationProtected=False,
Status=dict(
State='BOOTSTRAPPING',
Timeline=dict(
CreationDateTime=ago(hours=10),
),
),
_Steps=[step(created=ago(hours=10), state='PENDING')],
))
# currently running job
self.add_mock_emr_cluster(
dict(
Id='j-CURRENTLY_RUNNING',
TerminationProtected=False,
Status=dict(
State='RUNNING',
Timeline=dict(
CreationDateTime=ago(hours=4, minutes=15),
ReadyDateTime=ago(hours=4, minutes=10)
)
),
_Steps=[step(started=ago(hours=4), state='RUNNING')]
)
)
# finished cluster
self.add_mock_emr_cluster(dict(
Id='j-DONE',
TerminationProtected=False,
Status=dict(
State='TERMINATED',
Timeline=dict(
CreationDateTime=ago(hours=10),
ReadyDateTime=ago(hours=8),
EndDateTime=ago(hours=5),
),
),
_Steps=[step(started=ago(hours=8), ended=ago(hours=6))],
))
# idle cluster
self.add_mock_emr_cluster(dict(
Id='j-DONE_AND_IDLE',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
# idle cluster with 4.x step format. should still be
# recognizable as a streaming step
self.add_mock_emr_cluster(dict(
Id='j-DONE_AND_IDLE_4_X',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2),
jar='command-runner.jar',
args=['hadoop-streaming'] + self._DEFAULT_STEP_ARGS)],
))
# idle cluster with an active lock
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_LOCKED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
self.add_mock_s3_data({
'my_bucket': {
'locks/j-IDLE_AND_LOCKED/2': b'not_you',
},
})
# idle cluster with an expired lock
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_EXPIRED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
self.add_mock_s3_data({
'my_bucket': {
'locks/j-IDLE_AND_EXPIRED/2': b'not_you',
},
}, age=timedelta(minutes=5))
# idle cluster with an expired lock
self.add_mock_emr_cluster(dict(
Id='j-IDLE_BUT_INCOMPLETE_STEPS',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), end_hours_ago=None)],
))
# custom hadoop streaming jar
self.add_mock_emr_cluster(dict(
Id='j-CUSTOM_DONE_AND_IDLE',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(
started=ago(hours=4),
ended=ago(hours=4),
jar=('s3://my_bucket/tmp/somejob/files/'
'oddjob-0.0.3-SNAPSHOT-standalone.jar'),
args=[],
)],
))
# idle cluster, termination protected
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_PROTECTED',
TerminationProtected=True,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
# hadoop debugging without any other steps
self.add_mock_emr_cluster(dict(
Id='j-DEBUG_ONLY',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=3),
ReadyDateTime=ago(hours=2, minutes=55),
),
),
_Steps=[
step(jar='command-runner.jar',
name='Setup Hadoop Debugging',
args=['state-pusher-script'],
started=ago(hours=3),
ended=ago(hours=2))
],
))
# hadoop debugging + actual job
self.add_mock_emr_cluster(dict(
Id='j-HADOOP_DEBUGGING',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=55),
),
),
_Steps=[
step(jar='command-runner.jar',
name='Setup Hadoop Debugging',
args=['state-pusher-script'],
started=ago(hours=5),
ended=ago(hours=4)),
step(started=ago(hours=4), ended=ago(hours=2)),
],
))
# should skip cancelled steps
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_FAILED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[
step(started=ago(hours=4), ended=ago(hours=3), state='FAILED'),
step(state='CANCELLED'),
],
))
# pooled cluster reaching end of full hour
self.add_mock_emr_cluster(dict(
_BootstrapActions=[
dict(Args=[], Name='action 0'),
dict(
Args=['pool-0123456789abcdef0123456789abcdef',
'reflecting'],
Name='master',
),
],
Id='j-POOLED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(minutes=55),
ReadyDateTime=ago(minutes=50),
),
),
Tags=[
dict(Key='__mrjob_pool_name',
Value='reflecting'),
dict(Key='__mrjob_pool_hash',
Value='0123456789abcdef0123456789abcdef'),
],
))
# cluster that has had pending jobs but hasn't run them
self.add_mock_emr_cluster(dict(
Id='j-PENDING_BUT_IDLE',
TerminationProtected=False,
Status=dict(
State='RUNNING',
Timeline=dict(
CreationDateTime=ago(hours=3),
ReadyDateTime=ago(hours=2, minutes=50),
),
),
_Steps=[step(created=ago(hours=3), state='PENDING')],
))
def ids_of_terminated_clusters(self):
return sorted(
str(cluster_id)
for cluster_id, cluster in self.mock_emr_clusters.items()
if cluster_id != 'j-DONE' and
cluster['Status']['State'] in (
'TERMINATING', 'TERMINATED', 'TERMINATED_WITH_ERRORS'))
def maybe_terminate_quietly(self, stdout=None, **kwargs):
if 'conf_paths' not in kwargs:
kwargs['conf_paths'] = []
if 'now' not in kwargs:
kwargs['now'] = self.now
kwargs['cloud_tmp_dir'] = 's3://my_bucket/'
kwargs['cloud_fs_sync_secs'] = 0
kwargs['max_mins_locked'] = 1
# don't print anything out
real_stdout = sys.stdout
sys.stdout = stdout or StringIO()
try:
return _maybe_terminate_clusters(**kwargs)
finally:
sys.stdout = real_stdout
def time_mock_cluster_idle(self, mock_cluster):
if (_is_cluster_starting(mock_cluster) or
_is_cluster_bootstrapping(mock_cluster) or
_is_cluster_running(mock_cluster['_Steps']) or
_is_cluster_done(mock_cluster)):
return timedelta(0)
else:
return self.now - _time_last_active(
mock_cluster, mock_cluster['_Steps'])
def assert_mock_cluster_is(
self, mock_cluster,
starting=False,
bootstrapping=False,
done=False,
has_pending_steps=False,
idle_for=timedelta(0),
pool_hash=None,
pool_name=None,
running=False):
self.assertEqual(starting,
_is_cluster_starting(mock_cluster))
self.assertEqual(bootstrapping,
_is_cluster_bootstrapping(mock_cluster))
self.assertEqual(done,
_is_cluster_done(mock_cluster))
self.assertEqual(has_pending_steps,
_cluster_has_pending_steps(mock_cluster['_Steps']))
self.assertEqual(idle_for,
self.time_mock_cluster_idle(mock_cluster))
self.assertEqual((pool_hash, pool_name),
_pool_hash_and_name(mock_cluster))
self.assertEqual(running,
_is_cluster_running(mock_cluster['_Steps']))
def _lock_contents(self, mock_cluster, steps_ahead=0):
fs = S3Filesystem()
contents = b''.join(fs.cat('s3://my_bucket/locks/%s/%d' % (
mock_cluster['Id'], len(mock_cluster['_Steps']) + steps_ahead)))
return contents or None
def assert_locked_by_terminate(self, mock_cluster, steps_ahead=1):
contents = self._lock_contents(mock_cluster, steps_ahead=steps_ahead)
self.assertIsNotNone(contents)
self.assertIn(b'terminate', contents)
def assert_locked_by_something_else(self, mock_cluster, steps_ahead=1):
contents = self._lock_contents(mock_cluster, steps_ahead=steps_ahead)
self.assertIsNotNone(contents)
self.assertNotIn(b'terminate', contents)
def assert_not_locked(self, mock_cluster, steps_ahead=1):
self.assertIsNone(
self._lock_contents(mock_cluster, steps_ahead=steps_ahead))
def assert_terminated_clusters_locked_by_terminate(self):
for cluster_id in self.ids_of_terminated_clusters():
self.assert_locked_by_terminate(self.mock_emr_clusters[cluster_id])
def test_empty(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-EMPTY'],
starting=True,
)
def test_currently_running(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-CURRENTLY_RUNNING'],
running=True,
)
def test_done(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DONE'],
done=True,
)
def test_debug_only(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DEBUG_ONLY'],
idle_for=timedelta(hours=2),
)
def test_done_and_idle(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DONE_AND_IDLE'],
idle_for=timedelta(hours=2),
)
def test_done_and_idle_4_x(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DONE_AND_IDLE_4_X'],
idle_for=timedelta(hours=2),
)
def test_idle_and_expired(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-IDLE_AND_EXPIRED'],
idle_for=timedelta(hours=2),
)
def test_hadoop_debugging_cluster(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-HADOOP_DEBUGGING'],
idle_for=timedelta(hours=2),
)
def test_idle_and_failed(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-IDLE_AND_FAILED'],
idle_for=timedelta(hours=3),
)
def test_pooled(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-POOLED'],
idle_for=timedelta(minutes=50),
pool_hash='0123456789abcdef0123456789abcdef',
pool_name='reflecting',
)
def test_pending_but_idle(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-PENDING_BUT_IDLE'],
has_pending_steps=True,
idle_for=timedelta(hours=2, minutes=50),
)
def test_dry_run_does_nothing(self):
self.maybe_terminate_quietly(max_mins_idle=0.6, dry_run=True)
unlocked_ids = [
'j-BOOTSTRAPPING',
'j-CURRENTLY_RUNNING',
'j-CUSTOM_DONE_AND_IDLE',
'j-IDLE_AND_PROTECTED',
'j-DEBUG_ONLY',
'j-DONE',
'j-DONE_AND_IDLE',
'j-DONE_AND_IDLE_4_X',
'j-EMPTY',
'j-HADOOP_DEBUGGING',
'j-IDLE_AND_FAILED',
'j-IDLE_BUT_INCOMPLETE_STEPS',
'j-PENDING_BUT_IDLE',
'j-POOLED'
]
for cluster_id in unlocked_ids:
self.assert_not_locked(self.mock_emr_clusters[cluster_id])
self.assertEqual(self.ids_of_terminated_clusters(), [])
    def test_increasing_idle_time(self):
        """Lowering max_mins_idle step by step terminates progressively
        younger idle clusters; clusters idle for EXACTLY the cutoff
        survive."""
        self.assertEqual(self.ids_of_terminated_clusters(), [])
        # no clusters are 20 hours old
        self.maybe_terminate_quietly(
            conf_paths=[], max_mins_idle=1200, now=self.now)
        self.assertEqual(self.ids_of_terminated_clusters(), [])
        # terminate 5-hour-old jobs
        self.maybe_terminate_quietly(
            conf_paths=[], max_mins_idle=300, now=self.now)
        # terminate 2-hour-old jobs
        self.maybe_terminate_quietly(
            conf_paths=[], max_mins_idle=120, now=self.now)
        # picky edge case: two jobs are EXACTLY 2 hours old, so they're
        # not over the maximum
        self.assertEqual(self.ids_of_terminated_clusters(),
                         ['j-CUSTOM_DONE_AND_IDLE',
                          'j-IDLE_AND_FAILED',
                          'j-PENDING_BUT_IDLE'])
        # at a 60-minute cutoff, the 2-hour-idle clusters go too
        self.maybe_terminate_quietly(max_mins_idle=60)
        self.assert_terminated_clusters_locked_by_terminate()
        self.assertEqual(self.ids_of_terminated_clusters(),
                         ['j-CUSTOM_DONE_AND_IDLE',
                          'j-DEBUG_ONLY',
                          'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
                          'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
                          'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
def test_one_hour_is_the_default(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly()
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
def test_zero_idle_time(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(max_mins_idle=0)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE',
'j-POOLED'])
def test_terminate_pooled_only(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(pooled_only=True)
self.assert_terminated_clusters_locked_by_terminate()
# pooled job was not idle for an hour (the default)
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(pooled_only=True, max_mins_idle=0.6)
self.assertEqual(self.ids_of_terminated_clusters(), ['j-POOLED'])
def test_terminate_unpooled_only(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(unpooled_only=True)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
self.maybe_terminate_quietly(unpooled_only=True, max_mins_idle=0.6)
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
def test_terminate_by_pool_name(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
# wrong pool name
self.maybe_terminate_quietly(pool_name='default', max_mins_idle=0.6)
self.assertEqual(self.ids_of_terminated_clusters(), [])
# right pool name
self.maybe_terminate_quietly(pool_name='reflecting', max_mins_idle=0.6)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(), ['j-POOLED'])
def test_its_quiet_too_quiet(self):
stdout = StringIO()
self.maybe_terminate_quietly(
stdout=stdout, max_mins_idle=0.6, quiet=True)
self.assertEqual(stdout.getvalue(), '')
EXPECTED_STDOUT_LINES = [
'Terminated cluster j-POOLED (POOLED);'
' was idle for 0:50:00',
'Terminated cluster j-PENDING_BUT_IDLE (PENDING_BUT_IDLE);'
' was pending for 2:50:00',
'Terminated cluster j-DEBUG_ONLY (DEBUG_ONLY);'
' was idle for 2:00:00',
'Terminated cluster j-DONE_AND_IDLE (DONE_AND_IDLE);'
' was idle for 2:00:00',
'Terminated cluster j-DONE_AND_IDLE_4_X (DONE_AND_IDLE_4_X);'
' was idle for 2:00:00',
'Terminated cluster j-IDLE_AND_EXPIRED (IDLE_AND_EXPIRED);'
' was idle for 2:00:00',
'Terminated cluster j-IDLE_AND_FAILED (IDLE_AND_FAILED);'
' was idle for 3:00:00',
'Terminated cluster j-HADOOP_DEBUGGING (HADOOP_DEBUGGING);'
' was idle for 2:00:00',
'Terminated cluster j-CUSTOM_DONE_AND_IDLE (CUSTOM_DONE_AND_IDLE);'
' was idle for 4:00:00',
]
def test_its_not_very_quiet(self):
stdout = StringIO()
self.maybe_terminate_quietly(stdout=stdout, max_mins_idle=0.6)
self.assertEqual(set(stdout.getvalue().splitlines()),
set(self.EXPECTED_STDOUT_LINES))
# should have actually terminated clusters
self.assertEqual(self.ids_of_terminated_clusters(), [
'j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE',
'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING',
'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED',
'j-PENDING_BUT_IDLE',
'j-POOLED',
])
def test_dry_run(self):
stdout = StringIO()
self.maybe_terminate_quietly(
stdout=stdout, max_mins_idle=0.6, dry_run=True)
# dry_run doesn't actually try to lock
expected_stdout_lines = self.EXPECTED_STDOUT_LINES + [
'Terminated cluster j-IDLE_AND_LOCKED (IDLE_AND_LOCKED);'
' was idle for 2:00:00']
self.assertEqual(set(stdout.getvalue().splitlines()),
set(expected_stdout_lines))
# shouldn't *actually* terminate clusters
self.assertEqual(self.ids_of_terminated_clusters(), [])
class DeprecatedSwitchesTestCase(SandboxedTestCase):
def setUp(self):
super(DeprecatedSwitchesTestCase, self).setUp()
self._maybe_terminate_clusters = self.start(patch(
'mrjob.tools.emr.terminate_idle_clusters.'
'_maybe_terminate_clusters'))
self.log = self.start(
patch('mrjob.tools.emr.terminate_idle_clusters.log'))
|
# Copyright 2009-2012 Yelp
# Copyright 2013 Lyft
# Copyright 2014 <NAME>
# Copyright 2015-2018 Yelp
# Copyright 2019 Yelp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the idle cluster terminator"""
import sys
from datetime import timedelta
from mrjob.aws import _boto3_now
from mrjob.fs.s3 import S3Filesystem
from mrjob.pool import _pool_hash_and_name
from mrjob.py2 import StringIO
from mrjob.tools.emr.terminate_idle_clusters import _maybe_terminate_clusters
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_bootstrapping
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_done
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_running
from mrjob.tools.emr.terminate_idle_clusters import _is_cluster_starting
from mrjob.tools.emr.terminate_idle_clusters import _cluster_has_pending_steps
from mrjob.tools.emr.terminate_idle_clusters import _time_last_active
from tests.mock_boto3 import MockBoto3TestCase
from tests.py2 import patch
from tests.sandbox import SandboxedTestCase
class ClusterTerminationTestCase(MockBoto3TestCase):
maxDiff = None
_DEFAULT_STEP_ARGS = ['-mapper', 'my_job.py --mapper',
'-reducer', 'my_job.py --reducer']
def setUp(self):
super(ClusterTerminationTestCase, self).setUp()
self.create_fake_clusters()
def create_fake_clusters(self):
self.now = _boto3_now().replace(microsecond=0)
self.add_mock_s3_data({'my_bucket': {}})
# create a timestamp the given number of *hours*, *minutes*, etc.
# in the past
def ago(**kwargs):
return self.now - timedelta(**kwargs)
# Build a step object easily
# also make it respond to .args()
def step(jar='/home/hadoop/contrib/streaming/hadoop-streaming.jar',
args=self._DEFAULT_STEP_ARGS,
state='COMPLETED',
created=None,
started=None,
ended=None,
name='Streaming Step',
action_on_failure='TERMINATE_CLUSTER',
**kwargs):
timeline = dict()
if created:
timeline['CreationDateTime'] = created
if started:
timeline['StartDateTime'] = started
if ended:
timeline['EndDateTime'] = ended
return dict(
Config=dict(
ActionOnFailure=action_on_failure,
Args=args,
Jar=jar,
),
Status=dict(
State=state,
Timeline=timeline,
)
)
# empty job
self.add_mock_emr_cluster(
dict(
Id='j-EMPTY',
TerminationProtected=False,
Status=dict(
State='STARTING',
Timeline=dict(
CreationDateTime=ago(hours=10)
),
),
)
)
# job that's bootstrapping
self.add_mock_emr_cluster(dict(
Id='j-BOOTSTRAPPING',
TerminationProtected=False,
Status=dict(
State='BOOTSTRAPPING',
Timeline=dict(
CreationDateTime=ago(hours=10),
),
),
_Steps=[step(created=ago(hours=10), state='PENDING')],
))
# currently running job
self.add_mock_emr_cluster(
dict(
Id='j-CURRENTLY_RUNNING',
TerminationProtected=False,
Status=dict(
State='RUNNING',
Timeline=dict(
CreationDateTime=ago(hours=4, minutes=15),
ReadyDateTime=ago(hours=4, minutes=10)
)
),
_Steps=[step(started=ago(hours=4), state='RUNNING')]
)
)
# finished cluster
self.add_mock_emr_cluster(dict(
Id='j-DONE',
TerminationProtected=False,
Status=dict(
State='TERMINATED',
Timeline=dict(
CreationDateTime=ago(hours=10),
ReadyDateTime=ago(hours=8),
EndDateTime=ago(hours=5),
),
),
_Steps=[step(started=ago(hours=8), ended=ago(hours=6))],
))
# idle cluster
self.add_mock_emr_cluster(dict(
Id='j-DONE_AND_IDLE',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
# idle cluster with 4.x step format. should still be
# recognizable as a streaming step
self.add_mock_emr_cluster(dict(
Id='j-DONE_AND_IDLE_4_X',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2),
jar='command-runner.jar',
args=['hadoop-streaming'] + self._DEFAULT_STEP_ARGS)],
))
# idle cluster with an active lock
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_LOCKED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
self.add_mock_s3_data({
'my_bucket': {
'locks/j-IDLE_AND_LOCKED/2': b'not_you',
},
})
# idle cluster with an expired lock
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_EXPIRED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
self.add_mock_s3_data({
'my_bucket': {
'locks/j-IDLE_AND_EXPIRED/2': b'not_you',
},
}, age=timedelta(minutes=5))
# idle cluster with an expired lock
self.add_mock_emr_cluster(dict(
Id='j-IDLE_BUT_INCOMPLETE_STEPS',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), end_hours_ago=None)],
))
# custom hadoop streaming jar
self.add_mock_emr_cluster(dict(
Id='j-CUSTOM_DONE_AND_IDLE',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(
started=ago(hours=4),
ended=ago(hours=4),
jar=('s3://my_bucket/tmp/somejob/files/'
'oddjob-0.0.3-SNAPSHOT-standalone.jar'),
args=[],
)],
))
# idle cluster, termination protected
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_PROTECTED',
TerminationProtected=True,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[step(started=ago(hours=4), ended=ago(hours=2))],
))
# hadoop debugging without any other steps
self.add_mock_emr_cluster(dict(
Id='j-DEBUG_ONLY',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=3),
ReadyDateTime=ago(hours=2, minutes=55),
),
),
_Steps=[
step(jar='command-runner.jar',
name='Setup Hadoop Debugging',
args=['state-pusher-script'],
started=ago(hours=3),
ended=ago(hours=2))
],
))
# hadoop debugging + actual job
self.add_mock_emr_cluster(dict(
Id='j-HADOOP_DEBUGGING',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=55),
),
),
_Steps=[
step(jar='command-runner.jar',
name='Setup Hadoop Debugging',
args=['state-pusher-script'],
started=ago(hours=5),
ended=ago(hours=4)),
step(started=ago(hours=4), ended=ago(hours=2)),
],
))
# should skip cancelled steps
self.add_mock_emr_cluster(dict(
Id='j-IDLE_AND_FAILED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(hours=6),
ReadyDateTime=ago(hours=5, minutes=5),
),
),
_Steps=[
step(started=ago(hours=4), ended=ago(hours=3), state='FAILED'),
step(state='CANCELLED'),
],
))
# pooled cluster reaching end of full hour
self.add_mock_emr_cluster(dict(
_BootstrapActions=[
dict(Args=[], Name='action 0'),
dict(
Args=['pool-0123456789abcdef0123456789abcdef',
'reflecting'],
Name='master',
),
],
Id='j-POOLED',
TerminationProtected=False,
Status=dict(
State='WAITING',
Timeline=dict(
CreationDateTime=ago(minutes=55),
ReadyDateTime=ago(minutes=50),
),
),
Tags=[
dict(Key='__mrjob_pool_name',
Value='reflecting'),
dict(Key='__mrjob_pool_hash',
Value='0123456789abcdef0123456789abcdef'),
],
))
# cluster that has had pending jobs but hasn't run them
self.add_mock_emr_cluster(dict(
Id='j-PENDING_BUT_IDLE',
TerminationProtected=False,
Status=dict(
State='RUNNING',
Timeline=dict(
CreationDateTime=ago(hours=3),
ReadyDateTime=ago(hours=2, minutes=50),
),
),
_Steps=[step(created=ago(hours=3), state='PENDING')],
))
def ids_of_terminated_clusters(self):
return sorted(
str(cluster_id)
for cluster_id, cluster in self.mock_emr_clusters.items()
if cluster_id != 'j-DONE' and
cluster['Status']['State'] in (
'TERMINATING', 'TERMINATED', 'TERMINATED_WITH_ERRORS'))
def maybe_terminate_quietly(self, stdout=None, **kwargs):
if 'conf_paths' not in kwargs:
kwargs['conf_paths'] = []
if 'now' not in kwargs:
kwargs['now'] = self.now
kwargs['cloud_tmp_dir'] = 's3://my_bucket/'
kwargs['cloud_fs_sync_secs'] = 0
kwargs['max_mins_locked'] = 1
# don't print anything out
real_stdout = sys.stdout
sys.stdout = stdout or StringIO()
try:
return _maybe_terminate_clusters(**kwargs)
finally:
sys.stdout = real_stdout
def time_mock_cluster_idle(self, mock_cluster):
if (_is_cluster_starting(mock_cluster) or
_is_cluster_bootstrapping(mock_cluster) or
_is_cluster_running(mock_cluster['_Steps']) or
_is_cluster_done(mock_cluster)):
return timedelta(0)
else:
return self.now - _time_last_active(
mock_cluster, mock_cluster['_Steps'])
def assert_mock_cluster_is(
self, mock_cluster,
starting=False,
bootstrapping=False,
done=False,
has_pending_steps=False,
idle_for=timedelta(0),
pool_hash=None,
pool_name=None,
running=False):
self.assertEqual(starting,
_is_cluster_starting(mock_cluster))
self.assertEqual(bootstrapping,
_is_cluster_bootstrapping(mock_cluster))
self.assertEqual(done,
_is_cluster_done(mock_cluster))
self.assertEqual(has_pending_steps,
_cluster_has_pending_steps(mock_cluster['_Steps']))
self.assertEqual(idle_for,
self.time_mock_cluster_idle(mock_cluster))
self.assertEqual((pool_hash, pool_name),
_pool_hash_and_name(mock_cluster))
self.assertEqual(running,
_is_cluster_running(mock_cluster['_Steps']))
    def _lock_contents(self, mock_cluster, steps_ahead=0):
        """Read the cluster's lock file from mock S3.

        The lock key is ``locks/<cluster id>/<step number>``; *steps_ahead*
        offsets the step number past the steps the cluster already has.
        Returns the raw bytes, or None when the lock is empty/absent.
        """
        fs = S3Filesystem()
        contents = b''.join(fs.cat('s3://my_bucket/locks/%s/%d' % (
            mock_cluster['Id'], len(mock_cluster['_Steps']) + steps_ahead)))
        return contents or None
def assert_locked_by_terminate(self, mock_cluster, steps_ahead=1):
contents = self._lock_contents(mock_cluster, steps_ahead=steps_ahead)
self.assertIsNotNone(contents)
self.assertIn(b'terminate', contents)
def assert_locked_by_something_else(self, mock_cluster, steps_ahead=1):
contents = self._lock_contents(mock_cluster, steps_ahead=steps_ahead)
self.assertIsNotNone(contents)
self.assertNotIn(b'terminate', contents)
def assert_not_locked(self, mock_cluster, steps_ahead=1):
self.assertIsNone(
self._lock_contents(mock_cluster, steps_ahead=steps_ahead))
def assert_terminated_clusters_locked_by_terminate(self):
for cluster_id in self.ids_of_terminated_clusters():
self.assert_locked_by_terminate(self.mock_emr_clusters[cluster_id])
def test_empty(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-EMPTY'],
starting=True,
)
def test_currently_running(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-CURRENTLY_RUNNING'],
running=True,
)
def test_done(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DONE'],
done=True,
)
def test_debug_only(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DEBUG_ONLY'],
idle_for=timedelta(hours=2),
)
def test_done_and_idle(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DONE_AND_IDLE'],
idle_for=timedelta(hours=2),
)
def test_done_and_idle_4_x(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-DONE_AND_IDLE_4_X'],
idle_for=timedelta(hours=2),
)
def test_idle_and_expired(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-IDLE_AND_EXPIRED'],
idle_for=timedelta(hours=2),
)
def test_hadoop_debugging_cluster(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-HADOOP_DEBUGGING'],
idle_for=timedelta(hours=2),
)
def test_idle_and_failed(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-IDLE_AND_FAILED'],
idle_for=timedelta(hours=3),
)
def test_pooled(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-POOLED'],
idle_for=timedelta(minutes=50),
pool_hash='0123456789abcdef0123456789abcdef',
pool_name='reflecting',
)
def test_pending_but_idle(self):
self.assert_mock_cluster_is(
self.mock_emr_clusters['j-PENDING_BUT_IDLE'],
has_pending_steps=True,
idle_for=timedelta(hours=2, minutes=50),
)
def test_dry_run_does_nothing(self):
self.maybe_terminate_quietly(max_mins_idle=0.6, dry_run=True)
unlocked_ids = [
'j-BOOTSTRAPPING',
'j-CURRENTLY_RUNNING',
'j-CUSTOM_DONE_AND_IDLE',
'j-IDLE_AND_PROTECTED',
'j-DEBUG_ONLY',
'j-DONE',
'j-DONE_AND_IDLE',
'j-DONE_AND_IDLE_4_X',
'j-EMPTY',
'j-HADOOP_DEBUGGING',
'j-IDLE_AND_FAILED',
'j-IDLE_BUT_INCOMPLETE_STEPS',
'j-PENDING_BUT_IDLE',
'j-POOLED'
]
for cluster_id in unlocked_ids:
self.assert_not_locked(self.mock_emr_clusters[cluster_id])
self.assertEqual(self.ids_of_terminated_clusters(), [])
def test_increasing_idle_time(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
# no clusters are 20 hours old
self.maybe_terminate_quietly(
conf_paths=[], max_mins_idle=1200, now=self.now)
self.assertEqual(self.ids_of_terminated_clusters(), [])
# terminate 5-hour-old jobs
self.maybe_terminate_quietly(
conf_paths=[], max_mins_idle=300, now=self.now)
# terminate 2-hour-old jobs
self.maybe_terminate_quietly(
conf_paths=[], max_mins_idle=120, now=self.now)
# picky edge case: two jobs are EXACTLY 2 hours old, so they're
# not over the maximum
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-IDLE_AND_FAILED',
'j-PENDING_BUT_IDLE'])
self.maybe_terminate_quietly(max_mins_idle=60)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
def test_one_hour_is_the_default(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly()
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
def test_zero_idle_time(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(max_mins_idle=0)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE',
'j-POOLED'])
def test_terminate_pooled_only(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(pooled_only=True)
self.assert_terminated_clusters_locked_by_terminate()
# pooled job was not idle for an hour (the default)
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(pooled_only=True, max_mins_idle=0.6)
self.assertEqual(self.ids_of_terminated_clusters(), ['j-POOLED'])
def test_terminate_unpooled_only(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
self.maybe_terminate_quietly(unpooled_only=True)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
self.maybe_terminate_quietly(unpooled_only=True, max_mins_idle=0.6)
self.assertEqual(self.ids_of_terminated_clusters(),
['j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE', 'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING', 'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED', 'j-PENDING_BUT_IDLE'])
def test_terminate_by_pool_name(self):
self.assertEqual(self.ids_of_terminated_clusters(), [])
# wrong pool name
self.maybe_terminate_quietly(pool_name='default', max_mins_idle=0.6)
self.assertEqual(self.ids_of_terminated_clusters(), [])
# right pool name
self.maybe_terminate_quietly(pool_name='reflecting', max_mins_idle=0.6)
self.assert_terminated_clusters_locked_by_terminate()
self.assertEqual(self.ids_of_terminated_clusters(), ['j-POOLED'])
def test_its_quiet_too_quiet(self):
stdout = StringIO()
self.maybe_terminate_quietly(
stdout=stdout, max_mins_idle=0.6, quiet=True)
self.assertEqual(stdout.getvalue(), '')
EXPECTED_STDOUT_LINES = [
'Terminated cluster j-POOLED (POOLED);'
' was idle for 0:50:00',
'Terminated cluster j-PENDING_BUT_IDLE (PENDING_BUT_IDLE);'
' was pending for 2:50:00',
'Terminated cluster j-DEBUG_ONLY (DEBUG_ONLY);'
' was idle for 2:00:00',
'Terminated cluster j-DONE_AND_IDLE (DONE_AND_IDLE);'
' was idle for 2:00:00',
'Terminated cluster j-DONE_AND_IDLE_4_X (DONE_AND_IDLE_4_X);'
' was idle for 2:00:00',
'Terminated cluster j-IDLE_AND_EXPIRED (IDLE_AND_EXPIRED);'
' was idle for 2:00:00',
'Terminated cluster j-IDLE_AND_FAILED (IDLE_AND_FAILED);'
' was idle for 3:00:00',
'Terminated cluster j-HADOOP_DEBUGGING (HADOOP_DEBUGGING);'
' was idle for 2:00:00',
'Terminated cluster j-CUSTOM_DONE_AND_IDLE (CUSTOM_DONE_AND_IDLE);'
' was idle for 4:00:00',
]
def test_its_not_very_quiet(self):
stdout = StringIO()
self.maybe_terminate_quietly(stdout=stdout, max_mins_idle=0.6)
self.assertEqual(set(stdout.getvalue().splitlines()),
set(self.EXPECTED_STDOUT_LINES))
# should have actually terminated clusters
self.assertEqual(self.ids_of_terminated_clusters(), [
'j-CUSTOM_DONE_AND_IDLE',
'j-DEBUG_ONLY',
'j-DONE_AND_IDLE',
'j-DONE_AND_IDLE_4_X',
'j-HADOOP_DEBUGGING',
'j-IDLE_AND_EXPIRED',
'j-IDLE_AND_FAILED',
'j-PENDING_BUT_IDLE',
'j-POOLED',
])
def test_dry_run(self):
stdout = StringIO()
self.maybe_terminate_quietly(
stdout=stdout, max_mins_idle=0.6, dry_run=True)
# dry_run doesn't actually try to lock
expected_stdout_lines = self.EXPECTED_STDOUT_LINES + [
'Terminated cluster j-IDLE_AND_LOCKED (IDLE_AND_LOCKED);'
' was idle for 2:00:00']
self.assertEqual(set(stdout.getvalue().splitlines()),
set(expected_stdout_lines))
# shouldn't *actually* terminate clusters
self.assertEqual(self.ids_of_terminated_clusters(), [])
class DeprecatedSwitchesTestCase(SandboxedTestCase):
    """Tests around deprecated switches of the idle-cluster terminator.

    Patches out the real termination worker and the module logger so
    tests can assert on calls/warnings without touching any clusters.
    """

    def setUp(self):
        super(DeprecatedSwitchesTestCase, self).setUp()
        # stub the worker function so no clusters are actually terminated
        self._maybe_terminate_clusters = self.start(patch(
            'mrjob.tools.emr.terminate_idle_clusters.'
            '_maybe_terminate_clusters'))
        # capture the module logger (deprecation warnings go through it)
        self.log = self.start(
            patch('mrjob.tools.emr.terminate_idle_clusters.log'))
|
en
| 0.8823
|
# Copyright 2009-2012 Yelp # Copyright 2013 Lyft # Copyright 2014 <NAME> # Copyright 2015-2018 Yelp # Copyright 2019 Yelp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Test the idle cluster terminator # create a timestamp the given number of *hours*, *minutes*, etc. # in the past # Build a step object easily # also make it respond to .args() # empty job # job that's bootstrapping # currently running job # finished cluster # idle cluster # idle cluster with 4.x step format. should still be # recognizable as a streaming step # idle cluster with an active lock # idle cluster with an expired lock # idle cluster with an expired lock # custom hadoop streaming jar # idle cluster, termination protected # hadoop debugging without any other steps # hadoop debugging + actual job # should skip cancelled steps # pooled cluster reaching end of full hour # cluster that has had pending jobs but hasn't run them # don't print anything out # no clusters are 20 hours old # terminate 5-hour-old jobs # terminate 2-hour-old jobs # picky edge case: two jobs are EXACTLY 2 hours old, so they're # not over the maximum # pooled job was not idle for an hour (the default) # wrong pool name # right pool name # should have actually terminated clusters # dry_run doesn't actually try to lock # shouldn't *actually* terminate clusters
| 1.481829
| 1
|
di_store/node_tracker/node_tracker_client.py
|
jasonzhang1998/DI-store
| 59
|
6626141
|
<gh_stars>10-100
import grpc
from . import node_tracker_pb2
from . import node_tracker_pb2_grpc
from ..tracing import trace, wrap_channel
# todo grpc with multiprocessing
# https://github.com/grpc/grpc/issues/18321
@trace
class NodeTrackerClient:
    """gRPC client for the DI-store node tracker service.

    Each RPC opens a fresh insecure channel (a workaround for gRPC's
    multiprocessing limitations; see the issue linked above), so the
    client keeps no long-lived connection state.
    """

    def __init__(self, node_tracker_host='127.0.0.1', node_tracker_port=50050):
        # "host:port" target every RPC connects to
        self.node_tracker_rpc_target = f'{node_tracker_host}:{node_tracker_port}'
        self.closed = False

    def close(self):
        """Mark the client closed (idempotent).

        Bug fix: the original also called ``self.channel.close()``, but
        ``self.channel`` is never created (every RPC opens and closes its
        own channel), so close() always raised AttributeError.
        """
        self.closed = True

    @staticmethod
    def _fill_repeated(field, values):
        # Accept either a single value or a list for a repeated proto field.
        if isinstance(values, list):
            field.extend(values)
        else:
            field.append(values)

    def register_storage_server(self,
                                hostname,
                                ip_addr='', rpc_port=50051,
                                plasma_socket='/tmp/plasma'):
        """Register a storage server with the tracker; return the reply."""
        assert hostname is not None
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            # wrap_channel for tracing, consistent with every other RPC
            # (the original omitted it here only)
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.register_storage_server(
                node_tracker_pb2.StorageServer(
                    hostname=hostname,
                    ip_addr=ip_addr,
                    rpc_port=rpc_port,
                    plasma_socket=plasma_socket
                ))

    def register_storage_client(self, server_hostname):
        """Register a storage client attached to *server_hostname*."""
        assert server_hostname is not None
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.register_storage_client(
                node_tracker_pb2.StorageClient(
                    server_hostname=server_hostname
                ))

    def register_group(self, server_hostname, group_list):
        """Register *server_hostname* as a member of the given groups."""
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.register_storage_group(
                node_tracker_pb2.StorageServer(
                    hostname=server_hostname,
                    group_list=group_list
                ))

    def register_object(self, object_id_hex, server_hostname,
                        push_hostname_list, push_group_list):
        """Register an object and where it should be pushed."""
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.register_object(
                node_tracker_pb2.RegisterObjectRequest(
                    object_id_hex=object_id_hex,
                    server_hostname=server_hostname,
                    push_hostname_list=push_hostname_list,
                    push_group_list=push_group_list
                ))

    def server_info(self, server_hostname_list=None):
        """Return info for the named server(s); all servers when None."""
        request = node_tracker_pb2.ServerInfoRequest()
        if server_hostname_list:
            self._fill_repeated(request.server_hostname_list,
                                server_hostname_list)
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.server_info(request).storage_server_list

    def object_info(self, object_id_hex_list):
        """Return tracker info for one object id (str) or a list of ids."""
        request = node_tracker_pb2.ObjectInfoRequest()
        self._fill_repeated(request.object_id_hex_list, object_id_hex_list)
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.object_info(request).object_info_list

    def object_delete(self, object_id_hex_list):
        """Delete one object id (str) or a list of ids from the tracker."""
        request = node_tracker_pb2.ObjectDeleteRequest()
        self._fill_repeated(request.object_id_hex_list, object_id_hex_list)
        with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
            stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
            return stub.object_delete(request)

    # backward-compatible alias for the original misspelled method name
    object_delelte = object_delete
|
import grpc
from . import node_tracker_pb2
from . import node_tracker_pb2_grpc
from ..tracing import trace, wrap_channel
# todo grpc with multiprocessing
# https://github.com/grpc/grpc/issues/18321
@trace
class NodeTrackerClient:
def __init__(self, node_tracker_host='127.0.0.1', node_tracker_port=50050):
self.node_tracker_rpc_target = f'{node_tracker_host}:{node_tracker_port}'
self.closed = False
def close(self):
if not self.closed:
self.closed = True
self.channel.close()
def register_storage_server(self,
hostname,
ip_addr='', rpc_port=50051,
plasma_socket='/tmp/plasma'):
assert hostname is not None
with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
stub = node_tracker_pb2_grpc.NodeTrackerStub(channel)
response = stub.register_storage_server(
node_tracker_pb2.StorageServer(
hostname=hostname,
ip_addr=ip_addr,
rpc_port=rpc_port,
plasma_socket=plasma_socket
))
return response
def register_storage_client(self, server_hostname):
assert server_hostname is not None
with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
response = stub.register_storage_client(
node_tracker_pb2.StorageClient(
server_hostname=server_hostname
))
return response
def register_group(self, server_hostname, group_list):
with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
response = stub.register_storage_group(
node_tracker_pb2.StorageServer(
hostname=server_hostname,
group_list=group_list
))
return response
def register_object(self, object_id_hex, server_hostname, push_hostname_list, push_group_list):
with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
response = stub.register_object(
node_tracker_pb2.RegisterObjectRequest(
object_id_hex=object_id_hex,
server_hostname=server_hostname,
push_hostname_list=push_hostname_list,
push_group_list=push_group_list
))
return response
def server_info(self, server_hostname_list=None):
request = node_tracker_pb2.ServerInfoRequest()
if server_hostname_list:
if isinstance(server_hostname_list, list):
request.server_hostname_list.extend(server_hostname_list)
else:
request.server_hostname_list.append(server_hostname_list)
with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
response = stub.server_info(request)
return response.storage_server_list
def object_info(self, object_id_hex_list):
request = node_tracker_pb2.ObjectInfoRequest()
if isinstance(object_id_hex_list, list):
request.object_id_hex_list.extend(object_id_hex_list)
else:
request.object_id_hex_list.append(object_id_hex_list)
with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
response = stub.object_info(request)
return response.object_info_list
def object_delete(self, object_id_hex_list):
    """Delete one or more objects from the tracker.

    Accepts a single hex id string or a list of them (mirrors
    ``object_info``); builds an ``ObjectDeleteRequest`` and issues the
    ``object_delete`` RPC over a fresh insecure channel.  Returns the raw
    RPC response.
    """
    request = node_tracker_pb2.ObjectDeleteRequest()
    if isinstance(object_id_hex_list, list):
        request.object_id_hex_list.extend(object_id_hex_list)
    else:
        request.object_id_hex_list.append(object_id_hex_list)
    with grpc.insecure_channel(self.node_tracker_rpc_target) as channel:
        stub = node_tracker_pb2_grpc.NodeTrackerStub(wrap_channel(channel))
        return stub.object_delete(request)
def object_delelte(self, object_id_hex_list):
    """Deprecated misspelled alias for :meth:`object_delete`.

    Kept so existing callers keep working; new code should call
    ``object_delete``.
    """
    return self.object_delete(object_id_hex_list)
|
en
| 0.524692
|
# todo grpc with multiprocessing # https://github.com/grpc/grpc/issues/18321
| 2.318139
| 2
|
hill-cipher-encryption.py
|
Samarth-Khatri/Hacktoberfest-2022
| 13
|
6626142
|
<reponame>Samarth-Khatri/Hacktoberfest-2022
## cryptography - Hill cipher encryption algorithm implementation
## input - any plaintext and a key(mostly used of size 9)
## Matrix of 3*3 is formed
## Output is a ciphertext generated using hill cipher encryption algorithm
## Characters considered for encryption are A-Z and ".,!" So mod 29 method is used
## eg. Sample inputs - Plaintext - ACT, key - GYBNQKURP
## Output - JTA
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ.,!'  # 29-symbol alphabet; all arithmetic is mod 29


def hill_encrypt(plain_text, key):
    """Encrypt *plain_text* with a 3x3 Hill cipher derived from the 9-char *key*.

    Both arguments are upper-cased.  A small set of punctuation/whitespace
    characters is stripped from the plaintext, which is then padded with 'X'
    to a multiple of 3.  Each block of three letters forms a column vector P
    and is enciphered as C = K.P (mod 29), where K is the 3x3 key matrix
    read row-major from *key*.

    >>> hill_encrypt("ACT", "GYBNQKURP")
    'JTA'

    Raises:
        ValueError: If *key* is not exactly 9 characters long.
    """
    key = key.upper()
    if len(key) != 9:
        raise ValueError("key must be exactly 9 characters (a 3x3 matrix)")
    plain_text = "".join(
        ch for ch in plain_text.upper()
        if ch not in ("?", " ", ";", ":", "/", "[", "]")
    )
    # Pad so the message splits into whole 3-character column vectors.
    remainder = len(plain_text) % 3
    if remainder:
        plain_text += 'X' * (3 - remainder)
    # Row-major 3x3 key matrix of alphabet indices.
    key_matrix = [[LETTERS.find(key[3 * row + col]) for col in range(3)]
                  for row in range(3)]
    cipher_chars = []
    for start in range(0, len(plain_text), 3):
        column = [LETTERS.find(ch) for ch in plain_text[start:start + 3]]
        for row in key_matrix:
            # One component of K.P, reduced mod 29 back into the alphabet.
            cipher_chars.append(
                LETTERS[sum(k * p for k, p in zip(row, column)) % 29])
    return "".join(cipher_chars)


if __name__ == "__main__":
    plainText = input("Enter the Plain Text: ")
    key = input("Enter the key: ")
    print("Ciphertext:", hill_encrypt(plainText, key))
|
## cryptography - Hill cipher encryption algorithm implementation
## input - any plaintext and a key(mostly used of size 9)
## Matrix of 3*3 is formed
## Output is a ciphertext generated using hill cipher encryption algorithm
## Characters considered for encryption are A-Z and ".,!" So mod 29 method is used
## eg. Sample inputs - Plaintext - ACT, key - GYBNQKURP
## Output - JTA
plainText = input("Enter the Plain Text: ").upper()
key = input("Enter the key: ").upper()
plainText = "".join(u for u in plainText if u not in ("?", " ", ";", ":", "/", "[", "]"))
x = len(plainText)%3
if(x!=0):
for i in range(3-x):
plainText += 'X'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ.,!'
#generate key matrix from input key string
#As key is a string of length 9, key matrix will be 3x3
keyMatrix = [[0] * 3 for i in range(3)]
k = 0
for i in range(3):
for j in range(3):
keyMatrix[i][j] = LETTERS.find(key[k])
k = k+1
# generate column vector for the inputted message
# As key vector is 3x3, the message vectors will be of size 3x1
size_message = int(len(plainText) / 3)
messageMatrix = [[0] * size_message for i in range(3)]
k = 0
j = 0
while(k < size_message):
for i in range(3):
messageMatrix[i][k] = LETTERS.find(plainText[j])
j = j + 1
k = k + 1
# encrypt the plain text into cipher text using hill cipher
# C = KP mod 29
cipherMatrix = [[0] * size_message for i in range(3)]
for i in range(3):
for j in range(size_message):
cipherMatrix[i][j] = 0
for x in range(3):
#Matrix multiplication
cipherMatrix[i][j] += (keyMatrix[i][x] * messageMatrix[x][j])
# Taking mod 29 of the generated vector according to the formula
cipherMatrix[i][j] = cipherMatrix[i][j] % 29
# Generate the encrypted text from above encrypted numbered matrix
CipherText = []
k = 0
while(k < size_message):
for i in range(3):
num = cipherMatrix[i][k]
CipherText.append(LETTERS[num])
k = k + 1
print("Ciphertext:", "".join(CipherText))
|
en
| 0.718202
|
## cryptography - Hill cipher encryption algorithm implementation ## input - any plaintext and a key(mostly used of size 9) ## Matrix of 3*3 is formed ## Output is a ciphertext generated using hill cipher encryption algorithm ## Characters considered for encryption are A-Z and ".,!" So mod 29 method is used ## eg. Sample inputs - Plaintext - ACT, key - GYBNQKURP ## Output - JTA #generate key matrix from input key string #As key is a string of length 9, key matrix will be 3x3 # generate column vector for the inputted message # As key vector is 3x3, the message vectors will be of size 3x1 # encrypt the plain text into cipher text using hill cipher # C = KP mod 29 #Matrix multiplication # Taking mod 29 of the generated vector according to the formula # Generate the encrypted text from above encrypted numbered matrix
| 4.307861
| 4
|
course/migrations/0107_rename_flowpagedata_ordinal_to_page_ordinal.py
|
inducer/courseflow
| 284
|
6626143
|
<reponame>inducer/courseflow
# Generated by Django 1.10.7 on 2017-12-19 02:37
from django.db import migrations
class Migration(migrations.Migration):
"""Auto-generated migration: rename ``FlowPageData.ordinal`` to ``page_ordinal``."""
dependencies = [
('course', '0106_add_auth_tokens_permission'),
]
operations = [
migrations.RenameField(
model_name='flowpagedata',
old_name='ordinal',
new_name='page_ordinal',
),
]
|
# Generated by Django 1.10.7 on 2017-12-19 02:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course', '0106_add_auth_tokens_permission'),
]
operations = [
migrations.RenameField(
model_name='flowpagedata',
old_name='ordinal',
new_name='page_ordinal',
),
]
|
en
| 0.819372
|
# Generated by Django 1.10.7 on 2017-12-19 02:37
| 1.691371
| 2
|
array/inversion_number.py
|
fujihiraryo/library
| 0
|
6626144
|
<reponame>fujihiraryo/library<filename>array/inversion_number.py
INF = 1 << 30  # sentinel guaranteed larger than any expected element


def merge(x, y):
    """Merge two sorted lists; return ``(merged, inversions)``.

    *inversions* counts pairs (a in x, b in y) with a > b.  Unlike the
    original version, the input lists are left unmodified (the sentinels
    are appended to copies).
    """
    left = x + [INF]   # sentinels remove the need for exhaustion checks
    right = y + [INF]
    merged = []
    inversions = 0
    i = j = 0
    for _ in range(len(x) + len(y)):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
            # Every element still pending in `left` exceeds the value just
            # taken from `right`, so each forms one inversion.
            inversions += len(x) - i
    return merged, inversions


def merge_sort(a):
    """Sort *a* and return ``(sorted_list, inversion_count)``.

    Bug fix: the original's only base case was ``len(a) == 1``, so
    ``merge_sort([])`` recursed forever; ``len(a) <= 1`` handles both.
    """
    if len(a) <= 1:
        return list(a), 0
    mid = len(a) // 2
    left, inv_left = merge_sort(a[:mid])
    right, inv_right = merge_sort(a[mid:])
    merged, inv_split = merge(left, right)
    return merged, inv_left + inv_right + inv_split
|
INF = 1 << 30
def merge(x, y):
cnt = 0
nx, ny = len(x), len(y)
x.append(INF)
y.append(INF)
z = []
i, j = 0, 0
for k in range(nx + ny):
if x[i] < y[j]:
z.append(x[i])
i += 1
else:
z.append(y[j])
j += 1
cnt += nx - i
return z, cnt
def merge_sort(a):
n = len(a)
if n == 1:
return a, 0
x, cnt_x = merge_sort(a[: n // 2])
y, cnt_y = merge_sort(a[n // 2 :])
a, cnt = merge(x, y)
return a, cnt + cnt_x + cnt_y
|
none
| 1
| 3.268082
| 3
|
|
moip_sdk/payment/schemas.py
|
mastertech/moip-sdk-python
| 2
|
6626145
|
from marshmallow import fields
from marshmallow_enum import EnumField
from moip_sdk.common.converters import CamelCasedSchema
from moip_sdk.payment.enums import MoipPaymentMethod
class PaymentSchema(CamelCasedSchema):
"""Serialization schema for a Moip payment request."""
installment_count = fields.Integer(required=True)
statement_descriptor = fields.String(required=True)
funding_instrument = fields.Nested('FundingInstrumentSchema')
class FundingInstrumentSchema(CamelCasedSchema):
"""Payment method plus its method-specific payload (card or boleto)."""
method = EnumField(MoipPaymentMethod, required=True)
credit_card = fields.Nested('CreditCardSchema')
boleto = fields.Nested('BoletoSchema')
class CreditCardSchema(CamelCasedSchema):
"""Credit-card funding instrument."""
# Field name mirrors the Moip API payload key; shadowing builtin `hash`
# is intentional here (it is a schema attribute, not a call target).
hash = fields.String()
store = fields.Boolean()
holder = fields.Nested('HolderSchema')
class HolderSchema(CamelCasedSchema):
"""Card-holder identification."""
fullname = fields.String()
birth_date = fields.Date()
# NOTE(review): 'TaxDocumentSchema' and 'PhoneSchema' are not defined in
# this module -- presumably they live elsewhere in the package; confirm
# they are registered when these nested fields get resolved.
tax_document = fields.Nested('TaxDocumentSchema')
phone = fields.Nested('PhoneSchema')
class InstructionLinesSchema(CamelCasedSchema):
"""Up to three printed instruction lines for a boleto."""
first = fields.String()
second = fields.String()
third = fields.String()
class BoletoSchema(CamelCasedSchema):
"""Boleto (bank slip) funding instrument."""
expiration_date = fields.Date()
instruction_lines = fields.Nested('InstructionLinesSchema')
logo_uri = fields.String()
|
from marshmallow import fields
from marshmallow_enum import EnumField
from moip_sdk.common.converters import CamelCasedSchema
from moip_sdk.payment.enums import MoipPaymentMethod
class PaymentSchema(CamelCasedSchema):
installment_count = fields.Integer(required=True)
statement_descriptor = fields.String(required=True)
funding_instrument = fields.Nested('FundingInstrumentSchema')
class FundingInstrumentSchema(CamelCasedSchema):
method = EnumField(MoipPaymentMethod, required=True)
credit_card = fields.Nested('CreditCardSchema')
boleto = fields.Nested('BoletoSchema')
class CreditCardSchema(CamelCasedSchema):
hash = fields.String()
store = fields.Boolean()
holder = fields.Nested('HolderSchema')
class HolderSchema(CamelCasedSchema):
fullname = fields.String()
birth_date = fields.Date()
tax_document = fields.Nested('TaxDocumentSchema')
phone = fields.Nested('PhoneSchema')
class InstructionLinesSchema(CamelCasedSchema):
first = fields.String()
second = fields.String()
third = fields.String()
class BoletoSchema(CamelCasedSchema):
expiration_date = fields.Date()
instruction_lines = fields.Nested('InstructionLinesSchema')
logo_uri = fields.String()
|
none
| 1
| 2.217144
| 2
|
|
htsinfer/get_library_type.py
|
zavolanlab/htsinfer
| 5
|
6626146
|
<reponame>zavolanlab/htsinfer
"""Infer mate information from sample data."""
import logging
from pathlib import Path
import re
from typing import (List, Optional)
from Bio.SeqIO.QualityIO import FastqGeneralIterator # type: ignore
from htsinfer.exceptions import (
FileProblem,
InconsistentFastqIdentifiers,
MetadataWarning,
UnknownFastqIdentifier,
)
from htsinfer.models import (
ResultsType,
StatesType,
StatesTypeRelationship,
SeqIdFormats,
)
LOGGER = logging.getLogger(__name__)
class GetLibType:
"""Determine type (single/paired) information for a single or a pair of
FASTQ sequencing libraries.
Args:
path_1: Path to single-end library or first mate file.
path_2: Path to second mate file.
Attributes:
path_1: Path to single-end library or first mate file.
path_2: Path to second mate file.
results: Results container for storing library type information for
the provided files, as well as the mate relationship between the
two files, if applicable.
Examples:
>>> GetLibType(
... path_1="tests/files/first_mate.fastq"
... ).evaluate()
ResultsType(file_1=<OutcomesType.single: 'single'>, file_2=<OutcomesTyp
e.not_available: 'not_available'>, relationship=<OutcomesTypeRelationship.not_a
vailable: 'not_available'>)
>>> GetLibType(
... path_1="tests/files/first_mate.fastq",
... path_2="../tests/test_files/second_mate.fastq",
... ).evaluate()
ResultsType(file_1=<OutcomesType.first_mate: 'first_mate'>, file_2=<Out
comesType.second_mate: 'second_mate'>, relationship=<OutcomesTypeRelationship.s
plit_mates: 'split_mates'>)
('first_mate', 'second_mate', 'split_mates')
Note:
The example outputs above use ``OutcomesType*`` names while the code
assigns ``StatesType*`` members -- presumably the examples pre-date a
rename; confirm against the current ``htsinfer.models`` module.
"""
def __init__(
self,
path_1: Path,
path_2: Optional[Path] = None,
):
"""Class constructor."""
self.path_1: Path = path_1
self.path_2: Optional[Path] = path_2
self.results: ResultsType = ResultsType()
def evaluate(self) -> None:
"""Decide type information and mate relationship."""
# process file 1
LOGGER.debug(f"Processing file: '{self.path_1}'")
mates_file_1 = GetFastqType(path=self.path_1)
mates_file_1.evaluate()
self.results.file_1 = mates_file_1.result
LOGGER.debug(f"Library type: {self.results.file_1}")
# process file 2 (mate-relationship check only makes sense with two files,
# so everything below is guarded on path_2 being provided)
if self.path_2 is not None:
LOGGER.debug(f"Processing putative mate file: '{self.path_2}'")
mates_file_2 = GetFastqType(path=self.path_2)
mates_file_2.evaluate()
self.results.file_2 = mates_file_2.result
LOGGER.debug(f"Library type: {self.results.file_2}")
# check whether libraries are from a pair
LOGGER.debug("Checking mate relationship between files...")
self._evaluate_mate_relationship(
ids_1=mates_file_1.seq_ids,
ids_2=mates_file_2.seq_ids,
)
LOGGER.debug(f"Mate relationship: {self.results.relationship}")
def _evaluate_mate_relationship(
self,
ids_1: List[str],
ids_2: List[str],
) -> None:
"""Decide mate relationship.
Args:
ids_1: List of sequence identifier prefixes of the putative first
mate file, i.e., the fragments up until the mate information,
if available, as defined by a named capture group ``prefix`` in a
regular expression to extract mate information.
ids_2: As `ids_1` but for the putative second mate file.
"""
self.results.relationship = StatesTypeRelationship.not_mates
# Files count as split mates only if the prefix lists match
# element-for-element (same reads, same order) AND the two files carry
# complementary mate labels (1/2 in either order).
if ids_1 == ids_2:
if (
self.results.file_1 == StatesType.first_mate and
self.results.file_2 == StatesType.second_mate
) or (
self.results.file_1 == StatesType.second_mate and
self.results.file_2 == StatesType.first_mate
):
self.results.relationship = (
StatesTypeRelationship.split_mates
)
class GetFastqType():
"""Determine type (single/paired) information for an individual FASTQ
sequencing library.
Args:
path: File path to read library.
Attributes:
path: File path to read library.
seq_ids: List of sequence identifier prefixes of the provided read
library, i.e., the fragments up until the mate information,
if available, as defined by a named capture group ``prefix`` in a
regular expression to extract mate information.
seq_id_format: The sequence identifier format of the read library, as
identified by inspecting the first read and matching one of the
available regular expressions for the different identifier formats.
result: The current best guess for the type of the provided library.
Examples:
>>> lib_type = GetFastqType(
... path="tests/files/first_mate.fastq"
... ).evaluate()
<OutcomesType.first_mate: 'first_mate'>
Note:
The example output uses ``OutcomesType`` while the code assigns
``StatesType`` members -- presumably the example pre-dates a rename;
confirm against the current models module.
"""
def __init__(
self,
path: Path,
):
"""Class constructor."""
self.path: Path = path
self.seq_ids: List[str] = []
self.seq_id_format: Optional[SeqIdFormats] = None
self.result: StatesType = StatesType.not_available
def evaluate(self) -> None:
"""Decide library type.
Raises:
FileProblem: The file is empty or could not be read/parsed.
MetadataWarning: The identifier format could not be determined, or
later reads are inconsistent with the first one.
"""
# Count of reads processed, kept only for the final debug log line.
records: int = 0
try:
with open(self.path) as _f: # type: ignore
# Get sequence identifier format from first record
LOGGER.debug(
"Determining identifier and library type from first "
"record..."
)
try:
seq_iter = FastqGeneralIterator(source=_f)
seq_id = next(seq_iter)[0]
records += 1
# Try every known identifier format; the first regex that
# matches the first read fixes the format for the whole file.
for seq_id_format in SeqIdFormats:
try:
self._get_read_type(
seq_id=seq_id,
regex=seq_id_format.value,
)
except UnknownFastqIdentifier:
continue
self.seq_id_format = seq_id_format
break
except StopIteration as exc:
self.result = StatesType.not_available
raise FileProblem(f"File is empty: {self.path}") from exc
if self.seq_id_format is None:
self.result = StatesType.not_available
raise MetadataWarning(
"Could not determine sequence identifier format."
)
LOGGER.debug(
f"Sequence identifier format: {self.seq_id_format.name}"
)
# Ensure that remaining records are compatible with sequence
# identifier format and library type determined from first
# record
LOGGER.debug(
"Checking consistency of remaining reads with initially "
"determined identifier format and library type..."
)
for record in seq_iter:
records += 1
try:
self._get_read_type(
seq_id=record[0],
regex=self.seq_id_format.value,
)
except (
InconsistentFastqIdentifiers,
UnknownFastqIdentifier,
) as exc:
self.result = StatesType.not_available
raise MetadataWarning(
f"{type(exc).__name__}: {str(exc)}"
) from exc
except (OSError, ValueError) as exc:
self.result = StatesType.not_available
raise FileProblem(f"{type(exc).__name__}: {str(exc)}") from exc
LOGGER.debug(f"Total records processed: {records}")
def _get_read_type(
self,
seq_id: str,
regex: re.Pattern,
) -> None:
"""Get/update library type information from sequence identifier.
Args:
seq_id: Sequence identifier.
regex: A regular expression to extract library type information
from a read. The expression needs to contain a named group
``mate`` that MAY be present in the sequence identifier and
that, if present, MUST match a single character that can take
values ``1`` (for first-mate files) and ``2`` (for second-mate
files), as well as a named group ``prefix`` that MUST be
present in the sequence identifier and that contains all of the
sequence identifier that is identical between a mate pair up
until the mate information itself.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
UnknownFastqIdentifier: A sequence identifier of unknown format was
encountered.
"""
# Note: Conditionals have been optimized to minimize required checks
# for the most likely scenarios, not to simplify code
match = re.search(regex, seq_id)
if match:
self.seq_ids.append(match.group('prefix'))
# Read appears to be derived from a single-end library
if match.group('mate') is None:
self._get_read_type_single(seq_id=seq_id)
# Read appears to be derived from a paired-end library
else:
# First mate
if int(match.group('mate')) == 1:
self._get_read_type_paired_mate_1(seq_id=seq_id)
# Second mate
else:
self._get_read_type_paired_mate_2(seq_id=seq_id)
else:
raise UnknownFastqIdentifier(
f"Cannot determine identifier format: {seq_id}"
)
def _get_read_type_single(
self,
seq_id: str,
) -> None:
"""Helper function to process single-end libraries in
``GetFastqType._get_read_type()``.
Args:
seq_id: Sequence identifier.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
"""
if self.result == StatesType.single:
pass
elif self.result == StatesType.not_available:
self.result = StatesType.single
else:
raise InconsistentFastqIdentifiers(
"Previous sequence identifiers suggest that reads are part of "
"a paired-end sequencing library, but current identifier "
f"suggests a single-end library: {seq_id}"
)
def _get_read_type_paired_mate_1(
self,
seq_id: str,
) -> None:
"""Helper function to process first mate files of paired-end libraries
in ``GetFastqType._get_read_type()``.
Args:
seq_id: Sequence identifier.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
"""
if (
self.result == StatesType.first_mate or
self.result == StatesType.mixed_mates
):
pass
elif self.result == StatesType.second_mate:
self.result = StatesType.mixed_mates
elif self.result == StatesType.not_available:
self.result = StatesType.first_mate
else:
raise InconsistentFastqIdentifiers(
"Previous sequence identifiers suggest that reads are part of "
"a single-end sequencing library, but current identifier "
f"suggests a paired-end library: {seq_id}"
)
def _get_read_type_paired_mate_2(
self,
seq_id: str,
) -> None:
"""Helper function to process second mate files of paired-end libraries
in ``GetFastqType._get_read_type()``.
Args:
seq_id: Sequence identifier.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
"""
if (
self.result == StatesType.second_mate or
self.result == StatesType.mixed_mates
):
pass
elif self.result == StatesType.first_mate:
self.result = StatesType.mixed_mates
elif self.result == StatesType.not_available:
self.result = StatesType.second_mate
else:
raise InconsistentFastqIdentifiers(
"Previous sequence identifiers suggest that reads are part of "
"a single-end sequencing library, but current identifier "
f"suggests a paired-end library: {seq_id}"
)
|
"""Infer mate information from sample data."""
import logging
from pathlib import Path
import re
from typing import (List, Optional)
from Bio.SeqIO.QualityIO import FastqGeneralIterator # type: ignore
from htsinfer.exceptions import (
FileProblem,
InconsistentFastqIdentifiers,
MetadataWarning,
UnknownFastqIdentifier,
)
from htsinfer.models import (
ResultsType,
StatesType,
StatesTypeRelationship,
SeqIdFormats,
)
LOGGER = logging.getLogger(__name__)
class GetLibType:
"""Determine type (single/paired) information for a single or a pair of
FASTQ sequencing libraries.
Args:
path_1: Path to single-end library or first mate file.
path_2: Path to second mate file.
Attributes:
path_1: Path to single-end library or first mate file.
path_2: Path to second mate file.
results: Results container for storing library type information for
the provided files, as well as the mate relationship between the
two files, if applicable.
Examples:
>>> GetLibType(
... path_1="tests/files/first_mate.fastq"
... ).evaluate()
ResultsType(file_1=<OutcomesType.single: 'single'>, file_2=<OutcomesTyp
e.not_available: 'not_available'>, relationship=<OutcomesTypeRelationship.not_a
vailable: 'not_available'>)
>>> GetLibType(
... path_1="tests/files/first_mate.fastq",
... path_2="../tests/test_files/second_mate.fastq",
... ).evaluate()
ResultsType(file_1=<OutcomesType.first_mate: 'first_mate'>, file_2=<Out
comesType.second_mate: 'second_mate'>, relationship=<OutcomesTypeRelationship.s
plit_mates: 'split_mates'>)
('first_mate', 'second_mate', 'split_mates')
"""
def __init__(
self,
path_1: Path,
path_2: Optional[Path] = None,
):
"""Class constructor."""
self.path_1: Path = path_1
self.path_2: Optional[Path] = path_2
self.results: ResultsType = ResultsType()
def evaluate(self) -> None:
"""Decide type information and mate relationship."""
# process file 1
LOGGER.debug(f"Processing file: '{self.path_1}'")
mates_file_1 = GetFastqType(path=self.path_1)
mates_file_1.evaluate()
self.results.file_1 = mates_file_1.result
LOGGER.debug(f"Library type: {self.results.file_1}")
# process file 2
if self.path_2 is not None:
LOGGER.debug(f"Processing putative mate file: '{self.path_2}'")
mates_file_2 = GetFastqType(path=self.path_2)
mates_file_2.evaluate()
self.results.file_2 = mates_file_2.result
LOGGER.debug(f"Library type: {self.results.file_2}")
# check whether libraries are from a pair
LOGGER.debug("Checking mate relationship between files...")
self._evaluate_mate_relationship(
ids_1=mates_file_1.seq_ids,
ids_2=mates_file_2.seq_ids,
)
LOGGER.debug(f"Mate relationship: {self.results.relationship}")
def _evaluate_mate_relationship(
self,
ids_1: List[str],
ids_2: List[str],
) -> None:
"""Decide mate relationship.
Args:
ids_1: List of sequence identifier prefixes of the putative first
mate file, i.e., the fragments up until the mate information,
if available, as defined by a named capture group ``prefix`` in a
regular expression to extract mate information.
ids_2: As `ids_1` but for the putative second mate file.
"""
self.results.relationship = StatesTypeRelationship.not_mates
if ids_1 == ids_2:
if (
self.results.file_1 == StatesType.first_mate and
self.results.file_2 == StatesType.second_mate
) or (
self.results.file_1 == StatesType.second_mate and
self.results.file_2 == StatesType.first_mate
):
self.results.relationship = (
StatesTypeRelationship.split_mates
)
class GetFastqType():
"""Determine type (single/paired) information for an individual FASTQ
sequencing library.
Args:
path: File path to read library.
Attributes:
path: File path to read library.
seq_ids: List of sequence identifier prefixes of the provided read
library, i.e., the fragments up until the mate information,
if available, as defined by a named capture group ``prefix`` in a
regular expression to extract mate information.
seq_id_format: The sequence identifier format of the read library, as
identified by inspecting the first read and matching one of the
available regular expressions for the different identifier formats.
result: The current best guess for the type of the provided library.
Examples:
>>> lib_type = GetFastqType(
... path="tests/files/first_mate.fastq"
... ).evaluate()
<OutcomesType.first_mate: 'first_mate'>
"""
def __init__(
self,
path: Path,
):
"""Class constructor."""
self.path: Path = path
self.seq_ids: List[str] = []
self.seq_id_format: Optional[SeqIdFormats] = None
self.result: StatesType = StatesType.not_available
def evaluate(self) -> None:
"""Decide library type.
Raises:
NoMetadataDetermined: Type information could not be determined.
"""
records: int = 0
try:
with open(self.path) as _f: # type: ignore
# Get sequence identifier format from first record
LOGGER.debug(
"Determining identifier and library type from first "
"record..."
)
try:
seq_iter = FastqGeneralIterator(source=_f)
seq_id = next(seq_iter)[0]
records += 1
for seq_id_format in SeqIdFormats:
try:
self._get_read_type(
seq_id=seq_id,
regex=seq_id_format.value,
)
except UnknownFastqIdentifier:
continue
self.seq_id_format = seq_id_format
break
except StopIteration as exc:
self.result = StatesType.not_available
raise FileProblem(f"File is empty: {self.path}") from exc
if self.seq_id_format is None:
self.result = StatesType.not_available
raise MetadataWarning(
"Could not determine sequence identifier format."
)
LOGGER.debug(
f"Sequence identifier format: {self.seq_id_format.name}"
)
# Ensure that remaining records are compatible with sequence
# identifier format and library type determined from first
# record
LOGGER.debug(
"Checking consistency of remaining reads with initially "
"determined identifier format and library type..."
)
for record in seq_iter:
records += 1
try:
self._get_read_type(
seq_id=record[0],
regex=self.seq_id_format.value,
)
except (
InconsistentFastqIdentifiers,
UnknownFastqIdentifier,
) as exc:
self.result = StatesType.not_available
raise MetadataWarning(
f"{type(exc).__name__}: {str(exc)}"
) from exc
except (OSError, ValueError) as exc:
self.result = StatesType.not_available
raise FileProblem(f"{type(exc).__name__}: {str(exc)}") from exc
LOGGER.debug(f"Total records processed: {records}")
def _get_read_type(
self,
seq_id: str,
regex: re.Pattern,
) -> None:
"""Get/update library type information from sequence identifier.
Args:
seq_id: Sequence identifier.
regex: A regular expression to extract library type information
from a read. The expression needs to contain a named group
``mate`` that MAY be present in the sequence identifier and
that, if present, MUST match a single character that can take
values ``1`` (for first-mate files) and ``2`` (for second-mate
files), as well as a named group ``prefix`` that MUST be
present in the sequence identifier and that contains all of the
sequence identifier that is identical between a mate pair up
until the mate information itself.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
UnkwownFastqIdentifier: A sequence identifier of unknown format was
encountered.
"""
# Note: Conditionals have been optimized to minimize required checks
# for the most likely scenarios, not to simplify code
match = re.search(regex, seq_id)
if match:
self.seq_ids.append(match.group('prefix'))
# Read appears to be derived from a single-end library
if match.group('mate') is None:
self._get_read_type_single(seq_id=seq_id)
# Read appears to be derived from a paired-end library
else:
# First mate
if int(match.group('mate')) == 1:
self._get_read_type_paired_mate_1(seq_id=seq_id)
# Second mate
else:
self._get_read_type_paired_mate_2(seq_id=seq_id)
else:
raise UnknownFastqIdentifier(
f"Cannot determine identifier format: {seq_id}"
)
def _get_read_type_single(
self,
seq_id: str,
) -> None:
"""Helper function to process single-end libraries in
``GetFastqType._get_read_type()``.
Args:
seq_id: Sequence identifier.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
"""
if self.result == StatesType.single:
pass
elif self.result == StatesType.not_available:
self.result = StatesType.single
else:
raise InconsistentFastqIdentifiers(
"Previous sequence identifiers suggest that reads are part of "
"a paired-end sequencing library, but current identifier "
f"suggests a single-end library: {seq_id}"
)
def _get_read_type_paired_mate_1(
self,
seq_id: str,
) -> None:
"""Helper function to process first mate files of paired-end libraries
in ``GetFastqType._get_read_type()``.
Args:
seq_id: Sequence identifier.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
"""
if (
self.result == StatesType.first_mate or
self.result == StatesType.mixed_mates
):
pass
elif self.result == StatesType.second_mate:
self.result = StatesType.mixed_mates
elif self.result == StatesType.not_available:
self.result = StatesType.first_mate
else:
raise InconsistentFastqIdentifiers(
"Previous sequence identifiers suggest that reads are part of "
"a single-end sequencing library, but current identifier "
f"suggests a paired-end library: {seq_id}"
)
def _get_read_type_paired_mate_2(
self,
seq_id: str,
) -> None:
"""Helper function to process second mate files of paired-end libraries
in ``GetFastqType._get_read_type()``.
Args:
seq_id: Sequence identifier.
Raises:
InconsistentFastqIdentifiers: A sequence identifier was encountered
that suggests a different library type than previous
identifiers.
"""
if (
self.result == StatesType.second_mate or
self.result == StatesType.mixed_mates
):
pass
elif self.result == StatesType.first_mate:
self.result = StatesType.mixed_mates
elif self.result == StatesType.not_available:
self.result = StatesType.second_mate
else:
raise InconsistentFastqIdentifiers(
"Previous sequence identifiers suggest that reads are part of "
"a single-end sequencing library, but current identifier "
f"suggests a paired-end library: {seq_id}"
)
|
en
| 0.799165
|
Infer mate information from sample data. # type: ignore Determine type (single/paired) information for a single or a pair of FASTQ sequencing libraries. Args: path_1: Path to single-end library or first mate file. path_2: Path to second mate file. Attributes: path_1: Path to single-end library or first mate file. path_2: Path to second mate file. results: Results container for storing library type information for the provided files, as well as the mate relationship between the two files, if applicable. Examples: >>> GetLibType( ... path_1="tests/files/first_mate.fastq" ... ).evaluate() ResultsType(file_1=<OutcomesType.single: 'single'>, file_2=<OutcomesTyp e.not_available: 'not_available'>, relationship=<OutcomesTypeRelationship.not_a vailable: 'not_available'>) >>> GetLibType( ... path_1="tests/files/first_mate.fastq", ... path_2="../tests/test_files/second_mate.fastq", ... ).evaluate() ResultsType(file_1=<OutcomesType.first_mate: 'first_mate'>, file_2=<Out comesType.second_mate: 'second_mate'>, relationship=<OutcomesTypeRelationship.s plit_mates: 'split_mates'>) ('first_mate', 'second_mate', 'split_mates') Class constructor. Decide type information and mate relationship. # process file 1 # process file 2 # check whether libraries are from a pair Decide mate relationship. Args: ids_1: List of sequence identifier prefixes of the putative first mate file, i.e., the fragments up until the mate information, if available, as defined by a named capture group ``prefix`` in a regular expression to extract mate information. ids_2: As `ids_1` but for the putative second mate file. Determine type (single/paired) information for an individual FASTQ sequencing library. Args: path: File path to read library. Attributes: path: File path to read library. 
seq_ids: List of sequence identifier prefixes of the provided read library, i.e., the fragments up until the mate information, if available, as defined by a named capture group ``prefix`` in a regular expression to extract mate information. seq_id_format: The sequence identifier format of the read library, as identified by inspecting the first read and matching one of the available regular expressions for the different identifier formats. result: The current best guess for the type of the provided library. Examples: >>> lib_type = GetFastqType( ... path="tests/files/first_mate.fastq" ... ).evaluate() <OutcomesType.first_mate: 'first_mate'> Class constructor. Decide library type. Raises: NoMetadataDetermined: Type information could not be determined. # type: ignore # Get sequence identifier format from first record # Ensure that remaining records are compatible with sequence # identifier format and library type determined from first # record Get/update library type information from sequence identifier. Args: seq_id: Sequence identifier. regex: A regular expression to extract library type information from a read. The expression needs to contain a named group ``mate`` that MAY be present in the sequence identifier and that, if present, MUST match a single character that can take values ``1`` (for first-mate files) and ``2`` (for second-mate files), as well as a named group ``prefix`` that MUST be present in the sequence identifier and that contains all of the sequence identifier that is identical between a mate pair up until the mate information itself. Raises: InconsistentFastqIdentifiers: A sequence identifier was encountered that suggests a different library type than previous identifiers. UnkwownFastqIdentifier: A sequence identifier of unknown format was encountered. 
# Note: Conditionals have been optimized to minimize required checks # for the most likely scenarios, not to simplify code # Read appears to be derived from a single-end library # Read appears to be derived from a paired-end library # First mate # Second mate Helper function to process single-end libraries in ``GetFastqType._get_read_type()``. Args: seq_id: Sequence identifier. Raises: InconsistentFastqIdentifiers: A sequence identifier was encountered that suggests a different library type than previous identifiers. Helper function to process first mate files of paired-end libraries in ``GetFastqType._get_read_type()``. Args: seq_id: Sequence identifier. Raises: InconsistentFastqIdentifiers: A sequence identifier was encountered that suggests a different library type than previous identifiers. Helper function to process second mate files of paired-end libraries in ``GetFastqType._get_read_type()``. Args: seq_id: Sequence identifier. Raises: InconsistentFastqIdentifiers: A sequence identifier was encountered that suggests a different library type than previous identifiers.
| 2.425914
| 2
|
pcapass/__init__.py
|
ksadowski13/PCAPass
| 3
|
6626147
|
<filename>pcapass/__init__.py
from .layer import PCAPass
|
<filename>pcapass/__init__.py
from .layer import PCAPass
|
none
| 1
| 1.139054
| 1
|
|
994. Rotting Oranges/994. Rotting Oranges.py
|
JawadAsifBD/leetcode
| 0
|
6626148
|
<reponame>JawadAsifBD/leetcode
from typing import List
from itertools import product
from collections import deque
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Return the number of minutes until every orange is rotten, or -1.

        Multi-source BFS: every initially rotten orange (value 2) is seeded at
        time 0; each minute rot spreads to 4-directionally adjacent oranges.
        A fresh orange (value 1) that is never reached makes the answer -1.
        Empty cells are 0. The input grid is not modified.
        """
        rows, cols = len(grid), len(grid[0])
        # rotten_time[r][c]: minute at which cell (r, c) rots; -1 = never.
        rotten_time = [[-1] * cols for _ in range(rows)]
        queue = deque()
        for r, c in product(range(rows), range(cols)):
            if grid[r][c] == 2:
                queue.append((r, c, 0))
                rotten_time[r][c] = 0
        while queue:
            r, c, t = queue.popleft()
            # Visit the four orthogonal neighbours with a single bounds check.
            for nr, nc in ((r, c - 1), (r, c + 1), (r - 1, c), (r + 1, c)):
                if 0 <= nr < rows and 0 <= nc < cols \
                        and grid[nr][nc] != 0 and rotten_time[nr][nc] == -1:
                    rotten_time[nr][nc] = t + 1
                    queue.append((nr, nc, t + 1))
        latest = 0
        for r, c in product(range(rows), range(cols)):
            if grid[r][c] != 0 and rotten_time[r][c] == -1:
                return -1  # an orange the rot can never reach
            latest = max(latest, rotten_time[r][c])
        return latest
# grid = [[2, 1, 1], [1, 1, 0], [0, 1, 1]]
# grid = [[2, 1, 1], [0, 1, 1], [1, 0, 1]]
# grid = [[0, 2]]
# Ad-hoc manual check: a grid with no oranges at all should print 0.
grid = [[0, 0, 0, 0]]
s = Solution()
print(s.orangesRotting(grid))
|
from typing import List
from itertools import product
from collections import deque
class Solution:
    def orangesRotting(self, grid: List[List[int]]) -> int:
        """Minutes until rot reaches every orange via 4-neighbour spread.

        Returns -1 when some orange can never be reached; the grid itself is
        left untouched (reachability is tracked in a side dictionary).
        """
        height = len(grid)
        width = len(grid[0])
        # reach maps (row, col) -> minute of rotting; seeded with the sources.
        reach = {}
        pending = deque()
        for row, col in product(range(height), range(width)):
            if grid[row][col] == 2:
                reach[(row, col)] = 0
                pending.append((row, col))
        while pending:
            row, col = pending.popleft()
            step = reach[(row, col)] + 1
            for nrow, ncol in ((row, col - 1), (row, col + 1), (row - 1, col), (row + 1, col)):
                inside = 0 <= nrow < height and 0 <= ncol < width
                if inside and grid[nrow][ncol] != 0 and (nrow, ncol) not in reach:
                    reach[(nrow, ncol)] = step
                    pending.append((nrow, ncol))
        longest = 0
        for row, col in product(range(height), range(width)):
            if grid[row][col] != 0 and (row, col) not in reach:
                return -1
            longest = max(longest, reach.get((row, col), -1))
        return longest
# grid = [[2, 1, 1], [1, 1, 0], [0, 1, 1]]
# grid = [[2, 1, 1], [0, 1, 1], [1, 0, 1]]
# grid = [[0, 2]]
# Ad-hoc manual check: a grid with no oranges at all should print 0.
grid = [[0, 0, 0, 0]]
s = Solution()
print(s.orangesRotting(grid))
|
en
| 0.417007
|
# print(rottenTime) # rotten # print(i, j) # check left # grid[i][j] == 0 for no apple # check right # check up # check down # print(rottenTimes) # grid = [[2, 1, 1], [1, 1, 0], [0, 1, 1]] # grid = [[2, 1, 1], [0, 1, 1], [1, 0, 1]] # grid = [[0, 2]]
| 3.328804
| 3
|
nutils/points.py
|
RezaZeinali91/NUTLIS
| 0
|
6626149
|
<gh_stars>0
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
The points module defines the :class:`Points` base class, which bundles point
coordinates, point weights, a local triangulation and a hull triangulation. The
module provides several different implementations such as :class:`TensorPoints`
and :class:`SimplexGaussPoints` that reflect the variety of elements in the
:mod:`nutils.element` module.
'''
from . import types, transform, numeric, util
import numpy, functools, itertools, warnings
_ = numpy.newaxis
class Points(types.Singleton):
  '''Collection of points on an element.
  The :class:`Points` base class bundles point coordinates, point weights,
  a local triangulation and hull triangulation. Of these only the coordinates
  are mandatory, and should be provided by the derived class in the form of the
  ``coords`` attribute. Of the remaining properties only :func:`hull` has a
  functional base implementation that relies on the availability of ``tri``.
  .. attribute:: coords
  Coordinates of the points as a :class:`float` array.
  .. attribute:: weights
  Weights of the points as a :class:`float` array.
  Args
  ----
  npoints : :class:`int`
      Number of discrete points.
  ndims : :class:`int`
      Number of spatial dimensions.
  '''
  # Properties cached per instance by the nutils types machinery.
  __cache__ = 'hull', 'onhull'
  @types.apply_annotations
  def __init__(self, npoints:types.strictint, ndims:types.strictint):
    self.npoints = npoints  # number of discrete points
    self.ndims = ndims  # number of spatial dimensions
  @property
  def tri(self):
    '''Triangulation of interior.
    A two-dimensional integer array with ``ndims+1`` columns, of which every
    row defines a simplex by mapping vertices into the list of points.
    '''
    # Only the trivial zero-dimensional, single-point case has a default.
    if self.ndims == 0 and self.npoints == 1:
      return types.frozenarray([[0]])
    raise Exception('tri not defined for {}'.format(self))
  @property
  def hull(self):
    '''Triangulation of the exterior hull.
    A two-dimensional integer array with ``ndims`` columns, of which every row
    defines a simplex by mapping vertices into the list of points.
    '''
    edges = set()
    # All ndims-sized vertex subsets of an ndims+1 simplex, i.e. its facets.
    iedges = numpy.array(list(itertools.combinations(range(self.ndims+1), self.ndims)))
    for tri in self.tri:
      # Facets shared by two simplices cancel pairwise; only exterior facets
      # survive the repeated symmetric difference.
      edges.symmetric_difference_update(map(tuple, numpy.sort(tri[iedges], axis=1)))
    # NOTE(review): returns a plain numpy array while sibling properties
    # return types.frozenarray — possibly intended to be frozen; confirm.
    return numpy.array(sorted(edges))
  @property
  def onhull(self):
    '''Boolean mask marking boundary points.
    The array of length ``npoints`` is ``True`` where the corresponding point
    is part of the :attr:`hull`, and ``False`` where it is not.
    '''
    onhull = numpy.zeros(self.npoints, dtype=bool)
    onhull[numpy.ravel(self.hull)] = True # not clear why ravel is necessary but setitem seems to require it
    return types.frozenarray(onhull, copy=False)
# Annotation helper: argument type that strictly requires a Points instance.
strictpoints = types.strict[Points]
class CoordsPoints(Points):
  '''Manually supplied points.'''
  @types.apply_annotations
  def __init__(self, coords:types.frozenarray[float]):
    self.coords = coords
    # coords.shape == (npoints, ndims) supplies both base-class arguments.
    super().__init__(*coords.shape)
class CoordsWeightsPoints(CoordsPoints):
  '''Manually supplied points and weights.'''
  @types.apply_annotations
  def __init__(self, coords:types.frozenarray[float], weights:types.frozenarray[float]):
    self.weights = weights  # one quadrature weight per point
    super().__init__(coords)
class CoordsUniformPoints(CoordsPoints):
  '''Manually supplied points with uniform weights.'''
  @types.apply_annotations
  def __init__(self, coords:types.frozenarray[float], volume:float):
    # Spread the total volume evenly so that the weights sum to ``volume``.
    self.weights = types.frozenarray.full([len(coords)], volume/len(coords))
    super().__init__(coords)
class TensorPoints(Points):
  '''Tensor product of two Points instances.'''
  __cache__ = 'coords', 'weights', 'tri', 'hull'
  @types.apply_annotations
  def __init__(self, points1:strictpoints, points2:strictpoints):
    self.points1 = points1
    self.points2 = points2
    # Point counts multiply, spatial dimensions add.
    super().__init__(points1.npoints * points2.npoints, points1.ndims + points2.ndims)
  @property
  def coords(self):
    # Broadcast the two coordinate sets against each other, then flatten the
    # (npoints1, npoints2) grid into a single point axis.
    coords = numpy.empty((self.points1.npoints, self.points2.npoints, self.ndims))
    coords[:,:,:self.points1.ndims] = self.points1.coords[:,_,:]
    coords[:,:,self.points1.ndims:] = self.points2.coords[_,:,:]
    return types.frozenarray(coords.reshape(self.npoints, self.ndims), copy=False)
  @property
  def weights(self):
    # Product-rule weights: outer product of the two weight vectors.
    return types.frozenarray((self.points1.weights[:,_] * self.points2.weights[_,:]).ravel(), copy=False)
  @property
  def tri(self):
    if self.points1.ndims == 1:
      # For an n-dimensional simplex with vertices a0,a1,..,an, the extruded
      # element has vertices a0,a1,..,an,b0,b1,..,bn. These can be divided in
      # simplices by selecting a0,a1,..,an,b0; a1,..,an,b0,n1; and so on until
      # an,b0,b1,..,bn; resulting in n+1 n+1-dimensional simplices. In the
      # algorithm below this is achieved by first taking the tensorial product
      # of triangulations and raveling, effectively achieving vectorized
      # concatenation. The overlapping vertex subsets then follow directly from
      # numeric.overlapping.
      tri12 = self.points1.tri[:,_,:,_] * self.points2.npoints + self.points2.tri[_,:,_,:] # ntri1 x ntri2 x 2 x ndims
      return types.frozenarray(numeric.overlapping(tri12.reshape(-1, 2*self.ndims), n=self.ndims+1).reshape(-1, self.ndims+1), copy=False)
    # No generic tensor triangulation; defer to the (raising) base property.
    return super().tri
  @property
  def hull(self):
    if self.points1.ndims == 1:
      hull1 = self.points1.hull[:,_,:,_] * self.points2.npoints + self.points2.tri[_,:,_,:] # 2 x ntri2 x 1 x ndims
      hull2 = self.points1.tri[:,_,:,_] * self.points2.npoints + self.points2.hull[_,:,_,:] # ntri1 x nhull2 x 2 x ndims-1
      # The subdivision of hull2 into simplices follows identical logic to that
      # used in the construction of self.tri.
      hull = numpy.concatenate([hull1.reshape(-1, self.ndims), numeric.overlapping(hull2.reshape(-1, 2*(self.ndims-1)), n=self.ndims).reshape(-1, self.ndims)])
      return types.frozenarray(hull, copy=False)
    return super().hull
class SimplexGaussPoints(CoordsWeightsPoints):
  '''Gauss quadrature points on a simplex.'''
  @types.apply_annotations
  def __init__(self, ndims:types.strictint, degree:types.strictint):
    # Dispatch to the dimension-specific rule (gauss1, gauss2 or gauss3).
    super().__init__(*gaussn[ndims](degree))
class SimplexBezierPoints(CoordsPoints):
  '''Bezier points on a simplex.'''
  __cache__ = 'tri', 'hull'
  @types.apply_annotations
  def __init__(self, ndims:types.strictint, n:types.strictint):
    self.n = n  # number of points per edge
    linspace = numpy.linspace(0, 1, n)
    # Keep only the lattice points on the simplex: multi-indices summing
    # to strictly less than n.
    super().__init__([linspace[list(index)[::-1]] for index in numpy.ndindex(*[n] * ndims) if sum(index) < n])
  @property
  def tri(self):
    if self.ndims == 1:
      return types.frozenarray(numeric.overlapping(numpy.arange(self.n)), copy=False)
    if self.ndims == 2:
      n = self.n
      # vert1/vert2 enumerate the two triangle orientations of the lattice;
      # ((2*n-i+1)*i)//2 is the index offset of lattice row i.
      vert1 = [((2*n-i+1)*i)//2 + numpy.array([j,j+1,j+n-i]) for i in range(n-1) for j in range(n-i-1)]
      vert2 = [((2*n-i+1)*i)//2 + numpy.array([j+1,j+n-i+1,j+n-i]) for i in range(n-1) for j in range(n-i-2)]
      return types.frozenarray(vert1 + vert2, copy=False)
    return super().tri
  @property
  def hull(self):
    if self.ndims == 2:
      n = self.n
      # Walk the three edges of the triangle in sequence.
      hull = numpy.concatenate([numpy.arange(n), numpy.arange(n-1,0,-1).cumsum()+n-1, numpy.arange(n+1,2,-1).cumsum()[::-1]-n-1])
      return types.frozenarray(numeric.overlapping(hull), copy=False)
    return super().hull
class TransformPoints(Points):
  '''Affinely transformed Points.'''
  __cache__ = 'coords', 'weights'
  @types.apply_annotations
  def __init__(self, points:strictpoints, trans:transform.stricttransformitem):
    self.points = points  # underlying point set
    self.trans = trans  # affine transformation to apply
    super().__init__(points.npoints, points.ndims)
  @property
  def coords(self):
    return self.trans.apply(self.points.coords)
  @property
  def weights(self):
    # Scale weights by the absolute Jacobian determinant of the affine map.
    return self.points.weights * abs(float(self.trans.det))
  @property
  def tri(self):
    # Connectivity is unaffected by an affine transformation.
    return self.points.tri
  @property
  def hull(self):
    return self.points.hull
class ConcatPoints(Points):
  '''Concatenation of several Points objects.
  An optional ``duplicates`` argument lists all points that are equal,
  triggering deduplication and resulting in a smaller total point count.
  '''
  __cache__ = 'coords', 'weights', 'tri', 'masks'
  @types.apply_annotations
  def __init__(self, allpoints:types.tuple[strictpoints], duplicates:frozenset=frozenset()):
    self.allpoints = allpoints
    # duplicates: collection of tuples of (iset, ipoint) pairs; the first pair
    # of each tuple is kept, the remaining pairs are dropped.
    self.duplicates = duplicates
    super().__init__(sum(points.npoints for points in allpoints) - sum(len(d)-1 for d in duplicates), allpoints[0].ndims)
  @property
  def masks(self):
    # Boolean retention mask per point set: False marks dropped duplicates.
    masks = [numpy.ones(points.npoints, dtype=bool) for points in self.allpoints]
    for pairs in self.duplicates:
      for i, j in pairs[1:]:
        masks[i][j] = False
    return tuple(masks)
  @property
  def coords(self):
    return types.frozenarray(numpy.concatenate([points.coords[mask] for mask, points in zip(self.masks, self.allpoints)] if self.duplicates else [points.coords for points in self.allpoints]), copy=False)
  @property
  def weights(self):
    if not self.duplicates:
      return types.frozenarray(numpy.concatenate([points.weights for points in self.allpoints]), copy=False)
    weights = [points.weights[mask] for mask, points in zip(self.masks, self.allpoints)]
    for pairs in self.duplicates:
      I, J = pairs[0]
      # Fold the weights of the dropped duplicates into the retained point;
      # masks[I][:J].sum() is its position within the masked array.
      weights[I][self.masks[I][:J].sum()] += sum(self.allpoints[i].weights[j] for i, j in pairs[1:])
    return types.frozenarray(numpy.concatenate(weights), copy=False)
  @property
  def tri(self):
    if not self.duplicates:
      offsets = util.cumsum(points.npoints for points in self.allpoints)
      return types.frozenarray(numpy.concatenate([points.tri + offset for offset, points in zip(offsets, self.allpoints)]), copy=False)
    # Build a renumbering from per-set point indices to concatenated,
    # deduplicated indices, then redirect dropped points to their keepers.
    renumber = []
    n = 0
    for mask in self.masks:
      cumsum = mask.cumsum()
      renumber.append(cumsum+(n-1))
      n += cumsum[-1]
    assert n == self.npoints
    for pairs in self.duplicates:
      I, J = pairs[0]
      for i, j in pairs[1:]:
        renumber[i][j] = renumber[I][J]
    return types.frozenarray(numpy.concatenate([renum.take(points.tri) for renum, points in zip(renumber, self.allpoints)]), copy=False)
class ConePoints(Points):
  '''Affinely transformed lower-dimensional points plus tip.
  The point count is incremented by one regardless of the nature of the point
  set; no effort is made to introduce extra points between base plane and tip.
  Likewise, the simplex count stays equal, with all simplices obtaining an
  extra vertex in tip.
  '''
  __cache__ = 'coords', 'tri'
  @types.apply_annotations
  def __init__(self, edgepoints:strictpoints, edgeref:transform.stricttransformitem, tip:types.frozenarray):
    self.edgepoints = edgepoints  # points on the base plane
    self.edgeref = edgeref  # affine embedding of the base plane
    self.tip = tip  # apex coordinates
    super().__init__(edgepoints.npoints+1, edgepoints.ndims+1)
  @property
  def coords(self):
    # Embedded base points followed by the tip as the final point.
    return types.frozenarray(numpy.concatenate([self.edgeref.apply(self.edgepoints.coords), self.tip[_,:]]), copy=False)
  @property
  def tri(self):
    # Every base simplex gains the tip (index npoints of the base set) as an
    # extra vertex.
    tri = numpy.concatenate([self.edgepoints.tri, [[self.edgepoints.npoints]]*len(self.edgepoints.tri)], axis=1)
    return types.frozenarray(tri, copy=False)
## UTILITY FUNCTIONS
@functools.lru_cache(8)
def gauss(n):
  '''Gauss quadrature rule with ``n+1`` points on the unit interval.
  Points and weights follow from the eigendecomposition of the symmetric
  tridiagonal (Jacobi) matrix; eigenvalues on [-1,1] are rescaled to [0,1].
  '''
  k = numpy.arange(n) + 1
  d = k / numpy.sqrt(4*k**2-1)  # subdiagonal entries of the Jacobi matrix
  x, w = numpy.linalg.eigh(numpy.diagflat(d,-1)) # eigh operates (by default) on lower triangle
  return types.frozenarray((x+1) * .5, copy=False), types.frozenarray(w[0]**2, copy=False)
def gauss1(degree):
  '''Gauss quadrature for line.'''
  abscissae, weights = gauss(degree//2)
  # Promote the 1D abscissae to single-component coordinate vectors.
  return abscissae[:,numpy.newaxis], weights
@functools.lru_cache(8)
def gauss2(degree):
  '''Gauss quadrature for triangle.
  Reference: http://www.cs.rpi.edu/~flaherje/pdf/fea6.pdf'''
  assert isinstance(degree, int) and degree >= 0
  # Index patterns that expand a short list of coordinate values (via
  # numpy.take) into symmetry orbits of points: I one centroid-like point,
  # J three-point orbits, K six-point orbits.
  I = [0,0],
  J = [1,1],[0,1],[1,0]
  K = [1,2],[2,0],[0,1],[2,1],[1,0],[0,2]
  # Each icw entry pairs an orbit pattern with coordinate values and a weight.
  icw = [
    (I, [1/3], 1)
  ] if degree <= 1 else [
    (J, [2/3,1/6], 1/3)
  ] if degree == 2 else [
    (I, [1/3], -9/16),
    (J, [3/5,1/5], 25/48),
  ] if degree == 3 else [
    (J, [0.816847572980458,0.091576213509771], 0.109951743655322),
    (J, [0.108103018168070,0.445948490915965], 0.223381589678011),
  ] if degree == 4 else [
    (I, [1/3], 0.225),
    (J, [0.797426985353088,0.101286507323456], 0.125939180544827),
    (J, [0.059715871789770,0.470142064105115], 0.132394152788506),
  ] if degree == 5 else [
    (J, [0.873821971016996,0.063089014491502], 0.050844906370207),
    (J, [0.501426509658180,0.249286745170910], 0.116786275726379),
    (K, [0.636502499121399,0.310352451033785,0.053145049844816], 0.082851075618374),
  ] if degree == 6 else [
    (I, [1/3.], -0.149570044467671),
    (J, [0.479308067841924,0.260345966079038], 0.175615257433204),
    (J, [0.869739794195568,0.065130102902216], 0.053347235608839),
    (K, [0.638444188569809,0.312865496004875,0.048690315425316], 0.077113760890257),
  ]
  if degree > 7:
    warnings.warn('inexact integration for polynomial of degree {}'.format(degree))
  # Factor 1/2 scales the tabulated weights to the reference triangle volume.
  return types.frozenarray(numpy.concatenate([numpy.take(c,i) for i, c, w in icw]), copy=False), \
         types.frozenarray(numpy.concatenate([[w/2] * len(i) for i, c, w in icw]), copy=False)
@functools.lru_cache(8)
def gauss3(degree):
  '''Gauss quadrature for tetrahedron.
  Reference http://www.cs.rpi.edu/~flaherje/pdf/fea6.pdf'''
  assert isinstance(degree, int) and degree >= 0
  # Index patterns that expand a short list of coordinate values (via
  # numpy.take) into symmetry orbits: I one point, J four-point orbits,
  # K six-point orbits, L twelve-point orbits.
  I = [0,0,0],
  J = [1,1,1],[0,1,1],[1,1,0],[1,0,1]
  K = [0,1,1],[1,0,1],[1,1,0],[1,0,0],[0,1,0],[0,0,1]
  L = [0,1,1],[1,0,1],[1,1,0],[2,1,1],[1,2,1],[1,1,2],[1,0,2],[0,2,1],[2,1,0],[1,2,0],[0,1,2],[2,0,1]
  # Each icw entry pairs an orbit pattern with coordinate values and a weight.
  icw = [
    (I, [1/4], 1),
  ] if degree == 1 else [
    (J, [0.5854101966249685,0.1381966011250105], 1/4),
  ] if degree == 2 else [
    (I, [.25], -.8),
    (J, [.5,1/6], .45),
  ] if degree == 3 else [
    (I, [.25], -.2368/3),
    (J, [0.7857142857142857,0.0714285714285714], .1372/3),
    (K, [0.1005964238332008,0.3994035761667992], .448/3),
  ] if degree == 4 else [
    (I, [.25], 0.1817020685825351),
    (J, [0,1/3.], 0.0361607142857143),
    (J, [8/11.,1/11.], 0.0698714945161738),
    (K, [0.4334498464263357,0.0665501535736643], 0.0656948493683187),
  ] if degree == 5 else [
    (J, [0.3561913862225449,0.2146028712591517], 0.0399227502581679),
    (J, [0.8779781243961660,0.0406739585346113], 0.0100772110553207),
    (J, [0.0329863295731731,0.3223378901422757], 0.0553571815436544),
    (L, [0.2696723314583159,0.0636610018750175,0.6030056647916491], 0.0482142857142857),
  ] if degree == 6 else [
    (I, [.25], 0.1095853407966528),
    (J, [0.7653604230090441,0.0782131923303186], 0.0635996491464850),
    (J, [0.6344703500082868,0.1218432166639044], -0.3751064406859797),
    (J, [0.0023825066607383,0.3325391644464206], 0.0293485515784412),
    (K, [0,.5], 0.0058201058201058),
    (L, [.2,.1,.6], 0.1653439153439105)
  ] if degree == 7 else [
    (I, [.25], -0.2359620398477557),
    (J, [0.6175871903000830,0.1274709365666390], 0.0244878963560562),
    (J, [0.9037635088221031,0.0320788303926323], 0.0039485206398261),
    (K, [0.4502229043567190,0.0497770956432810], 0.0263055529507371),
    (K, [0.3162695526014501,0.1837304473985499], 0.0829803830550589),
    (L, [0.0229177878448171,0.2319010893971509,0.5132800333608811], 0.0254426245481023),
    (L, [0.7303134278075384,0.0379700484718286,0.1937464752488044], 0.0134324384376852),
  ]
  if degree > 8:
    warnings.warn('inexact integration for polynomial of degree {}'.format(degree))
  # Factor 1/6 scales the tabulated weights to the reference tet volume.
  return types.frozenarray(numpy.concatenate([numpy.take(c,i) for i, c, w in icw]), copy=False), \
         types.frozenarray(numpy.concatenate([[w/6] * len(i) for i, c, w in icw]), copy=False)
# Dispatch table indexed by dimension: gaussn[ndims](degree) -> (coords, weights).
gaussn = None, gauss1, gauss2, gauss3
def find_duplicates(allpoints):
  '''Group coinciding hull points across several point sets.
  Returns a list of tuples, each holding the ``(iset, ipoint)`` indices of two
  or more hull points that share identical coordinates.
  '''
  seen = {}
  for iset, points in enumerate(allpoints):
    hullpoints, = points.onhull.nonzero()
    for ipoint in hullpoints:
      seen.setdefault(tuple(points.coords[ipoint]), []).append((iset, ipoint))
  return [tuple(group) for group in seen.values() if len(group) > 1]
# vim:sw=2:sts=2:et
|
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
The points module defines the :class:`Points` base class, which bundles point
coordinates, point weights, a local triangulation and a hull triangulation. The
module provides several different implementations such as :class:`TensorPoints`
and :class:`SimplexGaussPoints` that reflect the variety of elements in the
:mod:`nutils.element` module.
'''
from . import types, transform, numeric, util
import numpy, functools, itertools, warnings
_ = numpy.newaxis
class Points(types.Singleton):
  '''Collection of points on an element.
  The :class:`Points` base class bundles point coordinates, point weights,
  a local triangulation and hull triangulation. Of these only the coordinates
  are mandatory, and should be provided by the derived class in the form of the
  ``coords`` attribute. Of the remaining properties only :func:`hull` has a
  functional base implementation that relies on the availability of ``tri``.
  .. attribute:: coords
  Coordinates of the points as a :class:`float` array.
  .. attribute:: weights
  Weights of the points as a :class:`float` array.
  Args
  ----
  npoints : :class:`int`
      Number of discrete points.
  ndims : :class:`int`
      Number of spatial dimensions.
  '''
  # Properties cached per instance by the nutils types machinery.
  __cache__ = 'hull', 'onhull'
  @types.apply_annotations
  def __init__(self, npoints:types.strictint, ndims:types.strictint):
    self.npoints = npoints  # number of discrete points
    self.ndims = ndims  # number of spatial dimensions
  @property
  def tri(self):
    '''Triangulation of interior.
    A two-dimensional integer array with ``ndims+1`` columns, of which every
    row defines a simplex by mapping vertices into the list of points.
    '''
    # Only the trivial zero-dimensional, single-point case has a default.
    if self.ndims == 0 and self.npoints == 1:
      return types.frozenarray([[0]])
    raise Exception('tri not defined for {}'.format(self))
  @property
  def hull(self):
    '''Triangulation of the exterior hull.
    A two-dimensional integer array with ``ndims`` columns, of which every row
    defines a simplex by mapping vertices into the list of points.
    '''
    edges = set()
    # All ndims-sized vertex subsets of an ndims+1 simplex, i.e. its facets.
    iedges = numpy.array(list(itertools.combinations(range(self.ndims+1), self.ndims)))
    for tri in self.tri:
      # Facets shared by two simplices cancel pairwise; only exterior facets
      # survive the repeated symmetric difference.
      edges.symmetric_difference_update(map(tuple, numpy.sort(tri[iedges], axis=1)))
    # NOTE(review): returns a plain numpy array while sibling properties
    # return types.frozenarray — possibly intended to be frozen; confirm.
    return numpy.array(sorted(edges))
  @property
  def onhull(self):
    '''Boolean mask marking boundary points.
    The array of length ``npoints`` is ``True`` where the corresponding point
    is part of the :attr:`hull`, and ``False`` where it is not.
    '''
    onhull = numpy.zeros(self.npoints, dtype=bool)
    onhull[numpy.ravel(self.hull)] = True # not clear why ravel is necessary but setitem seems to require it
    return types.frozenarray(onhull, copy=False)
# Annotation helper: argument type that strictly requires a Points instance.
strictpoints = types.strict[Points]
class CoordsPoints(Points):
  '''Manually supplied points.'''
  @types.apply_annotations
  def __init__(self, coords:types.frozenarray[float]):
    self.coords = coords
    # coords.shape == (npoints, ndims) supplies both base-class arguments.
    super().__init__(*coords.shape)
class CoordsWeightsPoints(CoordsPoints):
  '''Manually supplied points and weights.'''
  @types.apply_annotations
  def __init__(self, coords:types.frozenarray[float], weights:types.frozenarray[float]):
    self.weights = weights  # one quadrature weight per point
    super().__init__(coords)
class CoordsUniformPoints(CoordsPoints):
  '''Manually supplied points with uniform weights.'''
  @types.apply_annotations
  def __init__(self, coords:types.frozenarray[float], volume:float):
    # Spread the total volume evenly so that the weights sum to ``volume``.
    self.weights = types.frozenarray.full([len(coords)], volume/len(coords))
    super().__init__(coords)
class TensorPoints(Points):
  '''Tensor product of two Points instances.'''
  __cache__ = 'coords', 'weights', 'tri', 'hull'
  @types.apply_annotations
  def __init__(self, points1:strictpoints, points2:strictpoints):
    self.points1 = points1
    self.points2 = points2
    # Point counts multiply, spatial dimensions add.
    super().__init__(points1.npoints * points2.npoints, points1.ndims + points2.ndims)
  @property
  def coords(self):
    # Broadcast the two coordinate sets against each other, then flatten the
    # (npoints1, npoints2) grid into a single point axis.
    coords = numpy.empty((self.points1.npoints, self.points2.npoints, self.ndims))
    coords[:,:,:self.points1.ndims] = self.points1.coords[:,_,:]
    coords[:,:,self.points1.ndims:] = self.points2.coords[_,:,:]
    return types.frozenarray(coords.reshape(self.npoints, self.ndims), copy=False)
  @property
  def weights(self):
    # Product-rule weights: outer product of the two weight vectors.
    return types.frozenarray((self.points1.weights[:,_] * self.points2.weights[_,:]).ravel(), copy=False)
  @property
  def tri(self):
    if self.points1.ndims == 1:
      # For an n-dimensional simplex with vertices a0,a1,..,an, the extruded
      # element has vertices a0,a1,..,an,b0,b1,..,bn. These can be divided in
      # simplices by selecting a0,a1,..,an,b0; a1,..,an,b0,n1; and so on until
      # an,b0,b1,..,bn; resulting in n+1 n+1-dimensional simplices. In the
      # algorithm below this is achieved by first taking the tensorial product
      # of triangulations and raveling, effectively achieving vectorized
      # concatenation. The overlapping vertex subsets then follow directly from
      # numeric.overlapping.
      tri12 = self.points1.tri[:,_,:,_] * self.points2.npoints + self.points2.tri[_,:,_,:] # ntri1 x ntri2 x 2 x ndims
      return types.frozenarray(numeric.overlapping(tri12.reshape(-1, 2*self.ndims), n=self.ndims+1).reshape(-1, self.ndims+1), copy=False)
    # No generic tensor triangulation; defer to the (raising) base property.
    return super().tri
  @property
  def hull(self):
    if self.points1.ndims == 1:
      hull1 = self.points1.hull[:,_,:,_] * self.points2.npoints + self.points2.tri[_,:,_,:] # 2 x ntri2 x 1 x ndims
      hull2 = self.points1.tri[:,_,:,_] * self.points2.npoints + self.points2.hull[_,:,_,:] # ntri1 x nhull2 x 2 x ndims-1
      # The subdivision of hull2 into simplices follows identical logic to that
      # used in the construction of self.tri.
      hull = numpy.concatenate([hull1.reshape(-1, self.ndims), numeric.overlapping(hull2.reshape(-1, 2*(self.ndims-1)), n=self.ndims).reshape(-1, self.ndims)])
      return types.frozenarray(hull, copy=False)
    return super().hull
class SimplexGaussPoints(CoordsWeightsPoints):
  '''Gauss quadrature points on a simplex.'''
  @types.apply_annotations
  def __init__(self, ndims:types.strictint, degree:types.strictint):
    # Dispatch to the dimension-specific rule (gauss1, gauss2 or gauss3).
    super().__init__(*gaussn[ndims](degree))
class SimplexBezierPoints(CoordsPoints):
  '''Bezier points on a simplex.'''
  __cache__ = 'tri', 'hull'
  @types.apply_annotations
  def __init__(self, ndims:types.strictint, n:types.strictint):
    self.n = n  # number of points per edge
    linspace = numpy.linspace(0, 1, n)
    # Keep only the lattice points on the simplex: multi-indices summing
    # to strictly less than n.
    super().__init__([linspace[list(index)[::-1]] for index in numpy.ndindex(*[n] * ndims) if sum(index) < n])
  @property
  def tri(self):
    if self.ndims == 1:
      return types.frozenarray(numeric.overlapping(numpy.arange(self.n)), copy=False)
    if self.ndims == 2:
      n = self.n
      # vert1/vert2 enumerate the two triangle orientations of the lattice;
      # ((2*n-i+1)*i)//2 is the index offset of lattice row i.
      vert1 = [((2*n-i+1)*i)//2 + numpy.array([j,j+1,j+n-i]) for i in range(n-1) for j in range(n-i-1)]
      vert2 = [((2*n-i+1)*i)//2 + numpy.array([j+1,j+n-i+1,j+n-i]) for i in range(n-1) for j in range(n-i-2)]
      return types.frozenarray(vert1 + vert2, copy=False)
    return super().tri
  @property
  def hull(self):
    if self.ndims == 2:
      n = self.n
      # Walk the three edges of the triangle in sequence.
      hull = numpy.concatenate([numpy.arange(n), numpy.arange(n-1,0,-1).cumsum()+n-1, numpy.arange(n+1,2,-1).cumsum()[::-1]-n-1])
      return types.frozenarray(numeric.overlapping(hull), copy=False)
    return super().hull
class TransformPoints(Points):
  '''Affinely transformed Points.'''
  __cache__ = 'coords', 'weights'
  @types.apply_annotations
  def __init__(self, points:strictpoints, trans:transform.stricttransformitem):
    self.points = points  # underlying point set
    self.trans = trans  # affine transformation to apply
    super().__init__(points.npoints, points.ndims)
  @property
  def coords(self):
    return self.trans.apply(self.points.coords)
  @property
  def weights(self):
    # Scale weights by the absolute Jacobian determinant of the affine map.
    return self.points.weights * abs(float(self.trans.det))
  @property
  def tri(self):
    # Connectivity is unaffected by an affine transformation.
    return self.points.tri
  @property
  def hull(self):
    return self.points.hull
class ConcatPoints(Points):
  '''Concatenation of several Points objects.
  An optional ``duplicates`` argument lists all points that are equal,
  triggering deduplication and resulting in a smaller total point count.
  '''
  __cache__ = 'coords', 'weights', 'tri', 'masks'
  @types.apply_annotations
  def __init__(self, allpoints:types.tuple[strictpoints], duplicates:frozenset=frozenset()):
    self.allpoints = allpoints
    # duplicates: collection of tuples of (iset, ipoint) pairs; the first pair
    # of each tuple is kept, the remaining pairs are dropped.
    self.duplicates = duplicates
    super().__init__(sum(points.npoints for points in allpoints) - sum(len(d)-1 for d in duplicates), allpoints[0].ndims)
  @property
  def masks(self):
    # Boolean retention mask per point set: False marks dropped duplicates.
    masks = [numpy.ones(points.npoints, dtype=bool) for points in self.allpoints]
    for pairs in self.duplicates:
      for i, j in pairs[1:]:
        masks[i][j] = False
    return tuple(masks)
  @property
  def coords(self):
    return types.frozenarray(numpy.concatenate([points.coords[mask] for mask, points in zip(self.masks, self.allpoints)] if self.duplicates else [points.coords for points in self.allpoints]), copy=False)
  @property
  def weights(self):
    if not self.duplicates:
      return types.frozenarray(numpy.concatenate([points.weights for points in self.allpoints]), copy=False)
    weights = [points.weights[mask] for mask, points in zip(self.masks, self.allpoints)]
    for pairs in self.duplicates:
      I, J = pairs[0]
      # Fold the weights of the dropped duplicates into the retained point;
      # masks[I][:J].sum() is its position within the masked array.
      weights[I][self.masks[I][:J].sum()] += sum(self.allpoints[i].weights[j] for i, j in pairs[1:])
    return types.frozenarray(numpy.concatenate(weights), copy=False)
  @property
  def tri(self):
    if not self.duplicates:
      offsets = util.cumsum(points.npoints for points in self.allpoints)
      return types.frozenarray(numpy.concatenate([points.tri + offset for offset, points in zip(offsets, self.allpoints)]), copy=False)
    # Build a renumbering from per-set point indices to concatenated,
    # deduplicated indices, then redirect dropped points to their keepers.
    renumber = []
    n = 0
    for mask in self.masks:
      cumsum = mask.cumsum()
      renumber.append(cumsum+(n-1))
      n += cumsum[-1]
    assert n == self.npoints
    for pairs in self.duplicates:
      I, J = pairs[0]
      for i, j in pairs[1:]:
        renumber[i][j] = renumber[I][J]
    return types.frozenarray(numpy.concatenate([renum.take(points.tri) for renum, points in zip(renumber, self.allpoints)]), copy=False)
class ConePoints(Points):
  '''Affinely transformed lower-dimensional points plus tip.
  The point count is incremented by one regardless of the nature of the point
  set; no effort is made to introduce extra points between base plane and tip.
  Likewise, the simplex count stays equal, with all simplices obtaining an
  extra vertex in tip.
  '''
  __cache__ = 'coords', 'tri'
  @types.apply_annotations
  def __init__(self, edgepoints:strictpoints, edgeref:transform.stricttransformitem, tip:types.frozenarray):
    self.edgepoints = edgepoints  # points on the base plane
    self.edgeref = edgeref  # affine embedding of the base plane
    self.tip = tip  # apex coordinates
    super().__init__(edgepoints.npoints+1, edgepoints.ndims+1)
  @property
  def coords(self):
    # Embedded base points followed by the tip as the final point.
    return types.frozenarray(numpy.concatenate([self.edgeref.apply(self.edgepoints.coords), self.tip[_,:]]), copy=False)
  @property
  def tri(self):
    # Every base simplex gains the tip (index npoints of the base set) as an
    # extra vertex.
    tri = numpy.concatenate([self.edgepoints.tri, [[self.edgepoints.npoints]]*len(self.edgepoints.tri)], axis=1)
    return types.frozenarray(tri, copy=False)
## UTILITY FUNCTIONS
@functools.lru_cache(8)
def gauss(n):
  '''Gauss quadrature rule with ``n+1`` points on the unit interval.
  Points and weights follow from the eigendecomposition of the symmetric
  tridiagonal (Jacobi) matrix; eigenvalues on [-1,1] are rescaled to [0,1].
  '''
  k = numpy.arange(n) + 1
  d = k / numpy.sqrt(4*k**2-1)  # subdiagonal entries of the Jacobi matrix
  x, w = numpy.linalg.eigh(numpy.diagflat(d,-1)) # eigh operates (by default) on lower triangle
  return types.frozenarray((x+1) * .5, copy=False), types.frozenarray(w[0]**2, copy=False)
def gauss1(degree):
  '''Gauss quadrature for line.'''
  x, w = gauss(degree//2)
  # Add a trailing axis so points are returned as 1-component coordinates.
  return x[:,_], w
@functools.lru_cache(8)
def gauss2(degree):
  '''Gauss quadrature for triangle.
  Reference: http://www.cs.rpi.edu/~flaherje/pdf/fea6.pdf'''
  assert isinstance(degree, int) and degree >= 0
  # Index patterns that expand a short list of coordinate values (via
  # numpy.take) into symmetry orbits of points: I one centroid-like point,
  # J three-point orbits, K six-point orbits.
  I = [0,0],
  J = [1,1],[0,1],[1,0]
  K = [1,2],[2,0],[0,1],[2,1],[1,0],[0,2]
  # Each icw entry pairs an orbit pattern with coordinate values and a weight.
  icw = [
    (I, [1/3], 1)
  ] if degree <= 1 else [
    (J, [2/3,1/6], 1/3)
  ] if degree == 2 else [
    (I, [1/3], -9/16),
    (J, [3/5,1/5], 25/48),
  ] if degree == 3 else [
    (J, [0.816847572980458,0.091576213509771], 0.109951743655322),
    (J, [0.108103018168070,0.445948490915965], 0.223381589678011),
  ] if degree == 4 else [
    (I, [1/3], 0.225),
    (J, [0.797426985353088,0.101286507323456], 0.125939180544827),
    (J, [0.059715871789770,0.470142064105115], 0.132394152788506),
  ] if degree == 5 else [
    (J, [0.873821971016996,0.063089014491502], 0.050844906370207),
    (J, [0.501426509658180,0.249286745170910], 0.116786275726379),
    (K, [0.636502499121399,0.310352451033785,0.053145049844816], 0.082851075618374),
  ] if degree == 6 else [
    (I, [1/3.], -0.149570044467671),
    (J, [0.479308067841924,0.260345966079038], 0.175615257433204),
    (J, [0.869739794195568,0.065130102902216], 0.053347235608839),
    (K, [0.638444188569809,0.312865496004875,0.048690315425316], 0.077113760890257),
  ]
  if degree > 7:
    warnings.warn('inexact integration for polynomial of degree {}'.format(degree))
  # Factor 1/2 scales the tabulated weights to the reference triangle volume.
  return types.frozenarray(numpy.concatenate([numpy.take(c,i) for i, c, w in icw]), copy=False), \
         types.frozenarray(numpy.concatenate([[w/2] * len(i) for i, c, w in icw]), copy=False)
@functools.lru_cache(8)
def gauss3(degree):
  '''Gauss quadrature for tetrahedron.

  Reference http://www.cs.rpi.edu/~flaherje/pdf/fea6.pdf'''

  assert isinstance(degree, int) and degree >= 0
  # Symmetric point orbits, encoded as index patterns: every row indexes into
  # a short coefficient list (expanded by numpy.take below) to form one point.
  # I: the centroid (1 point), J: 4-point orbit, K: 6-point orbit,
  # L: 12-point orbit.
  I = [0,0,0],
  J = [1,1,1],[0,1,1],[1,1,0],[1,0,1]
  K = [0,1,1],[1,0,1],[1,1,0],[1,0,0],[0,1,0],[0,0,1]
  L = [0,1,1],[1,0,1],[1,1,0],[2,1,1],[1,2,1],[1,1,2],[1,0,2],[0,2,1],[2,1,0],[1,2,0],[0,1,2],[2,0,1]
  # icw: (orbit, coefficients, weight) triples for the requested degree; the
  # weight applies to every point of the orbit. Tabulated rules stop at
  # degree 8.
  icw = [
    (I, [1/4], 1),
  ] if degree == 1 else [
    (J, [0.5854101966249685,0.1381966011250105], 1/4),
  ] if degree == 2 else [
    (I, [.25], -.8),
    (J, [.5,1/6], .45),
  ] if degree == 3 else [
    (I, [.25], -.2368/3),
    (J, [0.7857142857142857,0.0714285714285714], .1372/3),
    (K, [0.1005964238332008,0.3994035761667992], .448/3),
  ] if degree == 4 else [
    (I, [.25], 0.1817020685825351),
    (J, [0,1/3.], 0.0361607142857143),
    (J, [8/11.,1/11.], 0.0698714945161738),
    (K, [0.4334498464263357,0.0665501535736643], 0.0656948493683187),
  ] if degree == 5 else [
    (J, [0.3561913862225449,0.2146028712591517], 0.0399227502581679),
    (J, [0.8779781243961660,0.0406739585346113], 0.0100772110553207),
    (J, [0.0329863295731731,0.3223378901422757], 0.0553571815436544),
    (L, [0.2696723314583159,0.0636610018750175,0.6030056647916491], 0.0482142857142857),
  ] if degree == 6 else [
    (I, [.25], 0.1095853407966528),
    (J, [0.7653604230090441,0.0782131923303186], 0.0635996491464850),
    (J, [0.6344703500082868,0.1218432166639044], -0.3751064406859797),
    (J, [0.0023825066607383,0.3325391644464206], 0.0293485515784412),
    (K, [0,.5], 0.0058201058201058),
    (L, [.2,.1,.6], 0.1653439153439105)
  ] if degree == 7 else [
    (I, [.25], -0.2359620398477557),
    (J, [0.6175871903000830,0.1274709365666390], 0.0244878963560562),
    (J, [0.9037635088221031,0.0320788303926323], 0.0039485206398261),
    (K, [0.4502229043567190,0.0497770956432810], 0.0263055529507371),
    (K, [0.3162695526014501,0.1837304473985499], 0.0829803830550589),
    (L, [0.0229177878448171,0.2319010893971509,0.5132800333608811], 0.0254426245481023),
    (L, [0.7303134278075384,0.0379700484718286,0.1937464752488044], 0.0134324384376852),
  ]
  if degree > 8:
    # Beyond the tabulated range we fall through with the degree-8 rule.
    warnings.warn('inexact integration for polynomial of degree {}'.format(degree))
  # Expand orbits into concrete points; weights are divided by 6 so they sum
  # to the volume (1/6) of the reference tetrahedron.
  return types.frozenarray(numpy.concatenate([numpy.take(c,i) for i, c, w in icw]), copy=False), \
         types.frozenarray(numpy.concatenate([[w/6] * len(i) for i, c, w in icw]), copy=False)
gaussn = None, gauss1, gauss2, gauss3
def find_duplicates(allpoints):
  '''Group coinciding hull points across several point sets.

  Returns a list of tuples of (set-index, point-index) pairs, one tuple per
  coordinate that occurs at more than one hull location.'''

  occurrences = {}
  for iset, pnts in enumerate(allpoints):
    for ipoint in pnts.onhull.nonzero()[0]:
      key = tuple(pnts.coords[ipoint])
      occurrences.setdefault(key, []).append((iset, ipoint))
  return [tuple(group) for group in occurrences.values() if len(group) > 1]
# vim:sw=2:sts=2:et
|
en
| 0.803451
|
# Copyright (c) 2014 Evalf # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. The points module defines the :class:`Points` base class, which bundles point coordinates, point weights, a local triangulation and a hull triangulation. The module provides several different implementations such as :class:`TensorPoints` and :class:`SimplexGaussPoints` that reflect the variety of elements in the :mod:`nutils.element` module. Collection of points on an element. The :class:`Points` base class bundles point coordinates, point weights, a local triangulation and hull triangulation. Of these only the coordinates are mandatory, and should be provided by the derived class in the form of the ``coords`` attribute. Of the remaining properties only :func:`hull` has a functional base implementation that relies on the availability of ``tri``. .. attribute:: coords Coordinates of the points as a :class:`float` array. .. attribute:: weights Weights of the points as a :class:`float` array. 
Args ---- npoints : :class:`int` Number of discrete points. ndims : :class:`int` Number of spatial dimensions. Triangulation of interior. A two-dimensional integer array with ``ndims+1`` columns, of which every row defines a simplex by mapping vertices into the list of points. Triangulation of the exterior hull. A two-dimensional integer array with ``ndims`` columns, of which every row defines a simplex by mapping vertices into the list of points. Boolean mask marking boundary points. The array of length ``npoints`` is ``True`` where the corresponding point is part of the :attr:`hull`, and ``False`` where it is not. # not clear why ravel is necessary but setitem seems to require it Manually supplied points. Manually supplied points and weights. Manually supplied points with uniform weights. Tensor product of two Points instances. # For an n-dimensional simplex with vertices a0,a1,..,an, the extruded # element has vertices a0,a1,..,an,b0,b1,..,bn. These can be divided in # simplices by selecting a0,a1,..,an,b0; a1,..,an,b0,n1; and so on until # an,b0,b1,..,bn; resulting in n+1 n+1-dimensional simplices. In the # algorithm below this is achieved by first taking the tensorial product # of triangulations and raveling, effectively achieving vectorized # concatenation. The overlapping vertex subsets then follow directly from # numeric.overlapping. # ntri1 x ntri2 x 2 x ndims # 2 x ntri2 x 1 x ndims # ntri1 x nhull2 x 2 x ndims-1 # The subdivision of hull2 into simplices follows identical logic to that # used in the construction of self.tri. Gauss quadrature points on a simplex. Bezier points on a simplex. Affinely transformed Points. Concatenation of several Points objects. An optional ``duplicates`` argument lists all points that are equal, triggering deduplication and resulting in a smaller total point count. Affinely transformed lower-dimensional points plus tip. 
The point count is incremented by one regardless of the nature of the point set; no effort is made to introduce extra points between base plane and tip. Likewise, the simplex count stays equal, with all simplices obtaining an extra vertex in tip. ## UTILITY FUNCTIONS # eigh operates (by default) on lower triangle Gauss quadrature for line. Gauss quadrature for triangle. Reference: http://www.cs.rpi.edu/~flaherje/pdf/fea6.pdf Gauss quadrature for tetrahedron. Reference http://www.cs.rpi.edu/~flaherje/pdf/fea6.pdf # vim:sw=2:sts=2:et
| 1.611705
| 2
|
test/shared/utils.py
|
Chia-Network/cibuildwheel
| 0
|
6626150
|
'''
Utility functions used by the cibuildwheel tests.
This file is added to the PYTHONPATH in the test runner at bin/run_test.py.
'''
import os
import platform as pm
import shutil
import subprocess
import sys
from contextlib import contextmanager
from tempfile import mkdtemp
# Azure-hosted Windows agents are detected via their characteristic tool-cache
# directory; Travis CI is detected via its TRAVIS_OS_NAME environment variable.
IS_WINDOWS_RUNNING_ON_AZURE = os.path.exists('C:\\hostedtoolcache')
IS_WINDOWS_RUNNING_ON_TRAVIS = os.environ.get('TRAVIS_OS_NAME') == 'windows'
# Python 2 does not have a tempfile.TemporaryDirectory context manager
@contextmanager
def TemporaryDirectoryIfNone(path):
    '''Yield `path` unchanged, or a fresh temporary directory when `path` is None.

    A directory created here (i.e. when `path` is None) is removed on exit;
    a caller-supplied path is left untouched.
    '''
    # Use an `is None` test for creation as well as cleanup: the previous
    # `path or mkdtemp()` created a directory for any falsy path (e.g. '')
    # but only removed it when path was None, leaking the directory.
    _path = mkdtemp() if path is None else path
    try:
        yield _path
    finally:
        if path is None:
            shutil.rmtree(_path)
def cibuildwheel_get_build_identifiers(project_path, env=None):
    '''
    Returns the list of build identifiers that cibuildwheel will try to build
    for the current platform.
    '''
    # Ask cibuildwheel itself, running under the current interpreter.
    args = [sys.executable, '-m', 'cibuildwheel', '--print-build-identifiers', project_path]
    output = subprocess.check_output(args, universal_newlines=True, env=env)
    return output.strip().split('\n')
def cibuildwheel_run(project_path, env=None, add_env=None, output_dir=None):
    '''
    Build the project at `project_path` by running cibuildwheel in a
    subprocess, under the current Python interpreter.

    :param project_path: path of the project to be built.
    :param env: full environment for the subprocess; defaults to a copy of
        os.environ. Note: the supplied mapping is modified in place.
    :param add_env: extra variables merged into the environment.
    :param output_dir: directory that receives the wheels; a temporary
        directory is created (and removed afterwards) when None.
    :return: list of built wheels (file names).
    '''
    run_env = os.environ.copy() if env is None else env
    # Drop any inherited MACOSX_DEPLOYMENT_TARGET so runs behave consistently
    # regardless of the host configuration.
    run_env.pop('MACOSX_DEPLOYMENT_TARGET', None)
    if add_env is not None:
        run_env.update(add_env)
    with TemporaryDirectoryIfNone(output_dir) as wheel_dir:
        subprocess.check_call(
            [sys.executable, '-m', 'cibuildwheel', '--output-dir', str(wheel_dir), project_path],
            env=run_env,
        )
        return os.listdir(wheel_dir)
def expected_wheels(package_name, package_version, manylinux_versions=None,
                    macosx_deployment_target=None):
    '''
    Returns a list of expected wheels from a run of cibuildwheel.
    '''
    # PEP 425 (https://www.python.org/dev/peps/pep-0425/) names wheels as
    # {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl.
    # The python tag and abi tag are determined together by the interpreter
    # that built the wheel, so they are handled here as one python_abi_tag.
    python_abi_tags = ['cp35-cp35m', 'cp36-cp36m', 'cp37-cp37m', 'cp38-cp38']
    extra_x86_python_abi_tags = ['cp27-cp27m', 'pp27-pypy_73', 'pp36-pypy36_pp73']

    if platform == 'linux':
        if pm.machine() in ['x86_64', 'i686']:
            if manylinux_versions is None:
                manylinux_versions = ['manylinux1', 'manylinux2010']
            python_abi_tags += extra_x86_python_abi_tags
            python_abi_tags.append('cp27-cp27mu')  # python 2.7 has 2 different ABI on manylinux
            architectures = {'cp': ['x86_64', 'i686'], 'pp': ['x86_64']}
        else:
            if manylinux_versions is None:
                manylinux_versions = ['manylinux2014']
            architectures = {'cp': [pm.machine()]}

        platform_tags = {
            implementation: [
                '{manylinux_version}_{architecture}'.format(
                    manylinux_version=manylinux_version, architecture=architecture)
                for architecture in archs
                for manylinux_version in manylinux_versions
            ]
            for implementation, archs in architectures.items()
        }

        def get_platform_tags(python_abi_tag):
            return platform_tags[python_abi_tag[:2]]

    elif platform == 'windows':
        python_abi_tags += extra_x86_python_abi_tags
        platform_tags = {'cp': ['win32', 'win_amd64'], 'pp': ['win32']}

        def get_platform_tags(python_abi_tag):
            return platform_tags[python_abi_tag[:2]]

    elif platform == 'macos':
        python_abi_tags += extra_x86_python_abi_tags

        def get_platform_tags(python_abi_tag):
            # PyPy wheels target 10.7 by default, CPython wheels 10.9, unless
            # an explicit deployment target is given.
            default_version = '10.7' if python_abi_tag.startswith('pp') else '10.9'
            version = (macosx_deployment_target or default_version).replace('.', '_')
            return ['macosx_{}_x86_64'.format(version)]

    else:
        raise Exception('unsupported platform')

    templates = [
        '{package_name}-{package_version}-{python_abi_tag}-{platform_tag}.whl'.format(
            package_name=package_name, package_version=package_version,
            python_abi_tag=python_abi_tag, platform_tag=platform_tag)
        for python_abi_tag in python_abi_tags
        for platform_tag in get_platform_tags(python_abi_tag)
    ]

    if IS_WINDOWS_RUNNING_ON_TRAVIS:
        # Python 2.7 isn't supported on Travis.
        templates = [t for t in templates if '-cp27-' not in t and '-pp2' not in t]

    return templates
# Resolve the target platform once at import time; an explicit CIBW_PLATFORM
# environment variable overrides detection from sys.platform.
platform = os.environ.get('CIBW_PLATFORM')
if platform is None:
    if sys.platform.startswith('linux'):
        platform = 'linux'
    elif sys.platform.startswith('darwin'):
        platform = 'macos'
    elif sys.platform in ('win32', 'cygwin'):
        platform = 'windows'
    else:
        raise Exception('Unsupported platform')
|
'''
Utility functions used by the cibuildwheel tests.
This file is added to the PYTHONPATH in the test runner at bin/run_test.py.
'''
import os
import platform as pm
import shutil
import subprocess
import sys
from contextlib import contextmanager
from tempfile import mkdtemp
IS_WINDOWS_RUNNING_ON_AZURE = os.path.exists('C:\\hostedtoolcache')
IS_WINDOWS_RUNNING_ON_TRAVIS = os.environ.get('TRAVIS_OS_NAME') == 'windows'
# Python 2 does not have a tempfile.TemporaryDirectory context manager
@contextmanager
def TemporaryDirectoryIfNone(path):
_path = path or mkdtemp()
try:
yield _path
finally:
if path is None:
shutil.rmtree(_path)
def cibuildwheel_get_build_identifiers(project_path, env=None):
'''
Returns the list of build identifiers that cibuildwheel will try to build
for the current platform.
'''
cmd_output = subprocess.check_output(
[sys.executable, '-m', 'cibuildwheel', '--print-build-identifiers', project_path],
universal_newlines=True,
env=env,
)
return cmd_output.strip().split('\n')
def cibuildwheel_run(project_path, env=None, add_env=None, output_dir=None):
'''
Runs cibuildwheel as a subprocess, building the project at project_path.
Uses the current Python interpreter.
:param project_path: path of the project to be built.
:param env: full environment to be used, os.environ if None
:param add_env: environment used to update env
:param output_dir: directory where wheels are saved. If None, a temporary
directory will be used for the duration of the command.
:return: list of built wheels (file names).
'''
if env is None:
env = os.environ.copy()
# If present in the host environment, remove the MACOSX_DEPLOYMENT_TARGET for consistency
env.pop('MACOSX_DEPLOYMENT_TARGET', None)
if add_env is not None:
env.update(add_env)
with TemporaryDirectoryIfNone(output_dir) as _output_dir:
subprocess.check_call(
[sys.executable, '-m', 'cibuildwheel', '--output-dir', str(_output_dir), project_path],
env=env,
)
wheels = os.listdir(_output_dir)
return wheels
def expected_wheels(package_name, package_version, manylinux_versions=None,
macosx_deployment_target=None):
'''
Returns a list of expected wheels from a run of cibuildwheel.
'''
# per PEP 425 (https://www.python.org/dev/peps/pep-0425/), wheel files shall have name of the form
# {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
# {python tag} and {abi tag} are closely related to the python interpreter used to build the wheel
# so we'll merge them below as python_abi_tag
python_abi_tags = ['cp35-cp35m', 'cp36-cp36m', 'cp37-cp37m', 'cp38-cp38']
extra_x86_python_abi_tags = ['cp27-cp27m', 'pp27-pypy_73', 'pp36-pypy36_pp73']
if platform == 'linux':
if pm.machine() not in ['x86_64', 'i686']:
if manylinux_versions is None:
manylinux_versions = ['manylinux2014']
architectures = {'cp': [pm.machine()]}
else:
if manylinux_versions is None:
manylinux_versions = ['manylinux1', 'manylinux2010']
python_abi_tags += extra_x86_python_abi_tags
python_abi_tags.append('cp27-cp27mu') # python 2.7 has 2 different ABI on manylinux
architectures = {'cp': ['x86_64', 'i686'], 'pp': ['x86_64']}
platform_tags = {}
for python_implemention in architectures:
platform_tags[python_implemention] = [
'{manylinux_version}_{architecture}'.format(
manylinux_version=manylinux_version, architecture=architecture)
for architecture in architectures[python_implemention]
for manylinux_version in manylinux_versions
]
def get_platform_tags(python_abi_tag):
return platform_tags[python_abi_tag[:2]]
elif platform == 'windows':
python_abi_tags += extra_x86_python_abi_tags
platform_tags = {'cp': ['win32', 'win_amd64'], 'pp': ['win32']}
def get_platform_tags(python_abi_tag):
return platform_tags[python_abi_tag[:2]]
elif platform == 'macos':
python_abi_tags += extra_x86_python_abi_tags
def get_platform_tags(python_abi_tag):
default_version = '10.7' if python_abi_tag.startswith('pp') else '10.9'
return ['macosx_{}_x86_64'.format((macosx_deployment_target or default_version).replace('.', '_'))]
else:
raise Exception('unsupported platform')
templates = []
for python_abi_tag in python_abi_tags:
for platform_tag in get_platform_tags(python_abi_tag):
templates.append('{package_name}-{package_version}-{python_abi_tag}-{platform_tag}.whl'.format(
package_name=package_name, package_version=package_version,
python_abi_tag=python_abi_tag, platform_tag=platform_tag
))
if IS_WINDOWS_RUNNING_ON_TRAVIS:
# Python 2.7 isn't supported on Travis.
templates = [t for t in templates if '-cp27-' not in t and '-pp2' not in t]
return templates
platform = None
if 'CIBW_PLATFORM' in os.environ:
platform = os.environ['CIBW_PLATFORM']
elif sys.platform.startswith('linux'):
platform = 'linux'
elif sys.platform.startswith('darwin'):
platform = 'macos'
elif sys.platform in ['win32', 'cygwin']:
platform = 'windows'
else:
raise Exception('Unsupported platform')
|
en
| 0.830927
|
Utility functions used by the cibuildwheel tests. This file is added to the PYTHONPATH in the test runner at bin/run_test.py. # Python 2 does not have a tempfile.TemporaryDirectory context manager Returns the list of build identifiers that cibuildwheel will try to build for the current platform. Runs cibuildwheel as a subprocess, building the project at project_path. Uses the current Python interpreter. :param project_path: path of the project to be built. :param env: full environment to be used, os.environ if None :param add_env: environment used to update env :param output_dir: directory where wheels are saved. If None, a temporary directory will be used for the duration of the command. :return: list of built wheels (file names). # If present in the host environment, remove the MACOSX_DEPLOYMENT_TARGET for consistency Returns a list of expected wheels from a run of cibuildwheel. # per PEP 425 (https://www.python.org/dev/peps/pep-0425/), wheel files shall have name of the form # {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl # {python tag} and {abi tag} are closely related to the python interpreter used to build the wheel # so we'll merge them below as python_abi_tag # python 2.7 has 2 different ABI on manylinux # Python 2.7 isn't supported on Travis.
| 2.481622
| 2
|