| code (string, length 22 to 1.05M) | apis (list, length 1 to 3.31k) | extract_api (string, length 75 to 3.25M) |
|---|---|---|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Feature Pyramid Networks.
Feature Pyramid Networks were proposed in:
[1] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
    Feature Pyramid Networks for Object Detection. CVPR 2017.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import logging
import tensorflow as tf
from tensorflow.python.keras import backend
from . import nn_ops
from ..ops import spatial_transform_ops
from ..utils.efficientdet_utils import get_feat_sizes, activation_fn
from xl_tensorflow.utils import hparams_config
@tf.keras.utils.register_keras_serializable(package='Text')
class WeightedAdd(tf.keras.layers.Layer):
def __init__(self, epsilon=1e-4, activation="relu", **kwargs):
"""
Args:
epsilon:
activation: relu and softmax
**kwargs:
"""
super(WeightedAdd, self).__init__(**kwargs)
self.epsilon = epsilon
self.activation = tf.nn.softmax if activation == "softmax" else tf.nn.relu
def build(self, input_shape):
num_in = len(input_shape)
self.w = self.add_weight(name=self.name,
shape=(num_in,),
initializer=tf.keras.initializers.constant(1 / num_in),
trainable=True,
dtype=tf.float32)
def call(self, inputs, **kwargs):
w = self.activation(self.w)
        # Normalize by the sum of the activated weights rather than the raw ones.
        weights_sum = tf.reduce_sum(w)
x = tf.reduce_sum([(w[i] * inputs[i]) / (weights_sum + self.epsilon) for i in range(len(inputs))], axis=0)
return x
def compute_output_shape(self, input_shape):
return input_shape[0]
    def get_config(self):
        config = super(WeightedAdd, self).get_config()
        config.update({
            'epsilon': self.epsilon,
            # Also record the activation so a deserialized layer behaves identically.
            'activation': 'softmax' if self.activation is tf.nn.softmax else 'relu',
        })
        return config
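# Usage sketch (illustrative only; shapes and values are hypothetical): fuse two
# same-shape feature maps with the fast-attention ("relu") weighting above.
#   feat_a = tf.ones([1, 16, 16, 64])
#   feat_b = tf.zeros([1, 16, 16, 64])
#   fused = WeightedAdd(activation="relu")([feat_a, feat_b])  # -> shape [1, 16, 16, 64]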
class BiFpn(object):
"""BiFeature pyramid networks.
1、去掉training_bn参数
2、以keras网络层为主,部分tf.nn层
todo 把bifpn放到yolo种
"""
def __init__(self,
min_level=3,
max_level=7,
):
"""FPN initialization function.
Args:
min_level: `int` minimum level in FPN output feature maps.
max_level: `int` maximum level in FPN output feature maps.
"""
self._min_level = min_level
self._max_level = max_level
def get_fpn_config(self, fpn_name, min_level, max_level, weight_method):
"""Get fpn related configuration."""
if not fpn_name:
fpn_name = 'bifpn_fa'
name_to_config = {
'bifpn_sum': self.bifpn_sum_config(),
'bifpn_fa': self.bifpn_fa_config(),
'bifpn_dyn': self.bifpn_dynamic_config(min_level, max_level, weight_method)
}
return name_to_config[fpn_name]
def fuse_features(self, nodes, weight_method):
"""Fuse features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
weight_method: feature fusion method. One of:
- "attn" - Softmax weighted fusion
- "fastattn" - Fast normalzied feature fusion
- "sum" - a sum of inputs
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if weight_method == 'attn':
new_node = WeightedAdd(activation="softmax")(nodes)
elif weight_method == 'fastattn':
new_node = WeightedAdd(activation="relu")(nodes)
elif weight_method == 'sum':
new_node = tf.add_n(nodes)
else:
raise ValueError(
'unknown weight_method {}'.format(weight_method))
return new_node
def build_bifpn_layer(self, feats, feat_sizes, params):
"""Builds a feature pyramid given previous feature pyramid and config."""
p = params # use p to denote the network config.
        if p.fpn.fpn_config:
            fpn_config = p.fpn.fpn_config
else:
fpn_config = self.get_fpn_config(p.fpn.fpn_name, p.architecture.min_level, p.architecture.max_level,
p.fpn.fpn_weight_method)
num_output_connections = [0 for _ in feats]
for i, fnode in enumerate(fpn_config.nodes):
with tf.name_scope('fnode{}'.format(i)):
logging.info('fnode %d : %s', i, fnode)
new_node_height = feat_sizes[fnode['feat_level']]['height']
new_node_width = feat_sizes[fnode['feat_level']]['width']
nodes = []
for idx, input_offset in enumerate(fnode['inputs_offsets']):
input_node = feats[input_offset]
num_output_connections[input_offset] += 1
input_node = spatial_transform_ops.resample_feature_map(
input_node, '{}_{}_{}'.format(idx, input_offset, len(feats)),
new_node_height, new_node_width, p.fpn.fpn_feat_dims,
p.fpn.apply_bn_for_resampling, p.is_training_bn,
p.fpn.conv_after_downsample,
p.fpn.use_native_resize_op,
p.fpn.pooling_type,
use_tpu=p.use_tpu,
data_format=params.data_format)
nodes.append(input_node)
new_node = self.fuse_features(nodes, fpn_config.weight_method)
with tf.name_scope('op_after_combine{}'.format(len(feats))):
if not p.fpn.conv_bn_act_pattern:
new_node = activation_fn(new_node, p.act_type)
if p.fpn.use_separable_conv:
conv_op = functools.partial(
tf.keras.layers.SeparableConv2D, depth_multiplier=1)
else:
conv_op = tf.keras.layers.Conv2D
new_node = conv_op(
filters=p.fpn.fpn_feat_dims,
kernel_size=(3, 3),
padding='same',
use_bias=not p.fpn.conv_bn_act_pattern,
data_format=params.data_format)(new_node)
                    # The activation is applied separately: before the conv when
                    # conv_bn_act_pattern is off (above), or after batch norm when it is on.
act_type = None if not p.fpn.conv_bn_act_pattern else p.act_type
new_node = tf.keras.layers.BatchNormalization(
axis=1 if params.data_format == "channels_first" else -1,
momentum=p.norm_activation.batch_norm_momentum,
epsilon=p.norm_activation.batch_norm_epsilon)(new_node)
if act_type:
new_node = activation_fn(new_node, act_type)
feats.append(new_node)
num_output_connections.append(0)
output_feats = {}
for l in range(p.architecture.min_level, p.architecture.max_level + 1):
for i, fnode in enumerate(reversed(fpn_config.nodes)):
if fnode['feat_level'] == l:
output_feats[l] = feats[-1 - i]
break
return output_feats
def bifpn_sum_config(self):
"""BiFPN config with sum."""
p = hparams_config.Config()
p.nodes = [
{'feat_level': 6, 'inputs_offsets': [3, 4]},
{'feat_level': 5, 'inputs_offsets': [2, 5]},
{'feat_level': 4, 'inputs_offsets': [1, 6]},
{'feat_level': 3, 'inputs_offsets': [0, 7]},
{'feat_level': 4, 'inputs_offsets': [1, 7, 8]},
{'feat_level': 5, 'inputs_offsets': [2, 6, 9]},
{'feat_level': 6, 'inputs_offsets': [3, 5, 10]},
{'feat_level': 7, 'inputs_offsets': [4, 11]},
]
p.weight_method = 'sum'
return p
def bifpn_fa_config(self):
"""BiFPN config with fast weighted sum."""
p = self.bifpn_sum_config()
p.weight_method = 'fastattn'
return p
def bifpn_dynamic_config(self, min_level, max_level, weight_method):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i), level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
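    # Worked example (derived from the loops above): for min_level=3, max_level=5 the
    # generated nodes are, in order:
    #   {'feat_level': 4, 'inputs_offsets': [1, 2]}     # top-down
    #   {'feat_level': 3, 'inputs_offsets': [0, 3]}
    #   {'feat_level': 4, 'inputs_offsets': [1, 3, 4]}  # bottom-up
    #   {'feat_level': 5, 'inputs_offsets': [2, 5]}
    # where offsets 0-2 are the input features for levels 3-5 and 3+ are the new nodes.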
def __call__(self, multilevel_features, params):
"""Returns the FPN features for a given multilevel features.
Args:
multilevel_features: a `dict` containing `int` keys for continuous feature
levels, e.g., [2, 3, 4, 5]. The values are corresponding features with
shape [batch_size, height_l, width_l, num_filters].
Returns:
a `dict` containing `int` keys for continuous feature levels
[min_level, min_level + 1, ..., max_level]. The values are corresponding
FPN features with shape [batch_size, height_l, width_l, fpn_feat_dims].
"""
        # Step 1: build additional input features that are not from the backbone (i.e. levels 6 and 7).
feats = []
# with tf.name_scope('bifpn'):
with backend.get_graph().as_default(), tf.name_scope('bifpn'):
for level in range(self._min_level, self._max_level + 1):
if level in multilevel_features.keys():
feats.append(multilevel_features[level])
else:
                    h_id, w_id = (1, 2)  # channels-first is not supported here; data_format must be channels_last
feats.append(
spatial_transform_ops.resample_feature_map(
feats[-1],
name='p%d' % level,
target_height=(feats[-1].shape[h_id] - 1) // 2 + 1,
target_width=(feats[-1].shape[w_id] - 1) // 2 + 1,
target_num_channels=params.fpn.fpn_feat_dims,
apply_bn=params.fpn.apply_bn_for_resampling,
is_training=params.is_training_bn,
conv_after_downsample=params.fpn.conv_after_downsample,
use_native_resize_op=params.fpn.use_native_resize_op,
pooling_type=params.fpn.pooling_type,
use_tpu=False,
data_format="channels_last"
))
feat_sizes = get_feat_sizes(params.efficientdet_parser.output_size[0], self._max_level)
with tf.name_scope("bifpn_cells"):
for rep in range(params.fpn.fpn_cell_repeats):
logging.info('building cell %d', rep)
new_feats = self.build_bifpn_layer(feats, feat_sizes, params)
feats = [
new_feats[level]
for level in range(
self._min_level, self._max_level + 1)
]
return new_feats
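# Usage sketch (illustrative; `params` is assumed to be a config object exposing the
# nested fields referenced above, e.g. params.fpn.fpn_cell_repeats and
# params.architecture.min_level):
#   fpn = BiFpn(min_level=3, max_level=7)
#   fpn_feats = fpn(multilevel_features, params)  # dict: level -> fused feature map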
|
[
"functools.partial",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.reduce_sum",
"tensorflow.keras.initializers.constant",
"tensorflow.add_n",
"tensorflow.keras.layers.BatchNormalization",
"itertools.count",
"logging.info",
"xl_tensorflow.utils.hparams_config.Config",
"tensorflow.name_scope"
] |
[((1299, 1357), 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Text"""'}), "(package='Text')\n", (1341, 1357), True, 'import tensorflow as tf\n'), ((2210, 2231), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.w'], {}), '(self.w)\n', (2223, 2231), True, 'import tensorflow as tf\n'), ((8047, 8070), 'xl_tensorflow.utils.hparams_config.Config', 'hparams_config.Config', ([], {}), '()\n', (8068, 8070), False, 'from xl_tensorflow.utils import hparams_config\n'), ((8957, 8980), 'xl_tensorflow.utils.hparams_config.Config', 'hparams_config.Config', ([], {}), '()\n', (8978, 8980), False, 'from xl_tensorflow.utils import hparams_config\n'), ((9280, 9307), 'itertools.count', 'itertools.count', (['num_levels'], {}), '(num_levels)\n', (9295, 9307), False, 'import itertools\n'), ((10754, 10776), 'tensorflow.name_scope', 'tf.name_scope', (['"""bifpn"""'], {}), "('bifpn')\n", (10767, 10776), True, 'import tensorflow as tf\n'), ((1969, 2011), 'tensorflow.keras.initializers.constant', 'tf.keras.initializers.constant', (['(1 / num_in)'], {}), '(1 / num_in)\n', (1999, 2011), True, 'import tensorflow as tf\n'), ((5159, 5198), 'logging.info', 'logging.info', (['"""fnode %d : %s"""', 'i', 'fnode'], {}), "('fnode %d : %s', i, fnode)\n", (5171, 5198), False, 'import logging\n'), ((12103, 12131), 'tensorflow.name_scope', 'tf.name_scope', (['"""bifpn_cells"""'], {}), "('bifpn_cells')\n", (12116, 12131), True, 'import tensorflow as tf\n'), ((4368, 4383), 'tensorflow.add_n', 'tf.add_n', (['nodes'], {}), '(nodes)\n', (4376, 4383), True, 'import tensorflow as tf\n'), ((10720, 10739), 'tensorflow.python.keras.backend.get_graph', 'backend.get_graph', ([], {}), '()\n', (10737, 10739), False, 'from tensorflow.python.keras import backend\n'), ((12216, 12253), 'logging.info', 'logging.info', (['"""building cell %d"""', 'rep'], {}), "('building cell %d', rep)\n", (12228, 12253), False, 'import logging\n'), ((6539, 6609), 'functools.partial', 'functools.partial', (['tf.keras.layers.SeparableConv2D'], {'depth_multiplier': '(1)'}), '(tf.keras.layers.SeparableConv2D, depth_multiplier=1)\n', (6556, 6609), False, 'import functools\n'), ((7180, 7375), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'axis': "(1 if params.data_format == 'channels_first' else -1)", 'momentum': 'p.norm_activation.batch_norm_momentum', 'epsilon': 'p.norm_activation.batch_norm_epsilon'}), "(axis=1 if params.data_format ==\n 'channels_first' else -1, momentum=p.norm_activation.\n batch_norm_momentum, epsilon=p.norm_activation.batch_norm_epsilon)\n", (7214, 7375), True, 'import tensorflow as tf\n')]
|
import os
import secrets
def create_env_file():
    prompt = "> "
    # APP_SETTINGS, FLASK_ENV and FLASK_APP were written below but never read in;
    # prompt for them like the other settings so the script runs end to end.
    print("APP_SETTINGS= ?")
    APP_SETTINGS = input(prompt)
    print("FLASK_ENV= ?")
    FLASK_ENV = input(prompt)
    print("FLASK_APP= ?")
    FLASK_APP = input(prompt)
    print("POSTGRES_USER= ?")
    POSTGRES_USER = input(prompt)
    print("POSTGRES_PW= ?")
    POSTGRES_PW = input(prompt)
    print("DATABASE= ?")
    DATABASE = input(prompt)
    print("REDIS_PW= ?")
    REDIS_PW = input(prompt)
    SECRET_KEY = secrets.token_hex(32)
    JWT_SECRET = secrets.token_hex(32)
    env_list = [
        "APP_SETTINGS={}".format(APP_SETTINGS),
        "FLASK_ENV={}".format(FLASK_ENV),
        "FLASK_APP={}".format(FLASK_APP),
        "POSTGRES_USER={}".format(POSTGRES_USER),
        "POSTGRES_PW={}".format(POSTGRES_PW),
        "DATABASE={}".format(DATABASE),
        "REDIS_PW={}".format(REDIS_PW),
        "SECRET_KEY={}".format(SECRET_KEY),
        "JWT_SECRET={}".format(JWT_SECRET)
    ]
    # Write one variable per line; the `with` block closes the file automatically.
    with open(os.path.join('../../..', '.env'), 'a') as f:
        for env_var in env_list:
            f.write(env_var + "\n")
def main():
create_env_file()
if __name__ == '__main__':
main()
|
[
"secrets.token_hex",
"os.path.join"
] |
[((323, 344), 'secrets.token_hex', 'secrets.token_hex', (['(32)'], {}), '(32)\n', (340, 344), False, 'import secrets\n'), ((362, 383), 'secrets.token_hex', 'secrets.token_hex', (['(32)'], {}), '(32)\n', (379, 383), False, 'import secrets\n'), ((819, 851), 'os.path.join', 'os.path.join', (['"""../../.."""', '""".env"""'], {}), "('../../..', '.env')\n", (831, 851), False, 'import os\n')]
|
from unittest import TestCase
from copy import copy
from pyprocessing.math import PVector
class PyProcessingMathTest(TestCase):
def setUp(self):
pass
    def test_pvector_instantiation(self):
        '''
        Test instantiating a vector
        '''
vector = PVector(0, 0, 0)
self.assertIsInstance(vector, PVector)
def test_pvector_addition(self):
'''
        Test adding one vector to another
'''
vector = PVector(0, 1, 0)
adder = PVector(1, 0, 0)
self.assertEqual(PVector(1, 1, 0), vector + adder)
self.assertEqual(PVector(1, 1, 0), vector.add(adder))
self.assertEqual(PVector(1, 1, 0), PVector.add(vector, adder))
self.assertEqual(PVector(1, 1, 0), vector.add((1, 0, 0)))
self.assertEqual(PVector(1, 1, 0), vector.add(1, 0, 0))
def test_pvector_difference(self):
'''
        Test subtracting one vector from another
'''
vector = PVector(1, 0, 0)
diff = PVector(1, 0, 0)
self.assertEqual(PVector(0, 0, 0), vector - diff)
self.assertEqual(PVector(0, 0, 0), vector.sub(diff))
self.assertEqual(PVector(0, 0, 0), PVector.sub(vector, diff))
self.assertEqual(PVector(0, 0, 0), vector.sub((1, 0, 0)))
self.assertEqual(PVector(0, 0, 0), vector.sub(1, 0, 0))
def test_pvector_mult(self):
'''
Test multiplying by a scalar, computing the cross product, and the dot
product.
'''
self.assertEqual(PVector(2, 2, 2), PVector(1, 1, 1) * 2)
self.assertEqual(PVector(2, 2, 2), PVector(1, 1, 1).mult(2))
self.assertEqual(6, PVector(1, 1, 1) * PVector(2, 2, 2))
self.assertEqual(6, PVector(1, 1, 1) * (2, 2, 2))
self.assertEqual(6, PVector(1, 1, 1) * [2, 2, 2])
self.assertEqual(PVector(0, 0, 0), PVector(1, 1, 1) @ PVector(2, 2, 2))
self.assertEqual(PVector(1, 0, 0), PVector(0, 0, 1) @ PVector(0, -1, 0))
self.assertEqual(PVector(0, 0, 0), PVector(1, 1, 1).cross(PVector(2, 2, 2)))
self.assertEqual(PVector(1, 0, 0), PVector(0, 0, 1).cross(PVector(0, -1, 0)))
def test_pvector_div(self):
'''
Test dividing a vector by a scalar
'''
self.assertEqual(PVector(1, 1, 1), PVector(2, 2, 2) / 2.)
self.assertEqual(PVector(1, 1, 1), PVector(2, 2, 2).div(2))
def test_pvector_copy(self):
'''
Test that copying a vector returns a new instance
'''
vec = PVector(0, 0, 0)
cp = copy(vec)
cp2 = vec.copy()
self.assertEqual(vec, cp)
self.assertEqual(vec, cp2)
self.assertIsNot(vec, cp)
self.assertIsNot(vec, cp2)
def test_pvector_lerp(self):
'''
Test that linear interpolation of a vector to another returns the proper
value
'''
vec = PVector(0, 0, 0)
target = PVector(2, 2, 2)
self.assertEqual(PVector(1, 1, 1), vec.lerp(target, 0.5))
self.assertIs(vec, vec.lerp(target, 0))
self.assertIs(target, vec.lerp(target, 1))
def test_pvector_shorthands(self):
'''
Test that PVector shorthands for common vectors work properly
'''
self.assertEqual(PVector(0, 0, 0), PVector.zero)
self.assertEqual(PVector(1, 1, 1), PVector.one)
self.assertEqual(PVector(1, 0, 0), PVector.x_unit)
self.assertEqual(PVector(0, 1, 0), PVector.y_unit)
self.assertEqual(PVector(0, 0, 1), PVector.z_unit)
def test_pvector_swizzle(self):
'''
Test that swizzle operations to scramble vector elements behave as expected
'''
vec = PVector(1, 2, 3, 4)
self.assertEqual(PVector(1, 1, 1), vec.xxx)
self.assertEqual(PVector(2, 2, 2), vec.yyy)
self.assertEqual(PVector(3, 2, 1), vec.zyx)
self.assertEqual(PVector(3, 2, 1, 4), vec.zyxw)
self.assertEqual(1, vec.x)
self.assertEqual(2, vec.y)
self.assertEqual(3, vec.z)
self.assertEqual(4, vec.w)
vec.xyz = 2, 4, 6
self.assertEqual(PVector(2, 4, 6, 4), vec)
vec.zxw = 1, 2, 4
self.assertEqual(PVector(2, 4, 1, 4), vec)
def test_pvector_normalization(self):
'''
Test that normalization operations work properly
'''
vec = PVector(1, 2, 3)
self.assertEqual(vec.dot(vec), vec.mag_sq())
self.assertAlmostEqual(vec.normalized().mag(), 1)
vec.normalize()
self.assertAlmostEqual(vec.mag_sq(), 1)
|
[
"pyprocessing.math.PVector.add",
"pyprocessing.math.PVector",
"pyprocessing.math.PVector.sub",
"copy.copy"
] |
[((285, 301), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (292, 301), False, 'from pyprocessing.math import PVector\n'), ((475, 491), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (482, 491), False, 'from pyprocessing.math import PVector\n'), ((508, 524), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (515, 524), False, 'from pyprocessing.math import PVector\n'), ((976, 992), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (983, 992), False, 'from pyprocessing.math import PVector\n'), ((1008, 1024), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (1015, 1024), False, 'from pyprocessing.math import PVector\n'), ((2509, 2525), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2516, 2525), False, 'from pyprocessing.math import PVector\n'), ((2539, 2548), 'copy.copy', 'copy', (['vec'], {}), '(vec)\n', (2543, 2548), False, 'from copy import copy\n'), ((2880, 2896), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2887, 2896), False, 'from pyprocessing.math import PVector\n'), ((2914, 2930), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2921, 2930), False, 'from pyprocessing.math import PVector\n'), ((3679, 3698), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (3686, 3698), False, 'from pyprocessing.math import PVector\n'), ((4344, 4360), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (4351, 4360), False, 'from pyprocessing.math import PVector\n'), ((550, 566), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (557, 566), False, 'from pyprocessing.math import PVector\n'), ((609, 625), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (616, 625), False, 'from pyprocessing.math import PVector\n'), ((671, 687), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (678, 687), False, 'from pyprocessing.math import PVector\n'), ((689, 715), 'pyprocessing.math.PVector.add', 'PVector.add', (['vector', 'adder'], {}), '(vector, adder)\n', (700, 715), False, 'from pyprocessing.math import PVector\n'), ((742, 758), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (749, 758), False, 'from pyprocessing.math import PVector\n'), ((808, 824), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(0)'], {}), '(1, 1, 0)\n', (815, 824), False, 'from pyprocessing.math import PVector\n'), ((1050, 1066), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1057, 1066), False, 'from pyprocessing.math import PVector\n'), ((1108, 1124), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1115, 1124), False, 'from pyprocessing.math import PVector\n'), ((1169, 1185), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1176, 1185), False, 'from pyprocessing.math import PVector\n'), ((1187, 1212), 'pyprocessing.math.PVector.sub', 'PVector.sub', (['vector', 'diff'], {}), '(vector, diff)\n', (1198, 1212), False, 'from pyprocessing.math import PVector\n'), ((1239, 1255), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1246, 1255), False, 'from pyprocessing.math import PVector\n'), 
((1305, 1321), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1312, 1321), False, 'from pyprocessing.math import PVector\n'), ((1523, 1539), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1530, 1539), False, 'from pyprocessing.math import PVector\n'), ((1588, 1604), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1595, 1604), False, 'from pyprocessing.math import PVector\n'), ((1838, 1854), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1845, 1854), False, 'from pyprocessing.math import PVector\n'), ((1918, 1934), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (1925, 1934), False, 'from pyprocessing.math import PVector\n'), ((1999, 2015), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2006, 2015), False, 'from pyprocessing.math import PVector\n'), ((2084, 2100), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (2091, 2100), False, 'from pyprocessing.math import PVector\n'), ((2270, 2286), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (2277, 2286), False, 'from pyprocessing.math import PVector\n'), ((2336, 2352), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (2343, 2352), False, 'from pyprocessing.math import PVector\n'), ((2956, 2972), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (2963, 2972), False, 'from pyprocessing.math import PVector\n'), ((3255, 3271), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (3262, 3271), False, 'from pyprocessing.math import PVector\n'), ((3312, 3328), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (3319, 3328), False, 'from pyprocessing.math import PVector\n'), ((3368, 3384), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(0)', '(0)'], {}), '(1, 0, 0)\n', (3375, 3384), False, 'from pyprocessing.math import PVector\n'), ((3427, 3443), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (3434, 3443), False, 'from pyprocessing.math import PVector\n'), ((3486, 3502), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (3493, 3502), False, 'from pyprocessing.math import PVector\n'), ((3725, 3741), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (3732, 3741), False, 'from pyprocessing.math import PVector\n'), ((3777, 3793), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3784, 3793), False, 'from pyprocessing.math import PVector\n'), ((3829, 3845), 'pyprocessing.math.PVector', 'PVector', (['(3)', '(2)', '(1)'], {}), '(3, 2, 1)\n', (3836, 3845), False, 'from pyprocessing.math import PVector\n'), ((3881, 3900), 'pyprocessing.math.PVector', 'PVector', (['(3)', '(2)', '(1)', '(4)'], {}), '(3, 2, 1, 4)\n', (3888, 3900), False, 'from pyprocessing.math import PVector\n'), ((4103, 4122), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(4)', '(6)', '(4)'], {}), '(2, 4, 6, 4)\n', (4110, 4122), False, 'from pyprocessing.math import PVector\n'), ((4180, 4199), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(4)', '(1)', '(4)'], {}), '(2, 4, 1, 4)\n', (4187, 4199), False, 'from pyprocessing.math import PVector\n'), ((1541, 1557), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], 
{}), '(1, 1, 1)\n', (1548, 1557), False, 'from pyprocessing.math import PVector\n'), ((1660, 1676), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1667, 1676), False, 'from pyprocessing.math import PVector\n'), ((1679, 1695), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1686, 1695), False, 'from pyprocessing.math import PVector\n'), ((1725, 1741), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1732, 1741), False, 'from pyprocessing.math import PVector\n'), ((1783, 1799), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1790, 1799), False, 'from pyprocessing.math import PVector\n'), ((1856, 1872), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1863, 1872), False, 'from pyprocessing.math import PVector\n'), ((1875, 1891), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1882, 1891), False, 'from pyprocessing.math import PVector\n'), ((1936, 1952), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (1943, 1952), False, 'from pyprocessing.math import PVector\n'), ((1955, 1972), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(-1)', '(0)'], {}), '(0, -1, 0)\n', (1962, 1972), False, 'from pyprocessing.math import PVector\n'), ((2040, 2056), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2047, 2056), False, 'from pyprocessing.math import PVector\n'), ((2125, 2142), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(-1)', '(0)'], {}), '(0, -1, 0)\n', (2132, 2142), False, 'from pyprocessing.math import PVector\n'), ((2288, 2304), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2295, 2304), False, 'from pyprocessing.math import PVector\n'), ((1606, 1622), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1613, 1622), False, 'from pyprocessing.math import PVector\n'), ((2017, 2033), 'pyprocessing.math.PVector', 'PVector', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (2024, 2033), False, 'from pyprocessing.math import PVector\n'), ((2102, 2118), 'pyprocessing.math.PVector', 'PVector', (['(0)', '(0)', '(1)'], {}), '(0, 0, 1)\n', (2109, 2118), False, 'from pyprocessing.math import PVector\n'), ((2354, 2370), 'pyprocessing.math.PVector', 'PVector', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (2361, 2370), False, 'from pyprocessing.math import PVector\n')]
|
import mysql.connector
import pandas as pd
from sqlalchemy import create_engine
hostname="localhost"
dbname="DWM"
uname="root"
pwd="<PASSWORD>"
engine = create_engine("mysql+pymysql://{user}:{pw}@{host}/{db}".format(host=hostname, db=dbname, user=uname, pw=pwd))
dataset = pd.read_csv('Student_details.csv')
dataset  # notebook-style display of the loaded DataFrame
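# Possible follow-up (illustrative; the table name is assumed): the SQLAlchemy engine
# created above can write the DataFrame back to MySQL.
#   dataset.to_sql('student_details', con=engine, if_exists='replace', index=False)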
|
[
"pandas.read_csv"
] |
[((273, 307), 'pandas.read_csv', 'pd.read_csv', (['"""Student_details.csv"""'], {}), "('Student_details.csv')\n", (284, 307), True, 'import pandas as pd\n')]
|
from pipeline_monitor import prometheus_monitor as monitor
_labels= {'a_label_key':'a_label_value'}
@monitor(labels=_labels, name="test_monitor")
def test_log_inputs_and_outputs(arg1: int, arg2: int):
return arg1 + arg2
test_log_inputs_and_outputs(4, 5)
|
[
"pipeline_monitor.prometheus_monitor"
] |
[((103, 147), 'pipeline_monitor.prometheus_monitor', 'monitor', ([], {'labels': '_labels', 'name': '"""test_monitor"""'}), "(labels=_labels, name='test_monitor')\n", (110, 147), True, 'from pipeline_monitor import prometheus_monitor as monitor\n')]
|
from django import forms
class AnswerQuestion(forms.Form):
answer = forms.IntegerField()
|
[
"django.forms.IntegerField"
] |
[((73, 93), 'django.forms.IntegerField', 'forms.IntegerField', ([], {}), '()\n', (91, 93), False, 'from django import forms\n')]
|
import networkx as nx
from scipy.io import loadmat
dataset = 'blogcatalog.mat'
x = loadmat(dataset)
x = x['network']
G = nx.from_scipy_sparse_matrix(x)
del x
f=open("BC_DW.edgelist",'wb')
nx.write_edgelist(G, f)
|
[
"networkx.from_scipy_sparse_matrix",
"networkx.write_edgelist",
"scipy.io.loadmat"
] |
[((55, 71), 'scipy.io.loadmat', 'loadmat', (['dataset'], {}), '(dataset)\n', (62, 71), False, 'from scipy.io import loadmat\n'), ((102, 118), 'scipy.io.loadmat', 'loadmat', (['dataset'], {}), '(dataset)\n', (109, 118), False, 'from scipy.io import loadmat\n'), ((140, 170), 'networkx.from_scipy_sparse_matrix', 'nx.from_scipy_sparse_matrix', (['x'], {}), '(x)\n', (167, 170), True, 'import networkx as nx\n'), ((207, 230), 'networkx.write_edgelist', 'nx.write_edgelist', (['G', 'f'], {}), '(G, f)\n', (224, 230), True, 'import networkx as nx\n')]
|
from pyinstrument import Profiler
from functools import wraps
def profile(func):
@wraps(func)
def wrapper(*args, **kwargs):
profiler = Profiler()
profiler.start()
results = func(*args, **kwargs)
profiler.stop()
        # output_text() returns the report as a string; print it so the profile is visible.
        print(profiler.output_text())
return results
return wrapper
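# Usage sketch (hypothetical function and workload size): decorating a callable
# prints a pyinstrument report each time it is invoked.
if __name__ == "__main__":
    @profile
    def busy_sum(n):
        return sum(i * i for i in range(n))

    print(busy_sum(200_000))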
|
[
"pyinstrument.Profiler",
"functools.wraps"
] |
[((86, 97), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (91, 97), False, 'from functools import wraps\n'), ((145, 155), 'pyinstrument.Profiler', 'Profiler', ([], {}), '()\n', (153, 155), False, 'from pyinstrument import Profiler\n')]
|
"""A lab app that runs a sub process for a demo or a test."""
import sys
from jupyter_server.extension.application import ExtensionApp, ExtensionAppJinjaMixin
from tornado.ioloop import IOLoop
from .handlers import LabConfig, add_handlers
from .process import Process
class ProcessApp(ExtensionAppJinjaMixin, LabConfig, ExtensionApp):
"""A jupyterlab app that runs a separate process and exits on completion."""
load_other_extensions = True
# Do not open a browser for process apps
open_browser = False
def get_command(self):
"""Get the command and kwargs to run with `Process`.
This is intended to be overridden.
"""
return ["python", "--version"], {}
def initialize_settings(self):
"""Start the application."""
IOLoop.current().add_callback(self._run_command)
def initialize_handlers(self):
add_handlers(self.handlers, self)
def _run_command(self):
command, kwargs = self.get_command()
kwargs.setdefault("logger", self.log)
future = Process(command, **kwargs).wait_async()
IOLoop.current().add_future(future, self._process_finished)
def _process_finished(self, future):
try:
IOLoop.current().stop()
sys.exit(future.result())
except Exception as e:
self.log.error(str(e))
sys.exit(1)
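# Example subclass (illustrative; the command shown is arbitrary): override
# get_command() to change what the app runs before exiting.
class EchoProcessApp(ProcessApp):
    """Runs a simple echo command instead of printing the Python version."""

    def get_command(self):
        return ["echo", "hello from a process app"], {}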
|
[
"tornado.ioloop.IOLoop.current",
"sys.exit"
] |
[((793, 809), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (807, 809), False, 'from tornado.ioloop import IOLoop\n'), ((1105, 1121), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (1119, 1121), False, 'from tornado.ioloop import IOLoop\n'), ((1372, 1383), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1380, 1383), False, 'import sys\n'), ((1232, 1248), 'tornado.ioloop.IOLoop.current', 'IOLoop.current', ([], {}), '()\n', (1246, 1248), False, 'from tornado.ioloop import IOLoop\n')]
|
from random import random
import sys
def pie(times=100):
    # Monte Carlo estimate of pi: sample points uniformly in the unit square; the
    # fraction landing inside the quarter circle (x*x + y*y < 1) approaches pi/4.
    incircle = 0
for _ in range(times):
x = random()
y = random()
if x * x + y * y < 1:
incircle += 1
return incircle / times * 4
if __name__ == "__main__":
with open(sys.argv[1], "r") as f:
times = f.readlines()[0]
p = pie(int(times))
with open(sys.argv[2], "w") as f:
f.writelines([str(p) + "\n" + times])
|
[
"random.random"
] |
[((115, 123), 'random.random', 'random', ([], {}), '()\n', (121, 123), False, 'from random import random\n'), ((136, 144), 'random.random', 'random', ([], {}), '()\n', (142, 144), False, 'from random import random\n')]
|
import numpy as np
from autotabular.pipeline.components.base import AutotabularClassificationAlgorithm
from autotabular.pipeline.constants import DENSE, PREDICTIONS, UNSIGNED_DATA
from ConfigSpace.configuration_space import ConfigurationSpace
class GaussianNB(AutotabularClassificationAlgorithm):
def __init__(self, random_state=None, verbose=0):
self.random_state = random_state
self.verbose = int(verbose)
self.estimator = None
def fit(self, X, y):
import sklearn.naive_bayes
self.estimator = sklearn.naive_bayes.GaussianNB()
self.classes_ = np.unique(y.astype(int))
# Fallback for multilabel classification
if len(y.shape) > 1 and y.shape[1] > 1:
import sklearn.multiclass
self.estimator = sklearn.multiclass.OneVsRestClassifier(
self.estimator, n_jobs=1)
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
'shortname': 'GaussianNB',
'name': 'Gaussian Naive Bayes classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': False,
'is_deterministic': True,
'input': (DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS, )
}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
return cs
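# Minimal smoke test (synthetic data, illustrative only); scikit-learn is imported
# lazily inside fit().
if __name__ == '__main__':
    X = np.array([[0.0, 0.1], [0.2, 0.1], [1.0, 0.9], [0.9, 1.0]])
    y = np.array([0, 0, 1, 1])
    model = GaussianNB().fit(X, y)
    print(model.predict(X))  # expected: [0 0 1 1]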
|
[
"ConfigSpace.configuration_space.ConfigurationSpace"
] |
[((1843, 1863), 'ConfigSpace.configuration_space.ConfigurationSpace', 'ConfigurationSpace', ([], {}), '()\n', (1861, 1863), False, 'from ConfigSpace.configuration_space import ConfigurationSpace\n')]
|
# SPDX-License-Identifier: BSD-3-Clause
"""Test result storage and processing functionality."""
from pytest import fixture, raises
from softfab.resultlib import ResultStorage
@fixture
def resultStorage(tmp_path):
return ResultStorage(tmp_path)
# Test data that can be used by various test cases:
TASK_NAME = 'testtask'
RUN_ID = 'faster'
KEY = 'dawn'
NR_RUNS = 50
def testResultsPutGet(resultStorage):
"""Test whether data can be stored and retrieved."""
def valueFunc(index):
return f'value{index:02d}'
runIds = []
for index in range(NR_RUNS):
runId = f'run{index:02d}'
runIds.append(runId)
data = {KEY: valueFunc(index)}
resultStorage.putData(TASK_NAME, runId, data)
results = resultStorage.getCustomData(TASK_NAME, runIds, KEY)
foundIds = []
for runId, value in results:
assert runId.startswith('run')
index = int(runId[3:])
assert 0 <= index < NR_RUNS
assert value == valueFunc(index)
foundIds.append(runId)
assert sorted(foundIds) == sorted(runIds)
def testResultsInvalidKey(resultStorage):
"""Test treatment of invalid keys."""
# TODO: Maybe we need more thought about what should be valid keys.
for key in ('../abc', ''):
data = {key: 'dummy'}
with raises(KeyError):
resultStorage.putData(TASK_NAME, RUN_ID, data)
results = resultStorage.getCustomData(TASK_NAME, [RUN_ID], key)
assert list(results) == []
def testResultsReplace(resultStorage):
"""Check that new data replaces old data."""
oldData = {KEY: 'old'}
newData = {KEY: 'new'}
resultStorage.putData(TASK_NAME, RUN_ID, oldData)
resultStorage.putData(TASK_NAME, RUN_ID, newData)
results = resultStorage.getCustomData(TASK_NAME, [RUN_ID], KEY)
assert list(results) == [(RUN_ID, 'new')]
def testResultsAdd(resultStorage):
"""Check that new data with different keys is added to old data."""
oldData = {'oldkey': 'old'}
newData = {'newkey': 'new'}
resultStorage.putData(TASK_NAME, RUN_ID, oldData)
resultStorage.putData(TASK_NAME, RUN_ID, newData)
results1 = resultStorage.getCustomData(TASK_NAME, [RUN_ID], 'oldkey')
assert list(results1) == [(RUN_ID, 'old')]
results2 = resultStorage.getCustomData(TASK_NAME, [RUN_ID], 'newkey')
assert list(results2) == [(RUN_ID, 'new')]
def testResultsListKeys(resultStorage):
"""Tests listing the keys that exist for a task name."""
for index in range(2, NR_RUNS):
runId = f'run{index:02d}'
keys = [
f'key{key:02d}'
for key in range(2, NR_RUNS)
if key % index == 0
]
data = dict.fromkeys(keys, 'dummy')
resultStorage.putData(TASK_NAME, runId, data)
assert resultStorage.getCustomKeys(TASK_NAME) == {
# for every N, N % N == 0 is true
# so every key for 2 <= key < nrRuns should be present
f'key{key:02d}' for key in range(2, NR_RUNS)
}
def testResultsListKeysNone(resultStorage):
"""Tests listing the keys if no data is stored for a task name."""
assert resultStorage.getCustomKeys(TASK_NAME) == set()
|
[
"softfab.resultlib.ResultStorage",
"pytest.raises"
] |
[((229, 252), 'softfab.resultlib.ResultStorage', 'ResultStorage', (['tmp_path'], {}), '(tmp_path)\n', (242, 252), False, 'from softfab.resultlib import ResultStorage\n'), ((1311, 1327), 'pytest.raises', 'raises', (['KeyError'], {}), '(KeyError)\n', (1317, 1327), False, 'from pytest import fixture, raises\n')]
|
# coding: utf-8
import os
def fileinode(filename):
    # Return the file's inode number, as reported by os.stat().
    return os.stat(filename).st_ino
if __name__ == '__main__':
print(fileinode("test.txt"))
|
[
"os.stat"
] |
[((64, 81), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (71, 81), False, 'import os\n')]
|
from django.test import TestCase
from channels.models import Channel
from talks.models import Talk
# Create your tests here.
class TalkModelTests(TestCase):
def setUp(self):
        channel_1 = Channel.objects.create(code='1', title='channel title 1')
        Talk.objects.create(code='1', title='talk title 1', channel=channel_1)
        Talk.objects.create(code='11', title='talk title same title', channel=channel_1)
        Talk.objects.create(code='12', title='talk title same title', channel=channel_1)
def test_instance_get_string_repr(self):
talk_1 = Talk.objects.get(code='1')
self.assertEquals(str(talk_1), talk_1.title)
def test_instance_get_youtube_valid_url(self):
talk_1 = Talk.objects.get(code='1')
self.assertEquals(talk_1.youtube_url,
'https://www.youtube.com/watch?v=1')
def test_instance_thumbnails(self):
talk_1 = Talk.objects.get(code='1')
self.assertEquals(talk_1.default_thumb,
'https://i.ytimg.com/vi/1/default.jpg')
self.assertEquals(talk_1.medium_thumb,
'https://i.ytimg.com/vi/1/mqdefault.jpg')
self.assertEquals(talk_1.high_thumb,
'https://i.ytimg.com/vi/1/hqdefault.jpg')
self.assertEquals(talk_1.standard_thumb,
'https://i.ytimg.com/vi/1/sddefault.jpg')
self.assertEquals(talk_1.maxres_thumb,
'https://i.ytimg.com/vi/1/maxresdefault.jpg')
def test_create_talk_slug(self):
talk_1 = Talk.objects.get(code='1')
self.assertEquals(talk_1.slug, 'talk-title-1')
def test_create_duplicate_title_slug(self):
talk_12 = Talk.objects.get(code='12')
self.assertEquals(talk_12.slug, 'talk-title-same-title-12')
def test_save_talk_slug(self):
talk_1 = Talk.objects.get(code=1)
talk_1.title = "another title"
talk_1.save()
self.assertEquals(talk_1.slug, 'talk-title-1')
|
[
"talks.models.Talk.objects.create",
"talks.models.Talk.objects.get",
"channels.models.Channel.objects.create"
] |
[((202, 259), 'channels.models.Channel.objects.create', 'Channel.objects.create', ([], {'code': '"""1"""', 'title': '"""channel title 1"""'}), "(code='1', title='channel title 1')\n", (224, 259), False, 'from channels.models import Channel\n'), ((268, 337), 'talks.models.Talk.objects.create', 'Talk.objects.create', ([], {'code': '"""1"""', 'title': '"""talk title 1"""', 'channel': 'chanel_1'}), "(code='1', title='talk title 1', channel=chanel_1)\n", (287, 337), False, 'from talks.models import Talk\n'), ((346, 425), 'talks.models.Talk.objects.create', 'Talk.objects.create', ([], {'code': '"""11"""', 'title': '"""talk title same title"""', 'channel': 'chanel_1'}), "(code='11', title='talk title same title', channel=chanel_1)\n", (365, 425), False, 'from talks.models import Talk\n'), ((434, 513), 'talks.models.Talk.objects.create', 'Talk.objects.create', ([], {'code': '"""12"""', 'title': '"""talk title same title"""', 'channel': 'chanel_1'}), "(code='12', title='talk title same title', channel=chanel_1)\n", (453, 513), False, 'from talks.models import Talk\n'), ((577, 603), 'talks.models.Talk.objects.get', 'Talk.objects.get', ([], {'code': '"""1"""'}), "(code='1')\n", (593, 603), False, 'from talks.models import Talk\n'), ((726, 752), 'talks.models.Talk.objects.get', 'Talk.objects.get', ([], {'code': '"""1"""'}), "(code='1')\n", (742, 752), False, 'from talks.models import Talk\n'), ((920, 946), 'talks.models.Talk.objects.get', 'Talk.objects.get', ([], {'code': '"""1"""'}), "(code='1')\n", (936, 946), False, 'from talks.models import Talk\n'), ((1580, 1606), 'talks.models.Talk.objects.get', 'Talk.objects.get', ([], {'code': '"""1"""'}), "(code='1')\n", (1596, 1606), False, 'from talks.models import Talk\n'), ((1729, 1756), 'talks.models.Talk.objects.get', 'Talk.objects.get', ([], {'code': '"""12"""'}), "(code='12')\n", (1745, 1756), False, 'from talks.models import Talk\n'), ((1878, 1902), 'talks.models.Talk.objects.get', 'Talk.objects.get', ([], {'code': '(1)'}), '(code=1)\n', (1894, 1902), False, 'from talks.models import Talk\n')]
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import logging
import os.path
from pathlib import Path
import sys
import requests
from cookiecutter.main import cookiecutter
from swaggertosdk.SwaggerToSdkNewCLI import generate_code
_LOGGER = logging.getLogger(__name__)
def create_package_service_mapping(service_info, autorest_options):
type_str = "Management" if service_info["is_arm"] else "Client"
return {
autorest_options["package-name"]: {
"service_name": service_info["pretty_name"],
"category": type_str,
"namespaces": [
autorest_options["namespace"]
]
}
}
def main(package_name):
service_info = {
"is_arm": package_name.startswith("azure-mgmt"),
"pretty_name": package_name # FIXME
}
autorest_options = {
"package-name": package_name,
"namespace": package_name.replace("-", ".")
}
package_service_mapping = Path("package_service_mapping.json")
    # A Path object is always truthy; check that the mapping file actually exists.
    if package_service_mapping.exists():
_LOGGER.info("Updating package_service_mapping.json")
entry = create_package_service_mapping(service_info, autorest_options)
with package_service_mapping.open() as fd:
data_conf = json.load(fd)
data_conf.update(entry)
with package_service_mapping.open("w") as fd:
json.dump(data_conf, fd, indent=2, sort_keys=True)
_LOGGER.info("Done! Enjoy your Python SDK!!")
if __name__ == "__main__":
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
main(sys.argv[1])
|
[
"json.dump",
"json.load",
"logging.basicConfig",
"pathlib.Path",
"logging.getLogger"
] |
[((575, 602), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (592, 602), False, 'import logging\n'), ((1300, 1336), 'pathlib.Path', 'Path', (['"""package_service_mapping.json"""'], {}), "('package_service_mapping.json')\n", (1304, 1336), False, 'from pathlib import Path\n'), ((1835, 1856), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (1854, 1856), False, 'import logging\n'), ((1585, 1598), 'json.load', 'json.load', (['fd'], {}), '(fd)\n', (1594, 1598), False, 'import json\n'), ((1701, 1751), 'json.dump', 'json.dump', (['data_conf', 'fd'], {'indent': '(2)', 'sort_keys': '(True)'}), '(data_conf, fd, indent=2, sort_keys=True)\n', (1710, 1751), False, 'import json\n'), ((1861, 1880), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1878, 1880), False, 'import logging\n')]
|
import types
from collections import OrderedDict
import apiclient
import pandas as pd
from datasheets import exceptions, helpers
class Tab(object):
def __init__(self, tabname, workbook, drive_svc, sheets_svc):
"""Create a datasheets.Tab instance of an existing Google Sheets tab.
This class in not intended to be directly instantiated; it is created by
datasheets.Workbook.fetch_tab().
Args:
tabname (str): The name of the tab
workbook (datasheets.Workbook): The workbook instance that instantiated this tab
drive_svc (googleapiclient.discovery.Resource): An instance of Google Drive
sheets_svc (googleapiclient.discovery.Resource): An instance of Google Sheets
"""
self.tabname = tabname
self._workbook = workbook
self.drive_svc = drive_svc
self.sheets_svc = sheets_svc
# Get basic properties of the tab. We do this here partly
# to force failures early if tab can't be found
try:
self._update_tab_properties()
except apiclient.errors.HttpError as e:
if 'Unable to parse range'.encode() in e.content:
raise exceptions.TabNotFound('The given tab could not be found. Error generated: {}'.format(e))
else:
raise
self.url = 'https://docs.google.com/spreadsheets/d/{}#gid={}'.format(self.workbook.file_id, self.tab_id)
def __getattribute__(self, attr):
"""Get an attribute (variable or method) of this instance of this class
For client OAuth, before each user-facing method call this method will verify that the
access token is not expired and refresh it if it is.
We only refresh on user-facing method calls since otherwise we'd be refreshing multiple
times per user action (once for the user call, possibly multiple times for the private
method calls invoked by it).
"""
requested_attr = super(Tab, self).__getattribute__(attr)
if isinstance(requested_attr, types.MethodType) \
and not attr.startswith('_'):
self.workbook.client._refresh_token_if_needed()
return requested_attr
def __repr__(self):
msg = "<{module}.{name}(filename='{filename}', tabname='{tabname}')>"
return msg.format(module=self.__class__.__module__,
name=self.__class__.__name__,
filename=self.workbook.filename,
tabname=self.tabname)
@staticmethod
def _process_rows(raw_data):
"""Prepare a tab's raw data so that a pandas.DataFrame can be produced from it
Args:
raw_data (dict): The raw data from a tab
Returns:
list: A list of lists representing the raw_data, with one list per row in the tab
"""
raw_rows = raw_data['sheets'][0]['data'][0].get('rowData', {})
rows = []
for row_num, row in enumerate(raw_rows):
row_values = []
for col_num, cell in enumerate(row.get('values', {})):
# If the cell is empty, use None
value = cell.get('effectiveValue', {None: None})
# If a cell has an error in it (e.g. someone divides by zero, adds a number to
# text, etc.), then we raise an exception.
if 'errorValue' in value.keys():
cell_label = helpers.convert_cell_index_to_label(row_num+1, col_num+1)
error_type = value['errorValue'].get('type', 'unknown type')
error_message = value['errorValue'].get('message', 'unknown error message')
msg = 'Error of type "{}" within cell {} prevents fetching data. Message: "{}"'
raise exceptions.FetchDataError(msg.format(error_type, cell_label, error_message))
# value is a dict with only 1 key so this next(iter()) is safe
base_fmt, cell_value = next(iter(value.items()))
num_fmt = cell.get('effectiveFormat', {}).get('numberFormat')
if num_fmt:
cell_format = num_fmt['type']
else:
cell_format = base_fmt
formatting_fn = helpers._TYPE_CONVERSIONS[cell_format]
if cell_value:
try:
cell_value = formatting_fn(cell_value)
except ValueError:
pass
except TypeError:
raise TypeError(
"Mismatch exists in expected and actual data types for cell with "
"value '{value}'. Cell format is '{cell_format}' but cell value type "
"is '{value_type}'. To correct this, in Google Sheets set the "
"appropriate cell format or set it to Automatic".format(
value=cell_value,
cell_format=cell_format,
value_type=type(cell_value))
)
row_values.append(cell_value)
rows.append(row_values)
return rows
@property
def ncols(self):
""" Property for the number (int) of columns in the tab """
return self.properties['gridProperties']['columnCount']
@property
def nrows(self):
""" Property for the number (int) of rows in the tab """
return self.properties['gridProperties']['rowCount']
@property
def tab_id(self):
""" Property that gives the ID for the tab """
return self.properties['sheetId']
@property
def workbook(self):
""" Property for the workbook instance that this tab belongs to """
return self._workbook
def _add_rows_or_columns(self, kind, n):
request_body = {'appendDimension': {
'sheetId': self.tab_id,
'dimension': kind,
'length': n
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
self._update_tab_properties()
def _update_tab_properties(self):
raw_properties = self.sheets_svc.get(spreadsheetId=self.workbook.file_id,
ranges=self.tabname + '!A1',
fields='sheets/properties').execute()
self.properties = raw_properties['sheets'][0]['properties']
def add_rows(self, n):
"""Add n rows to the given tab
Args:
n (int): The number of rows to add
Returns:
None
"""
self._add_rows_or_columns(kind='ROWS', n=n)
def add_columns(self, n):
"""Add n columns to the given tab
Args:
n (int): The number of columns to add
Returns:
None
"""
self._add_rows_or_columns(kind='COLUMNS', n=n)
def align_cells(self, horizontal='LEFT', vertical='MIDDLE'):
"""Align all cells in the tab
Args:
horizontal (str): The horizontal alignment for cells. May be one of 'LEFT',
'CENTER', or 'RIGHT'
vertical (str): The vertical alignment for cells. May be one of 'TOP',
'MIDDLE', 'BOTTOM'
Returns:
None
"""
request_body = {'repeatCell': {
'range': {
'sheetId': self.tab_id,
'startRowIndex': 0,
'endRowIndex': self.nrows
},
'cell': {
'userEnteredFormat': {
'horizontalAlignment': horizontal,
'verticalAlignment': vertical,
}
},
'fields': 'userEnteredFormat(horizontalAlignment,verticalAlignment)'
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
def alter_dimensions(self, nrows=None, ncols=None):
"""Alter the dimensions of the current tab.
        If either dimension is left as None, that dimension will not be altered. Note that it is
possible to set nrows or ncols to smaller than the current tab dimensions, in which case
that data will be eliminated.
Args:
nrows (int): The number of rows for the tab to have
ncols (int): The number of columns for the tab to have
Returns:
None
"""
request_body = {'updateSheetProperties': {
'properties': {
'sheetId': self.tab_id,
'gridProperties': {
'columnCount': ncols or self.ncols,
'rowCount': nrows or self.nrows
}
},
'fields': 'gridProperties(columnCount, rowCount)'
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
self._update_tab_properties()
def append_data(self, data, index=True, autoformat=True):
"""Append data to the existing data in this tab.
If the new data exceeds the tab's current dimensions the tab will be resized to
accommodate it. Data headers will not be included among the appended data as they are
assumed to already be among the existing tab data.
        Args:
            data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a
                pandas.DataFrame, a dict of lists, or a list of lists
            index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well
            autoformat (bool): If True, apply the default tab stylings (see autoformat())
                after the data is appended
Returns:
None
"""
# Convert everything to lists of lists, which Google Sheets requires
headers, values = helpers._make_list_of_lists(data, index)
values = helpers._convert_nan_and_datelike_values(values)
body = {'values': values}
self.sheets_svc.values().append(spreadsheetId=self.workbook.file_id, range=self.tabname,
valueInputOption='USER_ENTERED', body=body).execute()
if autoformat:
self.autoformat(len(headers))
self._update_tab_properties()
def autoformat(self, n_header_rows):
"""Apply default stylings to the tab
This will apply the following stylings to the tab:
- Header rows will be formatted to a dark gray background and off-white text
- Font for all cells will be set to size 10 Proxima Nova
- Cells will be horizontally left-aligned and vertically middle-aligned
- Columns will be resized to display their largest entry
- Empty columns and rows will be trimmed from the tab
Args:
n_header_rows (int): The number of header rows (i.e. row of labels / metadata)
Returns:
None
"""
self.format_headers(nrows=n_header_rows)
self.format_font()
self.align_cells()
self.autosize_columns()
populated_cells = self.sheets_svc.values().get(spreadsheetId=self.workbook.file_id,
range=self.tabname).execute()
nrows = len(populated_cells['values'])
ncols = max(map(len, populated_cells['values']))
self.alter_dimensions(nrows=nrows, ncols=ncols)
self._update_tab_properties()
def autosize_columns(self):
"""Resize the widths of all columns in the tab to fit their data
Returns:
None
"""
request_body = {'autoResizeDimensions': {
'dimensions': {
'sheetId': self.tab_id,
'dimension': 'COLUMNS',
'startIndex': 0,
'endIndex': self.ncols
}
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
def clear_data(self):
"""Clear all data from the tab while leaving formatting intact
Returns:
None
"""
self.sheets_svc.values().clear(spreadsheetId=self.workbook.file_id,
range=self.tabname,
body={}).execute()
def format_font(self, font='Proxima Nova', size=10):
"""Set the font and size for all cells in the tab
Args:
font (str): The name of the font to use
size (int): The size to set the font to
Returns:
None
"""
request_body = {'repeatCell': {
'range': {'sheetId': self.tab_id},
'cell': {
'userEnteredFormat': {
'textFormat': {
'fontSize': size,
'fontFamily': font
}
}
},
'fields': 'userEnteredFormat(textFormat(fontSize,fontFamily))'
}
}
body = {'requests': [request_body]}
self.workbook.batch_update(body)
def format_headers(self, nrows):
"""Format the first n rows of a tab.
The following stylings will be applied to these rows:
- Background will be set to dark gray with off-white text
- Font will be set to size 10 Proxima Nova
- Text will be horizontally left-aligned and vertically middle-aligned
- Rows will be made "frozen" so that when the user scrolls these rows stay visible
Args:
nrows (int): The number of rows of headers in the tab
Returns:
None
"""
body = {
'requests': [
{
'repeatCell': {
'range': {
'sheetId': self.tab_id,
'startRowIndex': 0,
'endRowIndex': nrows
},
'cell': {
'userEnteredFormat': {
'backgroundColor': {
'red': 0.26274511,
'green': 0.26274511,
'blue': 0.26274511
},
'horizontalAlignment': 'LEFT',
'textFormat': {
'foregroundColor': {
'red': 0.95294118,
'green': 0.95294118,
'blue': 0.95294118
},
'fontSize': 10,
'fontFamily': 'Proxima Nova',
'bold': False
}
}
},
'fields': 'userEnteredFormat(backgroundColor,textFormat,horizontalAlignment)'
}
},
{
'updateSheetProperties': {
'properties': {
'sheetId': self.tab_id,
'gridProperties': {
'frozenRowCount': nrows
}
},
'fields': 'gridProperties(frozenRowCount)'
}
}
]
}
self.workbook.batch_update(body)
def fetch_data(self, headers=True, fmt='df'):
"""Retrieve the data within this tab.
Efforts are taken to ensure that returned rows are always the same length. If
headers=True, this length will be equal to the length of the headers. If headers=False,
this length will be equal to the longest row.
In either case, shorter rows will be padded with Nones and longer rows will be
truncated (i.e. if there are 3 headers then all rows will have 3 entries regardless
of the amount of populated cells they have).
Args:
headers (bool): If True, the first row will be used as the column names for the
pandas.DataFrame. Otherwise, a 0-indexed range will be used instead
fmt (str): The format in which to return the data. Accepted values: 'df', 'dict', 'list'
Returns:
When fmt='df' --> pandas.DataFrame
When fmt='dict' --> list of dicts, e.g.::
[{header1: row1cell1, header2: row1cell2},
{header1: row2cell1, header2: row2cell2},
...]
When fmt='list' --> tuple of header names, list of lists with row data, e.g.::
([header1, header2, ...],
[[row1cell1, row1cell2, ...], [row2cell1, row2cell2, ...], ...])
"""
if fmt not in ('df', 'dict', 'list'):
raise ValueError("Unexpected value '{}' for parameter `fmt`. "
"Accepted values are 'df', 'dict', and 'list'".format(fmt))
fields = 'sheets/data/rowData/values(effectiveValue,effectiveFormat/numberFormat/type)'
raw_data = self.sheets_svc.get(spreadsheetId=self.workbook.file_id, ranges=self.tabname,
includeGridData=True, fields=fields).execute()
processed_rows = self._process_rows(raw_data)
# filter out empty rows
max_idx = helpers._find_max_nonempty_row(processed_rows)
if max_idx is None:
if fmt == 'df':
return pd.DataFrame([])
elif fmt == 'dict':
return []
else:
return ([], [])
processed_rows = processed_rows[:max_idx+1]
# remove trailing Nones on rows
processed_rows = list(map(helpers._remove_trailing_nones, processed_rows))
if headers:
header_names = processed_rows.pop(0)
max_width = len(header_names)
else:
# Iterate through rows to find widest one
max_width = max(map(len, processed_rows))
header_names = list(range(max_width))
# resize the rows to match the number of column headers
processed_rows = [helpers._resize_row(row, max_width) for row in processed_rows]
if fmt == 'df':
df = pd.DataFrame(data=processed_rows, columns=header_names)
return df
elif fmt == 'dict':
make_row_dict = lambda row: OrderedDict(zip(header_names, row))
return list(map(make_row_dict, processed_rows))
else:
return header_names, processed_rows
def insert_data(self, data, index=True, autoformat=True):
"""Overwrite all data in this tab with the provided data.
All existing data in the tab will be removed, even if it might not have been overwritten
(for example, if there is 4x2 data already in the tab and only 2x2 data is being inserted).
If the dimensions of `data` are larger than the tab's current dimensions,
the tab will automatically be resized to fit it.
Args:
data (pandas.DataFrame or dict or list): The data to be uploaded, formatted as a
pandas.DataFrame, a dict of lists, or a list of lists
index (bool): If `data` is a pandas.DataFrame, whether to upload the index as well
Returns:
None
"""
# Convert everything to lists of lists, which Google Sheets requires
headers, values = helpers._make_list_of_lists(data, index)
values = headers + values # Include headers for inserts but not for appends
self.clear_data()
values = helpers._convert_nan_and_datelike_values(values)
body = {'values': values}
self.sheets_svc.values().update(spreadsheetId=self.workbook.file_id, range=self.tabname,
valueInputOption='USER_ENTERED', body=body).execute()
if autoformat:
self.autoformat(len(headers))
self._update_tab_properties()
|
[
"pandas.DataFrame",
"datasheets.helpers._convert_nan_and_datelike_values",
"datasheets.helpers._make_list_of_lists",
"datasheets.helpers._find_max_nonempty_row",
"datasheets.helpers.convert_cell_index_to_label",
"datasheets.helpers._resize_row"
] |
[((10510, 10550), 'datasheets.helpers._make_list_of_lists', 'helpers._make_list_of_lists', (['data', 'index'], {}), '(data, index)\n', (10537, 10550), False, 'from datasheets import exceptions, helpers\n'), ((10568, 10616), 'datasheets.helpers._convert_nan_and_datelike_values', 'helpers._convert_nan_and_datelike_values', (['values'], {}), '(values)\n', (10608, 10616), False, 'from datasheets import exceptions, helpers\n'), ((18146, 18192), 'datasheets.helpers._find_max_nonempty_row', 'helpers._find_max_nonempty_row', (['processed_rows'], {}), '(processed_rows)\n', (18176, 18192), False, 'from datasheets import exceptions, helpers\n'), ((20249, 20289), 'datasheets.helpers._make_list_of_lists', 'helpers._make_list_of_lists', (['data', 'index'], {}), '(data, index)\n', (20276, 20289), False, 'from datasheets import exceptions, helpers\n'), ((20419, 20467), 'datasheets.helpers._convert_nan_and_datelike_values', 'helpers._convert_nan_and_datelike_values', (['values'], {}), '(values)\n', (20459, 20467), False, 'from datasheets import exceptions, helpers\n'), ((18950, 18985), 'datasheets.helpers._resize_row', 'helpers._resize_row', (['row', 'max_width'], {}), '(row, max_width)\n', (18969, 18985), False, 'from datasheets import exceptions, helpers\n'), ((19055, 19110), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'processed_rows', 'columns': 'header_names'}), '(data=processed_rows, columns=header_names)\n', (19067, 19110), True, 'import pandas as pd\n'), ((18273, 18289), 'pandas.DataFrame', 'pd.DataFrame', (['[]'], {}), '([])\n', (18285, 18289), True, 'import pandas as pd\n'), ((3469, 3530), 'datasheets.helpers.convert_cell_index_to_label', 'helpers.convert_cell_index_to_label', (['(row_num + 1)', '(col_num + 1)'], {}), '(row_num + 1, col_num + 1)\n', (3504, 3530), False, 'from datasheets import exceptions, helpers\n')]
|
#pylint: disable=logging-fstring-interpolation
# Standard library imports
import subprocess
import os
import re
import platform
from typing import Tuple,Any
from pathlib import Path
import shutil
# Third party imports
from bs4 import BeautifulSoup
import wget
# Selenium imports
from selenium import webdriver
from selenium.common.exceptions import SessionNotCreatedException
from selenium.common.exceptions import WebDriverException
# Local imports
from selenium_driver_updater._setting import setting
from selenium_driver_updater.util.requests_getter import RequestsGetter
from selenium_driver_updater.util.extractor import Extractor
from selenium_driver_updater.util.logger import logger
class OperaBrowser():
"""Class for working with Opera browser"""
def __init__(self, **kwargs):
self.setting : Any = setting
self.check_browser_is_up_to_date = bool(kwargs.get('check_browser_is_up_to_date'))
self.operadriver_path = str(kwargs.get('path'))
self.requests_getter = RequestsGetter
self.extractor = Extractor
self.system_name = ''
self.url_release = ''
def main(self) -> None:
"""Main function, checks for the latest version, downloads or updates opera browser"""
if self.check_browser_is_up_to_date:
self._check_if_opera_browser_is_up_to_date()
def _check_if_opera_browser_is_up_to_date(self) -> None:
"""Сhecks for the latest version of opera browser
Raises:
Exception: If an unexpected error is raised.
"""
try:
if platform.system() not in ['Darwin', 'Windows']:
message = 'Opera browser checking/updating is currently disabled for your OS. Please wait for the new releases.'
logger.error(message)
return
is_browser_up_to_date, current_version, latest_version = self._compare_current_version_and_latest_version_opera_browser()
if not is_browser_up_to_date:
self._get_latest_opera_browser_for_current_os()
is_browser_up_to_date, current_version, latest_version = self._compare_current_version_and_latest_version_opera_browser()
if not is_browser_up_to_date:
message = f'Problem with updating opera browser current_version: {current_version} latest_version: {latest_version}'
logger.info(message)
except (ValueError, FileNotFoundError):
pass
def _get_current_version_opera_browser_selenium(self) -> str:
"""Gets current opera browser version
Returns:
str
browser_version (str) : Current opera browser version.
Raises:
SessionNotCreatedException: Occurs when the current operadriver could not start.
WebDriverException: Occurs when the current operadriver could not start or a critical error occurred.
"""
browser_version : str = ''
try:
browser_version = self._get_current_version_opera_browser_selenium_via_terminal()
if not browser_version:
message = 'Trying to get current version of opera browser via operadriver'
logger.info(message)
if Path(self.operadriver_path).exists() and not browser_version:
with webdriver.Opera(executable_path = self.operadriver_path) as driver:
browser_version = driver.execute_script("return navigator.userAgent")
find_string = re.findall('OPR/' + self.setting["Program"]["wedriverVersionPattern"], browser_version)
browser_version = find_string[0] if len(find_string) > 0 else ''
logger.info(f'Current version of opera browser: {browser_version}')
except (WebDriverException, SessionNotCreatedException, OSError):
pass #[Errno 86] Bad CPU type in executable:
return browser_version
def _get_latest_version_opera_browser(self) -> str:
"""Gets latest opera browser version
Returns:
str
latest_version (str) : Latest version of opera browser.
Raises:
Exception: If an unexpected error is raised.
"""
latest_version : str = ''
version : str = ''
url = self.setting["OperaBrowser"]["LinkAllLatestRelease"]
json_data = self.requests_getter.get_result_by_request(url=url)
soup = BeautifulSoup(json_data, 'html.parser')
system_name = platform.system()
system_name = system_name.replace('Darwin', 'mac')
system_name = system_name.replace('Windows', 'win')
self.system_name = system_name.lower() + '/' #mac -> mac/ or Linux -> linux/
elements = soup.findAll('a')
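# Probe the release links from the newest (last anchor on the page) backwards until one lists a build for this OS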
for i,_ in enumerate(elements, 1):
version = elements[-i].attrs.get('href')
self.url_release = url + version
json_data = self.requests_getter.get_result_by_request(url=self.url_release)
if not self.system_name in json_data:
continue
else:
break
latest_version = version.replace('/', '')
logger.info(f'Latest version of opera browser: {latest_version}')
return latest_version
def _get_latest_opera_browser_for_current_os(self) -> None:
"""Trying to update opera browser to its latest version"""
if platform.system() not in ['Darwin', 'Windows']:
message = 'Opera browser checking/updating is currently disabled for your OS. Please wait for the new releases.'
logger.error(message)
return
latest_version = self._get_latest_version_opera_browser()
url_full_release = self.url_release + self.system_name
if platform.system() == 'Darwin':
if 'arm' in str(os.uname().machine) and platform.system() == 'Darwin':
url_full_release = url_full_release + f'Opera_{latest_version}_Autoupdate_arm64.tar.xz'
else:
url_full_release = url_full_release + f'Opera_{latest_version}_Autoupdate.tar.xz'
elif platform.system() == 'Windows':
if self.setting['Program']['OSBitness'] == '64':
url_full_release = url_full_release + f'Opera_{latest_version}_Setup_x64.exe'
else:
url_full_release = url_full_release + f'Opera_{latest_version}_Setup.exe'
logger.info(f'Started download operabrowser by url: {url_full_release}')
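# Stage the download in a 'selenium-driver-updater' folder created next to the operadriver binary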
path = self.operadriver_path.replace(self.operadriver_path.split(os.path.sep)[-1], '') + 'selenium-driver-updater' + os.path.sep
archive_name = url_full_release.split('/')[-1]
if not Path(path).exists():
Path(path).mkdir()
if Path(path + archive_name).exists():
Path(path + archive_name).unlink()
logger.info(f'Started to download opera browser by url: {url_full_release}')
archive_path = wget.download(url=url_full_release, out=path + archive_name)
logger.info(f'Opera browser was downloaded to path: {archive_path}')
if platform.system() == 'Darwin':
logger.info('Trying to kill all opera processes')
subprocess.Popen('killall Opera', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
subprocess.Popen('killall Opera', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
logger.info('Successfully killed all opera processes')
self.extractor.extract_all_tar_xz_archive(archive_path=archive_path, delete_archive=True, out_path=path)
opera_browser_path = path + 'Opera.app'
opera_browser_path_application = '/Applications/Opera.app'
if Path(opera_browser_path_application).exists():
shutil.rmtree(opera_browser_path_application)
shutil.move(opera_browser_path, opera_browser_path_application)
logger.info(f'Successfully moved opera browser from: {opera_browser_path} to: {opera_browser_path_application}')
if Path(archive_path).exists():
Path(archive_path).unlink()
elif platform.system() == 'Windows':
logger.info('Trying to kill all opera.exe processes')
subprocess.Popen('taskkill /F /IM "opera.exe" /T', shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
logger.info('Successfully killed all opera.exe processes')
logger.info('Trying to install new opera browser')
os.system(f'{archive_path} /install /silent /launchopera=no /desktopshortcut=no /pintotaskbar=no /setdefaultbrowser=0')
logger.info('Successfully updated current opera browser')
def _compare_current_version_and_latest_version_opera_browser(self) -> Tuple[bool, str, str]:
"""Compares current version of opera browser to latest version
Returns:
Tuple of bool, str and str
is_browser_up_to_date (bool) : It true the browser is up to date. Defaults to False.
current_version (str) : Current version of the browser.
latest_version (str) : Latest version of the browser.
Raises:
Exception: If an unexpected error is raised.
"""
is_browser_up_to_date : bool = False
current_version : str = ''
latest_version : str = ''
current_version = self._get_current_version_opera_browser_selenium()
if not current_version:
return is_browser_up_to_date, current_version, latest_version
latest_version = self._get_latest_version_opera_browser()
if current_version == latest_version:
is_browser_up_to_date = True
message = f"Your existing opera browser is up to date. current_version: {current_version} latest_version: {latest_version}"
logger.info(message)
return is_browser_up_to_date, current_version, latest_version
def _get_current_version_opera_browser_selenium_via_terminal(self) -> str:
"""Gets current opera browser version via command in terminal
Returns:
str
browser_version (str) : Current opera browser version.
Raises:
Exception: If an unexpected error is raised.
"""
browser_version : str = ''
browser_version_terminal : str = ''
operabrowser_path = self.setting["OperaBrowser"]["Path"]
if operabrowser_path:
logger.info('Trying to get current version of opera browser via terminal')
if platform.system() == 'Windows':
with subprocess.Popen(operabrowser_path, stdout=subprocess.PIPE) as process:
browser_version_terminal = process.communicate()[0].decode('UTF-8')
find_string_terminal = re.findall("Opera.*", browser_version_terminal)
browser_version_terminal = find_string_terminal[0] if len(find_string_terminal) > 0 else ''
elif platform.system() == 'Darwin':
with subprocess.Popen([operabrowser_path, '--version'], stdout=subprocess.PIPE) as process:
browser_version_terminal = process.communicate()[0].decode('UTF-8')
find_string = re.findall(self.setting["Program"]["wedriverVersionPattern"], browser_version_terminal)
browser_version = find_string[0] if len(find_string) > 0 else ''
return browser_version
|
[
"selenium.webdriver.Opera",
"subprocess.Popen",
"selenium_driver_updater.util.logger.logger.error",
"os.uname",
"os.system",
"platform.system",
"wget.download",
"pathlib.Path",
"re.findall",
"shutil.move",
"bs4.BeautifulSoup",
"shutil.rmtree",
"selenium_driver_updater.util.logger.logger.info"
] |
[((4454, 4493), 'bs4.BeautifulSoup', 'BeautifulSoup', (['json_data', '"""html.parser"""'], {}), "(json_data, 'html.parser')\n", (4467, 4493), False, 'from bs4 import BeautifulSoup\n'), ((4517, 4534), 'platform.system', 'platform.system', ([], {}), '()\n', (4532, 4534), False, 'import platform\n'), ((5185, 5250), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['f"""Latest version of opera browser: {latest_version}"""'], {}), "(f'Latest version of opera browser: {latest_version}')\n", (5196, 5250), False, 'from selenium_driver_updater.util.logger import logger\n'), ((6451, 6523), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['f"""Started download operabrowser by url: {url_full_release}"""'], {}), "(f'Started download operabrowser by url: {url_full_release}')\n", (6462, 6523), False, 'from selenium_driver_updater.util.logger import logger\n'), ((6889, 6965), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['f"""Started to download opera browser by url: {url_full_release}"""'], {}), "(f'Started to download opera browser by url: {url_full_release}')\n", (6900, 6965), False, 'from selenium_driver_updater.util.logger import logger\n'), ((6989, 7049), 'wget.download', 'wget.download', ([], {'url': 'url_full_release', 'out': '(path + archive_name)'}), '(url=url_full_release, out=path + archive_name)\n', (7002, 7049), False, 'import wget\n'), ((7059, 7127), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['f"""Opera browser was downloaded to path: {archive_path}"""'], {}), "(f'Opera browser was downloaded to path: {archive_path}')\n", (7070, 7127), False, 'from selenium_driver_updater.util.logger import logger\n'), ((3717, 3784), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['f"""Current version of opera browser: {browser_version}"""'], {}), "(f'Current version of opera browser: {browser_version}')\n", (3728, 3784), False, 'from selenium_driver_updater.util.logger import logger\n'), ((5426, 5443), 'platform.system', 'platform.system', ([], {}), '()\n', (5441, 5443), False, 'import platform\n'), ((5611, 5632), 'selenium_driver_updater.util.logger.logger.error', 'logger.error', (['message'], {}), '(message)\n', (5623, 5632), False, 'from selenium_driver_updater.util.logger import logger\n'), ((5794, 5811), 'platform.system', 'platform.system', ([], {}), '()\n', (5809, 5811), False, 'import platform\n'), ((7140, 7157), 'platform.system', 'platform.system', ([], {}), '()\n', (7155, 7157), False, 'import platform\n'), ((7184, 7233), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Trying to kill all opera processes"""'], {}), "('Trying to kill all opera processes')\n", (7195, 7233), False, 'from selenium_driver_updater.util.logger import logger\n'), ((7246, 7349), 'subprocess.Popen', 'subprocess.Popen', (['"""killall Opera"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "('killall Opera', shell=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n", (7262, 7349), False, 'import subprocess\n'), ((7358, 7461), 'subprocess.Popen', 'subprocess.Popen', (['"""killall Opera"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "('killall Opera', shell=True, stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL)\n", (7374, 7461), False, 'import subprocess\n'), ((7470, 7524), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Successfully killed all opera processes"""'], {}), 
"('Successfully killed all opera processes')\n", (7481, 7524), False, 'from selenium_driver_updater.util.logger import logger\n'), ((7904, 7967), 'shutil.move', 'shutil.move', (['opera_browser_path', 'opera_browser_path_application'], {}), '(opera_browser_path, opera_browser_path_application)\n', (7915, 7967), False, 'import shutil\n'), ((7981, 8103), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['f"""Successfully moved opera browser from: {opera_browser_path} to: {opera_browser_path_application}"""'], {}), "(\n f'Successfully moved opera browser from: {opera_browser_path} to: {opera_browser_path_application}'\n )\n", (7992, 8103), False, 'from selenium_driver_updater.util.logger import logger\n'), ((9926, 9946), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['message'], {}), '(message)\n', (9937, 9946), False, 'from selenium_driver_updater.util.logger import logger\n'), ((10541, 10615), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Trying to get current version of opera browser via terminal"""'], {}), "('Trying to get current version of opera browser via terminal')\n", (10552, 10615), False, 'from selenium_driver_updater.util.logger import logger\n'), ((11316, 11407), 're.findall', 're.findall', (["self.setting['Program']['wedriverVersionPattern']", 'browser_version_terminal'], {}), "(self.setting['Program']['wedriverVersionPattern'],\n browser_version_terminal)\n", (11326, 11407), False, 'import re\n'), ((1581, 1598), 'platform.system', 'platform.system', ([], {}), '()\n', (1596, 1598), False, 'import platform\n'), ((1774, 1795), 'selenium_driver_updater.util.logger.logger.error', 'logger.error', (['message'], {}), '(message)\n', (1786, 1795), False, 'from selenium_driver_updater.util.logger import logger\n'), ((3225, 3245), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['message'], {}), '(message)\n', (3236, 3245), False, 'from selenium_driver_updater.util.logger import logger\n'), ((3535, 3626), 're.findall', 're.findall', (["('OPR/' + self.setting['Program']['wedriverVersionPattern'])", 'browser_version'], {}), "('OPR/' + self.setting['Program']['wedriverVersionPattern'],\n browser_version)\n", (3545, 3626), False, 'import re\n'), ((6144, 6161), 'platform.system', 'platform.system', ([], {}), '()\n', (6159, 6161), False, 'import platform\n'), ((6797, 6822), 'pathlib.Path', 'Path', (['(path + archive_name)'], {}), '(path + archive_name)\n', (6801, 6822), False, 'from pathlib import Path\n'), ((7846, 7891), 'shutil.rmtree', 'shutil.rmtree', (['opera_browser_path_application'], {}), '(opera_browser_path_application)\n', (7859, 7891), False, 'import shutil\n'), ((8197, 8214), 'platform.system', 'platform.system', ([], {}), '()\n', (8212, 8214), False, 'import platform\n'), ((8242, 8295), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Trying to kill all opera.exe processes"""'], {}), "('Trying to kill all opera.exe processes')\n", (8253, 8295), False, 'from selenium_driver_updater.util.logger import logger\n'), ((8308, 8429), 'subprocess.Popen', 'subprocess.Popen', (['"""taskkill /F /IM "opera.exe" /T"""'], {'shell': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), '(\'taskkill /F /IM "opera.exe" /T\', shell=True, stdout=\n subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n', (8324, 8429), False, 'import subprocess\n'), ((8437, 8495), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Successfully killed all opera.exe 
processes"""'], {}), "('Successfully killed all opera.exe processes')\n", (8448, 8495), False, 'from selenium_driver_updater.util.logger import logger\n'), ((8509, 8559), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Trying to install new opera browser"""'], {}), "('Trying to install new opera browser')\n", (8520, 8559), False, 'from selenium_driver_updater.util.logger import logger\n'), ((8572, 8701), 'os.system', 'os.system', (['f"""{archive_path} /install /silent /launchopera=no /desktopshortcut=no /pintotaskbar=no /setdefaultbrowser=0"""'], {}), "(\n f'{archive_path} /install /silent /launchopera=no /desktopshortcut=no /pintotaskbar=no /setdefaultbrowser=0'\n )\n", (8581, 8701), False, 'import os\n'), ((8704, 8761), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['"""Successfully updated current opera browser"""'], {}), "('Successfully updated current opera browser')\n", (8715, 8761), False, 'from selenium_driver_updater.util.logger import logger\n'), ((10632, 10649), 'platform.system', 'platform.system', ([], {}), '()\n', (10647, 10649), False, 'import platform\n'), ((10886, 10933), 're.findall', 're.findall', (['"""Opera.*"""', 'browser_version_terminal'], {}), "('Opera.*', browser_version_terminal)\n", (10896, 10933), False, 'import re\n'), ((2405, 2425), 'selenium_driver_updater.util.logger.logger.info', 'logger.info', (['message'], {}), '(message)\n', (2416, 2425), False, 'from selenium_driver_updater.util.logger import logger\n'), ((3346, 3400), 'selenium.webdriver.Opera', 'webdriver.Opera', ([], {'executable_path': 'self.operadriver_path'}), '(executable_path=self.operadriver_path)\n', (3361, 3400), False, 'from selenium import webdriver\n'), ((5877, 5894), 'platform.system', 'platform.system', ([], {}), '()\n', (5892, 5894), False, 'import platform\n'), ((6733, 6743), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6737, 6743), False, 'from pathlib import Path\n'), ((6766, 6776), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6770, 6776), False, 'from pathlib import Path\n'), ((6845, 6870), 'pathlib.Path', 'Path', (['(path + archive_name)'], {}), '(path + archive_name)\n', (6849, 6870), False, 'from pathlib import Path\n'), ((7783, 7819), 'pathlib.Path', 'Path', (['opera_browser_path_application'], {}), '(opera_browser_path_application)\n', (7787, 7819), False, 'from pathlib import Path\n'), ((8110, 8128), 'pathlib.Path', 'Path', (['archive_path'], {}), '(archive_path)\n', (8114, 8128), False, 'from pathlib import Path\n'), ((10686, 10745), 'subprocess.Popen', 'subprocess.Popen', (['operabrowser_path'], {'stdout': 'subprocess.PIPE'}), '(operabrowser_path, stdout=subprocess.PIPE)\n', (10702, 10745), False, 'import subprocess\n'), ((11061, 11078), 'platform.system', 'platform.system', ([], {}), '()\n', (11076, 11078), False, 'import platform\n'), ((3262, 3289), 'pathlib.Path', 'Path', (['self.operadriver_path'], {}), '(self.operadriver_path)\n', (3266, 3289), False, 'from pathlib import Path\n'), ((8155, 8173), 'pathlib.Path', 'Path', (['archive_path'], {}), '(archive_path)\n', (8159, 8173), False, 'from pathlib import Path\n'), ((11114, 11188), 'subprocess.Popen', 'subprocess.Popen', (["[operabrowser_path, '--version']"], {'stdout': 'subprocess.PIPE'}), "([operabrowser_path, '--version'], stdout=subprocess.PIPE)\n", (11130, 11188), False, 'import subprocess\n'), ((5853, 5863), 'os.uname', 'os.uname', ([], {}), '()\n', (5861, 5863), False, 'import os\n')]
|
import urllib.request
import yaml
import os
import json
import time
import logging
import requests
logger = logging.getLogger('IR')
_FETCH_CONFIG_RETRIES = 5
# Defines a process-wide instance of the config.
# Alternative is passing the configuration through the entire stack, which is feasible,
# but we consider that YAGNI for now.
_CONFIG_SINGLETON = None
def get_sample_service_url(sw_url, ss_release):
""""""
payload = {
"method": "ServiceWizard.get_service_status",
"id": '',
"params": [{"module_name": "SampleService", "version": ss_release}],
"version": "1.1"
}
headers = {'Content-Type': 'application/json'}
sw_resp = requests.post(url=sw_url, headers=headers, data=json.dumps(payload))
if not sw_resp.ok:
raise RuntimeError(f"ServiceWizard error, with code {sw_resp.status_code}. \n{sw_resp.text}")
wiz_resp = sw_resp.json()
if wiz_resp.get('error'):
raise RuntimeError(f"ServiceWizard {sw_url} with params"
f" {json.dumps(payload)} Error - " + str(wiz_resp['error']))
return wiz_resp['result'][0]['url']
def config(force_reload=False):
"""wrapper for get config that reloads config every 'config_timeout' seconds"""
global _CONFIG_SINGLETON
if not _CONFIG_SINGLETON:
# could do this on module load, but let's delay until actually requested
_CONFIG_SINGLETON = Config()
_CONFIG_SINGLETON.reload(force_reload=force_reload)
return _CONFIG_SINGLETON
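# Callers typically index the result directly, e.g. (illustrative): config()['elasticsearch_url']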
class Config:
"""
A class containing the configuration for the search indexer. Supports dict-like
configuration item access, e.g. config['ws-token'].
Not thread-safe.
"""
def __init__(self):
"""Initialize configuration data from the environment."""
self._cfg = {}
self.reload()
def reload(self, force_reload=False):
"""
Reload the configuration data from the environment.
Only reloads if the configuration has expired or force_reload is true.
"""
if self._cfg:
expired = (time.time() - self._cfg['last_config_reload']) > self._cfg['config_timeout']
if not expired and not force_reload:
# can remove force_reload once all reload logic is handled here
return
reqs = ['WORKSPACE_TOKEN', 'RE_API_TOKEN']
for req in reqs:
if not os.environ.get(req):
raise RuntimeError(f'{req} env var is not set.')
es_host = os.environ.get("ELASTICSEARCH_HOST", 'elasticsearch')
es_port = os.environ.get("ELASTICSEARCH_PORT", 9200)
kbase_endpoint = os.environ.get(
'KBASE_ENDPOINT', 'https://ci.kbase.us/services').strip('/')
workspace_url = os.environ.get('WS_URL', kbase_endpoint + '/ws')
catalog_url = os.environ.get('CATALOG_URL', kbase_endpoint + '/catalog')
re_api_url = os.environ.get('RE_URL', kbase_endpoint + '/relation_engine_api').strip('/')
service_wizard_url = os.environ.get('SW_URL', kbase_endpoint + '/service_wizard').strip('/')
sample_service_release = os.environ.get('SAMPLE_SERVICE_RELEASE', 'dev')
sample_service_url = get_sample_service_url(service_wizard_url, sample_service_release)
config_url = os.environ.get('GLOBAL_CONFIG_URL')
github_release_url = os.environ.get(
'GITHUB_RELEASE_URL',
'https://api.github.com/repos/kbase/index_runner_spec/releases/latest'
)
# Load the global configuration release (non-environment specific, public config)
if config_url and not config_url.startswith('http'):
raise RuntimeError(f"Invalid global config url: {config_url}")
if not github_release_url.startswith('http'):
raise RuntimeError(f"Invalid global github release url: {github_release_url}")
gh_token = os.environ.get('GITHUB_TOKEN')
global_config = _fetch_global_config(config_url, github_release_url, gh_token)
skip_indices = _get_comma_delimited_env('SKIP_INDICES')
allow_indices = _get_comma_delimited_env('ALLOW_INDICES')
# Use a tempfile to indicate that the service is done booting up
proc_ready_path = '/tmp/IR_READY' # nosec
# Set the indexer log messages index name from a configured index name or alias
msg_log_index_name = os.environ.get('MSG_LOG_INDEX_NAME', 'indexer_messages')
if msg_log_index_name in global_config['latest_versions']:
msg_log_index_name = global_config['latest_versions'][msg_log_index_name]
self._cfg = {
'skip_releng': os.environ.get('SKIP_RELENG'),
'skip_features': os.environ.get('SKIP_FEATURES'),
'skip_indices': skip_indices,
'allow_indices': allow_indices,
'global': global_config,
'github_release_url': github_release_url,
'github_token': gh_token,
'global_config_url': config_url,
'ws_token': os.environ['WORKSPACE_TOKEN'],
'mount_dir': os.environ.get('MOUNT_DIR', os.getcwd()),
'kbase_endpoint': kbase_endpoint,
'catalog_url': catalog_url,
'workspace_url': workspace_url,
're_api_url': re_api_url,
're_api_token': os.environ['RE_API_TOKEN'],
'sample_service_url': sample_service_url,
'elasticsearch_host': es_host,
'elasticsearch_port': es_port,
'elasticsearch_url': f"http://{es_host}:{es_port}",
'kafka_server': os.environ.get('KAFKA_SERVER', 'kafka'),
'kafka_clientgroup': os.environ.get('KAFKA_CLIENTGROUP', 'search_indexer'),
'error_index_name': os.environ.get('ERROR_INDEX_NAME', 'indexing_errors'),
'msg_log_index_name': msg_log_index_name,
'elasticsearch_index_prefix': os.environ.get('ELASTICSEARCH_INDEX_PREFIX', 'search2'),
'topics': {
'workspace_events': os.environ.get('KAFKA_WORKSPACE_TOPIC', 'workspaceevents'),
'admin_events': os.environ.get('KAFKA_ADMIN_TOPIC', 'indexeradminevents')
},
'config_timeout': 600, # 10 minutes in seconds.
'last_config_reload': time.time(),
'proc_ready_path': proc_ready_path, # File indicating the daemon is booted and ready
'generic_shard_count': os.environ.get('GENERIC_SHARD_COUNT', 2),
'generic_replica_count': os.environ.get('GENERIC_REPLICA_COUNT', 1),
}
def __getitem__(self, key):
return self._cfg[key]
def _fetch_global_config(config_url, github_release_url, gh_token):
"""
Fetch the index_runner_spec configuration file from the Github release
using either the direct URL to the file or by querying the repo's release
info using the GITHUB API.
"""
if config_url:
print('Fetching config from the direct url')
# Fetch the config directly from config_url
with urllib.request.urlopen(config_url) as res: # nosec
return yaml.safe_load(res) # type: ignore
else:
print('Fetching config from the release info')
# Fetch the config url from the release info
if gh_token:
headers = {'Authorization': f'token {gh_token}'}
else:
headers = {}
tries = 0
# Sometimes Github returns usage errors and a retry will solve it
while True:
release_info = requests.get(github_release_url, headers=headers).json()
if release_info.get('assets'):
break
if tries == _FETCH_CONFIG_RETRIES:
raise RuntimeError(f"Cannot fetch config from {github_release_url}: {release_info}")
tries += 1
for asset in release_info['assets']:
if asset['name'] == 'config.yaml':
download_url = asset['browser_download_url']
with urllib.request.urlopen(download_url) as res: # nosec
return yaml.safe_load(res)
raise RuntimeError("Unable to load the config.yaml file from index_runner_spec")
def _get_comma_delimited_env(key):
"""
Fetch a comma-delimited list of strings from an environment variable as a set.
"""
ret = set()
for piece in os.environ.get(key, '').split(','):
piece = piece.strip()
if piece:
ret.add(piece)
return ret
|
[
"os.getcwd",
"json.dumps",
"time.time",
"os.environ.get",
"yaml.safe_load",
"requests.get",
"logging.getLogger"
] |
[((109, 132), 'logging.getLogger', 'logging.getLogger', (['"""IR"""'], {}), "('IR')\n", (126, 132), False, 'import logging\n'), ((2529, 2582), 'os.environ.get', 'os.environ.get', (['"""ELASTICSEARCH_HOST"""', '"""elasticsearch"""'], {}), "('ELASTICSEARCH_HOST', 'elasticsearch')\n", (2543, 2582), False, 'import os\n'), ((2601, 2643), 'os.environ.get', 'os.environ.get', (['"""ELASTICSEARCH_PORT"""', '(9200)'], {}), "('ELASTICSEARCH_PORT', 9200)\n", (2615, 2643), False, 'import os\n'), ((2782, 2830), 'os.environ.get', 'os.environ.get', (['"""WS_URL"""', "(kbase_endpoint + '/ws')"], {}), "('WS_URL', kbase_endpoint + '/ws')\n", (2796, 2830), False, 'import os\n'), ((2853, 2911), 'os.environ.get', 'os.environ.get', (['"""CATALOG_URL"""', "(kbase_endpoint + '/catalog')"], {}), "('CATALOG_URL', kbase_endpoint + '/catalog')\n", (2867, 2911), False, 'import os\n'), ((3144, 3191), 'os.environ.get', 'os.environ.get', (['"""SAMPLE_SERVICE_RELEASE"""', '"""dev"""'], {}), "('SAMPLE_SERVICE_RELEASE', 'dev')\n", (3158, 3191), False, 'import os\n'), ((3309, 3344), 'os.environ.get', 'os.environ.get', (['"""GLOBAL_CONFIG_URL"""'], {}), "('GLOBAL_CONFIG_URL')\n", (3323, 3344), False, 'import os\n'), ((3374, 3486), 'os.environ.get', 'os.environ.get', (['"""GITHUB_RELEASE_URL"""', '"""https://api.github.com/repos/kbase/index_runner_spec/releases/latest"""'], {}), "('GITHUB_RELEASE_URL',\n 'https://api.github.com/repos/kbase/index_runner_spec/releases/latest')\n", (3388, 3486), False, 'import os\n'), ((3907, 3937), 'os.environ.get', 'os.environ.get', (['"""GITHUB_TOKEN"""'], {}), "('GITHUB_TOKEN')\n", (3921, 3937), False, 'import os\n'), ((4396, 4452), 'os.environ.get', 'os.environ.get', (['"""MSG_LOG_INDEX_NAME"""', '"""indexer_messages"""'], {}), "('MSG_LOG_INDEX_NAME', 'indexer_messages')\n", (4410, 4452), False, 'import os\n'), ((731, 750), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (741, 750), False, 'import json\n'), ((4655, 4684), 'os.environ.get', 'os.environ.get', (['"""SKIP_RELENG"""'], {}), "('SKIP_RELENG')\n", (4669, 4684), False, 'import os\n'), ((4715, 4746), 'os.environ.get', 'os.environ.get', (['"""SKIP_FEATURES"""'], {}), "('SKIP_FEATURES')\n", (4729, 4746), False, 'import os\n'), ((5586, 5625), 'os.environ.get', 'os.environ.get', (['"""KAFKA_SERVER"""', '"""kafka"""'], {}), "('KAFKA_SERVER', 'kafka')\n", (5600, 5625), False, 'import os\n'), ((5660, 5713), 'os.environ.get', 'os.environ.get', (['"""KAFKA_CLIENTGROUP"""', '"""search_indexer"""'], {}), "('KAFKA_CLIENTGROUP', 'search_indexer')\n", (5674, 5713), False, 'import os\n'), ((5747, 5800), 'os.environ.get', 'os.environ.get', (['"""ERROR_INDEX_NAME"""', '"""indexing_errors"""'], {}), "('ERROR_INDEX_NAME', 'indexing_errors')\n", (5761, 5800), False, 'import os\n'), ((5898, 5953), 'os.environ.get', 'os.environ.get', (['"""ELASTICSEARCH_INDEX_PREFIX"""', '"""search2"""'], {}), "('ELASTICSEARCH_INDEX_PREFIX', 'search2')\n", (5912, 5953), False, 'import os\n'), ((6275, 6286), 'time.time', 'time.time', ([], {}), '()\n', (6284, 6286), False, 'import time\n'), ((6421, 6461), 'os.environ.get', 'os.environ.get', (['"""GENERIC_SHARD_COUNT"""', '(2)'], {}), "('GENERIC_SHARD_COUNT', 2)\n", (6435, 6461), False, 'import os\n'), ((6500, 6542), 'os.environ.get', 'os.environ.get', (['"""GENERIC_REPLICA_COUNT"""', '(1)'], {}), "('GENERIC_REPLICA_COUNT', 1)\n", (6514, 6542), False, 'import os\n'), ((7095, 7114), 'yaml.safe_load', 'yaml.safe_load', (['res'], {}), '(res)\n', (7109, 7114), False, 'import yaml\n'), ((8335, 8358), 'os.environ.get', 
'os.environ.get', (['key', '""""""'], {}), "(key, '')\n", (8349, 8358), False, 'import os\n'), ((2425, 2444), 'os.environ.get', 'os.environ.get', (['req'], {}), '(req)\n', (2439, 2444), False, 'import os\n'), ((2669, 2733), 'os.environ.get', 'os.environ.get', (['"""KBASE_ENDPOINT"""', '"""https://ci.kbase.us/services"""'], {}), "('KBASE_ENDPOINT', 'https://ci.kbase.us/services')\n", (2683, 2733), False, 'import os\n'), ((2933, 2998), 'os.environ.get', 'os.environ.get', (['"""RE_URL"""', "(kbase_endpoint + '/relation_engine_api')"], {}), "('RE_URL', kbase_endpoint + '/relation_engine_api')\n", (2947, 2998), False, 'import os\n'), ((3039, 3099), 'os.environ.get', 'os.environ.get', (['"""SW_URL"""', "(kbase_endpoint + '/service_wizard')"], {}), "('SW_URL', kbase_endpoint + '/service_wizard')\n", (3053, 3099), False, 'import os\n'), ((5116, 5127), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5125, 5127), False, 'import os\n'), ((6015, 6073), 'os.environ.get', 'os.environ.get', (['"""KAFKA_WORKSPACE_TOPIC"""', '"""workspaceevents"""'], {}), "('KAFKA_WORKSPACE_TOPIC', 'workspaceevents')\n", (6029, 6073), False, 'import os\n'), ((6107, 6164), 'os.environ.get', 'os.environ.get', (['"""KAFKA_ADMIN_TOPIC"""', '"""indexeradminevents"""'], {}), "('KAFKA_ADMIN_TOPIC', 'indexeradminevents')\n", (6121, 6164), False, 'import os\n'), ((2100, 2111), 'time.time', 'time.time', ([], {}), '()\n', (2109, 2111), False, 'import time\n'), ((7509, 7558), 'requests.get', 'requests.get', (['github_release_url'], {'headers': 'headers'}), '(github_release_url, headers=headers)\n', (7521, 7558), False, 'import requests\n'), ((8057, 8076), 'yaml.safe_load', 'yaml.safe_load', (['res'], {}), '(res)\n', (8071, 8076), False, 'import yaml\n'), ((1033, 1052), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1043, 1052), False, 'import json\n')]
|
#
# File:
# streamline1.py
#
# Synopsis:
# Draws streamlines on a map over water only.
#
# Category:
# Streamlines on a map.
#
# Author:
# <NAME>
#
# Date of original publication:
# December, 2004
#
# Description:
# This example draws streamlines over water on a map using a
# Cylindrical Equidistant map projection. The "add_cyclic"
# function is illustrated graphically.
#
# Effects illustrated:
# o Streamlines over maps.
# o Adding cyclic points.
# o Specifying colors by name.
# o Polylines.
# o Masking land areas.
#
# Output:
# This example produces two visualizations:
# 1.) Streamlines on a Cylindrical Equidistant map over water
# only highlighting missing cyclic points.
# 2.) Same as 1.) with the cyclic points added.
#
# Notes:
#
#
# Import Nio for reading netCDF files.
#
from __future__ import print_function
import Nio
#
# Import Ngl support functions.
#
import Ngl
import os
#
# Open the netCDF file.
#
file = Nio.open_file(os.path.join(Ngl.pynglpath("data"),"cdf","pop.nc"))
#
# Open a workstation.
#
wks_type = "png"
wks = Ngl.open_wks(wks_type,"streamline1")
#
# Get the u/v and lat/lon variables.
#
urot = file.variables["urot"]
vrot = file.variables["vrot"]
lat2d = file.variables["lat2d"]
lon2d = file.variables["lon2d"]
#
# Set up resource list.
#
resources = Ngl.Resources()
#
# Don't advance frame, because we want to draw a couple of lines on
# plot later.
#
resources.nglFrame = False
#
# Coordinate arrays for data
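# (the coordinate and data arrays below are subsampled to every 4th point)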
#
resources.vfXArray = lon2d[::4,::4]
resources.vfYArray = lat2d[::4,::4]
resources.mpProjection = "CylindricalEquidistant"
resources.mpFillOn = True
resources.mpLandFillColor = "Tan1"
resources.mpOceanFillColor = "SkyBlue"
resources.mpInlandWaterFillColor = "SkyBlue"
resources.mpGridAndLimbOn = False
resources.tiMainString = "Streamline plot without cyclic point added"
plot = Ngl.streamline_map(wks,urot[::4,::4],vrot[::4,::4],resources)
#
# Add a couple of lines showing the area where there's a gap in the
# data because of lack of a cyclic point. (It should be obvious even
# without the lines.)
#
line_res = Ngl.Resources() # line resources
line_res.gsLineColor = "Red" # line color
line_res.gsLineThicknessF = 1.5 # line thickness scale
line_res.gsLineDashPattern = 2 # dashed lines
Ngl.polyline(wks,plot,lon2d[::4,0],lat2d[::4,0],line_res)
Ngl.polyline(wks,plot,lon2d[::4,-1],lat2d[::4,-1],line_res)
#
# Add a text string explaining the lines.
#
text_res = Ngl.Resources() # text resources
text_res.txFontHeightF = 0.03 # font height
text_res.txFontColor = "Red"
Ngl.text_ndc(wks,"dashed red line shows area with no data",0.5,0.17,text_res)
Ngl.frame(wks) # Now advance frame.
#
# Add cyclic points. Since lat2d/lon2d are 2D arrays, make them
# cyclic the same way you do the 2D data array.
#
u = Ngl.add_cyclic(urot[::4,::4])
v = Ngl.add_cyclic(vrot[::4,::4])
lon = Ngl.add_cyclic(lon2d[::4,::4])
lat = Ngl.add_cyclic(lat2d[::4,::4])
#
# Specify new coordinate arrays for data.
#
resources.vfXArray = lon
resources.vfYArray = lat
resources.tiMainString = "Streamline plot with cyclic point added"
plot = Ngl.streamline_map(wks,u,v,resources)
#
# Add a couple of lines showing the area where the missing data were.
# Make the lines solid so we can see them.
#
line_res.gsLineDashPattern = 0
Ngl.polyline(wks,plot,lon2d[::4,0],lat2d[::4,0],line_res)
Ngl.polyline(wks,plot,lon2d[::4,-1],lat2d[::4,-1],line_res)
#
# Add a text string explaining the lines.
#
Ngl.text_ndc(wks,"red line shows area that previously had no data",0.5,0.17,text_res)
Ngl.frame(wks)
Ngl.end()
|
[
"Ngl.open_wks",
"Ngl.end",
"Ngl.frame",
"Ngl.streamline_map",
"Ngl.polyline",
"Ngl.pynglpath",
"Ngl.add_cyclic",
"Ngl.Resources",
"Ngl.text_ndc"
] |
[((1137, 1174), 'Ngl.open_wks', 'Ngl.open_wks', (['wks_type', '"""streamline1"""'], {}), "(wks_type, 'streamline1')\n", (1149, 1174), False, 'import Ngl\n'), ((1384, 1399), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (1397, 1399), False, 'import Ngl\n'), ((1985, 2051), 'Ngl.streamline_map', 'Ngl.streamline_map', (['wks', 'urot[::4, ::4]', 'vrot[::4, ::4]', 'resources'], {}), '(wks, urot[::4, ::4], vrot[::4, ::4], resources)\n', (2003, 2051), False, 'import Ngl\n'), ((2242, 2257), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (2255, 2257), False, 'import Ngl\n'), ((2482, 2545), 'Ngl.polyline', 'Ngl.polyline', (['wks', 'plot', 'lon2d[::4, 0]', 'lat2d[::4, 0]', 'line_res'], {}), '(wks, plot, lon2d[::4, 0], lat2d[::4, 0], line_res)\n', (2494, 2545), False, 'import Ngl\n'), ((2541, 2606), 'Ngl.polyline', 'Ngl.polyline', (['wks', 'plot', 'lon2d[::4, -1]', 'lat2d[::4, -1]', 'line_res'], {}), '(wks, plot, lon2d[::4, -1], lat2d[::4, -1], line_res)\n', (2553, 2606), False, 'import Ngl\n'), ((2678, 2693), 'Ngl.Resources', 'Ngl.Resources', ([], {}), '()\n', (2691, 2693), False, 'import Ngl\n'), ((2816, 2901), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', '"""dashed red line shows area with no data"""', '(0.5)', '(0.17)', 'text_res'], {}), "(wks, 'dashed red line shows area with no data', 0.5, 0.17,\n text_res)\n", (2828, 2901), False, 'import Ngl\n'), ((2895, 2909), 'Ngl.frame', 'Ngl.frame', (['wks'], {}), '(wks)\n', (2904, 2909), False, 'import Ngl\n'), ((3090, 3120), 'Ngl.add_cyclic', 'Ngl.add_cyclic', (['urot[::4, ::4]'], {}), '(urot[::4, ::4])\n', (3104, 3120), False, 'import Ngl\n'), ((3126, 3156), 'Ngl.add_cyclic', 'Ngl.add_cyclic', (['vrot[::4, ::4]'], {}), '(vrot[::4, ::4])\n', (3140, 3156), False, 'import Ngl\n'), ((3162, 3193), 'Ngl.add_cyclic', 'Ngl.add_cyclic', (['lon2d[::4, ::4]'], {}), '(lon2d[::4, ::4])\n', (3176, 3193), False, 'import Ngl\n'), ((3199, 3230), 'Ngl.add_cyclic', 'Ngl.add_cyclic', (['lat2d[::4, ::4]'], {}), '(lat2d[::4, ::4])\n', (3213, 3230), False, 'import Ngl\n'), ((3411, 3451), 'Ngl.streamline_map', 'Ngl.streamline_map', (['wks', 'u', 'v', 'resources'], {}), '(wks, u, v, resources)\n', (3429, 3451), False, 'import Ngl\n'), ((3600, 3663), 'Ngl.polyline', 'Ngl.polyline', (['wks', 'plot', 'lon2d[::4, 0]', 'lat2d[::4, 0]', 'line_res'], {}), '(wks, plot, lon2d[::4, 0], lat2d[::4, 0], line_res)\n', (3612, 3663), False, 'import Ngl\n'), ((3659, 3724), 'Ngl.polyline', 'Ngl.polyline', (['wks', 'plot', 'lon2d[::4, -1]', 'lat2d[::4, -1]', 'line_res'], {}), '(wks, plot, lon2d[::4, -1], lat2d[::4, -1], line_res)\n', (3671, 3724), False, 'import Ngl\n'), ((3767, 3861), 'Ngl.text_ndc', 'Ngl.text_ndc', (['wks', '"""red line shows area that previously had no data"""', '(0.5)', '(0.17)', 'text_res'], {}), "(wks, 'red line shows area that previously had no data', 0.5, \n 0.17, text_res)\n", (3779, 3861), False, 'import Ngl\n'), ((3854, 3868), 'Ngl.frame', 'Ngl.frame', (['wks'], {}), '(wks)\n', (3863, 3868), False, 'import Ngl\n'), ((3870, 3879), 'Ngl.end', 'Ngl.end', ([], {}), '()\n', (3877, 3879), False, 'import Ngl\n'), ((1047, 1068), 'Ngl.pynglpath', 'Ngl.pynglpath', (['"""data"""'], {}), "('data')\n", (1060, 1068), False, 'import Ngl\n')]
|
from setuptools import find_packages, setup
setup(
name='grpcping',
version='0.1.0',
packages=find_packages(),
include_package_data=True,
url='https://github.com/hashimom/grpcping',
entry_points={
"console_scripts": [
"grpcping = grpcping.ping:main",
]
},
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description=''
)
|
[
"setuptools.find_packages"
] |
[((107, 122), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (120, 122), False, 'from setuptools import find_packages, setup\n')]
|
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.utils.safestring import mark_safe
from django.template import Library
import json
register = Library()
@register.filter
def jsonify(object):
return json.dumps(object, cls=DjangoJSONEncoder)
|
[
"django.template.Library",
"json.dumps"
] |
[((255, 264), 'django.template.Library', 'Library', ([], {}), '()\n', (262, 264), False, 'from django.template import Library\n'), ((316, 357), 'json.dumps', 'json.dumps', (['object'], {'cls': 'DjangoJSONEncoder'}), '(object, cls=DjangoJSONEncoder)\n', (326, 357), False, 'import json\n')]
|
# Implementation from https://github.com/nmhkahn/CARN-pytorch
# Fast, Accurate, and Lightweight Super-Resolution with Cascading Residual Network
# https://arxiv.org/abs/1803.08664
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.hub import load_state_dict_from_url
__all__ = [ 'carn', 'carn_m' ]
urls = {
'carn': 'https://github.com/Coloquinte/torchSR/releases/download/v1.0.3/carn.pt',
'carn_m': 'https://github.com/Coloquinte/torchSR/releases/download/v1.0.3/carn_m.pt',
}
class MeanShift(nn.Module):
def __init__(self, mean_rgb, sub):
super(MeanShift, self).__init__()
sign = -1 if sub else 1
r = mean_rgb[0] * sign
g = mean_rgb[1] * sign
b = mean_rgb[2] * sign
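# A fixed 1x1 convolution: identity weights, bias set to +/- the dataset RGB mean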
self.shifter = nn.Conv2d(3, 3, 1, 1, 0)
self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.shifter.bias.data = torch.Tensor([r, g, b])
# Freeze the mean shift layer
for params in self.shifter.parameters():
params.requires_grad = False
def forward(self, x):
x = self.shifter(x)
return x
class BasicBlock(nn.Module):
def __init__(self,
in_channels, out_channels,
ksize=3, stride=1, pad=1):
super(BasicBlock, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channels, out_channels, ksize, stride, pad),
nn.ReLU(inplace=True)
)
def forward(self, x):
out = self.body(x)
return out
class ResidualBlock(nn.Module):
def __init__(self,
in_channels, out_channels):
super(ResidualBlock, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1),
)
def forward(self, x):
out = self.body(x)
out = F.relu(out + x)
return out
class EResidualBlock(nn.Module):
def __init__(self,
in_channels, out_channels,
group=1):
super(EResidualBlock, self).__init__()
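# Grouped 3x3 convolutions keep the parameter count low (the "efficient" residual block used by CARN-M)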
self.body = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=group),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=group),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 1, 1, 0),
)
def forward(self, x):
out = self.body(x)
out = F.relu(out + x)
return out
class UpsampleBlock(nn.Module):
def __init__(self,
n_channels, scale, multi_scale,
group=1):
super(UpsampleBlock, self).__init__()
if multi_scale:
self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
else:
self.up = _UpsampleBlock(n_channels, scale=scale, group=group)
self.multi_scale = multi_scale
def forward(self, x, scale):
if self.multi_scale:
if scale == 2:
return self.up2(x)
elif scale == 3:
return self.up3(x)
elif scale == 4:
return self.up4(x)
else:
return self.up(x)
class _UpsampleBlock(nn.Module):
def __init__(self,
n_channels, scale,
group=1):
super(_UpsampleBlock, self).__init__()
modules = []
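# Build log2(scale) conv + PixelShuffle(2) stages for x2/x4/x8, or a single x3 stage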
if scale == 2 or scale == 4 or scale == 8:
for _ in range(int(math.log(scale, 2))):
modules += [nn.Conv2d(n_channels, 4*n_channels, 3, 1, 1, groups=group), nn.ReLU(inplace=True)]
modules += [nn.PixelShuffle(2)]
elif scale == 3:
modules += [nn.Conv2d(n_channels, 9*n_channels, 3, 1, 1, groups=group), nn.ReLU(inplace=True)]
modules += [nn.PixelShuffle(3)]
self.body = nn.Sequential(*modules)
def forward(self, x):
out = self.body(x)
return out
class CARNBlock(nn.Module):
def __init__(self,
in_channels, out_channels,
group=1):
super(CARNBlock, self).__init__()
self.b1 = ResidualBlock(64, 64)
self.b2 = ResidualBlock(64, 64)
self.b3 = ResidualBlock(64, 64)
self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
def forward(self, x):
c0 = o0 = x
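# Cascading connections: each block output is concatenated with all earlier features, then fused by a 1x1 conv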
b1 = self.b1(o0)
c1 = torch.cat([c0, b1], dim=1)
o1 = self.c1(c1)
b2 = self.b2(o1)
c2 = torch.cat([c1, b2], dim=1)
o2 = self.c2(c2)
b3 = self.b3(o2)
c3 = torch.cat([c2, b3], dim=1)
o3 = self.c3(c3)
return o3
class CARN(nn.Module):
def __init__(self, scale, pretrained=False, map_location=None):
super(CARN, self).__init__()
multi_scale = True
group = 1
self.scale = scale
self.sub_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=True)
self.add_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=False)
self.entry = nn.Conv2d(3, 64, 3, 1, 1)
self.b1 = CARNBlock(64, 64)
self.b2 = CARNBlock(64, 64)
self.b3 = CARNBlock(64, 64)
self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
self.upsample = UpsampleBlock(64, scale=scale,
multi_scale=multi_scale,
group=group)
self.exit = nn.Conv2d(64, 3, 3, 1, 1)
if pretrained:
self.load_pretrained(map_location=map_location)
def forward(self, x, scale=None):
if self.scale is not None:
if scale is not None and scale != self.scale:
raise ValueError(f"Network scale is {self.scale}, not {scale}")
scale = self.scale
else:
if scale is None:
raise ValueError(f"Network scale was not set")
x = self.sub_mean(x)
x = self.entry(x)
c0 = o0 = x
b1 = self.b1(o0)
c1 = torch.cat([c0, b1], dim=1)
o1 = self.c1(c1)
b2 = self.b2(o1)
c2 = torch.cat([c1, b2], dim=1)
o2 = self.c2(c2)
b3 = self.b3(o2)
c3 = torch.cat([c2, b3], dim=1)
o3 = self.c3(c3)
out = self.upsample(o3, scale=scale)
out = self.exit(out)
out = self.add_mean(out)
return out
def load_pretrained(self, map_location=None):
if torch.cuda.is_available():
map_location = torch.device('cuda')
else:
map_location = torch.device('cpu')
state_dict = load_state_dict_from_url(urls["carn"], map_location=map_location, progress=True)
self.load_state_dict(state_dict)
class CARNMBlock(nn.Module):
def __init__(self,
in_channels, out_channels,
group=1):
super(CARNMBlock, self).__init__()
self.b1 = EResidualBlock(64, 64, group=group)
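# A single shared residual block is reused (recursively) for all three stages in forward()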
self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
def forward(self, x):
c0 = o0 = x
b1 = self.b1(o0)
c1 = torch.cat([c0, b1], dim=1)
o1 = self.c1(c1)
b2 = self.b1(o1)
c2 = torch.cat([c1, b2], dim=1)
o2 = self.c2(c2)
b3 = self.b1(o2)
c3 = torch.cat([c2, b3], dim=1)
o3 = self.c3(c3)
return o3
class CARNM(nn.Module):
def __init__(self, scale, pretrained=False, map_location=None):
super(CARNM, self).__init__()
multi_scale = True
group = 4
self.scale = scale
self.sub_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=True)
self.add_mean = MeanShift((0.4488, 0.4371, 0.4040), sub=False)
self.entry = nn.Conv2d(3, 64, 3, 1, 1)
self.b1 = CARNMBlock(64, 64, group=group)
self.b2 = CARNMBlock(64, 64, group=group)
self.b3 = CARNMBlock(64, 64, group=group)
self.c1 = BasicBlock(64*2, 64, 1, 1, 0)
self.c2 = BasicBlock(64*3, 64, 1, 1, 0)
self.c3 = BasicBlock(64*4, 64, 1, 1, 0)
self.upsample = UpsampleBlock(64, scale=scale,
multi_scale=multi_scale,
group=group)
self.exit = nn.Conv2d(64, 3, 3, 1, 1)
if pretrained:
self.load_pretrained(map_location=map_location)
def forward(self, x, scale=None):
if self.scale is not None:
if scale is not None and scale != self.scale:
raise ValueError(f"Network scale is {self.scale}, not {scale}")
scale = self.scale
else:
if scale is None:
raise ValueError(f"Network scale was not set")
x = self.sub_mean(x)
x = self.entry(x)
c0 = o0 = x
b1 = self.b1(o0)
c1 = torch.cat([c0, b1], dim=1)
o1 = self.c1(c1)
b2 = self.b2(o1)
c2 = torch.cat([c1, b2], dim=1)
o2 = self.c2(c2)
b3 = self.b3(o2)
c3 = torch.cat([c2, b3], dim=1)
o3 = self.c3(c3)
out = self.upsample(o3, scale=scale)
out = self.exit(out)
out = self.add_mean(out)
return out
def load_pretrained(self, map_location=None):
if not torch.cuda.is_available():
map_location = torch.device('cpu')
state_dict = load_state_dict_from_url(urls["carn_m"], map_location=map_location, progress=True)
self.load_state_dict(state_dict)
def carn(scale, pretrained=False):
return CARN(scale, pretrained)
def carn_m(scale, pretrained=False):
return CARNM(scale, pretrained)
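# Example usage (illustrative only):
#   model = carn(scale=2, pretrained=True)
#   sr = model(lr)   # lr: float tensor of shape (N, 3, H, W)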
|
[
"torch.hub.load_state_dict_from_url",
"torch.nn.ReLU",
"torch.eye",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.cat",
"torch.Tensor",
"torch.cuda.is_available",
"torch.nn.functional.relu",
"torch.device",
"math.log",
"torch.nn.PixelShuffle"
] |
[((789, 813), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)', '(1)', '(1)', '(0)'], {}), '(3, 3, 1, 1, 0)\n', (798, 813), True, 'import torch.nn as nn\n'), ((914, 937), 'torch.Tensor', 'torch.Tensor', (['[r, g, b]'], {}), '([r, g, b])\n', (926, 937), False, 'import torch\n'), ((1964, 1979), 'torch.nn.functional.relu', 'F.relu', (['(out + x)'], {}), '(out + x)\n', (1970, 1979), True, 'import torch.nn.functional as F\n'), ((2567, 2582), 'torch.nn.functional.relu', 'F.relu', (['(out + x)'], {}), '(out + x)\n', (2573, 2582), True, 'import torch.nn.functional as F\n'), ((4079, 4102), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (4092, 4102), True, 'import torch.nn as nn\n'), ((4701, 4727), 'torch.cat', 'torch.cat', (['[c0, b1]'], {'dim': '(1)'}), '([c0, b1], dim=1)\n', (4710, 4727), False, 'import torch\n'), ((4800, 4826), 'torch.cat', 'torch.cat', (['[c1, b2]'], {'dim': '(1)'}), '([c1, b2], dim=1)\n', (4809, 4826), False, 'import torch\n'), ((4899, 4925), 'torch.cat', 'torch.cat', (['[c2, b3]'], {'dim': '(1)'}), '([c2, b3], dim=1)\n', (4908, 4925), False, 'import torch\n'), ((5361, 5386), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)', '(1)', '(1)'], {}), '(3, 64, 3, 1, 1)\n', (5370, 5386), True, 'import torch.nn as nn\n'), ((5839, 5864), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)', '(3)', '(1)', '(1)'], {}), '(64, 3, 3, 1, 1)\n', (5848, 5864), True, 'import torch.nn as nn\n'), ((6429, 6455), 'torch.cat', 'torch.cat', (['[c0, b1]'], {'dim': '(1)'}), '([c0, b1], dim=1)\n', (6438, 6455), False, 'import torch\n'), ((6528, 6554), 'torch.cat', 'torch.cat', (['[c1, b2]'], {'dim': '(1)'}), '([c1, b2], dim=1)\n', (6537, 6554), False, 'import torch\n'), ((6627, 6653), 'torch.cat', 'torch.cat', (['[c2, b3]'], {'dim': '(1)'}), '([c2, b3], dim=1)\n', (6636, 6653), False, 'import torch\n'), ((6870, 6895), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6893, 6895), False, 'import torch\n'), ((7027, 7112), 'torch.hub.load_state_dict_from_url', 'load_state_dict_from_url', (["urls['carn']"], {'map_location': 'map_location', 'progress': '(True)'}), "(urls['carn'], map_location=map_location, progress=True\n )\n", (7051, 7112), False, 'from torch.hub import load_state_dict_from_url\n'), ((7603, 7629), 'torch.cat', 'torch.cat', (['[c0, b1]'], {'dim': '(1)'}), '([c0, b1], dim=1)\n', (7612, 7629), False, 'import torch\n'), ((7702, 7728), 'torch.cat', 'torch.cat', (['[c1, b2]'], {'dim': '(1)'}), '([c1, b2], dim=1)\n', (7711, 7728), False, 'import torch\n'), ((7801, 7827), 'torch.cat', 'torch.cat', (['[c2, b3]'], {'dim': '(1)'}), '([c2, b3], dim=1)\n', (7810, 7827), False, 'import torch\n'), ((8257, 8282), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)', '(3)', '(1)', '(1)'], {}), '(3, 64, 3, 1, 1)\n', (8266, 8282), True, 'import torch.nn as nn\n'), ((8777, 8802), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)', '(3)', '(1)', '(1)'], {}), '(64, 3, 3, 1, 1)\n', (8786, 8802), True, 'import torch.nn as nn\n'), ((9394, 9420), 'torch.cat', 'torch.cat', (['[c0, b1]'], {'dim': '(1)'}), '([c0, b1], dim=1)\n', (9403, 9420), False, 'import torch\n'), ((9493, 9519), 'torch.cat', 'torch.cat', (['[c1, b2]'], {'dim': '(1)'}), '([c1, b2], dim=1)\n', (9502, 9519), False, 'import torch\n'), ((9592, 9618), 'torch.cat', 'torch.cat', (['[c2, b3]'], {'dim': '(1)'}), '([c2, b3], dim=1)\n', (9601, 9618), False, 'import torch\n'), ((9934, 10020), 'torch.hub.load_state_dict_from_url', 'load_state_dict_from_url', (["urls['carn_m']"], {'map_location': 'map_location', 
'progress': '(True)'}), "(urls['carn_m'], map_location=map_location,\n progress=True)\n", (9958, 10020), False, 'from torch.hub import load_state_dict_from_url\n'), ((1372, 1428), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', 'ksize', 'stride', 'pad'], {}), '(in_channels, out_channels, ksize, stride, pad)\n', (1381, 1428), True, 'import torch.nn as nn\n'), ((1442, 1463), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1449, 1463), True, 'import torch.nn as nn\n'), ((1744, 1789), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)', '(1)'], {}), '(in_channels, out_channels, 3, 1, 1)\n', (1753, 1789), True, 'import torch.nn as nn\n'), ((1803, 1824), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1810, 1824), True, 'import torch.nn as nn\n'), ((1838, 1884), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)', '(1)', '(1)'], {}), '(out_channels, out_channels, 3, 1, 1)\n', (1847, 1884), True, 'import torch.nn as nn\n'), ((2224, 2283), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(3)', '(1)', '(1)'], {'groups': 'group'}), '(in_channels, out_channels, 3, 1, 1, groups=group)\n', (2233, 2283), True, 'import torch.nn as nn\n'), ((2297, 2318), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2304, 2318), True, 'import torch.nn as nn\n'), ((2332, 2392), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(3)', '(1)', '(1)'], {'groups': 'group'}), '(out_channels, out_channels, 3, 1, 1, groups=group)\n', (2341, 2392), True, 'import torch.nn as nn\n'), ((2406, 2427), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2413, 2427), True, 'import torch.nn as nn\n'), ((2441, 2487), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels', '(1)', '(1)', '(0)'], {}), '(out_channels, out_channels, 1, 1, 0)\n', (2450, 2487), True, 'import torch.nn as nn\n'), ((6924, 6944), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6936, 6944), False, 'import torch\n'), ((6986, 7005), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6998, 7005), False, 'import torch\n'), ((9839, 9864), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9862, 9864), False, 'import torch\n'), ((9893, 9912), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (9905, 9912), False, 'import torch\n'), ((849, 861), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (858, 861), False, 'import torch\n'), ((3701, 3719), 'math.log', 'math.log', (['scale', '(2)'], {}), '(scale, 2)\n', (3709, 3719), False, 'import math\n'), ((3751, 3811), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channels', '(4 * n_channels)', '(3)', '(1)', '(1)'], {'groups': 'group'}), '(n_channels, 4 * n_channels, 3, 1, 1, groups=group)\n', (3760, 3811), True, 'import torch.nn as nn\n'), ((3811, 3832), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3818, 3832), True, 'import torch.nn as nn\n'), ((3862, 3880), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(2)'], {}), '(2)\n', (3877, 3880), True, 'import torch.nn as nn\n'), ((3931, 3991), 'torch.nn.Conv2d', 'nn.Conv2d', (['n_channels', '(9 * n_channels)', '(3)', '(1)', '(1)'], {'groups': 'group'}), '(n_channels, 9 * n_channels, 3, 1, 1, groups=group)\n', (3940, 3991), True, 'import torch.nn as nn\n'), ((3991, 4012), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3998, 4012), True, 
'import torch.nn as nn\n'), ((4038, 4056), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['(3)'], {}), '(3)\n', (4053, 4056), True, 'import torch.nn as nn\n')]
|
# Author: <NAME>
# Date: 05 April 2020
# Project: Challenger
import os, json, sys, subprocess
rmf = ['bin', 'etc', 'include', 'lib', 'lib64', 'pyvenv.cfg', 'share']
msk = ['challenger', 'package', 'elementtree', 'ffprobe']
def packages_from_project(path):
try:
cmd = 'pipreqs --force --no-pin --print --savepath /dev/null'
pck = subprocess.check_output(cmd.split(' ') + [path])
return pck.decode('utf-8')[:-1].split('\n')
except:
return []
def update_requirements(path, mask):
try:
cmd = 'pipreqs --force --print --savepath /dev/null'
pck = subprocess.check_output(cmd.split(' ') + [path])
pck = pck.decode('utf-8')[:-1].split('\n')
pck = [e for e in pck if not e.split('==')[0] in mask and len(e) > 0]
if len(pck) != 0:
with open('{}/requirements.txt'.format(path), 'w') as f:
f.write('\n'.join(pck)+'\n')
except:
pass
def compile_list_packages(packages):
lst = []
for itm in packages:
for p in itm:
if p not in lst:
lst.append(p)
return lst
def c_server(file='config-instance.json'):
cfg = {'instance_type': 't3.micro'}
if os.path.exists(file): cfg.update(json.load(open(file)))
return cfg
def get_tags(file='.elasticbeanstalk/config.yml'):
import yaml
if os.path.exists(file):
cfg = yaml.safe_load(open(file))
app = cfg.get('global').get('application_name').lower()
try: nme = cfg.get('branch-defaults').get('master').get('environment').lower()
except: nme = cfg.get('tbranch-defaults').get('default').get('environment').lower()
cfg = dict(zip(['application', 'service'], [app, nme]))
return ','.join(['{}={}'.format(k,v) for k,v in cfg.items()])
else:
return ''
def env_vars(file='config-environment.json'):
import datetime
cfg = dict()
if os.path.exists(file): cfg.update(json.load(open(file)))
cfg.update({'BIRTH_DATE': str(datetime.datetime.now().date())})
return cfg
def get_conf(root='.', file='config-environment.json'):
cfg = dict()
for path in os.listdir(root):
if os.path.isdir('/'.join([root, path])):
fle = '/'.join([root, path, file])
if os.path.exists(fle): cfg.update(json.load(open(fle)))
return cfg
if __name__ == '__main__':
if sys.argv[1] == 'config-project':
os.system('rm -rf {}'.format(' '.join(rmf)))
os.system('python3 -m venv .')
if sys.argv[1] == 'create-project':
os.system('pip install setuptools wheel pip --upgrade')
os.system('pip install pipreqs --upgrade')
frc = ['numpy', 'cmake', 'jupyter', 'notebook', 'ipython', 'ipykernel']
with open('requirements.txt', 'w') as f: f.write('\n'.join(frc)+'\n')
os.system('pip install -r requirements.txt')
os.system('pip install jupyter_contrib_nbextensions')
os.system('jupyter contrib nbextension install --user')
os.system('jupyter nbextension enable codefolding/main')
src = os.getcwd().split('/')[-1]
os.system('python -m ipykernel install --user --name={}'.format(src.lower()))
lst = [d for d in os.listdir() if os.path.isdir(d) and not d.startswith('.') and d not in rmf]
lst = [packages_from_project(d) for d in lst]
lst = compile_list_packages(lst)
lst = [p for p in lst if p not in msk + frc]
with open('requirements.txt', 'w') as f: f.write('\n'.join(lst)+'\n')
os.system('pip install -r requirements.txt')
os.remove('requirements.txt')
if sys.argv[1] == 'update-project':
lst = [d for d in os.listdir() if os.path.isdir(d) and not d.startswith('.') and d not in rmf]
for drc in lst: update_requirements(drc, msk)
if sys.argv[1] == 'config-python':
vars_env = env_vars()
cfg_size = len(vars_env.keys())
if not os.path.exists('bin/activate-origin'):
os.system('cp bin/activate bin/activate-origin')
os.system('cp bin/activate-origin bin/activate')
env_vars = get_conf()
add_vars = ['export {}={}'.format(key, env_vars.get(key)) for key in sorted(env_vars.keys())]
add_vars = '\n' + '\n'.join(add_vars)
del_vars = ['unset {}'.format(key) for key in sorted(env_vars.keys())]
del_vars = '\n ' + '\n '.join(del_vars)
old_file = open('bin/activate').readlines()
new_file = ''.join(old_file[:37]) + del_vars + ''.join(old_file[37:]) + add_vars
open('bin/activate', 'w').write(new_file)
if sys.argv[1] == 'create-service':
src_tags = get_tags()
vars_env = env_vars()
cfg_size = len(vars_env.keys())
c_server = c_server()
template = 'eb create {} {} --envvars {} --tags {}'
instance = ' '.join(["--{} '{}'".format(k, str(v)) for k,v in c_server.items()])
env_vars = ','.join(["{}='{}'".format(k, str(v)) for k,v in vars_env.items()])
print('\n# Launch {} Creation'.format(sys.argv[2]))
print('# On {} with {} Associated Variables\n'.format(c_server.get('instance_type'), cfg_size))
os.system(template.format(sys.argv[2], instance, env_vars, src_tags))
if sys.argv[1] == 'config-service':
vars_env = env_vars()
cfg_size = len(vars_env.keys())
template = 'eb setenv {}'
env_vars = ','.join(['='.join([k, str(v)]) for k,v in vars_env.items()])
print('\n# Update Environment Variables')
print('# {} Associated Variables\n'.format(cfg_size))
os.system(template.format(env_vars))
if sys.argv[1] == 'config-docker':
vars_env = env_vars()
cfg_size = len(vars_env.keys())
env_vars = '\n'.join(['='.join([k, str(v)]) for k,v in vars_env.items()])
env_file = open('config-docker.env', 'w')
env_file.write(env_vars + '\n')
if sys.argv[1] == 'config-lambda':
sip = sys.argv[2].replace('.', '-')
try: avz = sys.argv[3]
except: avz = 'us-east-2'
try: key = sys.argv[4]
except: key = '../aws.pem'
os.system("scp -i {} packages.sh requirements.txt ec2-user@ec2-{}.{}.compute.amazonaws.com:~".format(key, sip, avz))
os.system("ssh -i {} ec2-user@ec2-{}.{}.compute.amazonaws.com 'bash -s' < packages.sh".format(key, sip, avz))
os.system("scp -i {} ec2-user@ec2-{}.{}.compute.amazonaws.com:~/app/packages.zip .".format(key, sip, avz))
os.system("unzip packages.zip -d packages")
os.remove("packages.zip")
if sys.argv[1] == 'create-lambda':
os.system("mkdir tmp; cp *.py *.json tmp; cp -r packages/* tmp")
os.system("cd tmp; zip -r ../function.zip *; cd ..; rm -rf tmp")
|
[
"os.remove",
"os.getcwd",
"os.path.isdir",
"os.path.exists",
"os.system",
"datetime.datetime.now",
"os.listdir"
] |
[((1227, 1247), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1241, 1247), False, 'import os, json, sys, subprocess\n'), ((1376, 1396), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1390, 1396), False, 'import os, json, sys, subprocess\n'), ((1937, 1957), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (1951, 1957), False, 'import os, json, sys, subprocess\n'), ((2169, 2185), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2179, 2185), False, 'import os, json, sys, subprocess\n'), ((2500, 2530), 'os.system', 'os.system', (['"""python3 -m venv ."""'], {}), "('python3 -m venv .')\n", (2509, 2530), False, 'import os, json, sys, subprocess\n'), ((2581, 2636), 'os.system', 'os.system', (['"""pip install setuptools wheel pip --upgrade"""'], {}), "('pip install setuptools wheel pip --upgrade')\n", (2590, 2636), False, 'import os, json, sys, subprocess\n'), ((2645, 2687), 'os.system', 'os.system', (['"""pip install pipreqs --upgrade"""'], {}), "('pip install pipreqs --upgrade')\n", (2654, 2687), False, 'import os, json, sys, subprocess\n'), ((2854, 2898), 'os.system', 'os.system', (['"""pip install -r requirements.txt"""'], {}), "('pip install -r requirements.txt')\n", (2863, 2898), False, 'import os, json, sys, subprocess\n'), ((2907, 2960), 'os.system', 'os.system', (['"""pip install jupyter_contrib_nbextensions"""'], {}), "('pip install jupyter_contrib_nbextensions')\n", (2916, 2960), False, 'import os, json, sys, subprocess\n'), ((2969, 3024), 'os.system', 'os.system', (['"""jupyter contrib nbextension install --user"""'], {}), "('jupyter contrib nbextension install --user')\n", (2978, 3024), False, 'import os, json, sys, subprocess\n'), ((3033, 3089), 'os.system', 'os.system', (['"""jupyter nbextension enable codefolding/main"""'], {}), "('jupyter nbextension enable codefolding/main')\n", (3042, 3089), False, 'import os, json, sys, subprocess\n'), ((3554, 3598), 'os.system', 'os.system', (['"""pip install -r requirements.txt"""'], {}), "('pip install -r requirements.txt')\n", (3563, 3598), False, 'import os, json, sys, subprocess\n'), ((3607, 3636), 'os.remove', 'os.remove', (['"""requirements.txt"""'], {}), "('requirements.txt')\n", (3616, 3636), False, 'import os, json, sys, subprocess\n'), ((4071, 4119), 'os.system', 'os.system', (['"""cp bin/activate-origin bin/activate"""'], {}), "('cp bin/activate-origin bin/activate')\n", (4080, 4119), False, 'import os, json, sys, subprocess\n'), ((6529, 6572), 'os.system', 'os.system', (['"""unzip packages.zip -d packages"""'], {}), "('unzip packages.zip -d packages')\n", (6538, 6572), False, 'import os, json, sys, subprocess\n'), ((6581, 6606), 'os.remove', 'os.remove', (['"""packages.zip"""'], {}), "('packages.zip')\n", (6590, 6606), False, 'import os, json, sys, subprocess\n'), ((6656, 6720), 'os.system', 'os.system', (['"""mkdir tmp; cp *.py *.json tmp; cp -r packages/* tmp"""'], {}), "('mkdir tmp; cp *.py *.json tmp; cp -r packages/* tmp')\n", (6665, 6720), False, 'import os, json, sys, subprocess\n'), ((6729, 6793), 'os.system', 'os.system', (['"""cd tmp; zip -r ../function.zip *; cd ..; rm -rf tmp"""'], {}), "('cd tmp; zip -r ../function.zip *; cd ..; rm -rf tmp')\n", (6738, 6793), False, 'import os, json, sys, subprocess\n'), ((2299, 2318), 'os.path.exists', 'os.path.exists', (['fle'], {}), '(fle)\n', (2313, 2318), False, 'import os, json, sys, subprocess\n'), ((3963, 4000), 'os.path.exists', 'os.path.exists', (['"""bin/activate-origin"""'], {}), "('bin/activate-origin')\n", 
(3977, 4000), False, 'import os, json, sys, subprocess\n'), ((4014, 4062), 'os.system', 'os.system', (['"""cp bin/activate bin/activate-origin"""'], {}), "('cp bin/activate bin/activate-origin')\n", (4023, 4062), False, 'import os, json, sys, subprocess\n'), ((3243, 3255), 'os.listdir', 'os.listdir', ([], {}), '()\n', (3253, 3255), False, 'import os, json, sys, subprocess\n'), ((3705, 3717), 'os.listdir', 'os.listdir', ([], {}), '()\n', (3715, 3717), False, 'import os, json, sys, subprocess\n'), ((3104, 3115), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3113, 3115), False, 'import os, json, sys, subprocess\n'), ((3259, 3275), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (3272, 3275), False, 'import os, json, sys, subprocess\n'), ((3721, 3737), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (3734, 3737), False, 'import os, json, sys, subprocess\n'), ((2027, 2050), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2048, 2050), False, 'import datetime\n')]
|
import collections
import math
import numpy as np
import mlpy
class TermFrequencyAnalyzer(object):
def __init__(self, *documents):
self.idf = self.compute_idf(*documents)
def compute_idf(self, *documents):
# document frequency
df = collections.defaultdict(int)
for tokens in documents:
for token in set(tokens):
df[token] += 1
# idf
idf = dict()
        for token, count in df.items():
idf[token] = math.log(float(len(documents)) / float(count))
return idf
def get_similarity(self, *strings):
if len(strings) <= 1:
return 0.0
counts = [collections.defaultdict(int) for _ in strings]
for index, tokens in enumerate(strings):
for token in tokens:
counts[index][token] += 1
score = 0.0
# intercept of the tokens
for token in set.intersection(*[set(tokens) for tokens in strings]):
# term frequency
tf = float(sum([count[token] for count in counts]))
score += tf * self.idf[token]
return score
class LongestAnalyzer(object):
def __init__(self, *documents):
pass
def get_similarity(self, a, b):
#return self.lcs(a, b)
a = np.array(list(a), dtype='U1').view(np.uint32)
b = np.array(list(b), dtype='U1').view(np.uint32)
length, path = mlpy.lcs_std(a, b)
return length
def lcs(self, a, b):
a = a[:200]
b = b[:200]
if (len(a) < len(b)):
a, b = b, a
M = len(a)
N = len(b)
arr = np.zeros((2, N + 1))
for i in range(1, M + 1):
curIdx = i % 2
prevIdx = 1 - curIdx
ai = a[i - 1]
for j in range(1, N + 1):
bj = b[j - 1]
if (ai == bj):
arr[curIdx][j] = 1 + arr[prevIdx][j - 1]
else:
arr[curIdx][j] = max(arr[curIdx][j - 1], arr[prevIdx][j])
return arr[M % 2][N]
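# Hedged usage sketch (added for illustration; not part of the original module). Documents are
# passed as pre-tokenized sequences, and the similarity score is a TF-IDF-weighted count of the
# tokens shared by every compared sequence. Running it needs the module's own imports (mlpy is
# only exercised by LongestAnalyzer).
if __name__ == '__main__':
    docs = [['the', 'quick', 'brown', 'fox'],
            ['the', 'lazy', 'dog'],
            ['the', 'quick', 'dog']]
    analyzer = TermFrequencyAnalyzer(*docs)
    print(analyzer.get_similarity(docs[0], docs[2]))  # shared tokens: 'the', 'quick'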
|
[
"collections.defaultdict",
"numpy.zeros",
"mlpy.lcs_std"
] |
[((268, 296), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (291, 296), False, 'import collections\n'), ((1437, 1455), 'mlpy.lcs_std', 'mlpy.lcs_std', (['a', 'b'], {}), '(a, b)\n', (1449, 1455), False, 'import mlpy\n'), ((1660, 1680), 'numpy.zeros', 'np.zeros', (['(2, N + 1)'], {}), '((2, N + 1))\n', (1668, 1680), True, 'import numpy as np\n'), ((684, 712), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (707, 712), False, 'import collections\n')]
|
"""Custom Forms"""
from django import forms
from django.db.models import (
CharField)
class CustomForm(forms.ModelForm):
"""Sample Custom form"""
sample_id = CharField("Sample Id:", max_length=200, editable=False)
|
[
"django.db.models.CharField"
] |
[((173, 228), 'django.db.models.CharField', 'CharField', (['"""Sample Id:"""'], {'max_length': '(200)', 'editable': '(False)'}), "('Sample Id:', max_length=200, editable=False)\n", (182, 228), False, 'from django.db.models import CharField\n')]
|
from field_types import field_type, field_regex_pattern
class Phone(field_type.FieldType):
name = "PHONE_NUMBER"
context = ["phone", "number", "telephone", "cell", "mobile", "call"]
patterns = []
# Strong pattern: e.g., (425) 882 8080, 425 882-8080, 425.882.8080
pattern = field_regex_pattern.RegexFieldPattern()
    pattern.regex = r'(\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]\d{3}[-\.\s]\d{4})' # noqa: E501
pattern.name = 'Phone (strong)'
pattern.strength = 0.7
patterns.append(pattern)
# Medium pattern: e.g., 425 8828080
pattern = field_regex_pattern.RegexFieldPattern()
pattern.regex = r'\b(\d{3}[-\.\s]\d{3}[-\.\s]??\d{4})\b'
pattern.name = 'Phone (medium)'
pattern.strength = 0.5
patterns.append(pattern)
# Weak pattern: e.g., 4258828080
pattern = field_regex_pattern.RegexFieldPattern()
pattern.regex = r'(\b\d{10}\b)'
pattern.name = 'Phone (weak)'
pattern.strength = 0.05
patterns.append(pattern)
patterns.sort(key=lambda p: p.strength, reverse=True)
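# Hedged sketch (illustration only; not part of the original module): it exercises the
# strongest pattern's regex directly with the standard `re` module. The FieldType machinery
# that normally applies these patterns is not shown here.
if __name__ == '__main__':
    import re
    strongest = Phone.patterns[0]  # sorted above, so index 0 holds the highest-strength pattern
    for sample in ['(425) 882 8080', 'no phone number here']:
        print(sample, '->', bool(re.search(strongest.regex, sample)))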
|
[
"field_types.field_regex_pattern.RegexFieldPattern"
] |
[((296, 335), 'field_types.field_regex_pattern.RegexFieldPattern', 'field_regex_pattern.RegexFieldPattern', ([], {}), '()\n', (333, 335), False, 'from field_types import field_type, field_regex_pattern\n'), ((583, 622), 'field_types.field_regex_pattern.RegexFieldPattern', 'field_regex_pattern.RegexFieldPattern', ([], {}), '()\n', (620, 622), False, 'from field_types import field_type, field_regex_pattern\n'), ((828, 867), 'field_types.field_regex_pattern.RegexFieldPattern', 'field_regex_pattern.RegexFieldPattern', ([], {}), '()\n', (865, 867), False, 'from field_types import field_type, field_regex_pattern\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-27 05:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('presto', '0034_auto_20180324_1319'),
]
operations = [
migrations.CreateModel(
name='AppraisalOption',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('label', models.CharField(max_length=128)),
('value', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('instruction', models.TextField(blank=True, default='')),
('word_count', models.IntegerField(default=0)),
('appraisal', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='ItemAppraisal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('style', models.CharField(max_length=64)),
('options', models.ManyToManyField(to='presto.AppraisalOption')),
],
),
migrations.AddField(
model_name='course',
name='suffix',
field=models.CharField(default='', max_length=10),
),
migrations.AddField(
model_name='courseestafette',
name='is_hidden',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='estafettetemplate',
name='editors',
field=models.ManyToManyField(related_name='est_editors', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='questionnairetemplate',
name='editors',
field=models.ManyToManyField(related_name='evt_editors', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='estafettetemplate',
name='last_editor',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='est_last_editor', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='questionnairetemplate',
name='last_editor',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='evt_last_editor', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='estafetteleg',
name='review_items',
field=models.ManyToManyField(to='presto.Item'),
),
]
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.IntegerField"
] |
[((292, 349), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (323, 349), False, 'from django.db import migrations, models\n'), ((1792, 1835), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(10)'}), "(default='', max_length=10)\n", (1808, 1835), False, 'from django.db import migrations, models\n'), ((1967, 2001), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1986, 2001), False, 'from django.db import migrations, models\n'), ((2133, 2212), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""est_editors"""', 'to': 'settings.AUTH_USER_MODEL'}), "(related_name='est_editors', to=settings.AUTH_USER_MODEL)\n", (2155, 2212), False, 'from django.db import migrations, models\n'), ((2348, 2427), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""evt_editors"""', 'to': 'settings.AUTH_USER_MODEL'}), "(related_name='evt_editors', to=settings.AUTH_USER_MODEL)\n", (2370, 2427), False, 'from django.db import migrations, models\n'), ((2565, 2704), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""est_last_editor"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='est_last_editor', to=settings.AUTH_USER_MODEL)\n", (2582, 2704), False, 'from django.db import migrations, models\n'), ((2842, 2981), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""evt_last_editor"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='evt_last_editor', to=settings.AUTH_USER_MODEL)\n", (2859, 2981), False, 'from django.db import migrations, models\n'), ((3109, 3149), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""presto.Item"""'}), "(to='presto.Item')\n", (3131, 3149), False, 'from django.db import migrations, models\n'), ((536, 629), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (552, 629), False, 'from django.db import migrations, models\n'), ((654, 686), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (670, 686), False, 'from django.db import migrations, models\n'), ((715, 745), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (734, 745), False, 'from django.db import migrations, models\n'), ((875, 968), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (891, 968), False, 'from django.db import migrations, models\n'), ((992, 1024), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1008, 1024), False, 'from django.db import migrations, models\n'), ((1059, 1099), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'default': '""""""'}), "(blank=True, 
default='')\n", (1075, 1099), False, 'from django.db import migrations, models\n'), ((1133, 1163), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1152, 1163), False, 'from django.db import migrations, models\n'), ((1196, 1226), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1215, 1226), False, 'from django.db import migrations, models\n'), ((1365, 1458), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1381, 1458), False, 'from django.db import migrations, models\n'), ((1482, 1514), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)'}), '(max_length=128)\n', (1498, 1514), False, 'from django.db import migrations, models\n'), ((1543, 1574), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (1559, 1574), False, 'from django.db import migrations, models\n'), ((1605, 1656), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""presto.AppraisalOption"""'}), "(to='presto.AppraisalOption')\n", (1627, 1656), False, 'from django.db import migrations, models\n')]
|
from tox._cmdline import main
main()
|
[
"tox._cmdline.main"
] |
[((31, 37), 'tox._cmdline.main', 'main', ([], {}), '()\n', (35, 37), False, 'from tox._cmdline import main\n')]
|
"""
digitalarchive.models
The module provides documented models and an ORM for interacting with the DA API.
"""
from __future__ import annotations
# Standard Library
import dataclasses
import json
import logging
import copy
from datetime import datetime, date
from typing import List, Any, Optional, Union, Dict, ClassVar
from abc import ABC
# 3rd Party Libraries
import pydantic
# Application Modules
from pydantic import validator
import digitalarchive.matching as matching
import digitalarchive.api as api
import digitalarchive.exceptions as exceptions
class Resource(pydantic.BaseModel, ABC):
"""
Abstract parent for all DigitalArchive objects.
    We add custom __hash__ and __eq__ methods so that hydrated and unhydrated records compare equal.
"""
id: str
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
if not self.__class__ == other.__class__:
return NotImplemented
else:
return self.id == other.id
class MatchingMixin:
"""Abstract parent for Resources that can be searched against."""
@classmethod
def match(cls, **kwargs) -> matching.ResourceMatcher:
"""Find a resource using passed keyword arguments.
Note:
            If called without arguments, returns all records in the DA.
        """
        # Check that no invalid search terms were passed.
for key in kwargs:
if key not in cls.__fields__.keys():
raise exceptions.InvalidSearchFieldError
# Prepare the "term" search field.
# If we've got both a name and a value, join them.
if kwargs.get("name") and kwargs.get("value"):
kwargs["term"] = " ".join([kwargs.pop("name"), kwargs.pop("value")])
# Otherwise, treat the one that exists as the term.
elif kwargs.get("name"):
kwargs["term"] = kwargs.pop("name")
elif kwargs.get("value"):
kwargs["term"] = kwargs.pop("value")
return matching.ResourceMatcher(cls, **kwargs)
class HydrateMixin:
"""Mixin for resources that can be individually accessed and hydrated."""
def pull(self):
"""Update the resource using data from the DA API."""
data = api.get(endpoint=self.endpoint, resource_id=self.id)
self.__init__(**data)
def hydrate(self):
"""
Populate all unhydrated fields of a resource.
"""
# Preserve unhydrated fields.
unhydrated_fields = copy.copy(self.__dict__)
# Hydrate
self.pull()
hydrated_fields = vars(self)
# Merge fields
for key, value in unhydrated_fields.items():
if (
hydrated_fields.get(key) is None
and unhydrated_fields.get(key) is not None
):
hydrated_fields[key] = value
# Re-initialize the object.
self.__init__(**hydrated_fields)
class Subject(Resource, MatchingMixin, HydrateMixin):
"""
A historical topic to which documents can be related.
Attributes:
id (str): The ID of the record.
name (str): The name of the subject.
value (str): An alias for :attr:`~digitalarchive.models.Subject.name`.
uri (str): The URI for the Subject in the API.
"""
name: str
# Optional fields
value: Optional[str] = None
uri: Optional[str] = None
# Private fields
endpoint: ClassVar[str] = "subject"
class Language(Resource):
"""
The original language of a resource.
Attributes:
id (str): An ISO 639-2/B language code.
name (str): The ISO language name for the language.
"""
name: Optional[str] = None
class Asset(Resource, ABC, HydrateMixin):
"""
Abstract parent for Translations, Transcriptions, and MediaFiles.
Note:
We don't define raw, html, or pdf here because they are not present on
the stub version of Assets.
"""
# pylint: disable=too-many-instance-attributes
filename: str
content_type: str
extension: str
asset_id: str
source_created_at: str
source_updated_at: str
url: Optional[str] = None
raw: Optional[str] = None
pdf: Optional[str] = None
html: Optional[str] = None
def hydrate(self):
"""Populate all unhydrated fields of a :class:`digitalarchive.models._Asset`."""
response = api.SESSION.get(
f"https://digitalarchive.wilsoncenter.org/{self.url}"
)
if response.status_code == 200:
# Preserve the raw content from the DA in any case.
self.raw = response.content
# Add add helper attributes for the common filetypes.
if self.extension == "html":
self.html = response.text
self.pdf = None
elif self.extension == "pdf":
self.pdf = response.content
self.html = None
else:
logging.warning(
"[!] Unknown file format '%s' encountered!", self.extension
)
else:
            raise exceptions.APIServerError(
                f"[!] Hydrating asset ID#: {self.id} failed with code: {response.status_code}"
            )
class Transcript(Asset):
"""A transcript of a document in its original language.
Attributes:
id (str): The ID# of the Transcript.
url (str): A URL to accessing the hydrated Transcript.
html (str): The html of of the Transcript.
pdf (bytes): A bytes object of the Transcript pdf content.
        raw (str or bytes): The raw content received from the DA API for the Transcript.
filename (str): The filename of the Transcript on the content server.
content_type (str): The MIME type of the Transcript file.
extension (str): The file extension of the Transcript.
asset_id (str): The Transcript's unique ID on the content server.
source_created_at (str): ISO 8601 timestamp of the first time the Translation was published.
source_updated_at (str): ISO 8601 timestamp of the last time the Translation was modified.
"""
url: str
html: Optional[str] = None
pdf: Optional[bytes] = None
raw: Union[str, bytes, None] = None
class Translation(Asset):
"""
A translation of a Document into a another language.
Attributes:
id (str): The ID# of the Translation.
language (:class:`digitalarchive.models.Language`) The langauge of the Translation.
html (str): The HTML-formatted text of the Translation.
pdf (bytes): A bytes object of the Translation pdf content.
        raw (str or bytes): The raw content received from the DA API for the Translation.
filename (str): The filename of the Translation on the content server.
content_type (str): The MIME type of the Translation file.
extension (str): The file extension of the Translation.
asset_id (str): The Translation's unique ID on the content server.
source_created_at (str): ISO 8601 timestamp of the first time the Translation was published.
source_updated_at (str): ISO 8601 timestamp of the last time the Translation was modified.
"""
url: str
language: Union[Language, dict]
html: Optional[str] = None
pdf: Optional[bytes] = None
raw: Optional[str] = None
class MediaFile(Asset):
"""
An original scan of a Document.
Attributes:
id (str): The ID# of the MediaFile.
pdf (bytes): A bytes object of the MediaFile content.
raw (str or bytes): The raw content received from the DA API for the MediaFile.
filename (str): The filename of the MediaFile on the content server.
content_type (str): The MIME type of the MediaFile file.
extension (str): The file extension of the MediaFile.
asset_id (str): The MediaFile's unique ID on the content server.
source_created_at (str): ISO 8601 timestamp of the first time the MediaFile was published.
source_updated_at (str): ISO 8601 timestamp of the last time the MediaFile was modified.
"""
path: str
def __init__(self, **data):
data["url"] = data.get("path")
super().__init__(**data)
class Contributor(Resource, MatchingMixin, HydrateMixin):
"""
An individual person or organization that contributed to the creation of the document.
Contributors are typically the Document's author, but for meeting minutes and similar documents,
a Contributor may simply be somebody who was in attendance at the meeting.
Attributes:
id (str): The ID# of the Contributor.
name (str): The name of the contributor.
uri (str): The URI of the contributor metadata on the DA API.
"""
name: str
value: Optional[str] = None
uri: Optional[str] = None
endpoint: ClassVar[str] = "contributor"
class Donor(Resource):
"""
An entity whose resources helped publish or translate a document.
Attributes:
id (str): The ID# of the Donor.
name (str): The name of the Donor.
"""
name: str
endpoint: ClassVar[str] = "donor"
class Coverage(Resource, MatchingMixin, HydrateMixin):
"""
A geographical area referenced by a Document.
Attributes:
id (str): The ID# of the geographic Coverage.
name (str): The name of geographic coverage area.
value (str): An alias to :attr:`~digitalarchive.models.Coverage.name`.
uri (str): URI to the Coverage's metadata on the DA API.
parent (:class:`~digitalarchive.models.Coverage`): The parent coverage,
            if any.
        children (list of :class:`~digitalarchive.models.Coverage`):
Subordinate geographical areas, if any.
"""
name: str
uri: str
value: Optional[str] = None
    parent: Union[Coverage, List, None] = None # Inconsistent endpoint. Parent is either a dict or an empty list.
children: Optional[List[Coverage]] = None
endpoint: ClassVar[str] = "coverage"
@validator("parent")
def _process_parent(cls, parent):
if isinstance(parent, list):
return None
return parent
Coverage.update_forward_refs()
class Collection(Resource, MatchingMixin, HydrateMixin):
"""
A collection of Documents on a single topic
Attributes:
name (str): The title of the collection.
slug (str): A url-friendly name of the collection.
uri (str): The URI of the record on the DA API.
parent(:class:`digitalarchive.models.Collection`): A `Collection` containing the `Collection`.
        model (str): A string name of the model used to differentiate `Collection` and `Document` searches in the DA API.
value (str): A string identical to the `title` field.
description (str): A 1-2 sentence description of the `Collection`'s content.
short_description (str): A short description that appears in search views.
main_src (str): Placeholder
no_of_documents (str): The count of documents contained in the collection.
is_inactive (str): Whether the collection is displayed in the collections list.
source_created_at(:class:`datetime.datetime`): Timestamp of when the Document was first added to the DA.
source_updated_at(:class:`datetime.datetime`): Timestamp of when the Document was last edited.
        first_published_at(:class:`datetime.datetime`): Timestamp of when the document was first made publicly
accessible.
"""
# pylint: disable=too-many-instance-attributes
# Required Fields
name: str
slug: str
# Optional Fields
uri: Optional[str] = None
parent: Optional[Collection] = None
model: Optional[str] = None
value: Optional[str] = None
description: Optional[str] = None
short_description: Optional[str] = None
main_src: Optional[str] = None
thumb_src: Optional[str] = None
no_of_documents: Optional[str] = None
is_inactive: Optional[str] = None
source_created_at: Optional[datetime] = None
source_updated_at: Optional[datetime] = None
first_published_at: Optional[datetime] = None
# Internal Fields
endpoint: ClassVar[str] = "collection"
Collection.update_forward_refs()
class Repository(Resource, MatchingMixin, HydrateMixin):
"""
The archive or library possessing the original, physical Document.
Attributes:
id (str): The ID# of the Repository.
name (str): The name of the repository
uri (str): The URI for the Repository's metadata on the Digital Archive API.
value (str): An alias to :attr:`~digitalarchive.models.Repository.name`
"""
name: str
uri: Optional[str] = None
value: Optional[str] = None
endpoint: ClassVar[str] = "repository"
class Publisher(Resource):
"""
An organization involved in the publication of the document.
Attributes:
id (str): The ID# of the Publisher.
name (str): The name of the Publisher.
"""
name: str
value: str
endpoint: ClassVar[str] = "publisher"
class Type(Resource):
"""
The type of a document (memo, report, etc).
Attributes:
id (str): The ID# of the Type.
name (str): The name of the resource Type.
"""
name: str
class Right(Resource):
"""
A copyright notice attached to the Document.
Attributes:
id (str): The ID# of the Copyright type.
name (str): The name of the Copyright type.
rights (str): A description of the copyright requirements.
"""
name: str
rights: str
class Classification(Resource):
"""
A classification marking applied to the original Document.
Attributes:
id (str): The ID# of the Classification type.
name (str): A description of the Classification type.
"""
name: str
class Document(Resource, MatchingMixin, HydrateMixin):
"""
A Document corresponding to a single record page on digitalarchive.wilsoncenter.org.
Note:
Avoid constructing Documents directly--use the `match` function to create
Documents by keyword search or by ID.
**Attributes present on all Documents:**
Attributes:
id (str): The ID# of the record in the DA.
title (str): The title of a document.
description (str): A one-sentence description of the document's content.
doc_date (str): The date of the document's creation in ``YYYYMMDD`` format.
frontend_doc_date (str): How the date appears when presented on the DA website.
slug (str): A url-friendly name for the document. Not currently used.
source_created_at(:class:`datetime.datetime`): Timestamp of when the Document was first added to the DA.
source_updated_at(:class:`datetime.datetime`): Timestamp of when the Document was last edited.
        first_published_at(:class:`datetime.datetime`): Timestamp of when the document was first made publicly
accessible.
**Attributes present only on hydrated Documents**
These attributes are aliases of :class:`UnhydratedField` until :func:`Document.hydrate` is called on the Document.
Attributes:
source (str): The archive where the document was retrieved from.
type (:class:`digitalarchive.models.Type`): The type of the document (meeting minutes, report, etc.)
rights (:obj:`list` of :class:`digitalarchive.models.Right`): A list of entities holding the copyright of the
Document.
        pdf_generated_at (str): The date that the combined source, translations, and transcriptions PDF was generated.
date_range_start (:class:`datetime.date`): A rounded-down date used to standardize approximate dates for
date-range matching.
sort_string_by_coverage (str): An alphanumeric identifier used by the API to sort search results.
main_src (str): The original Source that a Document was retrieved from.
model (str): The model of a record, used to differentiate collections and keywords in searches.
        donors (:obj:`list` of :class:`digitalarchive.models.Donor`): A list of donors whose funding made the acquisition
or translation of a document possible.
subjects (:obj:`list` of :class:`digitalarchive.models.Subject`): A list of subjects that the document is tagged
with.
transcripts (:obj:`list` of :class:`digitalarchive.models.Transcript`): A list of transcripts of the document's
contents.
translations (:obj:`list` of :class:`digitalarchive.models.Translation`): A list of translations of the original
document.
media_files (:obj:`list` of :class:`digitalarchive.models.MediaFile`): A list of attached original scans of the
document.
        languages (:obj:`list` of :class:`digitalarchive.models.Language`): A list of languages contained in the
document.
        creators (:obj:`list` of :class:`digitalarchive.models.Contributor`): A list of persons who authored the document.
original_coverages (:obj:`list` of :class:`digitalarchive.models.Coverage`): A list of geographic locations
referenced in the document.
collections (:obj:`list` of :class:`digitalarchive.models.Collection`): A list of Collections that contain this
document.
attachments (:obj:`list` of :class:`digitalarchive.models.Document`): A list of Documents that were attached to
the Document.
links (:obj:`list` of :class:`digitalarchive.models.Document`): A list of topically related documents.
        repositories (:obj:`list` of :class:`digitalarchive.models.Repository`): A list of archives/libraries
containing this document.
publishers (:obj:`list` of :class:`digitalarchive.models.Publisher`): A list of Publishers that released the
document.
classifications (:obj:`list` of :class:`digitalarchive.models.Publisher`): A list of security classification
markings present on the document.
"""
# pylint: disable=too-many-instance-attributes
# Required Fields
uri: str
title: str
doc_date: str
frontend_doc_date: str
slug: str
source_created_at: datetime
source_updated_at: datetime
first_published_at: datetime
# Optional Fields
description: Optional[str] = None
source: Optional[str] = None
type: Optional[List[Type]] = None
rights: Optional[Right] = None
pdf_generated_at: Optional[datetime] = None
date_range_start: Optional[date] = None
sort_string_by_coverage: Optional[str] = None
main_src: Optional[
Any
    ] = None # TODO: Never seen one of these in the wild, so not sure how to handle.
model: Optional[str] = None
# Optional Lists:
donors: Optional[List[Donor]] = None
subjects: Optional[List[Subject]] = None
transcripts: Optional[List[Transcript]] = None
translations: Optional[List[Translation]] = None
media_files: Optional[List[MediaFile]] = None
languages: Optional[List[Language]] = None
contributors: Optional[List[Contributor]] = None
creators: Optional[List[Contributor]] = None
original_coverages: Optional[List[Coverage]] = None
collections: Optional[List[Collection]] = None
attachments: Optional[List[Any]] = None # TODO: Should be "document" -- fix.
links: Optional[List[Any]] = None
repositories: Optional[List[Repository]] = None
publishers: Optional[List[Publisher]] = None
classifications: Optional[List[Classification]] = None
# Private properties
endpoint: ClassVar[str] = "record"
@validator("date_range_start", pre=True)
def _parse_date_range_start(cls, doc_date) -> date:
"""Transform a DA-style date string to a Python datetime."""
if isinstance(doc_date, date):
return doc_date
elif doc_date is None:
return doc_date
        # Try to parse it as an ISO-format date first.
try:
return date.fromisoformat(doc_date)
except ValueError:
pass
year = int(doc_date[:4])
month = int(doc_date[4:6])
day = int(doc_date[-2:])
return date(year, month, day)
@classmethod
def match(cls, **kwargs) -> matching.ResourceMatcher:
"""
Search for a Document by keyword, or fetch one by ID.
Matching on the Document model runs a full-text search using keywords passed via the title and description
keywords. Results can also be limited by dates or by related records, as described below.
Note:
Title and description keywords are not searched for individually. All
non-date or child record searches are concatenated to single querystring.
Note:
Collection and other related record searches use `INNER JOIN` logic when
passed multiple related resources.
**Allowed search fields:**
Args:
title (:obj:`str`, optional): Title search keywords.
description (:obj:`str`, optional): Title search keywords.
start_date (:class:`datetime.date`, optional): Return only Documents with a `doc_date` after the passed
`start_date`.
end_date (:class:`datetime.date`, optional): Return only Documents with a `doc_date` before the passed
`end_date`.
collections (:obj:`list` of :class:`digitalarchive.models.Collection`, optional): Restrict results to
Documents contained in all of the passed Collections.
publishers (:obj:`list` of :class:`digitalarchive.models.Publisher`, optional): Restrict results to
Documents published by all of the passed Publishers.
repositories (:obj:`list` of :class:`digitalarchive.models.Repository`, optional) Restrict results to
Documents contained in all of the passed Repositories.
coverages (:obj:`list` of :class:`digitalarchive.models.Coverage`, optional) Restrict results to Documents
relating to all of the passed geographical Coverages.
subjects (:obj:`list` of :class:`digitalarchive.models.Subject`) Restrict results to Documents tagged with
all of the passed subjects
            contributors (:obj:`list` of :class:`digitalarchive.models.Contributor`) Restrict results to Documents whose
                authors include all of the passed contributors.
            donors (:obj:`list` of :class:`digitalarchive.models.Donor`) Restrict results to Documents that were obtained or
                translated with support from all of the passed donors.
languages (:class:`digitalarchive.models.Language` or str) Restrict results to Documents by language of
original document. If passing a string, you must pass an ISO 639-2/B language code.
translation (:class:`digitalarchive.models.Translation`) Restrict results to Documents for which there
is a translation available in the passed Language.
theme (:class:`digitalarchive.models.Theme`) Restrict results to Documents belonging to the passed Theme.
Returns:
An instance of (:class:`digitalarchive.matching.ResourceMatcher`) containing any records responsive to the
search.
"""
# Limit search to only Documents (this excludes Collections from search result).
kwargs["model"] = "Record"
# Check that search keywords are valid.
allowed_search_fields = [
*cls.__fields__.keys(),
"start_date",
"end_date",
"themes",
"model",
]
for key in kwargs:
if key not in allowed_search_fields:
logging.error(
f"[!] {key} is not a valid search term for {cls}. Valid terms: {allowed_search_fields}"
)
raise exceptions.InvalidSearchFieldError
# Process date searches if they are present.
if any(key in kwargs.keys() for key in ["start_date", "end_date"]):
kwargs = Document._process_date_searches(kwargs)
# Process language searches if they are present.
if "languages" in kwargs.keys():
kwargs = Document._process_language_search(kwargs)
# Process any related model searches.
if any(
key in kwargs.keys()
for key in [
"collections",
"publishers",
"repositories",
"original_coverages",
"subjects",
"contributors",
"donors",
"languages",
"translations",
"themes",
]
):
kwargs = Document._process_related_model_searches(kwargs)
# Prepare the 'q' fulltext search field.
keywords = []
for field in ["name", "title", "description", "slug", "q"]:
if kwargs.get(field) is not None:
keywords.append(kwargs.pop(field))
kwargs["q"] = " ".join(keywords)
# Reformat fields that accept lists. This makes the queries inner joins rather than union all.
for field in ["donor", "subject", "contributor", "coverage", "collection"]:
if field in kwargs.keys():
kwargs[f"{field}[]"] = kwargs.pop(field)
# Run the match.
return matching.ResourceMatcher(cls, **kwargs)
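    # Illustrative usage note (added comment; not part of the original source). With the
    # keyword handling above, a date-bounded keyword search could look like:
    #
    #     results = Document.match(title="Berlin Wall",
    #                              start_date=date(1961, 1, 1),
    #                              end_date=date(1961, 12, 31))
    #
    # `results` is a digitalarchive.matching.ResourceMatcher; its accessors are defined in
    # that module and are not reproduced here.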
def hydrate(self, recurse: bool = False):
"""
Downloads the complete version of the Document with metadata for any related objects.
Args:
recurse (bool): If true, also hydrate subordinate and related records records.
"""
# Preserve unhydrated fields.
unhydrated_fields = copy.copy(self.__dict__)
# Hydrate
self.pull()
hydrated_fields = vars(self)
# Merge fields
for key, value in unhydrated_fields.items():
if (
hydrated_fields.get(key) is None
and unhydrated_fields.get(key) is not None
):
hydrated_fields[key] = value
# Re-initialize the object.
self.__init__(**hydrated_fields)
# Hydrate Assets
if recurse is True:
[transcript.hydrate() for transcript in self.transcripts]
[translation.hydrate() for translation in self.translations]
[media_file.hydrate() for media_file in self.media_files]
[collection.hydrate() for collection in self.collections]
@staticmethod
def _process_date_searches(query: dict) -> dict:
"""Run formatting and type checks against date search fields."""
date_search_terms = ["start_date", "end_date"]
# Handle open-ended date searches.
if "start_date" in query.keys() and "end_date" not in query.keys():
query["end_date"] = date.today()
elif "end_date" in query.keys() and "start_date" not in query.keys():
# Pull earliest record date from API.
da_date_range = api.get_date_range()
start_date = Document._parse_date_range_start(da_date_range["begin"])
query["start_date"] = start_date
# Transform datetime objects into formatted string and return
for field in date_search_terms:
search_date = query[field]
if isinstance(search_date, date):
query[
field
] = f"{search_date.year}{search_date.strftime('%m')}{search_date.strftime('%d')}"
            # If passed a string of the wrong length, raise.
elif isinstance(search_date, str) and len(search_date) != 8:
logging.error("[!] Invalid date string! Format is: YYYYMMDD")
raise exceptions.MalformedDateSearch
return query
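    # Worked example of the normalisation above (added comment; not part of the original
    # source): date(1961, 8, 13) becomes the DA-style string "19610813", and a search that
    # supplies only one bound gains the other (today's date, or the DA's earliest record date).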
@staticmethod
def _process_related_model_searches(query: dict) -> dict:
"""
Process and format searches by related models.
We have to re-name the fields from plural to singular to match the DA format.
"""
multi_terms = {
"collections": "collection",
"publishers": "publisher",
"repositories": "repository",
"original_coverages": "coverage",
"subjects": "subject",
"contributors": "contributor",
"donors": "donor",
"languages": "language",
"translations": "translation",
"themes": "theme",
}
# Rename each term to singular
for key, value in multi_terms.items():
if key in query.keys():
query[value] = query.pop(key)
# Build list of terms we need to parse
terms_to_parse = []
for term in multi_terms.values():
if term in query.keys():
terms_to_parse.append(term)
# transform each term list into a list of IDs
for term in terms_to_parse:
query[term] = [str(item.id) for item in query[term]]
# Special handling for langauges, translations, themes.
        # Unlike the terms above, these only accept a single value
for term in ["language", "translation", "theme"]:
if term in query.keys():
if len(query[term]) > 1:
logging.error(f"[!] Cannot filter for more than one %s", term)
raise exceptions.InvalidSearchFieldError
# Pull out the singleton.
query[term] = query[term][0]
# Return the reformatted query.
return query
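    # Example of the renaming above (added comment; not part of the original source):
    # {"subjects": [Subject(id="1", name="...")], "collections": [Collection(id="7", ...)]}
    # becomes {"subject": ["1"], "collection": ["7"]} before the query is sent to the API.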
@staticmethod
def _process_language_search(query: dict) -> dict:
"""
Process a language search
Looks up the DA's language ID# for user provided ISO 639-2/B language codes and updates the query.
Args:
query (dict): A ResourceMatcher query.
Returns:
dict: A query dict with a ISO 639-2/B string replaced with appropriate Language object.
"""
parsed_languages = []
for language in query["languages"]:
            # If the entry is already a Language instance, keep it as-is.
if isinstance(language, Language):
parsed_languages.append(language)
# If str, lookup ID# of language
elif isinstance(language, str) and len(language) == 3:
parsed_languages.append(Language(id=language))
else:
raise exceptions.MalformedLanguageSearch
# Replace kwarg with Langauge object.
query["languages"] = parsed_languages
return query
class Theme(Resource, HydrateMixin):
"""
A parent container for collections on a single geopolitical topic.
Note:
Themes never appear on any record model, but can be passed as a search param to Document.
Attributes:
id (str): The ID# of the Theme.
slug (str): A url-friendly version of the theme title.
title (str): The name of the Theme.
description (str): A short description of the Theme contents.
main_src: A URI for the Theme's banner image on the Digital Archive website.
has_map (str): A boolean value for whether the Theme has an accompanying map on the Digital Archive website.
has_timeline(str) : A boolean value for whether the Theme has a Timeline on the Digital Archive website.
featured_collections (list of :class:`~digitalarchive.models.Collection`): A list of related collections.
dates_with_events (list): A list of date ranges that the Theme has timeline entries for.
"""
# Required fields
slug: str
# Optional Fields
title: Optional[str] = None
value: Optional[str] = None
description: Optional[str] = None
main_src: Optional[str] = None
uri: Optional[str] = None
featured_resources: Optional[List[dict]] = None
has_map: Optional[str] = None
has_timeline: Optional[str] = None
featured_collections: Optional[List[Collection]] = None
dates_with_events: Optional[list] = None
# Private fields.
endpoint: ClassVar[str] = "theme"
def pull(self):
"""
        Downloads the complete Theme object from the DA and re-initializes the object.
        Note: The Theme pull method differs from the pull methods of other models as Themes use the `slug`
attribute as a primary key, rather than the `id` attribute.
"""
data = api.get(endpoint=self.endpoint, resource_id=self.slug)
self.__init__(**data)
|
[
"logging.error",
"logging.warning",
"copy.copy",
"datetime.date",
"digitalarchive.matching.ResourceMatcher",
"datetime.date.today",
"datetime.date.fromisoformat",
"pydantic.validator",
"digitalarchive.api.get",
"digitalarchive.exceptions.APIServerError",
"digitalarchive.api.get_date_range",
"digitalarchive.api.SESSION.get"
] |
[((10121, 10140), 'pydantic.validator', 'validator', (['"""parent"""'], {}), "('parent')\n", (10130, 10140), False, 'from pydantic import validator\n'), ((19770, 19809), 'pydantic.validator', 'validator', (['"""date_range_start"""'], {'pre': '(True)'}), "('date_range_start', pre=True)\n", (19779, 19809), False, 'from pydantic import validator\n'), ((1998, 2037), 'digitalarchive.matching.ResourceMatcher', 'matching.ResourceMatcher', (['cls'], {}), '(cls, **kwargs)\n', (2022, 2037), True, 'import digitalarchive.matching as matching\n'), ((2236, 2288), 'digitalarchive.api.get', 'api.get', ([], {'endpoint': 'self.endpoint', 'resource_id': 'self.id'}), '(endpoint=self.endpoint, resource_id=self.id)\n', (2243, 2288), True, 'import digitalarchive.api as api\n'), ((2487, 2511), 'copy.copy', 'copy.copy', (['self.__dict__'], {}), '(self.__dict__)\n', (2496, 2511), False, 'import copy\n'), ((4392, 4462), 'digitalarchive.api.SESSION.get', 'api.SESSION.get', (['f"""https://digitalarchive.wilsoncenter.org/{self.url}"""'], {}), "(f'https://digitalarchive.wilsoncenter.org/{self.url}')\n", (4407, 4462), True, 'import digitalarchive.api as api\n'), ((20326, 20348), 'datetime.date', 'date', (['year', 'month', 'day'], {}), '(year, month, day)\n', (20330, 20348), False, 'from datetime import datetime, date\n'), ((25592, 25631), 'digitalarchive.matching.ResourceMatcher', 'matching.ResourceMatcher', (['cls'], {}), '(cls, **kwargs)\n', (25616, 25631), True, 'import digitalarchive.matching as matching\n'), ((25969, 25993), 'copy.copy', 'copy.copy', (['self.__dict__'], {}), '(self.__dict__)\n', (25978, 25993), False, 'import copy\n'), ((32681, 32735), 'digitalarchive.api.get', 'api.get', ([], {'endpoint': 'self.endpoint', 'resource_id': 'self.slug'}), '(endpoint=self.endpoint, resource_id=self.slug)\n', (32688, 32735), True, 'import digitalarchive.api as api\n'), ((5113, 5226), 'digitalarchive.exceptions.APIServerError', 'exceptions.APIServerError', (['f"""[!] Hydrating asset ID#: %s failed with code: %s"""', 'self.id', 'response.status_code'], {}), "(f'[!] Hydrating asset ID#: %s failed with code: %s',\n self.id, response.status_code)\n", (5138, 5226), True, 'import digitalarchive.exceptions as exceptions\n'), ((20136, 20164), 'datetime.date.fromisoformat', 'date.fromisoformat', (['doc_date'], {}), '(doc_date)\n', (20154, 20164), False, 'from datetime import datetime, date\n'), ((27100, 27112), 'datetime.date.today', 'date.today', ([], {}), '()\n', (27110, 27112), False, 'from datetime import datetime, date\n'), ((23918, 24030), 'logging.error', 'logging.error', (['f"""[!] {key} is not a valid search term for {cls}. Valid terms: {allowed_search_fields}"""'], {}), "(\n f'[!] {key} is not a valid search term for {cls}. Valid terms: {allowed_search_fields}'\n )\n", (23931, 24030), False, 'import logging\n'), ((27269, 27289), 'digitalarchive.api.get_date_range', 'api.get_date_range', ([], {}), '()\n', (27287, 27289), True, 'import digitalarchive.api as api\n'), ((4965, 5041), 'logging.warning', 'logging.warning', (['"""[!] Unknown file format \'%s\' encountered!"""', 'self.extension'], {}), '("[!] Unknown file format \'%s\' encountered!", self.extension)\n', (4980, 5041), False, 'import logging\n'), ((27912, 27973), 'logging.error', 'logging.error', (['"""[!] Invalid date string! Format is: YYYYMMDD"""'], {}), "('[!] Invalid date string! Format is: YYYYMMDD')\n", (27925, 27973), False, 'import logging\n'), ((29525, 29587), 'logging.error', 'logging.error', (['f"""[!] 
Cannot filter for more than one %s"""', 'term'], {}), "(f'[!] Cannot filter for more than one %s', term)\n", (29538, 29587), False, 'import logging\n')]
|
from pyramid import interfaces
from zope import interface
from h.auth.policy._identity_base import IdentityBasedPolicy
from h.security import Identity
@interface.implementer(interfaces.IAuthenticationPolicy)
class TokenAuthenticationPolicy(IdentityBasedPolicy):
"""
A bearer token authentication policy.
This policy uses a bearer token which is validated against Token objects
in the DB. This can come from the `request.auth_token` (from
`h.auth.tokens.auth_token`) or in the case of Websocket requests the
GET parameter `access_token`.
"""
def identity(self, request):
"""
Get an Identity object for valid credentials.
Validate the token from the request by matching them to Token records
in the DB.
:param request: Pyramid request to inspect
:returns: An `Identity` object if the login is authenticated or None
"""
token_str = self._get_token(request)
if token_str is None:
return None
token = request.find_service(name="auth_token").validate(token_str)
if token is None:
return None
user = request.find_service(name="user").fetch(token.userid)
if user is None:
return None
return Identity(user=user)
def _get_token(self, request):
token_str = None
if self._is_ws_request(request):
token_str = request.GET.get("access_token", None)
return token_str or getattr(request, "auth_token", None)
@staticmethod
def _is_ws_request(request):
return request.path == "/ws"
|
[
"zope.interface.implementer",
"h.security.Identity"
] |
[((155, 210), 'zope.interface.implementer', 'interface.implementer', (['interfaces.IAuthenticationPolicy'], {}), '(interfaces.IAuthenticationPolicy)\n', (176, 210), False, 'from zope import interface\n'), ((1273, 1292), 'h.security.Identity', 'Identity', ([], {'user': 'user'}), '(user=user)\n', (1281, 1292), False, 'from h.security import Identity\n')]
|
import os, glob, sys
from turbo_seti.find_event.plot_dat import plot_dat
from turbo_seti import find_event as find
import numpy as np
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dir', default=os.getcwd())
parser.add_argument('--minHit', type=float, default=None)
parser.add_argument('--maxHit', type=float, default=None)
args = parser.parse_args()
path = args.dir
dat_files = glob.glob(path + "*.dat")
min_hit = 1e9
max_hit = 0
    if args.minHit is None or args.maxHit is None:
for file in dat_files:
tbl = find.read_dat(file)
min_freq, max_freq = min(tbl["Freq"]), max(tbl["Freq"])
if min_freq < min_hit:
min_hit = min_freq
if max_freq > max_hit:
max_hit = max_freq
else:
min_hit = args.minHit
max_hit = args.maxHit # set min and max hits by hand just to get this image
print("Lowest frequency hit: ", min_hit)
print("Highext frequency hit: ", max_hit)
plot_range = 2000*1e-6 # a 2000Hz width, adjusted to be in units of MHz
freq_range = np.arange(np.round(min_hit, 2), np.round(max_hit), plot_range)
outDir = path + "bautista-analysis/"
if not os.path.exists(outDir):
os.mkdir(outDir)
for center in freq_range:
plot_dat(path + "dat-list.lst",
path + "h5-list.lst",
path + "events-list.csv",
outdir=outDir,
check_zero_drift=False,
alpha=0.65,
color="black",
window=(center-0.001, center+0.001))
if __name__ == '__main__':
sys.exit(main())
|
[
"os.mkdir",
"turbo_seti.find_event.read_dat",
"argparse.ArgumentParser",
"os.getcwd",
"os.path.exists",
"glob.glob",
"numpy.round",
"turbo_seti.find_event.plot_dat.plot_dat"
] |
[((180, 205), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (203, 205), False, 'import argparse\n'), ((453, 478), 'glob.glob', 'glob.glob', (["(path + '*.dat')"], {}), "(path + '*.dat')\n", (462, 478), False, 'import os, glob, sys\n'), ((1166, 1186), 'numpy.round', 'np.round', (['min_hit', '(2)'], {}), '(min_hit, 2)\n', (1174, 1186), True, 'import numpy as np\n'), ((1188, 1205), 'numpy.round', 'np.round', (['max_hit'], {}), '(max_hit)\n', (1196, 1205), True, 'import numpy as np\n'), ((1272, 1294), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (1286, 1294), False, 'import os, glob, sys\n'), ((1304, 1320), 'os.mkdir', 'os.mkdir', (['outDir'], {}), '(outDir)\n', (1312, 1320), False, 'import os, glob, sys\n'), ((1360, 1554), 'turbo_seti.find_event.plot_dat.plot_dat', 'plot_dat', (["(path + 'dat-list.lst')", "(path + 'h5-list.lst')", "(path + 'events-list.csv')"], {'outdir': 'outDir', 'check_zero_drift': '(False)', 'alpha': '(0.65)', 'color': '"""black"""', 'window': '(center - 0.001, center + 0.001)'}), "(path + 'dat-list.lst', path + 'h5-list.lst', path +\n 'events-list.csv', outdir=outDir, check_zero_drift=False, alpha=0.65,\n color='black', window=(center - 0.001, center + 0.001))\n", (1368, 1554), False, 'from turbo_seti.find_event.plot_dat import plot_dat\n'), ((247, 258), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (256, 258), False, 'import os, glob, sys\n'), ((615, 634), 'turbo_seti.find_event.read_dat', 'find.read_dat', (['file'], {}), '(file)\n', (628, 634), True, 'from turbo_seti import find_event as find\n')]
|
"""
=========
sender.py
=========
Import data from server.
=========================
"""
from flask import Flask, jsonify, abort
from flask import make_response
from config import sender_links
app = Flask(__name__)
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
# ####### #
# Profile #
# ####### #
@app.route(sender_links['user'], methods=['GET'])
def get_users(task_id):
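# NOTE: assumes a module-level 'tasks' collection is defined elsewhere; it is
# not created in this file.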
task = [task for task in tasks if task['id'] == task_id]
if len(task) == 0:
abort(404)
return jsonify({'task': task[0]})
def get_products(task_ids):
pass
# ####### #
# Profile #
# ####### #
|
[
"flask.jsonify",
"flask.Flask"
] |
[((192, 207), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (197, 207), False, 'from flask import Flask, jsonify\n'), ((545, 571), 'flask.jsonify', 'jsonify', (["{'task': task[0]}"], {}), "({'task': task[0]})\n", (552, 571), False, 'from flask import Flask, jsonify\n'), ((280, 311), 'flask.jsonify', 'jsonify', (["{'error': 'Not found'}"], {}), "({'error': 'Not found'})\n", (287, 311), False, 'from flask import Flask, jsonify\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `marble` package."""
import unittest
import marble
import numpy as np
import sympl as sp
test_era5_filename = '/home/twine/data/era5/era5-interp-2016.nc'
def get_test_state(pc_value=0.):
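# Builds a sympl-style model state: each field is a DataArray of ones scaled
# by pc_value over that field's latent dimension.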
n_features = marble.components.marble.name_feature_counts
state = {
'time': sp.timedelta(0),
'liquid_water_static_energy_components': sp.DataArray(
np.ones([n_features['sl']]) * pc_value,
dims=('sl_latent',), attrs={'units': ''}),
'total_water_mixing_ratio_components': sp.DataArray(
np.ones([n_features['rt']]) * pc_value,
dims=('rt_latent',), attrs={'units': ''}),
'cloud_water_mixing_ratio_components': sp.DataArray(
np.ones([n_features['rcld']]) * pc_value,
dims=('rcld_latent',), attrs={'units': ''}),
'rain_water_mixing_ratio_components': sp.DataArray(
np.ones([n_features['rrain']]) * pc_value,
dims=('rrain_latent',), attrs={'units': ''}),
'cloud_fraction_components': sp.DataArray(
np.ones([n_features['cld']]) * pc_value,
dims=('cld_latent',), attrs={'units': ''}),
'liquid_water_static_energy_components_horizontal_advective_tendency': sp.DataArray(
np.ones([n_features['sl']]) * pc_value,
dims=('sl_latent',), attrs={'units': ''}),
'total_water_mixing_ratio_components_horizontal_advective_tendency': sp.DataArray(
np.ones([n_features['sl']]) * pc_value,
dims=('rt_latent',), attrs={'units': ''}),
'vertical_wind_components': sp.DataArray(
np.ones([n_features['w']]) * pc_value,
dims=('w_latent',), attrs={'units': ''}),
}
return state
class TestPrincipalComponentConversions(unittest.TestCase):
"""Tests for `marble` package."""
def test_convert_input_zero_latent_to_height_and_back(self):
state = get_test_state(pc_value=0.)
converter = marble.InputPrincipalComponentsToHeight()
inverse_converter = marble.InputHeightToPrincipalComponents()
intermediate = converter(state)
intermediate['time'] = state['time']
result = inverse_converter(intermediate)
for name in result.keys():
self.assertIn(name, state)
self.assertEqual(result[name].shape, state[name].shape, name)
self.assertTrue(np.allclose(result[name].values, state[name].values), name)
def test_convert_input_nonzero_latent_to_height_and_back(self):
state = get_test_state(pc_value=0.6)
converter = marble.InputPrincipalComponentsToHeight()
inverse_converter = marble.InputHeightToPrincipalComponents()
intermediate = converter(state)
intermediate['time'] = state['time']
result = inverse_converter(intermediate)
for name in result.keys():
self.assertIn(name, state)
self.assertEqual(result[name].shape, state[name].shape, name)
self.assertTrue(np.allclose(result[name].values, state[name].values), name)
def test_convert_diagnostic_zero_latent_to_height(self):
"""
This only tests that the conversion runs without errors, it does not
check anything about the output value.
"""
state = get_test_state(pc_value=0.)
converter = marble.DiagnosticPrincipalComponentsToHeight()
result = converter(state)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"marble.DiagnosticPrincipalComponentsToHeight",
"numpy.allclose",
"sympl.timedelta",
"numpy.ones",
"marble.InputPrincipalComponentsToHeight",
"marble.InputHeightToPrincipalComponents"
] |
[((3497, 3512), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3510, 3512), False, 'import unittest\n'), ((344, 359), 'sympl.timedelta', 'sp.timedelta', (['(0)'], {}), '(0)\n', (356, 359), True, 'import sympl as sp\n'), ((2010, 2051), 'marble.InputPrincipalComponentsToHeight', 'marble.InputPrincipalComponentsToHeight', ([], {}), '()\n', (2049, 2051), False, 'import marble\n'), ((2080, 2121), 'marble.InputHeightToPrincipalComponents', 'marble.InputHeightToPrincipalComponents', ([], {}), '()\n', (2119, 2121), False, 'import marble\n'), ((2626, 2667), 'marble.InputPrincipalComponentsToHeight', 'marble.InputPrincipalComponentsToHeight', ([], {}), '()\n', (2665, 2667), False, 'import marble\n'), ((2696, 2737), 'marble.InputHeightToPrincipalComponents', 'marble.InputHeightToPrincipalComponents', ([], {}), '()\n', (2735, 2737), False, 'import marble\n'), ((3382, 3428), 'marble.DiagnosticPrincipalComponentsToHeight', 'marble.DiagnosticPrincipalComponentsToHeight', ([], {}), '()\n', (3426, 3428), False, 'import marble\n'), ((436, 463), 'numpy.ones', 'np.ones', (["[n_features['sl']]"], {}), "([n_features['sl']])\n", (443, 463), True, 'import numpy as np\n'), ((604, 631), 'numpy.ones', 'np.ones', (["[n_features['rt']]"], {}), "([n_features['rt']])\n", (611, 631), True, 'import numpy as np\n'), ((772, 801), 'numpy.ones', 'np.ones', (["[n_features['rcld']]"], {}), "([n_features['rcld']])\n", (779, 801), True, 'import numpy as np\n'), ((943, 973), 'numpy.ones', 'np.ones', (["[n_features['rrain']]"], {}), "([n_features['rrain']])\n", (950, 973), True, 'import numpy as np\n'), ((1107, 1135), 'numpy.ones', 'np.ones', (["[n_features['cld']]"], {}), "([n_features['cld']])\n", (1114, 1135), True, 'import numpy as np\n'), ((1309, 1336), 'numpy.ones', 'np.ones', (["[n_features['sl']]"], {}), "([n_features['sl']])\n", (1316, 1336), True, 'import numpy as np\n'), ((1507, 1534), 'numpy.ones', 'np.ones', (["[n_features['sl']]"], {}), "([n_features['sl']])\n", (1514, 1534), True, 'import numpy as np\n'), ((1664, 1690), 'numpy.ones', 'np.ones', (["[n_features['w']]"], {}), "([n_features['w']])\n", (1671, 1690), True, 'import numpy as np\n'), ((2432, 2484), 'numpy.allclose', 'np.allclose', (['result[name].values', 'state[name].values'], {}), '(result[name].values, state[name].values)\n', (2443, 2484), True, 'import numpy as np\n'), ((3048, 3100), 'numpy.allclose', 'np.allclose', (['result[name].values', 'state[name].values'], {}), '(result[name].values, state[name].values)\n', (3059, 3100), True, 'import numpy as np\n')]
|
### Figure 5 C and E - Obenhaus et al.
# Figure S6 A, C, E and F - Obenhaus et al.
#
# NN distance analysis
# Pairwise distance analysis
#
import sys, os
import os.path
import numpy as np
import pandas as pd
import datajoint as dj
import cmasher as cmr
from tabulate import tabulate
import itertools
# Make plots pretty
import seaborn as sns
sns.set(style='white')
# Prevent bug in figure export as pdf:
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
##### IMPORTS ###########################################################################
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from general import print_mannwhitneyu, print_wilcoxon
from dj_plotter.helpers.plotting_helpers import make_linear_colormap
from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary
##### LOAD SCHEMA COMPONENTS #############################################################
from dj_schemas.dj_conn import *
##### EXPORT LOCATION ####################################################################
figure_dir = 'YOUR_EXPORT_DIRECTORY/'
def pairw_dist(animals,
col_dict,
param_hash_session='cf83e1357eefb8bd',
param_hash_id_cell='standard',
region='MEC',
pairwise_dist_param='A',
cutoff_n_starters=0,
plot=True
):
# Print col_dict
print(f'\nReceived the following column dictionary \n{col_dict}\n')
# Brain region filter
assert region in ['MEC','PAS'], f'Region "{region}" not understood. Choose "MEC" or "PAS"'
all_sessions = (Session.proj('animal_name') * FilteredSessions
& [f'animal_name = "{animal}"' for animal in animals]
& f'param_hash_session = "{param_hash_session}"'
)
# Print pairw dist. parameter
score_, score_cutoff_ = (PairwDistParams & f'pairwise_dist_param = "{pairwise_dist_param}"').fetch1('score','score_cutoff')
print(f'Filtering pairwise distances by {score_} > {score_cutoff_}')
pairw = (Session.proj('animal_name') * PairwDist.Cells * PairwDist.PairwD
& all_sessions.proj() \
& f'param_hash_id_cell = "{param_hash_id_cell}"'
& f'pairwise_dist_param = "{pairwise_dist_param}"'
& f'region = "{region}"'
& f'n_startr > {cutoff_n_starters}')
pairw_df = pd.DataFrame(pairw.fetch(as_dict=True))
pairw_df.dropna(inplace=True)
colors = make_linear_colormap(pairw_df.animal_name, categorical=True, cmap='cmr.guppy')
### COLS TO NORMALIZE #################################################################################
cols_to_norm = col_dict['cols_to_norm'] # ['mean_pairw_dist_shuffref', 'mean_pairw_dist']
cols_to_norm_label = col_dict['cols_to_norm_label'] # ['Ref', 'Data']
norm_to = col_dict['norm_to'] # mean_pairw_dist_shuffall'
cols = col_dict['cols'] # 'animal_name'
# Normalize
pairw_df_norm = norm_pairw_nn_df(pairw_df, cols_to_norm, cols, norm_to)
pairw_df_norm.reset_index(drop=True, inplace=True)
# Plot
if plot:
plot_pairw_nn_summary(pairw_df_norm,
cols_to_norm,
colors=colors,
xlabels=cols_to_norm_label,
save_path=figure_dir,
label='PairwD')
# Print statistics
print(f'Data over {len(pairw_df.session_name)} datasets (careful! Can be multiplane!) ({len(set(pairw_df.animal_name))} animals)')
print(f'{set(pairw_df.animal_name)}')
# Calculate p values MannWhithney and 1 sample Wilcoxon rank
pairw_df_norm_ = pairw_df_norm[cols_to_norm]
results = pd.DataFrame(columns = pairw_df_norm_.columns,
index = pairw_df_norm_.columns)
for (label1, column1), (label2, column2) in itertools.combinations(pairw_df_norm_.items(), 2):
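# Each pairwise comparison fills both symmetric entries of the p-value matrix;
# print_mannwhitneyu presumably returns a (statistic, p-value) tuple.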
_ ,results.loc[label1, label2] = _ ,results.loc[label2, label1] = print_mannwhitneyu(column1, column2, label_A=label1, label_B=label2)
#print(tabulate(results, headers='keys', tablefmt='psql'))
print('\nWilcoxon signed rank test (against 1.):')
for col in cols_to_norm:
try:
print_wilcoxon(pairw_df_norm[col] - 1., label=col)
except ValueError:
print(f'Skipping column {col} (all zero?)')
# Print some more stats
print('Mean and SEM for PairwDist results')
for col in cols_to_norm:
mean_col, sem_col = np.nanmean(pairw_df_norm[col]), np.std(pairw_df_norm[col]) / np.sqrt(len(pairw_df_norm[col]))
print(f'{col:<30} | Mean ± SEM: {mean_col:.2f} ± {sem_col:.2f}')
return pairw_df_norm, len(set(pairw_df.animal_name)), len(pairw_df.session_name)
def group_nn_dist(animals,
col_dict,
param_hash_session='cf83e1357eefb8bd',
param_hash_id_cell = 'standard',
region='MEC',
pairwise_dist_param='A',
cutoff_n_starters=0,
nn_group_number=5,
plot=True
):
'''
Like pairw_dist() but for PairwDist.NN instead of PairwDist.PairwD, i.e. grouped NN results
nn_group_number : default 5 : Number of NN to consider (group size).
Careful: Zero indexed! 0 = first nearest neighbour
'''
# Print col_dict
print(f'\nReceived the following column dictionary \n{col_dict}\n')
# Brain region filter
assert region in ['MEC','PAS'], f'Region "{region}" not understood. Choose "MEC" or "PAS"'
all_sessions = (Session.proj('animal_name') * FilteredSessions
& [f'animal_name = "{animal}"' for animal in animals]
& f'param_hash_session = "{param_hash_session}"'
)
# Print pairw dist. parameter
score_, score_cutoff_ = (PairwDistParams & f'pairwise_dist_param = "{pairwise_dist_param}"').fetch1('score','score_cutoff')
print(f'Filtering pairwise distances by {score_} > {score_cutoff_}')
nn = (Session.proj('animal_name') * PairwDist.Cells * PairwDist.NN
& all_sessions.proj()
& f'param_hash_id_cell = "{param_hash_id_cell}"'
& f'pairwise_dist_param = "{pairwise_dist_param}"'
& f'region = "{region}"'
& f'n_startr > {cutoff_n_starters}')
nn_df = pd.DataFrame(nn.fetch(as_dict=True))
nn_df.dropna(inplace=True) # Important here because apparently some of the stuff can be None
colors = make_linear_colormap(nn_df.animal_name, categorical=True, cmap='cmr.guppy')
# Subselect a specific nn_number = number of NN in result (group size)
data_cols_pairwDist_NN = ['mean_nn','mean_nn_shuff_all',
'mean_nn_shuff_ref','mean_nn_csr'] # All data columns in table
for col in data_cols_pairwDist_NN:
nn_df[col] = [res[nn_group_number] for res in nn_df[col]]
### COLS TO NORMALIZE #################################################################################
cols_to_norm = col_dict['cols_to_norm']
cols_to_norm_label = col_dict['cols_to_norm_label']
norm_to = col_dict['norm_to']
cols = col_dict['cols']
# Normalize
nn_df_norm = norm_pairw_nn_df(nn_df, cols_to_norm, cols, norm_to)
nn_df_norm.reset_index(drop=True, inplace=True)
# Plot
if plot:
plot_pairw_nn_summary(nn_df_norm,
cols_to_norm,
colors=colors,
xlabels=cols_to_norm_label,
save_path=figure_dir,
label='NN')
# Print statistics
print(f'Data over {len(nn_df.session_name)} datasets (careful! Can be multiplane!) ({len(set(nn_df.animal_name))} animals)')
print(f'{set(nn_df.animal_name)}')
# Calculate p values MannWhithney and 1 sample Wilcoxon rank
nn_df_norm_ = nn_df_norm[cols_to_norm]
results = pd.DataFrame(columns = nn_df_norm_.columns,
index = nn_df_norm_.columns)
for (label1, column1), (label2, column2) in itertools.combinations(nn_df_norm_.items(), 2):
_ ,results.loc[label1, label2] = _ ,results.loc[label2, label1] = print_mannwhitneyu(column1, column2, label_A=label1, label_B=label2)
#print(tabulate(results, headers='keys', tablefmt='psql'))
print('\nWilcoxon signed rank test (against 1.):')
for col in cols_to_norm:
#_, onesample_p_data = ttest_1samp(pairw_df_norm[col], 1.)
try:
print_wilcoxon(nn_df_norm[col] - 1., label=col)
except ValueError:
print(f'Skipping column {col} (all zero?)')
# Print some more stats
print('Mean and SEM for NN results')
for col in cols_to_norm:
mean_col, sem_col = np.nanmean(nn_df_norm[col]), np.std(nn_df_norm[col]) / np.sqrt(len(nn_df_norm[col]))
print(f'{col:<30} | Mean ± SEM: {mean_col:.2f} ± {sem_col:.2f}')
return nn_df_norm, len(set(nn_df.animal_name)), len(set(nn_df.session_name))
if __name__ == "__main__":
grid_mice = [
'82913','88592', '87244', '60480',
'97046','89841'
]
ov_mice = [
'87187','88106','87245','90222',
'94557','89622'
]
all_animals = [
'90222','90218','90647',
'82913','88592','89622',
'87244','89841','60480',
'87245','87187','88106',
'94557','97045','97046',
]
animals = grid_mice
pairwise_dist_param = "A"
param_hash_id_cell = 'standard'
region = 'MEC'
# Cutoff number of cells
cutoff_n_starters = 15.
# For NN
nn_group_number = 5
###### PAIRWISE DISTANCES ####################################################################################
# print(f'Creating pairwise distance figure for {len(animals)} animal(s)')
# print(animals)
# print('\n')
# # Create column dictionary
# col_dict = {}
# col_dict['cols_to_norm'] = ['mean_pairw_dist_shuffall', 'mean_pairw_dist_shuffref', 'mean_pairw_dist']
# #mean_pairw_dist_shuffref, mean_pairw_dist_shuffall
# col_dict['cols_to_norm_label'] = ['All', 'Ref', 'Data']
# col_dict['norm_to'] = 'mean_pairw_dist_shuffall'
# col_dict['cols'] = 'animal_name'
# pairw_dist(animals,
# col_dict,
# param_hash_session='cf83e1357eefb8bd',
# param_hash_id_cell=param_hash_id_cell,
# region=region,
# pairwise_dist_param=pairwise_dist_param,
# cutoff_n_starters=cutoff_n_starters,
# )
####### NN DISTANCES ###########################################################################################
print('\n########################################################################################################')
print(f'\nCreating NN distance figure for {len(animals)} animal(s)')
print(animals)
print('\n')
# Create column dictionary
col_dict = {}
col_dict['cols_to_norm'] = ['mean_nn_shuff_all', 'mean_nn_shuff_ref', 'mean_nn']
col_dict['cols_to_norm_label'] = ['All', 'Ref', 'Data']
col_dict['norm_to'] = 'mean_nn_shuff_all'
col_dict['cols'] = 'animal_name'
group_nn_dist(animals,
col_dict,
param_hash_session='cf83e1357eefb8bd',
param_hash_id_cell=param_hash_id_cell,
region=region,
pairwise_dist_param=pairwise_dist_param,
cutoff_n_starters=cutoff_n_starters,
nn_group_number=nn_group_number,
plot=True)
print(figure_dir)
print('Success.')
|
[
"pandas.DataFrame",
"helpers_topography.notebooks.pairw_distances.plot_pairw_nn_summary",
"numpy.std",
"helpers_topography.notebooks.pairw_distances.norm_pairw_nn_df",
"os.path.dirname",
"dj_plotter.helpers.plotting_helpers.make_linear_colormap",
"numpy.nanmean",
"seaborn.set",
"general.print_wilcoxon",
"general.print_mannwhitneyu"
] |
[((352, 374), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (359, 374), True, 'import seaborn as sns\n'), ((2598, 2676), 'dj_plotter.helpers.plotting_helpers.make_linear_colormap', 'make_linear_colormap', (['pairw_df.animal_name'], {'categorical': '(True)', 'cmap': '"""cmr.guppy"""'}), "(pairw_df.animal_name, categorical=True, cmap='cmr.guppy')\n", (2618, 2676), False, 'from dj_plotter.helpers.plotting_helpers import make_linear_colormap\n'), ((3136, 3191), 'helpers_topography.notebooks.pairw_distances.norm_pairw_nn_df', 'norm_pairw_nn_df', (['pairw_df', 'cols_to_norm', 'cols', 'norm_to'], {}), '(pairw_df, cols_to_norm, cols, norm_to)\n', (3152, 3191), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((3899, 3973), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'pairw_df_norm_.columns', 'index': 'pairw_df_norm_.columns'}), '(columns=pairw_df_norm_.columns, index=pairw_df_norm_.columns)\n', (3911, 3973), True, 'import pandas as pd\n'), ((6797, 6872), 'dj_plotter.helpers.plotting_helpers.make_linear_colormap', 'make_linear_colormap', (['nn_df.animal_name'], {'categorical': '(True)', 'cmap': '"""cmr.guppy"""'}), "(nn_df.animal_name, categorical=True, cmap='cmr.guppy')\n", (6817, 6872), False, 'from dj_plotter.helpers.plotting_helpers import make_linear_colormap\n'), ((7556, 7608), 'helpers_topography.notebooks.pairw_distances.norm_pairw_nn_df', 'norm_pairw_nn_df', (['nn_df', 'cols_to_norm', 'cols', 'norm_to'], {}), '(nn_df, cols_to_norm, cols, norm_to)\n', (7572, 7608), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((8292, 8360), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'nn_df_norm_.columns', 'index': 'nn_df_norm_.columns'}), '(columns=nn_df_norm_.columns, index=nn_df_norm_.columns)\n', (8304, 8360), True, 'import pandas as pd\n'), ((3281, 3417), 'helpers_topography.notebooks.pairw_distances.plot_pairw_nn_summary', 'plot_pairw_nn_summary', (['pairw_df_norm', 'cols_to_norm'], {'colors': 'colors', 'xlabels': 'cols_to_norm_label', 'save_path': 'figure_dir', 'label': '"""PairwD"""'}), "(pairw_df_norm, cols_to_norm, colors=colors, xlabels=\n cols_to_norm_label, save_path=figure_dir, label='PairwD')\n", (3302, 3417), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((4179, 4247), 'general.print_mannwhitneyu', 'print_mannwhitneyu', (['column1', 'column2'], {'label_A': 'label1', 'label_B': 'label2'}), '(column1, column2, label_A=label1, label_B=label2)\n', (4197, 4247), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((7696, 7825), 'helpers_topography.notebooks.pairw_distances.plot_pairw_nn_summary', 'plot_pairw_nn_summary', (['nn_df_norm', 'cols_to_norm'], {'colors': 'colors', 'xlabels': 'cols_to_norm_label', 'save_path': 'figure_dir', 'label': '"""NN"""'}), "(nn_df_norm, cols_to_norm, colors=colors, xlabels=\n cols_to_norm_label, save_path=figure_dir, label='NN')\n", (7717, 7825), False, 'from helpers_topography.notebooks.pairw_distances import norm_pairw_nn_df, plot_pairw_nn_summary\n'), ((8563, 8631), 'general.print_mannwhitneyu', 'print_mannwhitneyu', (['column1', 'column2'], {'label_A': 'label1', 'label_B': 'label2'}), '(column1, column2, label_A=label1, label_B=label2)\n', (8581, 8631), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((611, 636), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (626, 636), False, 'import sys, os\n'), ((4421, 4472), 'general.print_wilcoxon', 'print_wilcoxon', (['(pairw_df_norm[col] - 1.0)'], {'label': 'col'}), '(pairw_df_norm[col] - 1.0, label=col)\n', (4435, 4472), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((4689, 4719), 'numpy.nanmean', 'np.nanmean', (['pairw_df_norm[col]'], {}), '(pairw_df_norm[col])\n', (4699, 4719), True, 'import numpy as np\n'), ((8872, 8920), 'general.print_wilcoxon', 'print_wilcoxon', (['(nn_df_norm[col] - 1.0)'], {'label': 'col'}), '(nn_df_norm[col] - 1.0, label=col)\n', (8886, 8920), False, 'from general import print_mannwhitneyu, print_wilcoxon\n'), ((9138, 9165), 'numpy.nanmean', 'np.nanmean', (['nn_df_norm[col]'], {}), '(nn_df_norm[col])\n', (9148, 9165), True, 'import numpy as np\n'), ((4721, 4747), 'numpy.std', 'np.std', (['pairw_df_norm[col]'], {}), '(pairw_df_norm[col])\n', (4727, 4747), True, 'import numpy as np\n'), ((9167, 9190), 'numpy.std', 'np.std', (['nn_df_norm[col]'], {}), '(nn_df_norm[col])\n', (9173, 9190), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
simulations for sensitivity to results
Author: <NAME>
Date: May, 2020
"""
import csv
import time as mytime
############################################ENVIRONMENT###################################################################################
exec(open('./environment.py').read()) # execute the environment script
####################################################### STANDARD (MARKET)#############################################################################
numsim = 4 # number of years of simulations
start_time=mytime.time() # set time for starting
risk_life = 0.5 # risk level by moving outside
social_radius = 2 # social radius within which interaction is possible
eff_quarantined = 0.25 # efficiency of contact tracing symptomatic for treatments at hospitals
hospital_capacity = 0.5 # the capacity of the hospitals (in reference to the general population)
essentials_move = 8 # move out only for essentials
exec(open('./market_modules.py').read()) # execute the main script
outfname = 'sim_standard_market.csv'
with open(outfname,'w') as outfile:
allsimdat=csv.writer(outfile)
for rep in range(numsim):
exec(open('./loop_modules.py').read())
with open('./simulation_data.csv', 'r') as csvfile:
onesimdat = csv.reader(csvfile, delimiter=',')
header = next(onesimdat)
header.append('NoSim')
if rep==0:
allsimdat.writerow(header)
for row in onesimdat:
row.append(str(rep))
allsimdat.writerow(row)
print('Done, simulation %i, with standard parameters, ended at %.4f hours '%(rep+1,(mytime.time()-start_time)/3600. ))
#os.rename('sim_movie.mp4', 'movie_standard_market_rep_%i.mp4' %(rep+1) )
####################################################### STANDARD (MARKET) WITH MASK #############################################################################
numsim = 4 # number of years of simulations
start_time=mytime.time() # set time for starting
risk_life = 0.5 # risk level by moving outside
social_radius = 2 # social radius within which interaction is possible
eff_quarantined = 0.25 # efficiency of contact tracing symptomatic for treatments at hospitals
hospital_capacity = 0.5 # the capacity of the hospitals (in reference to the general population)
essentials_move = 8 # move out only for essentials
wearing_mask = 0.5 # prob of wearing mask
exec(open('./market_mask_modules.py').read()) # execute the main script
outfname = 'sim_standard_market.csv'
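# NOTE: same output filename as the run above, so the mask-scenario results
# overwrite the earlier CSV unless the name is changed first.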
with open(outfname,'w') as outfile:
allsimdat=csv.writer(outfile)
for rep in range(numsim):
exec(open('./loop_modules.py').read())
with open('./simulation_data.csv', 'r') as csvfile:
onesimdat = csv.reader(csvfile, delimiter=',')
header = next(onesimdat)
header.append('NoSim')
if rep==0:
allsimdat.writerow(header)
for row in onesimdat:
row.append(str(rep))
allsimdat.writerow(row)
print('Done, simulation %i, with standard parameters, ended at %.4f hours '%(rep+1,(mytime.time()-start_time)/3600. ))
#os.rename('sim_movie.mp4', 'movie_standard_market_rep_%i.mp4' %(rep+1) )
|
[
"csv.reader",
"csv.writer",
"time.time"
] |
[((581, 594), 'time.time', 'mytime.time', ([], {}), '()\n', (592, 594), True, 'import time as mytime\n'), ((2050, 2063), 'time.time', 'mytime.time', ([], {}), '()\n', (2061, 2063), True, 'import time as mytime\n'), ((1142, 1161), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (1152, 1161), False, 'import csv\n'), ((2659, 2678), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (2669, 2678), False, 'import csv\n'), ((1323, 1357), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (1333, 1357), False, 'import csv\n'), ((2840, 2874), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (2850, 2874), False, 'import csv\n'), ((1699, 1712), 'time.time', 'mytime.time', ([], {}), '()\n', (1710, 1712), True, 'import time as mytime\n'), ((3216, 3229), 'time.time', 'mytime.time', ([], {}), '()\n', (3227, 3229), True, 'import time as mytime\n')]
|
from swampdragon.serializers.model_serializer import ModelSerializer
from swampdragon.testing.dragon_testcase import DragonTestCase
from .models import TextModel, SDModel
from datetime import datetime
from django.db import models
# to make sure none of the ModelSerializer variables are clobbering the data
MODEL_KEYWORDS = ('data', )
# TODO: support the rest of these field names
# MODEL_KEYWORDS = ('data', 'opts', 'initial', 'base_fields', 'm2m_fields', 'related_fields', 'errors')
class KeywordModel(SDModel):
data = models.TextField()
# TODO: support the rest of these field names
# opts = models.TextField()
# initial = models.TextField()
# base_fields = models.TextField()
# m2m_fields = models.TextField()
# related_fields = models.TextField()
# errors = models.TextField()
class KeywordModelSerializer(ModelSerializer):
class Meta:
model = KeywordModel
publish_fields = MODEL_KEYWORDS
update_fields = MODEL_KEYWORDS
class DateModel(SDModel):
date = models.DateTimeField()
class DateModelSerializer(ModelSerializer):
class Meta:
model = DateModel
publish_fields = ('date')
update_fields = ('date')
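# note: ('date') here and ('text') below are plain strings, not one-element
# tuples; a trailing comma, e.g. ('date',), would make them tuples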
class TextModelSerializer(ModelSerializer):
class Meta:
model = TextModel
publish_fields = ('text')
update_fields = ('text')
class TestModelSerializer(DragonTestCase):
def test_deserialize_model(self):
data = {'text': 'foo'}
serializer = TextModelSerializer(data)
model_instance = serializer.save()
self.assertEqual(model_instance.text, data['text'])
def test_passing_invalid_data(self):
foo = 'text'
with self.assertRaises(Exception):
TextModelSerializer(foo)
def test_ignore_non_model_fields(self):
data = {'text': 'foo', 'random_field': 'val'}
serializer = TextModelSerializer(data)
model_instance = serializer.deserialize()
self.assertEqual(model_instance.text, data['text'])
def test_deserialize_field(self):
date = datetime.now()
data = {'date': str(date)}
serializer = DateModelSerializer(data)
object = serializer.save()
self.assertEqual(object.date, date)
def test_deserialize_keyword_field(self):
data = dict(zip(MODEL_KEYWORDS, MODEL_KEYWORDS))
serializer = KeywordModelSerializer(data)
object = serializer.save()
for attr in MODEL_KEYWORDS:
self.assertEqual(getattr(object, attr), attr)
|
[
"django.db.models.DateTimeField",
"django.db.models.TextField",
"datetime.datetime.now"
] |
[((529, 547), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (545, 547), False, 'from django.db import models\n'), ((1030, 1052), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1050, 1052), False, 'from django.db import models\n'), ((2080, 2094), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2092, 2094), False, 'from datetime import datetime\n')]
|
import base64
import json
import os
import zlib
from urllib.request import urlretrieve
import boto3
import mrcnn.model as modellib
import numpy as np
import pandas as pd
import skimage.io
from mrcnn import utils
from mrcnn.config import Config
from superai.meta_ai import BaseModel
s3 = boto3.client("s3")
_MODEL_PATH = os.path.join("sagify_base/local_test/test_dir/", "model")
# _MODEL_PATH = "s3://canotic-ai/model/mask-rcnn-model.tar.gz"
# _MODEL_PATH = 'Mask_RCNN' # Path for models
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = [
"BG",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"backpack",
"umbrella",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"dining table",
"toilet",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
class ModelService(BaseModel):
def __init__(self):
super().__init__()
self.model = None
self.initialized = False
def initialize(self, context):
class InferenceConfig(Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
NAME = "inference"
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
NUM_CLASSES = 1 + 80 # COCO has 80 classes
config = InferenceConfig()
config.display()
print("Initialised class...")
self.initialized = True
properties = context.system_properties
_MODEL_PATH = properties.get("model_dir")
if self.model is None:
print("Model Content : ", os.listdir(_MODEL_PATH))
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(_MODEL_PATH, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
try:
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=os.path.join("logs"), config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
self.model = model
except RuntimeError:
raise MemoryError
return self.model
def predict_from_image(self, path, class_id=3):
image = skimage.io.imread(path)
# Run detection
clf = self.model
print("model retrieved.")
results = clf.detect([image], verbose=0)
print("detection on image done.")
# Visualize results
r = results[0]
# get indices corresponding to unwanted classes
indices_to_remove = np.where(r["class_ids"] != class_id)
# remove corresponding entries from `r`
new_masks = np.delete(r["masks"], indices_to_remove, axis=2)
scores = np.delete(r["scores"], indices_to_remove, axis=0)
aggregate_mask = np.logical_not(new_masks.any(axis=2))
class_ids = np.delete(r["class_ids"], indices_to_remove, axis=0)
return {
"new_masks": new_masks,
"aggregate_mask": aggregate_mask,
"scores": scores,
"class_ids": class_ids,
}
def predict_intermediate(self, input):
image_urls = input["image_url"]
predictions = []
for i, url in enumerate(image_urls):
image_path = f"image_{i}.jpg"
# download image
urlretrieve(url, image_path)
print("image retrieved")
image_path = os.getcwd() + "/" + image_path
prediction = self.predict_from_image(image_path)
print("predict from image done.")
new_masks = prediction["new_masks"]
aggregate_mask = prediction["aggregate_mask"]
n_masks = new_masks.shape[-1]
pred = []
for inst in range(n_masks):
pred.append(self._handle_mask(prediction, inst))
print(f"processing mask number {inst} done")
# num_workers = mp.cpu_count() // 4
# with Pool(num_workers) as pool:
# result = [pool.apply_async(_handle_mask, (prediction, i),) for i in range(n_masks)]
# pred = [res.get(timeout=15) for res in result]
print("everything done, uploading data.")
# data_uri = save_and_upload(aggregate_mask)
# pred.append({
# "category": "Background",
# "maskUrl": data_uri,
# "instance": 0
# })
predictions.append(pred)
return predictions
def predict(self, json_input):
"""
Prediction given the request input
:param json_input: [dict], request input
:return: [dict], prediction
"""
# transform json_input and assign the transformed value to model_input
print("json input", json_input)
json_input = json_input[0]["body"]
json_input = json_input.decode("utf-8")
print("Fixed json input", json_input)
try:
model_input = pd.read_json(json.loads(json_input))
except ValueError:
model_input = pd.read_json(json_input)
predictions = self.predict_intermediate(model_input)
print("Predictions: ", predictions)
# TODO If we have more than 1 model, then create additional classes similar to ModelService
# TODO where each of one will load one of your models
# # transform predictions to a list and assign and return it
# prediction_list = []
# output_keys = set([key.split("_")[0] for key in predictions.keys()])
# for index, row in predictions.iterrows():
# out_row = {key: {} for key in output_keys}
# for i, j in row.items():
# name, p_type = i.split("_")
# if p_type == "predictions":
# p_type = "prediction"
# if p_type == "probabilities":
# p_type = "probability"
# out_row[name][p_type] = j
# prediction_list.append(out_row)
return predictions
def train(self, input_data_path, model_save_path, hyperparams_path=None):
pass
@classmethod
def load_weights(cls, weights_path):
pass
@staticmethod
def get_encoding_string(mask):
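# zlib-compress the raw mask buffer and base64-encode it so the mask can be
# carried inside a JSON payload; a consumer reverses this with
# base64.b64decode followed by zlib.decompress.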
data = zlib.compress(mask)
encoded_string = base64.b64encode(data).decode("utf-8")
return encoded_string
def _handle_mask(self, prediction, inst):
new_masks = prediction["new_masks"]
scores = prediction["scores"]
class_ids = prediction["class_ids"]
print(f"processing mask number {inst}")
mask = new_masks[..., inst]
mask_data = self.get_encoding_string(mask)
class_id = class_ids[inst]
w, h = mask.shape[:2]
print(f"processing mask number {inst} done")
return {
"category": class_names[class_id],
"class_id": int(class_id),
"maskData": mask_data,
"instance": inst,
"score": float(scores[inst]),
"width": w,
"height": h,
}
|
[
"os.listdir",
"mrcnn.utils.download_trained_weights",
"json.loads",
"boto3.client",
"os.getcwd",
"os.path.exists",
"pandas.read_json",
"zlib.compress",
"urllib.request.urlretrieve",
"numpy.where",
"base64.b64encode",
"os.path.join",
"numpy.delete"
] |
[((290, 308), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (302, 308), False, 'import boto3\n'), ((324, 381), 'os.path.join', 'os.path.join', (['"""sagify_base/local_test/test_dir/"""', '"""model"""'], {}), "('sagify_base/local_test/test_dir/', 'model')\n", (336, 381), False, 'import os\n'), ((3886, 3922), 'numpy.where', 'np.where', (["(r['class_ids'] != class_id)"], {}), "(r['class_ids'] != class_id)\n", (3894, 3922), True, 'import numpy as np\n'), ((3992, 4040), 'numpy.delete', 'np.delete', (["r['masks']", 'indices_to_remove'], {'axis': '(2)'}), "(r['masks'], indices_to_remove, axis=2)\n", (4001, 4040), True, 'import numpy as np\n'), ((4058, 4107), 'numpy.delete', 'np.delete', (["r['scores']", 'indices_to_remove'], {'axis': '(0)'}), "(r['scores'], indices_to_remove, axis=0)\n", (4067, 4107), True, 'import numpy as np\n'), ((4191, 4243), 'numpy.delete', 'np.delete', (["r['class_ids']", 'indices_to_remove'], {'axis': '(0)'}), "(r['class_ids'], indices_to_remove, axis=0)\n", (4200, 4243), True, 'import numpy as np\n'), ((7594, 7613), 'zlib.compress', 'zlib.compress', (['mask'], {}), '(mask)\n', (7607, 7613), False, 'import zlib\n'), ((2816, 2862), 'os.path.join', 'os.path.join', (['_MODEL_PATH', '"""mask_rcnn_coco.h5"""'], {}), "(_MODEL_PATH, 'mask_rcnn_coco.h5')\n", (2828, 2862), False, 'import os\n'), ((4657, 4685), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'image_path'], {}), '(url, image_path)\n', (4668, 4685), False, 'from urllib.request import urlretrieve\n'), ((2712, 2735), 'os.listdir', 'os.listdir', (['_MODEL_PATH'], {}), '(_MODEL_PATH)\n', (2722, 2735), False, 'import os\n'), ((6316, 6338), 'json.loads', 'json.loads', (['json_input'], {}), '(json_input)\n', (6326, 6338), False, 'import json\n'), ((6393, 6417), 'pandas.read_json', 'pd.read_json', (['json_input'], {}), '(json_input)\n', (6405, 6417), True, 'import pandas as pd\n'), ((7639, 7661), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (7655, 7661), False, 'import base64\n'), ((2971, 3002), 'os.path.exists', 'os.path.exists', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (2985, 3002), False, 'import os\n'), ((3024, 3071), 'mrcnn.utils.download_trained_weights', 'utils.download_trained_weights', (['COCO_MODEL_PATH'], {}), '(COCO_MODEL_PATH)\n', (3054, 3071), False, 'from mrcnn import utils\n'), ((4748, 4759), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4757, 4759), False, 'import os\n'), ((3199, 3219), 'os.path.join', 'os.path.join', (['"""logs"""'], {}), "('logs')\n", (3211, 3219), False, 'import os\n')]
|
"""
Storage layer for perses automated molecular design.
TODO
----
* Add write_sampler_state(modname, sampler_state, iteration)
* Generalize write_quantity to handle units
* Add data access routines for reading to isolate low-level storage layer
"""
__author__ = '<NAME>'
################################################################################
# IMPORTS
################################################################################
import os, os.path
import sys, math
import numpy as np
import copy
import time
import netCDF4 as netcdf
import pickle
import json
import mdtraj
from simtk import unit
import codecs
################################################################################
# LOGGER
################################################################################
import logging
logger = logging.getLogger(__name__)
################################################################################
# STORAGE
################################################################################
class NetCDFStorage(object):
"""NetCDF storage layer.
"""
def __init__(self, filename, mode='w'):
"""Create NetCDF storage layer, creating or appending to an existing file.
Parameters
----------
filename : str
Name of storage file to bind to.
mode : str, optional, default='w'
File open mode, 'w' for (over)write, 'a' for append.
"""
self._filename = filename
self._ncfile = netcdf.Dataset(self._filename, mode=mode)
self._envname = None
self._modname = None
# Create standard dimensions.
if 'iterations' not in self._ncfile.dimensions:
self._ncfile.createDimension('iterations', size=None)
if 'spatial' not in self._ncfile.dimensions:
self._ncfile.createDimension('spatial', size=3)
def _find_group(self):
"""Retrieve the specified group, creating it if it does not exist.
"""
groupname = '/'
if self._envname is not None:
groupname += self._envname + '/'
if self._modname is not None:
groupname += self._modname + '/'
ncgrp = self._ncfile.createGroup(groupname)
return ncgrp
@staticmethod
def _encode_string(string, encoding='ascii'):
"""Encode strings to ASCII to avoid python 3 crap.
"""
try:
return string.encode(encoding)
except UnicodeEncodeError:
return string
def sync(self):
"""Flush write buffer.
"""
self._ncfile.sync()
def close(self):
"""Close the storage layer.
"""
self._ncfile.close()
def write_configuration(self, varname, positions, topology, iteration=None, frame=None, nframes=None):
"""Write a configuration (or one of a sequence of configurations) to be stored as a native NetCDF array
Parameters
----------
varname : str
The variable name to be stored
positions : simtk.unit.Quantity of size [natoms,3] with units compatible with angstroms
The positions to be written
topology : md.Topology object
The corresponding Topology object
iteration : int, optional, default=None
The local iteration for the module, or `None` if this is a singleton
frame : int, optional, default=None
If these coordinates are part of multiple frames in a sequence, the frame number
nframes : int, optional, default=None
If these coordinates are part of multiple frames in a sequence, the total number of frames in the sequence
"""
ncgrp = self._find_group()
if ((nframes is not None) and (frame is None)) or ((nframes is None) and (frame is not None)):
raise Exception("Both 'nfranes' and 'frame' must be used together.")
def dimension_name(iteration, suffix):
dimension_name = ''
if self._envname: dimension_name += self._envname + '_'
if self._modname: dimension_name += self._modname + '_'
dimension_name += varname + '_' + suffix + '_' + str(iteration)
return dimension_name
if iteration is not None:
varname += '_' + str(iteration)
if varname not in ncgrp.variables:
# Create dimensions
if (frame is not None):
frames_dimension_name = dimension_name(varname, 'frames')
ncdim = self._ncfile.createDimension(frames_dimension_name, nframes)
natoms = topology.n_atoms
atoms_dimension_name = dimension_name(varname, 'atoms')
ncdim = self._ncfile.createDimension(atoms_dimension_name, natoms)
# Create variables
# TODO: Handle cases with no iteration but with specified frames
if (iteration is not None) and (frame is not None):
ncgrp.createVariable(varname, np.float32, dimensions=(frames_dimension_name, atoms_dimension_name, 'spatial'), chunksizes=(1,natoms,3))
elif (iteration is not None):
ncgrp.createVariable(varname, np.float32, dimensions=(atoms_dimension_name, 'spatial'), chunksizes=(natoms,3))
else:
ncgrp.createVariable(varname, np.float32, dimensions=(atoms_dimension_name, 'spatial'), chunksizes=(natoms,3))
# Write Topology
if (frame is None) or (frame == 0):
topology_varname = varname + '_topology'
if (iteration is not None):
topology_varname += '_' + str(iteration)
self.write_object(topology_varname, topology, iteration=iteration)
# Write positions
# TODO: Handle cases with no iteration but with specified frames
positions_unit = unit.angstroms
if (frame is not None):
ncgrp.variables[varname][frame,:,:] = positions[:,:] / positions_unit
else:
ncgrp.variables[varname] = positions[:,:] / positions_unit
def write_object(self, varname, obj, iteration=None):
"""Serialize a Python object, encoding as pickle when storing as string in NetCDF.
Parameters
----------
varname : str
The variable name to be stored
obj : object
The object to be serialized
iteration : int, optional, default=None
The local iteration for the module, or `None` if this is a singleton
"""
ncgrp = self._find_group()
if varname not in ncgrp.variables:
if iteration is not None:
ncgrp.createVariable(varname, str, dimensions=('iterations',), chunksizes=(1,))
else:
ncgrp.createVariable(varname, str, dimensions=(), chunksizes=(1,))
pickled = codecs.encode(pickle.dumps(obj), "base64").decode()
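# get_object() reverses this round trip with codecs.decode(..., "base64")
# followed by pickle.loads.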
if iteration is not None:
ncgrp.variables[varname][iteration] = pickled
else:
ncgrp.variables[varname] = pickled
def get_object(self, envname, modname, varname, iteration=None):
"""Get the serialized Python object.
Parameters
----------
envname : str
The name of the environment for the variable
modname : str
The name of the module for the variable
varname : str
The variable name to be stored
iteration : int, optional, default=None
The local iteration for the module, or `None` if this is a singleton
Returns
-------
obj : object
The retrieved object
"""
nc_path = "/{envname}/{modname}/{varname}".format(envname=envname, modname=modname, varname=varname)
if iteration is not None:
pickled = self._ncfile[nc_path][iteration]
else:
pickled = self._ncfile[nc_path][0]
obj = pickle.loads(codecs.decode(pickled.encode(), "base64"))
return obj
def write_quantity(self, varname, value, iteration=None):
"""Write a floating-point number
Parameters
----------
varname : str
The variable name to be stored
value : float
The floating-point value to be written
iteration : int, optional, default=None
The local iteration for the module, or `None` if this is a singleton
"""
ncgrp = self._find_group()
if varname not in ncgrp.variables:
if iteration is not None:
ncgrp.createVariable(varname, 'f8', dimensions=('iterations',), chunksizes=(1,))
else:
ncgrp.createVariable(varname, 'f8', dimensions=(), chunksizes=(1,))
if iteration is not None:
ncgrp.variables[varname][iteration] = value
else:
ncgrp.variables[varname] = value
def write_array(self, varname, array, iteration=None):
"""Write a numpy array as a native NetCDF array
Parameters
----------
varname : str
The variable name to be stored
array : numpy.array of arbitrary dimension
The numpy array to be written
iteration : int, optional, default=None
The local iteration for the module, or `None` if this is a singleton
"""
ncgrp = self._find_group()
def dimension_name(dimension_index):
dimension_name = ''
if self._envname: dimension_name += self._envname + '_'
if self._modname: dimension_name += self._modname + '_'
dimension_name += varname + '_' + str(dimension_index)
return dimension_name
if varname not in ncgrp.variables:
# Create dimensions
dimensions = list()
if iteration is not None:
dimensions.append('iterations')
for (dimension_index, size) in enumerate(array.shape):
ncdim = self._ncfile.createDimension(dimension_name(dimension_index), size)
dimensions.append(dimension_name(dimension_index))
dimensions = tuple(dimensions)
# Create variables
if iteration is not None:
ncgrp.createVariable(varname, array.dtype, dimensions=dimensions, chunksizes=((1,) + array.shape))
else:
ncgrp.createVariable(varname, array.dtype, dimensions=dimensions, chunksizes=array.shape)
# Check dimensions
expected_shape = list()
for (dimension_index, size) in enumerate(array.shape):
expected_shape.append(self._ncfile.dimensions[dimension_name(dimension_index)].size)
expected_shape = tuple(expected_shape)
if expected_shape != array.shape:
raise Exception("write_array called for /%s/%s/%s with different dimension (%s) than initially called (%s); dimension must stay constant." % (envname, modname, varname, str(array.shape), str(expected_shape)))
if iteration is not None:
ncgrp.variables[varname][iteration] = array
else:
ncgrp.variables[varname] = array
################################################################################
# BOUND STORAGE VIEWS THAT ENCAPSULATE ENVIRONMENT NAMES AND MODULE NAMES
################################################################################
class NetCDFStorageView(NetCDFStorage):
"""NetCDF storage view with bound environment and module names.
"""
def __init__(self, storage, envname=None, modname=None):
"""Initialize a view of the storage with a specific environment and module name.
Parameters
----------
envname : str, optional, default=None
Set the name of the environment this module is attached to.
modname : str, optional, default=None
Set the name of the module in the code writing the variable
"""
self._filename = storage._filename
self._ncfile = storage._ncfile
self._envname = storage._envname
self._modname = storage._modname
if envname: self._envname = envname
if modname: self._modname = modname
|
[
"netCDF4.Dataset",
"logging.getLogger",
"pickle.dumps"
] |
[((827, 854), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (844, 854), False, 'import logging\n'), ((1505, 1546), 'netCDF4.Dataset', 'netcdf.Dataset', (['self._filename'], {'mode': 'mode'}), '(self._filename, mode=mode)\n', (1519, 1546), True, 'import netCDF4 as netcdf\n'), ((6839, 6856), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (6851, 6856), False, 'import pickle\n')]
|
from datetime import datetime
from app import db
class DOCUMENT(db.Model):
id = db.Column(db.Integer, nullable=False, primary_key=True)
description = db.Column(db.String(300), nullable=False, default='Missing Description')
isActive = db.Column(db.Boolean, nullable=False, default=True)
template = db.Column(db.String(300), nullable=False)
class TERMS(db.Model):
id = db.Column(db.Integer, nullable=False, primary_key=True)
description = db.Column(db.String(300), nullable=False, default='Missing Description')
isActive = db.Column(db.Boolean, nullable=False, default=True)
template = db.Column(db.String(300), nullable=False)
enabledPNs = db.Column(db.Text, nullable=False, default='ALL')
|
[
"app.db.String",
"app.db.Column"
] |
[((99, 154), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)', 'primary_key': '(True)'}), '(db.Integer, nullable=False, primary_key=True)\n', (108, 154), False, 'from app import db\n'), ((274, 325), 'app.db.Column', 'db.Column', (['db.Boolean'], {'nullable': '(False)', 'default': '(True)'}), '(db.Boolean, nullable=False, default=True)\n', (283, 325), False, 'from app import db\n'), ((438, 493), 'app.db.Column', 'db.Column', (['db.Integer'], {'nullable': '(False)', 'primary_key': '(True)'}), '(db.Integer, nullable=False, primary_key=True)\n', (447, 493), False, 'from app import db\n'), ((613, 664), 'app.db.Column', 'db.Column', (['db.Boolean'], {'nullable': '(False)', 'default': '(True)'}), '(db.Boolean, nullable=False, default=True)\n', (622, 664), False, 'from app import db\n'), ((753, 802), 'app.db.Column', 'db.Column', (['db.Text'], {'nullable': '(False)', 'default': '"""ALL"""'}), "(db.Text, nullable=False, default='ALL')\n", (762, 802), False, 'from app import db\n'), ((188, 202), 'app.db.String', 'db.String', (['(300)'], {}), '(300)\n', (197, 202), False, 'from app import db\n'), ((359, 373), 'app.db.String', 'db.String', (['(300)'], {}), '(300)\n', (368, 373), False, 'from app import db\n'), ((527, 541), 'app.db.String', 'db.String', (['(300)'], {}), '(300)\n', (536, 541), False, 'from app import db\n'), ((698, 712), 'app.db.String', 'db.String', (['(300)'], {}), '(300)\n', (707, 712), False, 'from app import db\n')]
|
#################################################################
# Opens each Excel workbook under the specified folder and reports or adjusts files whose images are not pasted at the specified position.
#
# Running this requires the following library:
# - win32com
# - $ python -m pip install pywin32
#
# [Reference]
# - https://www.sejuku.net/blog/23647
#################################################################
import argparse
# noinspection SpellCheckingInspection
def go(target_dir: str, base_position: float, report_only: bool):
import pathlib
import pywintypes
import win32com.client
excel_dir = pathlib.Path(target_dir)
if not excel_dir.exists():
print(f'target directory not found [{target_dir}]')
return
try:
excel = win32com.client.Dispatch('Excel.Application')
excel.Visible = True
for f in excel_dir.glob('**/*.xlsx'):
abs_path = str(f)
try:
wb = excel.Workbooks.Open(abs_path)
wb.Activate()
except pywintypes.com_error as err:
print(err)
continue
try:
sheets_count = wb.Sheets.Count
for sheet_index in range(0, sheets_count):
ws = wb.Worksheets(sheet_index + 1)
ws.Activate()
for sh in ws.Shapes:
if base_position <= sh.Left:
if report_only:
print(f'{abs_path}-{ws.Name}')
else:
sh.Left = base_position
if not report_only:
wb.Save()
wb.Saved = True
finally:
wb.Close()
finally:
excel.Quit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
usage='python main.py -d /path/to/excel/dir -p base-left-position(e.g. 100.0) [-r]',
description='Opens each Excel workbook under the specified folder and reports files whose images are not pasted at the left edge.',
add_help=True
)
parser.add_argument('-d', '--directory', help='target directory', required=True)
parser.add_argument('-p', '--position', help='reference Shape.Left value', type=float, default=100.0)
parser.add_argument('-r', '--report', help='report only, do not modify files', action='store_true')
args = parser.parse_args()
go(args.directory, args.position, args.report)
|
[
"pathlib.Path",
"argparse.ArgumentParser"
] |
[((533, 557), 'pathlib.Path', 'pathlib.Path', (['target_dir'], {}), '(target_dir)\n', (545, 557), False, 'import pathlib\n'), ((1768, 1971), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""python main.py -d /path/to/excel/dir -p base-left-position(e.g. 100.0) [-r]"""', 'description': '"""指定されたフォルダ配下のExcelを開いていき、画像が左端に貼付けされていないファイルを出力します。"""', 'add_help': '(True)'}), "(usage=\n 'python main.py -d /path/to/excel/dir -p base-left-position(e.g. 100.0) [-r]'\n , description='指定されたフォルダ配下のExcelを開いていき、画像が左端に貼付けされていないファイルを出力します。',\n add_help=True)\n", (1791, 1971), False, 'import argparse\n')]
|
import torch
from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
from mmdet.core import multiclass_nms,bbox_select_per_class
from mmdet.core.evaluation import bbox_overlaps
from mmdet.models.losses import accuracy
from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
import time
@HEADS.register_module()
class WsodEmbedHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Simplest base roi head including one bbox head and one mask head."""
def __init__(self,
bbox_roi_extractor=None,
bbox_head=None,
contrast_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None):
super(WsodEmbedHead,self).__init__(
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
mask_roi_extractor=mask_roi_extractor,
mask_head=mask_head,
shared_head=shared_head,
train_cfg=train_cfg,
test_cfg=test_cfg)
# self.init_contrast_head(contrast_head)
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
self.bbox_assigner = None
self.bbox_sampler = None
if self.train_cfg:
self.bbox_assigner = build_assigner(self.train_cfg.assigner)
self.bbox_sampler = build_sampler(
self.train_cfg.sampler, context=self)
def init_bbox_head(self, bbox_roi_extractor, bbox_head):
"""Initialize ``bbox_head``"""
self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
self.bbox_head = build_head(bbox_head)
# def init_embedding_head(self,embedding_head):
# self.embedding_head = build_head(embedding_head)
def init_contrast_head(self,contrast_head):
self.contrast_head = build_head(contrast_head)
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize ``mask_head``"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = build_head(mask_head)
def init_weights(self, pretrained):
"""Initialize the weights in head.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
if self.with_bbox:
self.bbox_roi_extractor.init_weights()
self.bbox_head.init_weights()
if self.with_mask:
self.mask_head.init_weights()
if not self.share_roi_extractor:
self.mask_roi_extractor.init_weights()
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats = self.bbox_head.double_fc_forward(bbox_feats)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
if self.with_bbox:
bbox_results = self._bbox_forward_strong_branch2(bbox_feats)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
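    # match() compares the boxes selected in two consecutive OAM iterations: it
    # returns True only when the labels agree index-wise and every box in bboxes1
    # overlaps some box in bboxes2 with IoU > 0.5, i.e. the pseudo ground truth
    # has stopped changing.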
def match(self,bboxes1=None,bboxes2=None,labels1=None,labels2=None):
        for i, box1 in enumerate(bboxes1):
            flag = False  # reset the overlap flag for every box in bboxes1
if labels1[i] != labels2[i]:
return False
for box2 in bboxes2:
if bbox_overlaps(box1.unsqueeze(0).cpu().numpy(),box2.unsqueeze(0).cpu().numpy())[0][0] > 0.5:
flag = True
break
if not flag: return False
return True
#duyu
#TODO online augmentation
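    # OAM_Confidence regenerates pseudo boxes/labels with _bbox_forward_train_strong
    # up to max_iter times and returns the iteration T at which match() has succeeded
    # three times in a row (T stays at max_iter if that never happens); a small T can
    # be read as the online augmentation converging quickly for this sample.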
@torch.no_grad()
def OAM_Confidence(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
max_iter=30,
gt_bboxes_ignore=None,
gt_masks=None):
if not self.with_bbox:
raise Exception
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
bbox_results, oam_bboxes, oam_labels = self._bbox_forward_train_strong(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
oam_bboxes = [oam_bboxes[:, :4]]
oam_labels = [oam_labels]
#begin iter
k = 0
T = max_iter
count = 0
while k < max_iter:
k += 1
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
oam_bboxes[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
oam_bboxes[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
bbox_results, oam_bboxes_next, oam_labels_next = self._bbox_forward_train_strong(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
oam_bboxes_next = [oam_bboxes_next[:,:4]]
oam_labels_next = [oam_labels_next]
if self.match(bboxes1=oam_bboxes_next[0],bboxes2=oam_bboxes[0],labels1=oam_labels_next[0],labels2=oam_labels[0]):
count += 1
if count == 3:
T = k
k = max_iter + 1
break
else:
count = 0
oam_bboxes,oam_labels = oam_bboxes_next,oam_labels_next
return T
#duyu
    #TODO Double pass does not seem to work well. Here the first pass is removed
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
# losses_first_pass,oam_bboxes,oam_labels = self.forward_train_first_pass(x,img_metas,proposal_list,gt_bboxes,gt_labels,gt_bboxes_ignore,
# gt_masks=None)
losses_second_pass = self.forward_train_second_pass(x,img_metas,proposal_list,gt_bboxes,gt_labels,gt_bboxes_ignore,
gt_masks=None)
losses = dict()
# losses.update(losses_first_pass)
losses.update(losses_second_pass)
return losses
#duyu
def forward_train_first_pass(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results_strong,bbox_results_weak,oam_bboxes,oam_labels = \
self._bbox_forward_train_first_pass(x,sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results_strong['loss_bbox_strong_fp'])
losses.update(bbox_results_weak['loss_bbox_weak_fp'])
# mask head forward and loss
#TODO
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results_strong['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses,oam_bboxes,oam_labels
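    # forward_train_second_pass assumes a batch of exactly two images (see the
    # `assert num_imgs == 2` below): image 0 carries box-level annotations
    # ("strong"), image 1 carries only image-level labels ("weak"), which is why
    # its assigner/sampler calls pass gt_labels=None.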
def forward_train_second_pass(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
assert num_imgs == 2
#assign for strong image
assign_result = self.bbox_assigner.assign(
proposal_list[0], gt_bboxes[0], gt_bboxes_ignore[0],
gt_labels[0])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[0],
gt_bboxes[0],
gt_labels[0],
feats=[lvl_feat[0][None] for lvl_feat in x])
sampling_results.append(sampling_result)
#assign for weak image
assign_result = self.bbox_assigner.assign(
proposal_list[1], gt_bboxes[1], gt_bboxes_ignore[1],
gt_labels=None)
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[1],
gt_bboxes[1],
gt_labels=None,
feats=[lvl_feat[1][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# print('#'*100)
# print(sampling_results)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results_weak_branch1, bbox_results_strong_branch1, bbox_results_weak_branch2, bbox_results_strong_branch2 = \
self._bbox_forward_train_second_pass(x, sampling_results,
gt_bboxes, gt_labels,
img_metas,gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(bbox_results_weak_branch1['loss_bbox_weak_branch1_sp'])
losses.update(bbox_results_strong_branch1['loss_bbox_strong_branch1_sp'])
losses.update(bbox_results_weak_branch2['loss_bbox_weak_branch2'])
losses.update(bbox_results_strong_branch2['loss_bbox_strong_branch2'])
# losses.update(contrastive_losses)
# mask head forward and loss
# TODO
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results_strong_branch1['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses
#duyu
def _bbox_forward_strong_branch1(self,bbox_feats):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
cls_score, bbox_pred = self.bbox_head.forward_strong_branch1(bbox_feats)
bbox_results = dict(
cls_score = cls_score, bbox_pred = bbox_pred, bbox_feats = bbox_feats)
return bbox_results
#duyu
def _bbox_forward_strong_branch2(self,bbox_feats):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
cls_score, bbox_pred = self.bbox_head.forward_strong_branch2(bbox_feats)
bbox_results = dict(
cls_score = cls_score, bbox_pred = bbox_pred, bbox_feats = bbox_feats)
return bbox_results
#duyu
def _bbox_forward_weak(self,bbox_feats):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
cls_proposal_mat = self.bbox_head.forward_weak(bbox_feats)
bbox_results = dict(
cls_proposal_mat = cls_proposal_mat, bbox_feats=bbox_feats)
return bbox_results
#duyu
def _bbox_forward_train_first_pass(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
"""Run forward function and calculate loss for box head in training."""
torch_device = gt_labels[0].get_device()
x_strong = tuple([torch.unsqueeze(xx[0],0) for xx in x])
x_weak = tuple([torch.unsqueeze(xx[1],0) for xx in x])
rois_strong = bbox2roi([res.bboxes for res in [sampling_results[0]]])
rois_weak = bbox2roi([res.bboxes for res in [sampling_results[1]]])
bbox_feats_strong = self.bbox_roi_extractor(
x_strong[:self.bbox_roi_extractor.num_inputs], rois_strong)
bbox_feats_strong = self.bbox_head.double_fc_forward(bbox_feats_strong)
bbox_feats_weak = self.bbox_roi_extractor(
x_weak[:self.bbox_roi_extractor.num_inputs], rois_weak)
bbox_feats_weak = self.bbox_head.double_fc_forward(bbox_feats_weak)
if self.with_shared_head:
bbox_feats_strong = self.shared_head(bbox_feats_strong)
bbox_feats_weak = self.shared_head(bbox_feats_weak)
        #calculate loss_strong_branch1
bbox_targets_strong = self.bbox_head.get_targets([sampling_results[0]], [gt_bboxes[0]],
[gt_labels[0]], self.train_cfg)
bbox_results_strong = self._bbox_forward_strong_branch1(bbox_feats_strong)
loss_bbox_strong = self.bbox_head.loss_strong(bbox_results_strong['cls_score'],
bbox_results_strong['bbox_pred'], rois_strong,
*bbox_targets_strong)
loss_strong = dict()
loss_strong['loss_cls_strong_branch1_fp'] = loss_bbox_strong['loss_cls_strong']
loss_strong['acc_strong_branch1_fp'] = loss_bbox_strong['acc_strong']
loss_strong['loss_bbox_strong_branch1_fp'] = loss_bbox_strong['loss_bbox_strong']
bbox_results_strong.update(loss_bbox_strong_fp=loss_strong)
oam_bboxes_strong,oam_labels_strong = bbox_select_per_class(bbox_results_strong['bbox_pred'],
bbox_results_strong['cls_score'],
gt_labels[1],
score_thr=0,
nms_cfg={'iou_threshold':0.5},
max_num=-1
)
#calculate loss_weak_branch1
bbox_targets_weak = self.bbox_head.get_targets([sampling_results[1]], [gt_bboxes[1]],
[gt_labels[1]], self.train_cfg)
bbox_results_weak = self._bbox_forward_weak(bbox_feats_weak)
bbox_results_weak_pseudo = self._bbox_forward_strong_branch1(bbox_feats_weak)
loss_bbox_weak = self.bbox_head.loss_weak(bbox_results_weak['cls_proposal_mat'],
rois_weak,
*bbox_targets_weak)
loss_weak = dict()
loss_weak['loss_img_level_fp'] = loss_bbox_weak['loss_img_level']
bbox_results_weak.update(loss_bbox_weak_fp=loss_weak)
oam_bboxes_weak,oam_labels_weak = bbox_select_per_class(bbox_results_weak_pseudo['bbox_pred'],
bbox_results_weak_pseudo['cls_score'],
gt_labels[1],
score_thr=0,
nms_cfg={'iou_threshold':0.5},
max_num=-1
)
# print('oam_labels_first_pass: ',oam_labels_weak)
oam_bboxes = []
oam_labels = []
oam_bboxes.append(oam_bboxes_strong[:,:4])
oam_bboxes.append(oam_bboxes_weak[:,:4])
oam_labels.append(oam_labels_strong.to(torch_device))
oam_labels.append(oam_labels_weak.to(torch_device))
return bbox_results_strong,bbox_results_weak,oam_bboxes,oam_labels
#duyu
def contrast_forward_train(self,
x,
strong_bboxes,
strong_labels,
oam_bboxes,
oam_labels,
img_metas,
gt_bboxes_ignore=None,
):
torch_device = strong_labels.get_device()
oam_labels = oam_labels.to(torch_device)
x_strong = tuple([torch.unsqueeze(xx[0], 0) for xx in x])
x_weak = tuple([torch.unsqueeze(xx[1], 0) for xx in x])
rois_strong = bbox2roi([strong_bboxes])
rois_weak = bbox2roi([oam_bboxes])
bbox_feats_strong = self.bbox_roi_extractor(
x_strong[:self.bbox_roi_extractor.num_inputs], rois_strong)
bbox_feats_strong = self.bbox_head.double_fc_forward(bbox_feats_strong)
bbox_feats_weak = self.bbox_roi_extractor(
x_weak[:self.bbox_roi_extractor.num_inputs], rois_weak)
bbox_feats_weak = self.bbox_head.double_fc_forward(bbox_feats_weak)
if self.with_shared_head:
bbox_feats_strong = self.shared_head(bbox_feats_strong)
bbox_feats_weak = self.shared_head(bbox_feats_weak)
contrastive_losses = self.contrast_head.forward_train(bbox_feats_strong,bbox_feats_weak,strong_labels,oam_labels)
losses = dict()
losses.update(contrastive_losses)
# losses['contrastive_loss'] = torch.tensor([0.0])
return losses
# duyu
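    # The embedding branch wraps bbox_head.forward_embedding: besides the usual
    # classification score and box regression it returns an FC-based score
    # (cls_score_fc) and the minimal positive-positive / negative-negative embedding
    # distances, which loss_strong_branch2 / loss_weak_branch2 consume further below.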
def _bbox_forward_embedding_branch2(self, bbox_feats,hard_neg_roi_id=None,pos_roi_id=None):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
# time_start = time.time()
# cls_score, bbox_pred,min_pos_pos_dist, min_neg_neg_dist = self.bbox_head.forward_embedding(bbox_feats,hard_neg_roi_id=hard_neg_roi_id,pos_roi_id=pos_roi_id)
cls_score_fc,cls_score, bbox_pred,min_pos_pos_dist, min_neg_neg_dist = self.bbox_head.forward_embedding(bbox_feats,hard_neg_roi_id=hard_neg_roi_id,pos_roi_id=pos_roi_id)
# print(time.time()-time_start)
bbox_results = dict(
cls_score_fc=cls_score_fc,cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats,min_pos_pos_dist=min_pos_pos_dist,min_neg_neg_dist=min_neg_neg_dist)
# cls_score = cls_score, bbox_pred = bbox_pred, bbox_feats = bbox_feats, min_pos_pos_dist = min_pos_pos_dist, min_neg_neg_dist = min_neg_neg_dist)
return bbox_results
#yangyk
def get_hard_neg_target(self,rois,sampling_results):
#print([res.hard_neg_bboxes for res in sampling_results])
#print(sampling_results)
flag = True
for res in sampling_results:
if res.hard_neg_bboxes is None:
flag = False
if flag is False:
hard_neg_labels = None
hard_neg_roi_id = None
return hard_neg_labels,hard_neg_roi_id
# time_start = time.time()
hard_neg_labels = sampling_results[0].hard_neg_labels
hard_neg_roi_id = sampling_results[0].hard_neg_id
# hard_neg_roi_id = torch.cat(hard_neg_roi_id, 0)
# hard_neg_labels = torch.cat(hard_neg_labels_list, 0)
# print(time.time()-time_start)
return hard_neg_labels,hard_neg_roi_id
#duyu
def _bbox_forward_train_second_pass(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas,gt_bboxes_ignore=None):
"""Run forward function and calculate loss for box head in training."""
x_strong = tuple([torch.unsqueeze(xx[0],0) for xx in x])
x_weak = tuple([torch.unsqueeze(xx[1],0) for xx in x])
rois_strong = bbox2roi([res.bboxes for res in [sampling_results[0]]])
rois_weak = bbox2roi([res.bboxes for res in [sampling_results[1]]])
bbox_feats_strong = self.bbox_roi_extractor(
x_strong[:self.bbox_roi_extractor.num_inputs], rois_strong)
bbox_feats_strong = self.bbox_head.double_fc_forward(bbox_feats_strong)
bbox_feats_weak = self.bbox_roi_extractor(
x_weak[:self.bbox_roi_extractor.num_inputs], rois_weak)
bbox_feats_weak = self.bbox_head.double_fc_forward(bbox_feats_weak)
if self.with_shared_head:
bbox_feats_strong = self.shared_head(bbox_feats_strong)
bbox_feats_weak = self.shared_head(bbox_feats_weak)
#calculate loss_strong_branch1
bbox_targets_strong = self.bbox_head.get_targets([sampling_results[0]], [gt_bboxes[0]],
[gt_labels[0]], self.train_cfg)
#yanyk
bbox_results_strong_branch1 = self._bbox_forward_strong_branch1(bbox_feats_strong)
loss_bbox_strong_branch1 = self.bbox_head.loss_strong_branch1(bbox_results_strong_branch1['cls_score'],
bbox_results_strong_branch1['bbox_pred'], rois_strong,
*bbox_targets_strong)
loss_strong_branch1 = dict()
loss_strong_branch1['loss_cls_strong_branch1_sp'] = loss_bbox_strong_branch1['loss_cls_strong']
loss_strong_branch1['loss_bbox_strong_branch1_sp'] = loss_bbox_strong_branch1['loss_bbox_strong']
loss_strong_branch1['acc_strong_branch1_sp'] = loss_bbox_strong_branch1['acc_strong']
bbox_results_strong_branch1.update(loss_bbox_strong_branch1_sp=loss_strong_branch1)
#calculate loss_weak_branch1
bbox_results_weak_pseudo = self._bbox_forward_strong_branch1(bbox_feats_weak)
bbox_results_weak_branch1 = self._bbox_forward_weak(bbox_feats_weak)
loss_bbox_weak_branch1 = self.bbox_head.loss_weak_branch1(bbox_results_weak_branch1['cls_proposal_mat'],
gt_labels[1]
)
loss_weak_branch1 = dict()
loss_weak_branch1['loss_img_level'] = loss_bbox_weak_branch1['loss_img_level']
bbox_results_weak_branch1.update(loss_bbox_weak_branch1_sp=loss_weak_branch1)
#generate oam labels for weak image
oam_bboxes_weak, oam_labels_weak = bbox_select_per_class(bbox_results_weak_pseudo['bbox_pred'],
bbox_results_weak_pseudo['cls_score'],
gt_labels[1],
score_thr=0,
nms_cfg={'iou_threshold': 0.5},
max_num=-1
)
#contrastive_losses
# contrastive_losses = self.contrast_forward_train(x,gt_bboxes[0],gt_labels[0],oam_bboxes_weak[:,:4],
# oam_labels_weak,img_metas,gt_bboxes_ignore=gt_bboxes_ignore)
#calculate loss_strong_branch2
hard_neg_roi_labels, hard_neg_roi_id = self.get_hard_neg_target(rois_strong, [sampling_results[0]])
all_bbox_labels = bbox_targets_strong[0]
pos_roi_labels, pos_roi_id = all_bbox_labels[(all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)], \
(all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)
pos_roi_id = pos_roi_id.squeeze(1)
pos_roi_labels = pos_roi_labels.squeeze(1)
bbox_results_strong_branch2 = self._bbox_forward_embedding_branch2(bbox_feats_strong,
hard_neg_roi_id=hard_neg_roi_id,
pos_roi_id=pos_roi_id)
loss_bbox_strong_branch2 = self.bbox_head.loss_strong_branch2(
bbox_results_strong_branch2['cls_score_fc'],
bbox_results_strong_branch2['cls_score'],
bbox_results_strong_branch2['bbox_pred'],
rois_strong,
*bbox_targets_strong,
min_pos_pos_dist=bbox_results_strong_branch2['min_pos_pos_dist'],
min_neg_neg_dist=bbox_results_strong_branch2['min_neg_neg_dist'],
pos_roi_labels=pos_roi_labels,
hard_neg_roi_labels=hard_neg_roi_labels)
loss_strong_branch2 = dict()
loss_strong_branch2['loss_cls_strong_branch2'] = loss_bbox_strong_branch2['loss_cls_strong']
loss_strong_branch2['acc_strong_branch2'] = loss_bbox_strong_branch2['acc_strong']
loss_strong_branch2['acc_strong_branch2_fc'] = loss_bbox_strong_branch2['acc_fc']
loss_strong_branch2['loss_cls_strong_branch2_fc'] = loss_bbox_strong_branch2['loss_cls_fc']
loss_strong_branch2['loss_bbox_strong_branch2'] = loss_bbox_strong_branch2['loss_bbox_strong']
loss_strong_branch2['loss_embedding_strong'] = loss_bbox_strong_branch2['loss_embed_strong']
bbox_results_strong_branch2.update(loss_bbox_strong_branch2=loss_strong_branch2)
#calculate loss_weak_branch2
bbox_targets_weak_branch2 = self.bbox_head.get_targets([sampling_results[1]],oam_bboxes_weak,oam_labels_weak,self.train_cfg)
hard_neg_roi_labels, hard_neg_roi_id = self.get_hard_neg_target(rois_weak, [sampling_results[1]])
all_bbox_labels = bbox_targets_weak_branch2[0]
pos_roi_labels, pos_roi_id = all_bbox_labels[
(all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)], (
all_bbox_labels != self.bbox_head.num_classes).nonzero(as_tuple=False)
pos_roi_id = pos_roi_id.squeeze(1)
pos_roi_labels = pos_roi_labels.squeeze(1)
bbox_results_weak_branch2 = self._bbox_forward_embedding_branch2(bbox_feats_weak,
hard_neg_roi_id=hard_neg_roi_id,
pos_roi_id=pos_roi_id)
labels,label_weights,bbox_targets,bbox_weights = bbox_targets_weak_branch2
# avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
# acc_weak = accuracy(bbox_results_weak_branch2['cls_score'],labels)
loss_bbox_weak_branch2 = self.bbox_head.loss_weak_branch2(
bbox_results_weak_branch2['cls_score_fc'],
bbox_results_weak_branch2['cls_score'],
labels,
label_weights,
min_pos_pos_dist=bbox_results_weak_branch2['min_pos_pos_dist'],
min_neg_neg_dist=bbox_results_weak_branch2['min_neg_neg_dist'],
pos_roi_labels=pos_roi_labels,
hard_neg_roi_labels=hard_neg_roi_labels)
loss_weak_branch2 = dict()
loss_weak_branch2['loss_cls_weak_branch2'] = loss_bbox_weak_branch2['loss_cls_weak']
loss_weak_branch2['acc_weak_branch2'] = loss_bbox_weak_branch2['acc_weak']
loss_strong_branch2['acc_weak_branch2_fc'] = loss_bbox_strong_branch2['acc_fc']
loss_strong_branch2['loss_cls_weak_branch2_fc'] = loss_bbox_strong_branch2['loss_cls_fc']
# loss_weak_branch2['loss_embedding_weak'] = loss_bbox_weak_branch2['loss_embed_weak']
bbox_results_weak_branch2.update(loss_bbox_weak_branch2=loss_weak_branch2)
return bbox_results_weak_branch1,bbox_results_strong_branch1,bbox_results_weak_branch2,bbox_results_strong_branch2
# contrastive_losses
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head in
training."""
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_results = self._mask_forward(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
self.train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
return mask_results
def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
"""Mask head forward function used in both training and testing."""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
assert bbox_feats is not None
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
return mask_results
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
print('async_simple_test')
det_bboxes, det_labels = await self.async_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = await self.async_test_mask(
x,
img_metas,
det_bboxes,
det_labels,
rescale=rescale,
mask_test_cfg=self.test_cfg.get('mask'))
return bbox_results, segm_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
if torch.onnx.is_in_onnx_export():
if self.with_mask:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return det_bboxes, det_labels, segm_results
else:
return det_bboxes, det_labels
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head.num_classes)
for i in range(len(det_bboxes))
]
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return list(zip(bbox_results, segm_results))
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats = self.bbox_head.double_fc_forward(bbox_feats)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
bbox_results = self._bbox_forward_embedding_branch2(bbox_feats)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score_fc']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
# some detector with_reg is False, bbox_pred will be None
if bbox_pred is not None:
# the bbox prediction of some detectors like SABL is not Tensor
if isinstance(bbox_pred, torch.Tensor):
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
else:
bbox_pred = self.bbox_head.bbox_pred_split(
bbox_pred, num_proposals_per_img)
else:
bbox_pred = (None, ) * len(proposals)
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(len(proposals)):
det_bbox, det_label = self.bbox_head.get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
return det_bboxes, det_labels
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
proposal_list,
self.test_cfg)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
det_labels)
return [(bbox_results, segm_results)]
else:
return [bbox_results]
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats = self.bbox_head.double_fc_forward(bbox_feats)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
bbox_results = self._bbox_forward_embedding_branch2(bbox_feats)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
|
[
"torch.ones",
"mmdet.core.bbox_mapping",
"mmdet.core.bbox2roi",
"torch.unsqueeze",
"mmdet.core.build_assigner",
"torch.cat",
"torch.zeros",
"mmdet.core.build_sampler",
"mmdet.core.bbox_select_per_class",
"torch.onnx.is_in_onnx_export",
"mmdet.core.merge_aug_bboxes",
"mmdet.core.bbox2result",
"torch.no_grad",
"mmdet.core.multiclass_nms"
] |
[((4587, 4602), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4600, 4602), False, 'import torch\n'), ((3363, 3384), 'mmdet.core.bbox2roi', 'bbox2roi', (['[proposals]'], {}), '([proposals])\n', (3371, 3384), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((16533, 16588), 'mmdet.core.bbox2roi', 'bbox2roi', (['[res.bboxes for res in [sampling_results[0]]]'], {}), '([res.bboxes for res in [sampling_results[0]]])\n', (16541, 16588), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((16609, 16664), 'mmdet.core.bbox2roi', 'bbox2roi', (['[res.bboxes for res in [sampling_results[1]]]'], {}), '([res.bboxes for res in [sampling_results[1]]])\n', (16617, 16664), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((18197, 18367), 'mmdet.core.bbox_select_per_class', 'bbox_select_per_class', (["bbox_results_strong['bbox_pred']", "bbox_results_strong['cls_score']", 'gt_labels[1]'], {'score_thr': '(0)', 'nms_cfg': "{'iou_threshold': 0.5}", 'max_num': '(-1)'}), "(bbox_results_strong['bbox_pred'], bbox_results_strong\n ['cls_score'], gt_labels[1], score_thr=0, nms_cfg={'iou_threshold': 0.5\n }, max_num=-1)\n", (18218, 18367), False, 'from mmdet.core import multiclass_nms, bbox_select_per_class\n'), ((19577, 19755), 'mmdet.core.bbox_select_per_class', 'bbox_select_per_class', (["bbox_results_weak_pseudo['bbox_pred']", "bbox_results_weak_pseudo['cls_score']", 'gt_labels[1]'], {'score_thr': '(0)', 'nms_cfg': "{'iou_threshold': 0.5}", 'max_num': '(-1)'}), "(bbox_results_weak_pseudo['bbox_pred'],\n bbox_results_weak_pseudo['cls_score'], gt_labels[1], score_thr=0,\n nms_cfg={'iou_threshold': 0.5}, max_num=-1)\n", (19598, 19755), False, 'from mmdet.core import multiclass_nms, bbox_select_per_class\n'), ((21204, 21229), 'mmdet.core.bbox2roi', 'bbox2roi', (['[strong_bboxes]'], {}), '([strong_bboxes])\n', (21212, 21229), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((21250, 21272), 'mmdet.core.bbox2roi', 'bbox2roi', (['[oam_bboxes]'], {}), '([oam_bboxes])\n', (21258, 21272), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((24382, 24437), 'mmdet.core.bbox2roi', 'bbox2roi', (['[res.bboxes for res in [sampling_results[0]]]'], {}), '([res.bboxes for res in [sampling_results[0]]])\n', (24390, 24437), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((24458, 24513), 'mmdet.core.bbox2roi', 'bbox2roi', (['[res.bboxes for res in [sampling_results[1]]]'], {}), '([res.bboxes for res in [sampling_results[1]]])\n', (24466, 24513), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((26896, 27074), 'mmdet.core.bbox_select_per_class', 'bbox_select_per_class', (["bbox_results_weak_pseudo['bbox_pred']", "bbox_results_weak_pseudo['cls_score']", 'gt_labels[1]'], {'score_thr': '(0)', 'nms_cfg': "{'iou_threshold': 0.5}", 'max_num': '(-1)'}), "(bbox_results_weak_pseudo['bbox_pred'],\n bbox_results_weak_pseudo['cls_score'], gt_labels[1], score_thr=0,\n nms_cfg={'iou_threshold': 0.5}, max_num=-1)\n", (26917, 27074), False, 'from mmdet.core import multiclass_nms, bbox_select_per_class\n'), ((34436, 34494), 'torch.cat', 'torch.cat', (['[res.pos_gt_labels for res in sampling_results]'], {}), '([res.pos_gt_labels for res in sampling_results])\n', (34445, 34494), False, 'import torch\n'), ((36042, 36105), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes', 'det_labels', 'self.bbox_head.num_classes'], {}), '(det_bboxes, det_labels, self.bbox_head.num_classes)\n', (36053, 36105), False, 'from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\n'), ((36947, 36977), 'torch.onnx.is_in_onnx_export', 'torch.onnx.is_in_onnx_export', ([], {}), '()\n', (36975, 36977), False, 'import torch\n'), ((38002, 38021), 'mmdet.core.bbox2roi', 'bbox2roi', (['proposals'], {}), '(proposals)\n', (38010, 38021), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((40545, 40609), 'mmdet.core.bbox2result', 'bbox2result', (['_det_bboxes', 'det_labels', 'self.bbox_head.num_classes'], {}), '(_det_bboxes, det_labels, self.bbox_head.num_classes)\n', (40556, 40609), False, 'from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\n'), ((42480, 42546), 'mmdet.core.merge_aug_bboxes', 'merge_aug_bboxes', (['aug_bboxes', 'aug_scores', 'img_metas', 'rcnn_test_cfg'], {}), '(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n', (42496, 42546), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((42593, 42712), 'mmdet.core.multiclass_nms', 'multiclass_nms', (['merged_bboxes', 'merged_scores', 'rcnn_test_cfg.score_thr', 'rcnn_test_cfg.nms', 'rcnn_test_cfg.max_per_img'], {}), '(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img)\n', (42607, 42712), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((1613, 1652), 'mmdet.core.build_assigner', 'build_assigner', (['self.train_cfg.assigner'], {}), '(self.train_cfg.assigner)\n', (1627, 1652), False, 'from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\n'), ((1685, 1736), 'mmdet.core.build_sampler', 'build_sampler', (['self.train_cfg.sampler'], {'context': 'self'}), '(self.train_cfg.sampler, context=self)\n', (1698, 1736), False, 'from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\n'), ((33489, 33543), 'mmdet.core.bbox2roi', 'bbox2roi', (['[res.pos_bboxes for res in sampling_results]'], {}), '([res.pos_bboxes for res in sampling_results])\n', (33497, 33543), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((34141, 34160), 'torch.cat', 'torch.cat', (['pos_inds'], {}), '(pos_inds)\n', (34150, 34160), False, 'import torch\n'), ((37301, 37370), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes[i]', 'det_labels[i]', 'self.bbox_head.num_classes'], {}), '(det_bboxes[i], det_labels[i], self.bbox_head.num_classes)\n', (37312, 37370), False, 'from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\n'), ((41492, 41580), 'mmdet.core.bbox_mapping', 'bbox_mapping', (['proposal_list[0][:, :4]', 'img_shape', 'scale_factor', 'flip', 'flip_direction'], {}), '(proposal_list[0][:, :4], img_shape, scale_factor, flip,\n flip_direction)\n', (41504, 41580), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((41633, 41654), 'mmdet.core.bbox2roi', 'bbox2roi', (['[proposals]'], {}), '([proposals])\n', (41641, 41654), False, 'from mmdet.core import bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms\n'), ((16408, 16433), 'torch.unsqueeze', 'torch.unsqueeze', (['xx[0]', '(0)'], {}), '(xx[0], 0)\n', (16423, 16433), False, 'import torch\n'), ((16471, 16496), 'torch.unsqueeze', 'torch.unsqueeze', (['xx[1]', '(0)'], {}), '(xx[1], 0)\n', (16486, 16496), False, 'import torch\n'), ((21077, 21102), 'torch.unsqueeze', 'torch.unsqueeze', (['xx[0]', '(0)'], {}), '(xx[0], 0)\n', (21092, 21102), False, 'import torch\n'), ((21141, 21166), 'torch.unsqueeze', 'torch.unsqueeze', (['xx[1]', '(0)'], {}), '(xx[1], 0)\n', (21156, 21166), False, 'import torch\n'), ((24257, 24282), 'torch.unsqueeze', 'torch.unsqueeze', (['xx[0]', '(0)'], {}), '(xx[0], 0)\n', (24272, 24282), False, 'import torch\n'), ((24320, 24345), 'torch.unsqueeze', 'torch.unsqueeze', (['xx[1]', '(0)'], {}), '(xx[1], 0)\n', (24335, 24345), False, 'import torch\n'), ((33776, 33845), 'torch.ones', 'torch.ones', (['res.pos_bboxes.shape[0]'], {'device': 'device', 'dtype': 'torch.uint8'}), '(res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)\n', (33786, 33845), False, 'import torch\n'), ((33973, 34043), 'torch.zeros', 'torch.zeros', (['res.neg_bboxes.shape[0]'], {'device': 'device', 'dtype': 'torch.uint8'}), '(res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)\n', (33984, 34043), False, 'import torch\n')]
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import i2c, sensor
from esphome.const import CONF_HUMIDITY, CONF_ID, CONF_TEMPERATURE, \
UNIT_CELSIUS, ICON_THERMOMETER, ICON_WATER_PERCENT, UNIT_PERCENT
DEPENDENCIES = ['i2c']
am2320_ns = cg.esphome_ns.namespace('am2320')
AM2320Component = am2320_ns.class_('AM2320Component', cg.PollingComponent, i2c.I2CDevice)
CONFIG_SCHEMA = cv.Schema({
cv.GenerateID(): cv.declare_id(AM2320Component),
cv.Optional(CONF_TEMPERATURE): sensor.sensor_schema(UNIT_CELSIUS, ICON_THERMOMETER, 1),
cv.Optional(CONF_HUMIDITY): sensor.sensor_schema(UNIT_PERCENT, ICON_WATER_PERCENT, 1),
}).extend(cv.polling_component_schema('60s')).extend(i2c.i2c_device_schema(0x5C))
def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
yield cg.register_component(var, config)
yield i2c.register_i2c_device(var, config)
if CONF_TEMPERATURE in config:
sens = yield sensor.new_sensor(config[CONF_TEMPERATURE])
cg.add(var.set_temperature_sensor(sens))
if CONF_HUMIDITY in config:
sens = yield sensor.new_sensor(config[CONF_HUMIDITY])
cg.add(var.set_humidity_sensor(sens))
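# Illustrative ESPHome YAML for this platform (sensor names are placeholders; the
# keys mirror CONFIG_SCHEMA above, with a 60s default polling interval and I2C
# address 0x5C):
#
# sensor:
#   - platform: am2320
#     temperature:
#       name: "AM2320 Temperature"
#     humidity:
#       name: "AM2320 Humidity"
#     update_interval: 60s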
|
[
"esphome.config_validation.declare_id",
"esphome.components.sensor.sensor_schema",
"esphome.components.i2c.register_i2c_device",
"esphome.config_validation.GenerateID",
"esphome.components.i2c.i2c_device_schema",
"esphome.config_validation.polling_component_schema",
"esphome.codegen.new_Pvariable",
"esphome.components.sensor.new_sensor",
"esphome.config_validation.Optional",
"esphome.codegen.esphome_ns.namespace",
"esphome.codegen.register_component"
] |
[((287, 320), 'esphome.codegen.esphome_ns.namespace', 'cg.esphome_ns.namespace', (['"""am2320"""'], {}), "('am2320')\n", (310, 320), True, 'import esphome.codegen as cg\n'), ((729, 754), 'esphome.components.i2c.i2c_device_schema', 'i2c.i2c_device_schema', (['(92)'], {}), '(92)\n', (750, 754), False, 'from esphome.components import i2c, sensor\n'), ((791, 824), 'esphome.codegen.new_Pvariable', 'cg.new_Pvariable', (['config[CONF_ID]'], {}), '(config[CONF_ID])\n', (807, 824), True, 'import esphome.codegen as cg\n'), ((835, 869), 'esphome.codegen.register_component', 'cg.register_component', (['var', 'config'], {}), '(var, config)\n', (856, 869), True, 'import esphome.codegen as cg\n'), ((880, 916), 'esphome.components.i2c.register_i2c_device', 'i2c.register_i2c_device', (['var', 'config'], {}), '(var, config)\n', (903, 916), False, 'from esphome.components import i2c, sensor\n'), ((686, 720), 'esphome.config_validation.polling_component_schema', 'cv.polling_component_schema', (['"""60s"""'], {}), "('60s')\n", (713, 720), True, 'import esphome.config_validation as cv\n'), ((974, 1017), 'esphome.components.sensor.new_sensor', 'sensor.new_sensor', (['config[CONF_TEMPERATURE]'], {}), '(config[CONF_TEMPERATURE])\n', (991, 1017), False, 'from esphome.components import i2c, sensor\n'), ((1121, 1161), 'esphome.components.sensor.new_sensor', 'sensor.new_sensor', (['config[CONF_HUMIDITY]'], {}), '(config[CONF_HUMIDITY])\n', (1138, 1161), False, 'from esphome.components import i2c, sensor\n'), ((444, 459), 'esphome.config_validation.GenerateID', 'cv.GenerateID', ([], {}), '()\n', (457, 459), True, 'import esphome.config_validation as cv\n'), ((497, 526), 'esphome.config_validation.Optional', 'cv.Optional', (['CONF_TEMPERATURE'], {}), '(CONF_TEMPERATURE)\n', (508, 526), True, 'import esphome.config_validation as cv\n'), ((589, 615), 'esphome.config_validation.Optional', 'cv.Optional', (['CONF_HUMIDITY'], {}), '(CONF_HUMIDITY)\n', (600, 615), True, 'import esphome.config_validation as cv\n'), ((461, 491), 'esphome.config_validation.declare_id', 'cv.declare_id', (['AM2320Component'], {}), '(AM2320Component)\n', (474, 491), True, 'import esphome.config_validation as cv\n'), ((528, 583), 'esphome.components.sensor.sensor_schema', 'sensor.sensor_schema', (['UNIT_CELSIUS', 'ICON_THERMOMETER', '(1)'], {}), '(UNIT_CELSIUS, ICON_THERMOMETER, 1)\n', (548, 583), False, 'from esphome.components import i2c, sensor\n'), ((617, 674), 'esphome.components.sensor.sensor_schema', 'sensor.sensor_schema', (['UNIT_PERCENT', 'ICON_WATER_PERCENT', '(1)'], {}), '(UNIT_PERCENT, ICON_WATER_PERCENT, 1)\n', (637, 674), False, 'from esphome.components import i2c, sensor\n')]
|
import ast
import os
import dj_database_url
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir))
SECRET_KEY = os.environ.get("SECRET_KEY", "UNKNOWN")
DEBUG = ast.literal_eval(os.environ.get('DEBUG', 'False'))
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
ROOT_URLCONF = 'estate.urls'
WSGI_APPLICATION = 'estate.wsgi.application'
ALLOWED_HOSTS = [
'*',
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': dj_database_url.config(default=os.environ.get("DATABASE_URL"))
}
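# dj_database_url.config() expects a single URL such as
# postgres://USER:PASSWORD@HOST:PORT/NAME (mysql:// and sqlite:/// follow the same
# pattern); the uppercase parts are placeholders, not real credentials.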
TERRAFORM_ELASTICACHE_URL = os.environ.get("TERRAFORM_ELASTICACHE_URL")
if TERRAFORM_ELASTICACHE_URL:
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache"
},
"terraform": {
"BACKEND": "django_elasticache.memcached.ElastiCache",
"LOCATION": TERRAFORM_ELASTICACHE_URL,
}
}
else:
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache"
},
"terraform": {
"BACKEND": "django.core.cache.backends.filebased.FileBasedCache",
"LOCATION": "/tmp/django_cache_terraform"
}
}
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'assets'),
)
|
[
"os.environ.get",
"os.path.dirname",
"os.path.join"
] |
[((143, 182), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""', '"""UNKNOWN"""'], {}), "('SECRET_KEY', 'UNKNOWN')\n", (157, 182), False, 'import os\n'), ((1799, 1842), 'os.environ.get', 'os.environ.get', (['"""TERRAFORM_ELASTICACHE_URL"""'], {}), "('TERRAFORM_ELASTICACHE_URL')\n", (1813, 1842), False, 'import os\n'), ((2479, 2515), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""static"""'], {}), "(PROJECT_ROOT, 'static')\n", (2491, 2515), False, 'import os\n'), ((209, 241), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""', '"""False"""'], {}), "('DEBUG', 'False')\n", (223, 241), False, 'import os\n'), ((2623, 2659), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""assets"""'], {}), "(PROJECT_ROOT, 'assets')\n", (2635, 2659), False, 'import os\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os\n'), ((1259, 1298), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '"""templates"""'], {}), "(PROJECT_ROOT, 'templates')\n", (1271, 1298), False, 'import os\n'), ((1736, 1766), 'os.environ.get', 'os.environ.get', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (1750, 1766), False, 'import os\n')]
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from accounts.models import Player
class PlayerInline(admin.StackedInline):
model = Player
class UserAdmin(BaseUserAdmin):
inlines = (PlayerInline,)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Player)
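# Unregistering the stock User admin and re-registering it with PlayerInline makes
# the related Player data editable directly on each user's admin change page.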
|
[
"django.contrib.admin.site.register",
"django.contrib.admin.site.unregister"
] |
[((306, 333), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['User'], {}), '(User)\n', (327, 333), False, 'from django.contrib import admin\n'), ((334, 370), 'django.contrib.admin.site.register', 'admin.site.register', (['User', 'UserAdmin'], {}), '(User, UserAdmin)\n', (353, 370), False, 'from django.contrib import admin\n'), ((371, 398), 'django.contrib.admin.site.register', 'admin.site.register', (['Player'], {}), '(Player)\n', (390, 398), False, 'from django.contrib import admin\n')]
|
"""
Connect to a BL-NET via it's web interface and read and write data
TODO: as component
"""
import logging
import voluptuous as vol
from homeassistant.helpers.discovery import load_platform
from homeassistant.const import (
CONF_RESOURCE, CONF_PASSWORD, CONF_SCAN_INTERVAL, TEMP_CELSIUS,
)
from homeassistant.helpers.event import async_track_time_interval
from datetime import timedelta
from datetime import datetime
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = [
'pyblnet==0.8.0'
]
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'blnet'
CONF_WEB_PORT = 'web_port'
CONF_TA_PORT = 'ta_port'
CONF_USE_WEB = 'use_web'
CONF_USE_TA = 'use_ta'
CONF_NODE = 'can_node'
# Defaults
DEFAULT_WEB_PORT = 80
DEFAULT_TA_PORT = 40000
# scan every 6 minutes per default
DEFAULT_SCAN_INTERVAL = 360
UNIT = {
'analog': TEMP_CELSIUS,
'speed': 'rpm',
'power': 'kW',
'energy': 'kWh'
}
ICON = {
'analog': 'mdi:thermometer',
'speed': 'mdi:speedometer',
'power': 'mdi:power-plug',
'energy': 'mdi:power-plug'
}
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_RESOURCE): cv.url,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NODE): cv.positive_int,
vol.Optional(CONF_SCAN_INTERVAL,
default=DEFAULT_SCAN_INTERVAL): cv.positive_int,
vol.Optional(CONF_WEB_PORT, default=DEFAULT_WEB_PORT): cv.positive_int,
vol.Optional(CONF_TA_PORT, default=DEFAULT_TA_PORT): cv.positive_int,
vol.Optional(CONF_USE_WEB, default=True): cv.boolean,
vol.Optional(CONF_USE_TA, default=False): cv.boolean
}),
}, extra=vol.ALLOW_EXTRA)
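# Illustrative configuration.yaml entry (host and password are placeholders; only
# `resource` is required, the remaining keys fall back to the defaults above):
#
# blnet:
#   resource: http://192.168.178.10
#   password: !secret blnet_password
#   can_node: 1
#   scan_interval: 360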
def setup(hass, config):
"""Set up the BLNET component"""
from pyblnet import BLNET, test_blnet
config = config[DOMAIN]
resource = config.get(CONF_RESOURCE)
password = config.get(CONF_PASSWORD)
can_node = config.get(CONF_NODE)
scan_interval = config.get(CONF_SCAN_INTERVAL)
web_port = config.get(CONF_WEB_PORT)
ta_port = config.get(CONF_TA_PORT)
use_web = config.get(CONF_USE_WEB)
use_ta = config.get(CONF_USE_TA)
# Initialize the BL-NET sensor
try:
blnet = BLNET(resource, password=password, web_port=web_port,
ta_port=ta_port, use_web=use_web, use_ta=use_ta)
except (ValueError, AssertionError) as ex:
if isinstance(ex, ValueError):
_LOGGER.error("No BL-Net reached at {}".format(resource))
else:
_LOGGER.error("Configuration invalid: {}".format(ex))
return False
# set the communication entity
hass.data["DATA_{}".format(DOMAIN)] = BLNETComm(blnet, can_node)
# make sure the communication device gets updated once in a while
def fetch_data(*_):
return hass.data["DATA_{}".format(DOMAIN)].update()
# Get the latest data from REST API and load
# sensors and switches accordingly
data = fetch_data()
async_track_time_interval(hass,
fetch_data,
timedelta(seconds=scan_interval))
i = 0
# iterate through the list and create a sensor for every value
for domain in ['analog', 'speed', 'power', 'energy']:
for sensor_id in data[domain]:
_LOGGER.info("Discovered {} sensor {} in use, adding".format(domain, sensor_id))
i+=1
disc_info = {
'name': '{} {} {}'.format(DOMAIN, domain, sensor_id),
'domain': domain,
'id': sensor_id
}
load_platform(hass, 'sensor', DOMAIN, disc_info, config)
# iterate through the list and create a sensor for every value
for sensor_id in data['digital']:
_LOGGER.info("Discovered digital sensor {} in use, adding".format(sensor_id))
i+=1
disc_info = {
'name': '{} digital {}'.format(DOMAIN, sensor_id),
'id': sensor_id,
'domain': 'digital'
}
if use_web:
component = 'switch'
else:
component = 'sensor'
load_platform(hass, component, DOMAIN, disc_info, config)
_LOGGER.info("Added overall {} sensors".format(i))
return True
class BLNETComm(object):
"""Implementation of a BL-NET - UVR1611 communication component"""
def __init__(self, blnet, node):
self.blnet = blnet
self.node = node
# Map id -> attributes
self.data = {}
self._last_updated = None
def turn_on(self, switch_id):
# only change active node if this is desired
self.blnet.turn_on(switch_id, self.node)
def turn_off(self, switch_id):
# only change active node if this is desired
self.blnet.turn_off(switch_id, self.node)
def turn_auto(self, switch_id):
# only change active node if this is desired
self.blnet.turn_auto(switch_id, self.node)
def last_updated(self):
return self._last_updated
def update(self):
"""Get the latest data from BLNET and update the state."""
data = self.blnet.fetch(self.node)
for domain in ['analog', 'speed', 'power', 'energy']:
# iterate through the list and create a sensor for every value
for key, sensor in data.get(domain, {}).items():
attributes = {}
entity_id = '{} {} {}'.format(DOMAIN, domain, key)
attributes['value'] = sensor.get('value')
attributes['unit_of_measurement'] = sensor.get('unit_of_measurement',
UNIT[domain])
attributes['friendly_name'] = sensor.get('name')
attributes['icon'] = ICON[domain]
self.data[entity_id] = attributes
# iterate through the list and create a sensor for every value
for key, sensor in data.get('digital', {}).items():
attributes = {}
entity_id = '{} digital {}'.format(DOMAIN, key)
attributes['friendly_name'] = sensor.get('name')
attributes['mode'] = sensor.get('mode')
attributes['value'] = sensor.get('value')
# Change the symbol according to current mode and setting
# Automated switch => gear symbol
if sensor.get('mode') == 'AUTO':
attributes['icon'] = 'mdi:settings'
# Nonautomated switch, toggled on => switch on
elif sensor.get('mode') == 'EIN':
attributes['icon'] = 'mdi:toggle-switch'
# Nonautomated switch, toggled off => switch off
else:
attributes['icon'] = 'mdi:toggle-switch-off'
self.data[entity_id] = attributes
# save that the data was updated now
self._last_updated = datetime.now()
return data
|
[
"voluptuous.Optional",
"voluptuous.Required",
"pyblnet.BLNET",
"homeassistant.helpers.discovery.load_platform",
"datetime.timedelta",
"datetime.datetime.now",
"logging.getLogger"
] |
[((530, 557), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (547, 557), False, 'import logging\n'), ((2205, 2311), 'pyblnet.BLNET', 'BLNET', (['resource'], {'password': 'password', 'web_port': 'web_port', 'ta_port': 'ta_port', 'use_web': 'use_web', 'use_ta': 'use_ta'}), '(resource, password=password, web_port=web_port, ta_port=ta_port,\n use_web=use_web, use_ta=use_ta)\n', (2210, 2311), False, 'from pyblnet import BLNET, test_blnet\n'), ((3069, 3101), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'scan_interval'}), '(seconds=scan_interval)\n', (3078, 3101), False, 'from datetime import timedelta\n'), ((4102, 4159), 'homeassistant.helpers.discovery.load_platform', 'load_platform', (['hass', 'component', 'DOMAIN', 'disc_info', 'config'], {}), '(hass, component, DOMAIN, disc_info, config)\n', (4115, 4159), False, 'from homeassistant.helpers.discovery import load_platform\n'), ((6831, 6845), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6843, 6845), False, 'from datetime import datetime\n'), ((3576, 3632), 'homeassistant.helpers.discovery.load_platform', 'load_platform', (['hass', '"""sensor"""', 'DOMAIN', 'disc_info', 'config'], {}), "(hass, 'sensor', DOMAIN, disc_info, config)\n", (3589, 3632), False, 'from homeassistant.helpers.discovery import load_platform\n'), ((1121, 1148), 'voluptuous.Required', 'vol.Required', (['CONF_RESOURCE'], {}), '(CONF_RESOURCE)\n', (1133, 1148), True, 'import voluptuous as vol\n'), ((1166, 1193), 'voluptuous.Optional', 'vol.Optional', (['CONF_PASSWORD'], {}), '(CONF_PASSWORD)\n', (1178, 1193), True, 'import voluptuous as vol\n'), ((1214, 1237), 'voluptuous.Optional', 'vol.Optional', (['CONF_NODE'], {}), '(CONF_NODE)\n', (1226, 1237), True, 'import voluptuous as vol\n'), ((1264, 1327), 'voluptuous.Optional', 'vol.Optional', (['CONF_SCAN_INTERVAL'], {'default': 'DEFAULT_SCAN_INTERVAL'}), '(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL)\n', (1276, 1327), True, 'import voluptuous as vol\n'), ((1375, 1428), 'voluptuous.Optional', 'vol.Optional', (['CONF_WEB_PORT'], {'default': 'DEFAULT_WEB_PORT'}), '(CONF_WEB_PORT, default=DEFAULT_WEB_PORT)\n', (1387, 1428), True, 'import voluptuous as vol\n'), ((1455, 1506), 'voluptuous.Optional', 'vol.Optional', (['CONF_TA_PORT'], {'default': 'DEFAULT_TA_PORT'}), '(CONF_TA_PORT, default=DEFAULT_TA_PORT)\n', (1467, 1506), True, 'import voluptuous as vol\n'), ((1533, 1573), 'voluptuous.Optional', 'vol.Optional', (['CONF_USE_WEB'], {'default': '(True)'}), '(CONF_USE_WEB, default=True)\n', (1545, 1573), True, 'import voluptuous as vol\n'), ((1595, 1635), 'voluptuous.Optional', 'vol.Optional', (['CONF_USE_TA'], {'default': '(False)'}), '(CONF_USE_TA, default=False)\n', (1607, 1635), True, 'import voluptuous as vol\n')]
|
from FEV_KEGG.Graph import SubstanceGraphs
from FEV_KEGG.Graph.Elements import ReactionID, EcNumber
from FEV_KEGG.Graph.SubstanceGraphs import SubstanceEcGraph, SubstanceReactionGraph
from FEV_KEGG.KEGG.File import cache
from FEV_KEGG.KEGG.Organism import Organism
from FEV_KEGG.settings import verbosity as init_verbosity
class NUKA(object):
def __init__(self):
"""
This is a hypothetical 'complete' organism - NUKA - which possesses all EC numbers known to all metabolic KEGG pathways.
Conversions to other graph types are not possible, because as a hypothetical organism, NUKA has no genes.
Attributes
----------
self.nameAbbreviation : str
"""
self.nameAbbreviation = 'NUKA'
@property
@cache(folder_path = 'NUKA/graph', file_name = 'SubstanceReactionGraph')
def substanceReactionGraph(self) -> SubstanceReactionGraph:
"""
NUKA's substance-reaction graph.
Returns
-------
SubstanceReactionGraph
Contains all substrates/products and all reactions known to KEGG's metabolic pathways.
Raises
------
HTTPError
If any underlying organism, pathway, or gene does not exist.
URLError
If connection to KEGG fails.
Note
----
This SubstanceReactionGraph can **NOT** be converted into a SubstanceGeneGraph, as the pathways do not contain gene information!
"""
        mockOrganism = Organism('ec') # 'ec' is not an organism abbreviation, but merely describes that pathways shall contain EC numbers as edges. This returns the full pathways, not specific to any species.
pathwaysSet = mockOrganism.getMetabolicPathways(includeOverviewMaps = False)
substanceReactionGraph = SubstanceGraphs.Conversion.KeggPathwaySet2SubstanceReactionGraph(pathwaysSet, localVerbosity = 0)
substanceReactionGraph.name = 'Substance-Reaction NUKA'
if init_verbosity > 0:
print('calculated ' + substanceReactionGraph.name)
return substanceReactionGraph
@property
@cache(folder_path = 'NUKA/graph', file_name = 'SubstanceEcGraph')
def substanceEcGraph(self) -> SubstanceEcGraph:
"""
NUKA's substance-EC graph.
Returns
-------
SubstanceEcGraph
Contains all substrates/products and all EC numbers known to KEGG's metabolic pathways.
Raises
------
HTTPError
If any underlying organism, pathway, or gene does not exist.
URLError
If connection to KEGG fails.
"""
return self._SubstanceReactionGraph2SubstanceEcGraph(self.substanceReactionGraph)
def _SubstanceReactionGraph2SubstanceEcGraph(self, speciesSubstanceReactionGraph: SubstanceReactionGraph) -> SubstanceEcGraph:
"""
Converts NUKA's substance-reaction graph into a substance-EC graph. Uses pathway information embedded into the graph object.
Parameters
----------
speciesSubstanceReactionGraph : SubstanceReactionGraph
NUKA's substance-reaction graph.
Returns
-------
SubstanceEcGraph
NUKA's substance-EC graph.
Warnings
--------
This function is special to NUKA and **MUST NOT** be used anywhere else!
"""
# shallow-copy old graph to new graph
graph = SubstanceEcGraph(speciesSubstanceReactionGraph.underlyingRawGraph)
graph.name = 'Substance-EC NUKA'
# create dict of replacements: reaction -> {EC numbers}
replacementDict = dict()
# for each embedded pathway, get list of 'enzyme' entries
for pathway in speciesSubstanceReactionGraph.pathwaySet:
ecEntryList = [e for e in pathway.entries.values() if e.type == 'enzyme']
# for each EC number, get reactions in which it is involved
for ecEntry in ecEntryList:
reactionIDList = ecEntry.reaction.split()
if len(reactionIDList) > 0: # filter EC numbers not associated with any reaction
ecNumberList = ecEntry.name.split()
# replace each reaction with its associated EC number
for reactionID in reactionIDList:
reactionName = reactionID.split(':', 1)[1]
reaction = ReactionID(reactionName)
# save associated EC numbers in a set
ecNumberSet = set()
for ecNumberString in ecNumberList:
ecNumber = EcNumber(ecNumberString.replace('ec:', ''))
ecNumberSet.add(ecNumber)
# update the replacement dict for the current reaction, adding the newly created EC number set
replacementSet = replacementDict.get(reaction, None)
if replacementSet == None or replacementSet.__class__ != set:
replacementSet = set()
replacementSet.update(ecNumberSet)
replacementDict[reaction] = replacementSet
# get list of all reaction edges. Copy edge list to prevent changes in-place, which would NOT work
edgeList = list(graph.getEdges())
# replace reaction edges with EC number edges, using replacement dict
for edge in edgeList:
substrate, product, reaction = edge
# delete old edge
graph.removeEdge(substrate, product, reaction, False)
# add new edges, according to replacement dict
replacementSet = replacementDict[reaction]
for ecNumber in replacementSet:
graph.addEC(substrate, product, ecNumber, False)
if init_verbosity > 0:
print('calculated ' + graph.name)
return graph
|
[
"FEV_KEGG.KEGG.File.cache",
"FEV_KEGG.Graph.SubstanceGraphs.Conversion.KeggPathwaySet2SubstanceReactionGraph",
"FEV_KEGG.Graph.Elements.ReactionID",
"FEV_KEGG.Graph.SubstanceGraphs.SubstanceEcGraph",
"FEV_KEGG.KEGG.Organism.Organism"
] |
[((805, 872), 'FEV_KEGG.KEGG.File.cache', 'cache', ([], {'folder_path': '"""NUKA/graph"""', 'file_name': '"""SubstanceReactionGraph"""'}), "(folder_path='NUKA/graph', file_name='SubstanceReactionGraph')\n", (810, 872), False, 'from FEV_KEGG.KEGG.File import cache\n'), ((2203, 2264), 'FEV_KEGG.KEGG.File.cache', 'cache', ([], {'folder_path': '"""NUKA/graph"""', 'file_name': '"""SubstanceEcGraph"""'}), "(folder_path='NUKA/graph', file_name='SubstanceEcGraph')\n", (2208, 2264), False, 'from FEV_KEGG.KEGG.File import cache\n'), ((1560, 1574), 'FEV_KEGG.KEGG.Organism.Organism', 'Organism', (['"""ec"""'], {}), "('ec')\n", (1568, 1574), False, 'from FEV_KEGG.KEGG.Organism import Organism\n'), ((1862, 1961), 'FEV_KEGG.Graph.SubstanceGraphs.Conversion.KeggPathwaySet2SubstanceReactionGraph', 'SubstanceGraphs.Conversion.KeggPathwaySet2SubstanceReactionGraph', (['pathwaysSet'], {'localVerbosity': '(0)'}), '(pathwaysSet,\n localVerbosity=0)\n', (1926, 1961), False, 'from FEV_KEGG.Graph import SubstanceGraphs\n'), ((3580, 3646), 'FEV_KEGG.Graph.SubstanceGraphs.SubstanceEcGraph', 'SubstanceEcGraph', (['speciesSubstanceReactionGraph.underlyingRawGraph'], {}), '(speciesSubstanceReactionGraph.underlyingRawGraph)\n', (3596, 3646), False, 'from FEV_KEGG.Graph.SubstanceGraphs import SubstanceEcGraph, SubstanceReactionGraph\n'), ((4607, 4631), 'FEV_KEGG.Graph.Elements.ReactionID', 'ReactionID', (['reactionName'], {}), '(reactionName)\n', (4617, 4631), False, 'from FEV_KEGG.Graph.Elements import ReactionID, EcNumber\n')]
|
#! /usr/bin/env python
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# standard lib only makes it easier to run
import re
import os
import sys
import json
from subprocess import check_call
from os.path import join as pjoin
from hashlib import md5
def sanitize(dirty):
# for sanitizing URIs/filenames
# inspired by datacache
clean = re.sub(r'/|\\|;|:|\?|=', '_', dirty)
if len(clean) > 150:
prefix = md5(dirty).hexdigest()
clean = prefix + clean[-114:]
return clean
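# Illustrative example (not part of the original script):
#   sanitize("http://example.com/a?b=1") -> "http___example.com_a_b_1"
# URI separator characters become underscores; results longer than 150 characters
# are prefixed with an md5 digest and truncated to keep filenames manageable.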
def uri_to_sanitized_filename(source_uri, decompress=False):
# inspired by datacache
digest = md5(source_uri.encode('utf-8')).hexdigest()
filename = '{digest}.{sanitized_uri}'.format(
digest=digest, sanitized_uri=sanitize(source_uri))
if decompress:
(base, ext) = os.path.splitext(filename)
if ext == '.gz':
filename = base
return filename
for line in sys.stdin:
resource = json.loads(line.split('\t', 1)[1])
# compute dest filename
staging_path = os.environ['STAGING_PATH']
decompress = resource['compression'] in ['gzip']
dest_name = uri_to_sanitized_filename(resource['url'],
decompress=decompress)
dest_path = pjoin(staging_path, dest_name)
# construct dnload cmd (straight into HDFS)
pipeline = ['curl -L {0}'.format(resource['url'])]
if resource['compression'] == 'gzip':
pipeline.append('gunzip')
pipeline.append('hadoop fs -put - {0}'.format(dest_path))
# ensure staging path exists
check_call('hadoop fs -mkdir -p {0}'.format(staging_path), shell=True)
# execute dnload
cmd = ' | '.join(pipeline)
check_call(cmd, shell=True)
# dummy output
sys.stdout.write('{0}\t1\n'.format(dest_path))
|
[
"hashlib.md5",
"os.path.join",
"os.path.splitext",
"re.sub",
"subprocess.check_call"
] |
[((1073, 1111), 're.sub', 're.sub', (['"""/|\\\\\\\\|;|:|\\\\?|="""', '"""_"""', 'dirty'], {}), "('/|\\\\\\\\|;|:|\\\\?|=', '_', dirty)\n", (1079, 1111), False, 'import re\n'), ((1975, 2005), 'os.path.join', 'pjoin', (['staging_path', 'dest_name'], {}), '(staging_path, dest_name)\n', (1980, 2005), True, 'from os.path import join as pjoin\n'), ((2418, 2445), 'subprocess.check_call', 'check_call', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (2428, 2445), False, 'from subprocess import check_call\n'), ((1528, 1554), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1544, 1554), False, 'import os\n'), ((1152, 1162), 'hashlib.md5', 'md5', (['dirty'], {}), '(dirty)\n', (1155, 1162), False, 'from hashlib import md5\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from abc import ABC
from typing import Dict, List, Optional, Type, Union
import attr
from django.conf import settings
from .client import IAMClient
from .exceptions import AttrValidationError, PermissionDeniedError
from .request import ActionResourcesRequest, IAMResource, ResourceRequest
logger = logging.getLogger(__name__)
def validate_empty(instance, attribute, value):
"""用于校验属性是否为空. https://www.attrs.org/en/20.2.0/init.html#callables"""
if not value:
raise AttrValidationError(f"{attribute.name} must not be empty")
@attr.dataclass
class ResCreatorAction:
"""用于新建关联属性授权接口"""
creator: str
project_id: str
resource_type: str
def __attrs_post_init__(self):
self.system = settings.BK_IAM_SYSTEM_ID
def to_data(self) -> Dict:
return {'creator': self.creator, 'system': self.system, 'type': self.resource_type}
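# Illustrative usage (hypothetical values):
#   ResCreatorAction(creator='admin', project_id='p1', resource_type='cluster').to_data()
#   -> {'creator': 'admin', 'system': settings.BK_IAM_SYSTEM_ID, 'type': 'cluster'}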
@attr.s(kw_only=True)
class PermCtx:
"""
    Context holding the parameters of a permission check
    note: because force_raise has a default value, attributes of its subclasses must also define default values
"""
username = attr.ib(validator=[attr.validators.instance_of(str), validate_empty])
    # If True, skip the permission check and raise the permission-denied exception directly
force_raise = attr.ib(validator=[attr.validators.instance_of(bool)], default=False)
@classmethod
def from_dict(cls, init_data: Dict) -> 'PermCtx':
return cls(username=init_data['username'], force_raise=init_data['force_raise'])
def validate_resource_id(self):
"""校验资源实例 ID. 如果校验不过,抛出 AttrValidationError 异常"""
if not self.resource_id:
raise AttrValidationError('missing valid resource_id')
@property
def resource_id(self) -> str:
"""注册到权限中心的资源实例 ID. 空字符串表示实例无关"""
return ''
def get_parent_chain(self) -> List[IAMResource]:
return []
class Permission(ABC, IAMClient):
"""
    Base permission class for integrating with IAM
"""
resource_type: str = ''
perm_ctx_cls: Type[PermCtx] = PermCtx
resource_request_cls: Type[ResourceRequest] = ResourceRequest
    parent_res_perm: Optional['Permission'] = None  # permission object of the parent resource
def can_action(self, perm_ctx: PermCtx, action_id: str, raise_exception: bool, use_cache: bool = False) -> bool:
"""
        Check whether the user has permission for action_id
        :param perm_ctx: context of the permission check
        :param action_id: resource action ID
        :param raise_exception: whether to raise an exception when permission is denied
        :param use_cache: whether to check the permission against a local cache (1 min TTL); used for non-sensitive actions such as view
"""
if perm_ctx.force_raise:
self._raise_permission_denied_error(perm_ctx, action_id)
is_allowed = self._can_action(perm_ctx, action_id, use_cache)
if raise_exception and not is_allowed:
self._raise_permission_denied_error(perm_ctx, action_id)
return is_allowed
def can_multi_actions(self, perm_ctx: PermCtx, action_ids: List[str], raise_exception: bool) -> bool:
"""
        Check multiple action permissions on a single resource of the same type
        :param perm_ctx: context of the permission check
        :param action_ids: list of resource action IDs
        :param raise_exception: whether to raise an exception when permission is denied
        :return: True only if every action_id is allowed; otherwise False, or an exception is raised
"""
perm_ctx.validate_resource_id()
        # perms looks like {'project_view': True, 'project_edit': False}
if perm_ctx.force_raise:
perms = {action_id: False for action_id in action_ids}
else:
res_request = self.make_res_request(perm_ctx)
perms = self.resource_inst_multi_actions_allowed(
perm_ctx.username, action_ids, resources=res_request.make_resources(perm_ctx.resource_id)
)
return self._can_multi_actions(perm_ctx, perms, raise_exception)
def resources_actions_allowed(
self, username: str, action_ids: List[str], res_ids: Union[List[str], str], res_request: ResourceRequest
):
"""
        Check whether the user has several specified action permissions on some resources. The current SDK only supports resources of the same type.
        :return example: {'0ad86c25363f4ef8adcb7ac67a483837': {'project_view': True, 'project_edit': False}}
"""
return self.batch_resource_multi_actions_allowed(username, action_ids, res_request.make_resources(res_ids))
def grant_resource_creator_actions(self, creator_action: ResCreatorAction):
"""
        Register the creator's associated action permissions on the resource when it is created.
        note: the concrete associated actions are defined by the resource_creator_actions field of the permission model
"""
return self.iam._client.grant_resource_creator_actions(None, creator_action.creator, creator_action.to_data())
def has_parent_resource(self) -> bool:
return self.parent_res_perm is not None
def make_res_request(self, perm_ctx: PermCtx) -> ResourceRequest:
return self.resource_request_cls.from_dict(attr.asdict(perm_ctx))
def _can_action(self, perm_ctx: PermCtx, action_id: str, use_cache: bool = False) -> bool:
res_id = perm_ctx.resource_id
        if res_id:  # related to the current resource instance
res_request = self.make_res_request(perm_ctx)
resources = res_request.make_resources(res_id)
return self.resource_inst_allowed(perm_ctx.username, action_id, resources, use_cache)
        # not related to the current resource instance and no associated parent resource: treat as instance-independent
if not self.has_parent_resource():
return self.resource_type_allowed(perm_ctx.username, action_id, use_cache)
        # there is an associated parent resource
p_perm_ctx = self.parent_res_perm.perm_ctx_cls.from_dict(attr.asdict(perm_ctx))
res_request = self.parent_res_perm.make_res_request(p_perm_ctx)
resources = res_request.make_resources(p_perm_ctx.resource_id)
return self.resource_inst_allowed(perm_ctx.username, action_id, resources, use_cache)
def _can_multi_actions(self, perm_ctx: PermCtx, perms: Dict[str, bool], raise_exception: bool) -> bool:
messages = []
action_request_list = []
for action_id, is_allowed in perms.items():
if is_allowed:
continue
try:
self._raise_permission_denied_error(perm_ctx, action_id)
except PermissionDeniedError as e:
messages.append(e.message)
action_request_list.extend(e.action_request_list)
if not messages:
return True
if not raise_exception:
return False
raise PermissionDeniedError(
message=';'.join(messages), username=perm_ctx.username, action_request_list=action_request_list
)
def _raise_permission_denied_error(self, perm_ctx: PermCtx, action_id: str):
"""抛出 PermissionDeniedError 异常, 其中 username 和 action_request_list 可用于生成权限申请跳转链接"""
res_id = perm_ctx.resource_id
resources = None
resource_type = self.resource_type
parent_chain = None
if res_id:
resources = [res_id]
parent_chain = perm_ctx.get_parent_chain()
elif self.has_parent_resource():
resource_type = self.parent_res_perm.resource_type
p_perm_ctx = self.parent_res_perm.perm_ctx_cls.from_dict(attr.asdict(perm_ctx))
resources = [p_perm_ctx.resource_id]
parent_chain = p_perm_ctx.get_parent_chain()
raise PermissionDeniedError(
f"no {action_id} permission",
username=perm_ctx.username,
action_request_list=[ActionResourcesRequest(action_id, resource_type, resources, parent_chain)],
)
|
[
"attr.s",
"attr.asdict",
"attr.validators.instance_of",
"logging.getLogger"
] |
[((1054, 1081), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1071, 1081), False, 'import logging\n'), ((1634, 1654), 'attr.s', 'attr.s', ([], {'kw_only': '(True)'}), '(kw_only=True)\n', (1640, 1654), False, 'import attr\n'), ((5293, 5314), 'attr.asdict', 'attr.asdict', (['perm_ctx'], {}), '(perm_ctx)\n', (5304, 5314), False, 'import attr\n'), ((5955, 5976), 'attr.asdict', 'attr.asdict', (['perm_ctx'], {}), '(perm_ctx)\n', (5966, 5976), False, 'import attr\n'), ((1778, 1810), 'attr.validators.instance_of', 'attr.validators.instance_of', (['str'], {}), '(str)\n', (1805, 1810), False, 'import attr\n'), ((1904, 1937), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (1931, 1937), False, 'import attr\n'), ((7583, 7604), 'attr.asdict', 'attr.asdict', (['perm_ctx'], {}), '(perm_ctx)\n', (7594, 7604), False, 'import attr\n')]
|
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy as np
import cv2
# load the trained models used to generate image captions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from pickle import dump, load
from tensorflow.keras.preprocessing.image import load_img, img_to_array
base_model = InceptionV3(weights = 'inception_v3_weights_tf_dim_ordering_tf_kernels.h5')
vgg_model = Model(base_model.input, base_model.layers[-2].output)
def preprocess_img(img_path):
    # InceptionV3 expects images of size 299*299
img = load_img(img_path, target_size = (299, 299))
x = img_to_array(img)
# Add one more dimension
x = np.expand_dims(x, axis = 0)
x = preprocess_input(x)
return x
def encode(image):
image = preprocess_img(image)
vec = vgg_model.predict(image)
vec = np.reshape(vec, (vec.shape[1]))
return vec
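# encode() returns the 2048-dimensional feature vector from InceptionV3's penultimate
# layer; classify() below reshapes it to (1, 2048) before feeding the captioning model.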
pickle_in = open("wordtoix.pkl", "rb")
wordtoix = load(pickle_in)
pickle_in = open("ixtoword.pkl", "rb")
ixtoword = load(pickle_in)
max_length = 74
def greedy_search(pic):
start = 'startseq'
for i in range(max_length):
seq = [wordtoix[word] for word in start.split() if word in wordtoix]
seq = pad_sequences([seq], maxlen = max_length)
yhat = model.predict([pic, seq])
yhat = np.argmax(yhat)
word = ixtoword[yhat]
start += ' ' + word
if word == 'endseq':
break
final = start.split()
final = final[1:-1]
final = ' '.join(final)
return final
def beam_search(image, beam_index = 3):
start = [wordtoix["startseq"]]
# start_word[0][0] = index of the starting word
# start_word[0][1] = probability of the word predicted
start_word = [[start, 0.0]]
while len(start_word[0][0]) < max_length:
temp = []
for s in start_word:
par_caps = pad_sequences([s[0]], maxlen=max_length)
e = image
preds = model.predict([e, np.array(par_caps)])
# Getting the top <beam_index>(n) predictions
word_preds = np.argsort(preds[0])[-beam_index:]
# creating a new list so as to put them via the model again
for w in word_preds:
next_cap, prob = s[0][:], s[1]
next_cap.append(w)
prob += preds[0][w]
temp.append([next_cap, prob])
start_word = temp
# Sorting according to the probabilities
start_word = sorted(start_word, reverse=False, key=lambda l: l[1])
# Getting the top words
start_word = start_word[-beam_index:]
start_word = start_word[-1][0]
intermediate_caption = [ixtoword[i] for i in start_word]
final_caption = []
for i in intermediate_caption:
if i != 'endseq':
final_caption.append(i)
else:
break
final_caption = ' '.join(final_caption[1:])
return final_caption
model = load_model('new-model-1.h5')
#initialise GUI
top=tk.Tk()
top.geometry('800x600')
top.title('Image Caption Generator')
top.configure(background='#CDCDCD')
label2=Label(top,background='#CDCDCD', font=('arial',15))
label1=Label(top,background='#CDCDCD', font=('arial',15))
label=Label(top,background='#CDCDCD', font=('arial',15))
sign_image = Label(top)
def classify(file_path):
global label_packed
enc = encode(file_path)
image = enc.reshape(1, 2048)
pred = greedy_search(image)
print(pred)
label.configure(foreground='#000', text= 'Greedy: ' + pred)
label.pack(side=BOTTOM,expand=True)
beam_3 = beam_search(image)
print(beam_3)
label1.configure(foreground='#011638', text = 'Beam_3: ' + beam_3)
label1.pack(side = BOTTOM, expand = True)
beam_5 = beam_search(image, 5)
print(beam_5)
label2.configure(foreground='#228B22', text = 'Beam_5: ' + beam_5)
label2.pack(side = BOTTOM, expand = True)
def show_classify_button(file_path):
classify_b=Button(top,text="Generate",command=lambda: classify(file_path),padx=10,pady=5)
classify_b.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
classify_b.place(relx=0.79,rely=0.46)
def upload_image():
try:
file_path=filedialog.askopenfilename()
uploaded=Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))
im=ImageTk.PhotoImage(uploaded)
sign_image.configure(image=im)
sign_image.image=im
label.configure(text='')
label1.configure(text='')
label2.configure(text='')
show_classify_button(file_path)
except:
pass
upload=Button(top,text="Upload an image",command=upload_image,padx=10,pady=5)
upload.configure(background='#364156', foreground='white',font=('arial',10,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
#label2.pack(side = BOTTOM, expand = True)
heading = Label(top, text="Image Caption Generator",pady=20, font=('arial',22,'bold'))
heading.configure(background='#CDCDED',foreground='#FF6348')
heading.pack()
top.mainloop()
|
[
"PIL.ImageTk.PhotoImage",
"tensorflow.keras.models.load_model",
"tensorflow.keras.applications.inception_v3.preprocess_input",
"numpy.argmax",
"tensorflow.keras.applications.inception_v3.InceptionV3",
"tensorflow.keras.preprocessing.image.img_to_array",
"numpy.expand_dims",
"tkinter.filedialog.askopenfilename",
"PIL.Image.open",
"tensorflow.keras.models.Model",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"pickle.load",
"numpy.argsort",
"numpy.reshape",
"numpy.array",
"tkinter.Tk"
] |
[((533, 606), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""inception_v3_weights_tf_dim_ordering_tf_kernels.h5"""'}), "(weights='inception_v3_weights_tf_dim_ordering_tf_kernels.h5')\n", (544, 606), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((621, 674), 'tensorflow.keras.models.Model', 'Model', (['base_model.input', 'base_model.layers[-2].output'], {}), '(base_model.input, base_model.layers[-2].output)\n', (626, 674), False, 'from tensorflow.keras.models import Model\n'), ((1132, 1147), 'pickle.load', 'load', (['pickle_in'], {}), '(pickle_in)\n', (1136, 1147), False, 'from pickle import dump, load\n'), ((1198, 1213), 'pickle.load', 'load', (['pickle_in'], {}), '(pickle_in)\n', (1202, 1213), False, 'from pickle import dump, load\n'), ((3191, 3219), 'tensorflow.keras.models.load_model', 'load_model', (['"""new-model-1.h5"""'], {}), "('new-model-1.h5')\n", (3201, 3219), False, 'from tensorflow.keras.models import load_model\n'), ((3241, 3248), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (3246, 3248), True, 'import tkinter as tk\n'), ((757, 799), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_path'], {'target_size': '(299, 299)'}), '(img_path, target_size=(299, 299))\n', (765, 799), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((810, 827), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (822, 827), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((865, 890), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (879, 890), True, 'import numpy as np\n'), ((901, 920), 'tensorflow.keras.applications.inception_v3.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (917, 920), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), ((1033, 1062), 'numpy.reshape', 'np.reshape', (['vec', 'vec.shape[1]'], {}), '(vec, vec.shape[1])\n', (1043, 1062), True, 'import numpy as np\n'), ((1401, 1440), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[seq]'], {'maxlen': 'max_length'}), '([seq], maxlen=max_length)\n', (1414, 1440), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((1499, 1514), 'numpy.argmax', 'np.argmax', (['yhat'], {}), '(yhat)\n', (1508, 1514), True, 'import numpy as np\n'), ((4455, 4483), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (4481, 4483), False, 'from tkinter import filedialog\n'), ((4501, 4522), 'PIL.Image.open', 'Image.open', (['file_path'], {}), '(file_path)\n', (4511, 4522), False, 'from PIL import ImageTk, Image\n'), ((4615, 4643), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['uploaded'], {}), '(uploaded)\n', (4633, 4643), False, 'from PIL import ImageTk, Image\n'), ((2060, 2100), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['[s[0]]'], {'maxlen': 'max_length'}), '([s[0]], maxlen=max_length)\n', (2073, 2100), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2278, 2298), 'numpy.argsort', 'np.argsort', (['preds[0]'], {}), '(preds[0])\n', (2288, 2298), True, 'import numpy as np\n'), ((2161, 2179), 'numpy.array', 'np.array', (['par_caps'], {}), '(par_caps)\n', (2169, 2179), True, 'import numpy as np\n')]
|
import abc
import operator
from fuzzysets import utils
class Domain(abc.ABC):
"""
An abstract class for domain of a fuzzy set.
"""
@abc.abstractmethod
def __iter__(self):
"""
:returns: a generator which yields the elements of the domain.
The order of the elements is the same in each call.
"""
pass
@abc.abstractmethod
def __contains__(self, item):
pass
@abc.abstractmethod
def __eq__(self, other):
pass
def __ne__(self, other):
return not self == other
class FuzzySet(abc.ABC):
"""
An abstract class for fuzzy set.
"""
def __init__(self, domain, degrees):
"""
:param domain: an instance of type Domain.
:param degrees: a NumPy array of floats in the range [0, 1] -
the corresponding membership degrees.
:raises ValueError: if the degrees are invalid.
"""
self.__set_degrees(degrees)
self.__domain = domain
self.__core = None
self.__support = None
self.__cross_over_points = None
def __set_degrees(self, degrees):
if (utils.is_membership_degree_v(degrees).all()):
self.__degrees = degrees
else:
raise ValueError("Membership degrees must be "
"floats between 0 and 1!")
def _degree_at(self, i):
return self.__degrees[i]
@abc.abstractmethod
def mu(self, x):
"""
:param x: an element of the domain.
:returns: the membership degree of `x`, if it is within the
domain, otherwise 0.
"""
pass
@property
def domain(self):
"""
:returns: an instance of type Domain - the set's domain.
"""
return self.__domain
@property
def range(self):
"""
:returns: a generator of floats in the range [0, 1] - the set's
range.
"""
return (i for i in self.__degrees)
def __iter__(self):
"""
        :returns: a generator of pairs (x, d), where x is an element
of the domain and d is its membership degree.
"""
return zip(self.domain, self.range)
@property
def core(self):
"""
:returns: an immutable set of all the elements whose membership
degree is 1.
"""
if (self.__core is None):
self.__core = frozenset(x for x, d in self if (d == 1.))
return self.__core
@property
def support(self):
"""
:returns: an immutable set of all the elements whose membership
degree is positive.
"""
if (self.__support is None):
self.__support = frozenset(x for x, d in self if (d > 0.))
return self.__support
@property
def cross_over_points(self):
"""
:returns: an immutable set of all the elements whose membership
degree is 0.5.
"""
if (self.__cross_over_points is None):
self.__cross_over_points = frozenset(
x for x, d in self if (d == 0.5)
)
return self.__cross_over_points
def alpha_cut(self, alpha):
"""
:param alpha: a float between 0 and 1.
:returns: a set of the elements whose membership degree is
greater or equal to `alpha`.
"""
alpha = utils.to_float_if_int(alpha)
utils.validate_alpha(alpha)
return {x for x, d in self if (d >= alpha)}
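    # Illustrative example: for membership degrees {a: 0.3, b: 0.7, c: 1.0},
    # alpha_cut(0.5) returns {b, c} and alpha_cut(1.0) returns the core, {c}.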
@property
def height(self):
"""
:returns: the highest membership degree in the set, 0.0 if it is
empty.
"""
return self.__degrees.max(initial=0.0)
def __eq__(self, other):
"""
:param other: a value.
:returns: a boolean value indicating whether `other` is a fuzzy
set of the same type which has the same domain and membership
degrees.
"""
return (isinstance(other, self.__class__) and
self.__pointwise_comparison(other, operator.eq))
def __pointwise_comparison(self, other, p, reduction=all):
return (self.domain == other.domain and
reduction(p(self.mu(x), other.mu(x))
for x in self._select_between_domains(other)))
def _select_between_domains(self, other):
"""
This method is invoked whenever two FS's have equal domains and
one of them is needed, in case it matters which one it is.
"""
return self.domain
def __ne__(self, other):
return not self == other
def __lt__(self, other):
"""
Checks whether the fuzzy set is a proper subset of `other`.
:param other: an instance of the same FuzzySet subclass.
:returns: a boolean value indicating whether `other` has the
same domain and its membership degrees are greater or equal
to the fuzzy set's membership degrees, with at least one of them
being greater.
:raises TypeError: if `other` is not an instance of the same
class.
"""
self.__class__.__verify_has_same_class(other)
return (self.__pointwise_comparison(other, operator.le) and
self.__pointwise_comparison(other, operator.lt, any))
@classmethod
def __verify_has_same_class(cls, other):
if (not isinstance(other, cls)):
raise TypeError(
f"Expected an instance of {cls.__name__!r}!"
)
def __gt__(self, other):
"""
Checks whether the fuzzy set is a proper superset of `other`.
:raises TypeError: if `other` is not an instance of the same
class.
"""
self.__class__.__verify_has_same_class(other)
return other < self
def __le__(self, other):
"""
Checks whether the fuzzy set is a subset of `other`.
:raises TypeError: if `other` is not an instance of the same
class.
"""
self.__class__.__verify_has_same_class(other)
return self.__pointwise_comparison(other, operator.le)
def __ge__(self, other):
"""
Checks whether the fuzzy set is a superset of `other`.
:raises TypeError: if `other` is not an instance of the same
class.
"""
self.__class__.__verify_has_same_class(other)
return other <= self
def __norm(self, other, norm):
self.__verify_has_same_class_and_domain(other)
return self.__class__._from_domain(
self._select_between_domains(other),
mu=lambda x: norm(self.mu(x), other.mu(x))
)
def __verify_has_same_class_and_domain(self, other):
self.__class__.__verify_has_same_class(other)
if (self.domain != other.domain):
raise ValueError(f"Domains differ: {self.domain} "
f"!= {other.domain}")
@classmethod
@abc.abstractmethod
def _from_domain(cls, domain, mu):
"""
:param domain: an instance of Domain.
:param mu: a callable that takes elements of `domain` and
returns floats in the range [0, 1] (not assumed).
"""
pass
def t_norm(self, other, norm=min):
"""
Finds the t-norm of the fuzzy set and `other`.
:param other: an instance of the same FuzzySet class.
:param norm: a callable that takes two membership degrees
(floats between 0 and 1) and returns a membership degree. This
callable (denoted by I below) must also satisfy the following
axioms:
1) boundary condition:
I(1, 1) = 1; I(0, 0) = 0; I(0, 1) = 0; I(1, 0) = 0
2) commutativity:
I(a, b) = I(b, a)
3) I is monotonic:
If a' <= a and b' <= b, then I(a', b') <= I(a, b)
4) associativity
I(a, I(b, c)) = I(I(a, b), c)
Defaults to min.
:returns: an instance of the same FuzzySet class.
:raises TypeError: if `other` is not an instance of the same
class.
:raises ValueError: if the supplied callable does not return
membership degrees.
"""
return self.__norm(other, norm)
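    # With the default norm=min, the resulting set's degree at each x is
    # min(self.mu(x), other.mu(x)), i.e. the standard fuzzy intersection (minimum t-norm).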
def s_norm(self, other, norm=max):
"""
Finds the s-norm of the fuzzy set and `other`.
:param other: an instance of the same FuzzySet class.
:param norm: a callable that takes two membership degrees
(floats between 0 and 1) and returns a membership degree. This
callable (denoted by U below) must also satisfy the following
axioms:
1) boundary condition:
U(1, 1) = 1; U(0, 0) = 0; U(0, 1) = 1; U(1, 0) = 1
2) commutativity:
U(a, b) = U(b, a)
3) U is monotonic:
If a' <= a and b' <= b, then U(a', b') <= U(a, b)
4) associativity
U(a, U(b, c)) = U(U(a, b), c)
Defaults to max.
:returns: an instance of the same FuzzySet class.
:raises TypeError: if `other` is not an instance of the same
class.
:raises ValueError: if the supplied callable does not return
membership degrees.
"""
return self.__norm(other, norm)
def complement(self, comp=utils.complement):
"""
Finds the complement of the fuzzy set.
:param comp: a callable that takes a membership degree (float
between 0 and 1) and returns a membership degree. This callable
(denoted by C below) must also satisfy the following axioms:
1) boundary condition:
C(0) = 1; C(1) = 0
2) if a <= b then C(a) >= C(b)
Defaults to `1 - x`.
:returns: an instance of the same FuzzySet class.
:raises ValueError: if the supplied callable does not return
membership degrees.
"""
return self.__class__._from_domain(
self.domain,
mu=lambda x: comp(self.mu(x))
)
def __repr__(self):
return f"{self.__class__.__name__}({self.domain})"
def __str__(self):
"""
:returns: a str in the format:
<x 0>/<d 0> + ... + <x n>/<d n>
where <x i> and <d i> are the elements of the set and their
membership degrees, respectively.
"""
return " + ".join(f"{x}/{d:.2f}" for x, d in self)
def t_norm(a, b, norm=min):
"""
Equivalent to `a.t_norm(b, norm)`.
"""
return a.t_norm(b, norm)
def s_norm(a, b, norm=max):
"""
Equivalent to `a.s_norm(b, norm)`.
"""
return a.s_norm(b, norm)
def complement(a, comp=utils.complement):
"""
Equivalent to `a.complement(comp)`.
"""
return a.complement(comp)
def alpha_cut(a, alpha):
"""
Equivalent to `a.alpha_cut(alpha)`.
"""
return a.alpha_cut(alpha)
|
[
"fuzzysets.utils.is_membership_degree_v",
"fuzzysets.utils.to_float_if_int",
"fuzzysets.utils.validate_alpha"
] |
[((3385, 3413), 'fuzzysets.utils.to_float_if_int', 'utils.to_float_if_int', (['alpha'], {}), '(alpha)\n', (3406, 3413), False, 'from fuzzysets import utils\n'), ((3422, 3449), 'fuzzysets.utils.validate_alpha', 'utils.validate_alpha', (['alpha'], {}), '(alpha)\n', (3442, 3449), False, 'from fuzzysets import utils\n'), ((1148, 1185), 'fuzzysets.utils.is_membership_degree_v', 'utils.is_membership_degree_v', (['degrees'], {}), '(degrees)\n', (1176, 1185), False, 'from fuzzysets import utils\n')]
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from .tools import LDAPTools
from pex.string import String
from twisted.application import service
from twisted.internet.endpoints import serverFromString
from twisted.internet.protocol import ServerFactory
from twisted.python.components import registerAdapter
from twisted.python import log
from ldaptor.inmemory import fromLDIFFile
from ldaptor.interfaces import IConnectedLDAPEntry
from ldaptor.protocols.ldap.ldapserver import LDAPServer
from ldaptor.protocols.ldap import distinguishedname, ldaperrors
from ldaptor import delta, entry
from ldaptor.protocols import pureldap, pureber
class Handler(LDAPServer):
def __init__(self, host, port, payload):
self.string_tools = String()
self.host = host
self.port = int(port)
self.payload = payload
LDAPServer.__init__(self)
def handle_LDAPSearchRequest(self, request, controls, reply):
command = request.baseObject.decode()
command = self.string_tools.base64_decode(command.encode())
class_name = 'Main'
reply_url = f'http://{self.host}:{str(self.port)}/'
attr = [
("javaClassName", [class_name]),
("objectClass", ["javaNamingReference"]),
("javaCodeBase", [reply_url]),
("javaFactory", [class_name])
]
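        # The entry below is a JNDI naming reference: javaCodeBase points the querying
        # client at http://host:port/ and javaFactory/javaClassName name the class it
        # should fetch and instantiate from that code base.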
reply(
pureldap.LDAPSearchResultEntry(
objectName="",
attributes=attr
)
)
return pureldap.LDAPSearchResultDone(resultCode=ldaperrors.Success.resultCode)
class Factory(ServerFactory):
protocol = Handler
    def __init__(self, host, port, payload=None, root=None):
        self.root = root
        super().__init__()
        self.host = host
        self.port = int(port)
        self.payload = payload
        self.debug = False
    def buildProtocol(self, addr):
        # Handler expects (host, port, payload), so pass the stored payload through
        proto = self.protocol(self.host, self.port, self.payload)
        proto.debug = self.debug
        proto.factory = self
        return proto
class LDAPListen:
def __init__(self, host, port, methods={}):
self.http_tools = LDAPTools()
self.handler = Handler
self.host = host
self.port = int(port)
self.sock = None
def listen(self):
try:
pass
except Exception:
return False
def stop(self):
try:
pass
except Exception:
return False
def accept(self):
try:
pass
except Exception:
return False
class LDAPListener:
@staticmethod
def listen_http(host, port):
return LDAPListen(host, port)
|
[
"ldaptor.protocols.pureldap.LDAPSearchResultEntry",
"ldaptor.protocols.pureldap.LDAPSearchResultDone",
"pex.string.String",
"ldaptor.protocols.ldap.ldapserver.LDAPServer.__init__"
] |
[((1828, 1836), 'pex.string.String', 'String', ([], {}), '()\n', (1834, 1836), False, 'from pex.string import String\n'), ((1941, 1966), 'ldaptor.protocols.ldap.ldapserver.LDAPServer.__init__', 'LDAPServer.__init__', (['self'], {}), '(self)\n', (1960, 1966), False, 'from ldaptor.protocols.ldap.ldapserver import LDAPServer\n'), ((2612, 2683), 'ldaptor.protocols.pureldap.LDAPSearchResultDone', 'pureldap.LDAPSearchResultDone', ([], {'resultCode': 'ldaperrors.Success.resultCode'}), '(resultCode=ldaperrors.Success.resultCode)\n', (2641, 2683), False, 'from ldaptor.protocols import pureldap, pureber\n'), ((2477, 2539), 'ldaptor.protocols.pureldap.LDAPSearchResultEntry', 'pureldap.LDAPSearchResultEntry', ([], {'objectName': '""""""', 'attributes': 'attr'}), "(objectName='', attributes=attr)\n", (2507, 2539), False, 'from ldaptor.protocols import pureldap, pureber\n')]
|
from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder
from cli.src.helpers.objdict_helpers import dict_to_objdict
def test_get_resource_group_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_resource_group()
assert actual.specification.name == 'prefix-testcluster-rg'
assert actual.specification.cluster_name == 'testcluster'
def test_get_vpc_config_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_vpc_config()
assert actual.specification.name == 'prefix-testcluster-vpc'
assert actual.specification.address_pool == '10.20.0.0/22'
def test_get_default_security_group_config_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')
vpc_config = dict_to_objdict({
'specification': {
'name': 'prefix-testcluster-vpc'
}
})
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_default_security_group_config(vpc_config)
assert actual.specification.vpc_name == 'prefix-testcluster-vpc'
def test_get_efs_config_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster', address_pool='10.20.0.0/22')
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_efs_config()
assert actual.specification.token == 'aws-efs-token-testcluster'
assert actual.specification.name == 'prefix-testcluster-efs'
def test_get_subnet_config_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster')
component_value = dict_to_objdict({
'address_pool': '10.20.0.0/24',
'availability_zone': 'eu-westa'
})
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_subnet(component_value, 'component', 'my-test-vpc', 1,)
assert actual.specification.name == 'prefix-testcluster-component-subnet-1'
assert actual.specification.vpc_name == 'my-test-vpc'
assert actual.specification.cidr_block == '10.20.0.0/24'
assert actual.specification.availability_zone == 'eu-westa'
def test_get_security_group_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster')
subnet = dict_to_objdict({
'specification': {
'cidr_block': '10.21.0.0/24'
}
})
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_security_group(subnet, 'component', 'my-test-vpc', 1)
assert actual.specification.name == 'prefix-testcluster-component-security-group-1'
assert actual.specification.vpc_name == 'my-test-vpc'
assert actual.specification.cidr_block == '10.21.0.0/24'
def test_get_route_table_association_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster')
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_route_table_association('route-table-name','component', 'test-subnet', 1)
assert actual.specification.name == 'prefix-testcluster-component-1-route-association'
assert actual.specification.subnet_name == 'test-subnet'
assert actual.specification.route_table_name == 'route-table-name'
def test_get_internet_gateway_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster')
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_internet_gateway('test-vpc-name')
assert actual.specification.name == 'prefix-testcluster-internet-gateway'
assert actual.specification.vpc_name == 'test-vpc-name'
def test_get_routing_table_should_set_proper_values_to_model():
cluster_model = get_cluster_model(cluster_name='TestCluster')
builder = InfrastructureBuilder([cluster_model])
actual = builder.get_routing_table('test-vpc-name', 'test-internet-gateway')
assert actual.specification.name == 'prefix-testcluster-route-table'
assert actual.specification.vpc_name == 'test-vpc-name'
assert actual.specification.route.gateway_name == 'test-internet-gateway'
def get_cluster_model(address_pool='10.22.0.0/22', cluster_name='EpiphanyTestCluster'):
cluster_model = dict_to_objdict({
'kind': 'epiphany-cluster',
'provider': 'aws',
'specification': {
'name': cluster_name,
'prefix': 'prefix',
'cloud': {
'vnet_address_pool': address_pool,
'network': {
'use_network_security_groups': True
},
'default_os_image': 'default',
'use_public_ips': True
}
}
})
return cluster_model
|
[
"cli.src.helpers.objdict_helpers.dict_to_objdict",
"cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder"
] |
[((313, 351), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (334, 351), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((694, 732), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (715, 732), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((1095, 1165), 'cli.src.helpers.objdict_helpers.dict_to_objdict', 'dict_to_objdict', (["{'specification': {'name': 'prefix-testcluster-vpc'}}"], {}), "({'specification': {'name': 'prefix-testcluster-vpc'}})\n", (1110, 1165), False, 'from cli.src.helpers.objdict_helpers import dict_to_objdict\n'), ((1216, 1254), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (1237, 1254), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((1565, 1603), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (1586, 1603), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((1932, 2018), 'cli.src.helpers.objdict_helpers.dict_to_objdict', 'dict_to_objdict', (["{'address_pool': '10.20.0.0/24', 'availability_zone': 'eu-westa'}"], {}), "({'address_pool': '10.20.0.0/24', 'availability_zone':\n 'eu-westa'})\n", (1947, 2018), False, 'from cli.src.helpers.objdict_helpers import dict_to_objdict\n'), ((2051, 2089), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (2072, 2089), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((2582, 2648), 'cli.src.helpers.objdict_helpers.dict_to_objdict', 'dict_to_objdict', (["{'specification': {'cidr_block': '10.21.0.0/24'}}"], {}), "({'specification': {'cidr_block': '10.21.0.0/24'}})\n", (2597, 2648), False, 'from cli.src.helpers.objdict_helpers import dict_to_objdict\n'), ((2699, 2737), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (2720, 2737), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((3182, 3220), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (3203, 3220), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((3696, 3734), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (3717, 3734), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((4081, 4119), 'cli.src.providers.aws.InfrastructureBuilder.InfrastructureBuilder', 'InfrastructureBuilder', (['[cluster_model]'], {}), '([cluster_model])\n', (4102, 4119), False, 'from cli.src.providers.aws.InfrastructureBuilder import InfrastructureBuilder\n'), ((4524, 4818), 'cli.src.helpers.objdict_helpers.dict_to_objdict', 'dict_to_objdict', (["{'kind': 'epiphany-cluster', 'provider': 'aws', 'specification': {'name':\n cluster_name, 'prefix': 'prefix', 'cloud': {'vnet_address_pool':\n address_pool, 'network': 
{'use_network_security_groups': True},\n 'default_os_image': 'default', 'use_public_ips': True}}}"], {}), "({'kind': 'epiphany-cluster', 'provider': 'aws',\n 'specification': {'name': cluster_name, 'prefix': 'prefix', 'cloud': {\n 'vnet_address_pool': address_pool, 'network': {\n 'use_network_security_groups': True}, 'default_os_image': 'default',\n 'use_public_ips': True}}})\n", (4539, 4818), False, 'from cli.src.helpers.objdict_helpers import dict_to_objdict\n')]
|
import random,math
def distribution(decay,buckets):
"Return random numbers, sum noamrlzes 0..1"
tmp=[random.random()]
for _ in range(buckets-1):
old=tmp[-1];
tmp += [old*decay]
s=sum(tmp)
return sorted([x/s for x in tmp])
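# Note: the random draw cancels out under normalization, so the result depends only on
# decay and buckets; e.g. distribution(0.5, 3) always returns
# [0.25/1.75, 0.5/1.75, 1.0/1.75] ~= [0.143, 0.286, 0.571], which sums to 1.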
def run(n=1000,decay=0.99,dimensions=10, buckets = 10):
ds=[distribution(decay,buckets) for _ in range(dimensions)]
print()
[print(d) for d in ds]
print()
nums=[math.prod(random.choice(d) for d in ds) for _ in range(n)]
return sorted(nums,reverse=True)
for x in run(decay=.9,dimensions=6,buckets=10,n=1000): print(x)
|
[
"random.random",
"random.choice"
] |
[((106, 121), 'random.random', 'random.random', ([], {}), '()\n', (119, 121), False, 'import random, math\n'), ((427, 443), 'random.choice', 'random.choice', (['d'], {}), '(d)\n', (440, 443), False, 'import random, math\n')]
|
from ngubot.utils.base import BaseGame
import pyautogui as pag
import time
game = BaseGame()
def clean(game):
itemsToMerge = [
"Head",
"Chest",
"Legs",
"Boots",
"Weapon",
"Accessory1",
"0_0",
"0_1",
"0_2",
"0_3",
"0_4",
"0_5",
"0_6",
"0_7",
]
itemsToBoost = [
"Head",
"Chest",
"Legs",
"Boots",
"Weapon",
"Accessory1",
"0_0",
"0_1",
"0_2",
"0_6",
"3_9",
]
path = True
for item in itemsToMerge:
game.move(item, path)
pag.press("d")
path = False
for item in itemsToBoost:
game.move(item, path)
pag.press("a")
game.move("InfCube", False)
pag.click(
game._shift(game.coords["Inventory"]["InfCube"]["Button"]), button="right"
)
while True:
game.click("IdleAttackPlus", True)
clean(game)
game.click("Nuke", True)
game.click("Right Arrow", True)
for _ in range(4):
game.click("Right Arrow", False)
game.click("ShoulderPlus", True)
game.click("TackPlus", True)
time.sleep(300)
# 330, 5.5
game._focus_window()
clean(game)
game.click("ShoulderPlus", True)
game.click("TackPlus", True)
time.sleep(300)
# 750, 12.5
game._focus_window()
clean(game)
game.click("Nuke", True)
game.click("Left Arrow", True)
for _ in range(6):
game.click("Left Arrow", False)
for _ in range(6):
game.click("Right Arrow", False)
pag.press("r")
game.click("TMEPlus", True)
game.click("TackPlus", True)
time.sleep(300)
# 1100, 18.3
game._focus_window()
clean(game)
time.sleep(300)
# 1420, 23.6
game._focus_window()
clean(game)
game.click("Nuke", True)
game.click("Left Arrow", True)
for _ in range(6):
game.click("Left Arrow", False)
for _ in range(6):
game.click("Right Arrow", False)
pag.press("r")
game.click("ShoulderPlus", True)
game.click("TackPlus", True)
time.sleep(300)
# 1750, 29.1 (adv training unlocked)
game._focus_window()
clean(game)
pag.press("r")
game.click("Adv. Training", False)
game.click("PowerPlus", False)
pag.press("t")
game.click("CutsPlus")
time.sleep(240)
# ~ 2000, 33.3
game._focus_window()
clean(game)
# 0, 0
game.click("Feed Me", True)
game.click("Really", True)
game.click("RebirthYeah", False)
time.sleep(1)
|
[
"pyautogui.press",
"ngubot.utils.base.BaseGame",
"time.sleep"
] |
[((83, 93), 'ngubot.utils.base.BaseGame', 'BaseGame', ([], {}), '()\n', (91, 93), False, 'from ngubot.utils.base import BaseGame\n'), ((1189, 1204), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (1199, 1204), False, 'import time\n'), ((1337, 1352), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (1347, 1352), False, 'import time\n'), ((1608, 1622), 'pyautogui.press', 'pag.press', (['"""r"""'], {}), "('r')\n", (1617, 1622), True, 'import pyautogui as pag\n'), ((1693, 1708), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (1703, 1708), False, 'import time\n'), ((1773, 1788), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (1783, 1788), False, 'import time\n'), ((2045, 2059), 'pyautogui.press', 'pag.press', (['"""r"""'], {}), "('r')\n", (2054, 2059), True, 'import pyautogui as pag\n'), ((2135, 2150), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (2145, 2150), False, 'import time\n'), ((2239, 2253), 'pyautogui.press', 'pag.press', (['"""r"""'], {}), "('r')\n", (2248, 2253), True, 'import pyautogui as pag\n'), ((2332, 2346), 'pyautogui.press', 'pag.press', (['"""t"""'], {}), "('t')\n", (2341, 2346), True, 'import pyautogui as pag\n'), ((2379, 2394), 'time.sleep', 'time.sleep', (['(240)'], {}), '(240)\n', (2389, 2394), False, 'import time\n'), ((2572, 2585), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2582, 2585), False, 'import time\n'), ((658, 672), 'pyautogui.press', 'pag.press', (['"""d"""'], {}), "('d')\n", (667, 672), True, 'import pyautogui as pag\n'), ((762, 776), 'pyautogui.press', 'pag.press', (['"""a"""'], {}), "('a')\n", (771, 776), True, 'import pyautogui as pag\n')]
|
#!/usr/bin/env python
import print_environment
import sys
print_environment.execute()
sys.exit(1)
|
[
"sys.exit",
"print_environment.execute"
] |
[((60, 87), 'print_environment.execute', 'print_environment.execute', ([], {}), '()\n', (85, 87), False, 'import print_environment\n'), ((88, 99), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (96, 99), False, 'import sys\n')]
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
subtract_log_odd=True,
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
self._prediction_layer = layer_class(
model,
input_record.input,
output_dims=output_dims,
**kwargs
)
self._prediction_layer.train_param_blobs = [
model.net.NextBlob(str(blob) + '_sampled')
for blob in self._prediction_layer.param_blobs
]
self.params = self._prediction_layer.params
self.output_schema = self._prediction_layer.output_schema
def add_ops(self, net):
self._prediction_layer.add_ops(net)
def add_train_ops(self, net):
for full_blob, sampled_blob in zip(
self._prediction_layer.param_blobs,
self._prediction_layer.train_param_blobs
):
net.Gather([full_blob, self.input_record.indices()], sampled_blob)
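            # Gather copies only the rows of the full parameter blob at the sampled indices
            # into the sampled blob, so the train ops added below operate on that slice only.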
self._prediction_layer.add_train_ops(net)
if not self.subtract_log_odd:
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
|
[
"caffe2.python.layers.layers.get_layer_class"
] |
[((1438, 1471), 'caffe2.python.layers.layers.get_layer_class', 'get_layer_class', (['prediction_layer'], {}), '(prediction_layer)\n', (1453, 1471), False, 'from caffe2.python.layers.layers import ModelLayer, get_layer_class\n')]
|
from datetime import timedelta
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
from invitations.adapters import get_invitations_adapter
from invitations.base_invitation import AbstractBaseInvitation
from apps.home.models import Classes
class Assignment(models.Model):
key_class = models.ForeignKey(
Classes, on_delete=models.CASCADE, related_name="assignments"
)
title = models.TextField(max_length=400, default="Title")
questions = models.FileField(blank=True, upload_to="questions/")
created_at = models.DateTimeField(auto_now_add=True)
ends_at = models.DateTimeField(blank=True, null=True)
def is_active(self):
if self.ends_at is None:
if self.created_at < (timezone.now() - timedelta(days=120)):
return False
return True
if self.ends_at > timezone.now():
return True
return False
def __str__(self):
return f"{self.key_class.class_name} - {self.title}"
class Upload(models.Model):
assignment = models.ForeignKey(
Assignment, on_delete=models.CASCADE, related_name="upload"
)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="uploads")
file = models.FileField(upload_to="uploads", null=False)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return f"{self.author.username} - {self.assignment}"
class ClassInvitation(AbstractBaseInvitation):
email = models.EmailField(unique=False, verbose_name='email_address',
max_length=420)
created = models.DateTimeField(verbose_name='created',
default=timezone.now)
invited_class = models.ForeignKey(Classes, on_delete=models.CASCADE, related_name='invitees')
@classmethod
def create(cls, email, inviter=None, **kwargs):
key = get_random_string(64).lower()
instance = cls._default_manager.create(
email=email,
invited_class=kwargs.pop('invited_class', None),
key=key,
inviter=inviter,
**kwargs)
return instance
def key_expired(self):
expiration_date = (
self.sent + timedelta(days=3))
return expiration_date <= timezone.now()
def send_invitation(self, request, **kwargs):
invite_url = reverse('AcceptInvite',
args=[self.key])
ctx = kwargs
ctx.update({
'invite_url': request.build_absolute_uri(invite_url),
'register_url': request.build_absolute_uri(reverse("register")),
'site_name': self.invited_class.class_name,
'email': self.email,
'key': self.key,
'inviter': self.inviter,
})
email_template = 'emails/email_invite'
get_invitations_adapter().send_mail(
email_template,
self.email,
ctx)
self.sent = timezone.now()
self.save()
def __str__(self):
return "Invite: {0}".format(self.email)
|
[
"django.db.models.FileField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"django.db.models.EmailField",
"django.urls.reverse",
"datetime.timedelta",
"invitations.adapters.get_invitations_adapter",
"django.db.models.DateTimeField",
"django.utils.crypto.get_random_string"
] |
[((474, 559), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Classes'], {'on_delete': 'models.CASCADE', 'related_name': '"""assignments"""'}), "(Classes, on_delete=models.CASCADE, related_name='assignments'\n )\n", (491, 559), False, 'from django.db import models\n'), ((581, 630), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(400)', 'default': '"""Title"""'}), "(max_length=400, default='Title')\n", (597, 630), False, 'from django.db import models\n'), ((647, 699), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'upload_to': '"""questions/"""'}), "(blank=True, upload_to='questions/')\n", (663, 699), False, 'from django.db import models\n'), ((717, 756), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (737, 756), False, 'from django.db import models\n'), ((771, 814), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (791, 814), False, 'from django.db import models\n'), ((1219, 1297), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Assignment'], {'on_delete': 'models.CASCADE', 'related_name': '"""upload"""'}), "(Assignment, on_delete=models.CASCADE, related_name='upload')\n", (1236, 1297), False, 'from django.db import models\n'), ((1325, 1398), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""uploads"""'}), "(User, on_delete=models.CASCADE, related_name='uploads')\n", (1342, 1398), False, 'from django.db import models\n'), ((1410, 1459), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""uploads"""', 'null': '(False)'}), "(upload_to='uploads', null=False)\n", (1426, 1459), False, 'from django.db import models\n'), ((1477, 1516), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1497, 1516), False, 'from django.db import models\n'), ((1663, 1740), 'django.db.models.EmailField', 'models.EmailField', ([], {'unique': '(False)', 'verbose_name': '"""email_address"""', 'max_length': '(420)'}), "(unique=False, verbose_name='email_address', max_length=420)\n", (1680, 1740), False, 'from django.db import models\n'), ((1785, 1851), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""created"""', 'default': 'timezone.now'}), "(verbose_name='created', default=timezone.now)\n", (1805, 1851), False, 'from django.db import models\n'), ((1907, 1984), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Classes'], {'on_delete': 'models.CASCADE', 'related_name': '"""invitees"""'}), "(Classes, on_delete=models.CASCADE, related_name='invitees')\n", (1924, 1984), False, 'from django.db import models\n'), ((2553, 2593), 'django.urls.reverse', 'reverse', (['"""AcceptInvite"""'], {'args': '[self.key]'}), "('AcceptInvite', args=[self.key])\n", (2560, 2593), False, 'from django.urls import reverse\n'), ((3157, 3171), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (3169, 3171), False, 'from django.utils import timezone\n'), ((1026, 1040), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1038, 1040), False, 'from django.utils import timezone\n'), ((2413, 2430), 'datetime.timedelta', 'timedelta', ([], {'days': '(3)'}), '(days=3)\n', (2422, 2430), False, 'from datetime import timedelta\n'), ((2466, 2480), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2478, 2480), False, 
'from django.utils import timezone\n'), ((2069, 2090), 'django.utils.crypto.get_random_string', 'get_random_string', (['(64)'], {}), '(64)\n', (2086, 2090), False, 'from django.utils.crypto import get_random_string\n'), ((3031, 3056), 'invitations.adapters.get_invitations_adapter', 'get_invitations_adapter', ([], {}), '()\n', (3054, 3056), False, 'from invitations.adapters import get_invitations_adapter\n'), ((908, 922), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (920, 922), False, 'from django.utils import timezone\n'), ((925, 944), 'datetime.timedelta', 'timedelta', ([], {'days': '(120)'}), '(days=120)\n', (934, 944), False, 'from datetime import timedelta\n'), ((2786, 2805), 'django.urls.reverse', 'reverse', (['"""register"""'], {}), "('register')\n", (2793, 2805), False, 'from django.urls import reverse\n')]
|
# this is the mysql service used to communicate with the backend
import mysql.connector
from datetime import datetime
from flask import jsonify
import json
# connector method for the spothole db
def connect():
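    # credentials are left blank; fill in user, passwd and database before running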
return mysql.connector.connect(
host="localhost",
user="",
passwd="",
database=""
)
# post new claim
def post_claim_data(data):
db = connect()
cursor = db.cursor()
sql = "INSERT INTO __claims__ (claim_id, imageURL, severity, userId, status, make, model, vehicle_year, phone_no, created_date) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
val = (data["claimId"], str(data["imageUrls"]), data["severity"], data["userId"], data["status"], data["make"], data["model"], data["year"], data["phoneNo"], datetime.utcnow())
cursor.execute(sql, val)
db.commit()
return str(cursor.rowcount) + " record inserted."
# get claims for a user
def get_user_claims_data(data):
db = connect()
cursor = db.cursor()
sql = "SELECT * FROM __claims__ WHERE userId = %s ORDER BY last_updated DESC"
userId = (data["userId"], )
cursor.execute(sql, userId)
results = cursor.fetchall()
payload = []
content = {}
for result in results:
content = {'claimId': result[0], 'imageUrl': result[1], 'severity': result[2], 'userId': result[3], 'status': result[4], 'created_date': result[5], 'last_updated': result[6], 'make': result[7], 'model': result[8], 'vehicle_year': result[9], 'phone': result[10]}
payload.append(content)
content = {}
return jsonify(payload)
# update profile data
def post_user_profile_data(data):
print(data)
db = connect()
cursor = db.cursor()
sql = "INSERT INTO __users__ (user_id, email_id, name, photo_url) VALUES (%s, %s, %s, %s) ON DUPLICATE KEY UPDATE name=%s, photo_url=%s"
val = (data["userId"], data["emailId"], data["name"], data["photoURL"], data["name"], data["photoURL"])
cursor.execute(sql, val)
db.commit()
return str(cursor.rowcount) + " records affected."
|
[
"datetime.datetime.utcnow",
"flask.jsonify"
] |
[((1552, 1568), 'flask.jsonify', 'jsonify', (['payload'], {}), '(payload)\n', (1559, 1568), False, 'from flask import jsonify\n'), ((759, 776), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (774, 776), False, 'from datetime import datetime\n')]
|
from basic_tests import BasicTest
from models.model_operations import scenario_operations
from models.model_operations import topic_operations
from models.model_operations import user_operations
from models.model_operations import vision_operations
from models.model import db
import unittest
class VisionTest(BasicTest):
"""Test case for visions."""
def setUp(self):
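        # build a fresh schema and shared fixtures (topic, scenarios, mood, users) for every test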
db.create_all()
self.topic = topic_operations.create_topic("test", "test")
self.scenario_1 = scenario_operations.create_scenario(
"t1", "d1", "i1", self.topic.id)
self.scenario_2 = scenario_operations.create_scenario(
"t2", "d2", "i2", self.topic.id)
self.mood = vision_operations.create_mood("happy")
self.user_1 = user_operations.create_user("user1")
self.user_2 = user_operations.create_user("user2")
def test_get_all_moods(self):
moods = vision_operations.get_all_moods()
assert len(moods) == 1
assert moods[0].name == "happy"
def test_create_vision(self):
medias = [
{
"url": "http://url_to_image.com",
"description": "description",
"type": "VIDEO"
},
{
"url": "http://url_to_image.com",
"description": "description",
"type": "GIF"
},
{
"url": "http://url_to_image.com",
"description": "description",
"unsplash_image_id":"uid",
"unsplash_creator_name":"name",
"unsplash_creator_url":"url",
"type": "IMAGE"
},
{
"description": "description",
"type": "TEXT"
}
]
mood_id = self.mood.id
user_id = self.user_1.id
scenario_id = self.scenario_1.id
vision = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
assert vision in db.session
for m in vision.medias:
assert m in db.session
def test_get_vision_by_id(self):
medias = [
{
"url": "http://url_to_image.com",
"description": "description",
"type": "VIDEO"
},
{
"url": "http://url_to_image.com",
"description": "description",
"type": "GIF"
},
{
"url": "http://url_to_image.com",
"description": "description",
"unsplash_image_id":"uid",
"unsplash_creator_name":"name",
"unsplash_creator_url":"url",
"type": "IMAGE"
},
{
"description": "description",
"type": "TEXT"
}
]
mood_id = self.mood.id
user_id = self.user_1.id
scenario_id = self.scenario_1.id
vision = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
retrieved_vision = vision_operations.get_vision_by_id(vision.id)
assert retrieved_vision.mood_id == mood_id and retrieved_vision.user_id == user_id and retrieved_vision.scenario_id == scenario_id
assert vision.medias == retrieved_vision.medias
def test_get_vision_by_user_id(self):
medias = [
{
"url": "http://url_to_image.com",
"description": "description",
"type": "VIDEO"
},
{
"url": "http://url_to_image.com",
"description": "description",
"type": "GIF"
},
{
"url": "http://url_to_image.com",
"description": "description",
"unsplash_image_id":"uid",
"unsplash_creator_name":"name",
"unsplash_creator_url":"url",
"type": "IMAGE"
},
{
"description": "description",
"type": "TEXT"
}
]
mood_id = self.mood.id
user_id = self.user_1.id
scenario_id = self.scenario_1.id
vision_1 = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
vision_2 = vision_operations.create_vision(
mood_id=mood_id, medias=medias[:-1], user_id=user_id, scenario_id=scenario_id)
user_id_2 = self.user_2.id
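        # a vision owned by a different user must not appear in the results queried below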
vision_3 = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id_2, scenario_id=scenario_id)
retrieved_visions = vision_operations.get_visions_by_user(user_id, paginate=False, order=None)
assert len(retrieved_visions) == 2
c1 = retrieved_visions[0].medias == vision_1.medias
c2 = retrieved_visions[0].mood_id == vision_1.mood_id
c3 = retrieved_visions[0].scenario_id == vision_1.scenario_id
assert c1 and c2 and c3
c4 = retrieved_visions[1].medias == vision_2.medias
c5 = retrieved_visions[1].mood_id == vision_2.mood_id
c6 = retrieved_visions[1].scenario_id == vision_2.scenario_id
assert c4 and c5 and c6
def test_get_visions_by_scenario(self):
medias = [
{
"url": "http://url_to_image.com",
"description": "description",
"type": "VIDEO"
},
{
"url": "http://url_to_image.com",
"description": "description",
"type": "GIF"
},
{
"url": "http://url_to_image.com",
"description": "description",
"unsplash_image_id":"uid",
"unsplash_creator_name":"name",
"unsplash_creator_url":"url",
"type": "IMAGE"
},
{
"description": "description",
"type": "TEXT"
}
]
mood_id = self.mood.id
user_id = self.user_1.id
scenario_id = self.scenario_1.id
vision_1 = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
vision_2 = vision_operations.create_vision(
mood_id=mood_id, medias=medias[:-1], user_id=user_id, scenario_id=scenario_id)
scenario_id_2 = self.scenario_2.id
vision_3 = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id_2)
retrieved_visions = vision_operations.get_visions_by_scenario(user_id, paginate=False, order=None)
assert len(retrieved_visions) == 2
c1 = retrieved_visions[0].medias == vision_1.medias
c2 = retrieved_visions[0].mood_id == vision_1.mood_id
c3 = retrieved_visions[0].scenario_id == vision_1.scenario_id
assert c1 and c2 and c3
c4 = retrieved_visions[1].medias == vision_2.medias
c5 = retrieved_visions[1].mood_id == vision_2.mood_id
c6 = retrieved_visions[1].scenario_id == vision_2.scenario_id
assert c4 and c5 and c6
def test_update_vision(self):
medias = [
{
"url": "http://url_to_image.com",
"description": "description",
"type": "VIDEO"
},
{
"url": "http://url_to_image.com",
"description": "description",
"type": "GIF"
},
{
"url": "http://url_to_image.com",
"description": "description",
"unsplash_image_id":"uid",
"unsplash_creator_name":"name",
"unsplash_creator_url":"url",
"type": "IMAGE"
},
{
"description": "description",
"type": "TEXT"
}
]
mood_id = self.mood.id
user_id = self.user_1.id
scenario_id = self.scenario_1.id
vision = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
new_mood = vision_operations.create_mood("sad")
vision_operations.update_vision(vision.id, mood_id=new_mood.id)
retrieved_vision = vision_operations.get_vision_by_id(vision.id)
assert retrieved_vision.mood_id == new_mood.id
old_medias = vision.medias
new_medias = [
{
"url": "http://url_to_image.com_q",
"description": "description_q",
"unsplash_image_id":"uid_q",
"unsplash_creator_name":"name_q",
"unsplash_creator_url":"url_q",
"type": "IMAGE"
}
]
vision_operations.update_vision(vision.id, medias=new_medias)
retrieved_vision = vision_operations.get_vision_by_id(vision.id)
assert len(retrieved_vision.medias) == 1 and retrieved_vision.medias[0].url == new_medias[0]["url"]
assert retrieved_vision.medias[0].description == new_medias[0]["description"]
assert retrieved_vision.medias[0].media_type.name == new_medias[0]["type"]
for m in old_medias:
assert m not in db.session
def test_remove_vision(self):
medias = [
{
"url": "http://url_to_image.com",
"description": "description",
"type": "VIDEO"
},
{
"url": "http://url_to_image.com",
"description": "description",
"type": "GIF"
},
{
"url": "http://url_to_image.com",
"description": "description",
"unsplash_image_id":"uid",
"unsplash_creator_name":"name",
"unsplash_creator_url":"url",
"type": "IMAGE"
},
{
"description": "description",
"type": "TEXT"
}
]
mood_id = self.mood.id
user_id = self.user_1.id
scenario_id = self.scenario_1.id
vision = vision_operations.create_vision(
mood_id=mood_id, medias=medias, user_id=user_id, scenario_id=scenario_id)
assert vision in db.session
vision_operations.remove_vision(vision.id)
assert vision not in db.session
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"models.model_operations.vision_operations.get_visions_by_scenario",
"models.model_operations.vision_operations.remove_vision",
"models.model_operations.scenario_operations.create_scenario",
"models.model.db.create_all",
"models.model_operations.vision_operations.create_vision",
"models.model_operations.vision_operations.get_visions_by_user",
"models.model_operations.vision_operations.update_vision",
"models.model_operations.vision_operations.create_mood",
"models.model_operations.user_operations.create_user",
"models.model_operations.vision_operations.get_all_moods",
"models.model_operations.vision_operations.get_vision_by_id",
"models.model_operations.topic_operations.create_topic"
] |
[((10663, 10678), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10676, 10678), False, 'import unittest\n'), ((386, 401), 'models.model.db.create_all', 'db.create_all', ([], {}), '()\n', (399, 401), False, 'from models.model import db\n'), ((424, 469), 'models.model_operations.topic_operations.create_topic', 'topic_operations.create_topic', (['"""test"""', '"""test"""'], {}), "('test', 'test')\n", (453, 469), False, 'from models.model_operations import topic_operations\n'), ((496, 564), 'models.model_operations.scenario_operations.create_scenario', 'scenario_operations.create_scenario', (['"""t1"""', '"""d1"""', '"""i1"""', 'self.topic.id'], {}), "('t1', 'd1', 'i1', self.topic.id)\n", (531, 564), False, 'from models.model_operations import scenario_operations\n'), ((604, 672), 'models.model_operations.scenario_operations.create_scenario', 'scenario_operations.create_scenario', (['"""t2"""', '"""d2"""', '"""i2"""', 'self.topic.id'], {}), "('t2', 'd2', 'i2', self.topic.id)\n", (639, 672), False, 'from models.model_operations import scenario_operations\n'), ((707, 745), 'models.model_operations.vision_operations.create_mood', 'vision_operations.create_mood', (['"""happy"""'], {}), "('happy')\n", (736, 745), False, 'from models.model_operations import vision_operations\n'), ((768, 804), 'models.model_operations.user_operations.create_user', 'user_operations.create_user', (['"""user1"""'], {}), "('user1')\n", (795, 804), False, 'from models.model_operations import user_operations\n'), ((827, 863), 'models.model_operations.user_operations.create_user', 'user_operations.create_user', (['"""user2"""'], {}), "('user2')\n", (854, 863), False, 'from models.model_operations import user_operations\n'), ((915, 948), 'models.model_operations.vision_operations.get_all_moods', 'vision_operations.get_all_moods', ([], {}), '()\n', (946, 948), False, 'from models.model_operations import vision_operations\n'), ((1920, 2030), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id)\n', (1951, 2030), False, 'from models.model_operations import vision_operations\n'), ((3046, 3156), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id)\n', (3077, 3156), False, 'from models.model_operations import vision_operations\n'), ((3193, 3238), 'models.model_operations.vision_operations.get_vision_by_id', 'vision_operations.get_vision_by_id', (['vision.id'], {}), '(vision.id)\n', (3227, 3238), False, 'from models.model_operations import vision_operations\n'), ((4344, 4454), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id)\n', (4375, 4454), False, 'from models.model_operations import vision_operations\n'), ((4482, 4596), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias[:-1]', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias[:-1],\n 
user_id=user_id, scenario_id=scenario_id)\n', (4513, 4596), False, 'from models.model_operations import vision_operations\n'), ((4662, 4774), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id_2', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id_2, scenario_id=scenario_id)\n', (4693, 4774), False, 'from models.model_operations import vision_operations\n'), ((4812, 4886), 'models.model_operations.vision_operations.get_visions_by_user', 'vision_operations.get_visions_by_user', (['user_id'], {'paginate': '(False)', 'order': 'None'}), '(user_id, paginate=False, order=None)\n', (4849, 4886), False, 'from models.model_operations import vision_operations\n'), ((6292, 6402), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id)\n', (6323, 6402), False, 'from models.model_operations import vision_operations\n'), ((6430, 6544), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias[:-1]', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias[:-1],\n user_id=user_id, scenario_id=scenario_id)\n', (6461, 6544), False, 'from models.model_operations import vision_operations\n'), ((6618, 6730), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id_2'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id_2)\n', (6649, 6730), False, 'from models.model_operations import vision_operations\n'), ((6768, 6846), 'models.model_operations.vision_operations.get_visions_by_scenario', 'vision_operations.get_visions_by_scenario', (['user_id'], {'paginate': '(False)', 'order': 'None'}), '(user_id, paginate=False, order=None)\n', (6809, 6846), False, 'from models.model_operations import vision_operations\n'), ((8240, 8350), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id)\n', (8271, 8350), False, 'from models.model_operations import vision_operations\n'), ((8379, 8415), 'models.model_operations.vision_operations.create_mood', 'vision_operations.create_mood', (['"""sad"""'], {}), "('sad')\n", (8408, 8415), False, 'from models.model_operations import vision_operations\n'), ((8425, 8488), 'models.model_operations.vision_operations.update_vision', 'vision_operations.update_vision', (['vision.id'], {'mood_id': 'new_mood.id'}), '(vision.id, mood_id=new_mood.id)\n', (8456, 8488), False, 'from models.model_operations import vision_operations\n'), ((8516, 8561), 'models.model_operations.vision_operations.get_vision_by_id', 'vision_operations.get_vision_by_id', (['vision.id'], {}), '(vision.id)\n', (8550, 8561), False, 'from models.model_operations import vision_operations\n'), ((9000, 9061), 'models.model_operations.vision_operations.update_vision', 'vision_operations.update_vision', (['vision.id'], {'medias': 'new_medias'}), '(vision.id, medias=new_medias)\n', (9031, 
9061), False, 'from models.model_operations import vision_operations\n'), ((9089, 9134), 'models.model_operations.vision_operations.get_vision_by_id', 'vision_operations.get_vision_by_id', (['vision.id'], {}), '(vision.id)\n', (9123, 9134), False, 'from models.model_operations import vision_operations\n'), ((10381, 10491), 'models.model_operations.vision_operations.create_vision', 'vision_operations.create_vision', ([], {'mood_id': 'mood_id', 'medias': 'medias', 'user_id': 'user_id', 'scenario_id': 'scenario_id'}), '(mood_id=mood_id, medias=medias, user_id=\n user_id, scenario_id=scenario_id)\n', (10412, 10491), False, 'from models.model_operations import vision_operations\n'), ((10546, 10588), 'models.model_operations.vision_operations.remove_vision', 'vision_operations.remove_vision', (['vision.id'], {}), '(vision.id)\n', (10577, 10588), False, 'from models.model_operations import vision_operations\n')]
|
# import the necessary packages
from imutils import paths
import argparse
import cv2
import os
def variance_of_laplacian(image):
# compute the Laplacian of the image and then return the focus
# measure -- the variance of the Laplacian
return cv2.Laplacian(image, cv2.CV_64F).var()
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--images", required=True,
help="path to input directory of images")
ap.add_argument("-t", "--threshold", type=float, default=130.0,
help="focus measures that fall below this value will be considered 'blurry'")
ap.add_argument("-d", "--delete", type=bool, default="false",
help="whether to delete 'blurry' images or not")
args = vars(ap.parse_args())
# loop over the input images
for imagePath in paths.list_images(args["images"]):
# load the image, convert it to grayscale, and compute the
# focus measure of the image using the Variance of Laplacian
# method
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(gray)
text = "Not Blurry"
# if the focus measure is less than the supplied threshold,
# then the image should be considered "Blurry"
if fm < args["threshold"]:
text = "Blurry"
# print out the Focus Measure and
# result -- 'Blurry'/'Not Blurry'
print(imagePath)
print('focus measure', fm)
print('result', text)
# show the image
cv2.putText(image, "{}: {:.2f}".format(text, fm), (10, 30),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 3)
# cv2.imshow("Image", image)
cv2.imwrite("blur_test_result/"+imagePath.split("/",1)[1] , image)
key = cv2.waitKey(0)
# based on whether the 'delete' flag is set
# delete the Blurry image
if(args["delete"] == "true") :
if(text == "Blurry"):
try:
os.remove(imagePath)
except: pass
|
[
"imutils.paths.list_images",
"os.remove",
"argparse.ArgumentParser",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread",
"cv2.Laplacian"
] |
[((346, 371), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (369, 371), False, 'import argparse\n'), ((795, 828), 'imutils.paths.list_images', 'paths.list_images', (["args['images']"], {}), "(args['images'])\n", (812, 828), False, 'from imutils import paths\n'), ((983, 1004), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (993, 1004), False, 'import cv2\n'), ((1016, 1055), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1028, 1055), False, 'import cv2\n'), ((1706, 1720), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1717, 1720), False, 'import cv2\n'), ((246, 278), 'cv2.Laplacian', 'cv2.Laplacian', (['image', 'cv2.CV_64F'], {}), '(image, cv2.CV_64F)\n', (259, 278), False, 'import cv2\n'), ((1900, 1920), 'os.remove', 'os.remove', (['imagePath'], {}), '(imagePath)\n', (1909, 1920), False, 'import os\n')]
|
import pandas as pd
import argparse
import os
import mdtraj
import numpy as np
parser = argparse.ArgumentParser(description='Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure.')
parser.add_argument('-p','--path',help='Path to directory containing all refinement trajectories and RWPlus score files.',required=True,dest='path')
parser.add_argument('--percent',help='Percent of top scoring structures to average over. Default: 15,5,40,1',nargs='*',default=[15,5,40,1],type=int)
args = parser.parse_args()
dir_path = args.path
percent = args.percent
all_trajs = dict()
rw_df = pd.DataFrame(columns = ['traj_idx','frame_idx','score'])
for file in os.listdir(dir_path):
if file.endswith('.dcd') and file.startswith('refinement_'):
print(f'Reading {file}')
traj_idx = int(file[file.rfind('_')+1:file.rfind('.')])
curr_traj = mdtraj.load(os.path.join(dir_path,file),top=os.path.join(dir_path,f'minimized_{traj_idx}.pdb'))
curr_traj.remove_solvent(inplace=True)
all_trajs[traj_idx] = curr_traj
elif file.endswith('.txt') and file.startswith('scorelist_'):
print(f'Reading {file}')
traj_idx = int(file[file.rfind('_')+1:file.rfind('.')])
with open(os.path.join(dir_path,file),'r') as f:
scores = f.readlines()
scores = np.array(scores,dtype=float)
num_frames = len(scores)
df = pd.DataFrame(list(zip([traj_idx]*num_frames,np.arange(num_frames),scores)),columns=['traj_idx','frame_idx','score'])
        rw_df = pd.concat([rw_df, df])  # DataFrame.append was removed in pandas 2.0
rw_df.sort_values(by=['score'],inplace=True)
num_frames = len(rw_df)
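# for each requested percentage, collect the top-scoring frames across all trajectories and save them as a new trajectory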
for perc in percent:
num_top = round(perc*.01*num_frames)
print(perc)
print(num_top)
best_frames = rw_df.head(num_top)
for idx in all_trajs.keys():
traj_best = best_frames[best_frames['traj_idx'] == idx]
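		# newtraj does not exist on the first iteration, so the join fails and the except branch creates it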
try:
newtraj = newtraj.join([all_trajs[idx][list(traj_best['frame_idx'])]])
except:
newtraj = all_trajs[idx][list(traj_best['frame_idx'])]
print(len(newtraj))
print(f'Saving top {perc}% of frames to top_{perc}_percent.xtc')
newtraj.save(os.path.join(dir_path,f'top_{perc}_percent.xtc'),force_overwrite=True)
del newtraj
|
[
"pandas.DataFrame",
"argparse.ArgumentParser",
"numpy.array",
"numpy.arange",
"os.path.join",
"os.listdir"
] |
[((90, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure."""'}), "(description=\n 'Script to generate trajectories containing only top scoring frames as scored by RWPlus. These top scoring trajectories can then be averaged with Gromacs to produce an averaged structure.'\n )\n", (113, 325), False, 'import argparse\n'), ((714, 770), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['traj_idx', 'frame_idx', 'score']"}), "(columns=['traj_idx', 'frame_idx', 'score'])\n", (726, 770), True, 'import pandas as pd\n'), ((783, 803), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (793, 803), False, 'import os\n'), ((2263, 2312), 'os.path.join', 'os.path.join', (['dir_path', 'f"""top_{perc}_percent.xtc"""'], {}), "(dir_path, f'top_{perc}_percent.xtc')\n", (2275, 2312), False, 'import os\n'), ((999, 1027), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (1011, 1027), False, 'import os\n'), ((1442, 1471), 'numpy.array', 'np.array', (['scores'], {'dtype': 'float'}), '(scores, dtype=float)\n', (1450, 1471), True, 'import numpy as np\n'), ((1031, 1082), 'os.path.join', 'os.path.join', (['dir_path', 'f"""minimized_{traj_idx}.pdb"""'], {}), "(dir_path, f'minimized_{traj_idx}.pdb')\n", (1043, 1082), False, 'import os\n'), ((1351, 1379), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (1363, 1379), False, 'import os\n'), ((1561, 1582), 'numpy.arange', 'np.arange', (['num_frames'], {}), '(num_frames)\n', (1570, 1582), True, 'import numpy as np\n')]
|
import asyncio
import time
from async_reduce import async_reduceable
@async_reduceable()
async def fetch(url):
print('- fetch page: ', url)
await asyncio.sleep(1)
return time.time()
async def amain():
coros = [
fetch('/page') for _ in range(10)
]
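    # async_reduceable is expected to collapse these identical concurrent calls into a single underlying fetch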
print('-- Simultaneous run')
done, pending = await asyncio.wait(coros)
print('Results:')
for f in done:
print(
await f
)
def main():
asyncio.run(amain())
if __name__ == '__main__':
main()
|
[
"async_reduce.async_reduceable",
"asyncio.wait",
"asyncio.sleep",
"time.time"
] |
[((73, 91), 'async_reduce.async_reduceable', 'async_reduceable', ([], {}), '()\n', (89, 91), False, 'from async_reduce import async_reduceable\n'), ((185, 196), 'time.time', 'time.time', ([], {}), '()\n', (194, 196), False, 'import time\n'), ((157, 173), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (170, 173), False, 'import asyncio\n'), ((340, 359), 'asyncio.wait', 'asyncio.wait', (['coros'], {}), '(coros)\n', (352, 359), False, 'import asyncio\n')]
|
"""Helpers to generate ulids."""
from random import getrandbits
import time
def ulid_hex() -> str:
"""Generate a ULID in lowercase hex that will work for a UUID.
This ulid should not be used for cryptographically secure
operations.
This string can be converted with https://github.com/ahawker/ulid
ulid.from_uuid(uuid.UUID(ulid_hex))
"""
return f"{int(time.time()*1000):012x}{getrandbits(80):020x}"
def ulid() -> str:
"""Generate a ULID.
This ulid should not be used for cryptographically secure
operations.
01AN4Z07BY 79KA1307SR9X4MV3
|----------| |----------------|
Timestamp Randomness
48bits 80bits
This string can be loaded directly with https://github.com/ahawker/ulid
import homeassistant.util.ulid as ulid_util
import ulid
ulid.parse(ulid_util.ulid())
"""
ulid_bytes = int(time.time() * 1000).to_bytes(6, byteorder="big") + int(
getrandbits(80)
).to_bytes(10, byteorder="big")
# This is base32 crockford encoding with the loop unrolled for performance
#
# This code is adapted from:
# https://github.com/ahawker/ulid/blob/06289583e9de4286b4d80b4ad000d137816502ca/ulid/base32.py#L102
#
enc = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
return (
enc[(ulid_bytes[0] & 224) >> 5]
+ enc[ulid_bytes[0] & 31]
+ enc[(ulid_bytes[1] & 248) >> 3]
+ enc[((ulid_bytes[1] & 7) << 2) | ((ulid_bytes[2] & 192) >> 6)]
+ enc[((ulid_bytes[2] & 62) >> 1)]
+ enc[((ulid_bytes[2] & 1) << 4) | ((ulid_bytes[3] & 240) >> 4)]
+ enc[((ulid_bytes[3] & 15) << 1) | ((ulid_bytes[4] & 128) >> 7)]
+ enc[(ulid_bytes[4] & 124) >> 2]
+ enc[((ulid_bytes[4] & 3) << 3) | ((ulid_bytes[5] & 224) >> 5)]
+ enc[ulid_bytes[5] & 31]
+ enc[(ulid_bytes[6] & 248) >> 3]
+ enc[((ulid_bytes[6] & 7) << 2) | ((ulid_bytes[7] & 192) >> 6)]
+ enc[(ulid_bytes[7] & 62) >> 1]
+ enc[((ulid_bytes[7] & 1) << 4) | ((ulid_bytes[8] & 240) >> 4)]
+ enc[((ulid_bytes[8] & 15) << 1) | ((ulid_bytes[9] & 128) >> 7)]
+ enc[(ulid_bytes[9] & 124) >> 2]
+ enc[((ulid_bytes[9] & 3) << 3) | ((ulid_bytes[10] & 224) >> 5)]
+ enc[ulid_bytes[10] & 31]
+ enc[(ulid_bytes[11] & 248) >> 3]
+ enc[((ulid_bytes[11] & 7) << 2) | ((ulid_bytes[12] & 192) >> 6)]
+ enc[(ulid_bytes[12] & 62) >> 1]
+ enc[((ulid_bytes[12] & 1) << 4) | ((ulid_bytes[13] & 240) >> 4)]
+ enc[((ulid_bytes[13] & 15) << 1) | ((ulid_bytes[14] & 128) >> 7)]
+ enc[(ulid_bytes[14] & 124) >> 2]
+ enc[((ulid_bytes[14] & 3) << 3) | ((ulid_bytes[15] & 224) >> 5)]
+ enc[ulid_bytes[15] & 31]
)
|
[
"time.time",
"random.getrandbits"
] |
[((410, 425), 'random.getrandbits', 'getrandbits', (['(80)'], {}), '(80)\n', (421, 425), False, 'from random import getrandbits\n'), ((386, 397), 'time.time', 'time.time', ([], {}), '()\n', (395, 397), False, 'import time\n'), ((971, 986), 'random.getrandbits', 'getrandbits', (['(80)'], {}), '(80)\n', (982, 986), False, 'from random import getrandbits\n'), ((907, 918), 'time.time', 'time.time', ([], {}), '()\n', (916, 918), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import matplotlib.pyplot as plt
import os
import plot_tools
import settings
from pythonapi import anno_tools
def plt_print_text(*args):
print('plot_tools.print_text', *args[:-1])
with plt.style.context({
'pdf.fonttype': 42,
}):
plot_tools.print_text(*args)
plt_print_text.concurrent = False
print_text = plt_print_text
def main():
with open(settings.DATA_LIST) as f:
data_list = json.load(f)
lines = []
with open(settings.TRAIN) as f:
lines += f.read().splitlines()
with open(settings.VAL) as f:
lines += f.read().splitlines()
with open(settings.TEST_DETECTION_GT) as f:
lines += f.read().splitlines()
def gt2array(gt):
color = '#0f0'
a = list()
for char in anno_tools.each_char(gt):
if char['is_chinese']:
a.append({'polygon': char['polygon'], 'text': char['text'], 'color': color, 'fontsize': 10})
for char in gt['ignore']:
a.append({'polygon': char['polygon'], 'text': '', 'color': '#ff0', 'fontsize': 10})
return a
selected = [
('0000507', 0, 0, 2048, 2048),
('1023899', 0, 0, 2048, 2048),
('1031755', 0, 0, 2048, 2048),
('1044721', 0, 0, 2048, 2048),
('1046905', 0, 0, 2048, 2048),
('2000215', 0, 0, 2048, 2048),
('2004154', 0, 0, 2048, 2048),
('2005679', 0, 0, 2048, 2048),
('2024003', 0, 0, 2048, 2048),
('3005669', 0, 0, 2048, 2048),
('3029319', 0, 0, 2048, 2048),
('3040629', 0, 0, 2048, 2048),
('3001838', 0, 650, 700, 550),
('1041797', 530, 740, 700, 550),
]
if not os.path.isdir(settings.PRINTTEXT_DRAWING_DIR):
os.makedirs(settings.PRINTTEXT_DRAWING_DIR)
tasks = []
for image_id, x, y, w, h in selected:
i = [o['image_id'] for o in data_list['train'] + data_list['val'] + data_list['test_det']].index(image_id)
gt = json.loads(lines[i])
crop = (x, y, w, h)
file_name = os.path.join(settings.TRAINVAL_IMAGE_DIR if i < len(data_list['train'] + data_list['val']) else settings.TEST_IMAGE_DIR, gt['file_name'])
output = os.path.join(settings.PRINTTEXT_DRAWING_DIR, 'gt_{}_{}_{}_{}_{}.pdf'.format(image_id, x, y, w, h))
print_text(file_name, output, {
'boxes': gt2array(gt),
'crop': crop,
})
if __name__ == '__main__':
main()
|
[
"json.load",
"os.makedirs",
"json.loads",
"os.path.isdir",
"pythonapi.anno_tools.each_char",
"matplotlib.pyplot.style.context",
"plot_tools.print_text"
] |
[((382, 421), 'matplotlib.pyplot.style.context', 'plt.style.context', (["{'pdf.fonttype': 42}"], {}), "({'pdf.fonttype': 42})\n", (399, 421), True, 'import matplotlib.pyplot as plt\n'), ((446, 474), 'plot_tools.print_text', 'plot_tools.print_text', (['*args'], {}), '(*args)\n', (467, 474), False, 'import plot_tools\n'), ((612, 624), 'json.load', 'json.load', (['f'], {}), '(f)\n', (621, 624), False, 'import json\n'), ((960, 984), 'pythonapi.anno_tools.each_char', 'anno_tools.each_char', (['gt'], {}), '(gt)\n', (980, 984), False, 'from pythonapi import anno_tools\n'), ((1861, 1906), 'os.path.isdir', 'os.path.isdir', (['settings.PRINTTEXT_DRAWING_DIR'], {}), '(settings.PRINTTEXT_DRAWING_DIR)\n', (1874, 1906), False, 'import os\n'), ((1916, 1959), 'os.makedirs', 'os.makedirs', (['settings.PRINTTEXT_DRAWING_DIR'], {}), '(settings.PRINTTEXT_DRAWING_DIR)\n', (1927, 1959), False, 'import os\n'), ((2145, 2165), 'json.loads', 'json.loads', (['lines[i]'], {}), '(lines[i])\n', (2155, 2165), False, 'import json\n')]
|
import django
from django.http import HttpResponse
import random
def rand_string(min, max):
"""Returns a randomly-generated string, of a random length.
Args:
min (int): Minimum string length to return, inclusive
max (int): Maximum string length to return, inclusive
"""
int_gen = random.randint
string_length = int_gen(min, max)
return ''.join([chr(int_gen(ord(' '), ord('~')))
for __ in range(string_length)])
BODY = rand_string(10240, 10240).encode('utf-8') # NOQA
HEADERS = {'X-Test': 'Funky Chicken'}
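# module-level aliases of the precomputed body and headers, reused by every request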
_body = BODY
_headers = HEADERS
def hello(request, account_id):
user_agent = request.META['HTTP_USER_AGENT'] # NOQA
limit = request.GET.get('limit', '10') # NOQA
response = HttpResponse(_body)
for name, value in _headers.items():
response[name] = value
return response
|
[
"django.http.HttpResponse"
] |
[((762, 781), 'django.http.HttpResponse', 'HttpResponse', (['_body'], {}), '(_body)\n', (774, 781), False, 'from django.http import HttpResponse\n')]
|
'''Recommendations Module'''
from time import time
from urllib.parse import quote, urlencode
import requests as r
from constructor_io.helpers.exception import ConstructorException
from constructor_io.helpers.utils import (clean_params, create_auth_header,
create_request_headers,
create_shared_query_params,
throw_http_exception_from_response)
def _create_recommendations_url(pod_id, parameters, user_parameters, options):
'''Create URL from supplied parameters'''
query_params = create_shared_query_params(options, parameters, user_parameters)
if not pod_id or not isinstance(pod_id, str):
raise ConstructorException('pod_id is a required parameter of type string')
if parameters:
if parameters.get('num_results'):
query_params['num_results'] = parameters.get('num_results')
if parameters.get('item_ids'):
query_params['item_id'] = parameters.get('item_ids')
if parameters.get('term'):
query_params['term'] = parameters.get('term')
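    # request timestamp in milliseconds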
query_params['_dt'] = int(time()*1000.0)
query_params = clean_params(query_params)
query_string = urlencode(query_params, doseq=True)
return f'{options.get("service_url")}/recommendations/v1/pods/{quote(pod_id)}?{query_string}'
class Recommendations:
'''Recommendations Class'''
def __init__(self, options):
self.__options = options or {}
def get_recommendation_results(self, pod_id, parameters=None, user_parameters=None):
'''
Retrieve recommendation results from API
:param str pod_id: Recommendation pod identifier
:param dict parameters: Additional parameters to refine result set
:param int parameters.num_results: The total number of results to return
:param str|list parameters.item_ids: Item ID(s) to retrieve recommendations for (strategy specific)
        :param str parameters.term: The term to use to refine results (strategy specific)
:param dict parameters.filters: Filters used to refine recommendation results (strategy specific)
:param str parameters.section: The section to return results from
:param dict user_parameters: Parameters relevant to the user request
:param int user_parameters.session_id: Session ID, utilized to personalize results
:param str user_parameters.client_id: Client ID, utilized to personalize results
:param str user_parameters.user_id: User ID, utilized to personalize results
:param str user_parameters.segments: User segments
:param dict user_parameters.test_cells: User test cells
:param str user_parameters.user_ip: Origin user IP, from client
:param str user_parameters.user_agent: Origin user agent, from client
:return: dict
''' # pylint: disable=line-too-long
if not parameters:
parameters = {}
if not user_parameters:
user_parameters = {}
request_url = _create_recommendations_url(pod_id, parameters, user_parameters, self.__options)
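        # allow a custom requests-compatible client to be injected through options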
requests = self.__options.get('requests') or r
response = requests.get(
request_url,
auth=create_auth_header(self.__options),
headers=create_request_headers(self.__options, user_parameters)
)
print(response)
if not response.ok:
throw_http_exception_from_response(response)
json = response.json()
json_response = json.get('response')
if json_response:
if json_response.get('results') or json_response.get('results') == []:
result_id = json.get('result_id')
if result_id:
for result in json_response.get('results'):
result['result_id'] = result_id
return json
raise ConstructorException('get_recommendation_results response data is malformed')
|
[
"constructor_io.helpers.utils.clean_params",
"urllib.parse.urlencode",
"constructor_io.helpers.utils.throw_http_exception_from_response",
"time.time",
"urllib.parse.quote",
"constructor_io.helpers.utils.create_auth_header",
"constructor_io.helpers.utils.create_shared_query_params",
"constructor_io.helpers.utils.create_request_headers",
"constructor_io.helpers.exception.ConstructorException"
] |
[((620, 684), 'constructor_io.helpers.utils.create_shared_query_params', 'create_shared_query_params', (['options', 'parameters', 'user_parameters'], {}), '(options, parameters, user_parameters)\n', (646, 684), False, 'from constructor_io.helpers.utils import clean_params, create_auth_header, create_request_headers, create_shared_query_params, throw_http_exception_from_response\n'), ((1218, 1244), 'constructor_io.helpers.utils.clean_params', 'clean_params', (['query_params'], {}), '(query_params)\n', (1230, 1244), False, 'from constructor_io.helpers.utils import clean_params, create_auth_header, create_request_headers, create_shared_query_params, throw_http_exception_from_response\n'), ((1264, 1299), 'urllib.parse.urlencode', 'urlencode', (['query_params'], {'doseq': '(True)'}), '(query_params, doseq=True)\n', (1273, 1299), False, 'from urllib.parse import quote, urlencode\n'), ((750, 819), 'constructor_io.helpers.exception.ConstructorException', 'ConstructorException', (['"""pod_id is a required parameter of type string"""'], {}), "('pod_id is a required parameter of type string')\n", (770, 819), False, 'from constructor_io.helpers.exception import ConstructorException\n'), ((3960, 4037), 'constructor_io.helpers.exception.ConstructorException', 'ConstructorException', (['"""get_recommendation_results response data is malformed"""'], {}), "('get_recommendation_results response data is malformed')\n", (3980, 4037), False, 'from constructor_io.helpers.exception import ConstructorException\n'), ((1184, 1190), 'time.time', 'time', ([], {}), '()\n', (1188, 1190), False, 'from time import time\n'), ((1368, 1381), 'urllib.parse.quote', 'quote', (['pod_id'], {}), '(pod_id)\n', (1373, 1381), False, 'from urllib.parse import quote, urlencode\n'), ((3487, 3531), 'constructor_io.helpers.utils.throw_http_exception_from_response', 'throw_http_exception_from_response', (['response'], {}), '(response)\n', (3521, 3531), False, 'from constructor_io.helpers.utils import clean_params, create_auth_header, create_request_headers, create_shared_query_params, throw_http_exception_from_response\n'), ((3300, 3334), 'constructor_io.helpers.utils.create_auth_header', 'create_auth_header', (['self.__options'], {}), '(self.__options)\n', (3318, 3334), False, 'from constructor_io.helpers.utils import clean_params, create_auth_header, create_request_headers, create_shared_query_params, throw_http_exception_from_response\n'), ((3356, 3411), 'constructor_io.helpers.utils.create_request_headers', 'create_request_headers', (['self.__options', 'user_parameters'], {}), '(self.__options, user_parameters)\n', (3378, 3411), False, 'from constructor_io.helpers.utils import clean_params, create_auth_header, create_request_headers, create_shared_query_params, throw_http_exception_from_response\n')]
|
import json
from builtins import NotImplemented
from pprint import pprint
import aiohttp
import asyncio
import async_timeout
from aiohttp.client import _RequestContextManager
from logging import getLogger
from exceptions.SmhiExceptions import SmhiConnectionException
BASE_URL = 'https://opendata-download-metfcst.smhi.se/api/category/pmp3g/version/2/geotype/point/lon/{}/lat/{}/data.json'
logger = getLogger()
@asyncio.coroutine
def get_forecast(longitude: str, latitude: str) -> dict:
with aiohttp.ClientSession() as session:
url = BASE_URL.format(longitude, latitude)
print(url)
with async_timeout.timeout(10):
try:
response = yield from session.get(url)
text = (yield from response.json())
except Exception as e:
text = yield from response.text()
logger.exception("Could not fetch data from SMHI.")
raise SmhiConnectionException("Could not fetch data from SMHI.")
finally:
yield from response.release()
        pprint(text)
        return text
def __check_response_for_error(response: aiohttp.ClientResponse):
    # raise when the response indicates failure (non-2xx status)
    if not 200 <= response.status < 300:
raise Exception
def __format_response(response: json) -> json:
NotImplemented()
loop = asyncio.get_event_loop()
loop.run_until_complete(get_forecast("18.176879", "59.237234"))
|
[
"asyncio.get_event_loop",
"builtins.NotImplemented",
"aiohttp.ClientSession",
"async_timeout.timeout",
"pprint.pprint",
"exceptions.SmhiExceptions.SmhiConnectionException",
"logging.getLogger"
] |
[((402, 413), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (411, 413), False, 'from logging import getLogger\n'), ((1305, 1329), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1327, 1329), False, 'import asyncio\n'), ((1279, 1295), 'builtins.NotImplemented', 'NotImplemented', ([], {}), '()\n', (1293, 1295), False, 'from builtins import NotImplemented\n'), ((501, 524), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (522, 524), False, 'import aiohttp\n'), ((620, 645), 'async_timeout.timeout', 'async_timeout.timeout', (['(10)'], {}), '(10)\n', (641, 645), False, 'import async_timeout\n'), ((1084, 1096), 'pprint.pprint', 'pprint', (['text'], {}), '(text)\n', (1090, 1096), False, 'from pprint import pprint\n'), ((946, 1004), 'exceptions.SmhiExceptions.SmhiConnectionException', 'SmhiConnectionException', (['"""Could not fetch data from SMHI."""'], {}), "('Could not fetch data from SMHI.')\n", (969, 1004), False, 'from exceptions.SmhiExceptions import SmhiConnectionException\n')]
|
import unittest
from pieces.bishop import Bishop
from board import Board
class TestSum(unittest.TestCase):
def test_no_movement(self):
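        # a friendly bishop on the only adjacent diagonal square leaves the cornered bishop with no moves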
board = Board()
bishop1 = Bishop("W", 7, 0, board)
bishop2 = Bishop("W", 6, 1, board)
board.add_piece(bishop1, 7, 0)
board.add_piece(bishop2, 6, 1)
self.assertEqual(bishop1.generate_legal_moves(), [], "Should be empty array")
def test_capture(self):
board = Board()
bishop1 = Bishop("W", 7, 0, board)
bishop2 = Bishop("B", 6, 1, board)
board.add_piece(bishop1, 7, 0)
board.add_piece(bishop2, 6, 1)
self.assertEqual(bishop1.generate_legal_moves(), [(6, 1)], "Should be [(6, 1)]")
def test_capture_and_move(self):
board = Board()
bishop1 = Bishop("W", 6, 1, board)
bishop2 = Bishop("B", 7, 0, board)
bishop3 = Bishop("W", 5, 2, board)
bishop4 = Bishop("W", 5, 0, board)
bishop5 = Bishop("W", 7, 2, board)
board.add_piece(bishop1, 6, 1)
board.add_piece(bishop2, 7, 0)
board.add_piece(bishop3, 5, 2)
board.add_piece(bishop4, 5, 0)
board.add_piece(bishop5, 7, 2)
self.assertEqual(bishop1.generate_legal_moves(), [(7, 0)], "Should be [(7, 0)]")
def test_full_diagonal(self):
board = Board()
bishop1 = Bishop("W", 7, 7, board)
board.add_piece(bishop1, 7, 7)
self.assertEqual(bishop1.generate_legal_moves(),
[(6, 6), (5, 5), (4, 4), (3, 3), (2, 2), (1, 1,), (0, 0)],
"Should be [(6, 6), (5, 5), (4, 4), (3, 3), (2, 2), (1, 1,), (0, 0)]")
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"pieces.bishop.Bishop",
"board.Board"
] |
[((1676, 1691), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1689, 1691), False, 'import unittest\n'), ((157, 164), 'board.Board', 'Board', ([], {}), '()\n', (162, 164), False, 'from board import Board\n'), ((184, 208), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(7)', '(0)', 'board'], {}), "('W', 7, 0, board)\n", (190, 208), False, 'from pieces.bishop import Bishop\n'), ((227, 251), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(6)', '(1)', 'board'], {}), "('W', 6, 1, board)\n", (233, 251), False, 'from pieces.bishop import Bishop\n'), ((463, 470), 'board.Board', 'Board', ([], {}), '()\n', (468, 470), False, 'from board import Board\n'), ((490, 514), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(7)', '(0)', 'board'], {}), "('W', 7, 0, board)\n", (496, 514), False, 'from pieces.bishop import Bishop\n'), ((533, 557), 'pieces.bishop.Bishop', 'Bishop', (['"""B"""', '(6)', '(1)', 'board'], {}), "('B', 6, 1, board)\n", (539, 557), False, 'from pieces.bishop import Bishop\n'), ((785, 792), 'board.Board', 'Board', ([], {}), '()\n', (790, 792), False, 'from board import Board\n'), ((812, 836), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(6)', '(1)', 'board'], {}), "('W', 6, 1, board)\n", (818, 836), False, 'from pieces.bishop import Bishop\n'), ((855, 879), 'pieces.bishop.Bishop', 'Bishop', (['"""B"""', '(7)', '(0)', 'board'], {}), "('B', 7, 0, board)\n", (861, 879), False, 'from pieces.bishop import Bishop\n'), ((898, 922), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(5)', '(2)', 'board'], {}), "('W', 5, 2, board)\n", (904, 922), False, 'from pieces.bishop import Bishop\n'), ((941, 965), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(5)', '(0)', 'board'], {}), "('W', 5, 0, board)\n", (947, 965), False, 'from pieces.bishop import Bishop\n'), ((984, 1008), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(7)', '(2)', 'board'], {}), "('W', 7, 2, board)\n", (990, 1008), False, 'from pieces.bishop import Bishop\n'), ((1346, 1353), 'board.Board', 'Board', ([], {}), '()\n', (1351, 1353), False, 'from board import Board\n'), ((1373, 1397), 'pieces.bishop.Bishop', 'Bishop', (['"""W"""', '(7)', '(7)', 'board'], {}), "('W', 7, 7, board)\n", (1379, 1397), False, 'from pieces.bishop import Bishop\n')]
|
#!/usr/local/bin/python3.4
# encoding: utf-8
'''
pyverse.bin.build -- builds DB objects into your database.
pyverse.bin.build Use this command to build the various code generate objects in your DB, like Stored Procedures, Functions, Views and Triggers
@author: <NAME>
@copyright: 2014 open source. All rights reserved.
@license: license
@contact: <EMAIL>
@deffield updated: Updated
'''
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/..')
from app import parser as parser
import app.commands
import traceback
def main(parser):
'''Command line options.'''
try:
parser.add_argument("-s","--stored_proc", dest="stored_proc", action="store",nargs='?', default=False, const='All', \
help="build all stored procedures, or the folder/*.sql specified. Root folder is the database name.")
parser.add_argument("-w","--views", dest="views", action="store",nargs='?', default=False, const='All', \
help="build all views, or the folder/*.sql specified. Root folder is the database name.")
parser.add_argument("-t","--triggers", dest="triggers", action="store",nargs='?', default=False, const='All', \
help="build all triggers, or the folder/*.sql specified. Root folder is the database name.")
parser.add_argument("-f","--functions", dest="functions", action="store",nargs='?', default=False, const='All', \
help="build all functions, or the folder/*.sql config specified. Root folder is the database name.")
parser.add_argument("-c","--scripts", dest="scripts", action="store",nargs='?', default=False, const='All', \
help="run all scripts, or the folder/*.sql specified. Root folder is the database name.")
args = app.init(parser)
Builder = app.commands.BuildDBObj(args)
Builder.run()
# TODO if not run from another tool, I should let the exception be thrown so I can see proper error logs
except Exception:
traceback.print_exc()
return 1
#++++++++++++++++++++++++++++++++++++ MAIN ENTRY POINT ++++++++++++++++++++++++++++++++++
sys.exit(main(parser))
|
[
"app.parser.add_argument",
"os.path.realpath",
"traceback.print_exc"
] |
[((639, 873), 'app.parser.add_argument', 'parser.add_argument', (['"""-s"""', '"""--stored_proc"""'], {'dest': '"""stored_proc"""', 'action': '"""store"""', 'nargs': '"""?"""', 'default': '(False)', 'const': '"""All"""', 'help': '"""build all stored procedures, or the folder/*.sql specified. Root folder is the database name."""'}), "('-s', '--stored_proc', dest='stored_proc', action=\n 'store', nargs='?', default=False, const='All', help=\n 'build all stored procedures, or the folder/*.sql specified. Root folder is the database name.'\n )\n", (658, 873), True, 'from app import parser as parser\n'), ((912, 1122), 'app.parser.add_argument', 'parser.add_argument', (['"""-w"""', '"""--views"""'], {'dest': '"""views"""', 'action': '"""store"""', 'nargs': '"""?"""', 'default': '(False)', 'const': '"""All"""', 'help': '"""build all views, or the folder/*.sql specified. Root folder is the database name."""'}), "('-w', '--views', dest='views', action='store', nargs=\n '?', default=False, const='All', help=\n 'build all views, or the folder/*.sql specified. Root folder is the database name.'\n )\n", (931, 1122), True, 'from app import parser as parser\n'), ((1173, 1391), 'app.parser.add_argument', 'parser.add_argument', (['"""-t"""', '"""--triggers"""'], {'dest': '"""triggers"""', 'action': '"""store"""', 'nargs': '"""?"""', 'default': '(False)', 'const': '"""All"""', 'help': '"""build all triggers, or the folder/*.sql specified. Root folder is the database name."""'}), "('-t', '--triggers', dest='triggers', action='store',\n nargs='?', default=False, const='All', help=\n 'build all triggers, or the folder/*.sql specified. Root folder is the database name.'\n )\n", (1192, 1391), True, 'from app import parser as parser\n'), ((1437, 1665), 'app.parser.add_argument', 'parser.add_argument', (['"""-f"""', '"""--functions"""'], {'dest': '"""functions"""', 'action': '"""store"""', 'nargs': '"""?"""', 'default': '(False)', 'const': '"""All"""', 'help': '"""build all functions, or the folder/*.sql config specified. Root folder is the database name."""'}), "('-f', '--functions', dest='functions', action='store',\n nargs='?', default=False, const='All', help=\n 'build all functions, or the folder/*.sql config specified. Root folder is the database name.'\n )\n", (1456, 1665), True, 'from app import parser as parser\n'), ((1709, 1922), 'app.parser.add_argument', 'parser.add_argument', (['"""-c"""', '"""--scripts"""'], {'dest': '"""scripts"""', 'action': '"""store"""', 'nargs': '"""?"""', 'default': '(False)', 'const': '"""All"""', 'help': '"""run all scripts, or the folder/*.sql specified. Root folder is the database name."""'}), "('-c', '--scripts', dest='scripts', action='store',\n nargs='?', default=False, const='All', help=\n 'run all scripts, or the folder/*.sql specified. Root folder is the database name.'\n )\n", (1728, 1922), True, 'from app import parser as parser\n'), ((463, 489), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (479, 489), False, 'import os\n'), ((2205, 2226), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2224, 2226), False, 'import traceback\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-09-06 21:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('calls', '0006_auto_20180906_0430'),
]
operations = [
migrations.AlterField(
model_name='callcampaign',
name='status',
field=models.IntegerField(choices=[(1, 'New'), (10, 'Approved'), (20, 'In Progress'), (30, 'Paused'), (40, 'Complete'), (50, 'Declined'), (60, 'Suspended')], default=1),
),
]
|
[
"django.db.models.IntegerField"
] |
[((406, 576), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'New'), (10, 'Approved'), (20, 'In Progress'), (30, 'Paused'), (40,\n 'Complete'), (50, 'Declined'), (60, 'Suspended')]", 'default': '(1)'}), "(choices=[(1, 'New'), (10, 'Approved'), (20,\n 'In Progress'), (30, 'Paused'), (40, 'Complete'), (50, 'Declined'), (60,\n 'Suspended')], default=1)\n", (425, 576), False, 'from django.db import migrations, models\n')]
|
# vim: filetype=python ts=2 sw=2 sts=2 et :
# (c) 2021, <NAME> (<EMAIL>) unlicense.org
"""Standard library functions."""
import re
import sys
import copy
class obj:
"Simple base class with pretty print"
def __init__(i, **d): i.__dict__.update(d)
def __repr__(i) : return i.__class__.__name__+"{" + ', '.join(
[f":{k} {v}" for k, v in sorted(i.__dict__.items()) if k[0] != "_"]) + "}"
def clone(i): return obj(**copy.deepcopy(i.__dict__))
def cli(**d):
"If command line has :key val, and 'key' is in d, then d[key]=val"
i=-1
while i<len(sys.argv)-1:
i, key, now = i+1, sys.argv[i][1:], coerce(sys.argv[i+1])
if key in d:
i += 1
if type(now) == type(d[key]): d[key] = now
return d
def csv(src=None):
"""Iterator. returns lines from files or standard input or a string,
return an iterator for the lines."""
def lines(src):
for line in src:
line = re.sub(r'([\n\t\r ]|#.*)', '', line)
if line:
line = line.split(",")
line = [coerce(x) for x in line]
yield line
if src and src[-4:] == ".csv":
with open(src) as fp:
for out in lines(fp): yield out
else:
src = src.split("\n") if src else sys.stdin
for out in lines(src):
yield out
def coerce(string):
"""When appropriate, coerce `string` into some type.
Supports floats, ints, booleans and strings."""
if string == "True": return True
if string == "False": return False
try: return int(string)
except Exception:
try: return float(string)
except Exception: return string
def rs(l,r=0):
"Round a list to `r` decimal places."
return [(f"{x:.{r}f}" if isinstance(x,(int,float)) else str(x))
for x in l]
|
[
"copy.deepcopy",
"re.sub"
] |
[((907, 945), 're.sub', 're.sub', (['"""([\\\\n\\\\t\\\\r ]|#.*)"""', '""""""', 'line'], {}), "('([\\\\n\\\\t\\\\r ]|#.*)', '', line)\n", (913, 945), False, 'import re\n'), ((427, 452), 'copy.deepcopy', 'copy.deepcopy', (['i.__dict__'], {}), '(i.__dict__)\n', (440, 452), False, 'import copy\n')]
|
import unittest
import torch.utils.data
from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario
from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling
from nuplan.planning.training.data_loader.scenario_dataset import ScenarioDataset
from nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder import RasterFeatureBuilder
from nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder import VectorMapFeatureBuilder
from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate
from nuplan.planning.training.preprocessing.feature_preprocessor import FeaturePreprocessor
from nuplan.planning.training.preprocessing.features.vector_map import VectorMap
from nuplan.planning.training.preprocessing.target_builders.ego_trajectory_target_builder import (
EgoTrajectoryTargetBuilder,
)
NUM_BATCHES = 20
class TestCollateDataLoader(unittest.TestCase):
"""
Tests data loading functionality
"""
def setUp(self) -> None:
"""Set up the test case."""
self.batch_size = 4
feature_preprocessor = FeaturePreprocessor(
cache_path=None,
feature_builders=[
RasterFeatureBuilder(
map_features={'LANE': 1.0, 'INTERSECTION': 1.0, 'STOP_LINE': 0.5, 'CROSSWALK': 0.5},
num_input_channels=4,
target_width=224,
target_height=224,
target_pixel_size=0.5,
ego_width=2.297,
ego_front_length=4.049,
ego_rear_length=1.127,
ego_longitudinal_offset=0.0,
baseline_path_thickness=1,
),
VectorMapFeatureBuilder(radius=20),
],
target_builders=[EgoTrajectoryTargetBuilder(TrajectorySampling(time_horizon=6.0, num_poses=12))],
force_feature_computation=False,
)
# Keep only a few scenarios instead of testing the whole extraction
scenario = get_test_nuplan_scenario()
scenarios = [scenario] * 3
dataset = ScenarioDataset(scenarios=scenarios, feature_preprocessor=feature_preprocessor)
self.dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
num_workers=2,
pin_memory=False,
drop_last=True,
collate_fn=FeatureCollate(),
)
def test_dataloader(self) -> None:
"""
Tests that the training dataloader can be iterated without errors
"""
dataloader = self.dataloader
dataloader_iter = iter(dataloader)
iterations = min(len(dataloader), NUM_BATCHES)
for _ in range(iterations):
features, targets = next(dataloader_iter)
self.assertTrue("vector_map" in features.keys())
vector_map: VectorMap = features["vector_map"]
self.assertEqual(vector_map.num_of_batches, self.batch_size)
self.assertEqual(len(vector_map.coords), self.batch_size)
self.assertEqual(len(vector_map.multi_scale_connections), self.batch_size)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"nuplan.planning.training.preprocessing.feature_collate.FeatureCollate",
"nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder.RasterFeatureBuilder",
"nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder.VectorMapFeatureBuilder",
"nuplan.planning.training.data_loader.scenario_dataset.ScenarioDataset",
"nuplan.planning.simulation.trajectory.trajectory_sampling.TrajectorySampling",
"nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils.get_test_nuplan_scenario"
] |
[((3312, 3327), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3325, 3327), False, 'import unittest\n'), ((2144, 2170), 'nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils.get_test_nuplan_scenario', 'get_test_nuplan_scenario', ([], {}), '()\n', (2168, 2170), False, 'from nuplan.planning.scenario_builder.nuplan_db.test.nuplan_scenario_test_utils import get_test_nuplan_scenario\n'), ((2225, 2304), 'nuplan.planning.training.data_loader.scenario_dataset.ScenarioDataset', 'ScenarioDataset', ([], {'scenarios': 'scenarios', 'feature_preprocessor': 'feature_preprocessor'}), '(scenarios=scenarios, feature_preprocessor=feature_preprocessor)\n', (2240, 2304), False, 'from nuplan.planning.training.data_loader.scenario_dataset import ScenarioDataset\n'), ((2537, 2553), 'nuplan.planning.training.preprocessing.feature_collate.FeatureCollate', 'FeatureCollate', ([], {}), '()\n', (2551, 2553), False, 'from nuplan.planning.training.preprocessing.feature_collate import FeatureCollate\n'), ((1288, 1613), 'nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder.RasterFeatureBuilder', 'RasterFeatureBuilder', ([], {'map_features': "{'LANE': 1.0, 'INTERSECTION': 1.0, 'STOP_LINE': 0.5, 'CROSSWALK': 0.5}", 'num_input_channels': '(4)', 'target_width': '(224)', 'target_height': '(224)', 'target_pixel_size': '(0.5)', 'ego_width': '(2.297)', 'ego_front_length': '(4.049)', 'ego_rear_length': '(1.127)', 'ego_longitudinal_offset': '(0.0)', 'baseline_path_thickness': '(1)'}), "(map_features={'LANE': 1.0, 'INTERSECTION': 1.0,\n 'STOP_LINE': 0.5, 'CROSSWALK': 0.5}, num_input_channels=4, target_width\n =224, target_height=224, target_pixel_size=0.5, ego_width=2.297,\n ego_front_length=4.049, ego_rear_length=1.127, ego_longitudinal_offset=\n 0.0, baseline_path_thickness=1)\n", (1308, 1613), False, 'from nuplan.planning.training.preprocessing.feature_builders.raster_feature_builder import RasterFeatureBuilder\n'), ((1832, 1866), 'nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder.VectorMapFeatureBuilder', 'VectorMapFeatureBuilder', ([], {'radius': '(20)'}), '(radius=20)\n', (1855, 1866), False, 'from nuplan.planning.training.preprocessing.feature_builders.vector_map_feature_builder import VectorMapFeatureBuilder\n'), ((1939, 1989), 'nuplan.planning.simulation.trajectory.trajectory_sampling.TrajectorySampling', 'TrajectorySampling', ([], {'time_horizon': '(6.0)', 'num_poses': '(12)'}), '(time_horizon=6.0, num_poses=12)\n', (1957, 1989), False, 'from nuplan.planning.simulation.trajectory.trajectory_sampling import TrajectorySampling\n')]
|
import unittest
from knapsack_with_repetition import knapsack_with_repetition
from knapsack_no_repetition import knapsack_no_repetition
class Test_Case_Knapsack(unittest.TestCase):
def test_knapsack_with_repetition(self):
self.assertEqual(knapsack_with_repetition([6, 3, 4, 2], [30,14,16,9], 10), 48)
def test_knapsack_no_repetition(self):
self.assertEqual(knapsack_no_repetition([6, 3, 4, 2], [30,14,16,9], 10), 46)
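# For reference only (illustrative sketch, not the modules under test): standard
# dynamic-programming solutions consistent with the expected values above
# (48 with repetition, 46 without) could look like the hypothetical helpers below.
def _sketch_knapsack_with_repetition(weights, values, capacity):
    # unbounded knapsack: each item may be reused any number of times
    best = [0] * (capacity + 1)
    for c in range(1, capacity + 1):
        for w, v in zip(weights, values):
            if w <= c:
                best[c] = max(best[c], best[c - w] + v)
    return best[capacity]
def _sketch_knapsack_no_repetition(weights, values, capacity):
    # 0/1 knapsack: iterate capacities downwards so each item is used at most once
    best = [0] * (capacity + 1)
    for w, v in zip(weights, values):
        for c in range(capacity, w - 1, -1):
            best[c] = max(best[c], best[c - w] + v)
    return best[capacity]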
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"knapsack_with_repetition.knapsack_with_repetition",
"knapsack_no_repetition.knapsack_no_repetition"
] |
[((474, 489), 'unittest.main', 'unittest.main', ([], {}), '()\n', (487, 489), False, 'import unittest\n'), ((252, 311), 'knapsack_with_repetition.knapsack_with_repetition', 'knapsack_with_repetition', (['[6, 3, 4, 2]', '[30, 14, 16, 9]', '(10)'], {}), '([6, 3, 4, 2], [30, 14, 16, 9], 10)\n', (276, 311), False, 'from knapsack_with_repetition import knapsack_with_repetition\n'), ((382, 439), 'knapsack_no_repetition.knapsack_no_repetition', 'knapsack_no_repetition', (['[6, 3, 4, 2]', '[30, 14, 16, 9]', '(10)'], {}), '([6, 3, 4, 2], [30, 14, 16, 9], 10)\n', (404, 439), False, 'from knapsack_no_repetition import knapsack_no_repetition\n')]
|
import re
ASCII_IS_DEFAULT_ENCODING = False
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = '\xef\xbb\xbf'
def _prepare_source(fn):
"""Read the source code for re-writing."""
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (not source.startswith(BOM_UTF8) and
cookie_re.match(source[0:end1]) is None and
cookie_re.match(source[end1 + 1:end2]) is None):
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
# On Python versions which are not 2.7 and less than or equal to 3.1, the
# parser expects *nix newlines.
return stat, source
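# Illustration (added for exposition, not part of the original module): cookie_re
# recognises PEP 263 style coding declarations on the first two source lines.
assert cookie_re.match("# -*- coding: utf-8 -*-") is not None
assert cookie_re.match("print('hello')") is None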
|
[
"re.compile"
] |
[((58, 111), 're.compile', 're.compile', (['"""^[ \\\\t\\\\f]*#.*coding[:=][ \\\\t]*[-\\\\w.]+"""'], {}), "('^[ \\\\t\\\\f]*#.*coding[:=][ \\\\t]*[-\\\\w.]+')\n", (68, 111), False, 'import re\n')]
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('src')
import unittest
from src.get_posters import (download_poster, get_title_display,
get_yearly_url_imgs)
from src.utils import create_folder
class UtilsGetPosters(unittest.TestCase):
def setUp(self):
self.year = 1913
create_folder('./data/{}/posters'.format(self.year))
create_folder('./data/{}/thumbnails'.format(self.year))
self.dict_imgs_1913 = get_yearly_url_imgs(1913)
def test_get_yearly_url_imgs(self):
dict_imgs_1913 = self.dict_imgs_1913
self.assertTrue(isinstance(
dict_imgs_1913, list))
self.assertTrue(all(
[isinstance(x, dict) for x in dict_imgs_1913]))
self.assertTrue(all(
['title' in x.keys() for x in dict_imgs_1913]))
self.assertTrue(all(
['year' in x.keys() for x in dict_imgs_1913]))
self.assertTrue(all(
['title_display' in x.keys() for x in dict_imgs_1913]))
# self.assertTrue(all(
# ['base64_img' in x.keys() for x in dict_imgs_1913]))
# self.assertTrue(all(
# ['base64_thumb' in x.keys() for x in dict_imgs_1913]))
self.assertTrue(all(
['url_img' in x.keys() for x in dict_imgs_1913]))
def test_download_poster(self):
link = self.dict_imgs_1913[0]
img, thumb = download_poster(link['url_img'], size_thumb=(50, 50))
self.assertTrue(isinstance(img, str))
self.assertTrue(isinstance(thumb, str))
def test_get_title_display(self):
title = 'my movie title'
year = 2010
url1 = 'http://dummyurl.com/2010/posters/my_movie_title.jpg'
url2 = 'http://dummyurl.com/2010/posters/my_movie_title_ver2.jpg'
url3 = 'http://dummyurl.com/2010/posters/my_movie_title_ver28.jpg'
title_display1 = get_title_display(title, year, url1)
title_display2 = get_title_display(title, year, url2)
title_display3 = get_title_display(title, year, url3)
self.assertTrue(
title_display1 == 'my movie title, 2010')
self.assertTrue(
title_display2 == 'my movie title, 2010, v2')
self.assertTrue(
title_display3 == 'my movie title, 2010, v28')
|
[
"sys.path.append",
"src.get_posters.get_title_display",
"src.get_posters.download_poster",
"src.get_posters.get_yearly_url_imgs"
] |
[((37, 59), 'sys.path.append', 'sys.path.append', (['"""src"""'], {}), "('src')\n", (52, 59), False, 'import sys\n'), ((473, 498), 'src.get_posters.get_yearly_url_imgs', 'get_yearly_url_imgs', (['(1913)'], {}), '(1913)\n', (492, 498), False, 'from src.get_posters import download_poster, get_title_display, get_yearly_url_imgs\n'), ((1412, 1465), 'src.get_posters.download_poster', 'download_poster', (["link['url_img']"], {'size_thumb': '(50, 50)'}), "(link['url_img'], size_thumb=(50, 50))\n", (1427, 1465), False, 'from src.get_posters import download_poster, get_title_display, get_yearly_url_imgs\n'), ((1896, 1932), 'src.get_posters.get_title_display', 'get_title_display', (['title', 'year', 'url1'], {}), '(title, year, url1)\n', (1913, 1932), False, 'from src.get_posters import download_poster, get_title_display, get_yearly_url_imgs\n'), ((1958, 1994), 'src.get_posters.get_title_display', 'get_title_display', (['title', 'year', 'url2'], {}), '(title, year, url2)\n', (1975, 1994), False, 'from src.get_posters import download_poster, get_title_display, get_yearly_url_imgs\n'), ((2020, 2056), 'src.get_posters.get_title_display', 'get_title_display', (['title', 'year', 'url3'], {}), '(title, year, url3)\n', (2037, 2056), False, 'from src.get_posters import download_poster, get_title_display, get_yearly_url_imgs\n')]
|
"""
{This script reads in the raw chain and plots time series for all parameters
in order to identify the burn-in}
"""
# Libs
from cosmo_utils.utils import work_paths as cwpaths
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
import pandas as pd
import numpy as np
import math
import os
__author__ = '{<NAME>}'
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']},size=20)
rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
rc('axes', linewidth=2)
rc('xtick.major', width=2, size=7)
rc('ytick.major', width=2, size=7)
def find_nearest(array, value):
"""Finds the element in array that is closest to the value
Args:
array (numpy.array): Array of values
value (numpy.float): Value to find closest match to
Returns:
numpy.float: Closest match found in array
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
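# e.g. (added for exposition): the closest element wins, and ties go to the
# first occurrence because argmin returns the first minimal index.
assert find_nearest(np.array([1.0, 2.5, 4.0]), 2.7) == 2.5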
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']
survey = 'eco'
mf_type = 'smf'
quenching = 'hybrid'
nwalkers = 260
if mf_type == 'smf':
path_to_proc = path_to_proc + 'smhm_colour_run27/'
else:
path_to_proc = path_to_proc + 'bmhm_run3/'
chain_fname = path_to_proc + 'mcmc_{0}_colour_raw.txt'.format(survey)
if quenching == 'hybrid':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mstar_q','Mhalo_q','mu','nu'],
header=None)
emcee_table = emcee_table[emcee_table.Mstar_q.values != '#']
emcee_table.Mstar_q = emcee_table.Mstar_q.astype(np.float64)
emcee_table.Mhalo_q = emcee_table.Mhalo_q.astype(np.float64)
emcee_table.mu = emcee_table.mu.astype(np.float64)
emcee_table.nu = emcee_table.nu.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
nu_val = emcee_table.values[idx+1][0]
row[3] = nu_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
# emcee_table.nu = np.log10(emcee_table.nu)
elif quenching == 'halo':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mh_qc','Mh_qs','mu_c','mu_s'],
header=None)
emcee_table = emcee_table[emcee_table.Mh_qc.values != '#']
emcee_table.Mh_qc = emcee_table.Mh_qc.astype(np.float64)
emcee_table.Mh_qs = emcee_table.Mh_qs.astype(np.float64)
emcee_table.mu_c = emcee_table.mu_c.astype(np.float64)
emcee_table.mu_s = emcee_table.mu_s.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
mu_s_val = emcee_table.values[idx+1][0]
row[3] = mu_s_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
chi2_fname = path_to_proc + '{0}_colour_chi2.txt'.format(survey)
chi2_df = pd.read_csv(chi2_fname,header=None,names=['chisquared'])
chi2 = np.log10(chi2_df.chisquared.values)
emcee_table['chi2'] = chi2
# Each chunk is now a step and within each chunk, each row is a walker
# Different from what it used to be where each chunk was a walker and
# within each chunk, each row was a step
walker_id_arr = np.zeros(len(emcee_table))
iteration_id_arr = np.zeros(len(emcee_table))
counter_wid = 0
counter_stepid = 0
for idx,row in emcee_table.iterrows():
counter_wid += 1
if idx % nwalkers == 0:
counter_stepid += 1
counter_wid = 1
walker_id_arr[idx] = counter_wid
iteration_id_arr[idx] = counter_stepid
id_data = {'walker_id': walker_id_arr, 'iteration_id': iteration_id_arr}
id_df = pd.DataFrame(id_data, index=emcee_table.index)
emcee_table = emcee_table.assign(**id_df)
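# Layout illustration (added for exposition): with nwalkers = 260 the raw chain
# is stored step-major, so rows 0-259 are iteration 1 (walkers 1-260), rows
# 260-519 are iteration 2, and so on; grouping by iteration_id below therefore
# averages over all walkers at a fixed step.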
grps = emcee_table.groupby('iteration_id')
grp_keys = grps.groups.keys()
if quenching == 'hybrid':
Mstar_q = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
Mhalo_q = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
mu = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
nu = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
chi2 = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
for idx,key in enumerate(grp_keys):
group = grps.get_group(key)
Mstar_q_mean = np.mean(group.Mstar_q.values)
Mstar_q_std = np.std(group.Mstar_q.values)
Mstar_q[0][idx] = Mstar_q_mean
Mstar_q[1][idx] = Mstar_q_std
Mhalo_q_mean = np.mean(group.Mhalo_q.values)
Mhalo_q_std = np.std(group.Mhalo_q.values)
Mhalo_q[0][idx] = Mhalo_q_mean
Mhalo_q[1][idx] = Mhalo_q_std
mu_mean = np.mean(group.mu.values)
mu_std = np.std(group.mu.values)
mu[0][idx] = mu_mean
mu[1][idx] = mu_std
nu_mean = np.mean(group.nu.values)
nu_std = np.std(group.nu.values)
nu[0][idx] = nu_mean
nu[1][idx] = nu_std
chi2_mean = np.mean(group.chi2.values)
chi2_std = np.std(group.chi2.values)
chi2[0][idx] = chi2_mean
chi2[1][idx] = chi2_std
zumandelbaum_param_vals = [10.5, 13.76, 0.69, 0.15]
grp_keys = list(grp_keys)
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
figsize=(10,10))
ax1.plot(grp_keys, Mstar_q[0],c='#941266',ls='--', marker='o')
ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
ax2.plot(grp_keys, Mhalo_q[0], c='#941266',ls='--', marker='o')
ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
ax3.plot(grp_keys, mu[0], c='#941266',ls='--', marker='o')
ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
ax4.plot(grp_keys, nu[0], c='#941266',ls='--', marker='o')
ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
ax1.fill_between(grp_keys, Mstar_q[0]-Mstar_q[1], Mstar_q[0]+Mstar_q[1],
alpha=0.3, color='#941266')
ax2.fill_between(grp_keys, Mhalo_q[0]-Mhalo_q[1], Mhalo_q[0]+Mhalo_q[1], \
alpha=0.3, color='#941266')
ax3.fill_between(grp_keys, mu[0]-mu[1], mu[0]+mu[1], \
alpha=0.3, color='#941266')
ax4.fill_between(grp_keys, nu[0]-nu[1], nu[0]+nu[1], \
alpha=0.3, color='#941266')
ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
alpha=0.3, color='#941266')
ax1.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{*}}$")
ax2.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{h}}$")
ax3.set_ylabel(r"$\boldsymbol{\mu}$")
# ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
ax4.set_ylabel(r"$\boldsymbol{\nu}$")
ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
# ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
# ax1.set_yscale('log')
# ax2.set_yscale('log')
ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax4.annotate(0.15, (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
plt.xlabel(r"$\mathbf{iteration\ number}$")
plt.show()
elif quenching == 'halo':
Mh_qc = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
Mh_qs = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
mu_c = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
mu_s = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
chi2 = [np.zeros(len(grp_keys)),np.zeros(len(grp_keys))]
for idx,key in enumerate(grp_keys):
group = grps.get_group(key)
Mh_qc_mean = np.mean(group.Mh_qc.values)
Mh_qc_std = np.std(group.Mh_qc.values)
Mh_qc[0][idx] = Mh_qc_mean
Mh_qc[1][idx] = Mh_qc_std
Mh_qs_mean = np.mean(group.Mh_qs.values)
Mh_qs_std = np.std(group.Mh_qs.values)
Mh_qs[0][idx] = Mh_qs_mean
Mh_qs[1][idx] = Mh_qs_std
mu_c_mean = np.mean(group.mu_c.values)
mu_c_std = np.std(group.mu_c.values)
mu_c[0][idx] = mu_c_mean
mu_c[1][idx] = mu_c_std
mu_s_mean = np.mean(group.mu_s.values)
mu_s_std = np.std(group.mu_s.values)
mu_s[0][idx] = mu_s_mean
mu_s[1][idx] = mu_s_std
chi2_mean = np.mean(group.chi2.values)
chi2_std = np.std(group.chi2.values)
chi2[0][idx] = chi2_mean
chi2[1][idx] = chi2_std
zumandelbaum_param_vals = [12.2, 12.17, 0.38, 0.15]
grp_keys = list(grp_keys)
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
figsize=(10,10))
ax1.plot(grp_keys, Mh_qc[0],c='#941266',ls='--', marker='o')
ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
ax2.plot(grp_keys, Mh_qs[0], c='#941266',ls='--', marker='o')
ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
ax3.plot(grp_keys, mu_c[0], c='#941266',ls='--', marker='o')
ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
ax4.plot(grp_keys, mu_s[0], c='#941266',ls='--', marker='o')
ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
ax1.fill_between(grp_keys, Mh_qc[0]-Mh_qc[1], Mh_qc[0]+Mh_qc[1],
alpha=0.3, color='#941266')
ax2.fill_between(grp_keys, Mh_qs[0]-Mh_qs[1], Mh_qs[0]+Mh_qs[1], \
alpha=0.3, color='#941266')
ax3.fill_between(grp_keys, mu_c[0]-mu_c[1], mu_c[0]+mu_c[1], \
alpha=0.3, color='#941266')
ax4.fill_between(grp_keys, mu_s[0]-mu_s[1], mu_s[0]+mu_s[1], \
alpha=0.3, color='#941266')
ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
alpha=0.3, color='#941266')
ax1.set_ylabel(r"$\mathbf{log_{10}\ Mh_{qc}}$")
ax2.set_ylabel(r"$\mathbf{log_{10}\ Mh_{qs}}$")
ax3.set_ylabel(r"$\boldsymbol{\ mu_{c}}$")
# ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
ax4.set_ylabel(r"$\boldsymbol{\ mu_{s}}$")
ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
# ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
# ax1.set_yscale('log')
# ax2.set_yscale('log')
ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax4.annotate(0.15, (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
plt.xlabel(r"$\mathbf{iteration\ number}$")
plt.show()
######################## Calculate acceptance fraction ########################
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_proc = dict_of_paths['proc_dir']
if mf_type == 'smf':
path_to_proc = path_to_proc + 'smhm_colour_run21/'
else:
path_to_proc = path_to_proc + 'bmhm_run3/'
chain_fname = path_to_proc + 'mcmc_{0}_colour_raw.txt'.format(survey)
if quenching == 'hybrid':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mstar_q','Mhalo_q','mu','nu'],
header=None)
emcee_table = emcee_table[emcee_table.Mstar_q.values != '#']
emcee_table.Mstar_q = emcee_table.Mstar_q.astype(np.float64)
emcee_table.Mhalo_q = emcee_table.Mhalo_q.astype(np.float64)
emcee_table.mu = emcee_table.mu.astype(np.float64)
emcee_table.nu = emcee_table.nu.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
nu_val = emcee_table.values[idx+1][0]
row[3] = nu_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
# emcee_table.nu = np.log10(emcee_table.nu)
elif quenching == 'halo':
emcee_table = pd.read_csv(chain_fname, delim_whitespace=True,
names=['Mh_qc','Mh_qs','mu_c','mu_s'],
header=None)
emcee_table = emcee_table[emcee_table.Mh_qc.values != '#']
emcee_table.Mh_qc = emcee_table.Mh_qc.astype(np.float64)
emcee_table.Mh_qs = emcee_table.Mh_qs.astype(np.float64)
emcee_table.mu_c = emcee_table.mu_c.astype(np.float64)
emcee_table.mu_s = emcee_table.mu_s.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[3] == True and np.isnan(row)[2] == False:
mu_s_val = emcee_table.values[idx+1][0]
row[3] = mu_s_val
emcee_table = emcee_table.dropna(axis='index', how='any').\
reset_index(drop=True)
num_unique_rows = emcee_table[['Mstar_q','Mhalo_q','mu','nu']].drop_duplicates().shape[0]
num_rows = len(emcee_table)
acceptance_fraction = num_unique_rows / num_rows
print("Acceptance fraction: {0}%".format(np.round(acceptance_fraction,2)*100))
# For behroozi chains
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_proc = dict_of_paths['proc_dir']
chain_fname = path_to_proc + 'smhm_run6/mcmc_{0}_raw.txt'.\
format(survey)
emcee_table = pd.read_csv(chain_fname,
names=['mhalo_c','mstellar_c','lowmass_slope','highmass_slope',
'scatter'],header=None, delim_whitespace=True)
emcee_table = emcee_table[emcee_table.mhalo_c.values != '#']
emcee_table.mhalo_c = emcee_table.mhalo_c.astype(np.float64)
emcee_table.mstellar_c = emcee_table.mstellar_c.astype(np.float64)
emcee_table.lowmass_slope = emcee_table.lowmass_slope.astype(np.float64)
for idx,row in enumerate(emcee_table.values):
if np.isnan(row)[4] == True and np.isnan(row)[3] == False:
scatter_val = emcee_table.values[idx+1][0]
row[4] = scatter_val
emcee_table = emcee_table.dropna(axis='index', how='any').reset_index(drop=True)
num_unique_rows = emcee_table[['mhalo_c','mstellar_c','lowmass_slope',\
'highmass_slope']].drop_duplicates().shape[0]
num_rows = len(emcee_table)
acceptance_fraction = num_unique_rows / num_rows
print("Acceptance fraction: {0}%".format(np.round(acceptance_fraction,2)*100))
################################################################################
def hybrid_quenching_model(theta, gals_df, mock, randint=None):
"""
Apply hybrid quenching model from Zu and Mandelbaum 2015
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
"""
# parameter values from Table 1 of Zu and Mandelbaum 2015 "prior case"
Mstar_q = theta[0] # Msun/h
Mh_q = theta[1] # Msun/h
mu = theta[2]
nu = theta[3]
cen_hosthalo_mass_arr, sat_hosthalo_mass_arr = get_host_halo_mock(gals_df, \
mock)
cen_stellar_mass_arr, sat_stellar_mass_arr = get_stellar_mock(gals_df, mock, \
randint)
f_red_cen = 1 - np.exp(-((cen_stellar_mass_arr/(10**Mstar_q))**mu))
g_Mstar = np.exp(-((sat_stellar_mass_arr/(10**Mstar_q))**mu))
h_Mh = np.exp(-((sat_hosthalo_mass_arr/(10**Mh_q))**nu))
f_red_sat = 1 - (g_Mstar * h_Mh)
return f_red_cen, f_red_sat
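# Sanity check of the functional form (added for exposition; illustrative
# variable name): a central sitting exactly at Mstar_q has
# f_red_cen = 1 - exp(-1) ~ 0.632 for any mu, since the mass ratio is 1.
_f_red_cen_demo = 1 - np.exp(-((10**10.5 / 10**10.5)**0.69))  # ~0.632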
def assign_colour_label_mock(f_red_cen, f_red_sat, gals_df, drop_fred=False):
"""
Assign colour label to mock catalog
Parameters
----------
f_red_cen: array
Array of central red fractions
f_red_sat: array
Array of satellite red fractions
gals_df: pandas Dataframe
Mock catalog
drop_fred: boolean
        Whether or not to drop the red fraction column after colour has been
        assigned
Returns
---------
df: pandas Dataframe
Dataframe with colour label and random number assigned as
new columns
"""
# Copy of dataframe
df = gals_df.copy()
# Saving labels
color_label_arr = [[] for x in range(len(df))]
rng_arr = [[] for x in range(len(df))]
# Adding columns for f_red to df
df.loc[:, 'f_red'] = np.zeros(len(df))
df.loc[df['cs_flag'] == 1, 'f_red'] = f_red_cen
df.loc[df['cs_flag'] == 0, 'f_red'] = f_red_sat
# Converting to array
f_red_arr = df['f_red'].values
# Looping over galaxies
for ii, cs_ii in enumerate(df['cs_flag']):
# Draw a random number
rng = np.random.uniform()
# Comparing against f_red
if (rng >= f_red_arr[ii]):
color_label = 'B'
else:
color_label = 'R'
# Saving to list
color_label_arr[ii] = color_label
rng_arr[ii] = rng
## Assigning to DataFrame
df.loc[:, 'colour_label'] = color_label_arr
df.loc[:, 'rng'] = rng_arr
    # Dropping `f_red` column
if drop_fred:
df.drop('f_red', axis=1, inplace=True)
return df
def get_host_halo_mock(gals_df, mock):
"""
Get host halo mass from mock catalog
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
cen_halos: array
Array of central host halo masses
sat_halos: array
Array of satellite host halo masses
"""
df = gals_df.copy()
# groups = df.groupby('halo_id')
# keys = groups.groups.keys()
# for key in keys:
# group = groups.get_group(key)
# for index, value in enumerate(group.cs_flag):
# if value == 1:
# cen_halos.append(group.loghalom.values[index])
# else:
# sat_halos.append(group.loghalom.values[index])
if mock == 'vishnu':
cen_halos = []
sat_halos = []
for index, value in enumerate(df.cs_flag):
if value == 1:
cen_halos.append(df.halo_mvir.values[index])
else:
sat_halos.append(df.halo_mvir.values[index])
else:
cen_halos = []
sat_halos = []
for index, value in enumerate(df.cs_flag):
if value == 1:
cen_halos.append(df.loghalom.values[index])
else:
sat_halos.append(df.loghalom.values[index])
cen_halos = np.array(cen_halos)
sat_halos = np.array(sat_halos)
return cen_halos, sat_halos
def get_stellar_mock(gals_df, mock, randint=None):
"""
Get stellar mass from mock catalog
Parameters
----------
gals_df: pandas dataframe
Mock catalog
Returns
---------
cen_gals: array
Array of central stellar masses
sat_gals: array
Array of satellite stellar masses
"""
df = gals_df.copy()
if mock == 'vishnu':
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.cs_flag):
if value == 1:
cen_gals.append(df['{0}'.format(randint)].values[idx])
elif value == 0:
sat_gals.append(df['{0}'.format(randint)].values[idx])
else:
cen_gals = []
sat_gals = []
for idx,value in enumerate(df.cs_flag):
if value == 1:
cen_gals.append(df.logmstar.values[idx])
elif value == 0:
sat_gals.append(df.logmstar.values[idx])
cen_gals = np.array(cen_gals)
sat_gals = np.array(sat_gals)
return cen_gals, sat_gals
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
h1_bool: boolean
True if units of masses are h=1, False if units of masses are not h=1
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
    bins: array
        Array of bin edge values
    counts: array
        Array of galaxy counts per bin
    """
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco' and colour_flag == 'R':
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 6
elif survey == 'eco' and colour_flag == 'B':
bin_max = np.round(np.log10((10**11) / 2.041), 1)
bin_num = 6
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
else:
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
bins = np.linspace(bin_min, bin_max, bin_num)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
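# Usage sketch (added for exposition; hypothetical masses, kept commented so it
# is not executed as part of the analysis): for the ECO volume a call would be
# maxis, phi, err, bins, counts = diff_smf(
#     np.random.uniform(8.9, 11.5, 1000), 151829.26, False)
# where the input masses are log10(Mstar) in h=0.7 units and phi comes back as
# log10 of the number density per dex per unit volume.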
def measure_all_smf(table, volume, data_bool, randint_logmstar=None):
"""
Calculates differential stellar mass function for all, red and blue galaxies
from mock/data
Parameters
----------
table: pandas Dataframe
Dataframe of either mock or data
volume: float
Volume of simulation/survey
    data_bool: Boolean
        Data or mock
    randint_logmstar: int, optional
        Mock number used to pick the logmstar column when measuring from the
        Vishnu mock (i.e. when data_bool is False)
Returns
---------
3 multidimensional arrays of stellar mass, phi, total error in SMF and
counts per bin for all, red and blue galaxies
"""
colour_col = 'colour_label'
if data_bool:
logmstar_col = 'logmstar'
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, False, 'B')
else:
# logmstar_col = 'stellar_mass'
logmstar_col = '{0}'.format(randint_logmstar)
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(table[logmstar_col], volume, True)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'R'],
volume, True, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(table[logmstar_col].loc[table[colour_col] == 'B'],
volume, True, 'B')
return [max_total, phi_total, err_total, counts_total] , \
[max_red, phi_red, err_red, counts_red] , \
[max_blue, phi_blue, err_blue, counts_blue]
def assign_colour_label_data(catl):
"""
Assign colour label to data
Parameters
----------
catl: pandas Dataframe
Data catalog
Returns
---------
catl: pandas Dataframe
Data catalog with colour label assigned as new column
"""
logmstar_arr = catl.logmstar.values
u_r_arr = catl.modelu_rcorr.values
colour_label_arr = np.empty(len(catl), dtype='str')
for idx, value in enumerate(logmstar_arr):
# Divisions taken from Moffett et al. 2015 equation 1
if value <= 9.1:
if u_r_arr[idx] > 1.457:
colour_label = 'R'
else:
colour_label = 'B'
if value > 9.1 and value < 10.1:
divider = 0.24 * value - 0.7
if u_r_arr[idx] > divider:
colour_label = 'R'
else:
colour_label = 'B'
if value >= 10.1:
if u_r_arr[idx] > 1.7:
colour_label = 'R'
else:
colour_label = 'B'
colour_label_arr[idx] = colour_label
catl['colour_label'] = colour_label_arr
return catl
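# Worked example of the Moffett et al. 2015 divider (added for exposition):
# at logmstar = 9.6 the cut is u-r = 0.24*9.6 - 0.7 = 1.604, so a galaxy with
# modelu_rcorr = 1.7 at that mass is labelled 'R'.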
def read_data_catl(path_to_file, survey):
"""
Reads survey catalog from file
Parameters
----------
path_to_file: `string`
Path to survey catalog file
survey: `string`
Name of survey
Returns
---------
catl: `pandas.DataFrame`
Survey catalog with grpcz, abs rmag and stellar mass limits
volume: `float`
Volume of survey
z_median: `float`
Median redshift of survey
"""
if survey == 'eco':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'logmh', 'logmh_s',
'fc', 'grpmb', 'grpms','modelu_rcorr']
# 13878 galaxies
eco_buff = pd.read_csv(path_to_file,delimiter=",", header=0, \
usecols=columns)
if mf_type == 'smf':
# 6456 galaxies
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = eco_buff.loc[(eco_buff.grpcz.values >= 3000) &
(eco_buff.grpcz.values <= 7000) &
(eco_buff.absrmag.values <= -17.33)]
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.125
z_median = np.median(catl.grpcz.values) / (3 * 10**5)
elif survey == 'resolvea' or survey == 'resolveb':
columns = ['name', 'radeg', 'dedeg', 'cz', 'grpcz', 'absrmag',
'logmstar', 'logmgas', 'grp', 'grpn', 'grpnassoc', 'logmh',
'logmh_s', 'fc', 'grpmb', 'grpms', 'f_a', 'f_b']
# 2286 galaxies
resolve_live18 = pd.read_csv(path_to_file, delimiter=",", header=0, \
usecols=columns)
if survey == 'resolvea':
if mf_type == 'smf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_a.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17.33)]
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
# cvar = 0.30
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
elif survey == 'resolveb':
if mf_type == 'smf':
# 487 - cz, 369 - grpcz
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
elif mf_type == 'bmf':
catl = resolve_live18.loc[(resolve_live18.f_b.values == 1) &
(resolve_live18.grpcz.values >= 4500) &
(resolve_live18.grpcz.values <= 7000) &
(resolve_live18.absrmag.values <= -17)]
volume = 4709.8373 # *2.915 #Survey volume without buffer [Mpc/h]^3
# cvar = 0.58
z_median = np.median(resolve_live18.grpcz.values) / (3 * 10**5)
return catl, volume, z_median
def std_func(bins, mass_arr, vel_arr):
## Calculate std from mean=0
last_index = len(bins)-1
i = 0
std_arr = []
for index1, bin_edge in enumerate(bins):
if index1 == last_index:
break
cen_deltav_arr = []
for index2, stellar_mass in enumerate(mass_arr):
if stellar_mass >= bin_edge and stellar_mass < bins[index1+1]:
cen_deltav_arr.append(vel_arr[index2])
N = len(cen_deltav_arr)
mean = 0
diff_sqrd_arr = []
for value in cen_deltav_arr:
diff = value - mean
diff_sqrd = diff**2
diff_sqrd_arr.append(diff_sqrd)
mean_diff_sqrd = np.mean(diff_sqrd_arr)
std = np.sqrt(mean_diff_sqrd)
std_arr.append(std)
return std_arr
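# Note (added for exposition): fixing mean = 0 makes this the RMS of the
# velocity offsets in each bin rather than the sample standard deviation, e.g.
assert abs(std_func([0, 10], [1, 2, 3], [-100, 100, 50])[0] - 86.6) < 0.1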
def get_deltav_sigma_vishnu_qmcolour(gals_df, randint):
"""
Calculate spread in velocity dispersion from Vishnu mock (logmstar already
in h=1)
Parameters
----------
    gals_df: pandas DataFrame
        Vishnu mock galaxy catalog
    randint: int
        Mock number used to pick the logmstar and group-type columns
    Returns
    ---------
    std_red: numpy array
        Spread in velocity dispersion of red galaxies
    std_blue: numpy array
        Spread in velocity dispersion of blue galaxies
    centers_red: numpy array
        Bin centers of central stellar mass for red galaxies
    centers_blue: numpy array
        Bin centers of central stellar mass for blue galaxies
"""
mock_pd = gals_df.copy()
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
logmstar_col = '{0}'.format(randint)
g_galtype_col = 'g_galtype_{0}'.format(randint)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer except no M_r cut since vishnu mock has no M_r info
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd[logmstar_col].values >= (10**mstar_limit/2.041))]
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd[g_galtype_col] == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd[g_galtype_col] == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group['{0}'.format(randint)].loc[group[g_galtype_col].\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
red_cen_stellar_mass_arr = np.log10(red_cen_stellar_mass_arr)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group['{0}'.format(randint)].loc[group[g_galtype_col]\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
blue_cen_stellar_mass_arr = np.log10(blue_cen_stellar_mass_arr)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
return std_red, std_blue, centers_red, centers_blue
def get_deltav_sigma_mocks_qmcolour(survey, path):
"""
Calculate spread in velocity dispersion from survey mocks (logmstar converted
to h=1 units before analysis)
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
    std_red_arr: numpy array
        Spread in velocity dispersion of red galaxies
    std_blue_arr: numpy array
        Spread in velocity dispersion of blue galaxies
    centers_red_arr: numpy array
        Bin centers of central stellar mass for red galaxies
    centers_blue_arr: numpy array
        Bin centers of central stellar mass for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
std_red_arr = []
centers_red_arr = []
std_blue_arr = []
centers_blue_arr = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & \
(mock_pd.M_r.values <= mag_limit) & \
(mock_pd.logmstar.values >= mstar_limit)]
Mstar_q = 10.5 # Msun/h
Mh_q = 13.76 # Msun/h
mu = 0.69
nu = 0.15
theta = [Mstar_q, Mh_q, mu, nu]
f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
mock_pd.logmstar = np.log10((10**mock_pd.logmstar) / 2.041)
red_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'R') & (mock_pd.g_galtype == 1)].values)
blue_subset_grpids = np.unique(mock_pd.groupid.loc[(mock_pd.\
colour_label == 'B') & (mock_pd.g_galtype == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups
# with a red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(red_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
std_red_arr.append(std_red)
# Calculating spread in velocity dispersion for galaxies in groups
# with a blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = mock_pd.loc[mock_pd.groupid == key]
cen_stellar_mass = group.logmstar.loc[group.g_galtype\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
# print(max(blue_cen_stellar_mass_arr))
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, \
blue_cen_stellar_mass_arr, blue_deltav_arr)
std_blue = np.array(std_blue)
std_blue_arr.append(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
centers_red_arr.append(centers_red)
centers_blue_arr.append(centers_blue)
std_red_arr = np.array(std_red_arr)
centers_red_arr = np.array(centers_red_arr)
std_blue_arr = np.array(std_blue_arr)
centers_blue_arr = np.array(centers_blue_arr)
return std_red_arr, std_blue_arr, centers_red_arr, centers_blue_arr
def get_deltav_sigma_data(df):
"""
Measure spread in velocity dispersion separately for red and blue galaxies
by binning up central stellar mass (changes logmstar units from h=0.7 to h=1)
Parameters
----------
df: pandas Dataframe
Data catalog
Returns
---------
std_red: numpy array
Spread in velocity dispersion of red galaxies
centers_red: numpy array
Bin centers of central stellar mass for red galaxies
std_blue: numpy array
Spread in velocity dispersion of blue galaxies
centers_blue: numpy array
Bin centers of central stellar mass for blue galaxies
"""
catl = df.copy()
if survey == 'eco' or survey == 'resolvea':
catl = catl.loc[catl.logmstar >= 8.9]
elif survey == 'resolveb':
catl = catl.loc[catl.logmstar >= 8.7]
catl.logmstar = np.log10((10**catl.logmstar) / 2.041)
red_subset_grpids = np.unique(catl.grp.loc[(catl.\
colour_label == 'R') & (catl.fc == 1)].values)
blue_subset_grpids = np.unique(catl.grp.loc[(catl.\
colour_label == 'B') & (catl.fc == 1)].values)
# Calculating spread in velocity dispersion for galaxies in groups with a
# red central
red_deltav_arr = []
red_cen_stellar_mass_arr = []
for key in red_subset_grpids:
group = catl.loc[catl.grp == key]
cen_stellar_mass = group.logmstar.loc[group.fc.\
values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
red_deltav_arr.append(val)
red_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
red_stellar_mass_bins = np.linspace(8.6,11.2,6)
elif survey == 'resolveb':
red_stellar_mass_bins = np.linspace(8.4,11.0,6)
std_red = std_func(red_stellar_mass_bins, red_cen_stellar_mass_arr,
red_deltav_arr)
std_red = np.array(std_red)
# Calculating spread in velocity dispersion for galaxies in groups with a
# blue central
blue_deltav_arr = []
blue_cen_stellar_mass_arr = []
for key in blue_subset_grpids:
group = catl.loc[catl.grp == key]
cen_stellar_mass = group.logmstar.loc[group.fc\
.values == 1].values[0]
mean_cz_grp = np.round(np.mean(group.cz.values),2)
deltav = group.cz.values - len(group)*[mean_cz_grp]
for val in deltav:
blue_deltav_arr.append(val)
blue_cen_stellar_mass_arr.append(cen_stellar_mass)
if survey == 'eco' or survey == 'resolvea':
# TODO : check if this is actually correct for resolve a
blue_stellar_mass_bins = np.linspace(8.6,10.7,6)
elif survey == 'resolveb':
blue_stellar_mass_bins = np.linspace(8.4,10.4,6)
std_blue = std_func(blue_stellar_mass_bins, blue_cen_stellar_mass_arr,
blue_deltav_arr)
std_blue = np.array(std_blue)
centers_red = 0.5 * (red_stellar_mass_bins[1:] + \
red_stellar_mass_bins[:-1])
centers_blue = 0.5 * (blue_stellar_mass_bins[1:] + \
blue_stellar_mass_bins[:-1])
return std_red, centers_red, std_blue, centers_blue
def reading_catls(filename, catl_format='.hdf5'):
"""
Function to read ECO/RESOLVE catalogues.
Parameters
----------
filename: string
path and name of the ECO/RESOLVE catalogue to read
catl_format: string, optional (default = '.hdf5')
type of file to read.
Options:
- '.hdf5': Reads in a catalogue in HDF5 format
Returns
-------
mock_pd: pandas DataFrame
DataFrame with galaxy/group information
Examples
--------
# Specifying `filename`
>>> filename = 'ECO_catl.hdf5'
# Reading in Catalogue
>>> mock_pd = reading_catls(filename, format='.hdf5')
>>> mock_pd.head()
x y z vx vy vz \
0 10.225435 24.778214 3.148386 356.112457 -318.894409 366.721832
1 20.945772 14.500367 -0.237940 168.731766 37.558834 447.436951
2 21.335835 14.808488 0.004653 967.204407 -701.556763 -388.055115
3 11.102760 21.782235 2.947002 611.646484 -179.032089 113.388794
4 13.217764 21.214905 2.113904 120.689598 -63.448833 400.766541
loghalom cs_flag haloid halo_ngal ... cz_nodist vel_tot \
0 12.170 1 196005 1 ... 2704.599189 602.490355
1 11.079 1 197110 1 ... 2552.681697 479.667489
2 11.339 1 197131 1 ... 2602.377466 1256.285409
3 11.529 1 199056 1 ... 2467.277182 647.318259
4 10.642 1 199118 1 ... 2513.381124 423.326770
vel_tan vel_pec ra_orig groupid M_group g_ngal g_galtype \
0 591.399858 -115.068833 215.025116 0 11.702527 1 1
1 453.617221 155.924074 182.144134 1 11.524787 4 0
2 1192.742240 394.485714 182.213220 1 11.524787 4 0
3 633.928896 130.977416 210.441320 2 11.502205 1 1
4 421.064495 43.706352 205.525386 3 10.899680 1 1
halo_rvir
0 0.184839
1 0.079997
2 0.097636
3 0.113011
4 0.057210
"""
## Checking if file exists
if not os.path.exists(filename):
msg = '`filename`: {0} NOT FOUND! Exiting..'.format(filename)
raise ValueError(msg)
## Reading file
if catl_format=='.hdf5':
mock_pd = pd.read_hdf(filename)
else:
msg = '`catl_format` ({0}) not supported! Exiting...'.format(catl_format)
raise ValueError(msg)
return mock_pd
def get_err_data(survey, path):
"""
Calculate error in data SMF from mocks
Parameters
----------
survey: string
Name of survey
path: string
Path to mock catalogs
Returns
---------
err_total: array
Standard deviation of phi values between all mocks and for all galaxies
err_red: array
Standard deviation of phi values between all mocks and for red galaxies
err_blue: array
Standard deviation of phi values between all mocks and for blue galaxies
"""
if survey == 'eco':
mock_name = 'ECO'
num_mocks = 8
min_cz = 3000
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 151829.26 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolvea':
mock_name = 'A'
num_mocks = 59
min_cz = 4500
max_cz = 7000
mag_limit = -17.33
mstar_limit = 8.9
volume = 13172.384 # Survey volume without buffer [Mpc/h]^3
elif survey == 'resolveb':
mock_name = 'B'
num_mocks = 104
min_cz = 4500
max_cz = 7000
mag_limit = -17
mstar_limit = 8.7
volume = 4709.8373 # Survey volume without buffer [Mpc/h]^3
phi_arr_total = []
phi_arr_red = []
phi_arr_blue = []
# logmstar_red_max_arr = []
# logmstar_blue_max_arr = []
# colour_err_arr = []
# colour_corr_mat_inv = []
box_id_arr = np.linspace(5001,5008,8)
for box in box_id_arr:
box = int(box)
temp_path = path + '{0}/{1}_m200b_catls/'.format(box,
mock_name)
for num in range(num_mocks):
filename = temp_path + '{0}_cat_{1}_Planck_memb_cat.hdf5'.format(
mock_name, num)
mock_pd = reading_catls(filename)
# Using the same survey definition as in mcmc smf i.e excluding the
# buffer
mock_pd = mock_pd.loc[(mock_pd.cz.values >= min_cz) & \
(mock_pd.cz.values <= max_cz) & (mock_pd.M_r.values <= mag_limit) &\
(mock_pd.logmstar.values >= mstar_limit)]
Mstar_q = 10.5 # Msun/h
Mh_q = 13.76 # Msun/h
mu = 0.69
nu = 0.15
theta = [Mstar_q, Mh_q, mu, nu]
f_red_c, f_red_s = hybrid_quenching_model(theta, mock_pd, 'nonvishnu')
mock_pd = assign_colour_label_mock(f_red_c, f_red_s, mock_pd)
# logmstar_red_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'R'].max()
# logmstar_red_max_arr.append(logmstar_red_max)
# logmstar_blue_max = mock_pd.logmstar.loc[mock_pd.colour_label == 'B'].max()
# logmstar_blue_max_arr.append(logmstar_blue_max)
logmstar_arr = mock_pd.logmstar.values
#Measure SMF of mock using diff_smf function
max_total, phi_total, err_total, bins_total, counts_total = \
diff_smf(logmstar_arr, volume, False)
max_red, phi_red, err_red, bins_red, counts_red = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'R'],
volume, False, 'R')
max_blue, phi_blue, err_blue, bins_blue, counts_blue = \
diff_smf(mock_pd.logmstar.loc[mock_pd.colour_label.values == 'B'],
volume, False, 'B')
phi_arr_total.append(phi_total)
phi_arr_red.append(phi_red)
phi_arr_blue.append(phi_blue)
phi_arr_total = np.array(phi_arr_total)
phi_arr_red = np.array(phi_arr_red)
phi_arr_blue = np.array(phi_arr_blue)
# phi_arr_colour = np.append(phi_arr_red, phi_arr_blue, axis = 0)
# Covariance matrix for total phi (all galaxies)
# cov_mat = np.cov(phi_arr_total, rowvar=False) # default norm is N-1
# err_total = np.sqrt(cov_mat.diagonal())
# cov_mat_red = np.cov(phi_arr_red, rowvar=False) # default norm is N-1
# err_red = np.sqrt(cov_mat_red.diagonal())
# colour_err_arr.append(err_red)
# cov_mat_blue = np.cov(phi_arr_blue, rowvar=False) # default norm is N-1
# err_blue = np.sqrt(cov_mat_blue.diagonal())
# colour_err_arr.append(err_blue)
# corr_mat_red = cov_mat_red / np.outer(err_red , err_red)
# corr_mat_inv_red = np.linalg.inv(corr_mat_red)
# colour_corr_mat_inv.append(corr_mat_inv_red)
# corr_mat_blue = cov_mat_blue / np.outer(err_blue , err_blue)
# corr_mat_inv_blue = np.linalg.inv(corr_mat_blue)
# colour_corr_mat_inv.append(corr_mat_inv_blue)
deltav_sig_red, deltav_sig_blue, deltav_sig_cen_red, deltav_sig_cen_blue = \
get_deltav_sigma_mocks_qmcolour(survey, path)
phi_red_0 = phi_arr_red[:,0]
phi_red_1 = phi_arr_red[:,1]
phi_red_2 = phi_arr_red[:,2]
phi_red_3 = phi_arr_red[:,3]
phi_red_4 = phi_arr_red[:,4]
phi_blue_0 = phi_arr_blue[:,0]
phi_blue_1 = phi_arr_blue[:,1]
phi_blue_2 = phi_arr_blue[:,2]
phi_blue_3 = phi_arr_blue[:,3]
phi_blue_4 = phi_arr_blue[:,4]
dv_red_0 = deltav_sig_red[:,0]
dv_red_1 = deltav_sig_red[:,1]
dv_red_2 = deltav_sig_red[:,2]
dv_red_3 = deltav_sig_red[:,3]
dv_red_4 = deltav_sig_red[:,4]
dv_blue_0 = deltav_sig_blue[:,0]
dv_blue_1 = deltav_sig_blue[:,1]
dv_blue_2 = deltav_sig_blue[:,2]
dv_blue_3 = deltav_sig_blue[:,3]
dv_blue_4 = deltav_sig_blue[:,4]
combined_df = pd.DataFrame({'phi_red_0':phi_red_0, 'phi_red_1':phi_red_1,\
'phi_red_2':phi_red_2, 'phi_red_3':phi_red_3, 'phi_red_4':phi_red_4, \
'phi_blue_0':phi_blue_0, 'phi_blue_1':phi_blue_1,
'phi_blue_2':phi_blue_2, 'phi_blue_3':phi_blue_3,
'phi_blue_4':phi_blue_4, \
'dv_red_0':dv_red_0, 'dv_red_1':dv_red_1, 'dv_red_2':dv_red_2, \
'dv_red_3':dv_red_3, 'dv_red_4':dv_red_4, \
'dv_blue_0':dv_blue_0, 'dv_blue_1':dv_blue_1, 'dv_blue_2':dv_blue_2, \
'dv_blue_3':dv_blue_3, 'dv_blue_4':dv_blue_4})
# Correlation matrix of phi and deltav colour measurements combined
corr_mat_colour = combined_df.corr()
corr_mat_inv_colour = np.linalg.inv(corr_mat_colour.values)
err_colour = np.sqrt(np.diag(combined_df.cov()))
# deltav_sig_colour = np.append(deltav_sig_red, deltav_sig_blue, axis = 0)
# cov_mat_colour = np.cov(phi_arr_colour,deltav_sig_colour, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
# cov_mat_colour = np.cov(phi_arr_red,phi_arr_blue, rowvar=False)
# err_colour = np.sqrt(cov_mat_colour.diagonal())
# corr_mat_colour = cov_mat_colour / np.outer(err_colour, err_colour)
# corr_mat_inv_colour = np.linalg.inv(corr_mat_colour)
return err_colour, corr_mat_inv_colour
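# Typical downstream use (illustrative sketch, not part of this script): the
# returned err_colour and corr_mat_inv feed a correlated chi-squared of the
# form chi2 = r C^-1 r^T with r = (data - model) / err_colour, e.g.
# r = ((data_vec - model_vec) / err_colour).reshape(1, -1)
# chi2 = np.matmul(np.matmul(r, corr_mat_inv), r.T)[0][0]
# where data_vec and model_vec are hypothetical stacked phi and sigma vectors.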
def debug_within_outside_1sig(emcee_table, grp_keys, Mstar_q, Mhalo_q, mu, nu, chi2):
zumandelbaum_param_vals = [10.5, 13.76, 0.69, 0.15]
iteration = 600.0
emcee_table_it600 = emcee_table.loc[emcee_table.iteration_id == iteration]
chi2_std_it600 = np.std(emcee_table_it600.chi2)
chi2_mean_it600 = np.mean(emcee_table_it600.chi2)
# selecting value from within one sigma
df_within_sig = emcee_table_it600.loc[(emcee_table_it600.chi2 < chi2_mean_it600 + chi2_std_it600)&(emcee_table_it600.chi2 > chi2_mean_it600 - chi2_std_it600)]
chi2_within_sig = df_within_sig.chi2.values[3]
mstar_within_sig = df_within_sig.Mstar_q.values[3]
mhalo_within_sig = df_within_sig.Mhalo_q.values[3]
mu_within_sig = df_within_sig.mu.values[3]
nu_within_sig = df_within_sig.nu.values[3]
# # selecting value from outside one sigma
df_outside_sig = emcee_table_it600.loc[emcee_table_it600.chi2 > chi2_mean_it600 + chi2_std_it600]
chi2_outside_sig = df_outside_sig.chi2.values[3]
mstar_outside_sig = df_outside_sig.Mstar_q.values[3]
mhalo_outside_sig = df_outside_sig.Mhalo_q.values[3]
mu_outside_sig = df_outside_sig.mu.values[3]
nu_outside_sig = df_outside_sig.nu.values[3]
fig1, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1, sharex=True, \
figsize=(10,10))
ax1.plot(grp_keys, Mstar_q[0],c='#941266',ls='--', marker='o')
ax1.axhline(zumandelbaum_param_vals[0],color='lightgray')
ax1.scatter(iteration, mstar_outside_sig, marker='*', c='k', s=70)
ax1.scatter(iteration, mstar_within_sig, marker='o', c='k', s=70)
ax2.plot(grp_keys, Mhalo_q[0], c='#941266',ls='--', marker='o')
ax2.axhline(zumandelbaum_param_vals[1],color='lightgray')
ax2.scatter(iteration, mhalo_outside_sig, marker='*', c='k', s=70)
ax2.scatter(iteration, mhalo_within_sig, marker='o', c='k', s=70)
ax3.plot(grp_keys, mu[0], c='#941266',ls='--', marker='o')
ax3.axhline(zumandelbaum_param_vals[2],color='lightgray')
ax3.scatter(iteration, mu_outside_sig, marker='*', c='k', s=70)
ax3.scatter(iteration, mu_within_sig, marker='o', c='k', s=70)
ax4.plot(grp_keys, nu[0], c='#941266',ls='--', marker='o')
ax4.axhline(zumandelbaum_param_vals[3],color='lightgray')
ax4.scatter(iteration, nu_outside_sig, marker='*', c='k', s=70)
ax4.scatter(iteration, nu_within_sig, marker='o', c='k', s=70)
ax5.plot(grp_keys, chi2[0], c='#941266',ls='--', marker='o')
ax5.scatter(iteration, chi2_outside_sig, marker='*', c='k', s=70)
ax5.scatter(iteration, chi2_within_sig, marker='o', c='k', s=70)
ax1.fill_between(grp_keys, Mstar_q[0]-Mstar_q[1], Mstar_q[0]+Mstar_q[1],
alpha=0.3, color='#941266')
ax2.fill_between(grp_keys, Mhalo_q[0]-Mhalo_q[1], Mhalo_q[0]+Mhalo_q[1], \
alpha=0.3, color='#941266')
ax3.fill_between(grp_keys, mu[0]-mu[1], mu[0]+mu[1], \
alpha=0.3, color='#941266')
ax4.fill_between(grp_keys, nu[0]-nu[1], nu[0]+nu[1], \
alpha=0.3, color='#941266')
ax5.fill_between(grp_keys, chi2[0]-chi2[1], chi2[0]+chi2[1], \
alpha=0.3, color='#941266')
ax1.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{*}}$")
ax2.set_ylabel(r"$\mathbf{log_{10}\ M^{q}_{h}}$")
ax3.set_ylabel(r"$\boldsymbol{\mu}$")
# ax4.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{\ \nu}$")
ax4.set_ylabel(r"$\boldsymbol{\nu}$")
ax5.set_ylabel(r"$\mathbf{log_{10}} \boldsymbol{{\ \chi}^2}$")
# ax5.set_ylabel(r"$\boldsymbol{{\chi}^2}$")
# ax1.set_yscale('log')
# ax2.set_yscale('log')
ax1.annotate(zumandelbaum_param_vals[0], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax2.annotate(zumandelbaum_param_vals[1], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax3.annotate(zumandelbaum_param_vals[2], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
ax4.annotate(zumandelbaum_param_vals[3], (0.95,0.85), xycoords='axes fraction',
bbox=dict(boxstyle="square", ec='k', fc='lightgray', alpha=0.5), size=10)
plt.xlabel(r"$\mathbf{iteration\ number}$")
plt.show()
dict_of_paths = cwpaths.cookiecutter_paths()
path_to_raw = dict_of_paths['raw_dir']
path_to_proc = dict_of_paths['proc_dir']
path_to_interim = dict_of_paths['int_dir']
path_to_figures = dict_of_paths['plot_dir']
path_to_data = dict_of_paths['data_dir']
catl_file = path_to_raw + "eco/eco_all.csv"
path_to_mocks = path_to_data + 'mocks/m200b/eco/'
randint_logmstar_file = pd.read_csv("/Users/asadm2/Desktop/randint_logmstar.txt",
header=None)
mock_num = randint_logmstar_file[0].values[int(iteration)-1]
gals_df_ = reading_catls(path_to_proc + "gal_group.hdf5")
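    # Forward-model the mock with the within-1sigma parameter set: apply the hybrid
    # quenching model, assign colour labels, and measure the SMFs and velocity dispersions.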
theta_within = [mstar_within_sig, mhalo_within_sig, mu_within_sig, nu_within_sig]
f_red_cen, f_red_sat = hybrid_quenching_model(theta_within, gals_df_, 'vishnu', \
mock_num)
gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df_)
v_sim = 130**3
total_model, red_model, blue_model = measure_all_smf(gals_df, v_sim
, False, mock_num)
sig_red_within, sig_blue_within, cen_red_within, cen_blue_within = \
get_deltav_sigma_vishnu_qmcolour(gals_df, mock_num)
total_model_within, red_model_within, blue_model_within = total_model, \
red_model, blue_model
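    # Repeat the same forward modelling with the outside-1sigma parameter set.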
theta_outside = [mstar_outside_sig, mhalo_outside_sig, mu_outside_sig, \
nu_outside_sig]
f_red_cen, f_red_sat = hybrid_quenching_model(theta_outside, gals_df_, 'vishnu', \
mock_num)
gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, gals_df_)
v_sim = 130**3
total_model, red_model, blue_model = measure_all_smf(gals_df, v_sim
, False, mock_num)
sig_red_outside, sig_blue_outside, cen_red_outside, cen_blue_outside = \
get_deltav_sigma_vishnu_qmcolour(gals_df, mock_num)
total_model_outside, red_model_outside, blue_model_outside = total_model, \
red_model, blue_model
catl, volume, z_median = read_data_catl(catl_file, survey)
catl = assign_colour_label_data(catl)
total_data, red_data, blue_data = measure_all_smf(catl, volume, True)
std_red, centers_red, std_blue, centers_blue = get_deltav_sigma_data(catl)
sigma, corr_mat_inv = get_err_data(survey, path_to_mocks)
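    # Compare the data measurements against the within- and outside-1sigma model realizations.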
plt.clf()
plt.plot(total_model_within[0], total_model_within[1], c='k', linestyle='-', \
label='total within 1sig')
plt.plot(total_model_outside[0], total_model_outside[1], c='k', linestyle='--',\
label='total outside 1sig')
plt.plot(red_model_within[0], red_model_within[1], color='maroon',
linestyle='--', label='within 1sig')
plt.plot(blue_model_within[0], blue_model_within[1], color='mediumblue',
linestyle='--', label='within 1sig')
plt.plot(red_model_outside[0], red_model_outside[1], color='indianred',
linestyle='--', label='outside 1sig')
plt.plot(blue_model_outside[0], blue_model_outside[1], color='cornflowerblue',
linestyle='--', label='outside 1sig')
plt.errorbar(x=red_data[0], y=red_data[1], yerr=sigma[0:5], xerr=None,
color='r', label='data')
plt.errorbar(x=blue_data[0], y=blue_data[1], yerr=sigma[5:10], xerr=None,
color='b', label='data')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h}^{3} \right]$', fontsize=20)
plt.legend(loc='best')
plt.title('ECO SMF')
plt.show()
plt.clf()
plt.plot(max_total, phi_total, c='k')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'\boldmath$\Phi \left[\mathrm{dex}^{-1}\,\mathrm{Mpc}^{-3}\,\mathrm{h}^{3} \right]$', fontsize=20)
plt.legend(loc='best')
plt.title('ECO SMF')
plt.show()
plt.clf()
plt.scatter(cen_red_within, sig_red_within, c='maroon', label='within 1sig')
plt.scatter(cen_red_outside, sig_red_outside, c='indianred', label='outside 1sig')
plt.scatter(cen_blue_within, sig_blue_within, c='mediumblue', label='within 1sig')
plt.scatter(cen_blue_outside, sig_blue_outside, c='cornflowerblue', \
label='outside 1sig')
plt.errorbar(x=centers_red, y=std_red, yerr=sigma[10:15], xerr=None, color='r',\
label='data', fmt='')
plt.errorbar(x=centers_blue, y=std_blue, yerr=sigma[15:20], xerr=None, \
color='b', label='data', fmt='')
plt.xlabel(r'\boldmath$\log_{10}\ M_\star \left[\mathrm{M_\odot}\, \mathrm{h}^{-1} \right]$', fontsize=20)
plt.ylabel(r'$\sigma$')
plt.legend(loc='best')
plt.title(r'ECO spread in $\delta v$')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.rc",
"numpy.abs",
"matplotlib.pyplot.clf",
"pandas.read_csv",
"numpy.isnan",
"numpy.histogram",
"numpy.mean",
"numpy.exp",
"cosmo_utils.utils.work_paths.cookiecutter_paths",
"numpy.round",
"numpy.unique",
"pandas.DataFrame",
"pandas.read_hdf",
"numpy.std",
"os.path.exists",
"numpy.linspace",
"numpy.log10",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.errorbar",
"matplotlib.pyplot.show",
"numpy.median",
"numpy.asarray",
"matplotlib.pyplot.legend",
"numpy.linalg.inv",
"matplotlib.pyplot.ylabel",
"numpy.random.uniform",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.array",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((344, 420), 'matplotlib.rc', 'rc', (['"""font"""'], {'size': '(20)'}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}, size=20)\n", (346, 420), False, 'from matplotlib import rc\n'), ((416, 439), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (418, 439), False, 'from matplotlib import rc\n'), ((509, 532), 'matplotlib.rc', 'rc', (['"""axes"""'], {'linewidth': '(2)'}), "('axes', linewidth=2)\n", (511, 532), False, 'from matplotlib import rc\n'), ((533, 567), 'matplotlib.rc', 'rc', (['"""xtick.major"""'], {'width': '(2)', 'size': '(7)'}), "('xtick.major', width=2, size=7)\n", (535, 567), False, 'from matplotlib import rc\n'), ((568, 602), 'matplotlib.rc', 'rc', (['"""ytick.major"""'], {'width': '(2)', 'size': '(7)'}), "('ytick.major', width=2, size=7)\n", (570, 602), False, 'from matplotlib import rc\n'), ((1002, 1030), 'cosmo_utils.utils.work_paths.cookiecutter_paths', 'cwpaths.cookiecutter_paths', ([], {}), '()\n', (1028, 1030), True, 'from cosmo_utils.utils import work_paths as cwpaths\n'), ((3117, 3175), 'pandas.read_csv', 'pd.read_csv', (['chi2_fname'], {'header': 'None', 'names': "['chisquared']"}), "(chi2_fname, header=None, names=['chisquared'])\n", (3128, 3175), True, 'import pandas as pd\n'), ((3181, 3216), 'numpy.log10', 'np.log10', (['chi2_df.chisquared.values'], {}), '(chi2_df.chisquared.values)\n', (3189, 3216), True, 'import numpy as np\n'), ((3855, 3901), 'pandas.DataFrame', 'pd.DataFrame', (['id_data'], {'index': 'emcee_table.index'}), '(id_data, index=emcee_table.index)\n', (3867, 3901), True, 'import pandas as pd\n'), ((11390, 11418), 'cosmo_utils.utils.work_paths.cookiecutter_paths', 'cwpaths.cookiecutter_paths', ([], {}), '()\n', (11416, 11418), True, 'from cosmo_utils.utils import work_paths as cwpaths\n'), ((13520, 13548), 'cosmo_utils.utils.work_paths.cookiecutter_paths', 'cwpaths.cookiecutter_paths', ([], {}), '()\n', (13546, 13548), True, 'from cosmo_utils.utils import work_paths as cwpaths\n'), ((13684, 13827), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'names': "['mhalo_c', 'mstellar_c', 'lowmass_slope', 'highmass_slope', 'scatter']", 'header': 'None', 'delim_whitespace': '(True)'}), "(chain_fname, names=['mhalo_c', 'mstellar_c', 'lowmass_slope',\n 'highmass_slope', 'scatter'], header=None, delim_whitespace=True)\n", (13695, 13827), True, 'import pandas as pd\n'), ((899, 916), 'numpy.asarray', 'np.asarray', (['array'], {}), '(array)\n', (909, 916), True, 'import numpy as np\n'), ((1512, 1618), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'delim_whitespace': '(True)', 'names': "['Mstar_q', 'Mhalo_q', 'mu', 'nu']", 'header': 'None'}), "(chain_fname, delim_whitespace=True, names=['Mstar_q', 'Mhalo_q',\n 'mu', 'nu'], header=None)\n", (1523, 1618), True, 'import pandas as pd\n'), ((5359, 5408), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 10)'}), '(5, 1, sharex=True, figsize=(10, 10))\n', (5371, 5408), True, 'import matplotlib.pyplot as plt\n'), ((7595, 7639), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathbf{iteration\\\\ number}$"""'], {}), "('$\\\\mathbf{iteration\\\\ number}$')\n", (7605, 7639), True, 'import matplotlib.pyplot as plt\n'), ((7643, 7653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7651, 7653), True, 'import matplotlib.pyplot as plt\n'), ((11706, 11812), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'delim_whitespace': '(True)', 'names': "['Mstar_q', 'Mhalo_q', 'mu', 'nu']", 'header': 
'None'}), "(chain_fname, delim_whitespace=True, names=['Mstar_q', 'Mhalo_q',\n 'mu', 'nu'], header=None)\n", (11717, 11812), True, 'import pandas as pd\n'), ((15560, 15613), 'numpy.exp', 'np.exp', (['(-(sat_stellar_mass_arr / 10 ** Mstar_q) ** mu)'], {}), '(-(sat_stellar_mass_arr / 10 ** Mstar_q) ** mu)\n', (15566, 15613), True, 'import numpy as np\n'), ((15623, 15674), 'numpy.exp', 'np.exp', (['(-(sat_hosthalo_mass_arr / 10 ** Mh_q) ** nu)'], {}), '(-(sat_hosthalo_mass_arr / 10 ** Mh_q) ** nu)\n', (15629, 15674), True, 'import numpy as np\n'), ((18631, 18650), 'numpy.array', 'np.array', (['cen_halos'], {}), '(cen_halos)\n', (18639, 18650), True, 'import numpy as np\n'), ((18667, 18686), 'numpy.array', 'np.array', (['sat_halos'], {}), '(sat_halos)\n', (18675, 18686), True, 'import numpy as np\n'), ((19687, 19705), 'numpy.array', 'np.array', (['cen_gals'], {}), '(cen_gals)\n', (19695, 19705), True, 'import numpy as np\n'), ((19721, 19739), 'numpy.array', 'np.array', (['sat_gals'], {}), '(sat_gals)\n', (19729, 19739), True, 'import numpy as np\n'), ((21577, 21614), 'numpy.histogram', 'np.histogram', (['logmstar_arr'], {'bins': 'bins'}), '(logmstar_arr, bins=bins)\n', (21589, 21614), True, 'import numpy as np\n'), ((21923, 21936), 'numpy.log10', 'np.log10', (['phi'], {}), '(phi)\n', (21931, 21936), True, 'import numpy as np\n'), ((31210, 31315), 'numpy.unique', 'np.unique', (["mock_pd.groupid.loc[(mock_pd.colour_label == 'R') & (mock_pd[g_galtype_col] ==\n 1)].values"], {}), "(mock_pd.groupid.loc[(mock_pd.colour_label == 'R') & (mock_pd[\n g_galtype_col] == 1)].values)\n", (31219, 31315), True, 'import numpy as np\n'), ((31348, 31453), 'numpy.unique', 'np.unique', (["mock_pd.groupid.loc[(mock_pd.colour_label == 'B') & (mock_pd[g_galtype_col] ==\n 1)].values"], {}), "(mock_pd.groupid.loc[(mock_pd.colour_label == 'B') & (mock_pd[\n g_galtype_col] == 1)].values)\n", (31357, 31453), True, 'import numpy as np\n'), ((32142, 32176), 'numpy.log10', 'np.log10', (['red_cen_stellar_mass_arr'], {}), '(red_cen_stellar_mass_arr)\n', (32150, 32176), True, 'import numpy as np\n'), ((32544, 32561), 'numpy.array', 'np.array', (['std_red'], {}), '(std_red)\n', (32552, 32561), True, 'import numpy as np\n'), ((33254, 33289), 'numpy.log10', 'np.log10', (['blue_cen_stellar_mass_arr'], {}), '(blue_cen_stellar_mass_arr)\n', (33262, 33289), True, 'import numpy as np\n'), ((33669, 33687), 'numpy.array', 'np.array', (['std_blue'], {}), '(std_blue)\n', (33677, 33687), True, 'import numpy as np\n'), ((35468, 35494), 'numpy.linspace', 'np.linspace', (['(5001)', '(5008)', '(8)'], {}), '(5001, 5008, 8)\n', (35479, 35494), True, 'import numpy as np\n'), ((39686, 39707), 'numpy.array', 'np.array', (['std_red_arr'], {}), '(std_red_arr)\n', (39694, 39707), True, 'import numpy as np\n'), ((39730, 39755), 'numpy.array', 'np.array', (['centers_red_arr'], {}), '(centers_red_arr)\n', (39738, 39755), True, 'import numpy as np\n'), ((39775, 39797), 'numpy.array', 'np.array', (['std_blue_arr'], {}), '(std_blue_arr)\n', (39783, 39797), True, 'import numpy as np\n'), ((39821, 39847), 'numpy.array', 'np.array', (['centers_blue_arr'], {}), '(centers_blue_arr)\n', (39829, 39847), True, 'import numpy as np\n'), ((40802, 40839), 'numpy.log10', 'np.log10', (['(10 ** catl.logmstar / 2.041)'], {}), '(10 ** catl.logmstar / 2.041)\n', (40810, 40839), True, 'import numpy as np\n'), ((40868, 40943), 'numpy.unique', 'np.unique', (["catl.grp.loc[(catl.colour_label == 'R') & (catl.fc == 1)].values"], {}), "(catl.grp.loc[(catl.colour_label == 'R') & 
(catl.fc == 1)].values)\n", (40877, 40943), True, 'import numpy as np\n'), ((40981, 41056), 'numpy.unique', 'np.unique', (["catl.grp.loc[(catl.colour_label == 'B') & (catl.fc == 1)].values"], {}), "(catl.grp.loc[(catl.colour_label == 'B') & (catl.fc == 1)].values)\n", (40990, 41056), True, 'import numpy as np\n'), ((42009, 42026), 'numpy.array', 'np.array', (['std_red'], {}), '(std_red)\n', (42017, 42026), True, 'import numpy as np\n'), ((42985, 43003), 'numpy.array', 'np.array', (['std_blue'], {}), '(std_blue)\n', (42993, 43003), True, 'import numpy as np\n'), ((47334, 47360), 'numpy.linspace', 'np.linspace', (['(5001)', '(5008)', '(8)'], {}), '(5001, 5008, 8)\n', (47345, 47360), True, 'import numpy as np\n'), ((49379, 49402), 'numpy.array', 'np.array', (['phi_arr_total'], {}), '(phi_arr_total)\n', (49387, 49402), True, 'import numpy as np\n'), ((49421, 49442), 'numpy.array', 'np.array', (['phi_arr_red'], {}), '(phi_arr_red)\n', (49429, 49442), True, 'import numpy as np\n'), ((49462, 49484), 'numpy.array', 'np.array', (['phi_arr_blue'], {}), '(phi_arr_blue)\n', (49470, 49484), True, 'import numpy as np\n'), ((51258, 51780), 'pandas.DataFrame', 'pd.DataFrame', (["{'phi_red_0': phi_red_0, 'phi_red_1': phi_red_1, 'phi_red_2': phi_red_2,\n 'phi_red_3': phi_red_3, 'phi_red_4': phi_red_4, 'phi_blue_0':\n phi_blue_0, 'phi_blue_1': phi_blue_1, 'phi_blue_2': phi_blue_2,\n 'phi_blue_3': phi_blue_3, 'phi_blue_4': phi_blue_4, 'dv_red_0':\n dv_red_0, 'dv_red_1': dv_red_1, 'dv_red_2': dv_red_2, 'dv_red_3':\n dv_red_3, 'dv_red_4': dv_red_4, 'dv_blue_0': dv_blue_0, 'dv_blue_1':\n dv_blue_1, 'dv_blue_2': dv_blue_2, 'dv_blue_3': dv_blue_3, 'dv_blue_4':\n dv_blue_4}"], {}), "({'phi_red_0': phi_red_0, 'phi_red_1': phi_red_1, 'phi_red_2':\n phi_red_2, 'phi_red_3': phi_red_3, 'phi_red_4': phi_red_4, 'phi_blue_0':\n phi_blue_0, 'phi_blue_1': phi_blue_1, 'phi_blue_2': phi_blue_2,\n 'phi_blue_3': phi_blue_3, 'phi_blue_4': phi_blue_4, 'dv_red_0':\n dv_red_0, 'dv_red_1': dv_red_1, 'dv_red_2': dv_red_2, 'dv_red_3':\n dv_red_3, 'dv_red_4': dv_red_4, 'dv_blue_0': dv_blue_0, 'dv_blue_1':\n dv_blue_1, 'dv_blue_2': dv_blue_2, 'dv_blue_3': dv_blue_3, 'dv_blue_4':\n dv_blue_4})\n", (51270, 51780), True, 'import pandas as pd\n'), ((51950, 51987), 'numpy.linalg.inv', 'np.linalg.inv', (['corr_mat_colour.values'], {}), '(corr_mat_colour.values)\n', (51963, 51987), True, 'import numpy as np\n'), ((53076, 53106), 'numpy.std', 'np.std', (['emcee_table_it600.chi2'], {}), '(emcee_table_it600.chi2)\n', (53082, 53106), True, 'import numpy as np\n'), ((53129, 53160), 'numpy.mean', 'np.mean', (['emcee_table_it600.chi2'], {}), '(emcee_table_it600.chi2)\n', (53136, 53160), True, 'import numpy as np\n'), ((54138, 54187), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 10)'}), '(5, 1, sharex=True, figsize=(10, 10))\n', (54150, 54187), True, 'import matplotlib.pyplot as plt\n'), ((57088, 57132), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathbf{iteration\\\\ number}$"""'], {}), "('$\\\\mathbf{iteration\\\\ number}$')\n", (57098, 57132), True, 'import matplotlib.pyplot as plt\n'), ((57136, 57146), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57144, 57146), True, 'import matplotlib.pyplot as plt\n'), ((57168, 57196), 'cosmo_utils.utils.work_paths.cookiecutter_paths', 'cwpaths.cookiecutter_paths', ([], {}), '()\n', (57194, 57196), True, 'from cosmo_utils.utils import work_paths as cwpaths\n'), ((57556, 57626), 'pandas.read_csv', 'pd.read_csv', 
(['"""/Users/asadm2/Desktop/randint_logmstar.txt"""'], {'header': 'None'}), "('/Users/asadm2/Desktop/randint_logmstar.txt', header=None)\n", (57567, 57626), True, 'import pandas as pd\n'), ((59352, 59361), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (59359, 59361), True, 'import matplotlib.pyplot as plt\n'), ((59366, 59473), 'matplotlib.pyplot.plot', 'plt.plot', (['total_model_within[0]', 'total_model_within[1]'], {'c': '"""k"""', 'linestyle': '"""-"""', 'label': '"""total within 1sig"""'}), "(total_model_within[0], total_model_within[1], c='k', linestyle='-',\n label='total within 1sig')\n", (59374, 59473), True, 'import matplotlib.pyplot as plt\n'), ((59484, 59596), 'matplotlib.pyplot.plot', 'plt.plot', (['total_model_outside[0]', 'total_model_outside[1]'], {'c': '"""k"""', 'linestyle': '"""--"""', 'label': '"""total outside 1sig"""'}), "(total_model_outside[0], total_model_outside[1], c='k', linestyle=\n '--', label='total outside 1sig')\n", (59492, 59596), True, 'import matplotlib.pyplot as plt\n'), ((59605, 59712), 'matplotlib.pyplot.plot', 'plt.plot', (['red_model_within[0]', 'red_model_within[1]'], {'color': '"""maroon"""', 'linestyle': '"""--"""', 'label': '"""within 1sig"""'}), "(red_model_within[0], red_model_within[1], color='maroon',\n linestyle='--', label='within 1sig')\n", (59613, 59712), True, 'import matplotlib.pyplot as plt\n'), ((59722, 59835), 'matplotlib.pyplot.plot', 'plt.plot', (['blue_model_within[0]', 'blue_model_within[1]'], {'color': '"""mediumblue"""', 'linestyle': '"""--"""', 'label': '"""within 1sig"""'}), "(blue_model_within[0], blue_model_within[1], color='mediumblue',\n linestyle='--', label='within 1sig')\n", (59730, 59835), True, 'import matplotlib.pyplot as plt\n'), ((59845, 59958), 'matplotlib.pyplot.plot', 'plt.plot', (['red_model_outside[0]', 'red_model_outside[1]'], {'color': '"""indianred"""', 'linestyle': '"""--"""', 'label': '"""outside 1sig"""'}), "(red_model_outside[0], red_model_outside[1], color='indianred',\n linestyle='--', label='outside 1sig')\n", (59853, 59958), True, 'import matplotlib.pyplot as plt\n'), ((59968, 60089), 'matplotlib.pyplot.plot', 'plt.plot', (['blue_model_outside[0]', 'blue_model_outside[1]'], {'color': '"""cornflowerblue"""', 'linestyle': '"""--"""', 'label': '"""outside 1sig"""'}), "(blue_model_outside[0], blue_model_outside[1], color=\n 'cornflowerblue', linestyle='--', label='outside 1sig')\n", (59976, 60089), True, 'import matplotlib.pyplot as plt\n'), ((60098, 60197), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'red_data[0]', 'y': 'red_data[1]', 'yerr': 'sigma[0:5]', 'xerr': 'None', 'color': '"""r"""', 'label': '"""data"""'}), "(x=red_data[0], y=red_data[1], yerr=sigma[0:5], xerr=None,\n color='r', label='data')\n", (60110, 60197), True, 'import matplotlib.pyplot as plt\n'), ((60207, 60309), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'blue_data[0]', 'y': 'blue_data[1]', 'yerr': 'sigma[5:10]', 'xerr': 'None', 'color': '"""b"""', 'label': '"""data"""'}), "(x=blue_data[0], y=blue_data[1], yerr=sigma[5:10], xerr=None,\n color='b', label='data')\n", (60219, 60309), True, 'import matplotlib.pyplot as plt\n'), ((60319, 60444), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$"""'], {'fontsize': '(20)'}), "(\n '\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$'\n , fontsize=20)\n", (60329, 60444), True, 'import 
matplotlib.pyplot as plt\n'), ((60430, 60558), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\boldmath$\\\\Phi \\\\left[\\\\mathrm{dex}^{-1}\\\\,\\\\mathrm{Mpc}^{-3}\\\\,\\\\mathrm{h}^{3} \\\\right]$"""'], {'fontsize': '(20)'}), "(\n '\\\\boldmath$\\\\Phi \\\\left[\\\\mathrm{dex}^{-1}\\\\,\\\\mathrm{Mpc}^{-3}\\\\,\\\\mathrm{h}^{3} \\\\right]$'\n , fontsize=20)\n", (60440, 60558), True, 'import matplotlib.pyplot as plt\n'), ((60545, 60567), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (60555, 60567), True, 'import matplotlib.pyplot as plt\n'), ((60572, 60592), 'matplotlib.pyplot.title', 'plt.title', (['"""ECO SMF"""'], {}), "('ECO SMF')\n", (60581, 60592), True, 'import matplotlib.pyplot as plt\n'), ((60597, 60607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60605, 60607), True, 'import matplotlib.pyplot as plt\n'), ((60613, 60622), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (60620, 60622), True, 'import matplotlib.pyplot as plt\n'), ((60627, 60664), 'matplotlib.pyplot.plot', 'plt.plot', (['max_total', 'phi_total'], {'c': '"""k"""'}), "(max_total, phi_total, c='k')\n", (60635, 60664), True, 'import matplotlib.pyplot as plt\n'), ((60669, 60794), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$"""'], {'fontsize': '(20)'}), "(\n '\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$'\n , fontsize=20)\n", (60679, 60794), True, 'import matplotlib.pyplot as plt\n'), ((60780, 60908), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\boldmath$\\\\Phi \\\\left[\\\\mathrm{dex}^{-1}\\\\,\\\\mathrm{Mpc}^{-3}\\\\,\\\\mathrm{h}^{3} \\\\right]$"""'], {'fontsize': '(20)'}), "(\n '\\\\boldmath$\\\\Phi \\\\left[\\\\mathrm{dex}^{-1}\\\\,\\\\mathrm{Mpc}^{-3}\\\\,\\\\mathrm{h}^{3} \\\\right]$'\n , fontsize=20)\n", (60790, 60908), True, 'import matplotlib.pyplot as plt\n'), ((60895, 60917), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (60905, 60917), True, 'import matplotlib.pyplot as plt\n'), ((60922, 60942), 'matplotlib.pyplot.title', 'plt.title', (['"""ECO SMF"""'], {}), "('ECO SMF')\n", (60931, 60942), True, 'import matplotlib.pyplot as plt\n'), ((60947, 60957), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60955, 60957), True, 'import matplotlib.pyplot as plt\n'), ((60963, 60972), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (60970, 60972), True, 'import matplotlib.pyplot as plt\n'), ((60977, 61053), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cen_red_within', 'sig_red_within'], {'c': '"""maroon"""', 'label': '"""within 1sig"""'}), "(cen_red_within, sig_red_within, c='maroon', label='within 1sig')\n", (60988, 61053), True, 'import matplotlib.pyplot as plt\n'), ((61058, 61145), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cen_red_outside', 'sig_red_outside'], {'c': '"""indianred"""', 'label': '"""outside 1sig"""'}), "(cen_red_outside, sig_red_outside, c='indianred', label=\n 'outside 1sig')\n", (61069, 61145), True, 'import matplotlib.pyplot as plt\n'), ((61145, 61232), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cen_blue_within', 'sig_blue_within'], {'c': '"""mediumblue"""', 'label': '"""within 1sig"""'}), "(cen_blue_within, sig_blue_within, c='mediumblue', label=\n 'within 1sig')\n", (61156, 61232), True, 'import matplotlib.pyplot as plt\n'), ((61232, 61326), 'matplotlib.pyplot.scatter', 
'plt.scatter', (['cen_blue_outside', 'sig_blue_outside'], {'c': '"""cornflowerblue"""', 'label': '"""outside 1sig"""'}), "(cen_blue_outside, sig_blue_outside, c='cornflowerblue', label=\n 'outside 1sig')\n", (61243, 61326), True, 'import matplotlib.pyplot as plt\n'), ((61336, 61442), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'centers_red', 'y': 'std_red', 'yerr': 'sigma[10:15]', 'xerr': 'None', 'color': '"""r"""', 'label': '"""data"""', 'fmt': '""""""'}), "(x=centers_red, y=std_red, yerr=sigma[10:15], xerr=None, color=\n 'r', label='data', fmt='')\n", (61348, 61442), True, 'import matplotlib.pyplot as plt\n'), ((61451, 61558), 'matplotlib.pyplot.errorbar', 'plt.errorbar', ([], {'x': 'centers_blue', 'y': 'std_blue', 'yerr': 'sigma[15:20]', 'xerr': 'None', 'color': '"""b"""', 'label': '"""data"""', 'fmt': '""""""'}), "(x=centers_blue, y=std_blue, yerr=sigma[15:20], xerr=None,\n color='b', label='data', fmt='')\n", (61463, 61558), True, 'import matplotlib.pyplot as plt\n'), ((61569, 61694), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$"""'], {'fontsize': '(20)'}), "(\n '\\\\boldmath$\\\\log_{10}\\\\ M_\\\\star \\\\left[\\\\mathrm{M_\\\\odot}\\\\, \\\\mathrm{h}^{-1} \\\\right]$'\n , fontsize=20)\n", (61579, 61694), True, 'import matplotlib.pyplot as plt\n'), ((61680, 61703), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\sigma$"""'], {}), "('$\\\\sigma$')\n", (61690, 61703), True, 'import matplotlib.pyplot as plt\n'), ((61708, 61730), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (61718, 61730), True, 'import matplotlib.pyplot as plt\n'), ((61735, 61773), 'matplotlib.pyplot.title', 'plt.title', (['"""ECO spread in $\\\\delta v$"""'], {}), "('ECO spread in $\\\\delta v$')\n", (61744, 61773), True, 'import matplotlib.pyplot as plt\n'), ((61778, 61788), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (61786, 61788), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2428), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'delim_whitespace': '(True)', 'names': "['Mh_qc', 'Mh_qs', 'mu_c', 'mu_s']", 'header': 'None'}), "(chain_fname, delim_whitespace=True, names=['Mh_qc', 'Mh_qs',\n 'mu_c', 'mu_s'], header=None)\n", (2333, 2428), True, 'import pandas as pd\n'), ((4451, 4480), 'numpy.mean', 'np.mean', (['group.Mstar_q.values'], {}), '(group.Mstar_q.values)\n', (4458, 4480), True, 'import numpy as np\n'), ((4503, 4531), 'numpy.std', 'np.std', (['group.Mstar_q.values'], {}), '(group.Mstar_q.values)\n', (4509, 4531), True, 'import numpy as np\n'), ((4632, 4661), 'numpy.mean', 'np.mean', (['group.Mhalo_q.values'], {}), '(group.Mhalo_q.values)\n', (4639, 4661), True, 'import numpy as np\n'), ((4684, 4712), 'numpy.std', 'np.std', (['group.Mhalo_q.values'], {}), '(group.Mhalo_q.values)\n', (4690, 4712), True, 'import numpy as np\n'), ((4808, 4832), 'numpy.mean', 'np.mean', (['group.mu.values'], {}), '(group.mu.values)\n', (4815, 4832), True, 'import numpy as np\n'), ((4850, 4873), 'numpy.std', 'np.std', (['group.mu.values'], {}), '(group.mu.values)\n', (4856, 4873), True, 'import numpy as np\n'), ((4949, 4973), 'numpy.mean', 'np.mean', (['group.nu.values'], {}), '(group.nu.values)\n', (4956, 4973), True, 'import numpy as np\n'), ((4991, 5014), 'numpy.std', 'np.std', (['group.nu.values'], {}), '(group.nu.values)\n', (4997, 5014), True, 'import numpy as np\n'), ((5092, 5118), 'numpy.mean', 'np.mean', (['group.chi2.values'], 
{}), '(group.chi2.values)\n', (5099, 5118), True, 'import numpy as np\n'), ((5138, 5163), 'numpy.std', 'np.std', (['group.chi2.values'], {}), '(group.chi2.values)\n', (5144, 5163), True, 'import numpy as np\n'), ((8991, 9040), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)', '(1)'], {'sharex': '(True)', 'figsize': '(10, 10)'}), '(5, 1, sharex=True, figsize=(10, 10))\n', (9003, 9040), True, 'import matplotlib.pyplot as plt\n'), ((11233, 11277), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mathbf{iteration\\\\ number}$"""'], {}), "('$\\\\mathbf{iteration\\\\ number}$')\n", (11243, 11277), True, 'import matplotlib.pyplot as plt\n'), ((11281, 11291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11289, 11291), True, 'import matplotlib.pyplot as plt\n'), ((12516, 12622), 'pandas.read_csv', 'pd.read_csv', (['chain_fname'], {'delim_whitespace': '(True)', 'names': "['Mh_qc', 'Mh_qs', 'mu_c', 'mu_s']", 'header': 'None'}), "(chain_fname, delim_whitespace=True, names=['Mh_qc', 'Mh_qs',\n 'mu_c', 'mu_s'], header=None)\n", (12527, 12622), True, 'import pandas as pd\n'), ((15493, 15546), 'numpy.exp', 'np.exp', (['(-(cen_stellar_mass_arr / 10 ** Mstar_q) ** mu)'], {}), '(-(cen_stellar_mass_arr / 10 ** Mstar_q) ** mu)\n', (15499, 15546), True, 'import numpy as np\n'), ((16862, 16881), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (16879, 16881), True, 'import numpy as np\n'), ((20509, 20542), 'numpy.log10', 'np.log10', (['(10 ** mstar_arr / 2.041)'], {}), '(10 ** mstar_arr / 2.041)\n', (20517, 20542), True, 'import numpy as np\n'), ((20576, 20595), 'numpy.log10', 'np.log10', (['mstar_arr'], {}), '(mstar_arr)\n', (20584, 20595), True, 'import numpy as np\n'), ((21279, 21317), 'numpy.linspace', 'np.linspace', (['bin_min', 'bin_max', 'bin_num'], {}), '(bin_min, bin_max, bin_num)\n', (21290, 21317), True, 'import numpy as np\n'), ((21801, 21816), 'numpy.sqrt', 'np.sqrt', (['counts'], {}), '(counts)\n', (21808, 21816), True, 'import numpy as np\n'), ((25760, 25827), 'pandas.read_csv', 'pd.read_csv', (['path_to_file'], {'delimiter': '""","""', 'header': '(0)', 'usecols': 'columns'}), "(path_to_file, delimiter=',', header=0, usecols=columns)\n", (25771, 25827), True, 'import pandas as pd\n'), ((29249, 29271), 'numpy.mean', 'np.mean', (['diff_sqrd_arr'], {}), '(diff_sqrd_arr)\n', (29256, 29271), True, 'import numpy as np\n'), ((29286, 29309), 'numpy.sqrt', 'np.sqrt', (['mean_diff_sqrd'], {}), '(mean_diff_sqrd)\n', (29293, 29309), True, 'import numpy as np\n'), ((32322, 32347), 'numpy.linspace', 'np.linspace', (['(8.6)', '(11.2)', '(6)'], {}), '(8.6, 11.2, 6)\n', (32333, 32347), True, 'import numpy as np\n'), ((33436, 33461), 'numpy.linspace', 'np.linspace', (['(8.6)', '(10.7)', '(6)'], {}), '(8.6, 10.7, 6)\n', (33447, 33461), True, 'import numpy as np\n'), ((41787, 41812), 'numpy.linspace', 'np.linspace', (['(8.6)', '(11.2)', '(6)'], {}), '(8.6, 11.2, 6)\n', (41798, 41812), True, 'import numpy as np\n'), ((42753, 42778), 'numpy.linspace', 'np.linspace', (['(8.6)', '(10.7)', '(6)'], {}), '(8.6, 10.7, 6)\n', (42764, 42778), True, 'import numpy as np\n'), ((45506, 45530), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (45520, 45530), False, 'import os\n'), ((45699, 45720), 'pandas.read_hdf', 'pd.read_hdf', (['filename'], {}), '(filename)\n', (45710, 45720), True, 'import pandas as pd\n'), ((929, 950), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (935, 950), True, 'import numpy as np\n'), ((8085, 8112), 'numpy.mean', 
'np.mean', (['group.Mh_qc.values'], {}), '(group.Mh_qc.values)\n', (8092, 8112), True, 'import numpy as np\n'), ((8133, 8159), 'numpy.std', 'np.std', (['group.Mh_qc.values'], {}), '(group.Mh_qc.values)\n', (8139, 8159), True, 'import numpy as np\n'), ((8250, 8277), 'numpy.mean', 'np.mean', (['group.Mh_qs.values'], {}), '(group.Mh_qs.values)\n', (8257, 8277), True, 'import numpy as np\n'), ((8298, 8324), 'numpy.std', 'np.std', (['group.Mh_qs.values'], {}), '(group.Mh_qs.values)\n', (8304, 8324), True, 'import numpy as np\n'), ((8414, 8440), 'numpy.mean', 'np.mean', (['group.mu_c.values'], {}), '(group.mu_c.values)\n', (8421, 8440), True, 'import numpy as np\n'), ((8460, 8485), 'numpy.std', 'np.std', (['group.mu_c.values'], {}), '(group.mu_c.values)\n', (8466, 8485), True, 'import numpy as np\n'), ((8571, 8597), 'numpy.mean', 'np.mean', (['group.mu_s.values'], {}), '(group.mu_s.values)\n', (8578, 8597), True, 'import numpy as np\n'), ((8617, 8642), 'numpy.std', 'np.std', (['group.mu_s.values'], {}), '(group.mu_s.values)\n', (8623, 8642), True, 'import numpy as np\n'), ((8728, 8754), 'numpy.mean', 'np.mean', (['group.chi2.values'], {}), '(group.chi2.values)\n', (8735, 8754), True, 'import numpy as np\n'), ((8774, 8799), 'numpy.std', 'np.std', (['group.chi2.values'], {}), '(group.chi2.values)\n', (8780, 8799), True, 'import numpy as np\n'), ((13443, 13475), 'numpy.round', 'np.round', (['acceptance_fraction', '(2)'], {}), '(acceptance_fraction, 2)\n', (13451, 13475), True, 'import numpy as np\n'), ((14609, 14641), 'numpy.round', 'np.round', (['acceptance_fraction', '(2)'], {}), '(acceptance_fraction, 2)\n', (14617, 14641), True, 'import numpy as np\n'), ((20672, 20699), 'numpy.log10', 'np.log10', (['(10 ** 8.9 / 2.041)'], {}), '(10 ** 8.9 / 2.041)\n', (20680, 20699), True, 'import numpy as np\n'), ((21483, 21515), 'numpy.linspace', 'np.linspace', (['bin_min', 'bin_max', '(7)'], {}), '(bin_min, bin_max, 7)\n', (21494, 21515), True, 'import numpy as np\n'), ((26407, 26435), 'numpy.median', 'np.median', (['catl.grpcz.values'], {}), '(catl.grpcz.values)\n', (26416, 26435), True, 'import numpy as np\n'), ((26785, 26852), 'pandas.read_csv', 'pd.read_csv', (['path_to_file'], {'delimiter': '""","""', 'header': '(0)', 'usecols': 'columns'}), "(path_to_file, delimiter=',', header=0, usecols=columns)\n", (26796, 26852), True, 'import pandas as pd\n'), ((31851, 31875), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (31858, 31875), True, 'import numpy as np\n'), ((32409, 32434), 'numpy.linspace', 'np.linspace', (['(8.4)', '(11.0)', '(6)'], {}), '(8.4, 11.0, 6)\n', (32420, 32434), True, 'import numpy as np\n'), ((32959, 32983), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (32966, 32983), True, 'import numpy as np\n'), ((33524, 33549), 'numpy.linspace', 'np.linspace', (['(8.4)', '(10.4)', '(6)'], {}), '(8.4, 10.4, 6)\n', (33535, 33549), True, 'import numpy as np\n'), ((36507, 36547), 'numpy.log10', 'np.log10', (['(10 ** mock_pd.logmstar / 2.041)'], {}), '(10 ** mock_pd.logmstar / 2.041)\n', (36515, 36547), True, 'import numpy as np\n'), ((36580, 36680), 'numpy.unique', 'np.unique', (["mock_pd.groupid.loc[(mock_pd.colour_label == 'R') & (mock_pd.g_galtype == 1)\n ].values"], {}), "(mock_pd.groupid.loc[(mock_pd.colour_label == 'R') & (mock_pd.\n g_galtype == 1)].values)\n", (36589, 36680), True, 'import numpy as np\n'), ((36729, 36829), 'numpy.unique', 'np.unique', (["mock_pd.groupid.loc[(mock_pd.colour_label == 'B') & (mock_pd.g_galtype == 1)\n 
].values"], {}), "(mock_pd.groupid.loc[(mock_pd.colour_label == 'B') & (mock_pd.\n g_galtype == 1)].values)\n", (36738, 36829), True, 'import numpy as np\n'), ((38019, 38036), 'numpy.array', 'np.array', (['std_red'], {}), '(std_red)\n', (38027, 38036), True, 'import numpy as np\n'), ((39273, 39291), 'numpy.array', 'np.array', (['std_blue'], {}), '(std_blue)\n', (39281, 39291), True, 'import numpy as np\n'), ((41425, 41449), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (41432, 41449), True, 'import numpy as np\n'), ((41874, 41899), 'numpy.linspace', 'np.linspace', (['(8.4)', '(11.0)', '(6)'], {}), '(8.4, 11.0, 6)\n', (41885, 41899), True, 'import numpy as np\n'), ((42388, 42412), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (42395, 42412), True, 'import numpy as np\n'), ((42841, 42866), 'numpy.linspace', 'np.linspace', (['(8.4)', '(10.4)', '(6)'], {}), '(8.4, 10.4, 6)\n', (42852, 42866), True, 'import numpy as np\n'), ((14146, 14159), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (14154, 14159), True, 'import numpy as np\n'), ((14175, 14188), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (14183, 14188), True, 'import numpy as np\n'), ((20786, 20814), 'numpy.log10', 'np.log10', (['(10 ** 11.5 / 2.041)'], {}), '(10 ** 11.5 / 2.041)\n', (20794, 20814), True, 'import numpy as np\n'), ((21376, 21403), 'numpy.log10', 'np.log10', (['(10 ** 8.7 / 2.041)'], {}), '(10 ** 8.7 / 2.041)\n', (21384, 21403), True, 'import numpy as np\n'), ((21435, 21463), 'numpy.log10', 'np.log10', (['(10 ** 11.8 / 2.041)'], {}), '(10 ** 11.8 / 2.041)\n', (21443, 21463), True, 'import numpy as np\n'), ((37757, 37782), 'numpy.linspace', 'np.linspace', (['(8.6)', '(11.2)', '(6)'], {}), '(8.6, 11.2, 6)\n', (37768, 37782), True, 'import numpy as np\n'), ((39000, 39025), 'numpy.linspace', 'np.linspace', (['(8.6)', '(10.7)', '(6)'], {}), '(8.6, 10.7, 6)\n', (39011, 39025), True, 'import numpy as np\n'), ((1998, 2011), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2006, 2011), True, 'import numpy as np\n'), ((2027, 2040), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2035, 2040), True, 'import numpy as np\n'), ((12192, 12205), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (12200, 12205), True, 'import numpy as np\n'), ((12221, 12234), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (12229, 12234), True, 'import numpy as np\n'), ((20927, 20953), 'numpy.log10', 'np.log10', (['(10 ** 11 / 2.041)'], {}), '(10 ** 11 / 2.041)\n', (20935, 20953), True, 'import numpy as np\n'), ((27618, 27656), 'numpy.median', 'np.median', (['resolve_live18.grpcz.values'], {}), '(resolve_live18.grpcz.values)\n', (27627, 27656), True, 'import numpy as np\n'), ((37288, 37312), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (37295, 37312), True, 'import numpy as np\n'), ((37860, 37885), 'numpy.linspace', 'np.linspace', (['(8.4)', '(11.0)', '(6)'], {}), '(8.4, 11.0, 6)\n', (37871, 37885), True, 'import numpy as np\n'), ((38527, 38551), 'numpy.mean', 'np.mean', (['group.cz.values'], {}), '(group.cz.values)\n', (38534, 38551), True, 'import numpy as np\n'), ((39104, 39129), 'numpy.linspace', 'np.linspace', (['(8.4)', '(10.4)', '(6)'], {}), '(8.4, 10.4, 6)\n', (39115, 39129), True, 'import numpy as np\n'), ((2806, 2819), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2814, 2819), True, 'import numpy as np\n'), ((2835, 2848), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2843, 2848), True, 'import numpy as np\n'), 
((13000, 13013), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (13008, 13013), True, 'import numpy as np\n'), ((13029, 13042), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (13037, 13042), True, 'import numpy as np\n'), ((21105, 21133), 'numpy.log10', 'np.log10', (['(10 ** 11.5 / 2.041)'], {}), '(10 ** 11.5 / 2.041)\n', (21113, 21133), True, 'import numpy as np\n'), ((21207, 21235), 'numpy.log10', 'np.log10', (['(10 ** 11.5 / 2.041)'], {}), '(10 ** 11.5 / 2.041)\n', (21215, 21235), True, 'import numpy as np\n'), ((28474, 28512), 'numpy.median', 'np.median', (['resolve_live18.grpcz.values'], {}), '(resolve_live18.grpcz.values)\n', (28483, 28512), True, 'import numpy as np\n')]
|
from flask import Blueprint
from flask_restful import Api
from .user import *
from .auth import *
from .zone import *
from .type import *
from .ttl import *
from .record import *
from .ttldata import *
from .content import *
from .content_serial import *
from .dns.create import *
from .command_rest import *
from .admin.auth import *
from .admin.create import *
from .cs_master import *
from .cs_slave_node import *
from .cluster import *
from .check_on import *
from .health import *
api_blueprint = Blueprint("api", __name__, url_prefix='/api')
api = Api(api_blueprint)
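## USER AND AUTH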
api.add_resource(UserdataResource, '/user')
api.add_resource(UserdataResourceById, '/user/<userdata_id>')
api.add_resource(UserdataInsert, '/user')
api.add_resource(UserdataUpdate, '/user/<userdata_id>')
api.add_resource(UserdataResourceByProjectId, '/user/project/<project_id>')
api.add_resource(UserdataResourceByUserId, '/user/id/<user_id>')
api.add_resource(UserdataRemove, '/user/<userdata_id>')
api.add_resource(Usersignin,"/login")
api.add_resource(UserDataZoneInsert,"/userzone")
api.add_resource(UserDataZoneResource,"/userzone")
## DNS API
api.add_resource(ZoneName, '/zone')
api.add_resource(Type, '/type')
api.add_resource(TtlName, '/ttl')
api.add_resource(Record, '/record')
api.add_resource(TtlData, '/ttldata')
api.add_resource(Content, '/content')
api.add_resource(ContentSerial, '/content_serial')
api.add_resource(SendCommandRest, '/sendcommand')
api.add_resource(CreateDNS, '/user/dnscreate')
## ADMIN AUTH
api.add_resource(AdminAuth, '/admin/login')
api.add_resource(CreateDNSAdminRole, '/admin/dnscreate')
## CLUSTERING
api.add_resource(CsMaster,'/master')
api.add_resource(RefreshZoneMaster, '/master/refresh/<id_refresh>')
api.add_resource(CsSlave,'/slave_node')
api.add_resource(RefreshZoneSlave, "/slave_node/refresh/<id_refresh>")
## CLUSTER
api.add_resource(ClusterCheckMaster, '/cluster/master/<id_master>')
api.add_resource(ClusterCheckSlave, '/cluster/slave/<id_slave>')
api.add_resource(ClusterUnsetCheckMaster, '/cluster/unset/master/<id_master>')
api.add_resource(ClusterUnsetCheckSlave, '/cluster/unset/slave/<id_slave>')
# CHECK ON
api.add_resource(NotifyOnAgent, "/agent/check")
api.add_resource(ChekcLogSyncOnMaster, '/agent/master/<id_logs>')
api.add_resource(CheckLogSyncOnSlave, '/agent/slave/<id_logs>')
# Health Check
api.add_resource(HealthCheck, "/health")
|
[
"flask_restful.Api",
"flask.Blueprint"
] |
[((503, 548), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {'url_prefix': '"""/api"""'}), "('api', __name__, url_prefix='/api')\n", (512, 548), False, 'from flask import Blueprint\n'), ((556, 574), 'flask_restful.Api', 'Api', (['api_blueprint'], {}), '(api_blueprint)\n', (559, 574), False, 'from flask_restful import Api\n')]
|
#!/bin/env python3
import os, sys, argparse
import requests, yaml
default_temp_file = '/tmp/twitch_online.token'
default_auth_file = os.path.join(os.environ['XDG_CONFIG_HOME'], 'twitch_online.creds')
parser = argparse.ArgumentParser(description="CLI Utility to check if a twitch channel is streaming", \
epilog="The credentials file should be a simple yaml file with `client_id` and `client_secret` variables")
parser.add_argument('-a', '--auth-file', default=default_auth_file, help="File used to store credentials")
parser.add_argument('-t', '--temp-file', default=default_temp_file, help="File used to store temporary auth token")
parser.add_argument('CHANNEL', help="Channel to check")
args = vars(parser.parse_args())
token_file = args['temp_file']
# Init temp token file
if not os.path.isfile(token_file):
with open(token_file, 'w') as f:
f.write('')
# only the calling user should be able to read this
os.chmod(token_file, 0o600)
# Handle reading config yaml
with open(args['auth_file'], 'r') as f:
try:
config = yaml.safe_load(f)
client_id = config['client_id']
client_secret = config['client_secret']
os.chmod(token_file, 0o600)
except yaml.YAMLError as pe:
print("Failed to parse credentials file", args['auth_file'])
print(pe)
sys.exit(128)
def check_status():
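    # Read the cached OAuth token and query the Helix streams endpoint for the
    # requested channel; the raw response is returned for the caller to inspect.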
with open(token_file, 'r') as f:
auth_token = f.read(256).rstrip('\n')
auth_headers = {'Authorization': 'Bearer ' + auth_token, \
'Client-Id': client_id}
r = requests.get('https://api.twitch.tv/helix/streams', \
params={'user_login': args['CHANNEL']}, \
headers=auth_headers)
return r
def authenticate():
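    # Request a fresh app access token via the client-credentials flow and cache
    # it in the temp file for later calls.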
auth = requests.post('https://id.twitch.tv/oauth2/token', params={
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials',
})
auth.raise_for_status()
auth_data = auth.json()
with open(token_file, 'w') as f:
f.write(auth_data['access_token'])
def main():
response = check_status()
if response.status_code == 401:
sys.stderr.write("Authentication failed, re-authenticating")
authenticate()
response = check_status()
elif response.status_code == 200:
if response.json()['data'] and response.json()['data'][0]['type'] == 'live':
sys.exit(0)
else:
sys.exit(1)
else:
sys.stderr.write("Unhandled API status code: " + response.status_code)
sys.stderr.write(response.text)
sys.exit(2)
if __name__ == "__main__":
main()
|
[
"os.chmod",
"argparse.ArgumentParser",
"os.path.isfile",
"yaml.safe_load",
"requests.get",
"requests.post",
"sys.stderr.write",
"os.path.join",
"sys.exit"
] |
[((135, 201), 'os.path.join', 'os.path.join', (["os.environ['XDG_CONFIG_HOME']", '"""twitch_online.creds"""'], {}), "(os.environ['XDG_CONFIG_HOME'], 'twitch_online.creds')\n", (147, 201), False, 'import os, sys, argparse\n'), ((212, 426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""CLI Utility to check if a twitch channel is streaming"""', 'epilog': '"""The credentials file should be a simple yaml file with `client_id` and `client_secret` variables"""'}), "(description=\n 'CLI Utility to check if a twitch channel is streaming', epilog=\n 'The credentials file should be a simple yaml file with `client_id` and `client_secret` variables'\n )\n", (235, 426), False, 'import os, sys, argparse\n'), ((797, 823), 'os.path.isfile', 'os.path.isfile', (['token_file'], {}), '(token_file)\n', (811, 823), False, 'import os, sys, argparse\n'), ((942, 967), 'os.chmod', 'os.chmod', (['token_file', '(384)'], {}), '(token_file, 384)\n', (950, 967), False, 'import os, sys, argparse\n'), ((1564, 1681), 'requests.get', 'requests.get', (['"""https://api.twitch.tv/helix/streams"""'], {'params': "{'user_login': args['CHANNEL']}", 'headers': 'auth_headers'}), "('https://api.twitch.tv/helix/streams', params={'user_login':\n args['CHANNEL']}, headers=auth_headers)\n", (1576, 1681), False, 'import requests, yaml\n'), ((1751, 1910), 'requests.post', 'requests.post', (['"""https://id.twitch.tv/oauth2/token"""'], {'params': "{'client_id': client_id, 'client_secret': client_secret, 'grant_type':\n 'client_credentials'}"}), "('https://id.twitch.tv/oauth2/token', params={'client_id':\n client_id, 'client_secret': client_secret, 'grant_type':\n 'client_credentials'})\n", (1764, 1910), False, 'import requests, yaml\n'), ((1067, 1084), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1081, 1084), False, 'import requests, yaml\n'), ((1181, 1206), 'os.chmod', 'os.chmod', (['token_file', '(384)'], {}), '(token_file, 384)\n', (1189, 1206), False, 'import os, sys, argparse\n'), ((2149, 2209), 'sys.stderr.write', 'sys.stderr.write', (['"""Authentication failed, re-authenticating"""'], {}), "('Authentication failed, re-authenticating')\n", (2165, 2209), False, 'import os, sys, argparse\n'), ((1337, 1350), 'sys.exit', 'sys.exit', (['(128)'], {}), '(128)\n', (1345, 1350), False, 'import os, sys, argparse\n'), ((2470, 2540), 'sys.stderr.write', 'sys.stderr.write', (["('Unhandled API status code: ' + response.status_code)"], {}), "('Unhandled API status code: ' + response.status_code)\n", (2486, 2540), False, 'import os, sys, argparse\n'), ((2549, 2580), 'sys.stderr.write', 'sys.stderr.write', (['response.text'], {}), '(response.text)\n', (2565, 2580), False, 'import os, sys, argparse\n'), ((2589, 2600), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2597, 2600), False, 'import os, sys, argparse\n'), ((2402, 2413), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2410, 2413), False, 'import os, sys, argparse\n'), ((2440, 2451), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2448, 2451), False, 'import os, sys, argparse\n')]
|
#coding:utf-8
"""
@author tk0103
@date 2018-07-04
"""
import os, time, sys, copy
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.dataset import concat_examples
class Unet3DEvaluator(chainer.training.extensions.Evaluator):
def __init__(self, iterator, unet, number_of_label, converter=concat_examples, device=None, eval_hook=None):
if isinstance(iterator, chainer.dataset.Iterator):
iterator = {'main': iterator}
self._iterators = iterator
self._targets = {'unet':unet}
self.converter = converter
self.device = device
self.eval_hook = eval_hook
self._max_label = number_of_label
def loss_softmax_cross_entropy(self, predict, ground_truth):
eps = 1e-16
cross_entropy = -F.mean(F.log(predict+eps) * ground_truth)
return cross_entropy
def dice_coefficent(self, predict, ground_truth):
'''
Assume label 0 is background
'''
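        # Soft Dice over the foreground labels only: 2*sum(p*g) / (sum(p) + sum(g) + eps).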
dice_numerator = 0.0
dice_denominator = 0.0
eps = 1e-16
predict = F.flatten(predict[:,1:self._max_label,:,:,:])
ground_truth = F.flatten(ground_truth[:,1:self._max_label,:,:,:].astype(np.float32))
dice_numerator = F.sum(predict * ground_truth)
dice_denominator =F.sum(predict+ ground_truth)
dice = 2*dice_numerator/(dice_denominator+eps)
return dice
def evaluate(self):
iterator = self._iterators['main']
unet = self._targets['unet']
#eval_func = self.eval_func or target
it = copy.copy(iterator)#shallow copy
summary = chainer.reporter.DictSummary()
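        # Run the UNet on each validation batch without gradients and report the
        # cross-entropy loss and Dice score; compute_mean() averages over batches.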
for batch in it:
observation = {}
with chainer.reporter.report_scope(observation):
ground_truth, data = self.converter(batch, self.device)
with chainer.using_config("train", False):
with chainer.no_backprop_mode():
predict = unet(data)
observation['unet/val/loss'] = self.loss_softmax_cross_entropy(predict, ground_truth)
observation['unet/val/dice'] = self.dice_coefficent(predict, ground_truth)
summary.add(observation)
return summary.compute_mean()
|
[
"chainer.functions.flatten",
"chainer.functions.sum",
"copy.copy",
"chainer.reporter.report_scope",
"chainer.functions.log",
"chainer.no_backprop_mode",
"chainer.using_config",
"chainer.reporter.DictSummary"
] |
[((1105, 1154), 'chainer.functions.flatten', 'F.flatten', (['predict[:, 1:self._max_label, :, :, :]'], {}), '(predict[:, 1:self._max_label, :, :, :])\n', (1114, 1154), True, 'import chainer.functions as F\n'), ((1270, 1299), 'chainer.functions.sum', 'F.sum', (['(predict * ground_truth)'], {}), '(predict * ground_truth)\n', (1275, 1299), True, 'import chainer.functions as F\n'), ((1326, 1355), 'chainer.functions.sum', 'F.sum', (['(predict + ground_truth)'], {}), '(predict + ground_truth)\n', (1331, 1355), True, 'import chainer.functions as F\n'), ((1596, 1615), 'copy.copy', 'copy.copy', (['iterator'], {}), '(iterator)\n', (1605, 1615), False, 'import os, time, sys, copy\n'), ((1647, 1677), 'chainer.reporter.DictSummary', 'chainer.reporter.DictSummary', ([], {}), '()\n', (1675, 1677), False, 'import chainer\n'), ((1750, 1792), 'chainer.reporter.report_scope', 'chainer.reporter.report_scope', (['observation'], {}), '(observation)\n', (1779, 1792), False, 'import chainer\n'), ((825, 845), 'chainer.functions.log', 'F.log', (['(predict + eps)'], {}), '(predict + eps)\n', (830, 845), True, 'import chainer.functions as F\n'), ((1887, 1923), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (1907, 1923), False, 'import chainer\n'), ((1950, 1976), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (1974, 1976), False, 'import chainer\n')]
|
# Generated by Django 2.1.2 on 2018-11-04 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0035_userwithprofile_followedcategories'),
]
operations = [
migrations.AddField(
model_name='story',
name='image',
field=models.FileField(blank=True, default='pic_folder/None/no-img.jpg', null=True, upload_to='pic_folder/'),
),
migrations.AlterField(
model_name='userwithprofile',
name='writtenStories',
field=models.ManyToManyField(blank=True, related_name='WrittenStories', to='app.Story', verbose_name='Historias escritas'),
),
]
|
[
"django.db.models.FileField",
"django.db.models.ManyToManyField"
] |
[((344, 451), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'default': '"""pic_folder/None/no-img.jpg"""', 'null': '(True)', 'upload_to': '"""pic_folder/"""'}), "(blank=True, default='pic_folder/None/no-img.jpg', null=\n True, upload_to='pic_folder/')\n", (360, 451), False, 'from django.db import migrations, models\n'), ((585, 706), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""WrittenStories"""', 'to': '"""app.Story"""', 'verbose_name': '"""Historias escritas"""'}), "(blank=True, related_name='WrittenStories', to=\n 'app.Story', verbose_name='Historias escritas')\n", (607, 706), False, 'from django.db import migrations, models\n')]
|
import numpy as np
import matplotlib.pyplot as plt
def plot_tsp(parameters, rank):
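    # Close the tour by repeating the first index, then draw the cities as red dots
    # and the route as a blue line.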
rank = np.concatenate([rank, rank[0:1]], axis=0)
plt.figure()
    plt.plot(parameters[:, 0], parameters[:, 1], 'o', color='red')
    plt.plot(parameters[:, 0][rank], parameters[:, 1][rank], '-', color='blue')
plt.show()
|
[
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.concatenate"
] |
[((96, 137), 'numpy.concatenate', 'np.concatenate', (['[rank, rank[0:1]]'], {'axis': '(0)'}), '([rank, rank[0:1]], axis=0)\n', (110, 137), True, 'import numpy as np\n'), ((143, 155), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (153, 155), True, 'import matplotlib.pyplot as plt\n'), ((160, 223), 'matplotlib.pyplot.plot', 'plt.plot', (['parameters[:, 0]', 'parameters[:, 1]', '"""ro"""'], {'color': '"""red"""'}), "(parameters[:, 0], parameters[:, 1], 'ro', color='red')\n", (168, 223), True, 'import matplotlib.pyplot as plt\n'), ((228, 304), 'matplotlib.pyplot.plot', 'plt.plot', (['parameters[:, 0][rank]', 'parameters[:, 1][rank]', '"""r-"""'], {'color': '"""blue"""'}), "(parameters[:, 0][rank], parameters[:, 1][rank], 'r-', color='blue')\n", (236, 304), True, 'import matplotlib.pyplot as plt\n'), ((309, 319), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (317, 319), True, 'import matplotlib.pyplot as plt\n')]
|
"""
Make pie charts of varying size - see
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie for the docstring.
This example shows basic pie charts with labels and optional features,
like autolabeling the percentage, offsetting a slice with "explode"
and adding a shadow, in different sizes.
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Some data
labels = 'Frogs', 'Hogs', 'Dogs', 'Logs'
fracs = [15, 30, 45, 10]
explode=(0, 0.05, 0, 0)
# Make square figures and axes
the_grid = GridSpec(2, 2)
plt.subplot(the_grid[0, 0], aspect=1)
plt.pie(fracs, labels=labels, autopct='%1.1f%%', shadow=True)
plt.subplot(the_grid[0, 1], aspect=1)
plt.pie(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=True)
plt.subplot(the_grid[1, 0], aspect=1)
patches, texts, autotexts = plt.pie(fracs, labels=labels,
autopct='%.0f%%',
shadow=True, radius=0.5)
# Make the labels on the small plot easier to read.
for t in texts:
t.set_size('smaller')
for t in autotexts:
t.set_size('x-small')
autotexts[0].set_color('y')
plt.subplot(the_grid[1, 1], aspect=1)
patches, texts, autotexts = plt.pie(fracs, explode=explode,
labels=labels, autopct='%.0f%%',
shadow=False, radius=0.5)
# Turn off shadow for tiny plot
# with exploded slice.
for t in texts:
t.set_size('smaller')
for t in autotexts:
t.set_size('x-small')
autotexts[0].set_color('y')
plt.show()
|
[
"matplotlib.pyplot.pie",
"matplotlib.pyplot.subplot",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.show"
] |
[((533, 547), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (541, 547), False, 'from matplotlib.gridspec import GridSpec\n'), ((549, 586), 'matplotlib.pyplot.subplot', 'plt.subplot', (['the_grid[0, 0]'], {'aspect': '(1)'}), '(the_grid[0, 0], aspect=1)\n', (560, 586), True, 'import matplotlib.pyplot as plt\n'), ((588, 649), 'matplotlib.pyplot.pie', 'plt.pie', (['fracs'], {'labels': 'labels', 'autopct': '"""%1.1f%%"""', 'shadow': '(True)'}), "(fracs, labels=labels, autopct='%1.1f%%', shadow=True)\n", (595, 649), True, 'import matplotlib.pyplot as plt\n'), ((651, 688), 'matplotlib.pyplot.subplot', 'plt.subplot', (['the_grid[0, 1]'], {'aspect': '(1)'}), '(the_grid[0, 1], aspect=1)\n', (662, 688), True, 'import matplotlib.pyplot as plt\n'), ((690, 767), 'matplotlib.pyplot.pie', 'plt.pie', (['fracs'], {'explode': 'explode', 'labels': 'labels', 'autopct': '"""%.0f%%"""', 'shadow': '(True)'}), "(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=True)\n", (697, 767), True, 'import matplotlib.pyplot as plt\n'), ((769, 806), 'matplotlib.pyplot.subplot', 'plt.subplot', (['the_grid[1, 0]'], {'aspect': '(1)'}), '(the_grid[1, 0], aspect=1)\n', (780, 806), True, 'import matplotlib.pyplot as plt\n'), ((836, 908), 'matplotlib.pyplot.pie', 'plt.pie', (['fracs'], {'labels': 'labels', 'autopct': '"""%.0f%%"""', 'shadow': '(True)', 'radius': '(0.5)'}), "(fracs, labels=labels, autopct='%.0f%%', shadow=True, radius=0.5)\n", (843, 908), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1188), 'matplotlib.pyplot.subplot', 'plt.subplot', (['the_grid[1, 1]'], {'aspect': '(1)'}), '(the_grid[1, 1], aspect=1)\n', (1162, 1188), True, 'import matplotlib.pyplot as plt\n'), ((1218, 1313), 'matplotlib.pyplot.pie', 'plt.pie', (['fracs'], {'explode': 'explode', 'labels': 'labels', 'autopct': '"""%.0f%%"""', 'shadow': '(False)', 'radius': '(0.5)'}), "(fracs, explode=explode, labels=labels, autopct='%.0f%%', shadow=\n False, radius=0.5)\n", (1225, 1313), True, 'import matplotlib.pyplot as plt\n'), ((1609, 1619), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1617, 1619), True, 'import matplotlib.pyplot as plt\n')]
|
"""build_test_dataset.py -- The functions to build simulated data sets.
"""
import pickle
import numpy as np
from scipy import stats
# import matplotlib.pyplot as plt
# import corner
DATA_NAME = 'simple' # default
DATA_NAME = '3_gaus'
MB_HOST = 'indirect' # default
MB_HOST = 'step' # todo implement this
MB_HOST = 'direct'
np.random.seed(13048293)
N_SNE = 300
YOUNG_FRAC = 0.95
N_YOUNG = int(N_SNE*YOUNG_FRAC)
N_OLD = N_SNE - N_YOUNG
# TRUE VALUES
c_true = np.random.randn(N_SNE)*0.1
mass_young = np.random.randn(N_YOUNG) + 11 - np.random.exponential(0.5, N_YOUNG)
mass_old = np.random.randn(N_OLD)*0.75 + 11
mass_true = np.concatenate((mass_young, mass_old))
x1_true = np.random.randn(N_SNE)*((mass_true>10)*0.75 + (mass_true<=10)*0.9) + ((mass_true>10)*-0.5 + (mass_true<=10)*0.1)
age_young = (np.random.triangular(0.25, 0.5, 6, size=N_YOUNG)*(mass_young/4)
+ np.random.exponential(size=N_YOUNG)*x1_true[:N_YOUNG]/3)
age_old = np.random.randn(N_OLD)*0.75 + 10
age_true = np.append(age_young, age_old)
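# Coefficients for x1, c, mass and age (in that order) used to build mb_true below;
# mass and age only enter when MB_HOST == 'direct'.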
COFF = [-0.1, 3, 0.05/0.5, 0.05/2]
if MB_HOST == 'direct':
mb_true = COFF[0]*x1_true + COFF[1]*c_true + COFF[2]*mass_true + COFF[3]*age_true - 20
else:
mb_true = COFF[0]*x1_true + COFF[1]*c_true - 20
# corner.corner(np.array([x1_true, c_true, mass_true, age_true, mb_true]).T,
# labels=['x1', 'c', 'mass', 'age', 'M'])
# plt.show()
# OBSERVATIONAL
x1_obs = x1_true + np.random.randn(N_SNE)*0.3
c_obs = c_true + np.random.randn(N_SNE)*0.04
mass_obs = mass_true + np.random.randn(N_SNE)*0.5
# todo add obs systematic to ages
if DATA_NAME == '3_gaus':
AGE_STD = 0.2
# each should be shape (N_SNE, 3) for the 3_gaus model
# tile works if the input array is shape (N_SNE, 1)
age_gaus_mean = np.abs(np.tile(age_true.reshape(N_SNE, 1), 3) +
np.random.randn(N_SNE, 3)*AGE_STD*np.tile(age_true.reshape(N_SNE, 1), 3))
age_gaus_mean = np.expand_dims(age_gaus_mean, 0)
# only apply 1/3 of the uncertainty to each Gaussian
age_gaus_std = np.random.randn(N_SNE, 3)*(AGE_STD*np.tile(age_true.reshape(N_SNE, 1), 3))/3
age_gaus_std = np.expand_dims(age_gaus_std, 0)
# it just works, test it with .sum(axis=1).
age_gaus_A = np.random.dirichlet((1, 1, 1), (N_SNE))
age_gaus_A = np.expand_dims(age_gaus_A, 0)
else:
# defaults to simple model
AGE_STD = 0.2
    age_obs = np.abs(age_true + np.random.randn(N_SNE)*AGE_STD*age_true)
    # The pickle dump below expects age_gaus_mean; use the observed age as the mean of a
    # single Gaussian so the shape matches the 3_gaus branch: (1, N_SNE, 1).
    age_gaus_mean = np.expand_dims(age_obs.reshape(N_SNE, 1), 0)
    age_gaus_std = [np.array([AGE_STD*np.abs(age_true)]).T]
    age_gaus_A = np.ones((1, N_SNE, 1), dtype=float)  # np.float was removed from modern NumPy
mb_obs = mb_true + np.random.randn(N_SNE)*0.15
# corner.corner(np.array([x1_obs, c_obs, mass_obs, age_obs, mb_obs]).T,
# labels=['x1', 'c', 'mass', 'age', 'M'], show_titles=True)
# plt.show()
# SAVE DATA
if DATA_NAME == '3_gaus':
n_age_mix = 3
else:
n_age_mix = 1
pickle.dump(dict( # general properties
n_sne=N_SNE, n_props=5, n_non_gaus_props=1, n_sn_set=1,
sn_set_inds=[0]*N_SNE,
# redshifts
z_helio=[0.1]*N_SNE, z_CMB=[0.1]*N_SNE,
# Gaussian defined properties
obs_mBx1c=[[mb_obs[i], x1_obs[i], c_obs[i], mass_obs[i]] for i in range(N_SNE)],
obs_mBx1c_cov=[np.diag([0.05**2, 0.3**2, 0.04**2, 0.3**2])]*N_SNE,
# Non-Gaussian properties, aka age
n_age_mix=n_age_mix, age_gaus_mean=age_gaus_mean,
age_gaus_std=age_gaus_std, age_gaus_A=age_gaus_A,
# Other stuff that does not really need to change
do_fullDint=0, outl_frac_prior_lnmean=-4.6, outl_frac_prior_lnwidth=1,
lognormal_intr_prior=0, allow_alpha_S_N=0),
open(f'test_{DATA_NAME}_{N_SNE}_obs.pkl', 'wb'))
pickle.dump({'x1': x1_true, 'c': c_true, 'mass': mass_true,
'age': age_true, 'mb': mb_true},
open(f'test_{DATA_NAME}_{N_SNE}_true.pkl', 'wb'))
|
[
"numpy.random.dirichlet",
"numpy.random.triangular",
"numpy.random.seed",
"numpy.abs",
"numpy.random.randn",
"numpy.random.exponential",
"numpy.expand_dims",
"numpy.ones",
"numpy.append",
"numpy.diag",
"numpy.concatenate"
] |
[((340, 364), 'numpy.random.seed', 'np.random.seed', (['(13048293)'], {}), '(13048293)\n', (354, 364), True, 'import numpy as np\n'), ((644, 682), 'numpy.concatenate', 'np.concatenate', (['(mass_young, mass_old)'], {}), '((mass_young, mass_old))\n', (658, 682), True, 'import numpy as np\n'), ((1011, 1040), 'numpy.append', 'np.append', (['age_young', 'age_old'], {}), '(age_young, age_old)\n', (1020, 1040), True, 'import numpy as np\n'), ((479, 501), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (494, 501), True, 'import numpy as np\n'), ((552, 587), 'numpy.random.exponential', 'np.random.exponential', (['(0.5)', 'N_YOUNG'], {}), '(0.5, N_YOUNG)\n', (573, 587), True, 'import numpy as np\n'), ((1940, 1972), 'numpy.expand_dims', 'np.expand_dims', (['age_gaus_mean', '(0)'], {}), '(age_gaus_mean, 0)\n', (1954, 1972), True, 'import numpy as np\n'), ((2145, 2176), 'numpy.expand_dims', 'np.expand_dims', (['age_gaus_std', '(0)'], {}), '(age_gaus_std, 0)\n', (2159, 2176), True, 'import numpy as np\n'), ((2242, 2279), 'numpy.random.dirichlet', 'np.random.dirichlet', (['(1, 1, 1)', 'N_SNE'], {}), '((1, 1, 1), N_SNE)\n', (2261, 2279), True, 'import numpy as np\n'), ((2299, 2328), 'numpy.expand_dims', 'np.expand_dims', (['age_gaus_A', '(0)'], {}), '(age_gaus_A, 0)\n', (2313, 2328), True, 'import numpy as np\n'), ((2534, 2572), 'numpy.ones', 'np.ones', (['(1, N_SNE, 1)'], {'dtype': 'np.float'}), '((1, N_SNE, 1), dtype=np.float)\n', (2541, 2572), True, 'import numpy as np\n'), ((520, 544), 'numpy.random.randn', 'np.random.randn', (['N_YOUNG'], {}), '(N_YOUNG)\n', (535, 544), True, 'import numpy as np\n'), ((599, 621), 'numpy.random.randn', 'np.random.randn', (['N_OLD'], {}), '(N_OLD)\n', (614, 621), True, 'import numpy as np\n'), ((694, 716), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (709, 716), True, 'import numpy as np\n'), ((821, 869), 'numpy.random.triangular', 'np.random.triangular', (['(0.25)', '(0.5)', '(6)'], {'size': 'N_YOUNG'}), '(0.25, 0.5, 6, size=N_YOUNG)\n', (841, 869), True, 'import numpy as np\n'), ((967, 989), 'numpy.random.randn', 'np.random.randn', (['N_OLD'], {}), '(N_OLD)\n', (982, 989), True, 'import numpy as np\n'), ((1435, 1457), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (1450, 1457), True, 'import numpy as np\n'), ((1479, 1501), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (1494, 1501), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (1545, 1552), True, 'import numpy as np\n'), ((2592, 2614), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (2607, 2614), True, 'import numpy as np\n'), ((900, 935), 'numpy.random.exponential', 'np.random.exponential', ([], {'size': 'N_YOUNG'}), '(size=N_YOUNG)\n', (921, 935), True, 'import numpy as np\n'), ((2049, 2074), 'numpy.random.randn', 'np.random.randn', (['N_SNE', '(3)'], {}), '(N_SNE, 3)\n', (2064, 2074), True, 'import numpy as np\n'), ((1846, 1871), 'numpy.random.randn', 'np.random.randn', (['N_SNE', '(3)'], {}), '(N_SNE, 3)\n', (1861, 1871), True, 'import numpy as np\n'), ((2416, 2438), 'numpy.random.randn', 'np.random.randn', (['N_SNE'], {}), '(N_SNE)\n', (2431, 2438), True, 'import numpy as np\n'), ((3294, 3345), 'numpy.diag', 'np.diag', (['[0.05 ** 2, 0.3 ** 2, 0.04 ** 2, 0.3 ** 2]'], {}), '([0.05 ** 2, 0.3 ** 2, 0.04 ** 2, 0.3 ** 2])\n', (3301, 3345), True, 'import numpy as np\n'), ((2495, 2511), 'numpy.abs', 'np.abs', (['age_true'], {}), 
'(age_true)\n', (2501, 2511), True, 'import numpy as np\n')]
|
"""Static database tables."""
import pandas as pd
FERC_ACCOUNTS: pd.DataFrame = pd.DataFrame(
columns=['row_number', 'ferc_account_id', 'ferc_account_description'],
data=[
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment (Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear production plant (Major only)'),
(25, 'subtotal_nuclear_produciton', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional Transmission and Market Operation Plant'),
(84, 'subtotal_regional_transmission',
'Subtotal: Transmission and Market Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')
]
)
"""
FERC electric plant account IDs with associated row numbers and descriptions.
From FERC Form 1 pages 204-207, Electric Plant in Service.
Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
"""
FERC_DEPRECIATION_LINES: pd.DataFrame = pd.DataFrame(
columns=['row_number', 'line_id', 'ferc_account_description'],
data=[
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement',
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others',
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing', 'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified', 'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year',
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year', 'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)'),
],
)
"""
Row numbers, FERC account IDs, and FERC account descriptions.
From FERC Form 1 page 219, Accumulated Provision for Depreciation of electric
utility plant (Account 108).
"""
|
[
"pandas.DataFrame"
] |
[((81, 6600), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['row_number', 'ferc_account_id', 'ferc_account_description']", 'data': '[(2, \'301\', \'Intangible: Organization\'), (3, \'302\',\n \'Intangible: Franchises and consents\'), (4, \'303\',\n \'Intangible: Miscellaneous intangible plant\'), (5,\n \'subtotal_intangible\', \'Subtotal: Intangible Plant\'), (8, \'310\',\n \'Steam production: Land and land rights\'), (9, \'311\',\n \'Steam production: Structures and improvements\'), (10, \'312\',\n \'Steam production: Boiler plant equipment\'), (11, \'313\',\n \'Steam production: Engines and engine-driven generators\'), (12, \'314\',\n \'Steam production: Turbogenerator units\'), (13, \'315\',\n \'Steam production: Accessory electric equipment\'), (14, \'316\',\n \'Steam production: Miscellaneous power plant equipment\'), (15, \'317\',\n \'Steam production: Asset retirement costs for steam production plant\'),\n (16, \'subtotal_steam_production\', \'Subtotal: Steam Production Plant\'),\n (18, \'320\', \'Nuclear production: Land and land rights (Major only)\'), (\n 19, \'321\',\n \'Nuclear production: Structures and improvements (Major only)\'), (20,\n \'322\', \'Nuclear production: Reactor plant equipment (Major only)\'), (21,\n \'323\', \'Nuclear production: Turbogenerator units (Major only)\'), (22,\n \'324\', \'Nuclear production: Accessory electric equipment (Major only)\'),\n (23, \'325\',\n \'Nuclear production: Miscellaneous power plant equipment (Major only)\'),\n (24, \'326\',\n \'Nuclear production: Asset retirement costs for nuclear production plant (Major only)\'\n ), (25, \'subtotal_nuclear_produciton\',\n \'Subtotal: Nuclear Production Plant\'), (27, \'330\',\n \'Hydraulic production: Land and land rights\'), (28, \'331\',\n \'Hydraulic production: Structures and improvements\'), (29, \'332\',\n \'Hydraulic production: Reservoirs, dams, and waterways\'), (30, \'333\',\n \'Hydraulic production: Water wheels, turbines and generators\'), (31,\n \'334\', \'Hydraulic production: Accessory electric equipment\'), (32,\n \'335\', \'Hydraulic production: Miscellaneous power plant equipment\'), (\n 33, \'336\', \'Hydraulic production: Roads, railroads and bridges\'), (34,\n \'337\',\n \'Hydraulic production: Asset retirement costs for hydraulic production plant\'\n ), (35, \'subtotal_hydraulic_production\',\n \'Subtotal: Hydraulic Production Plant\'), (37, \'340\',\n \'Other production: Land and land rights\'), (38, \'341\',\n \'Other production: Structures and improvements\'), (39, \'342\',\n \'Other production: Fuel holders, producers, and accessories\'), (40,\n \'343\', \'Other production: Prime movers\'), (41, \'344\',\n \'Other production: Generators\'), (42, \'345\',\n \'Other production: Accessory electric equipment\'), (43, \'346\',\n \'Other production: Miscellaneous power plant equipment\'), (44, \'347\',\n \'Other production: Asset retirement costs for other production plant\'),\n (None, \'348\', \'Other production: Energy Storage Equipment\'), (45,\n \'subtotal_other_production\', \'Subtotal: Other Production Plant\'), (46,\n \'subtotal_production\', \'Subtotal: Production Plant\'), (48, \'350\',\n \'Transmission: Land and land rights\'), (None, \'351\',\n \'Transmission: Energy Storage Equipment\'), (49, \'352\',\n \'Transmission: Structures and improvements\'), (50, \'353\',\n \'Transmission: Station equipment\'), (51, \'354\',\n \'Transmission: Towers and fixtures\'), (52, \'355\',\n \'Transmission: Poles and fixtures\'), (53, \'356\',\n \'Transmission: 
Overhead conductors and devices\'), (54, \'357\',\n \'Transmission: Underground conduit\'), (55, \'358\',\n \'Transmission: Underground conductors and devices\'), (56, \'359\',\n \'Transmission: Roads and trails\'), (57, \'359.1\',\n \'Transmission: Asset retirement costs for transmission plant\'), (58,\n \'subtotal_transmission\', \'Subtotal: Transmission Plant\'), (60, \'360\',\n \'Distribution: Land and land rights\'), (61, \'361\',\n \'Distribution: Structures and improvements\'), (62, \'362\',\n \'Distribution: Station equipment\'), (63, \'363\',\n \'Distribution: Storage battery equipment\'), (64, \'364\',\n \'Distribution: Poles, towers and fixtures\'), (65, \'365\',\n \'Distribution: Overhead conductors and devices\'), (66, \'366\',\n \'Distribution: Underground conduit\'), (67, \'367\',\n \'Distribution: Underground conductors and devices\'), (68, \'368\',\n \'Distribution: Line transformers\'), (69, \'369\',\n \'Distribution: Services\'), (70, \'370\', \'Distribution: Meters\'), (71,\n \'371\', "Distribution: Installations on customers\' premises"), (72,\n \'372\', "Distribution: Leased property on customers\' premises"), (73,\n \'373\', \'Distribution: Street lighting and signal systems\'), (74, \'374\',\n \'Distribution: Asset retirement costs for distribution plant\'), (75,\n \'subtotal_distribution\', \'Subtotal: Distribution Plant\'), (77, \'380\',\n \'Regional transmission: Land and land rights\'), (78, \'381\',\n \'Regional transmission: Structures and improvements\'), (79, \'382\',\n \'Regional transmission: Computer hardware\'), (80, \'383\',\n \'Regional transmission: Computer software\'), (81, \'384\',\n \'Regional transmission: Communication Equipment\'), (82, \'385\',\n \'Regional transmission: Miscellaneous Regional Transmission and Market Operation Plant\'\n ), (83, \'386\',\n \'Regional transmission: Asset Retirement Costs for Regional Transmission and Market Operation Plant\'\n ), (84, \'subtotal_regional_transmission\',\n \'Subtotal: Transmission and Market Operation Plant\'), (None, \'387\',\n \'Regional transmission: [Reserved]\'), (86, \'389\',\n \'General: Land and land rights\'), (87, \'390\',\n \'General: Structures and improvements\'), (88, \'391\',\n \'General: Office furniture and equipment\'), (89, \'392\',\n \'General: Transportation equipment\'), (90, \'393\',\n \'General: Stores equipment\'), (91, \'394\',\n \'General: Tools, shop and garage equipment\'), (92, \'395\',\n \'General: Laboratory equipment\'), (93, \'396\',\n \'General: Power operated equipment\'), (94, \'397\',\n \'General: Communication equipment\'), (95, \'398\',\n \'General: Miscellaneous equipment\'), (96, \'subtotal_general\',\n \'Subtotal: General Plant\'), (97, \'399\',\n \'General: Other tangible property\'), (98, \'399.1\',\n \'General: Asset retirement costs for general plant\'), (99,\n \'total_general\', \'TOTAL General Plant\'), (100, \'101_and_106\',\n \'Electric plant in service (Major only)\'), (101, \'102_purchased\',\n \'Electric plant purchased\'), (102, \'102_sold\', \'Electric plant sold\'),\n (103, \'103\', \'Experimental plant unclassified\'), (104,\n \'total_electric_plant\', \'TOTAL Electric Plant in Service\')]'}), '(columns=[\'row_number\', \'ferc_account_id\',\n \'ferc_account_description\'], data=[(2, \'301\',\n \'Intangible: Organization\'), (3, \'302\',\n \'Intangible: Franchises and consents\'), (4, \'303\',\n \'Intangible: Miscellaneous intangible plant\'), (5,\n \'subtotal_intangible\', \'Subtotal: Intangible Plant\'), (8, \'310\',\n \'Steam production: 
Land and land rights\'), (9, \'311\',\n \'Steam production: Structures and improvements\'), (10, \'312\',\n \'Steam production: Boiler plant equipment\'), (11, \'313\',\n \'Steam production: Engines and engine-driven generators\'), (12, \'314\',\n \'Steam production: Turbogenerator units\'), (13, \'315\',\n \'Steam production: Accessory electric equipment\'), (14, \'316\',\n \'Steam production: Miscellaneous power plant equipment\'), (15, \'317\',\n \'Steam production: Asset retirement costs for steam production plant\'),\n (16, \'subtotal_steam_production\', \'Subtotal: Steam Production Plant\'),\n (18, \'320\', \'Nuclear production: Land and land rights (Major only)\'), (\n 19, \'321\',\n \'Nuclear production: Structures and improvements (Major only)\'), (20,\n \'322\', \'Nuclear production: Reactor plant equipment (Major only)\'), (21,\n \'323\', \'Nuclear production: Turbogenerator units (Major only)\'), (22,\n \'324\', \'Nuclear production: Accessory electric equipment (Major only)\'),\n (23, \'325\',\n \'Nuclear production: Miscellaneous power plant equipment (Major only)\'),\n (24, \'326\',\n \'Nuclear production: Asset retirement costs for nuclear production plant (Major only)\'\n ), (25, \'subtotal_nuclear_produciton\',\n \'Subtotal: Nuclear Production Plant\'), (27, \'330\',\n \'Hydraulic production: Land and land rights\'), (28, \'331\',\n \'Hydraulic production: Structures and improvements\'), (29, \'332\',\n \'Hydraulic production: Reservoirs, dams, and waterways\'), (30, \'333\',\n \'Hydraulic production: Water wheels, turbines and generators\'), (31,\n \'334\', \'Hydraulic production: Accessory electric equipment\'), (32,\n \'335\', \'Hydraulic production: Miscellaneous power plant equipment\'), (\n 33, \'336\', \'Hydraulic production: Roads, railroads and bridges\'), (34,\n \'337\',\n \'Hydraulic production: Asset retirement costs for hydraulic production plant\'\n ), (35, \'subtotal_hydraulic_production\',\n \'Subtotal: Hydraulic Production Plant\'), (37, \'340\',\n \'Other production: Land and land rights\'), (38, \'341\',\n \'Other production: Structures and improvements\'), (39, \'342\',\n \'Other production: Fuel holders, producers, and accessories\'), (40,\n \'343\', \'Other production: Prime movers\'), (41, \'344\',\n \'Other production: Generators\'), (42, \'345\',\n \'Other production: Accessory electric equipment\'), (43, \'346\',\n \'Other production: Miscellaneous power plant equipment\'), (44, \'347\',\n \'Other production: Asset retirement costs for other production plant\'),\n (None, \'348\', \'Other production: Energy Storage Equipment\'), (45,\n \'subtotal_other_production\', \'Subtotal: Other Production Plant\'), (46,\n \'subtotal_production\', \'Subtotal: Production Plant\'), (48, \'350\',\n \'Transmission: Land and land rights\'), (None, \'351\',\n \'Transmission: Energy Storage Equipment\'), (49, \'352\',\n \'Transmission: Structures and improvements\'), (50, \'353\',\n \'Transmission: Station equipment\'), (51, \'354\',\n \'Transmission: Towers and fixtures\'), (52, \'355\',\n \'Transmission: Poles and fixtures\'), (53, \'356\',\n \'Transmission: Overhead conductors and devices\'), (54, \'357\',\n \'Transmission: Underground conduit\'), (55, \'358\',\n \'Transmission: Underground conductors and devices\'), (56, \'359\',\n \'Transmission: Roads and trails\'), (57, \'359.1\',\n \'Transmission: Asset retirement costs for transmission plant\'), (58,\n \'subtotal_transmission\', \'Subtotal: Transmission Plant\'), (60, \'360\',\n \'Distribution: Land and 
land rights\'), (61, \'361\',\n \'Distribution: Structures and improvements\'), (62, \'362\',\n \'Distribution: Station equipment\'), (63, \'363\',\n \'Distribution: Storage battery equipment\'), (64, \'364\',\n \'Distribution: Poles, towers and fixtures\'), (65, \'365\',\n \'Distribution: Overhead conductors and devices\'), (66, \'366\',\n \'Distribution: Underground conduit\'), (67, \'367\',\n \'Distribution: Underground conductors and devices\'), (68, \'368\',\n \'Distribution: Line transformers\'), (69, \'369\',\n \'Distribution: Services\'), (70, \'370\', \'Distribution: Meters\'), (71,\n \'371\', "Distribution: Installations on customers\' premises"), (72,\n \'372\', "Distribution: Leased property on customers\' premises"), (73,\n \'373\', \'Distribution: Street lighting and signal systems\'), (74, \'374\',\n \'Distribution: Asset retirement costs for distribution plant\'), (75,\n \'subtotal_distribution\', \'Subtotal: Distribution Plant\'), (77, \'380\',\n \'Regional transmission: Land and land rights\'), (78, \'381\',\n \'Regional transmission: Structures and improvements\'), (79, \'382\',\n \'Regional transmission: Computer hardware\'), (80, \'383\',\n \'Regional transmission: Computer software\'), (81, \'384\',\n \'Regional transmission: Communication Equipment\'), (82, \'385\',\n \'Regional transmission: Miscellaneous Regional Transmission and Market Operation Plant\'\n ), (83, \'386\',\n \'Regional transmission: Asset Retirement Costs for Regional Transmission and Market Operation Plant\'\n ), (84, \'subtotal_regional_transmission\',\n \'Subtotal: Transmission and Market Operation Plant\'), (None, \'387\',\n \'Regional transmission: [Reserved]\'), (86, \'389\',\n \'General: Land and land rights\'), (87, \'390\',\n \'General: Structures and improvements\'), (88, \'391\',\n \'General: Office furniture and equipment\'), (89, \'392\',\n \'General: Transportation equipment\'), (90, \'393\',\n \'General: Stores equipment\'), (91, \'394\',\n \'General: Tools, shop and garage equipment\'), (92, \'395\',\n \'General: Laboratory equipment\'), (93, \'396\',\n \'General: Power operated equipment\'), (94, \'397\',\n \'General: Communication equipment\'), (95, \'398\',\n \'General: Miscellaneous equipment\'), (96, \'subtotal_general\',\n \'Subtotal: General Plant\'), (97, \'399\',\n \'General: Other tangible property\'), (98, \'399.1\',\n \'General: Asset retirement costs for general plant\'), (99,\n \'total_general\', \'TOTAL General Plant\'), (100, \'101_and_106\',\n \'Electric plant in service (Major only)\'), (101, \'102_purchased\',\n \'Electric plant purchased\'), (102, \'102_sold\', \'Electric plant sold\'),\n (103, \'103\', \'Experimental plant unclassified\'), (104,\n \'total_electric_plant\', \'TOTAL Electric Plant in Service\')])\n', (93, 6600), True, 'import pandas as pd\n'), ((7581, 9751), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['row_number', 'line_id', 'ferc_account_description']", 'data': "[(1, 'balance_beginning_of_year', 'Balance Beginning of Year'), (3,\n 'depreciation_expense', '(403) Depreciation Expense'), (4,\n 'depreciation_expense_asset_retirement',\n '(403.1) Depreciation Expense for Asset Retirement Costs'), (5,\n 'expense_electric_plant_leased_to_others',\n '(413) Exp. of Elec. Plt. Leas. 
to Others'), (6,\n 'transportation_expenses_clearing', 'Transportation Expenses-Clearing'),\n (7, 'other_clearing_accounts', 'Other Clearing Accounts'), (8,\n 'other_accounts_specified',\n 'Other Accounts (Specify, details in footnote):'), (9, 'other_charges',\n 'Other Charges:'), (10, 'total_depreciation_provision_for_year',\n 'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'), (11,\n 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'), (12,\n 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'), (13,\n 'cost_of_removal', 'Cost of Removal'), (14, 'salvage_credit',\n 'Salvage (Credit)'), (15, 'total_net_charges_for_plant_retired',\n 'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'), (\n 16, 'other_debit_or_credit_items',\n 'Other Debit or Cr. Items (Describe, details in footnote):'), (17,\n 'other_charges_2', 'Other Charges 2'), (18,\n 'book_cost_or_asset_retirement_costs_retired',\n 'Book Cost or Asset Retirement Costs Retired'), (19,\n 'balance_end_of_year',\n 'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'), (\n 20, 'steam_production_end_of_year', 'Steam Production'), (21,\n 'nuclear_production_end_of_year', 'Nuclear Production'), (22,\n 'hydraulic_production_end_of_year', 'Hydraulic Production-Conventional'\n ), (23, 'pumped_storage_end_of_year',\n 'Hydraulic Production-Pumped Storage'), (24, 'other_production',\n 'Other Production'), (25, 'transmission', 'Transmission'), (26,\n 'distribution', 'Distribution'), (27,\n 'regional_transmission_and_market_operation',\n 'Regional Transmission and Market Operation'), (28, 'general',\n 'General'), (29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')]"}), "(columns=['row_number', 'line_id', 'ferc_account_description'],\n data=[(1, 'balance_beginning_of_year', 'Balance Beginning of Year'), (3,\n 'depreciation_expense', '(403) Depreciation Expense'), (4,\n 'depreciation_expense_asset_retirement',\n '(403.1) Depreciation Expense for Asset Retirement Costs'), (5,\n 'expense_electric_plant_leased_to_others',\n '(413) Exp. of Elec. Plt. Leas. to Others'), (6,\n 'transportation_expenses_clearing', 'Transportation Expenses-Clearing'),\n (7, 'other_clearing_accounts', 'Other Clearing Accounts'), (8,\n 'other_accounts_specified',\n 'Other Accounts (Specify, details in footnote):'), (9, 'other_charges',\n 'Other Charges:'), (10, 'total_depreciation_provision_for_year',\n 'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'), (11,\n 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'), (12,\n 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'), (13,\n 'cost_of_removal', 'Cost of Removal'), (14, 'salvage_credit',\n 'Salvage (Credit)'), (15, 'total_net_charges_for_plant_retired',\n 'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'), (\n 16, 'other_debit_or_credit_items',\n 'Other Debit or Cr. 
Items (Describe, details in footnote):'), (17,\n 'other_charges_2', 'Other Charges 2'), (18,\n 'book_cost_or_asset_retirement_costs_retired',\n 'Book Cost or Asset Retirement Costs Retired'), (19,\n 'balance_end_of_year',\n 'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'), (\n 20, 'steam_production_end_of_year', 'Steam Production'), (21,\n 'nuclear_production_end_of_year', 'Nuclear Production'), (22,\n 'hydraulic_production_end_of_year', 'Hydraulic Production-Conventional'\n ), (23, 'pumped_storage_end_of_year',\n 'Hydraulic Production-Pumped Storage'), (24, 'other_production',\n 'Other Production'), (25, 'transmission', 'Transmission'), (26,\n 'distribution', 'Distribution'), (27,\n 'regional_transmission_and_market_operation',\n 'Regional Transmission and Market Operation'), (28, 'general',\n 'General'), (29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')])\n", (7593, 9751), True, 'import pandas as pd\n')]
|
import asyncio
from aiohttp import web
async def handle(request):
    # Use a context manager so the file handle is closed after serving the page.
    with open("index.html", 'rb') as index:
        content = index.read()
return web.Response(body=content, content_type='text/html')
async def wshandler(request):
app = request.app
ws = web.WebSocketResponse()
await ws.prepare(request)
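    # Start the shared game loop lazily: on the first connection, or after it was cancelled.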
if app["game_loop"] is None or \
app["game_loop"].cancelled():
app["game_loop"] = asyncio.ensure_future(game_loop(app))
# this is required to propagate exceptions
app["game_loop"].add_done_callback(lambda t: t.result()
if not t.cancelled() else None)
app["sockets"].append(ws)
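    # Receive loop: echo the pressed key code back to this client until it disconnects.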
while 1:
msg = await ws.receive()
        # aiohttp 2+ API: msg.type / WSMsgType, and send_str() is a coroutine.
        if msg.type == web.WSMsgType.TEXT:
            await ws.send_str("Pressed key code: {}".format(msg.data))
            print("Got message %s" % msg.data)
        elif msg.type in (web.WSMsgType.CLOSE, web.WSMsgType.ERROR):
            break
app["sockets"].remove(ws)
if len(app["sockets"]) == 0:
print("Stopping game loop")
app["game_loop"].cancel()
print("Closed connection")
return ws
async def game_loop(app):
print("Game loop started")
while 1:
for ws in app["sockets"]:
            await ws.send_str("game loop passed")
await asyncio.sleep(2)
app = web.Application()
app["sockets"] = []
app["game_loop"] = None
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
|
[
"aiohttp.web.Response",
"aiohttp.web.WebSocketResponse",
"asyncio.sleep",
"aiohttp.web.run_app",
"aiohttp.web.Application"
] |
[((1343, 1360), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (1358, 1360), False, 'from aiohttp import web\n'), ((1500, 1516), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (1511, 1516), False, 'from aiohttp import web\n'), ((142, 194), 'aiohttp.web.Response', 'web.Response', ([], {'body': 'content', 'content_type': '"""text/html"""'}), "(body=content, content_type='text/html')\n", (154, 194), False, 'from aiohttp import web\n'), ((258, 281), 'aiohttp.web.WebSocketResponse', 'web.WebSocketResponse', ([], {}), '()\n', (279, 281), False, 'from aiohttp import web\n'), ((1318, 1334), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (1331, 1334), False, 'import asyncio\n')]
|
import os
from mkconfig.conf.utils import Utils
from mkconfig.env import setup_logging_with_details, Configurations
import logging
from cement.utils import test
setup_logging_with_details()
logger = logging.getLogger(__name__)
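# Logging is configured before MkConfigApp is imported below.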
from mkconfig.core.cli import MkConfigApp
class TestMkConfigApp(test.CementTestCase):
app_class = MkConfigApp
example_dir = Configurations.getProjectRootDir() + '/examples/'
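    # Directory holding the example app configurations (cassandra, jenkins, jira) used by the tests below.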
def setUp(self):
logger.info('Unit Test [{}] Start'.format(self.id()))
def tearDown(self):
folder = Configurations.getTmpTemplateDir()
logger.info('Removing all files under %s', folder)
for the_file in os.listdir(folder):
file_path = os.path.join(folder, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception as e:
print(e)
logger.info('Removing all files under %s ---- DONE', folder)
logger.info('Unit Test [{}] Stop'.format(self.id()))
#########################################################################################
# Default Behavior
#########################################################################################
def test_normal_with_default_template(self):
config_control_string = """
app_list :
- cassandra
app_conf_dir : """ + self.example_dir
app = self.make_app(argv=['-d'+ config_control_string, '-i ', '-otest.output'])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_genericjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_file_not_found_1(self):
app1 = self.make_app(argv=['-o1', '-t1', '-i1'])
app1.setup()
with self.assertRaises(IOError):
app1.run()
app1.close()
#########################################################################################
# Collectd-GenericJmx specific
#########################################################################################
def test_normal_start_and_stop_on_jenkins_with_genericjmx(self):
config_control_string = """
app_list :
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string,])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_genericjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_on_cassandra_with_genericjmx(self):
config_control_string = """
app_list :
- cassandra
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string,])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_genericjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_with_apps_list_with_genericjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string,])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
    def test_normal_start_and_stop_with_all_example_apps_with_genericjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
- jira
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_genericjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_genericjmx.connection.inc.stub')))
app.close()
#########################################################################################
# Collectd-FastJmx specific
#########################################################################################
def test_normal_start_and_stop_on_jenkins_with_fastjmx(self):
config_control_string = """
app_list :
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_fastjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_on_cassandra_with_fastjmx(self):
config_control_string = """
app_list :
- cassandra
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.type, 'collectd_fastjmx')
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
def test_normal_start_and_stop_with_apps_list_with_fastjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
    def test_normal_start_and_stop_with_all_example_apps_with_fastjmx(self):
config_control_string = """
app_list :
- cassandra
- jenkins
- jira
app_conf_dir : """ + self.example_dir
app = self.make_app(
argv=['-tcollectd_fastjmx', '-otest.output', '-i ', '-d'+ config_control_string])
app.setup()
app.run()
self.assertEqual(app.pargs.transf_desc_file, ' ')
self.assertEqual(app.pargs.transf_desc_string, config_control_string)
self.assertEqual(app.pargs.output, 'test.output')
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'cassandra.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jenkins.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.mbean.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'jira.connection.blocks.inc')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.mbean.inc.stub')))
self.assertTrue(Utils.is_file_exist(Configurations.getTmpTemplateFile(
'_collectd_fastjmx.connection.inc.stub')))
app.close()
|
[
"os.path.join",
"os.unlink",
"mkconfig.env.setup_logging_with_details",
"os.path.isfile",
"mkconfig.env.Configurations.getTmpTemplateDir",
"mkconfig.env.Configurations.getTmpTemplateFile",
"mkconfig.env.Configurations.getProjectRootDir",
"os.listdir",
"logging.getLogger"
] |
[((162, 190), 'mkconfig.env.setup_logging_with_details', 'setup_logging_with_details', ([], {}), '()\n', (188, 190), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n'), ((364, 398), 'mkconfig.env.Configurations.getProjectRootDir', 'Configurations.getProjectRootDir', ([], {}), '()\n', (396, 398), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((540, 574), 'mkconfig.env.Configurations.getTmpTemplateDir', 'Configurations.getTmpTemplateDir', ([], {}), '()\n', (572, 574), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((658, 676), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (668, 676), False, 'import os\n'), ((702, 732), 'os.path.join', 'os.path.join', (['folder', 'the_file'], {}), '(folder, the_file)\n', (714, 732), False, 'import os\n'), ((769, 794), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (783, 794), False, 'import os\n'), ((1846, 1909), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (1879, 1909), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((1969, 2037), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (2002, 2037), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((2097, 2169), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.mbean.inc.stub"""'], {}), "('_collectd_genericjmx.mbean.inc.stub')\n", (2130, 2169), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((2229, 2306), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.connection.inc.stub"""'], {}), "('_collectd_genericjmx.connection.inc.stub')\n", (2262, 2306), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((3431, 3492), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.mbean.blocks.inc"""'], {}), "('jenkins.mbean.blocks.inc')\n", (3464, 3492), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((3552, 3618), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.connection.blocks.inc"""'], {}), "('jenkins.connection.blocks.inc')\n", (3585, 3618), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((3678, 3750), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.mbean.inc.stub"""'], {}), "('_collectd_genericjmx.mbean.inc.stub')\n", (3711, 3750), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((3810, 3887), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.connection.inc.stub"""'], {}), "('_collectd_genericjmx.connection.inc.stub')\n", (3843, 3887), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((4591, 4654), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', 
(['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (4624, 4654), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((4714, 4782), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (4747, 4782), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((4842, 4914), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.mbean.inc.stub"""'], {}), "('_collectd_genericjmx.mbean.inc.stub')\n", (4875, 4914), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((4974, 5051), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.connection.inc.stub"""'], {}), "('_collectd_genericjmx.connection.inc.stub')\n", (5007, 5051), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((5715, 5778), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (5748, 5778), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((5838, 5906), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (5871, 5906), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((5966, 6027), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.mbean.blocks.inc"""'], {}), "('jenkins.mbean.blocks.inc')\n", (5999, 6027), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((6087, 6153), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.connection.blocks.inc"""'], {}), "('jenkins.connection.blocks.inc')\n", (6120, 6153), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((6213, 6285), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.mbean.inc.stub"""'], {}), "('_collectd_genericjmx.mbean.inc.stub')\n", (6246, 6285), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((6345, 6422), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.connection.inc.stub"""'], {}), "('_collectd_genericjmx.connection.inc.stub')\n", (6378, 6422), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7110, 7173), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (7143, 7173), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7233, 7301), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (7266, 7301), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7361, 7422), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.mbean.blocks.inc"""'], {}), "('jenkins.mbean.blocks.inc')\n", (7394, 7422), False, 'from 
mkconfig.env import setup_logging_with_details, Configurations\n'), ((7482, 7548), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.connection.blocks.inc"""'], {}), "('jenkins.connection.blocks.inc')\n", (7515, 7548), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7608, 7666), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jira.mbean.blocks.inc"""'], {}), "('jira.mbean.blocks.inc')\n", (7641, 7666), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7726, 7789), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jira.connection.blocks.inc"""'], {}), "('jira.connection.blocks.inc')\n", (7759, 7789), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7849, 7921), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.mbean.inc.stub"""'], {}), "('_collectd_genericjmx.mbean.inc.stub')\n", (7882, 7921), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((7981, 8058), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_genericjmx.connection.inc.stub"""'], {}), "('_collectd_genericjmx.connection.inc.stub')\n", (8014, 8058), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((8969, 9030), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.mbean.blocks.inc"""'], {}), "('jenkins.mbean.blocks.inc')\n", (9002, 9030), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((9090, 9156), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.connection.blocks.inc"""'], {}), "('jenkins.connection.blocks.inc')\n", (9123, 9156), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((9216, 9285), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.mbean.inc.stub"""'], {}), "('_collectd_fastjmx.mbean.inc.stub')\n", (9249, 9285), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((9345, 9419), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.connection.inc.stub"""'], {}), "('_collectd_fastjmx.connection.inc.stub')\n", (9378, 9419), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((10113, 10176), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (10146, 10176), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((10236, 10304), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (10269, 10304), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((10364, 10433), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.mbean.inc.stub"""'], {}), "('_collectd_fastjmx.mbean.inc.stub')\n", (10397, 10433), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((10493, 10567), 
'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.connection.inc.stub"""'], {}), "('_collectd_fastjmx.connection.inc.stub')\n", (10526, 10567), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((11224, 11287), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (11257, 11287), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((11347, 11415), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (11380, 11415), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((11475, 11536), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.mbean.blocks.inc"""'], {}), "('jenkins.mbean.blocks.inc')\n", (11508, 11536), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((11596, 11662), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.connection.blocks.inc"""'], {}), "('jenkins.connection.blocks.inc')\n", (11629, 11662), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((11722, 11791), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.mbean.inc.stub"""'], {}), "('_collectd_fastjmx.mbean.inc.stub')\n", (11755, 11791), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((11851, 11925), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.connection.inc.stub"""'], {}), "('_collectd_fastjmx.connection.inc.stub')\n", (11884, 11925), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((12607, 12670), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.mbean.blocks.inc"""'], {}), "('cassandra.mbean.blocks.inc')\n", (12640, 12670), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((12730, 12798), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""cassandra.connection.blocks.inc"""'], {}), "('cassandra.connection.blocks.inc')\n", (12763, 12798), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((12858, 12919), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.mbean.blocks.inc"""'], {}), "('jenkins.mbean.blocks.inc')\n", (12891, 12919), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((12979, 13045), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jenkins.connection.blocks.inc"""'], {}), "('jenkins.connection.blocks.inc')\n", (13012, 13045), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((13105, 13163), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""jira.mbean.blocks.inc"""'], {}), "('jira.mbean.blocks.inc')\n", (13138, 13163), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((13223, 13286), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', 
(['"""jira.connection.blocks.inc"""'], {}), "('jira.connection.blocks.inc')\n", (13256, 13286), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((13346, 13415), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.mbean.inc.stub"""'], {}), "('_collectd_fastjmx.mbean.inc.stub')\n", (13379, 13415), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((13475, 13549), 'mkconfig.env.Configurations.getTmpTemplateFile', 'Configurations.getTmpTemplateFile', (['"""_collectd_fastjmx.connection.inc.stub"""'], {}), "('_collectd_fastjmx.connection.inc.stub')\n", (13508, 13549), False, 'from mkconfig.env import setup_logging_with_details, Configurations\n'), ((816, 836), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (825, 836), False, 'import os\n')]
|
"""
This module communicates with the Open Targets REST API through a simple client and requires no knowledge of the API.
"""
import logging
from opentargets.conn import Connection, IterableResult
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class OpenTargetsClient(object):
'''
main class to use to get data from the Open Targets REST API available at targetvalidation.org, or your private instance
'''
_search_endpoint = '/public/search'
_filter_associations_endpoint = '/public/association/filter'
_get_associations_endpoint = '/public/association'
_filter_evidence_endpoint = '/public/evidence/filter'
_get_evidence_endpoint = '/public/evidence'
_stats_endpoint = '/public/utils/stats'
def __init__(self,
**kwargs
):
'''
:param kwargs: all params forwarded to :class:`opentargets.conn.Connection` object
'''
self.conn = Connection(**kwargs)
def __enter__(self):
        return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
# self.conn.close()
pass
def search(self, query,**kwargs):
kwargs['q']=query
result = IterableResult(self.conn)
result(self._search_endpoint,params=kwargs)
return result
def get_association(self,association_id, **kwargs):
kwargs['id']= association_id
result = IterableResult(self.conn)
result(self._get_associations_endpoint, params=kwargs)
return result
def filter_associations(self,**kwargs):
result = IterableResult(self.conn)
result(self._filter_associations_endpoint, params=kwargs)
return result
def get_associations_for_target(self, target, **kwargs):
if not isinstance(target, str):
raise AttributeError('target must be of type str')
if not target.startswith('ENSG'):
search_result = next(self.search(target, size=1, filter='target'))
if not search_result:
raise AttributeError('cannot find an ensembl gene id for target {}'.format(target))
target_id = search_result['id']
logger.debug('{} resolved to id {}'.format(target, target_id))
else:
target_id = target
return self.filter_associations(target=target_id,**kwargs)
def get_associations_for_disease(self, disease, **kwargs):
if not isinstance(disease, str):
raise AttributeError('disease must be of type str')
results = self.filter_associations(disease=disease)
if not results:
search_result = next(self.search(disease, size=1, filter='disease'))
if not search_result:
                raise AttributeError('cannot find a disease id for disease {}'.format(disease))
disease_id = search_result['id']
logger.debug('{} resolved to id {}'.format(disease, disease_id))
results = self.filter_associations(disease=disease_id)
return results
def get_evidence(self, evidence_id, **kwargs):
kwargs['id']= evidence_id
result = IterableResult(self.conn)
result(self._get_evidence_endpoint, params=kwargs)
return result
def filter_evidence(self,**kwargs):
result = IterableResult(self.conn)
result(self._filter_evidence_endpoint, params=kwargs)
return result
def get_evidence_for_target(self, target, **kwargs):
if not isinstance(target, str):
raise AttributeError('target must be of type str')
if not target.startswith('ENSG'):
search_result = next(self.search(target, size=1, filter='target'))
if not search_result:
raise AttributeError('cannot find an ensembl gene id for target {}'.format(target))
target_id = search_result['id']
logger.debug('{} resolved to id {}'.format(target, target_id))
else:
target_id = target
return self.filter_evidence(target=target_id,**kwargs)
def get_evidence_for_disease(self, disease, **kwargs):
if not isinstance(disease, str):
raise AttributeError('disease must be of type str')
results = self.filter_evidence(disease=disease)
if not results:
search_result = next(self.search(disease, size=1, filter='disease'))
if not search_result:
                raise AttributeError('cannot find a disease id for disease {}'.format(disease))
disease_id = search_result['id']
logger.debug('{} resolved to id {}'.format(disease, disease_id))
results = self.filter_evidence(disease=disease_id)
return results
def get_stats(self):
result = IterableResult(self.conn)
result(self._stats_endpoint)
return result
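
# Minimal usage sketch (not part of the original module; the 'BRAF' symbol and
# the printed field are assumptions -- it presumes the public REST API is
# reachable and that the symbol resolves to an Ensembl gene id):
#
#   client = OpenTargetsClient()
#   for association in client.get_associations_for_target('BRAF'):
#       print(association['id'])
#   client.close()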
|
[
"opentargets.conn.Connection",
"logging.getLogger",
"opentargets.conn.IterableResult"
] |
[((208, 235), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (225, 235), False, 'import logging\n'), ((962, 982), 'opentargets.conn.Connection', 'Connection', ([], {}), '(**kwargs)\n', (972, 982), False, 'from opentargets.conn import Connection, IterableResult\n'), ((1237, 1262), 'opentargets.conn.IterableResult', 'IterableResult', (['self.conn'], {}), '(self.conn)\n', (1251, 1262), False, 'from opentargets.conn import Connection, IterableResult\n'), ((1448, 1473), 'opentargets.conn.IterableResult', 'IterableResult', (['self.conn'], {}), '(self.conn)\n', (1462, 1473), False, 'from opentargets.conn import Connection, IterableResult\n'), ((1621, 1646), 'opentargets.conn.IterableResult', 'IterableResult', (['self.conn'], {}), '(self.conn)\n', (1635, 1646), False, 'from opentargets.conn import Connection, IterableResult\n'), ((3166, 3191), 'opentargets.conn.IterableResult', 'IterableResult', (['self.conn'], {}), '(self.conn)\n', (3180, 3191), False, 'from opentargets.conn import Connection, IterableResult\n'), ((3331, 3356), 'opentargets.conn.IterableResult', 'IterableResult', (['self.conn'], {}), '(self.conn)\n', (3345, 3356), False, 'from opentargets.conn import Connection, IterableResult\n'), ((4792, 4817), 'opentargets.conn.IterableResult', 'IterableResult', (['self.conn'], {}), '(self.conn)\n', (4806, 4817), False, 'from opentargets.conn import Connection, IterableResult\n')]
|
"""
Defines the model to hold application events
"""
from django.conf import settings
from django.db import models
from django.forms.models import model_to_dict
from ..constants import EventKind, EventModel, EventCommonCodes, CODES_PER_MODEL
from .user import get_sentinel_user
class Event(models.Model):
"""
Represents an application event
"""
class Meta:
ordering = ['-timestamp', '-pk']
timestamp = models.DateTimeField()
kind = models.IntegerField(EventKind.choices)
object_kind = models.IntegerField()
event_kind = models.IntegerField()
object_id = models.IntegerField(null=True)
project_id = models.IntegerField(null=True)
video_id = models.IntegerField(null=True)
meta = models.TextField(null=True, blank=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
null=True,
blank=True,
related_name="actions",
on_delete=models.SET(get_sentinel_user),
db_constraint=False)
@property
def object_type(self):
"""
Gets the :class:`greenday_core.constants.EventModel <greenday_core.constants.EventModel>`
enum type of this object
"""
        return EventModel(self.kind // CODES_PER_MODEL).name
@property
def event_type(self):
"""
Gets the :class:`greenday_core.constants.EventKind <greenday_core.constants.EventKind>`
enum type of this object
"""
code = self.kind % CODES_PER_MODEL
try:
common_code = EventCommonCodes(code)
except ValueError:
return EventKind(self.kind).name
else:
return common_code.name
def to_dict(self):
"""
Return dict of the object's field data
"""
d = model_to_dict(self)
d.update({
"object_type": self.object_type,
"event_type": self.event_type
})
return d
def save(self, *args, **kwargs):
"""
Override save to get the `object_kind` and `event_kind` from the compound `kind` field
"""
self.object_kind, self.event_kind = divmod(self.kind, CODES_PER_MODEL)
return super(Event, self).save(*args, **kwargs)
def __repr__(self):
return unicode(self)
def __unicode__(self):
        return "{kind}(id={id}, project_id={project_id}) \
by {email} at {timestamp}".format(
kind=EventKind(self.kind).name,
id=self.object_id,
project_id=self.project_id,
email=self.user.email if self.user else "<none>",
timestamp=self.timestamp
)
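
# Worked example of the compound `kind` encoding handled in save() above
# (hypothetical numbers -- the real CODES_PER_MODEL value comes from
# ..constants): if CODES_PER_MODEL were 100 and kind were 203, then
# divmod(203, 100) == (2, 3), so object_kind would be 2 and event_kind 3;
# object_type and event_type map those numbers back onto the enum names.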
|
[
"django.db.models.TextField",
"django.forms.models.model_to_dict",
"django.db.models.IntegerField",
"django.db.models.SET",
"django.db.models.DateTimeField"
] |
[((443, 465), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (463, 465), False, 'from django.db import models\n'), ((477, 515), 'django.db.models.IntegerField', 'models.IntegerField', (['EventKind.choices'], {}), '(EventKind.choices)\n', (496, 515), False, 'from django.db import models\n'), ((534, 555), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (553, 555), False, 'from django.db import models\n'), ((573, 594), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (592, 594), False, 'from django.db import models\n'), ((611, 641), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (630, 641), False, 'from django.db import models\n'), ((659, 689), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (678, 689), False, 'from django.db import models\n'), ((705, 735), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (724, 735), False, 'from django.db import models\n'), ((747, 786), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (763, 786), False, 'from django.db import models\n'), ((1804, 1823), 'django.forms.models.model_to_dict', 'model_to_dict', (['self'], {}), '(self)\n', (1817, 1823), False, 'from django.forms.models import model_to_dict\n'), ((940, 969), 'django.db.models.SET', 'models.SET', (['get_sentinel_user'], {}), '(get_sentinel_user)\n', (950, 969), False, 'from django.db import models\n')]
|
#!/usr/bin/env python
from pysphere import VIServer
server = VIServer()
server.connect("my.esx.host.example.org", "username", "secret")
vm = server.get_vm_by_path("[datastore] path/to/file.vmx")
vm.wait_for_tools()
vm.login_in_guest("Administrator", "secret")
vm.get_screenshot("vm_screenshot.png", overwrite=True)
server.disconnect()
|
[
"pysphere.VIServer"
] |
[((62, 72), 'pysphere.VIServer', 'VIServer', ([], {}), '()\n', (70, 72), False, 'from pysphere import VIServer\n')]
|
import os
import shutil
from ._mworks import ReservedEventCode, _MWKFile, _MWKStream
class IndexingException(IOError):
pass
class MWKFile(_MWKFile):
def __init__(self, file_name):
super(MWKFile, self).__init__(file_name)
self._codec = None
self._reverse_codec = None
def close(self):
super(MWKFile, self).close()
self._codec = None
self._reverse_codec = None
def __enter__(self):
self.open()
return self
def __exit__(self, type, value, tb):
self.close()
@property
def exists(self):
return os.path.exists(self.file)
def _prepare_events_iter(self, codes=(), time_range=(None, None)):
if not codes:
codes = []
else:
reverse_codec = self.reverse_codec
codes = [reverse_codec.get(c, c) for c in codes]
min_time, max_time = time_range
if min_time is None:
min_time = self.minimum_time
if max_time is None:
max_time = self.maximum_time
self._select_events(codes, min_time, max_time)
def get_events_iter(self, **kwargs):
self._prepare_events_iter(**kwargs)
while True:
evt = self._get_next_event()
if evt.empty:
break
yield evt
def get_events(self, **kwargs):
self._prepare_events_iter(**kwargs)
return self._get_events()
@property
def codec(self):
if self._codec is not None:
return self._codec
self._select_events([ReservedEventCode.RESERVED_CODEC_CODE],
self.minimum_time,
self.maximum_time)
e = self._get_next_event()
if e.empty:
self._codec = {}
return self._codec
raw_codec = e.value
codec = dict((key, raw_codec[key]["tagname"]) for key in raw_codec)
self._codec = codec
return codec
@property
def reverse_codec(self):
if self._reverse_codec is not None:
return self._reverse_codec
rc = dict((v, k) for k, v in self.codec.items())
self._reverse_codec = rc
return rc
def reindex(self):
self.close()
self.unindex()
self.open()
# erases all contents in the directory except the original mwk file.
def _empty_dir(self): # original DDC's unindex().
if(os.path.isdir(self.file)):
split_file_name = os.path.split(self.file)
file_name = split_file_name[-1:][0]
parent_path = os.pathsep.join(split_file_name[0:-1])
true_mwk_file = os.path.join(self.file, file_name)
#print "parent_path: ", parent_path
#print "file_name: ", file_name
#print "true_mwk_file; ", true_mwk_file
aside_path = os.path.join(parent_path, file_name + ".aside")
os.rename(self.file, aside_path)
#print "rename %s to %s" % ( self.file, aside_path)
os.rename(os.path.join(aside_path, file_name), os.path.join(parent_path,file_name) )
#print "rename %s to %s" % ( os.path.join(aside_path, file_name), os.path.join(parent_path,file_name) )
shutil.rmtree(aside_path, True) # del tree ignoring errors
#print "remove %s" % aside_path
else:
raise IndexingException("Attempt to re-index a file that has not yet been indexed")
def unindex(self, empty_dir=True):
if empty_dir: # erase all files except .mwk
self._empty_dir()
return True
if not os.path.isdir(self.file): return False
# only erase the .idx file
file_name = os.path.basename(self.file)
idx_file = os.path.join(self.file, file_name + '.idx')
if os.path.isfile(idx_file):
os.remove(idx_file)
return True
else:
return False
class MWKStream(_MWKStream):
@classmethod
def _create_file(cls, filename):
super(MWKStream, cls)._create_file(filename)
return cls.open_file(filename, _writable=True)
@classmethod
def open_file(cls, filename, _writable=False):
uri = ('ldobinary:file%s://%s' %
(('' if _writable else '_readonly'), filename))
stream = cls(uri)
stream.open()
return stream
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def __iter__(self):
while True:
try:
yield self._read_event()
except EOFError:
break
def read_event(self):
try:
return self._read_event()
except EOFError:
pass
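
# Hedged usage sketch (assumes an already-indexed .mwk file on disk; the file
# name and event code are illustrative, not taken from this module):
#
#   with MWKFile('session.mwk') as f:
#       print(f.codec)                          # {code: tagname, ...}
#       for evt in f.get_events_iter(codes=['trial_start']):
#           ...                                 # process each event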
|
[
"os.remove",
"os.pathsep.join",
"os.path.basename",
"os.path.isdir",
"os.rename",
"os.path.exists",
"os.path.isfile",
"shutil.rmtree",
"os.path.split",
"os.path.join"
] |
[((608, 633), 'os.path.exists', 'os.path.exists', (['self.file'], {}), '(self.file)\n', (622, 633), False, 'import os\n'), ((2478, 2502), 'os.path.isdir', 'os.path.isdir', (['self.file'], {}), '(self.file)\n', (2491, 2502), False, 'import os\n'), ((3861, 3888), 'os.path.basename', 'os.path.basename', (['self.file'], {}), '(self.file)\n', (3877, 3888), False, 'import os\n'), ((3908, 3951), 'os.path.join', 'os.path.join', (['self.file', "(file_name + '.idx')"], {}), "(self.file, file_name + '.idx')\n", (3920, 3951), False, 'import os\n'), ((3963, 3987), 'os.path.isfile', 'os.path.isfile', (['idx_file'], {}), '(idx_file)\n', (3977, 3987), False, 'import os\n'), ((2535, 2559), 'os.path.split', 'os.path.split', (['self.file'], {}), '(self.file)\n', (2548, 2559), False, 'import os\n'), ((2634, 2672), 'os.pathsep.join', 'os.pathsep.join', (['split_file_name[0:-1]'], {}), '(split_file_name[0:-1])\n', (2649, 2672), False, 'import os\n'), ((2714, 2748), 'os.path.join', 'os.path.join', (['self.file', 'file_name'], {}), '(self.file, file_name)\n', (2726, 2748), False, 'import os\n'), ((2949, 2996), 'os.path.join', 'os.path.join', (['parent_path', "(file_name + '.aside')"], {}), "(parent_path, file_name + '.aside')\n", (2961, 2996), False, 'import os\n'), ((3022, 3054), 'os.rename', 'os.rename', (['self.file', 'aside_path'], {}), '(self.file, aside_path)\n', (3031, 3054), False, 'import os\n'), ((3370, 3401), 'shutil.rmtree', 'shutil.rmtree', (['aside_path', '(True)'], {}), '(aside_path, True)\n', (3383, 3401), False, 'import shutil\n'), ((3766, 3790), 'os.path.isdir', 'os.path.isdir', (['self.file'], {}), '(self.file)\n', (3779, 3790), False, 'import os\n'), ((4001, 4020), 'os.remove', 'os.remove', (['idx_file'], {}), '(idx_file)\n', (4010, 4020), False, 'import os\n'), ((3154, 3189), 'os.path.join', 'os.path.join', (['aside_path', 'file_name'], {}), '(aside_path, file_name)\n', (3166, 3189), False, 'import os\n'), ((3191, 3227), 'os.path.join', 'os.path.join', (['parent_path', 'file_name'], {}), '(parent_path, file_name)\n', (3203, 3227), False, 'import os\n')]
|
# Importing modules
from decimal import Decimal
from PyInquirer import prompt
from termcolor import colored
from .get import get
# The function to mint tokens
def mint(currency_module) -> None:
mint_data = prompt([
{
'type': 'input',
'name': 'amount',
'message': 'Enter the amount of tokens to mint',
'default': "1",
},
{
'type': 'confirm',
'name': 'confirmation',
'message': 'Do you want to mint the selected tokens?',
'default': False,
},
])
amount = Decimal(mint_data['amount']) * \
(10 ** get(currency_module)['decimals'])
if mint_data['confirmation']:
try:
currency_module.mint(int(amount))
print(colored('Tokens minted successfully!', 'green'))
except Exception as e:
            print(colored('Tokens could not be minted. \n' + str(e), 'red'))
else:
print(colored('Tokens not minted!', 'blue'))
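
# Scaling sketch (illustrative only -- the real `decimals` value is read from
# get(currency_module)): if the user enters "1.5" and the token reports 18
# decimals, amount == Decimal("1.5") * 10**18 == 1500000000000000000, and that
# integer is what gets passed to currency_module.mint().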
|
[
"termcolor.colored",
"PyInquirer.prompt",
"decimal.Decimal"
] |
[((213, 454), 'PyInquirer.prompt', 'prompt', (["[{'type': 'input', 'name': 'amount', 'message':\n 'Enter the amount of tokens to mint', 'default': '1'}, {'type':\n 'confirm', 'name': 'confirmation', 'message':\n 'Do you want to mint the selected tokens?', 'default': False}]"], {}), "([{'type': 'input', 'name': 'amount', 'message':\n 'Enter the amount of tokens to mint', 'default': '1'}, {'type':\n 'confirm', 'name': 'confirmation', 'message':\n 'Do you want to mint the selected tokens?', 'default': False}])\n", (219, 454), False, 'from PyInquirer import prompt\n'), ((598, 626), 'decimal.Decimal', 'Decimal', (["mint_data['amount']"], {}), "(mint_data['amount'])\n", (605, 626), False, 'from decimal import Decimal\n'), ((970, 1007), 'termcolor.colored', 'colored', (['"""Tokens not minted!"""', '"""blue"""'], {}), "('Tokens not minted!', 'blue')\n", (977, 1007), False, 'from termcolor import colored\n'), ((792, 839), 'termcolor.colored', 'colored', (['"""Tokens minted successfully!"""', '"""green"""'], {}), "('Tokens minted successfully!', 'green')\n", (799, 839), False, 'from termcolor import colored\n'), ((891, 943), 'termcolor.colored', 'colored', (["('Tokens could not be minted. \\n' + e)", '"""red"""'], {}), "('Tokens could not be minted. \\n' + e, 'red')\n", (898, 943), False, 'from termcolor import colored\n')]
|
import re
import wordMaps
class WordSplitter:
def __init__(self):
self.word_mapper = wordMaps.WordMaps()
self.word_breaker = re.compile(r"([^\W\d]*)", re.MULTILINE)
self.sentence_breaker = re.compile(r"((?!=|\!|\.|\?).)+.\b", re.MULTILINE)
self.swaps = {}
self.contexts = {}
def swap(self, text_block=""):
new_text_block = []
sentence_end_position = 0
for sentence_match in self.sentence_breaker.finditer(text_block):
# Run through word based swaps first
new_sentence = []
sentence_span = sentence_match.span()
# Collect Punctuation and whitespace till next sentence
if sentence_span[0] != sentence_end_position:
new_text_block.append(text_block[sentence_end_position:sentence_span[0]])
# sentence_end_position = sentence_span[0]
source_sentence = text_block[sentence_span[0]:sentence_span[1]]
last_word_position = 0
for word_match in self.word_breaker.finditer(source_sentence):
word_span = word_match.span()
if word_span[0] == word_span[1]:
# blank match. TODO improve regex
continue
# Collect Whitespace till next match
if word_span[0] != last_word_position:
new_sentence.append(source_sentence[last_word_position:word_span[0]])
source_word = source_sentence[word_span[0]:word_span[1]]
word = self.word_mapper.swap(source_word)
new_sentence.append(word)
last_word_position = word_span[1]
if word != source_word:
self.swaps[source_word] = word
if source_sentence not in self.contexts:
self.contexts[source_sentence] = []
self.contexts[source_sentence].append({'source': source_word, 'flip': word, })
new_text_block.append("".join(new_sentence))
sentence_end_position = sentence_span[1]
# Collect final punctuations
if sentence_end_position != len(text_block):
new_text_block.append(text_block[sentence_end_position:])
return "".join(new_text_block)
|
[
"wordMaps.WordMaps",
"re.compile"
] |
[((100, 119), 'wordMaps.WordMaps', 'wordMaps.WordMaps', ([], {}), '()\n', (117, 119), False, 'import wordMaps\n'), ((149, 189), 're.compile', 're.compile', (['"""([^\\\\W\\\\d]*)"""', 're.MULTILINE'], {}), "('([^\\\\W\\\\d]*)', re.MULTILINE)\n", (159, 189), False, 'import re\n'), ((221, 274), 're.compile', 're.compile', (['"""((?!=|\\\\!|\\\\.|\\\\?).)+.\\\\b"""', 're.MULTILINE'], {}), "('((?!=|\\\\!|\\\\.|\\\\?).)+.\\\\b', re.MULTILINE)\n", (231, 274), False, 'import re\n')]
|
#! /usr/bin/env python3
import prime
from memo import memoize
description = '''
Product-sum numbers
Problem 88
A natural number, N, that can be written as the sum and product of a given set of at least two natural numbers, {a1, a2, ... , ak} is called a product-sum number: N = a1 + a2 + ... + ak = a1 × a2 × ... × ak.
For example, 6 = 1 + 2 + 3 = 1 × 2 × 3.
For a given set of size, k, we shall call the smallest N with this property a minimal product-sum number. The minimal product-sum numbers for sets of size, k = 2, 3, 4, 5, and 6 are as follows.
k=2: 4 = 2 × 2 = 2 + 2
k=3: 6 = 1 × 2 × 3 = 1 + 2 + 3
k=4: 8 = 1 × 1 × 2 × 4 = 1 + 1 + 2 + 4
k=5: 8 = 1 × 1 × 2 × 2 × 2 = 1 + 1 + 2 + 2 + 2
k=6: 12 = 1 × 1 × 1 × 1 × 2 × 6 = 1 + 1 + 1 + 1 + 2 + 6
Hence for 2≤k≤6, the sum of all the minimal product-sum numbers is 4+6+8+12 = 30; note that 8 is only counted once in the sum.
In fact, as the complete set of minimal product-sum numbers for 2≤k≤12 is {4, 6, 8, 12, 15, 16}, the sum is 61.
What is the sum of all the minimal product-sum numbers for 2≤k≤12000?
'''
def firstFactor(n):
for p in prime.primes():
if n % p == 0: return p
def replaceTupleElement(tup, idx, val):
return tuple((x if j != idx else val) for (j,x) in enumerate(tup))
@memoize
def factorizations(n):
# inner generator, yields factorizations, but some may be duplicates
def facgen(n):
# base case: prime or 1, only 1 factorization
if n == 1 or prime.isPrime(n):
yield (n,)
return
# recursive case: peel off the first factor of n,
# then combine with all factorizations of n/factor
# Use memoization to avoid repeated factorization of previously seen numbers
factor = firstFactor(n)
for subfac in factorizations(n // factor):
# combine by appending
yield (factor,) + subfac
# combine by multiplying each element of subfactor by the first factor
for i in range(0, len(subfac)):
yield replaceTupleElement(subfac, i, subfac[i] * factor)
# outer function collects all generated factorizations, returns the unique set
return set(tuple(sorted(fac)) for fac in facgen(n))
def sumMinProductSums(maxK):
# the minimum N that is a product-sum number for set size K
minimums = [0] * (maxK + 1)
# min product-sum number for K cannot be greater than 2K
for n in range(2, 2*maxK):
# Test each factorization of n, for potential sum-product solutions
for factorization in factorizations(n):
# sum(factorization) = product(factorization), use 1's to pad the sum until both sides equal
# sum(factorization) + padding1s = n
# => padding1s = n - sum(factorization)
# => k = len(factorization) + padding1s
# = len(factorization) + n - sum(factorization)
k = len(factorization) + n - sum(factorization)
# test and update the minimum
if k <= 1 or k >= len(minimums): continue
if minimums[k] == 0 or n < minimums[k]:
minimums[k] = n
# sum the unique n's that were minimum product-sum numbers
return sum(set(minimums))
print('sum:', sumMinProductSums(12000))
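
# Sanity checks taken from the problem statement above (left as comments so the
# script's output is unchanged): sumMinProductSums(6) should return 30, since
# the minimal product-sum numbers for 2<=k<=6 are {4, 6, 8, 12}, and
# sumMinProductSums(12) should return 61 for the set {4, 6, 8, 12, 15, 16}.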
|
[
"prime.isPrime",
"prime.primes"
] |
[((1107, 1121), 'prime.primes', 'prime.primes', ([], {}), '()\n', (1119, 1121), False, 'import prime\n'), ((1451, 1467), 'prime.isPrime', 'prime.isPrime', (['n'], {}), '(n)\n', (1464, 1467), False, 'import prime\n')]
|
import warnings
import numpy as np
from magicgui.widgets import Table
from napari_plugin_engine import napari_hook_implementation
from napari.types import ImageData, LabelsData, LayerDataTuple
from napari import Viewer
from pandas import DataFrame
from qtpy.QtCore import QTimer
from qtpy.QtWidgets import QTableWidget, QTableWidgetItem, QWidget, QGridLayout, QPushButton, QFileDialog
import pyclesperanto_prototype as cle
import napari
from napari_tools_menu import register_function
@register_function(menu="Measurement > Statistics of labeled pixels (clEsperanto)")
def statistics_of_labeled_pixels(image: ImageData, labels_layer: napari.layers.Labels, napari_viewer : Viewer, measure_background=False):
"""
    Adds a table widget to a given napari viewer with quantitative analysis results derived from an image and a corresponding label image.
"""
labels = labels_layer.data
if image is not None and labels is not None:
# quantitative analysis using clEsperanto's statistics_of_labelled_pixels
if measure_background:
table = cle.statistics_of_background_and_labelled_pixels(image, labels)
else:
table = cle.statistics_of_labelled_pixels(image, labels)
# Store results in the properties dictionary:
labels_layer.properties = table
# turn table into a widget
from napari_skimage_regionprops import add_table
add_table(labels_layer, napari_viewer)
else:
warnings.warn("Image and labels must be set.")
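
# Rough usage sketch (assumptions: an open napari viewer that already holds an
# image layer and a labels layer; variable names are illustrative):
#
#   viewer = napari.Viewer()
#   image_layer = viewer.add_image(my_image)        # my_image: ndarray
#   labels_layer = viewer.add_labels(my_labels)     # my_labels: integer ndarray
#   statistics_of_labeled_pixels(image_layer.data, labels_layer, viewer)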
|
[
"napari_skimage_regionprops.add_table",
"pyclesperanto_prototype.statistics_of_background_and_labelled_pixels",
"warnings.warn",
"pyclesperanto_prototype.statistics_of_labelled_pixels",
"napari_tools_menu.register_function"
] |
[((489, 576), 'napari_tools_menu.register_function', 'register_function', ([], {'menu': '"""Measurement > Statistics of labeled pixels (clEsperanto)"""'}), "(menu=\n 'Measurement > Statistics of labeled pixels (clEsperanto)')\n", (506, 576), False, 'from napari_tools_menu import register_function\n'), ((1407, 1445), 'napari_skimage_regionprops.add_table', 'add_table', (['labels_layer', 'napari_viewer'], {}), '(labels_layer, napari_viewer)\n', (1416, 1445), False, 'from napari_skimage_regionprops import add_table\n'), ((1464, 1510), 'warnings.warn', 'warnings.warn', (['"""Image and labels must be set."""'], {}), "('Image and labels must be set.')\n", (1477, 1510), False, 'import warnings\n'), ((1064, 1127), 'pyclesperanto_prototype.statistics_of_background_and_labelled_pixels', 'cle.statistics_of_background_and_labelled_pixels', (['image', 'labels'], {}), '(image, labels)\n', (1112, 1127), True, 'import pyclesperanto_prototype as cle\n'), ((1162, 1210), 'pyclesperanto_prototype.statistics_of_labelled_pixels', 'cle.statistics_of_labelled_pixels', (['image', 'labels'], {}), '(image, labels)\n', (1195, 1210), True, 'import pyclesperanto_prototype as cle\n')]
|
"""Run model ensemble
The canonical form of `job run` is:
job run [OPTIONS] -- EXECUTABLE [OPTIONS]
where `EXECUTABLE` is your model executable or a command, followed by its
arguments. Note the `--` that separates `job run` arguments `OPTIONS` from the
executable. When there is no ambiguity in the command-line arguments (as seen
by python's argparse) it may be dropped. `job run` options determine in which
manner to run the model, which parameter values to vary (the ensemble), and how
to communicate these parameter values to the model.
"""
examples="""
Examples
--------
job run -p a=2,3,4 b=0,1 -o out --shell -- echo --a {a} --b {b} --out {}
--a 2 --b 0 --out out/0
--a 2 --b 1 --out out/1
--a 3 --b 0 --out out/2
--a 3 --b 1 --out out/3
--a 4 --b 0 --out out/4
--a 4 --b 1 --out out/5
The command above runs an ensemble of 6 model versions, by calling `echo --a {a}
--b {b} --out {}` where `{a}`, `{b}` and `{}` are formatted using runtime with
parameter and run directory values, as displayed in the output above. Parameters can also be provided as a file:
job run -p a=2,3,4 b=0,1 -o out --file-name "params.txt" --file-type "linesep" --line-sep " " --shell cat {}/params.txt
a 2
b 0
a 2
b 1
a 3
b 0
a 3
b 1
a 4
b 0
a 4
b 1
Where UNIX `cat` command displays file content into the terminal. File types
that involve grouping, such as namelist, require a group prefix with a `.`
separator in the parameter name:
job run -p g1.a=0,1 g2.b=2. -o out --file-name "params.txt" --file-type "namelist" --shell cat {}/params.txt
&g1
a = 0
/
&g2
b = 2.0
/
&g1
a = 1
/
&g2
b = 2.0
/
"""
import argparse
import tempfile
import numpy as np
from runner.param import MultiParam, DiscreteParam
from runner.model import Model
#from runner.xparams import XParams
from runner.xrun import XParams, XRun, XPARAM
from runner.job.model import interface
from runner.job.config import ParserIO, program
import os
EXPCONFIG = 'experiment.json'
EXPDIR = 'out'
# run
# ---
def parse_slurm_array_indices(a):
indices = []
for i in a.split(","):
if '-' in i:
if ':' in i:
i, step = i.split(':')
step = int(step)
else:
step = 1
start, stop = i.split('-')
start = int(start)
stop = int(stop) + 1 # last index is ignored in python
indices.extend(range(start, stop, step))
else:
indices.append(int(i))
return indices
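
# Examples of the accepted slurm-style syntax (mirrors the --id help text
# below): parse_slurm_array_indices("0-4:2,7") returns [0, 2, 4, 7], and
# parse_slurm_array_indices("1,3") returns [1, 3].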
def _typechecker(type):
def check(string):
try:
type(string) # just a check
except Exception as error:
print('ERROR:', str(error))
raise
        return string
    return check
submit = argparse.ArgumentParser(add_help=False)
grp = submit.add_argument_group("simulation modes")
#grp.add_argument('--batch-script', help='')
#x = grp.add_mutually_exclusive_group()
grp.add_argument('--max-workers', type=int,
help="number of workers for parallel processing (need to be allocated, e.g. via sbatch) -- default to the number of runs")
grp.add_argument('-t', '--timeout', type=float, default=31536000, help='timeout in seconds (default to %(default)s)')
grp.add_argument('--shell', action='store_true',
help='print output to terminal instead of log file, run sequentially, mostly useful for testing/debugging')
grp.add_argument('--echo', action='store_true',
help='display commands instead of running them (but does setup output directory). Alias for --shell --force echo [model args ...]')
#grp.add_argument('-b', '--array', action='store_true',
# help='submit using sbatch --array (faster!), EXPERIMENTAL)')
grp.add_argument('-f', '--force', action='store_true',
help='perform run even if params.txt already exists directory')
folders = argparse.ArgumentParser(add_help=False)
grp = folders.add_argument_group("simulation settings")
grp.add_argument('-o','--out-dir', default=EXPDIR, dest='expdir',
help='experiment directory \
(params.txt and logs/ will be created, as well as individual model output directories')
grp.add_argument('-a','--auto-dir', action='store_true',
help='run directory named according to parameter values instead of run `id`')
params_parser = argparse.ArgumentParser(add_help=False)
x = params_parser.add_mutually_exclusive_group()
x.add_argument('-p', '--params',
type=DiscreteParam.parse,
help="""Param values to combine.
SPEC specifies discrete parameter values
as a comma-separated list `VALUE[,VALUE...]`
or a range `START:STOP:N`.""",
metavar="NAME=SPEC",
nargs='*')
x.add_argument('-i','--params-file', help='ensemble parameters file')
x.add_argument('--continue', dest="continue_simu", action='store_true',
help=argparse.SUPPRESS)
#help='load params.txt from simulation directory')
params_parser.add_argument('-j','--id', type=_typechecker(parse_slurm_array_indices), dest='runid',
metavar="I,J...,START-STOP:STEP,...",
help='select one or several ensemble members (0-based !), \
slurm sbatch --array syntax, e.g. `0,2,4` or `0-4:2` \
or a combination of these, `0,2,4,5` <==> `0-4:2,5`')
params_parser.add_argument('--include-default',
action='store_true',
help='also run default model version (with no parameters)')
#grp = output_parser.add_argument_group("model output",
# description='model output variables')
#grp.add_argument("-v", "--output-variables", nargs='+', default=[],
# help='list of state variables to include in output.txt')
#
#grp.add_argument('-l', '--likelihood',
# type=ScipyParam.parse,
# help='distribution, to compute weights',
# metavar="NAME=DIST",
# default = [],
# nargs='+')
parser = argparse.ArgumentParser(parents=[interface.parser, params_parser, folders, submit], epilog=examples, description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
runio = interface.join(ParserIO(folders)) # interface + folder: saveit
@program(parser)
def main(o):
if o.echo:
o.model = ['echo'] + o.model
o.shell = True
o.force = True
model = Model(interface.get(o))
pfile = os.path.join(o.expdir, XPARAM)
if o.continue_simu:
o.params_file = pfile
o.force = True
if o.params_file:
xparams = XParams.read(o.params_file)
elif o.params:
prior = MultiParam(o.params)
xparams = prior.product() # only product allowed as direct input
#update = {p.name:p.value for p in o.params}
else:
xparams = XParams(np.empty((0,0)), names=[])
o.include_default = True
xrun = XRun(model, xparams, expdir=o.expdir, autodir=o.auto_dir, max_workers=o.max_workers, timeout=o.timeout)
# create dir, write params.txt file, as well as experiment configuration
try:
if not o.continue_simu:
xrun.setup(force=o.force)
except RuntimeError as error:
print("ERROR :: "+str(error))
print("Use -f/--force to bypass this check")
parser.exit(1)
#write_config(vars(o), os.path.join(o.expdir, EXPCONFIG), parser=experiment)
runio.dump(o, open(os.path.join(o.expdir, EXPCONFIG),'w'))
if o.runid:
indices = parse_slurm_array_indices(o.runid)
else:
indices = np.arange(xparams.size)
if o.include_default:
indices = list(indices) + [None]
# test: run everything serially
if o.shell:
for i in indices:
xrun[i].run(background=False)
# the default
else:
xrun.run(indices=indices)
return
main.register('run', help='run model (single version or ensemble)')
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"numpy.empty",
"runner.param.MultiParam",
"runner.xrun.XParams.read",
"runner.job.config.program",
"runner.job.config.ParserIO",
"numpy.arange",
"runner.job.model.interface.get",
"os.path.join",
"runner.xrun.XRun"
] |
[((2929, 2968), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (2952, 2968), False, 'import argparse\n'), ((4063, 4102), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (4086, 4102), False, 'import argparse\n'), ((4548, 4587), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (4571, 4587), False, 'import argparse\n'), ((6271, 6455), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'parents': '[interface.parser, params_parser, folders, submit]', 'epilog': 'examples', 'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(parents=[interface.parser, params_parser, folders,\n submit], epilog=examples, description=__doc__, formatter_class=argparse\n .RawDescriptionHelpFormatter)\n', (6294, 6455), False, 'import argparse\n'), ((6522, 6537), 'runner.job.config.program', 'program', (['parser'], {}), '(parser)\n', (6529, 6537), False, 'from runner.job.config import ParserIO, program\n'), ((6471, 6488), 'runner.job.config.ParserIO', 'ParserIO', (['folders'], {}), '(folders)\n', (6479, 6488), False, 'from runner.job.config import ParserIO, program\n'), ((6700, 6730), 'os.path.join', 'os.path.join', (['o.expdir', 'XPARAM'], {}), '(o.expdir, XPARAM)\n', (6712, 6730), False, 'import os\n'), ((7169, 7277), 'runner.xrun.XRun', 'XRun', (['model', 'xparams'], {'expdir': 'o.expdir', 'autodir': 'o.auto_dir', 'max_workers': 'o.max_workers', 'timeout': 'o.timeout'}), '(model, xparams, expdir=o.expdir, autodir=o.auto_dir, max_workers=o.\n max_workers, timeout=o.timeout)\n', (7173, 7277), False, 'from runner.xrun import XParams, XRun, XPARAM\n'), ((6669, 6685), 'runner.job.model.interface.get', 'interface.get', (['o'], {}), '(o)\n', (6682, 6685), False, 'from runner.job.model import interface\n'), ((6850, 6877), 'runner.xrun.XParams.read', 'XParams.read', (['o.params_file'], {}), '(o.params_file)\n', (6862, 6877), False, 'from runner.xrun import XParams, XRun, XPARAM\n'), ((7822, 7845), 'numpy.arange', 'np.arange', (['xparams.size'], {}), '(xparams.size)\n', (7831, 7845), True, 'import numpy as np\n'), ((6914, 6934), 'runner.param.MultiParam', 'MultiParam', (['o.params'], {}), '(o.params)\n', (6924, 6934), False, 'from runner.param import MultiParam, DiscreteParam\n'), ((7684, 7717), 'os.path.join', 'os.path.join', (['o.expdir', 'EXPCONFIG'], {}), '(o.expdir, EXPCONFIG)\n', (7696, 7717), False, 'import os\n'), ((7097, 7113), 'numpy.empty', 'np.empty', (['(0, 0)'], {}), '((0, 0))\n', (7105, 7113), True, 'import numpy as np\n')]
|
from datetime import date
now = date.today()
print(now)
print(now.strftime("%m-%d-%y. %d %b %Y is a %A on the %d day of %B"))
past = date(2003, 12, 2)
print(past)
print(past.strftime("%m-%d-%y. %d %b %Y is a %A on the %d day of %B"))
birthday = date(1964, 7, 31)
age = now - birthday
print(age.days)
|
[
"datetime.date",
"datetime.date.today"
] |
[((33, 45), 'datetime.date.today', 'date.today', ([], {}), '()\n', (43, 45), False, 'from datetime import date\n'), ((135, 152), 'datetime.date', 'date', (['(2003)', '(12)', '(2)'], {}), '(2003, 12, 2)\n', (139, 152), False, 'from datetime import date\n'), ((248, 265), 'datetime.date', 'date', (['(1964)', '(7)', '(31)'], {}), '(1964, 7, 31)\n', (252, 265), False, 'from datetime import date\n')]
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Tests for the CLI."""
from faker import Faker
from invenio_communities.fixtures.demo import create_fake_community
from invenio_communities.fixtures.tasks import create_demo_community
def test_fake_demo_community_creation(
app, db, location, es_clear, community_type_record
):
"""Assert that demo community creation works without failing."""
faker = Faker()
create_demo_community(create_fake_community(faker))
|
[
"faker.Faker",
"invenio_communities.fixtures.demo.create_fake_community"
] |
[((603, 610), 'faker.Faker', 'Faker', ([], {}), '()\n', (608, 610), False, 'from faker import Faker\n'), ((637, 665), 'invenio_communities.fixtures.demo.create_fake_community', 'create_fake_community', (['faker'], {}), '(faker)\n', (658, 665), False, 'from invenio_communities.fixtures.demo import create_fake_community\n')]
|
from dakota_class import DakotaClass
from exceptions import *
import unittest
import xarray as xr
import numpy as np
import os
class TestDakotaClass(unittest.TestCase):
# Try and create an instance of the dakota class
def test_create_dakota_template(self):
my_dakota = DakotaClass()
self.assertEqual( my_dakota.dakota.get_attribute('evaluation_concurrency'), 1 )
self.assertEqual( my_dakota.dakota.get_attribute('response_functions'), 1 )
def test_add_run_settings(self):
attrs = { 'sample_type':'sampling', 'seed':54 }
new_settings = xr.Dataset(attrs=attrs)
my_dakota = DakotaClass()
my_dakota.update_settings(new_settings)
self.assertEqual( my_dakota.dakota.get_attribute('sample_type').strip(), 'sampling' )
self.assertEqual( my_dakota.dakota.get_attribute('seed'), 54 )
def test_add_common_variable(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
means = xr.DataArray( data=means, dims='T' )
sds = xr.DataArray( data=sds, dims='T' )
test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('means'), means ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('std_deviations'), sds ) )
def test_add_lognormal_variable(self):
attrs = { 'type':'lognormal' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
means = xr.DataArray( data=means, dims='T' )
sds = xr.DataArray( data=sds, dims='T' )
test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('means'), means ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('std_deviations'), sds ) )
def test_add_scan_variable(self):
attrs = { 'type':'scan' }
lower = [ 0.1,0.2,0.3,0.4 ]
upper = [ 1.0,2.0,3.0,4.0 ]
partitions = [ 2,3,4,5 ]
lower = xr.DataArray( data=lower, dims='T' )
upper = xr.DataArray( data=upper, dims='T' )
partitions = xr.DataArray( data=partitions, dims='T' )
test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('lower_bounds'), lower ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('upper_bounds'), upper ) )
def test_add_correlated_scan_variable(self):
attrs = { 'type':'scan_correlated' }
lower = [ 0.1,0.2,0.3,0.4 ]
upper = [ 1.0,2.0,3.0,4.0 ]
partitions = [ 4,4,4,4 ]
lower = xr.DataArray( data=lower, dims='T' )
upper = xr.DataArray( data=upper, dims='T' )
partitions = xr.DataArray( data=partitions, dims='T' )
test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
my_dakota = DakotaClass()
my_dakota.add_variable('test_var', test_var)
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('lower_bounds'), [0.0] ) )
self.assertTrue( np.array_equal( my_dakota.dakota.get_attribute('upper_bounds'), [1.0] ) )
def test_write_dakote_file(self):
my_dakota = DakotaClass()
my_dakota.write_input_file('test_dakota.dat')
self.assertTrue( os.path.isfile('test_dakota.dat') )
os.remove('test_dakota.dat')
######################################################
# FAILURE TESTS
######################################################
def test_add_variable_not_dataset(self):
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
test_var = {'means':means, 'std_deviations':sds }
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_with_no_type(self):
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
means = xr.DataArray( data=means, dims='T' )
sds = xr.DataArray( data=sds, dims='T' )
test_var = xr.Dataset( {'means':means, 'std_deviations':sds } )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_unknown_type(self):
attrs = { 'type':'unknown' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
test_var = xr.Dataset( {'means':means, 'std_deviations':sds }, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_missing_data(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,3.0,4.0 ]
test_var = xr.Dataset( {'means':means}, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_incompatible_data(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,3.0,4.0 ]
sds = [ 0.1,0.2,0.3,0.4,0.5 ]
test_var = xr.Dataset( {'means':means, 'std_deviations':sds}, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_variable_with_nans(self):
attrs = { 'type':'normal' }
means = [ 1.0,2.0,np.nan,4.0 ]
sds = [ 0.1,0.2,0.3,0.4 ]
test_var = xr.Dataset( {'means':means, 'std_deviations':sds}, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
def test_add_correlated_scan_variable_with_inconsistent_partitions(self):
attrs = { 'type':'scan_correlated' }
lower = [ 0.1,0.2,0.3,0.4 ]
upper = [ 1.0,2.0,3.0,4.0 ]
partitions = [ 4,5,4,4 ]
lower = xr.DataArray( data=lower, dims='T' )
upper = xr.DataArray( data=upper, dims='T' )
partitions = xr.DataArray( data=partitions, dims='T' )
test_var = xr.Dataset( {'lower_bounds':lower, 'upper_bounds':upper, 'partitions':partitions }, attrs=attrs )
my_dakota = DakotaClass()
with self.assertRaises(DatasetError):
my_dakota.add_variable('test_var', test_var)
|
[
"os.remove",
"xarray.Dataset",
"os.path.isfile",
"xarray.DataArray",
"dakota_class.DakotaClass"
] |
[((288, 301), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (299, 301), False, 'from dakota_class import DakotaClass\n'), ((610, 633), 'xarray.Dataset', 'xr.Dataset', ([], {'attrs': 'attrs'}), '(attrs=attrs)\n', (620, 633), True, 'import xarray as xr\n'), ((663, 676), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (674, 676), False, 'from dakota_class import DakotaClass\n'), ((1074, 1108), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'means', 'dims': '"""T"""'}), "(data=means, dims='T')\n", (1086, 1108), True, 'import xarray as xr\n'), ((1127, 1159), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'sds', 'dims': '"""T"""'}), "(data=sds, dims='T')\n", (1139, 1159), True, 'import xarray as xr\n'), ((1192, 1256), 'xarray.Dataset', 'xr.Dataset', (["{'means': means, 'std_deviations': sds}"], {'attrs': 'attrs'}), "({'means': means, 'std_deviations': sds}, attrs=attrs)\n", (1202, 1256), True, 'import xarray as xr\n'), ((1287, 1300), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (1298, 1300), False, 'from dakota_class import DakotaClass\n'), ((1735, 1769), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'means', 'dims': '"""T"""'}), "(data=means, dims='T')\n", (1747, 1769), True, 'import xarray as xr\n'), ((1788, 1820), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'sds', 'dims': '"""T"""'}), "(data=sds, dims='T')\n", (1800, 1820), True, 'import xarray as xr\n'), ((1853, 1917), 'xarray.Dataset', 'xr.Dataset', (["{'means': means, 'std_deviations': sds}"], {'attrs': 'attrs'}), "({'means': means, 'std_deviations': sds}, attrs=attrs)\n", (1863, 1917), True, 'import xarray as xr\n'), ((1948, 1961), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (1959, 1961), False, 'from dakota_class import DakotaClass\n'), ((2418, 2452), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'lower', 'dims': '"""T"""'}), "(data=lower, dims='T')\n", (2430, 2452), True, 'import xarray as xr\n'), ((2471, 2505), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'upper', 'dims': '"""T"""'}), "(data=upper, dims='T')\n", (2483, 2505), True, 'import xarray as xr\n'), ((2529, 2568), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'partitions', 'dims': '"""T"""'}), "(data=partitions, dims='T')\n", (2541, 2568), True, 'import xarray as xr\n'), ((2599, 2700), 'xarray.Dataset', 'xr.Dataset', (["{'lower_bounds': lower, 'upper_bounds': upper, 'partitions': partitions}"], {'attrs': 'attrs'}), "({'lower_bounds': lower, 'upper_bounds': upper, 'partitions':\n partitions}, attrs=attrs)\n", (2609, 2700), True, 'import xarray as xr\n'), ((2726, 2739), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (2737, 2739), False, 'from dakota_class import DakotaClass\n'), ((3218, 3252), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'lower', 'dims': '"""T"""'}), "(data=lower, dims='T')\n", (3230, 3252), True, 'import xarray as xr\n'), ((3271, 3305), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'upper', 'dims': '"""T"""'}), "(data=upper, dims='T')\n", (3283, 3305), True, 'import xarray as xr\n'), ((3329, 3368), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'partitions', 'dims': '"""T"""'}), "(data=partitions, dims='T')\n", (3341, 3368), True, 'import xarray as xr\n'), ((3399, 3500), 'xarray.Dataset', 'xr.Dataset', (["{'lower_bounds': lower, 'upper_bounds': upper, 'partitions': partitions}"], {'attrs': 'attrs'}), "({'lower_bounds': lower, 'upper_bounds': upper, 'partitions':\n partitions}, attrs=attrs)\n", (3409, 3500), True, 'import xarray as xr\n'), ((3526, 
3539), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (3537, 3539), False, 'from dakota_class import DakotaClass\n'), ((3852, 3865), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (3863, 3865), False, 'from dakota_class import DakotaClass\n'), ((3989, 4017), 'os.remove', 'os.remove', (['"""test_dakota.dat"""'], {}), "('test_dakota.dat')\n", (3998, 4017), False, 'import os\n'), ((4360, 4373), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (4371, 4373), False, 'from dakota_class import DakotaClass\n'), ((4623, 4657), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'means', 'dims': '"""T"""'}), "(data=means, dims='T')\n", (4635, 4657), True, 'import xarray as xr\n'), ((4676, 4708), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'sds', 'dims': '"""T"""'}), "(data=sds, dims='T')\n", (4688, 4708), True, 'import xarray as xr\n'), ((4741, 4792), 'xarray.Dataset', 'xr.Dataset', (["{'means': means, 'std_deviations': sds}"], {}), "({'means': means, 'std_deviations': sds})\n", (4751, 4792), True, 'import xarray as xr\n'), ((4823, 4836), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (4834, 4836), False, 'from dakota_class import DakotaClass\n'), ((5126, 5190), 'xarray.Dataset', 'xr.Dataset', (["{'means': means, 'std_deviations': sds}"], {'attrs': 'attrs'}), "({'means': means, 'std_deviations': sds}, attrs=attrs)\n", (5136, 5190), True, 'import xarray as xr\n'), ((5221, 5234), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (5232, 5234), False, 'from dakota_class import DakotaClass\n'), ((5487, 5528), 'xarray.Dataset', 'xr.Dataset', (["{'means': means}"], {'attrs': 'attrs'}), "({'means': means}, attrs=attrs)\n", (5497, 5528), True, 'import xarray as xr\n'), ((5559, 5572), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (5570, 5572), False, 'from dakota_class import DakotaClass\n'), ((5870, 5934), 'xarray.Dataset', 'xr.Dataset', (["{'means': means, 'std_deviations': sds}"], {'attrs': 'attrs'}), "({'means': means, 'std_deviations': sds}, attrs=attrs)\n", (5880, 5934), True, 'import xarray as xr\n'), ((5964, 5977), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (5975, 5977), False, 'from dakota_class import DakotaClass\n'), ((6266, 6330), 'xarray.Dataset', 'xr.Dataset', (["{'means': means, 'std_deviations': sds}"], {'attrs': 'attrs'}), "({'means': means, 'std_deviations': sds}, attrs=attrs)\n", (6276, 6330), True, 'import xarray as xr\n'), ((6360, 6373), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (6371, 6373), False, 'from dakota_class import DakotaClass\n'), ((6725, 6759), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'lower', 'dims': '"""T"""'}), "(data=lower, dims='T')\n", (6737, 6759), True, 'import xarray as xr\n'), ((6778, 6812), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'upper', 'dims': '"""T"""'}), "(data=upper, dims='T')\n", (6790, 6812), True, 'import xarray as xr\n'), ((6836, 6875), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'partitions', 'dims': '"""T"""'}), "(data=partitions, dims='T')\n", (6848, 6875), True, 'import xarray as xr\n'), ((6906, 7007), 'xarray.Dataset', 'xr.Dataset', (["{'lower_bounds': lower, 'upper_bounds': upper, 'partitions': partitions}"], {'attrs': 'attrs'}), "({'lower_bounds': lower, 'upper_bounds': upper, 'partitions':\n partitions}, attrs=attrs)\n", (6916, 7007), True, 'import xarray as xr\n'), ((7033, 7046), 'dakota_class.DakotaClass', 'DakotaClass', ([], {}), '()\n', (7044, 7046), False, 'from dakota_class import DakotaClass\n'), 
((3945, 3978), 'os.path.isfile', 'os.path.isfile', (['"""test_dakota.dat"""'], {}), "('test_dakota.dat')\n", (3959, 3978), False, 'import os\n')]
|
# coding: utf-8
# # Download Mmtf Files Demo
#
# Example of downloading a list of PDB entries from [RCSB]("http://mmtf.rcsb.org")
#
# ## Imports
# In[9]:
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io import mmtfReader
from mmtfPyspark.structureViewer import view_structure
# ## Configure Spark
# In[10]:
conf = SparkConf().setMaster("local[*]").setAppName("DownloadMMTFFiles")
sc = SparkContext(conf = conf)
# ## Download a list of PDB entries using MMTF web services
# In[11]:
pdbIds = ['1AQ1','1B38','1B39','1BUH']
pdb = mmtfReader.download_mmtf_files(pdbIds, sc)
# ## Count the number of entires downloaded
# In[12]:
count = pdb.count()
print(f'number of entries downloaded : {count}')
# ## Visualize Structures
# In[13]:
structures = pdb.keys().collect()
view_structure(structures, style = 'line')
# ## Terminate Spark
# In[14]:
sc.stop()
|
[
"pyspark.SparkContext",
"mmtfPyspark.io.mmtfReader.download_mmtf_files",
"mmtfPyspark.structureViewer.view_structure",
"pyspark.SparkConf"
] |
[((435, 458), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (447, 458), False, 'from pyspark import SparkConf, SparkContext\n'), ((582, 624), 'mmtfPyspark.io.mmtfReader.download_mmtf_files', 'mmtfReader.download_mmtf_files', (['pdbIds', 'sc'], {}), '(pdbIds, sc)\n', (612, 624), False, 'from mmtfPyspark.io import mmtfReader\n'), ((829, 869), 'mmtfPyspark.structureViewer.view_structure', 'view_structure', (['structures'], {'style': '"""line"""'}), "(structures, style='line')\n", (843, 869), False, 'from mmtfPyspark.structureViewer import view_structure\n'), ((341, 352), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (350, 352), False, 'from pyspark import SparkConf, SparkContext\n')]
|
from typing import List
from parse import parse_explain
from db import explain, rows_count
from parse import Analyzer
from serializer import Serializer
class Task(Serializer):
''' defines query as a task
'''
def __init__(self, parent_title, title, query, *args, **kwargs):
self.query = query
self.rows_count = kwargs.get('rows_count', 0)
self.table = kwargs.get('table')
self._parent_title = parent_title
self._title = title
self._times = kwargs.get('times', 10)
def title(self) -> str:
return self._title
def parent_title(self) -> str:
return self._parent_title
def __str__(self) -> str:
return 'Title: {0}\nQuery: {1}\n Times:{2}'.format(self._title, self.query, self._times)
def run(self, session) -> List[Analyzer]:
if self.table:
self._rows_count = rows_count(session, self.table)
return [parse_explain(self._title, explain(session, self.query)) for x in range(self._times)]
|
[
"db.rows_count",
"db.explain"
] |
[((891, 922), 'db.rows_count', 'rows_count', (['session', 'self.table'], {}), '(session, self.table)\n', (901, 922), False, 'from db import explain, rows_count\n'), ((966, 994), 'db.explain', 'explain', (['session', 'self.query'], {}), '(session, self.query)\n', (973, 994), False, 'from db import explain, rows_count\n')]
|