code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import unittest
import os.path
from tableaudocumentapi import Datasource, Workbook
# Locations of the test fixtures, resolved relative to this test module.
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')

# The same sample datasource in standalone (.tds) and workbook-embedded (.twb) form.
TEST_TDS_FILE = os.path.join(TEST_ASSET_DIR, 'datasource_test.tds')
TEST_TWB_FILE = os.path.join(TEST_ASSET_DIR, 'datasource_test.twb')
class DataSourceFieldsTDS(unittest.TestCase):
    """Field-level accessors of a Datasource loaded from a standalone .tds file."""

    def setUp(self):
        self.ds = Datasource.from_file(TEST_TDS_FILE)

    def test_datasource_returns_correct_fields(self):
        self.assertIsNotNone(self.ds.fields)
        self.assertIsNotNone(self.ds.fields.get('[Number of Records]', None))

    def test_datasource_returns_calculation_from_fields(self):
        self.assertEqual('1', self.ds.fields['[Number of Records]'].calculation)

    def test_datasource_uses_metadata_record(self):
        self.assertEqual('Sum', self.ds.fields['[x]'].default_aggregation)

    def test_datasource_column_name_contains_apostrophy(self):
        self.assertIsNotNone(self.ds.fields.get("[Today's Date]", None))

    def test_datasource_field_can_get_caption(self):
        self.assertEqual(self.ds.fields['[a]'].caption, 'A')
        # Same check via getattr: also guards against the attribute missing entirely.
        self.assertEqual(getattr(self.ds.fields['[a]'], 'caption', None), 'A')

    def test_datasource_field_caption_can_be_used_to_query(self):
        # Fields are addressable by caption as well as by bracketed id.
        self.assertIsNotNone(self.ds.fields.get('A', None))

    def test_datasource_field_is_nominal(self):
        self.assertTrue(self.ds.fields['[a]'].is_nominal)

    def test_datasource_field_is_quantitative(self):
        self.assertTrue(self.ds.fields['[y]'].is_quantitative)

    def test_datasource_field_is_ordinal(self):
        self.assertTrue(self.ds.fields['[x]'].is_ordinal)

    def test_datasource_field_datatype(self):
        self.assertEqual(self.ds.fields['[x]'].datatype, 'integer')

    def test_datasource_field_role(self):
        self.assertEqual(self.ds.fields['[x]'].role, 'measure')

    def test_datasource_field_description(self):
        actual = self.ds.fields['[a]'].description
        self.assertIsNotNone(actual)
        # assertIn gives a useful message on failure (assertTrue('x' in y) does not).
        self.assertIn(u'muted gray', actual)
class DataSourceFieldsTWB(unittest.TestCase):
    """Field access on a datasource embedded inside a .twb workbook."""

    def setUp(self):
        self.wb = Workbook(TEST_TWB_FILE)
        # Assume the first datasource in the file
        self.ds = self.wb.datasources[0]

    def test_datasource_fields_loaded_in_workbook(self):
        self.assertIsNotNone(self.ds.fields)
        self.assertIsNotNone(self.ds.fields.get('[Number of Records]', None))
class DataSourceFieldsFoundIn(unittest.TestCase):
    """Behaviour of fields.used_by_sheet() for single sheets and lists of sheets."""

    def setUp(self):
        self.wb = Workbook(TEST_TWB_FILE)
        # Assume the first datasource in the file
        self.ds = self.wb.datasources[0]

    def test_datasource_fields_found_in_returns_fields(self):
        actual_values = self.ds.fields.used_by_sheet('Sheet 1')
        self.assertIsNotNone(actual_values)
        self.assertEqual(1, len(actual_values))
        # Compare on names; a list (not a generator) gives readable failure output.
        self.assertIn('A', [x.name for x in actual_values])

    def test_datasource_fields_found_in_does_not_return_fields_not_used_in_worksheet(self):
        actual_values = self.ds.fields.used_by_sheet('Sheet 1')
        self.assertIsNotNone(actual_values)
        self.assertEqual(1, len(actual_values))
        self.assertNotIn('X', [x.name for x in actual_values])

    def test_datasource_fields_found_in_returns_multiple_fields(self):
        actual_values = self.ds.fields.used_by_sheet('Sheet 2')
        self.assertIsNotNone(actual_values)
        self.assertEqual(2, len(actual_values))
        names = [x.name for x in actual_values]
        self.assertIn('A', names)
        self.assertIn('X', names)
        self.assertNotIn('Y', names)

    def test_datasource_fields_found_in_accepts_lists(self):
        actual_values = self.ds.fields.used_by_sheet(['Sheet 1', 'Sheet 2'])
        self.assertIsNotNone(actual_values)
        self.assertEqual(2, len(actual_values))
        names = [x.name for x in actual_values]
        self.assertIn('A', names)
        self.assertIn('X', names)
        self.assertNotIn('Y', names)
import os.path
from tableaudocumentapi import Datasource, Workbook
TEST_ASSET_DIR = os.path.join(
os.path.dirname(__file__),
'assets'
)
TEST_TDS_FILE = os.path.join(
TEST_ASSET_DIR,
'datasource_test.tds'
)
TEST_TWB_FILE = os.path.join(
TEST_ASSET_DIR,
'datasource_test.twb'
)
class DataSourceFieldsTDS(unittest.TestCase):
def setUp(self):
self.ds = Datasource.from_file(TEST_TDS_FILE)
def test_datasource_returns_correct_fields(self):
self.assertIsNotNone(self.ds.fields)
self.assertIsNotNone(self.ds.fields.get('[Number of Records]', None))
def test_datasource_returns_calculation_from_fields(self):
self.assertEqual('1', self.ds.fields['[Number of Records]'].calculation)
def test_datasource_uses_metadata_record(self):
self.assertEqual('Sum', self.ds.fields['[x]'].default_aggregation)
def test_datasource_column_name_contains_apostrophy(self):
self.assertIsNotNone(self.ds.fields.get("[Today's Date]", None))
def test_datasource_field_can_get_caption(self):
self.assertEqual(self.ds.fields['[a]'].caption, 'A')
self.assertEqual(getattr(self.ds.fields['[a]'], 'caption', None), 'A')
def test_datasource_field_caption_can_be_used_to_query(self):
self.assertIsNotNone(self.ds.fields.get('A', None))
def test_datasource_field_is_nominal(self):
self.assertTrue(self.ds.fields['[a]'].is_nominal)
def test_datasource_field_is_quantitative(self):
self.assertTrue(self.ds.fields['[y]'].is_quantitative)
def test_datasource_field_is_ordinal(self):
self.assertTrue(self.ds.fields['[x]'].is_ordinal)
def test_datasource_field_datatype(self):
self.assertEqual(self.ds.fields['[x]'].datatype, 'integer')
def test_datasource_field_role(self):
self.assertEqual(self.ds.fields['[x]'].role, 'measure')
def test_datasource_field_description(self):
actual = self.ds.fields['[a]'].description
self.assertIsNotNone(actual)
self.assertTrue(u'muted gray' in actual)
class DataSourceFieldsTWB(unittest.TestCase):
def setUp(self):
self.wb = Workbook(TEST_TWB_FILE)
# Assume the first datasource in the file
self.ds = self.wb.datasources[0]
def test_datasource_fields_loaded_in_workbook(self):
self.assertIsNotNone(self.ds.fields)
self.assertIsNotNone(self.ds.fields.get('[Number of Records]', None))
class DataSourceFieldsFoundIn(unittest.TestCase):
def setUp(self):
self.wb = Workbook(TEST_TWB_FILE)
# Assume the first datasource in the file
self.ds = self.wb.datasources[0]
def test_datasource_fields_found_in_returns_fields(self):
actual_values = self.ds.fields.used_by_sheet('Sheet 1')
self.assertIsNotNone(actual_values)
self.assertEqual(1, len(actual_values))
self.assertIn('A', (x.name for x in actual_values))
def test_datasource_fields_found_in_does_not_return_fields_not_used_in_worksheet(self):
actual_values = self.ds.fields.used_by_sheet('Sheet 1')
self.assertIsNotNone(actual_values)
self.assertEqual(1, len(actual_values))
self.assertNotIn('X', (x.name for x in actual_values))
def test_datasource_fields_found_in_returns_multiple_fields(self):
actual_values = self.ds.fields.used_by_sheet('Sheet 2')
self.assertIsNotNone(actual_values)
self.assertEqual(2, len(actual_values))
self.assertIn('A', (x.name for x in actual_values))
self.assertIn('X', (x.name for x in actual_values))
self.assertNotIn('Y', (x.name for x in actual_values))
def test_datasource_fields_found_in_accepts_lists(self):
actual_values = self.ds.fields.used_by_sheet(['Sheet 1', 'Sheet 2'])
self.assertIsNotNone(actual_values)
self.assertEqual(2, len(actual_values))
self.assertIn('A', (x.name for x in actual_values))
self.assertIn('X', (x.name for x in actual_values))
self.assertNotIn('Y', (x.name for x in actual_values)) | 0.474388 | 0.644393 |
import math
import tensorflow as tf
import pc.configuration.config as cfg
import pc.model.base as base
import pc.model.point_cnn_patch as patch
import pc.model.point_cnn_util as pf
import pc.model.sampling.sampling as sampling
def xconv(pts, fts, qrs, tag, N, K, D, P, C, C_pts_fts, is_training, depth_multiplier,
          with_global=False):
    """X-Conv operator (Algorithm 1 of the PointCNN paper).

    Args:
        pts: Input point cloud of shape (B, N, DIM), e.g. (None, 2048, 3).
        fts: Features of the input point cloud of shape (B, N, C1), or None.
        qrs: Representative (query) points of shape (B, P, DIM).
        tag: Name prefix for every op created here.
        N: The batch size (int or scalar tensor).
        K: Number of neighbors to convolve over.
        D: Dilation rate for neighbor selection.
        P: Number of representative points (-1 keeps all — see caller).
        C: Output feature dimensionality.
        C_pts_fts: Dimensionality of features lifted from raw coordinates.
        is_training: Training-mode flag forwarded to the pf.* layers.
        depth_multiplier: Depth multiplier of the final separable convolution.
        with_global: If True, append a small global feature derived from qrs.

    Returns:
        Tensor of shape (B, P, C), or (B, P, C + C // 4) when with_global is True.
    """
    # Select K*D nearest neighbors, then keep every D-th one (dilated neighborhood).
    _, indices_dilated = pf.knn_indices_general(qrs, pts, K * D, True)
    indices = indices_dilated[:, :, ::D, :]
    # Move each neighborhood into the local frame of its query point (line 1 in Algorithm 1).
    nn_pts = tf.gather_nd(pts, indices, name=tag + 'nn_pts')  # (N, P, K, 3)
    nn_pts_center = tf.expand_dims(qrs, axis=2, name=tag + 'nn_pts_center')  # (N, P, 1, 3)
    nn_pts_local = tf.subtract(nn_pts, nn_pts_center, name=tag + 'nn_pts_local')  # (N, P, K, 3)
    # Lift raw coordinates to features (line 2 in Algorithm 1).
    nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
    nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
    # Concatenate lifted features with previous-layer features (line 3 in Algorithm 1).
    if fts is None:
        nn_fts_input = nn_fts_from_pts
    else:
        nn_fts_from_prev = tf.gather_nd(fts, indices, name=tag + 'nn_fts_from_prev')
        nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev], axis=-1,
                                 name=tag + 'nn_fts_input')
    # Learn the K x K X-transformation matrix (line 4 in Algorithm 1).
    X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training, (1, K))
    X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
    X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training, (1, K))
    X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
    X_2 = pf.depthwise_conv2d(X_1_KK, K, tag + 'X_2', is_training, (1, K), activation=None)
    X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
    # Apply X to the neighborhood features (line 5 in Algorithm 1).
    fts_X = tf.matmul(X_2_KK, nn_fts_input, name=tag + 'fts_X')
    # Convolve the transformed features down to C channels (line 6 in Algorithm 1).
    fts_conv = pf.separable_conv2d(fts_X, C, tag + 'fts_conv', is_training, (1, K),
                                   depth_multiplier=depth_multiplier)
    fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')
    if not with_global:
        return fts_conv_3d
    # Optionally append a small global feature computed from the query coordinates.
    fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
    fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global', is_training)
    return tf.concat([fts_global, fts_conv_3d], axis=-1, name=tag + 'fts_conv_3d_with_global')
class PointCNN(base.BaseModel):
    """PointCNN encoder-decoder built from X-Conv layers, with an FC head."""

    def inference(self, x) -> object:
        """Build the network graph.

        Args:
            x: Point-cloud batch of shape (B, N, C). The first three channels are
               XYZ coordinates; extra channels are per-point features when
               self.use_point_feature is set.

        Returns:
            The logits tensor, exposed under the graph name 'network'.
        """
        # Split coordinates from optional per-point features.
        if x.shape[-1] > 3:
            input_points = x[..., 0:3]
            if self.use_point_feature:
                point_features = x[..., 3:]
            else:
                point_features = None
        else:
            input_points = x
            point_features = None
        # Optionally fuse features extracted from accompanying image information.
        if self.use_image_information:
            e = patch.Encoder(self.image_information_config, self.no_points, self.dropout_p,
                              self.is_training_placeholder)
            image_information_features = e.inference(self.image_information_placeholder)
            if point_features is not None:
                point_features = tf.concat([point_features, image_information_features], axis=-1)
            else:
                point_features = image_information_features
        # Per-layer hyper-parameters: K neighbors, dilation D, P representative
        # points (-1 keeps all points), C output channels.
        xconv_param_name = ('K', 'D', 'P', 'C')
        xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
                        [(8, 1, -1, 32 * self.channel_factor),
                         (12, 2, 768, 32 * self.channel_factor),
                         (16, 2, 384, 64 * self.channel_factor),
                         (16, 6, 128, 128 * self.channel_factor)]]
        xdconv_param_name = ('K', 'D', 'pts_layer_idx', 'qrs_layer_idx')
        xdconv_params = [dict(zip(xdconv_param_name, xdconv_param)) for xdconv_param in
                         [(16, 6, 3, 2),
                          (12, 6, 2, 1),
                          (8, 6, 1, 0),
                          (8, 4, 0, 0)]]
        fc_param_name = ('C', 'dropout_rate')
        fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
                     [(32 * self.channel_factor, self.dropout_p),
                      (32 * self.channel_factor, self.dropout_p)]]
        with_global_setting = True
        batch_size = tf.shape(input_points)[0]
        sampling_config = 'fps'
        list_points = [input_points]
        list_features = [None] if point_features is None else \
            [pf.dense(point_features, xconv_params[0]['C'] // 2, 'features_hd',
                      self.is_training_placeholder)]
        # --- encoding path ---
        for layer_idx, layer_param in enumerate(xconv_params):
            tag = 'xconv_' + str(layer_idx + 1) + '_'
            K = layer_param['K']
            D = layer_param['D']
            P = layer_param['P']
            C = layer_param['C']
            pts = list_points[-1]
            fts = list_features[-1]
            # Downsample to P representative points unless this layer keeps them all
            # (P == -1) or the previous layer already produced exactly P points.
            if P == -1 or (layer_idx > 0 and P == xconv_params[layer_idx - 1]['P']):
                qrs = list_points[-1]
            else:
                if sampling_config == 'fps':
                    fps_indices = sampling.farthest_point_sample(pts, P)
                    batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1)), (1, P, 1))
                    indices = tf.concat([batch_indices, tf.expand_dims(fps_indices, -1)], axis=-1)
                    qrs = tf.gather_nd(pts, indices, name=tag + 'qrs')  # (N, P, 3)
                elif sampling_config == 'ids':
                    indices = pf.inverse_density_sampling(pts, K, P)
                    qrs = tf.gather_nd(pts, indices)
                else:
                    raise ValueError('Unknown sampling method "{}"'.format(sampling_config))
            list_points.append(qrs)
            if layer_idx == 0:
                C_pts_fts = C // 2 if fts is None else C // 4
                depth_multiplier = 4
            else:
                C_prev = xconv_params[layer_idx - 1]['C']
                C_pts_fts = C_prev // 4
                depth_multiplier = math.ceil(C / C_prev)
            # Only the last encoder layer appends the global feature.
            with_global = (with_global_setting and layer_idx == len(xconv_params) - 1)
            print('ENC', pts.shape, qrs.shape[1], C, K, D)
            fts_xconv = xconv(pts, fts, qrs, tag, batch_size, K, D, P, C, C_pts_fts,
                              self.is_training_placeholder, depth_multiplier, with_global)
            list_features.append(fts_xconv)
        # --- decoding path ---
        for layer_idx, layer_param in enumerate(xdconv_params):
            tag = 'xdconv_' + str(layer_idx + 1) + '_'
            K = layer_param['K']
            D = layer_param['D']
            pts_layer_idx = layer_param['pts_layer_idx']
            qrs_layer_idx = layer_param['qrs_layer_idx']
            pts = list_points[pts_layer_idx + 1]
            # After the first deconv layer, consume the fused features from the
            # previous iteration instead of the raw encoder output.
            fts = list_features[pts_layer_idx + 1] if layer_idx == 0 else list_features[-1]
            qrs = list_points[qrs_layer_idx + 1]
            fts_qrs = list_features[qrs_layer_idx + 1]
            P = xconv_params[qrs_layer_idx]['P']
            C = xconv_params[qrs_layer_idx]['C']
            C_prev = xconv_params[pts_layer_idx]['C']
            C_pts_fts = C_prev // 4
            depth_multiplier = 1
            print('DEC', pts.shape, qrs.shape[1], C, K, D)
            fts_xdconv = xconv(pts, fts, qrs, tag, batch_size, K, D, P, C, C_pts_fts,
                               self.is_training_placeholder, depth_multiplier)
            # Skip connection: fuse upsampled features with the encoder features.
            fts_concat = tf.concat([fts_xdconv, fts_qrs], axis=-1, name=tag + 'fts_concat')
            fts_fuse = pf.dense(fts_concat, C, tag + 'fts_fuse', self.is_training_placeholder)
            list_points.append(qrs)
            list_features.append(fts_fuse)
        # --- fully-connected head ---
        fc_layers = [list_features[-1]]
        for layer_idx, layer_param in enumerate(fc_params):
            C = layer_param['C']
            dropout_rate = layer_param['dropout_rate']
            fc = pf.dense(fc_layers[-1], C, 'fc{:d}'.format(layer_idx), self.is_training_placeholder)
            fc_drop = tf.layers.dropout(fc, dropout_rate, training=self.is_training_placeholder,
                                        name='fc{:d}_drop'.format(layer_idx))
            fc_layers.append(fc_drop)
        logits = pf.dense(fc_layers[-1], cfg.NO_CLASSES, 'logits',
                          self.is_training_placeholder, with_bn=False, activation=None)
        return tf.identity(logits, name='network')
import tensorflow as tf
import pc.configuration.config as cfg
import pc.model.base as base
import pc.model.point_cnn_patch as patch
import pc.model.point_cnn_util as pf
import pc.model.sampling.sampling as sampling
def xconv(pts, fts, qrs, tag, N, K, D, P, C, C_pts_fts, is_training, depth_multiplier,
with_global=False):
"""
Args:
pts: The input point cloud of size (B, N, DIM), e.g., (None, 2048, 3).
fts: The features of the input point cloud of size (B, N, C1), e.g., (None, 2048, 1)
qrs: The representative points of size (B, N2, DIM), e.g., (None, 768, 3)
tag:
N: The batch size.
K: Number of neighbors to convolve over?
D: Dilation rate
P: Number of representative points
C: The feature dimensionality.
C_pts_fts:
is_training:
depth_multiplier:
with_global:
Returns:
"""
# dilate point cloud
_, indices_dilated = pf.knn_indices_general(qrs, pts, K * D, True)
indices = indices_dilated[:, :, ::D, :]
# move P to local coordinate system of p (line 1 in Algorithm 1)
nn_pts = tf.gather_nd(pts, indices, name=tag + 'nn_pts') # (N, P, K, 3)
nn_pts_center = tf.expand_dims(qrs, axis=2, name=tag + 'nn_pts_center') # (N, P, 1, 3)
nn_pts_local = tf.subtract(nn_pts, nn_pts_center, name=tag + 'nn_pts_local') # (N, P, K, 3)
# Prepare features to be transformed (line 2 in Algorithm 1)
nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
# concatenate features (line 3 in Algorithm 1)
if fts is None:
nn_fts_input = nn_fts_from_pts
else:
nn_fts_from_prev = tf.gather_nd(fts, indices, name=tag + 'nn_fts_from_prev')
nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev], axis=-1, name=tag + 'nn_fts_input')
# X-transformation (line 4 in Algorithm 1)
X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training, (1, K))
X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training, (1, K))
X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
X_2 = pf.depthwise_conv2d(X_1_KK, K, tag + 'X_2', is_training, (1, K), activation=None)
X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
# (line 5 in Algorithm 1)
fts_X = tf.matmul(X_2_KK, nn_fts_input, name=tag + 'fts_X')
# (line 6 in Algorithm 1)
fts_conv = pf.separable_conv2d(fts_X, C, tag + 'fts_conv', is_training, (1, K), depth_multiplier=depth_multiplier)
fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')
if with_global:
fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global', is_training)
return tf.concat([fts_global, fts_conv_3d], axis=-1, name=tag + 'fts_conv_3d_with_global')
else:
return fts_conv_3d
class PointCNN(base.BaseModel):
def inference(self, x) -> object:
# x has shape (B, N, C)
if x.shape[-1] > 3:
input_points = x[..., 0:3]
if self.use_point_feature:
point_features = x[..., 3:]
else:
point_features = None
else:
input_points = x
point_features = None
# extract image information features
if self.use_image_information:
e = patch.Encoder(self.image_information_config, self.no_points, self.dropout_p, self.is_training_placeholder)
image_information_features = e.inference(self.image_information_placeholder)
if point_features is not None:
point_features = tf.concat([point_features, image_information_features], axis=-1)
else:
point_features = image_information_features
xconv_param_name = ('K', 'D', 'P', 'C')
xconv_params = [dict(zip(xconv_param_name, xconv_param)) for xconv_param in
[(8, 1, -1, 32 * self.channel_factor),
(12, 2, 768, 32 * self.channel_factor),
(16, 2, 384, 64 * self.channel_factor),
(16, 6, 128, 128 * self.channel_factor)]]
xdconv_param_name = ('K', 'D', 'pts_layer_idx', 'qrs_layer_idx')
xdconv_params = [dict(zip(xdconv_param_name, xdconv_param)) for xdconv_param in
[(16, 6, 3, 2),
(12, 6, 2, 1),
(8, 6, 1, 0),
(8, 4, 0, 0)]]
fc_param_name = ('C', 'dropout_rate')
fc_params = [dict(zip(fc_param_name, fc_param)) for fc_param in
[(32 * self.channel_factor, self.dropout_p),
(32 * self.channel_factor, self.dropout_p)]]
with_global_setting = True
batch_size = tf.shape(input_points)[0]
sampling_config = 'fps'
list_points = [input_points]
list_features = [None] if point_features is None else \
[pf.dense(point_features, xconv_params[0]['C'] // 2, 'features_hd', self.is_training_placeholder)]
# encoding path
for layer_idx, layer_param in enumerate(xconv_params):
tag = 'xconv_' + str(layer_idx + 1) + '_'
K = layer_param['K']
D = layer_param['D']
P = layer_param['P']
C = layer_param['C']
# get k-nearest points
pts = list_points[-1]
fts = list_features[-1]
# check if we need to downsample point cloud
if P == -1 or (layer_idx > 0 and P == xconv_params[layer_idx - 1]['P']):
qrs = list_points[-1]
else:
if sampling_config == 'fps':
fps_indices = sampling.farthest_point_sample(pts, P)
batch_indices = tf.tile(tf.reshape(tf.range(batch_size), (-1, 1, 1)), (1, P, 1))
indices = tf.concat([batch_indices, tf.expand_dims(fps_indices, -1)], axis=-1)
qrs = tf.gather_nd(pts, indices, name=tag + 'qrs') # (N, P, 3)
elif sampling_config == 'ids':
indices = pf.inverse_density_sampling(pts, K, P)
qrs = tf.gather_nd(pts, indices)
else:
raise ValueError('Unknown sampling method "{}"'.format(sampling_config))
list_points.append(qrs)
if layer_idx == 0:
C_pts_fts = C // 2 if fts is None else C // 4
depth_multiplier = 4
else:
C_prev = xconv_params[layer_idx - 1]['C']
C_pts_fts = C_prev // 4
depth_multiplier = math.ceil(C / C_prev)
with_global = (with_global_setting and layer_idx == len(xconv_params) - 1)
print('ENC', pts.shape, qrs.shape[1], C, K, D)
fts_xconv = xconv(pts, fts, qrs, tag, batch_size, K, D, P, C, C_pts_fts, self.is_training_placeholder,
depth_multiplier, with_global)
list_features.append(fts_xconv)
# decoding path
for layer_idx, layer_param in enumerate(xdconv_params):
tag = 'xdconv_' + str(layer_idx + 1) + '_'
K = layer_param['K']
D = layer_param['D']
pts_layer_idx = layer_param['pts_layer_idx']
qrs_layer_idx = layer_param['qrs_layer_idx']
pts = list_points[pts_layer_idx + 1]
fts = list_features[pts_layer_idx + 1] if layer_idx == 0 else list_features[-1] # fts_fuse is used here
qrs = list_points[qrs_layer_idx + 1]
fts_qrs = list_features[qrs_layer_idx + 1]
P = xconv_params[qrs_layer_idx]['P']
C = xconv_params[qrs_layer_idx]['C']
C_prev = xconv_params[pts_layer_idx]['C']
C_pts_fts = C_prev // 4
depth_multiplier = 1
print('DEC', pts.shape, qrs.shape[1], C, K, D)
fts_xdconv = xconv(pts, fts, qrs, tag, batch_size, K, D, P, C, C_pts_fts, self.is_training_placeholder,
depth_multiplier)
fts_concat = tf.concat([fts_xdconv, fts_qrs], axis=-1, name=tag + 'fts_concat')
fts_fuse = pf.dense(fts_concat, C, tag + 'fts_fuse', self.is_training_placeholder)
list_points.append(qrs)
list_features.append(fts_fuse)
# fully-connected at end
fc_layers = [list_features[-1]]
for layer_idx, layer_param in enumerate(fc_params):
C = layer_param['C']
dropout_rate = layer_param['dropout_rate']
fc = pf.dense(fc_layers[-1], C, 'fc{:d}'.format(layer_idx), self.is_training_placeholder)
fc_drop = tf.layers.dropout(fc, dropout_rate, training=self.is_training_placeholder, name='fc{:d}_drop'.format(layer_idx))
fc_layers.append(fc_drop)
logits = pf.dense(fc_layers[-1], cfg.NO_CLASSES, 'logits',
self.is_training_placeholder, with_bn=False, activation=None)
return tf.identity(logits, name='network') | 0.779532 | 0.518059 |
import h5py
import numpy
import sys
import argparse
# Command-line interface. NOTE: the original description ('Process some
# integers.') was a copy-paste leftover from the argparse docs; corrected here.
parser = argparse.ArgumentParser(
    description='Create per-difficulty query sets from an HDF5 dataset.')
parser.add_argument('datafile', metavar='FILE',
                    help='the HDF5 file containing the dataset')
parser.add_argument('queryfile', metavar='FILE',
                    help='the text file containing queries')
parser.add_argument('--expansion', action='store_true',
                    help='the queries are selected by their expansion')
parser.add_argument('--rc', action='store_true',
                    help='the queries are selected by their RC')
parser.add_argument('--lid', action='store_true',
                    help='the queries are selected by their LID')
args = parser.parse_args()

fn = args.datafile
gn = args.queryfile
use_expansion = args.expansion
use_rc = args.rc
use_lid = args.lid
# At least one difficulty measure must have been chosen.
assert use_expansion or use_rc or use_lid

# Open the source dataset read-only (explicit mode; h5py's implicit default
# is deprecated and would open the file writable).
f = h5py.File(fn, 'r')
attrs = f.attrs
train = f['train']
test = f['test']
nn = f['neighbors']
dd = f['distances']

# Read the query-index lists; an optional k is encoded in the query file name
# as the third dash-separated token (e.g. "queries-glove-25.txt" -> k = 25).
with open(gn) as g:
    lines = g.readlines()
k = 10
if gn.count("-") == 2:
    k = int(gn.split("-")[2].split(".")[0])
# Lines 1/3/5/7 hold bracketed, comma-separated index lists per difficulty.
easy = list(map(int, lines[1].strip()[1:-1].split(",")))
middle = list(map(int, lines[3].strip()[1:-1].split(",")))
hard = list(map(int, lines[5].strip()[1:-1].split(",")))
diverse = list(map(int, lines[7].strip()[1:-1].split(",")))
# make four different versions containing the different querysets
def create_dataset(f, train, nn, dd, l, name, difficulty_type, k=10):
    """Write a copy of the dataset whose test set is the queries indexed by ``l``.

    Args:
        f: Open source HDF5 file (read for its 'distance'/'point_type' attrs).
        train: Source 'train' dataset; also the pool the queries are drawn from.
        nn: Source 'neighbors' dataset (ground-truth neighbor ids per point).
        dd: Source 'distances' dataset (ground-truth distances per point).
        l: Indices into ``train`` selecting the query points.
        name: Difficulty-bucket name used in the output file name.
        difficulty_type: Measure that produced the bucket (expansion/lrc/lid).
        k: Number of neighbors, encoded into the output file name.

    NOTE(review): the output path is derived from the module-level ``fn``, not
    from ``f`` — confirm before reusing this function outside this script.
    """
    g = h5py.File(fn.replace('.hdf5', '') + '-{}-{}-{}.hdf5'.format(name, difficulty_type, k), 'w')
    g.attrs['distance'] = f.attrs['distance']
    g.attrs['point_type'] = f.attrs['point_type']
    # create_dataset returns the new HDF5 dataset; fill it in place via [:].
    g.create_dataset('train', (len(train), len(train[0])), dtype=train.dtype)[:] = train
    queries = []
    distances = []
    neighbors = []
    for i in l:
        queries.append(train[i])
        neighbors.append(nn[i])
        distances.append(dd[i])
    g.create_dataset('test', (len(queries), len(queries[0])), dtype=train.dtype)[:] = queries
    g.create_dataset('neighbors', (len(neighbors), len(neighbors[0])), dtype='i')[:] = neighbors
    g.create_dataset('distances', (len(distances), len(distances[0])), dtype='f')[:] = distances
    g.close()
# Map the selected flag to the tag used in the output file names.
if use_expansion:
    difficulty_type = "expansion"
elif use_rc:
    difficulty_type = "lrc"
else:
    difficulty_type = "lid"
# Emit one derived dataset per difficulty bucket.
create_dataset(f, train, nn, dd, easy, 'easy', difficulty_type, k)
create_dataset(f, train, nn, dd, middle, 'middle', difficulty_type, k)
create_dataset(f, train, nn, dd, hard, 'hard', difficulty_type, k)
create_dataset(f, train, nn, dd, diverse, 'diverse', difficulty_type, k)
import numpy
import sys
import argparse
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('datafile', metavar='FILE',
help='the HDF5 file containing the dataset')
parser.add_argument('queryfile', metavar='FILE',
help='the text file containing queries')
parser.add_argument('--expansion', action='store_true',
help='the queries are selected by their expansion')
parser.add_argument('--rc', action='store_true',
help='the queries are selected by their RC')
parser.add_argument('--lid', action='store_true',
help='the queries are selected by their LID')
args = parser.parse_args()
fn = args.datafile # sys.argv[1]
gn = args.queryfile # sys.argv[2]
use_expansion = args.expansion
use_rc = args.rc
use_lid = args.lid
assert use_expansion or use_rc or use_lid
# read h5py file completely
f = h5py.File(fn)
attrs = f.attrs
train = f['train']
test = f['test']
nn = f['neighbors']
dd = f['distances']
# choose querysets
with open(gn) as g:
lines = g.readlines()
k = 10
if gn.count("-") == 2:
k = int(gn.split("-")[2].split(".")[0])
easy = list(map(int, lines[1].strip()[1:-1].split(",")))
middle = list(map(int, lines[3].strip()[1:-1].split(",")))
hard = list(map(int, lines[5].strip()[1:-1].split(",")))
diverse = list(map(int, lines[7].strip()[1:-1].split(",")))
# make four different versions containing the different querysets
def create_dataset(f, train, nn, dd, l, name, difficulty_type, k=10):
g = h5py.File(fn.replace('.hdf5','') + '-{}-{}-{}.hdf5'.format(name, difficulty_type, k), 'w')
g.attrs['distance'] = f.attrs['distance']
g.attrs['point_type'] = f.attrs['point_type']
g.create_dataset('train', (len(train), len(train[0])), dtype=train.dtype)[:] = train
queries = []
distances = []
neighbors = []
for i in l:
queries.append(train[i])
neighbors.append(nn[i])
distances.append(dd[i])
g.create_dataset('test', (len(queries), len(queries[0])), dtype=train.dtype)[:] = queries
g.create_dataset('neighbors', (len(neighbors), len(neighbors[0])), dtype='i')[:] = neighbors
g.create_dataset('distances', (len(distances), len(distances[0])), dtype='f')[:] = distances
g.close()
if use_expansion:
difficulty_type = "expansion"
elif use_rc:
difficulty_type = "lrc"
else:
difficulty_type = "lid"
create_dataset(f, train, nn, dd, easy, 'easy', difficulty_type, k)
create_dataset(f, train, nn, dd, middle, 'middle', difficulty_type, k)
create_dataset(f, train, nn, dd, hard, 'hard', difficulty_type, k)
create_dataset(f, train, nn, dd, diverse, 'diverse', difficulty_type, k) | 0.280419 | 0.171685 |
from typing import Dict, List, Optional
from flytekit.common import constants as _constants
from flytekit.common.exceptions import system as _system_exceptions
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.mixins import hash as _hash_mixin
from flytekit.core.interface import Interface
from flytekit.core.type_engine import TypeEngine
from flytekit.models import launch_plan as _launch_plan_models
from flytekit.models import task as _task_models
from flytekit.models.core import identifier as _identifier_model
from flytekit.models.core import workflow as _workflow_models
from flytekit.remote import identifier as _identifier
from flytekit.remote import interface as _interfaces
from flytekit.remote import nodes as _nodes
class FlyteWorkflow(_hash_mixin.HashOnReferenceMixin, _workflow_models.WorkflowTemplate):
    """A class encapsulating a remote Flyte workflow."""

    def __init__(
        self,
        nodes: List[_nodes.FlyteNode],
        interface,
        output_bindings,
        id,
        metadata,
        metadata_defaults,
    ):
        # Every upstream node must carry an id; a missing id means the node was
        # never registered in the workflow description.
        for node in nodes:
            for upstream in node.upstream_nodes:
                if upstream.id is None:
                    raise _user_exceptions.FlyteAssertion(
                        "Some nodes contained in the workflow were not found in the workflow description. Please "
                        "ensure all nodes are either assigned to attributes within the class or an element in a "
                        "list, dict, or tuple which is stored as an attribute in the class."
                    )
        super(FlyteWorkflow, self).__init__(
            id=id,
            metadata=metadata,
            metadata_defaults=metadata_defaults,
            interface=interface,
            nodes=nodes,
            outputs=output_bindings,
        )
        self._flyte_nodes = nodes
        # Guessed python Interface; write-once via the setter below.
        self._python_interface = None

    @property
    def upstream_entities(self):
        # The executable entities (tasks / sub-workflows) this workflow's nodes run.
        return set(n.executable_flyte_object for n in self._flyte_nodes)

    @property
    def interface(self) -> _interfaces.TypedInterface:
        return super(FlyteWorkflow, self).interface

    @property
    def entity_type_text(self) -> str:
        return "Workflow"

    @property
    def resource_type(self):
        return _identifier_model.ResourceType.WORKFLOW

    @property
    def flyte_nodes(self) -> List[_nodes.FlyteNode]:
        return self._flyte_nodes

    @property
    def guessed_python_interface(self) -> Optional[Interface]:
        return self._python_interface

    @guessed_python_interface.setter
    def guessed_python_interface(self, value):
        # Write-once: keep the first guess, silently ignore later assignments.
        if self._python_interface is not None:
            return
        self._python_interface = value

    def get_sub_workflows(self) -> List["FlyteWorkflow"]:
        """Recursively collect sub-workflows referenced by workflow nodes and
        by the branches of conditional (branch) nodes."""
        result = []
        for node in self.flyte_nodes:
            if node.workflow_node is not None and node.workflow_node.sub_workflow_ref is not None:
                if node.flyte_entity is not None and node.flyte_entity.entity_type_text == "Workflow":
                    result.append(node.flyte_entity)
                    result.extend(node.flyte_entity.get_sub_workflows())
                else:
                    raise _system_exceptions.FlyteSystemException(
                        "workflow node with subworkflow found but bad executable "
                        "object {}".format(node.flyte_entity)
                    )
            # get subworkflows in conditional branches
            if node.branch_node is not None:
                if_else: _workflow_models.IfElseBlock = node.branch_node.if_else
                # Materialize the filter so the List[...] annotation is accurate
                # (the original bound a lazy filter object to a List-typed name).
                leaf_nodes: List[_nodes.FlyteNode] = list(
                    filter(
                        None,
                        [
                            if_else.case.then_node,
                            *([] if if_else.other is None else [x.then_node for x in if_else.other]),
                            if_else.else_node,
                        ],
                    )
                )
                for leaf_node in leaf_nodes:
                    exec_flyte_obj = leaf_node.flyte_entity
                    if exec_flyte_obj is not None and exec_flyte_obj.entity_type_text == "Workflow":
                        result.append(exec_flyte_obj)
                        result.extend(exec_flyte_obj.get_sub_workflows())
        return result

    @classmethod
    def get_non_system_nodes(cls, nodes: List[_workflow_models.Node]) -> List[_workflow_models.Node]:
        # Drop the synthetic start/end nodes the platform injects.
        return [n for n in nodes if n.id not in {_constants.START_NODE_ID, _constants.END_NODE_ID}]

    @classmethod
    def promote_from_model(
        cls,
        base_model: _workflow_models.WorkflowTemplate,
        sub_workflows: Optional[Dict[_identifier.Identifier, _workflow_models.WorkflowTemplate]] = None,
        node_launch_plans: Optional[Dict[_identifier.Identifier, _launch_plan_models.LaunchPlanSpec]] = None,
        tasks: Optional[Dict[_identifier.Identifier, _task_models.TaskTemplate]] = None,
    ) -> "FlyteWorkflow":
        """Build a FlyteWorkflow from its serialized model representation."""
        base_model_non_system_nodes = cls.get_non_system_nodes(base_model.nodes)
        sub_workflows = sub_workflows or {}
        tasks = tasks or {}
        node_map = {
            node.id: _nodes.FlyteNode.promote_from_model(node, sub_workflows, node_launch_plans, tasks)
            for node in base_model_non_system_nodes
        }
        # Set upstream nodes for each node
        for n in base_model_non_system_nodes:
            current = node_map[n.id]
            for upstream_id in n.upstream_node_ids:
                upstream_node = node_map[upstream_id]
                current._upstream.append(upstream_node)
        # No inputs/outputs specified, see the constructor for more information on the overrides.
        wf = cls(
            nodes=list(node_map.values()),
            id=_identifier.Identifier.promote_from_model(base_model.id),
            metadata=base_model.metadata,
            metadata_defaults=base_model.metadata_defaults,
            interface=_interfaces.TypedInterface.promote_from_model(base_model.interface),
            output_bindings=base_model.outputs,
        )
        if wf.interface is not None:
            wf.guessed_python_interface = Interface(
                inputs=TypeEngine.guess_python_types(wf.interface.inputs),
                outputs=TypeEngine.guess_python_types(wf.interface.outputs),
            )
        return wf

    def __call__(self, *args, **input_map):
        # Remote workflows are not directly callable; execute via FlyteRemote.
        raise NotImplementedError
from flytekit.common import constants as _constants
from flytekit.common.exceptions import system as _system_exceptions
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.mixins import hash as _hash_mixin
from flytekit.core.interface import Interface
from flytekit.core.type_engine import TypeEngine
from flytekit.models import launch_plan as _launch_plan_models
from flytekit.models import task as _task_models
from flytekit.models.core import identifier as _identifier_model
from flytekit.models.core import workflow as _workflow_models
from flytekit.remote import identifier as _identifier
from flytekit.remote import interface as _interfaces
from flytekit.remote import nodes as _nodes
class FlyteWorkflow(_hash_mixin.HashOnReferenceMixin, _workflow_models.WorkflowTemplate):
"""A class encapsulating a remote Flyte workflow."""
def __init__(
self,
nodes: List[_nodes.FlyteNode],
interface,
output_bindings,
id,
metadata,
metadata_defaults,
):
for node in nodes:
for upstream in node.upstream_nodes:
if upstream.id is None:
raise _user_exceptions.FlyteAssertion(
"Some nodes contained in the workflow were not found in the workflow description. Please "
"ensure all nodes are either assigned to attributes within the class or an element in a "
"list, dict, or tuple which is stored as an attribute in the class."
)
super(FlyteWorkflow, self).__init__(
id=id,
metadata=metadata,
metadata_defaults=metadata_defaults,
interface=interface,
nodes=nodes,
outputs=output_bindings,
)
self._flyte_nodes = nodes
self._python_interface = None
@property
def upstream_entities(self):
return set(n.executable_flyte_object for n in self._flyte_nodes)
@property
def interface(self) -> _interfaces.TypedInterface:
return super(FlyteWorkflow, self).interface
@property
def entity_type_text(self) -> str:
return "Workflow"
@property
def resource_type(self):
return _identifier_model.ResourceType.WORKFLOW
@property
def flyte_nodes(self) -> List[_nodes.FlyteNode]:
return self._flyte_nodes
@property
def guessed_python_interface(self) -> Optional[Interface]:
return self._python_interface
@guessed_python_interface.setter
def guessed_python_interface(self, value):
if self._python_interface is not None:
return
self._python_interface = value
def get_sub_workflows(self) -> List["FlyteWorkflow"]:
result = []
for node in self.flyte_nodes:
if node.workflow_node is not None and node.workflow_node.sub_workflow_ref is not None:
if node.flyte_entity is not None and node.flyte_entity.entity_type_text == "Workflow":
result.append(node.flyte_entity)
result.extend(node.flyte_entity.get_sub_workflows())
else:
raise _system_exceptions.FlyteSystemException(
"workflow node with subworkflow found but bad executable " "object {}".format(node.flyte_entity)
)
# get subworkflows in conditional branches
if node.branch_node is not None:
if_else: _workflow_models.IfElseBlock = node.branch_node.if_else
leaf_nodes: List[_nodes.FlyteNode] = filter(
None,
[
if_else.case.then_node,
*([] if if_else.other is None else [x.then_node for x in if_else.other]),
if_else.else_node,
],
)
for leaf_node in leaf_nodes:
exec_flyte_obj = leaf_node.flyte_entity
if exec_flyte_obj is not None and exec_flyte_obj.entity_type_text == "Workflow":
result.append(exec_flyte_obj)
result.extend(exec_flyte_obj.get_sub_workflows())
return result
@classmethod
def get_non_system_nodes(cls, nodes: List[_workflow_models.Node]) -> List[_workflow_models.Node]:
return [n for n in nodes if n.id not in {_constants.START_NODE_ID, _constants.END_NODE_ID}]
@classmethod
def promote_from_model(
cls,
base_model: _workflow_models.WorkflowTemplate,
sub_workflows: Optional[Dict[_identifier.Identifier, _workflow_models.WorkflowTemplate]] = None,
node_launch_plans: Optional[Dict[_identifier.Identifier, _launch_plan_models.LaunchPlanSpec]] = None,
tasks: Optional[Dict[_identifier.Identifier, _task_models.TaskTemplate]] = None,
) -> "FlyteWorkflow":
base_model_non_system_nodes = cls.get_non_system_nodes(base_model.nodes)
sub_workflows = sub_workflows or {}
tasks = tasks or {}
node_map = {
node.id: _nodes.FlyteNode.promote_from_model(node, sub_workflows, node_launch_plans, tasks)
for node in base_model_non_system_nodes
}
# Set upstream nodes for each node
for n in base_model_non_system_nodes:
current = node_map[n.id]
for upstream_id in n.upstream_node_ids:
upstream_node = node_map[upstream_id]
current._upstream.append(upstream_node)
# No inputs/outputs specified, see the constructor for more information on the overrides.
wf = cls(
nodes=list(node_map.values()),
id=_identifier.Identifier.promote_from_model(base_model.id),
metadata=base_model.metadata,
metadata_defaults=base_model.metadata_defaults,
interface=_interfaces.TypedInterface.promote_from_model(base_model.interface),
output_bindings=base_model.outputs,
)
if wf.interface is not None:
wf.guessed_python_interface = Interface(
inputs=TypeEngine.guess_python_types(wf.interface.inputs),
outputs=TypeEngine.guess_python_types(wf.interface.outputs),
)
return wf
def __call__(self, *args, **input_map):
raise NotImplementedError | 0.855293 | 0.149128 |
import json
import dash.html as html
import dash.dcc as dcc
import dash_bootstrap_components as dbc
from apps.sections.m_cyto_elements import get_cyto_elements
import dash_cytoscape as cyto
from app import app
from dash.dependencies import Input, Output
stylesheet_cyto = [
{
'selector': '.box',
'style': {
'background-color': 'red',
'line-color': 'red'
}
}
]
# Restart view button
restart_view_button = dbc.Button(
"Reset view",
id='reset-cytoscape',
outline=True,
color="info",
className="mr-1",
style={
# 'right': '100%',
'marginLeft': '6px',
'zIndex': 20,
'bottom': '36px',
# 'marginTop': '-18px',
'padding': '2px 5px',
'position':'relative'
}
)
# Cytoscape colum
cyto_canvas = html.Div(
style={'flex': '1',
'position': 'relative',
'height': '100%',
},
children =[
cyto.Cytoscape(
id ='cytoscape-methods',
className = 'plot-margin cyto-canvas',
responsive = True,
maxZoom = 1.1,
minZoom = 0.7,
zoom = 1,
layout = {
'name': 'preset',
'fit': True
},
style = {
# 'position': 'absolute',
'width': '100%',
'minHeight': '550px',
# 'maxHeight': '850px',
# 'height': '550px',
# 'max-height':
'zIndex': 10,
# 'margin': 'auto'
},
elements = get_cyto_elements() ,
# stylesheet=stylesheet_cyto
),
restart_view_button
]
)
row_cyto = html.Div(
style={
'display': 'flex',
'flexDirection': 'column',
# 'height': '50%',
'width': '100%'
},
children = [
cyto_canvas
# restart_view_button
],
)
# Texts
row_markdown = dbc.Row(
className='row-title-content',
children= [
html.H2('Methodology Workflow'),
html.P([
html.I(className="fas fa-hand-point-down icon-hand-cyto"),
' ',
html.Mark([' ', html.B('Click'),' on a ']),
html.Mark(html.B('box'),
className='mark-red-cyto'),
html.Mark(' below to see details about the workflow.')
],
style={
'justifyContent': 'center',
'textAlign': 'center'
}
)
]
)
# col_container = dbc.Col(
# lg = 4, md = 12, sm = 12,
# className = 'col-container-methods',
# children = [
# row_markdown,
# row_cyto,
# ],
# )
col_contents = [
row_markdown,
row_cyto,
]
# Callbacks
@app.callback(
[
Output('cytoscape-methods', 'layout'),
Output('cytoscape-methods', 'zoom'),
Output('cytoscape-methods', 'elements'),
],
[Input('reset-cytoscape', 'n_clicks')]
)
def reset_layout(n_clicks):
layout = {'name': 'preset'}
elements = get_cyto_elements()
return [layout, 1, elements] | apps/sections/m_flow.py | import json
import dash.html as html
import dash.dcc as dcc
import dash_bootstrap_components as dbc
from apps.sections.m_cyto_elements import get_cyto_elements
import dash_cytoscape as cyto
from app import app
from dash.dependencies import Input, Output
stylesheet_cyto = [
{
'selector': '.box',
'style': {
'background-color': 'red',
'line-color': 'red'
}
}
]
# Restart view button
restart_view_button = dbc.Button(
"Reset view",
id='reset-cytoscape',
outline=True,
color="info",
className="mr-1",
style={
# 'right': '100%',
'marginLeft': '6px',
'zIndex': 20,
'bottom': '36px',
# 'marginTop': '-18px',
'padding': '2px 5px',
'position':'relative'
}
)
# Cytoscape colum
cyto_canvas = html.Div(
style={'flex': '1',
'position': 'relative',
'height': '100%',
},
children =[
cyto.Cytoscape(
id ='cytoscape-methods',
className = 'plot-margin cyto-canvas',
responsive = True,
maxZoom = 1.1,
minZoom = 0.7,
zoom = 1,
layout = {
'name': 'preset',
'fit': True
},
style = {
# 'position': 'absolute',
'width': '100%',
'minHeight': '550px',
# 'maxHeight': '850px',
# 'height': '550px',
# 'max-height':
'zIndex': 10,
# 'margin': 'auto'
},
elements = get_cyto_elements() ,
# stylesheet=stylesheet_cyto
),
restart_view_button
]
)
row_cyto = html.Div(
style={
'display': 'flex',
'flexDirection': 'column',
# 'height': '50%',
'width': '100%'
},
children = [
cyto_canvas
# restart_view_button
],
)
# Texts
row_markdown = dbc.Row(
className='row-title-content',
children= [
html.H2('Methodology Workflow'),
html.P([
html.I(className="fas fa-hand-point-down icon-hand-cyto"),
' ',
html.Mark([' ', html.B('Click'),' on a ']),
html.Mark(html.B('box'),
className='mark-red-cyto'),
html.Mark(' below to see details about the workflow.')
],
style={
'justifyContent': 'center',
'textAlign': 'center'
}
)
]
)
# col_container = dbc.Col(
# lg = 4, md = 12, sm = 12,
# className = 'col-container-methods',
# children = [
# row_markdown,
# row_cyto,
# ],
# )
col_contents = [
row_markdown,
row_cyto,
]
# Callbacks
@app.callback(
[
Output('cytoscape-methods', 'layout'),
Output('cytoscape-methods', 'zoom'),
Output('cytoscape-methods', 'elements'),
],
[Input('reset-cytoscape', 'n_clicks')]
)
def reset_layout(n_clicks):
layout = {'name': 'preset'}
elements = get_cyto_elements()
return [layout, 1, elements] | 0.307462 | 0.148201 |
import os, pdb
import torch
import torch.utils.data as data
import cv2, pickle
import numpy as np
from .shared import CLASSES, make_lists
np.random.seed(123)
class ActionDetection(data.Dataset):
def __init__(self, args, image_set, transform=None, normlise_boxes=None, anno_transform=None, full_test=False):
self.seq_len = args.seq_len
self.seq_gap = args.seq_gap
self.dataset =args.dataset
if full_test:
seq_gap = args.eval_gap
else:
seq_gap = args.seq_gap
self.input_type_base = args.input_type_base +'-images'
self.input_type_extra = args.input_type_extra + '-images'
self.input_frames_base = args.input_frames_base
self.input_frames_extra = args.input_frames_extra
self.fusion = args.fusion
self.root = args.data_root
self.CLASSES = CLASSES[args.dataset]
self.num_classes = len(CLASSES)
self.image_set = image_set
self.transform = transform
self.normlise_boxes = normlise_boxes
self.anno_transform = anno_transform
self.name = args.dataset
self.ids = list()
trainlist, testlist, video_list, numf_list, self.print_str = make_lists(args.dataset, self.root, self.input_type_base, seq_len=self.seq_len,
seq_gap=seq_gap, split=args.train_split, fulltest=full_test, imgs = image_set)
self.video_list = video_list
self.numf_list = numf_list
self.train_mode = False
if self.image_set == 'train':
self.ids = trainlist
self.train_mode = True
elif self.image_set == 'test':
self.ids = testlist
else:
print('spacify correct subset ')
def __getitem__(self, index):
rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mt, index = self.pull_item(index)
# rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mtaa, index
return rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mt, index
def __len__(self):
return len(self.ids)
def pull_item(self, index):
annot_info = self.ids[index]
actVidName = self.video_list[annot_info[0]]
frm_nos = annot_info[1] + 1
# print(frm_nos,self.seq_len)
assert (len(frm_nos) == self.seq_len)
labels = annot_info[2]
gtbxs = annot_info[3] # boxes are in xmin ymin width and height format
# print(gtbxs)
num_mt = len(labels)
numf = self.numf_list[annot_info[0]]
'''********** Load base input *********'''
num_input_frames = self.input_frames_base
all_frames_ids =[]
first_index = []
count = 0
# print(frm_nos, actVidName)
for fn in frm_nos:
if numf+1 <= fn + num_input_frames//2 + 1:
ef = min(numf+1, fn + num_input_frames//2 + 1)
sf = ef-num_input_frames
else:
sf = max(fn - num_input_frames//2, 1)
ef = sf+num_input_frames
frames_ids = np.arange(sf,ef)
c = 0
for f in frames_ids:
if f not in all_frames_ids:
all_frames_ids.append(f)
c+=1
if count == 0:
first_index.append(0)
else:
first_index.append(c)
first_index[count] += first_index[count-1]
count += 1
img_path = '{}{}/{}'.format(self.root,self.input_type_base, actVidName)
imgs = []
for fn in all_frames_ids:
img_file = '{:s}/{:05d}.jpg'.format(img_path, int(fn))
#print(img_file)
img = cv2.imread(img_file)
height, width, _ = img.shape
imgs.append(img)
num_base_images = len(imgs)
'''******* Load extra input *******'''
if self.fusion:
num_input_frames = self.input_frames_extra
all_frames_ids = []
first_index = []
count = 0
# print(frm_nos, actVidName)
for fn in frm_nos:
if numf + 1 <= fn + num_input_frames // 2 + 1:
ef = min(numf + 1, fn + num_input_frames // 2 + 1)
sf = ef - num_input_frames
else:
sf = max(fn - num_input_frames // 2, 1)
ef = sf + num_input_frames
frames_ids = np.arange(sf, ef)
c = 0
for f in frames_ids:
if f not in all_frames_ids:
all_frames_ids.append(f)
c += 1
if count == 0:
first_index.append(0)
else:
first_index.append(c)
first_index[count] += first_index[count - 1]
count += 1
img_path = '{}{}/{}'.format(self.root, self.input_type_extra, actVidName)
img = 0
for fn in all_frames_ids:
img_file = '{:s}/{:05d}.jpg'.format(img_path, int(fn))
# print(img_file)
img = cv2.imread(img_file)
height, width, _ = img.shape
imgs.append(img)
height, width, _ = img.shape
imgs = np.asarray(imgs)
#print('imgs shape ', imgs.shape)
if self.dataset in ['ucf24','jhmdb21']:
boxes_norm = self.normlise_boxes(gtbxs, width, height, labels, num_mt, self.seq_len)
else:
boxes_norm = self.normlise_boxes(gtbxs, 1.0, 1.0, labels, num_mt, self.seq_len)
# normaized gt boxes ---> [xmin ymin xmax ymax label]
boxes_norm = np.array(boxes_norm, dtype=np.float32) # converting from list numpy array
# pdb.set_trace()
# print(boxes_norm)
if self.image_set == 'train':
aug_imgs, aug_bxs, labels = self.transform(imgs, boxes_norm[:, :4], boxes_norm[:, 4], self.seq_len,
num_mt) # calling SSDAugmentation
else:
aug_imgs, aug_bxs, labels = self.transform(imgs, boxes_norm[:, :4], boxes_norm[:, 4]) # calling BaseTransform
labels = labels.astype(np.int64)
num_bxs = aug_bxs.shape[0]
# number of micro tubes after augmentation -- recall after augmentation some micro tubes may be discarded
# so don't confuse with num_mta and num_mt they are different
num_mtaa = int(num_bxs / self.seq_len) # num_mtaa - num micro tube after augmentation
assert num_mtaa >0
# aug_imgs is in [seq_len x H x W x C] (0,1,2,3) ---> so converting from RGB (0,1,2) to BGR along 4-th dim
aug_imgs = aug_imgs[:, :, :, (2, 1, 0)]
# print('NUm of frame loaded and and required ', aug_imgs.shape[0], ' ', num_input_frames)
rgb_images = aug_imgs[:num_base_images]
if self.input_frames_base > 1:
images = []
for s in range(self.seq_len):
sf = first_index[s]
#print(sf)
img_stack = rgb_images[sf:sf+num_input_frames,:,:,:]
img_stack = torch.from_numpy(img_stack).permute(0, 3, 1, 2).contiguous()
images.append(img_stack.view(-1, img_stack.size(2), img_stack.size(3)))
#print(images[s].size())
rgb_images = torch.stack(images, 0)
else:
rgb_images = torch.from_numpy(rgb_images).permute(0, 3, 1, 2)
flow_images = torch.zeros(1,1,1)
if self.fusion:
flow_images = aug_imgs[num_base_images:]
if self.input_frames_extra > 1:
images = []
for s in range(self.seq_len):
sf = first_index[s]
#print(sf)
img_stack = flow_images[sf:sf+num_input_frames,:,:,:]
img_stack = torch.from_numpy(img_stack).permute(0, 3, 1, 2).contiguous()
images.append(img_stack.view(-1, img_stack.size(2), img_stack.size(3)))
#print(images[s].size())
flow_images = torch.stack(images, 0)
else:
flow_images = torch.from_numpy(flow_images).permute(0, 3, 1, 2)
aug_bxsl = np.hstack((aug_bxs, np.expand_dims(labels, axis=1)))
prior_labels, prior_gt_locations = torch.rand(1,2), torch.rand(2)
if self.train_mode and self.anno_transform:
prior_labels, prior_gt_locations = self.anno_transform(aug_bxs, labels, num_mtaa)
return rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mtaa, index
def detection_collate(batch):
targets = []
rgb_imgs = []
flow_imgs = []
prior_labels = []
prior_gt_locations = []
num_mt = []
image_ids = []
# fno = []
# rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mt, index
for sample in batch:
rgb_imgs.append(sample[0])
flow_imgs.append(sample[1])
targets.append(torch.FloatTensor(sample[2]))
prior_labels.append(sample[3])
prior_gt_locations.append(sample[4])
num_mt.append(sample[5])
image_ids.append(sample[6])
rgb_imgs = torch.stack(rgb_imgs, 0)
if flow_imgs[0].size(2)>1:
flow_imgs = torch.stack(flow_imgs, 0)
# images, ground_truths, _ , _, num_mt, img_indexs
return [rgb_imgs, flow_imgs], targets, torch.stack(prior_labels), torch.stack(prior_gt_locations), num_mt, image_ids | data/dataset.py | import os, pdb
import torch
import torch.utils.data as data
import cv2, pickle
import numpy as np
from .shared import CLASSES, make_lists
np.random.seed(123)
class ActionDetection(data.Dataset):
def __init__(self, args, image_set, transform=None, normlise_boxes=None, anno_transform=None, full_test=False):
self.seq_len = args.seq_len
self.seq_gap = args.seq_gap
self.dataset =args.dataset
if full_test:
seq_gap = args.eval_gap
else:
seq_gap = args.seq_gap
self.input_type_base = args.input_type_base +'-images'
self.input_type_extra = args.input_type_extra + '-images'
self.input_frames_base = args.input_frames_base
self.input_frames_extra = args.input_frames_extra
self.fusion = args.fusion
self.root = args.data_root
self.CLASSES = CLASSES[args.dataset]
self.num_classes = len(CLASSES)
self.image_set = image_set
self.transform = transform
self.normlise_boxes = normlise_boxes
self.anno_transform = anno_transform
self.name = args.dataset
self.ids = list()
trainlist, testlist, video_list, numf_list, self.print_str = make_lists(args.dataset, self.root, self.input_type_base, seq_len=self.seq_len,
seq_gap=seq_gap, split=args.train_split, fulltest=full_test, imgs = image_set)
self.video_list = video_list
self.numf_list = numf_list
self.train_mode = False
if self.image_set == 'train':
self.ids = trainlist
self.train_mode = True
elif self.image_set == 'test':
self.ids = testlist
else:
print('spacify correct subset ')
def __getitem__(self, index):
rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mt, index = self.pull_item(index)
# rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mtaa, index
return rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mt, index
def __len__(self):
return len(self.ids)
def pull_item(self, index):
annot_info = self.ids[index]
actVidName = self.video_list[annot_info[0]]
frm_nos = annot_info[1] + 1
# print(frm_nos,self.seq_len)
assert (len(frm_nos) == self.seq_len)
labels = annot_info[2]
gtbxs = annot_info[3] # boxes are in xmin ymin width and height format
# print(gtbxs)
num_mt = len(labels)
numf = self.numf_list[annot_info[0]]
'''********** Load base input *********'''
num_input_frames = self.input_frames_base
all_frames_ids =[]
first_index = []
count = 0
# print(frm_nos, actVidName)
for fn in frm_nos:
if numf+1 <= fn + num_input_frames//2 + 1:
ef = min(numf+1, fn + num_input_frames//2 + 1)
sf = ef-num_input_frames
else:
sf = max(fn - num_input_frames//2, 1)
ef = sf+num_input_frames
frames_ids = np.arange(sf,ef)
c = 0
for f in frames_ids:
if f not in all_frames_ids:
all_frames_ids.append(f)
c+=1
if count == 0:
first_index.append(0)
else:
first_index.append(c)
first_index[count] += first_index[count-1]
count += 1
img_path = '{}{}/{}'.format(self.root,self.input_type_base, actVidName)
imgs = []
for fn in all_frames_ids:
img_file = '{:s}/{:05d}.jpg'.format(img_path, int(fn))
#print(img_file)
img = cv2.imread(img_file)
height, width, _ = img.shape
imgs.append(img)
num_base_images = len(imgs)
'''******* Load extra input *******'''
if self.fusion:
num_input_frames = self.input_frames_extra
all_frames_ids = []
first_index = []
count = 0
# print(frm_nos, actVidName)
for fn in frm_nos:
if numf + 1 <= fn + num_input_frames // 2 + 1:
ef = min(numf + 1, fn + num_input_frames // 2 + 1)
sf = ef - num_input_frames
else:
sf = max(fn - num_input_frames // 2, 1)
ef = sf + num_input_frames
frames_ids = np.arange(sf, ef)
c = 0
for f in frames_ids:
if f not in all_frames_ids:
all_frames_ids.append(f)
c += 1
if count == 0:
first_index.append(0)
else:
first_index.append(c)
first_index[count] += first_index[count - 1]
count += 1
img_path = '{}{}/{}'.format(self.root, self.input_type_extra, actVidName)
img = 0
for fn in all_frames_ids:
img_file = '{:s}/{:05d}.jpg'.format(img_path, int(fn))
# print(img_file)
img = cv2.imread(img_file)
height, width, _ = img.shape
imgs.append(img)
height, width, _ = img.shape
imgs = np.asarray(imgs)
#print('imgs shape ', imgs.shape)
if self.dataset in ['ucf24','jhmdb21']:
boxes_norm = self.normlise_boxes(gtbxs, width, height, labels, num_mt, self.seq_len)
else:
boxes_norm = self.normlise_boxes(gtbxs, 1.0, 1.0, labels, num_mt, self.seq_len)
# normaized gt boxes ---> [xmin ymin xmax ymax label]
boxes_norm = np.array(boxes_norm, dtype=np.float32) # converting from list numpy array
# pdb.set_trace()
# print(boxes_norm)
if self.image_set == 'train':
aug_imgs, aug_bxs, labels = self.transform(imgs, boxes_norm[:, :4], boxes_norm[:, 4], self.seq_len,
num_mt) # calling SSDAugmentation
else:
aug_imgs, aug_bxs, labels = self.transform(imgs, boxes_norm[:, :4], boxes_norm[:, 4]) # calling BaseTransform
labels = labels.astype(np.int64)
num_bxs = aug_bxs.shape[0]
# number of micro tubes after augmentation -- recall after augmentation some micro tubes may be discarded
# so don't confuse with num_mta and num_mt they are different
num_mtaa = int(num_bxs / self.seq_len) # num_mtaa - num micro tube after augmentation
assert num_mtaa >0
# aug_imgs is in [seq_len x H x W x C] (0,1,2,3) ---> so converting from RGB (0,1,2) to BGR along 4-th dim
aug_imgs = aug_imgs[:, :, :, (2, 1, 0)]
# print('NUm of frame loaded and and required ', aug_imgs.shape[0], ' ', num_input_frames)
rgb_images = aug_imgs[:num_base_images]
if self.input_frames_base > 1:
images = []
for s in range(self.seq_len):
sf = first_index[s]
#print(sf)
img_stack = rgb_images[sf:sf+num_input_frames,:,:,:]
img_stack = torch.from_numpy(img_stack).permute(0, 3, 1, 2).contiguous()
images.append(img_stack.view(-1, img_stack.size(2), img_stack.size(3)))
#print(images[s].size())
rgb_images = torch.stack(images, 0)
else:
rgb_images = torch.from_numpy(rgb_images).permute(0, 3, 1, 2)
flow_images = torch.zeros(1,1,1)
if self.fusion:
flow_images = aug_imgs[num_base_images:]
if self.input_frames_extra > 1:
images = []
for s in range(self.seq_len):
sf = first_index[s]
#print(sf)
img_stack = flow_images[sf:sf+num_input_frames,:,:,:]
img_stack = torch.from_numpy(img_stack).permute(0, 3, 1, 2).contiguous()
images.append(img_stack.view(-1, img_stack.size(2), img_stack.size(3)))
#print(images[s].size())
flow_images = torch.stack(images, 0)
else:
flow_images = torch.from_numpy(flow_images).permute(0, 3, 1, 2)
aug_bxsl = np.hstack((aug_bxs, np.expand_dims(labels, axis=1)))
prior_labels, prior_gt_locations = torch.rand(1,2), torch.rand(2)
if self.train_mode and self.anno_transform:
prior_labels, prior_gt_locations = self.anno_transform(aug_bxs, labels, num_mtaa)
return rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mtaa, index
def detection_collate(batch):
targets = []
rgb_imgs = []
flow_imgs = []
prior_labels = []
prior_gt_locations = []
num_mt = []
image_ids = []
# fno = []
# rgb_images, flow_images, aug_bxsl, prior_labels, prior_gt_locations, num_mt, index
for sample in batch:
rgb_imgs.append(sample[0])
flow_imgs.append(sample[1])
targets.append(torch.FloatTensor(sample[2]))
prior_labels.append(sample[3])
prior_gt_locations.append(sample[4])
num_mt.append(sample[5])
image_ids.append(sample[6])
rgb_imgs = torch.stack(rgb_imgs, 0)
if flow_imgs[0].size(2)>1:
flow_imgs = torch.stack(flow_imgs, 0)
# images, ground_truths, _ , _, num_mt, img_indexs
return [rgb_imgs, flow_imgs], targets, torch.stack(prior_labels), torch.stack(prior_gt_locations), num_mt, image_ids | 0.14851 | 0.256279 |
import torch
from torch import nn
from pysc2.lib import actions, features
from env_wrapper_abstract.env_wrapper import EnvWrapperAbstract
import numpy as np
from math import sqrt
FUNCTIONS = actions.FUNCTIONS
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_PLAYER_NEUTRAL = features.PlayerRelative.NEUTRAL # beacon/minerals
class EnvWrapper(EnvWrapperAbstract):
def __init__(self, env, device, scripted_agent = False):
self.env = env
self.device = device
self.ae_batch_len = 1000 # Gather 1000 frames for ae training
self.ae_batch = np.empty((self.ae_batch_len, 1, 32, 32),dtype=np.float32)
self.ae_index = 0
self.screen_size = 32
self.scripted = scripted_agent
self.reward = 0
self.episode = 0
self.step_num = 0
self.total_steps = 0
self.duration = 0
self.done = False
def get_action(self, state, model, epsilon, train):
# greedy
if np.random.rand() > epsilon.value():
state = torch.from_numpy(state).to(self.device).unsqueeze(0).float()
with torch.no_grad():
action = model(state).detach().cpu().data.numpy().squeeze()
action = np.argmax(action)
# explore
else:
target = np.random.randint(0, self.screen_size, size=2)
action = target[0] * self.screen_size + target[1]
if train:
epsilon.increment()
return action
def get_state(self, state, reduce_dim, reduction_component, pca, ae, latent_space, train_online):
state = state.observation.feature_screen.player_relative
if reduce_dim:
if train_online:
self.add_obs_to_ae_batch(state) # add last 84 x 84 frame to ae training batch
if self.ae_index >= self.ae_batch_len: # Train AE on gathered frames
reduction_component.train_on_trace(self.ae_batch)
self.ae_batch = np.empty((self.ae_batch_len, 1, self.screen_size, self.screen_size),dtype=np.float32)
self.ae_index = 0
state = self.reduce_dim(state, reduction_component, pca, ae, latent_space) # Reduce dim of last frame
return state # Return 1 x 32 x 32 or 1 x 16 x 16 state
def step(self, action):
self.total_steps += 1
self.step_num += 1
obs, reward, done, _ = self.env.step(action)
self.done = done
return obs, reward, done
def reset(self):
self.done = False
self.resetted = True
self.step_num = 0
self.reward = 0
self.episode += 1
self.env.reset()
return self.env.step([self.select_friendly_action()])[0] # Return state after selecting army unit
def close(self):
self.env.close()
def is_last_obs(self):
return self.done
def reduce_dim(self, state, reduction_component, pca, ae, latent_space):
if ae:
state = torch.tensor(state, dtype=torch.float, device=self.device).unsqueeze(0).unsqueeze(0) # Reshape to 1 X 1 X 84 X 84
state = reduction_component.state_dim_reduction(state)
return state.detach().cpu().numpy()
if pca:
state = reduction_component.state_dim_reduction(state)
return np.reshape(state, (1, int(sqrt(latent_space)), int(sqrt(latent_space))))
def add_obs_to_ae_batch(self, state):
if self.ae_index < self.ae_batch.shape[0]:
self.ae_batch[self.ae_index] = np.expand_dims(state, axis=0)
self.ae_index += 1
def get_loss(s,a,s_1,r, policy_network, target_network, gamma, multi_step):
s_q = policy_network(s)
s_1_q = policy_network(s_1)
s_1_target_q = target_network(s_1)
selected_q = s_q.gather(1, a).squeeze(-1)
s_1_q_max = s_1_q.max(1)[1]
s_1_q_max.detach()
s_1_target = s_1_target_q.gather(1, s_1_q_max[:,None]).squeeze(-1).detach()
expected_q = r + (gamma ** multi_step) * s_1_target
loss = nn.MSELoss(selected_q, expected_q)
return loss
def select_friendly_action(self):
return FUNCTIONS.select_army("select")
def get_env_action(self, action, obs):
def _xy_locs(mask):
"""Mask should be a set of bools from comparison with a feature layer."""
y, x = mask.nonzero()
return list(zip(x, y))
if self.scripted:
player_relative = obs.observation.feature_screen.player_relative
beacon = _xy_locs(player_relative == _PLAYER_NEUTRAL)
beacon_center = np.mean(beacon, axis=0).round()
return FUNCTIONS.Move_screen("now", beacon_center)
action = np.unravel_index(action, [self.screen_size, self.screen_size])
target = [action[1], action[0]]
command = _MOVE_SCREEN
if command in obs.observation.available_actions:
return actions.FunctionCall(command, [[0],target])
else:
return actions.FunctionCall(_NO_OP, []) | env_pysc2/env_wrapper.py | import torch
from torch import nn
from pysc2.lib import actions, features
from env_wrapper_abstract.env_wrapper import EnvWrapperAbstract
import numpy as np
from math import sqrt
FUNCTIONS = actions.FUNCTIONS
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_PLAYER_NEUTRAL = features.PlayerRelative.NEUTRAL # beacon/minerals
class EnvWrapper(EnvWrapperAbstract):
def __init__(self, env, device, scripted_agent = False):
self.env = env
self.device = device
self.ae_batch_len = 1000 # Gather 1000 frames for ae training
self.ae_batch = np.empty((self.ae_batch_len, 1, 32, 32),dtype=np.float32)
self.ae_index = 0
self.screen_size = 32
self.scripted = scripted_agent
self.reward = 0
self.episode = 0
self.step_num = 0
self.total_steps = 0
self.duration = 0
self.done = False
def get_action(self, state, model, epsilon, train):
# greedy
if np.random.rand() > epsilon.value():
state = torch.from_numpy(state).to(self.device).unsqueeze(0).float()
with torch.no_grad():
action = model(state).detach().cpu().data.numpy().squeeze()
action = np.argmax(action)
# explore
else:
target = np.random.randint(0, self.screen_size, size=2)
action = target[0] * self.screen_size + target[1]
if train:
epsilon.increment()
return action
def get_state(self, state, reduce_dim, reduction_component, pca, ae, latent_space, train_online):
state = state.observation.feature_screen.player_relative
if reduce_dim:
if train_online:
self.add_obs_to_ae_batch(state) # add last 84 x 84 frame to ae training batch
if self.ae_index >= self.ae_batch_len: # Train AE on gathered frames
reduction_component.train_on_trace(self.ae_batch)
self.ae_batch = np.empty((self.ae_batch_len, 1, self.screen_size, self.screen_size),dtype=np.float32)
self.ae_index = 0
state = self.reduce_dim(state, reduction_component, pca, ae, latent_space) # Reduce dim of last frame
return state # Return 1 x 32 x 32 or 1 x 16 x 16 state
def step(self, action):
self.total_steps += 1
self.step_num += 1
obs, reward, done, _ = self.env.step(action)
self.done = done
return obs, reward, done
def reset(self):
self.done = False
self.resetted = True
self.step_num = 0
self.reward = 0
self.episode += 1
self.env.reset()
return self.env.step([self.select_friendly_action()])[0] # Return state after selecting army unit
def close(self):
self.env.close()
def is_last_obs(self):
return self.done
def reduce_dim(self, state, reduction_component, pca, ae, latent_space):
if ae:
state = torch.tensor(state, dtype=torch.float, device=self.device).unsqueeze(0).unsqueeze(0) # Reshape to 1 X 1 X 84 X 84
state = reduction_component.state_dim_reduction(state)
return state.detach().cpu().numpy()
if pca:
state = reduction_component.state_dim_reduction(state)
return np.reshape(state, (1, int(sqrt(latent_space)), int(sqrt(latent_space))))
def add_obs_to_ae_batch(self, state):
if self.ae_index < self.ae_batch.shape[0]:
self.ae_batch[self.ae_index] = np.expand_dims(state, axis=0)
self.ae_index += 1
def get_loss(s,a,s_1,r, policy_network, target_network, gamma, multi_step):
s_q = policy_network(s)
s_1_q = policy_network(s_1)
s_1_target_q = target_network(s_1)
selected_q = s_q.gather(1, a).squeeze(-1)
s_1_q_max = s_1_q.max(1)[1]
s_1_q_max.detach()
s_1_target = s_1_target_q.gather(1, s_1_q_max[:,None]).squeeze(-1).detach()
expected_q = r + (gamma ** multi_step) * s_1_target
loss = nn.MSELoss(selected_q, expected_q)
return loss
    def select_friendly_action(self):
        """Return the pysc2 action that selects the player's whole army."""
        return FUNCTIONS.select_army("select")
def get_env_action(self, action, obs):
def _xy_locs(mask):
"""Mask should be a set of bools from comparison with a feature layer."""
y, x = mask.nonzero()
return list(zip(x, y))
if self.scripted:
player_relative = obs.observation.feature_screen.player_relative
beacon = _xy_locs(player_relative == _PLAYER_NEUTRAL)
beacon_center = np.mean(beacon, axis=0).round()
return FUNCTIONS.Move_screen("now", beacon_center)
action = np.unravel_index(action, [self.screen_size, self.screen_size])
target = [action[1], action[0]]
command = _MOVE_SCREEN
if command in obs.observation.available_actions:
return actions.FunctionCall(command, [[0],target])
else:
return actions.FunctionCall(_NO_OP, []) | 0.846006 | 0.422803 |
from types import *
_dir = lambda o: o.__dict__.keys()
def _get_type_dict(object):
    """Classify the attributes of *object* by runtime type.

    Returns a dict with three keys:
      'instance' -- names bound to old-style class instances (child nodes),
      'list'     -- names bound to lists (repeated child nodes),
      'attr'     -- everything else (emitted as XML attributes).

    NOTE: InstanceType/ListType come from the Python 2 `types` module, so
    this function is Python 2 only.
    """
    # create a dict with keys: attr, instance, list
    type_dict = {'attr': [], 'instance': [], 'list': []}
    for membname in _dir(object):
        # __parent__ is bookkeeping added by gnosis.xml.objectify; skip it.
        if membname == '__parent__':
            continue
        object_type = type(getattr(object, membname))
        if object_type == InstanceType:
            type_dict['instance'].append(membname)
        elif object_type == ListType:
            type_dict['list'].append(membname)
        else:
            type_dict['attr'].append(membname)
    return type_dict
def XML_printer(object, level=0, type_dict=None):
    """Serialize an objectified XML node back into an XML string.

    Scalar attributes become XML attributes; nested instances and lists of
    instances become child elements, indented 3 spaces per nesting level.
    The top-level (level 0) call emits only the children, not the node's
    own tag -- the caller is expected to have opened the tag.

    NOTE(review): attribute values are interpolated without escaping, so
    values containing '"', '<' or '&' produce malformed XML.
    """
    INDENT=3
    descript = ''
    if type_dict is None:
        type_dict = _get_type_dict(object)
    if type_dict['attr']:
        for attr_name in type_dict['attr']:
            attr_value = getattr(object, attr_name)
            descript = descript + (' '+ attr_name +'="'+attr_value+'"')
    if level != 0:
        # Close the opening tag started by our caller: '>' if children
        # follow, self-closing '/>' otherwise.
        if type_dict['instance'] or type_dict['list']:
            descript = descript + '>\n'
        else:
            descript = descript + '/>\n'
    for instance_name in type_dict['instance']:
        instance = getattr(object, instance_name)
        descript = descript + (' '*level)+'<'+instance_name
        # look ahead to see if the next instance contains lists or instances
        nested_type_dict = _get_type_dict(instance)
        descript = descript + XML_printer(instance, level+INDENT,
                                          nested_type_dict)
        # Only emit a closing tag if the recursive call left the tag open.
        if nested_type_dict['instance'] or nested_type_dict['list']:
            descript = descript + (' '*level)+'</'+instance_name+'>\n'
    for list_name in type_dict['list']:
        inst_list = getattr(object, list_name)
        # Repeated children share the same element name.
        for instance in inst_list:
            descript = descript + (' '*level)+'<'+list_name
            nested_type_dict = _get_type_dict(instance)
            descript = descript + XML_printer(instance, level+INDENT,
                                              nested_type_dict)
            if nested_type_dict['instance'] or nested_type_dict['list']:
                descript = descript + (' '*level)+'</'+list_name+'>\n'
    return descript
if __name__ == '__main__':
    import sys
    from gnosis.xml.objectify import XML_Objectify, EXPAT
    # Objectify the XML file named on the command line, then round-trip it
    # back to XML on stdout.
    xml_obj = XML_Objectify(sys.argv[1], EXPAT)
    pyobj = xml_obj.make_instance()
    # BUG FIX: parenthesized print works on both Python 2 and 3; the bare
    # print statement is a syntax error under Python 3.
    print(XML_printer(pyobj))
from types import *
_dir = lambda o: o.__dict__.keys()
def _get_type_dict(object):
    """Classify the attributes of *object* by runtime type.

    Returns a dict with three keys:
      'instance' -- names bound to old-style class instances (child nodes),
      'list'     -- names bound to lists (repeated child nodes),
      'attr'     -- everything else (emitted as XML attributes).

    NOTE: InstanceType/ListType come from the Python 2 `types` module, so
    this function is Python 2 only.
    """
    # create a dict with keys: attr, instance, list
    type_dict = {'attr': [], 'instance': [], 'list': []}
    for membname in _dir(object):
        # __parent__ is bookkeeping added by gnosis.xml.objectify; skip it.
        if membname == '__parent__':
            continue
        object_type = type(getattr(object, membname))
        if object_type == InstanceType:
            type_dict['instance'].append(membname)
        elif object_type == ListType:
            type_dict['list'].append(membname)
        else:
            type_dict['attr'].append(membname)
    return type_dict
def XML_printer(object, level=0, type_dict=None):
    """Serialize an objectified XML node back into an XML string.

    Scalar attributes become XML attributes; nested instances and lists of
    instances become child elements, indented 3 spaces per nesting level.
    The top-level (level 0) call emits only the children, not the node's
    own tag -- the caller is expected to have opened the tag.

    NOTE(review): attribute values are interpolated without escaping, so
    values containing '"', '<' or '&' produce malformed XML.
    """
    INDENT=3
    descript = ''
    if type_dict is None:
        type_dict = _get_type_dict(object)
    if type_dict['attr']:
        for attr_name in type_dict['attr']:
            attr_value = getattr(object, attr_name)
            descript = descript + (' '+ attr_name +'="'+attr_value+'"')
    if level != 0:
        # Close the opening tag started by our caller: '>' if children
        # follow, self-closing '/>' otherwise.
        if type_dict['instance'] or type_dict['list']:
            descript = descript + '>\n'
        else:
            descript = descript + '/>\n'
    for instance_name in type_dict['instance']:
        instance = getattr(object, instance_name)
        descript = descript + (' '*level)+'<'+instance_name
        # look ahead to see if the next instance contains lists or instances
        nested_type_dict = _get_type_dict(instance)
        descript = descript + XML_printer(instance, level+INDENT,
                                          nested_type_dict)
        # Only emit a closing tag if the recursive call left the tag open.
        if nested_type_dict['instance'] or nested_type_dict['list']:
            descript = descript + (' '*level)+'</'+instance_name+'>\n'
    for list_name in type_dict['list']:
        inst_list = getattr(object, list_name)
        # Repeated children share the same element name.
        for instance in inst_list:
            descript = descript + (' '*level)+'<'+list_name
            nested_type_dict = _get_type_dict(instance)
            descript = descript + XML_printer(instance, level+INDENT,
                                              nested_type_dict)
            if nested_type_dict['instance'] or nested_type_dict['list']:
                descript = descript + (' '*level)+'</'+list_name+'>\n'
    return descript
if __name__ == '__main__':
    import sys
    from gnosis.xml.objectify import XML_Objectify, EXPAT
    # Objectify the XML file named on the command line, then round-trip it
    # back to XML on stdout.
    xml_obj = XML_Objectify(sys.argv[1], EXPAT)
    pyobj = xml_obj.make_instance()
    # BUG FIX: parenthesized print works on both Python 2 and 3; the bare
    # print statement is a syntax error under Python 3.
    print(XML_printer(pyobj))
from random import random
from normals import inverse_cumulative_normal
from sys import version_info
# Python 3 removed xrange; alias it so the generator code below runs on 2 and 3.
if version_info[0] == 3:
    xrange = range
class RandomBase(object):
    """
    Abstract base for random-number streams of dimension `dim`.

    Uniform variates come from the stdlib random(); Gaussian variates are
    produced by pushing them through normals.inverse_cumulative_normal().
    Both accessors return lazy generators of length-N sequences of lists.
    """

    def __init__(self, dim):
        self.dim = dim

    def get_uniforms(self, N):
        """Lazily yield N lists of `dim` uniforms on [0, 1)."""
        if self.dim == 1:
            return ([random()] for _ in xrange(N))
        return ([random() for _ in xrange(self.dim)] for _ in xrange(N))

    def get_gaussians(self, N):
        """Lazily yield N lists of `dim` standard normal variates."""
        if self.dim == 1:
            return ([inverse_cumulative_normal(u[0])] for u in self.get_uniforms(N))
        return ([inverse_cumulative_normal(u) for u in v] for v in self.get_uniforms(N))
class ParkMiller(object):
    """
    Minimal-standard (Park-Miller) linear congruential generator.

    stream(N) yields N pseudo-random integers in [1, 2147483646], using
    Schrage's decomposition so a*seed never overflows.
    """

    def __init__(self, seed=1):
        self._const_a = 16807
        self._const_m = 2147483647
        self._const_q = 127773
        self._const_r = 2836
        # A zero seed would lock the generator at zero; force it positive.
        self._seed = max(int(seed), 1)
        self.maximum = self._const_m - 1
        self.minimum = 1

    def stream(self, N):
        """Yield the next N raw integers of the sequence."""
        a, m, q, r = self._const_a, self._const_m, self._const_q, self._const_r
        emitted = 0
        while emitted < N:
            quotient = self._seed // q  # // ensures integer division for Python 3.
            self._seed = a * (self._seed - quotient * q) - quotient * r
            if self._seed < 0:
                self._seed += m
            yield self._seed
            emitted += 1
class RandomParkMiller(RandomBase):
    """
    RandomBase implementation whose uniforms come from a ParkMiller LCG.
    """

    def __init__(self, dim, seed=1):
        self.dim = dim
        self._seed = seed
        self._pm = ParkMiller(seed)
        # Scale factor mapping the integer range [1, maximum] into (0, 1).
        self._r = 1/(1. + self._pm.maximum)

    def get_uniforms(self, N):
        """Lazily yield N lists of `dim` uniforms in (0, 1)."""
        if self.dim == 1:
            return ([x * self._r] for x in self._pm.stream(N))
        else:
            return ([x * self._r for x in self._pm.stream(self.dim)] for i in xrange(N))

    def skip(self, nPaths):
        """Advance the stream by nPaths draws without using them."""
        for i in self.get_uniforms(nPaths):
            pass

    def reset(self):
        """Restart the stream from the current seed."""
        self._pm = ParkMiller(self.seed)

    def _get_seed(self):
        return self._seed

    def _set_seed(self, seed):
        self._seed = seed
        # BUG FIX: reset() takes no arguments; the original called
        # self.reset(seed), raising TypeError whenever the `seed`
        # property was assigned.
        self.reset()

    seed = property(_get_seed, _set_seed)
class AntiThetic(RandomBase):
    """
    Anti-thetic sampling wrapper around another RandomBase generator.

    For every vector v drawn from the wrapped generator it also yields
    1 - v (variance reduction). Only even stream lengths are supported:
    N // 2 pairs are produced.
    """

    def __init__(self, base):
        self._base = base
        self._oddEven = True

    def get_uniforms(self, N):
        if self.dim == 1:
            for v in self._base.get_uniforms(N // 2):  # the argument must be an 'int' in Python3
                yield v
                yield [1-v[0]]
        else:
            for v in self._base.get_uniforms(N // 2):  # the argument must be an 'int' in Python3
                yield v
                yield [1-x for x in v]

    def _set_seed(self, seed):
        self._base.seed = seed
        self._oddEven = True

    def skip(self, nPaths):
        # BUG FIX: floor division -- `/` yields a float under Python 3,
        # which the underlying generator is not guaranteed to accept.
        self._base.skip(nPaths // 2)

    def reset(self):
        self._base.reset()
        self._oddEven = True

    def _get_dim(self):
        return self._base.dim

    def _setDim(self, dim):
        self._base.dim = dim

    dim = property(_get_dim, _setDim)
    # BUG FIX: _set_seed was defined but never bound to a property, so
    # assigning `.seed` silently created a plain attribute instead of
    # reseeding the wrapped generator (cf. RandomParkMiller.seed).
    seed = property(lambda self: self._base.seed, _set_seed)
def loop(N, s):
    """
    Yield N successive values of a modulo-s counter: 0, 1, ..., s-1, 0, ...
    """
    counter = 0
    while counter < N:
        yield counter % s
        counter += 1
class SimpleStratifiedPM(RandomBase):
    """
    One-dimensional stratified sampler over `segments` equal strata,
    driven by a RandomParkMiller stream. Forces self.dim = 1.
    """

    def __init__(self, seed=1, segments=2**8):
        self.dim = 1
        self._rpm = RandomParkMiller(1, seed)
        self._seed = seed
        self._segments = segments

    def get_uniforms(self, N):
        """Yield N singletons; the i-th draw lies in stratum i % segments."""
        strata = self._segments
        return ([(stratum + u[0]) / strata]
                for stratum, u in zip(loop(N, strata), self._rpm.get_uniforms(N)))
# Testing
if __name__ == "__main__":
    # Smoke test: the anti-thetic stratified sampler should give a mean
    # near 0.5 and a variance well below the 1/12 of plain uniforms.
    sampler = AntiThetic(SimpleStratifiedPM(1, 16))
    draws = 2**8
    values = [u[0] for u in sampler.get_uniforms(draws)]
    sample_mean = sum(values) / draws
    sample_var = sum((v - sample_mean)**2 for v in values) / draws
    print("mean = %f" % sample_mean)
    print("variance = %f" % sample_var)
from random import random
from normals import inverse_cumulative_normal
from sys import version_info
# Python 3 removed xrange; alias it so the generator code below runs on 2 and 3.
if version_info[0] == 3:
    xrange = range
class RandomBase(object):
    """
    Abstract base for random-number streams of dimension `dim`.

    Uniform variates come from the stdlib random(); Gaussian variates are
    produced by pushing them through normals.inverse_cumulative_normal().
    Both accessors return lazy generators of length-N sequences of lists.
    """

    def __init__(self, dim):
        self.dim = dim

    def get_uniforms(self, N):
        """Lazily yield N lists of `dim` uniforms on [0, 1)."""
        if self.dim == 1:
            return ([random()] for _ in xrange(N))
        return ([random() for _ in xrange(self.dim)] for _ in xrange(N))

    def get_gaussians(self, N):
        """Lazily yield N lists of `dim` standard normal variates."""
        if self.dim == 1:
            return ([inverse_cumulative_normal(u[0])] for u in self.get_uniforms(N))
        return ([inverse_cumulative_normal(u) for u in v] for v in self.get_uniforms(N))
class ParkMiller(object):
    """
    Minimal-standard (Park-Miller) linear congruential generator.

    stream(N) yields N pseudo-random integers in [1, 2147483646], using
    Schrage's decomposition so a*seed never overflows.
    """

    def __init__(self, seed=1):
        self._const_a = 16807
        self._const_m = 2147483647
        self._const_q = 127773
        self._const_r = 2836
        # A zero seed would lock the generator at zero; force it positive.
        self._seed = max(int(seed), 1)
        self.maximum = self._const_m - 1
        self.minimum = 1

    def stream(self, N):
        """Yield the next N raw integers of the sequence."""
        a, m, q, r = self._const_a, self._const_m, self._const_q, self._const_r
        emitted = 0
        while emitted < N:
            quotient = self._seed // q  # // ensures integer division for Python 3.
            self._seed = a * (self._seed - quotient * q) - quotient * r
            if self._seed < 0:
                self._seed += m
            yield self._seed
            emitted += 1
class RandomParkMiller(RandomBase):
    """
    RandomBase implementation whose uniforms come from a ParkMiller LCG.
    """

    def __init__(self, dim, seed=1):
        self.dim = dim
        self._seed = seed
        self._pm = ParkMiller(seed)
        # Scale factor mapping the integer range [1, maximum] into (0, 1).
        self._r = 1/(1. + self._pm.maximum)

    def get_uniforms(self, N):
        """Lazily yield N lists of `dim` uniforms in (0, 1)."""
        if self.dim == 1:
            return ([x * self._r] for x in self._pm.stream(N))
        else:
            return ([x * self._r for x in self._pm.stream(self.dim)] for i in xrange(N))

    def skip(self, nPaths):
        """Advance the stream by nPaths draws without using them."""
        for i in self.get_uniforms(nPaths):
            pass

    def reset(self):
        """Restart the stream from the current seed."""
        self._pm = ParkMiller(self.seed)

    def _get_seed(self):
        return self._seed

    def _set_seed(self, seed):
        self._seed = seed
        # BUG FIX: reset() takes no arguments; the original called
        # self.reset(seed), raising TypeError whenever the `seed`
        # property was assigned.
        self.reset()

    seed = property(_get_seed, _set_seed)
class AntiThetic(RandomBase):
    """
    Anti-thetic sampling wrapper around another RandomBase generator.

    For every vector v drawn from the wrapped generator it also yields
    1 - v (variance reduction). Only even stream lengths are supported:
    N // 2 pairs are produced.
    """

    def __init__(self, base):
        self._base = base
        self._oddEven = True

    def get_uniforms(self, N):
        if self.dim == 1:
            for v in self._base.get_uniforms(N // 2):  # the argument must be an 'int' in Python3
                yield v
                yield [1-v[0]]
        else:
            for v in self._base.get_uniforms(N // 2):  # the argument must be an 'int' in Python3
                yield v
                yield [1-x for x in v]

    def _set_seed(self, seed):
        self._base.seed = seed
        self._oddEven = True

    def skip(self, nPaths):
        # BUG FIX: floor division -- `/` yields a float under Python 3,
        # which the underlying generator is not guaranteed to accept.
        self._base.skip(nPaths // 2)

    def reset(self):
        self._base.reset()
        self._oddEven = True

    def _get_dim(self):
        return self._base.dim

    def _setDim(self, dim):
        self._base.dim = dim

    dim = property(_get_dim, _setDim)
    # BUG FIX: _set_seed was defined but never bound to a property, so
    # assigning `.seed` silently created a plain attribute instead of
    # reseeding the wrapped generator (cf. RandomParkMiller.seed).
    seed = property(lambda self: self._base.seed, _set_seed)
def loop(N, s):
    """
    Yield N successive values of a modulo-s counter: 0, 1, ..., s-1, 0, ...
    """
    counter = 0
    while counter < N:
        yield counter % s
        counter += 1
class SimpleStratifiedPM(RandomBase):
    """
    One-dimensional stratified sampler over `segments` equal strata,
    driven by a RandomParkMiller stream. Forces self.dim = 1.
    """

    def __init__(self, seed=1, segments=2**8):
        self.dim = 1
        self._rpm = RandomParkMiller(1, seed)
        self._seed = seed
        self._segments = segments

    def get_uniforms(self, N):
        """Yield N singletons; the i-th draw lies in stratum i % segments."""
        strata = self._segments
        return ([(stratum + u[0]) / strata]
                for stratum, u in zip(loop(N, strata), self._rpm.get_uniforms(N)))
# Testing
if __name__ == "__main__":
    # Smoke test: the anti-thetic stratified sampler should give a mean
    # near 0.5 and a variance well below the 1/12 of plain uniforms.
    sampler = AntiThetic(SimpleStratifiedPM(1, 16))
    draws = 2**8
    values = [u[0] for u in sampler.get_uniforms(draws)]
    sample_mean = sum(values) / draws
    sample_var = sum((v - sample_mean)**2 for v in values) / draws
    print("mean = %f" % sample_mean)
    print("variance = %f" % sample_var)
"""##### 1 [ Split into training ] #####"""
"""##### 2 [ Extract train and test idx for later merge with geography coord ] #####"""
"""##### 3 [ Fit: Polynomial Regression ] ######"""
## 3. 1 Fit Model: Polynomial Regression
### Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
# Expand the features to degree-2 polynomial terms, then fit an ordinary
# least-squares model on the expanded design matrix.
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train)
pol_reg = LinearRegression()
### Train model
pol_reg.fit(X_poly, y_train)
## 3.2 Predict Test Results
### 3.2.1 TEST: Make prediction using test set
# FIX: use transform() (not fit_transform()) on the test set -- the feature
# expansion must remain the one fitted on the training data only.
y_pred = pol_reg.predict(poly_reg.transform(X_test))
y_pred
dataTest = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
dataTest['residuals']=dataTest['Actual'] - dataTest['Predicted']
dataTest
#summary descriptive statistics
dataTest.describe()
### 3.2.2 TRAIN: Make prediction using TRAIN set
y_train_predicted = pol_reg.predict(X_poly)
y_train_predicted
dataTrain = pd.DataFrame({'Actual': y_train, 'Predicted': y_train_predicted})
dataTrain['residuals']=dataTrain['Actual'] - dataTrain['Predicted']
dataTrain
#summary descriptive statistics
dataTrain.describe()
### 3.2.3 Plot Predicted vs Observed | Test Set
### Plot A
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid')
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(10, 6))
ax = sns.regplot(x="Actual", y="Predicted", data=dataTest, label='siteindex predicted', scatter_kws = {'color': 'white', 'alpha': 0.8, 'edgecolor':'blue', 's':10}, line_kws = {'color': '#f54a19'})
ax.set_ylim(0,55)
ax.set_xlim(0,55)
ax.plot([0, 55], [0, 55], 'k--', lw=2)
ax.legend(title="Test set:", frameon= True, loc='upper left')
#ax.legend(bbox_to_anchor =(0.85, -0.20), ncol = 4)
plt.title('Goodness-of-fit in Validation Set',fontsize=12)
plt.savefig('actualvsPredicted_PolyReg_testSet.jpg', bbox_inches='tight', dpi=300)
### Plot B
ax = sns.regplot(x="Actual", y="Predicted", data=dataTest,
scatter_kws = {'color': 'orange', 'alpha': 0.3}, line_kws = {'color': '#f54a19'},
x_estimator=np.mean, logx=True)
ax.set_ylim(0,55)
ax.set_xlim(0,55)
ax.plot([0, 55], [0, 55], 'k--', lw=2)
### Plot C
ax = sns.regplot(x="Actual", y=y_pred, data=dataTest,
scatter_kws={"s": 80},
order=2, ci=None)
"""##### 4 [ Perfomance and Validation #####"""
## 4.1 ACCURACY FOR TRAINING & TEST SET:
print("Accuracy on train set:: {:.3f}".format(pol_reg.score(X_poly, y_train)))
# FIX: score the test set with transform(), not fit_transform(); the feature
# expansion must stay the one fitted on the training data.
print("Accuracy on test set:: {:.3f}".format(pol_reg.score(poly_reg.transform(X_test), y_test)))
## 4.2 Accuracy Measures
print("R2 (explained variance) Train Set: {:.3f}".format(metrics.r2_score(y_train, y_train_predicted), 2))
print("R2 (explained variance) Test set: {:.3f}".format(metrics.r2_score(y_test, y_pred), 2))
print('MAE=Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE=Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE=Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
## 4.3 Calculate Squared Error
residSquare = np.square(dataTest['residuals'])
residSquare
### 4.3.1 Plot Squared Errror vs Observed
plt.style.use('seaborn-whitegrid')
fig=plt.figure(figsize = [8, 6])
ax = fig.add_subplot(111)
ax.scatter(x=dataTest['Actual'], y=residSquare, label='Squared Error', c='white', alpha=0.8, edgecolors='#1b346c', s=10)
ax.set_xlabel("Observed 'site index' values") #it's a good idea to label your axes
ax.set_ylabel('Squared Error')
plt.title("Squared Error vs Observed 'site index' values")
plt.legend(title="",loc='upper right', frameon=True)
plt.savefig('SquaredError_PolyReg.png', bbox_inches='tight', dpi=300)
"""##### 5 [ Evaluation: Slope of Coefficients ] #####"""
#pol_reg.coef_
from sklearn.metrics import mean_squared_error, r2_score
## 5.1 Model Output
# a. Intercept
print("Intercept:", pol_reg.intercept_)
for coef, col in enumerate(X_train.columns):
print(f'{col}: {pol_reg.coef_[coef]}')
## 5.2 Build table to check model output
pred_model = pd.DataFrame(['aspect','planCurvature','profileCurvature','slope','TPI','TWI_SAGA','Dh_diffuse','Ih_direct','DEM','meanJanRain','meanJulRain','maxJanTemp','minJulTemp','SYMBOL','soil_order','BDw','CLY','CFG','ECD','SOC','pHw','SND','SLT'])
coeff = pd.DataFrame(pol_reg.coef_)
df = pd.concat([pred_model,coeff], axis=1, join='inner')
df
## 5.3 Plot Slopes
# adding column name to the respective columns
df.columns =['Features', 'Coefficients']
# displaying the DataFrame
print(df)
df = df.sort_values(by='Coefficients', ascending=0)
df
### 5.3.1 Display contribution of features towards dependent variable: 'siteindex' (y)
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(14,6))
sns.set(style="whitegrid")
plt.subplot(1, 1, 1) # 1 row, 2 cols, subplot 1
ax = sns.barplot(df.Features, df.Coefficients)
for p in ax.patches:
ax.annotate(np.round(p.get_height(),decimals=2), (p.get_x()+p.get_width()/2., p.get_height()),
ha='left',
va='baseline',
#textcoords='offset points',
rotation='30')
#Rotate labels x-axis
plt.xticks(rotation=45, horizontalalignment='right')
plt.ylabel('independent variables (x)')
plt.xlabel('Coefficents')
plt.title("Contribution of features towards dependent variable: 'siteindex' (y)")
plt.savefig('polyreg_FI.png', bbox_inches='tight', dpi=300)
## 5.4 Feature Importance
from sklearn.inspection import permutation_importance
# BUG FIX: the fitted model is named `pol_reg` (the original referenced the
# undefined name `polyreg`), and it was trained on the polynomial-expanded
# design matrix, so the test features must be expanded the same way.
r = permutation_importance(pol_reg, poly_reg.transform(X_test), y_test,
                           n_repeats=30,
                           random_state=0)
for i in r.importances_mean.argsort()[::-1]:
    if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
        # NOTE(review): these indices refer to expanded polynomial features,
        # which outnumber the original EDAsurvey columns -- confirm the
        # intended column mapping before trusting the printed labels.
        print(f"{EDAsurvey.columns[i]:<8}"
              f"{r.importances_mean[i]:.3f}"
              f" +/- {r.importances_std[i]:.3f}")
"""##### 6 [ Fit Model: Polynomial regression | K-fold Cross Validation ] #####"""
"""## Model with 10-fold cross-validation with all features ##"""
"""### Option 1 | Assessing Quality of Regression Model ###"""
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train)
pol_reg = LinearRegression()
# Train model
pol_reg.fit(X_poly, y_train)
cv_scores_Kfold10 = cross_val_score(pol_reg,X_poly, y_train, cv=10, scoring='r2')
print("Cross-validation scores: {}".format(cv_scores_Kfold10))
print("Average cross-validation score: {:.3f}".format(cv_scores_Kfold10.mean()))
"""### Option 2 | Polynomial Regression | cv=10 ###"""
from sklearn.model_selection import cross_val_score
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
###1. evaluate the model
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train)
pol_reg = LinearRegression()
poly10 = cross_val_score(pol_reg , X_poly, y_train, cv=10, scoring='r2')
print("Cross-validation scores: {}".format(poly10))
### 2. report performance
from numpy import mean
from numpy import std
print("Average cross-validation score: {:.3f}".format(poly10.mean()))
print('MAE: %.3f (%.3f)' % (mean(poly10), std(poly10)))
print("Accuracy: %0.3f (+/- %0.3f)" % (poly10.mean(), poly10.std()))
#The mean score and the 95% confidence interval of the score estimate are hence given by:
print("Accuracy for 95perc confidence interval: %0.3f (+/- %0.3f)" % (poly10.mean(), poly10.std() * 2))
# 2.1 Measure for boxplots
import statistics
from scipy import stats
# Median for predicted value
median = statistics.median(poly10)
q1, q2, q3= np.percentile(poly10,[25,50,75])
# IQR which is the difference between third and first quartile
iqr = q3 - q1
# lower_bound is 15.086 and upper bound is 43.249, so anything outside of 15.086 and 43.249 is an outlier.
lower_bound = q1 -(1.5 * iqr)
upper_bound = q3 +(1.5 * iqr)
print('upper_bound: %.3f' % upper_bound)
print('Third quartile (q3): %.3f' % q3)
print('Median: %.3f' % median)
print('First quartile (q1): %.3f' % q1)
#print('Median (q2): %.3f' % q2)
print('IQR: %.3f' % iqr)
print('lower_bound: %.3f' % lower_bound)
# 3. plot performance
# Cool Boxplot
fig = plt.figure()
fig.suptitle('Model with 10-fold cross-validation')
ax = fig.add_subplot(111)
import matplotlib.pyplot as plt
plt.style.use('classic')
fig.set_size_inches(4, 4)
medianprops = dict(linewidth=1.5, linestyle='-', color='#fc3468')
meanprops = dict(marker='D', markerfacecolor='indianred', markersize=4.5)
plt.gca().spines['right'].set_color('#D9D8D6')
plt.gca().spines['top'].set_color('#D9D8D6')
plt.gca().spines['left'].set_color('#D9D8D6')
plt.gca().spines['bottom'].set_color('#D9D8D6')
plt.grid(color='grey', linestyle='-', linewidth=0.25)
plt.boxplot(poly10, medianprops=medianprops, meanprops=meanprops, showmeans=True )
ax.set_xticklabels('')
plt.xlabel('Polynomial Regression')
plt.ylabel('Accuracy Model')
plt.savefig('accuracy_polyReg.png', bbox_inches='tight', dpi=300)
# Boring boxplot
fig = plt.figure()
fig.suptitle('Model with 10-fold cross-validation')
ax = fig.add_subplot(111)
import seaborn as sns
sns.set(style="whitegrid")
plt.boxplot(poly10)
ax.set_xticklabels('')
plt.xlabel('Polynomial Regression')
plt.ylabel('Accuracy Model')
plt.savefig('accuracy_polyReg.png', bbox_inches='tight', dpi=300) | polynomial_regression.py | """##### 1 [ Split into training ] #####"""
"""##### 2 [ Extract train and test idx for later merge with geography coord ] #####"""
"""##### 3 [ Fit: Polynomial Regression ] ######"""
## 3. 1 Fit Model: Polynomial Regression
### Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
# Expand the features to degree-2 polynomial terms, then fit an ordinary
# least-squares model on the expanded design matrix.
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train)
pol_reg = LinearRegression()
### Train model
pol_reg.fit(X_poly, y_train)
## 3.2 Predict Test Results
### 3.2.1 TEST: Make prediction using test set
# FIX: use transform() (not fit_transform()) on the test set -- the feature
# expansion must remain the one fitted on the training data only.
y_pred = pol_reg.predict(poly_reg.transform(X_test))
y_pred
dataTest = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
dataTest['residuals']=dataTest['Actual'] - dataTest['Predicted']
dataTest
#summary descriptive statistics
dataTest.describe()
### 3.2.2 TRAIN: Make prediction using TRAIN set
y_train_predicted = pol_reg.predict(X_poly)
y_train_predicted
dataTrain = pd.DataFrame({'Actual': y_train, 'Predicted': y_train_predicted})
dataTrain['residuals']=dataTrain['Actual'] - dataTrain['Predicted']
dataTrain
#summary descriptive statistics
dataTrain.describe()
### 3.2.3 Plot Predicted vs Observed | Test Set
### Plot A
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style='whitegrid')
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(10, 6))
ax = sns.regplot(x="Actual", y="Predicted", data=dataTest, label='siteindex predicted', scatter_kws = {'color': 'white', 'alpha': 0.8, 'edgecolor':'blue', 's':10}, line_kws = {'color': '#f54a19'})
ax.set_ylim(0,55)
ax.set_xlim(0,55)
ax.plot([0, 55], [0, 55], 'k--', lw=2)
ax.legend(title="Test set:", frameon= True, loc='upper left')
#ax.legend(bbox_to_anchor =(0.85, -0.20), ncol = 4)
plt.title('Goodness-of-fit in Validation Set',fontsize=12)
plt.savefig('actualvsPredicted_PolyReg_testSet.jpg', bbox_inches='tight', dpi=300)
### Plot B
ax = sns.regplot(x="Actual", y="Predicted", data=dataTest,
scatter_kws = {'color': 'orange', 'alpha': 0.3}, line_kws = {'color': '#f54a19'},
x_estimator=np.mean, logx=True)
ax.set_ylim(0,55)
ax.set_xlim(0,55)
ax.plot([0, 55], [0, 55], 'k--', lw=2)
### Plot C
ax = sns.regplot(x="Actual", y=y_pred, data=dataTest,
scatter_kws={"s": 80},
order=2, ci=None)
"""##### 4 [ Perfomance and Validation #####"""
## 4.1 ACCURACY FOR TRAINING & TEST SET:
print("Accuracy on train set:: {:.3f}".format(pol_reg.score(X_poly, y_train)))
# FIX: score the test set with transform(), not fit_transform(); the feature
# expansion must stay the one fitted on the training data.
print("Accuracy on test set:: {:.3f}".format(pol_reg.score(poly_reg.transform(X_test), y_test)))
## 4.2 Accuracy Measures
print("R2 (explained variance) Train Set: {:.3f}".format(metrics.r2_score(y_train, y_train_predicted), 2))
print("R2 (explained variance) Test set: {:.3f}".format(metrics.r2_score(y_test, y_pred), 2))
print('MAE=Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('MSE=Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('RMSE=Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
## 4.3 Calculate Squared Error
residSquare = np.square(dataTest['residuals'])
residSquare
### 4.3.1 Plot Squared Errror vs Observed
plt.style.use('seaborn-whitegrid')
fig=plt.figure(figsize = [8, 6])
ax = fig.add_subplot(111)
ax.scatter(x=dataTest['Actual'], y=residSquare, label='Squared Error', c='white', alpha=0.8, edgecolors='#1b346c', s=10)
ax.set_xlabel("Observed 'site index' values") #it's a good idea to label your axes
ax.set_ylabel('Squared Error')
plt.title("Squared Error vs Observed 'site index' values")
plt.legend(title="",loc='upper right', frameon=True)
plt.savefig('SquaredError_PolyReg.png', bbox_inches='tight', dpi=300)
"""##### 5 [ Evaluation: Slope of Coefficients ] #####"""
#pol_reg.coef_
from sklearn.metrics import mean_squared_error, r2_score
## 5.1 Model Output
# a. Intercept
print("Intercept:", pol_reg.intercept_)
for coef, col in enumerate(X_train.columns):
print(f'{col}: {pol_reg.coef_[coef]}')
## 5.2 Build table to check model output
pred_model = pd.DataFrame(['aspect','planCurvature','profileCurvature','slope','TPI','TWI_SAGA','Dh_diffuse','Ih_direct','DEM','meanJanRain','meanJulRain','maxJanTemp','minJulTemp','SYMBOL','soil_order','BDw','CLY','CFG','ECD','SOC','pHw','SND','SLT'])
coeff = pd.DataFrame(pol_reg.coef_)
df = pd.concat([pred_model,coeff], axis=1, join='inner')
df
## 5.3 Plot Slopes
# adding column name to the respective columns
df.columns =['Features', 'Coefficients']
# displaying the DataFrame
print(df)
df = df.sort_values(by='Coefficients', ascending=0)
df
### 5.3.1 Display contribution of features towards dependent variable: 'siteindex' (y)
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(14,6))
sns.set(style="whitegrid")
plt.subplot(1, 1, 1) # 1 row, 2 cols, subplot 1
ax = sns.barplot(df.Features, df.Coefficients)
for p in ax.patches:
ax.annotate(np.round(p.get_height(),decimals=2), (p.get_x()+p.get_width()/2., p.get_height()),
ha='left',
va='baseline',
#textcoords='offset points',
rotation='30')
#Rotate labels x-axis
plt.xticks(rotation=45, horizontalalignment='right')
plt.ylabel('independent variables (x)')
plt.xlabel('Coefficents')
plt.title("Contribution of features towards dependent variable: 'siteindex' (y)")
plt.savefig('polyreg_FI.png', bbox_inches='tight', dpi=300)
## 5.4 Feature Importance
from sklearn.inspection import permutation_importance
# BUG FIX: the fitted model is named `pol_reg` (the original referenced the
# undefined name `polyreg`), and it was trained on the polynomial-expanded
# design matrix, so the test features must be expanded the same way.
r = permutation_importance(pol_reg, poly_reg.transform(X_test), y_test,
                           n_repeats=30,
                           random_state=0)
for i in r.importances_mean.argsort()[::-1]:
    if r.importances_mean[i] - 2 * r.importances_std[i] > 0:
        # NOTE(review): these indices refer to expanded polynomial features,
        # which outnumber the original EDAsurvey columns -- confirm the
        # intended column mapping before trusting the printed labels.
        print(f"{EDAsurvey.columns[i]:<8}"
              f"{r.importances_mean[i]:.3f}"
              f" +/- {r.importances_std[i]:.3f}")
"""##### 6 [ Fit Model: Polynomial regression | K-fold Cross Validation ] #####"""
"""## Model with 10-fold cross-validation with all features ##"""
"""### Option 1 | Assessing Quality of Regression Model ###"""
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train)
pol_reg = LinearRegression()
# Train model
pol_reg.fit(X_poly, y_train)
cv_scores_Kfold10 = cross_val_score(pol_reg,X_poly, y_train, cv=10, scoring='r2')
print("Cross-validation scores: {}".format(cv_scores_Kfold10))
print("Average cross-validation score: {:.3f}".format(cv_scores_Kfold10.mean()))
"""### Option 2 | Polynomial Regression | cv=10 ###"""
from sklearn.model_selection import cross_val_score
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
###1. evaluate the model
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X_train)
pol_reg = LinearRegression()
poly10 = cross_val_score(pol_reg , X_poly, y_train, cv=10, scoring='r2')
print("Cross-validation scores: {}".format(poly10))
### 2. report performance
from numpy import mean
from numpy import std
print("Average cross-validation score: {:.3f}".format(poly10.mean()))
print('MAE: %.3f (%.3f)' % (mean(poly10), std(poly10)))
print("Accuracy: %0.3f (+/- %0.3f)" % (poly10.mean(), poly10.std()))
#The mean score and the 95% confidence interval of the score estimate are hence given by:
print("Accuracy for 95perc confidence interval: %0.3f (+/- %0.3f)" % (poly10.mean(), poly10.std() * 2))
# 2.1 Measure for boxplots
import statistics
from scipy import stats
# Median for predicted value
median = statistics.median(poly10)
q1, q2, q3= np.percentile(poly10,[25,50,75])
# IQR which is the difference between third and first quartile
iqr = q3 - q1
# lower_bound is 15.086 and upper bound is 43.249, so anything outside of 15.086 and 43.249 is an outlier.
lower_bound = q1 -(1.5 * iqr)
upper_bound = q3 +(1.5 * iqr)
print('upper_bound: %.3f' % upper_bound)
print('Third quartile (q3): %.3f' % q3)
print('Median: %.3f' % median)
print('First quartile (q1): %.3f' % q1)
#print('Median (q2): %.3f' % q2)
print('IQR: %.3f' % iqr)
print('lower_bound: %.3f' % lower_bound)
# 3. plot performance
# Cool Boxplot
fig = plt.figure()
fig.suptitle('Model with 10-fold cross-validation')
ax = fig.add_subplot(111)
import matplotlib.pyplot as plt
plt.style.use('classic')
fig.set_size_inches(4, 4)
medianprops = dict(linewidth=1.5, linestyle='-', color='#fc3468')
meanprops = dict(marker='D', markerfacecolor='indianred', markersize=4.5)
plt.gca().spines['right'].set_color('#D9D8D6')
plt.gca().spines['top'].set_color('#D9D8D6')
plt.gca().spines['left'].set_color('#D9D8D6')
plt.gca().spines['bottom'].set_color('#D9D8D6')
plt.grid(color='grey', linestyle='-', linewidth=0.25)
plt.boxplot(poly10, medianprops=medianprops, meanprops=meanprops, showmeans=True )
ax.set_xticklabels('')
plt.xlabel('Polynomial Regression')
plt.ylabel('Accuracy Model')
plt.savefig('accuracy_polyReg.png', bbox_inches='tight', dpi=300)
# Boring boxplot
fig = plt.figure()
fig.suptitle('Model with 10-fold cross-validation')
ax = fig.add_subplot(111)
import seaborn as sns
sns.set(style="whitegrid")
plt.boxplot(poly10)
ax.set_xticklabels('')
plt.xlabel('Polynomial Regression')
plt.ylabel('Accuracy Model')
plt.savefig('accuracy_polyReg.png', bbox_inches='tight', dpi=300) | 0.887595 | 0.881717 |
import argparse
import json
import logging
from os.path import join as pathjoin
from os.path import abspath, exists, isdir
from time import time
from tsstats import config
from tsstats.exceptions import InvalidConfiguration
from tsstats.log import parse_logs
from tsstats.logger import file_handler, stream_handler
from tsstats.template import render_servers
from tsstats.utils import transform_pretty_identmap
logger = logging.getLogger('tsstats')
def cli():
    """Parse CLI options, overlay them onto the configuration, run main().

    argparse.SUPPRESS is used as the global default so that options the
    user did not pass stay absent from the namespace and therefore do not
    clobber values coming from the config file.
    """
    parser = argparse.ArgumentParser(
        description='A simple Teamspeak stats-generator,'
        ' based solely on server-logs',
        argument_default=argparse.SUPPRESS
    )
    parser.add_argument(
        '-c', '--config',
        type=str, help='path to config'
    )
    parser.add_argument(
        '--idmap', type=str, help='path to id_map'
    )
    parser.add_argument(
        '-l', '--log',
        type=str, help='path to your logfile(s). '
        'pass a directory to use all logfiles inside it'
    )
    parser.add_argument(
        '-o', '--output',
        type=str, help='path to the output-file'
    )
    parser.add_argument(
        '-d', '--debug',
        help='debug mode', action='store_true'
    )
    parser.add_argument(
        '-ds', '--debugstdout',
        help='write debug output to stdout', action='store_true'
    )
    parser.add_argument(
        '-nod', '--noonlinedc',
        help='don\'t add connect until now to onlinetime',
        action='store_false', dest='onlinedc'
    )
    parser.add_argument(
        '-t', '--template',
        type=str, help='path to custom template'
    )
    parser.add_argument(
        '-dtf', '--datetimeformat',
        type=str, help='format of date/time-values (datetime.strftime)'
    )
    parser.add_argument(
        '-otth', '--onlinetimethreshold',
        type=int, help='threshold for displaying onlinetime (in seconds)'
    )
    parser.add_argument(
        '-lsa', '--lastseenabsolute',
        help='render last seen timestamp absolute (instead of relative)',
        action='store_false', dest='lastseenrelative'
    )
    options = parser.parse_args()
    # Load the named config file if given, else the default locations.
    if 'config' in options:
        configuration = config.load(options.config)
    else:
        configuration = config.load()
    # CLI options win over config-file values; everything is stored as a
    # string because ConfigParser only holds strings.
    for option, value in vars(options).items():
        configuration.set('General', option, str(value))
    main(configuration)
def main(configuration):
    """Generate the stats: parse the configured logs and render the report.

    :param configuration: ConfigParser-like object with a 'General' section
                          (debug, idmap, log, output, template, ... keys)
    :raises InvalidConfiguration: when the log path is missing or the
                                  configured identmap file does not exist
    """
    start_time = time()
    # setup logging
    if configuration.getboolean('General', 'debug'):
        logger.setLevel(logging.DEBUG)
        if configuration.getboolean('General', 'debugstdout'):
            stream_handler.setLevel(logging.DEBUG)
        else:
            logger.addHandler(file_handler)
    # attach handlers
    logger.addHandler(stream_handler)
    idmap = configuration.get('General', 'idmap')
    if idmap:
        idmap = abspath(idmap)
        if not exists(idmap):
            # Bug fix: previously this only logged the fatal condition and
            # then crashed on open() with a raw FileNotFoundError.
            logger.fatal('identmap not found (%s)', idmap)
            raise InvalidConfiguration('identmap not found (%s)' % idmap)
        # read id_map; close the file handle deterministically
        with open(idmap) as idmap_file:
            identmap = json.load(idmap_file)
    else:
        identmap = None
    if isinstance(identmap, list):
        # "pretty" identmaps are lists and need normalization first
        identmap = transform_pretty_identmap(identmap)
    log = configuration.get('General', 'log')
    if not log:
        raise InvalidConfiguration('log or output missing')
    if isdir(log):
        # a directory was given: consume every logfile inside it
        log = pathjoin(log, '*.log')
    servers = parse_logs(
        log, ident_map=identmap,
        online_dc=configuration.getboolean('General', 'onlinedc')
    )
    render_servers(
        sorted(servers, key=lambda s: s.sid),
        output=abspath(configuration.get('General', 'output')),
        template=configuration.get('General', 'template'),
        datetime_fmt=configuration.get('General', 'datetimeformat'),
        onlinetime_threshold=int(configuration.get(
            'General', 'onlinetimethreshold'
        )),
        lastseen_relative=configuration.getboolean(
            'General', 'lastseenrelative'
        )
    )
    logger.info('Finished after %s seconds', time() - start_time)
if __name__ == '__main__':
cli() | tsstats/__main__.py | import argparse
import json
import logging
from os.path import join as pathjoin
from os.path import abspath, exists, isdir
from time import time
from tsstats import config
from tsstats.exceptions import InvalidConfiguration
from tsstats.log import parse_logs
from tsstats.logger import file_handler, stream_handler
from tsstats.template import render_servers
from tsstats.utils import transform_pretty_identmap
logger = logging.getLogger('tsstats')
def cli():
    """Entry point: parse command-line arguments and run main().

    argument_default=SUPPRESS keeps options that were not given on the
    command line out of the namespace, so they do not clobber values
    coming from the configuration file.
    """
    parser = argparse.ArgumentParser(
        description='A simple Teamspeak stats-generator,'
        ' based solely on server-logs',
        argument_default=argparse.SUPPRESS
    )
    parser.add_argument(
        '-c', '--config',
        type=str, help='path to config'
    )
    parser.add_argument(
        '--idmap', type=str, help='path to id_map'
    )
    parser.add_argument(
        '-l', '--log',
        type=str, help='path to your logfile(s). '
        'pass a directory to use all logfiles inside it'
    )
    parser.add_argument(
        '-o', '--output',
        type=str, help='path to the output-file'
    )
    parser.add_argument(
        '-d', '--debug',
        help='debug mode', action='store_true'
    )
    parser.add_argument(
        '-ds', '--debugstdout',
        help='write debug output to stdout', action='store_true'
    )
    parser.add_argument(
        '-nod', '--noonlinedc',
        help='don\'t add connect until now to onlinetime',
        action='store_false', dest='onlinedc'
    )
    parser.add_argument(
        '-t', '--template',
        type=str, help='path to custom template'
    )
    parser.add_argument(
        '-dtf', '--datetimeformat',
        type=str, help='format of date/time-values (datetime.strftime)'
    )
    parser.add_argument(
        '-otth', '--onlinetimethreshold',
        type=int, help='threshold for displaying onlinetime (in seconds)'
    )
    parser.add_argument(
        '-lsa', '--lastseenabsolute',
        help='render last seen timestamp absolute (instead of relative)',
        action='store_false', dest='lastseenrelative'
    )
    options = parser.parse_args()
    # load the configuration, honouring an explicit --config path
    if 'config' in options:
        configuration = config.load(options.config)
    else:
        configuration = config.load()
    # overlay every option that was actually supplied on the command line
    for option, value in vars(options).items():
        configuration.set('General', option, str(value))
    main(configuration)
def main(configuration):
    """Generate the stats: parse the configured logs and render the report.

    :param configuration: ConfigParser-like object with a 'General' section
                          (debug, idmap, log, output, template, ... keys)
    :raises InvalidConfiguration: when the log path is missing or the
                                  configured identmap file does not exist
    """
    start_time = time()
    # setup logging
    if configuration.getboolean('General', 'debug'):
        logger.setLevel(logging.DEBUG)
        if configuration.getboolean('General', 'debugstdout'):
            stream_handler.setLevel(logging.DEBUG)
        else:
            logger.addHandler(file_handler)
    # attach handlers
    logger.addHandler(stream_handler)
    idmap = configuration.get('General', 'idmap')
    if idmap:
        idmap = abspath(idmap)
        if not exists(idmap):
            # Bug fix: previously this only logged the fatal condition and
            # then crashed on open() with a raw FileNotFoundError.
            logger.fatal('identmap not found (%s)', idmap)
            raise InvalidConfiguration('identmap not found (%s)' % idmap)
        # read id_map; close the file handle deterministically
        with open(idmap) as idmap_file:
            identmap = json.load(idmap_file)
    else:
        identmap = None
    if isinstance(identmap, list):
        # "pretty" identmaps are lists and need normalization first
        identmap = transform_pretty_identmap(identmap)
    log = configuration.get('General', 'log')
    if not log:
        raise InvalidConfiguration('log or output missing')
    if isdir(log):
        # a directory was given: consume every logfile inside it
        log = pathjoin(log, '*.log')
    servers = parse_logs(
        log, ident_map=identmap,
        online_dc=configuration.getboolean('General', 'onlinedc')
    )
    render_servers(
        sorted(servers, key=lambda s: s.sid),
        output=abspath(configuration.get('General', 'output')),
        template=configuration.get('General', 'template'),
        datetime_fmt=configuration.get('General', 'datetimeformat'),
        onlinetime_threshold=int(configuration.get(
            'General', 'onlinetimethreshold'
        )),
        lastseen_relative=configuration.getboolean(
            'General', 'lastseenrelative'
        )
    )
    logger.info('Finished after %s seconds', time() - start_time)
if __name__ == '__main__':
cli() | 0.278453 | 0.065455 |
import json
import sys
import unittest
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_DIR)
CODESEARCH_DIR = os.path.join(
os.path.dirname(SCRIPT_DIR), 'third_party', 'codesearch-py')
sys.path.append(CODESEARCH_DIR)
import render as r
import codesearch as cs
def TestDataPath(p):
  """Return the path of test asset *p* inside the testdata/ directory."""
  testdata_dir = os.path.join(SCRIPT_DIR, 'testdata')
  return os.path.join(testdata_dir, p)
def LocationMapToString(l):
  """Serialize a location map into a human readable dump.

  Each rendered buffer line is emitted as 'NNN|<content>' followed by a
  second line holding the jump ('j') and signature ('s') metadata attached
  to it, if any.
  """
  chunks = [
      '''\
This file contains the rendered output per line and its associated metadata.
Lines that begin with a line number contains (after the '|') the contents that
will be inserted into the vim buffer at that line. The immediately following
line contains an object representing associated metadata.
-------------------------------------------------------------------------------
'''
  ]
  for idx, content in enumerate(l.Lines()):
    meta = {
        key: mapping[idx]
        for key, mapping in (('j', l.jump_map_), ('s', l.signature_map_))
        if idx in mapping
    }
    chunks.append('{:03d}|{}\n |{}'.format(idx + 1, content, str(meta)))
  return '\n'.join(chunks) + '\n'
class TestRenderers(unittest.TestCase):
  """Golden-file tests: render canned server responses and diff the output."""

  def run_render_test(self, test_file_name, query='unspecified'):
    """Render the canned response in *test_file_name* and compare it to the
    checked-in .expected file.

    The rendered text is also written next to the golden file as .actual so
    failures are easy to inspect.  Returns the location map for further
    assertions.
    """
    with open(TestDataPath(test_file_name), 'r') as json_file:
      raw_response = json.load(json_file)
    response = cs.Message.Coerce(raw_response, cs.CompoundResponse)
    location_map = r.RenderCompoundResponse(response, query)
    rendered = LocationMapToString(location_map)
    with open(TestDataPath(test_file_name + '.actual'), 'w') as actual_file:
      actual_file.write(rendered)
    with open(TestDataPath(test_file_name + '.expected'), 'r') as expected_file:
      expected = expected_file.read()
    self.assertMultiLineEqual(expected, rendered)
    return location_map

  def test_search_response_01(self):
    # Jump targets resolve to (filename, line, column) triples.
    location_map = self.run_render_test('search-response-01.json')
    filename, line, column = location_map.JumpTargetAt(50, 1)
    self.assertEqual('src/chrome/browser/download/download_prefs.cc', filename)
    self.assertEqual(409, line)
    self.assertEqual(1, column)
    _, _, column = location_map.JumpTargetAt(50, 30)
    self.assertEqual(16, column)

  def test_search_response_02(self):
    location_map = self.run_render_test('search-response-02.json')
    filename, line, column = location_map.JumpTargetAt(22, 1)
    self.assertEqual('src/base/at_exit.cc', filename)
    self.assertEqual(96, line)
    self.assertEqual(1, column)
    # Navigation between the per-file sections of the rendered buffer.
    self.assertEqual(3, location_map.NextFileLocation(1))
    self.assertEqual(1, location_map.PreviousFileLocation(1))
    self.assertEqual(3, location_map.PreviousFileLocation(35))
    self.assertEqual(35, location_map.PreviousFileLocation(45))
    self.assertEqual(45, location_map.NextFileLocation(45))

  def test_search_response_03(self):
    self.run_render_test('search-response-03.json')

  def test_search_response_04(self):
    self.run_render_test('search-response-04.json')

  def test_xref_search_response_01(self):
    self.run_render_test('xrefs-response-01.json')

  def test_xref_search_response_02(self):
    self.run_render_test('xrefs-response-02.json')

  def test_xref_search_response_03(self):
    self.run_render_test('xrefs-response-03.json')

  def test_call_graph_01(self):
    self.run_render_test('call-graph-01.json')

  def test_call_graph_02(self):
    self.run_render_test('call-graph-02.json')
if __name__ == '__main__':
unittest.main() | render/test_render.py |
import json
import sys
import unittest
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_DIR)
CODESEARCH_DIR = os.path.join(
os.path.dirname(SCRIPT_DIR), 'third_party', 'codesearch-py')
sys.path.append(CODESEARCH_DIR)
import render as r
import codesearch as cs
def TestDataPath(p):
  """Return the path of test asset *p* under <script dir>/testdata."""
  return os.path.join(SCRIPT_DIR, 'testdata', p)
def LocationMapToString(l):
  """Serialize location map *l* (rendered lines + per-line metadata) to text."""
  s = [
      '''\
This file contains the rendered output per line and its associated metadata.
Lines that begin with a line number contains (after the '|') the contents that
will be inserted into the vim buffer at that line. The immediately following
line contains an object representing associated metadata.
-------------------------------------------------------------------------------
'''
  ]
  for index, line in enumerate(l.Lines()):
    # 'j' carries the jump target, 's' the signature attached to this line.
    o = {}
    if index in l.jump_map_:
      o['j'] = l.jump_map_[index]
    if index in l.signature_map_:
      o['s'] = l.signature_map_[index]
    s.append('{:03d}|{}\n |{}'.format(index + 1, line, str(o)))
  return '\n'.join(s) + '\n'
class TestRenderers(unittest.TestCase):
  """Golden-file tests for rendering codesearch responses."""

  def run_render_test(self, test_file_name, query='unspecified'):
    """Render the canned response in *test_file_name*; diff vs .expected.

    The rendered text is also written to a sibling .actual file so failing
    goldens are easy to inspect and update.  Returns the location map for
    further assertions.
    """
    with open(TestDataPath(test_file_name), 'r') as f:
      d = json.load(f)
    m = cs.Message.Coerce(d, cs.CompoundResponse)
    location_map = r.RenderCompoundResponse(m, query)
    serialized = LocationMapToString(location_map)
    with open(TestDataPath(test_file_name + '.actual'), 'w') as f:
      f.write(serialized)
    with open(TestDataPath(test_file_name + '.expected'), 'r') as f:
      expected = f.read()
    self.assertMultiLineEqual(expected, serialized)
    return location_map

  def test_search_response_01(self):
    # Jump targets resolve to (filename, line, column) triples.
    l_map = self.run_render_test('search-response-01.json')
    fn, l, c = l_map.JumpTargetAt(50, 1)
    self.assertEqual('src/chrome/browser/download/download_prefs.cc', fn)
    self.assertEqual(409, l)
    self.assertEqual(1, c)
    _, _, c = l_map.JumpTargetAt(50, 30)
    self.assertEqual(16, c)

  def test_search_response_02(self):
    l_map = self.run_render_test('search-response-02.json')
    fn, l, c = l_map.JumpTargetAt(22, 1)
    self.assertEqual('src/base/at_exit.cc', fn)
    self.assertEqual(96, l)
    self.assertEqual(1, c)
    # Navigation between the per-file sections of the rendered buffer.
    self.assertEqual(3, l_map.NextFileLocation(1))
    self.assertEqual(1, l_map.PreviousFileLocation(1))
    self.assertEqual(3, l_map.PreviousFileLocation(35))
    self.assertEqual(35, l_map.PreviousFileLocation(45))
    self.assertEqual(45, l_map.NextFileLocation(45))

  def test_search_response_03(self):
    self.run_render_test('search-response-03.json')

  def test_search_response_04(self):
    self.run_render_test('search-response-04.json')

  def test_xref_search_response_01(self):
    self.run_render_test('xrefs-response-01.json')

  def test_xref_search_response_02(self):
    self.run_render_test('xrefs-response-02.json')

  def test_xref_search_response_03(self):
    self.run_render_test('xrefs-response-03.json')

  def test_call_graph_01(self):
    self.run_render_test('call-graph-01.json')

  def test_call_graph_02(self):
    self.run_render_test('call-graph-02.json')
if __name__ == '__main__':
unittest.main() | 0.453504 | 0.36139 |
import parser_utils
from datetime import datetime
from decimal import *
from typing import Dict
from rbc_statement import Statement
class RbcBankStatement(Statement):
    """Parser for RBC chequing-account statement text.

    Fills self.transaction_table with one row per transaction and validates
    that opening balance + sum(transactions) equals the closing balance.
    """

    def parse(self, str_data: str):
        """Parse the raw statement text *str_data*.

        Raises:
            ValueError: when the statement period cannot be determined or
                the balances do not reconcile with the parsed transactions.
            Exception: when a row contains both a deposit and a withdrawal.
        """
        if "No activity for this period" in str_data:
            return
        str_data = str_data.splitlines()
        # --- pass 1: find the statement period -------------------------
        for line in str_data:
            if "Your opening balance on " in line:
                line = line.replace("Your opening balance on ", '')
                line = line.strip()
                # NOTE(review): split(' ')[0] keeps only the first
                # space-separated token, yet '%B %d, %Y' needs the whole
                # date -- presumably the statement uses a non-space
                # separator here; confirm against real input.
                self.start_date = datetime.strptime(line.split(' ')[0], '%B %d, %Y')
                continue
            if "Your closing balance on " in line:
                line = line.replace("Your closing balance on ", '')
                line = line.strip()
                self.end_date = datetime.strptime(line.split(' ')[0], '%B %d, %Y')
                continue
            if "From " in line and " to " in line:
                output = line.replace("From ", '').split(" to ")
                if len(output) != 2:
                    raise ValueError()
                self.start_date = datetime.strptime(output[0].strip(), '%B %d, %Y')
                self.end_date = datetime.strptime(output[1].strip(), '%B %d, %Y')
        if self.end_date is None or self.start_date is None:
            raise ValueError("Could not parse start or end date")
        # --- pass 2: walk the table rows, tracking column offsets ------
        # Column offsets of the current table header (-1 = not seen yet).
        pos_header: Dict[str, int] = {'date': -1, 'desc': -1, 'with': -1, 'dep': -1, 'bal': -1}
        process_date: datetime = None   # date carried over to amount-less rows
        multiline_desc: str = ''        # description accumulated across rows
        opening_balance: str = None
        closing_balance: str = None
        for x in range(len(str_data)):
            line = str_data[x]
            if "Closing Balance" in line:
                closing_balance: str = parser_utils.strip_currency(parser_utils.money_reg.findall(line)[0])
                break
            if "Opening Balance" in line:
                opening_balance: str = parser_utils.strip_currency(parser_utils.money_reg.findall(line)[0])
                continue
            if 'RBPDA' in line and 'HRI' in line:
                # boilerplate line -- skip
                continue
            temp_header = {'date': line.find("Date"),
                           'desc': line.find("Description"),
                           'with': line.find("Withdrawals"),
                           'dep': line.find("Deposits"),
                           'bal': line.find("Balance")}
            if -1 not in temp_header.values():
                # a new table header: remember its column offsets
                pos_header = temp_header
                multiline_desc = ''
                continue
            if -1 not in pos_header.values():
                try:
                    process_date = datetime.strptime(line[pos_header['date']:pos_header['desc']].strip(), '%d %b')
                    process_date = parser_utils.deduce_date(process_date, self.start_date, self.end_date)
                except ValueError:
                    # no date on this row: it belongs to the previous date
                    if process_date is None:
                        continue
                deposit = parser_utils.money_reg.findall(line[pos_header['dep']:pos_header['bal']])
                withdrawal = parser_utils.money_reg.findall(line[pos_header['with']:pos_header['dep']])
                cur_desc = line[pos_header['desc']:pos_header['with']].strip()
                if len(deposit) == 0 and len(withdrawal) == 0:
                    # description-only row: keep it for the next amount row
                    multiline_desc = cur_desc
                    continue
                if len(deposit) != 0 and len(withdrawal) != 0:
                    raise Exception("Why is there a withdrawal and deposit in the same transaction")
                transaction = {'trans_date': process_date}
                # withdrawals are stored as negative amounts
                if len(withdrawal) != 0:
                    transaction['amount'] = -1 * parser_utils.str_to_money(withdrawal[0])
                else:
                    transaction['amount'] = parser_utils.str_to_money(deposit[0])
                transaction['desc'] = (multiline_desc + " " + cur_desc).strip()
                multiline_desc = ''
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # migrate to pd.concat when the pandas version is bumped.
                self.transaction_table = self.transaction_table.append(transaction, ignore_index=True)
        # --- reconciliation --------------------------------------------
        opening_balance_d = Decimal(opening_balance)
        transactions = round(Decimal(self.transaction_table['amount'].sum()), 2)
        estimated_close = opening_balance_d + transactions
        close = Decimal(closing_balance)
        if estimated_close != close:
            raise ValueError("The (closing balance) != (opening balance) + (transactions)")
        # Drop internal credit-card payment transfers from the table.
        # (Restored: the trailing statement was corrupted in the source dump.)
        self.transaction_table = self.transaction_table[
            self.transaction_table.desc != 'Misc Payment RBC CREDIT CARD']
from datetime import datetime
from decimal import *
from typing import Dict
from rbc_statement import Statement
class RbcBankStatement(Statement):
    """Parser for RBC chequing-account statement text.

    Fills self.transaction_table with one row per transaction and validates
    that opening balance + sum(transactions) equals the closing balance.
    """

    def parse(self, str_data: str):
        """Parse the raw statement text *str_data*.

        Raises:
            ValueError: when the statement period cannot be determined or
                the balances do not reconcile with the parsed transactions.
            Exception: when a row contains both a deposit and a withdrawal.
        """
        if "No activity for this period" in str_data:
            return
        str_data = str_data.splitlines()
        # --- pass 1: find the statement period -------------------------
        for line in str_data:
            if "Your opening balance on " in line:
                line = line.replace("Your opening balance on ", '')
                line = line.strip()
                # NOTE(review): split(' ')[0] keeps only the first
                # space-separated token, yet '%B %d, %Y' needs the whole
                # date -- presumably the statement uses a non-space
                # separator here; confirm against real input.
                self.start_date = datetime.strptime(line.split(' ')[0], '%B %d, %Y')
                continue
            if "Your closing balance on " in line:
                line = line.replace("Your closing balance on ", '')
                line = line.strip()
                self.end_date = datetime.strptime(line.split(' ')[0], '%B %d, %Y')
                continue
            if "From " in line and " to " in line:
                output = line.replace("From ", '').split(" to ")
                if len(output) != 2:
                    raise ValueError()
                self.start_date = datetime.strptime(output[0].strip(), '%B %d, %Y')
                self.end_date = datetime.strptime(output[1].strip(), '%B %d, %Y')
        if self.end_date is None or self.start_date is None:
            raise ValueError("Could not parse start or end date")
        # --- pass 2: walk the table rows, tracking column offsets ------
        # Column offsets of the current table header (-1 = not seen yet).
        pos_header: Dict[str, int] = {'date': -1, 'desc': -1, 'with': -1, 'dep': -1, 'bal': -1}
        process_date: datetime = None   # date carried over to amount-less rows
        multiline_desc: str = ''        # description accumulated across rows
        opening_balance: str = None
        closing_balance: str = None
        for x in range(len(str_data)):
            line = str_data[x]
            if "Closing Balance" in line:
                closing_balance: str = parser_utils.strip_currency(parser_utils.money_reg.findall(line)[0])
                break
            if "Opening Balance" in line:
                opening_balance: str = parser_utils.strip_currency(parser_utils.money_reg.findall(line)[0])
                continue
            if 'RBPDA' in line and 'HRI' in line:
                # boilerplate line -- skip
                continue
            temp_header = {'date': line.find("Date"),
                           'desc': line.find("Description"),
                           'with': line.find("Withdrawals"),
                           'dep': line.find("Deposits"),
                           'bal': line.find("Balance")}
            if -1 not in temp_header.values():
                # a new table header: remember its column offsets
                pos_header = temp_header
                multiline_desc = ''
                continue
            if -1 not in pos_header.values():
                try:
                    process_date = datetime.strptime(line[pos_header['date']:pos_header['desc']].strip(), '%d %b')
                    process_date = parser_utils.deduce_date(process_date, self.start_date, self.end_date)
                except ValueError:
                    # no date on this row: it belongs to the previous date
                    if process_date is None:
                        continue
                deposit = parser_utils.money_reg.findall(line[pos_header['dep']:pos_header['bal']])
                withdrawal = parser_utils.money_reg.findall(line[pos_header['with']:pos_header['dep']])
                cur_desc = line[pos_header['desc']:pos_header['with']].strip()
                if len(deposit) == 0 and len(withdrawal) == 0:
                    # description-only row: keep it for the next amount row
                    multiline_desc = cur_desc
                    continue
                if len(deposit) != 0 and len(withdrawal) != 0:
                    raise Exception("Why is there a withdrawal and deposit in the same transaction")
                transaction = {'trans_date': process_date}
                # withdrawals are stored as negative amounts
                if len(withdrawal) != 0:
                    transaction['amount'] = -1 * parser_utils.str_to_money(withdrawal[0])
                else:
                    transaction['amount'] = parser_utils.str_to_money(deposit[0])
                transaction['desc'] = (multiline_desc + " " + cur_desc).strip()
                multiline_desc = ''
                # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
                # migrate to pd.concat when the pandas version is bumped.
                self.transaction_table = self.transaction_table.append(transaction, ignore_index=True)
        # --- reconciliation --------------------------------------------
        opening_balance_d = Decimal(opening_balance)
        transactions = round(Decimal(self.transaction_table['amount'].sum()), 2)
        estimated_close = opening_balance_d + transactions
        close = Decimal(closing_balance)
        if estimated_close != close:
            raise ValueError("The (closing balance) != (opening balance) + (transactions)")
        # Drop internal credit-card payment transfers from the table.
        # (Restored: the trailing statement was corrupted in the source dump.)
        self.transaction_table = self.transaction_table[
            self.transaction_table.desc != 'Misc Payment RBC CREDIT CARD']
class Funcionario(object):
    """An employee identified by name and CPF, earning a fixed salary."""

    def __init__(self, nome, cpf, salario):
        # "protected" attributes by convention (single leading underscore)
        self._nome = nome
        self._cpf = cpf
        self._salario = salario

    def get_bonificacao(self):
        """Return the standard bonus: 10% of the salary."""
        return 0.10 * self._salario
class Gerente(Funcionario):
    """A manager: a regular employee plus credentials and a flat extra bonus."""

    def __init__(self, nome, cpf, salario, senha, qtd_funcionarios):
        super().__init__(nome, cpf, salario)
        self._senha = senha
        self._qtd_funcionarios = qtd_funcionarios

    def get_bonificacao(self):
        """Return the base (10%) bonus increased by the fixed manager extra."""
        base = super().get_bonificacao()
        return base + 1000
class ControleDeBonificacoes:
    """Accumulates bonuses from any object exposing get_bonificacao()."""

    def __init__(self, total=0):
        self.total = total

    def registra(self, funcionario):
        """Add the object's bonus to the running total (duck-typed via hasattr)."""
        if not hasattr(funcionario, 'get_bonificacao'):
            print(f'instância de {funcionario.__class__.__name__} não implementa o método get_bonificação')
            return
        self.total += funcionario.get_bonificacao()

    @property
    def total_bonificacoes(self):
        """Read-only view of the accumulated total."""
        return self.total
# Demo: create an employee and a manager and print their bonuses.
funcionario = Funcionario('Pedro', '111111111-11', 2000.0)
print(f'bonificação funcionário: {funcionario.get_bonificacao()}')
print(f"variaveis de funcionario: {vars(funcionario)}\n")
### vars() returns the __dict__ of the given object
gerente = Gerente('Jose', '222222222-22', 5000.0, '1234', 0)
print(f'bonificação gerente: {gerente.get_bonificacao()}')
print(f"variaveis de gerente: {vars(gerente)}")
# Accumulate both bonuses in the controller.
controle = ControleDeBonificacoes()
controle.registra(funcionario)
controle.registra(gerente)
print(f'Total: {controle.total_bonificacoes}')
class Cliente(object):
    """A customer record; deliberately has no get_bonificacao() method."""

    def __init__(self, nome, cpf, idade):
        # public attributes, assigned generically
        for attribute, value in (('nome', nome), ('cpf', cpf), ('idade', idade)):
            setattr(self, attribute, value)
print()
# Cliente has no get_bonificacao, so registra() only prints a warning.
cliente = Cliente('Maria', '33333333-33', '1234')
controle.registra(cliente)
var = [x for x in dir(funcionario)] ### dir() attempts to return a list of valid attributes of the object
# Print the attribute list in three chunks (str(list) minus the brackets).
print(f"\nlista de atributos de Funcionario(superclasse): \n{str(var[0:9])[1:-1]}"
      f"\n{str(var[9:19])[1:-1]}"
      f"\n{str(var[19:])[1:-1]}") | Caelum_Classes.py | class Funcionario(object):
    def __init__(self, nome, cpf, salario):
        # Store identity and salary on "protected" (underscore) attributes.
        self._nome = nome
        self._cpf = cpf
        self._salario = salario
    def get_bonificacao(self):
        """Return the standard bonus: 10% of the salary."""
        return self._salario * 0.10
class Gerente(Funcionario):
    """Manager: adds credentials and a flat extra bonus on top of Funcionario."""

    def __init__(self, nome, cpf, salario, senha, qtd_funcionarios):
        super().__init__(nome, cpf, salario)
        self._senha = senha
        self._qtd_funcionarios = qtd_funcionarios

    def get_bonificacao(self):
        # base 10% bonus plus a fixed 1000 for managers
        return super().get_bonificacao() + 1000
class ControleDeBonificacoes:
    """Accumulates bonuses from any object exposing get_bonificacao()."""

    def __init__(self, total=0):
        self.total = total

    def registra(self, funcionario): ### hasattr(obj, attr) checks whether the object has that attribute
        if hasattr(funcionario, 'get_bonificacao'): ### i.e. the attribute is reachable on the object
            self.total += funcionario.get_bonificacao()
        else:
            print(f'instância de {funcionario.__class__.__name__} não implementa o método get_bonificação')

    @property
    def total_bonificacoes(self):
        """Read-only view of the accumulated total."""
        return self.total
# Demo: create an employee and a manager and print their bonuses.
funcionario = Funcionario('Pedro', '111111111-11', 2000.0)
print(f'bonificação funcionário: {funcionario.get_bonificacao()}')
print(f"variaveis de funcionario: {vars(funcionario)}\n")
### vars() returns the __dict__ of the given object
gerente = Gerente('Jose', '222222222-22', 5000.0, '1234', 0)
print(f'bonificação gerente: {gerente.get_bonificacao()}')
print(f"variaveis de gerente: {vars(gerente)}")
# Accumulate both bonuses in the controller.
controle = ControleDeBonificacoes()
controle.registra(funcionario)
controle.registra(gerente)
print(f'Total: {controle.total_bonificacoes}')
class Cliente(object):
    """Customer record; intentionally has no get_bonificacao method."""

    def __init__(self, nome, cpf, idade):
        self.nome = nome
        self.cpf = cpf
        self.idade = idade
print()
# Cliente lacks get_bonificacao, so the controller prints a warning instead.
cliente = Cliente('Maria', '33333333-33', '1234')
controle.registra(cliente)
var = [x for x in dir(funcionario)] ### dir() attempts to return a list of valid attributes of the object
# Print the attribute list in three chunks (str(list) minus the brackets).
print(f"\nlista de atributos de Funcionario(superclasse): \n{str(var[0:9])[1:-1]}"
      f"\n{str(var[9:19])[1:-1]}"
      f"\n{str(var[19:])[1:-1]}") | 0.506591 | 0.1495
import sys
import serial
import serial.tools.list_ports
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import QTimer
from ui_demo_1 import Ui_Form
from PyQt5.QtGui import QFont
from binascii import *
from crcmod import *
class Pyqt5_Serial(QtWidgets.QWidget, Ui_Form):
    """Small serial-port helper GUI.

    Once a port is open, a 1 s timer repeatedly transmits a CRC16-protected
    control frame built from the slider positions (pitch/roll/yaw) plus the
    most recent zoom / video-source / camera button presses.
    """

    def __init__(self):
        super(Pyqt5_Serial, self).__init__()
        self.setupUi(self)
        self.init()
        self.setWindowTitle("串口小助手")
        self.ser = serial.Serial()
        self.port_check()
        # pending command state consumed by data_send()
        self.outdata = ''
        self.ptz = ''
        self.mode = ''
        self.frame_lenth = 0  # frame length byte (historical misspelling kept)
        self.zoom = 0         # signed zoom speed; 0 = stop
        self.video = 0        # pending video-source selection (0 = none)
        self.camera = 0       # pending photo/record command code (0 = none)

    def init(self):
        """Wire up all UI signals to their handlers."""
        # port re-scan button
        self.s1__box_1.clicked.connect(self.port_check)
        # show details of the selected port
        self.s1__box_2.currentTextChanged.connect(self.port_imf)
        # open / close port buttons
        self.open_button.clicked.connect(self.port_open)
        self.close_button.clicked.connect(self.port_close)
        # periodic transmit timer (started when the port opens)
        self.timer_send = QTimer()
        self.timer_send.timeout.connect(self.data_send)
        # zoom in / out / stop
        self.zoom_add.clicked.connect(self.zoom_up)
        self.zoom_sub.clicked.connect(self.zoom_down)
        self.zoom_stop.clicked.connect(self.zoom_st)
        # video source selection 1-4
        self.select1.clicked.connect(self.select_hdmi)
        self.select2.clicked.connect(self.select_inf)
        self.select3.clicked.connect(self.select_hdmi_inf)
        self.select4.clicked.connect(self.select_inf_hdmi)
        # visible-light camera: photo / record on / record off
        self.k_photo.clicked.connect(self.camera_photo)
        self.k_rec1.clicked.connect(self.camera_recon)
        self.k_rec2.clicked.connect(self.camera_recoff)
        # infrared camera: photo / record on / record off
        self.h_photo.clicked.connect(self.inf_photo)
        self.h_rec1.clicked.connect(self.inf_recon)
        self.h_rec2.clicked.connect(self.inf_recoff)

    # --- camera command setters (values are protocol command codes) ---

    def camera_photo(self):
        self.camera = 17    # 0x11: visible-light photo

    def camera_recon(self):
        self.camera = 18    # 0x12: visible-light record start

    def camera_recoff(self):
        self.camera = 19    # 0x13: visible-light record stop

    def inf_photo(self):
        self.camera = 33    # 0x21: infrared photo

    def inf_recon(self):
        self.camera = 34    # 0x22: infrared record start

    def inf_recoff(self):
        self.camera = 35    # 0x23: infrared record stop

    # --- video source selection ---

    def select_hdmi(self):
        self.video = 1

    def select_inf(self):
        self.video = 2

    def select_hdmi_inf(self):
        self.video = 3

    def select_inf_hdmi(self):
        self.video = 4

    # --- zoom control (signed speed, 0 stops) ---

    def zoom_up(self):
        self.zoom = -10000

    def zoom_down(self):
        self.zoom = 10000

    def zoom_st(self):
        self.zoom = 0

    def port_check(self):
        """Scan available serial ports and fill the port combo box."""
        self.Com_Dict = {}
        port_list = list(serial.tools.list_ports.comports())
        self.s1__box_2.clear()
        for port in port_list:
            self.Com_Dict["%s" % port[0]] = "%s" % port[1]
            self.s1__box_2.addItem(port[0])
        if len(self.Com_Dict) == 0:
            self.state_label.setText(" 无串口")

    def port_imf(self):
        """Show the description of the currently selected port."""
        imf_s = self.s1__box_2.currentText()
        if imf_s != "":
            self.state_label.setText(self.Com_Dict[self.s1__box_2.currentText()])

    def port_open(self):
        """Open the selected port and start the 1 s transmit timer."""
        self.ser.port = self.s1__box_2.currentText()
        self.ser.baudrate = int(self.s1__box_3.currentText())
        self.ser.bytesize = 8
        self.ser.stopbits = 1
        try:
            self.ser.open()
        except Exception:  # was a bare except; any open failure -> dialog
            QMessageBox.critical(self, "Port Error", "此串口不能被打开!")
            return None
        self.timer_send.start(1000)
        if self.ser.isOpen():
            self.open_button.setEnabled(False)
            self.close_button.setEnabled(True)

    def port_close(self):
        """Stop transmitting and close the port (best effort)."""
        self.timer_send.stop()
        try:
            self.ser.close()
        except Exception:  # best effort: port may already be gone
            pass
        self.open_button.setEnabled(True)
        self.close_button.setEnabled(False)

    @staticmethod
    def _hex16(value):
        """Return (low, high) hex-byte strings of *value* as 16-bit two's complement."""
        if value < 0:
            value = value + 65536
        return '{:02X}'.format(value % 256), '{:02X}'.format(value // 256)

    def data_send(self):
        """Build and transmit one control frame (called by the 1 s timer).

        Frame layout: 'AB0E' + length byte + fixed payload + axis targets
        (pitch/roll/yaw/zoom as little-endian int16) + optional video (0x70)
        or camera (0x71) sub-command, with the CRC16 appended low byte first.
        """
        if not self.ser.isOpen():
            return
        hex_pitch_L, hex_pitch_H = self._hex16(self.silder1.value())
        hex_roll_L, hex_roll_H = self._hex16(self.silder2.value())
        hex_yaw_L, hex_yaw_H = self._hex16(self.silder3.value())
        hex_zoom_L, hex_zoom_H = self._hex16(self.zoom)
        axes = (hex_pitch_L + hex_pitch_H + hex_roll_L + hex_roll_H
                + hex_yaw_L + hex_yaw_H + hex_zoom_L + hex_zoom_H)
        # fixed payload between the length byte and the axis targets
        fixed = '013200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B54008'
        self.frame_lenth = 64
        output_s = 'AB0E' + '{:02X}'.format(self.frame_lenth) + fixed + axes
        if self.video != 0:
            # one-shot video-source command (sub-command 0x70, length 1)
            self.frame_lenth = 67
            output_s = ('AB0E' + '{:02X}'.format(self.frame_lenth) + fixed
                        + axes + '7001' + '{:02X}'.format(self.video))
            self.video = 0
        if self.camera != 0:
            # one-shot camera command (sub-command 0x71, length 1);
            # takes precedence over a pending video command, as before
            self.frame_lenth = 67
            output_s = ('AB0E' + '{:02X}'.format(self.frame_lenth) + fixed
                        + axes + '7101' + '{:02X}'.format(self.camera))
            self.camera = 0
        # CRC16 (poly 0x8005 reflected, init 0xFFFF) over the raw frame bytes.
        # Fix: call mkCrcFun directly -- the module name 'crcmod' was only in
        # scope as a side effect of 'from crcmod import *', which is fragile.
        crc16 = mkCrcFun(0x18005, rev=True, initCrc=0xFFFF, xorOut=0x0000)
        data = output_s.replace(" ", "")
        readcrcout = hex(crc16(unhexlify(data))).upper()
        str_list = list(readcrcout)
        if len(str_list) == 5:
            str_list.insert(2, '0')  # pad to four hex digits
        crc_data = "".join(str_list)
        # append the CRC low byte first, then the high byte
        input_s = output_s.strip() + crc_data[4:] + crc_data[2:4]
        if input_s != "":
            send_list = []
            while input_s != '':
                try:
                    num = int(input_s[0:2], 16)
                except ValueError:
                    QMessageBox.critical(self, 'wrong data', '请输入十六进制数据,以空格分开!')
                    return None
                input_s = input_s[2:].strip()
                send_list.append(num)
            self.ser.write(bytes(send_list))
        else:
            pass
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
myshow = Pyqt5_Serial()
myshow.show()
sys.exit(app.exec_()) | main.py | import sys
import serial
import serial.tools.list_ports
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import QTimer
from ui_demo_1 import Ui_Form
from PyQt5.QtGui import QFont
from binascii import *
from crcmod import *
class Pyqt5_Serial(QtWidgets.QWidget, Ui_Form):
def __init__(self):
super(Pyqt5_Serial, self).__init__()
self.setupUi(self)
self.init()
self.setWindowTitle("串口小助手")
self.ser = serial.Serial()
self.port_check()
self.outdata = ''
self.ptz = ''
self.mode = ''
self.frame_lenth = 0
self.zoom = 0
self.video = 0
self.camera = 0
def init(self):
# 串口检测按钮
self.s1__box_1.clicked.connect(self.port_check)
# 串口信息显示
self.s1__box_2.currentTextChanged.connect(self.port_imf)
# 打开串口按钮
self.open_button.clicked.connect(self.port_open)
# 关闭串口按钮
self.close_button.clicked.connect(self.port_close)
# 定时发送数据
self.timer_send = QTimer()
self.timer_send.timeout.connect(self.data_send)
#放大
self.zoom_add.clicked.connect(self.zoom_up)
#缩小
self.zoom_sub.clicked.connect(self.zoom_down)
#停止
self.zoom_stop.clicked.connect(self.zoom_st)
#视频选择1
self.select1.clicked.connect(self.select_hdmi)
# 视频选择1
self.select2.clicked.connect(self.select_inf)
# 视频选择1
self.select3.clicked.connect(self.select_hdmi_inf)
# 视频选择1
self.select4.clicked.connect(self.select_inf_hdmi)
# 可见光拍照
self.k_photo.clicked.connect(self.camera_photo)
# 可见光录像开
self.k_rec1.clicked.connect(self.camera_recon)
# 可见光录像关
self.k_rec2.clicked.connect(self.camera_recoff)
# 红外拍照
self.h_photo.clicked.connect(self.inf_photo)
# 红外录像开
self.h_rec1.clicked.connect(self.inf_recon)
# 红外录像关
self.h_rec2.clicked.connect(self.inf_recoff)
def camera_photo(self):
self.camera = 17
def camera_recon(self):
self.camera = 18
def camera_recoff(self):
self.camera = 19
def inf_photo(self):
self.camera = 33
def inf_recon(self):
self.camera = 34
def inf_recoff(self):
self.camera = 35
def select_hdmi(self):
self.video = 1
def select_inf(self):
self.video = 2
def select_hdmi_inf(self):
self.video = 3
def select_inf_hdmi(self):
self.video = 4
# 放大
def zoom_up(self):
self.zoom = -10000
def zoom_down(self):
self.zoom = 10000
def zoom_st(self):
self.zoom = 0
# 串口检测
def port_check(self):
# 检测所有存在的串口,将信息存储在字典中
self.Com_Dict = {}
port_list = list(serial.tools.list_ports.comports())
self.s1__box_2.clear()
for port in port_list:
self.Com_Dict["%s" % port[0]] = "%s" % port[1]
self.s1__box_2.addItem(port[0])
if len(self.Com_Dict) == 0:
self.state_label.setText(" 无串口")
# 串口信息
def port_imf(self):
# 显示选定的串口的详细信息
imf_s = self.s1__box_2.currentText()
if imf_s != "":
self.state_label.setText(self.Com_Dict[self.s1__box_2.currentText()])
# 打开串口
def port_open(self):
self.ser.port = self.s1__box_2.currentText()
self.ser.baudrate = int(self.s1__box_3.currentText())
self.ser.bytesize = 8
self.ser.stopbits = 1
try:
self.ser.open()
except:
QMessageBox.critical(self, "Port Error", "此串口不能被打开!")
return None
# 打开串口接收定时器,周期为2ms
# self.timer.start(20)
self.timer_send.start(1000)
if self.ser.isOpen():
self.open_button.setEnabled(False)
self.close_button.setEnabled(True)
# 关闭串口
def port_close(self):
self.timer_send.stop()
try:
self.ser.close()
except:
pass
self.open_button.setEnabled(True)
self.close_button.setEnabled(False)
# 发送数据
def data_send(self):
if self.ser.isOpen():
pitch_value=self.silder1.value()
if pitch_value < 0 :
pitch_value = 65536 + pitch_value
pitch_value_H = pitch_value //256
pitch_value_L = pitch_value %256
hex_pitch_H = '{:02X}'.format(pitch_value_H)
hex_pitch_L = '{:02X}'.format(pitch_value_L)
roll_value = self.silder2.value()
if roll_value < 0 :
roll_value = roll_value + 65536
roll_value_H = roll_value // 256
roll_value_L = roll_value % 256
hex_roll_H = '{:02X}'.format(roll_value_H)
hex_roll_L = '{:02X}'.format(roll_value_L)
yaw_value = self.silder3.value()
if yaw_value < 0:
yaw_value = yaw_value + 65536
yaw_value_H = yaw_value // 256
yaw_value_L = yaw_value % 256
hex_yaw_H = '{:02X}'.format(yaw_value_H)
hex_yaw_L = '{:02X}'.format(yaw_value_L)
zoom_value = self.zoom
if zoom_value < 0 :
zoom_value = zoom_value + 65536
zoom_value_H = zoom_value // 256
zoom_value_L = zoom_value % 256
hex_zoom_H = '{:02X}'.format(zoom_value_H)
hex_zoom_L = '{:02X}'.format(zoom_value_L)
self.frame_lenth = 64
hex_frame = '{:02X}'.format(self.frame_lenth)
output_s = 'AB0E' + hex_frame + '013200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B54008'+hex_pitch_L +hex_pitch_H +hex_roll_L +hex_roll_H +hex_yaw_L +hex_yaw_H + hex_zoom_L +hex_zoom_H
# print(output_s)
if self.video != 0 :
self.frame_lenth = 67
hex_frame = '{:02X}'.format(self.frame_lenth)
hex_video = '{:02X}'.format(self.video)
output_s = 'AB0E' + hex_frame + '013200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B54008' + hex_pitch_L + hex_pitch_H + hex_roll_L + hex_roll_H + hex_yaw_L + hex_yaw_H + hex_zoom_L + hex_zoom_H + '7001' + hex_video
self.video = 0
if self.camera != 0 :
self.frame_lenth = 67
hex_frame = '{:02X}'.format(self.frame_lenth)
hex_camera = '{:02X}'.format(self.camera)
output_s = 'AB0E' + hex_frame + '013200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000B54008' + hex_pitch_L + hex_pitch_H + hex_roll_L + hex_roll_H + hex_yaw_L + hex_yaw_H + hex_zoom_L + hex_zoom_H + '7101' + hex_camera
self.camera = 0
input_s = output_s
crc16 = crcmod.mkCrcFun(0x18005, rev=True, initCrc=0xFFFF, xorOut=0x0000)
data = input_s.replace(" ", "")
readcrcout = hex(crc16(unhexlify(data))).upper()
str_list = list(readcrcout)
if len(str_list) == 5:
str_list.insert(2, '0') # 位数不足补0
crc_data = "".join(str_list)
input_s= input_s.strip() + crc_data[4:] + crc_data[2:4]
if input_s != "":
send_list = []
while input_s != '':
try:
num = int(input_s[0:2], 16)
except ValueError:
QMessageBox.critical(self, 'wrong data', '请输入十六进制数据,以空格分开!')
return None
input_s = input_s[2:].strip()
send_list.append(num)
input_s = bytes(send_list)
self.ser.write(input_s)
else:
pass
if __name__ == '__main__':
    # Application entry point: create the Qt app and show the serial tool window.
    # (Removed a stray dataset-row artifact that broke the last line's syntax.)
    app = QtWidgets.QApplication(sys.argv)
    myshow = Pyqt5_Serial()
    myshow.show()
    sys.exit(app.exec_())
import sys, time, gpiod
from argparse import *
class buttonpress:
    """Monitor a GPIO button line and optionally mirror its state on an LED."""

    def __init__(self, chip_button, chip_led):
        """Open the GPIO chips, sharing one handle when button and LED live on the same chip.

        :param chip_button: device path of the chip exposing the button line
        :param chip_led: device path of the chip exposing the LED line
        """
        self.chip_button = gpiod.Chip(chip_button, gpiod.Chip.OPEN_BY_PATH)
        if chip_led != chip_button:
            self.chip_led = gpiod.Chip(chip_led, gpiod.Chip.OPEN_BY_PATH)
        else:
            self.chip_led = self.chip_button

    def main(self, button, led):
        """Print button edge events until 10 seconds pass without one.

        :param button: GPIO line offset of the button
        :param led: GPIO line offset of the LED, or None to disable LED mirroring
        :raises TypeError: on an unexpected event type
        """
        print("Button name: %s, label: %s, lines: %d" % (self.chip_button.name(), self.chip_button.label(), self.chip_button.num_lines()))
        print("LED name: %s, label: %s, lines: %d" % (self.chip_led.name(), self.chip_led.label(), self.chip_led.num_lines()))
        button_line = self.chip_button.get_line(button)
        button_line.request(consumer=sys.argv[0][:-3], type=gpiod.LINE_REQ_EV_BOTH_EDGES)
        # BUGFIX: compare against None instead of truthiness so that GPIO
        # line 0 (a valid offset) is not silently treated as "no LED".
        if led is not None:
            led_line = self.chip_led.get_line(led)
            led_line.request(consumer=sys.argv[0][:-3], type=gpiod.LINE_REQ_DIR_OUT)
        else:
            led_line = None
        print("Press and release button, timeout in 10 seconds after last press\n")
        while button_line.event_wait(sec=10):
            event = button_line.event_read()
            if event.type == gpiod.LineEvent.RISING_EDGE:
                print("Rising edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.sec)))
            elif event.type == gpiod.LineEvent.FALLING_EDGE:
                print("Falling edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.sec)))
            else:
                raise TypeError('Invalid event type')
            # If an LED line was requested, drive it from the event type.
            if led_line is not None:
                if event.type == gpiod.LineEvent.RISING_EDGE:
                    led_line.set_value(1)
                else:
                    led_line.set_value(0)
if __name__ == "__main__":
    # CLI entry point: defaults match the NanoPi Duo wiring.
    # (Removed a stray dataset-row artifact from the last line.)
    parser = ArgumentParser()
    parser.add_argument("--chip_button", help="GPIO chip name (default '/dev/gpiochip1')", type=str, default="/dev/gpiochip1")
    parser.add_argument("--button", help="GPIO line number (default 3 button on NanoPi Duo)", type=int, default=3)
    parser.add_argument("--chip_led", help="GPIO chip name (default '/dev/gpiochip0')", type=str, default="/dev/gpiochip0")
    parser.add_argument("--led", help="GPIO line number", type=int)
    args = parser.parse_args()
    obj = buttonpress(args.chip_button, args.chip_led)
    obj.main(args.button, args.led)
from argparse import *
class buttonpress:
    """Monitor a GPIO button line and optionally mirror its state on an LED."""

    def __init__(self, chip_button, chip_led):
        """Open the GPIO chips, sharing one handle when button and LED live on the same chip.

        :param chip_button: device path of the chip exposing the button line
        :param chip_led: device path of the chip exposing the LED line
        """
        self.chip_button = gpiod.Chip(chip_button, gpiod.Chip.OPEN_BY_PATH)
        if chip_led != chip_button:
            self.chip_led = gpiod.Chip(chip_led, gpiod.Chip.OPEN_BY_PATH)
        else:
            self.chip_led = self.chip_button

    def main(self, button, led):
        """Print button edge events until 10 seconds pass without one.

        :param button: GPIO line offset of the button
        :param led: GPIO line offset of the LED, or None to disable LED mirroring
        :raises TypeError: on an unexpected event type
        """
        print("Button name: %s, label: %s, lines: %d" % (self.chip_button.name(), self.chip_button.label(), self.chip_button.num_lines()))
        print("LED name: %s, label: %s, lines: %d" % (self.chip_led.name(), self.chip_led.label(), self.chip_led.num_lines()))
        button_line = self.chip_button.get_line(button)
        button_line.request(consumer=sys.argv[0][:-3], type=gpiod.LINE_REQ_EV_BOTH_EDGES)
        # BUGFIX: compare against None instead of truthiness so that GPIO
        # line 0 (a valid offset) is not silently treated as "no LED".
        if led is not None:
            led_line = self.chip_led.get_line(led)
            led_line.request(consumer=sys.argv[0][:-3], type=gpiod.LINE_REQ_DIR_OUT)
        else:
            led_line = None
        print("Press and release button, timeout in 10 seconds after last press\n")
        while button_line.event_wait(sec=10):
            event = button_line.event_read()
            if event.type == gpiod.LineEvent.RISING_EDGE:
                print("Rising edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.sec)))
            elif event.type == gpiod.LineEvent.FALLING_EDGE:
                print("Falling edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.sec)))
            else:
                raise TypeError('Invalid event type')
            # If an LED line was requested, drive it from the event type.
            if led_line is not None:
                if event.type == gpiod.LineEvent.RISING_EDGE:
                    led_line.set_value(1)
                else:
                    led_line.set_value(0)
if __name__ == "__main__":
    # CLI entry point: defaults match the NanoPi Duo wiring.
    # (Removed a stray dataset-row artifact from the last line.)
    parser = ArgumentParser()
    parser.add_argument("--chip_button", help="GPIO chip name (default '/dev/gpiochip1')", type=str, default="/dev/gpiochip1")
    parser.add_argument("--button", help="GPIO line number (default 3 button on NanoPi Duo)", type=int, default=3)
    parser.add_argument("--chip_led", help="GPIO chip name (default '/dev/gpiochip0')", type=str, default="/dev/gpiochip0")
    parser.add_argument("--led", help="GPIO line number", type=int)
    args = parser.parse_args()
    obj = buttonpress(args.chip_button, args.chip_led)
    obj.main(args.button, args.led)
import os
import logging
import threading
import yaml
from inotify_simple import INotify, flags
from ambianic.config_mgm.config_diff import Config
from ambianic.config_mgm import fileutils
log = logging.getLogger(__name__)
class ConfigurationManager:
    """Central configuration store.

    Loads YAML configuration (config.yaml, optionally preceded by
    secrets.yaml), watches the working directory for changes, and notifies
    registered callbacks on every configuration update.
    """

    def __init__(self, work_dir=None, config=None):
        """
        :param work_dir: directory holding config.yaml/secrets.yaml; loaded if given
        :param config: optional initial configuration dict, applied via set()
        """
        self.Config = Config
        self.CONFIG_FILE = "config.yaml"
        self.SECRETS_FILE = "secrets.yaml"
        self.lock = threading.RLock()
        self.__config = None
        self.watch_thread = None
        self.watch_event = threading.Event()
        self.handlers = []
        if config is not None:
            self.set(config)
        if work_dir is not None:
            self.load(work_dir)

    def stop(self):
        """Stop the config manager: drop handlers, clear config, stop the watcher."""
        self.handlers = []
        with self.lock:
            self.__config = None
        self.watch_stop()
        if self.watch_thread is not None:
            self.watch_thread.join()
            self.watch_thread = None

    def register_handler(self, callback):
        """Register a callback to trigger when there is a configuration update"""
        self.handlers.append(callback)

    def unregister_handler(self, callback):
        """Remove a callback from the configuration updates handlers"""
        self.handlers.remove(callback)

    def __watcher(self):
        """Poll inotify for MODIFY events and reload when a config file changes."""
        inotify = INotify()
        wd = inotify.add_watch(self.work_dir, flags.MODIFY)
        while not self.watch_event.is_set():
            for event in inotify.read(timeout=100, read_delay=100):
                for filename in [self.CONFIG_FILE, self.SECRETS_FILE]:
                    if event.name == filename:
                        log.info("File change detected: %s", filename)
                        self.load(self.work_dir)
                        break
        # stop watching
        inotify.rm_watch(wd)

    def watch_start(self):
        """Start watching fs for changes"""
        if self.watch_thread is None:
            self.watch_event.clear()
            self.watch_thread = threading.Thread(target=self.__watcher)
            self.watch_thread.start()

    def watch_stop(self):
        """Stop watching fs for changes"""
        self.watch_event.set()

    def save(self):
        """Save configuration to file (no-op when nothing is loaded)."""
        if self.get() is None:
            return
        fileutils.save(self.get_config_file(), self.get())

    def get_config_file(self) -> str:
        """Return the config file path"""
        return os.path.join(self.work_dir, self.CONFIG_FILE)

    def get_secrets_file(self) -> str:
        """Return the secrets file path"""
        return os.path.join(self.work_dir, self.SECRETS_FILE)

    def load(self, work_dir) -> Config:
        """Load configuration from file, prepending the secrets file if present.

        :param work_dir: directory containing the configuration files
        :return: the applied Config, or None on error
        """
        assert os.path.exists(work_dir), \
            'working directory invalid: {}'.format(work_dir)
        self.work_dir = work_dir
        self.watch_start()
        secrets_file = self.get_secrets_file()
        config_file = self.get_config_file()
        try:
            if os.path.isfile(secrets_file):
                with open(secrets_file) as sf:
                    secrets_config = sf.read()
            else:
                secrets_config = ""
                log.warning('Secrets file not found. '
                            'Proceeding without it: %s',
                            secrets_file)
            with open(config_file) as cf:
                base_config = cf.read()
            all_config = secrets_config + "\n" + base_config
            config = yaml.safe_load(all_config)
            log.debug('loaded config from %r: %r',
                      self.CONFIG_FILE, config)
            return self.set(config)
        except FileNotFoundError:
            log.warning('Configuration file not found: %s', config_file)
            log.warning(
                'Please provide a configuration file and restart.')
        except Exception:
            # log.exception already records the traceback; the previously
            # bound-but-unused 'as e' and redundant exc_info=True were removed.
            log.exception('Configuration Error!')
        return None

    def get_sources(self) -> Config:
        """Return sources configuration"""
        config = self.get()
        if config is None:
            return None
        if config.get("sources", None) is None:
            config.set("sources", {})
        return config.get("sources", None)

    def get_source(self, source: str) -> Config:
        """Return a source by name"""
        sources = self.get_sources()
        if sources is None:
            return None
        return sources.get(source, None)

    def get_ai_models(self) -> Config:
        """Return ai_models configuration"""
        config = self.get()
        if config is None:
            return None
        if config.get("ai_models", None) is None:
            config.set("ai_models", {})
        return config.get("ai_models", None)

    def get_ai_model(self, ai_model: str) -> Config:
        """Return an ai_model by name"""
        ai_models = self.get_ai_models()
        if ai_models is None:
            return None
        return ai_models.get(ai_model, None)

    def get_pipelines(self) -> Config:
        """Return pipelines configuration (docstring previously copy-pasted from ai_models)."""
        config = self.get()
        # None-guard added for consistency with get_sources/get_ai_models.
        if config is None:
            return None
        return config.get("pipelines", None)

    def get_pipeline(self, name) -> Config:
        """Return pipeline configuration"""
        pipelines = self.get_pipelines()
        if pipelines is None:
            return None
        return pipelines.get(name, None)

    def get_data_dir(self) -> Config:
        """Return data_dir configuration"""
        config = self.get()
        if config is None:
            return None
        return config.get("data_dir", None)

    def get(self) -> Config:
        """Get stored configuration.

        :return: the current Config, or None when nothing is loaded.
        """
        with self.lock:
            return self.__config

    def set(self, new_config: dict) -> Config:
        """Set configuration and notify all registered handlers.

        :param new_config: the new configuration dict to apply
        :return: the current Config after the update
        """
        with self.lock:
            if self.__config is None:
                self.__config = Config(new_config)
            else:
                self.__config.sync(new_config)
        for handler in self.handlers:
            handler(self.get())
        return self.get()
import os
import logging
import threading
import yaml
from inotify_simple import INotify, flags
from ambianic.config_mgm.config_diff import Config
from ambianic.config_mgm import fileutils
log = logging.getLogger(__name__)
class ConfigurationManager:
    """Central configuration store.

    Loads YAML configuration (config.yaml, optionally preceded by
    secrets.yaml), watches the working directory for changes, and notifies
    registered callbacks on every configuration update.
    """

    def __init__(self, work_dir=None, config=None):
        """
        :param work_dir: directory holding config.yaml/secrets.yaml; loaded if given
        :param config: optional initial configuration dict, applied via set()
        """
        self.Config = Config
        self.CONFIG_FILE = "config.yaml"
        self.SECRETS_FILE = "secrets.yaml"
        self.lock = threading.RLock()
        self.__config = None
        self.watch_thread = None
        self.watch_event = threading.Event()
        self.handlers = []
        if config is not None:
            self.set(config)
        if work_dir is not None:
            self.load(work_dir)

    def stop(self):
        """Stop the config manager: drop handlers, clear config, stop the watcher."""
        self.handlers = []
        with self.lock:
            self.__config = None
        self.watch_stop()
        if self.watch_thread is not None:
            self.watch_thread.join()
            self.watch_thread = None

    def register_handler(self, callback):
        """Register a callback to trigger when there is a configuration update"""
        self.handlers.append(callback)

    def unregister_handler(self, callback):
        """Remove a callback from the configuration updates handlers"""
        self.handlers.remove(callback)

    def __watcher(self):
        """Poll inotify for MODIFY events and reload when a config file changes."""
        inotify = INotify()
        wd = inotify.add_watch(self.work_dir, flags.MODIFY)
        while not self.watch_event.is_set():
            for event in inotify.read(timeout=100, read_delay=100):
                for filename in [self.CONFIG_FILE, self.SECRETS_FILE]:
                    if event.name == filename:
                        log.info("File change detected: %s", filename)
                        self.load(self.work_dir)
                        break
        # stop watching
        inotify.rm_watch(wd)

    def watch_start(self):
        """Start watching fs for changes"""
        if self.watch_thread is None:
            self.watch_event.clear()
            self.watch_thread = threading.Thread(target=self.__watcher)
            self.watch_thread.start()

    def watch_stop(self):
        """Stop watching fs for changes"""
        self.watch_event.set()

    def save(self):
        """Save configuration to file (no-op when nothing is loaded)."""
        if self.get() is None:
            return
        fileutils.save(self.get_config_file(), self.get())

    def get_config_file(self) -> str:
        """Return the config file path"""
        return os.path.join(self.work_dir, self.CONFIG_FILE)

    def get_secrets_file(self) -> str:
        """Return the secrets file path"""
        return os.path.join(self.work_dir, self.SECRETS_FILE)

    def load(self, work_dir) -> Config:
        """Load configuration from file, prepending the secrets file if present.

        :param work_dir: directory containing the configuration files
        :return: the applied Config, or None on error
        """
        assert os.path.exists(work_dir), \
            'working directory invalid: {}'.format(work_dir)
        self.work_dir = work_dir
        self.watch_start()
        secrets_file = self.get_secrets_file()
        config_file = self.get_config_file()
        try:
            if os.path.isfile(secrets_file):
                with open(secrets_file) as sf:
                    secrets_config = sf.read()
            else:
                secrets_config = ""
                log.warning('Secrets file not found. '
                            'Proceeding without it: %s',
                            secrets_file)
            with open(config_file) as cf:
                base_config = cf.read()
            all_config = secrets_config + "\n" + base_config
            config = yaml.safe_load(all_config)
            log.debug('loaded config from %r: %r',
                      self.CONFIG_FILE, config)
            return self.set(config)
        except FileNotFoundError:
            log.warning('Configuration file not found: %s', config_file)
            log.warning(
                'Please provide a configuration file and restart.')
        except Exception:
            # log.exception already records the traceback; the previously
            # bound-but-unused 'as e' and redundant exc_info=True were removed.
            log.exception('Configuration Error!')
        return None

    def get_sources(self) -> Config:
        """Return sources configuration"""
        config = self.get()
        if config is None:
            return None
        if config.get("sources", None) is None:
            config.set("sources", {})
        return config.get("sources", None)

    def get_source(self, source: str) -> Config:
        """Return a source by name"""
        sources = self.get_sources()
        if sources is None:
            return None
        return sources.get(source, None)

    def get_ai_models(self) -> Config:
        """Return ai_models configuration"""
        config = self.get()
        if config is None:
            return None
        if config.get("ai_models", None) is None:
            config.set("ai_models", {})
        return config.get("ai_models", None)

    def get_ai_model(self, ai_model: str) -> Config:
        """Return an ai_model by name"""
        ai_models = self.get_ai_models()
        if ai_models is None:
            return None
        return ai_models.get(ai_model, None)

    def get_pipelines(self) -> Config:
        """Return pipelines configuration (docstring previously copy-pasted from ai_models)."""
        config = self.get()
        # None-guard added for consistency with get_sources/get_ai_models.
        if config is None:
            return None
        return config.get("pipelines", None)

    def get_pipeline(self, name) -> Config:
        """Return pipeline configuration"""
        pipelines = self.get_pipelines()
        if pipelines is None:
            return None
        return pipelines.get(name, None)

    def get_data_dir(self) -> Config:
        """Return data_dir configuration"""
        config = self.get()
        if config is None:
            return None
        return config.get("data_dir", None)

    def get(self) -> Config:
        """Get stored configuration.

        :return: the current Config, or None when nothing is loaded.
        """
        with self.lock:
            return self.__config

    def set(self, new_config: dict) -> Config:
        """Set configuration and notify all registered handlers.

        :param new_config: the new configuration dict to apply
        :return: the current Config after the update
        """
        with self.lock:
            if self.__config is None:
                self.__config = Config(new_config)
            else:
                self.__config.sync(new_config)
        for handler in self.handlers:
            handler(self.get())
        return self.get()
"""Train a VGG16 classifier on the valence dataset and report test accuracy."""
import os
import zipfile
import numpy as np  # BUGFIX: np.argmax/np.array were used but numpy was never imported
import seaborn as sns  # BUGFIX: sns.heatmap was used but seaborn was never imported
import tensorflow as tf
import matplotlib.pyplot as plt  # single import (was duplicated mid-script)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
from keras.applications.vgg16 import VGG16

# Input geometry and batch size shared by all dataset loaders.
batch_size = 32
img_height = 180
img_width = 180

# Update data root to point at valence dataset.
data_root = "/Users/sallyann/Documents/Fall 2020/Capstone/aff_wild_annotations_bboxes_landmarks_new/videos/dataset_valence/"

# Training split (80% of the 'train' folder).
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_root + 'train',
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

# Validation split (gives an unbiased estimate of the final model).
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_root + 'train',
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

# Test loader; batch size 200 so a single take(1) yields a large batch.
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_root + 'train',
    seed=123,
    image_size=(img_height, img_width),
    batch_size=200
)

# The three classes of valence (high, low, neutral).
class_names = train_ds.class_names
print(class_names)

# Preview a 3x3 grid of training images.
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")

for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break

# NOTE: mac doesn't use certificates; a workaround may be needed to fetch VGG weights.
model = VGG16()
model.compile(
    optimizer='adam',
    loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])
model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=5
)

# result = model.evaluate(test_ds)
# dict(zip(model.metrics_names, result))

# Collect one test batch and predict class indices for it.
test_images = []
test_labels = []
predictions = []
for image, label in test_ds.take(1):
    test_images.append(image.numpy())
    test_labels.append(label.numpy())
    predictions.append(np.argmax(model.predict(test_images), axis=1))
test_labels = np.array(test_labels)
predictions = np.array(predictions)
y_true = test_labels

test_acc = sum(predictions[0] == y_true[0]) / len(y_true[0])
print(f'Test set accuracy: {test_acc:.0%}')
print(class_names)

# Confusion matrix over the evaluated batch.
confusion_mtx = tf.math.confusion_matrix(y_true[0], predictions[0])
plt.figure(figsize=(10, 8))
sns.heatmap(confusion_mtx, yticklabels=class_names, xticklabels=class_names, annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show()
import zipfile
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
import matplotlib.pyplot as plt
from keras.applications.vgg16 import VGG16
batch_size = 32
img_height = 180
img_width = 180
#Update data root to point at valence dataset
data_root="/Users/sallyann/Documents/Fall 2020/Capstone/aff_wild_annotations_bboxes_landmarks_new/videos/dataset_valence/"
#get raw data into dataset object to train model
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_root+'train',
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
#gets raw data for validation dataset (gives unbiased estimate of the the final model)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_root+'train',
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
#gets raw data for testing model
test_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_root+'train',
seed=123,
image_size=(img_height, img_width),
batch_size = 200
)
#gets three classs of valence (high,low,neutral)
class_names = train_ds.class_names
print(class_names)
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
#mac doesn't use certificates need a work around to use VGG
model = VGG16()
model.compile(
optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_ds,
validation_data=val_ds,
epochs=5
)
#result = model.evaluate(test_ds)
#dict(zip(model.metrics_names, result))
test_images = []
test_labels = []
predictions = []
for image, label in test_ds.take(1):
test_images.append(image.numpy())
test_labels.append(label.numpy())
predictions.append(np.argmax(model.predict(test_images), axis=1))
test_labels = np.array(test_labels)
predictions = np.array(predictions)
y_true = test_labels
test_acc = sum(predictions[0] == y_true[0]) / len(y_true[0])
print(f'Test set accuracy: {test_acc:.0%}')
print(class_names)
confusion_mtx = tf.math.confusion_matrix(y_true[0], predictions[0])
plt.figure(figsize=(10, 8))
sns.heatmap(confusion_mtx, yticklabels = class_names, xticklabels = class_names,annot=True, fmt='g')
plt.xlabel('Prediction')
plt.ylabel('Label')
plt.show() | 0.573201 | 0.504822 |
import concurrent.futures
import cv2
import os
import shutil
import imaging_db.filestorage.data_storage as data_storage
class LocalStorage(data_storage.DataStorage):
    """Class for handling image and file transfers to local storage"""

    def __init__(self,
                 storage_dir,
                 nbr_workers=None,
                 access_point=None):
        """
        Local storage is assumed to be mounted at STORAGE_MOUNT_POINT
        unless otherwise specified.
        Main directories for both S3 and local storage are
        raw_frames: For datasets that have been parsed into individual
        2D frames with indices channels, timepoints, slices and positions.
        raw_files: For files that have not been separated into frames + metadata.
        They're copied to storage as is.
        :param str storage_dir: Directory name (dataset ID) in raw_frames or
            raw_files (e.g. raw_frames/ID-YYYY-MM-DD-HH-MM-SS-SSSS)
        :param int nbr_workers: Number of workers for uploads/downloads
        :param str/None access_point: Path to where local storage is mounted.
            Default mount point: /Volumes/data_lg/czbiohub-imaging
        """
        super().__init__(storage_dir=storage_dir,
                         nbr_workers=nbr_workers,
                         access_point=access_point)
        # Fall back to the default mount point when none was supplied.
        if self.access_point is None:
            self.mount_point = data_storage.STORAGE_MOUNT_POINT
        else:
            self.mount_point = self.access_point
        assert os.path.exists(self.mount_point),\
            "Make sure local storage is mounted, dir {} doesn't exist".format(
                self.mount_point,
            )
        # Path to dataset ID directory in storage:
        # mount point + raw files/frames + dataset ID
        self.id_storage_path = os.path.join(self.mount_point, self.storage_dir)

    def assert_unique_id(self):
        """
        Makes sure directory with dataset ID doesn't already exist in storage
        :raise AssertionError: if directory exists
        """
        # 'not ...' replaces the non-idiomatic '... is False' comparison.
        assert not os.path.exists(self.id_storage_path),\
            "ID {} already exists in storage".format(self.id_storage_path)

    def nonexistent_storage_path(self, storage_path):
        """
        Checks that a given path to a file in storage doesn't already exist.
        :param str storage_path: Path in local storage
        :return bool: True if file doesn't exist in storage, False otherwise
        """
        # Single boolean expression instead of an if/else returning True/False.
        return not os.path.exists(os.path.join(self.mount_point, storage_path))

    def get_storage_path(self, file_name):
        """
        Given a file name without path, return full storage path,
        given mount point and storage directory.
        :param str file_name: File name with extension, no path
        :return str storage_path: Full path to file in storage
        """
        return os.path.join(
            self.id_storage_path,
            file_name,
        )

    def upload_frames(self, file_names, im_stack, file_format=".png"):
        """
        Writes all frames to storage using threading or multiprocessing
        :param list file_names: Image file names (str), with extension, no path
        :param np.array im_stack: all 2D frames from file converted to stack
        :param str file_format: file format for frames to be written in storage
        """
        # Create directory if it doesn't exist already
        os.makedirs(self.id_storage_path, exist_ok=True)
        # Make sure number of file names matches stack shape
        assert len(file_names) == im_stack.shape[-1], \
            "Number of file names {} doesn't match frames {}".format(
                len(file_names), im_stack.shape[-1])
        path_im_tuples = []
        for i, file_name in enumerate(file_names):
            storage_path = self.get_storage_path(file_name)
            path_im_tuples.append((storage_path, im_stack[..., i]))
        with concurrent.futures.ProcessPoolExecutor(self.nbr_workers) as ex:
            ex.map(self.upload_im_tuple, path_im_tuples)

    def upload_im_tuple(self, path_im_tuple):
        """
        Save image to storage after checking that the path to file doesn't
        already exist in storage.
        :param tuple path_im_tuple: (File name str and image np.array)
        """
        (im_path, im) = path_im_tuple
        if self.nonexistent_storage_path(im_path):
            os.makedirs(self.id_storage_path, exist_ok=True)
            cv2.imwrite(im_path, im)
        else:
            print("File {} already exists.".format(im_path))

    def upload_im(self, im_name, im, file_format='.png'):
        """
        Save image to storage after checking that the path to file doesn't
        already exist in storage.
        :param str im_name: File name for image, with extension, no path
        :param np.array im: 2D image
        :param str file_format: File format for writing image
        """
        im_path = self.get_storage_path(im_name)
        if self.nonexistent_storage_path(im_path):
            os.makedirs(self.id_storage_path, exist_ok=True)
            cv2.imwrite(im_path, im)
        else:
            print("File {} already exists.".format(im_path))

    def upload_file(self, file_path):
        """
        Upload a single file to storage by copying (file is not opened).
        :param str file_path: full path to local file to be moved to storage
        """
        # ID should be unique, make sure it doesn't already exist
        self.assert_unique_id()
        # Create directory for file
        os.makedirs(self.id_storage_path, exist_ok=True)
        file_name = os.path.basename(file_path)
        save_path = self.get_storage_path(file_name)
        shutil.copy(file_path, save_path)

    def get_im(self, file_name):
        """
        Given file name, fetch 2D image (frame) from storage.
        File name consists of raw_files/raw_frames + dataset ID +
        image name (im_c***_z***_t***_p***.png)
        :param str file_name: File name of 2D image (frame)
        :return np.array im: 2D image
        """
        im_path = self.get_storage_path(file_name)
        return cv2.imread(im_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)

    def download_file(self, file_name, dest_dir):
        """
        Downloads/copies a single file from storage to local destination without
        reading it.
        :param str file_name: File name
        :param str dest_dir: Destination directory name
        """
        storage_path = self.get_storage_path(file_name)
        dest_path = os.path.join(dest_dir, file_name)
        shutil.copy(storage_path, dest_path)
import cv2
import os
import shutil
import imaging_db.filestorage.data_storage as data_storage
class LocalStorage(data_storage.DataStorage):
"""Class for handling image and file transfers to local storage"""
def __init__(self,
storage_dir,
nbr_workers=None,
access_point=None):
"""
Local storage is assumed to be mounted at STORAGE_MOUNT_POINT
unless otherwise specified.
Main directories for both S3 and local storage are
raw_frames: For datasets that have been parsed into individual
2D frames with indices channels, timepoints, slices and positions.
raw_files: For files that have not been separated into frames + metadata.
They're copied to storage as is.
:param str storage_dir: Directory name (dataset ID) in raw_frames or
raw_files (e.g. raw_frames/ID-YYYY-MM-DD-HH-MM-SS-SSSS)
:param int nbr_workers: Number of workers for uploads/downloads
:param str/None access_point: Path to where local storage is mounted.
Default mount point: /Volumes/data_lg/czbiohub-imaging
"""
super().__init__(storage_dir=storage_dir,
nbr_workers=nbr_workers,
access_point=access_point)
if self.access_point is None:
self.mount_point = data_storage.STORAGE_MOUNT_POINT
else:
self.mount_point = self.access_point
assert os.path.exists(self.mount_point),\
"Make sure local storage is mounted, dir {} doesn't exist".format(
self.mount_point,
)
# Path to dataset ID directory in storage
# mount point + raw files/frames + dataset ID
self.id_storage_path = os.path.join(self.mount_point, self.storage_dir)
def assert_unique_id(self):
"""
Makes sure directory with dataset ID doesn't already exist in storage
:raise AssertionError: if directory exists
"""
assert os.path.exists(self.id_storage_path) is False,\
"ID {} already exists in storage".format(self.id_storage_path)
def nonexistent_storage_path(self, storage_path):
"""
Checks that a given path to a file in storage doesn't already exist.
:param str storage_path: Path in local storage
:return bool: True if file doesn't exist in storage, False otherwise
"""
dir_path = os.path.join(self.mount_point, storage_path)
if not os.path.exists(dir_path):
return True
else:
return False
def get_storage_path(self, file_name):
"""
Given a file name without path, return full storage path,
given mount point and storage directory.
:param str file_name: File name with extension, no path
:return str storage_path: Full path to file in storage
"""
storage_path = os.path.join(
self.id_storage_path,
file_name,
)
return storage_path
def upload_frames(self, file_names, im_stack, file_format=".png"):
"""
Writes all frames to storage using threading or multiprocessing
:param list file_names: Image file names (str), with extension, no path
:param np.array im_stack: all 2D frames from file converted to stack
:param str file_format: file format for frames to be written in storage
"""
# Create directory if it doesn't exist already
os.makedirs(self.id_storage_path, exist_ok=True)
# Make sure number of file names matches stack shape
assert len(file_names) == im_stack.shape[-1], \
"Number of file names {} doesn't match frames {}".format(
len(file_names), im_stack.shape[-1])
path_im_tuples = []
for i, file_name in enumerate(file_names):
storage_path = self.get_storage_path(file_name)
path_im_tuples.append((storage_path, im_stack[..., i]))
with concurrent.futures.ProcessPoolExecutor(self.nbr_workers) as ex:
ex.map(self.upload_im_tuple, path_im_tuples)
def upload_im_tuple(self, path_im_tuple):
"""
Save image to storage after checking that the path to file doesn't
already exist in storage.
:param tuple path_im_tuple: (File name str and image np.array)
"""
(im_path, im) = path_im_tuple
if self.nonexistent_storage_path(im_path):
os.makedirs(self.id_storage_path, exist_ok=True)
cv2.imwrite(im_path, im)
else:
print("File {} already exists.".format(im_path))
    def upload_im(self, im_name, im, file_format='.png'):
        """
        Save a single image to storage unless the target path already exists.

        :param str im_name: File name for image, with extension, no path
        :param np.array im: 2D image
        :param str file_format: File format for writing image.
            NOTE(review): unused — the extension in ``im_name`` decides the
            format cv2 writes; confirm intent.
        """
        im_path = self.get_storage_path(im_name)
        if self.nonexistent_storage_path(im_path):
            # Create the dataset directory on demand.
            os.makedirs(self.id_storage_path, exist_ok=True)
            cv2.imwrite(im_path, im)
        else:
            # Never overwrite existing files; just report.
            print("File {} already exists.".format(im_path))
    def upload_file(self, file_path):
        """
        Upload a single file to storage by copying (file is not opened).

        :param str file_path: Full path to local file to be moved to storage
        """
        # ID should be unique, make sure it doesn't already exist
        self.assert_unique_id()
        # Create directory for file
        os.makedirs(self.id_storage_path, exist_ok=True)
        file_name = os.path.basename(file_path)
        save_path = self.get_storage_path(file_name)
        # shutil.copy copies contents and permission bits, not timestamps.
        shutil.copy(file_path, save_path)
    def get_im(self, file_name):
        """
        Given file name, fetch 2D image (frame) from storage.

        :param str file_name: File name of 2D image (frame), no path
        :return np.array im: 2D image, or None if cv2 cannot read the path
        """
        im_path = self.get_storage_path(file_name)
        # ANYDEPTH | ANYCOLOR preserves the stored bit depth and channel
        # layout instead of forcing an 8-bit BGR conversion.
        # NOTE(review): cv2.imread returns None on failure rather than raising.
        im = cv2.imread(im_path, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
        return im
    def download_file(self, file_name, dest_dir):
        """
        Copy a single file from storage to a local destination without
        reading it.

        :param str file_name: File name (no path); resolved inside storage
        :param str dest_dir: Destination directory (assumed to exist —
            TODO confirm callers create it)
        """
        storage_path = self.get_storage_path(file_name)
        dest_path = os.path.join(dest_dir, file_name)
shutil.copy(storage_path, dest_path) | 0.68342 | 0.326164 |
import hashlib
from os import makedirs, symlink
from shutil import rmtree
from os.path import join, basename
from unittest.mock import patch
from egcg_core import util
from tests import TestEGCG
fastq_dir = join(TestEGCG.assets_path, 'fastqs')
def test_find_files():
    # find_files must glob the pattern and return matches in a deterministic
    # order (the equality check below relies on it).
    expected = [join(TestEGCG.assets_path, f) for f in ('ftest.txt', 'ftest_2.txt')]
    assert util.find_files(TestEGCG.assets_path, 'ftest*.txt') == expected
@patch('logging.Logger.warning')
def test_find_file(mocked_log):
    # find_file returns a single exact match, and None for no match or for
    # an ambiguous (multi-match) pattern.
    assert util.find_file(TestEGCG.assets_path, 'ftest.txt') == join(TestEGCG.assets_path, 'ftest.txt')
    assert util.find_file(TestEGCG.assets_path, 'ftest_.txt') is None
    assert util.find_file(TestEGCG.assets_path, 'ftest*.txt') is None
    # The ambiguous case logs a warning with lazy %-style args (pattern tuple
    # plus the number of hits).
    mocked_log.assert_called_with(
        'Searched pattern %s for one file, but got %s', (TestEGCG.assets_path, 'ftest*.txt'), 2
    )
def test_str_join():
    # str_join joins its positional args with the keyword-only separator.
    assert util.str_join('this', 'that', 'other', separator='/') == 'this/that/other'
def test_find_fastqs():
    # All four R1/R2 files across lanes 4 and 5 should be discovered.
    fastqs = util.find_fastqs(fastq_dir, '10015AT', '10015AT0001')
    for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz',
                      '10015AT0001_S6_L005_R1_001.fastq.gz', '10015AT0001_S6_L005_R2_001.fastq.gz'):
        assert join(fastq_dir, '10015AT', '10015AT0001', file_name) in fastqs
def test_find_fastqs_with_lane():
    # Restricting to lane 4 still finds both reads of that lane.
    fastqs = util.find_fastqs(fastq_dir, '10015AT', '10015AT0001', lane=4)
    for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz'):
        assert join(fastq_dir, '10015AT', '10015AT0001', file_name) in fastqs
def test_find_all_fastqs():
    # Recursive discovery over the whole fastq dir; compare basenames only.
    fastqs = util.find_all_fastqs(fastq_dir)
    for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz',
                      '10015AT0002_merged_R1.fastq.gz', '10015AT0002_merged_R2.fastq.gz'):
        assert file_name in [basename(f) for f in fastqs]
def test_find_all_fastq_pairs():
    # Pairs come back as (R1, R2) tuples, ordered by lane.
    observed = util.find_all_fastq_pairs(join(fastq_dir, '10015AT', '10015AT0001'))
    expected = [('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz'),
                ('10015AT0001_S6_L005_R1_001.fastq.gz', '10015AT0001_S6_L005_R2_001.fastq.gz')]
    assert [(basename(f), basename(g)) for f, g in observed] == expected
def test_same_fs():
    test = join(TestEGCG.assets_path, 'ftest.txt')
    test_2 = join(TestEGCG.assets_path, 'ftest_2.txt')
    test_nonexistent = join(TestEGCG.assets_path, 'ftest_nonexistent.txt')
    # None is never on the same filesystem as anything.
    assert util.same_fs(test, None) is False
    assert util.same_fs(test, test_2)
    # A nonexistent sibling path still counts as the same filesystem.
    assert util.same_fs(test, test_nonexistent)
class TestMoveDir(TestEGCG):
    """Exercises util.move_dir: fresh moves, symlink handling, and merging
    into an already-populated destination tree."""

    @staticmethod
    def _create_test_file(f, content='This is a test file'):
        # Small helper: (over)write *f* with *content*.
        with open(f, 'w') as of:
            of.write(content)

    @staticmethod
    def _md5(f):
        """Return the hex MD5 digest of file *f*, read in 4 KiB chunks."""
        hash_md5 = hashlib.md5()
        # NOTE(review): the parameter name is shadowed by the file handle
        # below; harmless, but renaming one of them would read better.
        with open(f, 'rb') as f:
            for chunk in iter(lambda: f.read(4096), b''):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def setUp(self):
        # Layout built under assets/move_dir:
        #   from/      source tree (file, subdir/file, symlink out of tree)
        #   external/  symlink target living outside the moved tree
        #   exists/    pre-populated destination with conflicting content
        self.test_dir = join(self.assets_path, 'move_dir')
        makedirs(join(self.test_dir, 'from', 'subdir'), exist_ok=True)
        self._create_test_file(join(self.test_dir, 'from', 'ftest.txt'))
        self._create_test_file(join(self.test_dir, 'from', 'subdir', 'ftest.txt'))
        makedirs(join(self.test_dir, 'external'), exist_ok=True)
        self._create_test_file(join(self.test_dir, 'external', 'external.txt'), 'External file')
        symlink(join(self.test_dir, 'external', 'external.txt'), join(self.test_dir, 'from', 'external_renamed.txt'))
        makedirs(join(self.test_dir, 'exists', 'subdir'), exist_ok=True)
        self._create_test_file(join(self.test_dir, 'exists', 'subdir', 'ftest.txt'), 'another file')
        self._create_test_file(join(self.test_dir, 'exists', 'ftest.txt'), 'another file')

    def tearDown(self):
        # Remove whichever of the four trees the test left behind.
        for base in ('to', 'from', 'exists', 'external'):
            f = util.find_file(self.test_dir, base)
            if f:
                rmtree(f)

    def test_move_dir(self):
        """Moving to a fresh destination relocates files, subdirs and symlinks."""
        frm = join(self.test_dir, 'from')
        to = join(self.test_dir, 'to')
        md5_from = self._md5(join(frm, 'ftest.txt'))
        assert util.find_file(frm, 'ftest.txt')
        assert not util.find_file(to)
        # move_dir signals success with a 0 return code.
        assert util.move_dir(frm, to) == 0
        assert not util.find_file(frm, 'ftest.txt')
        assert util.find_file(to, 'ftest.txt')
        assert util.find_file(to, 'subdir', 'ftest.txt')
        # File content must survive the move unchanged.
        assert md5_from == self._md5(join(to, 'ftest.txt'))
        assert util.find_file(to, 'external_renamed.txt')

    def test_move_dir_exists(self):
        """Moving onto an existing tree overwrites conflicting files."""
        frm = join(self.test_dir, 'from')
        to = join(self.test_dir, 'exists')
        md5_from1 = self._md5(join(frm, 'ftest.txt'))
        md5_from2 = self._md5(join(frm, 'subdir', 'ftest.txt'))
        assert util.find_file(frm, 'ftest.txt')
        assert util.find_file(to, 'ftest.txt')
        # Destination starts with different content...
        assert not md5_from1 == self._md5(join(to, 'ftest.txt'))
        assert not md5_from2 == self._md5(join(to, 'subdir', 'ftest.txt'))
        util.move_dir(frm, to)
        assert not util.find_file(frm, 'ftest.txt')
        assert util.find_file(to, 'ftest.txt')
        # ...and ends with the source's content after the move.
        assert md5_from1 == self._md5(join(to, 'ftest.txt'))
        assert md5_from2 == self._md5(join(to, 'subdir', 'ftest.txt'))
def test_query_dict():
    # Dotted paths traverse nested dicts; missing keys yield None, or
    # ret_default when supplied.
    data = {'this': {'that': 'other'}}
    assert util.query_dict(data, 'this') == {'that': 'other'}
    assert util.query_dict(data, 'this.that') == 'other'
    assert util.query_dict(data, 'nonexistent') is None
assert util.query_dict(data, 'nonexistent', ret_default='things') == 'things' | tests/test_util.py | import hashlib
from os import makedirs, symlink
from shutil import rmtree
from os.path import join, basename
from unittest.mock import patch
from egcg_core import util
from tests import TestEGCG
fastq_dir = join(TestEGCG.assets_path, 'fastqs')
def test_find_files():
expected = [join(TestEGCG.assets_path, f) for f in ('ftest.txt', 'ftest_2.txt')]
assert util.find_files(TestEGCG.assets_path, 'ftest*.txt') == expected
@patch('logging.Logger.warning')
def test_find_file(mocked_log):
assert util.find_file(TestEGCG.assets_path, 'ftest.txt') == join(TestEGCG.assets_path, 'ftest.txt')
assert util.find_file(TestEGCG.assets_path, 'ftest_.txt') is None
assert util.find_file(TestEGCG.assets_path, 'ftest*.txt') is None
mocked_log.assert_called_with(
'Searched pattern %s for one file, but got %s', (TestEGCG.assets_path, 'ftest*.txt'), 2
)
def test_str_join():
assert util.str_join('this', 'that', 'other', separator='/') == 'this/that/other'
def test_find_fastqs():
fastqs = util.find_fastqs(fastq_dir, '10015AT', '10015AT0001')
for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz',
'10015AT0001_S6_L005_R1_001.fastq.gz', '10015AT0001_S6_L005_R2_001.fastq.gz'):
assert join(fastq_dir, '10015AT', '10015AT0001', file_name) in fastqs
def test_find_fastqs_with_lane():
fastqs = util.find_fastqs(fastq_dir, '10015AT', '10015AT0001', lane=4)
for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz'):
assert join(fastq_dir, '10015AT', '10015AT0001', file_name) in fastqs
def test_find_all_fastqs():
fastqs = util.find_all_fastqs(fastq_dir)
for file_name in ('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz',
'10015AT0002_merged_R1.fastq.gz', '10015AT0002_merged_R2.fastq.gz'):
assert file_name in [basename(f) for f in fastqs]
def test_find_all_fastq_pairs():
observed = util.find_all_fastq_pairs(join(fastq_dir, '10015AT', '10015AT0001'))
expected = [('10015AT0001_S6_L004_R1_001.fastq.gz', '10015AT0001_S6_L004_R2_001.fastq.gz'),
('10015AT0001_S6_L005_R1_001.fastq.gz', '10015AT0001_S6_L005_R2_001.fastq.gz')]
assert [(basename(f), basename(g)) for f, g in observed] == expected
def test_same_fs():
test = join(TestEGCG.assets_path, 'ftest.txt')
test_2 = join(TestEGCG.assets_path, 'ftest_2.txt')
test_nonexistent = join(TestEGCG.assets_path, 'ftest_nonexistent.txt')
assert util.same_fs(test, None) is False
assert util.same_fs(test, test_2)
assert util.same_fs(test, test_nonexistent)
class TestMoveDir(TestEGCG):
@staticmethod
def _create_test_file(f, content='This is a test file'):
with open(f, 'w') as of:
of.write(content)
@staticmethod
def _md5(f):
hash_md5 = hashlib.md5()
with open(f, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def setUp(self):
self.test_dir = join(self.assets_path, 'move_dir')
makedirs(join(self.test_dir, 'from', 'subdir'), exist_ok=True)
self._create_test_file(join(self.test_dir, 'from', 'ftest.txt'))
self._create_test_file(join(self.test_dir, 'from', 'subdir', 'ftest.txt'))
makedirs(join(self.test_dir, 'external'), exist_ok=True)
self._create_test_file(join(self.test_dir, 'external', 'external.txt'), 'External file')
symlink(join(self.test_dir, 'external', 'external.txt'), join(self.test_dir, 'from', 'external_renamed.txt'))
makedirs(join(self.test_dir, 'exists', 'subdir'), exist_ok=True)
self._create_test_file(join(self.test_dir, 'exists', 'subdir', 'ftest.txt'), 'another file')
self._create_test_file(join(self.test_dir, 'exists', 'ftest.txt'), 'another file')
def tearDown(self):
for base in ('to', 'from', 'exists', 'external'):
f = util.find_file(self.test_dir, base)
if f:
rmtree(f)
def test_move_dir(self):
frm = join(self.test_dir, 'from')
to = join(self.test_dir, 'to')
md5_from = self._md5(join(frm, 'ftest.txt'))
assert util.find_file(frm, 'ftest.txt')
assert not util.find_file(to)
assert util.move_dir(frm, to) == 0
assert not util.find_file(frm, 'ftest.txt')
assert util.find_file(to, 'ftest.txt')
assert util.find_file(to, 'subdir', 'ftest.txt')
assert md5_from == self._md5(join(to, 'ftest.txt'))
assert util.find_file(to, 'external_renamed.txt')
def test_move_dir_exists(self):
frm = join(self.test_dir, 'from')
to = join(self.test_dir, 'exists')
md5_from1 = self._md5(join(frm, 'ftest.txt'))
md5_from2 = self._md5(join(frm, 'subdir', 'ftest.txt'))
assert util.find_file(frm, 'ftest.txt')
assert util.find_file(to, 'ftest.txt')
assert not md5_from1 == self._md5(join(to, 'ftest.txt'))
assert not md5_from2 == self._md5(join(to, 'subdir', 'ftest.txt'))
util.move_dir(frm, to)
assert not util.find_file(frm, 'ftest.txt')
assert util.find_file(to, 'ftest.txt')
assert md5_from1 == self._md5(join(to, 'ftest.txt'))
assert md5_from2 == self._md5(join(to, 'subdir', 'ftest.txt'))
def test_query_dict():
data = {'this': {'that': 'other'}}
assert util.query_dict(data, 'this') == {'that': 'other'}
assert util.query_dict(data, 'this.that') == 'other'
assert util.query_dict(data, 'nonexistent') is None
assert util.query_dict(data, 'nonexistent', ret_default='things') == 'things' | 0.625896 | 0.469338 |
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
    # Machine-generated by the ANTLR tool from BooleanExpr.g4 — do not edit.
    # The concatenated string below is the serialized ATN (the lexer's state
    # machine); ATNDeserializer rebuilds the runtime automaton from it.
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\13")
        buf.write("\65\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
        buf.write("\7\4\b\t\b\4\t\t\t\4\n\t\n\3\2\3\2\3\3\3\3\3\4\3\4\3\5")
        buf.write("\3\5\3\6\3\6\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\t\6\t(\n\t")
        buf.write("\r\t\16\t)\3\t\7\t-\n\t\f\t\16\t\60\13\t\3\n\3\n\3\n\3")
        buf.write("\n\2\2\13\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\3\2")
        buf.write("\5\4\2C\\c|\5\2\62;C\\c|\5\2\13\f\17\17\"\"\2\66\2\3\3")
        buf.write("\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2")
        buf.write("\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2")
        buf.write("\3\25\3\2\2\2\5\27\3\2\2\2\7\31\3\2\2\2\t\33\3\2\2\2\13")
        buf.write("\35\3\2\2\2\r\37\3\2\2\2\17#\3\2\2\2\21\'\3\2\2\2\23\61")
        buf.write("\3\2\2\2\25\26\7*\2\2\26\4\3\2\2\2\27\30\7+\2\2\30\6\3")
        buf.write("\2\2\2\31\32\7\u00ae\2\2\32\b\3\2\2\2\33\34\7\u2229\2")
        buf.write("\2\34\n\3\2\2\2\35\36\7\u222a\2\2\36\f\3\2\2\2\37 \7>")
        buf.write("\2\2 !\7?\2\2!\"\7@\2\2\"\16\3\2\2\2#$\7?\2\2$%\7@\2\2")
        buf.write("%\20\3\2\2\2&(\t\2\2\2\'&\3\2\2\2()\3\2\2\2)\'\3\2\2\2")
        buf.write(")*\3\2\2\2*.\3\2\2\2+-\t\3\2\2,+\3\2\2\2-\60\3\2\2\2.")
        buf.write(",\3\2\2\2./\3\2\2\2/\22\3\2\2\2\60.\3\2\2\2\61\62\t\4")
        buf.write("\2\2\62\63\3\2\2\2\63\64\b\n\2\2\64\24\3\2\2\2\5\2).\3")
        buf.write("\b\2\2")
        return buf.getvalue()
class BooleanExprLexer(Lexer):
    """ANTLR 4.8-generated lexer for the BooleanExpr grammar — do not edit.

    Recognises parentheses, the logical operators NOT (U+00AC), AND (U+2227),
    OR (U+2228), IFF '<=>' and IMPLIES '=>', identifiers (VARIABLE), and
    whitespace (WS, routed to the hidden channel / skipped).
    """

    # The ATN is deserialized once, at class-creation time.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants (indices into the name tables below).
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    VARIABLE = 8
    WS = 9

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'('", "')'", "'\u00AC'", "'\u2227'", "'\u2228'", "'<=>'", "'=>'" ]

    symbolicNames = [ "<INVALID>",
            "VARIABLE", "WS" ]

    ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
                  "VARIABLE", "WS" ]

    grammarFileName = "BooleanExpr.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.8")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
self._predicates = None | antlr/BooleanExprLexer.py | from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2\13")
buf.write("\65\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t")
buf.write("\7\4\b\t\b\4\t\t\t\4\n\t\n\3\2\3\2\3\3\3\3\3\4\3\4\3\5")
buf.write("\3\5\3\6\3\6\3\7\3\7\3\7\3\7\3\b\3\b\3\b\3\t\6\t(\n\t")
buf.write("\r\t\16\t)\3\t\7\t-\n\t\f\t\16\t\60\13\t\3\n\3\n\3\n\3")
buf.write("\n\2\2\13\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\13\3\2")
buf.write("\5\4\2C\\c|\5\2\62;C\\c|\5\2\13\f\17\17\"\"\2\66\2\3\3")
buf.write("\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t\3\2\2\2\2\13\3\2\2")
buf.write("\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2")
buf.write("\3\25\3\2\2\2\5\27\3\2\2\2\7\31\3\2\2\2\t\33\3\2\2\2\13")
buf.write("\35\3\2\2\2\r\37\3\2\2\2\17#\3\2\2\2\21\'\3\2\2\2\23\61")
buf.write("\3\2\2\2\25\26\7*\2\2\26\4\3\2\2\2\27\30\7+\2\2\30\6\3")
buf.write("\2\2\2\31\32\7\u00ae\2\2\32\b\3\2\2\2\33\34\7\u2229\2")
buf.write("\2\34\n\3\2\2\2\35\36\7\u222a\2\2\36\f\3\2\2\2\37 \7>")
buf.write("\2\2 !\7?\2\2!\"\7@\2\2\"\16\3\2\2\2#$\7?\2\2$%\7@\2\2")
buf.write("%\20\3\2\2\2&(\t\2\2\2\'&\3\2\2\2()\3\2\2\2)\'\3\2\2\2")
buf.write(")*\3\2\2\2*.\3\2\2\2+-\t\3\2\2,+\3\2\2\2-\60\3\2\2\2.")
buf.write(",\3\2\2\2./\3\2\2\2/\22\3\2\2\2\60.\3\2\2\2\61\62\t\4")
buf.write("\2\2\62\63\3\2\2\2\63\64\b\n\2\2\64\24\3\2\2\2\5\2).\3")
buf.write("\b\2\2")
return buf.getvalue()
class BooleanExprLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
VARIABLE = 8
WS = 9
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'('", "')'", "'\u00AC'", "'\u2227'", "'\u2228'", "'<=>'", "'=>'" ]
symbolicNames = [ "<INVALID>",
"VARIABLE", "WS" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"VARIABLE", "WS" ]
grammarFileName = "BooleanExpr.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.8")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None | 0.31237 | 0.367128 |
from functools import lru_cache
from typing import Any, cast, Optional, Type, Callable, Union, Dict, Tuple, TypeVar, List
import django
from django.db import router, DEFAULT_DB_ALIAS, connections
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import Model, Expression
from django.db.models.sql.compiler import SQLCompiler
from dj_hybrid.expression_wrapper.base import FakeQuery
from dj_hybrid.expression_wrapper.types import SupportsConversion
T_SupportsConversion = TypeVar('T_SupportsConversion', bound=SupportsConversion)
ConverterNew = Callable[[Any, T_SupportsConversion, BaseDatabaseWrapper], Any]
ConverterOld = Callable[[Any, T_SupportsConversion, BaseDatabaseWrapper, Dict], Any]
Converter = Union[ConverterNew, ConverterOld]
ConvertersExpressionPair = Tuple[List[Converter], T_SupportsConversion]
ConverterDict = Dict[int, Tuple[List[Converter], T_SupportsConversion]]
def get_converters(expression: T_SupportsConversion, model: Model) -> ConvertersExpressionPair:
    """Resolve the converter chain for *expression* on *model*'s read database."""
    db = get_db(model)
    # Normalise an instance to its model class before building the compiler.
    model_cls = model._meta.model if isinstance(model, Model) else model
    return get_converters_with_compiler(expression, get_compiler_instance(db, model_cls))
@lru_cache()
def get_converters_with_compiler(
    expression: T_SupportsConversion,
    compiler: SQLCompiler
) -> ConvertersExpressionPair:
    """Look up the converter chain the compiler assigns to *expression*.

    Returns ``([], expression)`` when no converters apply.

    NOTE(review): lru_cache requires both arguments to be hashable — assumed
    to hold for the expression and compiler objects used here; confirm.
    """
    converters = compiler.get_converters([expression])  # type: ConverterDict
    if not converters:
        return [], expression
    # The result dict is keyed by column position; a single expression was
    # passed, so index 0 is the only entry.
    return converters[0]
# Django 2.0 dropped the trailing context argument from converter callables,
# so the matching apply_converters signature is chosen at import time.
if django.VERSION >= (2,):
    def apply_converters(value: Any, converters_paired: ConvertersExpressionPair, model: Model) -> Any:
        """Run every converter over *value* for the database *model* reads from."""
        if not converters_paired[0]:
            return value
        db = get_db(model)
        connection = get_connection(db)
        converters, expression = converters_paired
        for converter in converters:
            converter = cast(ConverterNew, converter)
            value = converter(value, expression, connection)
        return value
else:
    def apply_converters(value: Any, converters_paired: ConvertersExpressionPair, model: Model) -> Any:
        """Run every converter over *value* (pre-2.0: extra context dict arg)."""
        if not converters_paired[0]:
            return value
        db = get_db(model)
        connection = get_connection(db)
        converters, expression = converters_paired
        for converter in converters:
            converter = cast(ConverterOld, converter)
            # Pre-2.0 converters take a (here empty) context dict.
            value = converter(value, expression, connection, {})
        return value
def get_db(obj: Union[Any, Type[Any]]) -> str:
    """Return the read-database alias for a model instance or model class.

    Anything that is neither falls back to ``DEFAULT_DB_ALIAS``.
    """
    if isinstance(obj, Model):
        # Instance: route with the instance itself available as a hint.
        alias = router.db_for_read(obj._meta.model, hints=dict(instance=obj))
        return cast(str, alias)
    if isinstance(obj, type) and issubclass(obj, Model):
        return cast(str, router.db_for_read(obj))
    return cast(str, DEFAULT_DB_ALIAS)
def get_connection(db: str) -> BaseDatabaseWrapper:
    """Return the live connection wrapper for alias *db*."""
    return connections[db]
@lru_cache(maxsize=None)
def get_compiler_cls(db: str) -> Type[SQLCompiler]:
    """Return (and cache per alias) the SQLCompiler class for *db*."""
    operations = get_connection(db).ops  # type: BaseDatabaseOperations
    compiler_name = 'SQLCompiler'  # we don't care about other types of compilers
    return cast(Type[SQLCompiler], operations.compiler(compiler_name))
@lru_cache(maxsize=None)
def get_compiler_instance(db: str, model_cls: Type[Model]) -> SQLCompiler:
    """Build (and cache) a compiler bound to a FakeQuery for *model_cls*.

    NOTE(review): the unbounded cache keeps one compiler per (db, model)
    pair alive for the process lifetime — fine for a fixed model set.
    """
    compiler_cls = get_compiler_cls(db)
    fake_query = get_fake_query(model_cls)
    return compiler_cls(
        fake_query,
        connection=get_connection(db),
        using=db,
    )
@lru_cache(maxsize=None)
def _get_fake_query(model_or_none: Optional[Type[Model]]) -> FakeQuery:
    """Memoised FakeQuery constructor — one shared instance per model class."""
    return FakeQuery(model=model_or_none)
def get_fake_query(obj: Union[Any, Type[Any]]) -> FakeQuery:
    """Return the cached FakeQuery for the model behind *obj*.

    Accepts a model instance, a model class, or anything else (which maps
    to a model-less query).
    """
    if isinstance(obj, Model):
        model = obj._meta.model
    elif isinstance(obj, type) and issubclass(obj, Model):
        model = obj
    else:
        model = None
return _get_fake_query(model) | dj_hybrid/expression_wrapper/convert.py | from functools import lru_cache
from typing import Any, cast, Optional, Type, Callable, Union, Dict, Tuple, TypeVar, List
import django
from django.db import router, DEFAULT_DB_ALIAS, connections
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.models import Model, Expression
from django.db.models.sql.compiler import SQLCompiler
from dj_hybrid.expression_wrapper.base import FakeQuery
from dj_hybrid.expression_wrapper.types import SupportsConversion
T_SupportsConversion = TypeVar('T_SupportsConversion', bound=SupportsConversion)
ConverterNew = Callable[[Any, T_SupportsConversion, BaseDatabaseWrapper], Any]
ConverterOld = Callable[[Any, T_SupportsConversion, BaseDatabaseWrapper, Dict], Any]
Converter = Union[ConverterNew, ConverterOld]
ConvertersExpressionPair = Tuple[List[Converter], T_SupportsConversion]
ConverterDict = Dict[int, Tuple[List[Converter], T_SupportsConversion]]
def get_converters(expression: T_SupportsConversion, model: Model) -> ConvertersExpressionPair:
db = get_db(model)
if isinstance(model, Model):
model = model._meta.model
compiler = get_compiler_instance(db, model)
return get_converters_with_compiler(expression, compiler)
@lru_cache()
def get_converters_with_compiler(
expression: T_SupportsConversion,
compiler: SQLCompiler
) -> ConvertersExpressionPair:
converters = compiler.get_converters([expression]) # type: ConverterDict
if not converters:
return [], expression
return converters[0]
if django.VERSION >= (2,):
def apply_converters(value: Any, converters_paired: ConvertersExpressionPair, model: Model) -> Any:
if not converters_paired[0]:
return value
db = get_db(model)
connection = get_connection(db)
converters, expression = converters_paired
for converter in converters:
converter = cast(ConverterNew, converter)
value = converter(value, expression, connection)
return value
else:
def apply_converters(value: Any, converters_paired: ConvertersExpressionPair, model: Model) -> Any:
if not converters_paired[0]:
return value
db = get_db(model)
connection = get_connection(db)
converters, expression = converters_paired
for converter in converters:
converter = cast(ConverterOld, converter)
value = converter(value, expression, connection, {})
return value
def get_db(obj: Union[Any, Type[Any]]) -> str:
if isinstance(obj, Model):
return cast(str, router.db_for_read(
obj._meta.model,
hints=dict(instance=obj),
))
elif isinstance(obj, type) and issubclass(obj, Model):
return cast(str, router.db_for_read(obj))
return cast(str, DEFAULT_DB_ALIAS)
def get_connection(db: str) -> BaseDatabaseWrapper:
return connections[db]
@lru_cache(maxsize=None)
def get_compiler_cls(db: str) -> Type[SQLCompiler]:
operations = get_connection(db).ops # type: BaseDatabaseOperations
compiler_name = 'SQLCompiler' # we don't care about other types of compilers
return cast(Type[SQLCompiler], operations.compiler(compiler_name))
@lru_cache(maxsize=None)
def get_compiler_instance(db: str, model_cls: Type[Model]) -> SQLCompiler:
compiler_cls = get_compiler_cls(db)
fake_query = get_fake_query(model_cls)
return compiler_cls(
fake_query,
connection=get_connection(db),
using=db,
)
@lru_cache(maxsize=None)
def _get_fake_query(model_or_none: Optional[Type[Model]]) -> FakeQuery:
return FakeQuery(model=model_or_none)
def get_fake_query(obj: Union[Any, Type[Any]]) -> FakeQuery:
if isinstance(obj, Model):
model = obj._meta.model
elif isinstance(obj, type) and issubclass(obj, Model):
model = obj
else:
model = None
return _get_fake_query(model) | 0.780788 | 0.12787 |
import asyncio
from typing import List
import pyppeteer
from tqdm import tqdm
from tiktokpy.client import Client
from tiktokpy.utils.client import catch_response_and_store
from tiktokpy.utils.logger import logger
class Trending:
    """Scrapes TikTok's trending ("For You") feed via a headless browser page."""

    def __init__(self, client: Client):
        self.client = client

    async def feed(self, amount: int, lang: str = "en"):
        """
        Collect up to *amount* trending feed items.

        Items are not scraped from the DOM: a response listener stores every
        matching feed API payload in ``result`` while the page is repeatedly
        scrolled until enough items have arrived.

        :param int amount: number of feed items wanted
        :param str lang: language code passed as a query parameter
        :return list: at most *amount* captured item dicts
        """
        # Media/image/font requests are irrelevant to the API traffic we tap.
        page = await self.client.new_page(blocked_resources=["media", "image", "font"])
        logger.debug('📨 Request "Trending" page')
        result: List[dict] = []
        # Each response is handled asynchronously and appended to `result`.
        page.on(
            "response",
            lambda res: asyncio.create_task(catch_response_and_store(res, result)),
        )
        _ = await self.client.goto(
            "/foryou",
            query_params={"lang": lang},
            page=page,
            options={"waitUntil": "networkidle0"},
        )
        logger.debug('📭 Got response from "Trending" page')
        pbar = tqdm(total=amount, desc=f"📈 Getting trending {lang.upper()}")
        pbar.n = min(len(result), amount)
        pbar.refresh()
        while len(result) < amount:
            logger.debug("🖱 Trying to scroll to last video item")
            # Scrolling to the last item triggers the next feed API request.
            last_child_selector = 'div[class*="-ItemContainer"]:last-child'
            scroll_command = """
                document.querySelector('{selector}')
                    .scrollIntoView();
            """
            try:
                await page.evaluate(scroll_command.format(selector=last_child_selector))
            except pyppeteer.errors.ElementHandleError:
                # Fallback selector for the alternative feed DOM layout.
                last_child_selector = ".video-feed-container > .lazyload-wrapper:last-child"
                await page.evaluate(scroll_command.format(selector=last_child_selector))
            await page.waitFor(1_000)
            elements = await page.JJ(".video-feed-item")
            logger.debug(f"🔎 Found {len(elements)} items for clear")
            pbar.n = min(len(result), amount)
            pbar.refresh()
            # Only prune the DOM once it has grown large enough to matter.
            if len(elements) < 500:
                logger.debug("🔻 Too less for clearing page")
                continue
            # Remove already-seen feed nodes so the page stays responsive.
            await page.JJeval(
                ".video-feed-container > .lazyload-wrapper:not(:last-child)",
                pageFunction="(elements) => elements.forEach(el => el.remove())",
            )
            logger.debug(f"🎉 Cleaned {len(elements) - 1} items from page")
            # NOTE(review): long pause after pruning — presumably to avoid
            # rate limiting; confirm whether a full 30 s is required.
            await page.waitFor(30_000)
        await page.close()
        pbar.close()
return result[:amount] | tiktokpy/client/trending.py | import asyncio
from typing import List
import pyppeteer
from tqdm import tqdm
from tiktokpy.client import Client
from tiktokpy.utils.client import catch_response_and_store
from tiktokpy.utils.logger import logger
class Trending:
def __init__(self, client: Client):
self.client = client
async def feed(self, amount: int, lang: str = "en"):
page = await self.client.new_page(blocked_resources=["media", "image", "font"])
logger.debug('📨 Request "Trending" page')
result: List[dict] = []
page.on(
"response",
lambda res: asyncio.create_task(catch_response_and_store(res, result)),
)
_ = await self.client.goto(
"/foryou",
query_params={"lang": lang},
page=page,
options={"waitUntil": "networkidle0"},
)
logger.debug('📭 Got response from "Trending" page')
pbar = tqdm(total=amount, desc=f"📈 Getting trending {lang.upper()}")
pbar.n = min(len(result), amount)
pbar.refresh()
while len(result) < amount:
logger.debug("🖱 Trying to scroll to last video item")
last_child_selector = 'div[class*="-ItemContainer"]:last-child'
scroll_command = """
document.querySelector('{selector}')
.scrollIntoView();
"""
try:
await page.evaluate(scroll_command.format(selector=last_child_selector))
except pyppeteer.errors.ElementHandleError:
last_child_selector = ".video-feed-container > .lazyload-wrapper:last-child"
await page.evaluate(scroll_command.format(selector=last_child_selector))
await page.waitFor(1_000)
elements = await page.JJ(".video-feed-item")
logger.debug(f"🔎 Found {len(elements)} items for clear")
pbar.n = min(len(result), amount)
pbar.refresh()
if len(elements) < 500:
logger.debug("🔻 Too less for clearing page")
continue
await page.JJeval(
".video-feed-container > .lazyload-wrapper:not(:last-child)",
pageFunction="(elements) => elements.forEach(el => el.remove())",
)
logger.debug(f"🎉 Cleaned {len(elements) - 1} items from page")
await page.waitFor(30_000)
await page.close()
pbar.close()
return result[:amount] | 0.424293 | 0.130951 |
import time
import torch
from torch.utils.data import DataLoader, random_split
from torch.optim import Adam, SGD
from torchvision import transforms
from dataset import Caltech101Data, load_caltech101_pretrained, load_caltech256_pretrained, classes_101, classes_256
from classifier import DIM, ClassifierResnetLight, classifier_bias_key, classifier_key
CLASSES = classes_256
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
LEARNING_RATE = 1e-3
class Trainer():
    """Minimal supervised training loop for the Caltech classifier.

    Runs cross-entropy training with an L2 penalty supplied by the model,
    followed by a no-grad validation pass, printing per-phase mean loss and
    accuracy each epoch.
    """

    def __init__(self, loader, optimizer, loss_function, model, device):
        # loader: dict with 'train' and 'val' DataLoaders.
        self.model = model
        self.loader = loader
        self.optimizer = optimizer
        self.loss_function = loss_function
        self.device = device
        self.model = model.to(self.device)
        # NOTE(review): .to(device) above already moves the model, so this
        # extra .cuda() call looks redundant — confirm before removing.
        if device == torch.device('cuda:0'):
            self.model.cuda()

    def accuracy(self, output, labels):
        """Mean top-1 accuracy of *output* logits against integer *labels*.

        Relies on the module-level CLASSES constant for the class count.
        """
        with torch.no_grad():
            output = torch.argmax(output.view(-1, CLASSES), -1)
            acc = torch.mean(torch.eq(output, labels).float())
            return acc.cpu().numpy()

    def train(self, epochs, loss=None):
        """Train for *epochs* epochs, validating after each.

        :param epochs: number of passes over loader['train']
        :param loss: unused — NOTE(review): this parameter is immediately
            shadowed by the loop-local loss; kept only for interface
            compatibility.
        """
        for epoch in range(epochs):
            mean_loss = 0.0
            mean_acc = 0.0
            start = time.time()
            for batch_idx, data in enumerate(self.loader['train'], 0):
                inputs, labels = data
                # NOTE(review): inputs/labels are never moved to self.device;
                # with a CUDA model this would fail — confirm loader placement.
                self.optimizer.zero_grad()
                outputs = self.model(inputs)
                # Task loss plus a 0.1-weighted L2 penalty from the model.
                loss = self.loss_function(outputs,
                                          labels) + 0.1 * self.model.l2_norm()
                loss.backward()
                self.optimizer.step()
                acc = self.accuracy(outputs, labels)
                mean_loss += loss.item()
                mean_acc += acc
            # Per-epoch summary: means over all train batches.
            print('[Train {}, {}] loss: {} acc {} took {}'.format(
                epoch + 1, batch_idx + 1, mean_loss / (batch_idx + 1),
                mean_acc / (batch_idx + 1),
                time.time() - start))
            mean_loss = 0.0
            mean_acc = 0.0
            start = time.time()
            for batch_idx, data in enumerate(self.loader['val']):
                inputs, labels = data
                # Validation: no gradient bookkeeping, no L2 penalty term.
                with torch.no_grad():
                    outputs = self.model(inputs)
                    loss = self.loss_function(outputs, labels)
                acc = self.accuracy(outputs, labels)
                mean_loss += loss.item()
                mean_acc += acc
            print('[Validate {}, {}] loss: {} acc {} took {}'.format(
                epoch + 1, batch_idx + 1, mean_loss / (batch_idx + 1),
                mean_acc / (batch_idx + 1),
                time.time() - start))
def main():
    """Wire up data, model and optimiser, then run the training loop."""
    model = ClassifierResnetLight(CLASSES)
    cd = load_caltech256_pretrained()
    optimizer = SGD(model.parameters(), lr=LEARNING_RATE)
    loss_function = torch.nn.CrossEntropyLoss()
    # 90/10 random train/validation split of the full dataset.
    total = len(cd)
    dataset_size = {'train': int(0.9 * total)}
    dataset_size["val"] = total - dataset_size["train"]
    data = {}
    data['train'], data['val'] = random_split(
        cd, [dataset_size['train'], dataset_size['val']])
    loader = {
        phase: DataLoader(data[phase], batch_size=512, shuffle=True)
        for phase in ['train', 'val']
    }
    start = time.time()
    trainer = Trainer(loader, optimizer, loss_function, model, DEVICE)
    trainer.train(2000)
    print("Training took ", time.time() - start)
if __name__ == '__main__':
main() | trainer.py | import time
import torch
from torch.utils.data import DataLoader, random_split
from torch.optim import Adam, SGD
from torchvision import transforms
from dataset import Caltech101Data, load_caltech101_pretrained, load_caltech256_pretrained, classes_101, classes_256
from classifier import DIM, ClassifierResnetLight, classifier_bias_key, classifier_key
CLASSES = classes_256
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
LEARNING_RATE = 1e-3
class Trainer():
def __init__(self, loader, optimizer, loss_function, model, device):
self.model = model
self.loader = loader
self.optimizer = optimizer
self.loss_function = loss_function
self.device = device
self.model = model.to(self.device)
if device == torch.device('cuda:0'):
self.model.cuda()
def accuracy(self, output, labels):
with torch.no_grad():
output = torch.argmax(output.view(-1, CLASSES), -1)
acc = torch.mean(torch.eq(output, labels).float())
return acc.cpu().numpy()
def train(self, epochs, loss=None):
for epoch in range(epochs):
mean_loss = 0.0
mean_acc = 0.0
start = time.time()
for batch_idx, data in enumerate(self.loader['train'], 0):
inputs, labels = data
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.loss_function(outputs,
labels) + 0.1 * self.model.l2_norm()
loss.backward()
self.optimizer.step()
acc = self.accuracy(outputs, labels)
mean_loss += loss.item()
mean_acc += acc
print('[Train {}, {}] loss: {} acc {} took {}'.format(
epoch + 1, batch_idx + 1, mean_loss / (batch_idx + 1),
mean_acc / (batch_idx + 1),
time.time() - start))
mean_loss = 0.0
mean_acc = 0.0
start = time.time()
for batch_idx, data in enumerate(self.loader['val']):
inputs, labels = data
with torch.no_grad():
outputs = self.model(inputs)
loss = self.loss_function(outputs, labels)
acc = self.accuracy(outputs, labels)
mean_loss += loss.item()
mean_acc += acc
print('[Validate {}, {}] loss: {} acc {} took {}'.format(
epoch + 1, batch_idx + 1, mean_loss / (batch_idx + 1),
mean_acc / (batch_idx + 1),
time.time() - start))
def main():
model = ClassifierResnetLight(CLASSES)
cd = load_caltech256_pretrained()
optimizer = SGD(model.parameters(), lr=LEARNING_RATE)
loss_function = torch.nn.CrossEntropyLoss()
total = len(cd)
dataset_size = {'train': int(0.9 * total)}
dataset_size["val"] = total - dataset_size["train"]
data = {}
data['train'], data['val'] = random_split(
cd, [dataset_size['train'], dataset_size['val']])
loader = {
phase: DataLoader(data[phase], batch_size=512, shuffle=True)
for phase in ['train', 'val']
}
start = time.time()
trainer = Trainer(loader, optimizer, loss_function, model, DEVICE)
trainer.train(2000)
print("Training took ", time.time() - start)
if __name__ == '__main__':
main() | 0.86764 | 0.501892 |
import random
import math
class RSA:
def gen_prime(self):
prime=0
while True:
c=0
a=random.randint(100, 1000)
if not a%2==0:
i=3
while i*i<=a:
if a%i==0:
c+=1
i+=2
if c==0:
prime=a
break
return prime
def prime_tuple(self):
p=self.gen_prime()
q=self.gen_prime()
return (p,q)
def totient(self, tuple):
p, q = tuple
return (p-1)*(q-1)
def public_key(self, tuple):
tot=self.totient(tuple)
e=2
while e<tot:
if math.gcd(e, tot)==1:
break
e+=1
return e
def private_key(self, tuple, pub):
tot=self.totient(tuple)
quotient=1
d=0
while True:
value=tot*quotient+1
if value%pub==0:
d=value/pub
break
quotient+=1
return int(d)
def encrypt(self, msg, pub_k):
e1, n1 = pub_k
string=""
for i in msg:
a=ord(i)
a=int(pow(int(a), int(e1)))%n1
string=string+str(a)+"#"
return(string)
def decrypt(self, string, priv_k):
d, n = priv_k
final=""
for i in string.split("#"):
if not i=="":
a=int(pow(int(i), int(d)))%n
final=final+chr(a)
return final
if __name__=="__main__":
obj=RSA()
tuple=obj.prime_tuple()
p, q = tuple
n=p*q
tot=obj.totient(tuple)
e=obj.public_key(tuple)
pub_k=(e, n)
d=obj.private_key(tuple, e)
priv_k=(d, n)
print("Public Key"+ str(pub_k))
print("Private Key"+ str(priv_k))
inp=input("Enter message")
type(inp)
choice=input("Enter choice: 1-> Encrypt, 2-> Decrypt")
type(choice)
if choice == "1":
e1=input("Enter e")
type(e1)
n1=input("Enter n")
type(n1)
res=obj.encrypt(inp, (int(e1), int(n1)))
print(res)
else:
res=obj.decrypt(inp, (int(d), int(n)))
print(res) | rsa.py |
import random
import math
class RSA:
def gen_prime(self):
prime=0
while True:
c=0
a=random.randint(100, 1000)
if not a%2==0:
i=3
while i*i<=a:
if a%i==0:
c+=1
i+=2
if c==0:
prime=a
break
return prime
def prime_tuple(self):
p=self.gen_prime()
q=self.gen_prime()
return (p,q)
def totient(self, tuple):
p, q = tuple
return (p-1)*(q-1)
def public_key(self, tuple):
tot=self.totient(tuple)
e=2
while e<tot:
if math.gcd(e, tot)==1:
break
e+=1
return e
def private_key(self, tuple, pub):
tot=self.totient(tuple)
quotient=1
d=0
while True:
value=tot*quotient+1
if value%pub==0:
d=value/pub
break
quotient+=1
return int(d)
def encrypt(self, msg, pub_k):
e1, n1 = pub_k
string=""
for i in msg:
a=ord(i)
a=int(pow(int(a), int(e1)))%n1
string=string+str(a)+"#"
return(string)
def decrypt(self, string, priv_k):
d, n = priv_k
final=""
for i in string.split("#"):
if not i=="":
a=int(pow(int(i), int(d)))%n
final=final+chr(a)
return final
if __name__=="__main__":
obj=RSA()
tuple=obj.prime_tuple()
p, q = tuple
n=p*q
tot=obj.totient(tuple)
e=obj.public_key(tuple)
pub_k=(e, n)
d=obj.private_key(tuple, e)
priv_k=(d, n)
print("Public Key"+ str(pub_k))
print("Private Key"+ str(priv_k))
inp=input("Enter message")
type(inp)
choice=input("Enter choice: 1-> Encrypt, 2-> Decrypt")
type(choice)
if choice == "1":
e1=input("Enter e")
type(e1)
n1=input("Enter n")
type(n1)
res=obj.encrypt(inp, (int(e1), int(n1)))
print(res)
else:
res=obj.decrypt(inp, (int(d), int(n)))
print(res) | 0.113113 | 0.173323 |
import json
from apischema.json_schema import deserialization_schema, JsonSchemaVersion
from optunaz.config.optconfig import OptimizationConfig
from optunaz.utils.schema import (
replacekey,
addsibling,
delsibling,
copytitle,
replaceenum,
addtitles,
)
def patch_schema_generic(schema):
addtitles(schema)
# Replace singleton enums with const.
# For some reason, this was not needed in AZDock. A mystery.
schema = replaceenum(schema)
# Replace "anyOf" with "oneOf".
schema = replacekey(schema)
# Add "type": "object" to any elements that contain "oneOf": [...].
schema = addsibling(schema)
# Delete "type": "string" for "enum".
schema = delsibling(schema, {"enum": "type"})
# Delete most of the stuff for "const".
schema = delsibling(schema, {"const": "type"})
schema = delsibling(schema, {"const": "default"})
schema = delsibling(schema, {"const": "title"})
# Copy title from $refs into oneOf.
schema = copytitle(schema, schema)
return schema
def patch_schema_optunaz(schema):
(
schema.get("$defs", {})
.get("MolData", {})
.get("properties", {})
.get("file_path", {})
)["format"] = "uri"
# Dataset
(
schema.get("$defs", {})
.get("Dataset", {})
.get("properties", {})
.get("save_intermediate_files", {})
)["const"] = True
(
schema.get("$defs", {})
.get("Dataset", {})
.get("properties", {})
.get("intermediate_training_dataset_file", {})
)["const"] = "{{run.path}}/intermediate_training_dataset_file.csv"
# (
# schema.get("$defs", {})
# .get("Dataset", {})
# .get("properties", {})
# .pop("test_dataset_file", None)
# )
# (
# schema.get("$defs", {})
# .get("Dataset", {})
# .get("properties", {})
# .get("training_dataset_file", {})
# )["format"] = "file"
(
schema.get("$defs", {})
.get("MolData", {})
.get("properties", {})
.get("file_path", {})
)["format"] = "uri"
# Root OptimizationConfig
(
schema.get("$defs", {})
.get("OptimizationConfig", {})
.get("properties", {})
.pop("mode", None)
)
(
schema.get("$defs", {})
.get("OptimizationConfig", {})
.get("properties", {})
.pop("visualization", None)
)
# (
# schema.get("$defs", {})
# .get("OptimizationConfig", {})
# .get("properties", {})
# )["mode"] = {
# "$ref": "#/$defs/ModelMode",
# "title": "Mode mode: regression or classification",
# "default": "regression"
# }
drop_algs = {"PLS", "RandomForest", "XGBregressor"}
drop_refs = {f"#/$defs/{alg}" for alg in drop_algs}
alg_items = (
schema.get("$defs", {})
.get("OptimizationConfig", {})
.get("properties", {})
.get("algorithms", {})
.get("items", {})
)
algs = alg_items.get("anyOf", {})
alg_items["anyOf"] = [alg for alg in algs if alg["$ref"] not in drop_refs]
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("mode", None)
)
(schema.get("$defs", {}).get("Settings", {}).get("properties", {}))["n_jobs"] = {
"const": -1
}
(schema.get("$defs", {}).get("Settings", {}).get("properties", {}))[
"track_to_mlflow"
] = {"const": False}
(schema.get("$defs", {}).get("Settings", {}).get("properties", {}))[
"optuna_storage"
] = {"const": "sqlite:///{{run.path}}/optuna_storage.sqlite"}
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("shuffle", None)
)
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("direction", None)
)
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("scoring", None)
)
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("tracking_rest_endpoint", None)
)
(
schema.get("$defs", {})
.get("ScaledDescriptorParameters", {})
.get("properties", {})
.pop("scaler", None)
)
(
schema.get("$defs", {})
.get("PhyschemDescriptors", {})
.get("properties", {})
.pop("parameters", {})
)
(
schema.get("$defs", {})
.get("Stratified", {})
.get("properties", {})
.pop("bins", {})
)
(
schema.get("$defs", {})
.get("Dataset", {})
.get("properties", {})
.get("training_dataset_file", {})
)["format"] = "uri"
(
schema.get("$defs", {})
.get("MolDescriptor", {})
.get("anyOf", [])
.remove({"$ref": "#/$defs/PrecomputedDescriptorFromFile"})
)
return schema
def main():
schema = deserialization_schema(
OptimizationConfig, all_refs=True, version=JsonSchemaVersion.DRAFT_2019_09
)
schema = patch_schema_optunaz(schema)
schema = patch_schema_generic(schema)
print(json.dumps(schema, indent=2))
if __name__ == "__main__":
main() | optunaz/schemagen.py | import json
from apischema.json_schema import deserialization_schema, JsonSchemaVersion
from optunaz.config.optconfig import OptimizationConfig
from optunaz.utils.schema import (
replacekey,
addsibling,
delsibling,
copytitle,
replaceenum,
addtitles,
)
def patch_schema_generic(schema):
addtitles(schema)
# Replace singleton enums with const.
# For some reason, this was not needed in AZDock. A mystery.
schema = replaceenum(schema)
# Replace "anyOf" with "oneOf".
schema = replacekey(schema)
# Add "type": "object" to any elements that contain "oneOf": [...].
schema = addsibling(schema)
# Delete "type": "string" for "enum".
schema = delsibling(schema, {"enum": "type"})
# Delete most of the stuff for "const".
schema = delsibling(schema, {"const": "type"})
schema = delsibling(schema, {"const": "default"})
schema = delsibling(schema, {"const": "title"})
# Copy title from $refs into oneOf.
schema = copytitle(schema, schema)
return schema
def patch_schema_optunaz(schema):
(
schema.get("$defs", {})
.get("MolData", {})
.get("properties", {})
.get("file_path", {})
)["format"] = "uri"
# Dataset
(
schema.get("$defs", {})
.get("Dataset", {})
.get("properties", {})
.get("save_intermediate_files", {})
)["const"] = True
(
schema.get("$defs", {})
.get("Dataset", {})
.get("properties", {})
.get("intermediate_training_dataset_file", {})
)["const"] = "{{run.path}}/intermediate_training_dataset_file.csv"
# (
# schema.get("$defs", {})
# .get("Dataset", {})
# .get("properties", {})
# .pop("test_dataset_file", None)
# )
# (
# schema.get("$defs", {})
# .get("Dataset", {})
# .get("properties", {})
# .get("training_dataset_file", {})
# )["format"] = "file"
(
schema.get("$defs", {})
.get("MolData", {})
.get("properties", {})
.get("file_path", {})
)["format"] = "uri"
# Root OptimizationConfig
(
schema.get("$defs", {})
.get("OptimizationConfig", {})
.get("properties", {})
.pop("mode", None)
)
(
schema.get("$defs", {})
.get("OptimizationConfig", {})
.get("properties", {})
.pop("visualization", None)
)
# (
# schema.get("$defs", {})
# .get("OptimizationConfig", {})
# .get("properties", {})
# )["mode"] = {
# "$ref": "#/$defs/ModelMode",
# "title": "Mode mode: regression or classification",
# "default": "regression"
# }
drop_algs = {"PLS", "RandomForest", "XGBregressor"}
drop_refs = {f"#/$defs/{alg}" for alg in drop_algs}
alg_items = (
schema.get("$defs", {})
.get("OptimizationConfig", {})
.get("properties", {})
.get("algorithms", {})
.get("items", {})
)
algs = alg_items.get("anyOf", {})
alg_items["anyOf"] = [alg for alg in algs if alg["$ref"] not in drop_refs]
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("mode", None)
)
(schema.get("$defs", {}).get("Settings", {}).get("properties", {}))["n_jobs"] = {
"const": -1
}
(schema.get("$defs", {}).get("Settings", {}).get("properties", {}))[
"track_to_mlflow"
] = {"const": False}
(schema.get("$defs", {}).get("Settings", {}).get("properties", {}))[
"optuna_storage"
] = {"const": "sqlite:///{{run.path}}/optuna_storage.sqlite"}
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("shuffle", None)
)
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("direction", None)
)
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("scoring", None)
)
(
schema.get("$defs", {})
.get("Settings", {})
.get("properties", {})
.pop("tracking_rest_endpoint", None)
)
(
schema.get("$defs", {})
.get("ScaledDescriptorParameters", {})
.get("properties", {})
.pop("scaler", None)
)
(
schema.get("$defs", {})
.get("PhyschemDescriptors", {})
.get("properties", {})
.pop("parameters", {})
)
(
schema.get("$defs", {})
.get("Stratified", {})
.get("properties", {})
.pop("bins", {})
)
(
schema.get("$defs", {})
.get("Dataset", {})
.get("properties", {})
.get("training_dataset_file", {})
)["format"] = "uri"
(
schema.get("$defs", {})
.get("MolDescriptor", {})
.get("anyOf", [])
.remove({"$ref": "#/$defs/PrecomputedDescriptorFromFile"})
)
return schema
def main():
schema = deserialization_schema(
OptimizationConfig, all_refs=True, version=JsonSchemaVersion.DRAFT_2019_09
)
schema = patch_schema_optunaz(schema)
schema = patch_schema_generic(schema)
print(json.dumps(schema, indent=2))
if __name__ == "__main__":
main() | 0.531939 | 0.299656 |
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.shortcuts import render
from django.urls import reverse
from ..forms import UserProfileForm
from ..utilities import set_message_and_redirect, set_message
@login_required
def profile(request):
initial_data = {
"first_name": request.user.first_name,
"last_name": request.user.last_name,
"email": request.user.email,
"default_currency": request.user.userprofile.default_currency,
"default_period": request.user.userprofile.default_period,
}
profile_form = UserProfileForm(initial=initial_data)
password_form = PasswordChangeForm(user=request.user)
if request.POST:
if "profile_submit" in request.POST:
profile_form = UserProfileForm(request.POST)
if profile_form.is_valid():
request.user.first_name = profile_form.cleaned_data["first_name"]
request.user.last_name = profile_form.cleaned_data["last_name"]
request.user.email = profile_form.cleaned_data["email"]
request.user.username = profile_form.cleaned_data["email"]
request.user.save()
request.user.userprofile.default_currency = profile_form.cleaned_data["default_currency"]
request.user.userprofile.default_period = profile_form.cleaned_data["default_period"]
request.user.userprofile.save()
return set_message_and_redirect(request, "s|Your profile has been updated succesfully!", reverse("blackbook:profile"))
else:
set_message(request, "f|Your profile could not be saved. Please correct the errors below and try again.")
if "password_submit" in request.POST:
password_form = PasswordChangeForm(request.user, request.POST)
if password_form.is_valid():
password_form.save()
update_session_auth_hash(request, request.user)
return set_message_and_redirect(request, "s|Your password has been changed succesfully!", reverse("blackbook:profile"))
else:
set_message(request, "f|Your password could not be updated. Please correct the errors below and try again.")
return render(request, "blackbook/profile.html", {"profile_form": profile_form, "password_form": password_form}) | blackbook/archive/views/profile.py | from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.shortcuts import render
from django.urls import reverse
from ..forms import UserProfileForm
from ..utilities import set_message_and_redirect, set_message
@login_required
def profile(request):
initial_data = {
"first_name": request.user.first_name,
"last_name": request.user.last_name,
"email": request.user.email,
"default_currency": request.user.userprofile.default_currency,
"default_period": request.user.userprofile.default_period,
}
profile_form = UserProfileForm(initial=initial_data)
password_form = PasswordChangeForm(user=request.user)
if request.POST:
if "profile_submit" in request.POST:
profile_form = UserProfileForm(request.POST)
if profile_form.is_valid():
request.user.first_name = profile_form.cleaned_data["first_name"]
request.user.last_name = profile_form.cleaned_data["last_name"]
request.user.email = profile_form.cleaned_data["email"]
request.user.username = profile_form.cleaned_data["email"]
request.user.save()
request.user.userprofile.default_currency = profile_form.cleaned_data["default_currency"]
request.user.userprofile.default_period = profile_form.cleaned_data["default_period"]
request.user.userprofile.save()
return set_message_and_redirect(request, "s|Your profile has been updated succesfully!", reverse("blackbook:profile"))
else:
set_message(request, "f|Your profile could not be saved. Please correct the errors below and try again.")
if "password_submit" in request.POST:
password_form = PasswordChangeForm(request.user, request.POST)
if password_form.is_valid():
password_form.save()
update_session_auth_hash(request, request.user)
return set_message_and_redirect(request, "s|Your password has been changed succesfully!", reverse("blackbook:profile"))
else:
set_message(request, "f|Your password could not be updated. Please correct the errors below and try again.")
return render(request, "blackbook/profile.html", {"profile_form": profile_form, "password_form": password_form}) | 0.420124 | 0.111145 |
import logging
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.utils import shuffle
from tqdm import tqdm
from webias.constants import LOGGING_CONFIG
from webias.utils import build_word_embedding_cache
logging.basicConfig(**LOGGING_CONFIG)
class RNSB:
"""An implementation of the "Relative Negative Sentiment Bias" metric.
This metric was originally proposed in [1]. For a detailed explanation of the score calculation,
see the documentation of the `get_score()` method.
[1] dx.doi.org/10.18653/v1/P19-1162
Arguments:
positive_words -- A list of positive words used to train the classifier.
negative_words -- A list of negative words used to train the classifier.
positive_vectors -- A list of word vectors from positive words used to train the classifier.
They are expected to come from the same embedding model given as
`word_vector_getter`. This can be used as an alternative to the
`positive_words` to speed up the calculation if the class is re-generated
multiple times. Also, this can only be used together with `negative_vectors`
and not with `negative_words`.
negative_vectors -- A list of word vectors from negative words used to train the classifier.
They are expected to come from the same embedding model given as
`word_vector_getter`. This can be used as an alternative to the
`negative_words` to speed up the calculation if the class is re-generated
multiple times. Also, this can only be used together with `positive_vectors`
and not with `positive_words`.
word_vector_getter -- An object that returns a vector given a word as parameter to the
`__getitem__()` function.
random_state -- The random state to be used for shuffling the data before returning it.
"""
def __init__(
self,
word_vector_getter,
positive_words: list = None,
negative_words: list = None,
positive_vectors: list = None,
negative_vectors: list = None,
random_state: int = 42):
self.word_vector_getter = word_vector_getter
self.random_state = random_state
# If words are provided, embedd them first
if positive_words and negative_words:
positive_vectors, negative_vectors = self._get_sentiment_vectors(
positive_words, negative_words)
# Load words lists from disk, embed them and transform them into a labled tuple
self.X, self.y = self._prepare_sentiment_data(positive_vectors, negative_vectors)
# Retrieve the trained logistic regression classifier
self.lrc = self._train_lrc_sentiment_classifier()
def _get_sentiment_vectors(self, positive_words: list, negative_words: list) -> tuple:
"""Retrieve the embedding vectors for the provided words from the provided model.
Return a tuple of positive word vectors and negative word vectors.
Arguments:
positive_words -- A list of positive words used to train the classifier.
negative_words -- A list of negative words used to train the classifier."""
# Receive embeddings for the sentiment words
# Words for which no vector exists are skipped and not included in the returned set
positive_vectors = []
negative_vectors = []
for token in positive_words:
try:
positive_vectors.append(self.word_vector_getter[token])
except KeyError:
logging.debug(f"Couldn't find vector for token {token}. Skipping.")
for token in negative_words:
try:
negative_vectors.append(self.word_vector_getter[token])
except KeyError:
logging.debug(f"Couldn't find vector for token {token}. Skipping.")
return positive_vectors, negative_vectors
def _prepare_sentiment_data(self, positive_vectors: list, negative_vectors: list) -> tuple:
"""Prepare sentiment data for classification.
Return it as labeled data in the form of X and y.
Arguments:
positive_vectors -- A list of word vectors from positive words used to train the classifier.
negative_vectors -- A list of word vectors from negative words used to train the classifier.
"""
# Preapre data in X, y format; shuffle the data before returning
# Positive vectors will have a 0 label, negative a 1 label
X = [*positive_vectors, *negative_vectors]
y = [*np.zeros(len(positive_vectors)), *np.ones(len(negative_vectors))]
return shuffle(X, y, random_state=self.random_state)
def _train_lrc_sentiment_classifier(self):
"""Train a Logistic Regression classifier on the sentiment terms from [1].
Return the trained classifier object.
[1] https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
"""
# Initialize and fir the LRC using sklearn
lrc = LogisticRegression(random_state=self.random_state)
lrc.fit(self.X, self.y)
return lrc
def _kullback_leibler_divergence(self, distribution_i: list, distribution_j: list) -> np.array:
"""Calculate the difference between two distributions using the Kullback-Leibler divergence.
Return the result of the calculation as a list of divergences. The implemented algorithm is
presented in [1] and can be formulated as:
$D_{KL}(p,q) = \sum_{i=1}^N p_i * log(\frac{p_i}{q_i})$.
[1] https://www.countbayesie.com/blog/2017/5/9/kullback-leibler-divergence-explained
Arguments:
distribution_i -- The source distribution.
distribution_j -- The destination distribution to which compare the `distribution_i` to.
"""
if len(distribution_i) != len(distribution_j):
raise ValueError("Both distributions need to be of the same length.")
divergences = [
(p * np.log(p / distribution_j[idx])) for idx, p in enumerate(distribution_i)]
return np.array(divergences)
def get_score(self, identity_terms: list) -> tuple:
"""Calculate the RNSB score for the given identity terms.
Returns the result of the RNSB calculation presented in [1], alognside the normalized
identity distributions and a list of OOV tokens for the given embedding. The former is
basically the sum of the Kullback-Leibler divergence of the normalized differences between
the predicted negative sentiments of the identity terms and a uniform distribution thereof.
It can be formulated as: $RNSB(P) = D_{KL}(P||U)$, where $P$ is a set of normalized
probabilities of negative sentiments for all identity terms and $U$ is a uniform
distribution.
To predict the negative sentiments, a logistic regression classifier is trained on word
vectors of sentiment words from [2] using the given vector space, as originally proposed
by [1].
[1] dx.doi.org/10.18653/v1/P19-1162
[2] https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
Arguments:
identity_terms -- A list of terms that describe different groups of the property that
should be investigated, such as national origin identity. In [1] those
properties are also referred to as protected groups. If this is a list of
lists, the inner lists are expected to be vectors.
"""
# Try to embed all identity terms; ignore them if not in embdding vocabulary
# Log missing words to console
identity_vectors = []
oov_tokens = []
if type(identity_terms[0]) == str:
for term in identity_terms:
try:
identity_vectors.append(self.word_vector_getter[term])
except KeyError:
logging.debug(f"Term '{term}' is OOV. Ignoring.")
oov_tokens.append(term)
elif type(identity_terms[0]) == list or type(identity_terms[0]) == np.ndarray:
identity_vectors = identity_terms
# Predict the negative probabilities of the identity terms/vectors
# According to read_sentiment_data, negative probailities are at position 1
identity_probabilities = [probas[1] for probas in self.lrc.predict_proba(identity_vectors)]
# Calculate normalized probabilities to be able to handle them as distribution
identity_probas_normalized = [
(proba / sum(identity_probabilities)) for proba in identity_probabilities]
# Create a uniform distribution of the probabilities and calculate the final score
uniform_distribution = np.full(
len(identity_probas_normalized), 1 / len(identity_probas_normalized))
result = np.sum(
self._kullback_leibler_divergence(identity_probas_normalized, uniform_distribution))
return (
result,
identity_probas_normalized,
oov_tokens)
def rnsb_evaluation(
embedding_model,
lexicon: dict) -> dict:
"""Evaluate the RNSB metric with the given lexicon.
Return a dict containing the results of the different shuffled runs per test type.
Each index in the type list represents the results for one shuffled lexicon (or a shuffled
lexicon combination if there are two lexicons) in the form of a list. In that list, each index
`i` represents a lexicon of size `i * m` where `m` is the step size. Thus, it also represents
the `start_size` ofthe lexicon. Further, `i` also represents the random state used to shuffle
the original list.
Not that, since the RNSB test uses only a single vector for each social group, this evaluation
averages the words in the target group sets. This average is then used to represent the group.
Arguments:
embedding_model -- The embedding model to use for the evaluation.
lexicon -- The lexicon to be used for the evaluation. Is expected to have the following keys:
target_set_1, target_set_2, attribute_set_1, attribute_set_2.
"""
# Dict that holds the results
rnsb_results = {}
# For each bias type (e.g. gender, ethnicity, religion) ...
for test_type, test_lexicons in lexicon.items():
test_type_results = {
"shuffled_attribute_results": [],
"shuffled_target_results": [],
"attribute_set_lengths": [],
"target_set_lengths": []}
# --------------------------------------------------------------------------------
# Conduct evaluation with shuffled attribute lists
word_vector_cache = build_word_embedding_cache(lexicon, embedding_model)
# Averaging the target sets before the shuffle loop saves some time for the shuffled
# attribute set runs (as the targets will stay the same)
averaged_target_sets = [
np.mean([embedding_model[t] for t in test_lexicons["target_set_1"][0][-1]], axis=0),
np.mean([embedding_model[t] for t in test_lexicons["target_set_2"][0][-1]], axis=0)]
# Combine the two attribute sets to always use the same index element from both
shuffled_attribute_runs = list(zip(
test_lexicons["attribute_set_1"], test_lexicons["attribute_set_2"]))
attribute_progress_bar = tqdm(shuffled_attribute_runs, desc=f"RNSB-{test_type}-attributes")
for shuffled_run in attribute_progress_bar:
shuffled_attribute_run_results = []
partial_progress_bar = tqdm(list(zip(shuffled_run[0], shuffled_run[1])), leave=False)
for partial_attribute_1, partial_attribute_2 in partial_progress_bar:
result = rnsb = RNSB(
word_vector_cache,
positive_words=partial_attribute_1,
negative_words=partial_attribute_2)
shuffled_attribute_run_results.append(rnsb.get_score(averaged_target_sets)[0])
# Append information and results to main results object
test_type_results["shuffled_attribute_results"].append(shuffled_attribute_run_results)
# Set lengths should be the same across shuffled runs, so we can simply use the lengths of
# the first one and add them outside the loop
# But since we have multiple attribuite lists, we need to sum them up at each partial
attribute_set_lengths = zip(
map(lambda x: len(x), shuffled_attribute_runs[0][0]),
map(lambda x: len(x), shuffled_attribute_runs[0][1]))
test_type_results["attribute_set_lengths"] = [i + j for i, j in attribute_set_lengths]
# --------------------------------------------------------------------------------
# Conduct evaluation with shuffled target lists
# Initialzing an RNSB instance before the shuffle loop saves some time for the shuffled
# target set runs (the rnsb instance will stay the same as the attribute sets wont change)
rnsb_all_attribute_terms = RNSB(
embedding_model,
positive_words=test_lexicons["attribute_set_1"][0][-1],
negative_words=test_lexicons["attribute_set_2"][0][-1])
# Combine the two target sets to always use the same index element from both
shuffled_target_runs = list(zip(
test_lexicons["target_set_1"], test_lexicons["target_set_2"]))
target_progress_bar = tqdm(shuffled_target_runs, desc=f"RNSB-{test_type}-targets")
for shuffled_run in target_progress_bar:
shuffled_target_run_results = []
partial_progress_bar = tqdm(list(zip(shuffled_run[0], shuffled_run[1])), leave=False)
for partial_target_1, partial_target_2 in partial_progress_bar:
averaged_shuffled_target_sets = [
np.mean([embedding_model[t] for t in partial_target_1], axis=0),
np.mean([embedding_model[t] for t in partial_target_2], axis=0)]
result = rnsb_all_attribute_terms.get_score(averaged_shuffled_target_sets)
shuffled_target_run_results.append(result[0])
# Append information and results to main results object
test_type_results["shuffled_target_results"].append(shuffled_target_run_results)
# Set lengths should be the same across shuffled runs, so we can simply use the lengths of
# the first one and add them outside the loop
# But since we have multiple target lists, we need to sum them up at each partial
target_set_lengths = zip(
map(lambda x: len(x), shuffled_target_runs[0][0]),
map(lambda x: len(x), shuffled_target_runs[0][1]))
test_type_results["target_set_lengths"] = [i + j for i, j in target_set_lengths]
rnsb_results[test_type] = test_type_results
return rnsb_results | webias/rnsb.py |
import logging
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.utils import shuffle
from tqdm import tqdm
from webias.constants import LOGGING_CONFIG
from webias.utils import build_word_embedding_cache
logging.basicConfig(**LOGGING_CONFIG)
class RNSB:
"""An implementation of the "Relative Negative Sentiment Bias" metric.
This metric was originally proposed in [1]. For a detailed explanation of the score calculation,
see the documentation of the `get_score()` method.
[1] dx.doi.org/10.18653/v1/P19-1162
Arguments:
positive_words -- A list of positive words used to train the classifier.
negative_words -- A list of negative words used to train the classifier.
positive_vectors -- A list of word vectors from positive words used to train the classifier.
They are expected to come from the same embedding model given as
`word_vector_getter`. This can be used as an alternative to the
`positive_words` to speed up the calculation if the class is re-generated
multiple times. Also, this can only be used together with `negative_vectors`
and not with `negative_words`.
negative_vectors -- A list of word vectors from negative words used to train the classifier.
They are expected to come from the same embedding model given as
`word_vector_getter`. This can be used as an alternative to the
`negative_words` to speed up the calculation if the class is re-generated
multiple times. Also, this can only be used together with `positive_vectors`
and not with `positive_words`.
word_vector_getter -- An object that returns a vector given a word as parameter to the
`__getitem__()` function.
random_state -- The random state to be used for shuffling the data before returning it.
"""
def __init__(
self,
word_vector_getter,
positive_words: list = None,
negative_words: list = None,
positive_vectors: list = None,
negative_vectors: list = None,
random_state: int = 42):
self.word_vector_getter = word_vector_getter
self.random_state = random_state
# If words are provided, embedd them first
if positive_words and negative_words:
positive_vectors, negative_vectors = self._get_sentiment_vectors(
positive_words, negative_words)
# Load words lists from disk, embed them and transform them into a labled tuple
self.X, self.y = self._prepare_sentiment_data(positive_vectors, negative_vectors)
# Retrieve the trained logistic regression classifier
self.lrc = self._train_lrc_sentiment_classifier()
def _get_sentiment_vectors(self, positive_words: list, negative_words: list) -> tuple:
"""Retrieve the embedding vectors for the provided words from the provided model.
Return a tuple of positive word vectors and negative word vectors.
Arguments:
positive_words -- A list of positive words used to train the classifier.
negative_words -- A list of negative words used to train the classifier."""
# Receive embeddings for the sentiment words
# Words for which no vector exists are skipped and not included in the returned set
positive_vectors = []
negative_vectors = []
for token in positive_words:
try:
positive_vectors.append(self.word_vector_getter[token])
except KeyError:
logging.debug(f"Couldn't find vector for token {token}. Skipping.")
for token in negative_words:
try:
negative_vectors.append(self.word_vector_getter[token])
except KeyError:
logging.debug(f"Couldn't find vector for token {token}. Skipping.")
return positive_vectors, negative_vectors
def _prepare_sentiment_data(self, positive_vectors: list, negative_vectors: list) -> tuple:
"""Prepare sentiment data for classification.
Return it as labeled data in the form of X and y.
Arguments:
positive_vectors -- A list of word vectors from positive words used to train the classifier.
negative_vectors -- A list of word vectors from negative words used to train the classifier.
"""
# Preapre data in X, y format; shuffle the data before returning
# Positive vectors will have a 0 label, negative a 1 label
X = [*positive_vectors, *negative_vectors]
y = [*np.zeros(len(positive_vectors)), *np.ones(len(negative_vectors))]
return shuffle(X, y, random_state=self.random_state)
def _train_lrc_sentiment_classifier(self):
"""Train a Logistic Regression classifier on the sentiment terms from [1].
Return the trained classifier object.
[1] https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
"""
# Initialize and fir the LRC using sklearn
lrc = LogisticRegression(random_state=self.random_state)
lrc.fit(self.X, self.y)
return lrc
def _kullback_leibler_divergence(self, distribution_i: list, distribution_j: list) -> np.array:
"""Calculate the difference between two distributions using the Kullback-Leibler divergence.
Return the result of the calculation as a list of divergences. The implemented algorithm is
presented in [1] and can be formulated as:
$D_{KL}(p,q) = \sum_{i=1}^N p_i * log(\frac{p_i}{q_i})$.
[1] https://www.countbayesie.com/blog/2017/5/9/kullback-leibler-divergence-explained
Arguments:
distribution_i -- The source distribution.
distribution_j -- The destination distribution to which compare the `distribution_i` to.
"""
if len(distribution_i) != len(distribution_j):
raise ValueError("Both distributions need to be of the same length.")
divergences = [
(p * np.log(p / distribution_j[idx])) for idx, p in enumerate(distribution_i)]
return np.array(divergences)
def get_score(self, identity_terms: list) -> tuple:
"""Calculate the RNSB score for the given identity terms.
Returns the result of the RNSB calculation presented in [1], alognside the normalized
identity distributions and a list of OOV tokens for the given embedding. The former is
basically the sum of the Kullback-Leibler divergence of the normalized differences between
the predicted negative sentiments of the identity terms and a uniform distribution thereof.
It can be formulated as: $RNSB(P) = D_{KL}(P||U)$, where $P$ is a set of normalized
probabilities of negative sentiments for all identity terms and $U$ is a uniform
distribution.
To predict the negative sentiments, a logistic regression classifier is trained on word
vectors of sentiment words from [2] using the given vector space, as originally proposed
by [1].
[1] dx.doi.org/10.18653/v1/P19-1162
[2] https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html
Arguments:
identity_terms -- A list of terms that describe different groups of the property that
should be investigated, such as national origin identity. In [1] those
properties are also referred to as protected groups. If this is a list of
lists, the inner lists are expected to be vectors.
"""
# Try to embed all identity terms; ignore them if not in embdding vocabulary
# Log missing words to console
identity_vectors = []
oov_tokens = []
if type(identity_terms[0]) == str:
for term in identity_terms:
try:
identity_vectors.append(self.word_vector_getter[term])
except KeyError:
logging.debug(f"Term '{term}' is OOV. Ignoring.")
oov_tokens.append(term)
elif type(identity_terms[0]) == list or type(identity_terms[0]) == np.ndarray:
identity_vectors = identity_terms
# Predict the negative probabilities of the identity terms/vectors
# According to read_sentiment_data, negative probailities are at position 1
identity_probabilities = [probas[1] for probas in self.lrc.predict_proba(identity_vectors)]
# Calculate normalized probabilities to be able to handle them as distribution
identity_probas_normalized = [
(proba / sum(identity_probabilities)) for proba in identity_probabilities]
# Create a uniform distribution of the probabilities and calculate the final score
uniform_distribution = np.full(
len(identity_probas_normalized), 1 / len(identity_probas_normalized))
result = np.sum(
self._kullback_leibler_divergence(identity_probas_normalized, uniform_distribution))
return (
result,
identity_probas_normalized,
oov_tokens)
def rnsb_evaluation(
        embedding_model,
        lexicon: dict) -> dict:
    """Evaluate the RNSB metric with the given lexicon.

    Return a dict containing the results of the different shuffled runs per
    test type. Each index in the type list represents the results for one
    shuffled lexicon (or a shuffled lexicon combination if there are two
    lexicons) in the form of a list. In that list, each index `i` represents a
    lexicon of size `i * m` where `m` is the step size. Thus, it also
    represents the `start_size` of the lexicon. Further, `i` also represents
    the random state used to shuffle the original list.

    Note that, since the RNSB test uses only a single vector for each social
    group, this evaluation averages the words in the target group sets. This
    average is then used to represent the group.

    Arguments:
    embedding_model -- The embedding model to use for the evaluation.
    lexicon -- The lexicon to be used for the evaluation. Is expected to have
               the following keys: target_set_1, target_set_2, attribute_set_1,
               attribute_set_2.
    """
    # Dict that holds the results
    rnsb_results = {}
    # For each bias type (e.g. gender, ethnicity, religion) ...
    for test_type, test_lexicons in lexicon.items():
        test_type_results = {
            "shuffled_attribute_results": [],
            "shuffled_target_results": [],
            "attribute_set_lengths": [],
            "target_set_lengths": []}
        # --------------------------------------------------------------------------------
        # Conduct evaluation with shuffled attribute lists
        word_vector_cache = build_word_embedding_cache(lexicon, embedding_model)
        # Averaging the target sets before the shuffle loop saves some time for
        # the shuffled attribute set runs (as the targets will stay the same)
        averaged_target_sets = [
            np.mean([embedding_model[t] for t in test_lexicons["target_set_1"][0][-1]], axis=0),
            np.mean([embedding_model[t] for t in test_lexicons["target_set_2"][0][-1]], axis=0)]
        # Combine the two attribute sets to always use the same index element from both
        shuffled_attribute_runs = list(zip(
            test_lexicons["attribute_set_1"], test_lexicons["attribute_set_2"]))
        attribute_progress_bar = tqdm(shuffled_attribute_runs, desc=f"RNSB-{test_type}-attributes")
        for shuffled_run in attribute_progress_bar:
            shuffled_attribute_run_results = []
            partial_progress_bar = tqdm(list(zip(shuffled_run[0], shuffled_run[1])), leave=False)
            for partial_attribute_1, partial_attribute_2 in partial_progress_bar:
                # Train a fresh classifier on this partial attribute lexicon
                rnsb = RNSB(
                    word_vector_cache,
                    positive_words=partial_attribute_1,
                    negative_words=partial_attribute_2)
                shuffled_attribute_run_results.append(rnsb.get_score(averaged_target_sets)[0])
            # Append information and results to main results object
            test_type_results["shuffled_attribute_results"].append(shuffled_attribute_run_results)
        # Set lengths should be the same across shuffled runs, so we can simply
        # use the lengths of the first one and add them outside the loop.
        # But since we have multiple attribute lists, we need to sum them up at
        # each partial.
        attribute_set_lengths = zip(
            map(len, shuffled_attribute_runs[0][0]),
            map(len, shuffled_attribute_runs[0][1]))
        test_type_results["attribute_set_lengths"] = [i + j for i, j in attribute_set_lengths]
        # --------------------------------------------------------------------------------
        # Conduct evaluation with shuffled target lists
        # Initializing an RNSB instance before the shuffle loop saves some time
        # for the shuffled target set runs (the instance stays the same as the
        # attribute sets won't change)
        rnsb_all_attribute_terms = RNSB(
            embedding_model,
            positive_words=test_lexicons["attribute_set_1"][0][-1],
            negative_words=test_lexicons["attribute_set_2"][0][-1])
        # Combine the two target sets to always use the same index element from both
        shuffled_target_runs = list(zip(
            test_lexicons["target_set_1"], test_lexicons["target_set_2"]))
        target_progress_bar = tqdm(shuffled_target_runs, desc=f"RNSB-{test_type}-targets")
        for shuffled_run in target_progress_bar:
            shuffled_target_run_results = []
            partial_progress_bar = tqdm(list(zip(shuffled_run[0], shuffled_run[1])), leave=False)
            for partial_target_1, partial_target_2 in partial_progress_bar:
                averaged_shuffled_target_sets = [
                    np.mean([embedding_model[t] for t in partial_target_1], axis=0),
                    np.mean([embedding_model[t] for t in partial_target_2], axis=0)]
                result = rnsb_all_attribute_terms.get_score(averaged_shuffled_target_sets)
                shuffled_target_run_results.append(result[0])
            # Append information and results to main results object
            test_type_results["shuffled_target_results"].append(shuffled_target_run_results)
        # Set lengths should be the same across shuffled runs, so we can simply
        # use the lengths of the first one and add them outside the loop.
        # But since we have multiple target lists, we need to sum them up at
        # each partial.
        target_set_lengths = zip(
            map(len, shuffled_target_runs[0][0]),
            map(len, shuffled_target_runs[0][1]))
        test_type_results["target_set_lengths"] = [i + j for i, j in target_set_lengths]
        rnsb_results[test_type] = test_type_results
    return rnsb_results
from nose.tools import eq_
import appvalidator.testcases.markup.markuptester as markuptester
from ..helper import TestCase
from js_helper import silent, TestCase as JSTestCase
class TestCSPTags(TestCase):
def analyze(self, snippet, app_type="web"):
self.setup_err()
self.err.save_resource("app_type", app_type)
markuptester.MarkupParser(self.err, debug=True).process("", snippet)
def test_script_not_js(self):
markup = """
<script type="text/x-jquery-tmpl">foo</script>
"""
self.analyze(markup)
self.assert_silent()
self.analyze(markup, "privileged")
self.assert_silent()
def test_script(self):
markup = """<script>foo</script>"""
self.analyze(markup)
self.assert_failed(with_warnings=True)
self.analyze(markup, "privileged")
self.assert_failed(with_errors=True)
def test_script_attrs(self):
markup = """<button onclick="foo();"></button>"""
self.analyze(markup)
self.assert_failed(with_warnings=True)
self.analyze(markup, "privileged")
self.assert_failed(with_errors=True)
def test_script_remote(self):
markup = """<script src="http://foo.bar/zip.js"></script>"""
self.analyze(markup)
self.assert_failed(with_warnings=True)
self.analyze(markup, "privileged")
self.assert_failed(with_errors=True)
class TestCSP(JSTestCase):
def test_function(self):
self.run_script("var x = Function('foo');")
self.assert_failed(with_warnings=True)
def test_function_new(self):
self.run_script("var x = new Function('foo');")
self.assert_failed(with_warnings=True)
def test_eval(self):
self.run_script("var x = eval('foo');")
self.assert_failed(with_warnings=True)
def test_setTimeout(self):
self.run_script("var x = setTimeout('foo', 0);")
self.assert_failed(with_warnings=True)
def test_setTimeout_pass(self):
self.run_script("var x = setTimeout(function() {}, 0);")
self.assert_silent()
def test_setInterval(self):
self.run_script("var x = setInterval('foo', 0);")
self.assert_failed(with_warnings=True)
@silent
def test_setInterval_pass(self):
self.run_script("var x = setInterval(function() {}, 0);")
@silent
def test_timeouts_less_noisy(self):
self.run_script("var f = function() {};x = setInterval(f, 0);")
self.run_script("var f = function() {};x = setTimeout(f, 0);")
@silent
def test_timeouts_less_noisy_with_bind(self):
self.run_script("var f = function() {};x = setInterval(f.bind(foo), 0);")
self.run_script("var f = function() {};x = setTimeout(f.bind(foo), 0);")
@silent
def test_scope_works(self):
# This code partially borrowed from Ace.
self.run_script("""
exports.delayedCall = function(fcn, defaultTimeout) {
var timer = null;
var callback = function() {
timer = null;
fcn();
};
var _self = function(timeout) {
timer && clearTimeout(timer);
timer = setTimeout(callback, timeout || defaultTimeout);
};
_self.delay = _self;
_self.schedule = function(timeout) {
if (timer == null)
timer = setTimeout(callback, timeout || 0);
};
_self.call = function() {
this.cancel();
fcn();
};
_self.cancel = function() {
timer && clearTimeout(timer);
timer = null;
};
_self.isPending = function() {
return timer;
};
return _self;
};
""")
@silent
def test_literal_objects(self):
"""Test for a weird bug in the way we detected properties."""
self.run_script('var x = {on: "true"}')
@silent
def test_function_prototype(self):
"""Test for a weird bug in the way we detected properties."""
self.run_script('Function.prototype.bind = foo;')
self.run_script('Function.prototype.call(this);')
class TestCreateElement(JSTestCase):
@silent
def test_pass(self):
"Tests that createElement and createElementNS throw errors."
self.run_script("""
var x = foo;
foo.bar.whateverElement("script");
""")
@silent
def test_createElement_pass(self):
self.run_script("var x = document.createElement('b');")
@silent
def test_createElement_var_pass(self):
self.run_script("var a = 'asdf', x = document.createElement(a);")
def test_createElement(self):
self.run_script("var x = document.createElement('script');")
self.assert_failed(with_warnings=True)
@silent
def test_createElementNS_pass(self):
self.run_script("var x = document.createElementNS('ns', 'b');")
def test_createElementNS(self):
self.run_script("var x = document.createElementNS('ns', 'script');")
self.assert_failed(with_warnings=True)
def test_create_split(self):
self.run_script("""
var x = foo;
foo.bar.createElement("scr"+"ipt");
""")
self.assert_failed(with_warnings=True)
def test_create_case(self):
# Part of bug 636835
self.run_script("""
var x = foo;
foo.bar.createElement("scRipt");
""")
self.assert_failed(with_warnings=True)
def test_create_ns(self):
self.run_script("""
var x = foo;
foo.bar.createElementNS("http://foo.bar/", "asdf:" +"scr"+"ipt");
""")
self.assert_failed(with_warnings=True)
def test_create_compiled(self):
self.run_script("""
let scr = "scr";
scr += "ipt";
foo.bar.createElement(scr);
""")
self.assert_failed(with_warnings=True)
@silent
def test_create_other(self):
self.run_script("""
document.createElement("style");
function x(doc) {
doc.createElement("style");
}""")
@silent
def test_create_split_other(self):
self.run_script("""
document.createElement("sty"+"le");
var x = "sty";
x += "le";
document.createElement(x);
""")
def test_create_noop(self):
# Also test an empty call (tests for tracebacks)
self.run_script("document.createElement();") | tests/js/test_csp.py | from nose.tools import eq_
import appvalidator.testcases.markup.markuptester as markuptester
from ..helper import TestCase
from js_helper import silent, TestCase as JSTestCase
class TestCSPTags(TestCase):
def analyze(self, snippet, app_type="web"):
self.setup_err()
self.err.save_resource("app_type", app_type)
markuptester.MarkupParser(self.err, debug=True).process("", snippet)
def test_script_not_js(self):
markup = """
<script type="text/x-jquery-tmpl">foo</script>
"""
self.analyze(markup)
self.assert_silent()
self.analyze(markup, "privileged")
self.assert_silent()
def test_script(self):
markup = """<script>foo</script>"""
self.analyze(markup)
self.assert_failed(with_warnings=True)
self.analyze(markup, "privileged")
self.assert_failed(with_errors=True)
def test_script_attrs(self):
markup = """<button onclick="foo();"></button>"""
self.analyze(markup)
self.assert_failed(with_warnings=True)
self.analyze(markup, "privileged")
self.assert_failed(with_errors=True)
def test_script_remote(self):
markup = """<script src="http://foo.bar/zip.js"></script>"""
self.analyze(markup)
self.assert_failed(with_warnings=True)
self.analyze(markup, "privileged")
self.assert_failed(with_errors=True)
class TestCSP(JSTestCase):
def test_function(self):
self.run_script("var x = Function('foo');")
self.assert_failed(with_warnings=True)
def test_function_new(self):
self.run_script("var x = new Function('foo');")
self.assert_failed(with_warnings=True)
def test_eval(self):
self.run_script("var x = eval('foo');")
self.assert_failed(with_warnings=True)
def test_setTimeout(self):
self.run_script("var x = setTimeout('foo', 0);")
self.assert_failed(with_warnings=True)
def test_setTimeout_pass(self):
self.run_script("var x = setTimeout(function() {}, 0);")
self.assert_silent()
def test_setInterval(self):
self.run_script("var x = setInterval('foo', 0);")
self.assert_failed(with_warnings=True)
@silent
def test_setInterval_pass(self):
self.run_script("var x = setInterval(function() {}, 0);")
@silent
def test_timeouts_less_noisy(self):
self.run_script("var f = function() {};x = setInterval(f, 0);")
self.run_script("var f = function() {};x = setTimeout(f, 0);")
@silent
def test_timeouts_less_noisy_with_bind(self):
self.run_script("var f = function() {};x = setInterval(f.bind(foo), 0);")
self.run_script("var f = function() {};x = setTimeout(f.bind(foo), 0);")
@silent
def test_scope_works(self):
# This code partially borrowed from Ace.
self.run_script("""
exports.delayedCall = function(fcn, defaultTimeout) {
var timer = null;
var callback = function() {
timer = null;
fcn();
};
var _self = function(timeout) {
timer && clearTimeout(timer);
timer = setTimeout(callback, timeout || defaultTimeout);
};
_self.delay = _self;
_self.schedule = function(timeout) {
if (timer == null)
timer = setTimeout(callback, timeout || 0);
};
_self.call = function() {
this.cancel();
fcn();
};
_self.cancel = function() {
timer && clearTimeout(timer);
timer = null;
};
_self.isPending = function() {
return timer;
};
return _self;
};
""")
@silent
def test_literal_objects(self):
"""Test for a weird bug in the way we detected properties."""
self.run_script('var x = {on: "true"}')
@silent
def test_function_prototype(self):
"""Test for a weird bug in the way we detected properties."""
self.run_script('Function.prototype.bind = foo;')
self.run_script('Function.prototype.call(this);')
class TestCreateElement(JSTestCase):
@silent
def test_pass(self):
"Tests that createElement and createElementNS throw errors."
self.run_script("""
var x = foo;
foo.bar.whateverElement("script");
""")
@silent
def test_createElement_pass(self):
self.run_script("var x = document.createElement('b');")
@silent
def test_createElement_var_pass(self):
self.run_script("var a = 'asdf', x = document.createElement(a);")
def test_createElement(self):
self.run_script("var x = document.createElement('script');")
self.assert_failed(with_warnings=True)
@silent
def test_createElementNS_pass(self):
self.run_script("var x = document.createElementNS('ns', 'b');")
def test_createElementNS(self):
self.run_script("var x = document.createElementNS('ns', 'script');")
self.assert_failed(with_warnings=True)
def test_create_split(self):
self.run_script("""
var x = foo;
foo.bar.createElement("scr"+"ipt");
""")
self.assert_failed(with_warnings=True)
def test_create_case(self):
# Part of bug 636835
self.run_script("""
var x = foo;
foo.bar.createElement("scRipt");
""")
self.assert_failed(with_warnings=True)
def test_create_ns(self):
self.run_script("""
var x = foo;
foo.bar.createElementNS("http://foo.bar/", "asdf:" +"scr"+"ipt");
""")
self.assert_failed(with_warnings=True)
def test_create_compiled(self):
self.run_script("""
let scr = "scr";
scr += "ipt";
foo.bar.createElement(scr);
""")
self.assert_failed(with_warnings=True)
@silent
def test_create_other(self):
self.run_script("""
document.createElement("style");
function x(doc) {
doc.createElement("style");
}""")
@silent
def test_create_split_other(self):
self.run_script("""
document.createElement("sty"+"le");
var x = "sty";
x += "le";
document.createElement(x);
""")
def test_create_noop(self):
# Also test an empty call (tests for tracebacks)
self.run_script("document.createElement();") | 0.532425 | 0.382718 |
from __future__ import (absolute_import, print_function, division,
unicode_literals)
import logging
import ema_workbench
try:
import unittest.mock as mock
except ImportError:
import mock
import unittest
from ema_workbench.util import ema_logging
def tearDownModule():
ema_logging._logger = None
ema_logger = logging.getLogger(ema_logging.LOGGER_NAME)
ema_logger.handlers = []
class TestEmaLogging(unittest.TestCase):
def test_log_messages(self):
ema_logging.log_to_stderr(ema_logging.DEBUG)
with mock.patch('ema_workbench.util.ema_logging._logger') as mocked_logger:
ema_logging._module_loggers[__name__] = mocked_logger
message = 'test message'
ema_logging.debug(message)
mocked_logger.debug.assert_called_with(message)
ema_logging.info(message)
mocked_logger.info.assert_called_with(message)
ema_logging.warning(message)
mocked_logger.warning.assert_called_with(message)
ema_logging.error(message)
mocked_logger.error.assert_called_with(message)
ema_logging.exception(message)
mocked_logger.exception.assert_called_with(message)
ema_logging.critical(message)
mocked_logger.critical.assert_called_with(message)
def test_get_logger(self):
ema_logging._logger = None
logger = ema_logging.get_logger()
self.assertEqual(logger, logging.getLogger(ema_logging.LOGGER_NAME))
self.assertEqual(len(logger.handlers), 1)
self.assertEqual(type(logger.handlers[0]), ema_logging.NullHandler)
logger = ema_logging.get_logger()
self.assertEqual(logger, logging.getLogger(ema_logging.LOGGER_NAME))
self.assertEqual(len(logger.handlers), 1)
self.assertEqual(type(logger.handlers[0]), ema_logging.NullHandler)
def test_log_to_stderr(self):
ema_logging._logger = None
logger = ema_logging.log_to_stderr(ema_logging.DEBUG)
self.assertEqual(len(logger.handlers), 2)
self.assertEqual(logger.level, ema_logging.DEBUG)
ema_logging._logger = None
logger = ema_logging.log_to_stderr()
self.assertEqual(len(logger.handlers), 2)
self.assertEqual(logger.level, ema_logging.DEFAULT_LEVEL)
logger = ema_logging.log_to_stderr()
self.assertEqual(len(logger.handlers), 2)
self.assertEqual(logger.level, ema_logging.DEFAULT_LEVEL)
if __name__ == "__main__":
unittest.main() | test/test_util/test_ema_logging.py | from __future__ import (absolute_import, print_function, division,
unicode_literals)
import logging
import ema_workbench
try:
import unittest.mock as mock
except ImportError:
import mock
import unittest
from ema_workbench.util import ema_logging
def tearDownModule():
ema_logging._logger = None
ema_logger = logging.getLogger(ema_logging.LOGGER_NAME)
ema_logger.handlers = []
class TestEmaLogging(unittest.TestCase):
def test_log_messages(self):
ema_logging.log_to_stderr(ema_logging.DEBUG)
with mock.patch('ema_workbench.util.ema_logging._logger') as mocked_logger:
ema_logging._module_loggers[__name__] = mocked_logger
message = 'test message'
ema_logging.debug(message)
mocked_logger.debug.assert_called_with(message)
ema_logging.info(message)
mocked_logger.info.assert_called_with(message)
ema_logging.warning(message)
mocked_logger.warning.assert_called_with(message)
ema_logging.error(message)
mocked_logger.error.assert_called_with(message)
ema_logging.exception(message)
mocked_logger.exception.assert_called_with(message)
ema_logging.critical(message)
mocked_logger.critical.assert_called_with(message)
def test_get_logger(self):
ema_logging._logger = None
logger = ema_logging.get_logger()
self.assertEqual(logger, logging.getLogger(ema_logging.LOGGER_NAME))
self.assertEqual(len(logger.handlers), 1)
self.assertEqual(type(logger.handlers[0]), ema_logging.NullHandler)
logger = ema_logging.get_logger()
self.assertEqual(logger, logging.getLogger(ema_logging.LOGGER_NAME))
self.assertEqual(len(logger.handlers), 1)
self.assertEqual(type(logger.handlers[0]), ema_logging.NullHandler)
def test_log_to_stderr(self):
ema_logging._logger = None
logger = ema_logging.log_to_stderr(ema_logging.DEBUG)
self.assertEqual(len(logger.handlers), 2)
self.assertEqual(logger.level, ema_logging.DEBUG)
ema_logging._logger = None
logger = ema_logging.log_to_stderr()
self.assertEqual(len(logger.handlers), 2)
self.assertEqual(logger.level, ema_logging.DEFAULT_LEVEL)
logger = ema_logging.log_to_stderr()
self.assertEqual(len(logger.handlers), 2)
self.assertEqual(logger.level, ema_logging.DEFAULT_LEVEL)
if __name__ == "__main__":
unittest.main() | 0.524638 | 0.240758 |
from gocept.amqprun.readfiles import FileStoreReader, FileStoreDataManager
from unittest import mock
import gocept.amqprun.interfaces
import gocept.amqprun.testing
import os
import shutil
import tempfile
import unittest
import zope.component
import time
class ReaderTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.sender = mock.Mock()
zope.component.provideUtility(
self.sender, gocept.amqprun.interfaces.ISender)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
super().tearDown()
def test_empty_new_directory_nothing_happens(self):
reader = FileStoreReader(self.tmpdir, 'route')
reader.scan()
self.assertFalse(self.sender.send.called)
def test_should_move_file_to_cur_on_commit(self):
reader = FileStoreReader(self.tmpdir, 'route')
f = open(os.path.join(self.tmpdir, 'new', 'foo'), 'w')
f.write('contents')
f.close()
reader.send = mock.Mock()
reader.session = mock.Mock()
reader.scan()
self.assertTrue(reader.session.mark_done.called)
def test_exception_in_send_should_not_move_file(self):
reader = FileStoreReader(self.tmpdir, 'route')
f = open(os.path.join(self.tmpdir, 'new', 'foo'), 'w')
f.write('contents')
f.close()
reader.send = mock.Mock()
reader.send.side_effect = RuntimeError('provoked')
reader.session = mock.Mock()
reader.scan()
self.assertFalse(reader.session.mark_done.called)
class FileStoreDataManagerTest(unittest.TestCase):
def setUp(self):
self.session = mock.Mock()
self.dm = FileStoreDataManager(self.session)
def test_committing_transaction_should_commit_and_reset_session(self):
UNUSED_TRANSACTION = None
self.dm.tpc_begin(UNUSED_TRANSACTION)
self.dm.commit(UNUSED_TRANSACTION)
self.dm.tpc_vote(UNUSED_TRANSACTION)
self.dm.tpc_finish(UNUSED_TRANSACTION)
self.assertTrue(self.session.commit.called)
self.assertTrue(self.session.reset.called)
def test_aborting_transaction_should_commit_and_reset_session(self):
UNUSED_TRANSACTION = None
self.dm.abort(UNUSED_TRANSACTION)
self.assertFalse(self.session.commit.called)
self.assertTrue(self.session.reset.called)
class ReaderIntegrationTest(gocept.amqprun.testing.MainTestCase):
def setUp(self):
super().setUp()
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.tmpdir)
def wait_for_directory_present(self, path, timeout=10):
wait = 0
while wait < timeout:
if os.path.exists(path):
return
time.sleep(0.25)
wait += 0.25
raise RuntimeError
def test_should_send_message_and_move_file(self):
self.make_config(__name__, 'readfiles')
self.expect_message_on('test.data')
self.start_server_in_subprocess(
self.tmpdir, 'test.data', module='gocept.amqprun.readfiles')
new_path = os.path.join(self.tmpdir, 'new')
self.wait_for_directory_present(new_path)
with open(os.path.join(new_path, 'foo.xml'), 'w') as f:
f.write('contents')
message = self.wait_for_message()
self.assertEqual('contents', message.body)
self.assertEqual(
'foo.xml', message.properties['application_headers']['X-Filename'])
self.assertEqual(0, len(os.listdir(os.path.join(self.tmpdir, 'new'))))
self.assertEqual(1, len(os.listdir(os.path.join(self.tmpdir, 'cur'))))
self.stop_server_in_subprocess()
def test_process_should_exit_on_filesystem_error(self):
self.make_config(__name__, 'readfiles-error')
self.start_server_in_subprocess(
self.tmpdir, 'test.route', module='gocept.amqprun.readfiles')
directory_path = os.path.join(self.tmpdir, 'new')
self.wait_for_directory_present(directory_path)
os.rmdir(directory_path)
status = self.wait_for_subprocess_exit()
self.assertNotEqual(0, status)
self.stdout.seek(0)
self.assertIn('Unhandled exception, terminating.', self.stdout.read()) | src/gocept/amqprun/tests/test_readfiles.py | from gocept.amqprun.readfiles import FileStoreReader, FileStoreDataManager
from unittest import mock
import gocept.amqprun.interfaces
import gocept.amqprun.testing
import os
import shutil
import tempfile
import unittest
import zope.component
import time
class ReaderTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.sender = mock.Mock()
zope.component.provideUtility(
self.sender, gocept.amqprun.interfaces.ISender)
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
super().tearDown()
def test_empty_new_directory_nothing_happens(self):
reader = FileStoreReader(self.tmpdir, 'route')
reader.scan()
self.assertFalse(self.sender.send.called)
def test_should_move_file_to_cur_on_commit(self):
reader = FileStoreReader(self.tmpdir, 'route')
f = open(os.path.join(self.tmpdir, 'new', 'foo'), 'w')
f.write('contents')
f.close()
reader.send = mock.Mock()
reader.session = mock.Mock()
reader.scan()
self.assertTrue(reader.session.mark_done.called)
def test_exception_in_send_should_not_move_file(self):
reader = FileStoreReader(self.tmpdir, 'route')
f = open(os.path.join(self.tmpdir, 'new', 'foo'), 'w')
f.write('contents')
f.close()
reader.send = mock.Mock()
reader.send.side_effect = RuntimeError('provoked')
reader.session = mock.Mock()
reader.scan()
self.assertFalse(reader.session.mark_done.called)
class FileStoreDataManagerTest(unittest.TestCase):
def setUp(self):
self.session = mock.Mock()
self.dm = FileStoreDataManager(self.session)
def test_committing_transaction_should_commit_and_reset_session(self):
UNUSED_TRANSACTION = None
self.dm.tpc_begin(UNUSED_TRANSACTION)
self.dm.commit(UNUSED_TRANSACTION)
self.dm.tpc_vote(UNUSED_TRANSACTION)
self.dm.tpc_finish(UNUSED_TRANSACTION)
self.assertTrue(self.session.commit.called)
self.assertTrue(self.session.reset.called)
def test_aborting_transaction_should_commit_and_reset_session(self):
UNUSED_TRANSACTION = None
self.dm.abort(UNUSED_TRANSACTION)
self.assertFalse(self.session.commit.called)
self.assertTrue(self.session.reset.called)
class ReaderIntegrationTest(gocept.amqprun.testing.MainTestCase):
def setUp(self):
super().setUp()
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
super().tearDown()
shutil.rmtree(self.tmpdir)
def wait_for_directory_present(self, path, timeout=10):
wait = 0
while wait < timeout:
if os.path.exists(path):
return
time.sleep(0.25)
wait += 0.25
raise RuntimeError
def test_should_send_message_and_move_file(self):
self.make_config(__name__, 'readfiles')
self.expect_message_on('test.data')
self.start_server_in_subprocess(
self.tmpdir, 'test.data', module='gocept.amqprun.readfiles')
new_path = os.path.join(self.tmpdir, 'new')
self.wait_for_directory_present(new_path)
with open(os.path.join(new_path, 'foo.xml'), 'w') as f:
f.write('contents')
message = self.wait_for_message()
self.assertEqual('contents', message.body)
self.assertEqual(
'foo.xml', message.properties['application_headers']['X-Filename'])
self.assertEqual(0, len(os.listdir(os.path.join(self.tmpdir, 'new'))))
self.assertEqual(1, len(os.listdir(os.path.join(self.tmpdir, 'cur'))))
self.stop_server_in_subprocess()
def test_process_should_exit_on_filesystem_error(self):
self.make_config(__name__, 'readfiles-error')
self.start_server_in_subprocess(
self.tmpdir, 'test.route', module='gocept.amqprun.readfiles')
directory_path = os.path.join(self.tmpdir, 'new')
self.wait_for_directory_present(directory_path)
os.rmdir(directory_path)
status = self.wait_for_subprocess_exit()
self.assertNotEqual(0, status)
self.stdout.seek(0)
self.assertIn('Unhandled exception, terminating.', self.stdout.read()) | 0.454714 | 0.305341 |
from DBC import *
import unit_testing as ut
# Class tests
class TestArgumentError(ut.ClassTest):
def __init__(self):
ut.ClassTest.__init__(self)
def test___init__(self):
ut.test(isinstance(ArgumentError('function', 'parameter', tuple),
ArgumentError))
argumenterror1 = ArgumentError('test', 'expression', bool)
ut.test(argumenterror1.function_name == 'test')
ut.test(argumenterror1.parameter_name == 'expression')
ut.test(argumenterror1.acceptable_types == bool)
ut.test(argumenterror1.error_message == ('test requires bool for '
'expression.'))
argumenterror = TestArgumentError()
class TestOutputError(ut.ClassTest):
def __init__(self):
ut.ClassTest.__init__(self)
def test___init__(self):
ut.test(isinstance(OutputError('hi'), OutputError))
outputerror1 = OutputError('Function should return int.')
ut.test(outputerror1.error_message == 'Function should return int.')
outputerror = TestOutputError()
# Function tests
def test_check_arg_type():
ut.test(check_arg_type(1, 'num', (int, float)) == None)
ut.test(check_arg_type('', 'string', str) == None)
ut.test(check_arg_type([], 'data', (str, int, list, tuple)) == None)
errors = 0
try:
ut.test(check_arg_type(1, 'string', str) == None)
except ArgumentError:
errors += 1
try:
ut.test(check_arg_type('', 'num', (int, float)) == None)
except ArgumentError:
errors += 1
try:
ut.test(check_arg_type(1.5, 'parameter', (int, str, tuple, list, bool))
== None)
except ArgumentError:
errors += 1
ut.test(errors == 3)
def test_check_output_type():
ut.test(check_output_type(5, int) == None)
ut.test(check_output_type('', str) == None)
ut.test(check_output_type('', (list, str, int)) == None)
errors = 0
try:
ut.test(check_output_type('', int) == None)
except OutputError:
errors += 1
try:
ut.test(check_output_type('', list) == None)
except OutputError:
errors += 1
try:
ut.test(check_output_type(8.7, (list, str, tuple)) == None)
except OutputError:
errors += 1
ut.test(errors == 3)
def main():
# Class tests
argumenterror.test_all()
outputerror.test_all()
# Function tests
test_check_arg_type()
test_check_output_type()
print('All tests passed.')
if __name__ == '__main__':
main() | DBC_test.py | from DBC import *
import unit_testing as ut
# Class tests
class TestArgumentError(ut.ClassTest):
def __init__(self):
ut.ClassTest.__init__(self)
def test___init__(self):
ut.test(isinstance(ArgumentError('function', 'parameter', tuple),
ArgumentError))
argumenterror1 = ArgumentError('test', 'expression', bool)
ut.test(argumenterror1.function_name == 'test')
ut.test(argumenterror1.parameter_name == 'expression')
ut.test(argumenterror1.acceptable_types == bool)
ut.test(argumenterror1.error_message == ('test requires bool for '
'expression.'))
argumenterror = TestArgumentError()
class TestOutputError(ut.ClassTest):
def __init__(self):
ut.ClassTest.__init__(self)
def test___init__(self):
ut.test(isinstance(OutputError('hi'), OutputError))
outputerror1 = OutputError('Function should return int.')
ut.test(outputerror1.error_message == 'Function should return int.')
outputerror = TestOutputError()
# Function tests
def test_check_arg_type():
ut.test(check_arg_type(1, 'num', (int, float)) == None)
ut.test(check_arg_type('', 'string', str) == None)
ut.test(check_arg_type([], 'data', (str, int, list, tuple)) == None)
errors = 0
try:
ut.test(check_arg_type(1, 'string', str) == None)
except ArgumentError:
errors += 1
try:
ut.test(check_arg_type('', 'num', (int, float)) == None)
except ArgumentError:
errors += 1
try:
ut.test(check_arg_type(1.5, 'parameter', (int, str, tuple, list, bool))
== None)
except ArgumentError:
errors += 1
ut.test(errors == 3)
def test_check_output_type():
ut.test(check_output_type(5, int) == None)
ut.test(check_output_type('', str) == None)
ut.test(check_output_type('', (list, str, int)) == None)
errors = 0
try:
ut.test(check_output_type('', int) == None)
except OutputError:
errors += 1
try:
ut.test(check_output_type('', list) == None)
except OutputError:
errors += 1
try:
ut.test(check_output_type(8.7, (list, str, tuple)) == None)
except OutputError:
errors += 1
ut.test(errors == 3)
def main():
# Class tests
argumenterror.test_all()
outputerror.test_all()
# Function tests
test_check_arg_type()
test_check_output_type()
print('All tests passed.')
if __name__ == '__main__':
main() | 0.432663 | 0.419351 |
from __future__ import unicode_literals
import tarfile
from .buffer import DockerTempFile
from .dockerfile import DockerFile
class DockerContext(DockerTempFile):
"""
Class for constructing a Docker context tarball, that can be sent to the remote API. If a :class:`~DockerFile`
instance is added, the resulting Dockerfile and files added there are considered automatically.
:param dockerfile: Optional :class:`~DockerFile` instance, or file path to a Dockerfile.
:type dockerfile: DockerFile or unicode
:param compression: Compression for the tarball; default is gzip (`gz`); use `bz2` for bzip2.
:type compression: unicode
:param encoding: Encoding for the tarfile; default is `utf-8`.
:type encoding: unicode
:param finalize: Finalize the tarball immediately.
:type finalize: bool
:param kwargs: Additional kwargs for :func:`tarfile.open`.
"""
def __init__(self, dockerfile=None, compression='gz', encoding='utf-8', finalize=False, **kwargs):
super(DockerContext, self).__init__()
open_mode = ':'.join(('w', compression or ''))
if compression == 'gz':
self._stream_encoding = 'gzip'
elif compression == 'bz2':
self._stream_encoding = 'bzip2'
else:
self._stream_encoding = None
self.tarfile = tarfile.open(mode=open_mode, fileobj=self._fileobj, encoding=encoding, **kwargs)
if dockerfile is not None:
self.add_dockerfile(dockerfile)
if finalize:
if dockerfile is None:
raise ValueError("Cannot finalize the docker context tarball without a dockerfile object.")
self.finalize()
def add(self, name, *args, **kwargs):
"""
Add a file or directory to the context tarball.
:param name: File or directory path.
:type name: unicode
:param args: Additional args for :meth:`tarfile.TarFile.add`.
:param kwargs: Additional kwargs for :meth:`tarfile.TarFile.add`.
"""
self.tarfile.add(name, *args, **kwargs)
def addfile(self, *args, **kwargs):
"""
Add a file to the tarball using a :class:`~tarfile.TarInfo` object. For details, see
:meth:`tarfile.TarFile.addfile`.
:param args: Args to :meth:`tarfile.TarFile.addfile`.
:param kwargs: Kwargs to :meth:`tarfile.TarFile.addfile`
"""
self.tarfile.addfile(*args, **kwargs)
def addarchive(self, name):
"""
Add (i.e. copy) the contents of another tarball to this one.
:param name: File path to the tar archive.
:type name: unicode
"""
with tarfile.open(name, 'r') as st:
for member in st.getmembers():
self.tarfile.addfile(member, st.extractfile(member.name))
def add_dockerfile(self, dockerfile):
"""
Add a Dockerfile to the context. If it is a :class:`DockerFile` instance, files and archive contents added there
will automatically be copied to the tarball. The :class:`DockerFile` will be finalized.
:param dockerfile: :class:`DockerFile` instance or file path to a Dockerfile.
:type dockerfile: DockerFile or unicode
"""
if isinstance(dockerfile, DockerFile):
dockerfile.finalize()
dockerfile_obj = dockerfile.fileobj
for path, arcname in dockerfile._files:
self.add(path, arcname=arcname)
for archive in dockerfile._archives:
self.addarchive(archive)
tarinfo = tarfile.TarInfo('Dockerfile')
tarinfo.size = dockerfile_obj.tell()
dockerfile_obj.seek(0)
self.tarfile.addfile(tarinfo, dockerfile_obj)
else:
self.add(dockerfile, arcname='Dockerfile')
def gettarinfo(self, *args, **kwargs):
"""
Returns a :class:`~tarfile.TarInfo` object. See :meth:`tarfile.TarFile.gettarinfo`.
:param args: Args to :meth:`tarfile.TarFile.gettarinfo`.
:param kwargs: Kwargs to :meth:`tarfile.TarFile.gettarinfo`.
:return: :class:`~tarfile.TarInfo` object.
:rtype: tarfile.TarInfo
"""
return self.tarfile.gettarinfo(*args, **kwargs)
def finalize(self):
"""
Finalizes the context tarball and sets the file position to 0. The tar file is then closed, but the underlying
file object can still be read.
"""
self.tarfile.close()
self._fileobj.seek(0)
@property
def name(self):
"""
Returns the name of the underlying file object.
:return: Name of the file object.
:rtype: unicode
"""
return self._fileobj.name
@property
def stream_encoding(self):
"""
Returns the stream encoding, as used when calling :meth:`docker.client.Client.build`.
:return: Stream encoding.
:rtype: unicode
"""
return self._stream_encoding
def save(self, name):
"""
Saves the entire Docker context tarball to a separate file.
:param name: File path to save the tarball into.
:type name: unicode
"""
with open(name, 'wb+') as f:
while True:
buf = self._fileobj.read()
if not buf:
break
f.write(buf) | dockermap/build/context.py | from __future__ import unicode_literals
import tarfile
from .buffer import DockerTempFile
from .dockerfile import DockerFile
class DockerContext(DockerTempFile):
"""
Class for constructing a Docker context tarball, that can be sent to the remote API. If a :class:`~DockerFile`
instance is added, the resulting Dockerfile and files added there are considered automatically.
:param dockerfile: Optional :class:`~DockerFile` instance, or file path to a Dockerfile.
:type dockerfile: DockerFile or unicode
:param compression: Compression for the tarball; default is gzip (`gz`); use `bz2` for bzip2.
:type compression: unicode
:param encoding: Encoding for the tarfile; default is `utf-8`.
:type encoding: unicode
:param finalize: Finalize the tarball immediately.
:type finalize: bool
:param kwargs: Additional kwargs for :func:`tarfile.open`.
"""
def __init__(self, dockerfile=None, compression='gz', encoding='utf-8', finalize=False, **kwargs):
super(DockerContext, self).__init__()
open_mode = ':'.join(('w', compression or ''))
if compression == 'gz':
self._stream_encoding = 'gzip'
elif compression == 'bz2':
self._stream_encoding = 'bzip2'
else:
self._stream_encoding = None
self.tarfile = tarfile.open(mode=open_mode, fileobj=self._fileobj, encoding=encoding, **kwargs)
if dockerfile is not None:
self.add_dockerfile(dockerfile)
if finalize:
if dockerfile is None:
raise ValueError("Cannot finalize the docker context tarball without a dockerfile object.")
self.finalize()
def add(self, name, *args, **kwargs):
"""
Add a file or directory to the context tarball.
:param name: File or directory path.
:type name: unicode
:param args: Additional args for :meth:`tarfile.TarFile.add`.
:param kwargs: Additional kwargs for :meth:`tarfile.TarFile.add`.
"""
self.tarfile.add(name, *args, **kwargs)
def addfile(self, *args, **kwargs):
"""
Add a file to the tarball using a :class:`~tarfile.TarInfo` object. For details, see
:meth:`tarfile.TarFile.addfile`.
:param args: Args to :meth:`tarfile.TarFile.addfile`.
:param kwargs: Kwargs to :meth:`tarfile.TarFile.addfile`
"""
self.tarfile.addfile(*args, **kwargs)
def addarchive(self, name):
"""
Add (i.e. copy) the contents of another tarball to this one.
:param name: File path to the tar archive.
:type name: unicode
"""
with tarfile.open(name, 'r') as st:
for member in st.getmembers():
self.tarfile.addfile(member, st.extractfile(member.name))
def add_dockerfile(self, dockerfile):
"""
Add a Dockerfile to the context. If it is a :class:`DockerFile` instance, files and archive contents added there
will automatically be copied to the tarball. The :class:`DockerFile` will be finalized.
:param dockerfile: :class:`DockerFile` instance or file path to a Dockerfile.
:type dockerfile: DockerFile or unicode
"""
if isinstance(dockerfile, DockerFile):
dockerfile.finalize()
dockerfile_obj = dockerfile.fileobj
for path, arcname in dockerfile._files:
self.add(path, arcname=arcname)
for archive in dockerfile._archives:
self.addarchive(archive)
tarinfo = tarfile.TarInfo('Dockerfile')
tarinfo.size = dockerfile_obj.tell()
dockerfile_obj.seek(0)
self.tarfile.addfile(tarinfo, dockerfile_obj)
else:
self.add(dockerfile, arcname='Dockerfile')
def gettarinfo(self, *args, **kwargs):
"""
Returns a :class:`~tarfile.TarInfo` object. See :meth:`tarfile.TarFile.gettarinfo`.
:param args: Args to :meth:`tarfile.TarFile.gettarinfo`.
:param kwargs: Kwargs to :meth:`tarfile.TarFile.gettarinfo`.
:return: :class:`~tarfile.TarInfo` object.
:rtype: tarfile.TarInfo
"""
return self.tarfile.gettarinfo(*args, **kwargs)
def finalize(self):
"""
Finalizes the context tarball and sets the file position to 0. The tar file is then closed, but the underlying
file object can still be read.
"""
self.tarfile.close()
self._fileobj.seek(0)
@property
def name(self):
"""
Returns the name of the underlying file object.
:return: Name of the file object.
:rtype: unicode
"""
return self._fileobj.name
@property
def stream_encoding(self):
"""
Returns the stream encoding, as used when calling :meth:`docker.client.Client.build`.
:return: Stream encoding.
:rtype: unicode
"""
return self._stream_encoding
def save(self, name):
"""
Saves the entire Docker context tarball to a separate file.
:param name: File path to save the tarball into.
:type name: unicode
"""
with open(name, 'wb+') as f:
while True:
buf = self._fileobj.read()
if not buf:
break
f.write(buf) | 0.786336 | 0.151341 |
"""Module containing the get_ece_bias function."""
import numpy as np
from caltrain.run_calibration import estimate_ece
from caltrain.utils import get_hash_key
def get_ece_bias(config,
n_samples,
ce_types,
params,
cached_result=None,
data_dir=None):
"""Get ece bias for given sample sizes, ce_types, and parametric datasets.
Args:
config (dict): Configuration dictionary:
* num_reps (str): The number of repititions
* num_bins (int): The number of bins for binning scheme
* split (string): train/test split (default="")
* a (float): Coefficient for logistic model: E[Y | f(x)] =
1/(1+exp(-a*(fx-b)))
* b (float): Coefficient for logistic model: E[Y | f(x)] =
1/(1+exp(-a*(fx-b)))
* d (float): Exponent for polynomial model: E[Y | f(x)] = f(x)^d
* alpha (float): Parameter for Beta distribution: f(x)~Beta(alpha, beta)
* beta (float): Parameter for Beta distribution: f(x)~Beta(alpha, beta)
n_samples (int): Number of samples from the model
ce_types (list[str]): = list of calibration error types: 'em_ece_bin',
'ew_ece_bin', 'em_ece_sweep', 'ew_ece_sweep'
params (dict): Dictionary of dataset configurations; each value is of
len(num_datasets)
cached_result (bool): Use cached result (default=True)
data_dir (str): location of data dir
Returns:
array (n_samples, ce_types, num_datasets): Computed ECE Biases.
"""
num_datasets = len(params['a'])
ece_bias = np.zeros((len(n_samples), len(ce_types), num_datasets))
for i in range(len(n_samples)):
config['num_samples'] = n_samples[i]
for ce_idx in range(len(ce_types)):
config['ce_type'] = ce_types[ce_idx]
for j in range(num_datasets):
config['a'] = params['a'][j]
config['b'] = params['b'][j]
config['alpha'] = params['alpha'][j]
config['beta'] = params['beta'][j]
config['d'] = params['d'][j]
config['dataset'] = params['dataset'][j]
hash_key = get_hash_key(config)
if cached_result:
if hash_key in cached_result:
# print(f'{hash_key} already computed, loading cached result.')
mean = cached_result[hash_key]['bias']
else:
mean, _, _ = estimate_ece(config, data_dir=data_dir)
ece_bias[i, ce_idx, j] = mean
return ece_bias | caltrain/get_ece_bias.py |
"""Module containing the get_ece_bias function."""
import numpy as np
from caltrain.run_calibration import estimate_ece
from caltrain.utils import get_hash_key
def get_ece_bias(config,
n_samples,
ce_types,
params,
cached_result=None,
data_dir=None):
"""Get ece bias for given sample sizes, ce_types, and parametric datasets.
Args:
config (dict): Configuration dictionary:
* num_reps (str): The number of repititions
* num_bins (int): The number of bins for binning scheme
* split (string): train/test split (default="")
* a (float): Coefficient for logistic model: E[Y | f(x)] =
1/(1+exp(-a*(fx-b)))
* b (float): Coefficient for logistic model: E[Y | f(x)] =
1/(1+exp(-a*(fx-b)))
* d (float): Exponent for polynomial model: E[Y | f(x)] = f(x)^d
* alpha (float): Parameter for Beta distribution: f(x)~Beta(alpha, beta)
* beta (float): Parameter for Beta distribution: f(x)~Beta(alpha, beta)
n_samples (int): Number of samples from the model
ce_types (list[str]): = list of calibration error types: 'em_ece_bin',
'ew_ece_bin', 'em_ece_sweep', 'ew_ece_sweep'
params (dict): Dictionary of dataset configurations; each value is of
len(num_datasets)
cached_result (bool): Use cached result (default=True)
data_dir (str): location of data dir
Returns:
array (n_samples, ce_types, num_datasets): Computed ECE Biases.
"""
num_datasets = len(params['a'])
ece_bias = np.zeros((len(n_samples), len(ce_types), num_datasets))
for i in range(len(n_samples)):
config['num_samples'] = n_samples[i]
for ce_idx in range(len(ce_types)):
config['ce_type'] = ce_types[ce_idx]
for j in range(num_datasets):
config['a'] = params['a'][j]
config['b'] = params['b'][j]
config['alpha'] = params['alpha'][j]
config['beta'] = params['beta'][j]
config['d'] = params['d'][j]
config['dataset'] = params['dataset'][j]
hash_key = get_hash_key(config)
if cached_result:
if hash_key in cached_result:
# print(f'{hash_key} already computed, loading cached result.')
mean = cached_result[hash_key]['bias']
else:
mean, _, _ = estimate_ece(config, data_dir=data_dir)
ece_bias[i, ce_idx, j] = mean
return ece_bias | 0.804483 | 0.487978 |
import tracking.helpers as hlp
from config.locations import locations
class Sensor():
"""
Handles data and new events for a single sensor.
One Sensor object per sensor in scheme.
Handles buffering scheme for grouping CCONs.
"""
def __init__(self, device, sensor_id):
"""
Sensor class constructor.
Parameters
----------
device : dict
Dictionary of device information fetched from API by Director.
sensor_id : str
Sensor identifier.
"""
# give arguments to self
self.device = device
self.sensor_id = sensor_id
# initialise lists and dictionaries
self.unixtime = []
self.values = []
self.rssi = []
self.ccons = {}
self.ccon_ids = []
self.max_rssi = []
# initialise variables
self.n_events = 0
self.last_event = -1
self.buffering = False
self.event_buffer = []
# initialise ccon list with zones information
self.__initialise_ccons_list()
def __initialise_ccons_list(self):
"""
In order to keep the order of zones, initialise internal
CCON- and data lists in order provided by locations list.
"""
# iterate predefined locations
for loc in locations:
# iterate ccons at location
for ccon in loc['ccons']:
# update internal ccon list
self.ccons[ccon] = len(self.ccons)
self.rssi.append([None])
# update data lists with initial None value
# this is to ensure same length in all lists
self.unixtime.append(None)
self.max_rssi.append(None)
# iterate event counter to reflect initialisation
self.n_events += 1
# create locations map
# this is to relate a location id to each CCON
self.location_map = []
# iterate locaitons
for i in range(len(locations)):
# iterate ccons at location
for j in range(len(locations[i]['ccons'])):
# update ccon with the location identifier integer
self.location_map.append(i)
# unknown ccons will have the the id n+1
self.location_map_unknown = i + 1
def get_timestamps(self):
"""
Returns unixtime axis converted to pandas datetime for visualization purposes.
Returns
-------
return : list
List of pandas datetime objects converted from unixtimes.
"""
return hlp.ux2tx(self.unixtime)
def get_values(self):
"""
Returns values stored in sensor main list.
Returns
-------
return : list
List of main values of sensor object.
"""
return self.values
def new_event_data(self, event):
"""
Receive new event data from Director.
Apply new event to buffering scheme where the same
event som different CCONs are combined.
Parameters
----------
event : dict
Dictionary of event information received in the stream.
"""
# isolate event ccon
ccon = event['data']['networkStatus']['cloudConnectors'][0]
# check if ccon already in buffer
exists = False
for i in range(len(self.event_buffer)):
e_ccon = self.event_buffer[i]['data']['networkStatus']['cloudConnectors'][0]
if e_ccon['id'] == ccon['id']:
self.event_buffer[i] = event
exists = True
# add to buffer
if not exists:
self.event_buffer.append(event)
# get unixtime of this event
_, ux = hlp.convert_event_data_timestamp(event['data']['networkStatus']['updateTime'])
# update buffer timer
self.last_event = ux
self.buffering = True
def update_event_data(self, ux_calltime):
"""
Updates rssi matrix and CCON lists.
Is called when buffer is complete.
Parameters
----------
ux_calltime : int
Unixtime when function is called.
"""
# get unixtime of events
_, ux = hlp.convert_event_data_timestamp(self.event_buffer[-1]['data']['networkStatus']['updateTime'])
self.unixtime.append(ux_calltime)
# update event counter
self.n_events += 1
# iterate each event ccon
for event in self.event_buffer:
# isolate ccon
ccon = event['data']['networkStatus']['cloudConnectors'][0]
# add new ccon if not yet known
if ccon['id'] not in self.ccons:
# add new row to rssi matrix
self.rssi.append([0 for i in range(self.n_events-1)])
# add ccon id to index lookup dictionary
self.ccons[ccon['id']] = len(self.ccons)
self.ccon_ids.append(ccon['id'])
self.location_map.append(self.location_map_unknown)
# append rssi
self.rssi[self.ccons[ccon['id']]].append(ccon['signalStrength'])
# append minimum value to non-talking ccon rows
for i in range(len(self.rssi)):
if len(self.rssi[i]) < self.n_events:
self.rssi[i].append(0)
# get max rssi
argmax = -1
valmax = -1
for i in range(len(self.rssi)):
if self.rssi[i][-1] > valmax:
valmax = self.rssi[i][-1]
argmax = i
self.max_rssi.append(argmax)
# reset buffer variables
self.buffering = False
self.event_buffer = []
def update_empty(self, ux_calltime):
"""
Appends rssi matrix and other lists with empty / None values.
Called when sensor has not talked to a CCON in some time.
Parameters
----------
ux_calltime : int
Unixtime when function is called.
"""
self.n_events += 1
# check how much time has passed since last event
self.unixtime.append(ux_calltime)
self.max_rssi.append(None)
# append minimum value to non-talking ccon rows
for i in range(len(self.rssi)):
if len(self.rssi[i]) < self.n_events:
self.rssi[i].append(0) | tracking/sensors.py | import tracking.helpers as hlp
from config.locations import locations
class Sensor():
"""
Handles data and new events for a single sensor.
One Sensor object per sensor in scheme.
Handles buffering scheme for grouping CCONs.
"""
def __init__(self, device, sensor_id):
"""
Sensor class constructor.
Parameters
----------
device : dict
Dictionary of device information fetched from API by Director.
sensor_id : str
Sensor identifier.
"""
# give arguments to self
self.device = device
self.sensor_id = sensor_id
# initialise lists and dictionaries
self.unixtime = []
self.values = []
self.rssi = []
self.ccons = {}
self.ccon_ids = []
self.max_rssi = []
# initialise variables
self.n_events = 0
self.last_event = -1
self.buffering = False
self.event_buffer = []
# initialise ccon list with zones information
self.__initialise_ccons_list()
def __initialise_ccons_list(self):
"""
In order to keep the order of zones, initialise internal
CCON- and data lists in order provided by locations list.
"""
# iterate predefined locations
for loc in locations:
# iterate ccons at location
for ccon in loc['ccons']:
# update internal ccon list
self.ccons[ccon] = len(self.ccons)
self.rssi.append([None])
# update data lists with initial None value
# this is to ensure same length in all lists
self.unixtime.append(None)
self.max_rssi.append(None)
# iterate event counter to reflect initialisation
self.n_events += 1
# create locations map
# this is to relate a location id to each CCON
self.location_map = []
# iterate locaitons
for i in range(len(locations)):
# iterate ccons at location
for j in range(len(locations[i]['ccons'])):
# update ccon with the location identifier integer
self.location_map.append(i)
# unknown ccons will have the the id n+1
self.location_map_unknown = i + 1
def get_timestamps(self):
"""
Returns unixtime axis converted to pandas datetime for visualization purposes.
Returns
-------
return : list
List of pandas datetime objects converted from unixtimes.
"""
return hlp.ux2tx(self.unixtime)
def get_values(self):
"""
Returns values stored in sensor main list.
Returns
-------
return : list
List of main values of sensor object.
"""
return self.values
def new_event_data(self, event):
"""
Receive new event data from Director.
Apply new event to buffering scheme where the same
event som different CCONs are combined.
Parameters
----------
event : dict
Dictionary of event information received in the stream.
"""
# isolate event ccon
ccon = event['data']['networkStatus']['cloudConnectors'][0]
# check if ccon already in buffer
exists = False
for i in range(len(self.event_buffer)):
e_ccon = self.event_buffer[i]['data']['networkStatus']['cloudConnectors'][0]
if e_ccon['id'] == ccon['id']:
self.event_buffer[i] = event
exists = True
# add to buffer
if not exists:
self.event_buffer.append(event)
# get unixtime of this event
_, ux = hlp.convert_event_data_timestamp(event['data']['networkStatus']['updateTime'])
# update buffer timer
self.last_event = ux
self.buffering = True
def update_event_data(self, ux_calltime):
"""
Updates rssi matrix and CCON lists.
Is called when buffer is complete.
Parameters
----------
ux_calltime : int
Unixtime when function is called.
"""
# get unixtime of events
_, ux = hlp.convert_event_data_timestamp(self.event_buffer[-1]['data']['networkStatus']['updateTime'])
self.unixtime.append(ux_calltime)
# update event counter
self.n_events += 1
# iterate each event ccon
for event in self.event_buffer:
# isolate ccon
ccon = event['data']['networkStatus']['cloudConnectors'][0]
# add new ccon if not yet known
if ccon['id'] not in self.ccons:
# add new row to rssi matrix
self.rssi.append([0 for i in range(self.n_events-1)])
# add ccon id to index lookup dictionary
self.ccons[ccon['id']] = len(self.ccons)
self.ccon_ids.append(ccon['id'])
self.location_map.append(self.location_map_unknown)
# append rssi
self.rssi[self.ccons[ccon['id']]].append(ccon['signalStrength'])
# append minimum value to non-talking ccon rows
for i in range(len(self.rssi)):
if len(self.rssi[i]) < self.n_events:
self.rssi[i].append(0)
# get max rssi
argmax = -1
valmax = -1
for i in range(len(self.rssi)):
if self.rssi[i][-1] > valmax:
valmax = self.rssi[i][-1]
argmax = i
self.max_rssi.append(argmax)
# reset buffer variables
self.buffering = False
self.event_buffer = []
def update_empty(self, ux_calltime):
"""
Appends rssi matrix and other lists with empty / None values.
Called when sensor has not talked to a CCON in some time.
Parameters
----------
ux_calltime : int
Unixtime when function is called.
"""
self.n_events += 1
# check how much time has passed since last event
self.unixtime.append(ux_calltime)
self.max_rssi.append(None)
# append minimum value to non-talking ccon rows
for i in range(len(self.rssi)):
if len(self.rssi[i]) < self.n_events:
self.rssi[i].append(0) | 0.649467 | 0.339061 |
from shop.shopper_base import ShopperBase
import datetime
import os
import time
import math
import random
from typing import Dict, Tuple, Union, List, Callable
import keyboard
import numpy as np
from screen import convert_screen_to_monitor, grab, convert_abs_to_monitor, convert_screen_to_abs, convert_monitor_to_screen
from config import Config
from logger import Logger
from npc_manager import Npc, open_npc_menu, press_npc_btn
from template_finder import TemplateFinder
from utils.custom_mouse import mouse
from utils.misc import wait
def exit(run_obj):
run_time = str(datetime.timedelta(seconds=round(time.time() - run_obj.start_time)))
Logger.info("Exiting shopping mall...")
print(
"STATS \truns \t\ttime \titems_evaluated \titems_bought\n"
f"\t{run_obj.run_count} \t\t{run_time}"
f"\t\t{run_obj.items_evaluated} \t\t\t{run_obj.items_bought}"
)
os._exit(0)
class DrognanShopper(ShopperBase):
"""
Shop at Drognan for Items.
Currently supported: Hammerdin scepters
In order to start the shopping bot:
1.) Run this this file in Python.
2.) Be ingame in Lut Golein (Act 2 town)
3.) Stand close to Drognan and the town exit (must be top right layout)
4.) While being ingame, press resume_key (default F11) to start the shopping, and exit_key (default F12) to stop it.
"""
def __init__(self):
# Set look_for variables to False if you dont like your personal shopper to look for these
# Obviously something need to be set to True, or your shopper will be very confused
self.look_for_scepters = Config().shop["shop_hammerdin_scepters"]
self.speed_factor = 1.0 + Config().shop["speed_factor"]
if (self.speed_factor <= 0):
Logger.error("Can not use a speed factor less than negative 1!! Please update shop.ini. Exiting.")
os._exit(0)
self.apply_pather_adjustment = Config().shop["apply_pather_adjustment"]
self.run_count = 0
self.start_time = time.time()
# items config
self.roi_shop_item_stats = [0, 0, Config().ui_pos["screen_width"] // 2, Config().ui_pos["screen_height"] - 100]
self.roi_vendor = Config().ui_roi["left_inventory"]
self.rx, self.ry, _, _ = self.roi_vendor
self.sb_x, self.sb_y = convert_screen_to_monitor((180, 77))
self.c_x, self.c_y = convert_screen_to_monitor((Config().ui_pos["center_x"], Config().ui_pos["center_y"]))
self.items_evaluated = 0
self.items_bought = 0
self.look_for_leaf_runeword_base = Config().shop["shop_leaf_runeword_base"]
self.look_for_wand_of_life_tap = Config().shop["shop_weapon_life_tap"]
self.look_for_wand_of_lower_resist = Config().shop["shop_weapon_lower_resist"]
super(DrognanShopper, self).__init__()
self.get_tabs()
def get_name(self):
return "Drognan"
def run(self):
Logger.info("Personal Drognan Shopper at your service! Hang on, running some errands...")
self.reset_shop()
self.shop_loop()
def shop_loop(self):
# This is the main shopping loop. It can be further generalized to more easily support new items,
# But this is sufficient for now.
while True:
self.check_run_time()
trade_is_open = False
while not trade_is_open:
open_npc_menu(Npc.DROGNAN)
press_npc_btn(Npc.DROGNAN, "trade")
trade_is_open = self.is_trade_open()
time.sleep(0.1)
img = grab()
for search_tab in self.search_tabs:
self.click_tab(search_tab)
self.search_for_leaf_runeword_base()
self.search_for_wand_of_life_tap()
self.search_for_wand_of_lower_resist()
if self.look_for_scepters is True:
mouse.move(self.sb_x, self.sb_y, randomize=3, delay_factor=[0.6, 0.8])
wait(0.05, 0.1)
mouse.press(button="left")
wait(0.05, 0.1)
mouse.release(button="left")
wait(0.3, 0.4)
# Search for items
item_pos = []
img = grab().copy()
item_keys = ["SCEPTER1", "SCEPTER2", "SCEPTER3", "SCEPTER4", "SCEPTER5"]
for ck in item_keys:
template_match = TemplateFinder(True).search(ck, img, roi=self.roi_vendor)
if template_match.valid:
item_pos.append(template_match.center)
# check out each item
for pos in item_pos:
x_m, y_m = convert_screen_to_monitor(pos)
mouse.move(x_m, y_m, randomize=3, delay_factor=[0.5, 0.6])
wait(0.5, 0.6)
img_stats = grab()
# First check for +2 Paladin Skills. This weeds out most scepters right away.
if TemplateFinder(True).search("2_TO_PALADIN_SKILLS", img_stats, roi=self.roi_shop_item_stats, threshold=0.94).valid:
# Has 2 Pally skills, check blessed hammers next
if TemplateFinder(True).search("TO_BLESSED_HAMMERS", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
# Has 2 Pally skills AND Blessed Hammers, check Concentration next
if TemplateFinder(True).search("TO_CONCENTRATION", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
# Has 2 Pally skills AND Blessed Hammers AND Concentration. We're good! Buy it!
mouse.click(button="right")
Logger.info(f"Item bought!")
self.items_bought += 1
time.sleep(1)
self.items_evaluated += 1
keyboard.send("esc")
# Done with this shopping round
self.reset_shop()
self.run_count += 1
def reset_shop(self):
# We want to walk out the town exit to the top right and come back down to drognan
# This can probably be tweaked but seems to work well enough for now.
# Exit town
self.move_shopper(200, -100, 2.5)
self.move_shopper(-200, 100, 2)
def get_tabs(self):
"""
Sets up which tabs we want to search in
"""
if self.look_for_wand_of_life_tap or self.look_for_wand_of_lower_resist:
self.search_tabs.add(2)
if self.look_for_leaf_runeword_base:
self.search_tabs.add(2)
self.search_tabs.add(3) | src/shop/shopper_drognan.py | from shop.shopper_base import ShopperBase
import datetime
import os
import time
import math
import random
from typing import Dict, Tuple, Union, List, Callable
import keyboard
import numpy as np
from screen import convert_screen_to_monitor, grab, convert_abs_to_monitor, convert_screen_to_abs, convert_monitor_to_screen
from config import Config
from logger import Logger
from npc_manager import Npc, open_npc_menu, press_npc_btn
from template_finder import TemplateFinder
from utils.custom_mouse import mouse
from utils.misc import wait
def exit(run_obj):
run_time = str(datetime.timedelta(seconds=round(time.time() - run_obj.start_time)))
Logger.info("Exiting shopping mall...")
print(
"STATS \truns \t\ttime \titems_evaluated \titems_bought\n"
f"\t{run_obj.run_count} \t\t{run_time}"
f"\t\t{run_obj.items_evaluated} \t\t\t{run_obj.items_bought}"
)
os._exit(0)
class DrognanShopper(ShopperBase):
"""
Shop at Drognan for Items.
Currently supported: Hammerdin scepters
In order to start the shopping bot:
1.) Run this this file in Python.
2.) Be ingame in Lut Golein (Act 2 town)
3.) Stand close to Drognan and the town exit (must be top right layout)
4.) While being ingame, press resume_key (default F11) to start the shopping, and exit_key (default F12) to stop it.
"""
def __init__(self):
# Set look_for variables to False if you dont like your personal shopper to look for these
# Obviously something need to be set to True, or your shopper will be very confused
self.look_for_scepters = Config().shop["shop_hammerdin_scepters"]
self.speed_factor = 1.0 + Config().shop["speed_factor"]
if (self.speed_factor <= 0):
Logger.error("Can not use a speed factor less than negative 1!! Please update shop.ini. Exiting.")
os._exit(0)
self.apply_pather_adjustment = Config().shop["apply_pather_adjustment"]
self.run_count = 0
self.start_time = time.time()
# items config
self.roi_shop_item_stats = [0, 0, Config().ui_pos["screen_width"] // 2, Config().ui_pos["screen_height"] - 100]
self.roi_vendor = Config().ui_roi["left_inventory"]
self.rx, self.ry, _, _ = self.roi_vendor
self.sb_x, self.sb_y = convert_screen_to_monitor((180, 77))
self.c_x, self.c_y = convert_screen_to_monitor((Config().ui_pos["center_x"], Config().ui_pos["center_y"]))
self.items_evaluated = 0
self.items_bought = 0
self.look_for_leaf_runeword_base = Config().shop["shop_leaf_runeword_base"]
self.look_for_wand_of_life_tap = Config().shop["shop_weapon_life_tap"]
self.look_for_wand_of_lower_resist = Config().shop["shop_weapon_lower_resist"]
super(DrognanShopper, self).__init__()
self.get_tabs()
def get_name(self):
return "Drognan"
def run(self):
Logger.info("Personal Drognan Shopper at your service! Hang on, running some errands...")
self.reset_shop()
self.shop_loop()
def shop_loop(self):
# This is the main shopping loop. It can be further generalized to more easily support new items,
# But this is sufficient for now.
while True:
self.check_run_time()
trade_is_open = False
while not trade_is_open:
open_npc_menu(Npc.DROGNAN)
press_npc_btn(Npc.DROGNAN, "trade")
trade_is_open = self.is_trade_open()
time.sleep(0.1)
img = grab()
for search_tab in self.search_tabs:
self.click_tab(search_tab)
self.search_for_leaf_runeword_base()
self.search_for_wand_of_life_tap()
self.search_for_wand_of_lower_resist()
if self.look_for_scepters is True:
mouse.move(self.sb_x, self.sb_y, randomize=3, delay_factor=[0.6, 0.8])
wait(0.05, 0.1)
mouse.press(button="left")
wait(0.05, 0.1)
mouse.release(button="left")
wait(0.3, 0.4)
# Search for items
item_pos = []
img = grab().copy()
item_keys = ["SCEPTER1", "SCEPTER2", "SCEPTER3", "SCEPTER4", "SCEPTER5"]
for ck in item_keys:
template_match = TemplateFinder(True).search(ck, img, roi=self.roi_vendor)
if template_match.valid:
item_pos.append(template_match.center)
# check out each item
for pos in item_pos:
x_m, y_m = convert_screen_to_monitor(pos)
mouse.move(x_m, y_m, randomize=3, delay_factor=[0.5, 0.6])
wait(0.5, 0.6)
img_stats = grab()
# First check for +2 Paladin Skills. This weeds out most scepters right away.
if TemplateFinder(True).search("2_TO_PALADIN_SKILLS", img_stats, roi=self.roi_shop_item_stats, threshold=0.94).valid:
# Has 2 Pally skills, check blessed hammers next
if TemplateFinder(True).search("TO_BLESSED_HAMMERS", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
# Has 2 Pally skills AND Blessed Hammers, check Concentration next
if TemplateFinder(True).search("TO_CONCENTRATION", img_stats, roi=self.roi_shop_item_stats, threshold=0.9).valid:
# Has 2 Pally skills AND Blessed Hammers AND Concentration. We're good! Buy it!
mouse.click(button="right")
Logger.info(f"Item bought!")
self.items_bought += 1
time.sleep(1)
self.items_evaluated += 1
keyboard.send("esc")
# Done with this shopping round
self.reset_shop()
self.run_count += 1
def reset_shop(self):
# We want to walk out the town exit to the top right and come back down to drognan
# This can probably be tweaked but seems to work well enough for now.
# Exit town
self.move_shopper(200, -100, 2.5)
self.move_shopper(-200, 100, 2)
def get_tabs(self):
"""
Sets up which tabs we want to search in
"""
if self.look_for_wand_of_life_tap or self.look_for_wand_of_lower_resist:
self.search_tabs.add(2)
if self.look_for_leaf_runeword_base:
self.search_tabs.add(2)
self.search_tabs.add(3) | 0.47025 | 0.202049 |
import numpy
import h5py
from optparse import OptionParser
import matplotlib
parser = OptionParser()
parser.add_option("--folder", type="string", default='full/pass1/', dest="folder", help="folder of the output data to be plotted")
parser.add_option("--savePlots", action="store_true", dest="savePlots", help="include this flag save plots to files instead of displaying them")
parser.add_option("--scatterFileName", type="string", default='outScatteredVelocity.h5', dest="scatterFileName")
parser.add_option("--figurePrefix", type="string", default='fig', dest="figurePrefix")
options, args = parser.parse_args()
if options.savePlots:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
folder = options.folder
scatterFileName = '%s/%s'%(folder,options.scatterFileName)
# width and height of each figure (inches)
width = 12
height = 10
h5File = h5py.File(scatterFileName, 'r')
x = h5File["x"][...]
y = h5File["y"][...]
resX = h5File["residualX"][...]
resY = h5File["residualY"][...]
h5File.close()
fig = plt.figure(1, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(x, resX, '.k')
plt.xlabel('x')
plt.ylabel('resX')
plt.axis('tight')
fig = plt.figure(2, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(y, resX, '.k')
plt.xlabel('y')
plt.ylabel('resX')
plt.axis('tight')
fig = plt.figure(3, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(x, resY, '.k')
plt.xlabel('x')
plt.ylabel('resY')
plt.axis('tight')
fig = plt.figure(4, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(y, resY, '.k')
plt.xlabel('y')
plt.ylabel('resY')
plt.axis('tight')
plt.draw()
if options.savePlots:
outFileName = '%s/%s_x_resX.png'%(folder,options.figurePrefix)
plt.figure(1)
plt.savefig(outFileName)
outFileName = '%s/%s_y_resX.png'%(folder,options.figurePrefix)
plt.figure(2)
plt.savefig(outFileName)
outFileName = '%s/%s_x_resY.png'%(folder,options.figurePrefix)
plt.figure(3)
plt.savefig(outFileName)
outFileName = '%s/%s_y_resY.png'%(folder,options.figurePrefix)
plt.figure(4)
plt.savefig(outFileName)
else:
plt.show() | tests/syntheticTestScripts/plotResiduals.py | import numpy
import h5py
from optparse import OptionParser
import matplotlib
parser = OptionParser()
parser.add_option("--folder", type="string", default='full/pass1/', dest="folder", help="folder of the output data to be plotted")
parser.add_option("--savePlots", action="store_true", dest="savePlots", help="include this flag save plots to files instead of displaying them")
parser.add_option("--scatterFileName", type="string", default='outScatteredVelocity.h5', dest="scatterFileName")
parser.add_option("--figurePrefix", type="string", default='fig', dest="figurePrefix")
options, args = parser.parse_args()
if options.savePlots:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
folder = options.folder
scatterFileName = '%s/%s'%(folder,options.scatterFileName)
# width and height of each figure (inches)
width = 12
height = 10
h5File = h5py.File(scatterFileName, 'r')
x = h5File["x"][...]
y = h5File["y"][...]
resX = h5File["residualX"][...]
resY = h5File["residualY"][...]
h5File.close()
fig = plt.figure(1, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(x, resX, '.k')
plt.xlabel('x')
plt.ylabel('resX')
plt.axis('tight')
fig = plt.figure(2, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(y, resX, '.k')
plt.xlabel('y')
plt.ylabel('resX')
plt.axis('tight')
fig = plt.figure(3, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(x, resY, '.k')
plt.xlabel('x')
plt.ylabel('resY')
plt.axis('tight')
fig = plt.figure(4, figsize=[width,height])
fig.subplots_adjust(left=0.075, right=0.975, bottom=0.05, top=0.95, wspace=0.2, hspace=0.25)
ax = fig.add_subplot(111, aspect='equal')
plt.plot(y, resY, '.k')
plt.xlabel('y')
plt.ylabel('resY')
plt.axis('tight')
plt.draw()
if options.savePlots:
outFileName = '%s/%s_x_resX.png'%(folder,options.figurePrefix)
plt.figure(1)
plt.savefig(outFileName)
outFileName = '%s/%s_y_resX.png'%(folder,options.figurePrefix)
plt.figure(2)
plt.savefig(outFileName)
outFileName = '%s/%s_x_resY.png'%(folder,options.figurePrefix)
plt.figure(3)
plt.savefig(outFileName)
outFileName = '%s/%s_y_resY.png'%(folder,options.figurePrefix)
plt.figure(4)
plt.savefig(outFileName)
else:
plt.show() | 0.477311 | 0.377455 |
"""Dynamic Data"""
import asyncio
import base64
import random
import uuid
from fastapi import APIRouter
from fastapi import Path
from fastapi import Query
from fastapi.responses import PlainTextResponse
from starlette import status
from starlette.requests import Request
from starlette.responses import JSONResponse
from httpbin.constants import AWESOME_HTTPBIN_BASE64ENCODED
from httpbin.helpers import get_request_attrs
from httpbin.responses import OctetStreamResponse
from httpbin.schemas import RequestDictModel
router = APIRouter()
@router.get(
'/base64/{value}',
response_class=PlainTextResponse,
description='Decodes base64url-encoded string.',
)
async def decode_base64(
value: str = Path(
...,
title='base64-encoded string',
example=AWESOME_HTTPBIN_BASE64ENCODED
)
):
encoded: bytes = value.encode('utf-8')
try:
decoded = base64.urlsafe_b64decode(encoded).decode('utf-8')
return PlainTextResponse(content=decoded)
except Exception as err:
return PlainTextResponse(
content=f'Incorrect Base64 data: {value}, err_msg: {err}',
status_code=status.HTTP_400_BAD_REQUEST
)
@router.get(
'/bytes/{n}',
response_class=OctetStreamResponse,
description='Returns n random bytes generated with given seed'
)
async def random_bytes(
n: int = Path(..., title='binary file size', gt=0),
seed: int = Query(
None,
title='random seed',
description='Initialize the random number generator'
)
):
# set 100KB limit
n = min(n, 100 * 1024)
if seed is not None:
random.seed(seed)
# Note: can't just use os.urandom here because it ignores the seed
# https://docs.python.org/3/library/random.html?highlight=random%20seed#random.seed
content = bytes(random.randint(0, 255) for _ in range(n))
return OctetStreamResponse(content=content)
@router.api_route(
'/delay/{delay}',
methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'],
response_model=RequestDictModel,
description='Returns a delayed response (max of 10 seconds).',
response_description='A delayed response.'
)
async def delay_response(
*,
delay: float = Path(..., ge=0, le=10, description='delay seconds'),
request: Request
):
await asyncio.sleep(delay)
return get_request_attrs(
request,
keys=('url', 'args', 'form', 'data', 'origin', 'headers', 'files')
)
@router.get('/drip')
async def drip():
"""Drips data over a duration after an optional initial delay."""
pass
@router.get('/links/{n}/{offset}')
async def link_page(
*,
n: int = Path(..., ge=1, le=200),
offset: int
):
"""Generate a page containing n links to other pages which do the same."""
pass
@router.get('/range/{numbytes}')
async def range_request(numbytes: int):
"""Streams n random bytes generated with given seed,
at given chunk size per packet."""
pass
@router.get('/stream-bytes/{n}')
async def stream_random_bytes(n: int):
"""Streams n random bytes generated with given seed,
at given chunk size per packet."""
pass
@router.get('/uuid')
async def get_uuid4():
"""Return a UUID4."""
out = {'uuid': str(uuid.uuid4())}
return JSONResponse(content=out) | httpbin/routers/dynamicdata.py | """Dynamic Data"""
import asyncio
import base64
import random
import uuid
from fastapi import APIRouter
from fastapi import Path
from fastapi import Query
from fastapi.responses import PlainTextResponse
from starlette import status
from starlette.requests import Request
from starlette.responses import JSONResponse
from httpbin.constants import AWESOME_HTTPBIN_BASE64ENCODED
from httpbin.helpers import get_request_attrs
from httpbin.responses import OctetStreamResponse
from httpbin.schemas import RequestDictModel
router = APIRouter()
@router.get(
'/base64/{value}',
response_class=PlainTextResponse,
description='Decodes base64url-encoded string.',
)
async def decode_base64(
value: str = Path(
...,
title='base64-encoded string',
example=AWESOME_HTTPBIN_BASE64ENCODED
)
):
encoded: bytes = value.encode('utf-8')
try:
decoded = base64.urlsafe_b64decode(encoded).decode('utf-8')
return PlainTextResponse(content=decoded)
except Exception as err:
return PlainTextResponse(
content=f'Incorrect Base64 data: {value}, err_msg: {err}',
status_code=status.HTTP_400_BAD_REQUEST
)
@router.get(
'/bytes/{n}',
response_class=OctetStreamResponse,
description='Returns n random bytes generated with given seed'
)
async def random_bytes(
n: int = Path(..., title='binary file size', gt=0),
seed: int = Query(
None,
title='random seed',
description='Initialize the random number generator'
)
):
# set 100KB limit
n = min(n, 100 * 1024)
if seed is not None:
random.seed(seed)
# Note: can't just use os.urandom here because it ignores the seed
# https://docs.python.org/3/library/random.html?highlight=random%20seed#random.seed
content = bytes(random.randint(0, 255) for _ in range(n))
return OctetStreamResponse(content=content)
@router.api_route(
'/delay/{delay}',
methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'TRACE'],
response_model=RequestDictModel,
description='Returns a delayed response (max of 10 seconds).',
response_description='A delayed response.'
)
async def delay_response(
*,
delay: float = Path(..., ge=0, le=10, description='delay seconds'),
request: Request
):
await asyncio.sleep(delay)
return get_request_attrs(
request,
keys=('url', 'args', 'form', 'data', 'origin', 'headers', 'files')
)
@router.get('/drip')
async def drip():
"""Drips data over a duration after an optional initial delay."""
pass
@router.get('/links/{n}/{offset}')
async def link_page(
*,
n: int = Path(..., ge=1, le=200),
offset: int
):
"""Generate a page containing n links to other pages which do the same."""
pass
@router.get('/range/{numbytes}')
async def range_request(numbytes: int):
"""Streams n random bytes generated with given seed,
at given chunk size per packet."""
pass
@router.get('/stream-bytes/{n}')
async def stream_random_bytes(n: int):
"""Streams n random bytes generated with given seed,
at given chunk size per packet."""
pass
@router.get('/uuid')
async def get_uuid4():
"""Return a UUID4."""
out = {'uuid': str(uuid.uuid4())}
return JSONResponse(content=out) | 0.763131 | 0.143788 |
import mock
from armada import const
from armada.exceptions import manifest_exceptions
from armada.handlers import helm
from armada.handlers import wait
from armada.tests.unit import base
test_chart = {'wait': {'timeout': 10, 'native': {'enabled': False}}}
class ChartWaitTestCase(base.ArmadaTestCase):
def get_unit(self, chart_data, timeout=None, version=2):
chart = {
'schema': 'armada/Chart/v{}'.format(str(version)),
'metadata': {
'name': 'test'
},
const.KEYWORD_DATA: chart_data
}
return wait.ChartWait(
k8s=mock.MagicMock(),
release_id=helm.HelmReleaseId('test', 'test-test'),
chart=chart,
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1,
timeout=timeout)
def test_get_timeout(self):
unit = self.get_unit({'timeout': 5, 'wait': {'timeout': 10}})
self.assertEquals(unit.get_timeout(), 10)
def test_get_timeout_default(self):
unit = self.get_unit({})
self.assertEquals(unit.get_timeout(), const.DEFAULT_CHART_TIMEOUT)
def test_get_timeout_override(self):
unit = self.get_unit(
timeout=20, chart_data={
'timeout': 5,
'wait': {
'timeout': 10
}
})
self.assertEquals(unit.get_timeout(), 20)
def test_get_timeout_deprecated(self):
unit = self.get_unit({'timeout': 5})
self.assertEquals(unit.get_timeout(), 5)
def test_is_native_enabled_default_false(self):
unit = self.get_unit({})
self.assertEquals(unit.is_native_enabled(), False)
def test_is_native_enabled_true(self):
unit = self.get_unit({'wait': {'native': {'enabled': True}}})
self.assertEquals(unit.is_native_enabled(), True)
def test_is_native_enabled_false(self):
unit = self.get_unit({'wait': {'native': {'enabled': False}}})
self.assertEquals(unit.is_native_enabled(), False)
def test_waits_init(self):
unit = self.get_unit({
'wait': {
'resources': [{
'type': 'pod',
'labels': {
'foo': 'bar'
}
}, {
'type': 'job',
'labels': {
'foo': 'bar'
}
}, {
'type': 'daemonset',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}, {
'type': 'deployment',
'labels': {
'foo': 'bar'
},
'min_ready': '50%'
}, {
'type': 'statefulset',
'labels': {
'foo': 'bar'
}
}]
}
}) # yapf: disable
self.assertEqual(5, len(unit.waits))
self.assertIsInstance(unit.waits[0], wait.PodWait)
self.assertIsInstance(unit.waits[1], wait.JobWait)
self.assertIsInstance(unit.waits[2], wait.DaemonSetWait)
self.assertIsInstance(unit.waits[3], wait.DeploymentWait)
self.assertIsInstance(unit.waits[4], wait.StatefulSetWait)
def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready():
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'pod',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(
manifest_exceptions.ManifestException, create_pod_wait_min_ready)
def create_job_wait_min_ready():
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'job',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(
manifest_exceptions.ManifestException, create_job_wait_min_ready)
def test_waits_init_invalid_type(self):
def create_with_invalid_type():
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'invalid',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(
manifest_exceptions.ManifestException, create_with_invalid_type)
@mock.patch.object(wait.ChartWait, 'get_resource_wait')
def test_wait(self, get_resource_wait):
def return_mock(*args, **kwargs):
return mock.MagicMock()
get_resource_wait.side_effect = return_mock
unit = self.get_unit(
{'wait': {
'resources': [{
'type': 'foo'
}, {
'type': 'bar'
}]
}})
unit.wait(10)
self.assertEqual(2, len(unit.waits))
for w in unit.waits:
w.wait.assert_called_once()
class PodWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels, version=2):
return wait.PodWait(
resource_type='pod',
chart_wait=ChartWaitTestCase.get_unit(None, {}, version=version),
labels=labels)
def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock()
resource.metadata.annotations = annotations
resource.metadata.owner_references = owner_references
return resource
test_pods = [
mock_resource({
'key': 'value',
'helm.sh/hook': 'test'
}),
mock_resource({'helm.sh/hook': 'test-success'}),
mock_resource({'helm.sh/hook': 'test-failure'}),
mock_resource({'helm.sh/hook': 'test,pre-install'}),
]
job_pods = [
mock_resource(owner_references=[mock.Mock(kind='Job')]),
mock_resource(
owner_references=[
mock.Mock(kind='NotAJob'),
mock.Mock(kind='Job')
])
]
included_pods = [
mock_resource(),
mock_resource(owner_references=[]),
mock_resource({'helm.sh/hook': 'pre-install'}),
mock_resource({'key': 'value'}),
mock_resource(owner_references=[mock.Mock(kind='NotAJob')]),
]
evicted_pods = [
mock.Mock(
metadata=mock.Mock(annotations={}, owner_references=None),
status=mock.Mock(phase='Evicted')),
]
unit = self.get_unit({}, version=1)
# Validate test pods excluded
for pod in test_pods:
self.assertFalse(unit.include_resource(pod))
# Validate test pods excluded
for pod in job_pods:
self.assertFalse(unit.include_resource(pod))
# Validate other resources included
for pod in included_pods:
self.assertTrue(unit.include_resource(pod))
# Validate evicted pods are excluded
for pod in evicted_pods:
self.assertFalse(unit.include_resource(pod))
class JobWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels):
return wait.JobWait(
resource_type='job', chart_wait=mock.MagicMock(), labels=labels)
def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock()
resource.metadata.annotations = annotations
resource.metadata.owner_references = owner_references
return resource
cronjob_jobs = [
mock_resource(owner_references=[mock.Mock(kind='CronJob')]),
mock_resource(
owner_references=[
mock.Mock(kind='NotACronJob'),
mock.Mock(kind='CronJob')
])
]
included_jobs = [
mock_resource(),
mock_resource(owner_references=[]),
mock_resource(owner_references=[mock.Mock(kind='NotAJob')])
]
unit = self.get_unit({})
# Validate test pods excluded
for job in cronjob_jobs:
self.assertFalse(unit.include_resource(job))
# Validate other resources included
for job in included_jobs:
self.assertTrue(unit.include_resource(job)) | armada/tests/unit/handlers/test_wait.py |
import mock
from armada import const
from armada.exceptions import manifest_exceptions
from armada.handlers import helm
from armada.handlers import wait
from armada.tests.unit import base
test_chart = {'wait': {'timeout': 10, 'native': {'enabled': False}}}
class ChartWaitTestCase(base.ArmadaTestCase):
def get_unit(self, chart_data, timeout=None, version=2):
chart = {
'schema': 'armada/Chart/v{}'.format(str(version)),
'metadata': {
'name': 'test'
},
const.KEYWORD_DATA: chart_data
}
return wait.ChartWait(
k8s=mock.MagicMock(),
release_id=helm.HelmReleaseId('test', 'test-test'),
chart=chart,
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1,
timeout=timeout)
def test_get_timeout(self):
unit = self.get_unit({'timeout': 5, 'wait': {'timeout': 10}})
self.assertEquals(unit.get_timeout(), 10)
def test_get_timeout_default(self):
unit = self.get_unit({})
self.assertEquals(unit.get_timeout(), const.DEFAULT_CHART_TIMEOUT)
def test_get_timeout_override(self):
unit = self.get_unit(
timeout=20, chart_data={
'timeout': 5,
'wait': {
'timeout': 10
}
})
self.assertEquals(unit.get_timeout(), 20)
def test_get_timeout_deprecated(self):
unit = self.get_unit({'timeout': 5})
self.assertEquals(unit.get_timeout(), 5)
def test_is_native_enabled_default_false(self):
unit = self.get_unit({})
self.assertEquals(unit.is_native_enabled(), False)
def test_is_native_enabled_true(self):
unit = self.get_unit({'wait': {'native': {'enabled': True}}})
self.assertEquals(unit.is_native_enabled(), True)
def test_is_native_enabled_false(self):
unit = self.get_unit({'wait': {'native': {'enabled': False}}})
self.assertEquals(unit.is_native_enabled(), False)
def test_waits_init(self):
unit = self.get_unit({
'wait': {
'resources': [{
'type': 'pod',
'labels': {
'foo': 'bar'
}
}, {
'type': 'job',
'labels': {
'foo': 'bar'
}
}, {
'type': 'daemonset',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}, {
'type': 'deployment',
'labels': {
'foo': 'bar'
},
'min_ready': '50%'
}, {
'type': 'statefulset',
'labels': {
'foo': 'bar'
}
}]
}
}) # yapf: disable
self.assertEqual(5, len(unit.waits))
self.assertIsInstance(unit.waits[0], wait.PodWait)
self.assertIsInstance(unit.waits[1], wait.JobWait)
self.assertIsInstance(unit.waits[2], wait.DaemonSetWait)
self.assertIsInstance(unit.waits[3], wait.DeploymentWait)
self.assertIsInstance(unit.waits[4], wait.StatefulSetWait)
def test_waits_init_min_ready_fails_if_not_controller(self):
def create_pod_wait_min_ready():
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'pod',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(
manifest_exceptions.ManifestException, create_pod_wait_min_ready)
def create_job_wait_min_ready():
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'job',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(
manifest_exceptions.ManifestException, create_job_wait_min_ready)
def test_waits_init_invalid_type(self):
def create_with_invalid_type():
self.get_unit(
{
'wait': {
'resources': [
{
'type': 'invalid',
'labels': {
'foo': 'bar'
},
'min_ready': 5
}
]
}
})
self.assertRaises(
manifest_exceptions.ManifestException, create_with_invalid_type)
@mock.patch.object(wait.ChartWait, 'get_resource_wait')
def test_wait(self, get_resource_wait):
def return_mock(*args, **kwargs):
return mock.MagicMock()
get_resource_wait.side_effect = return_mock
unit = self.get_unit(
{'wait': {
'resources': [{
'type': 'foo'
}, {
'type': 'bar'
}]
}})
unit.wait(10)
self.assertEqual(2, len(unit.waits))
for w in unit.waits:
w.wait.assert_called_once()
class PodWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels, version=2):
return wait.PodWait(
resource_type='pod',
chart_wait=ChartWaitTestCase.get_unit(None, {}, version=version),
labels=labels)
def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock()
resource.metadata.annotations = annotations
resource.metadata.owner_references = owner_references
return resource
test_pods = [
mock_resource({
'key': 'value',
'helm.sh/hook': 'test'
}),
mock_resource({'helm.sh/hook': 'test-success'}),
mock_resource({'helm.sh/hook': 'test-failure'}),
mock_resource({'helm.sh/hook': 'test,pre-install'}),
]
job_pods = [
mock_resource(owner_references=[mock.Mock(kind='Job')]),
mock_resource(
owner_references=[
mock.Mock(kind='NotAJob'),
mock.Mock(kind='Job')
])
]
included_pods = [
mock_resource(),
mock_resource(owner_references=[]),
mock_resource({'helm.sh/hook': 'pre-install'}),
mock_resource({'key': 'value'}),
mock_resource(owner_references=[mock.Mock(kind='NotAJob')]),
]
evicted_pods = [
mock.Mock(
metadata=mock.Mock(annotations={}, owner_references=None),
status=mock.Mock(phase='Evicted')),
]
unit = self.get_unit({}, version=1)
# Validate test pods excluded
for pod in test_pods:
self.assertFalse(unit.include_resource(pod))
# Validate test pods excluded
for pod in job_pods:
self.assertFalse(unit.include_resource(pod))
# Validate other resources included
for pod in included_pods:
self.assertTrue(unit.include_resource(pod))
# Validate evicted pods are excluded
for pod in evicted_pods:
self.assertFalse(unit.include_resource(pod))
class JobWaitTestCase(base.ArmadaTestCase):
def get_unit(self, labels):
return wait.JobWait(
resource_type='job', chart_wait=mock.MagicMock(), labels=labels)
def test_include_resource(self):
def mock_resource(annotations={}, owner_references=None):
resource = mock.Mock()
resource.metadata.annotations = annotations
resource.metadata.owner_references = owner_references
return resource
cronjob_jobs = [
mock_resource(owner_references=[mock.Mock(kind='CronJob')]),
mock_resource(
owner_references=[
mock.Mock(kind='NotACronJob'),
mock.Mock(kind='CronJob')
])
]
included_jobs = [
mock_resource(),
mock_resource(owner_references=[]),
mock_resource(owner_references=[mock.Mock(kind='NotAJob')])
]
unit = self.get_unit({})
# Validate test pods excluded
for job in cronjob_jobs:
self.assertFalse(unit.include_resource(job))
# Validate other resources included
for job in included_jobs:
self.assertTrue(unit.include_resource(job)) | 0.655777 | 0.222594 |
import unittest
import pytest
from pants.option.option_value_container import OptionValueContainerBuilder
from pants.option.ranked_value import Rank, RankedValue
class OptionValueContainerTest(unittest.TestCase):
def test_unknown_values(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.HARDCODED, 1)
o = ob.build()
assert 1 == o.foo
with pytest.raises(AttributeError):
o.bar
def test_value_ranking(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 11)
o = ob.build()
assert 11 == o.foo
assert Rank.CONFIG == o.get_rank("foo")
ob.foo = RankedValue(Rank.HARDCODED, 22)
o = ob.build()
assert 11 == o.foo
assert Rank.CONFIG == o.get_rank("foo")
ob.foo = RankedValue(Rank.ENVIRONMENT, 33)
o = ob.build()
assert 33 == o.foo
assert Rank.ENVIRONMENT == o.get_rank("foo")
ob.foo = RankedValue(Rank.FLAG, 44)
o = ob.build()
assert 44 == o.foo
assert Rank.FLAG == o.get_rank("foo")
def test_is_flagged(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.NONE, 11)
assert not ob.build().is_flagged("foo")
ob.foo = RankedValue(Rank.CONFIG, 11)
assert not ob.build().is_flagged("foo")
ob.foo = RankedValue(Rank.ENVIRONMENT, 11)
assert not ob.build().is_flagged("foo")
ob.foo = RankedValue(Rank.FLAG, 11)
assert ob.build().is_flagged("foo")
def test_indexing(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 1)
o = ob.build()
assert 1 == o["foo"]
assert 1 == o.get("foo")
assert 1 == o.get("foo", 2)
assert o.get("unknown") is None
assert 2 == o.get("unknown", 2)
with pytest.raises(AttributeError):
o["bar"]
def test_iterator(self) -> None:
ob = OptionValueContainerBuilder()
ob.a = RankedValue(Rank.FLAG, 3)
ob.b = RankedValue(Rank.FLAG, 2)
ob.c = RankedValue(Rank.FLAG, 1)
o = ob.build()
names = list(iter(o))
assert ["a", "b", "c"] == names
def test_copy(self) -> None:
# copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.FLAG, 1)
ob.bar = RankedValue(Rank.FLAG, {"a": 111})
p = ob.build()
z = ob.build()
# Verify that the result is in fact a copy.
assert 1 == p.foo # Has original attribute.
ob.baz = RankedValue(Rank.FLAG, 42)
assert not hasattr(p, "baz") # Does not have attribute added after the copy.
# Verify that it's a shallow copy by modifying a referent in o and reading it in p.
p.bar["b"] = 222
assert {"a": 111, "b": 222} == z.bar | src/python/pants/option/option_value_container_test.py |
import unittest
import pytest
from pants.option.option_value_container import OptionValueContainerBuilder
from pants.option.ranked_value import Rank, RankedValue
class OptionValueContainerTest(unittest.TestCase):
def test_unknown_values(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.HARDCODED, 1)
o = ob.build()
assert 1 == o.foo
with pytest.raises(AttributeError):
o.bar
def test_value_ranking(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 11)
o = ob.build()
assert 11 == o.foo
assert Rank.CONFIG == o.get_rank("foo")
ob.foo = RankedValue(Rank.HARDCODED, 22)
o = ob.build()
assert 11 == o.foo
assert Rank.CONFIG == o.get_rank("foo")
ob.foo = RankedValue(Rank.ENVIRONMENT, 33)
o = ob.build()
assert 33 == o.foo
assert Rank.ENVIRONMENT == o.get_rank("foo")
ob.foo = RankedValue(Rank.FLAG, 44)
o = ob.build()
assert 44 == o.foo
assert Rank.FLAG == o.get_rank("foo")
def test_is_flagged(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.NONE, 11)
assert not ob.build().is_flagged("foo")
ob.foo = RankedValue(Rank.CONFIG, 11)
assert not ob.build().is_flagged("foo")
ob.foo = RankedValue(Rank.ENVIRONMENT, 11)
assert not ob.build().is_flagged("foo")
ob.foo = RankedValue(Rank.FLAG, 11)
assert ob.build().is_flagged("foo")
def test_indexing(self) -> None:
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.CONFIG, 1)
o = ob.build()
assert 1 == o["foo"]
assert 1 == o.get("foo")
assert 1 == o.get("foo", 2)
assert o.get("unknown") is None
assert 2 == o.get("unknown", 2)
with pytest.raises(AttributeError):
o["bar"]
def test_iterator(self) -> None:
ob = OptionValueContainerBuilder()
ob.a = RankedValue(Rank.FLAG, 3)
ob.b = RankedValue(Rank.FLAG, 2)
ob.c = RankedValue(Rank.FLAG, 1)
o = ob.build()
names = list(iter(o))
assert ["a", "b", "c"] == names
def test_copy(self) -> None:
# copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
ob = OptionValueContainerBuilder()
ob.foo = RankedValue(Rank.FLAG, 1)
ob.bar = RankedValue(Rank.FLAG, {"a": 111})
p = ob.build()
z = ob.build()
# Verify that the result is in fact a copy.
assert 1 == p.foo # Has original attribute.
ob.baz = RankedValue(Rank.FLAG, 42)
assert not hasattr(p, "baz") # Does not have attribute added after the copy.
# Verify that it's a shallow copy by modifying a referent in o and reading it in p.
p.bar["b"] = 222
assert {"a": 111, "b": 222} == z.bar | 0.703957 | 0.492798 |
import subprocess
import re
from enum import Enum
class SolverQueryResult(Enum):
"""
Enum to store the result of a single solver query through "(check-sat)" query.
"""
SAT = 0 # solver query reports SAT
UNSAT = 1 # solver query reports UNSAT
UNKNOWN = 2 # solver query reports UNKNOWN
def sr2str(sol_res):
if sol_res == SolverQueryResult.SAT: return "sat"
if sol_res == SolverQueryResult.UNSAT: return "unsat"
if sol_res == SolverQueryResult.UNKNOWN: return "unknown"
class SolverResult:
"""
Class to store the result of multiple solver querys throught "(check-sat)" query.
:lst a list of multiple "SolverQueryResult" items
"""
def __init__(self, result=None):
self.lst = []
if result != None: self.lst.append(result)
def append(self, result):
self.lst.append(result)
def equals(self, rhs):
if type(rhs) == SolverQueryResult:
return len(self.lst) == 1 and self.lst[0] == rhs
elif type(rhs) == SolverResult:
if len(self.lst) != len(rhs.lst): return False
for index in range(0,len(self.lst)):
if self.lst[index] != SolverQueryResult.UNKNOWN and \
rhs.lst[index] != SolverQueryResult.UNKNOWN and \
self.lst[index] != rhs.lst[index]:
return False
return True
else:
return False
def __str__(self):
s = sr2str(self.lst[0])
for res in self.lst[1:]:
s+= "\n" + sr2str(res)
return s
class Solver:
def __init__ (self, cil):
self.cil = cil
def solve(self, file, timeout, debug=False):
try:
cmd = list(filter(None, self.cil.split(" "))) + [file]
if debug:
print(" ".join(cmd), flush=True)
output = subprocess.run(cmd, timeout=timeout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
except subprocess.TimeoutExpired as te:
if te.stdout != None and te.stderr != None:
stdout = te.stdout.decode()
stderr = te.stderr.decode()
else:
stdout = ""
stderr = ""
return stdout, stderr, 137
except KeyboardInterrupt:
print("Accepted keyboard interrupt. Stop.", end="\r", flush=True)
exit(0)
except ValueError as e:
print("Subprocess bug.")
stdout = ""
stderr = ""
return stdout, stderr, 0
except Exception as e:
print("Exception rises when running solver:")
print(e, '\n')
exit(1)
stdout = output.stdout.decode()
stderr = output.stderr.decode()
returncode = output.returncode
if debug:
print(stdout+"\n"+stderr)
return stdout, stderr, returncode | src/modules/Solver.py | import subprocess
import re
from enum import Enum
class SolverQueryResult(Enum):
"""
Enum to store the result of a single solver query through "(check-sat)" query.
"""
SAT = 0 # solver query reports SAT
UNSAT = 1 # solver query reports UNSAT
UNKNOWN = 2 # solver query reports UNKNOWN
def sr2str(sol_res):
if sol_res == SolverQueryResult.SAT: return "sat"
if sol_res == SolverQueryResult.UNSAT: return "unsat"
if sol_res == SolverQueryResult.UNKNOWN: return "unknown"
class SolverResult:
"""
Class to store the result of multiple solver querys throught "(check-sat)" query.
:lst a list of multiple "SolverQueryResult" items
"""
def __init__(self, result=None):
self.lst = []
if result != None: self.lst.append(result)
def append(self, result):
self.lst.append(result)
def equals(self, rhs):
if type(rhs) == SolverQueryResult:
return len(self.lst) == 1 and self.lst[0] == rhs
elif type(rhs) == SolverResult:
if len(self.lst) != len(rhs.lst): return False
for index in range(0,len(self.lst)):
if self.lst[index] != SolverQueryResult.UNKNOWN and \
rhs.lst[index] != SolverQueryResult.UNKNOWN and \
self.lst[index] != rhs.lst[index]:
return False
return True
else:
return False
def __str__(self):
s = sr2str(self.lst[0])
for res in self.lst[1:]:
s+= "\n" + sr2str(res)
return s
class Solver:
def __init__ (self, cil):
self.cil = cil
def solve(self, file, timeout, debug=False):
try:
cmd = list(filter(None, self.cil.split(" "))) + [file]
if debug:
print(" ".join(cmd), flush=True)
output = subprocess.run(cmd, timeout=timeout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
except subprocess.TimeoutExpired as te:
if te.stdout != None and te.stderr != None:
stdout = te.stdout.decode()
stderr = te.stderr.decode()
else:
stdout = ""
stderr = ""
return stdout, stderr, 137
except KeyboardInterrupt:
print("Accepted keyboard interrupt. Stop.", end="\r", flush=True)
exit(0)
except ValueError as e:
print("Subprocess bug.")
stdout = ""
stderr = ""
return stdout, stderr, 0
except Exception as e:
print("Exception rises when running solver:")
print(e, '\n')
exit(1)
stdout = output.stdout.decode()
stderr = output.stderr.decode()
returncode = output.returncode
if debug:
print(stdout+"\n"+stderr)
return stdout, stderr, returncode | 0.524882 | 0.337968 |
import requests_mock
import json
from unittest.mock import patch
from iconsdk.builder.transaction_builder import DepositTransactionBuilder
from iconsdk.signed_transaction import SignedTransaction
from iconsdk.utils.validation import is_T_HASH
from tests.api_send.test_send_super import TestSendSuper
from tests.example_config import BASE_DOMAIN_URL_V3_FOR_TEST
@patch('iconsdk.providers.http_provider.HTTPProvider._make_id', return_value=1234)
class TestSendDeposit(TestSendSuper):
def test_add_deposit(self, _make_id):
# transaction instance for add action
action = "add"
deposit_transaction = DepositTransactionBuilder() \
.from_(self.setting["from"]) \
.to(self.setting["to"]) \
.value(self.setting["value"]) \
.timestamp(self.setting["timestamp"]) \
.step_limit(self.setting["step_limit"]) \
.nid(self.setting["nid"]) \
.nonce(self.setting["nonce"]) \
.action("add") \
.build()
signed_transaction = SignedTransaction(deposit_transaction, self.wallet)
with requests_mock.Mocker() as m:
tx_hash = "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"
expected_request = {
'id': 1234,
'jsonrpc': '2.0',
'method': 'icx_sendTransaction',
'params': {
'data': {
'action': action
},
'dataType': 'deposit',
'from': self.setting["from"],
'nid': hex(self.setting["nid"]),
'nonce': hex(self.setting["nonce"]),
'signature': signed_transaction.signed_transaction_dict["signature"],
'stepLimit': hex(self.setting["step_limit"]),
'timestamp': hex(self.setting["timestamp"]),
'to': self.setting["to"],
'value': hex(self.setting["value"]),
'version': hex(3)
}
}
response_json = {
"jsonrpc": "2.0",
"result": tx_hash,
"id": 1234
}
m.post(f"{BASE_DOMAIN_URL_V3_FOR_TEST}/api/v3/", json=response_json)
result = self.icon_service.send_transaction(signed_transaction)
actual_request = json.loads(m._adapter.last_request.text)
self.assertEqual(expected_request, actual_request)
self.assertTrue(result)
# Checks if sending transaction correctly
# signed_transaction_dict = SignedTransaction(deposit_transaction_of_add_0, self.wallet)
# result = self.icon_service.send_transaction(signed_transaction_dict)
# self.assertTrue(is_T_HASH(result))
def test_withdraw_deposit(self, _make_id):
# transaction instance for withdraw action
action = 'withdraw'
withdraw_transaction = DepositTransactionBuilder() \
.from_(self.setting["from"]) \
.to(self.setting["to"]) \
.step_limit(self.setting["step_limit"]) \
.timestamp(self.setting["timestamp"]) \
.nid(self.setting["nid"]) \
.nonce(self.setting["nonce"]) \
.id(self.setting["id"]) \
.action(action) \
.build()
signed_transaction = SignedTransaction(withdraw_transaction, self.wallet)
with requests_mock.Mocker() as m:
tx_hash = "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"
expected_request = {
'id': 1234,
'jsonrpc': '2.0',
'method': 'icx_sendTransaction',
'params': {
'data': {
'action': action,
'id': self.setting["id"],
},
'dataType': 'deposit',
'from': self.setting["from"],
'nid': hex(self.setting["nid"]),
'nonce': hex(self.setting["nonce"]),
'signature': signed_transaction.signed_transaction_dict["signature"],
'stepLimit': hex(self.setting["step_limit"]),
'timestamp': hex(self.setting["timestamp"]),
'to': self.setting["to"],
'version': hex(3)
}
}
response_json = {
"jsonrpc": "2.0",
"result": tx_hash,
"id": 1234
}
# Checks if sending transaction correctly
m.post(f"{BASE_DOMAIN_URL_V3_FOR_TEST}/api/v3/", json=response_json)
result = self.icon_service.send_transaction(signed_transaction)
self.assertTrue(is_T_HASH(result))
actual_request = json.loads(m._adapter.last_request.text)
self.assertEqual(expected_request, actual_request) | tests/api_send/test_send_deposit.py | import requests_mock
import json
from unittest.mock import patch
from iconsdk.builder.transaction_builder import DepositTransactionBuilder
from iconsdk.signed_transaction import SignedTransaction
from iconsdk.utils.validation import is_T_HASH
from tests.api_send.test_send_super import TestSendSuper
from tests.example_config import BASE_DOMAIN_URL_V3_FOR_TEST
@patch('iconsdk.providers.http_provider.HTTPProvider._make_id', return_value=1234)
class TestSendDeposit(TestSendSuper):
def test_add_deposit(self, _make_id):
# transaction instance for add action
action = "add"
deposit_transaction = DepositTransactionBuilder() \
.from_(self.setting["from"]) \
.to(self.setting["to"]) \
.value(self.setting["value"]) \
.timestamp(self.setting["timestamp"]) \
.step_limit(self.setting["step_limit"]) \
.nid(self.setting["nid"]) \
.nonce(self.setting["nonce"]) \
.action("add") \
.build()
signed_transaction = SignedTransaction(deposit_transaction, self.wallet)
with requests_mock.Mocker() as m:
tx_hash = "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"
expected_request = {
'id': 1234,
'jsonrpc': '2.0',
'method': 'icx_sendTransaction',
'params': {
'data': {
'action': action
},
'dataType': 'deposit',
'from': self.setting["from"],
'nid': hex(self.setting["nid"]),
'nonce': hex(self.setting["nonce"]),
'signature': signed_transaction.signed_transaction_dict["signature"],
'stepLimit': hex(self.setting["step_limit"]),
'timestamp': hex(self.setting["timestamp"]),
'to': self.setting["to"],
'value': hex(self.setting["value"]),
'version': hex(3)
}
}
response_json = {
"jsonrpc": "2.0",
"result": tx_hash,
"id": 1234
}
m.post(f"{BASE_DOMAIN_URL_V3_FOR_TEST}/api/v3/", json=response_json)
result = self.icon_service.send_transaction(signed_transaction)
actual_request = json.loads(m._adapter.last_request.text)
self.assertEqual(expected_request, actual_request)
self.assertTrue(result)
# Checks if sending transaction correctly
# signed_transaction_dict = SignedTransaction(deposit_transaction_of_add_0, self.wallet)
# result = self.icon_service.send_transaction(signed_transaction_dict)
# self.assertTrue(is_T_HASH(result))
def test_withdraw_deposit(self, _make_id):
# transaction instance for withdraw action
action = 'withdraw'
withdraw_transaction = DepositTransactionBuilder() \
.from_(self.setting["from"]) \
.to(self.setting["to"]) \
.step_limit(self.setting["step_limit"]) \
.timestamp(self.setting["timestamp"]) \
.nid(self.setting["nid"]) \
.nonce(self.setting["nonce"]) \
.id(self.setting["id"]) \
.action(action) \
.build()
signed_transaction = SignedTransaction(withdraw_transaction, self.wallet)
with requests_mock.Mocker() as m:
tx_hash = "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"
expected_request = {
'id': 1234,
'jsonrpc': '2.0',
'method': 'icx_sendTransaction',
'params': {
'data': {
'action': action,
'id': self.setting["id"],
},
'dataType': 'deposit',
'from': self.setting["from"],
'nid': hex(self.setting["nid"]),
'nonce': hex(self.setting["nonce"]),
'signature': signed_transaction.signed_transaction_dict["signature"],
'stepLimit': hex(self.setting["step_limit"]),
'timestamp': hex(self.setting["timestamp"]),
'to': self.setting["to"],
'version': hex(3)
}
}
response_json = {
"jsonrpc": "2.0",
"result": tx_hash,
"id": 1234
}
# Checks if sending transaction correctly
m.post(f"{BASE_DOMAIN_URL_V3_FOR_TEST}/api/v3/", json=response_json)
result = self.icon_service.send_transaction(signed_transaction)
self.assertTrue(is_T_HASH(result))
actual_request = json.loads(m._adapter.last_request.text)
self.assertEqual(expected_request, actual_request) | 0.631708 | 0.234988 |
# Standard Imports
from mysql.connector import connect, errorcode, Error
import pandas as pd
from .privatekeys import config
import os
def get_weatherData():
"""
API to query data for all available weather data from the database
Input: None
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory")
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def get_weatherData_byCount(n):
"""
API to query data for last 'n' records from available weather data from the database
Input: n (Int: count)
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory order by id desc limit {}".format(n))
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def get_weatherData_byYear(n):
"""
API to query data for particular year from available weather data from the database
Input: n (String: year value)
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory where reading_time like '{}%'".format(n))
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def get_weatherData_bySummary(summary):
"""
API to query all available weather data for defined summary (for eg. Clear, Foggy etc.)
Input: summary
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory where summary={}".format(summary))
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def add_weatherData(reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure):
"""
API to add new weather data to the database
Input: reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure
Output: None
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("INSERT INTO weatherHistory(reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}')".format(reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure))
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.commit()
cnx.close() | db/connection.py | # Standard Imports
from mysql.connector import connect, errorcode, Error
import pandas as pd
from .privatekeys import config
import os
def get_weatherData():
"""
API to query data for all available weather data from the database
Input: None
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory")
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def get_weatherData_byCount(n):
"""
API to query data for last 'n' records from available weather data from the database
Input: n (Int: count)
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory order by id desc limit {}".format(n))
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def get_weatherData_byYear(n):
"""
API to query data for particular year from available weather data from the database
Input: n (String: year value)
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory where reading_time like '{}%'".format(n))
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def get_weatherData_bySummary(summary):
"""
API to query all available weather data for defined summary (for eg. Clear, Foggy etc.)
Input: summary
Output: Pandas Dataframe
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("select * from weatherHistory where summary={}".format(summary))
records = cursor.fetchall()
df = pd.DataFrame(records)
df.columns = cursor.column_names
return df
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.close()
def add_weatherData(reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure):
"""
API to add new weather data to the database
Input: reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure
Output: None
"""
try:
cnx = connect(**config)
cursor = cnx.cursor()
cursor.execute("INSERT INTO weatherHistory(reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure) VALUES ('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}')".format(reading_time, summary, precip_type, temperature, apparent_temperature, humidity, wind_speed, wind_bearing, visibility, pressure))
except Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cnx.commit()
cnx.close() | 0.403802 | 0.188137 |
import setuptools
from PyTrinamic.version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="PyTrinamic",
version=__version__,
author="ED, LK, LH, JM, ..",
author_email="<EMAIL>",
description="TRINAMIC's Python Technology Access Package.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/trinamic/PyTrinamic",
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=[
"python-can>=3,<4",
"canopen",
"pyserial>=3"
],
py_modules=[
"PyTrinamic/connections/connection_interface",
"PyTrinamic/connections/ConnectionManager",
"PyTrinamic/connections/dummy_tmcl_interface",
"PyTrinamic/connections/pcan_tmcl_interface",
"PyTrinamic/connections/socketcan_tmcl_interface",
"PyTrinamic/connections/serial_tmcl_interface",
"PyTrinamic/connections/tmcl_interface",
"PyTrinamic/connections/uart_ic_interface",
"PyTrinamic/connections/usb_tmcl_interface",
"PyTrinamic/connections/CANopen_interface",
"PyTrinamic/connections/pcan_CANopen_interface",
"PyTrinamic/evalboards/eval_interface",
"PyTrinamic/evalboards/TMC2041_eval",
"PyTrinamic/evalboards/TMC2100_eval",
"PyTrinamic/evalboards/TMC2130_eval",
"PyTrinamic/evalboards/TMC2160_eval",
"PyTrinamic/evalboards/TMC2208_eval",
"PyTrinamic/evalboards/TMC2209_eval",
"PyTrinamic/evalboards/TMC2224_eval",
"PyTrinamic/evalboards/TMC2225_eval",
"PyTrinamic/evalboards/TMC2300_eval",
"PyTrinamic/evalboards/TMC2590_eval",
"PyTrinamic/evalboards/TMC2660_eval",
"PyTrinamic/evalboards/TMC4361_eval",
"PyTrinamic/evalboards/TMC4671_eval",
"PyTrinamic/evalboards/TMC5031_eval",
"PyTrinamic/evalboards/TMC5041_eval",
"PyTrinamic/evalboards/TMC5062_eval",
"PyTrinamic/evalboards/TMC5072_eval",
"PyTrinamic/evalboards/TMC5130_eval",
"PyTrinamic/evalboards/TMC5160_eval",
"PyTrinamic/evalboards/TMC5160_shield",
"PyTrinamic/evalboards/TMC5161_eval",
"PyTrinamic/evalboards/TMC6100_eval",
"PyTrinamic/evalboards/TMC6200_eval",
"PyTrinamic/evalboards/TMC7300_eval",
"PyTrinamic/ic/ic_interface",
"PyTrinamic/ic/TMC2041/TMC2041_fields",
"PyTrinamic/ic/TMC2041/TMC2041_register_variant",
"PyTrinamic/ic/TMC2041/TMC2041_register",
"PyTrinamic/ic/TMC2041/TMC2041",
"PyTrinamic/ic/TMC2100/TMC2100_fields",
"PyTrinamic/ic/TMC2100/TMC2100_register_variant",
"PyTrinamic/ic/TMC2100/TMC2100_register",
"PyTrinamic/ic/TMC2100/TMC2100",
"PyTrinamic/ic/TMC2130/TMC2130_fields",
"PyTrinamic/ic/TMC2130/TMC2130_register_variant",
"PyTrinamic/ic/TMC2130/TMC2130_register",
"PyTrinamic/ic/TMC2130/TMC2130",
"PyTrinamic/ic/TMC2160/TMC2160_fields",
"PyTrinamic/ic/TMC2160/TMC2160_register_variant",
"PyTrinamic/ic/TMC2160/TMC2160_register",
"PyTrinamic/ic/TMC2160/TMC2160",
"PyTrinamic/ic/TMC2208/TMC2208_fields",
"PyTrinamic/ic/TMC2208/TMC2208_register_variant",
"PyTrinamic/ic/TMC2208/TMC2208_register",
"PyTrinamic/ic/TMC2208/TMC2208",
"PyTrinamic/ic/TMC2209/TMC2209_fields",
"PyTrinamic/ic/TMC2209/TMC2209_register_variant",
"PyTrinamic/ic/TMC2209/TMC2209_register",
"PyTrinamic/ic/TMC2209/TMC2209",
"PyTrinamic/ic/TMC2224/TMC2224_fields",
"PyTrinamic/ic/TMC2224/TMC2224_register_variant",
"PyTrinamic/ic/TMC2224/TMC2224_register",
"PyTrinamic/ic/TMC2224/TMC2224",
"PyTrinamic/ic/TMC2225/TMC2225_fields",
"PyTrinamic/ic/TMC2225/TMC2225_register_variant",
"PyTrinamic/ic/TMC2225/TMC2225_register",
"PyTrinamic/ic/TMC2225/TMC2225",
"PyTrinamic/ic/TMC2300/TMC2300_fields",
"PyTrinamic/ic/TMC2300/TMC2300_register_variant",
"PyTrinamic/ic/TMC2300/TMC2300_register",
"PyTrinamic/ic/TMC2300/TMC2300",
"PyTrinamic/ic/TMC2590/TMC2590_fields",
"PyTrinamic/ic/TMC2590/TMC2590_register_variant",
"PyTrinamic/ic/TMC2590/TMC2590_register",
"PyTrinamic/ic/TMC2590/TMC2590",
"PyTrinamic/ic/TMC2660/TMC2660_fields",
"PyTrinamic/ic/TMC2660/TMC2660_register_variant",
"PyTrinamic/ic/TMC2660/TMC2660_register",
"PyTrinamic/ic/TMC2660/TMC2660",
"PyTrinamic/ic/TMC4330/TMC4330_fields",
"PyTrinamic/ic/TMC4330/TMC4330_register_variant",
"PyTrinamic/ic/TMC4330/TMC4330_register",
"PyTrinamic/ic/TMC4330/TMC4330",
"PyTrinamic/ic/TMC4331/TMC4331_fields",
"PyTrinamic/ic/TMC4331/TMC4331_register_variant",
"PyTrinamic/ic/TMC4331/TMC4331_register",
"PyTrinamic/ic/TMC4331/TMC4331",
"PyTrinamic/ic/TMC4361/TMC4361_fields",
"PyTrinamic/ic/TMC4361/TMC4361_register_variant",
"PyTrinamic/ic/TMC4361/TMC4361_register",
"PyTrinamic/ic/TMC4361/TMC4361",
"PyTrinamic/ic/TMC4671/TMC4671_fields",
"PyTrinamic/ic/TMC4671/TMC4671_register_variant",
"PyTrinamic/ic/TMC4671/TMC4671_register",
"PyTrinamic/ic/TMC4671/TMC4671",
"PyTrinamic/ic/TMC5031/TMC5031_fields",
"PyTrinamic/ic/TMC5031/TMC5031_register_variant",
"PyTrinamic/ic/TMC5031/TMC5031_register",
"PyTrinamic/ic/TMC5031/TMC5031",
"PyTrinamic/ic/TMC5041/TMC5041_fields",
"PyTrinamic/ic/TMC5041/TMC5041_register_variant",
"PyTrinamic/ic/TMC5041/TMC5041_register",
"PyTrinamic/ic/TMC5041/TMC5041",
"PyTrinamic/ic/TMC5062/TMC5062_fields",
"PyTrinamic/ic/TMC5062/TMC5062_register_variant",
"PyTrinamic/ic/TMC5062/TMC5062_register",
"PyTrinamic/ic/TMC5062/TMC5062",
"PyTrinamic/ic/TMC5072/TMC5072_fields",
"PyTrinamic/ic/TMC5072/TMC5072_register_variant",
"PyTrinamic/ic/TMC5072/TMC5072_register",
"PyTrinamic/ic/TMC5072/TMC5072",
"PyTrinamic/ic/TMC5130/TMC5130_fields",
"PyTrinamic/ic/TMC5130/TMC5130_register_variant",
"PyTrinamic/ic/TMC5130/TMC5130_register",
"PyTrinamic/ic/TMC5130/TMC5130",
"PyTrinamic/ic/TMC5160/TMC5160_fields",
"PyTrinamic/ic/TMC5160/TMC5160_register_variant",
"PyTrinamic/ic/TMC5160/TMC5160_register",
"PyTrinamic/ic/TMC5160/TMC5160",
"PyTrinamic/ic/TMC5161/TMC5161_fields",
"PyTrinamic/ic/TMC5161/TMC5161_register_variant",
"PyTrinamic/ic/TMC5161/TMC5161_register",
"PyTrinamic/ic/TMC5161/TMC5161",
"PyTrinamic/ic/TMC6100/TMC6100_fields",
"PyTrinamic/ic/TMC6100/TMC6100_register_variant",
"PyTrinamic/ic/TMC6100/TMC6100_register",
"PyTrinamic/ic/TMC6100/TMC6100",
"PyTrinamic/ic/TMC6200/TMC6200_fields",
"PyTrinamic/ic/TMC6200/TMC6200_register_variant",
"PyTrinamic/ic/TMC6200/TMC6200_register",
"PyTrinamic/ic/TMC6200/TMC6200",
"PyTrinamic/ic/TMC7300/TMC7300_fields",
"PyTrinamic/ic/TMC7300/TMC7300_register_variant",
"PyTrinamic/ic/TMC7300/TMC7300_register",
"PyTrinamic/ic/TMC7300/TMC7300",
"PyTrinamic/modules/TMC_EvalShield",
"PyTrinamic/modules/TMC603/TMC_603",
"PyTrinamic/modules/TMCC160/TMCC_160",
"PyTrinamic/modules/TMCM0010OPC/TMCM_0010_OPC",
"PyTrinamic/modules/TMCM1160/TMCM_1160",
"PyTrinamic/modules/TMCM1161/TMCM_1161",
"PyTrinamic/modules/TMCM1270/TMCM_1270",
"PyTrinamic/modules/TMCM1276/TMCM_1276",
"PyTrinamic/modules/TMCM1617/TMCM_1617",
"PyTrinamic/modules/TMCM1630/TMCM_1630",
"PyTrinamic/modules/TMCM1633/TMCM_1633",
"PyTrinamic/modules/TMCM1636/TMCM_1636",
"PyTrinamic/modules/TMCM1640/TMCM_1640",
"PyTrinamic/modules/TMCM1670/TMCM_1670",
"PyTrinamic/modules/TMCM6212/TMCM_6212",
"PyTrinamic/helpers",
"PyTrinamic/version",
"PyTrinamic/features/Feature",
"PyTrinamic/features/StallGuard",
"PyTrinamic/features/CoolStep",
],
scripts=[
"PyTrinamic/examples/evalboards/TMC2041/TMC2041_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2041/TMC2041_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2100/TMC2100_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2100/TMC2100_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2130/TMC2130_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2130/TMC2130_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2160/TMC2160_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2160/TMC2160_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2208/TMC2208_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2208/TMC2208_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2209/TMC2209_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2209/TMC2209_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2224/TMC2224_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2224/TMC2224_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2225/TMC2225_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2225/TMC2225_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2300/TMC2300_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2300/TMC2300_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2590/TMC2590_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2590/TMC2590_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2660/TMC2660_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2660/TMC2660_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4331/TMC4331_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4331/TMC4331_eval_TMC2130_eval_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4330/TMC4330_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4330/TMC4330_eval_TMC2160_eval_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4361/TMC4361_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4361/TMC4361_eval_TMC2660_eval_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder_offset_estimation.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_open_loop.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6100_eval_BLDC_ABN_encoder.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6100_eval_BLDC_open_loop.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6200_eval_BLDC_ABN_encoder.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6200_eval_BLDC_open_loop.py",
"PyTrinamic/examples/evalboards/TMC5031/TMC5031_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5031/TMC5031_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5031/TMC5031_stallGuardDemo.py",
"PyTrinamic/examples/evalboards/TMC5041/TMC5041_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5041/TMC5041_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5041/TMC5041_stallGuardDemo.py",
"PyTrinamic/examples/evalboards/TMC5062/TMC5062_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5062/TMC5062_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5072/TMC5072_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5072/TMC5072_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5130/TMC5130_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5130/TMC5130_MicroStep.py",
"PyTrinamic/examples/evalboards/TMC5160/TMC5160_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5160/TMC5160_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_shield_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_coolStep_demo.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_stallGuard_demo.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_coolStep_demo_min.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_stallGuard_demo_min.py",
"PyTrinamic/examples/evalboards/TMC5161/TMC5161_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5161/TMC5161_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC7300/TMC7300_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC7300/TMC7300_rotateDemo.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_encoder_analog_input_test.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCC_160/TMCC_160_TMCL_foc_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCC_160/TMCC_160_TMCL_foc_hall_digital_analog_input_test.py",
"PyTrinamic/examples/modules/TMCM_0010_OPC/TMCM_0010_TMCL_OPC_config_check.py",
"PyTrinamic/examples/modules/TMCM_0010_OPC/TMCM_0010_TMCL_OPC_config_update.py",
"PyTrinamic/examples/modules/TMCM_1160/TMCM_1160_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1160/TMCM_1160_CANopen_PP_Mode.py",
"PyTrinamic/examples/modules/TMCM_1160/TMCM_1160_CANopen_PV_Mode.py",
"PyTrinamic/examples/modules/TMCM_1161/TMCM_1161_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1270/TMCM_1270_CANopen_PP_Mode.py",
"PyTrinamic/examples/modules/TMCM_1270/TMCM_1270_CANopen_PV_Mode.py",
"PyTrinamic/examples/modules/TMCM_1270/TMCM_1270_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1276/TMCM_1276_CANopen_PP_Mode.py",
"PyTrinamic/examples/modules/TMCM_1276/TMCM_1276_CANopen_PV_Mode.py",
"PyTrinamic/examples/modules/TMCM_1276/TMCM_1276_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1617/TMCM_1617_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1617/TMCM_1617_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMCM_1617/TMCM_1617_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_encoder_analog_input_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1633/TMCM_1633_TMCL_encoder_limit_switches.py",
"PyTrinamic/examples/modules/TMCM_1633/TMCM_1633_TMCL_hall_limit_switches.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_position_abn_abs.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_rotate_hall_endstop.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_rotate_hall.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_rotate_openloop.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_encoder_analog_input_test.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1670/TMCM_1670_TMCL_encoder_n_channel.py",
"PyTrinamic/examples/modules/TMCM_1670/TMCM_1670_TMCL_limit_switches.py",
"PyTrinamic/examples/modules/TMCM_1670/TMCM_1670_TMCL_positioning.py",
"PyTrinamic/examples/modules/TMCM_6212/TMCM_6212_TMCL_rotateDemo.py",
"PyTrinamic/examples/tools/FirmwareUpdate.py",
],
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license="MIT",
zip_safe=False,
) | setup.py | import setuptools
from PyTrinamic.version import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="PyTrinamic",
version=__version__,
author="ED, LK, LH, JM, ..",
author_email="<EMAIL>",
description="TRINAMIC's Python Technology Access Package.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/trinamic/PyTrinamic",
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=[
"python-can>=3,<4",
"canopen",
"pyserial>=3"
],
py_modules=[
"PyTrinamic/connections/connection_interface",
"PyTrinamic/connections/ConnectionManager",
"PyTrinamic/connections/dummy_tmcl_interface",
"PyTrinamic/connections/pcan_tmcl_interface",
"PyTrinamic/connections/socketcan_tmcl_interface",
"PyTrinamic/connections/serial_tmcl_interface",
"PyTrinamic/connections/tmcl_interface",
"PyTrinamic/connections/uart_ic_interface",
"PyTrinamic/connections/usb_tmcl_interface",
"PyTrinamic/connections/CANopen_interface",
"PyTrinamic/connections/pcan_CANopen_interface",
"PyTrinamic/evalboards/eval_interface",
"PyTrinamic/evalboards/TMC2041_eval",
"PyTrinamic/evalboards/TMC2100_eval",
"PyTrinamic/evalboards/TMC2130_eval",
"PyTrinamic/evalboards/TMC2160_eval",
"PyTrinamic/evalboards/TMC2208_eval",
"PyTrinamic/evalboards/TMC2209_eval",
"PyTrinamic/evalboards/TMC2224_eval",
"PyTrinamic/evalboards/TMC2225_eval",
"PyTrinamic/evalboards/TMC2300_eval",
"PyTrinamic/evalboards/TMC2590_eval",
"PyTrinamic/evalboards/TMC2660_eval",
"PyTrinamic/evalboards/TMC4361_eval",
"PyTrinamic/evalboards/TMC4671_eval",
"PyTrinamic/evalboards/TMC5031_eval",
"PyTrinamic/evalboards/TMC5041_eval",
"PyTrinamic/evalboards/TMC5062_eval",
"PyTrinamic/evalboards/TMC5072_eval",
"PyTrinamic/evalboards/TMC5130_eval",
"PyTrinamic/evalboards/TMC5160_eval",
"PyTrinamic/evalboards/TMC5160_shield",
"PyTrinamic/evalboards/TMC5161_eval",
"PyTrinamic/evalboards/TMC6100_eval",
"PyTrinamic/evalboards/TMC6200_eval",
"PyTrinamic/evalboards/TMC7300_eval",
"PyTrinamic/ic/ic_interface",
"PyTrinamic/ic/TMC2041/TMC2041_fields",
"PyTrinamic/ic/TMC2041/TMC2041_register_variant",
"PyTrinamic/ic/TMC2041/TMC2041_register",
"PyTrinamic/ic/TMC2041/TMC2041",
"PyTrinamic/ic/TMC2100/TMC2100_fields",
"PyTrinamic/ic/TMC2100/TMC2100_register_variant",
"PyTrinamic/ic/TMC2100/TMC2100_register",
"PyTrinamic/ic/TMC2100/TMC2100",
"PyTrinamic/ic/TMC2130/TMC2130_fields",
"PyTrinamic/ic/TMC2130/TMC2130_register_variant",
"PyTrinamic/ic/TMC2130/TMC2130_register",
"PyTrinamic/ic/TMC2130/TMC2130",
"PyTrinamic/ic/TMC2160/TMC2160_fields",
"PyTrinamic/ic/TMC2160/TMC2160_register_variant",
"PyTrinamic/ic/TMC2160/TMC2160_register",
"PyTrinamic/ic/TMC2160/TMC2160",
"PyTrinamic/ic/TMC2208/TMC2208_fields",
"PyTrinamic/ic/TMC2208/TMC2208_register_variant",
"PyTrinamic/ic/TMC2208/TMC2208_register",
"PyTrinamic/ic/TMC2208/TMC2208",
"PyTrinamic/ic/TMC2209/TMC2209_fields",
"PyTrinamic/ic/TMC2209/TMC2209_register_variant",
"PyTrinamic/ic/TMC2209/TMC2209_register",
"PyTrinamic/ic/TMC2209/TMC2209",
"PyTrinamic/ic/TMC2224/TMC2224_fields",
"PyTrinamic/ic/TMC2224/TMC2224_register_variant",
"PyTrinamic/ic/TMC2224/TMC2224_register",
"PyTrinamic/ic/TMC2224/TMC2224",
"PyTrinamic/ic/TMC2225/TMC2225_fields",
"PyTrinamic/ic/TMC2225/TMC2225_register_variant",
"PyTrinamic/ic/TMC2225/TMC2225_register",
"PyTrinamic/ic/TMC2225/TMC2225",
"PyTrinamic/ic/TMC2300/TMC2300_fields",
"PyTrinamic/ic/TMC2300/TMC2300_register_variant",
"PyTrinamic/ic/TMC2300/TMC2300_register",
"PyTrinamic/ic/TMC2300/TMC2300",
"PyTrinamic/ic/TMC2590/TMC2590_fields",
"PyTrinamic/ic/TMC2590/TMC2590_register_variant",
"PyTrinamic/ic/TMC2590/TMC2590_register",
"PyTrinamic/ic/TMC2590/TMC2590",
"PyTrinamic/ic/TMC2660/TMC2660_fields",
"PyTrinamic/ic/TMC2660/TMC2660_register_variant",
"PyTrinamic/ic/TMC2660/TMC2660_register",
"PyTrinamic/ic/TMC2660/TMC2660",
"PyTrinamic/ic/TMC4330/TMC4330_fields",
"PyTrinamic/ic/TMC4330/TMC4330_register_variant",
"PyTrinamic/ic/TMC4330/TMC4330_register",
"PyTrinamic/ic/TMC4330/TMC4330",
"PyTrinamic/ic/TMC4331/TMC4331_fields",
"PyTrinamic/ic/TMC4331/TMC4331_register_variant",
"PyTrinamic/ic/TMC4331/TMC4331_register",
"PyTrinamic/ic/TMC4331/TMC4331",
"PyTrinamic/ic/TMC4361/TMC4361_fields",
"PyTrinamic/ic/TMC4361/TMC4361_register_variant",
"PyTrinamic/ic/TMC4361/TMC4361_register",
"PyTrinamic/ic/TMC4361/TMC4361",
"PyTrinamic/ic/TMC4671/TMC4671_fields",
"PyTrinamic/ic/TMC4671/TMC4671_register_variant",
"PyTrinamic/ic/TMC4671/TMC4671_register",
"PyTrinamic/ic/TMC4671/TMC4671",
"PyTrinamic/ic/TMC5031/TMC5031_fields",
"PyTrinamic/ic/TMC5031/TMC5031_register_variant",
"PyTrinamic/ic/TMC5031/TMC5031_register",
"PyTrinamic/ic/TMC5031/TMC5031",
"PyTrinamic/ic/TMC5041/TMC5041_fields",
"PyTrinamic/ic/TMC5041/TMC5041_register_variant",
"PyTrinamic/ic/TMC5041/TMC5041_register",
"PyTrinamic/ic/TMC5041/TMC5041",
"PyTrinamic/ic/TMC5062/TMC5062_fields",
"PyTrinamic/ic/TMC5062/TMC5062_register_variant",
"PyTrinamic/ic/TMC5062/TMC5062_register",
"PyTrinamic/ic/TMC5062/TMC5062",
"PyTrinamic/ic/TMC5072/TMC5072_fields",
"PyTrinamic/ic/TMC5072/TMC5072_register_variant",
"PyTrinamic/ic/TMC5072/TMC5072_register",
"PyTrinamic/ic/TMC5072/TMC5072",
"PyTrinamic/ic/TMC5130/TMC5130_fields",
"PyTrinamic/ic/TMC5130/TMC5130_register_variant",
"PyTrinamic/ic/TMC5130/TMC5130_register",
"PyTrinamic/ic/TMC5130/TMC5130",
"PyTrinamic/ic/TMC5160/TMC5160_fields",
"PyTrinamic/ic/TMC5160/TMC5160_register_variant",
"PyTrinamic/ic/TMC5160/TMC5160_register",
"PyTrinamic/ic/TMC5160/TMC5160",
"PyTrinamic/ic/TMC5161/TMC5161_fields",
"PyTrinamic/ic/TMC5161/TMC5161_register_variant",
"PyTrinamic/ic/TMC5161/TMC5161_register",
"PyTrinamic/ic/TMC5161/TMC5161",
"PyTrinamic/ic/TMC6100/TMC6100_fields",
"PyTrinamic/ic/TMC6100/TMC6100_register_variant",
"PyTrinamic/ic/TMC6100/TMC6100_register",
"PyTrinamic/ic/TMC6100/TMC6100",
"PyTrinamic/ic/TMC6200/TMC6200_fields",
"PyTrinamic/ic/TMC6200/TMC6200_register_variant",
"PyTrinamic/ic/TMC6200/TMC6200_register",
"PyTrinamic/ic/TMC6200/TMC6200",
"PyTrinamic/ic/TMC7300/TMC7300_fields",
"PyTrinamic/ic/TMC7300/TMC7300_register_variant",
"PyTrinamic/ic/TMC7300/TMC7300_register",
"PyTrinamic/ic/TMC7300/TMC7300",
"PyTrinamic/modules/TMC_EvalShield",
"PyTrinamic/modules/TMC603/TMC_603",
"PyTrinamic/modules/TMCC160/TMCC_160",
"PyTrinamic/modules/TMCM0010OPC/TMCM_0010_OPC",
"PyTrinamic/modules/TMCM1160/TMCM_1160",
"PyTrinamic/modules/TMCM1161/TMCM_1161",
"PyTrinamic/modules/TMCM1270/TMCM_1270",
"PyTrinamic/modules/TMCM1276/TMCM_1276",
"PyTrinamic/modules/TMCM1617/TMCM_1617",
"PyTrinamic/modules/TMCM1630/TMCM_1630",
"PyTrinamic/modules/TMCM1633/TMCM_1633",
"PyTrinamic/modules/TMCM1636/TMCM_1636",
"PyTrinamic/modules/TMCM1640/TMCM_1640",
"PyTrinamic/modules/TMCM1670/TMCM_1670",
"PyTrinamic/modules/TMCM6212/TMCM_6212",
"PyTrinamic/helpers",
"PyTrinamic/version",
"PyTrinamic/features/Feature",
"PyTrinamic/features/StallGuard",
"PyTrinamic/features/CoolStep",
],
scripts=[
"PyTrinamic/examples/evalboards/TMC2041/TMC2041_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2041/TMC2041_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2100/TMC2100_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2100/TMC2100_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2130/TMC2130_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2130/TMC2130_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2160/TMC2160_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2160/TMC2160_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2208/TMC2208_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2208/TMC2208_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2209/TMC2209_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2209/TMC2209_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2224/TMC2224_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2224/TMC2224_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2225/TMC2225_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2225/TMC2225_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2300/TMC2300_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2300/TMC2300_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2590/TMC2590_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2590/TMC2590_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC2660/TMC2660_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC2660/TMC2660_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4331/TMC4331_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4331/TMC4331_eval_TMC2130_eval_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4330/TMC4330_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4330/TMC4330_eval_TMC2160_eval_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4361/TMC4361_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4361/TMC4361_eval_TMC2660_eval_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder_offset_estimation.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_ABN_encoder.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_BLDC_open_loop.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6100_eval_BLDC_ABN_encoder.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6100_eval_BLDC_open_loop.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6200_eval_BLDC_ABN_encoder.py",
"PyTrinamic/examples/evalboards/TMC4671/TMC4671_eval_TMC6200_eval_BLDC_open_loop.py",
"PyTrinamic/examples/evalboards/TMC5031/TMC5031_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5031/TMC5031_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5031/TMC5031_stallGuardDemo.py",
"PyTrinamic/examples/evalboards/TMC5041/TMC5041_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5041/TMC5041_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5041/TMC5041_stallGuardDemo.py",
"PyTrinamic/examples/evalboards/TMC5062/TMC5062_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5062/TMC5062_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5072/TMC5072_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5072/TMC5072_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5130/TMC5130_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5130/TMC5130_MicroStep.py",
"PyTrinamic/examples/evalboards/TMC5160/TMC5160_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5160/TMC5160_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_shield_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_coolStep_demo.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_stallGuard_demo.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_coolStep_demo_min.py",
"PyTrinamic/examples/evalboards/TMC5160_shield/TMC5160_stallGuard_demo_min.py",
"PyTrinamic/examples/evalboards/TMC5161/TMC5161_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC5161/TMC5161_rotateDemo.py",
"PyTrinamic/examples/evalboards/TMC7300/TMC7300_eval_register_dump.py",
"PyTrinamic/examples/evalboards/TMC7300/TMC7300_rotateDemo.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_encoder_analog_input_test.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMC_603/TMC_603_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCC_160/TMCC_160_TMCL_foc_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCC_160/TMCC_160_TMCL_foc_hall_digital_analog_input_test.py",
"PyTrinamic/examples/modules/TMCM_0010_OPC/TMCM_0010_TMCL_OPC_config_check.py",
"PyTrinamic/examples/modules/TMCM_0010_OPC/TMCM_0010_TMCL_OPC_config_update.py",
"PyTrinamic/examples/modules/TMCM_1160/TMCM_1160_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1160/TMCM_1160_CANopen_PP_Mode.py",
"PyTrinamic/examples/modules/TMCM_1160/TMCM_1160_CANopen_PV_Mode.py",
"PyTrinamic/examples/modules/TMCM_1161/TMCM_1161_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1270/TMCM_1270_CANopen_PP_Mode.py",
"PyTrinamic/examples/modules/TMCM_1270/TMCM_1270_CANopen_PV_Mode.py",
"PyTrinamic/examples/modules/TMCM_1270/TMCM_1270_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1276/TMCM_1276_CANopen_PP_Mode.py",
"PyTrinamic/examples/modules/TMCM_1276/TMCM_1276_CANopen_PV_Mode.py",
"PyTrinamic/examples/modules/TMCM_1276/TMCM_1276_TMCL_rotateDemo.py",
"PyTrinamic/examples/modules/TMCM_1617/TMCM_1617_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1617/TMCM_1617_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMCM_1617/TMCM_1617_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_encoder_analog_input_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMCM_1630/TMCM_1630_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1633/TMCM_1633_TMCL_encoder_limit_switches.py",
"PyTrinamic/examples/modules/TMCM_1633/TMCM_1633_TMCL_hall_limit_switches.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_position_abn_abs.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_rotate_hall_endstop.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_rotate_hall.py",
"PyTrinamic/examples/modules/TMCM_1636/TMCM_1636_TMCL_rotate_openloop.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_encoder_analog_input_test.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_encoder_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_hall_digital_input_test.py",
"PyTrinamic/examples/modules/TMCM_1640/TMCM_1640_TMCL_hall_positioning_test.py",
"PyTrinamic/examples/modules/TMCM_1670/TMCM_1670_TMCL_encoder_n_channel.py",
"PyTrinamic/examples/modules/TMCM_1670/TMCM_1670_TMCL_limit_switches.py",
"PyTrinamic/examples/modules/TMCM_1670/TMCM_1670_TMCL_positioning.py",
"PyTrinamic/examples/modules/TMCM_6212/TMCM_6212_TMCL_rotateDemo.py",
"PyTrinamic/examples/tools/FirmwareUpdate.py",
],
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
license="MIT",
zip_safe=False,
) | 0.393385 | 0.14885 |
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from users.serializers import UserSerializer
from .models import Event
class EventSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.HyperlinkedRelatedField(view_name='user-detail', blank=True)
title = serializers.WritableField(source='title', blank=True)
description = serializers.WritableField(source='description', blank=True)
chapter = serializers.HyperlinkedRelatedField('get_chapter', view_name='chapter-detail', read_only=True)
attend_url = serializers.Field('get_attend_url')
rsvp_url = serializers.Field('get_rsvp_url')
text_color_class = serializers.Field('get_status_text_class')
ctype_id = serializers.SerializerMethodField('get_content_type_id')
allDay = serializers.Field(source='all_day')
name = serializers.Field('name')
api_url = serializers.Field('get_api_url')
class Meta:
model = Event
fields = [
'id',
'ctype_id',
'url',
'api_url',
'name',
'title',
'slug',
'chapter',
'chapter_title',
'fraternity_title',
'chapter_id',
'status',
'description',
'start',
'end',
'allDay',
'owner',
'attend_url',
'rsvp_url',
'text_color_class',
#'enable_comments',
#'viewers',
#'attendees',
]
def get_chapter_id(self, obj):
if obj:
return obj.get_chapter().id
else:
return None
def get_content_type_id(self, obj):
return ContentType.objects.get_for_model(Event).id
class EventNestedSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.HyperlinkedRelatedField(view_name='user-detail', blank=True)
title = serializers.WritableField(source='title', blank=True)
description = serializers.WritableField(source='description', blank=True)
chapter = serializers.HyperlinkedRelatedField('get_chapter', view_name='chapter-detail', read_only=True)
attend_url = serializers.Field('get_attend_url')
rsvp_url = serializers.Field('get_rsvp_url')
text_color_class = serializers.Field('get_status_text_class')
get_attendees = UserSerializer(many=True)
get_rsvps = UserSerializer(many=True)
get_rsvps_not_attendees = UserSerializer(many=True)
allDay = serializers.Field(source='all_day')
name = serializers.Field('name')
api_url = serializers.Field('get_api_url')
class Meta:
model = Event
fields = [
'id',
'url',
'api_url',
'name',
'title',
'slug',
'status',
'description',
'start',
'end',
'allDay',
'owner',
'attend_url',
'rsvp_url',
'text_color_class',
'get_attendees',
'get_rsvps',
'get_rsvps_not_attendees',
] | onegreek/events/serializers.py | from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from users.serializers import UserSerializer
from .models import Event
class EventSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.HyperlinkedRelatedField(view_name='user-detail', blank=True)
title = serializers.WritableField(source='title', blank=True)
description = serializers.WritableField(source='description', blank=True)
chapter = serializers.HyperlinkedRelatedField('get_chapter', view_name='chapter-detail', read_only=True)
attend_url = serializers.Field('get_attend_url')
rsvp_url = serializers.Field('get_rsvp_url')
text_color_class = serializers.Field('get_status_text_class')
ctype_id = serializers.SerializerMethodField('get_content_type_id')
allDay = serializers.Field(source='all_day')
name = serializers.Field('name')
api_url = serializers.Field('get_api_url')
class Meta:
model = Event
fields = [
'id',
'ctype_id',
'url',
'api_url',
'name',
'title',
'slug',
'chapter',
'chapter_title',
'fraternity_title',
'chapter_id',
'status',
'description',
'start',
'end',
'allDay',
'owner',
'attend_url',
'rsvp_url',
'text_color_class',
#'enable_comments',
#'viewers',
#'attendees',
]
def get_chapter_id(self, obj):
if obj:
return obj.get_chapter().id
else:
return None
def get_content_type_id(self, obj):
return ContentType.objects.get_for_model(Event).id
class EventNestedSerializer(serializers.HyperlinkedModelSerializer):
owner = serializers.HyperlinkedRelatedField(view_name='user-detail', blank=True)
title = serializers.WritableField(source='title', blank=True)
description = serializers.WritableField(source='description', blank=True)
chapter = serializers.HyperlinkedRelatedField('get_chapter', view_name='chapter-detail', read_only=True)
attend_url = serializers.Field('get_attend_url')
rsvp_url = serializers.Field('get_rsvp_url')
text_color_class = serializers.Field('get_status_text_class')
get_attendees = UserSerializer(many=True)
get_rsvps = UserSerializer(many=True)
get_rsvps_not_attendees = UserSerializer(many=True)
allDay = serializers.Field(source='all_day')
name = serializers.Field('name')
api_url = serializers.Field('get_api_url')
class Meta:
model = Event
fields = [
'id',
'url',
'api_url',
'name',
'title',
'slug',
'status',
'description',
'start',
'end',
'allDay',
'owner',
'attend_url',
'rsvp_url',
'text_color_class',
'get_attendees',
'get_rsvps',
'get_rsvps_not_attendees',
] | 0.656878 | 0.127653 |
import pytest
from eth.db.atomic import AtomicDB
from tests.core.integration_test_helpers import (
load_fixture_db,
load_mining_chain,
DBFixture,
)
@pytest.fixture
def leveldb_20():
yield from load_fixture_db(DBFixture.TWENTY_POW_HEADERS)
@pytest.fixture
def leveldb_1000():
yield from load_fixture_db(DBFixture.THOUSAND_POW_HEADERS)
@pytest.fixture
def leveldb_uncle_chain():
yield from load_fixture_db(DBFixture.UNCLE_CHAIN)
@pytest.fixture
def chaindb_uncle(leveldb_1000, leveldb_uncle_chain):
canoncical_chain = load_mining_chain(AtomicDB(leveldb_1000))
uncle_chain = load_mining_chain(AtomicDB(leveldb_uncle_chain))
# This fixture shares a common history with `leveldb_1000` from genesis till block 474.
# It then forks of and contains uncles from 475 till 1000. These numbers were picked because
# it fully spans the first gap defined in `chaindb_with_gaps` (test_sync.py) and only
# partially spans the second gap defined in `chaindb_with_gaps`.
header_before_fork = canoncical_chain.get_canonical_block_header_by_number(474)
assert uncle_chain.get_canonical_block_header_by_number(474) == header_before_fork
# Forks at header 475
fork_header = canoncical_chain.get_canonical_block_header_by_number(475)
assert uncle_chain.get_canonical_block_header_by_number(475) != fork_header
assert uncle_chain.chaindb.get_canonical_head().block_number == 1000
return uncle_chain.chaindb
@pytest.fixture
def chaindb_1000(leveldb_1000):
chain = load_mining_chain(AtomicDB(leveldb_1000))
assert chain.chaindb.get_canonical_head().block_number == 1000
return chain.chaindb
@pytest.fixture
def chaindb_20(leveldb_20):
chain = load_mining_chain(AtomicDB(leveldb_20))
assert chain.chaindb.get_canonical_head().block_number == 20
return chain.chaindb
@pytest.fixture
def chaindb_fresh():
chain = load_mining_chain(AtomicDB())
assert chain.chaindb.get_canonical_head().block_number == 0
return chain.chaindb | tests/core/p2p-proto/conftest.py | import pytest
from eth.db.atomic import AtomicDB
from tests.core.integration_test_helpers import (
load_fixture_db,
load_mining_chain,
DBFixture,
)
@pytest.fixture
def leveldb_20():
yield from load_fixture_db(DBFixture.TWENTY_POW_HEADERS)
@pytest.fixture
def leveldb_1000():
yield from load_fixture_db(DBFixture.THOUSAND_POW_HEADERS)
@pytest.fixture
def leveldb_uncle_chain():
yield from load_fixture_db(DBFixture.UNCLE_CHAIN)
@pytest.fixture
def chaindb_uncle(leveldb_1000, leveldb_uncle_chain):
canoncical_chain = load_mining_chain(AtomicDB(leveldb_1000))
uncle_chain = load_mining_chain(AtomicDB(leveldb_uncle_chain))
# This fixture shares a common history with `leveldb_1000` from genesis till block 474.
# It then forks of and contains uncles from 475 till 1000. These numbers were picked because
# it fully spans the first gap defined in `chaindb_with_gaps` (test_sync.py) and only
# partially spans the second gap defined in `chaindb_with_gaps`.
header_before_fork = canoncical_chain.get_canonical_block_header_by_number(474)
assert uncle_chain.get_canonical_block_header_by_number(474) == header_before_fork
# Forks at header 475
fork_header = canoncical_chain.get_canonical_block_header_by_number(475)
assert uncle_chain.get_canonical_block_header_by_number(475) != fork_header
assert uncle_chain.chaindb.get_canonical_head().block_number == 1000
return uncle_chain.chaindb
@pytest.fixture
def chaindb_1000(leveldb_1000):
chain = load_mining_chain(AtomicDB(leveldb_1000))
assert chain.chaindb.get_canonical_head().block_number == 1000
return chain.chaindb
@pytest.fixture
def chaindb_20(leveldb_20):
chain = load_mining_chain(AtomicDB(leveldb_20))
assert chain.chaindb.get_canonical_head().block_number == 20
return chain.chaindb
@pytest.fixture
def chaindb_fresh():
chain = load_mining_chain(AtomicDB())
assert chain.chaindb.get_canonical_head().block_number == 0
return chain.chaindb | 0.668015 | 0.429489 |
import pandas as pd
import world_bank_data as wb
from common import *
import info_gdf
def info_countries_df():
countries = wb.get_countries()
# Population dataset, by the World Bank (most recent value), indexed with the country code
population = wb.get_series('SP.POP.TOTL', id_or_value='id', simplify_index=True, mrv=1)
# PATCH: if last line is not working (sometimes World Bank doesn't work) replace with the line below
# population = pd.read_csv('countries_population.csv').set_index('id')['population']
# Aggregate region, country and population
df = countries[['region', 'latitude', 'longitude','name']].loc[countries.region != 'Aggregates']
df['population'] = population
df = df.reset_index().rename(columns={'id':'LOCATION'})
df['LOCATION']=df['LOCATION'].apply(normalize_str)
df['POPULATION']=df['population']
gdf_indexed = info_gdf.GLOBAL_INFO_GDF.set_index('LOCATION')
df = df.set_index('LOCATION')
df['LAT'] = gdf_indexed['geometry'].centroid.apply(lambda p : p.coords[0][1])
df['LONG'] = gdf_indexed['geometry'].centroid.apply(lambda p : p.coords[0][0])
df = df.reset_index()
df = df[['LOCATION','POPULATION', 'LAT', 'LONG','name']]
df['name']=df['name'].apply(normalize_str)
name_replace = {
'Brunei Darussalam': 'Brunei',
'Congo, Dem. Rep.': 'Congo (Kinshasa)',
'Congo, Rep.': 'Congo (Brazzaville)',
'Czech Republic': 'Czechia',
'Egypt, Arab Rep.': 'Egypt',
'Iran, Islamic Rep.': 'Iran',
'Korea, Rep.': 'Korea, South',
'St. Lucia': 'Saint Lucia',
'Russian Federation': 'Russia',
'Slovak Republic': 'Slovakia',
'United States': 'US',
'St. Vincent and the Grenadines': 'Saint Vincent and the Grenadines',
'Venezuela, RB': 'Venezuela',
'Taiwan, China': 'Taiwan*',
'Lao PDR': 'Laos',
'Syrian Arab Republic': 'Syria',
'BAHAMAS, THE': 'Bahamas',
'ST. KITTS AND NEVIS': 'SAINT KITTS AND NEVIS',
'KYRGYZ REPUBLIC': 'KYRGYZSTAN',
'GAMBIA, THE': 'GAMBIA',
'MYANMAR': 'BURMA',
'YEMEN, REP.': 'YEMEN',
}
name_replace = { normalize_str(k): normalize_str(v) for k,v in name_replace.items() }
df['name']=df['name'].replace(name_replace)
return df
def location_to_iso(df_info):
location_to_iso = {r['name']:r['LOCATION'] for _,r in df_info.iterrows()}
return location_to_iso
GLOBAL_INFO_DF = None
GLOBAL_LOCATION_TO_ISO_COUNTRIES = None
GLOBAL_BARRIOS_TO_COMUNA = None
def info_df():
global GLOBAL_INFO_DF, GLOBAL_LOCATION_TO_ISO_COUNTRIES, GLOBAL_BARRIOS_TO_COMUNA
df_info_world = info_countries_df()
GLOBAL_LOCATION_TO_ISO_COUNTRIES = location_to_iso(df_info_world)
df_info_arg = pd.read_csv(DATA_IN_CSV_INFO_ARG)
GLOBAL_INFO_DF = pd.concat([df_info_world,df_info_arg],ignore_index=True)
df_caba = GLOBAL_INFO_DF[GLOBAL_INFO_DF['LOCATION'].apply(
lambda l: l.startswith('ARGENTINA/CABA/') and l.count('/')==3 )].copy()
GLOBAL_BARRIOS_TO_COMUNA = [ r['LOCATION'].split('/') for _,r in df_caba.iterrows() ]
GLOBAL_BARRIOS_TO_COMUNA = { barrio:comuna for _, _, comuna, barrio in GLOBAL_BARRIOS_TO_COMUNA }
info_df() | info_df.py | import pandas as pd
import world_bank_data as wb
from common import *
import info_gdf
def info_countries_df():
countries = wb.get_countries()
# Population dataset, by the World Bank (most recent value), indexed with the country code
population = wb.get_series('SP.POP.TOTL', id_or_value='id', simplify_index=True, mrv=1)
# PATCH: if last line is not working (sometimes World Bank doesn't work) replace with the line below
# population = pd.read_csv('countries_population.csv').set_index('id')['population']
# Aggregate region, country and population
df = countries[['region', 'latitude', 'longitude','name']].loc[countries.region != 'Aggregates']
df['population'] = population
df = df.reset_index().rename(columns={'id':'LOCATION'})
df['LOCATION']=df['LOCATION'].apply(normalize_str)
df['POPULATION']=df['population']
gdf_indexed = info_gdf.GLOBAL_INFO_GDF.set_index('LOCATION')
df = df.set_index('LOCATION')
df['LAT'] = gdf_indexed['geometry'].centroid.apply(lambda p : p.coords[0][1])
df['LONG'] = gdf_indexed['geometry'].centroid.apply(lambda p : p.coords[0][0])
df = df.reset_index()
df = df[['LOCATION','POPULATION', 'LAT', 'LONG','name']]
df['name']=df['name'].apply(normalize_str)
name_replace = {
'Brunei Darussalam': 'Brunei',
'Congo, Dem. Rep.': 'Congo (Kinshasa)',
'Congo, Rep.': 'Congo (Brazzaville)',
'Czech Republic': 'Czechia',
'Egypt, Arab Rep.': 'Egypt',
'Iran, Islamic Rep.': 'Iran',
'Korea, Rep.': 'Korea, South',
'St. Lucia': 'Saint Lucia',
'Russian Federation': 'Russia',
'Slovak Republic': 'Slovakia',
'United States': 'US',
'St. Vincent and the Grenadines': 'Saint Vincent and the Grenadines',
'Venezuela, RB': 'Venezuela',
'Taiwan, China': 'Taiwan*',
'Lao PDR': 'Laos',
'Syrian Arab Republic': 'Syria',
'BAHAMAS, THE': 'Bahamas',
'ST. KITTS AND NEVIS': 'SAINT KITTS AND NEVIS',
'KYRGYZ REPUBLIC': 'KYRGYZSTAN',
'GAMBIA, THE': 'GAMBIA',
'MYANMAR': 'BURMA',
'YEMEN, REP.': 'YEMEN',
}
name_replace = { normalize_str(k): normalize_str(v) for k,v in name_replace.items() }
df['name']=df['name'].replace(name_replace)
return df
def location_to_iso(df_info):
location_to_iso = {r['name']:r['LOCATION'] for _,r in df_info.iterrows()}
return location_to_iso
GLOBAL_INFO_DF = None
GLOBAL_LOCATION_TO_ISO_COUNTRIES = None
GLOBAL_BARRIOS_TO_COMUNA = None
def info_df():
global GLOBAL_INFO_DF, GLOBAL_LOCATION_TO_ISO_COUNTRIES, GLOBAL_BARRIOS_TO_COMUNA
df_info_world = info_countries_df()
GLOBAL_LOCATION_TO_ISO_COUNTRIES = location_to_iso(df_info_world)
df_info_arg = pd.read_csv(DATA_IN_CSV_INFO_ARG)
GLOBAL_INFO_DF = pd.concat([df_info_world,df_info_arg],ignore_index=True)
df_caba = GLOBAL_INFO_DF[GLOBAL_INFO_DF['LOCATION'].apply(
lambda l: l.startswith('ARGENTINA/CABA/') and l.count('/')==3 )].copy()
GLOBAL_BARRIOS_TO_COMUNA = [ r['LOCATION'].split('/') for _,r in df_caba.iterrows() ]
GLOBAL_BARRIOS_TO_COMUNA = { barrio:comuna for _, _, comuna, barrio in GLOBAL_BARRIOS_TO_COMUNA }
info_df() | 0.315209 | 0.386416 |
from __future__ import unicode_literals
import django
from django.test.testcases import TestCase
from data.data.data import Data
from data.data.exceptions import InvalidData
from data.data.value import FileValue, JsonValue
from data.interface.parameter import FileParameter, JsonParameter
from data.dataset.dataset import DataSetDefinition
from data.exceptions import InvalidDataSetDefinition
class TestDataSetDefinition(TestCase):
"""Tests related to the DataSetDefinition class"""
def setUp(self):
django.setup()
self.definition = DataSetDefinition()
self.file_param = FileParameter('input_a', ['application/json'])
self.json_param = JsonParameter('input_b', 'integer')
self.file_param2 = FileParameter('input_c', ['application/json'])
self.json_param2 = JsonParameter('input_d', 'integer')
self.definition.add_global_parameter(self.file_param)
self.definition.add_global_parameter(self.json_param)
self.definition.add_parameter(self.file_param2)
self.definition.add_parameter(self.json_param2)
def test_add_parameter(self):
"""Tests calling DataSetDefinition.add_value()"""
self.assertSetEqual(set(self.definition.get_parameters()), {'input_a', 'input_b', 'input_c', 'input_d'})
file_param = FileParameter('input_e', ['application/json'])
self.definition.add_parameter(file_param)
self.assertSetEqual(set(self.definition.get_parameters()), {'input_a', 'input_b', 'input_c', 'input_d', 'input_e'})
#test adding duplicate
with self.assertRaises(InvalidDataSetDefinition) as context:
self.definition.add_parameter(self.file_param)
self.assertEqual(context.exception.error.name, 'DUPLICATE_PARAMETER')
with self.assertRaises(InvalidDataSetDefinition) as context:
self.definition.add_global_parameter(self.file_param2)
self.assertEqual(context.exception.error.name, 'DUPLICATE_PARAMETER')
def test_validate(self):
"""Tests calling DataSetDefinition.validate()"""
data = Data()
data.add_value(FileValue('input_c', [124]))
#missing global data
with self.assertRaises(InvalidDataSetDefinition) as context:
self.definition.validate(data=data)
self.assertEqual(context.exception.error.name, 'MISSING_GLOBAL_DATA')
#incorrect global data
gd = Data()
gd.add_value(FileValue('input_a', [123]))
gd.add_value(FileValue('input_b', [123]))
self.definition.global_data = gd
with self.assertRaises(InvalidData) as context:
self.definition.validate(data=data)
self.assertEqual(context.exception.error.name, 'MISMATCHED_PARAM_TYPE')
#missing data
gd2 = Data()
gd2.add_value(FileValue('input_a', [123]))
gd2.add_value(JsonValue('input_b', 100))
self.definition.global_data = gd2
with self.assertRaises(InvalidData) as context:
self.definition.validate(data=data)
self.assertEqual(context.exception.error.name, 'PARAM_REQUIRED')
#successful validation
data.add_value(JsonValue('input_d', 100))
self.definition.validate(data=data)
#incorrect data
data2 = Data()
data2.add_value(FileValue('input_c', [124]))
data2.add_value(FileValue('input_d', [124]))
with self.assertRaises(InvalidData) as context:
self.definition.validate(data=data2)
self.assertEqual(context.exception.error.name, 'MISMATCHED_PARAM_TYPE') | scale/data/test/dataset/test_dataset.py | from __future__ import unicode_literals
import django
from django.test.testcases import TestCase
from data.data.data import Data
from data.data.exceptions import InvalidData
from data.data.value import FileValue, JsonValue
from data.interface.parameter import FileParameter, JsonParameter
from data.dataset.dataset import DataSetDefinition
from data.exceptions import InvalidDataSetDefinition
class TestDataSetDefinition(TestCase):
"""Tests related to the DataSetDefinition class"""
def setUp(self):
django.setup()
self.definition = DataSetDefinition()
self.file_param = FileParameter('input_a', ['application/json'])
self.json_param = JsonParameter('input_b', 'integer')
self.file_param2 = FileParameter('input_c', ['application/json'])
self.json_param2 = JsonParameter('input_d', 'integer')
self.definition.add_global_parameter(self.file_param)
self.definition.add_global_parameter(self.json_param)
self.definition.add_parameter(self.file_param2)
self.definition.add_parameter(self.json_param2)
def test_add_parameter(self):
"""Tests calling DataSetDefinition.add_value()"""
self.assertSetEqual(set(self.definition.get_parameters()), {'input_a', 'input_b', 'input_c', 'input_d'})
file_param = FileParameter('input_e', ['application/json'])
self.definition.add_parameter(file_param)
self.assertSetEqual(set(self.definition.get_parameters()), {'input_a', 'input_b', 'input_c', 'input_d', 'input_e'})
#test adding duplicate
with self.assertRaises(InvalidDataSetDefinition) as context:
self.definition.add_parameter(self.file_param)
self.assertEqual(context.exception.error.name, 'DUPLICATE_PARAMETER')
with self.assertRaises(InvalidDataSetDefinition) as context:
self.definition.add_global_parameter(self.file_param2)
self.assertEqual(context.exception.error.name, 'DUPLICATE_PARAMETER')
def test_validate(self):
"""Tests calling DataSetDefinition.validate()"""
data = Data()
data.add_value(FileValue('input_c', [124]))
#missing global data
with self.assertRaises(InvalidDataSetDefinition) as context:
self.definition.validate(data=data)
self.assertEqual(context.exception.error.name, 'MISSING_GLOBAL_DATA')
#incorrect global data
gd = Data()
gd.add_value(FileValue('input_a', [123]))
gd.add_value(FileValue('input_b', [123]))
self.definition.global_data = gd
with self.assertRaises(InvalidData) as context:
self.definition.validate(data=data)
self.assertEqual(context.exception.error.name, 'MISMATCHED_PARAM_TYPE')
#missing data
gd2 = Data()
gd2.add_value(FileValue('input_a', [123]))
gd2.add_value(JsonValue('input_b', 100))
self.definition.global_data = gd2
with self.assertRaises(InvalidData) as context:
self.definition.validate(data=data)
self.assertEqual(context.exception.error.name, 'PARAM_REQUIRED')
#successful validation
data.add_value(JsonValue('input_d', 100))
self.definition.validate(data=data)
#incorrect data
data2 = Data()
data2.add_value(FileValue('input_c', [124]))
data2.add_value(FileValue('input_d', [124]))
with self.assertRaises(InvalidData) as context:
self.definition.validate(data=data2)
self.assertEqual(context.exception.error.name, 'MISMATCHED_PARAM_TYPE') | 0.491456 | 0.569613 |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.162012,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.32994,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.745442,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.619429,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.07263,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.615182,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.30724,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.497994,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.28625,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.14083,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0224548,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.228296,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.166067,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.369126,
'Execution Unit/Register Files/Runtime Dynamic': 0.188522,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.596041,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.51672,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.74779,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00278807,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00278807,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00242055,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000932735,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00238557,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0103823,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0270125,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.159644,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.447088,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.542224,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.18635,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0402615,
'L2/Runtime Dynamic': 0.00968829,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 7.03273,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.80174,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.187502,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.187502,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.92176,
'Load Store Unit/Runtime Dynamic': 3.91393,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.462348,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.924695,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.164089,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.164491,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0738926,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.842172,
'Memory Management Unit/Runtime Dynamic': 0.238383,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 29.6209,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.491324,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0375864,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.31613,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.84504,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.9412,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00458175,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.206287,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0216698,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.317077,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.511433,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.258154,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.08666,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.359322,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.56614,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00409388,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0132996,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0980133,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0983589,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.102107,
'Execution Unit/Register Files/Runtime Dynamic': 0.111659,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.207632,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.646052,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.45604,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00150225,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00150225,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0013273,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00052412,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00141293,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00574474,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0137305,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.094555,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.0145,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.261456,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.321151,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.52492,
'Instruction Fetch Unit/Runtime Dynamic': 0.696638,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0183011,
'L2/Runtime Dynamic': 0.00589454,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.76954,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.70463,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.114282,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.114282,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.30921,
'Load Store Unit/Runtime Dynamic': 2.38252,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.281801,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.563602,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.100012,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.100258,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.37396,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0429464,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.701872,
'Memory Management Unit/Runtime Dynamic': 0.143205,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.7099,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0107697,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0144367,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.167092,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.192298,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.87659,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0179935,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.216821,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.126694,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.13945,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.224928,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.113536,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.477915,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.140067,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.3032,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0239352,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00584917,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.047833,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0432582,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0717682,
'Execution Unit/Register Files/Runtime Dynamic': 0.0491074,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.10527,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.282031,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.43125,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00110283,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00110283,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00100859,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000416708,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000621407,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00383566,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00885802,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0415852,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.64518,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.102457,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.141242,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.99207,
'Instruction Fetch Unit/Runtime Dynamic': 0.297978,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0355386,
'L2/Runtime Dynamic': 0.0161514,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.44317,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.614337,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0390186,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0390186,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.62742,
'Load Store Unit/Runtime Dynamic': 0.845782,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0962131,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.192426,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0341463,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0346774,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.164467,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0168044,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.379234,
'Memory Management Unit/Runtime Dynamic': 0.0514818,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.9269,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0629632,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00705786,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0705958,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.140617,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.78326,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0661906,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.254678,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.466152,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.133064,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.214628,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.108337,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.456029,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0807194,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.73977,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.088066,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00558131,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0607209,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0412772,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.148787,
'Execution Unit/Register Files/Runtime Dynamic': 0.0468585,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.144473,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.341988,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.50493,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000346961,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000346961,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000313772,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000127794,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000592951,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00160065,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00291328,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0396808,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.52404,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0856879,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.134774,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.86505,
'Instruction Fetch Unit/Runtime Dynamic': 0.264657,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0837037,
'L2/Runtime Dynamic': 0.0496867,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.21647,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.589147,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0316844,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0316843,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.36609,
'Load Store Unit/Runtime Dynamic': 0.777088,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0781284,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.156256,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.027728,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0289767,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.156936,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0140722,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.360676,
'Memory Management Unit/Runtime Dynamic': 0.0430489,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.0048,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.231662,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00882276,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0624642,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.302949,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.94236,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.192647385760747,
'Runtime Dynamic': 5.192647385760747,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.20448,
'Runtime Dynamic': 0.14288,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 84.467,
'Peak Power': 117.579,
'Runtime Dynamic': 22.6863,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 84.2625,
'Total Cores/Runtime Dynamic': 22.5434,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.20448,
'Total L3s/Runtime Dynamic': 0.14288,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | benchmarks/SimResults/combinations_spec_ml_deepnet/cmp_astarlbmtontoh264ref/power.py | power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.162012,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.32994,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.745442,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.619429,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 1.07263,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.615182,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 2.30724,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.497994,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 7.28625,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.14083,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0224548,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.228296,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.166067,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.369126,
'Execution Unit/Register Files/Runtime Dynamic': 0.188522,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.596041,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 1.51672,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 4.74779,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00278807,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00278807,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00242055,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000932735,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00238557,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0103823,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0270125,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.159644,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.43323,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.447088,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.542224,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.96874,
'Instruction Fetch Unit/Runtime Dynamic': 1.18635,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0402615,
'L2/Runtime Dynamic': 0.00968829,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 7.03273,
'Load Store Unit/Data Cache/Runtime Dynamic': 2.80174,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.187502,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.187502,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 7.92176,
'Load Store Unit/Runtime Dynamic': 3.91393,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.462348,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.924695,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.164089,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.164491,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.399995,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0738926,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.842172,
'Memory Management Unit/Runtime Dynamic': 0.238383,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 29.6209,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.491324,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0375864,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.31613,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.84504,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 10.9412,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.00458175,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.206287,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0216698,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.317077,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.511433,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.258154,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.08666,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.359322,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.56614,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.00409388,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0132996,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0980133,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0983589,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.102107,
'Execution Unit/Register Files/Runtime Dynamic': 0.111659,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.207632,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.646052,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.45604,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00150225,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00150225,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.0013273,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00052412,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00141293,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00574474,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0137305,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.094555,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 6.0145,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.261456,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.321151,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.52492,
'Instruction Fetch Unit/Runtime Dynamic': 0.696638,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0183011,
'L2/Runtime Dynamic': 0.00589454,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 4.76954,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.70463,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.114282,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.114282,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 5.30921,
'Load Store Unit/Runtime Dynamic': 2.38252,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.281801,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.563602,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.100012,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.100258,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.37396,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0429464,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.701872,
'Memory Management Unit/Runtime Dynamic': 0.143205,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 22.7099,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0107697,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0144367,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.167092,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.192298,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 5.87659,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0179935,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.216821,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.126694,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.13945,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.224928,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.113536,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.477915,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.140067,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.3032,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0239352,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00584917,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.047833,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0432582,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0717682,
'Execution Unit/Register Files/Runtime Dynamic': 0.0491074,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.10527,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.282031,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.43125,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00110283,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00110283,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00100859,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000416708,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000621407,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00383566,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00885802,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0415852,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.64518,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.102457,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.141242,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.99207,
'Instruction Fetch Unit/Runtime Dynamic': 0.297978,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0355386,
'L2/Runtime Dynamic': 0.0161514,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.44317,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.614337,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0390186,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0390186,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.62742,
'Load Store Unit/Runtime Dynamic': 0.845782,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0962131,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.192426,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0341463,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0346774,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.164467,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0168044,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.379234,
'Memory Management Unit/Runtime Dynamic': 0.0514818,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 15.9269,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0629632,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00705786,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0705958,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.140617,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.78326,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0661906,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.254678,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.466152,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.133064,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.214628,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.108337,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.456029,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0807194,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.73977,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.088066,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00558131,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0607209,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0412772,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.148787,
'Execution Unit/Register Files/Runtime Dynamic': 0.0468585,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.144473,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.341988,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.50493,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000346961,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000346961,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000313772,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000127794,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000592951,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00160065,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00291328,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0396808,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.52404,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0856879,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.134774,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.86505,
'Instruction Fetch Unit/Runtime Dynamic': 0.264657,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0837037,
'L2/Runtime Dynamic': 0.0496867,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.21647,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.589147,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0316844,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0316843,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.36609,
'Load Store Unit/Runtime Dynamic': 0.777088,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0781284,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.156256,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.027728,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0289767,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.156936,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0140722,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.360676,
'Memory Management Unit/Runtime Dynamic': 0.0430489,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 16.0048,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.231662,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00882276,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0624642,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.302949,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.94236,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 5.192647385760747,
'Runtime Dynamic': 5.192647385760747,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.20448,
'Runtime Dynamic': 0.14288,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 84.467,
'Peak Power': 117.579,
'Runtime Dynamic': 22.6863,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 84.2625,
'Total Cores/Runtime Dynamic': 22.5434,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.20448,
'Total L3s/Runtime Dynamic': 0.14288,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
                        'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
import os
import random
import numpy as np
import glob
import librosa
import tensorflow as tf
from models.relgan import RelGAN
from speech_tools import load_pickle, sample_train_data
from speech_tools import *
import argparse
from hparams import *
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
def main():
log_dir = os.path.join(argv.output_dir, 'log', argv.model_name)
os.makedirs(log_dir, exist_ok=True)
exp_dirs = []
for f in os.listdir(argv.dataset_dir):
exp_dirs.append(os.path.join(argv.dataset_dir, f)) # /Dataset root/Emotions`
print(exp_dirs)
print('Loading cached data...')
coded_sps_norms = []
coded_sps_means = []
coded_sps_stds = []
log_f0s_means = []
log_f0s_stds = []
for f in exp_dirs:
coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std, log_f0s_mean_A, log_f0s_std_A = load_pickle(
os.path.join(f, 'cache{}.p'.format(num_mcep)))
coded_sps_norms.append(coded_sps_A_norm)
coded_sps_means.append(coded_sps_A_mean)
coded_sps_stds.append(coded_sps_A_std)
log_f0s_means.append(log_f0s_mean_A)
log_f0s_stds.append(log_f0s_std_A)
num_domains = len(coded_sps_norms)
model = RelGAN(num_features=num_mcep, num_domains=num_domains, batch_size=mini_batch_size, log_dir=log_dir)
os.makedirs(os.path.join(argv.output_dir, 'experiment', argv.model_name, 'checkpoints'), exist_ok=True)
ckpt = tf.train.get_checkpoint_state(os.path.join(argv.output_dir, 'experiment', argv.model_name, 'checkpoints'))
if ckpt:
# last_model = ckpt.all_model_checkpoint_paths[1]
last_model = ckpt.model_checkpoint_path
print("loading {}".format(last_model))
model.load(filepath=last_model)
else:
print("checkpoints are not found")
iteration = 1
while iteration <= num_iterations:
if (iteration % 10000 == 0):
lambda_triangle *= 0.9
lambda_backward *= 0.9
generator_learning_rate *= 0.99999
discriminator_learning_rate *= 0.99999
x, x2, x_atr, y, y_atr, z, z_atr = sample_train_data(dataset_A=coded_sps_norms, nBatch=mini_batch_size,
num_mcep=num_mcep, n_frames=n_frames)
x_labels = np.zeros([mini_batch_size, num_domains])
y_labels = np.zeros([mini_batch_size, num_domains])
z_labels = np.zeros([mini_batch_size, num_domains])
for b in range(mini_batch_size):
x_labels[b] = np.identity(num_domains)[x_atr[b]]
y_labels[b] = np.identity(num_domains)[y_atr[b]]
z_labels[b] = np.identity(num_domains)[z_atr[b]]
rnd = np.random.randint(2)
alp = np.random.uniform(0, 0.5, size=mini_batch_size) if rnd == 0 else np.random.uniform(0.5, 1.0,
size=mini_batch_size)
generator_loss, discriminator_loss, gen_adv_loss, gen_cond_loss, gen_int_loss, gen_rec_loss, gen_self_loss, dis_adv_loss, dis_cond_loss, dis_int_loss, lossb, lossm, losst = model.train(
input_A=x,
input_A2=x2, input_B=y, input_C=z, label_A=x_labels, label_B=y_labels, label_C=z_labels,
alpha=alp, rand=rnd, lambda_cycle=lambda_cycle, lambda_identity=lambda_identity,
lambda_triangle=lambda_triangle, lambda_backward=lambda_backward,
generator_learning_rate=generator_learning_rate,
discriminator_learning_rate=discriminator_learning_rate)
if iteration % 10 == 0:
print('Iteration: {:07d}, Generator Loss : {:.3f}, Discriminator Loss : {:.3f}'.format(iteration,
generator_loss,
discriminator_loss))
print("d_a=%.3f, d_c=%.3f, d_i=%.3f" % (dis_adv_loss, dis_cond_loss, dis_int_loss))
print("g_a=%.3f, g_c=%.3f, g_i=%.3f, g_r=%.3f, g_s=%.3f, g_b=%.3f, g_m=%.3f, g_t=%.3f" % (
gen_adv_loss, gen_cond_loss, gen_int_loss, gen_rec_loss, gen_self_loss, lossb, lossm, losst))
if iteration % 5000 == 0:
print('Checkpointing...')
model.save(directory=os.path.join('experiments', argv.model_name, 'checkpoints'),
filename='{}_{}.ckpt'.format(argv.model_name, iteration))
if val_flag and iteration % 1000 == 0:
for q in range(3):
eval_dirs = os.listdir('datasets_val')
assert len(eval_dirs) == num_domains
x, x2, x_atr, y, y_atr, z, z_atr = sample_train_data(dataset_A=coded_sps_norms, nBatch=1,
num_mcep=num_mcep, n_frames=n_frames)
x_labels = np.zeros([1, num_domains])
y_labels = np.zeros([1, num_domains])
for b in range(1):
x_labels[b] = np.identity(num_domains)[x_atr[b]]
y_labels[b] = np.identity(num_domains)[y_atr[b]]
x_atr = x_atr[0]
y_atr = y_atr[0]
eval_A_dir = os.path.join('datasets_val', eval_dirs[x_atr])
print(eval_A_dir)
for file in glob.glob(eval_A_dir + '/*.wav'):
alpha = np.random.uniform(0, 1, size=1) if q != 0 else np.ones(1)
wav, _ = librosa.load(file, sr=sampling_rate, mono=True)
wav *= 1. / max(0.01, np.max(np.abs(wav)))
wav = wav_padding(wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4)
f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=sampling_rate, frame_period=frame_period)
f0s_mean_A = np.exp(log_f0s_means[x_atr])
f0s_mean_B = np.exp(log_f0s_means[y_atr])
f0s_mean_AB = alpha * f0s_mean_B + (1 - alpha) * f0s_mean_A
log_f0s_mean_AB = np.log(f0s_mean_AB)
f0s_std_A = np.exp(log_f0s_stds[x_atr])
f0s_std_B = np.exp(log_f0s_stds[y_atr])
f0s_std_AB = alpha * f0s_std_B + (1 - alpha) * f0s_std_A
log_f0s_std_AB = np.log(f0s_std_AB)
f0_converted = pitch_conversion(f0=f0, mean_log_src=log_f0s_means[x_atr],
std_log_src=log_f0s_stds[x_atr],
mean_log_target=log_f0s_mean_AB, std_log_target=log_f0s_std_AB)
coded_sp = world_encode_spectral_envelop(sp=sp, fs=sampling_rate, dim=num_mcep)
coded_sp_transposed = coded_sp.T
coded_sp_norm = (coded_sp_transposed - coded_sps_means[x_atr]) / coded_sps_stds[x_atr]
coded_sp_converted_norm = \
model.test(inputs=np.array([coded_sp_norm]), label_A=x_labels, label_B=y_labels, alpha=alpha)[0]
if coded_sp_converted_norm.shape[1] > len(f0):
coded_sp_converted_norm = coded_sp_converted_norm[:, :-1]
coded_sps_AB_mean = (1 - alpha) * coded_sps_means[x_atr] + alpha * coded_sps_means[y_atr]
coded_sps_AB_std = (1 - alpha) * coded_sps_stds[x_atr] + alpha * coded_sps_stds[y_atr]
coded_sp_converted = coded_sp_converted_norm * coded_sps_AB_std + coded_sps_AB_mean
coded_sp_converted = coded_sp_converted.T
coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
decoded_sp_converted = world_decode_spectral_envelop(coded_sp=coded_sp_converted, fs=sampling_rate)
wav_transformed = world_speech_synthesis(f0=f0_converted, decoded_sp=decoded_sp_converted, ap=ap,
fs=sampling_rate,
frame_period=frame_period)
wav_transformed *= 1. / max(0.01, np.max(np.abs(wav_transformed)))
validation_A_output_dir = 'test'
os.makedirs(validation_A_output_dir, exist_ok=True)
librosa.output.write_wav(os.path.join(validation_A_output_dir,
"{:06d}_{}_to_{}_{:.3f}_{}".format(iteration, x_atr, y_atr,
alpha[0],
os.path.basename(file))),
wav_transformed,
sampling_rate)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='train RelGAN')
parser.add_argument('--dataset_dir', type=str, help='Directory of dataset', default="datasets")
parser.add_argument('--val_dataset_dir', type=str, help='Directory of validation set', default="datasets_val")
parser.add_argument('--output_dir', type=str, help='Directory for save output', default="result")
parser.add_argument('--val_flag', type=bool, help='Flag of Validation during Training', default=True)
parser.add_argument('--model_name', type=str, help='Model name', default='relgan_vm')
argv = parser.parse_args()
print('args ', argv)
main() | train_relgan_vm.py | import os
import random
import numpy as np
import glob
import librosa
import tensorflow as tf
from models.relgan import RelGAN
from speech_tools import load_pickle, sample_train_data
from speech_tools import *
import argparse
from hparams import *
random.seed(seed)
np.random.seed(seed)
tf.set_random_seed(seed)
def main():
log_dir = os.path.join(argv.output_dir, 'log', argv.model_name)
os.makedirs(log_dir, exist_ok=True)
exp_dirs = []
for f in os.listdir(argv.dataset_dir):
exp_dirs.append(os.path.join(argv.dataset_dir, f)) # /Dataset root/Emotions`
print(exp_dirs)
print('Loading cached data...')
coded_sps_norms = []
coded_sps_means = []
coded_sps_stds = []
log_f0s_means = []
log_f0s_stds = []
for f in exp_dirs:
coded_sps_A_norm, coded_sps_A_mean, coded_sps_A_std, log_f0s_mean_A, log_f0s_std_A = load_pickle(
os.path.join(f, 'cache{}.p'.format(num_mcep)))
coded_sps_norms.append(coded_sps_A_norm)
coded_sps_means.append(coded_sps_A_mean)
coded_sps_stds.append(coded_sps_A_std)
log_f0s_means.append(log_f0s_mean_A)
log_f0s_stds.append(log_f0s_std_A)
num_domains = len(coded_sps_norms)
model = RelGAN(num_features=num_mcep, num_domains=num_domains, batch_size=mini_batch_size, log_dir=log_dir)
os.makedirs(os.path.join(argv.output_dir, 'experiment', argv.model_name, 'checkpoints'), exist_ok=True)
ckpt = tf.train.get_checkpoint_state(os.path.join(argv.output_dir, 'experiment', argv.model_name, 'checkpoints'))
if ckpt:
# last_model = ckpt.all_model_checkpoint_paths[1]
last_model = ckpt.model_checkpoint_path
print("loading {}".format(last_model))
model.load(filepath=last_model)
else:
print("checkpoints are not found")
iteration = 1
while iteration <= num_iterations:
if (iteration % 10000 == 0):
lambda_triangle *= 0.9
lambda_backward *= 0.9
generator_learning_rate *= 0.99999
discriminator_learning_rate *= 0.99999
x, x2, x_atr, y, y_atr, z, z_atr = sample_train_data(dataset_A=coded_sps_norms, nBatch=mini_batch_size,
num_mcep=num_mcep, n_frames=n_frames)
x_labels = np.zeros([mini_batch_size, num_domains])
y_labels = np.zeros([mini_batch_size, num_domains])
z_labels = np.zeros([mini_batch_size, num_domains])
for b in range(mini_batch_size):
x_labels[b] = np.identity(num_domains)[x_atr[b]]
y_labels[b] = np.identity(num_domains)[y_atr[b]]
z_labels[b] = np.identity(num_domains)[z_atr[b]]
rnd = np.random.randint(2)
alp = np.random.uniform(0, 0.5, size=mini_batch_size) if rnd == 0 else np.random.uniform(0.5, 1.0,
size=mini_batch_size)
generator_loss, discriminator_loss, gen_adv_loss, gen_cond_loss, gen_int_loss, gen_rec_loss, gen_self_loss, dis_adv_loss, dis_cond_loss, dis_int_loss, lossb, lossm, losst = model.train(
input_A=x,
input_A2=x2, input_B=y, input_C=z, label_A=x_labels, label_B=y_labels, label_C=z_labels,
alpha=alp, rand=rnd, lambda_cycle=lambda_cycle, lambda_identity=lambda_identity,
lambda_triangle=lambda_triangle, lambda_backward=lambda_backward,
generator_learning_rate=generator_learning_rate,
discriminator_learning_rate=discriminator_learning_rate)
if iteration % 10 == 0:
print('Iteration: {:07d}, Generator Loss : {:.3f}, Discriminator Loss : {:.3f}'.format(iteration,
generator_loss,
discriminator_loss))
print("d_a=%.3f, d_c=%.3f, d_i=%.3f" % (dis_adv_loss, dis_cond_loss, dis_int_loss))
print("g_a=%.3f, g_c=%.3f, g_i=%.3f, g_r=%.3f, g_s=%.3f, g_b=%.3f, g_m=%.3f, g_t=%.3f" % (
gen_adv_loss, gen_cond_loss, gen_int_loss, gen_rec_loss, gen_self_loss, lossb, lossm, losst))
if iteration % 5000 == 0:
print('Checkpointing...')
model.save(directory=os.path.join('experiments', argv.model_name, 'checkpoints'),
filename='{}_{}.ckpt'.format(argv.model_name, iteration))
if val_flag and iteration % 1000 == 0:
for q in range(3):
eval_dirs = os.listdir('datasets_val')
assert len(eval_dirs) == num_domains
x, x2, x_atr, y, y_atr, z, z_atr = sample_train_data(dataset_A=coded_sps_norms, nBatch=1,
num_mcep=num_mcep, n_frames=n_frames)
x_labels = np.zeros([1, num_domains])
y_labels = np.zeros([1, num_domains])
for b in range(1):
x_labels[b] = np.identity(num_domains)[x_atr[b]]
y_labels[b] = np.identity(num_domains)[y_atr[b]]
x_atr = x_atr[0]
y_atr = y_atr[0]
eval_A_dir = os.path.join('datasets_val', eval_dirs[x_atr])
print(eval_A_dir)
for file in glob.glob(eval_A_dir + '/*.wav'):
alpha = np.random.uniform(0, 1, size=1) if q != 0 else np.ones(1)
wav, _ = librosa.load(file, sr=sampling_rate, mono=True)
wav *= 1. / max(0.01, np.max(np.abs(wav)))
wav = wav_padding(wav=wav, sr=sampling_rate, frame_period=frame_period, multiple=4)
f0, timeaxis, sp, ap = world_decompose(wav=wav, fs=sampling_rate, frame_period=frame_period)
f0s_mean_A = np.exp(log_f0s_means[x_atr])
f0s_mean_B = np.exp(log_f0s_means[y_atr])
f0s_mean_AB = alpha * f0s_mean_B + (1 - alpha) * f0s_mean_A
log_f0s_mean_AB = np.log(f0s_mean_AB)
f0s_std_A = np.exp(log_f0s_stds[x_atr])
f0s_std_B = np.exp(log_f0s_stds[y_atr])
f0s_std_AB = alpha * f0s_std_B + (1 - alpha) * f0s_std_A
log_f0s_std_AB = np.log(f0s_std_AB)
f0_converted = pitch_conversion(f0=f0, mean_log_src=log_f0s_means[x_atr],
std_log_src=log_f0s_stds[x_atr],
mean_log_target=log_f0s_mean_AB, std_log_target=log_f0s_std_AB)
coded_sp = world_encode_spectral_envelop(sp=sp, fs=sampling_rate, dim=num_mcep)
coded_sp_transposed = coded_sp.T
coded_sp_norm = (coded_sp_transposed - coded_sps_means[x_atr]) / coded_sps_stds[x_atr]
coded_sp_converted_norm = \
model.test(inputs=np.array([coded_sp_norm]), label_A=x_labels, label_B=y_labels, alpha=alpha)[0]
if coded_sp_converted_norm.shape[1] > len(f0):
coded_sp_converted_norm = coded_sp_converted_norm[:, :-1]
coded_sps_AB_mean = (1 - alpha) * coded_sps_means[x_atr] + alpha * coded_sps_means[y_atr]
coded_sps_AB_std = (1 - alpha) * coded_sps_stds[x_atr] + alpha * coded_sps_stds[y_atr]
coded_sp_converted = coded_sp_converted_norm * coded_sps_AB_std + coded_sps_AB_mean
coded_sp_converted = coded_sp_converted.T
coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
decoded_sp_converted = world_decode_spectral_envelop(coded_sp=coded_sp_converted, fs=sampling_rate)
wav_transformed = world_speech_synthesis(f0=f0_converted, decoded_sp=decoded_sp_converted, ap=ap,
fs=sampling_rate,
frame_period=frame_period)
wav_transformed *= 1. / max(0.01, np.max(np.abs(wav_transformed)))
validation_A_output_dir = 'test'
os.makedirs(validation_A_output_dir, exist_ok=True)
librosa.output.write_wav(os.path.join(validation_A_output_dir,
"{:06d}_{}_to_{}_{:.3f}_{}".format(iteration, x_atr, y_atr,
alpha[0],
os.path.basename(file))),
wav_transformed,
sampling_rate)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='train RelGAN')
parser.add_argument('--dataset_dir', type=str, help='Directory of dataset', default="datasets")
parser.add_argument('--val_dataset_dir', type=str, help='Directory of validation set', default="datasets_val")
parser.add_argument('--output_dir', type=str, help='Directory for save output', default="result")
parser.add_argument('--val_flag', type=bool, help='Flag of Validation during Training', default=True)
parser.add_argument('--model_name', type=str, help='Model name', default='relgan_vm')
argv = parser.parse_args()
print('args ', argv)
main() | 0.378459 | 0.174164 |
import logging
import pygame
import pygame.event
from and_beyond.client import globals
from and_beyond.client.consts import SERVER_DISCONNECT_EVENT
from and_beyond.client.ui import Ui, UiButton, UiLabel
from and_beyond.client.ui.label_screen import LabelScreen
from and_beyond.client.ui.options_menu import OptionsMenu
from and_beyond.client.ui.question_screen import QuestionScreen
from and_beyond.common import PORT
from and_beyond.pipe_commands import PipeCommandsToServer, write_pipe
from pygame import *
from pygame.locals import *
class PauseMenu(Ui):
open_to_lan_button: UiButton
disconnect_button: UiButton
def __init__(self) -> None:
super().__init__([
UiLabel('Game Paused'),
UiButton('Continue Game', self.continue_game),
UiButton('Options', self.show_options),
])
self.open_to_lan_button = UiButton('Open to LAN', self.open_to_lan)
self.disconnect_button = UiButton('Disconnect', self.disconnect)
self.elements.extend((
self.open_to_lan_button,
self.disconnect_button,
))
def draw_and_call(self, surf: pygame.surface.Surface):
gray = Surface(surf.get_size()).convert_alpha()
gray.fill((0, 0, 0, 128))
surf.blit(gray, gray.get_rect())
if globals.ui_override is not None:
return globals.ui_override.draw_and_call(surf)
self.open_to_lan_button.hidden = globals.singleplayer_popen is None
self.disconnect_button.label = 'Disconnect' if globals.singleplayer_popen is None else 'Save and Quit'
return super().draw_and_call(surf)
def pause_game(self) -> None:
pipe = globals.singleplayer_pipe_out
if pipe is not None:
logging.debug('Sending pause command...')
write_pipe(pipe, PipeCommandsToServer.PAUSE)
pipe.flush()
globals.paused = True
def continue_game(self) -> None:
pipe = globals.singleplayer_pipe_out
if pipe is not None:
logging.debug('Sending unpause command...')
write_pipe(pipe, PipeCommandsToServer.UNPAUSE)
pipe.flush()
globals.paused = False
def show_options(self) -> None:
globals.ui_override = OptionsMenu()
def open_to_lan(self) -> None:
def internal(port_str: str):
port_str = port_str.strip()
if not port_str:
port = 0
else:
try:
port = int(port_str)
except ValueError:
LabelScreen.show_message(f'Not a valid integer: {port_str}', closed_callback=screen.show)
return
if 0 <= port < 65536:
pipe = globals.singleplayer_pipe_out
if pipe is None:
return
write_pipe(pipe, PipeCommandsToServer.OPEN_TO_LAN)
write_pipe(pipe, port)
pipe.flush()
globals.paused = False
else:
LabelScreen.show_message(f'Port number must be between 0 and 65535 (inclusive)', closed_callback=screen.show)
screen = QuestionScreen('Enter a port number (empty for random):', ok_callback=internal, default_text=str(PORT))
screen.show()
def disconnect(self) -> None:
pygame.event.post(pygame.event.Event(SERVER_DISCONNECT_EVENT, reason=None))
globals.paused = False | and_beyond/client/ui/pause_menu.py | import logging
import pygame
import pygame.event
from and_beyond.client import globals
from and_beyond.client.consts import SERVER_DISCONNECT_EVENT
from and_beyond.client.ui import Ui, UiButton, UiLabel
from and_beyond.client.ui.label_screen import LabelScreen
from and_beyond.client.ui.options_menu import OptionsMenu
from and_beyond.client.ui.question_screen import QuestionScreen
from and_beyond.common import PORT
from and_beyond.pipe_commands import PipeCommandsToServer, write_pipe
from pygame import *
from pygame.locals import *
class PauseMenu(Ui):
open_to_lan_button: UiButton
disconnect_button: UiButton
def __init__(self) -> None:
super().__init__([
UiLabel('Game Paused'),
UiButton('Continue Game', self.continue_game),
UiButton('Options', self.show_options),
])
self.open_to_lan_button = UiButton('Open to LAN', self.open_to_lan)
self.disconnect_button = UiButton('Disconnect', self.disconnect)
self.elements.extend((
self.open_to_lan_button,
self.disconnect_button,
))
def draw_and_call(self, surf: pygame.surface.Surface):
gray = Surface(surf.get_size()).convert_alpha()
gray.fill((0, 0, 0, 128))
surf.blit(gray, gray.get_rect())
if globals.ui_override is not None:
return globals.ui_override.draw_and_call(surf)
self.open_to_lan_button.hidden = globals.singleplayer_popen is None
self.disconnect_button.label = 'Disconnect' if globals.singleplayer_popen is None else 'Save and Quit'
return super().draw_and_call(surf)
def pause_game(self) -> None:
pipe = globals.singleplayer_pipe_out
if pipe is not None:
logging.debug('Sending pause command...')
write_pipe(pipe, PipeCommandsToServer.PAUSE)
pipe.flush()
globals.paused = True
def continue_game(self) -> None:
pipe = globals.singleplayer_pipe_out
if pipe is not None:
logging.debug('Sending unpause command...')
write_pipe(pipe, PipeCommandsToServer.UNPAUSE)
pipe.flush()
globals.paused = False
def show_options(self) -> None:
globals.ui_override = OptionsMenu()
def open_to_lan(self) -> None:
def internal(port_str: str):
port_str = port_str.strip()
if not port_str:
port = 0
else:
try:
port = int(port_str)
except ValueError:
LabelScreen.show_message(f'Not a valid integer: {port_str}', closed_callback=screen.show)
return
if 0 <= port < 65536:
pipe = globals.singleplayer_pipe_out
if pipe is None:
return
write_pipe(pipe, PipeCommandsToServer.OPEN_TO_LAN)
write_pipe(pipe, port)
pipe.flush()
globals.paused = False
else:
LabelScreen.show_message(f'Port number must be between 0 and 65535 (inclusive)', closed_callback=screen.show)
screen = QuestionScreen('Enter a port number (empty for random):', ok_callback=internal, default_text=str(PORT))
screen.show()
def disconnect(self) -> None:
pygame.event.post(pygame.event.Event(SERVER_DISCONNECT_EVENT, reason=None))
globals.paused = False | 0.272799 | 0.091463 |
# Copyright (c) 2016 GoSecure Inc.
OPCODES = {
0 : "NOP",
1 : "ADD",
2 : "SUB",
3 : "MUL",
4 : "DIV",
5 : "MOD",
6 : "SL",
7 : "SR",
8 : "CONCAT",
9 : "BW_OR",
10 : "BW_AND",
11 : "BW_XOR",
12 : "BW_NOT",
13 : "BOOL_NOT",
14 : "BOOL_XOR",
15 : "IS_IDENTICAL",
16 : "IS_NOT_IDENTICAL",
17 : "IS_EQUAL",
18 : "IS_NOT_EQUAL",
19 : "IS_SMALLER",
20 : "IS_SMALLER_OR_EQUAL",
21 : "CAST",
22 : "QM_ASSIGN",
23 : "ASSIGN_ADD",
24 : "ASSIGN_SUB",
25 : "ASSIGN_MUL",
26 : "ASSIGN_DIV",
27 : "ASSIGN_MOD",
28 : "ASSIGN_SL",
29 : "ASSIGN_SR",
30 : "ASSIGN_CONCAT",
31 : "ASSIGN_BW_OR",
32 : "ASSIGN_BW_AND",
33 : "ASSIGN_BW_XOR",
34 : "PRE_INC",
35 : "PRE_DEC",
36 : "POST_INC",
37 : "POST_DEC",
38 : "ASSIGN",
39 : "ASSIGN_REF",
40 : "ECHO",
41 : "PRINT",
42 : "JMP",
43 : "JMPZ",
44 : "JMPNZ",
45 : "JMPZNZ",
46 : "JMPZ_EX",
47 : "JMPNZ_EX",
48 : "CASE",
49 : "SWITCH_FREE",
50 : "BRK",
51 : "CONT",
52 : "BOOL",
53 : "INIT_STRING",
54 : "ADD_CHAR",
55 : "ADD_STRING",
56 : "ADD_VAR",
57 : "BEGIN_SILENCE",
58 : "END_SILENCE",
59 : "INIT_FCALL_BY_NAME",
60 : "DO_FCALL",
61 : "DO_FCALL_BY_NAME",
62 : "RETURN",
63 : "RECV",
64 : "RECV_INIT",
65 : "SEND_VAL",
66 : "SEND_VAR",
67 : "SEND_REF",
68 : "NEW",
69 : "INIT_NS_FCALL_BY_NAME",
70 : "FREE",
71 : "INIT_ARRAY",
72 : "ADD_ARRAY_ELEMENT",
73 : "INCLUDE_OR_EVAL",
74 : "UNSET_VAR",
75 : "UNSET_DIM",
76 : "UNSET_OBJ",
77 : "FE_RESET",
78 : "FE_FETCH",
79 : "EXIT",
80 : "FETCH_R",
81 : "FETCH_DIM_R",
82 : "FETCH_OBJ_R",
83 : "FETCH_W",
84 : "FETCH_DIM_W",
85 : "FETCH_OBJ_W",
86 : "FETCH_RW",
87 : "FETCH_DIM_RW",
88 : "FETCH_OBJ_RW",
89 : "FETCH_IS",
90 : "FETCH_DIM_IS",
91 : "FETCH_OBJ_IS",
92 : "FETCH_FUNC_ARG",
93 : "FETCH_DIM_FUNC_ARG",
94 : "FETCH_OBJ_FUNC_ARG",
95 : "FETCH_UNSET",
96 : "FETCH_DIM_UNSET",
97 : "FETCH_OBJ_UNSET",
98 : "FETCH_DIM_TMP_VAR",
99 : "FETCH_CONSTANT",
100 : "GOTO",
101 : "EXT_STMT",
102 : "EXT_FCALL_BEGIN",
103 : "EXT_FCALL_END",
104 : "EXT_NOP",
105 : "TICKS",
106 : "SEND_VAR_NO_REF",
107 : "CATCH",
108 : "THROW",
109 : "FETCH_CLASS",
110 : "CLONE",
111 : "RETURN_BY_REF",
112 : "INIT_METHOD_CALL",
113 : "INIT_STATIC_METHOD_CALL",
114 : "ISSET_ISEMPTY_VAR",
115 : "ISSET_ISEMPTY_DIM_OBJ",
116 : "SEND_VAL_EX",
117 : "SEND_VAR",
118 : "INIT_USER_CALL",
119 : "SEND_ARRAY",
120 : "SEND_USER",
121 : "STRLEN",
122 : "DEFINED",
123 : "TYPE_CHECK",
124 : "VERIFY_RETURN_TYPE",
125 : "FE_RESET_RW",
126 : "FE_FETCH_RW",
127 : "FE_FREE",
128 : "INIT_DYNAMIC_CALL",
129 : "DO_ICALL",
130 : "DO_UCALL",
131 : "DO_FCALL_BY_NAME",
132 : "PRE_INC_OBJ",
133 : "PRE_DEC_OBJ",
134 : "POST_INC_OBJ",
135 : "POST_DEC_OBJ",
136 : "ASSIGN_OBJ",
137 : "OP_DATA",
138 : "INSTANCEOF",
139 : "DECLARE_CLASS",
140 : "DECLARE_INHERITED_CLASS",
141 : "DECLARE_FUNCTION",
142 : "RAISE_ABSTRACT_ERROR",
143 : "DECLARE_CONST",
144 : "ADD_INTERFACE",
145 : "DECLARE_INHERITED_CLASS_DELAYED",
146 : "VERIFY_ABSTRACT_CLASS",
147 : "ASSIGN_DIM",
148 : "ISSET_ISEMPTY_PROP_OBJ",
149 : "HANDLE_EXCEPTION",
150 : "USER_OPCODE",
152 : "ZEND_JMP_SET",
153 : "ZEND_DECLARE_LAMBDA_FUNCTION",
154 : "ZEND_ADD_TRAIT",
155 : "ZEND_BIND_TRAITS",
156 : "ZEND_SEPARATE",
157 : "ZEND_FETCH_CLASS_NAME",
158 : "ZEND_CALL_TRAMPOLINE",
159 : "ZEND_DISCARD_EXCEPTION",
160 : "ZEND_YIELD",
161 : "ZEND_GENERATOR_RETURN",
162 : "ZEND_FAST_CALL",
163 : "ZEND_FAST_RET",
164 : "ZEND_RECV_VARIADIC",
165 : "ZEND_SEND_UNPACK",
166 : "ZEND_POW",
167 : "ZEND_ASSIGN_POW",
168 : "ZEND_BIND_GLOBAL",
169 : "ZEND_COALESCE",
170 : "ZEND_SPACESHIP",
171 : "ZEND_DECLARE_ANON_CLASS",
172 : "ZEND_DECLARE_ANON_INHERITED_CLASS",
}
# regular data types
IS_UNDEF = 0
IS_NULL = 1
IS_FALSE = 2
IS_TRUE = 3
IS_LONG = 4
IS_DOUBLE = 5
IS_STRING = 6
IS_ARRAY = 7
IS_OBJECT = 8
IS_RESOURCE = 9
IS_REFERENCE = 10
# constant expressions
IS_CONSTANT = 11
IS_CONSTANT_AST = 12
# fake types
_IS_BOOL = 13
IS_CALLABLE = 14
IS_VOID = 18
# internal types
IS_INDIRECT = 15
IS_PTR = 17
_IS_ERROR = 19
# Op Types
IS_CONST = 1 << 0
IS_TMP_VAR = 1 << 1
IS_VAR = 1 << 2
IS_UNUSED = 1 << 3
IS_CV = 1 << 4 | analysis_tools/definitions.py |
# Copyright (c) 2016 GoSecure Inc.
OPCODES = {
0 : "NOP",
1 : "ADD",
2 : "SUB",
3 : "MUL",
4 : "DIV",
5 : "MOD",
6 : "SL",
7 : "SR",
8 : "CONCAT",
9 : "BW_OR",
10 : "BW_AND",
11 : "BW_XOR",
12 : "BW_NOT",
13 : "BOOL_NOT",
14 : "BOOL_XOR",
15 : "IS_IDENTICAL",
16 : "IS_NOT_IDENTICAL",
17 : "IS_EQUAL",
18 : "IS_NOT_EQUAL",
19 : "IS_SMALLER",
20 : "IS_SMALLER_OR_EQUAL",
21 : "CAST",
22 : "QM_ASSIGN",
23 : "ASSIGN_ADD",
24 : "ASSIGN_SUB",
25 : "ASSIGN_MUL",
26 : "ASSIGN_DIV",
27 : "ASSIGN_MOD",
28 : "ASSIGN_SL",
29 : "ASSIGN_SR",
30 : "ASSIGN_CONCAT",
31 : "ASSIGN_BW_OR",
32 : "ASSIGN_BW_AND",
33 : "ASSIGN_BW_XOR",
34 : "PRE_INC",
35 : "PRE_DEC",
36 : "POST_INC",
37 : "POST_DEC",
38 : "ASSIGN",
39 : "ASSIGN_REF",
40 : "ECHO",
41 : "PRINT",
42 : "JMP",
43 : "JMPZ",
44 : "JMPNZ",
45 : "JMPZNZ",
46 : "JMPZ_EX",
47 : "JMPNZ_EX",
48 : "CASE",
49 : "SWITCH_FREE",
50 : "BRK",
51 : "CONT",
52 : "BOOL",
53 : "INIT_STRING",
54 : "ADD_CHAR",
55 : "ADD_STRING",
56 : "ADD_VAR",
57 : "BEGIN_SILENCE",
58 : "END_SILENCE",
59 : "INIT_FCALL_BY_NAME",
60 : "DO_FCALL",
61 : "DO_FCALL_BY_NAME",
62 : "RETURN",
63 : "RECV",
64 : "RECV_INIT",
65 : "SEND_VAL",
66 : "SEND_VAR",
67 : "SEND_REF",
68 : "NEW",
69 : "INIT_NS_FCALL_BY_NAME",
70 : "FREE",
71 : "INIT_ARRAY",
72 : "ADD_ARRAY_ELEMENT",
73 : "INCLUDE_OR_EVAL",
74 : "UNSET_VAR",
75 : "UNSET_DIM",
76 : "UNSET_OBJ",
77 : "FE_RESET",
78 : "FE_FETCH",
79 : "EXIT",
80 : "FETCH_R",
81 : "FETCH_DIM_R",
82 : "FETCH_OBJ_R",
83 : "FETCH_W",
84 : "FETCH_DIM_W",
85 : "FETCH_OBJ_W",
86 : "FETCH_RW",
87 : "FETCH_DIM_RW",
88 : "FETCH_OBJ_RW",
89 : "FETCH_IS",
90 : "FETCH_DIM_IS",
91 : "FETCH_OBJ_IS",
92 : "FETCH_FUNC_ARG",
93 : "FETCH_DIM_FUNC_ARG",
94 : "FETCH_OBJ_FUNC_ARG",
95 : "FETCH_UNSET",
96 : "FETCH_DIM_UNSET",
97 : "FETCH_OBJ_UNSET",
98 : "FETCH_DIM_TMP_VAR",
99 : "FETCH_CONSTANT",
100 : "GOTO",
101 : "EXT_STMT",
102 : "EXT_FCALL_BEGIN",
103 : "EXT_FCALL_END",
104 : "EXT_NOP",
105 : "TICKS",
106 : "SEND_VAR_NO_REF",
107 : "CATCH",
108 : "THROW",
109 : "FETCH_CLASS",
110 : "CLONE",
111 : "RETURN_BY_REF",
112 : "INIT_METHOD_CALL",
113 : "INIT_STATIC_METHOD_CALL",
114 : "ISSET_ISEMPTY_VAR",
115 : "ISSET_ISEMPTY_DIM_OBJ",
116 : "SEND_VAL_EX",
117 : "SEND_VAR",
118 : "INIT_USER_CALL",
119 : "SEND_ARRAY",
120 : "SEND_USER",
121 : "STRLEN",
122 : "DEFINED",
123 : "TYPE_CHECK",
124 : "VERIFY_RETURN_TYPE",
125 : "FE_RESET_RW",
126 : "FE_FETCH_RW",
127 : "FE_FREE",
128 : "INIT_DYNAMIC_CALL",
129 : "DO_ICALL",
130 : "DO_UCALL",
131 : "DO_FCALL_BY_NAME",
132 : "PRE_INC_OBJ",
133 : "PRE_DEC_OBJ",
134 : "POST_INC_OBJ",
135 : "POST_DEC_OBJ",
136 : "ASSIGN_OBJ",
137 : "OP_DATA",
138 : "INSTANCEOF",
139 : "DECLARE_CLASS",
140 : "DECLARE_INHERITED_CLASS",
141 : "DECLARE_FUNCTION",
142 : "RAISE_ABSTRACT_ERROR",
143 : "DECLARE_CONST",
144 : "ADD_INTERFACE",
145 : "DECLARE_INHERITED_CLASS_DELAYED",
146 : "VERIFY_ABSTRACT_CLASS",
147 : "ASSIGN_DIM",
148 : "ISSET_ISEMPTY_PROP_OBJ",
149 : "HANDLE_EXCEPTION",
150 : "USER_OPCODE",
152 : "ZEND_JMP_SET",
153 : "ZEND_DECLARE_LAMBDA_FUNCTION",
154 : "ZEND_ADD_TRAIT",
155 : "ZEND_BIND_TRAITS",
156 : "ZEND_SEPARATE",
157 : "ZEND_FETCH_CLASS_NAME",
158 : "ZEND_CALL_TRAMPOLINE",
159 : "ZEND_DISCARD_EXCEPTION",
160 : "ZEND_YIELD",
161 : "ZEND_GENERATOR_RETURN",
162 : "ZEND_FAST_CALL",
163 : "ZEND_FAST_RET",
164 : "ZEND_RECV_VARIADIC",
165 : "ZEND_SEND_UNPACK",
166 : "ZEND_POW",
167 : "ZEND_ASSIGN_POW",
168 : "ZEND_BIND_GLOBAL",
169 : "ZEND_COALESCE",
170 : "ZEND_SPACESHIP",
171 : "ZEND_DECLARE_ANON_CLASS",
172 : "ZEND_DECLARE_ANON_INHERITED_CLASS",
}
# regular data types
IS_UNDEF = 0
IS_NULL = 1
IS_FALSE = 2
IS_TRUE = 3
IS_LONG = 4
IS_DOUBLE = 5
IS_STRING = 6
IS_ARRAY = 7
IS_OBJECT = 8
IS_RESOURCE = 9
IS_REFERENCE = 10
# constant expressions
IS_CONSTANT = 11
IS_CONSTANT_AST = 12
# fake types
_IS_BOOL = 13
IS_CALLABLE = 14
IS_VOID = 18
# internal types
IS_INDIRECT = 15
IS_PTR = 17
_IS_ERROR = 19
# Op Types
IS_CONST = 1 << 0
IS_TMP_VAR = 1 << 1
IS_VAR = 1 << 2
IS_UNUSED = 1 << 3
IS_CV = 1 << 4 | 0.357904 | 0.465752 |
try:
    import tensorflow as tf
except ImportError:
    # Parenthesized single-argument print works under both Python 2 and 3
    # (the original bare `print` statement was a SyntaxError on Python 3).
    print('Need to install the "tensorflow" module (available on Linux and MacOS only for python 2.7) : "pip install tensorflow" or with Anaconda "conda install tensorflow"')
    exit()

import sys

# Make the project source directories importable when run from examples/.
sys.path.insert(0, '../src/')  # To be able to import packages from parent directory
sys.path.insert(0, '../src/Methods')

from utils.DAE import DAE
from tensorflow.examples.tutorials.mnist import input_data

# MNIST digits; labels one-hot encoded.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

learning_rate = 0.01
log_path = '/tmp/y101'  # TensorBoard log directory

x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 images
Y = tf.placeholder(tf.float32, [None, 10])   # one-hot digit labels

# Pre-train stacked denoising autoencoders on 50k unlabeled images.
# DAE returns dicts of layer weights (W1..W3) and biases (B1..B3).
data = mnist.train.next_batch(50000)[0]
W1, B1 = DAE(data, [400, 102, 10])
Weights1, Biases1 = W1.copy(), B1.copy()

# It is important to reuse the session created inside DAE.py.
sess = tf.get_default_session()

# TensorBoard histograms, one distinct tag per variable.
# (Bug fix: W3 was previously logged under the duplicate tag 'RN1_W2',
# and B2 was logged twice while B3 was never logged.)
tf.summary.histogram('RN1_W1', Weights1['W1'])
tf.summary.histogram('RN1_W2', Weights1['W2'])
tf.summary.histogram('RN1_W3', Weights1['W3'])
tf.summary.histogram('RN1_B1', Biases1['B1'])
tf.summary.histogram('RN1_B2', Biases1['B2'])
tf.summary.histogram('RN1_B3', Biases1['B3'])


def RN1(x):
    """Three-layer fully connected network initialized from the DAE weights."""
    L1 = tf.sigmoid(tf.add(tf.matmul(x, Weights1['W1']), Biases1['B1']))
    L2 = tf.sigmoid(tf.add(tf.matmul(L1, Weights1['W2']), Biases1['B2']))
    l3 = tf.sigmoid(tf.add(tf.matmul(L2, Weights1['W3']), Biases1['B3']))
    return l3


# NOTE(review): softmax_cross_entropy_with_logits expects *pre-activation*
# logits, but RN1 applies a sigmoid on its final layer -- confirm whether the
# last sigmoid should be removed (left unchanged to preserve training behavior).
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=RN1(x)))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(RN1(x), 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar('perte', loss)
tf.summary.scalar('accuracy', accuracy)
merge_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path)
writer.add_graph(sess.graph)

init = tf.global_variables_initializer()
sess.run(init)
# Snapshot of the first-layer weights (debugging aid; not used further).
W_aux = Weights1['W1'].eval()

print("\nstarting training")
for i in range(1000):
    batch = mnist.train.next_batch(100)
    _, s = sess.run([optimizer, merge_op], feed_dict={x: batch[0], Y: batch[1]})
    writer.add_summary(s, i)

test = mnist.test.next_batch(10000)
acc = sess.run(accuracy, feed_dict={x: test[0], Y: test[1]})
print("your accuracy on the testing set is : " + str(acc))
try:
    import tensorflow as tf
except ImportError:
    # print() with a single argument is valid on Python 2 and 3 alike;
    # the bare `print` statement here was a SyntaxError under Python 3.
    print('Need to install the "tensorflow" module (available on Linux and MacOS only for python 2.7) : "pip install tensorflow" or with Anaconda "conda install tensorflow"')
    exit()

import sys

# Allow importing packages from the project source tree.
sys.path.insert(0, '../src/')  # To be able to import packages from parent directory
sys.path.insert(0, '../src/Methods')

from utils.DAE import DAE
from tensorflow.examples.tutorials.mnist import input_data

# Load MNIST with one-hot labels.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

learning_rate = 0.01
log_path = '/tmp/y101'  # where TensorBoard summaries are written

x = tf.placeholder(tf.float32, [None, 784])  # flattened 28x28 input images
Y = tf.placeholder(tf.float32, [None, 10])   # one-hot labels

# Unsupervised pre-training: stacked denoising autoencoders over 50k images.
# Returns dicts of per-layer weights (W1..W3) and biases (B1..B3).
data = mnist.train.next_batch(50000)[0]
W1, B1 = DAE(data, [400, 102, 10])
Weights1, Biases1 = W1.copy(), B1.copy()

# Reuse the TF session that DAE.py created -- do not open a new one.
sess = tf.get_default_session()

# Bug fix: the third weight histogram reused the tag 'RN1_W2', and the
# bias summaries logged B2 twice and never B3; each variable now has a
# unique tag.
tf.summary.histogram('RN1_W1', Weights1['W1'])
tf.summary.histogram('RN1_W2', Weights1['W2'])
tf.summary.histogram('RN1_W3', Weights1['W3'])
tf.summary.histogram('RN1_B1', Biases1['B1'])
tf.summary.histogram('RN1_B2', Biases1['B2'])
tf.summary.histogram('RN1_B3', Biases1['B3'])


def RN1(x):
    """Forward pass of the 784->400->102->10 network seeded by the DAE."""
    L1 = tf.sigmoid(tf.add(tf.matmul(x, Weights1['W1']), Biases1['B1']))
    L2 = tf.sigmoid(tf.add(tf.matmul(L1, Weights1['W2']), Biases1['B2']))
    l3 = tf.sigmoid(tf.add(tf.matmul(L2, Weights1['W3']), Biases1['B3']))
    return l3


# NOTE(review): RN1 ends in a sigmoid, yet softmax_cross_entropy_with_logits
# expects raw pre-activation logits -- verify whether the final activation
# should be dropped (behavior intentionally left unchanged here).
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=RN1(x)))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)

correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(RN1(x), 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar('perte', loss)
tf.summary.scalar('accuracy', accuracy)
merge_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(log_path)
writer.add_graph(sess.graph)

init = tf.global_variables_initializer()
sess.run(init)
# Kept as a debugging snapshot of the pre-trained first layer.
W_aux = Weights1['W1'].eval()

print("\nstarting training")
for i in range(1000):
    batch = mnist.train.next_batch(100)
    _, s = sess.run([optimizer, merge_op], feed_dict={x: batch[0], Y: batch[1]})
    writer.add_summary(s, i)

test = mnist.test.next_batch(10000)
acc = sess.run(accuracy, feed_dict={x: test[0], Y: test[1]})
print("your accuracy on the testing set is : " + str(acc))
from __future__ import absolute_import, print_function
from abc import ABCMeta, abstractmethod
import six
from .utilities import inherit_docstrings, Parantheses
__all__ = ['ReprHelper', 'PrettyReprHelper']
@six.add_metaclass(ABCMeta)
class BaseReprHelper(object):
    """Abstract base for repr builders.

    Shared by :class:`ReprHelper` (plain string building) and
    :class:`PrettyReprHelper` (drives an :py:mod:`IPython.lib.pretty`
    printer); subclasses implement the four argument-appending
    operations declared below.
    """

    def __init__(self, other):
        # NOTE: "Parantheses" is the (misspelled) public name exported by
        # .utilities; kept as-is for API compatibility.
        self.parantheses = Parantheses(left='(', right=')')
        self.other = other                 # the instance being repr'd
        self.other_cls = other.__class__   # its class, for the name prefix
        self.iarg = 0                      # number of arguments emitted so far
        self.keyword_started = False       # True once any keyword arg was added

    @property
    def parantheses(self):
        # Bracket pair used around the argument list.
        return self._parantheses

    @parantheses.setter
    def parantheses(self, value):
        # Accept any two-element iterable; Parantheses._make coerces it.
        self._parantheses = Parantheses._make(value)

    @abstractmethod
    def positional_from_attr(self, attr_name):
        """Add positional argument by retrieving attribute `attr_name`

        :param str attr_name: Attribute name such that
            :code:`getattr(self, attr_name)` returns the correct value.
        """

    @abstractmethod
    def positional_with_value(self, value, raw=False):
        """Add positional argument with value `value`

        :param value: Value for positional argument.
        :param bool raw: If false (default), :code:`repr(value)` is used.
            Otherwise, the value is used as is.
        """

    @abstractmethod
    def keyword_from_attr(self, name, attr_name=None):
        """Add keyword argument from attribute `attr_name`

        :param str name: Keyword name. Also used as attribute name such that
            :code:`getattr(self, name)` returns the correct value.
        :param str attr_name: Attribute name, if different than `name`.

        .. versionchanged:: 1.4
            Method argument names swapped, didn't make sense before.
        """

    @abstractmethod
    def keyword_with_value(self, name, value, raw=False):
        """Add keyword argument `name` with value `value`.

        :param str name: Keyword name.
        :param value: Value for keyword argument.
        :param bool raw: If false (default), :code:`repr(value)` is used.
            Otherwise, the value is used as is.
        """
@inherit_docstrings
class ReprHelper(BaseReprHelper):
    """Build a classic ``ClassName(arg, kw=value)`` repr string by hand.

    Typical usage::

        def __repr__(self)
            r = ReprHelper(self)
            r.keyword_from_attr('name')
            return str(r)

    The surrounding brackets can be customised through the ``parantheses``
    property; it must be assigned before ``str(r)`` is called::

        def __repr__(self)
            r = ReprHelper(self)
            r.parantheses = ('<', '>')
            r.keyword_from_attr('name')
            return str(r)
    """

    def __init__(self, other):
        # Accumulates the text fragments joined by __str__.
        self.repr_parts = []
        super(ReprHelper, self).__init__(other)

    def positional_from_attr(self, attr_name):
        if self.keyword_started:
            raise ValueError('positional arguments cannot '
                             'follow keyword arguments')
        self._ensure_comma()
        self.repr_parts.append(repr(getattr(self.other, attr_name)))
        self.iarg += 1

    def positional_with_value(self, value, raw=False):
        if self.keyword_started:
            raise ValueError('positional arguments cannot '
                             'follow keyword arguments')
        self._ensure_comma()
        self.repr_parts.append(value if raw else repr(value))
        self.iarg += 1

    def keyword_from_attr(self, name, attr_name=None):
        self.keyword_started = True
        self._ensure_comma()
        # Fall back to `name` when no separate attribute name is given.
        attr_value = getattr(self.other, attr_name or name)
        self.repr_parts.append('{}={!r}'.format(name, attr_value))
        self.iarg += 1

    def keyword_with_value(self, name, value, raw=False):
        self.keyword_started = True
        self._ensure_comma()
        rendered = value if raw else repr(value)
        self.repr_parts.append('{}={}'.format(name, rendered))
        self.iarg += 1

    def _ensure_comma(self):
        # Insert a separator unless this is the first argument.
        if self.iarg:
            self.repr_parts.append(', ')

    def __str__(self):
        pieces = [self.other_cls.__name__, self.parantheses.left]
        pieces.extend(self.repr_parts)
        pieces.append(self.parantheses.right)
        return ''.join(pieces)
class PrettyReprHelper(BaseReprHelper):
    """Help manual construction of :code:`_repr_pretty_` for
    :py:mod:`IPython.lib.pretty`.

    It should be used as follows:

    .. code-block:: python

        def _repr_pretty_(self, p, cycle)
            with PrettyReprHelper(self, p, cycle) as r:
                r.keyword_from_attr('name')

    .. versionchanged:: 1.4
        `parantheses` property added. Must be set before
        :py:meth:`PrettyReprHelper.open` is called (usually by context manager).

    .. code-block:: python

        def _repr_pretty_(self, p, cycle)
            r = PrettyReprHelper(self, p, cycle)
            r.parantheses = ('<', '>')
            with r:
                r.keyword_from_attr('name')
    """

    def __init__(self, other, p, cycle):
        # p: IPython pretty printer; cycle: True when a repr recursion
        # cycle was detected by the printer.
        self.p = p
        self.cycle = cycle
        super(PrettyReprHelper, self).__init__(other)

    def positional_from_attr(self, attr_name):
        # On a cycle emit nothing here; close() prints '...' instead.
        if self.cycle:
            return
        if self.keyword_started:
            raise ValueError('positional arguments cannot '
                             'follow keyword arguments')
        self._ensure_comma()
        self.p.pretty(getattr(self.other, attr_name))
        self.iarg += 1

    def positional_with_value(self, value, raw=False):
        if self.cycle:
            return
        if self.keyword_started:
            raise ValueError('positional arguments cannot '
                             'follow keyword arguments')
        self._ensure_comma()
        if raw:
            # Raw values are emitted verbatim, not pretty-printed.
            self.p.text(str(value))
        else:
            self.p.pretty(value)
        self.iarg += 1

    def keyword_from_attr(self, name, attr_name=None):
        if self.cycle:
            return
        self.keyword_started = True
        self._ensure_comma()
        attr_name = attr_name or name
        # Group so the printer indents wrapped values past "name=".
        with self.p.group(len(name) + 1, name + '='):
            self.p.pretty(getattr(self.other, attr_name))
        self.iarg += 1

    def keyword_with_value(self, name, value, raw=False):
        if self.cycle:
            return
        self.keyword_started = True
        self._ensure_comma()
        with self.p.group(len(name) + 1, name + '='):
            if raw:
                self.p.text(str(value))
            else:
                self.p.pretty(value)
        self.iarg += 1

    def _ensure_comma(self):
        # Separator plus a point where the printer may break the line.
        if self.iarg:
            self.p.text(',')
            self.p.breakable()

    def __enter__(self):
        """Return self for use as context manager.

        Context manager calls self.close() on exit."""
        self.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Call self.close() during exit from context manager."""
        if exc_type:
            # Propagate the exception; close() is skipped deliberately.
            return False
        self.close()

    def open(self):
        """Open group with class name.

        This is normally called by using as a context manager.
        """
        clsname = self.other_cls.__name__
        self.p.begin_group(len(clsname) + 1, clsname + self.parantheses.left)

    def close(self):
        """Close group with final bracket.

        This is normally called by using as a context manager.
        """
        if self.cycle:
            # Cycle: the argument methods emitted nothing, so the final
            # output reads ClassName(...).
            self.p.text('...')
        clsname = self.other_cls.__name__
        self.p.end_group(len(clsname) + 1, self.parantheses.right)
from __future__ import absolute_import, print_function
from abc import ABCMeta, abstractmethod
import six
from .utilities import inherit_docstrings, Parantheses
__all__ = ['ReprHelper', 'PrettyReprHelper']
@six.add_metaclass(ABCMeta)
class BaseReprHelper(object):
    """Common machinery for the two repr-building helpers.

    Concrete subclasses (``ReprHelper``, ``PrettyReprHelper``) provide
    the four abstract argument-appending methods declared here.
    """

    def __init__(self, other):
        # Default bracket pair; "Parantheses" spelling comes from the
        # public .utilities API and cannot be changed here.
        self.parantheses = Parantheses(left='(', right=')')
        self.other = other                 # object whose repr is being built
        self.other_cls = other.__class__
        self.iarg = 0                      # count of arguments added so far
        self.keyword_started = False       # set once a keyword argument appears

    @property
    def parantheses(self):
        return self._parantheses

    @parantheses.setter
    def parantheses(self, value):
        # _make accepts any iterable pair (e.g. a 2-tuple).
        self._parantheses = Parantheses._make(value)

    @abstractmethod
    def positional_from_attr(self, attr_name):
        """Add positional argument by retrieving attribute `attr_name`

        :param str attr_name: Attribute name such that
            :code:`getattr(self, attr_name)` returns the correct value.
        """

    @abstractmethod
    def positional_with_value(self, value, raw=False):
        """Add positional argument with value `value`

        :param value: Value for positional argument.
        :param bool raw: If false (default), :code:`repr(value)` is used.
            Otherwise, the value is used as is.
        """

    @abstractmethod
    def keyword_from_attr(self, name, attr_name=None):
        """Add keyword argument from attribute `attr_name`

        :param str name: Keyword name. Also used as attribute name such that
            :code:`getattr(self, name)` returns the correct value.
        :param str attr_name: Attribute name, if different than `name`.

        .. versionchanged:: 1.4
            Method argument names swapped, didn't make sense before.
        """

    @abstractmethod
    def keyword_with_value(self, name, value, raw=False):
        """Add keyword argument `name` with value `value`.

        :param str name: Keyword name.
        :param value: Value for keyword argument.
        :param bool raw: If false (default), :code:`repr(value)` is used.
            Otherwise, the value is used as is.
        """
@inherit_docstrings
class ReprHelper(BaseReprHelper):
"""Help manual construction of :code:`__repr__`.
It should be used as follows:
.. code-block:: python
def __repr__(self)
r = ReprHelper(self)
r.keyword_from_attr('name')
return str(r)
.. versionchanged:: 1.4
`parantheses` property added. Must be set before `str(r)` is called:
.. code-block:: python
def __repr__(self)
r = ReprHelper(self)
r.parantheses = ('<', '>')
r.keyword_from_attr('name')
return str(r)
"""
def __init__(self, other):
self.repr_parts = []
super(ReprHelper, self).__init__(other)
def positional_from_attr(self, attr_name):
if self.keyword_started:
raise ValueError('positional arguments cannot '
'follow keyword arguments')
self._ensure_comma()
self.repr_parts.append(repr(getattr(self.other, attr_name)))
self.iarg += 1
def positional_with_value(self, value, raw=False):
if self.keyword_started:
raise ValueError('positional arguments cannot '
'follow keyword arguments')
self._ensure_comma()
value = value if raw else repr(value)
self.repr_parts.append(value)
self.iarg += 1
def keyword_from_attr(self, name, attr_name=None):
self.keyword_started = True
self._ensure_comma()
attr_name = attr_name or name
self.repr_parts.append(
'{}={!r}'.format(name, getattr(self.other, attr_name)))
self.iarg += 1
def keyword_with_value(self, name, value, raw=False):
self.keyword_started = True
self._ensure_comma()
value = value if raw else repr(value)
self.repr_parts.append('{}={}'.format(name, value))
self.iarg += 1
def _ensure_comma(self):
if self.iarg:
self.repr_parts.append(', ')
def __str__(self):
beginning = [self.other_cls.__name__, self.parantheses.left]
end = [self.parantheses.right]
all_parts = beginning + self.repr_parts + end
return ''.join(all_parts)
class PrettyReprHelper(BaseReprHelper):
"""Help manual construction of :code:`_repr_pretty_` for
:py:mod:`IPython.lib.pretty`.
It should be used as follows:
.. code-block:: python
def _repr_pretty_(self, p, cycle)
with PrettyReprHelper(self, p, cycle) as r:
r.keyword_from_attr('name')
.. versionchanged:: 1.4
`parantheses` property added. Must be set before
:py:meth:`PrettyReprHelper.open` is called (usually by context manager).
.. code-block:: python
def _repr_pretty_(self, p, cycle)
r = PrettyReprHelper(self, p, cycle)
r.parantheses = ('<', '>')
with r:
r.keyword_from_attr('name')
"""
def __init__(self, other, p, cycle):
self.p = p
self.cycle = cycle
super(PrettyReprHelper, self).__init__(other)
def positional_from_attr(self, attr_name):
if self.cycle:
return
if self.keyword_started:
raise ValueError('positional arguments cannot '
'follow keyword arguments')
self._ensure_comma()
self.p.pretty(getattr(self.other, attr_name))
self.iarg += 1
def positional_with_value(self, value, raw=False):
if self.cycle:
return
if self.keyword_started:
raise ValueError('positional arguments cannot '
'follow keyword arguments')
self._ensure_comma()
if raw:
self.p.text(str(value))
else:
self.p.pretty(value)
self.iarg += 1
def keyword_from_attr(self, name, attr_name=None):
if self.cycle:
return
self.keyword_started = True
self._ensure_comma()
attr_name = attr_name or name
with self.p.group(len(name) + 1, name + '='):
self.p.pretty(getattr(self.other, attr_name))
self.iarg += 1
def keyword_with_value(self, name, value, raw=False):
if self.cycle:
return
self.keyword_started = True
self._ensure_comma()
with self.p.group(len(name) + 1, name + '='):
if raw:
self.p.text(str(value))
else:
self.p.pretty(value)
self.iarg += 1
def _ensure_comma(self):
if self.iarg:
self.p.text(',')
self.p.breakable()
def __enter__(self):
"""Return self for use as context manager.
Context manager calls self.close() on exit."""
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""Call self.close() during exit from context manager."""
if exc_type:
return False
self.close()
def open(self):
"""Open group with class name.
This is normally called by using as a context manager.
"""
clsname = self.other_cls.__name__
self.p.begin_group(len(clsname) + 1, clsname + self.parantheses.left)
def close(self):
"""Close group with final bracket.
This is normally called by using as a context manager.
"""
if self.cycle:
self.p.text('...')
clsname = self.other_cls.__name__
self.p.end_group(len(clsname) + 1, self.parantheses.right) | 0.839832 | 0.154058 |
from game import Game
import sys
class TicTacToe(Game):
"""Tic Tac Toe game class."""
def __init__(self):
"""Construct new tictactoe game instance."""
self.board = ['-', '-', '-', '-', '-', '-', '-', '-', '-']
self.player = 'X'
self.winner = None
def reset(self):
"""Reset board between games."""
self.board = ['-', '-', '-', '-', '-', '-', '-', '-', '-']
self.player = 'X'
self.winner = None
def get_open_moves(self):
"""Returns list of available moves given current states and next states."""
actions = []
states = []
for i, val in enumerate(self.board):
if val == '-':
actions.append(i)
self.board[i] = self.player
states.append(self.get_state(self.board))
self.board[i] = '-'
return states, actions
def get_state(self, board):
"""Returns board state as String."""
return ''.join(board)
def is_win(self):
"""Check the board for win condition.
Possible outputs are X, O, Draw, None.
"""
# Check win condition
row_1 = self.board[0] + self.board[1] + self.board[2]
row_2 = self.board[3] + self.board[4] + self.board[5]
row_3 = self.board[6] + self.board[7] + self.board[8]
col_1 = self.board[0] + self.board[3] + self.board[6]
col_2 = self.board[1] + self.board[4] + self.board[7]
col_3 = self.board[2] + self.board[5] + self.board[8]
diag_1 = self.board[0] + self.board[4] + self.board[8]
diag_2 = self.board[2] + self.board[4] + self.board[6]
triples = [row_1, row_2, row_3, col_1, col_2, col_3, diag_1, diag_2]
for triple in triples:
if (triple == 'OOO'):
return 'O'
elif (triple == 'XXX'):
return 'X'
# Check draw condition
if '-' not in self.board:
return 'Draw'
return None
def is_valid_move(self, position):
"""Check that potential move is in a valid position.
Valid means inbounds and not occupied.
"""
if position >= 0 and position < len(self.board):
return self.board[position] == '-'
else:
return False
def make_move(self, position):
"""Makes move by setting position to player value.
Also toggles player and returns is_win result.
"""
self.board[position] = self.player
self.player = 'O' if self.player == 'X' else 'X'
return self.is_win()
def read_input(self):
"""Define game specific read in function from command line."""
return int(sys.stdin.readline()[:-1])
def print_board(self):
print('{} {} {}\n{} {} {}\n{} {} {}'.format(self.board[0], self.board[1], self.board[2],
self.board[3], self.board[4], self.board[5],
self.board[6], self.board[7], self.board[8]))
print('=====')
def print_instructions(self):
print('===============\n'
'How to play:\n'
'Possible moves are [0,9) corresponding to these spaces on the board:\n\n'
'0 | 1 | 2\n'
'3 | 4 | 5\n'
'6 | 7 | 8\n') | game/tictactoe.py | from game import Game
import sys
class TicTacToe(Game):
"""Tic Tac Toe game class."""
def __init__(self):
"""Construct new tictactoe game instance."""
self.board = ['-', '-', '-', '-', '-', '-', '-', '-', '-']
self.player = 'X'
self.winner = None
def reset(self):
"""Reset board between games."""
self.board = ['-', '-', '-', '-', '-', '-', '-', '-', '-']
self.player = 'X'
self.winner = None
def get_open_moves(self):
"""Returns list of available moves given current states and next states."""
actions = []
states = []
for i, val in enumerate(self.board):
if val == '-':
actions.append(i)
self.board[i] = self.player
states.append(self.get_state(self.board))
self.board[i] = '-'
return states, actions
def get_state(self, board):
"""Returns board state as String."""
return ''.join(board)
def is_win(self):
"""Check the board for win condition.
Possible outputs are X, O, Draw, None.
"""
# Check win condition
row_1 = self.board[0] + self.board[1] + self.board[2]
row_2 = self.board[3] + self.board[4] + self.board[5]
row_3 = self.board[6] + self.board[7] + self.board[8]
col_1 = self.board[0] + self.board[3] + self.board[6]
col_2 = self.board[1] + self.board[4] + self.board[7]
col_3 = self.board[2] + self.board[5] + self.board[8]
diag_1 = self.board[0] + self.board[4] + self.board[8]
diag_2 = self.board[2] + self.board[4] + self.board[6]
triples = [row_1, row_2, row_3, col_1, col_2, col_3, diag_1, diag_2]
for triple in triples:
if (triple == 'OOO'):
return 'O'
elif (triple == 'XXX'):
return 'X'
# Check draw condition
if '-' not in self.board:
return 'Draw'
return None
def is_valid_move(self, position):
"""Check that potential move is in a valid position.
Valid means inbounds and not occupied.
"""
if position >= 0 and position < len(self.board):
return self.board[position] == '-'
else:
return False
def make_move(self, position):
"""Makes move by setting position to player value.
Also toggles player and returns is_win result.
"""
self.board[position] = self.player
self.player = 'O' if self.player == 'X' else 'X'
return self.is_win()
def read_input(self):
"""Define game specific read in function from command line."""
return int(sys.stdin.readline()[:-1])
def print_board(self):
print('{} {} {}\n{} {} {}\n{} {} {}'.format(self.board[0], self.board[1], self.board[2],
self.board[3], self.board[4], self.board[5],
self.board[6], self.board[7], self.board[8]))
print('=====')
def print_instructions(self):
print('===============\n'
'How to play:\n'
'Possible moves are [0,9) corresponding to these spaces on the board:\n\n'
'0 | 1 | 2\n'
'3 | 4 | 5\n'
'6 | 7 | 8\n') | 0.718792 | 0.408749 |
import mock
import uuid
from cinder import test
import cinder.volume.drivers.netapp.api as ntapi
import cinder.volume.drivers.netapp.iscsi as ntap_iscsi
class NetAppDirectISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppDirectISCSIDriverTestCase, self).setUp()
self.driver = ntap_iscsi.NetAppDirectISCSIDriver(
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.fake_volume = str(uuid.uuid4())
self.fake_lun = str(uuid.uuid4())
self.fake_size = '1024'
self.fake_metadata = {
'OsType': 'linux',
'SpaceReserved': 'true',
}
self.mock_request = mock.Mock()
def tearDown(self):
super(NetAppDirectISCSIDriverTestCase, self).tearDown()
def test_create_lun(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
with mock.patch.object(ntapi.NaElement, 'create_node_with_children',
return_value=self.mock_request
) as mock_create_node:
self.driver.create_lun(self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path,
'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
self.driver.client.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_create_lun_with_qos_policy_group(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_qos_group = 'qos_1'
with mock.patch.object(ntapi.NaElement, 'create_node_with_children',
return_value=self.mock_request
) as mock_create_node:
self.driver.create_lun(self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata,
qos_policy_group=expected_qos_group)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path, 'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
self.mock_request.add_new_child.assert_called_once_with(
'qos-policy-group', expected_qos_group)
self.driver.client.invoke_successfully.assert_called_once_with(
mock.ANY, True)
class NetAppiSCSICModeTestCase(test.TestCase):
"""Test case for NetApp's C-Mode iSCSI driver."""
def setUp(self):
super(NetAppiSCSICModeTestCase, self).setUp()
self.driver = ntap_iscsi.NetAppDirectCmodeISCSIDriver(
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.driver.vserver = mock.Mock()
def tearDown(self):
super(NetAppiSCSICModeTestCase, self).tearDown()
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.driver.client.invoke_successfully = mock.Mock()
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN', block_count=bc)
self.assertEqual(2, self.driver.client.invoke_successfully.call_count)
def test_clone_lun_zero_block_count(self):
"""Test for when clone lun is not passed a block count."""
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.driver.client.invoke_successfully = mock.Mock()
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN')
self.assertEqual(1, self.driver.client.invoke_successfully.call_count)
class NetAppiSCSI7ModeTestCase(test.TestCase):
"""Test case for NetApp's 7-Mode iSCSI driver."""
def setUp(self):
super(NetAppiSCSI7ModeTestCase, self).setUp()
self.driver = ntap_iscsi.NetAppDirect7modeISCSIDriver(
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.driver.vfiler = mock.Mock()
def tearDown(self):
super(NetAppiSCSI7ModeTestCase, self).tearDown()
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN',
'Path':
'/vol/fake/lun1'})
self.driver.client.invoke_successfully = mock.Mock(
return_value=mock.MagicMock())
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._check_clone_status = mock.Mock()
self.driver._set_space_reserve = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN', block_count=bc)
self.assertEqual(2, self.driver.client.invoke_successfully.call_count)
def test_clone_lun_zero_block_count(self):
"""Test for when clone lun is not passed a block count."""
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN',
'Path':
'/vol/fake/lun1'})
self.driver.client.invoke_successfully = mock.Mock(
return_value=mock.MagicMock())
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._check_clone_status = mock.Mock()
self.driver._set_space_reserve = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN')
self.assertEqual(1, self.driver.client.invoke_successfully.call_count) | cinder/tests/volume/drivers/netapp/test_iscsi.py | import mock
import uuid
from cinder import test
import cinder.volume.drivers.netapp.api as ntapi
import cinder.volume.drivers.netapp.iscsi as ntap_iscsi
class NetAppDirectISCSIDriverTestCase(test.TestCase):
def setUp(self):
super(NetAppDirectISCSIDriverTestCase, self).setUp()
self.driver = ntap_iscsi.NetAppDirectISCSIDriver(
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.fake_volume = str(uuid.uuid4())
self.fake_lun = str(uuid.uuid4())
self.fake_size = '1024'
self.fake_metadata = {
'OsType': 'linux',
'SpaceReserved': 'true',
}
self.mock_request = mock.Mock()
def tearDown(self):
super(NetAppDirectISCSIDriverTestCase, self).tearDown()
def test_create_lun(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
with mock.patch.object(ntapi.NaElement, 'create_node_with_children',
return_value=self.mock_request
) as mock_create_node:
self.driver.create_lun(self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path,
'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
self.driver.client.invoke_successfully.assert_called_once_with(
mock.ANY, True)
def test_create_lun_with_qos_policy_group(self):
expected_path = '/vol/%s/%s' % (self.fake_volume, self.fake_lun)
expected_qos_group = 'qos_1'
with mock.patch.object(ntapi.NaElement, 'create_node_with_children',
return_value=self.mock_request
) as mock_create_node:
self.driver.create_lun(self.fake_volume,
self.fake_lun,
self.fake_size,
self.fake_metadata,
qos_policy_group=expected_qos_group)
mock_create_node.assert_called_once_with(
'lun-create-by-size',
**{'path': expected_path, 'size': self.fake_size,
'ostype': self.fake_metadata['OsType'],
'space-reservation-enabled':
self.fake_metadata['SpaceReserved']})
self.mock_request.add_new_child.assert_called_once_with(
'qos-policy-group', expected_qos_group)
self.driver.client.invoke_successfully.assert_called_once_with(
mock.ANY, True)
class NetAppiSCSICModeTestCase(test.TestCase):
"""Test case for NetApp's C-Mode iSCSI driver."""
def setUp(self):
super(NetAppiSCSICModeTestCase, self).setUp()
self.driver = ntap_iscsi.NetAppDirectCmodeISCSIDriver(
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.driver.vserver = mock.Mock()
def tearDown(self):
super(NetAppiSCSICModeTestCase, self).tearDown()
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.driver.client.invoke_successfully = mock.Mock()
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN', block_count=bc)
self.assertEqual(2, self.driver.client.invoke_successfully.call_count)
def test_clone_lun_zero_block_count(self):
"""Test for when clone lun is not passed a block count."""
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN'})
self.driver.client.invoke_successfully = mock.Mock()
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN')
self.assertEqual(1, self.driver.client.invoke_successfully.call_count)
class NetAppiSCSI7ModeTestCase(test.TestCase):
"""Test case for NetApp's 7-Mode iSCSI driver."""
def setUp(self):
super(NetAppiSCSI7ModeTestCase, self).setUp()
self.driver = ntap_iscsi.NetAppDirect7modeISCSIDriver(
configuration=mock.Mock())
self.driver.client = mock.Mock()
self.driver.vfiler = mock.Mock()
def tearDown(self):
super(NetAppiSCSI7ModeTestCase, self).tearDown()
def test_clone_lun_multiple_zapi_calls(self):
"""Test for when lun clone requires more than one zapi call."""
# Max block-ranges per call = 32, max blocks per range = 2^24
# Force 2 calls
bc = 2 ** 24 * 32 * 2
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN',
'Path':
'/vol/fake/lun1'})
self.driver.client.invoke_successfully = mock.Mock(
return_value=mock.MagicMock())
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._check_clone_status = mock.Mock()
self.driver._set_space_reserve = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN', block_count=bc)
self.assertEqual(2, self.driver.client.invoke_successfully.call_count)
def test_clone_lun_zero_block_count(self):
"""Test for when clone lun is not passed a block count."""
self.driver._get_lun_attr = mock.Mock(return_value={'Volume':
'fakeLUN',
'Path':
'/vol/fake/lun1'})
self.driver.client.invoke_successfully = mock.Mock(
return_value=mock.MagicMock())
lun = ntapi.NaElement.create_node_with_children(
'lun-info',
**{'alignment': 'indeterminate',
'block-size': '512',
'comment': '',
'creation-timestamp': '1354536362',
'is-space-alloc-enabled': 'false',
'is-space-reservation-enabled': 'true',
'mapped': 'false',
'multiprotocol-type': 'linux',
'online': 'true',
'path': '/vol/fakeLUN/lun1',
'prefix-size': '0',
'qtree': '',
'read-only': 'false',
'serial-number': '2FfGI$APyN68',
'share-state': 'none',
'size': '20971520',
'size-used': '0',
'staging': 'false',
'suffix-size': '0',
'uuid': 'cec1f3d7-3d41-11e2-9cf4-123478563412',
'volume': 'fakeLUN',
'vserver': 'fake_vserver'})
self.driver._get_lun_by_args = mock.Mock(return_value=[lun])
self.driver._add_lun_to_table = mock.Mock()
self.driver._update_stale_vols = mock.Mock()
self.driver._check_clone_status = mock.Mock()
self.driver._set_space_reserve = mock.Mock()
self.driver._clone_lun('fakeLUN', 'newFakeLUN')
self.assertEqual(1, self.driver.client.invoke_successfully.call_count) | 0.580471 | 0.182608 |
from collections.abc import Sequence
import numpy as np
class Vertex:
    """
    # VERTEX
    The vertex class is a fundamental, versatile and easy to use class containing positional and color data.
    The data is stored as x,y,z,r,g,b attributes. However the data can be accessed and modified in a variety of ways.
    Attributes:
    - `x`,`y`,`z` : `np.float32` - 3d coordinates of vertex
    - `r`,`g`,`b` : `np.float32` - color of vertex
    Methods:
    - `copy()` -> `Vertex` - returns a copy of the vertex
    - `magnitude()` -> `np.float32` - returns the magnitude of the vertex
    - `brightness()` -> `np.float32` - returns the brightness of the vertex
    - `normalize()` -> `None` - normalizes the vertex to a unit vector
    ---
    ## Usage
    Groups of vertices are used to define objects through the Mesh Class and what color they should be.
    Use the `vert.xyz` or `vert.rgb` or variants like `vert.zxy` and `vert.xyrg` to use the data as a numpy array in the order of the attributes.
    Use the `vert.coords` or `vert.color` to get just the positional or color data as a numpy array. The functionality is designed to be very flexible; however,
    the user should be familiar with how the class works. Comparison and math operations only affect the positional data and behave similarly to assignment.
    Operations:
    - `==` : compares the `x`,`y`,`z` values element wise
    - `<`,`>` : compares the `magnitude()` of the vertex
    - `+`,`-`,`*`,`/` : acts accordingly based on the type of the other object
        - if other is a Vertex : acts element wise
        - if other is an array : acts element wise for values that exist in the array
        - if other is a number : acts on all `x`,`y`,`z` values
    ---
    ## Notes
    All values are stored as numpy float32, which holds 32 bits or 4 bytes of data. This is helpful for knowing the exact size when creating vertex buffers.
    """
    variables = 'xyzrgb'
    """All the variables a Vertex should hold, like __dict__ but as a string"""
    one = np.float32(1.0)
    """1.0 as a numpy float32"""
    zero = np.float32(0.0)
    """0.0 as a numpy float32"""
def __init__(self, *args, default : np.float32 = zero, **kwargs):
"""
Create a vertex with 3d coordinates and rgb colors.
Can pass a Squence like object or a numpy array as the input parmaters as well.
Args:
- x,y,z,r,g,b : np.float32 values in order, pass as many values as needed the rest will be set to 0.0
if one value is passed it will be used for all 3d coordinates and color will be set to 0.0
Keyword Args:
- x,y,z,r,g,b : np.float32 values in any order, overrides already set values by the args
- default : np.float32 value to use if no value is passed
"""
# If the input is a sequence take the sequence as the args
if isinstance(args[0], (Sequence, np.ndarray)):
args = args[0]
args_length = len(args)
# If one args is passed set it to only the coordinates
if args_length == 1:
args = [args[0]]*3
args_length = len(args)
# Make sure the default is a numpy float32
default = np.float32(default)
# Set x,y,z values if the args exist else set them to 0
self.x : np.float32 = np.float32(args[0]) if args_length >= 1 else default
"""X postion as a float"""
self.y : np.float32 = np.float32(args[1]) if args_length >= 2 else default
"""Y postion as a float"""
self.z : np.float32 = np.float32(args[2]) if args_length >= 3 else default
"""Z postion as a float"""
# Set r,g,b values if the args exists else set them to 0
self.r : np.float32 = np.float32(args[3]) if args_length >= 4 else default
"""Red value as a float"""
self.g : np.float32 = np.float32(args[4]) if args_length >= 5 else default
"""Green value as a float"""
self.b : np.float32 = np.float32(args[5]) if args_length >= 6 else default
"""Blue value as a float"""
# Use keyword args to override the values
for key, value in kwargs.items():
if key in self.variables:
self.__dict__[key] = np.float32(value)
else:
raise AttributeError(key)
def copy(self):
"""Retuns a copy of the vertex"""
return Vertex(self.x, self.y, self.z, self.r, self.g, self.b)
def magnitude(self):
"""Returns the magnitude of the vertex"""
return np.sqrt(self.x**2 + self.y**2 + self.z**2)
def brightness(self):
"""Returns the brightness of the vertex"""
return np.sqrt(self.r**2 + self.g**2 + self.b**2)
def normalize(self):
"""Normalizes the vertex to a unit vector"""
mag = self.magnitude()
if mag != 0.0:
self.x /= mag
self.y /= mag
self.z /= mag
def normalize_color(self):
"""Normalize the color to a unit vector"""
mag = self.brightness()
if mag != 0.0:
self.r /= mag
self.g /= mag
self.b /= mag
def __getattr__(self, items : str):
"""Lets us retrive any values in any order as a numpy array"""
if items == 'coords':
return np.array([self.x, self.y, self.z], dtype=np.float32)
elif items == 'color':
return np.array([self.r, self.g, self.b], dtype=np.float32)
else:
# Then its something like xyz, or xy or yzr - in short any combination of x y z / r g b
attributes = np.zeros(len(items), dtype=np.float32)
for i, item in enumerate(items):
if item in self.variables:
attributes[i] = self.__dict__[item]
else:
raise AttributeError(item)
return attributes
def _set_values(self, items : str, inp):
"""For internal use - assign values directly with "vert.xyz = (0.1, 0.2, 0.3)" instead"""
if isinstance(inp, (Sequence, np.ndarray)):
# the input is a sequence
for i, item in enumerate(items):
if i < len(inp) and item in self.variables:
self.__dict__[item] = np.float32(inp[i])
else:
# The input is a single value
for i, item in enumerate(items):
if item in self.variables:
self.__dict__[item] = np.float32(inp)
def __setattr__(self, items : str, inp):
"""Set values of the vertex. For example `[vert.xyz | vert.x | vert.rxyb] = ... ` are all valid"""
# Set some aliases
if items == 'coords':
self._set_values('xyz', inp)
elif items == 'color':
self._set_values('rgb', inp)
else:
# Is it a list of inputs like xyz, or xy or yzr? - in short any combination of x y z / r g b
self._set_values(items, inp)
    # Arithmetic operator overloads: they act on the positional (x, y, z) data
    # only, and the result carries self's color unchanged.
def __add__(self, other):
"""Add other to self only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x + other.x, self.y + other.y, self.z + other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other)
return self.__add__(vert2)
else:
return Vertex(self.x + other, self.y + other, self.z + other, self.r, self.g, self.b)
def __radd__(self, other):
"""Add other to self only considering positional data"""
return self.__add__(other)
def __sub__(self, other):
"""Subtract other from self only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x - other.x, self.y - other.y, self.z - other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other)
return self.__sub__(vert2)
else:
return Vertex(self.x - other, self.y - other, self.z - other, self.r, self.g, self.b)
def __rsub__(self, other):
"""Subtract other from self only considering positional data"""
return self.__sub__(other)
def __mul__(self, other):
"""Multiply self by other only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x * other.x, self.y * other.y, self.z * other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other, default=1.0)
return self.__mul__(vert2)
else:
return Vertex(self.x * other, self.y * other, self.z * other, self.r, self.g, self.b)
def __rmul__(self, other):
"""Multiply self by other only considering positional data"""
return self.__mul__(other)
def __div__(self, other):
"""Divide self by other only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x / other.x, self.y / other.y, self.z / other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other, default=1.0)
return self.__div__(vert2)
else:
return Vertex(self.x / other, self.y / other, self.z / other, self.r, self.g, self.b)
def __rdiv__(self, other):
"""Divide self by other only considering positional data"""
return self.__div__(other)
# true div use div for now
def __truediv__(self, other):
"""Divide self by other only considering positional data"""
return self.__div__(other)
def __rtruediv__(self, other):
"""Divide self by other only considering positional data"""
return self.__rdiv__(other)
# Define comparison operators
def __eq__(self, other):
"""Equality operator for only positional data"""
if isinstance(other, Vertex):
return self.x == other.x and self.y == other.y and self.z == other.z
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex call again
vert2 = Vertex(other)
return self.__eq__(vert2)
else:
return self.x == other and self.y == other and self.z == other
# Less then and greater then operators based on magnitude
def __lt__(self, other):
"""Less than operator for only positional data"""
if isinstance(other, Vertex):
return self.magnitude() < other.magnitude()
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex call again
vert2 = Vertex(other)
return self.__lt__(vert2)
else:
return self.magnitude() < other
def __gt__(self, other):
"""Greater than operator for only positional data"""
if isinstance(other, Vertex):
return self.magnitude() > other.magnitude()
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex call again
vert2 = Vertex(other)
return self.__gt__(vert2)
else:
return self.magnitude() > other
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
def __repr__(self):
return f'Vertex({self.x}, {self.y}, {self.z}, {self.r}, {self.g}, {self.b})'
# TODO add __str__ | src/pygline/common.py | from collections.abc import Sequence
import numpy as np
class Vertex:
"""
# VERTEX
The vertex class is a fundemental, versatille and easy to use class containing positional and color data.
The data is stored as x,y,z,r,g,b attributes. However the data can be accessed and modified in a variety of ways.
Attributes:
- `x`,`y`,`z` : `np.float32` - 3d coordinates of vertex
- `r`,`g`,`b` : `np.float32` - color of vertex
Methods:
- `copy()` -> `Vertex` - returns a copy of the vertex
- `magnitude()` -> `np.float32` - returns the magnitude of the vertex
- `brightness()` -> `np.float32` - returns the brightness of the vertex
- `normalize()` -> `None` - normalizes the vertex to a unit vector
---
## Usage
Groups of vertices are used to define objects through the Mesh Class and what color they should be.
Use the `vert.xyz` or `vert.rgb` or variants like `vert.zxy` and `vert.xyrg` to use the data as a numpy array in the order of the attributes.
Use the `vert.coords` or `vert.color` to get the just the positional or color data as a numpy array. The functionality is designed to be very flexible however,
the user should be familiar with how the class works. Comparison and math operations only affect the positional data and behave similarly to assignment
Operations:
- `==` : compares the `x`,`y`,`z` values element wise
- `<`,`>` : compares the `magnitude()` of the vertex
- `+`,`-`,`*`,`/` : acts accordingly based on the type of the other object
- if other is a Vertex : acts element wise
- if other is a array : acts element wise for values that exist in array
- if other is a number : acts on all `x`,`y`,`z` values
---
## Notes
All values are stored a numpy float32 which hold 32 bits or 4 bytes of data. This is helpful to know the exact size for createing vertex buffers.
"""
variables = 'xyzrgb'
"""All the variable a Vertex should hold, like __dict__ but as string"""
one = np.float32(1.0)
"""1.0 as a numpy float32"""
zero = np.float32(0.0)
"""0.0 as a numpy float32"""
def __init__(self, *args, default : np.float32 = zero, **kwargs):
"""
Create a vertex with 3d coordinates and rgb colors.
Can pass a Squence like object or a numpy array as the input parmaters as well.
Args:
- x,y,z,r,g,b : np.float32 values in order, pass as many values as needed the rest will be set to 0.0
if one value is passed it will be used for all 3d coordinates and color will be set to 0.0
Keyword Args:
- x,y,z,r,g,b : np.float32 values in any order, overrides already set values by the args
- default : np.float32 value to use if no value is passed
"""
# If the input is a sequence take the sequence as the args
if isinstance(args[0], (Sequence, np.ndarray)):
args = args[0]
args_length = len(args)
# If one args is passed set it to only the coordinates
if args_length == 1:
args = [args[0]]*3
args_length = len(args)
# Make sure the default is a numpy float32
default = np.float32(default)
# Set x,y,z values if the args exist else set them to 0
self.x : np.float32 = np.float32(args[0]) if args_length >= 1 else default
"""X postion as a float"""
self.y : np.float32 = np.float32(args[1]) if args_length >= 2 else default
"""Y postion as a float"""
self.z : np.float32 = np.float32(args[2]) if args_length >= 3 else default
"""Z postion as a float"""
# Set r,g,b values if the args exists else set them to 0
self.r : np.float32 = np.float32(args[3]) if args_length >= 4 else default
"""Red value as a float"""
self.g : np.float32 = np.float32(args[4]) if args_length >= 5 else default
"""Green value as a float"""
self.b : np.float32 = np.float32(args[5]) if args_length >= 6 else default
"""Blue value as a float"""
# Use keyword args to override the values
for key, value in kwargs.items():
if key in self.variables:
self.__dict__[key] = np.float32(value)
else:
raise AttributeError(key)
def copy(self):
"""Retuns a copy of the vertex"""
return Vertex(self.x, self.y, self.z, self.r, self.g, self.b)
def magnitude(self):
"""Returns the magnitude of the vertex"""
return np.sqrt(self.x**2 + self.y**2 + self.z**2)
def brightness(self):
"""Returns the brightness of the vertex"""
return np.sqrt(self.r**2 + self.g**2 + self.b**2)
def normalize(self):
"""Normalizes the vertex to a unit vector"""
mag = self.magnitude()
if mag != 0.0:
self.x /= mag
self.y /= mag
self.z /= mag
def normalize_color(self):
"""Normalize the color to a unit vector"""
mag = self.brightness()
if mag != 0.0:
self.r /= mag
self.g /= mag
self.b /= mag
def __getattr__(self, items : str):
"""Lets us retrive any values in any order as a numpy array"""
if items == 'coords':
return np.array([self.x, self.y, self.z], dtype=np.float32)
elif items == 'color':
return np.array([self.r, self.g, self.b], dtype=np.float32)
else:
# Then its something like xyz, or xy or yzr - in short any combination of x y z / r g b
attributes = np.zeros(len(items), dtype=np.float32)
for i, item in enumerate(items):
if item in self.variables:
attributes[i] = self.__dict__[item]
else:
raise AttributeError(item)
return attributes
def _set_values(self, items : str, inp):
"""For internal use - assign values directly with "vert.xyz = (0.1, 0.2, 0.3)" instead"""
if isinstance(inp, (Sequence, np.ndarray)):
# the input is a sequence
for i, item in enumerate(items):
if i < len(inp) and item in self.variables:
self.__dict__[item] = np.float32(inp[i])
else:
# The input is a single value
for i, item in enumerate(items):
if item in self.variables:
self.__dict__[item] = np.float32(inp)
def __setattr__(self, items : str, inp):
"""Set values of the vertex. For example `[vert.xyz | vert.x | vert.rxyb] = ... ` are all valid"""
# Set some aliases
if items == 'coords':
self._set_values('xyz', inp)
elif items == 'color':
self._set_values('rgb', inp)
else:
# Is it a list of inputs like xyz, or xy or yzr? - in short any combination of x y z / r g b
self._set_values(items, inp)
# create math methods for the vertex class
def __add__(self, other):
"""Add other to self only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x + other.x, self.y + other.y, self.z + other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other)
return self.__add__(vert2)
else:
return Vertex(self.x + other, self.y + other, self.z + other, self.r, self.g, self.b)
def __radd__(self, other):
"""Add other to self only considering positional data"""
return self.__add__(other)
def __sub__(self, other):
"""Subtract other from self only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x - other.x, self.y - other.y, self.z - other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other)
return self.__sub__(vert2)
else:
return Vertex(self.x - other, self.y - other, self.z - other, self.r, self.g, self.b)
def __rsub__(self, other):
"""Subtract other from self only considering positional data"""
return self.__sub__(other)
def __mul__(self, other):
"""Multiply self by other only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x * other.x, self.y * other.y, self.z * other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other, default=1.0)
return self.__mul__(vert2)
else:
return Vertex(self.x * other, self.y * other, self.z * other, self.r, self.g, self.b)
def __rmul__(self, other):
"""Multiply self by other only considering positional data"""
return self.__mul__(other)
def __div__(self, other):
"""Divide self by other only considering positional data"""
if isinstance(other, Vertex):
return Vertex(self.x / other.x, self.y / other.y, self.z / other.z, self.r, self.g, self.b)
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex and call again
vert2 = Vertex(other, default=1.0)
return self.__div__(vert2)
else:
return Vertex(self.x / other, self.y / other, self.z / other, self.r, self.g, self.b)
def __rdiv__(self, other):
"""Divide self by other only considering positional data"""
return self.__div__(other)
# true div use div for now
def __truediv__(self, other):
"""Divide self by other only considering positional data"""
return self.__div__(other)
def __rtruediv__(self, other):
"""Divide self by other only considering positional data"""
return self.__rdiv__(other)
# Define comparison operators
def __eq__(self, other):
"""Equality operator for only positional data"""
if isinstance(other, Vertex):
return self.x == other.x and self.y == other.y and self.z == other.z
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex call again
vert2 = Vertex(other)
return self.__eq__(vert2)
else:
return self.x == other and self.y == other and self.z == other
# Less then and greater then operators based on magnitude
def __lt__(self, other):
"""Less than operator for only positional data"""
if isinstance(other, Vertex):
return self.magnitude() < other.magnitude()
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex call again
vert2 = Vertex(other)
return self.__lt__(vert2)
else:
return self.magnitude() < other
def __gt__(self, other):
"""Greater than operator for only positional data"""
if isinstance(other, Vertex):
return self.magnitude() > other.magnitude()
elif isinstance(other, (Sequence, np.ndarray)):
# Convert other to vertex call again
vert2 = Vertex(other)
return self.__gt__(vert2)
else:
return self.magnitude() > other
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
def __repr__(self):
return f'Vertex({self.x}, {self.y}, {self.z}, {self.r}, {self.g}, {self.b})'
# TODO add __str__ | 0.955236 | 0.882022 |
"""Tests for the FileSystemTimelineJob job."""
from __future__ import unicode_literals
import glob
import unittest
import os
import mock
from turbinia.evidence import BodyFile
from turbinia.workers import file_system_timeline
from turbinia.workers import TurbiniaTaskResult
from turbinia.workers.workers_test import TestTurbiniaTaskBase
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
class FileSystemTimelineTest(TestTurbiniaTaskBase):
"""Tests for FileSystemTimelineJob."""
def setUp(self):
super(FileSystemTimelineTest, self).setUp(
task_class=file_system_timeline.FileSystemTimelineTask,
evidence_class=BodyFile)
self.setResults(mock_run=False)
self.task.output_dir = self.task.base_output_dir
@mock.patch('turbinia.state_manager.get_state_manager')
@mock.patch('dfvfs.helpers.volume_scanner.VolumeScanner.GetBasePathSpecs')
def testRun(self, mock_getbasepathspecs, _):
"""Test FileSystemTimelineJob task run."""
self.result.setup(self.task)
filedir = os.path.dirname(os.path.realpath(__file__))
test_data = os.path.join(filedir, '..', '..', 'test_data', 'gpt.raw')
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
test_gpt_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GPT, location='/p1',
parent=test_raw_path_spec)
test_ext_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_EXT, location='/',
parent=test_gpt_path_spec)
mock_getbasepathspecs.return_value = [test_ext_path_spec]
self.task = file_system_timeline.FileSystemTimelineTask(
base_output_dir='/tmp/bodyfile')
self.task.output_dir = self.base_output_dir
self.remove_files.append(self.task.output_dir + '/file_system.bodyfile')
result = self.task.run(self.evidence, self.result)
# Check the task name.
task_name = result.task_name
self.assertEqual(task_name, 'FileSystemTimelineTask')
# Check the bodyfile contains the expected file entries.
number_of_entries = result.evidence[0].number_of_entries
self.assertEqual(number_of_entries, 7)
# Ensure run method returns a TurbiniaTaskResult instance.
self.assertIsInstance(result, TurbiniaTaskResult)
if __name__ == '__main__':
unittest.main() | turbinia/workers/file_system_timeline_test.py | """Tests for the FileSystemTimelineJob job."""
from __future__ import unicode_literals
import glob
import unittest
import os
import mock
from turbinia.evidence import BodyFile
from turbinia.workers import file_system_timeline
from turbinia.workers import TurbiniaTaskResult
from turbinia.workers.workers_test import TestTurbiniaTaskBase
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
class FileSystemTimelineTest(TestTurbiniaTaskBase):
"""Tests for FileSystemTimelineJob."""
def setUp(self):
super(FileSystemTimelineTest, self).setUp(
task_class=file_system_timeline.FileSystemTimelineTask,
evidence_class=BodyFile)
self.setResults(mock_run=False)
self.task.output_dir = self.task.base_output_dir
@mock.patch('turbinia.state_manager.get_state_manager')
@mock.patch('dfvfs.helpers.volume_scanner.VolumeScanner.GetBasePathSpecs')
def testRun(self, mock_getbasepathspecs, _):
"""Test FileSystemTimelineJob task run."""
self.result.setup(self.task)
filedir = os.path.dirname(os.path.realpath(__file__))
test_data = os.path.join(filedir, '..', '..', 'test_data', 'gpt.raw')
test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location=test_data)
test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
test_gpt_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_GPT, location='/p1',
parent=test_raw_path_spec)
test_ext_path_spec = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_EXT, location='/',
parent=test_gpt_path_spec)
mock_getbasepathspecs.return_value = [test_ext_path_spec]
self.task = file_system_timeline.FileSystemTimelineTask(
base_output_dir='/tmp/bodyfile')
self.task.output_dir = self.base_output_dir
self.remove_files.append(self.task.output_dir + '/file_system.bodyfile')
result = self.task.run(self.evidence, self.result)
# Check the task name.
task_name = result.task_name
self.assertEqual(task_name, 'FileSystemTimelineTask')
# Check the bodyfile contains the expected file entries.
number_of_entries = result.evidence[0].number_of_entries
self.assertEqual(number_of_entries, 7)
# Ensure run method returns a TurbiniaTaskResult instance.
self.assertIsInstance(result, TurbiniaTaskResult)
if __name__ == '__main__':
unittest.main() | 0.624523 | 0.241054 |
from helpers import show_dotted_image
import cv2
import numpy as np
class BirdsEye:
def __init__(self, source_points, dest_points, cam_matrix, distortion_coef):
self.spoints = source_points
self.dpoints = dest_points
self.src_points = np.array(source_points, np.float32)
self.dest_points = np.array(dest_points, np.float32)
self.cam_matrix = cam_matrix
self.dist_coef = distortion_coef
self.warp_matrix = cv2.getPerspectiveTransform(self.src_points, self.dest_points)
self.inv_warp_matrix = cv2.getPerspectiveTransform(self.dest_points, self.src_points)
def undistort(self, raw_image, show_dotted = False):
image = cv2.undistort(raw_image, self.cam_matrix, self.dist_coef, None, self.cam_matrix)
if show_dotted:
show_dotted_image(image, self.spoints)
return image
def sky_view(self, ground_image, show_dotted = False):
temp_image = self.undistort(ground_image, show_dotted = False)
shape = (temp_image.shape[1], temp_image.shape[0])
warp_image = cv2.warpPerspective(temp_image, self.warp_matrix, shape, flags = cv2.INTER_LINEAR)
if show_dotted:
show_dotted_image(warp_image, self.dpoints)
return warp_image
def project(self, ground_image, sky_lane, left_fit, right_fit, color = (0, 255, 0)):
z = np.zeros_like(sky_lane)
sky_lane = np.dstack((z, z, z))
kl, kr = left_fit, right_fit
h = sky_lane.shape[0]
ys = np.linspace(0, h - 1, h)
lxs = kl[0] * (ys**2) + kl[1]* ys + kl[2]
rxs = kr[0] * (ys**2) + kr[1]* ys + kr[2]
pts_left = np.array([np.transpose(np.vstack([lxs, ys]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rxs, ys])))])
pts = np.hstack((pts_left, pts_right))
cv2.fillPoly(sky_lane, np.int_(pts), color)
shape = (sky_lane.shape[1], sky_lane.shape[0])
ground_lane = cv2.warpPerspective(sky_lane, self.inv_warp_matrix, shape)
result = cv2.addWeighted(ground_image, 1, ground_lane, 0.3, 0)
return result | birdseye.py | from helpers import show_dotted_image
import cv2
import numpy as np
class BirdsEye:
def __init__(self, source_points, dest_points, cam_matrix, distortion_coef):
self.spoints = source_points
self.dpoints = dest_points
self.src_points = np.array(source_points, np.float32)
self.dest_points = np.array(dest_points, np.float32)
self.cam_matrix = cam_matrix
self.dist_coef = distortion_coef
self.warp_matrix = cv2.getPerspectiveTransform(self.src_points, self.dest_points)
self.inv_warp_matrix = cv2.getPerspectiveTransform(self.dest_points, self.src_points)
def undistort(self, raw_image, show_dotted = False):
image = cv2.undistort(raw_image, self.cam_matrix, self.dist_coef, None, self.cam_matrix)
if show_dotted:
show_dotted_image(image, self.spoints)
return image
def sky_view(self, ground_image, show_dotted = False):
temp_image = self.undistort(ground_image, show_dotted = False)
shape = (temp_image.shape[1], temp_image.shape[0])
warp_image = cv2.warpPerspective(temp_image, self.warp_matrix, shape, flags = cv2.INTER_LINEAR)
if show_dotted:
show_dotted_image(warp_image, self.dpoints)
return warp_image
def project(self, ground_image, sky_lane, left_fit, right_fit, color = (0, 255, 0)):
z = np.zeros_like(sky_lane)
sky_lane = np.dstack((z, z, z))
kl, kr = left_fit, right_fit
h = sky_lane.shape[0]
ys = np.linspace(0, h - 1, h)
lxs = kl[0] * (ys**2) + kl[1]* ys + kl[2]
rxs = kr[0] * (ys**2) + kr[1]* ys + kr[2]
pts_left = np.array([np.transpose(np.vstack([lxs, ys]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([rxs, ys])))])
pts = np.hstack((pts_left, pts_right))
cv2.fillPoly(sky_lane, np.int_(pts), color)
shape = (sky_lane.shape[1], sky_lane.shape[0])
ground_lane = cv2.warpPerspective(sky_lane, self.inv_warp_matrix, shape)
result = cv2.addWeighted(ground_image, 1, ground_lane, 0.3, 0)
return result | 0.536799 | 0.330228 |
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
print(f"PyTorch version: {torch.__version__}")
# Device configuration
if torch.cuda.is_available():
device = torch.device("cuda")
print("GPU found :)")
else:
device = torch.device("cpu")
print("No GPU :(")
def main():
"""Main function"""
# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001
# Toy dataset
x_train = torch.tensor(
[
[3.3],
[4.4],
[5.5],
[6.71],
[6.93],
[4.168],
[9.779],
[6.182],
[7.59],
[2.167],
[7.042],
[10.791],
[5.313],
[7.997],
[3.1],
],
dtype=torch.float32,
).to(device)
y_train = torch.tensor(
[
[1.7],
[2.76],
[2.09],
[3.19],
[1.694],
[1.573],
[3.366],
[2.596],
[2.53],
[1.221],
[2.827],
[3.465],
[1.65],
[2.904],
[1.3],
],
dtype=torch.float32,
).to(device)
# Linear regression model
model = nn.Linear(input_size, output_size).to(device)
# Training configuration
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Training loop
for epoch in range(num_epochs):
# Forward pass
y_pred = model(x_train)
loss = criterion(y_pred, y_train)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch + 1) % 5 == 0:
print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.5f}")
# Plot data and predictions
with torch.no_grad():
y_pred = model(x_train)
plt.plot(x_train.cpu(), y_train.cpu(), "ro", label="Data")
plt.plot(x_train.cpu(), y_pred.cpu(), label="Prediction")
plt.title("Linear Regression with PyTorch")
plt.legend()
plt.show()
if __name__ == "__main__":
main() | pytorch/linear_regression.py |
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
print(f"PyTorch version: {torch.__version__}")
# Device configuration
if torch.cuda.is_available():
device = torch.device("cuda")
print("GPU found :)")
else:
device = torch.device("cpu")
print("No GPU :(")
def main():
"""Main function"""
# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001
# Toy dataset
x_train = torch.tensor(
[
[3.3],
[4.4],
[5.5],
[6.71],
[6.93],
[4.168],
[9.779],
[6.182],
[7.59],
[2.167],
[7.042],
[10.791],
[5.313],
[7.997],
[3.1],
],
dtype=torch.float32,
).to(device)
y_train = torch.tensor(
[
[1.7],
[2.76],
[2.09],
[3.19],
[1.694],
[1.573],
[3.366],
[2.596],
[2.53],
[1.221],
[2.827],
[3.465],
[1.65],
[2.904],
[1.3],
],
dtype=torch.float32,
).to(device)
# Linear regression model
model = nn.Linear(input_size, output_size).to(device)
# Training configuration
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Training loop
for epoch in range(num_epochs):
# Forward pass
y_pred = model(x_train)
loss = criterion(y_pred, y_train)
# Backward and optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch + 1) % 5 == 0:
print(f"Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.5f}")
# Plot data and predictions
with torch.no_grad():
y_pred = model(x_train)
plt.plot(x_train.cpu(), y_train.cpu(), "ro", label="Data")
plt.plot(x_train.cpu(), y_pred.cpu(), label="Prediction")
plt.title("Linear Regression with PyTorch")
plt.legend()
plt.show()
if __name__ == "__main__":
main() | 0.788135 | 0.603465 |
import datasets
import torch # type: ignore
import numpy as np
import absl.flags
import absl.app
import os
import aux
from typing import Tuple
# user flags
absl.flags.DEFINE_string("path_models", None, "Path of the trained model")
absl.flags.DEFINE_string("dir_dataset", 'datasets/', "dir path where datasets are stored")
absl.flags.mark_flag_as_required("path_models")
FLAGS = absl.flags.FLAGS
def get_explanation_accuracy(model:torch.nn.Module,loader:torch.utils.data.DataLoader,mem_loader:torch.utils.data.DataLoader,device:torch.device)-> Tuple[float, float, float]:
""" Method to compute the explanation accuracy for different settings, as
described in the paper. Explanation accuracy checks how many times the
sample in the memory set with the highest weight is predicted in the same
class of the current sample.
image
Args:
model (torch.nn.Module): Trained model to evaluate
loader (torch.utils.data.DataLoader): loader containing testing
samples where evaluate the model
mem_loader (torch.utils.data.DataLoader): loader containing training
samples to be used as memory set
device (torch.device): device where the model is stored
Returns:
Tuple[float, float, float]: Explanation accuracy for the sample with
the highest weight in memory, explanation accuracy for the sample with
the highest weight in memory when it is a counterfactual, explanation accuracy for the sample with the highest weight in memory when it is an explanation by example.
"""
model.eval()
expl_max = 0
top_counter = 0
correct_counter = 0
top_example = 0
correct_example = 0
with torch.no_grad():
for index_batch, (data, target) in enumerate(loader):
data = data.to(device)
target = target.to(device)
memory, _ = next(iter(mem_loader))
memory = memory.to(device)
# get output
output, rw = model(data,memory, return_weights=True)
pred = output.data.max(1, keepdim=True)[1]
# auxiliar memory to get memory output
aux_mem, _ = next(iter(mem_loader))
aux_mem = aux_mem.to(device)
# get memory output
exp_output = model(memory,aux_mem)
exp_pred = exp_output.data.max(1,keepdim=True)[1]
# get index of sample with highest weight
_, index_max = torch.max(rw,dim=1)
sorted_exp_pred = exp_pred[index_max]
for row in range(len(sorted_exp_pred)):
if sorted_exp_pred[row] != pred[row]:
# counterfactual
correct_counter += pred[row].eq(target[row].data.view_as(pred[row])).sum().item()
top_counter+=1
else:
# explanation by example
correct_example += pred[row].eq(target[row].data.view_as(pred[row])).sum().item()
top_example+=1
# explanation accuracy
expl_max += pred.eq(exp_pred[index_max].data.view_as(pred)).sum().item()
print("batch:{}\{}".format(index_batch,len(loader)),end='\r')
counter_accuracy = 100.*(torch.true_divide(correct_counter,top_counter))
example_accuracy = 100.*(torch.true_divide(correct_example,top_example))
explanation_accuracy = 100.*(torch.true_divide(expl_max,len(loader.dataset)))
return explanation_accuracy, counter_accuracy, example_accuracy
def run_evaluation(path:str,dataset_dir:str):
""" Function to print the explanation accuracy of a set of models inside a dir. It prints the mean and standard deviation of explanation accuracy of the top sample in memory on different settings (see paper).
Args:
path (str): dir path
dataset_dir (str): dir where datasets are stored
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:{}".format(device))
# load data
# load model
expl_acc = []
expl_acc_counter = []
expl_acc_ex = []
list_models = [name_file for name_file in os.listdir(path) if name_file.endswith('.pt')]
for indx, name_model in enumerate(list_models):
# load model
checkpoint = torch.load(path+name_model)
model_name = checkpoint['model_name']
modality = checkpoint['modality']
dataset_name = checkpoint['dataset_name']
load_dataset = getattr(datasets, 'get_'+dataset_name)
model = aux.get_model(model_name,checkpoint['num_classes'],model_type=modality)
model.load_state_dict(checkpoint['model_state_dict'])
model = model.to(device)
#load data
_, _, test_loader, mem_loader = load_dataset(dataset_dir,batch_size_train=checkpoint['train_examples'], batch_size_test=500,batch_size_memory=100)
print("Loaded models:{}/{}".format(indx+1,len(list_models),end='\r'))
# perform validation
cum_acc = []
cum_acc_counter = []
cum_acc_ex = []
for _ in range(5):
exp_max_acc, counter_acc,example_acc = get_explanation_accuracy(model,test_loader,mem_loader,device)
cum_acc.append(exp_max_acc)
cum_acc_counter.append(counter_acc)
cum_acc_ex.append(example_acc)
# store mean of model's accuracies
expl_acc.append(np.mean(cum_acc))
expl_acc_counter.append(np.mean(cum_acc_counter))
expl_acc_ex.append(np.mean(cum_acc_ex))
# log for each model
print("Explanation accuracy max (mean):{:.2f}\t(std_dev):{:.2f}\t counterfactual mean:{:.2f}\t counterfactual std:{:.2f}\t example mean:{:.2f}\t example std:{:.2f}".format(np.mean(expl_acc),np.std(expl_acc),np.mean(expl_acc_counter),np.std(expl_acc_counter),np.mean(expl_acc_ex),np.std(expl_acc_ex)))
# final summary
print()
print("Explanation accuracy (mean):{:.2f}\t(std_dev):{:.2f}\t counterfactual acc mean:{:.2f}\t counterfactual std:{:.2f}\t example acc mean:{:.2f}\t example std:{:.2f}".format(np.mean(expl_acc),np.std(expl_acc),np.mean(expl_acc_counter),np.std(expl_acc_counter),np.mean(expl_acc_ex),np.std(expl_acc_ex)))
def main(args):
run_evaluation(FLAGS.path_models,FLAGS.dir_dataset)
if __name__ == '__main__':
absl.app.run(main) | paper/explanation_accuracy.py | import datasets
import torch # type: ignore
import numpy as np
import absl.flags
import absl.app
import os
import aux
from typing import Tuple
# user flags
absl.flags.DEFINE_string("path_models", None, "Path of the trained model")
absl.flags.DEFINE_string("dir_dataset", 'datasets/', "dir path where datasets are stored")
absl.flags.mark_flag_as_required("path_models")
FLAGS = absl.flags.FLAGS
def get_explanation_accuracy(model:torch.nn.Module,loader:torch.utils.data.DataLoader,mem_loader:torch.utils.data.DataLoader,device:torch.device)-> Tuple[float, float, float]:
""" Method to compute the explanation accuracy for different settings, as
described in the paper. Explanation accuracy checks how many times the
sample in the memory set with the highest weight is predicted in the same
class of the current sample.
image
Args:
model (torch.nn.Module): Trained model to evaluate
loader (torch.utils.data.DataLoader): loader containing testing
samples where evaluate the model
mem_loader (torch.utils.data.DataLoader): loader containing training
samples to be used as memory set
device (torch.device): device where the model is stored
Returns:
Tuple[float, float, float]: Explanation accuracy for the sample with
the highest weight in memory, explanation accuracy for the sample with
the highest weight in memory when it is a counterfactual, explanation accuracy for the sample with the highest weight in memory when it is an explanation by example.
"""
model.eval()
expl_max = 0
top_counter = 0
correct_counter = 0
top_example = 0
correct_example = 0
with torch.no_grad():
for index_batch, (data, target) in enumerate(loader):
data = data.to(device)
target = target.to(device)
memory, _ = next(iter(mem_loader))
memory = memory.to(device)
# get output
output, rw = model(data,memory, return_weights=True)
pred = output.data.max(1, keepdim=True)[1]
# auxiliar memory to get memory output
aux_mem, _ = next(iter(mem_loader))
aux_mem = aux_mem.to(device)
# get memory output
exp_output = model(memory,aux_mem)
exp_pred = exp_output.data.max(1,keepdim=True)[1]
# get index of sample with highest weight
_, index_max = torch.max(rw,dim=1)
sorted_exp_pred = exp_pred[index_max]
for row in range(len(sorted_exp_pred)):
if sorted_exp_pred[row] != pred[row]:
# counterfactual
correct_counter += pred[row].eq(target[row].data.view_as(pred[row])).sum().item()
top_counter+=1
else:
# explanation by example
correct_example += pred[row].eq(target[row].data.view_as(pred[row])).sum().item()
top_example+=1
# explanation accuracy
expl_max += pred.eq(exp_pred[index_max].data.view_as(pred)).sum().item()
print("batch:{}\{}".format(index_batch,len(loader)),end='\r')
counter_accuracy = 100.*(torch.true_divide(correct_counter,top_counter))
example_accuracy = 100.*(torch.true_divide(correct_example,top_example))
explanation_accuracy = 100.*(torch.true_divide(expl_max,len(loader.dataset)))
return explanation_accuracy, counter_accuracy, example_accuracy
def run_evaluation(path:str,dataset_dir:str):
""" Function to print the explanation accuracy of a set of models inside a dir. It prints the mean and standard deviation of explanation accuracy of the top sample in memory on different settings (see paper).
Args:
path (str): dir path
dataset_dir (str): dir where datasets are stored
"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("Device:{}".format(device))
# load data
# load model
expl_acc = []
expl_acc_counter = []
expl_acc_ex = []
list_models = [name_file for name_file in os.listdir(path) if name_file.endswith('.pt')]
for indx, name_model in enumerate(list_models):
# load model
checkpoint = torch.load(path+name_model)
model_name = checkpoint['model_name']
modality = checkpoint['modality']
dataset_name = checkpoint['dataset_name']
load_dataset = getattr(datasets, 'get_'+dataset_name)
model = aux.get_model(model_name,checkpoint['num_classes'],model_type=modality)
model.load_state_dict(checkpoint['model_state_dict'])
model = model.to(device)
#load data
_, _, test_loader, mem_loader = load_dataset(dataset_dir,batch_size_train=checkpoint['train_examples'], batch_size_test=500,batch_size_memory=100)
print("Loaded models:{}/{}".format(indx+1,len(list_models),end='\r'))
# perform validation
cum_acc = []
cum_acc_counter = []
cum_acc_ex = []
for _ in range(5):
exp_max_acc, counter_acc,example_acc = get_explanation_accuracy(model,test_loader,mem_loader,device)
cum_acc.append(exp_max_acc)
cum_acc_counter.append(counter_acc)
cum_acc_ex.append(example_acc)
# store mean of model's accuracies
expl_acc.append(np.mean(cum_acc))
expl_acc_counter.append(np.mean(cum_acc_counter))
expl_acc_ex.append(np.mean(cum_acc_ex))
# log for each model
print("Explanation accuracy max (mean):{:.2f}\t(std_dev):{:.2f}\t counterfactual mean:{:.2f}\t counterfactual std:{:.2f}\t example mean:{:.2f}\t example std:{:.2f}".format(np.mean(expl_acc),np.std(expl_acc),np.mean(expl_acc_counter),np.std(expl_acc_counter),np.mean(expl_acc_ex),np.std(expl_acc_ex)))
# final summary
print()
print("Explanation accuracy (mean):{:.2f}\t(std_dev):{:.2f}\t counterfactual acc mean:{:.2f}\t counterfactual std:{:.2f}\t example acc mean:{:.2f}\t example std:{:.2f}".format(np.mean(expl_acc),np.std(expl_acc),np.mean(expl_acc_counter),np.std(expl_acc_counter),np.mean(expl_acc_ex),np.std(expl_acc_ex)))
def main(args):
run_evaluation(FLAGS.path_models,FLAGS.dir_dataset)
if __name__ == '__main__':
absl.app.run(main) | 0.853058 | 0.497864 |
import logging
import os
import shutil
from tempfile import mkdtemp
from service_buddy.ci.ci import BuildCreator
from service_buddy.ci.travis_build_creator import TravisBuildCreator
from service_buddy.service import loader
from service_buddy.service.service import Service
from service_buddy.util import pretty_printer
from testcase_parent import ParentTestCase
DIRNAME = os.path.dirname(os.path.abspath(__file__))
class TravisBuildTestCase(ParentTestCase):
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
super(TravisBuildTestCase, cls).setUpClass()
cls.test_resources = os.path.join(DIRNAME, '../resources/travis_build_test')
cls.yml_folder = os.path.join(cls.test_resources, "app1", "service")
cls.app_dir = os.path.join(cls.test_resources, "app1")
def test_travis_file_detection(self):
build_creator = BuildCreator(dry_run=True, template_directory=self.test_resources)
test_service = Service(app="app1", role="service", definition={"service-type": "test"})
build_creator.create_project(service_definition=test_service, app_dir=self.app_dir)
self._assertInYaml({"ubar":"Overwrote existing travis.yml"},self.yml_folder)
temp = mkdtemp()
loader.safe_mkdir(test_service.get_service_directory(temp))
build_creator._get_default_build_creator().dry_run = False
build_creator.create_project(service_definition=test_service, app_dir=temp)
def test_travis_arg_render(self):
items = "infra-buddy validate-template --service-template-directory . --service-type {role}"
item2 = "pyb install_dependencies package -P build_number=0.1.${TRAVIS_BUILD_NUMBER}"
list_args = []
TravisBuildCreator._append_rendered_arguments(list_args, items, {'role': 'vbar'})
self.assertTrue("vbar" in list_args[0],"Did not render properly")
TravisBuildCreator._append_rendered_arguments(list_args, item2, {'role': 'vbar'})
self.assertTrue("${TRAVIS_BUILD_NUMBER}" in list_args[1],"Did not render properly")
def test_yml_update(self):
temp = mkdtemp()
source = os.path.join(self.yml_folder, '.travis.yml')
destination = os.path.join(temp, '.travis.yml')
shutil.copy(source, destination)
build_creator = BuildCreator(dry_run=True, template_directory=self.test_resources)
build_creator._get_default_build_creator()._write_deploy_stanza(temp)
self._assertInYaml({"deploy":"Cound not find deploy stanza"},temp)
def _assertInList(self, param, line_list, error_message):
for line in line_list:
if param in line:
return
self.fail(error_message)
def _assertInYaml(self, expected_error_msg, directory):
destination = os.path.join(directory, '.travis.yml')
with open(destination) as desty:
readlines = desty.readlines()
for expected, error_msg in expected_error_msg.iteritems():
self._assertInList(expected, readlines, error_msg) | src/unittest/python/travis_build_tests.py | import logging
import os
import shutil
from tempfile import mkdtemp
from service_buddy.ci.ci import BuildCreator
from service_buddy.ci.travis_build_creator import TravisBuildCreator
from service_buddy.service import loader
from service_buddy.service.service import Service
from service_buddy.util import pretty_printer
from testcase_parent import ParentTestCase
DIRNAME = os.path.dirname(os.path.abspath(__file__))
class TravisBuildTestCase(ParentTestCase):
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
super(TravisBuildTestCase, cls).setUpClass()
cls.test_resources = os.path.join(DIRNAME, '../resources/travis_build_test')
cls.yml_folder = os.path.join(cls.test_resources, "app1", "service")
cls.app_dir = os.path.join(cls.test_resources, "app1")
def test_travis_file_detection(self):
build_creator = BuildCreator(dry_run=True, template_directory=self.test_resources)
test_service = Service(app="app1", role="service", definition={"service-type": "test"})
build_creator.create_project(service_definition=test_service, app_dir=self.app_dir)
self._assertInYaml({"ubar":"Overwrote existing travis.yml"},self.yml_folder)
temp = mkdtemp()
loader.safe_mkdir(test_service.get_service_directory(temp))
build_creator._get_default_build_creator().dry_run = False
build_creator.create_project(service_definition=test_service, app_dir=temp)
def test_travis_arg_render(self):
items = "infra-buddy validate-template --service-template-directory . --service-type {role}"
item2 = "pyb install_dependencies package -P build_number=0.1.${TRAVIS_BUILD_NUMBER}"
list_args = []
TravisBuildCreator._append_rendered_arguments(list_args, items, {'role': 'vbar'})
self.assertTrue("vbar" in list_args[0],"Did not render properly")
TravisBuildCreator._append_rendered_arguments(list_args, item2, {'role': 'vbar'})
self.assertTrue("${TRAVIS_BUILD_NUMBER}" in list_args[1],"Did not render properly")
def test_yml_update(self):
temp = mkdtemp()
source = os.path.join(self.yml_folder, '.travis.yml')
destination = os.path.join(temp, '.travis.yml')
shutil.copy(source, destination)
build_creator = BuildCreator(dry_run=True, template_directory=self.test_resources)
build_creator._get_default_build_creator()._write_deploy_stanza(temp)
self._assertInYaml({"deploy":"Cound not find deploy stanza"},temp)
def _assertInList(self, param, line_list, error_message):
for line in line_list:
if param in line:
return
self.fail(error_message)
def _assertInYaml(self, expected_error_msg, directory):
destination = os.path.join(directory, '.travis.yml')
with open(destination) as desty:
readlines = desty.readlines()
for expected, error_msg in expected_error_msg.iteritems():
self._assertInList(expected, readlines, error_msg) | 0.213131 | 0.251418 |
import time
from donkeycar.gym import remote_controller
class DonkeyCar:
def __init__(self, car = "kari", server = "mqtt.eclipse.org"):
self.control = remote_controller.DonkeyRemoteContoller(car, server)
self.state = self.control.observe()
self.action = [0, 0]
self.throttle = 0.15
def reset(self):
self.control.take_action(action=[0, 0])
time.sleep(1)
self.state = self.control.observe()
throttle_input = input("Input throttle. Previous was {}\n".format(self.throttle))
if throttle_input:
try:
self.throttle = float(throttle_input)
except ValueError:
pass
return self.state
def step(self, control, step_length):
steering = control[0]
throttle = self.throttle if control[1]> 0 else 0
action = [steering, throttle]
self.control.take_action(action=action)
time.sleep(step_length)
obs = self.control.observe()
self.state = obs
done = self.is_dead()
return self.state, done
def is_dead(self):
crop_height = 20
crop_width = 20
threshold = 70
pixels_percentage = 0.10
pixels_required = (self.state.shape[1] - 2 * crop_width) * crop_height * pixels_percentage
#(im[:,:,0] > threshold) & (im[:,:,1] < 150) & (im[:,:,1] < 150)
crop = self.state[-crop_height:, crop_width:-crop_width]
#gs = np.dot(crop, [0.299, 0.587, 0.114])
r = crop[:,:,0] < threshold
g = crop[:,:,1] < threshold
b = crop[:,:,2] < threshold
pixels = (r & g & b).sum()
#im = self.state[-crop_height:, crop_width:-crop_width]
#gs = (im[:,:,0] > 150) & (im[:,:,1] < 150) & (im[:,:,1] < 150)
#pixels = len(gs[gs])
#pixels = len(gs[gs < threshold])
print("Pixels: {}, Required: {}".format(pixels, pixels_required))
return pixels < pixels_required | environments/donkey_car.py | import time
from donkeycar.gym import remote_controller
class DonkeyCar:
def __init__(self, car = "kari", server = "mqtt.eclipse.org"):
self.control = remote_controller.DonkeyRemoteContoller(car, server)
self.state = self.control.observe()
self.action = [0, 0]
self.throttle = 0.15
def reset(self):
self.control.take_action(action=[0, 0])
time.sleep(1)
self.state = self.control.observe()
throttle_input = input("Input throttle. Previous was {}\n".format(self.throttle))
if throttle_input:
try:
self.throttle = float(throttle_input)
except ValueError:
pass
return self.state
def step(self, control, step_length):
steering = control[0]
throttle = self.throttle if control[1]> 0 else 0
action = [steering, throttle]
self.control.take_action(action=action)
time.sleep(step_length)
obs = self.control.observe()
self.state = obs
done = self.is_dead()
return self.state, done
def is_dead(self):
crop_height = 20
crop_width = 20
threshold = 70
pixels_percentage = 0.10
pixels_required = (self.state.shape[1] - 2 * crop_width) * crop_height * pixels_percentage
#(im[:,:,0] > threshold) & (im[:,:,1] < 150) & (im[:,:,1] < 150)
crop = self.state[-crop_height:, crop_width:-crop_width]
#gs = np.dot(crop, [0.299, 0.587, 0.114])
r = crop[:,:,0] < threshold
g = crop[:,:,1] < threshold
b = crop[:,:,2] < threshold
pixels = (r & g & b).sum()
#im = self.state[-crop_height:, crop_width:-crop_width]
#gs = (im[:,:,0] > 150) & (im[:,:,1] < 150) & (im[:,:,1] < 150)
#pixels = len(gs[gs])
#pixels = len(gs[gs < threshold])
print("Pixels: {}, Required: {}".format(pixels, pixels_required))
return pixels < pixels_required | 0.446977 | 0.180504 |
import os
import re
import subprocess
BASE_COMMAND = 'python bert_experiments.py'
def evaluate(model, task, max_seq_length, checkpoint_dir,
linformer_k=None, blocks=None):
command = ('{0} --model {1} --task {2} --max_seq_length {3} '
'--load_from_checkpoint --checkpoint_dir {4} --eval').format(
BASE_COMMAND, model, task, max_seq_length, checkpoint_dir)
if linformer_k is not None:
assert(blocks is not None)
blocks_str = ','.join([str(block) for block in blocks])
command = '{0} --linformer_k {1} --linformer_blocks {2}'.format(
command, linformer_k, blocks_str)
output = subprocess.check_output(
command, shell=True).decode('utf-8').strip()
for line in output.split('\n'):
match = re.search('Validation loss: ([-+]?[0-9]*\.?[0-9]+)', line)
if match is not None:
validation_loss = float(match.group(1))
continue
match = re.search('Validation accuracy: ([-+]?[0-9]*\.?[0-9]+)%', line)
if match is not None:
validation_accuracy = float(match.group(1))
continue
match = re.search(
'Median per-batch runtime: ([-+]?[0-9]*\.?[0-9]+) ms', line)
if match is not None:
runtime = float(match.group(1))
return validation_loss, validation_accuracy, runtime
raise RuntimeError('Could not get accuracy and loss!')
def train(model, task, max_seq_length, batch_size, epochs,
valid_acc_target, max_valid_loss, linformer_k, blocks,
input_checkpoint_dir, output_checkpoint_dir):
blocks_str = ','.join([str(block) for block in blocks])
command = ('{0} --model {1} --task {2} --max_seq_length {3} '
'--batch_size {4} --epochs {5} --valid_acc_target {6} '
'--max_valid_loss {7} --linformer_k {8} --linformer_blocks {9} '
'--load_from_checkpoint --checkpoint_dir {10} '
'--save_to_checkpoint --save_checkpoint_dir {11}').format(
BASE_COMMAND, model, task, max_seq_length, batch_size,
epochs, valid_acc_target, max_valid_loss, linformer_k,
blocks_str, input_checkpoint_dir, output_checkpoint_dir)
output = subprocess.check_output(
command, shell=True).decode('utf-8').strip()
for line in output.split('\n'):
match = \
re.search('Validation accuracy: ([-+]?[0-9]*\.?[0-9]+)%', line)
if match is not None:
validation_accuracy = float(match.group(1))
return validation_accuracy
raise RuntimeError('Could not get accuracy!')
def main():
model = 'roberta'
task = 'mrpc'
batch_size = 16
epochs = 1
max_seq_length = 512
linformer_k = 128
base_checkpoint_dir = '/lfs/1/keshav2/bert_checkpoints'
valid_loss, valid_acc, orig_runtime = evaluate(model, task, max_seq_length,
base_checkpoint_dir)
valid_acc_target = valid_acc - 1.0
max_valid_loss = valid_loss * 1.75
candidates = set(range(12))
opt_sequence = []
current_checkpoint_dir = base_checkpoint_dir
round_num = 0
best_accuracy = 0.0
print('Validation accuracy target: {0:.2f}%%'.format(valid_acc_target))
print('Maximum validation loss: {0:.4f}'.format(max_valid_loss))
print('Original runtime: {0:.2f} ms\n'.format(orig_runtime))
while len(candidates) > 0:
best_candidate = None
found_new_opt = False
print('*** Round {0} ***'.format(round_num))
for candidate in candidates:
blocks = sorted(opt_sequence + [candidate])
blocks_str = ','.join([str(block) for block in blocks])
output_checkpoint_dir = \
os.path.join(base_checkpoint_dir,
'optimizer_blocks={0}'.format(blocks_str))
if not os.path.isdir(output_checkpoint_dir):
os.mkdir(output_checkpoint_dir)
accuracy = train(model, task, max_seq_length, batch_size, epochs,
valid_acc_target / 100.0, max_valid_loss,
linformer_k, blocks, current_checkpoint_dir,
output_checkpoint_dir)
print('Candidate: {0}, accuracy={1:.2f}'.format(
candidate, accuracy))
if accuracy >= valid_acc_target:
if best_candidate is None or accuracy >= best_accuracy:
best_accuracy = accuracy
best_candidate = candidate
found_new_opt = True
print()
if not found_new_opt:
break
else:
round_num += 1
opt_sequence.append(best_candidate)
candidates.remove(best_candidate)
blocks = sorted(opt_sequence)
blocks_str = ','.join([str(block) for block in blocks])
current_checkpoint_dir = \
os.path.join(base_checkpoint_dir,
'optimizer_blocks={0}'.format(blocks_str))
if len(opt_sequence) == 0:
print('Could not optimize model!')
return
blocks = sorted(opt_sequence)
blocks_str = ','.join([str(block) for block in blocks])
output_checkpoint_dir = \
os.path.join(base_checkpoint_dir,
'optimizer_blocks={0}'.format(blocks_str))
opt_loss, opt_acc, opt_runtime = evaluate(model, task, max_seq_length,
output_checkpoint_dir,
linformer_k, blocks)
print('Final optimized sequence: {0}'.format(opt_sequence))
print('Final optimized accuracy: {0:.2f}%'.format(opt_acc))
print('Final optimized runtime: {0:.2f} ms'.format(opt_runtime))
print('Speedup: {0:.2f}x'.format(orig_runtime / opt_runtime))
if __name__=='__main__':
main() | src/bert_optimizer.py | import os
import re
import subprocess
BASE_COMMAND = 'python bert_experiments.py'
def evaluate(model, task, max_seq_length, checkpoint_dir,
linformer_k=None, blocks=None):
command = ('{0} --model {1} --task {2} --max_seq_length {3} '
'--load_from_checkpoint --checkpoint_dir {4} --eval').format(
BASE_COMMAND, model, task, max_seq_length, checkpoint_dir)
if linformer_k is not None:
assert(blocks is not None)
blocks_str = ','.join([str(block) for block in blocks])
command = '{0} --linformer_k {1} --linformer_blocks {2}'.format(
command, linformer_k, blocks_str)
output = subprocess.check_output(
command, shell=True).decode('utf-8').strip()
for line in output.split('\n'):
match = re.search('Validation loss: ([-+]?[0-9]*\.?[0-9]+)', line)
if match is not None:
validation_loss = float(match.group(1))
continue
match = re.search('Validation accuracy: ([-+]?[0-9]*\.?[0-9]+)%', line)
if match is not None:
validation_accuracy = float(match.group(1))
continue
match = re.search(
'Median per-batch runtime: ([-+]?[0-9]*\.?[0-9]+) ms', line)
if match is not None:
runtime = float(match.group(1))
return validation_loss, validation_accuracy, runtime
raise RuntimeError('Could not get accuracy and loss!')
def train(model, task, max_seq_length, batch_size, epochs,
valid_acc_target, max_valid_loss, linformer_k, blocks,
input_checkpoint_dir, output_checkpoint_dir):
blocks_str = ','.join([str(block) for block in blocks])
command = ('{0} --model {1} --task {2} --max_seq_length {3} '
'--batch_size {4} --epochs {5} --valid_acc_target {6} '
'--max_valid_loss {7} --linformer_k {8} --linformer_blocks {9} '
'--load_from_checkpoint --checkpoint_dir {10} '
'--save_to_checkpoint --save_checkpoint_dir {11}').format(
BASE_COMMAND, model, task, max_seq_length, batch_size,
epochs, valid_acc_target, max_valid_loss, linformer_k,
blocks_str, input_checkpoint_dir, output_checkpoint_dir)
output = subprocess.check_output(
command, shell=True).decode('utf-8').strip()
for line in output.split('\n'):
match = \
re.search('Validation accuracy: ([-+]?[0-9]*\.?[0-9]+)%', line)
if match is not None:
validation_accuracy = float(match.group(1))
return validation_accuracy
raise RuntimeError('Could not get accuracy!')
def main():
model = 'roberta'
task = 'mrpc'
batch_size = 16
epochs = 1
max_seq_length = 512
linformer_k = 128
base_checkpoint_dir = '/lfs/1/keshav2/bert_checkpoints'
valid_loss, valid_acc, orig_runtime = evaluate(model, task, max_seq_length,
base_checkpoint_dir)
valid_acc_target = valid_acc - 1.0
max_valid_loss = valid_loss * 1.75
candidates = set(range(12))
opt_sequence = []
current_checkpoint_dir = base_checkpoint_dir
round_num = 0
best_accuracy = 0.0
print('Validation accuracy target: {0:.2f}%%'.format(valid_acc_target))
print('Maximum validation loss: {0:.4f}'.format(max_valid_loss))
print('Original runtime: {0:.2f} ms\n'.format(orig_runtime))
while len(candidates) > 0:
best_candidate = None
found_new_opt = False
print('*** Round {0} ***'.format(round_num))
for candidate in candidates:
blocks = sorted(opt_sequence + [candidate])
blocks_str = ','.join([str(block) for block in blocks])
output_checkpoint_dir = \
os.path.join(base_checkpoint_dir,
'optimizer_blocks={0}'.format(blocks_str))
if not os.path.isdir(output_checkpoint_dir):
os.mkdir(output_checkpoint_dir)
accuracy = train(model, task, max_seq_length, batch_size, epochs,
valid_acc_target / 100.0, max_valid_loss,
linformer_k, blocks, current_checkpoint_dir,
output_checkpoint_dir)
print('Candidate: {0}, accuracy={1:.2f}'.format(
candidate, accuracy))
if accuracy >= valid_acc_target:
if best_candidate is None or accuracy >= best_accuracy:
best_accuracy = accuracy
best_candidate = candidate
found_new_opt = True
print()
if not found_new_opt:
break
else:
round_num += 1
opt_sequence.append(best_candidate)
candidates.remove(best_candidate)
blocks = sorted(opt_sequence)
blocks_str = ','.join([str(block) for block in blocks])
current_checkpoint_dir = \
os.path.join(base_checkpoint_dir,
'optimizer_blocks={0}'.format(blocks_str))
if len(opt_sequence) == 0:
print('Could not optimize model!')
return
blocks = sorted(opt_sequence)
blocks_str = ','.join([str(block) for block in blocks])
output_checkpoint_dir = \
os.path.join(base_checkpoint_dir,
'optimizer_blocks={0}'.format(blocks_str))
opt_loss, opt_acc, opt_runtime = evaluate(model, task, max_seq_length,
output_checkpoint_dir,
linformer_k, blocks)
print('Final optimized sequence: {0}'.format(opt_sequence))
print('Final optimized accuracy: {0:.2f}%'.format(opt_acc))
print('Final optimized runtime: {0:.2f} ms'.format(opt_runtime))
print('Speedup: {0:.2f}x'.format(orig_runtime / opt_runtime))
if __name__=='__main__':
main() | 0.595963 | 0.178222 |
import ROOT
import pyhf
import click
import json
@click.command()
@click.option(
    "--root-workspace",
    help="The location of the root file containing the combined root workspace",
)
@click.option(
    "--pyhf-json",
    help="The location of the json file containing the pyhf likelihood info",
)
def compare_nuisance(root_workspace, pyhf_json):
    """Print the nuisance parameters unique to a ROOT workspace and to a
    pyhf JSON workspace, after normalizing ROOT's names to pyhf's scheme.

    Args:
        root_workspace: path to the ROOT file holding the "combined" workspace.
        pyhf_json: path to the pyhf likelihood specification JSON file.
    """
    # Get the root nuisance params
    infile = ROOT.TFile.Open(root_workspace)
    workspace = infile.Get("combined")
    mc = workspace.obj("ModelConfig")

    def exhaust_argset(s):
        # Walk the RooAbsCollection forward iterator until it returns a
        # null handle (no native Python iteration is available here).
        it = s.fwdIterator()
        while True:
            n = it.next()
            if not n:
                break
            yield n

    pars = [x.GetName() for x in exhaust_argset(mc.GetNuisanceParameters())] + [
        x.GetName() for x in exhaust_argset(mc.GetParametersOfInterest())
    ]
    # Replace some strings to match root nuisance param names to pyhf naming
    # scheme. (The original chained the gamma_stat replacement twice; once is
    # sufficient since str.replace substitutes all occurrences.)
    pars_root = [
        sub.replace("alpha_", "")
        .replace("gamma_stat_", "staterror_")
        .replace("lumi", "Lumi")
        .replace("_bin", "")
        for sub in pars
    ]
    # Get pyhf nuisance params; close the spec file deterministically.
    with open(pyhf_json) as spec_file:
        ws = pyhf.Workspace(json.load(spec_file))
    model = ws.model()
    pars_pyhf = []
    for k, v in model.config.par_map.items():
        sl = v["slice"]
        npars = sl.stop - sl.start
        # Vector-valued parameters (and staterrors) expand to one name per
        # component, mirroring ROOT's per-bin gamma parameters.
        if npars > 1 or "staterror" in k:
            for i in range(npars):
                pars_pyhf.append(f"{k}_{i}")
        else:
            pars_pyhf.append(k)
    # Compare the nuisance params via set differences in both directions.
    nuisance_dict = {"root": pars_root, "pyhf": pars_pyhf}
    unique_dict = {
        "pyhf": set(nuisance_dict["pyhf"]) - set(nuisance_dict["root"]),
        "root": set(nuisance_dict["root"]) - set(nuisance_dict["pyhf"]),
    }
    print("Nuisance params unique to pyhf:")
    for param in unique_dict["pyhf"]:
        print(param)
    print("\nNuisance params unique to root:")
    for param in unique_dict["root"]:
        print(param)
if __name__ == "__main__":
compare_nuisance() | scripts/compare_nuisance.py | import ROOT
import pyhf
import click
import json
@click.command()
@click.option(
    "--root-workspace",
    help="The location of the root file containing the combined root workspace",
)
@click.option(
    "--pyhf-json",
    help="The location of the json file containing the pyhf likelihood info",
)
def compare_nuisance(root_workspace, pyhf_json):
    """Report nuisance parameters present in only one of the two workspaces.

    ROOT parameter names are first rewritten to pyhf's naming scheme, then
    the symmetric set differences are printed.

    Args:
        root_workspace: path to the ROOT file holding the "combined" workspace.
        pyhf_json: path to the pyhf likelihood specification JSON file.
    """
    # Get the root nuisance params
    infile = ROOT.TFile.Open(root_workspace)
    workspace = infile.Get("combined")
    mc = workspace.obj("ModelConfig")

    def exhaust_argset(s):
        # Drain the RooAbsCollection forward iterator; a null handle ends it.
        it = s.fwdIterator()
        while True:
            n = it.next()
            if not n:
                break
            yield n

    pars = [x.GetName() for x in exhaust_argset(mc.GetNuisanceParameters())] + [
        x.GetName() for x in exhaust_argset(mc.GetParametersOfInterest())
    ]
    # Map ROOT nuisance parameter names onto the pyhf naming scheme.
    # (A duplicated gamma_stat replacement was removed: str.replace already
    # substitutes every occurrence.)
    pars_root = [
        sub.replace("alpha_", "")
        .replace("gamma_stat_", "staterror_")
        .replace("lumi", "Lumi")
        .replace("_bin", "")
        for sub in pars
    ]
    # Get pyhf nuisance params; the context manager closes the file handle.
    with open(pyhf_json) as spec_file:
        ws = pyhf.Workspace(json.load(spec_file))
    model = ws.model()
    pars_pyhf = []
    for k, v in model.config.par_map.items():
        sl = v["slice"]
        npars = sl.stop - sl.start
        # Multi-component parameters (and staterrors) get one name per bin.
        if npars > 1 or "staterror" in k:
            for i in range(npars):
                pars_pyhf.append(f"{k}_{i}")
        else:
            pars_pyhf.append(k)
    # Compare the nuisance params; build the set differences directly rather
    # than initializing throwaway lists first.
    nuisance_dict = {"root": pars_root, "pyhf": pars_pyhf}
    unique_dict = {
        "pyhf": set(nuisance_dict["pyhf"]) - set(nuisance_dict["root"]),
        "root": set(nuisance_dict["root"]) - set(nuisance_dict["pyhf"]),
    }
    print("Nuisance params unique to pyhf:")
    for param in unique_dict["pyhf"]:
        print(param)
    print("\nNuisance params unique to root:")
    for param in unique_dict["root"]:
        print(param)
if __name__ == "__main__":
compare_nuisance() | 0.38885 | 0.245571 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def epilepsy(path):
  """Epilepsy Attacks Data Set.

  Data from a clinical trial of 59 patients with epilepsy (Breslow, 1996),
  used to illustrate diagnostic techniques in Poisson regression.

  A data frame with 59 observations on the following 11 variables:

  `ID`: patient identification number.
  `Y1`, `Y2`, `Y3`, `Y4`: number of epilepsy attacks the patient had during
      the first through fourth follow-up periods.
  `Base`: number of epileptic attacks recorded during the 8 week period
      prior to randomization.
  `Age`: age of the patient.
  `Trt`: a factor with levels `placebo` / `progabide` indicating whether
      the anti-epilepsy drug Progabide has been applied or not.
  `Ysum`: total number of epilepsy attacks over the four follow-up periods.
  `Age10`: `Age` divided by 10.
  `Base4`: `Base` divided by 4.

  <NAME>. and <NAME>. (1990) Some covariance models for longitudinal
  count data with overdispersion. *Biometrics* **46**, 657-671.

  Args:
    path: str.
      Path to directory which either stores the file or otherwise the file
      will be downloaded and extracted there. Filename is `epilepsy.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 236 rows and 6 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'epilepsy.csv'
  csv_path = os.path.join(path, filename)
  if not os.path.exists(csv_path):
    # First use: fetch the CSV; later calls read the cached local copy.
    url = 'http://dustintran.com/data/r/robustbase/epilepsy.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='epilepsy.csv',
                               resume=False)
  data = pd.read_csv(csv_path, index_col=0, parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def epilepsy(path):
  """Epilepsy Attacks Data Set.

  Clinical-trial data on 59 epilepsy patients (Breslow, 1996), collected to
  illustrate diagnostic techniques in Poisson regression.

  The frame has 59 observations on 11 variables:

  `ID`: patient identification number.
  `Y1`-`Y4`: attack counts during follow-up periods one through four.
  `Base`: attacks recorded in the 8 week period before randomization.
  `Age`: patient age.
  `Trt`: factor (`placebo` / `progabide`) — whether the anti-epilepsy drug
      Progabide was applied.
  `Ysum`: total attacks across the four follow-up periods.
  `Age10`: `Age` divided by 10.
  `Base4`: `Base` divided by 4.

  <NAME>. and <NAME>. (1990) Some covariance models for longitudinal
  count data with overdispersion. *Biometrics* **46**, 657-671.

  Args:
    path: str.
      Directory which either stores the file, or into which the file will
      be downloaded and extracted. Filename is `epilepsy.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 236 rows and 6 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  base_dir = os.path.expanduser(path)
  target = os.path.join(base_dir, 'epilepsy.csv')
  if not os.path.exists(target):
    # Download on demand; subsequent calls hit the cached file.
    maybe_download_and_extract(
        base_dir,
        'http://dustintran.com/data/r/robustbase/epilepsy.csv',
        save_file_name='epilepsy.csv',
        resume=False)
  frame = pd.read_csv(target, index_col=0, parse_dates=True)
  return frame.values, {'columns': frame.columns}
from __future__ import print_function
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
# --------------------------------------------------------
# Comments added by <NAME>.
# --------------------------------------------------------
import numpy as np
import pdb
def GenerateAnchors(base_size, anchor_ratios, anchor_scales):
    """Build the full anchor set for one feature-map cell.

    Starts from a square "base" anchor of side ``base_size`` at the origin
    (``base_size`` is chosen as the feature stride of the backbone), produces
    one variant per aspect ratio, then rescales each of those by every entry
    of ``anchor_scales``.

    Each anchor is a 4-tuple (x_min, y_min, x_max, y_max) of two
    non-adjacent rectangle vertices.

    Returns:
        ndarray of shape (len(anchor_ratios) * len(anchor_scales), 4).
    """
    base_anchor = np.array([0, 0, base_size - 1, base_size - 1])
    rows = []
    for ratio_anchor in ApplyRatios(base_anchor, anchor_ratios):
        rows.append(ApplyScales(ratio_anchor, anchor_scales))
    return np.vstack(rows)
def ApplyRatios(anchor, anchor_ratios):
    """Return one anchor per aspect ratio, each preserving the input's area.

    With ratio r = h/w and fixed area A, the width becomes sqrt(A / r) and
    the height r times that width (both rounded).  Whether the ratio is
    read as w/h or h/w is immaterial for the usual symmetric ratio lists
    such as [0.5, 1.0, 2.0].
    """
    w, h, ctr_x, ctr_y = GetAnchorTuple(anchor)
    area = w * h
    widths = np.round(np.sqrt(area / anchor_ratios))
    heights = np.round(widths * anchor_ratios)
    # One row per ratio: shape is len(anchor_ratios) x 4.
    return MakeAnchors(widths, heights, ctr_x, ctr_y)
def ApplyScales(anchor, anchor_scales):
    """Return one anchor per scale, all sharing the input anchor's centre."""
    w, h, ctr_x, ctr_y = GetAnchorTuple(anchor)
    # One row per scale: shape is len(anchor_scales) x 4.
    return MakeAnchors(w * anchor_scales, h * anchor_scales, ctr_x, ctr_y)
def MakeAnchors(w_arr, h_arr, ctr_x, ctr_y):
    """Assemble (x_min, y_min, x_max, y_max) rows from width/height arrays
    and a shared centre.

    In inclusive pixel coordinates a box of width w centred on ctr_x spans
    ctr_x +/- 0.5*(w - 1); likewise for heights.
    """
    # np.hstack joins along axis 1, hence the added trailing axis;
    # the result has shape len(w_arr) x 4.
    half_w = 0.5 * (w_arr[:, np.newaxis] - 1)
    half_h = 0.5 * (h_arr[:, np.newaxis] - 1)
    return np.hstack((ctr_x - half_w,
                      ctr_y - half_h,
                      ctr_x + half_w,
                      ctr_y + half_h))
def GetAnchorTuple(anchor):
    """Return (width, height, ctr_x, ctr_y) for a corner-format anchor.

    The +1 terms account for inclusive pixel coordinates: an anchor with
    x_min == x_max is one pixel wide.
    """
    x_min, y_min, x_max, y_max = anchor[0], anchor[1], anchor[2], anchor[3]
    width = x_max - x_min + 1
    height = y_max - y_min + 1
    ctr_x = x_min + 0.5 * (width - 1)
    ctr_y = y_min + 0.5 * (height - 1)
    return width, height, ctr_x, ctr_y
if __name__ == '__main__':
# Test GenerateAnchors below
_anchors = GenerateAnchors(base_size=16,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_scales=2**np.arange(3,6))
print(_anchors) | lib/model/rpn_v1/generate_anchors.py | from __future__ import print_function
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
# --------------------------------------------------------
# Comments added by <NAME>.
# --------------------------------------------------------
import numpy as np
import pdb
def GenerateAnchors(base_size, anchor_ratios, anchor_scales):
"""
The base_anchor is a square box of dimensions base_size.
The value of base_size is chosen as the feature stride of
the feature extractor network being used (35 for ShuffleNet).
The aim of this function is to transform the base_anchor
using all combination of anchor_scales and anchor_ratios.
Each anchor is a rectanglt defined by a 4-tuple containing
coordinates (x_min, y_min, x_max, y_max) of two non-adjacent
vertices.
"""
base_anchor = np.array([0, 0, base_size-1, base_size-1])
ratioed_anchors = ApplyRatios(base_anchor, anchor_ratios)
anchors = np.vstack([ApplyScales(anchor_, anchor_scales) \
for anchor_ in ratioed_anchors])
return anchors
def ApplyRatios(anchor, anchor_ratios):
"""
This assume that aspect_ratio is defined as height/width.
Let the ratio be r, then assuming the width is w', the height
is rw'. Then the area is rw'^2, and so given the area
and a ratio, we can calculate the corresponding w' and h'.
P.S. It doesn't really matter whether ratio is defined as w/h or
h/w because in the end we use a "symmetric" list of ratios, e.g.
[0.5, 1.0, 2.0]
"""
w, h, ctr_x, ctr_y = GetAnchorTuple(anchor)
anchor_area = w*h
w_arr = np.round(np.sqrt(anchor_area/anchor_ratios))
h_arr = np.round(w_arr*anchor_ratios)
# The shape of anchors will be len(anchor_ratios) x 4
anchors = MakeAnchors(w_arr, h_arr, ctr_x, ctr_y)
return anchors
def ApplyScales(anchor, anchor_scales):
w, h, ctr_x, ctr_y = GetAnchorTuple(anchor)
w_arr = w*anchor_scales
h_arr = h*anchor_scales
# The shape of anchors will be len(anchor_scales) x 4
anchors = MakeAnchors(w_arr, h_arr, ctr_x, ctr_y)
return anchors
def MakeAnchors(w_arr, h_arr, ctr_x, ctr_y):
# We add a new axis because np.hstack concatenates along
# the second dimension.
w_arr = w_arr[:, np.newaxis]
h_arr = h_arr[:, np.newaxis]
# The shape of anchors will be len(w_arr) x 4
anchors = np.hstack((ctr_x - 0.5*(w_arr - 1),
ctr_y - 0.5*(h_arr -1),
ctr_x + 0.5*(w_arr - 1),
ctr_y + 0.5*(h_arr -1)))
return anchors
def GetAnchorTuple(anchor):
"""
Returns height, width and center coordinates for an anchor.
"""
h = anchor[3] - anchor[1] + 1
w = anchor[2] - anchor[0] + 1
ctr_x = anchor[0] + 0.5*(w-1)
ctr_y = anchor[1] + 0.5*(h-1)
return w, h, ctr_x, ctr_y
if __name__ == '__main__':
# Test GenerateAnchors below
_anchors = GenerateAnchors(base_size=16,
anchor_ratios=[0.5, 1.0, 2.0],
anchor_scales=2**np.arange(3,6))
print(_anchors) | 0.809314 | 0.391435 |
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import seaborn as sns
import sys
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from keyname import keyname as kn
from fileshash import fileshash as fsh
import math
# Inputs: argv[1] is the metric-key CSV (one row per metric, with columns
# such as Dimension, Dimension Type, Sliding, Base Metric); argv[2] is the
# measured-data CSV with 'Metric' and 'Match Distance' columns.
dataframe_filename = sys.argv[2]
df_key = pd.read_csv(sys.argv[1])
df_data = pd.read_csv(dataframe_filename)
print("data loaded!")
# Index the key table by metric name: {metric: {column: value}}.
# BUG FIX: Series.iteritems() was removed in pandas 2.0; items() is the
# long-supported equivalent.
key = {
    row['Metric'] : {
        col : row[col]
        for col, val in row.items() if col != 'Metric'
    }
    for idx, row in df_key.iterrows()
}
# Annotate each data row with its metric's dimension info from the key.
df_data['Dimension'] = df_data.apply(
    lambda x: key[x['Metric']]['Dimension'],
    axis=1
)
df_data['Dimension Type'] = df_data.apply(
    lambda x: key[x['Metric']]['Dimension Type'],
    axis=1
)
df_data['Dimension'] = df_data.apply(
    lambda x: x['Dimension Type'] + " " + str(x['Dimension']),
    axis=1
)
# Rewrite 'Metric' to its base name, with an optional 'Sliding ' prefix...
df_data['Metric'] = df_data.apply(
    lambda x: (
        ('Sliding ' if key[x['Metric']]['Sliding'] else '')
        + key[x['Metric']]['Base Metric']
    ),
    axis=1
)
# ...then map the base names to short display labels.
# NOTE(review): a 'Sliding ' prefix added above would KeyError in this
# lookup — presumably Sliding is always false in this dataset; confirm.
df_data['Metric'] = df_data.apply(
    lambda x: {
        'Hamming Metric' : 'Hamming',
        'Hash Metric' : 'Hash',
        'Asymmetric Wrap Metric' : 'Integer',
        'Symmetric Wrap Metric' : 'Integer (bi)',
        'Approx Dual Streak Metric' : 'Streak',
    }[x['Metric']],
    axis=1
)
# Rank rows within each metric by descending Match Distance
# (ascending expects a bool; 0 worked only by truthiness).
df_data['Rank'] = 0
for metric in df_data['Metric'].unique():
    df_data.loc[df_data['Metric'] == metric, 'Rank'] = (
        df_data[df_data['Metric'] == metric][
            'Match Distance'
        ].rank(ascending=False, method='first')
    )
print("data crunched!")
# Per-metric summary statistics of Match Distance.
print("MEAN")
for metric in df_data['Metric'].unique():
    print(
        metric,
        df_data[df_data['Metric'] == metric]["Match Distance"].mean()
    )
print("MIN")
for metric in df_data['Metric'].unique():
    print(
        metric,
        df_data[df_data['Metric'] == metric]["Match Distance"].min()
    )
print("MAX")
for metric in df_data['Metric'].unique():
print(
metric,
df_data[df_data['Metric'] == metric]["Match Distance"].max()
) | script/LowDimensionalityStat.py | import matplotlib
matplotlib.use('Agg')
import pandas as pd
import seaborn as sns
import sys
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from keyname import keyname as kn
from fileshash import fileshash as fsh
import math
dataframe_filename = sys.argv[2]
df_key = pd.read_csv(sys.argv[1])
df_data = pd.read_csv(dataframe_filename)
print("data loaded!")
key = {
row['Metric'] : {
col : row[col]
for col, val in row.iteritems() if col != 'Metric'
}
for idx, row in df_key.iterrows()
}
df_data['Dimension'] = df_data.apply(
lambda x: key[x['Metric']]['Dimension'],
axis=1
)
df_data['Dimension Type'] = df_data.apply(
lambda x: key[x['Metric']]['Dimension Type'],
axis=1
)
df_data['Dimension'] = df_data.apply(
lambda x: x['Dimension Type'] + " " + str(x['Dimension']),
axis=1
)
df_data['Metric'] = df_data.apply(
lambda x: (
('Sliding ' if key[x['Metric']]['Sliding'] else '')
+ key[x['Metric']]['Base Metric']
),
axis=1
)
df_data['Metric'] = df_data.apply(
lambda x: {
'Hamming Metric' : 'Hamming',
'Hash Metric' : 'Hash',
'Asymmetric Wrap Metric' : 'Integer',
'Symmetric Wrap Metric' : 'Integer (bi)',
'Approx Dual Streak Metric' : 'Streak',
}[x['Metric']],
axis=1
)
df_data['Rank'] = 0
for metric in df_data['Metric'].unique():
df_data.loc[df_data['Metric'] == metric, 'Rank'] = (
df_data[df_data['Metric'] == metric][
'Match Distance'
].rank(ascending=0, method='first')
)
print("data crunched!")
print("MEAN")
for metric in df_data['Metric'].unique():
print(
metric,
df_data[df_data['Metric'] == metric]["Match Distance"].mean()
)
print("MIN")
for metric in df_data['Metric'].unique():
print(
metric,
df_data[df_data['Metric'] == metric]["Match Distance"].min()
)
print("MAX")
for metric in df_data['Metric'].unique():
print(
metric,
df_data[df_data['Metric'] == metric]["Match Distance"].max()
) | 0.277767 | 0.198472 |
import mmap
import time
import struct
import ctypes
import io
import _thread
import weakref
from time import sleep
from ctypes import wintypes
INVALID = 0
# https://stackoverflow.com/questions/31495461/mmap-cant-attach-to-existing-region-without-knowing-its-size-windows
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
FILE_MAP_ALL_ACCESS = 0x001f
class MEMORY_BASIC_INFORMATION(ctypes.Structure):
    """ctypes mirror of the Win32 MEMORY_BASIC_INFORMATION struct filled in
    by kernel32.VirtualQuery; RegionSize is the only field this module
    consumes."""
    _fields_ = (('BaseAddress', wintypes.LPVOID),
                ('AllocationBase', wintypes.LPVOID),
                ('AllocationProtect', wintypes.DWORD),
                ('RegionSize', ctypes.c_size_t),
                ('State', wintypes.DWORD),
                ('Protect', wintypes.DWORD),
                ('Type', wintypes.DWORD))
PMEMORY_BASIC_INFORMATION = ctypes.POINTER(MEMORY_BASIC_INFORMATION)
kernel32.VirtualQuery.restype = ctypes.c_size_t
kernel32.VirtualQuery.argtypes = (wintypes.LPCVOID, PMEMORY_BASIC_INFORMATION, ctypes.c_size_t)
kernel32.OpenFileMappingW.restype = wintypes.HANDLE
kernel32.OpenFileMappingW.argtypes = (wintypes.DWORD, wintypes.BOOL, wintypes.LPCWSTR)
kernel32.MapViewOfFile.restype = wintypes.LPVOID
kernel32.MapViewOfFile.argtypes = (wintypes.HANDLE, wintypes.DWORD, wintypes.DWORD, wintypes.DWORD, ctypes.c_size_t)
kernel32.CloseHandle.argtypes = (wintypes.HANDLE,)
def get_mmap_tagname_size(tagname):
    """Return the byte size of an existing named Windows file mapping.

    Python's mmap cannot attach to a tagname without knowing its size, so
    the region is opened via the raw Win32 API and measured with
    VirtualQuery (see the StackOverflow link at the top of the module).
    """
    #print("REGION SIZE:", tagname)
    hMap = kernel32.OpenFileMappingW(FILE_MAP_ALL_ACCESS, False, tagname)
    pBuf = kernel32.MapViewOfFile(hMap, FILE_MAP_ALL_ACCESS, 0, 0, 0)
    # Closing the handle is safe: the mapped view keeps the region alive.
    kernel32.CloseHandle(hMap)
    mbi = MEMORY_BASIC_INFORMATION()
    kernel32.VirtualQuery(pBuf, ctypes.byref(mbi), mmap.PAGESIZE)
    # NOTE(review): pBuf is never passed to UnmapViewOfFile, so each call
    # appears to leak one view of the region — confirm this is intentional.
    return mbi.RegionSize
# Registry of weak references to every live Win32SHM instance; the watcher
# thread below reconnects any instance whose shared block was invalidated.
# BUG FIX: this list must exist BEFORE the watcher thread is started — the
# original order let the thread race module initialisation; its `for` loop
# is outside the try block, so an early-scheduled thread died with a
# NameError and the watcher silently never ran.
_shm_insts = []

def _runme():
    """Background watcher: poll registered SHM instances forever.

    Any instance whose validity byte (offset 0) reads INVALID is
    reconnected; dead weak references are compacted out of the registry.
    """
    while True:
        clean_out = False
        for shm_inst_ref in _shm_insts:
            shm_inst = shm_inst_ref()
            try:
                if shm_inst is None:
                    clean_out = True
                elif shm_inst[0] == INVALID:
                    shm_inst._reconnect()
            except (AttributeError, IndexError, ValueError):
                # Instance mid-teardown or mid-reconnect; retry next cycle.
                pass
        if clean_out:
            _shm_insts[:] = [i for i in _shm_insts if i() is not None]
        time.sleep(0.1)

_thread.start_new_thread(_runme, ())
class Win32SHM:
    def __init__(self, location, create, new_size=None):
        """Open (create=False) or create (create=True) a named shared block.

        location: bytes key; the Windows tagname becomes 'ssvc_<location>'.
        create:   True to create the block, False to attach to an existing one.
        new_size: requested size in bytes; required when creating.
        """
        self.tagname = 'ssvc_%s' % location.decode('ascii')
        self._lock = _thread.allocate_lock()
        if create:
            self._create(new_size)
        else:
            self._connect()
        # Register with the module-level watcher thread (weakly, so the
        # registry does not keep instances alive).
        _shm_insts.append(weakref.ref(self))
    def _create(self, new_size):
        """Create a new shared-memory block of at least new_size bytes.

        The size is rounded up to a power-of-two multiple of the page size,
        and byte 0 is set to 1 afterwards to mark the block valid for readers.
        """
        assert new_size is not None
        new_size = self.__get_size(new_size)
        while True:
            try:
                memory = mmap.mmap(-1,
                                   length=new_size,
                                   tagname=self.tagname,
                                   access=mmap.ACCESS_WRITE)
                break
            except PermissionError:
                # Another process briefly holds the mapping; retry shortly.
                time.sleep(0.01)
        memory[0] = 1
        self.memory = memory
        # NOTE(review): assigning dunders on the instance has no effect on
        # operator syntax (Python resolves dunders on the type); the class
        # methods below are what actually serve obj[i] / len(obj).
        self.__getitem__ = self.memory.__getitem__
        #self.__setitem__ = self.memory.__setitem__
        self.__len__ = self.memory.__len__
    def _connect(self):
        """Attach to an existing shared-memory block by tagname.

        Spins until the block exists and its validity byte (offset 0) is
        non-zero, i.e. the creator has finished initialising it.
        """
        while True:
            try:
                memory = mmap.mmap(-1,
                                   length=get_mmap_tagname_size(self.tagname),
                                   tagname=self.tagname,
                                   access=mmap.ACCESS_WRITE)
                if memory[0] == INVALID:
                    # Exists but not (yet) marked valid; back off and retry.
                    memory.close()
                    time.sleep(0.01)
                else:
                    break
            except PermissionError:
                time.sleep(0.01)
                continue
        self.memory = memory
        # See _create: instance-level dunder assignments are inert.
        self.__getitem__ = self.memory.__getitem__
        #self.__setitem__ = self.memory.__setitem__
        self.__len__ = self.memory.__len__
    def _reconnect(self):
        """Re-attach after the block was invalidated by its owner.

        A bytes snapshot of the old contents temporarily replaces the
        mapping, so concurrent readers never observe a closed buffer while
        the new mapping is being opened.
        """
        # Emulate access to the old data by copying
        # it before we can open the new data
        old_memory = self.memory
        self.memory = old_memory[:]
        self.__getitem__ = self.memory.__getitem__
        #self.__setitem__ = self.memory.__setitem__
        self.__len__ = self.memory.__len__
        old_memory.close()
        self._connect()
    def __get_size(self, size):
        # Round the requested size up to the next power-of-two multiple of
        # the system page size (mmap regions are page-granular).
        chk_size = mmap.PAGESIZE
        while chk_size < size:
            chk_size *= 2
        assert chk_size >= size
        return chk_size
    def __len__(self):
        # Byte length of the mapped region (or of the snapshot copy).
        return len(self.memory)

    def __getitem__(self, item):
        # Delegate indexing/slicing to the underlying mmap (or snapshot).
        return self.memory[item]

    def __setitem__(self, key, value):
        # Writes go straight to the shared mapping.
        self.memory[key] = value
def close(self):
try:
self.memory.close()
except AttributeError:
pass | speedysvc/client_server/shared_memory/Win32SHM.py | import mmap
import time
import struct
import ctypes
import io
import _thread
import weakref
from time import sleep
from ctypes import wintypes
INVALID = 0
# https://stackoverflow.com/questions/31495461/mmap-cant-attach-to-existing-region-without-knowing-its-size-windows
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
FILE_MAP_ALL_ACCESS = 0x001f
class MEMORY_BASIC_INFORMATION(ctypes.Structure):
_fields_ = (('BaseAddress', wintypes.LPVOID),
('AllocationBase', wintypes.LPVOID),
('AllocationProtect', wintypes.DWORD),
('RegionSize', ctypes.c_size_t),
('State', wintypes.DWORD),
('Protect', wintypes.DWORD),
('Type', wintypes.DWORD))
PMEMORY_BASIC_INFORMATION = ctypes.POINTER(MEMORY_BASIC_INFORMATION)
kernel32.VirtualQuery.restype = ctypes.c_size_t
kernel32.VirtualQuery.argtypes = (wintypes.LPCVOID, PMEMORY_BASIC_INFORMATION, ctypes.c_size_t)
kernel32.OpenFileMappingW.restype = wintypes.HANDLE
kernel32.OpenFileMappingW.argtypes = (wintypes.DWORD, wintypes.BOOL, wintypes.LPCWSTR)
kernel32.MapViewOfFile.restype = wintypes.LPVOID
kernel32.MapViewOfFile.argtypes = (wintypes.HANDLE, wintypes.DWORD, wintypes.DWORD, wintypes.DWORD, ctypes.c_size_t)
kernel32.CloseHandle.argtypes = (wintypes.HANDLE,)
def get_mmap_tagname_size(tagname):
#print("REGION SIZE:", tagname)
hMap = kernel32.OpenFileMappingW(FILE_MAP_ALL_ACCESS, False, tagname)
pBuf = kernel32.MapViewOfFile(hMap, FILE_MAP_ALL_ACCESS, 0, 0, 0)
kernel32.CloseHandle(hMap)
mbi = MEMORY_BASIC_INFORMATION()
kernel32.VirtualQuery(pBuf, ctypes.byref(mbi), mmap.PAGESIZE)
return mbi.RegionSize
def _runme():
while True:
clean_out = False
for shm_inst_ref in _shm_insts:
shm_inst = shm_inst_ref()
try:
if shm_inst is None:
clean_out = True
elif shm_inst[0] == INVALID:
shm_inst._reconnect()
except (AttributeError, IndexError, ValueError):
pass
if clean_out:
_shm_insts[:] = [i for i in _shm_insts if i() is not None]
time.sleep(0.1)
_thread.start_new_thread(_runme, ())
_shm_insts = []
class Win32SHM:
def __init__(self, location, create, new_size=None):
self.tagname = 'ssvc_%s' % location.decode('ascii')
self._lock = _thread.allocate_lock()
if create:
self._create(new_size)
else:
self._connect()
_shm_insts.append(weakref.ref(self))
def _create(self, new_size):
assert new_size is not None
new_size = self.__get_size(new_size)
while True:
try:
memory = mmap.mmap(-1,
length=new_size,
tagname=self.tagname,
access=mmap.ACCESS_WRITE)
break
except PermissionError:
time.sleep(0.01)
memory[0] = 1
self.memory = memory
self.__getitem__ = self.memory.__getitem__
#self.__setitem__ = self.memory.__setitem__
self.__len__ = self.memory.__len__
def _connect(self):
while True:
try:
memory = mmap.mmap(-1,
length=get_mmap_tagname_size(self.tagname),
tagname=self.tagname,
access=mmap.ACCESS_WRITE)
if memory[0] == INVALID:
memory.close()
time.sleep(0.01)
else:
break
except PermissionError:
time.sleep(0.01)
continue
self.memory = memory
self.__getitem__ = self.memory.__getitem__
#self.__setitem__ = self.memory.__setitem__
self.__len__ = self.memory.__len__
def _reconnect(self):
# Emulate access to the old data by copying
# it before we can open the new data
old_memory = self.memory
self.memory = old_memory[:]
self.__getitem__ = self.memory.__getitem__
#self.__setitem__ = self.memory.__setitem__
self.__len__ = self.memory.__len__
old_memory.close()
self._connect()
def __get_size(self, size):
chk_size = mmap.PAGESIZE
while chk_size < size:
chk_size *= 2
assert chk_size >= size
return chk_size
def __len__(self):
return len(self.memory)
def __getitem__(self, item):
return self.memory[item]
def __setitem__(self, key, value):
self.memory[key] = value
def close(self):
try:
self.memory.close()
except AttributeError:
pass | 0.255622 | 0.109658 |
from typing import Optional, List
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDF, RDFS, OWL, DCTERMS, XSD
from client.model._TERN import TERN
from client.model.klass import Klass
from client.model.agent import Agent
from client.model.concept import Concept
import re
class RDFDataset(Klass):
def __init__(
self,
iri: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
subjects: List[Concept] = None,
creator: Optional[Agent] = None,
publisher: Optional[Agent] = None,
contributors: Optional[List[Agent]] = None,
modified: Optional[str] = None,
created: Optional[str] = None,
issued: Optional[str] = None,
):
if title is not None:
assert isinstance(title.__class__, str.__class__), \
"If you provide a value for the title parameter, it must be of type string"
if description is not None:
assert isinstance(description.__class__, str.__class__), \
"If you provide a value for the description parameter, it must be of type string"
if subjects is not None:
for subject in subjects:
assert isinstance(subject.__class__, Concept.__class__), \
"If supplied, each subject must be of type Concept"
if creator is not None:
assert isinstance(creator.__class__, Agent.__class__), "If supplied a creator must be of type Agent"
if contributors is not None:
for contributor in contributors:
assert isinstance(contributor.__class__, Agent.__class__), \
"If supplied, each contributor must be of type Agent"
if publisher is not None:
assert isinstance(publisher.__class__, Agent.__class__), "If supplied a publisher must be of type Agent"
date_pattern = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2}")
if created is not None:
assert isinstance(created.__class__, str.__class__), \
"If you provide a value for the created parameter, it must be of type string"
assert date_pattern.match(created), "The value for created you provided is not in the YYYY-MM-DD format"
if modified is not None:
assert isinstance(modified.__class__, str.__class__), \
"If you provide a value for the modified parameter, it must be of type string"
assert date_pattern.match(modified), "The value for modified you provided is not in the YYYY-MM-DD format"
if issued is not None:
assert isinstance(issued.__class__, str.__class__), \
"If you provide a value for the issued parameter, it must be of type string"
assert date_pattern.match(issued), "The value for issued you provided is not in the YYYY-MM-DD format"
"""Receive and use or make an IRI"""
if iri is None:
self.id = self.make_uuid()
iri = URIRef(f"http://example.com/rdfdataset/{self.id}")
self.iri = URIRef(iri)
super().__init__(iri)
if title is not None:
self.title = title
self.label = title
else:
self.title = f"RDF Dataset with ID {self.id if hasattr(self, 'id') else self.iri.split('/')[-1]}"
self.label = self.title
if description is not None:
self.description = description
if subjects is not None:
self.subjects = subjects
if creator is not None:
self.creator = creator
if contributors is not None:
self.contributors = contributors
if publisher is not None:
self.publisher = publisher
if created is not None:
self.created = created
if modified is not None:
self.modified = modified
if issued is not None:
self.issued = issued
def to_graph(self) -> Graph:
g = super().to_graph()
g.remove((self.iri, RDF.type, OWL.Class))
g.add((self.iri, RDF.type, TERN.RDFDataset))
g.remove((self.iri, RDFS.label, None))
g.add((self.iri, RDFS.label, Literal(self.label)))
if hasattr(self, "title"):
g.add((self.iri, DCTERMS.title, Literal(self.title)))
if hasattr(self, "description"):
g.add((self.iri, DCTERMS.description, Literal(self.description)))
if hasattr(self, "subjects"):
for subject in self.subjects:
g.add((self.iri, DCTERMS.creator, subject.iri))
if (subject.iri, RDF.type, None) not in g:
g += subject.to_graph()
if hasattr(self, "creator"):
g.add((self.iri, DCTERMS.creator, self.creator.iri))
if (self.creator.iri, RDF.type, None) not in g:
g += self.creator.to_graph()
if hasattr(self, "contributors"):
for contributor in self.contributors:
g.add((self.iri, DCTERMS.creator, contributor.iri))
if (contributor.iri, RDF.type, None) not in g:
g += contributor.to_graph()
if hasattr(self, "publisher"):
g.add((self.iri, DCTERMS.publisher, self.publisher.iri))
if (self.publisher.iri, RDF.type, None) not in g:
g += self.publisher.to_graph()
if hasattr(self, "created"):
g.add((self.iri, DCTERMS.created, Literal(self.created, datatype=XSD.date)))
if hasattr(self, "modified"):
g.add((self.iri, DCTERMS.modified, Literal(self.modified, datatype=XSD.date)))
if hasattr(self, "issued"):
g.add((self.iri, DCTERMS.issued, Literal(self.issued, datatype=XSD.date)))
return g | client/model/rdf_dataset.py | from typing import Optional, List
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDF, RDFS, OWL, DCTERMS, XSD
from client.model._TERN import TERN
from client.model.klass import Klass
from client.model.agent import Agent
from client.model.concept import Concept
import re
class RDFDataset(Klass):
def __init__(
self,
iri: Optional[str] = None,
title: Optional[str] = None,
description: Optional[str] = None,
subjects: List[Concept] = None,
creator: Optional[Agent] = None,
publisher: Optional[Agent] = None,
contributors: Optional[List[Agent]] = None,
modified: Optional[str] = None,
created: Optional[str] = None,
issued: Optional[str] = None,
):
if title is not None:
assert isinstance(title.__class__, str.__class__), \
"If you provide a value for the title parameter, it must be of type string"
if description is not None:
assert isinstance(description.__class__, str.__class__), \
"If you provide a value for the description parameter, it must be of type string"
if subjects is not None:
for subject in subjects:
assert isinstance(subject.__class__, Concept.__class__), \
"If supplied, each subject must be of type Concept"
if creator is not None:
assert isinstance(creator.__class__, Agent.__class__), "If supplied a creator must be of type Agent"
if contributors is not None:
for contributor in contributors:
assert isinstance(contributor.__class__, Agent.__class__), \
"If supplied, each contributor must be of type Agent"
if publisher is not None:
assert isinstance(publisher.__class__, Agent.__class__), "If supplied a publisher must be of type Agent"
date_pattern = re.compile("[0-9]{4}-[0-9]{2}-[0-9]{2}")
if created is not None:
assert isinstance(created.__class__, str.__class__), \
"If you provide a value for the created parameter, it must be of type string"
assert date_pattern.match(created), "The value for created you provided is not in the YYYY-MM-DD format"
if modified is not None:
assert isinstance(modified.__class__, str.__class__), \
"If you provide a value for the modified parameter, it must be of type string"
assert date_pattern.match(modified), "The value for modified you provided is not in the YYYY-MM-DD format"
if issued is not None:
assert isinstance(issued.__class__, str.__class__), \
"If you provide a value for the issued parameter, it must be of type string"
assert date_pattern.match(issued), "The value for issued you provided is not in the YYYY-MM-DD format"
"""Receive and use or make an IRI"""
if iri is None:
self.id = self.make_uuid()
iri = URIRef(f"http://example.com/rdfdataset/{self.id}")
self.iri = URIRef(iri)
super().__init__(iri)
if title is not None:
self.title = title
self.label = title
else:
self.title = f"RDF Dataset with ID {self.id if hasattr(self, 'id') else self.iri.split('/')[-1]}"
self.label = self.title
if description is not None:
self.description = description
if subjects is not None:
self.subjects = subjects
if creator is not None:
self.creator = creator
if contributors is not None:
self.contributors = contributors
if publisher is not None:
self.publisher = publisher
if created is not None:
self.created = created
if modified is not None:
self.modified = modified
if issued is not None:
self.issued = issued
def to_graph(self) -> Graph:
g = super().to_graph()
g.remove((self.iri, RDF.type, OWL.Class))
g.add((self.iri, RDF.type, TERN.RDFDataset))
g.remove((self.iri, RDFS.label, None))
g.add((self.iri, RDFS.label, Literal(self.label)))
if hasattr(self, "title"):
g.add((self.iri, DCTERMS.title, Literal(self.title)))
if hasattr(self, "description"):
g.add((self.iri, DCTERMS.description, Literal(self.description)))
if hasattr(self, "subjects"):
for subject in self.subjects:
g.add((self.iri, DCTERMS.creator, subject.iri))
if (subject.iri, RDF.type, None) not in g:
g += subject.to_graph()
if hasattr(self, "creator"):
g.add((self.iri, DCTERMS.creator, self.creator.iri))
if (self.creator.iri, RDF.type, None) not in g:
g += self.creator.to_graph()
if hasattr(self, "contributors"):
for contributor in self.contributors:
g.add((self.iri, DCTERMS.creator, contributor.iri))
if (contributor.iri, RDF.type, None) not in g:
g += contributor.to_graph()
if hasattr(self, "publisher"):
g.add((self.iri, DCTERMS.publisher, self.publisher.iri))
if (self.publisher.iri, RDF.type, None) not in g:
g += self.publisher.to_graph()
if hasattr(self, "created"):
g.add((self.iri, DCTERMS.created, Literal(self.created, datatype=XSD.date)))
if hasattr(self, "modified"):
g.add((self.iri, DCTERMS.modified, Literal(self.modified, datatype=XSD.date)))
if hasattr(self, "issued"):
g.add((self.iri, DCTERMS.issued, Literal(self.issued, datatype=XSD.date)))
return g | 0.846863 | 0.267393 |
# DDL for a reference table exercising the common MySQL column types.
reftable = """
CREATE TABLE `by0001` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`fld1` char(10) DEFAULT NULL,
`fld2` smallint(6) DEFAULT NULL,
`fld4` mediumint(9) DEFAULT NULL,
`fld5` int(11) DEFAULT NULL,
`fld6` bigint(20) DEFAULT NULL,
`fld7` float DEFAULT NULL,
`fld8` double DEFAULT NULL,
`fld9` double DEFAULT NULL,
`fld10` double DEFAULT NULL,
`fld11` decimal(10,2) DEFAULT NULL,
`fld12` bit(1) DEFAULT NULL,
`fld13` double DEFAULT NULL,
`fld14` tinyint(1) DEFAULT NULL,
`fld15` text DEFAULT NULL,
`fld16` blob DEFAULT NULL,
`fld17` enum('123','234') DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
"""
# Users fixture table; garbage_1111.user_id references its id.
usertable = """
CREATE TABLE `users_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`first_name` varchar(100) DEFAULT NULL,
`uid` varchar(100) DEFAULT NULL,
`email` varchar(100) DEFAULT NULL,
`owner_id` int(11) unsigned DEFAULT NULL,
`created_on` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
"""
# Seed rows for users_1111 ('<EMAIL>' is a scrubbed placeholder).
userdata = [
"insert into users_1111 (first_name, uid, email, owner_id) \
values ('hhhh', '13221312-123123123-123123', '<EMAIL>', 1);",
"insert into users_1111 (first_name, uid, email, owner_id) \
values ('bbbb', '13221312-123123123-343434', '<EMAIL>', 1);"
]
# Child table of users_1111 via user_id.
garbagetable = """
CREATE TABLE `garbage_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`scoops` int(11) DEFAULT NULL,
`boops` varchar(50) DEFAULT NULL,
`user_id` int(11) default null,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=143 DEFAULT CHARSET=utf8mb3;
"""
# Seed rows for garbage_1111 (ids start at 143 per the AUTO_INCREMENT above).
garbagedata = [
"insert into garbage_1111 (scoops, boops, user_id) \
values (44, 'jjjkkkkkkkkkkkk', 1);",
"insert into garbage_1111 (scoops, boops, user_id) \
values (88, 'dfsdfdfsdfsdfsdf', 1);",
]
# Join table linking garbage_1111 and trucks_1111 (many-to-many).
garbagetruckstable = """
CREATE TABLE `garbage_trucks_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`garbage_id` int(11) DEFAULT NULL,
`truck_id` varchar(50) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=143 DEFAULT CHARSET=utf8mb3;
"""
# Trucks fixture table.
truckstable = """
CREATE TABLE `trucks_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`truck_name` varchar(50) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=143 DEFAULT CHARSET=utf8mb3;
"""
# Seed rows for the join table (ids match the 143-based auto-increments).
garbagetrucksdata = [
"insert into garbage_trucks_1111 (garbage_id, truck_id) \
values (143, 143);",
"insert into garbage_trucks_1111 (garbage_id, truck_id) \
values (144, 143);"
]
# Seed rows for trucks_1111.
trucksdata = [
"insert into trucks_1111 (truck_name) \
values ('big mac');",
"insert into trucks_1111 (truck_name) \
values ('little jack');"
]
teardown = [
"drop table if exists users_1111;",
"drop table if exists garbage_trucks_1111;",
"drop table if exists trucks_1111;",
"drop table if exists garbage_1111;"
] | tests/pretest.py | reftable = """
CREATE TABLE `by0001` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`fld1` char(10) DEFAULT NULL,
`fld2` smallint(6) DEFAULT NULL,
`fld4` mediumint(9) DEFAULT NULL,
`fld5` int(11) DEFAULT NULL,
`fld6` bigint(20) DEFAULT NULL,
`fld7` float DEFAULT NULL,
`fld8` double DEFAULT NULL,
`fld9` double DEFAULT NULL,
`fld10` double DEFAULT NULL,
`fld11` decimal(10,2) DEFAULT NULL,
`fld12` bit(1) DEFAULT NULL,
`fld13` double DEFAULT NULL,
`fld14` tinyint(1) DEFAULT NULL,
`fld15` text DEFAULT NULL,
`fld16` blob DEFAULT NULL,
`fld17` enum('123','234') DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
"""
usertable = """
CREATE TABLE `users_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`first_name` varchar(100) DEFAULT NULL,
`uid` varchar(100) DEFAULT NULL,
`email` varchar(100) DEFAULT NULL,
`owner_id` int(11) unsigned DEFAULT NULL,
`created_on` timestamp NULL DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4;
"""
userdata = [
"insert into users_1111 (first_name, uid, email, owner_id) \
values ('hhhh', '13221312-123123123-123123', '<EMAIL>', 1);",
"insert into users_1111 (first_name, uid, email, owner_id) \
values ('bbbb', '13221312-123123123-343434', '<EMAIL>', 1);"
]
garbagetable = """
CREATE TABLE `garbage_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`scoops` int(11) DEFAULT NULL,
`boops` varchar(50) DEFAULT NULL,
`user_id` int(11) default null,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=143 DEFAULT CHARSET=utf8mb3;
"""
garbagedata = [
"insert into garbage_1111 (scoops, boops, user_id) \
values (44, 'jjjkkkkkkkkkkkk', 1);",
"insert into garbage_1111 (scoops, boops, user_id) \
values (88, 'dfsdfdfsdfsdfsdf', 1);",
]
garbagetruckstable = """
CREATE TABLE `garbage_trucks_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`garbage_id` int(11) DEFAULT NULL,
`truck_id` varchar(50) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=143 DEFAULT CHARSET=utf8mb3;
"""
truckstable = """
CREATE TABLE `trucks_1111` (
`id` int(11) unsigned NOT NULL AUTO_INCREMENT,
`truck_name` varchar(50) DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=143 DEFAULT CHARSET=utf8mb3;
"""
garbagetrucksdata = [
"insert into garbage_trucks_1111 (garbage_id, truck_id) \
values (143, 143);",
"insert into garbage_trucks_1111 (garbage_id, truck_id) \
values (144, 143);"
]
trucksdata = [
"insert into trucks_1111 (truck_name) \
values ('big mac');",
"insert into trucks_1111 (truck_name) \
values ('little jack');"
]
teardown = [
"drop table if exists users_1111;",
"drop table if exists garbage_trucks_1111;",
"drop table if exists trucks_1111;",
"drop table if exists garbage_1111;"
] | 0.481698 | 0.101056 |
def padded_binary(num, length=36):
    """Return *num* in binary, left-padded with zeros to *length* digits.

    Numbers whose binary form is already longer than *length* are returned
    unpadded.
    """
    bits = bin(num).removeprefix("0b")
    return bits.rjust(length, "0")
class Memory:
    """Part-1 memory: the bitmask overwrites value *bits* (X = keep)."""

    def __init__(self):
        self._mask = "X" * 36
        self._data = dict()

    def __setitem__(self, key, value):
        bits = padded_binary(value)
        self._data[key] = int(self._apply_mask(bits), 2)

    def _apply_mask(self, binary_value):
        # X passes the value's bit through; 0/1 force that bit.
        merged = (
            bit if mask_bit == "X" else mask_bit
            for bit, mask_bit in zip(binary_value, self._mask)
        )
        return "".join(merged)

    def get_sum(self):
        """Sum of every value currently stored."""
        return sum(self._data.values())

    def set_mask(self, new_mask):
        self._mask = new_mask
class MemoryV2:
    """Part-2 memory: the bitmask rewrites *addresses* (X = floating bit)."""

    def __init__(self):
        self._mask = "X" * 36
        self._data = dict()

    def __setitem__(self, key, value):
        # Write the value at every address the mask expands this key to.
        for address in self._get_addresses(padded_binary(key)):
            self._data[address] = value

    def _get_addresses(self, binary_address):
        """Yield every concrete address produced by the current mask."""
        template = []
        floating = []
        for pos, (addr_bit, mask_bit) in enumerate(zip(binary_address, self._mask)):
            if mask_bit == "0":
                template.append(addr_bit)   # 0 keeps the address bit
            elif mask_bit == "1":
                template.append("1")        # 1 forces the bit on
            else:
                template.append("X")        # X floats over both values
                floating.append(pos)
        # Enumerate all 2**k assignments of the floating bits.
        for combo in range(2 ** len(floating)):
            for pos, bit in zip(floating, padded_binary(combo, len(floating))):
                template[pos] = bit
            yield "".join(template)

    def get_sum(self):
        """Sum of every value currently stored."""
        return sum(self._data.values())

    def set_mask(self, new_mask):
        self._mask = new_mask
def execute(memory, instructions):
for inst in instructions:
command, arg = inst.split(" = ")
if command == "mask":
memory.set_mask(arg)
else:
key = int(command[4:-1])
memory[key] = int(arg)
def main():
with open("input.txt", "r") as fp:
instructions = [line.strip() for line in fp]
mem = Memory()
execute(mem, instructions)
print("Part I:", mem.get_sum())
mem = MemoryV2()
execute(mem, instructions)
print("Part II:", mem.get_sum())
if __name__ == "__main__":
main() | 2020/day14/day14.py | def padded_binary(num, length=36):
binary_value = bin(num).removeprefix("0b")
return "0" * (length - len(binary_value)) + binary_value
class Memory:
def __init__(self):
self._mask = "X" * 36
self._data = dict()
def __setitem__(self, key, value):
binary_value = padded_binary(value)
masked_binary_value = self._apply_mask(binary_value)
self._data[key] = int(masked_binary_value, 2)
def _apply_mask(self, binary_value):
masked_value = []
for i, m in zip(binary_value, self._mask):
if m == "X":
masked_value.append(i)
else:
masked_value.append(m)
return "".join(masked_value)
def get_sum(self):
return sum(self._data.values())
def set_mask(self, new_mask):
self._mask = new_mask
class MemoryV2:
def __init__(self):
self._mask = "X" * 36
self._data = dict()
def __setitem__(self, key, value):
for address in self._get_addresses(padded_binary(key)):
self._data[address] = value
def _get_addresses(self, binary_address):
floating_indices = []
masked_address = []
for i, (j, m) in enumerate(zip(binary_address, self._mask)):
if m == "0":
masked_address.append(j)
elif m == "1":
masked_address.append("1")
else:
masked_address.append("X")
floating_indices.append(i)
for i in range(2 ** len(floating_indices)):
b = padded_binary(i, len(floating_indices))
for j, k in zip(floating_indices, b):
masked_address[j] = k
yield "".join(masked_address)
def get_sum(self):
return sum(self._data.values())
def set_mask(self, new_mask):
self._mask = new_mask
def execute(memory, instructions):
for inst in instructions:
command, arg = inst.split(" = ")
if command == "mask":
memory.set_mask(arg)
else:
key = int(command[4:-1])
memory[key] = int(arg)
def main():
with open("input.txt", "r") as fp:
instructions = [line.strip() for line in fp]
mem = Memory()
execute(mem, instructions)
print("Part I:", mem.get_sum())
mem = MemoryV2()
execute(mem, instructions)
print("Part II:", mem.get_sum())
if __name__ == "__main__":
main() | 0.582491 | 0.369998 |
import numpy as np
import pytest
from psyneulink.core.components.functions.transferfunctions import Linear
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.process import Process
from psyneulink.core.components.system import System
from psyneulink.core.scheduling.condition import Never
from psyneulink.library.components.mechanisms.processing.transfer.lcamechanism import LCAMechanism
class TestLCA:
def test_LCAMechanism_length_1(self):
T = TransferMechanism(function=Linear(slope=1.0))
L = LCAMechanism(function=Linear(slope=2.0),
self_excitation=3.0,
leak=0.5,
competition=1.0, # competition does not matter because we only have one unit
time_step_size=0.1)
P = Process(pathway=[T, L])
S = System(processes=[P])
L.reinitialize_when = Never()
# - - - - - - - Equations to be executed - - - - - - -
# new_transfer_input =
# previous_transfer_input
# + (leak * previous_transfer_input_1 + self_excitation * result1 + competition * result2 + outside_input1) * dt
# + noise
# result = new_transfer_input*2.0
# recurrent_matrix = [[3.0]]
# - - - - - - - - - - - - - - - - - - - - - - - - - -
results=[]
def record_execution():
results.append(L.parameters.value.get(S)[0][0])
S.run(inputs={T: [1.0]},
num_trials=3,
call_after_trial=record_execution)
# - - - - - - - TRIAL 1 - - - - - - -
# new_transfer_input = 0.0 + ( 0.5 * 0.0 + 3.0 * 0.0 + 0.0 + 1.0)*0.1 + 0.0 = 0.1
# f(new_transfer_input) = 0.1 * 2.0 = 0.2
# - - - - - - - TRIAL 2 - - - - - - -
# new_transfer_input = 0.1 + ( 0.5 * 0.1 + 3.0 * 0.2 + 0.0 + 1.0)*0.1 + 0.0 = 0.265
# f(new_transfer_input) = 0.265 * 2.0 = 0.53
# - - - - - - - TRIAL 3 - - - - - - -
# new_transfer_input = 0.265 + ( 0.5 * 0.265 + 3.0 * 0.53 + 0.0 + 1.0)*0.1 + 0.0 = 0.53725
# f(new_transfer_input) = 0.53725 * 2.0 = 1.0745
assert np.allclose(results, [0.2, 0.53, 1.0745])
def test_LCAMechanism_length_2(self):
T = TransferMechanism(function=Linear(slope=1.0),
size=2)
L = LCAMechanism(function=Linear(slope=2.0),
size=2,
self_excitation=3.0,
leak=0.5,
competition=1.0,
time_step_size=0.1)
P = Process(pathway=[T, L])
S = System(processes=[P])
L.reinitialize_when = Never()
# - - - - - - - Equations to be executed - - - - - - -
# new_transfer_input =
# previous_transfer_input
# + (leak * previous_transfer_input_1 + self_excitation * result1 + competition * result2 + outside_input1) * dt
# + noise
# result = new_transfer_input*2.0
# recurrent_matrix = [[3.0]]
# - - - - - - - - - - - - - - - - - - - - - - - - - -
results=[]
def record_execution():
results.append(L.parameters.value.get(S)[0])
S.run(inputs={T: [1.0, 2.0]},
num_trials=3,
call_after_trial=record_execution)
# - - - - - - - TRIAL 1 - - - - - - -
# new_transfer_input_1 = 0.0 + ( 0.5 * 0.0 + 3.0 * 0.0 - 1.0*0.0 + 1.0)*0.1 + 0.0 = 0.1
# f(new_transfer_input_1) = 0.1 * 2.0 = 0.2
# new_transfer_input_2 = 0.0 + ( 0.5 * 0.0 + 3.0 * 0.0 - 1.0*0.0 + 2.0)*0.1 + 0.0 = 0.2
# f(new_transfer_input_2) = 0.2 * 2.0 = 0.4
# - - - - - - - TRIAL 2 - - - - - - -
# new_transfer_input = 0.1 + ( 0.5 * 0.1 + 3.0 * 0.2 - 1.0*0.4 + 1.0)*0.1 + 0.0 = 0.225
# f(new_transfer_input) = 0.265 * 2.0 = 0.45
# new_transfer_input_2 = 0.2 + ( 0.5 * 0.2 + 3.0 * 0.4 - 1.0*0.2 + 2.0)*0.1 + 0.0 = 0.51
# f(new_transfer_input_2) = 0.1 * 2.0 = 1.02
# - - - - - - - TRIAL 3 - - - - - - -
# new_transfer_input = 0.225 + ( 0.5 * 0.225 + 3.0 * 0.45 - 1.0*1.02 + 1.0)*0.1 + 0.0 = 0.36925
# f(new_transfer_input) = 0.36925 * 2.0 = 0.7385
# new_transfer_input_2 = 0.51 + ( 0.5 * 0.51 + 3.0 * 1.02 - 1.0*0.45 + 2.0)*0.1 + 0.0 = 0.9965
# f(new_transfer_input_2) = 0.9965 * 2.0 = 1.463
assert np.allclose(results, [[0.2, 0.4], [0.45, 1.02], [0.7385, 1.993]])
class TestLCAReinitialize:
def test_reinitialize_run(self):
L = LCAMechanism(name="L",
function=Linear,
initial_value=0.5,
integrator_mode=True,
leak=0.1,
competition=0,
self_excitation=1.0,
time_step_size=1.0,
noise=0.0)
P = Process(name="P",
pathway=[L])
S = System(name="S",
processes=[P])
L.reinitialize_when = Never()
assert np.allclose(L.integrator_function.previous_value, 0.5)
assert np.allclose(L.initial_value, 0.5)
assert np.allclose(L.integrator_function.initializer, 0.5)
S.run(inputs={L: 1.0},
num_trials=2,
initialize=True,
initial_values={L: 0.0})
# IntegratorFunction fn: previous_value + (rate*previous_value + new_value)*time_step_size + noise*(time_step_size**0.5)
# Trial 1 | variable = 1.0 + 0.0
# integration: 0.5 + (0.1*0.5 + 1.0)*1.0 + 0.0 = 1.55
# linear fn: 1.55*1.0 = 1.55
# Trial 2 | variable = 1.0 + 1.55
# integration: 1.55 + (0.1*1.55 + 2.55)*1.0 + 0.0 = 4.255
# linear fn: 4.255*1.0 = 4.255
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 4.255)
L.integrator_function.reinitialize(0.9, execution_context=S)
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 0.9)
assert np.allclose(L.parameters.value.get(S), 4.255)
L.reinitialize(0.5, execution_context=S)
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 0.5)
assert np.allclose(L.parameters.value.get(S), 0.5)
S.run(inputs={L: 1.0},
num_trials=2)
# Trial 3 | variable = 1.0 + 0.5
# integration: 0.5 + (0.1*0.5 + 1.5)*1.0 + 0.0 = 2.05
# linear fn: 2.05*1.0 = 2.05
# Trial 4 | variable = 1.0 + 2.05
# integration: 2.05 + (0.1*2.05 + 3.05)*1.0 + 0.0 = 5.305
# linear fn: 5.305*1.0 = 5.305
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 5.305)
assert np.allclose(L.initial_value, 0.5)
assert np.allclose(L.integrator_function.initializer, 0.5)
class TestClip:
def test_clip_float(self):
L = LCAMechanism(clip=[-2.0, 2.0],
function=Linear,
integrator_mode=False)
assert np.allclose(L.execute(3.0), 2.0)
assert np.allclose(L.execute(-3.0), -2.0)
def test_clip_array(self):
L = LCAMechanism(default_variable=[[0.0, 0.0, 0.0]],
clip=[-2.0, 2.0],
function=Linear,
integrator_mode=False)
assert np.allclose(L.execute([3.0, 0.0, -3.0]), [2.0, 0.0, -2.0])
def test_clip_2d_array(self):
L = LCAMechanism(default_variable=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
clip=[-2.0, 2.0],
function=Linear,
integrator_mode=False)
assert np.allclose(L.execute([[-5.0, -1.0, 5.0], [5.0, -5.0, 1.0], [1.0, 5.0, 5.0]]),
[[-2.0, -1.0, 2.0], [2.0, -2.0, 1.0], [1.0, 2.0, 2.0]]) | tests/mechanisms/test_lca.py | import numpy as np
import pytest
from psyneulink.core.components.functions.transferfunctions import Linear
from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism
from psyneulink.core.components.process import Process
from psyneulink.core.components.system import System
from psyneulink.core.scheduling.condition import Never
from psyneulink.library.components.mechanisms.processing.transfer.lcamechanism import LCAMechanism
class TestLCA:
def test_LCAMechanism_length_1(self):
T = TransferMechanism(function=Linear(slope=1.0))
L = LCAMechanism(function=Linear(slope=2.0),
self_excitation=3.0,
leak=0.5,
competition=1.0, # competition does not matter because we only have one unit
time_step_size=0.1)
P = Process(pathway=[T, L])
S = System(processes=[P])
L.reinitialize_when = Never()
# - - - - - - - Equations to be executed - - - - - - -
# new_transfer_input =
# previous_transfer_input
# + (leak * previous_transfer_input_1 + self_excitation * result1 + competition * result2 + outside_input1) * dt
# + noise
# result = new_transfer_input*2.0
# recurrent_matrix = [[3.0]]
# - - - - - - - - - - - - - - - - - - - - - - - - - -
results=[]
def record_execution():
results.append(L.parameters.value.get(S)[0][0])
S.run(inputs={T: [1.0]},
num_trials=3,
call_after_trial=record_execution)
# - - - - - - - TRIAL 1 - - - - - - -
# new_transfer_input = 0.0 + ( 0.5 * 0.0 + 3.0 * 0.0 + 0.0 + 1.0)*0.1 + 0.0 = 0.1
# f(new_transfer_input) = 0.1 * 2.0 = 0.2
# - - - - - - - TRIAL 2 - - - - - - -
# new_transfer_input = 0.1 + ( 0.5 * 0.1 + 3.0 * 0.2 + 0.0 + 1.0)*0.1 + 0.0 = 0.265
# f(new_transfer_input) = 0.265 * 2.0 = 0.53
# - - - - - - - TRIAL 3 - - - - - - -
# new_transfer_input = 0.265 + ( 0.5 * 0.265 + 3.0 * 0.53 + 0.0 + 1.0)*0.1 + 0.0 = 0.53725
# f(new_transfer_input) = 0.53725 * 2.0 = 1.0745
assert np.allclose(results, [0.2, 0.53, 1.0745])
def test_LCAMechanism_length_2(self):
T = TransferMechanism(function=Linear(slope=1.0),
size=2)
L = LCAMechanism(function=Linear(slope=2.0),
size=2,
self_excitation=3.0,
leak=0.5,
competition=1.0,
time_step_size=0.1)
P = Process(pathway=[T, L])
S = System(processes=[P])
L.reinitialize_when = Never()
# - - - - - - - Equations to be executed - - - - - - -
# new_transfer_input =
# previous_transfer_input
# + (leak * previous_transfer_input_1 + self_excitation * result1 + competition * result2 + outside_input1) * dt
# + noise
# result = new_transfer_input*2.0
# recurrent_matrix = [[3.0]]
# - - - - - - - - - - - - - - - - - - - - - - - - - -
results=[]
def record_execution():
results.append(L.parameters.value.get(S)[0])
S.run(inputs={T: [1.0, 2.0]},
num_trials=3,
call_after_trial=record_execution)
# - - - - - - - TRIAL 1 - - - - - - -
# new_transfer_input_1 = 0.0 + ( 0.5 * 0.0 + 3.0 * 0.0 - 1.0*0.0 + 1.0)*0.1 + 0.0 = 0.1
# f(new_transfer_input_1) = 0.1 * 2.0 = 0.2
# new_transfer_input_2 = 0.0 + ( 0.5 * 0.0 + 3.0 * 0.0 - 1.0*0.0 + 2.0)*0.1 + 0.0 = 0.2
# f(new_transfer_input_2) = 0.2 * 2.0 = 0.4
# - - - - - - - TRIAL 2 - - - - - - -
# new_transfer_input = 0.1 + ( 0.5 * 0.1 + 3.0 * 0.2 - 1.0*0.4 + 1.0)*0.1 + 0.0 = 0.225
# f(new_transfer_input) = 0.265 * 2.0 = 0.45
# new_transfer_input_2 = 0.2 + ( 0.5 * 0.2 + 3.0 * 0.4 - 1.0*0.2 + 2.0)*0.1 + 0.0 = 0.51
# f(new_transfer_input_2) = 0.1 * 2.0 = 1.02
# - - - - - - - TRIAL 3 - - - - - - -
# new_transfer_input = 0.225 + ( 0.5 * 0.225 + 3.0 * 0.45 - 1.0*1.02 + 1.0)*0.1 + 0.0 = 0.36925
# f(new_transfer_input) = 0.36925 * 2.0 = 0.7385
# new_transfer_input_2 = 0.51 + ( 0.5 * 0.51 + 3.0 * 1.02 - 1.0*0.45 + 2.0)*0.1 + 0.0 = 0.9965
# f(new_transfer_input_2) = 0.9965 * 2.0 = 1.463
assert np.allclose(results, [[0.2, 0.4], [0.45, 1.02], [0.7385, 1.993]])
class TestLCAReinitialize:
def test_reinitialize_run(self):
L = LCAMechanism(name="L",
function=Linear,
initial_value=0.5,
integrator_mode=True,
leak=0.1,
competition=0,
self_excitation=1.0,
time_step_size=1.0,
noise=0.0)
P = Process(name="P",
pathway=[L])
S = System(name="S",
processes=[P])
L.reinitialize_when = Never()
assert np.allclose(L.integrator_function.previous_value, 0.5)
assert np.allclose(L.initial_value, 0.5)
assert np.allclose(L.integrator_function.initializer, 0.5)
S.run(inputs={L: 1.0},
num_trials=2,
initialize=True,
initial_values={L: 0.0})
# IntegratorFunction fn: previous_value + (rate*previous_value + new_value)*time_step_size + noise*(time_step_size**0.5)
# Trial 1 | variable = 1.0 + 0.0
# integration: 0.5 + (0.1*0.5 + 1.0)*1.0 + 0.0 = 1.55
# linear fn: 1.55*1.0 = 1.55
# Trial 2 | variable = 1.0 + 1.55
# integration: 1.55 + (0.1*1.55 + 2.55)*1.0 + 0.0 = 4.255
# linear fn: 4.255*1.0 = 4.255
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 4.255)
L.integrator_function.reinitialize(0.9, execution_context=S)
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 0.9)
assert np.allclose(L.parameters.value.get(S), 4.255)
L.reinitialize(0.5, execution_context=S)
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 0.5)
assert np.allclose(L.parameters.value.get(S), 0.5)
S.run(inputs={L: 1.0},
num_trials=2)
# Trial 3 | variable = 1.0 + 0.5
# integration: 0.5 + (0.1*0.5 + 1.5)*1.0 + 0.0 = 2.05
# linear fn: 2.05*1.0 = 2.05
# Trial 4 | variable = 1.0 + 2.05
# integration: 2.05 + (0.1*2.05 + 3.05)*1.0 + 0.0 = 5.305
# linear fn: 5.305*1.0 = 5.305
assert np.allclose(L.integrator_function.parameters.previous_value.get(S), 5.305)
assert np.allclose(L.initial_value, 0.5)
assert np.allclose(L.integrator_function.initializer, 0.5)
class TestClip:
def test_clip_float(self):
L = LCAMechanism(clip=[-2.0, 2.0],
function=Linear,
integrator_mode=False)
assert np.allclose(L.execute(3.0), 2.0)
assert np.allclose(L.execute(-3.0), -2.0)
def test_clip_array(self):
L = LCAMechanism(default_variable=[[0.0, 0.0, 0.0]],
clip=[-2.0, 2.0],
function=Linear,
integrator_mode=False)
assert np.allclose(L.execute([3.0, 0.0, -3.0]), [2.0, 0.0, -2.0])
def test_clip_2d_array(self):
L = LCAMechanism(default_variable=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
clip=[-2.0, 2.0],
function=Linear,
integrator_mode=False)
assert np.allclose(L.execute([[-5.0, -1.0, 5.0], [5.0, -5.0, 1.0], [1.0, 5.0, 5.0]]),
[[-2.0, -1.0, 2.0], [2.0, -2.0, 1.0], [1.0, 2.0, 2.0]]) | 0.413359 | 0.393618 |
"""Analyze the simulation results."""
import os
import shutil
import numpy as np
import pandas as pd
from wfa_cardinality_estimation_evaluation_framework.common import plotting
from wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator
from wfa_cardinality_estimation_evaluation_framework.simulations import simulator
ERROR_MARGIN = 0.05
PROPORTION_OF_RUNS = 0.95
ESTIMATOR_NAME = 'estimator'
SCENARIO_NAME = 'scenario'
NUM_ESTIMABLE_SETS = 'num_estimable_sets'
RAW_RESULT_DF = 'df'
# The file that summarize the maximum number of sets that can be estimated
# within 5% (or specified by the error_margin) relative error for at least 95%
# (or specified by the proportion_of_runs) runs. It has columns of estimator,
# scenario and num_estimable_sets.
NUM_ESTIMABLE_SETS_FILENAME = 'num_estimable_sets.csv'
BOXPLOT_FILENAME = 'boxplot.png'
BOXPLOT_SIZE_WIDTH_INCH = 'boxplot_size_width_inch'
BOXPLOT_SIZE_HEIGHT_INCH = 'boxplot_size_width_inch'
PLOT_PARAMS = {
BOXPLOT_SIZE_WIDTH_INCH: 12,
BOXPLOT_SIZE_HEIGHT_INCH: 6,
}
def get_num_estimable_sets(df, num_sets=simulator.NUM_SETS,
relative_error=simulator.RELATIVE_ERROR,
error_margin=ERROR_MARGIN,
proportion_of_runs=PROPORTION_OF_RUNS):
"""Get the number of estimable sets.
For example, set error_margin = 0.05 and proportion_of_runs = 0.95. Then
the number of estimable sets is defined as the number of sets whose union
cardinality can be estimated such that 95% of the runs are within a 5%
relative error.
Args:
df: a pd.DataFrame that have columns of num_sets and relative_error.
num_sets: a column name in df that specifies the number of sets.
relative_error: a column name in df that specifies the relative error.
error_margin: a positive number setting the upper bound of the error. By
default, set to 0.05.
proportion_of_runs: a number between 0 and 1 that specifies the proportion
of runs. By default, set to 0.95.
Returns:
The number of estimable sets.
"""
if not set([num_sets, relative_error]).issubset(df.columns):
raise ValueError(f'{num_sets} or {relative_error} not found in df.')
def count_estimable(e):
return np.mean(np.abs(e) < error_margin) >= proportion_of_runs
df_estimable = (
df.groupby(num_sets).agg({relative_error: count_estimable}))
df_estimable = df_estimable.rename(
columns={relative_error: 'is_estimable'})
num_of_estimable = 0
for n in df_estimable.index.values:
if df_estimable.loc[n, 'is_estimable']:
num_of_estimable = n
else:
break
return num_of_estimable
class CardinalityEstimatorEvaluationAnalyzer(object):
"""Analyze the cardinality estimator evaluation results."""
def __init__(self, out_dir, evaluation_directory, evaluation_run_name,
evaluation_name, error_margin=ERROR_MARGIN,
proportion_of_runs=PROPORTION_OF_RUNS,
plot_params=None):
"""Construct an analyzer.
Args:
out_dir: the output directory of analysis results.
evaluation_directory: the output directory of evaluation results. The
analyzer will read the evaluation results and output summary tables and
plots.
evaluation_run_name: the run name of the evaluation.
evaluation_name: the name of the evaluation config.
error_margin: a positive number setting the upper bound of the error. By
default, set to 0.05.
proportion_of_runs: a number between 0 and 1 that specifies the desired
proportion of runs within the error margin. By default, set to 0.95.
plot_params: a dictionary of the parameters of plot functions. If not
given, will use PLOT_PARAMS. Also see PLOT_PARAMS for how it is defined.
"""
self.error_margin = error_margin
self.proportion_of_runs = proportion_of_runs
self.plot_params = plot_params or PLOT_PARAMS
# Get all the raw results.
self.evaluation_file_dirs = evaluator.load_directory_tree(
out_dir=evaluation_directory,
run_name=evaluation_run_name,
evaluation_name=evaluation_name)
self.raw_df = (
CardinalityEstimatorEvaluationAnalyzer
.read_evaluation_results(self.evaluation_file_dirs))
# Create the analysis directory.
if out_dir is None:
out_dir = os.getcwd()
if out_dir != evaluation_directory:
shutil.copytree(
self.evaluation_file_dirs[evaluator.KEY_RUN_DIR],
os.path.join(out_dir, evaluation_run_name))
self.analysis_file_dirs = evaluator.load_directory_tree(
out_dir=out_dir,
run_name=evaluation_run_name,
evaluation_name=evaluation_name)
def __call__(self):
num_estimable_sets_df = self.get_num_estimable_sets_df()
df_filename = os.path.join(
self.analysis_file_dirs[evaluator.KEY_EVALUATION_DIR],
NUM_ESTIMABLE_SETS_FILENAME)
with open(df_filename, 'w') as f:
num_estimable_sets_df.to_csv(f, index=False)
self.save_plot_num_sets_vs_relative_error()
@classmethod
def read_evaluation_results(cls, file_dirs):
"""Read evaluation results.
Args:
file_dirs: a dictionary of file directories of the evaluation which is
generated by the create_directory method of evaluator.Evaluation.
Returns:
A pandas.DataFrame containing columns of the estimator name, the scenario
name, and the corresponding raw evaluation result data frame.
"""
df_list = []
for estimator_name in file_dirs[evaluator.KEY_ESTIMATOR_DIRS].keys():
for scenario_name in file_dirs[estimator_name].keys():
df_file = os.path.join(
file_dirs[estimator_name][scenario_name],
evaluator.RAW_RESULT_DF_FILENAME)
with open(df_file, 'r') as f:
df = pd.read_csv(f)
df_list.append((estimator_name, scenario_name, df))
return pd.DataFrame(
df_list, columns=[ESTIMATOR_NAME, SCENARIO_NAME, RAW_RESULT_DF])
def get_num_estimable_sets_df(self):
"""Summarize the number of estimable sets by estimators and scenarios."""
def f(x):
num_estimable_sets = get_num_estimable_sets(
x[RAW_RESULT_DF], num_sets=simulator.NUM_SETS,
relative_error=simulator.RELATIVE_ERROR,
error_margin=self.error_margin,
proportion_of_runs=self.proportion_of_runs)
return pd.Series({
ESTIMATOR_NAME: x[ESTIMATOR_NAME],
SCENARIO_NAME: x[SCENARIO_NAME],
NUM_ESTIMABLE_SETS: num_estimable_sets})
return self.raw_df.apply(f, axis=1)
def save_plot_num_sets_vs_relative_error(self):
"""Make and save plots for number of sets versus relative error."""
def f(x):
# Make a plot.
ax = plotting.boxplot_relative_error(
x[RAW_RESULT_DF],
num_sets=simulator.NUM_SETS,
relative_error=simulator.RELATIVE_ERROR)
ax.set_title(f'{x[SCENARIO_NAME]}\n{x[ESTIMATOR_NAME]}')
ax.set_ylim(-0.1, 0.1)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
# Save the plot to file.
fig = ax.get_figure()
plot_file = os.path.join(
self.analysis_file_dirs[x[ESTIMATOR_NAME]][x[SCENARIO_NAME]],
BOXPLOT_FILENAME)
fig.set_size_inches(
w=self.plot_params[BOXPLOT_SIZE_WIDTH_INCH],
h=self.plot_params[BOXPLOT_SIZE_HEIGHT_INCH])
fig.savefig(plot_file)
self.raw_df.apply(f, axis=1) | evaluations/analyzer.py |
"""Analyze the simulation results."""
import os
import shutil
import numpy as np
import pandas as pd
from wfa_cardinality_estimation_evaluation_framework.common import plotting
from wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator
from wfa_cardinality_estimation_evaluation_framework.simulations import simulator
ERROR_MARGIN = 0.05
PROPORTION_OF_RUNS = 0.95
ESTIMATOR_NAME = 'estimator'
SCENARIO_NAME = 'scenario'
NUM_ESTIMABLE_SETS = 'num_estimable_sets'
RAW_RESULT_DF = 'df'
# The file that summarize the maximum number of sets that can be estimated
# within 5% (or specified by the error_margin) relative error for at least 95%
# (or specified by the proportion_of_runs) runs. It has columns of estimator,
# scenario and num_estimable_sets.
NUM_ESTIMABLE_SETS_FILENAME = 'num_estimable_sets.csv'
BOXPLOT_FILENAME = 'boxplot.png'
BOXPLOT_SIZE_WIDTH_INCH = 'boxplot_size_width_inch'
BOXPLOT_SIZE_HEIGHT_INCH = 'boxplot_size_width_inch'
PLOT_PARAMS = {
BOXPLOT_SIZE_WIDTH_INCH: 12,
BOXPLOT_SIZE_HEIGHT_INCH: 6,
}
def get_num_estimable_sets(df, num_sets=simulator.NUM_SETS,
relative_error=simulator.RELATIVE_ERROR,
error_margin=ERROR_MARGIN,
proportion_of_runs=PROPORTION_OF_RUNS):
"""Get the number of estimable sets.
For example, set error_margin = 0.05 and proportion_of_runs = 0.95. Then
the number of estimable sets is defined as the number of sets whose union
cardinality can be estimated such that 95% of the runs are within a 5%
relative error.
Args:
df: a pd.DataFrame that have columns of num_sets and relative_error.
num_sets: a column name in df that specifies the number of sets.
relative_error: a column name in df that specifies the relative error.
error_margin: a positive number setting the upper bound of the error. By
default, set to 0.05.
proportion_of_runs: a number between 0 and 1 that specifies the proportion
of runs. By default, set to 0.95.
Returns:
The number of estimable sets.
"""
if not set([num_sets, relative_error]).issubset(df.columns):
raise ValueError(f'{num_sets} or {relative_error} not found in df.')
def count_estimable(e):
return np.mean(np.abs(e) < error_margin) >= proportion_of_runs
df_estimable = (
df.groupby(num_sets).agg({relative_error: count_estimable}))
df_estimable = df_estimable.rename(
columns={relative_error: 'is_estimable'})
num_of_estimable = 0
for n in df_estimable.index.values:
if df_estimable.loc[n, 'is_estimable']:
num_of_estimable = n
else:
break
return num_of_estimable
class CardinalityEstimatorEvaluationAnalyzer(object):
"""Analyze the cardinality estimator evaluation results."""
def __init__(self, out_dir, evaluation_directory, evaluation_run_name,
evaluation_name, error_margin=ERROR_MARGIN,
proportion_of_runs=PROPORTION_OF_RUNS,
plot_params=None):
"""Construct an analyzer.
Args:
out_dir: the output directory of analysis results.
evaluation_directory: the output directory of evaluation results. The
analyzer will read the evaluation results and output summary tables and
plots.
evaluation_run_name: the run name of the evaluation.
evaluation_name: the name of the evaluation config.
error_margin: a positive number setting the upper bound of the error. By
default, set to 0.05.
proportion_of_runs: a number between 0 and 1 that specifies the desired
proportion of runs within the error margin. By default, set to 0.95.
plot_params: a dictionary of the parameters of plot functions. If not
given, will use PLOT_PARAMS. Also see PLOT_PARAMS for how it is defined.
"""
self.error_margin = error_margin
self.proportion_of_runs = proportion_of_runs
self.plot_params = plot_params or PLOT_PARAMS
# Get all the raw results.
self.evaluation_file_dirs = evaluator.load_directory_tree(
out_dir=evaluation_directory,
run_name=evaluation_run_name,
evaluation_name=evaluation_name)
self.raw_df = (
CardinalityEstimatorEvaluationAnalyzer
.read_evaluation_results(self.evaluation_file_dirs))
# Create the analysis directory.
if out_dir is None:
out_dir = os.getcwd()
if out_dir != evaluation_directory:
shutil.copytree(
self.evaluation_file_dirs[evaluator.KEY_RUN_DIR],
os.path.join(out_dir, evaluation_run_name))
self.analysis_file_dirs = evaluator.load_directory_tree(
out_dir=out_dir,
run_name=evaluation_run_name,
evaluation_name=evaluation_name)
def __call__(self):
num_estimable_sets_df = self.get_num_estimable_sets_df()
df_filename = os.path.join(
self.analysis_file_dirs[evaluator.KEY_EVALUATION_DIR],
NUM_ESTIMABLE_SETS_FILENAME)
with open(df_filename, 'w') as f:
num_estimable_sets_df.to_csv(f, index=False)
self.save_plot_num_sets_vs_relative_error()
@classmethod
def read_evaluation_results(cls, file_dirs):
"""Read evaluation results.
Args:
file_dirs: a dictionary of file directories of the evaluation which is
generated by the create_directory method of evaluator.Evaluation.
Returns:
A pandas.DataFrame containing columns of the estimator name, the scenario
name, and the corresponding raw evaluation result data frame.
"""
df_list = []
for estimator_name in file_dirs[evaluator.KEY_ESTIMATOR_DIRS].keys():
for scenario_name in file_dirs[estimator_name].keys():
df_file = os.path.join(
file_dirs[estimator_name][scenario_name],
evaluator.RAW_RESULT_DF_FILENAME)
with open(df_file, 'r') as f:
df = pd.read_csv(f)
df_list.append((estimator_name, scenario_name, df))
return pd.DataFrame(
df_list, columns=[ESTIMATOR_NAME, SCENARIO_NAME, RAW_RESULT_DF])
def get_num_estimable_sets_df(self):
"""Summarize the number of estimable sets by estimators and scenarios."""
def f(x):
num_estimable_sets = get_num_estimable_sets(
x[RAW_RESULT_DF], num_sets=simulator.NUM_SETS,
relative_error=simulator.RELATIVE_ERROR,
error_margin=self.error_margin,
proportion_of_runs=self.proportion_of_runs)
return pd.Series({
ESTIMATOR_NAME: x[ESTIMATOR_NAME],
SCENARIO_NAME: x[SCENARIO_NAME],
NUM_ESTIMABLE_SETS: num_estimable_sets})
return self.raw_df.apply(f, axis=1)
def save_plot_num_sets_vs_relative_error(self):
"""Make and save plots for number of sets versus relative error."""
def f(x):
# Make a plot.
ax = plotting.boxplot_relative_error(
x[RAW_RESULT_DF],
num_sets=simulator.NUM_SETS,
relative_error=simulator.RELATIVE_ERROR)
ax.set_title(f'{x[SCENARIO_NAME]}\n{x[ESTIMATOR_NAME]}')
ax.set_ylim(-0.1, 0.1)
for tick in ax.get_xticklabels():
tick.set_rotation(90)
# Save the plot to file.
fig = ax.get_figure()
plot_file = os.path.join(
self.analysis_file_dirs[x[ESTIMATOR_NAME]][x[SCENARIO_NAME]],
BOXPLOT_FILENAME)
fig.set_size_inches(
w=self.plot_params[BOXPLOT_SIZE_WIDTH_INCH],
h=self.plot_params[BOXPLOT_SIZE_HEIGHT_INCH])
fig.savefig(plot_file)
self.raw_df.apply(f, axis=1) | 0.908958 | 0.430626 |
import amqpstorm
from amqpstorm import Message
class FibonacciRpcClient(object):
def __init__(self, host, username, password):
"""
:param host: RabbitMQ Server e.g. localhost
:param username: RabbitMQ Username e.g. guest
:param password: <PASSWORD>MQ Password e.g. <PASSWORD>
:return:
"""
self.host = host
self.username = username
self.password = password
self.channel = None
self.response = None
self.connection = None
self.callback_queue = None
self.correlation_id = None
self.open()
def open(self):
self.connection = amqpstorm.Connection(self.host,
self.username,
self.password)
self.channel = self.connection.channel()
result = self.channel.queue.declare(exclusive=True)
self.callback_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.callback_queue)
def close(self):
self.channel.stop_consuming()
self.channel.close()
self.connection.close()
def call(self, number):
self.response = None
message = Message.create(self.channel, body=str(number))
message.reply_to = self.callback_queue
self.correlation_id = message.correlation_id
message.publish(routing_key='rpc_queue')
while not self.response:
self.channel.process_data_events()
return int(self.response)
def _on_response(self, message):
if self.correlation_id != message.correlation_id:
return
self.response = message.body
if __name__ == '__main__':
FIBONACCI_RPC = FibonacciRpcClient('localhost', 'guest', 'guest')
print(" [x] Requesting fib(30)")
RESPONSE = FIBONACCI_RPC.call(30)
print(" [.] Got %r" % (RESPONSE,))
FIBONACCI_RPC.close() | examples/simple_rpc_client.py | import amqpstorm
from amqpstorm import Message
class FibonacciRpcClient(object):
def __init__(self, host, username, password):
"""
:param host: RabbitMQ Server e.g. localhost
:param username: RabbitMQ Username e.g. guest
:param password: <PASSWORD>MQ Password e.g. <PASSWORD>
:return:
"""
self.host = host
self.username = username
self.password = password
self.channel = None
self.response = None
self.connection = None
self.callback_queue = None
self.correlation_id = None
self.open()
def open(self):
self.connection = amqpstorm.Connection(self.host,
self.username,
self.password)
self.channel = self.connection.channel()
result = self.channel.queue.declare(exclusive=True)
self.callback_queue = result['queue']
self.channel.basic.consume(self._on_response, no_ack=True,
queue=self.callback_queue)
def close(self):
self.channel.stop_consuming()
self.channel.close()
self.connection.close()
def call(self, number):
self.response = None
message = Message.create(self.channel, body=str(number))
message.reply_to = self.callback_queue
self.correlation_id = message.correlation_id
message.publish(routing_key='rpc_queue')
while not self.response:
self.channel.process_data_events()
return int(self.response)
def _on_response(self, message):
if self.correlation_id != message.correlation_id:
return
self.response = message.body
if __name__ == '__main__':
FIBONACCI_RPC = FibonacciRpcClient('localhost', 'guest', 'guest')
print(" [x] Requesting fib(30)")
RESPONSE = FIBONACCI_RPC.call(30)
print(" [.] Got %r" % (RESPONSE,))
FIBONACCI_RPC.close() | 0.465145 | 0.061312 |
from rest_framework.test import APITestCase, APIClient
from api.models import Sample, Disease, Mutation, Gene
class SampleTests(APITestCase):
sample_keys = ['sample_id',
'disease',
'mutations',
'gender',
'age_diagnosed']
def setUp(self):
self.gene1 = Gene.objects.create(entrez_gene_id=123456,
symbol='GENE123',
description='foo',
chromosome='1',
gene_type='bar',
synonyms=['foo', 'bar'],
aliases=['foo', 'bar'])
self.disease1 = Disease.objects.create(acronym='BLCA',
name='bladder urothelial carcinoma')
self.sample1 = Sample.objects.create(sample_id='TCGA-22-4593-01',
disease=self.disease1,
gender='female',
age_diagnosed=37)
self.sample2 = Sample.objects.create(sample_id='TCGA-2G-AALW-01',
disease=self.disease1,
gender='male',
age_diagnosed=43)
self.mutation1 = Mutation.objects.create(gene=self.gene1,
sample=self.sample1)
self.mutation2 = Mutation.objects.create(gene=self.gene1,
sample=self.sample2)
def test_list_samples(self):
    """GET /samples returns a paginated envelope containing both fixtures."""
    response = APIClient().get('/samples')
    self.assertEqual(response.status_code, 200)
    pagination_keys = ['count', 'next', 'previous', 'results']
    self.assertEqual(list(response.data.keys()), pagination_keys)
    results = response.data['results']
    self.assertEqual(len(results), 2)
    for sample in results:
        self.assertEqual(list(sample.keys()), self.sample_keys)
def test_get_sample(self):
client = APIClient()
get_response = client.get('/samples/' + str(self.sample1.sample_id))
self.assertEqual(get_response.status_code, 200)
self.assertEqual(list(get_response.data.keys()), self.sample_keys) | api/test/test_sample.py | from rest_framework.test import APITestCase, APIClient
from api.models import Sample, Disease, Mutation, Gene
class SampleTests(APITestCase):
sample_keys = ['sample_id',
'disease',
'mutations',
'gender',
'age_diagnosed']
def setUp(self):
self.gene1 = Gene.objects.create(entrez_gene_id=123456,
symbol='GENE123',
description='foo',
chromosome='1',
gene_type='bar',
synonyms=['foo', 'bar'],
aliases=['foo', 'bar'])
self.disease1 = Disease.objects.create(acronym='BLCA',
name='bladder urothelial carcinoma')
self.sample1 = Sample.objects.create(sample_id='TCGA-22-4593-01',
disease=self.disease1,
gender='female',
age_diagnosed=37)
self.sample2 = Sample.objects.create(sample_id='TCGA-2G-AALW-01',
disease=self.disease1,
gender='male',
age_diagnosed=43)
self.mutation1 = Mutation.objects.create(gene=self.gene1,
sample=self.sample1)
self.mutation2 = Mutation.objects.create(gene=self.gene1,
sample=self.sample2)
def test_list_samples(self):
client = APIClient()
list_response = client.get('/samples')
self.assertEqual(list_response.status_code, 200)
self.assertEqual(list(list_response.data.keys()), ['count',
'next',
'previous',
'results'])
self.assertEqual(len(list_response.data['results']), 2)
self.assertEqual(list(list_response.data['results'][0].keys()), self.sample_keys)
self.assertEqual(list(list_response.data['results'][1].keys()), self.sample_keys)
def test_get_sample(self):
client = APIClient()
get_response = client.get('/samples/' + str(self.sample1.sample_id))
self.assertEqual(get_response.status_code, 200)
self.assertEqual(list(get_response.data.keys()), self.sample_keys) | 0.58261 | 0.281511 |
import random
import numpy as np
import torch
import torch.nn.functional as F
from base_batch_gen import Base_batch_generator
from helper_functions import encode_content
class CNNFisherBatchGen(Base_batch_generator):
def __init__(self, num_rows, num_classes, action_to_id):
    """Configure target-encoding geometry and the action-label lookup.

    num_rows and num_classes define the encoded target's shape;
    action_to_id maps action labels to class indices (used by
    encode_content in read_data).
    """
    super(CNNFisherBatchGen, self).__init__()
    self.num_rows = num_rows
    self.num_classes = num_classes
    self.action_to_id = action_to_id
def read_data(self, list_of_videos, list_of_fisher_vectors=None):
    """Build (input, target) training examples from videos + fisher vectors.

    For each video, several observed prefixes (10%-50% of frames) are
    resampled to a fixed length and paired with the per-frame action
    labels of the following 50% of the video, encoded as a
    (num_rows, num_classes, 1) grid.  Examples are shuffled in place.

    Raises:
        ValueError: if list_of_fisher_vectors is not provided; the old
            code fell over with an opaque TypeError inside zip().
    """
    if list_of_fisher_vectors is None:
        raise ValueError("read_data requires a list of fisher-vector files")
    for video, fisher_file in zip(list_of_videos, list_of_fisher_vectors):
        fisher_vectors = np.loadtxt(fisher_file, dtype=np.float32, ndmin=2)
        fisher_vectors = fisher_vectors[:, 1:]  # remove frame index column
        with open(video, mode='r') as f:
            actions_per_frame = [line.rstrip() for line in f]
        num_frames = len(actions_per_frame)
        for observed_fraction in (0.1, 0.2, 0.3, 0.5):
            num_observed_frames = int(observed_fraction * num_frames)
            # Predict the 50% of the video that follows the observed prefix.
            num_observed_plus_unobserved_frames = int((0.5 + observed_fraction) * num_frames)
            observed_fisher_vector = fisher_vectors[:num_observed_frames]
            input_video = self.up_or_down_sample_fisher_vector(observed_fisher_vector)
            target = actions_per_frame[num_observed_frames:num_observed_plus_unobserved_frames]
            target = encode_content(target, self.num_rows, self.num_classes,
                                    self.action_to_id)
            target = np.reshape(target, [self.num_rows, self.num_classes, 1])
            self.list_of_examples.append([input_video, target])
    random.shuffle(self.list_of_examples)
def next_batch(self, batch_size):
    """Return up to *batch_size* (inputs, targets) pairs and advance the cursor."""
    start = self.index
    self.index += batch_size
    batch = self.list_of_examples[start:start + batch_size]
    batch_input = [example[0] for example in batch]
    batch_target = [example[1] for example in batch]
    return batch_input, batch_target
@staticmethod
def up_or_down_sample_fisher_vector(observed_fisher_vector):
observed_fisher_vector = torch.from_numpy(observed_fisher_vector).permute([1, 0])
observed_fisher_vector = torch.unsqueeze(observed_fisher_vector, dim=0)
observed_fisher_vector = F.interpolate(observed_fisher_vector, size=128, mode='linear')
observed_fisher_vector = observed_fisher_vector.squeeze().permute([1, 0])
observed_fisher_vector = torch.unsqueeze(observed_fisher_vector, dim=-1).numpy()
return observed_fisher_vector | utils/cnn_fisher_batch_gen.py | import random
import numpy as np
import torch
import torch.nn.functional as F
from base_batch_gen import Base_batch_generator
from helper_functions import encode_content
class CNNFisherBatchGen(Base_batch_generator):
def __init__(self, num_rows, num_classes, action_to_id):
super(CNNFisherBatchGen, self).__init__()
self.num_rows = num_rows
self.num_classes = num_classes
self.action_to_id = action_to_id
def read_data(self, list_of_videos, list_of_fisher_vectors=None):
for video, fisher_file in zip(list_of_videos, list_of_fisher_vectors):
fisher_vectors = np.loadtxt(fisher_file, dtype=np.float32, ndmin=2)
fisher_vectors = fisher_vectors[:, 1:] # remove frame index
with open(video, mode='r') as f:
actions_per_frame = [line.rstrip() for line in f]
num_frames = len(actions_per_frame)
observed_fractions = [0.1, 0.2, 0.3, 0.5]
for observed_fraction in observed_fractions:
num_observed_frames = int(observed_fraction * num_frames)
num_observed_plus_unobserved_frames = int((0.5 + observed_fraction) * num_frames)
observed_fisher_vector = fisher_vectors[:num_observed_frames]
input_video = self.up_or_down_sample_fisher_vector(observed_fisher_vector)
target = actions_per_frame[num_observed_frames:num_observed_plus_unobserved_frames]
target = encode_content(target, self.num_rows, self.num_classes, self.action_to_id)
target = np.reshape(target, [self.num_rows, self.num_classes, 1])
example = [input_video, target]
self.list_of_examples.append(example)
random.shuffle(self.list_of_examples)
def next_batch(self, batch_size):
batch = self.list_of_examples[self.index:self.index + batch_size]
self.index += batch_size
batch_input = []
batch_target = []
for batch_example in batch:
batch_input.append(batch_example[0])
batch_target.append(batch_example[1])
return batch_input, batch_target
@staticmethod
def up_or_down_sample_fisher_vector(observed_fisher_vector):
observed_fisher_vector = torch.from_numpy(observed_fisher_vector).permute([1, 0])
observed_fisher_vector = torch.unsqueeze(observed_fisher_vector, dim=0)
observed_fisher_vector = F.interpolate(observed_fisher_vector, size=128, mode='linear')
observed_fisher_vector = observed_fisher_vector.squeeze().permute([1, 0])
observed_fisher_vector = torch.unsqueeze(observed_fisher_vector, dim=-1).numpy()
return observed_fisher_vector | 0.802556 | 0.262257 |
from urlextract import URLExtract
from Nemesis.lib.Color import Color
# General
# Matches url but not direct path
#url_regex = "((http|https)://)[a-zA-Z0-9\./?:@-_=]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*"
#url_regex_without_netloc = "((http|https)://)?[a-zA-Z0-9\./?:@-_=]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*"

# Quoted or parenthesised single path segment, e.g. (hi/) or "hi/".
# Patterns use raw strings so backslashes reach the regex engine verbatim.
single_path_regex = r"""('|"|\(|\))(\/){1}[a-zA-Z0-9-_]+(/)?('|"|\(|\))"""
# Multi-segment path with optional filename, query string or params.
path_regex = r"([a-zA-Z0-9]+\.[a-zA-Z0-9]{3,6})?\/(([0-9a-zA-Z+.-]+)([\/&| ])){1,30}([a-zA-Z0-9]+(\.[a-zA-Z0-9]*)?)?(\?|;)?([a-zA-Z\[\]&=]*)?"
# NOTE(review): an earlier, nearly identical variant of this pattern was
# assigned and immediately overwritten; only the final value is kept.
experimental_path_regex = r"/?\w+(?:/\w+)*\?\w+=\w+(?:&\w+=\w+)*|(?:\w*(?:/\w+){2,}/?|\w+(?:/\w+)+/|/\w+/)(?:&\w+=\w+)*"
# Matches <subdomain> itself or any deeper label chain ending in it.
subdomain_regex = lambda subdomain: r'(.*\.)?{}(\.)?'.format(subdomain)

# DOM XSS taint sources (plain substrings; case handling is up to callers).
dom_sources_regex = [
    'document.url',
    'document.documenturi',
    'Document.URLUnencoded',
    'Document.baseURI',
    'Location.href',
    'Location.search',
    'Location.hash',
    'Location.pathname',
    'Document.cookie',
    'Document.referrer',
    'Window.name',
    'History.pushState',
    'History.replaceState',
    'LocalStorage',
    'SessionStorage',
    'window.location',
    'document.location'
]
# DOM XSS sinks; entries are regex fragments.
dom_sinks_regex = [
    'eval',
    'setTimeout',
    'setInterval',
    'setImmediate',
    'execScript',
    # fixed typo: was 'cyrpto.generateCRMFRequest', which could never match.
    'crypto.generateCRMFRequest',
    'ScriptElement',
    r'(\.src)( )?=()?',
    r'(\.text)( )?=()?',
    r'(\.textContent)( )?=()?',
    r'(\.innerText)( )?=()?',
    r'(\.innerElement)( )?=()?',
    r'(\.innerHTML)( )?=()?',
    'document.write',
    'document.writeln',
]
# Custom keyword lists: exclusions, case-insensitive and case-sensitive hits.
custom_regex_not = [
    ''
]
custom_regex_insensitive = [
    'secret',
    'admin',
]
custom_regex_sensitive = [
    'sourceMappingURL',
]
# Cloud object-storage hostname patterns (S3, GCS, DO Spaces, Azure Blob).
web_services_regex = [
    r'([0-9a-zA-Z-.]*s3[a-zA-Z0-9-.]*\.?amazonaws\.com\/?[a-zA-Z-.]*)',
    r'([0-9a-zA-Z-.]*?storage\.googleapis\.com\/?[a-zA-Z-.]*)',
    r'([0-9a-zA-Z-.]*?digitaloceanspaces\.com\/?[a-zA-Z-.]*)',
    r'([0-9a-zA-Z-.]*?blob\.core\.windows\.net\/?[a-zA-Z-.]*)',
]
# Character alphabets used when sniffing hex / base64 blobs.
hexchar = "1234567890abcdefABCDEF"
base64char = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
# Module-level singleton: rebinding the name shadows the Color class on
# purpose — importers use `Color` as a ready-made instance.
Color = Color()
extractor = URLExtract() | src/Nemesis/lib/Globals.py | from urlextract import URLExtract
from Nemesis.lib.Color import Color
# General
# Matches url but not direct path
#url_regex = "((http|https)://)[a-zA-Z0-9\./?:@-_=]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*"
#url_regex_without_netloc = "((http|https)://)?[a-zA-Z0-9\./?:@-_=]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*"
single_path_regex = """('|"|\(|\))(\/){1}[a-zA-Z0-9-_]+(/)?('|"|\(|\))""" #(hi/) or "hi/"
path_regex = "([a-zA-Z0-9]+\.[a-zA-Z0-9]{3,6})?\/(([0-9a-zA-Z+.-]+)([\/&| ])){1,30}([a-zA-Z0-9]+(\.[a-zA-Z0-9]*)?)?(\?|;)?([a-zA-Z\[\]&=]*)?"
experimental_path_regex = "/?\w+(?:/\w+)*\?\w+=\w+(?:&\w+=\w+)*|(?:\w+(?:/\w+){2,}/?|\w+(?:/\w+)+/|/\w+/)(?:&\w+=\w+)*"
experimental_path_regex = "/?\w+(?:/\w+)*\?\w+=\w+(?:&\w+=\w+)*|(?:\w*(?:/\w+){2,}/?|\w+(?:/\w+)+/|/\w+/)(?:&\w+=\w+)*"
subdomain_regex = lambda subdomain: '(.*\.)?{}(\.)?'.format(subdomain)
dom_sources_regex = [
'document.url',
'document.documenturi',
'Document.URLUnencoded',
'Document.baseURI',
'Location.href',
'Location.search',
'Location.hash',
'Location.pathname',
'Document.cookie',
'Document.referrer',
'Window.name',
'History.pushState',
'History.replaceState',
'LocalStorage',
'SessionStorage',
'window.location',
'document.location'
]
dom_sinks_regex = [
'eval',
'setTimeout',
'setInterval',
'setImmediate',
'execScript',
'cyrpto.generateCRMFRequest',
'ScriptElement',
'(\.src)( )?=()?',
'(\.text)( )?=()?',
'(\.textContent)( )?=()?',
'(\.innerText)( )?=()?',
'(\.innerElement)( )?=()?',
'(\.innerHTML)( )?=()?',
'document.write',
'document.writeln',
]
custom_regex_not = [
''
]
custom_regex_insensitive = [
'secret',
'admin',
]
custom_regex_sensitive = [
'sourceMappingURL',
]
web_services_regex = [
'([0-9a-zA-Z-.]*s3[a-zA-Z0-9-.]*\.?amazonaws\.com\/?[a-zA-Z-.]*)',
'([0-9a-zA-Z-.]*?storage\.googleapis\.com\/?[a-zA-Z-.]*)',
'([0-9a-zA-Z-.]*?digitaloceanspaces\.com\/?[a-zA-Z-.]*)',
'([0-9a-zA-Z-.]*?blob\.core\.windows\.net\/?[a-zA-Z-.]*)',
]
hexchar = "1234567890abcdefABCDEF"
base64char = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/="
Color = Color()
extractor = URLExtract() | 0.259638 | 0.111434 |
import bcrypt
import pymysql.cursors
import json
import os
import sys
from flask import Flask, request, session, redirect, render_template
from flask_api import status
from werkzeug.serving import run_simple
from datetime import timezone, datetime
def utc_to_local(utc_dt):
    """Interpret a naive datetime as UTC and convert it to local time."""
    aware_utc = utc_dt.replace(tzinfo=timezone.utc)
    return aware_utc.astimezone(tz=None)
def eprint(*args, **kwargs):
    """Like print(), but directed at stderr."""
    stream = sys.stderr
    print(*args, file=stream, **kwargs)
# open up the config file
# NOTE(review): `config` is rebound from file handle to parsed dict; the
# file is read once at import time from the current working directory.
with open("config.json") as config:
    config = json.loads(config.read())
app = Flask(__name__, template_folder='templates')
app.debug = config["debug"]
# Secret used to sign the session cookie.
app.secret_key = config["session-secret"]
# check if the user is logged in
def isLoggedin():
    """Return True iff the current session is marked as logged in.

    Uses session.get so a missing key simply yields False; the previous
    bare try/except swallowed every exception, not just the KeyError.
    """
    return session.get("loggedin") is True
# create the vars that we use for the sessions
def createSession(userID, chapterID):
    """Mark the session authenticated and remember the user and chapter ids."""
    for key, value in (("loggedin", True),
                       ("userID", userID),
                       ("chapterID", chapterID)):
        session[key] = value
# wrapper to create DB connections
def createDBConnection():
    """Open a new pymysql connection configured from config.json.

    Rows come back as dicts (DictCursor); callers own commit/close.
    """
    connect_args = dict(
        host=config["host"],
        user=config["user"],
        password=config["password"],
        db=config["dbname"],
        charset=config["charset"],
        cursorclass=pymysql.cursors.DictCursor,
    )
    return pymysql.connect(**connect_args)
# wrapper to hash passwords
def hashPassword(passwrd):
    """Return the bcrypt hash (bytes) of a plaintext password."""
    encoded = passwrd.encode()
    return bcrypt.hashpw(encoded, bcrypt.gensalt())
# wrapper to check hashed passwords, returns a bool
def checkPassword(passwrd, hashedPass):
    """Return True iff plaintext *passwrd* matches the stored bcrypt hash.

    Uses bcrypt.checkpw, which performs a constant-time comparison; the
    previous '==' on the recomputed hash leaked timing information.
    """
    return bcrypt.checkpw(passwrd.encode(), hashedPass.encode())
@app.route("/", methods=["GET"])
def index():
if isLoggedin():
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT users.email, chapters.name, chapters.short_name FROM users, chapters WHERE " + \
"users.id=%s AND chapters.id = users.chapterID"
cursor.execute(sql, (session["userID"]))
results = cursor.fetchone()
sql = "SELECT id, name, chapterID FROM events WHERE chapterID = %s " + \
"ORDER BY time_stamp DESC"
cursor.execute(sql, (session["chapterID"]))
event_list = cursor.fetchall()
try:
eventID = request.args.get("event_id")
if eventID == None:
eventID = event_list[0]["id"]
except:
eventID = 0
for item in event_list:
if item["id"] == int(eventID):
item["selected"] = True
else:
item["selected"] = False
sql = "SELECT name, studentID, time_stamp FROM dataList WHERE " + \
"chapterID = %s AND eventID = %s ORDER BY time_stamp DESC"
cursor.execute(sql, (session["chapterID"], eventID))
dataList = cursor.fetchall()
for item in dataList:
item["time_stamp"] = item[
"time_stamp"].strftime("%I:%M %p %m/%d/%Y ")
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (item["studentID"]))
blacklist_names = cursor.fetchall()
blacklist_names_string = ""
item["self_blacklisted"] = False
item["blacklisted"] = False
for bl_item in blacklist_names:
item["blacklisted"] = True
if bl_item["id"] == session["chapterID"]:
item["self_blacklisted"] = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
item["blacklist"] = blacklist_names_string
cursor.close()
connection.close()
return render_template('index.html', chapter=results["name"],
email=results["email"], dataList=dataList,
event_list=event_list, eventID=eventID,
chapter_short=results["short_name"])
else:
return render_template('login.html')
@app.route("/add_event", methods=["POST"])
def createEvent():
connection = createDBConnection()
name = request.form["event_name"]
chapterID = session["chapterID"]
cursor = connection.cursor()
sql = "INSERT INTO events(name, chapterID) VALUES(%s, %s)"
cursor.execute(sql, (name, chapterID))
cursor.execute("SELECT LAST_INSERT_ID()")
new_id = cursor.fetchone()["LAST_INSERT_ID()"]
cursor.close()
connection.commit()
connection.close()
returnDic = {"name": name, "url": new_id}
return json.dumps(returnDic), status.HTTP_202_ACCEPTED
@app.route("/get_event", methods=["GET"])
def getEvent():
event_id = request.args.get("event_id")
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT name, studentID, time_stamp FROM dataList WHERE" + \
" eventID=%s AND chapterID=%s"
cursor.execute(sql, (event_id, session["chapterID"]))
dataList = cursor.fetchall()
for item in dataList:
item["time_stamp"] = item[
"time_stamp"].strftime("%I:%M %p %m/%d/%Y ")
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (item["studentID"]))
blacklist_names = cursor.fetchall()
blacklist_names_string = ""
item["self_blacklisted"] = False
item["blacklisted"] = False
for bl_item in blacklist_names:
item["blacklisted"] = True
if bl_item["id"] == session["chapterID"]:
item["self_blacklisted"] = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
item["blacklist"] = blacklist_names_string
cursor.close()
connection.close()
return json.dumps(dataList), status.HTTP_202_ACCEPTED
@app.route("/login", methods=["POST"])
def login():
email = request.form["email"]
password = request.form["password"]
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT id, password, chapterID FROM users WHERE email=%s"
cursor.execute(sql, (email))
results = cursor.fetchone()
cursor.close()
connection.close()
validCredentials = False
try:
if checkPassword(password, results["password"]):
validCredentials = True
createSession(results["id"], results["chapterID"])
except:
pass
if validCredentials:
return "", status.HTTP_202_ACCEPTED
else:
return "", status.HTTP_401_UNAUTHORIZED
@app.route("/logout", methods=["GET"])
def removeSession():
session["loggedin"] = False
session.clear()
return redirect("/", code=303)
@app.route("/blacklist", methods=["POST"])
def blacklist():
studentID = request.form["studentID"]
adminPassword = request.form["password"]
shouldBlacklist = not bool(request.form["shouldBlacklist"])
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT password FROM users WHERE id=%s"
cursor.execute(sql, (session["userID"]))
dbPassword = cursor.fetchone()["password"]
if not checkPassword(adminPassword, dbPassword):
return "", status.HTTP_401_UNAUTHORIZED
if shouldBlacklist == True:
sql = "INSERT INTO blacklist(studentID, chapterID) VALUES(%s, %s) " + \
" ON DUPLICATE KEY UPDATE blacklisted = %s"
cursor.execute(sql, (studentID, session["chapterID"], shouldBlacklist))
else:
sql = "UPDATE blacklist SET blacklisted = 0 WHERE studentID = %s AND chapterID = %s"
cursor.execute(sql, (studentID, session["chapterID"]))
cursor.close()
connection.commit()
connection.close()
return "", status.HTTP_202_ACCEPTED
@app.route("/card-reader", methods=["POST"])
def cardReader():
if isLoggedin() == False:
return "", status.HTTP_401_UNAUTHORIZED
try:
studentID = request.form["studentID"]
name = request.form["name"]
raw = request.form["raw"]
eventID = request.form["eventID"]
except:
return "", status.HTTP_400_BAD_REQUEST
userID = session["userID"]
chapterID = session["chapterID"]
connection = createDBConnection()
cursor = connection.cursor()
sql = "INSERT INTO dataList(name, studentID, card_text, userID, " + \
"chapterID, eventID) VALUES(%s, %s, %s, %s, %s, %s)"
cursor.execute(sql, (name, studentID, raw, userID, chapterID, eventID))
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (studentID))
blacklist_names = cursor.fetchall()
cursor.close()
connection.commit()
connection.close()
blacklist_names_string = ""
self_blacklisted = False
blacklisted = False
for bl_item in blacklist_names:
blacklisted = True
if bl_item["id"] == session["chapterID"]:
self_blacklisted = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
returnDic = {"name": name, "time": datetime.now(
).strftime("%I:%M %p %m/%d/%Y "), "blackList": blacklist_names_string,
"self_blacklisted": self_blacklisted, "blacklisted": blacklisted}
return json.dumps(returnDic), status.HTTP_202_ACCEPTED
# Watch the template tree so the dev reloader restarts on template edits.
extra_dirs = ['templates']
extra_files = list(extra_dirs)
for watched_dir in extra_dirs:
    for dirpath, _dirnames, filenames in os.walk(watched_dir):
        for fname in filenames:
            full_path = os.path.join(dirpath, fname)
            if os.path.isfile(full_path):
                extra_files.append(full_path)
if __name__ == "__main__":
run_simple(config["website"], config["port"], app,
use_reloader=True, use_debugger=True, use_evalex=True,
extra_files=extra_files) | main.py | import bcrypt
import pymysql.cursors
import json
import os
import sys
from flask import Flask, request, session, redirect, render_template
from flask_api import status
from werkzeug.serving import run_simple
from datetime import timezone, datetime
def utc_to_local(utc_dt):
return utc_dt.replace(tzinfo=timezone.utc).astimezone(tz=None)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
# open up the config file
with open("config.json") as config:
config = json.loads(config.read())
app = Flask(__name__, template_folder='templates')
app.debug = config["debug"]
app.secret_key = config["session-secret"]
# check if the user is logged in
def isLoggedin():
try:
if session["loggedin"] == True:
return True
else:
return False
# this happens if session["loggedin"] is undefined
except:
return False
# create the vars that we use for the sessions
def createSession(userID, chapterID):
session["loggedin"] = True
session["userID"] = userID
session["chapterID"] = chapterID
# wrapper to create DB connections
def createDBConnection():
return pymysql.connect(host=config["host"],
user=config["user"],
password=config["password"],
db=config["dbname"],
charset=config["charset"],
cursorclass=pymysql.cursors.DictCursor)
# wraper to hash passwords
def hashPassword(passwrd):
return bcrypt.hashpw(passwrd.encode(), bcrypt.gensalt())
# wraper to check hashed passwords, returns a bool
def checkPassword(passwrd, hashedPass):
return hashedPass.encode() == bcrypt.hashpw(passwrd.encode(),
hashedPass.encode())
@app.route("/", methods=["GET"])
def index():
if isLoggedin():
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT users.email, chapters.name, chapters.short_name FROM users, chapters WHERE " + \
"users.id=%s AND chapters.id = users.chapterID"
cursor.execute(sql, (session["userID"]))
results = cursor.fetchone()
sql = "SELECT id, name, chapterID FROM events WHERE chapterID = %s " + \
"ORDER BY time_stamp DESC"
cursor.execute(sql, (session["chapterID"]))
event_list = cursor.fetchall()
try:
eventID = request.args.get("event_id")
if eventID == None:
eventID = event_list[0]["id"]
except:
eventID = 0
for item in event_list:
if item["id"] == int(eventID):
item["selected"] = True
else:
item["selected"] = False
sql = "SELECT name, studentID, time_stamp FROM dataList WHERE " + \
"chapterID = %s AND eventID = %s ORDER BY time_stamp DESC"
cursor.execute(sql, (session["chapterID"], eventID))
dataList = cursor.fetchall()
for item in dataList:
item["time_stamp"] = item[
"time_stamp"].strftime("%I:%M %p %m/%d/%Y ")
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (item["studentID"]))
blacklist_names = cursor.fetchall()
blacklist_names_string = ""
item["self_blacklisted"] = False
item["blacklisted"] = False
for bl_item in blacklist_names:
item["blacklisted"] = True
if bl_item["id"] == session["chapterID"]:
item["self_blacklisted"] = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
item["blacklist"] = blacklist_names_string
cursor.close()
connection.close()
return render_template('index.html', chapter=results["name"],
email=results["email"], dataList=dataList,
event_list=event_list, eventID=eventID,
chapter_short=results["short_name"])
else:
return render_template('login.html')
@app.route("/add_event", methods=["POST"])
def createEvent():
connection = createDBConnection()
name = request.form["event_name"]
chapterID = session["chapterID"]
cursor = connection.cursor()
sql = "INSERT INTO events(name, chapterID) VALUES(%s, %s)"
cursor.execute(sql, (name, chapterID))
cursor.execute("SELECT LAST_INSERT_ID()")
new_id = cursor.fetchone()["LAST_INSERT_ID()"]
cursor.close()
connection.commit()
connection.close()
returnDic = {"name": name, "url": new_id}
return json.dumps(returnDic), status.HTTP_202_ACCEPTED
@app.route("/get_event", methods=["GET"])
def getEvent():
event_id = request.args.get("event_id")
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT name, studentID, time_stamp FROM dataList WHERE" + \
" eventID=%s AND chapterID=%s"
cursor.execute(sql, (event_id, session["chapterID"]))
dataList = cursor.fetchall()
for item in dataList:
item["time_stamp"] = item[
"time_stamp"].strftime("%I:%M %p %m/%d/%Y ")
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (item["studentID"]))
blacklist_names = cursor.fetchall()
blacklist_names_string = ""
item["self_blacklisted"] = False
item["blacklisted"] = False
for bl_item in blacklist_names:
item["blacklisted"] = True
if bl_item["id"] == session["chapterID"]:
item["self_blacklisted"] = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
item["blacklist"] = blacklist_names_string
cursor.close()
connection.close()
return json.dumps(dataList), status.HTTP_202_ACCEPTED
@app.route("/login", methods=["POST"])
def login():
email = request.form["email"]
password = request.form["password"]
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT id, password, chapterID FROM users WHERE email=%s"
cursor.execute(sql, (email))
results = cursor.fetchone()
cursor.close()
connection.close()
validCredentials = False
try:
if checkPassword(password, results["password"]):
validCredentials = True
createSession(results["id"], results["chapterID"])
except:
pass
if validCredentials:
return "", status.HTTP_202_ACCEPTED
else:
return "", status.HTTP_401_UNAUTHORIZED
@app.route("/logout", methods=["GET"])
def removeSession():
session["loggedin"] = False
session.clear()
return redirect("/", code=303)
@app.route("/blacklist", methods=["POST"])
def blacklist():
studentID = request.form["studentID"]
adminPassword = request.form["password"]
shouldBlacklist = not bool(request.form["shouldBlacklist"])
connection = createDBConnection()
cursor = connection.cursor()
sql = "SELECT password FROM users WHERE id=%s"
cursor.execute(sql, (session["userID"]))
dbPassword = cursor.fetchone()["password"]
if not checkPassword(adminPassword, dbPassword):
return "", status.HTTP_401_UNAUTHORIZED
if shouldBlacklist == True:
sql = "INSERT INTO blacklist(studentID, chapterID) VALUES(%s, %s) " + \
" ON DUPLICATE KEY UPDATE blacklisted = %s"
cursor.execute(sql, (studentID, session["chapterID"], shouldBlacklist))
else:
sql = "UPDATE blacklist SET blacklisted = 0 WHERE studentID = %s AND chapterID = %s"
cursor.execute(sql, (studentID, session["chapterID"]))
cursor.close()
connection.commit()
connection.close()
return "", status.HTTP_202_ACCEPTED
@app.route("/card-reader", methods=["POST"])
def cardReader():
if isLoggedin() == False:
return "", status.HTTP_401_UNAUTHORIZED
try:
studentID = request.form["studentID"]
name = request.form["name"]
raw = request.form["raw"]
eventID = request.form["eventID"]
except:
return "", status.HTTP_400_BAD_REQUEST
userID = session["userID"]
chapterID = session["chapterID"]
connection = createDBConnection()
cursor = connection.cursor()
sql = "INSERT INTO dataList(name, studentID, card_text, userID, " + \
"chapterID, eventID) VALUES(%s, %s, %s, %s, %s, %s)"
cursor.execute(sql, (name, studentID, raw, userID, chapterID, eventID))
sql = "SELECT chapters.id, chapters.short_name FROM chapters INNER JOIN " + \
"blacklist ON blacklist.chapterID = chapters.id WHERE " + \
"studentID=%s AND blacklisted=1"
cursor.execute(sql, (studentID))
blacklist_names = cursor.fetchall()
cursor.close()
connection.commit()
connection.close()
blacklist_names_string = ""
self_blacklisted = False
blacklisted = False
for bl_item in blacklist_names:
blacklisted = True
if bl_item["id"] == session["chapterID"]:
self_blacklisted = True
blacklist_names_string = bl_item[
"short_name"] + blacklist_names_string
else:
blacklist_names_string += "/" + bl_item["short_name"]
try:
if blacklist_names_string[0] == "/":
blacklist_names_string = blacklist_names_string[1:]
except:
pass
returnDic = {"name": name, "time": datetime.now(
).strftime("%I:%M %p %m/%d/%Y "), "blackList": blacklist_names_string,
"self_blacklisted": self_blacklisted, "blacklisted": blacklisted}
return json.dumps(returnDic), status.HTTP_202_ACCEPTED
# reload the templates without restarting
extra_dirs = ['templates']
extra_files = extra_dirs[:]
for extra_dir in extra_dirs:
for dirname, dirs, files in os.walk(extra_dir):
for filename in files:
filename = os.path.join(dirname, filename)
if os.path.isfile(filename):
extra_files.append(filename)
if __name__ == "__main__":
run_simple(config["website"], config["port"], app,
use_reloader=True, use_debugger=True, use_evalex=True,
extra_files=extra_files) | 0.214691 | 0.081666 |
import ast
from collections import ChainMap
from dataclasses import dataclass, field
from functools import singledispatch
from typing import ChainMap as CM
from typing import Iterator, List, Optional, Tuple, Type
from breakfast.position import Position
from breakfast.source import Source
from tests import make_source
@dataclass
class Scope:
    """A lexical scope: a layered name table plus the node kind that opened it.

    `lookup` is a ChainMap so child scopes see (and can shadow) parent
    entries; `node_type` records which AST node class introduced the scope.
    """
    lookup: CM[  # pylint: disable=unsubscriptable-object
        str, List["Occurrence"]
    ] = field(default_factory=ChainMap)
    node_type: Optional[Type[ast.AST]] = None

    def add_occurrence(
        self,
        name: str,
        position: Position,
        node: ast.AST,
        new_scope: Optional["Scope"] = None,
    ) -> "Occurrence":
        """Record an occurrence of *name* in this scope and return it.

        The occurrence carries its own child scope (freshly created unless
        *new_scope* is supplied by the caller).
        """
        occurrence = Occurrence(name, position, node, new_scope or self.new_scope(node))
        self.lookup.setdefault(name, []).append(occurrence)  # pylint: disable=no-member
        return occurrence

    def new_scope(
        self,
        node: ast.AST,
        new_lookup: Optional[
            CM[str, List["Occurrence"]]  # pylint: disable=unsubscriptable-object
        ] = None,
    ) -> "Scope":
        """Create a child scope for *node*, chaining onto this scope's lookup
        unless an explicit *new_lookup* ChainMap is provided."""
        new_lookup = new_lookup or self.lookup.new_child()  # pylint: disable=no-member
        return Scope(node_type=node.__class__, lookup=new_lookup)
@dataclass
class Occurrence:
    """A single appearance of a name at a source position."""
    name: str
    position: Position
    node: ast.AST  # the AST node the name was found on
    scope: Scope  # the scope introduced by / attached to this occurrence
def node_position(
    node: ast.AST, source: Source, row_offset: int = 0, column_offset: int = 0
) -> Position:
    """Translate an AST node's 1-based lineno into a 0-based Position.

    Offsets let callers shift past decorators (rows) or leading keywords
    such as "def " / "class " (columns).
    """
    return Position(
        source=source,
        row=(node.lineno - 1) + row_offset,
        column=node.col_offset + column_offset,
    )
@singledispatch
def visit(
    node: ast.AST, source: Source, scope: Scope
) -> Iterator[Tuple[Scope, Occurrence]]:
    """Visit a node.

    Copied and reworked from NodeVisitor in:
    https://github.com/python/cpython/blob/master/Lib/ast.py
    """
    # Default handler: only Module and Name nodes produce an occurrence
    # here; everything else just recurses into its children.
    name = None
    if isinstance(node, ast.Module):
        name = source.module_name
    elif isinstance(node, ast.Name):
        name = node.id
    if name:
        position = node_position(node, source)
        occurrence = scope.add_occurrence(name, position, node)
        if isinstance(node, ast.Name):
            # A bare Name opens a fresh, unchained scope for what follows
            # (e.g. attribute accesses hanging off this name).
            scope = Scope(node_type=node.__class__)
        yield scope, occurrence
    yield from generic_visit(node, source, scope)
@visit.register
def visit_function(node: ast.FunctionDef, source: Source, scope: Scope):
    """Visit a function definition: its name, its arguments, then its body.

    Methods (functions directly inside a class) get a scope chained to the
    class scope's *parent* lookup, so the class namespace itself is not
    visible from inside the method body.
    """
    row_offset, column_offset = len(node.decorator_list), len("def ")
    position = node_position(
        node, source, row_offset=row_offset, column_offset=column_offset
    )
    if scope.node_type == ast.ClassDef:
        new_scope = scope.new_scope(node, new_lookup=scope.lookup.parents.new_child())
    else:
        new_scope = scope.new_scope(node)
    occurrence = scope.add_occurrence(node.name, position, node, new_scope=new_scope)
    yield occurrence.scope, occurrence
    for arg in node.args.args:
        arg_position = node_position(arg, source)
        arg_occurrence = Occurrence(
            name=arg.arg, position=arg_position, node=node, scope=new_scope
        )
        # Bug fix: the argument occurrence was constructed but never
        # yielded (and therefore silently dropped) in the original code.
        yield new_scope, arg_occurrence
    yield from generic_visit(node, source, new_scope)
@visit.register
def visit_call(node: ast.Call, source: Source, scope: Scope):
call_position = node_position(node, source)
new_scope = scope
for new_scope, occurrence in visit(node.func, source, scope):
yield new_scope, occurrence
for keyword in node.keywords:
if not keyword.arg:
continue
position = source.find_after(keyword.arg, call_position)
occurrence = Occurrence(
name=keyword.arg, position=position, node=node, scope=new_scope
)
yield new_scope, occurrence
@visit.register
def visit_class(node: ast.ClassDef, source: Source, scope: Scope):
row_offset, column_offset = len(node.decorator_list), len("class ")
position = node_position(
node, source, row_offset=row_offset, column_offset=column_offset
)
occurrence = scope.add_occurrence(node.name, position, node)
yield occurrence.scope, occurrence
yield from generic_visit(node, source, scope)
@visit.register
def visit_attribute(
node: ast.Attribute, source: Source, scope: Scope
) -> Iterator[Tuple[Scope, Occurrence]]:
"""Visit an Attribute node.
For Attributes, we have to sort of turn things inside out to build up the nested
scope correctly, because a.b.c shows up as `Attribute(value=a.b, attr=c)`.
"""
occurrence = None
new_scope = scope
for new_scope, occurrence in visit(node.value, source, scope):
yield new_scope, occurrence
position = node_position(node, source)
occurrence = new_scope.add_occurrence(node.attr, position, node)
yield occurrence.scope, occurrence
def generic_visit(
node, source: Source, scope: Scope
) -> Iterator[Tuple[Scope, Occurrence]]:
"""Called if no explicit visitor function exists for a node.
Copied and reworked from NodeVisitor in:
https://github.com/python/cpython/blob/master/Lib/ast.py
"""
for _, value in ast.iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, ast.AST):
yield from visit(item, source, scope)
elif isinstance(value, ast.AST):
yield from visit(value, source, scope)
def collect_occurrences(source: Source) -> List[Occurrence]:
initial_node = source.get_ast()
top_level_scope = Scope()
return [o for _, o in visit(initial_node, source=source, scope=top_level_scope)]
def all_occurrences_of(position: Position) -> List[Occurrence]:
original_occurrence = next(
(o for o in collect_occurrences(position.source) if o.position == position),
None,
)
if not original_occurrence:
return []
return original_occurrence.scope.lookup[original_occurrence.name]
def test_finds_all_occurrences_of_function_local():
source = make_source(
"""
def fun():
old = 12
old2 = 13
result = old + old2
del old
return result
old = 20
"""
)
position = Position(source=source, row=2, column=4)
results = all_occurrences_of(position)
assert len(results) == 3
def test_module_global_does_not_see_function_local():
source = make_source(
"""
def fun():
old = 12
old2 = 13
result = old + old2
del old
return result
old = 20
"""
)
position = Position(source=source, row=8, column=0)
results = all_occurrences_of(position)
assert len(results) == 1
def test_distinguishes_between_variable_and_attribute():
source = make_source(
"""
import os
path = os.path.dirname(__file__)
"""
)
position = Position(source=source, row=3, column=0)
results = all_occurrences_of(position)
assert len(results) == 1
def test_finds_variable_in_closure():
source = make_source(
"""
old = 12
def fun():
result = old + 1
return result
old = 20
"""
)
position = Position(source=source, row=1, column=0)
results = all_occurrences_of(position)
assert len(results) == 3 | tests/test_attempt_6.py | import ast
from collections import ChainMap
from dataclasses import dataclass, field
from functools import singledispatch
from typing import ChainMap as CM
from typing import Iterator, List, Optional, Tuple, Type
from breakfast.position import Position
from breakfast.source import Source
from tests import make_source
@dataclass
class Scope:
    """A lexical scope: a layered name table plus the AST node type that opened it."""

    # Layered name table; child scopes stack a new ChainMap layer on top.
    lookup: CM[  # pylint: disable=unsubscriptable-object
        str, List["Occurrence"]
    ] = field(default_factory=ChainMap)
    # AST node class that introduced this scope (Module, FunctionDef, ...); None for the root.
    node_type: Optional[Type[ast.AST]] = None

    def add_occurrence(
        self,
        name: str,
        position: Position,
        node: ast.AST,
        new_scope: Optional["Scope"] = None,
    ) -> "Occurrence":
        """Record an occurrence of `name` in this scope and return it.

        The occurrence carries `new_scope` when the caller has already built
        the scope this name introduces; otherwise a fresh child scope for
        `node` is created.
        """
        occurrence = Occurrence(name, position, node, new_scope or self.new_scope(node))
        self.lookup.setdefault(name, []).append(occurrence)  # pylint: disable=no-member
        return occurrence

    def new_scope(
        self,
        node: ast.AST,
        new_lookup: Optional[
            CM[str, List["Occurrence"]]  # pylint: disable=unsubscriptable-object
        ] = None,
    ) -> "Scope":
        """Return a child scope for `node`, layering a new lookup over this one unless one is given."""
        new_lookup = new_lookup or self.lookup.new_child()  # pylint: disable=no-member
        return Scope(node_type=node.__class__, lookup=new_lookup)
@dataclass
class Occurrence:
    """A single appearance of a name in the source, tied to the scope it was found in."""

    # The identifier as written in the source.
    name: str
    # Row/column location of the identifier.
    position: Position
    # AST node that produced this occurrence.
    node: ast.AST
    # Scope the occurrence belongs to (or, for definitions, introduces).
    scope: Scope
def node_position(
    node: ast.AST, source: Source, row_offset: int = 0, column_offset: int = 0
) -> Position:
    """Translate `node`'s AST location into a Position, with optional offsets."""
    return Position(
        source=source,
        # ast rows are 1-based; Position rows are 0-based.
        row=(node.lineno - 1) + row_offset,
        column=node.col_offset + column_offset,
    )
@singledispatch
def visit(
    node: ast.AST, source: Source, scope: Scope
) -> Iterator[Tuple[Scope, Occurrence]]:
    """Visit a node.

    Copied and reworked from NodeVisitor in:
    https://github.com/python/cpython/blob/master/Lib/ast.py

    Default visitor: records an occurrence for Module (under the module name)
    and Name nodes, then recurses into the children. Registered overloads
    handle FunctionDef, Call, ClassDef and Attribute.
    """
    name = None
    if isinstance(node, ast.Module):
        name = source.module_name
    elif isinstance(node, ast.Name):
        name = node.id
    if name:
        position = node_position(node, source)
        occurrence = scope.add_occurrence(name, position, node)
        if isinstance(node, ast.Name):
            # A Name yields a brand-new (empty-lookup) scope; visit_attribute
            # adds `.attr` occurrences into whatever scope its value yielded.
            scope = Scope(node_type=node.__class__)
        yield scope, occurrence
    yield from generic_visit(node, source, scope)
@visit.register
def visit_function(node: ast.FunctionDef, source: Source, scope: Scope):
    """Visit a function definition: record its name, then its arguments and body in a child scope."""
    # The name starts after any decorator lines and the "def " keyword.
    row_offset, column_offset = len(node.decorator_list), len("def ")
    position = node_position(
        node, source, row_offset=row_offset, column_offset=column_offset
    )
    if scope.node_type == ast.ClassDef:
        # A method body does not see the class namespace: build the function
        # lookup on top of the class's *parent* layers.
        new_scope = scope.new_scope(node, new_lookup=scope.lookup.parents.new_child())
    else:
        new_scope = scope.new_scope(node)
    occurrence = scope.add_occurrence(node.name, position, node, new_scope=new_scope)
    yield occurrence.scope, occurrence
    for arg in node.args.args:
        arg_position = node_position(arg, source)
        occurrence = Occurrence(
            name=arg.arg, position=arg_position, node=node, scope=new_scope
        )
        # Fix: this occurrence used to be constructed and then silently
        # dropped (dead store). Yield it, matching how visit_call reports
        # keyword-argument names.
        # NOTE(review): registering it via new_scope.add_occurrence may be the
        # fuller fix (making arguments renameable) -- confirm intent.
        yield new_scope, occurrence
    yield from generic_visit(node, source, new_scope)
@visit.register
def visit_call(node: ast.Call, source: Source, scope: Scope):
    """Visit a call: emit the callee's occurrences, then its keyword-argument names."""
    call_position = node_position(node, source)
    new_scope = scope  # fallback if visiting the callee yields nothing
    for new_scope, occurrence in visit(node.func, source, scope):
        yield new_scope, occurrence
    for keyword in node.keywords:
        if not keyword.arg:
            # **kwargs has no name to record.
            continue
        # The keyword name is located textually after the call's position.
        # NOTE(review): presumably because these keyword nodes carry no usable
        # position of their own -- confirm against the ast version targeted.
        position = source.find_after(keyword.arg, call_position)
        occurrence = Occurrence(
            name=keyword.arg, position=position, node=node, scope=new_scope
        )
        yield new_scope, occurrence
@visit.register
def visit_class(node: ast.ClassDef, source: Source, scope: Scope):
    """Visit a class definition: record its name, then visit the body."""
    # The class name starts after any decorator lines and the "class " keyword.
    row_offset, column_offset = len(node.decorator_list), len("class ")
    position = node_position(
        node, source, row_offset=row_offset, column_offset=column_offset
    )
    occurrence = scope.add_occurrence(node.name, position, node)
    yield occurrence.scope, occurrence
    # NOTE(review): the body is visited with the *enclosing* scope, not
    # occurrence.scope, so visit_function's `scope.node_type == ast.ClassDef`
    # check never sees ClassDef for methods -- confirm whether
    # occurrence.scope was intended here.
    yield from generic_visit(node, source, scope)
@visit.register
def visit_attribute(
    node: ast.Attribute, source: Source, scope: Scope
) -> Iterator[Tuple[Scope, Occurrence]]:
    """Visit an Attribute node.

    For Attributes, we have to sort of turn things inside out to build up the
    nested scope correctly, because a.b.c shows up as
    `Attribute(value=a.b, attr=c)`: the value is visited first, and the
    attribute name is added to whatever scope that visit ended in.
    """
    # Fix: removed the dead `occurrence = None` initialiser -- the variable is
    # assigned unconditionally below before any read outside the loop.
    new_scope = scope  # fallback if visiting the value yields nothing
    for new_scope, occurrence in visit(node.value, source, scope):
        yield new_scope, occurrence
    position = node_position(node, source)
    occurrence = new_scope.add_occurrence(node.attr, position, node)
    yield occurrence.scope, occurrence
def generic_visit(
    node, source: Source, scope: Scope
) -> Iterator[Tuple[Scope, Occurrence]]:
    """Fallback visitor: recurse into every direct AST child of `node`.

    Copied and reworked from NodeVisitor in:
    https://github.com/python/cpython/blob/master/Lib/ast.py

    `ast.iter_child_nodes` performs exactly the original field walk: for each
    field in order, yield it if it is an AST node, or yield its AST elements
    if it is a list.
    """
    for child in ast.iter_child_nodes(node):
        yield from visit(child, source, scope)
def collect_occurrences(source: Source) -> List[Occurrence]:
    """Walk the module AST of `source` and gather every occurrence found."""
    module = source.get_ast()
    root = Scope()
    occurrences = []
    for _scope, occurrence in visit(module, source=source, scope=root):
        occurrences.append(occurrence)
    return occurrences
def all_occurrences_of(position: Position) -> List[Occurrence]:
    """Return every occurrence sharing name and scope with the one at `position`.

    Returns an empty list when no occurrence sits exactly at `position`.
    """
    for candidate in collect_occurrences(position.source):
        if candidate.position == position:
            return candidate.scope.lookup[candidate.name]
    return []
def test_finds_all_occurrences_of_function_local():
    """The three in-function uses of `old` are found; the module-level one is not."""
    source = make_source(
        """
def fun():
    old = 12
    old2 = 13
    result = old + old2
    del old
    return result

old = 20
"""
    )
    # row=2, column=4 points at the `old` in `old = 12`.
    position = Position(source=source, row=2, column=4)
    results = all_occurrences_of(position)
    assert len(results) == 3
def test_module_global_does_not_see_function_local():
    """The module-level `old` is one occurrence; the shadowing locals don't count."""
    source = make_source(
        """
def fun():
    old = 12
    old2 = 13
    result = old + old2
    del old
    return result

old = 20
"""
    )
    # row=8, column=0 points at the module-level `old = 20`.
    position = Position(source=source, row=8, column=0)
    results = all_occurrences_of(position)
    assert len(results) == 1
def test_distinguishes_between_variable_and_attribute():
    """`os.path` must not count as an occurrence of the module-level `path` variable."""
    source = make_source(
        """
import os

path = os.path.dirname(__file__)
"""
    )
    # row=3, column=0 points at the `path` assignment target.
    position = Position(source=source, row=3, column=0)
    results = all_occurrences_of(position)
    assert len(results) == 1
def test_finds_variable_in_closure():
    """A module-level name read inside a function belongs to the same occurrence set."""
    source = make_source(
        """
old = 12

def fun():
    result = old + 1
    return result

old = 20
"""
    )
    # row=1, column=0 points at the first module-level `old`.
    position = Position(source=source, row=1, column=0)
    results = all_occurrences_of(position)
    assert len(results) == 3
from django.contrib import admin
from apps.website.models.article import Article
from apps.website.models.inbox import Inbox
from apps.website.models.comments import Comments
from apps.website.models.authors import Authors
from apps.website.models.settings import WebsiteSettings
from django.utils import timezone
from django import forms
from ckeditor_uploader.fields import RichTextUploadingFormField
from apps.website.helpers.utils import get_article_read_time_from_file, \
get_article_read_time_from_html, \
get_article_dir_path, get_article_file_name, \
remove_file, read_article_html_text
admin.site.index_title = 'TechBlog administration'
class ArticleAdminForm(forms.ModelForm):
content = RichTextUploadingFormField(required=False)
class Meta:
model = Article
fields = [
'author',
'page_name',
'title',
'keywords',
'description',
'public_image',
'status'
]
def get_initial_for_field(self, field, field_name):
try:
# Adding existing html into the CKEditor
page_name = self.initial['page_name']
if page_name:
article_file_path = f'{get_article_dir_path()}' \
f'/{page_name}.html'
raw_html = read_article_html_text(article_file_path)
self.initial['content'] = raw_html
except Exception:
pass
return super(ArticleAdminForm, self) \
.get_initial_for_field(field, field_name)
class ArticleAdmin(admin.ModelAdmin):
form = ArticleAdminForm
list_display = ['title', 'status', 'last_updated', 'author']
search_fields = ('author', 'id', 'title', 'status')
list_filter = ('author', 'status', 'created_at', 'last_updated',)
date_hierarchy = 'last_updated'
ordering = ('-last_updated',)
fieldsets = [
['General Information', {
'fields': [
'author',
'title',
'keywords',
'description',
'public_image',
'status',
'content'
]
}],
['Template Page Information (Don\'t play with it)', {
'classes': ['collapse'],
'fields': ['page_name'],
}],
]
# Save new or update existing model
def save_model(self, request, obj, form, change):
# Article Page Content
html_content = '{% extends "../article_base.html" %}\n' \
'{% block article_content %}\n' + \
str(form["content"].value()) + \
'{% endblock %}'
# Get article file name and dir to save html file
article_dir_path = get_article_dir_path()
article_file_name = get_article_file_name(str(form['title'].value()))
if form["page_name"].value():
article_file_name = form["page_name"].value()
# Delete an existing file
remove_file(f'{article_dir_path}/{obj.page_name}.html')
# Create a new article file
article_file = open(f'{article_dir_path}'
f'/{article_file_name}.html', 'w')
article_file.write(html_content)
article_file.close()
# Map newly created article file name in the database
obj.page_name = article_file_name
# Update read time for the article
obj.read_time = get_article_read_time_from_html(html_content)
super().save_model(request, obj, form, change)
# Don't delete articles
def has_delete_permission(self, request, obj=None):
return False
# Update article status to Draft
def make_draft(modeladmin, request, queryset):
queryset.update(status='d', last_updated=timezone.now())
make_draft.short_description = "Draft selected articles"
# Update article status to Published
def make_published(modeladmin, request, queryset):
queryset.update(status='p', last_updated=timezone.now())
make_published.short_description = "Publish selected articles"
# Update article status to Withdrawn
def make_withdrawn(modeladmin, request, queryset):
queryset.update(status='w', last_updated=timezone.now())
make_withdrawn.short_description = "Withdraw selected articles"
# Update article read time
def update_readtime(modeladmin, request, queryset):
for article in queryset:
Article.objects.filter(id=article.id). \
update(
read_time=get_article_read_time_from_file(
request,
article.page_name
),
last_updated=timezone.now()
)
update_readtime.short_description = "Update the read time"
actions = [make_draft, make_published, make_withdrawn, update_readtime]
class InboxAdmin(admin.ModelAdmin):
list_display = ['name', 'email', 'message', 'submitted_on', 'status']
ordering = ('-status',)
search_fields = ('name', 'email')
list_filter = ('submitted_on',)
date_hierarchy = 'submitted_on'
# Update message status to seen
def mark_as_seen(modeladmin, request, queryset):
queryset.update(status='SN')
mark_as_seen.short_description = "Mark selected messages as seen"
# Update message status to seen
def mark_as_unseen(modeladmin, request, queryset):
queryset.update(status='UN')
mark_as_unseen.short_description = "Mark selected messages as unseen"
# Deon't delete messages
def has_delete_permission(self, request, obj=None):
return False
actions = [mark_as_seen, mark_as_unseen]
class CommentsAdmin(admin.ModelAdmin):
list_display = ['name', 'email', 'comments', 'submitted_on', 'status']
ordering = ('-submitted_on',)
search_fields = ('name', 'email')
list_filter = ('submitted_on', 'status', 'article')
date_hierarchy = 'submitted_on'
# Update comments status to hide
def mark_as_hide(modeladmin, request, queryset):
queryset.update(status='HD')
mark_as_hide.short_description = "Mark selected comments as hide"
# Update comments status to show
def mark_as_show(modeladmin, request, queryset):
queryset.update(status='SH')
mark_as_show.short_description = "Mark selected comments as show"
# Deon't delete messages
def has_delete_permission(self, request, obj=None):
return False
actions = [mark_as_hide, mark_as_show]
class AuthorsAdminForm(forms.ModelForm):
bio = RichTextUploadingFormField(config_name='authors_config')
class Meta:
model = Article
fields = '__all__'
class AuthorsAdmin(admin.ModelAdmin):
form = AuthorsAdminForm
list_display = ['name', 'email', 'joined_at', 'status']
ordering = ('-name',)
search_fields = ('name', 'email')
list_filter = ('joined_at', 'status')
date_hierarchy = 'joined_at'
# Update comments status to hide
def mark_as_inactive(modeladmin, request, queryset):
queryset.update(status='IN')
mark_as_inactive.short_description = "Mark selected authors as inactive"
# Update comments status to show
def mark_as_active(modeladmin, request, queryset):
queryset.update(status='AC')
mark_as_active.short_description = "Mark selected authors as active"
# Deon't delete messages
def has_delete_permission(self, request, obj=None):
return False
actions = [mark_as_inactive, mark_as_active]
class WebsiteSettingsAdmin(admin.ModelAdmin):
list_display = ['under_maintenance']
# Deon't delete messages
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(Article, ArticleAdmin)
admin.site.register(Inbox, InboxAdmin)
admin.site.register(Comments, CommentsAdmin)
admin.site.register(Authors, AuthorsAdmin)
admin.site.register(WebsiteSettings, WebsiteSettingsAdmin)
# Globally disable delete selected
admin.site.disable_action('delete_selected') | apps/website/admin.py | from django.contrib import admin
from apps.website.models.article import Article
from apps.website.models.inbox import Inbox
from apps.website.models.comments import Comments
from apps.website.models.authors import Authors
from apps.website.models.settings import WebsiteSettings
from django.utils import timezone
from django import forms
from ckeditor_uploader.fields import RichTextUploadingFormField
from apps.website.helpers.utils import get_article_read_time_from_file, \
get_article_read_time_from_html, \
get_article_dir_path, get_article_file_name, \
remove_file, read_article_html_text
admin.site.index_title = 'TechBlog administration'
class ArticleAdminForm(forms.ModelForm):
    """Admin form for Article editing the article's HTML body via CKEditor.

    The HTML body lives on disk (one file per article), not on the model, so
    the extra `content` field is pre-filled from the article's file.
    """

    content = RichTextUploadingFormField(required=False)

    class Meta:
        model = Article
        fields = [
            'author',
            'page_name',
            'title',
            'keywords',
            'description',
            'public_image',
            'status'
        ]

    def get_initial_for_field(self, field, field_name):
        """Pre-fill `content` with the existing article HTML, if any."""
        # .get replaces the old try/KeyError dance for a missing page_name.
        page_name = self.initial.get('page_name')
        if page_name:
            try:
                article_file_path = f'{get_article_dir_path()}' \
                                    f'/{page_name}.html'
                # Best effort: a missing or unreadable file just leaves the
                # editor empty instead of breaking the admin change page.
                self.initial['content'] = read_article_html_text(article_file_path)
            except Exception:
                pass
        return super().get_initial_for_field(field, field_name)
class ArticleAdmin(admin.ModelAdmin):
    """Admin for articles.

    On save the CKEditor HTML is wrapped in the article template skeleton and
    written to a per-article .html file; only the file name (`page_name`) is
    stored on the model.
    """

    form = ArticleAdminForm
    list_display = ['title', 'status', 'last_updated', 'author']
    search_fields = ('author', 'id', 'title', 'status')
    list_filter = ('author', 'status', 'created_at', 'last_updated',)
    date_hierarchy = 'last_updated'
    ordering = ('-last_updated',)
    fieldsets = [
        ['General Information', {
            'fields': [
                'author',
                'title',
                'keywords',
                'description',
                'public_image',
                'status',
                'content'
            ]
        }],
        ['Template Page Information (Don\'t play with it)', {
            'classes': ['collapse'],
            'fields': ['page_name'],
        }],
    ]

    def save_model(self, request, obj, form, change):
        """Write the article HTML to its template file, then save the model."""
        # Wrap the editor HTML in the article template skeleton.
        html_content = '{% extends "../article_base.html" %}\n' \
                       '{% block article_content %}\n' + \
                       str(form["content"].value()) + \
                       '{% endblock %}'
        # Keep an existing page_name; otherwise derive a file name from the title.
        article_dir_path = get_article_dir_path()
        article_file_name = get_article_file_name(str(form['title'].value()))
        if form["page_name"].value():
            article_file_name = form["page_name"].value()
        # Delete the previous file (if any) before writing the new one.
        remove_file(f'{article_dir_path}/{obj.page_name}.html')
        # Fix: use a context manager (with an explicit encoding) so the file
        # handle is closed even if write() raises.
        with open(f'{article_dir_path}'
                  f'/{article_file_name}.html', 'w', encoding='utf-8') as article_file:
            article_file.write(html_content)
        # Map the newly written file name in the database.
        obj.page_name = article_file_name
        # Recompute the read time from the freshly generated HTML.
        obj.read_time = get_article_read_time_from_html(html_content)
        super().save_model(request, obj, form, change)

    # Articles are never deleted through the admin.
    def has_delete_permission(self, request, obj=None):
        return False

    # Admin action: move selected articles back to draft.
    def make_draft(modeladmin, request, queryset):
        queryset.update(status='d', last_updated=timezone.now())
    make_draft.short_description = "Draft selected articles"

    # Admin action: publish selected articles.
    def make_published(modeladmin, request, queryset):
        queryset.update(status='p', last_updated=timezone.now())
    make_published.short_description = "Publish selected articles"

    # Admin action: withdraw selected articles.
    def make_withdrawn(modeladmin, request, queryset):
        queryset.update(status='w', last_updated=timezone.now())
    make_withdrawn.short_description = "Withdraw selected articles"

    # Admin action: recompute read time from each article's file on disk.
    def update_readtime(modeladmin, request, queryset):
        for article in queryset:
            Article.objects.filter(id=article.id). \
                update(
                    read_time=get_article_read_time_from_file(
                        request,
                        article.page_name
                    ),
                    last_updated=timezone.now()
                )
    update_readtime.short_description = "Update the read time"

    actions = [make_draft, make_published, make_withdrawn, update_readtime]
class InboxAdmin(admin.ModelAdmin):
    """Admin for contact-form messages; they can be toggled seen/unseen, never deleted."""

    list_display = ['name', 'email', 'message', 'submitted_on', 'status']
    ordering = ('-status',)
    search_fields = ('name', 'email')
    list_filter = ('submitted_on',)
    date_hierarchy = 'submitted_on'

    # Admin action: mark selected messages as seen.
    def mark_as_seen(modeladmin, request, queryset):
        queryset.update(status='SN')
    mark_as_seen.short_description = "Mark selected messages as seen"

    # Admin action: mark selected messages as unseen.
    def mark_as_unseen(modeladmin, request, queryset):
        queryset.update(status='UN')
    mark_as_unseen.short_description = "Mark selected messages as unseen"

    # Messages are kept forever: deletion is disabled in the admin.
    def has_delete_permission(self, request, obj=None):
        return False

    actions = [mark_as_seen, mark_as_unseen]
class CommentsAdmin(admin.ModelAdmin):
    """Admin for article comments; they can be hidden/shown, never deleted."""

    list_display = ['name', 'email', 'comments', 'submitted_on', 'status']
    ordering = ('-submitted_on',)
    search_fields = ('name', 'email')
    list_filter = ('submitted_on', 'status', 'article')
    date_hierarchy = 'submitted_on'

    # Admin action: hide selected comments.
    def mark_as_hide(modeladmin, request, queryset):
        queryset.update(status='HD')
    mark_as_hide.short_description = "Mark selected comments as hide"

    # Admin action: show selected comments.
    def mark_as_show(modeladmin, request, queryset):
        queryset.update(status='SH')
    mark_as_show.short_description = "Mark selected comments as show"

    # Comments are kept forever: deletion is disabled in the admin.
    def has_delete_permission(self, request, obj=None):
        return False

    actions = [mark_as_hide, mark_as_show]
class AuthorsAdminForm(forms.ModelForm):
    """Admin form for Authors with a rich-text bio editor."""

    bio = RichTextUploadingFormField(config_name='authors_config')

    class Meta:
        # Fix: this form edits Authors, not Article. The old value never blew
        # up because ModelAdmin.get_form substitutes its own model, but it was
        # wrong and misleading.
        model = Authors
        fields = '__all__'
class AuthorsAdmin(admin.ModelAdmin):
    """Admin for authors; they can be (de)activated, never deleted."""

    form = AuthorsAdminForm
    list_display = ['name', 'email', 'joined_at', 'status']
    ordering = ('-name',)
    search_fields = ('name', 'email')
    list_filter = ('joined_at', 'status')
    date_hierarchy = 'joined_at'

    # Admin action: mark selected authors as inactive.
    def mark_as_inactive(modeladmin, request, queryset):
        queryset.update(status='IN')
    mark_as_inactive.short_description = "Mark selected authors as inactive"

    # Admin action: mark selected authors as active.
    def mark_as_active(modeladmin, request, queryset):
        queryset.update(status='AC')
    mark_as_active.short_description = "Mark selected authors as active"

    # Authors are kept forever: deletion is disabled in the admin.
    def has_delete_permission(self, request, obj=None):
        return False

    actions = [mark_as_inactive, mark_as_active]
class WebsiteSettingsAdmin(admin.ModelAdmin):
    """Admin for the site-wide settings record (e.g. maintenance mode)."""

    list_display = ['under_maintenance']

    # Settings must always exist: deletion is disabled in the admin.
    def has_delete_permission(self, request, obj=None):
        return False
# Register every model with its custom admin.
admin.site.register(Article, ArticleAdmin)
admin.site.register(Inbox, InboxAdmin)
admin.site.register(Comments, CommentsAdmin)
admin.site.register(Authors, AuthorsAdmin)
admin.site.register(WebsiteSettings, WebsiteSettingsAdmin)
# Globally disable the bulk "delete selected" action; the admins above also
# forbid per-object deletion.
admin.site.disable_action('delete_selected')
import unittest
import app.gpio as gpio
class TestMemoryUtils(unittest.TestCase):
def test_constructor_rejects_invalid_pin_numbers(self):
self.assertRaises(Exception, gpio.GpioInfo, -1)
self.assertRaises(Exception, gpio.GpioInfo, 54)
def test_constructor_calculates_correct_pin_bit_offset(self):
self.assertEqual(0, gpio.GpioInfo(0).pin_flip_bit_shift)
self.assertEqual(31, gpio.GpioInfo(31).pin_flip_bit_shift)
self.assertEqual(0, gpio.GpioInfo(32).pin_flip_bit_shift)
self.assertEqual(21, gpio.GpioInfo(53).pin_flip_bit_shift)
def test_constructor_calculates_correct_set_and_clr_register_index(self):
self.assertEqual(0, gpio.GpioInfo(0).set_clr_register_index)
self.assertEqual(0, gpio.GpioInfo(31).set_clr_register_index)
self.assertEqual(1, gpio.GpioInfo(32).set_clr_register_index)
self.assertEqual(1, gpio.GpioInfo(53).set_clr_register_index)
def test_constructor_calculates_correct_fsel_register_byte_offset(self):
self.assertEqual(0, gpio.GpioInfo(0).gp_fsel_reg_offset)
self.assertEqual(0, gpio.GpioInfo(9).gp_fsel_reg_offset)
self.assertEqual(4, gpio.GpioInfo(10).gp_fsel_reg_offset)
self.assertEqual(4, gpio.GpioInfo(19).gp_fsel_reg_offset)
self.assertEqual(8, gpio.GpioInfo(20).gp_fsel_reg_offset)
self.assertEqual(8, gpio.GpioInfo(29).gp_fsel_reg_offset)
self.assertEqual(12, gpio.GpioInfo(30).gp_fsel_reg_offset)
self.assertEqual(12, gpio.GpioInfo(39).gp_fsel_reg_offset)
self.assertEqual(16, gpio.GpioInfo(40).gp_fsel_reg_offset)
self.assertEqual(16, gpio.GpioInfo(49).gp_fsel_reg_offset)
self.assertEqual(20, gpio.GpioInfo(50).gp_fsel_reg_offset)
self.assertEqual(20, gpio.GpioInfo(53).gp_fsel_reg_offset)
def test_constructor_calculates_correct_fsel_pin_bit_shift(self):
self.assertEqual(0, gpio.GpioInfo(0).gp_fsel_bit_shift)
self.assertEqual(15, gpio.GpioInfo(5).gp_fsel_bit_shift)
self.assertEqual(0, gpio.GpioInfo(10).gp_fsel_bit_shift)
self.assertEqual(21, gpio.GpioInfo(17).gp_fsel_bit_shift)
if __name__ == '__main__':
unittest.main() | tests/test_gpio_info.py | import unittest
import app.gpio as gpio
class TestMemoryUtils(unittest.TestCase):
    """Unit tests for gpio.GpioInfo's register/bit-offset arithmetic.

    NOTE(review): the class name says "MemoryUtils" but every test exercises
    GpioInfo -- the name looks stale; confirm before renaming.
    """

    def test_constructor_rejects_invalid_pin_numbers(self):
        """Pins outside 0..53 must be rejected."""
        self.assertRaises(Exception, gpio.GpioInfo, -1)
        self.assertRaises(Exception, gpio.GpioInfo, 54)

    def test_constructor_calculates_correct_pin_bit_offset(self):
        """pin_flip_bit_shift is the pin's bit index within its 32-pin bank."""
        self.assertEqual(0, gpio.GpioInfo(0).pin_flip_bit_shift)
        self.assertEqual(31, gpio.GpioInfo(31).pin_flip_bit_shift)
        self.assertEqual(0, gpio.GpioInfo(32).pin_flip_bit_shift)
        self.assertEqual(21, gpio.GpioInfo(53).pin_flip_bit_shift)

    def test_constructor_calculates_correct_set_and_clr_register_index(self):
        """Pins 0-31 use set/clear register 0; pins 32-53 use register 1."""
        self.assertEqual(0, gpio.GpioInfo(0).set_clr_register_index)
        self.assertEqual(0, gpio.GpioInfo(31).set_clr_register_index)
        self.assertEqual(1, gpio.GpioInfo(32).set_clr_register_index)
        self.assertEqual(1, gpio.GpioInfo(53).set_clr_register_index)

    def test_constructor_calculates_correct_fsel_register_byte_offset(self):
        """Each function-select register covers ten pins and sits four bytes further along."""
        self.assertEqual(0, gpio.GpioInfo(0).gp_fsel_reg_offset)
        self.assertEqual(0, gpio.GpioInfo(9).gp_fsel_reg_offset)
        self.assertEqual(4, gpio.GpioInfo(10).gp_fsel_reg_offset)
        self.assertEqual(4, gpio.GpioInfo(19).gp_fsel_reg_offset)
        self.assertEqual(8, gpio.GpioInfo(20).gp_fsel_reg_offset)
        self.assertEqual(8, gpio.GpioInfo(29).gp_fsel_reg_offset)
        self.assertEqual(12, gpio.GpioInfo(30).gp_fsel_reg_offset)
        self.assertEqual(12, gpio.GpioInfo(39).gp_fsel_reg_offset)
        self.assertEqual(16, gpio.GpioInfo(40).gp_fsel_reg_offset)
        self.assertEqual(16, gpio.GpioInfo(49).gp_fsel_reg_offset)
        self.assertEqual(20, gpio.GpioInfo(50).gp_fsel_reg_offset)
        self.assertEqual(20, gpio.GpioInfo(53).gp_fsel_reg_offset)

    def test_constructor_calculates_correct_fsel_pin_bit_shift(self):
        """Within a function-select register each pin has a 3-bit field: shift = (pin % 10) * 3."""
        self.assertEqual(0, gpio.GpioInfo(0).gp_fsel_bit_shift)
        self.assertEqual(15, gpio.GpioInfo(5).gp_fsel_bit_shift)
        self.assertEqual(0, gpio.GpioInfo(10).gp_fsel_bit_shift)
        self.assertEqual(21, gpio.GpioInfo(17).gp_fsel_bit_shift)
if __name__ == '__main__':
unittest.main() | 0.597843 | 0.730819 |
from abc import ABCMeta, abstractmethod
from hackernewslib.mixins import KidsMixin, UserMixin, ContentMixin
class Item(metaclass=ABCMeta):
    """Abstract base for every Hacker News item (story, comment, job, poll, ...).

    Note: the redundant explicit `object` base was dropped -- in Python 3 every
    class derives from object, and the metaclass keyword alone suffices.
    """

    def __init__(self, client, id, data):
        self.client = client  # API client used by subclasses for lazy lookups
        self.id = id
        self.data = data  # raw item payload
        self.type = data.get("type")  # item type string, None when absent

    @classmethod
    @abstractmethod
    def parse(cls, client, item):
        """Build a typed instance from a raw API item dict."""
class Story(Item, KidsMixin, UserMixin, ContentMixin):
    """A story item (the mixins, by their names, appear to add lazy user/kids/content access)."""

    def __init__(self, client, id, data, by=None, descendants=None, score=None,
                 time=None, title=None, url=None, kids=None, text=None):
        super(Story, self).__init__(
            client=client,
            id=id,
            data=data
        )
        self.username = by
        self._by = None  # lazy cache -- presumably filled by UserMixin; confirm
        self.descendants = descendants
        self.score = score
        self.time = time
        self.title = title
        self.url = url
        self._content = None  # lazy cache -- presumably filled by ContentMixin; confirm
        self.kid_ids = kids
        self._kids = None  # lazy cache -- presumably filled by KidsMixin; confirm
        self.text = text

    @classmethod
    def parse(cls, client, item):
        """Build a Story from a raw API item dict; absent keys become None."""
        return cls(
            client=client,
            id=item["id"],
            data=item,
            by=item.get("by"),
            descendants=item.get("descendants"),
            score=item.get("score"),
            time=item.get("time"),
            title=item.get("title"),
            url=item.get("url"),
            kids=item.get("kids"),
            text=item.get("text")
        )
class Comment(Item, KidsMixin, UserMixin):
    """A comment item, with lazy access to its parent item."""

    def __init__(self, client, id, data, by=None, text=None, time=None,
                 kids=None, parent=None):
        super(Comment, self).__init__(
            client=client,
            id=id,
            data=data
        )
        self.username = by
        self._by = None  # lazy cache -- presumably filled by UserMixin; confirm
        self.text = text
        self.time = time
        self.kid_ids = kids
        self._kids = None  # lazy cache -- presumably filled by KidsMixin; confirm
        self.parent_id = parent
        self._parent = None  # cache for the lazily fetched parent item

    @classmethod
    def parse(cls, client, item):
        """Build a Comment from a raw API item dict; absent keys become None."""
        return cls(
            client=client,
            id=item["id"],
            data=item,
            by=item.get("by"),
            text=item.get("text"),
            time=item.get("time"),
            kids=item.get("kids"),
            parent=item.get("parent")
        )

    @property
    def parent(self):
        """The parent item, fetched once via the client and cached."""
        if self.parent_id is not None and self._parent is None:
            self._parent = self.client.item(self.parent_id)
        return self._parent
class Job(Item, UserMixin, ContentMixin):
    """A job posting item."""

    def __init__(self, client, id, data, by=None, score=None, text=None,
                 time=None, title=None, url=None):
        super(Job, self).__init__(client=client, id=id, data=data)
        self.username = by
        self._by = None  # lazy cache used with UserMixin
        self.score = score
        self.text = text
        self.time = time
        self.title = title
        self.url = url
        self._content = None  # lazy cache used with ContentMixin

    @classmethod
    def parse(cls, client, item):
        """Build a Job from a raw API item dict; absent keys become None."""
        optional = {
            key: item.get(key)
            for key in ("by", "score", "text", "time", "title", "url")
        }
        return cls(client=client, id=item["id"], data=item, **optional)
class Poll(Item, KidsMixin, UserMixin):
    """A poll item; its options are separate Part items referenced by id."""

    def __init__(self, client, id, data, by=None, descendants=None, kids=None,
                 parts=None, score=None, text=None, time=None, title=None):
        super(Poll, self).__init__(
            client=client,
            id=id,
            data=data
        )
        self.username = by
        self._by = None  # lazy cache -- presumably filled by UserMixin; confirm
        self.descendants = descendants
        self.kid_ids = kids
        self._kids = None  # lazy cache -- presumably filled by KidsMixin; confirm
        self.part_ids = parts
        self._parts = None  # parts cache; installed after one complete iteration
        self.score = score
        self.text = text
        self.time = time
        self.title = title

    @classmethod
    def parse(cls, client, item):
        """Build a Poll from a raw API item dict; absent keys become None."""
        return cls(
            client=client,
            id=item["id"],
            data=item,
            by=item.get("by"),
            descendants=item.get("descendants"),
            kids=item.get("kids"),
            parts=item.get("parts"),
            score=item.get("score"),
            text=item.get("text"),
            time=item.get("time"),
            title=item.get("title")
        )

    @property
    def parts(self):
        """Yield this poll's Part items, fetching them once and caching.

        Fix: the cache is now installed only after a *complete* pass.
        Previously the list was appended to in-place during the first
        iteration, so an abandoned partial pass left a truncated cache that
        every later iteration served as if it were complete.
        """
        if self._parts is not None:
            yield from self._parts
            return
        fetched = []
        for part in self.client.items(self.part_ids or []):
            fetched.append(part)
            yield part
        self._parts = fetched
class Part(Item, UserMixin):
    """A poll part (option) item, with lazy access to its owning poll."""

    # NOTE(review): `fields` is not referenced inside this class; it may be
    # consumed elsewhere -- confirm before removing.
    fields = ["by", "poll", "score", "text", "time"]

    def __init__(self, client, id, data, by=None, poll=None, score=None,
                 text=None, time=None):
        super(Part, self).__init__(
            client=client,
            id=id,
            data=data
        )
        self.username = by
        self._by = None  # lazy cache -- presumably filled by UserMixin; confirm
        self.poll_id = poll
        self._poll = None  # cache for the lazily fetched owning poll
        self.score = score
        self.text = text
        self.time = time

    @classmethod
    def parse(cls, client, item):
        """Build a Part from a raw API item dict; absent keys become None."""
        return cls(
            client=client,
            id=item["id"],
            data=item,
            by=item.get("by"),
            poll=item.get("poll"),
            score=item.get("score"),
            text=item.get("text"),
            time=item.get("time")
        )

    @property
    def poll(self):
        """The owning poll item, fetched once via the client and cached."""
        if self.poll_id is not None and self._poll is None:
            self._poll = self.client.item(self.poll_id)
        return self._poll
class Raw(Item):
    """Fallback wrapper for items that match no dedicated type."""

    @classmethod
    def parse(cls, client, item):
        """Wrap the raw API dict without interpreting any fields."""
        return cls(
            client=client,
            id=item["id"],
            data=item
        )
class User(object):
    """A Hacker News user profile."""

    def __init__(self, client, id, created, karma, about=None, delay=None,
                 submitted=None):
        self.client = client  # API client used to resolve submitted items
        self.id = id
        self.created = created
        self.karma = karma
        self.about = about
        self.delay = delay
        self.submitted_ids = submitted  # raw item ids, or None
        self._submitted = None  # cache; installed after one complete iteration

    @property
    def submitted(self):
        """Yield the user's submitted items, fetching them once and caching.

        Fix: the cache is now installed only after a *complete* pass.
        Previously the list was appended to in-place during the first
        iteration, so an abandoned partial pass left a truncated cache that
        every later iteration served as if it were complete.
        """
        if self._submitted is not None:
            yield from self._submitted
            return
        fetched = []
        for item in self.client.items(self.submitted_ids or []):
            fetched.append(item)
            yield item
        self._submitted = fetched
class Content(object):
    """A plain snapshot of an HTTP response fetched for an item's URL."""

    def __init__(self, url, response):
        self.url = url
        self.status_code = response.status_code
        # Detach the headers from the response's own mapping type.
        self.headers = dict(response.headers)
        self.content = response.content  # raw body bytes
        self.text = response.text  # decoded body
from hackernewslib.mixins import KidsMixin, UserMixin, ContentMixin
class Item(object, metaclass=ABCMeta):
    """Abstract base class for all Hacker News item types."""
    def __init__(self, client, id, data):
        """Store the API client, the item id and the raw payload dict."""
        self.client = client
        self.id = id
        self.data = data
        # "type" distinguishes story/comment/job/poll/pollopt payloads.
        self.type = data.get("type")
    @classmethod
    @abstractmethod
    def parse(cls, client, item):
        """Build a concrete Item subclass instance from a raw API dict."""
        pass
class Story(Item, KidsMixin, UserMixin, ContentMixin):
    """A submitted story, optionally carrying a URL and/or self text."""
    def __init__(self, client, id, data, by=None, descendants=None, score=None,
                 time=None, title=None, url=None, kids=None, text=None):
        super(Story, self).__init__(client=client, id=id, data=data)
        self.username = by
        self._by = None
        self.descendants = descendants
        self.score = score
        self.time = time
        self.title = title
        self.url = url
        self._content = None
        self.kid_ids = kids
        self._kids = None
        self.text = text
    @classmethod
    def parse(cls, client, item):
        """Build a Story from a raw API dictionary."""
        optional = {key: item.get(key) for key in
                    ("by", "descendants", "score", "time", "title", "url",
                     "kids", "text")}
        return cls(client=client, id=item["id"], data=item, **optional)
class Comment(Item, KidsMixin, UserMixin):
    """A comment attached to a parent item."""
    def __init__(self, client, id, data, by=None, text=None, time=None,
                 kids=None, parent=None):
        super(Comment, self).__init__(client=client, id=id, data=data)
        self.username = by
        self._by = None
        self.text = text
        self.time = time
        self.kid_ids = kids
        self._kids = None
        self.parent_id = parent
        self._parent = None
    @classmethod
    def parse(cls, client, item):
        """Build a Comment from a raw API dictionary."""
        optional = {key: item.get(key) for key in
                    ("by", "text", "time", "kids", "parent")}
        return cls(client=client, id=item["id"], data=item, **optional)
    @property
    def parent(self):
        """The item this comment replies to, fetched once and memoized."""
        if self._parent is None and self.parent_id is not None:
            self._parent = self.client.item(self.parent_id)
        return self._parent
class Job(Item, UserMixin, ContentMixin):
    """A job posting, optionally carrying a URL and/or self text."""
    def __init__(self, client, id, data, by=None, score=None, text=None,
                 time=None, title=None, url=None):
        super(Job, self).__init__(client=client, id=id, data=data)
        self.username = by
        self._by = None
        self.score = score
        self.text = text
        self.time = time
        self.title = title
        self.url = url
        self._content = None
    @classmethod
    def parse(cls, client, item):
        """Build a Job from a raw API dictionary."""
        optional = {key: item.get(key) for key in
                    ("by", "score", "text", "time", "title", "url")}
        return cls(client=client, id=item["id"], data=item, **optional)
class Poll(Item, KidsMixin, UserMixin):
    """A poll item whose options ("parts") are fetched lazily."""
    def __init__(self, client, id, data, by=None, descendants=None, kids=None,
                 parts=None, score=None, text=None, time=None, title=None):
        super(Poll, self).__init__(client=client, id=id, data=data)
        self.username = by
        self._by = None
        self.descendants = descendants
        self.kid_ids = kids
        self._kids = None
        self.part_ids = parts
        self._parts = None
        self.score = score
        self.text = text
        self.time = time
        self.title = title
    @classmethod
    def parse(cls, client, item):
        """Build a Poll from a raw API dictionary."""
        optional = {key: item.get(key) for key in
                    ("by", "descendants", "kids", "parts", "score", "text",
                     "time", "title")}
        return cls(client=client, id=item["id"], data=item, **optional)
    @property
    def parts(self):
        """Yield the poll's option items, caching them on first use."""
        if self._parts is None:
            # First access: fetch from the API and cache while yielding.
            self._parts = []
            for part in self.client.items(self.part_ids or []):
                self._parts.append(part)
                yield part
        else:
            # Cache hit: replay the previously fetched parts.
            for part in self._parts:
                yield part
class Part(Item, UserMixin):
    """A single poll option ("pollopt") item."""
    fields = ["by", "poll", "score", "text", "time"]
    def __init__(self, client, id, data, by=None, poll=None, score=None,
                 text=None, time=None):
        super(Part, self).__init__(client=client, id=id, data=data)
        self.username = by
        self._by = None
        self.poll_id = poll
        self._poll = None
        self.score = score
        self.text = text
        self.time = time
    @classmethod
    def parse(cls, client, item):
        """Build a Part from a raw API dictionary.

        ``cls.fields`` lists exactly the optional keyword parameters of
        ``__init__``, so the extracted dict can be splatted directly.
        """
        optional = {key: item.get(key) for key in cls.fields}
        return cls(client=client, id=item["id"], data=item, **optional)
    @property
    def poll(self):
        """The parent poll item, fetched once and memoized."""
        if self._poll is None and self.poll_id is not None:
            self._poll = self.client.item(self.poll_id)
        return self._poll
class Raw(Item):
    """Catch-all wrapper for items with no dedicated type class."""
    @classmethod
    def parse(cls, client, item):
        """Keep only the raw payload; no typed fields are extracted."""
        return cls(client=client, id=item["id"], data=item)
class User(object):
    """A Hacker News user profile; submissions are fetched on demand."""
    def __init__(self, client, id, created, karma, about=None, delay=None,
                 submitted=None):
        self.client = client
        self.id = id
        self.created = created
        self.karma = karma
        self.about = about
        self.delay = delay
        self.submitted_ids = submitted
        self._submitted = None
    @property
    def submitted(self):
        """Yield this user's submitted items, caching on first access."""
        if self._submitted is None:
            # Not yet fetched: pull from the API, caching as we go.
            self._submitted = []
            for entry in self.client.items(self.submitted_ids or []):
                self._submitted.append(entry)
                yield entry
        else:
            # Replay the cached items.
            for entry in self._submitted:
                yield entry
class Content(object):
    """Snapshot of an HTTP response fetched for an item's URL.

    Copies the interesting attributes off the response object so the
    response itself does not need to be kept alive.
    """
    def __init__(self, url, response):
        self.url = url
        self.status_code = response.status_code
        # Materialise headers as a plain dict (the HTTP library may hand
        # us a case-insensitive mapping).
        self.headers = dict(response.headers)
        self.content = response.content
        self.text = response.text
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# Auto-generated ETW payload parser for the Microsoft App-V Client
# StreamingUX provider. Field names appear to come from the provider
# manifest and are kept verbatim.
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=1, version=1)
class Microsoft_AppV_Client_StreamingUX_1_1(Etw):
    """Payload for event id 1, version 1: a wide string and a 64-bit int."""
    pattern = Struct(
        "astring" / WString,
        "uint64" / Int64ul
    )
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=2, version=1)
class Microsoft_AppV_Client_StreamingUX_2_1(Etw):
pattern = Struct(
"astring" / WString,
"uint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=3, version=1)
class Microsoft_AppV_Client_StreamingUX_3_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=4, version=1)
class Microsoft_AppV_Client_StreamingUX_4_1(Etw):
pattern = Struct(
"unint64" / Int64ul,
"uint642" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=5, version=1)
class Microsoft_AppV_Client_StreamingUX_5_1(Etw):
pattern = Struct(
"astring" / WString
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=6, version=1)
class Microsoft_AppV_Client_StreamingUX_6_1(Etw):
pattern = Struct(
"astring" / WString
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=20, version=1)
class Microsoft_AppV_Client_StreamingUX_20_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=21, version=1)
class Microsoft_AppV_Client_StreamingUX_21_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=40, version=1)
class Microsoft_AppV_Client_StreamingUX_40_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=41, version=1)
class Microsoft_AppV_Client_StreamingUX_41_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=42, version=1)
class Microsoft_AppV_Client_StreamingUX_42_1(Etw):
pattern = Struct(
"astring" / WString,
"uint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=43, version=1)
class Microsoft_AppV_Client_StreamingUX_43_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=44, version=1)
class Microsoft_AppV_Client_StreamingUX_44_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=45, version=1)
class Microsoft_AppV_Client_StreamingUX_45_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=60, version=1)
class Microsoft_AppV_Client_StreamingUX_60_1(Etw):
pattern = Struct(
"packageId" / Guid,
"versionId" / Guid
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=61, version=1)
class Microsoft_AppV_Client_StreamingUX_61_1(Etw):
pattern = Struct(
"packageId" / Guid,
"versionId" / Guid,
"percentageComplete" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=80, version=1)
class Microsoft_AppV_Client_StreamingUX_80_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=101, version=1)
class Microsoft_AppV_Client_StreamingUX_101_1(Etw):
pattern = Struct(
"uint32" / Int32ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=102, version=1)
class Microsoft_AppV_Client_StreamingUX_102_1(Etw):
pattern = Struct(
"astring" / WString
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=120, version=1)
class Microsoft_AppV_Client_StreamingUX_120_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=121, version=1)
class Microsoft_AppV_Client_StreamingUX_121_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=122, version=1)
class Microsoft_AppV_Client_StreamingUX_122_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=150, version=1)
class Microsoft_AppV_Client_StreamingUX_150_1(Etw):
    """Payload for event id 150, version 1.

    A server name, a global flag, and package/group total/succeeded/failed
    counters. Field names mirror the provider manifest verbatim.
    """
    # FIX: the closing parenthesis of this Struct had dataset-separator
    # text fused onto it, making the module a syntax error; restored.
    pattern = Struct(
        "server" / WString,
        "global" / Int8ul,
        "packageTotal" / Int32ul,
        "packageSucceeded" / Int32ul,
        "packageFailed" / Int32ul,
        "groupTotal" / Int32ul,
        "groupSucceeded" / Int32ul,
        "groupFailed" / Int32ul
    )
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=1, version=1)
class Microsoft_AppV_Client_StreamingUX_1_1(Etw):
pattern = Struct(
"astring" / WString,
"uint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=2, version=1)
class Microsoft_AppV_Client_StreamingUX_2_1(Etw):
pattern = Struct(
"astring" / WString,
"uint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=3, version=1)
class Microsoft_AppV_Client_StreamingUX_3_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=4, version=1)
class Microsoft_AppV_Client_StreamingUX_4_1(Etw):
pattern = Struct(
"unint64" / Int64ul,
"uint642" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=5, version=1)
class Microsoft_AppV_Client_StreamingUX_5_1(Etw):
pattern = Struct(
"astring" / WString
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=6, version=1)
class Microsoft_AppV_Client_StreamingUX_6_1(Etw):
pattern = Struct(
"astring" / WString
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=20, version=1)
class Microsoft_AppV_Client_StreamingUX_20_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=21, version=1)
class Microsoft_AppV_Client_StreamingUX_21_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=40, version=1)
class Microsoft_AppV_Client_StreamingUX_40_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=41, version=1)
class Microsoft_AppV_Client_StreamingUX_41_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=42, version=1)
class Microsoft_AppV_Client_StreamingUX_42_1(Etw):
pattern = Struct(
"astring" / WString,
"uint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=43, version=1)
class Microsoft_AppV_Client_StreamingUX_43_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=44, version=1)
class Microsoft_AppV_Client_StreamingUX_44_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=45, version=1)
class Microsoft_AppV_Client_StreamingUX_45_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=60, version=1)
class Microsoft_AppV_Client_StreamingUX_60_1(Etw):
pattern = Struct(
"packageId" / Guid,
"versionId" / Guid
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=61, version=1)
class Microsoft_AppV_Client_StreamingUX_61_1(Etw):
pattern = Struct(
"packageId" / Guid,
"versionId" / Guid,
"percentageComplete" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=80, version=1)
class Microsoft_AppV_Client_StreamingUX_80_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=101, version=1)
class Microsoft_AppV_Client_StreamingUX_101_1(Etw):
pattern = Struct(
"uint32" / Int32ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=102, version=1)
class Microsoft_AppV_Client_StreamingUX_102_1(Etw):
pattern = Struct(
"astring" / WString
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=120, version=1)
class Microsoft_AppV_Client_StreamingUX_120_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=121, version=1)
class Microsoft_AppV_Client_StreamingUX_121_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=122, version=1)
class Microsoft_AppV_Client_StreamingUX_122_1(Etw):
pattern = Struct(
"unint64" / Int64ul
)
@declare(guid=guid("28cb46c7-4003-4e50-8bd9-442086762d12"), event_id=150, version=1)
class Microsoft_AppV_Client_StreamingUX_150_1(Etw):
    """Payload for event id 150, version 1.

    A server name, a global flag, and package/group total/succeeded/failed
    counters. Field names mirror the provider manifest verbatim.
    """
    # FIX: the closing parenthesis of this Struct had dataset metadata
    # columns fused onto it, making the module a syntax error; restored.
    pattern = Struct(
        "server" / WString,
        "global" / Int8ul,
        "packageTotal" / Int32ul,
        "packageSucceeded" / Int32ul,
        "packageFailed" / Int32ul,
        "groupTotal" / Int32ul,
        "groupSucceeded" / Int32ul,
        "groupFailed" / Int32ul
    )
import json
from tornado import escape
from tornado.gen import coroutine, Return
from tornado.web import RequestHandler
from torndsession.sessionhandler import SessionBaseHandler
from urllib import quote, urlencode
import constant
from core.utils import send_request
from wechat import get_access_token
class BaseHandler(SessionBaseHandler):
    """Shared base for WeChat-authenticated mobile handlers.

    ``prepare`` resolves the enterprise-WeChat OAuth ``code`` query
    parameter into a user id stored in the session, and redirects to the
    auth endpoint when no user is cached yet.
    """
    def initialize(self):
        self.access_token = None
    @coroutine
    def prepare(self):
        self.access_token = yield get_access_token()
        # Exchange the OAuth "code" query parameter for user info and
        # cache the resulting user id in the session.
        auth_code = self.get_query_argument("code", None)
        if auth_code:
            user_info_url = constant.QUERY_USER_INFO_URL % (self.access_token, auth_code)
            response = yield send_request(user_info_url)
            payload = json.loads(response.body)
            user_id = payload.get('UserId', None)
            if user_id:
                self.session['user_id'] = user_id
        # No cached user: bounce through the WeChat auth endpoint so we
        # come back with a fresh "code".
        cached_user_id = self.session.get('user_id', None)
        if not cached_user_id:
            next_url = constant.BASE_URL + self.request.uri
            # NOTE(review): urlencode({'l': next_url})[2:] strips the "l="
            # prefix to obtain a URL-quoted value -- confirm intended.
            args = {"CorpID": self.settings[constant.WEIXIN_SETTINGS][constant.CorpID],
                    "redirect_uri": urlencode({'l': next_url})[2:]}
            url = constant.QUERY_AUTH_CODE_URL % args
            # response=yield send_request(url)
            self.redirect(url)
        SessionBaseHandler.prepare(self)  # run the parent session setup
    @coroutine
    def get_current_user(self):
        """Resolve the logged-in user's detail record, or None."""
        session_id = self.session.id
        if session_id:
            user_id = self.session.get('user_id', None)
            if not user_id:
                raise Return(None)
            else:
                # NOTE(review): the constant name QUYER_USER_DEATIL_URL is
                # misspelled at its definition site; kept as-is here.
                url = constant.QUYER_USER_DEATIL_URL % (self.access_token, user_id)
                detail = yield send_request(url)
                raise Return(json.loads(detail.body))
class IndexHandler(BaseHandler):
    """Mobile index page: echoes the logged-in user's detail as JSON."""
    @coroutine
    def get(self, *args, **kwargs):
        user_id=self.get_secure_cookie('userid')
        if user_id:
            # NOTE(review): this writes the literal string 'userid', not
            # the cookie value -- looks like a bug; confirm intent.
            self.write('userid')
        else:
            user=yield self.get_current_user()
            self.write(json.dumps(user))
        #get
        pass
    @coroutine
    def post(self, *args, **kwargs):
        # No POST behaviour implemented yet.
        pass
class UserHandler(BaseHandler):
    """Returns the current user's detail, or follows a "next" redirect."""
    @coroutine
    def get(self, *args, **kwargs):
        next_url=self.get_query_argument("next",None)
        if next_url:
            # A "next" query parameter short-circuits to a redirect.
            self.redirect(next_url)
        else:
            user=yield self.get_current_user()
            self.write(json.dumps(user))
route = [(r'/mobile/index.html', IndexHandler),
(r'/mobile/user',UserHandler),]
if __name__== "__main__":
pass | handler/mobile.py | import json
from tornado import escape
from tornado.gen import coroutine, Return
from tornado.web import RequestHandler
from torndsession.sessionhandler import SessionBaseHandler
from urllib import quote, urlencode
import constant
from core.utils import send_request
from wechat import get_access_token
class BaseHandler(SessionBaseHandler):
def initialize(self):
self.access_token = None
@coroutine
def prepare(self):
self.access_token = yield get_access_token()
# 通过查询参数取出code后获取人员信息并进行缓存
code = self.get_query_argument("code", None)
if code :
get_user_info_url = constant.QUERY_USER_INFO_URL % (self.access_token, code)
response = yield send_request(get_user_info_url)
result = json.loads(response.body)
user_id = result.get('UserId', None)
if user_id:
self.session['user_id'] = user_id
# 获取安全的code,并且缓存用户
user_id = self.session.get('user_id', None)
if not user_id:
next_url = constant.BASE_URL + self.request.uri
args = {"CorpID": self.settings[constant.WEIXIN_SETTINGS][constant.CorpID],
"redirect_uri": urlencode({'l':next_url})[2:]}
url = constant.QUERY_AUTH_CODE_URL % args
# response=yield send_request(url)
self.redirect(url)
SessionBaseHandler.prepare(self) # 调用父类方法
@coroutine
def get_current_user(self):
"""获取当前登陆的用户"""
session_id=self.session.id
if session_id:
user_id=self.session.get('user_id',None)
if not user_id:
raise Return(None)
else:
url=constant.QUYER_USER_DEATIL_URL%(self.access_token,user_id)
res = yield send_request(url)
raise Return(json.loads(res.body))
class IndexHandler(BaseHandler):
@coroutine
def get(self, *args, **kwargs):
user_id=self.get_secure_cookie('userid')
if user_id:
self.write('userid')
else:
user=yield self.get_current_user()
self.write(json.dumps(user))
#get
pass
@coroutine
def post(self, *args, **kwargs):
pass
class UserHandler(BaseHandler):
@coroutine
def get(self, *args, **kwargs):
next_url=self.get_query_argument("next",None)
if next_url:
self.redirect(next_url)
else:
user=yield self.get_current_user()
self.write(json.dumps(user))
route = [(r'/mobile/index.html', IndexHandler),
(r'/mobile/user',UserHandler),]
if __name__== "__main__":
pass | 0.287868 | 0.046357 |
from collections import defaultdict
import sys
from nordlys.logic.entity.entity import Entity
from nordlys.logic.query.mention import Mention
from nordlys.logic.query.query import Query
class Cmns(object):
    """Baseline entity linker ranking candidate entities by commonness."""
    def __init__(self, query, entity, cmns_th=0.1):
        self.__query = query
        self.__entity = entity
        self.__cmns_th = cmns_th
        self.__ngrams = None
        self.__ranked_ens = {}
    def __get_ngrams(self):
        """Group the query's n-grams by token length.

        Populates a dict like {1: ["xx", ...], 2: ["xx yy", ...], ...};
        a no-op when the grouping has already been computed.
        """
        if self.__ngrams is not None:
            return
        self.__ngrams = defaultdict(list)
        for gram in self.__query.get_ngrams():
            self.__ngrams[len(gram.split())].append(gram)
    def link(self):
        """Link the query's mentions to entities.

        Returns a dict {mention: (en_id, score), ...}.
        """
        self.__get_ngrams()
        self.rank_ens(len(self.__query.query.split()))
        return self.disambiguate()
    def disambiguate(self):
        """Keep only the best-scoring entity for every mention.

        Returns a dict {mention: (en_id, score), ...}. Ties resolve to the
        first candidate in insertion order, like the stable sort it replaces.
        """
        return {
            mention: max(candidates.items(), key=lambda pair: pair[1])
            for mention, candidates in self.__ranked_ens.items()
        }
    def rank_ens(self, n):
        """Collect candidate entities for every n-gram of length ``n``.

        Starts from the longest n-grams; when none of the current length
        matches any entity, recurses with length ``n - 1``.
        """
        found_any = False
        for gram in self.__ngrams[n]:
            candidates = Mention(gram, self.__entity, self.__cmns_th).get_cand_ens()
            if candidates:
                found_any = True
                self.__ranked_ens[gram] = candidates
        if not found_any and n > 1:
            self.rank_ens(n - 1)
def main(args):
    """Link the query given as the first CLI argument and print the result."""
    entity = Entity()
    query = Query(args[0])
    cmns = Cmns(query, entity, cmns_th=0.1)
    print(cmns.link())
if __name__ == "__main__":
main(sys.argv[1:]) | nordlys/logic/el/cmns.py | from collections import defaultdict
import sys
from nordlys.logic.entity.entity import Entity
from nordlys.logic.query.mention import Mention
from nordlys.logic.query.query import Query
class Cmns(object):
def __init__(self, query, entity, cmns_th=0.1):
self.__query = query
self.__entity = entity
self.__cmns_th = cmns_th
self.__ngrams = None
self.__ranked_ens = {}
def __get_ngrams(self):
"""Returns n-grams grouped by length.
:return: dictionary {1:["xx", ...], 2: ["xx yy", ...], ...}
"""
if self.__ngrams is None:
self.__ngrams = defaultdict(list)
for ngram in self.__query.get_ngrams():
self.__ngrams[len(ngram.split())].append(ngram)
def link(self):
"""Links the query to the entity.
dictionary {mention: (en_id, score), ..}
"""
self.__get_ngrams()
self.rank_ens(len(self.__query.query.split()))
linked_ens = self.disambiguate()
return linked_ens
def disambiguate(self):
"""Selects only one entity per mention.
:return dictionary {mention: (en_id, score), ..}
"""
linked_ens = {}
for men, ens in self.__ranked_ens.items():
sorted_ens = sorted(ens.items(), key=lambda x: x[1], reverse=True)
linked_ens[men] = sorted_ens[0]
return linked_ens
def rank_ens(self, n):
"""Generates list of entities for each mention in the query.
The algorithm starts from the longest possible n-gram and gets all matched entities.
If no entities founs, the algorithm recurse and tries to find entities with (n-1)-gram.
:param n: length of n-gram
:return: dictionary {(dbp_uri, fb_id):commonness, ..}
"""
matched = False
for ngram in self.__ngrams[n]:
cand_ens = Mention(ngram, self.__entity, self.__cmns_th).get_cand_ens()
if len(cand_ens) > 0:
matched = True
self.__ranked_ens[ngram] = cand_ens
if (not matched) and (n > 1):
self.rank_ens(n - 1)
else:
return
def main(args):
entity = Entity()
query = Query(args[0])
cmns = Cmns(query, entity, cmns_th=0.1)
print(cmns.link())
if __name__ == "__main__":
main(sys.argv[1:]) | 0.621771 | 0.205874 |
import torch
import torch.nn.functional
from torch import nn
from typing import List, Dict, Tuple
from ..utils import build_stack_conv_layers, random_init_weights
from ..builder import HEADS, build_loss
from ...core import PointGenerator, generate_gt, assign_and_sample
from ...utils import multi_apply
@HEADS.register_module
class FCHead(nn.Module):
    """SiamFC head: turns a fused response map into classification logits."""

    def __init__(self,
                 stride: int,
                 in_channels: int,
                 scale_factor: float,
                 head_convs: Dict = None,
                 loss: Dict = None,
                 init_type: str = 'xavier_uniform'):
        """Build the head.

        Args:
            stride: feature stride of the input feature maps.
            in_channels: number of input channels.
            scale_factor: the raw cross-correlation response is large, so
                it is scaled down (typically by 0.001) before use.
            head_convs: configuration of optional post conv layers.
            loss: classification loss configuration.
            init_type: weight initialization scheme, or None to skip it.
        """
        super(FCHead, self).__init__()
        self.stride = stride
        self.in_channels = in_channels
        self.scale_factor = scale_factor
        if head_convs is None:
            # Without post convs the input must already be a 1-channel map.
            assert self.in_channels == 1
            self.post_convs = None
        else:
            self.post_convs = build_stack_conv_layers(**head_convs)
        self.score_bias = nn.Parameter(torch.zeros(1), requires_grad=True)
        self.point_gen = PointGenerator()
        self.cls_loss_obj = build_loss(loss)
        if init_type is not None:
            random_init_weights(self.modules(), init_type)

    def forward(self, fused_feat: torch.Tensor) -> torch.Tensor:
        """Scale the fused response map into classification logits.

        Args:
            fused_feat: tensor of shape [N, C, H, W] (typically [1, 1, 17, 17]).
        """
        if self.post_convs is None:
            # Bias is only added on the conv-free path, as in the original.
            return fused_feat * self.scale_factor + self.score_bias
        return self.post_convs(fused_feat) * self.scale_factor

    def loss(self,
             cls_logits: torch.Tensor,
             z_boxes: List[torch.Tensor],
             x_boxes: List[torch.Tensor],
             flags: List[torch.Tensor],
             cfg: Dict,
             **kwargs) -> Dict[str, torch.Tensor]:
        """Compute the classification loss for a batch of image pairs.

        Args:
            cls_logits: predicted logits, [N, 1, H, W].
            z_boxes: per-template ground-truth boxes, each [1, 6].
            x_boxes: per-search-region ground-truth boxes, each [K, 6].
            flags: bool tensors marking whether the search region and the
                template come from the same sequence.
            cfg: training configuration.
        """
        batch = len(x_boxes)
        gt_list = generate_gt(z_boxes, x_boxes, flags, same_category_as_positive=False)
        # Map every response-map pixel back to a search-image coordinate.
        center = (cfg.x_size - 1) / 2.0
        grid = self.point_gen.grid_points(
            (cls_logits.size(2), cls_logits.size(3)),
            stride=self.stride).type_as(cls_logits) + center
        priors = [self.point_gen.assign_prior_boxes(grid, zb[0:4].view(1, 4))
                  for zb in z_boxes]
        # Per-pixel ground-truth labels and weights.
        labels, label_weights = self.point_target(priors, gt_list, cfg.siamfc)
        cls_loss = self.cls_loss_obj(
            cls_logits.view(batch, -1),
            labels.view(batch, -1),
            label_weights.view(batch, -1),
            average_factor=batch)
        return dict(cls_loss=cls_loss)

    @staticmethod
    def point_target(prior_boxes_list: List[torch.Tensor],
                     gt_boxes_list: List[torch.Tensor],
                     cfg: Dict) -> Tuple[torch.Tensor, torch.Tensor]:
        """Build (labels, label_weights) per image and concatenate them."""
        labels_list, weights_list = multi_apply(
            FCHead.point_target_single,
            prior_boxes_list,
            gt_boxes_list,
            cfg=cfg
        )
        labels = torch.cat(list(labels_list), dim=0)
        label_weights = torch.cat(list(weights_list), dim=0)
        return labels, label_weights

    @staticmethod
    def point_target_single(prior_boxes: torch.Tensor,
                            gt_boxes: torch.Tensor,
                            cfg: Dict) -> Tuple[torch.Tensor, torch.Tensor]:
        """Assign labels and weights to the priors of one image pair.

        When ``cfg.pos_weight > 0`` the positives share a total weight of
        ``pos_weight`` and the negatives share ``1 - pos_weight`` (either
        side receives the full weight when the other is empty); otherwise
        every sampled point gets a uniform weight.
        """
        n = len(prior_boxes)
        assign_result, sampling = assign_and_sample(prior_boxes, gt_boxes, None, cfg)
        labels = prior_boxes.new_zeros(n).long()
        weights = prior_boxes.new_zeros(n).float()
        pos, neg = sampling.pos_inds, sampling.neg_inds
        if len(pos) > 0:
            labels[pos] = 1
        if cfg.pos_weight > 0:
            if len(pos) > 0:
                pos_share = cfg.pos_weight if len(neg) > 0 else 1.0
                weights[pos] = pos_share / len(pos)
            if len(neg) > 0:
                neg_share = (1 - cfg.pos_weight) if len(pos) > 0 else 1.0
                weights[neg] = neg_share / len(neg)
        else:
            # Uniform weighting over all sampled points.
            denom = max(len(pos) + len(neg), 1)
            weights[pos] = 1.0 / denom
            weights[neg] = 1.0 / denom
        return labels, weights
@HEADS.register_module
class RankFCHead(FCHead):
    """SiamFC head with an additional rank loss on the response map.

    ``head_ksize`` is accepted for backward compatibility with existing
    configs but is unused: FCHead derives its layout from ``head_convs``.
    """
    def __init__(self,
                 stride: int,
                 in_channels: int,
                 scale_factor: float,
                 head_convs: List[int] = None,
                 head_ksize: int = None,
                 loss: Dict = None,
                 rank_loss: Dict = None,
                 init_type: str = 'xavier_uniform'):
        # BUG FIX: FCHead.__init__ takes no ``head_ksize`` argument, so
        # passing it positionally raised a TypeError on construction.
        super(RankFCHead, self).__init__(stride, in_channels, scale_factor,
                                         head_convs, loss, init_type)
        self.rank_loss_obj = build_loss(rank_loss)
    def loss(self, cls_logits, z_boxes, x_boxes, flags, cfg, **kwargs):
        """Calculate classification loss plus (optionally) the rank loss."""
        num_imgs = len(x_boxes)
        gt_boxes_list = generate_gt(z_boxes, x_boxes, flags, same_category_as_positive=False)
        img_ctr = (cfg.x_size - 1) / 2.0
        # Map every response-map pixel back to a search-image coordinate.
        ctr_pts = self.point_gen.grid_points((cls_logits.size(2), cls_logits.size(3)),
                                             stride=self.stride).type_as(cls_logits) + img_ctr
        prior_boxes_list = [self.point_gen.assign_prior_boxes(ctr_pts, z_box[0:4].view(1, 4)) for z_box in z_boxes]
        labels, label_weights, metrics = self.point_target(prior_boxes_list, gt_boxes_list, cfg.siamfc)
        cls_loss = self.cls_loss_obj(cls_logits.view(num_imgs, -1),
                                     labels.view(num_imgs, -1),
                                     label_weights.view(num_imgs, -1),
                                     average_factor=num_imgs)
        losses = dict(cls_loss=cls_loss)
        if self.rank_loss_obj is not None:
            losses['rank_loss'] = self.rank_loss_obj(cls_logits.view(num_imgs, -1),
                                                     labels.view(num_imgs, -1),
                                                     metrics.view(num_imgs, -1))
        return losses
    @staticmethod
    def point_target(prior_boxes_list, gt_boxes_list, cfg):
        """Generate (labels, label_weights, metrics) for a batch."""
        # BUG FIX: must dispatch to RankFCHead.point_target_single, which
        # returns three values; FCHead.point_target_single returns only
        # two, so the original unpack into three lists raised ValueError.
        labels_list, label_weights_list, metrics_list = multi_apply(
            RankFCHead.point_target_single,
            prior_boxes_list,
            gt_boxes_list,
            cfg=cfg
        )
        # group into one
        labels = torch.cat(list(labels_list), dim=0)
        label_weights = torch.cat(list(label_weights_list), dim=0)
        metrics = torch.cat(list(metrics_list), dim=0)
        return labels, label_weights, metrics
    @staticmethod
    def point_target_single(prior_boxes, gt_boxes, cfg):
        """Generate targets for one image pair; metrics carry positive IoUs."""
        num_pts = len(prior_boxes)
        assign_result, sampling_result = assign_and_sample(prior_boxes, gt_boxes, None, cfg)
        labels = prior_boxes.new_zeros(num_pts).long()
        label_weights = prior_boxes.new_zeros(num_pts).float()
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        metrics = prior_boxes.new_zeros(num_pts)
        if len(pos_inds) > 0:
            labels[pos_inds] = 1
            # Rank loss uses each positive's assignment overlap as metric.
            metrics[pos_inds] = assign_result.max_overlaps[pos_inds]
            if cfg.pos_weight > 0:
                sum_weight = 1.0 if len(neg_inds) <= 0 else cfg.pos_weight
                label_weights[pos_inds] = sum_weight / len(pos_inds)
        if len(neg_inds) > 0:
            if cfg.pos_weight > 0:
                sum_weight = 1.0 if len(pos_inds) <= 0 else 1 - cfg.pos_weight
                label_weights[neg_inds] = (sum_weight / len(neg_inds))
        total_samples = max(len(pos_inds) + len(neg_inds), 1)
        if cfg.pos_weight <= 0:
            # Uniform weighting when no explicit positive weight is given.
            label_weights[pos_inds] = 1.0 / total_samples
            label_weights[neg_inds] = 1.0 / total_samples
        return labels, label_weights, metrics
import torch
import torch.nn.functional
from torch import nn
from typing import List, Dict, Tuple
from ..utils import build_stack_conv_layers, random_init_weights
from ..builder import HEADS, build_loss
from ...core import PointGenerator, generate_gt, assign_and_sample
from ...utils import multi_apply
@HEADS.register_module
class FCHead(nn.Module):
    """SiamFC-style classification head.

    Takes the fused template/search features (or a raw correlation response)
    and produces a single-channel classification response map, plus the
    training loss against point-wise ground-truth labels.
    """

    def __init__(self,
                 stride: int,
                 in_channels: int,
                 scale_factor: float,
                 head_convs: Dict = None,
                 loss: Dict = None,
                 init_type: str = 'xavier_uniform'):
        """ The head of SiamFC. It takes the fused features (or response map) as input and output the
        response map.

        Args:
            stride (int): feature stride of input feature maps
            in_channels (int): number of input channels,
            scale_factor (float): because the response map after cross-correlation is large, we scale them down
                by a scale factor (typically 0.001).
            head_convs (dict): head conv layers configurations
            loss (dict): classification loss configuration
            init_type (str): initialization type.
        """
        super(FCHead, self).__init__()
        self.stride = stride
        self.in_channels = in_channels
        self.scale_factor = scale_factor
        if head_convs is not None:
            self.post_convs = build_stack_conv_layers(**head_convs)
        else:
            # Without extra convs the input must already be a 1-channel response map.
            assert self.in_channels == 1
            self.post_convs = None
        # Learnable bias added to the raw (scaled) correlation response.
        self.score_bias = nn.Parameter(torch.zeros(1), requires_grad=True)
        self.point_gen = PointGenerator()
        self.cls_loss_obj = build_loss(loss)
        if init_type is not None:
            random_init_weights(self.modules(), init_type)

    def forward(self, fused_feat: torch.Tensor) -> torch.Tensor:
        """ Process the fused features (or response maps).

        Args:
            fused_feat (torch.Tensor): in shape of [N, C, H, W] (typically [1, 1, 17, 17])

        Returns:
            torch.Tensor: classification logits map.
        """
        if self.post_convs is not None:
            cls_logits = self.post_convs(fused_feat) * self.scale_factor
        else:
            cls_logits = fused_feat * self.scale_factor + self.score_bias
        return cls_logits

    def loss(self,
             cls_logits: torch.Tensor,
             z_boxes: List[torch.Tensor],
             x_boxes: List[torch.Tensor],
             flags: List[torch.Tensor],
             cfg: Dict,
             **kwargs) -> Dict[str, torch.Tensor]:
        """ Calculate the loss.

        Args:
            cls_logits (torch.Tensor): the predicted classification results, in shape of [N, 1, H, W]
            z_boxes (List[torch.Tensor]): the ground-truth boxes in template images. each element is in shape of [1, 6]
            x_boxes (List[torch.Tensor]): the ground-truth boxes in search regions. each element is in shape of [K, 6]
            flags (List[torch.Tensor]): bool tensors that denotes whether the search region and template are
                come from same sequence.
            cfg (dict): training configuration
        """
        num_imgs = len(x_boxes)
        # Generate ground-truth boxes
        gt_boxes_list = generate_gt(z_boxes, x_boxes, flags, same_category_as_positive=False)
        # Generate point coordinates for each pixel in the response map,
        # centred on the search image.
        img_ctr = (cfg.x_size - 1) / 2.0
        ctr_pts = self.point_gen.grid_points((cls_logits.size(2), cls_logits.size(3)),
                                             stride=self.stride).type_as(cls_logits) + img_ctr
        prior_boxes_list = [self.point_gen.assign_prior_boxes(ctr_pts, z_box[0:4].view(1, 4)) for z_box in z_boxes]
        # Generate ground-truth labels for each pixel.
        labels, label_weights = self.point_target(prior_boxes_list, gt_boxes_list, cfg.siamfc)
        cls_loss = self.cls_loss_obj(cls_logits.view(num_imgs, -1),
                                     labels.view(num_imgs, -1),
                                     label_weights.view(num_imgs, -1),
                                     average_factor=num_imgs)
        return dict(cls_loss=cls_loss)

    @staticmethod
    def point_target(prior_boxes_list: List[torch.Tensor],
                     gt_boxes_list: List[torch.Tensor],
                     cfg: Dict) -> Tuple[torch.Tensor, torch.Tensor]:
        """ Generate ground-truth target for each element.

        Args:
            prior_boxes_list (List[torch.Tensor]): each element in the list is a tensor in shape of [N, 4]
            gt_boxes_list (List[torch.Tensor]): ground-truth boxes list.
            cfg (dict): training configurations.
        """
        labels_list, label_weights_list = multi_apply(
            FCHead.point_target_single,
            prior_boxes_list,
            gt_boxes_list,
            cfg=cfg
        )
        # Group the per-image results into one tensor. (torch.cat accepts the
        # lists directly; the former `[_ for _ in ...]` comprehensions were
        # no-op copies.)
        labels = torch.cat(labels_list, dim=0)
        label_weights = torch.cat(label_weights_list, dim=0)
        return labels, label_weights

    @staticmethod
    def point_target_single(prior_boxes: torch.Tensor,
                            gt_boxes: torch.Tensor,
                            cfg: Dict) -> Tuple[torch.Tensor, torch.Tensor]:
        """ Generate ground-truth target for single image pair. """
        num_pts = len(prior_boxes)
        assign_result, sampling_result = assign_and_sample(prior_boxes, gt_boxes, None, cfg)
        labels = prior_boxes.new_zeros(num_pts).long()
        label_weights = prior_boxes.new_zeros(num_pts).float()
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        if len(pos_inds) > 0:
            labels[pos_inds] = 1
            if cfg.pos_weight > 0:
                # With no negatives, positives carry the full weight budget.
                sum_weight = 1.0 if len(neg_inds) <= 0 else cfg.pos_weight
                label_weights[pos_inds] = sum_weight / len(pos_inds)
        if len(neg_inds) > 0:
            if cfg.pos_weight > 0:
                sum_weight = 1.0 if len(pos_inds) <= 0 else 1 - cfg.pos_weight
                label_weights[neg_inds] = (sum_weight / len(neg_inds))
        total_samples = max(len(pos_inds) + len(neg_inds), 1)
        if cfg.pos_weight <= 0:
            # Non-positive pos_weight means uniform weighting over all samples.
            label_weights[pos_inds] = 1.0 / total_samples
            label_weights[neg_inds] = 1.0 / total_samples
        return labels, label_weights
@HEADS.register_module
class RankFCHead(FCHead):
def __init__(self,
stride: int,
in_channels: int,
scale_factor: float,
head_convs: List[int] = None,
head_ksize: int = None,
loss: Dict = None,
rank_loss: Dict = None,
init_type: str = 'xavier_uniform'):
super(RankFCHead, self).__init__(stride, in_channels, scale_factor, head_convs, head_ksize, loss, init_type)
self.rank_loss_obj = build_loss(rank_loss)
def loss(self, cls_logits, z_boxes, x_boxes, flags, cfg, **kwargs):
""" Calculate the loss. """
num_imgs = len(x_boxes)
gt_boxes_list = generate_gt(z_boxes, x_boxes, flags, same_category_as_positive=False)
img_ctr = (cfg.x_size - 1) / 2.0
# generator center point list
ctr_pts = self.point_gen.grid_points((cls_logits.size(2), cls_logits.size(3)),
stride=self.stride).type_as(cls_logits) + img_ctr
prior_boxes_list = [self.point_gen.assign_prior_boxes(ctr_pts, z_box[0:4].view(1, 4)) for z_box in z_boxes]
labels, label_weights, metrics = self.point_target(prior_boxes_list, gt_boxes_list, cfg.siamfc)
cls_loss = self.cls_loss_obj(cls_logits.view(num_imgs, -1),
labels.view(num_imgs, -1),
label_weights.view(num_imgs, -1),
average_factor=num_imgs)
losses = dict(cls_loss=cls_loss)
if self.rank_loss_obj is not None:
losses['rank_loss'] = self.rank_loss_obj(cls_logits.view(num_imgs, -1),
labels.view(num_imgs, -1),
metrics.view(num_imgs, -1))
return losses
@staticmethod
def point_target(prior_boxes_list, gt_boxes_list, cfg):
labels_list, label_weights_list, metrics_list = multi_apply(
FCHead.point_target_single,
prior_boxes_list,
gt_boxes_list,
cfg=cfg
)
# group into one
labels = torch.cat([_ for _ in labels_list], dim=0)
label_weights = torch.cat([_ for _ in label_weights_list], dim=0)
metrics = torch.cat([_ for _ in metrics_list], dim=0)
return labels, label_weights, metrics
    @staticmethod
    def point_target_single(prior_boxes, gt_boxes, cfg):
        """Generate labels, weights and ranking metrics for one image pair.

        Like FCHead.point_target_single, but also records each positive
        point's overlap with its assigned ground-truth box ('metrics'),
        which feeds the ranking loss.
        """
        num_pts = len(prior_boxes)
        assign_result, sampling_result = assign_and_sample(prior_boxes, gt_boxes, None, cfg)
        labels = prior_boxes.new_zeros(num_pts).long()
        label_weights = prior_boxes.new_zeros(num_pts).float()
        pos_inds = sampling_result.pos_inds
        neg_inds = sampling_result.neg_inds
        metrics = prior_boxes.new_zeros(num_pts)
        if len(pos_inds) > 0:
            labels[pos_inds] = 1
            # Record the IoU with the matched GT box for each positive point.
            metrics[pos_inds] = assign_result.max_overlaps[pos_inds]
            if cfg.pos_weight > 0:
                # With no negatives, positives carry the full weight budget.
                sum_weight = 1.0 if len(neg_inds) <= 0 else cfg.pos_weight
                label_weights[pos_inds] = sum_weight / len(pos_inds)
        if len(neg_inds) > 0:
            if cfg.pos_weight > 0:
                sum_weight = 1.0 if len(pos_inds) <= 0 else 1 - cfg.pos_weight
                label_weights[neg_inds] = (sum_weight / len(neg_inds))
        total_samples = max(len(pos_inds) + len(neg_inds), 1)
        if cfg.pos_weight <= 0:
            # Non-positive pos_weight means uniform weighting over all samples.
            label_weights[pos_inds] = 1.0 / total_samples
            label_weights[neg_inds] = 1.0 / total_samples
return labels, label_weights, metrics | 0.953697 | 0.58602 |
import pandas as pd
from woodwork.exceptions import TypeConversionError
from woodwork.logical_types import Datetime, LatLong, Ordinal
from woodwork.type_sys.utils import _get_ltype_class
from woodwork.utils import (
_get_column_logical_type,
_reformat_to_latlong,
import_or_none
)
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def init_series(series, logical_type=None, semantic_tags=None,
                use_standard_tags=True, description=None, metadata=None):
    """Return a new series with Woodwork typing information initialized.

    The returned series' dtype is converted to the dtype associated with its
    LogicalType (inferred when ``logical_type`` is not given).

    Args:
        series (pd.Series, dd.Series, or ks.Series): The original series from
            which to create the Woodwork initialized series.
        logical_type (LogicalType or str, optional): Logical type to assign;
            inferred from the data when omitted.
        semantic_tags (str or list or set, optional): Semantic tags to assign.
            A single string sets one tag; a list/set sets several.
        use_standard_tags (bool, optional): When True, add the logical type's
            standard semantic tags. Defaults to True.
        description (str, optional): Text describing the series' contents.
        metadata (dict[str -> json serializable], optional): Series metadata.

    Returns:
        Series: A series with Woodwork typing information initialized
    """
    resolved_ltype = _get_column_logical_type(series, logical_type, series.name)
    converted = _update_column_dtype(series, resolved_ltype)
    converted.ww.init(logical_type=resolved_ltype,
                      semantic_tags=semantic_tags,
                      use_standard_tags=use_standard_tags,
                      description=description,
                      metadata=metadata)
    return converted
def _update_column_dtype(series, logical_type):
    """Update the dtype of the underlying series to match the dtype corresponding
    to the LogicalType for the column.

    Args:
        series (pd.Series, dd.Series, or ks.Series): series whose dtype should
            be aligned with ``logical_type``.
        logical_type (LogicalType): logical type whose primary (or backup)
            dtype the series should be converted to.

    Returns:
        Series: the converted series (may be a new object).

    Raises:
        TypeConversionError: if the data cannot be converted to the required dtype.
    """
    if isinstance(logical_type, Ordinal):
        # Ordinal carries an explicit value ordering; confirm the data only
        # contains values from that ordering before any dtype change.
        logical_type._validate_data(series)
    if _get_ltype_class(logical_type) == LatLong:
        # Reformat LatLong columns to be a length two tuple (or list for Koalas) of floats
        if dd and isinstance(series, dd.Series):
            name = series.name
            # Dask requires explicit output metadata for apply; restore the
            # series name afterwards since apply can drop it.
            meta = (series, tuple([float, float]))
            series = series.apply(_reformat_to_latlong, meta=meta)
            series.name = name
        elif ks and isinstance(series, ks.Series):
            # Koalas cannot hold tuples, so round-trip through pandas and
            # store each LatLong value as a two-element list instead.
            formatted_series = series.to_pandas().apply(_reformat_to_latlong, use_list=True)
            series = ks.from_pandas(formatted_series)
        else:
            series = series.apply(_reformat_to_latlong)
    new_dtype = _get_valid_dtype(type(series), logical_type)
    if new_dtype != str(series.dtype):
        # Update the underlying series
        error_msg = f'Error converting datatype for {series.name} from type {str(series.dtype)} ' \
                    f'to type {new_dtype}. Please confirm the underlying data is consistent with ' \
                    f'logical type {logical_type}.'
        try:
            if _get_ltype_class(logical_type) == Datetime:
                # Datetime conversion goes through the backend-specific
                # to_datetime helper so logical_type.datetime_format is honored.
                if dd and isinstance(series, dd.Series):
                    name = series.name
                    series = dd.to_datetime(series, format=logical_type.datetime_format)
                    series.name = name
                elif ks and isinstance(series, ks.Series):
                    series = ks.Series(ks.to_datetime(series.to_numpy(),
                                                      format=logical_type.datetime_format),
                                       name=series.name)
                else:
                    series = pd.to_datetime(series, format=logical_type.datetime_format)
            else:
                series = series.astype(new_dtype)
            if str(series.dtype) != new_dtype:
                # Catch conditions when pandas does not error but did not
                # convert to the specified dtype (example: 'category' -> 'bool')
                raise TypeConversionError(error_msg)
        except (TypeError, ValueError):
            raise TypeConversionError(error_msg)
    return series
def _is_series(data):
    """Return True if ``data`` is a pandas, Dask, or Koalas Series."""
    series_types = [pd.Series]
    if dd:
        series_types.append(dd.Series)
    if ks:
        series_types.append(ks.Series)
    return isinstance(data, tuple(series_types))
def _is_dataframe(data):
    """Return True if ``data`` is a pandas, Dask, or Koalas DataFrame."""
    frame_types = [pd.DataFrame]
    if dd:
        frame_types.append(dd.DataFrame)
    if ks:
        frame_types.append(ks.DataFrame)
    return isinstance(data, tuple(frame_types))
def _get_valid_dtype(series_type, logical_type):
    """Return the dtype that is considered valid for a series
    with the given logical_type.

    Koalas series use the logical type's backup dtype when one is defined;
    all other backends use the primary dtype.
    """
    if ks and series_type == ks.Series and logical_type.backup_dtype:
        return logical_type.backup_dtype
    return logical_type.primary_dtype
def get_invalid_schema_message(dataframe, schema):
    """Return a message indicating the reason that the provided schema cannot be used to
    initialize Woodwork on the dataframe. If the schema is valid for the dataframe,
    None will be returned.

    Args:
        dataframe (DataFrame): The dataframe against which to check the schema.
        schema (ww.TableSchema): The schema to use in the validity check.

    Returns:
        str or None: The reason that the schema is invalid for the dataframe
    """
    dataframe_cols = set(dataframe.columns)
    schema_cols = set(schema.columns.keys())
    # The column sets must match exactly in both directions.
    df_cols_not_in_schema = dataframe_cols - schema_cols
    if df_cols_not_in_schema:
        return f'The following columns in the DataFrame were missing from the typing information: '\
            f'{df_cols_not_in_schema}'
    schema_cols_not_in_df = schema_cols - dataframe_cols
    if schema_cols_not_in_df:
        return f'The following columns in the typing information were missing from the DataFrame: '\
            f'{schema_cols_not_in_df}'
    # Each column's physical dtype must match the dtype its logical type expects.
    for name in dataframe.columns:
        df_dtype = dataframe[name].dtype
        valid_dtype = _get_valid_dtype(type(dataframe[name]), schema.logical_types[name])
        if str(df_dtype) != valid_dtype:
            return f'dtype mismatch for column {name} between DataFrame dtype, '\
                f'{df_dtype}, and {schema.logical_types[name]} dtype, {valid_dtype}'
    if schema.index is not None and isinstance(dataframe, pd.DataFrame):
        # Index validation not performed for Dask/Koalas
        # The DataFrame's index must mirror the index column's values exactly,
        # and the index column must be unique.
        if not pd.Series(dataframe.index, dtype=dataframe[schema.index].dtype).equals(pd.Series(dataframe[schema.index].values)):
            return 'Index mismatch between DataFrame and typing information'
        elif not dataframe[schema.index].is_unique:
            return 'Index column is not unique'
    # Implicitly returns None when the schema is valid for the dataframe.
def is_schema_valid(dataframe, schema):
    """Check if a schema is valid for initializing Woodwork on a dataframe

    Args:
        dataframe (DataFrame): The dataframe against which to check the schema.
        schema (ww.TableSchema): The schema to use in the validity check.

    Returns:
        boolean: Boolean indicating whether the schema is valid for the dataframe
    """
    invalid_schema_message = get_invalid_schema_message(dataframe, schema)
    # A non-None message describes why the schema is invalid.
    if invalid_schema_message:
        return False
return True | woodwork/accessor_utils.py | import pandas as pd
from woodwork.exceptions import TypeConversionError
from woodwork.logical_types import Datetime, LatLong, Ordinal
from woodwork.type_sys.utils import _get_ltype_class
from woodwork.utils import (
_get_column_logical_type,
_reformat_to_latlong,
import_or_none
)
dd = import_or_none('dask.dataframe')
ks = import_or_none('databricks.koalas')
def init_series(series, logical_type=None, semantic_tags=None,
                use_standard_tags=True, description=None, metadata=None):
    """Return a new series with Woodwork typing information initialized.

    The returned series' dtype is converted to the dtype associated with its
    LogicalType (inferred when ``logical_type`` is not given).

    Args:
        series (pd.Series, dd.Series, or ks.Series): The original series from
            which to create the Woodwork initialized series.
        logical_type (LogicalType or str, optional): Logical type to assign;
            inferred from the data when omitted.
        semantic_tags (str or list or set, optional): Semantic tags to assign.
            A single string sets one tag; a list/set sets several.
        use_standard_tags (bool, optional): When True, add the logical type's
            standard semantic tags. Defaults to True.
        description (str, optional): Text describing the series' contents.
        metadata (dict[str -> json serializable], optional): Series metadata.

    Returns:
        Series: A series with Woodwork typing information initialized
    """
    resolved_ltype = _get_column_logical_type(series, logical_type, series.name)
    converted = _update_column_dtype(series, resolved_ltype)
    converted.ww.init(logical_type=resolved_ltype,
                      semantic_tags=semantic_tags,
                      use_standard_tags=use_standard_tags,
                      description=description,
                      metadata=metadata)
    return converted
def _update_column_dtype(series, logical_type):
    """Update the dtype of the underlying series to match the dtype corresponding
    to the LogicalType for the column.

    Args:
        series (pd.Series, dd.Series, or ks.Series): series whose dtype should
            be aligned with ``logical_type``.
        logical_type (LogicalType): logical type whose primary (or backup)
            dtype the series should be converted to.

    Returns:
        Series: the converted series (may be a new object).

    Raises:
        TypeConversionError: if the data cannot be converted to the required dtype.
    """
    if isinstance(logical_type, Ordinal):
        # Ordinal carries an explicit value ordering; confirm the data only
        # contains values from that ordering before any dtype change.
        logical_type._validate_data(series)
    if _get_ltype_class(logical_type) == LatLong:
        # Reformat LatLong columns to be a length two tuple (or list for Koalas) of floats
        if dd and isinstance(series, dd.Series):
            name = series.name
            # Dask requires explicit output metadata for apply; restore the
            # series name afterwards since apply can drop it.
            meta = (series, tuple([float, float]))
            series = series.apply(_reformat_to_latlong, meta=meta)
            series.name = name
        elif ks and isinstance(series, ks.Series):
            # Koalas cannot hold tuples, so round-trip through pandas and
            # store each LatLong value as a two-element list instead.
            formatted_series = series.to_pandas().apply(_reformat_to_latlong, use_list=True)
            series = ks.from_pandas(formatted_series)
        else:
            series = series.apply(_reformat_to_latlong)
    new_dtype = _get_valid_dtype(type(series), logical_type)
    if new_dtype != str(series.dtype):
        # Update the underlying series
        error_msg = f'Error converting datatype for {series.name} from type {str(series.dtype)} ' \
                    f'to type {new_dtype}. Please confirm the underlying data is consistent with ' \
                    f'logical type {logical_type}.'
        try:
            if _get_ltype_class(logical_type) == Datetime:
                # Datetime conversion goes through the backend-specific
                # to_datetime helper so logical_type.datetime_format is honored.
                if dd and isinstance(series, dd.Series):
                    name = series.name
                    series = dd.to_datetime(series, format=logical_type.datetime_format)
                    series.name = name
                elif ks and isinstance(series, ks.Series):
                    series = ks.Series(ks.to_datetime(series.to_numpy(),
                                                      format=logical_type.datetime_format),
                                       name=series.name)
                else:
                    series = pd.to_datetime(series, format=logical_type.datetime_format)
            else:
                series = series.astype(new_dtype)
            if str(series.dtype) != new_dtype:
                # Catch conditions when pandas does not error but did not
                # convert to the specified dtype (example: 'category' -> 'bool')
                raise TypeConversionError(error_msg)
        except (TypeError, ValueError):
            raise TypeConversionError(error_msg)
    return series
def _is_series(data):
    """Return True if ``data`` is a pandas, Dask, or Koalas Series."""
    series_types = [pd.Series]
    if dd:
        series_types.append(dd.Series)
    if ks:
        series_types.append(ks.Series)
    return isinstance(data, tuple(series_types))
def _is_dataframe(data):
    """Return True if ``data`` is a pandas, Dask, or Koalas DataFrame."""
    frame_types = [pd.DataFrame]
    if dd:
        frame_types.append(dd.DataFrame)
    if ks:
        frame_types.append(ks.DataFrame)
    return isinstance(data, tuple(frame_types))
def _get_valid_dtype(series_type, logical_type):
    """Return the dtype that is considered valid for a series
    with the given logical_type.

    Koalas series use the logical type's backup dtype when one is defined;
    all other backends use the primary dtype.
    """
    if ks and series_type == ks.Series and logical_type.backup_dtype:
        return logical_type.backup_dtype
    return logical_type.primary_dtype
def get_invalid_schema_message(dataframe, schema):
    """Return a message indicating the reason that the provided schema cannot be used to
    initialize Woodwork on the dataframe. If the schema is valid for the dataframe,
    None will be returned.

    Args:
        dataframe (DataFrame): The dataframe against which to check the schema.
        schema (ww.TableSchema): The schema to use in the validity check.

    Returns:
        str or None: The reason that the schema is invalid for the dataframe
    """
    dataframe_cols = set(dataframe.columns)
    schema_cols = set(schema.columns.keys())
    # The column sets must match exactly in both directions.
    df_cols_not_in_schema = dataframe_cols - schema_cols
    if df_cols_not_in_schema:
        return f'The following columns in the DataFrame were missing from the typing information: '\
            f'{df_cols_not_in_schema}'
    schema_cols_not_in_df = schema_cols - dataframe_cols
    if schema_cols_not_in_df:
        return f'The following columns in the typing information were missing from the DataFrame: '\
            f'{schema_cols_not_in_df}'
    # Each column's physical dtype must match the dtype its logical type expects.
    for name in dataframe.columns:
        df_dtype = dataframe[name].dtype
        valid_dtype = _get_valid_dtype(type(dataframe[name]), schema.logical_types[name])
        if str(df_dtype) != valid_dtype:
            return f'dtype mismatch for column {name} between DataFrame dtype, '\
                f'{df_dtype}, and {schema.logical_types[name]} dtype, {valid_dtype}'
    if schema.index is not None and isinstance(dataframe, pd.DataFrame):
        # Index validation not performed for Dask/Koalas
        # The DataFrame's index must mirror the index column's values exactly,
        # and the index column must be unique.
        if not pd.Series(dataframe.index, dtype=dataframe[schema.index].dtype).equals(pd.Series(dataframe[schema.index].values)):
            return 'Index mismatch between DataFrame and typing information'
        elif not dataframe[schema.index].is_unique:
            return 'Index column is not unique'
    # Implicitly returns None when the schema is valid for the dataframe.
def is_schema_valid(dataframe, schema):
    """Check if a schema is valid for initializing Woodwork on a dataframe

    Args:
        dataframe (DataFrame): The dataframe against which to check the schema.
        schema (ww.TableSchema): The schema to use in the validity check.

    Returns:
        boolean: Boolean indicating whether the schema is valid for the dataframe
    """
    invalid_schema_message = get_invalid_schema_message(dataframe, schema)
    # A non-None message describes why the schema is invalid.
    if invalid_schema_message:
        return False
return True | 0.795301 | 0.425247 |
import model_wrapper as mw
import torch
from torchvision import datasets, transforms, models
from torch import nn, optim
import torch.nn.functional as F
from collections import OrderedDict
from torch.optim import lr_scheduler
from PIL import Image
import json
import pprint
def train(mw, epochs, gpu, learning_rate):
    """Train the classifier portion of the wrapped model.

    Args:
        mw: ModelWrapper holding the model and the 'train'/'valid' dataloaders.
            (NOTE: this parameter shadows the module-level `model_wrapper as mw`
            import inside this function.)
        epochs (int): number of passes over the training dataloader.
        gpu (bool): train on "cuda" when True, otherwise on "cpu".
            NOTE(review): assumes CUDA is actually available when gpu=True —
            there is no torch.cuda.is_available() check; confirm callers.
        learning_rate (str or float): learning rate for the Adam optimizer.
    """
    print('Training the model with '+str(epochs)+' epochs and with GPU = '+str(gpu))
    # NLLLoss pairs with a model that outputs log-probabilities
    # (see the torch.exp() on the logits below).
    criterion = nn.NLLLoss()
    # Only train the classifier parameters, feature parameters are frozen
    learn_rate = float(learning_rate)
    optimizer = optim.Adam(mw.model.classifier.parameters(), lr=learn_rate)
    # Stash the optimizer on the model so it can be checkpointed later.
    mw.model.optimizer = optimizer
    device = "cuda"
    if gpu == False:
        device = "cpu"
    mw.model.to(device)
    steps = 0
    running_loss = 0
    print_every = 5  # validate and report every 5 training batches
    for epoch in range(epochs):
        for inputs, labels in mw.dataloaders['train']:
            steps += 1
            # Move input and label tensors to the default device
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            logps = mw.model.forward(inputs)
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
                test_loss = 0
                accuracy = 0
                # Switch to eval mode (disables dropout) for validation.
                mw.model.eval()
                with torch.no_grad():
                    for inputs, labels in mw.dataloaders['valid']:
                        inputs, labels = inputs.to(device), labels.to(device)
                        logps = mw.model.forward(inputs)
                        batch_loss = criterion(logps, labels)
                        test_loss += batch_loss.item()
                        # Calculate accuracy
                        ps = torch.exp(logps)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                # Rubric Section - Validation Loss and Accuracy
                # During training, the validation loss and accuracy are displayed
                print(f"Epoch {epoch+1}/{epochs}.. "
                      f"Train loss: {running_loss/print_every:.3f}.. "
                      f"Valid loss: {test_loss/len(mw.dataloaders['valid']):.3f}.. "
                      f"Valid accuracy: {accuracy/len(mw.dataloaders['valid']):.3f}")
                running_loss = 0
                # Back to training mode for the next batches.
                mw.model.train()
    return
def test(mw, gpu):
    """Evaluate the wrapped model on the 'test' dataloader and print accuracy.

    Args:
        mw: ModelWrapper holding the model and the 'test' dataloader.
        gpu (bool): send tensors to "cuda" when True, otherwise "cpu".
            NOTE(review): the model itself is not moved to a device here —
            this assumes it is already on the matching device (e.g. from
            train()); confirm callers.
    """
    print('Test run with GPU = '+str(gpu))
    accuracy = 0
    test_loss = 0  # accumulated below, but only accuracy is reported
    criterion = nn.NLLLoss()
    device = "cuda"
    if gpu == False:
        device = "cpu"
    mw.model.eval()
    with torch.no_grad():
        for inputs, labels in mw.dataloaders['test']:
            inputs, labels = inputs.to(device), labels.to(device)
            logps = mw.model.forward(inputs)
            batch_loss = criterion(logps, labels)
            test_loss += batch_loss.item()
            # Calculate accuracy
            ps = torch.exp(logps)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    print(f"Test accuracy: {accuracy/len(mw.dataloaders['test']):.3f}"
          )
    # Restore training mode.
    mw.model.train()
    return
def loadModel(checkpoint, category_names):
    """Rebuild a ModelWrapper from a saved checkpoint file.

    Args:
        checkpoint (str): path to a torch-saved dict with keys 'arch',
            'classifier_layers', 'class_to_idx', 'imagenet_means',
            'imagenet_stdevs', 'state_dict', 'lr' and 'optimizer_state_dict'.
        category_names (str): path to a JSON file mapping class ids to names.

    Returns:
        ModelWrapper: wrapper with the restored model, optimizer and mappings.
    """
    print('Loading checkpoint from '+checkpoint)
    model_2_return = None
    chpt = torch.load(checkpoint)
    model_2_return = mw.ModelWrapper(chpt['arch'])
    model_2_return.freeze_params()
    # 'classifier_layers' includes the input and output sizes; keep only the
    # hidden sizes in between.
    model_2_return.hidden_layers = chpt['classifier_layers'][1:len(chpt['classifier_layers'])-1]
    model_2_return.create_classifier(get_layers_as_comma_sep_string(model_2_return.hidden_layers))
    model_2_return.class_to_idx = chpt['class_to_idx']
    model_2_return.imagenet_means = chpt['imagenet_means']
    model_2_return.imagenet_stdevs = chpt['imagenet_stdevs']
    model_2_return.model.load_state_dict(chpt['state_dict'])
    # Recreate the optimizer with the saved learning rate, then restore its state.
    model_2_return.model.optimizer = optim.Adam(model_2_return.model.classifier.parameters(), lr=float(chpt['lr']))
    model_2_return.model.optimizer.load_state_dict(chpt['optimizer_state_dict'])
    with open(category_names, 'r') as f:
        model_2_return.cat_to_name = json.load(f)
    return model_2_return
def get_layers_as_comma_sep_string(hidden_layers):
    """Return the hidden layer sizes joined by commas, e.g. [512, 256] -> '512,256'.

    Args:
        hidden_layers (list): hidden layer sizes.

    Returns:
        str: comma-separated sizes with no trailing comma ('' for an empty list).
    """
    # BUG FIX: the old loop tested `count < len(hidden_layers)`, which is true
    # for every element, so a trailing comma was appended after the last size
    # (e.g. '512,256,'). str.join places separators only between elements.
    return ','.join(str(layer) for layer in hidden_layers)
def process_image(image, imagenet_means, imagenet_stdevs):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Tensor with a leading batch dimension.
    '''
    # Standard inference pipeline: resize, center-crop to 224, tensorize,
    # then normalize with the supplied ImageNet statistics.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_means, imagenet_stdevs),
    ])
    # Add the batch dimension expected by the network: [C,H,W] -> [1,C,H,W].
    return pipeline(image).unsqueeze(0)
def predict(image_path, mw, top_k, gpu):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Args:
        image_path (str): path to the image file to classify.
        mw: ModelWrapper with the trained model and class/category mappings.
        top_k (int): number of top predictions to return.
        gpu (bool): run the forward pass on "cuda" when True, else on "cpu".

    Returns:
        tuple: (top_probs, top_label_names, top_flower_names), lists of length top_k.
    '''
    print('Prediction run with GPU = '+str(gpu))
    img = Image.open(image_path)
    # Convert image to a tensor
    img = process_image(img,mw.imagenet_means,mw.imagenet_stdevs)
    device = "cuda"
    if gpu == False:
        device = "cpu"
        img = img.cpu()
    else:
        img = img.cuda()
    mw.model.to(device)
    mw.model.eval()
    # Send the image tensor in a forward pass and get the prob dist
    # (the model outputs log-probabilities, so exp() recovers probabilities).
    probs = torch.exp(mw.model.forward(img))
    # Using the top_k arg get the top probabilities and labels
    top_probs, top_labs = probs.topk(top_k)
    #Bring to CPU if GPU is enabled
    if gpu == True:
        top_probs = top_probs.cpu()
        top_labs = top_labs.cpu()
    # Do a numpy conversion
    top_probs = top_probs.detach().numpy().tolist()[0]
    top_labs = top_labs.detach().numpy().tolist()[0]
    # Convert indices to classes reversing the json read
    idx_to_class = {val: key for key, val in
                    mw.class_to_idx.items()}
    # Map model output indices to dataset class labels, then to flower names.
    top_label_names = [idx_to_class[lab] for lab in top_labs]
    top_flower_names = [mw.cat_to_name[idx_to_class[lab]] for lab in top_labs]
return top_probs, top_label_names, top_flower_names | img_classifier_utils.py | import model_wrapper as mw
import torch
from torchvision import datasets, transforms, models
from torch import nn, optim
import torch.nn.functional as F
from collections import OrderedDict
from torch.optim import lr_scheduler
from PIL import Image
import json
import pprint
def train(mw, epochs, gpu, learning_rate):
    """Train the classifier portion of the wrapped model.

    Args:
        mw: ModelWrapper holding the model and the 'train'/'valid' dataloaders.
            (NOTE: this parameter shadows the module-level `model_wrapper as mw`
            import inside this function.)
        epochs (int): number of passes over the training dataloader.
        gpu (bool): train on "cuda" when True, otherwise on "cpu".
            NOTE(review): assumes CUDA is actually available when gpu=True —
            there is no torch.cuda.is_available() check; confirm callers.
        learning_rate (str or float): learning rate for the Adam optimizer.
    """
    print('Training the model with '+str(epochs)+' epochs and with GPU = '+str(gpu))
    # NLLLoss pairs with a model that outputs log-probabilities
    # (see the torch.exp() on the logits below).
    criterion = nn.NLLLoss()
    # Only train the classifier parameters, feature parameters are frozen
    learn_rate = float(learning_rate)
    optimizer = optim.Adam(mw.model.classifier.parameters(), lr=learn_rate)
    # Stash the optimizer on the model so it can be checkpointed later.
    mw.model.optimizer = optimizer
    device = "cuda"
    if gpu == False:
        device = "cpu"
    mw.model.to(device)
    steps = 0
    running_loss = 0
    print_every = 5  # validate and report every 5 training batches
    for epoch in range(epochs):
        for inputs, labels in mw.dataloaders['train']:
            steps += 1
            # Move input and label tensors to the default device
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            logps = mw.model.forward(inputs)
            loss = criterion(logps, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
                test_loss = 0
                accuracy = 0
                # Switch to eval mode (disables dropout) for validation.
                mw.model.eval()
                with torch.no_grad():
                    for inputs, labels in mw.dataloaders['valid']:
                        inputs, labels = inputs.to(device), labels.to(device)
                        logps = mw.model.forward(inputs)
                        batch_loss = criterion(logps, labels)
                        test_loss += batch_loss.item()
                        # Calculate accuracy
                        ps = torch.exp(logps)
                        top_p, top_class = ps.topk(1, dim=1)
                        equals = top_class == labels.view(*top_class.shape)
                        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
                # Rubric Section - Validation Loss and Accuracy
                # During training, the validation loss and accuracy are displayed
                print(f"Epoch {epoch+1}/{epochs}.. "
                      f"Train loss: {running_loss/print_every:.3f}.. "
                      f"Valid loss: {test_loss/len(mw.dataloaders['valid']):.3f}.. "
                      f"Valid accuracy: {accuracy/len(mw.dataloaders['valid']):.3f}")
                running_loss = 0
                # Back to training mode for the next batches.
                mw.model.train()
    return
def test(mw, gpu):
    """Evaluate the wrapped model on the 'test' dataloader and print accuracy.

    Args:
        mw: ModelWrapper holding the model and the 'test' dataloader.
        gpu (bool): send tensors to "cuda" when True, otherwise "cpu".
            NOTE(review): the model itself is not moved to a device here —
            this assumes it is already on the matching device (e.g. from
            train()); confirm callers.
    """
    print('Test run with GPU = '+str(gpu))
    accuracy = 0
    test_loss = 0  # accumulated below, but only accuracy is reported
    criterion = nn.NLLLoss()
    device = "cuda"
    if gpu == False:
        device = "cpu"
    mw.model.eval()
    with torch.no_grad():
        for inputs, labels in mw.dataloaders['test']:
            inputs, labels = inputs.to(device), labels.to(device)
            logps = mw.model.forward(inputs)
            batch_loss = criterion(logps, labels)
            test_loss += batch_loss.item()
            # Calculate accuracy
            ps = torch.exp(logps)
            top_p, top_class = ps.topk(1, dim=1)
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    print(f"Test accuracy: {accuracy/len(mw.dataloaders['test']):.3f}"
          )
    # Restore training mode.
    mw.model.train()
    return
def loadModel(checkpoint, category_names):
    """Rebuild a ModelWrapper from a saved checkpoint file.

    Args:
        checkpoint (str): path to a torch-saved dict with keys 'arch',
            'classifier_layers', 'class_to_idx', 'imagenet_means',
            'imagenet_stdevs', 'state_dict', 'lr' and 'optimizer_state_dict'.
        category_names (str): path to a JSON file mapping class ids to names.

    Returns:
        ModelWrapper: wrapper with the restored model, optimizer and mappings.
    """
    print('Loading checkpoint from '+checkpoint)
    model_2_return = None
    chpt = torch.load(checkpoint)
    model_2_return = mw.ModelWrapper(chpt['arch'])
    model_2_return.freeze_params()
    # 'classifier_layers' includes the input and output sizes; keep only the
    # hidden sizes in between.
    model_2_return.hidden_layers = chpt['classifier_layers'][1:len(chpt['classifier_layers'])-1]
    model_2_return.create_classifier(get_layers_as_comma_sep_string(model_2_return.hidden_layers))
    model_2_return.class_to_idx = chpt['class_to_idx']
    model_2_return.imagenet_means = chpt['imagenet_means']
    model_2_return.imagenet_stdevs = chpt['imagenet_stdevs']
    model_2_return.model.load_state_dict(chpt['state_dict'])
    # Recreate the optimizer with the saved learning rate, then restore its state.
    model_2_return.model.optimizer = optim.Adam(model_2_return.model.classifier.parameters(), lr=float(chpt['lr']))
    model_2_return.model.optimizer.load_state_dict(chpt['optimizer_state_dict'])
    with open(category_names, 'r') as f:
        model_2_return.cat_to_name = json.load(f)
    return model_2_return
def get_layers_as_comma_sep_string(hidden_layers):
    """Return the hidden layer sizes joined by commas, e.g. [512, 256] -> '512,256'.

    Args:
        hidden_layers (list): hidden layer sizes.

    Returns:
        str: comma-separated sizes with no trailing comma ('' for an empty list).
    """
    # BUG FIX: the old loop tested `count < len(hidden_layers)`, which is true
    # for every element, so a trailing comma was appended after the last size
    # (e.g. '512,256,'). str.join places separators only between elements.
    return ','.join(str(layer) for layer in hidden_layers)
def process_image(image, imagenet_means, imagenet_stdevs):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a Tensor with a leading batch dimension.
    '''
    # Standard inference pipeline: resize, center-crop to 224, tensorize,
    # then normalize with the supplied ImageNet statistics.
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_means, imagenet_stdevs),
    ])
    # Add the batch dimension expected by the network: [C,H,W] -> [1,C,H,W].
    return pipeline(image).unsqueeze(0)
def predict(image_path, mw, top_k, gpu):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Args:
        image_path (str): path to the image file to classify.
        mw: ModelWrapper with the trained model and class/category mappings.
        top_k (int): number of top predictions to return.
        gpu (bool): run the forward pass on "cuda" when True, else on "cpu".

    Returns:
        tuple: (top_probs, top_label_names, top_flower_names), lists of length top_k.
    '''
    print('Prediction run with GPU = '+str(gpu))
    img = Image.open(image_path)
    # Convert image to a tensor
    img = process_image(img,mw.imagenet_means,mw.imagenet_stdevs)
    device = "cuda"
    if gpu == False:
        device = "cpu"
        img = img.cpu()
    else:
        img = img.cuda()
    mw.model.to(device)
    mw.model.eval()
    # Send the image tensor in a forward pass and get the prob dist
    # (the model outputs log-probabilities, so exp() recovers probabilities).
    probs = torch.exp(mw.model.forward(img))
    # Using the top_k arg get the top probabilities and labels
    top_probs, top_labs = probs.topk(top_k)
    #Bring to CPU if GPU is enabled
    if gpu == True:
        top_probs = top_probs.cpu()
        top_labs = top_labs.cpu()
    # Do a numpy conversion
    top_probs = top_probs.detach().numpy().tolist()[0]
    top_labs = top_labs.detach().numpy().tolist()[0]
    # Convert indices to classes reversing the json read
    idx_to_class = {val: key for key, val in
                    mw.class_to_idx.items()}
    # Map model output indices to dataset class labels, then to flower names.
    top_label_names = [idx_to_class[lab] for lab in top_labs]
    top_flower_names = [mw.cat_to_name[idx_to_class[lab]] for lab in top_labs]
return top_probs, top_label_names, top_flower_names | 0.880399 | 0.538741 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .multibox_layer import MultiBoxLayer
class SSD(nn.Module):
    """SSD300 model.

    VGG16 (through conv4_3) backbone plus the extra SSD feature layers.
    Six feature maps — conv4_3 (L2-normalized), conv7, conv8_2, conv9_2,
    conv10_2 and conv11_2 — are collected and passed to the MultiBox layer,
    which predicts box offsets and per-class confidences.
    """
    def __init__(self, num_classes=21):
        super(SSD, self).__init__()
        self.base = VGG16()
        # output feature map size: 38
        self.norm4 = L2Norm(512)  # L2-normalize conv4_3 before prediction
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        # output feature map size: 19
        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        # stride-1 pool keeps the 19x19 resolution
        self.pool5 = nn.MaxPool2d(kernel_size=3, padding=1, stride=1, ceil_mode=True)
        # output feature map size: 19
        # dilated conv enlarges the receptive field without shrinking the map
        self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
        # output feature map size: 19
        self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
        self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1)
        self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2)
        # output feature map size: 10
        self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1)
        self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2)
        # output feature map size: 5
        self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1)
        self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3)
        # output feature map size: 3
        self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1)
        self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3)
        # output feature map size: 1
        self.multibox = MultiBoxLayer(num_classes)

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [n, 3, 300, 300].

        Returns:
            (loc_preds, conf_preds) from the MultiBox layer, computed over
            the six collected feature maps.
        """
        hs = []  # hidden states
        h = self.base(x)
        hs.append(self.norm4(h))  # conv4_3
        h = self.pool4(h)
        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = self.pool5(h)
        h = F.relu(self.conv6(h))
        h = F.relu(self.conv7(h))
        hs.append(h)  # conv7
        h = F.relu(self.conv8_1(h))
        h = F.relu(self.conv8_2(h))
        hs.append(h)  # conv8_2
        h = F.relu(self.conv9_1(h))
        h = F.relu(self.conv9_2(h))
        hs.append(h)  # conv9_2
        h = F.relu(self.conv10_1(h))
        h = F.relu(self.conv10_2(h))
        hs.append(h)  # conv10_2
        h = F.relu(self.conv11_1(h))
        h = F.relu(self.conv11_2(h))
        hs.append(h)  # conv11_2
        loc_preds, conf_preds = self.multibox(hs)
        return loc_preds, conf_preds
def VGG16():
    """VGG16 base network.

    Builds the convolutional trunk used as the SSD feature extractor: 3x3
    'same'-padded convolutions with in-place ReLU, interleaved with three
    2x2 max-pool stages (the 'M' entries), so a 300x300 input comes out
    as a 38x38, 512-channel feature map.

    Returns:
        nn.Sequential: the assembled layer stack.
    """
    plan = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
    modules = []
    prev_channels = 3
    for entry in plan:
        if entry == 'M':
            # feature map sizes after each pooling: [150, 75, 38]
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True))
        else:
            # 'same'-padded conv followed by in-place ReLU
            modules.append(nn.Conv2d(prev_channels, entry, kernel_size=3, padding=1))
            modules.append(nn.ReLU(True))
            prev_channels = entry
    return nn.Sequential(*modules)
class L2Norm(nn.Module):
"""Channel-wise L2 normalization."""
def __init__(self, in_channels):
super(L2Norm, self).__init__()
self.weight = nn.Parameter(torch.randn(in_channels))
def forward(self, x):
"""out = weight * x / sqrt(\sum x_i^2)"""
unsqueezed_weight = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3)
return unsqueezed_weight * x * x.pow(2).sum(1, keepdim=True).clamp(min=1e-9).rsqrt() | src/ssd.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .multibox_layer import MultiBoxLayer
class SSD(nn.Module):
"""SSD300 model."""
def __init__(self, num_classes=21):
super(SSD, self).__init__()
self.base = VGG16()
# output feature map size: 38
self.norm4 = L2Norm(512)
self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
# output feature map size: 19
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=3, padding=1, stride=1, ceil_mode=True)
# output feature map size: 19
self.conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
# output feature map size: 19
self.conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
self.conv8_1 = nn.Conv2d(1024, 256, kernel_size=1)
self.conv8_2 = nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2)
# output feature map size: 10
self.conv9_1 = nn.Conv2d(512, 128, kernel_size=1)
self.conv9_2 = nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2)
# output feature map size: 5
self.conv10_1 = nn.Conv2d(256, 128, kernel_size=1)
self.conv10_2 = nn.Conv2d(128, 256, kernel_size=3)
# output feature map size: 3
self.conv11_1 = nn.Conv2d(256, 128, kernel_size=1)
self.conv11_2 = nn.Conv2d(128, 256, kernel_size=3)
# output feature map size: 1
self.multibox = MultiBoxLayer(num_classes)
def forward(self, x):
"""
Arguments:
x: a float tensor with shape [n, 3, 300, 300].
"""
hs = [] # hidden states
h = self.base(x)
hs.append(self.norm4(h)) # conv4_3
h = self.pool4(h)
h = F.relu(self.conv5_1(h))
h = F.relu(self.conv5_2(h))
h = F.relu(self.conv5_3(h))
h = self.pool5(h)
h = F.relu(self.conv6(h))
h = F.relu(self.conv7(h))
hs.append(h) # conv7
h = F.relu(self.conv8_1(h))
h = F.relu(self.conv8_2(h))
hs.append(h) # conv8_2
h = F.relu(self.conv9_1(h))
h = F.relu(self.conv9_2(h))
hs.append(h) # conv9_2
h = F.relu(self.conv10_1(h))
h = F.relu(self.conv10_2(h))
hs.append(h) # conv10_2
h = F.relu(self.conv11_1(h))
h = F.relu(self.conv11_2(h))
hs.append(h) # conv11_2
loc_preds, conf_preds = self.multibox(hs)
return loc_preds, conf_preds
def VGG16():
"""VGG16 base."""
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512]
layers = []
in_channels = 3
for x in cfg:
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
# feature map sizes after each pooling: [150, 75, 38]
else:
layers += [
nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
# conv uses 'same' padding
nn.ReLU(True)
]
in_channels = x
return nn.Sequential(*layers)
class L2Norm(nn.Module):
"""Channel-wise L2 normalization."""
def __init__(self, in_channels):
super(L2Norm, self).__init__()
self.weight = nn.Parameter(torch.randn(in_channels))
def forward(self, x):
"""out = weight * x / sqrt(\sum x_i^2)"""
unsqueezed_weight = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3)
return unsqueezed_weight * x * x.pow(2).sum(1, keepdim=True).clamp(min=1e-9).rsqrt() | 0.950307 | 0.550003 |
import cx_Freeze, os.path

# Point cx_Freeze at the Tcl/Tk runtime shipped with the Python install
# (works around TCL_LIBRARY/TK_LIBRARY not being located on Windows builds).
PYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))
os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')

# The game executable; the 'Win32GUI' base suppresses the console window.
executables = [
    cx_Freeze.Executable(
        script = 'main.py',
        base = 'Win32GUI',
        targetName = 'pyjumble.exe',
        icon = 'assets/images/icon.ico',
        copyright = 'Copyright (c) 2018 <NAME>',
        shortcutName = 'PyJumble'
    )
]

# MSI Shortcut table rows (Windows Installer Shortcut table schema):
# one desktop and one start-menu shortcut pointing at the installed exe.
shortcut_table = [
    (
        'DesktopShortcut',        # Shortcut
        'DesktopFolder',          # Directory_
        'PyJumble',               # Name
        'TARGETDIR',              # Component_
        '[TARGETDIR]pyjumble.exe',# Target
        None,                     # Arguments
        None,                     # Description
        None,                     # Hotkey
        None,                     # Icon
        None,                     # IconIndex
        None,                     # ShowCmd
        'TARGETDIR'               # WkDir
    ),
    (
        'StartMenuShortcut',      # Shortcut
        'StartMenuFolder',        # Directory_
        'PyJumble',               # Name
        'TARGETDIR',              # Component_
        '[TARGETDIR]pyjumble.exe',# Target
        None,                     # Arguments
        None,                     # Description
        None,                     # Hotkey
        None,                     # Icon
        None,                     # IconIndex
        None,                     # ShowCmd
        'TARGETDIR'               # WkDir
    )
]

cx_Freeze.setup(
    name = 'PyJumble',
    version = '1.0.0',
    author = '<NAME>',
    url = 'https://github.com/GerardBalaoro/PyJumble',
    options = {
        'build_exe': {
            # Packages bundled into the frozen build.
            'packages': ['pygame', 'random', 'math', 'os', 'json'],
            # Local project modules included explicitly.
            'includes': ['config', 'engine', 'sprites', 'interface'],
            # Data files copied next to the executable.
            'include_files': ['assets/', 'LICENSE.md', 'README.md', 'config.json'],
        },
        'bdist_msi': {
            # Inject the shortcut rows into the MSI database.
            'data': {
                'Shortcut': shortcut_table
            }
        }
    },
    executables = executables
) | setup.py | import cx_Freeze, os.path
PYTHON_INSTALL_DIR = os.path.dirname(os.path.dirname(os.__file__))
os.environ['TCL_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tcl8.6')
os.environ['TK_LIBRARY'] = os.path.join(PYTHON_INSTALL_DIR, 'tcl', 'tk8.6')
executables = [
cx_Freeze.Executable(
script = 'main.py',
base = 'Win32GUI',
targetName = 'pyjumble.exe',
icon = 'assets/images/icon.ico',
copyright = 'Copyright (c) 2018 <NAME>',
shortcutName = 'PyJumble'
)
]
shortcut_table = [
(
'DesktopShortcut', # Shortcut
'DesktopFolder', # Directory_
'PyJumble', # Name
'TARGETDIR', # Component_
'[TARGETDIR]pyjumble.exe',# Target
None, # Arguments
None, # Description
None, # Hotkey
None, # Icon
None, # IconIndex
None, # ShowCmd
'TARGETDIR' # WkDir
),
(
'StartMenuShortcut', # Shortcut
'StartMenuFolder', # Directory_
'PyJumble', # Name
'TARGETDIR', # Component_
'[TARGETDIR]pyjumble.exe',# Target
None, # Arguments
None, # Description
None, # Hotkey
None, # Icon
None, # IconIndex
None, # ShowCmd
'TARGETDIR' # WkDir
)
]
cx_Freeze.setup(
name = 'PyJumble',
version = '1.0.0',
author = '<NAME>',
url = 'https://github.com/GerardBalaoro/PyJumble',
options = {
'build_exe': {
'packages': ['pygame', 'random', 'math', 'os', 'json'],
'includes': ['config', 'engine', 'sprites', 'interface'],
'include_files': ['assets/', 'LICENSE.md', 'README.md', 'config.json'],
},
'bdist_msi': {
'data': {
'Shortcut': shortcut_table
}
}
},
executables = executables
) | 0.293911 | 0.053775 |
from pulp import *
# Per-test-case LP solved with PuLP.  NOTE: Python 2 script (print
# statements, fileinput's .next()).
import numpy
import fileinput

inp = fileinput.input()
num_cases = int(inp.next());
for case in range(num_cases):
    # arboles: number of trees; prediccio: predicted total cost;
    # lenadores: number of lumberjacks.
    arboles, prediccio, lenadores = map(int,inp.next()[:-1].split(" ")[:3])
    Treball_maxim = []   # max work capacity per lumberjack
    Work_required = []   # per-lumberjack work needed for each tree
    for jj in range(lenadores):
        work_list = [int(i) for i in inp.next()[:-1].split(" ") if len(i) > 0]
        Treball_maxim.append(work_list[0])
        Work_required.append(work_list[1:])
    # One decision variable per (tree, lumberjack) pair, keyed "tree:lumberjack".
    Dedicacio = []
    for arbolito in range(arboles):
        for lenador in range(lenadores):
            Dedicacio.append("%d:%d"%(arbolito, lenador))
    # Fraction of a tree each lumberjack could fell using his full capacity.
    ArbolAssolible = []
    for lenador in range(lenadores):
        ArbolAssolible.append([])
        for arbol in range(arboles):
            ArbolAssolible[-1].append(float(Treball_maxim[lenador])/Work_required[lenador][arbol])
    prob = LpProblem("My paranoia problem", LpMinimize)
    # Each variable is the fraction of a lumberjack's capacity spent on a tree.
    ingredient_vars = LpVariable.dicts("Dedicacio ",Dedicacio,lowBound=0.,upBound=1.)#,0)
    main_cost = []
    # Objective: total capacity consumed, summed over all lumberjacks.
    for lenador in range(lenadores):
        main_cost.append(lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) *Treball_maxim[lenador])
    prob += lpSum(main_cost)#, "Total Cost of Ingredients per can"
    # No lumberjack over-contributes to any single tree.
    for arbolito in range(arboles):
        for lenador in range(lenadores):
            prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] * ArbolAssolible[lenador][arbolito] ]) <= 1, ' garantizando que no curro por encima de mis posibilidades %d %d menor que uno' % (arbolito, lenador)
    # A lumberjack's total dedication cannot exceed his whole capacity.
    for lenador in range(lenadores):
        prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) <= 1
    # Every tree must be completely felled.
    for arbol in range(arboles):
        prob += lpSum([ingredient_vars["%d:%d"%(arbol, lenador)]*ArbolAssolible[lenador][arbol] for lenador in range(lenadores)]) == 1, ' totalidad arbol %d cortado' % arbol
    # Non-negativity (redundant with lowBound=0.; kept as explicit constraints).
    for arbolito in range(arboles):
        for lenador in range(lenadores):
            prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)]]) >= 0, ' garantizando dedicacion %d %d positivo' % (arbolito, lenador)
    # The problem data is written to an .lp file
    prob.writeLP("WhiskasModel2.lp")
    # The problem is solved using PuLP's choice of Solver
    prob.solve()
    if LpStatus[prob.status] == "Infeasible":
        print "Test case #%d: IMPOSSIBLE" % (case+1)
    elif numpy.around(prediccio,2) < numpy.around(value(prob.objective),2):
        # Prediction undershot the optimum; report the gap to 2 decimals.
        print "Test case #%d: %0.2f" % (case+1, value(prob.objective)-prediccio)
    else:
print "Test case #%d: RIGHT" % (case+1) | problem16/snippet.py | from pulp import *
# Creates a list of the Ingredients
import numpy
import fileinput
inp = fileinput.input()
num_cases = int(inp.next());
for case in range(num_cases):
arboles, prediccio, lenadores = map(int,inp.next()[:-1].split(" ")[:3])
Treball_maxim = []
Work_required = []
for jj in range(lenadores):
work_list = [int(i) for i in inp.next()[:-1].split(" ") if len(i) > 0]
Treball_maxim.append(work_list[0])
Work_required.append(work_list[1:])
Dedicacio = []
for arbolito in range(arboles):
for lenador in range(lenadores):
Dedicacio.append("%d:%d"%(arbolito, lenador))
ArbolAssolible = []
for lenador in range(lenadores):
ArbolAssolible.append([])
for arbol in range(arboles):
ArbolAssolible[-1].append(float(Treball_maxim[lenador])/Work_required[lenador][arbol])
prob = LpProblem("My paranoia problem", LpMinimize)
ingredient_vars = LpVariable.dicts("Dedicacio ",Dedicacio,lowBound=0.,upBound=1.)#,0)
main_cost = []
### El coste total tiene buena pinta...
for lenador in range(lenadores):
main_cost.append(lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) *Treball_maxim[lenador])
prob += lpSum(main_cost)#, "Total Cost of Ingredients per can"
for arbolito in range(arboles):
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] * ArbolAssolible[lenador][arbolito] ]) <= 1, ' garantizando que no curro por encima de mis posibilidades %d %d menor que uno' % (arbolito, lenador)
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)] for arbolito in range(arboles)]) <= 1
for arbol in range(arboles):
prob += lpSum([ingredient_vars["%d:%d"%(arbol, lenador)]*ArbolAssolible[lenador][arbol] for lenador in range(lenadores)]) == 1, ' totalidad arbol %d cortado' % arbol
for arbolito in range(arboles):
for lenador in range(lenadores):
prob += lpSum([ingredient_vars["%d:%d"%(arbolito, lenador)]]) >= 0, ' garantizando dedicacion %d %d positivo' % (arbolito, lenador)
# The problem data is written to an .lp file
prob.writeLP("WhiskasModel2.lp")
# The problem is solved using PuLP's choice of Solver
prob.solve()
if LpStatus[prob.status] == "Infeasible":
print "Test case #%d: IMPOSSIBLE" % (case+1)
elif numpy.around(prediccio,2) < numpy.around(value(prob.objective),2):
print "Test case #%d: %0.2f" % (case+1, value(prob.objective)-prediccio)
else:
print "Test case #%d: RIGHT" % (case+1) | 0.23014 | 0.241791 |
import numpy as np
def astype(obj, type):
    """Coerce *obj* to *type*, preferring numpy's ``.astype`` when present.

    Falls back to calling ``type(obj)`` for plain Python values.
    """
    try:
        convert = obj.astype
    except AttributeError:
        return type(obj)
    return convert(type)
def _n_chunks(n, seq):
step = len(seq) // n
return [seq[i:i+step] for i in range(0, step*n, step)]
def _hex_to_uint8(value):
return np.array(list(map(lambda s: int(s, 16), _n_chunks(3, value.lstrip('#')))) + [255], dtype=np.uint8)
def _uint8_to_hex(value):
return '#%02x%02x%02x' % tuple(value[:3])
def _float_to_uint8(value):
return np.array(list(map(lambda f: f*255.0, value)), dtype=np.uint8)
def _uint8_to_float(value):
return np.array(list(map(lambda i: i/255.0, value)), dtype=np.float32)
def save_hex(value):
return _uint8_to_hex(_float_to_uint8(value))
def load_hex(value):
return _uint8_to_float(_hex_to_uint8(value))
def rgb_to_hsv(r, g, b):
    """Convert RGB (components in [0, 1]) to HSV, element-wise.

    Accepts scalars or equally-shaped numpy arrays and returns ``(h, s, v)``
    of the same shape.  Matches ``colorsys.rgb_to_hsv`` semantics:
    ``s = (max - min) / max``, with ``s = 0`` where ``max == 0`` (black).
    """
    maxc = np.maximum(r, np.maximum(g, b))
    minc = np.minimum(r, np.minimum(g, b))
    v = maxc
    minc_eq_maxc = np.equal(minc, maxc)
    # Reset zero differences to one to avoid divide-by-zero below; the hue
    # for those gray pixels remains 0 because rc == gc == bc there.
    ones = np.ones_like(r)
    maxc_minus_minc = np.choose(minc_eq_maxc, (maxc-minc, ones))
    # Bug fix: the saturation denominator was np.maximum(ones, maxc), which
    # understated s for every color with maxc < 1 (it effectively returned
    # maxc - minc).  Guard only the maxc == 0 case instead.
    safe_maxc = np.choose(np.equal(maxc, 0.0), (maxc, ones))
    s = (maxc-minc) / safe_maxc
    rc = (maxc-r) / maxc_minus_minc
    gc = (maxc-g) / maxc_minus_minc
    bc = (maxc-b) / maxc_minus_minc
    maxc_is_r = np.equal(maxc, r)
    maxc_is_g = np.equal(maxc, g)
    maxc_is_b = np.equal(maxc, b)
    # Select the hue sector; later chooses win ties (r over g over b),
    # matching the colorsys priority order.
    h = np.zeros_like(r)
    h = np.choose(maxc_is_b, (h, gc-rc+4.0))
    h = np.choose(maxc_is_g, (h, rc-bc+2.0))
    h = np.choose(maxc_is_r, (h, bc-gc))
    h = np.mod(h/6.0, 1.0)
    return (h, s, v)
def hsv_to_rgb(h, s, v):
    """Convert HSV (components in [0, 1]) to RGB, element-wise.

    NOTE(review): the ``s == 0.0`` shortcut below is only valid for scalar
    inputs — an array ``s`` would make the ``if`` ambiguous.  Confirm the
    intended call sites pass scalars.
    """
    h = np.clip(h, 0.0, 1.0)
    s = np.clip(s, 0.0, 1.0)
    v = np.clip(v, 0.0, 1.0)
    # Achromatic (gray): every channel equals the value.
    if s == 0.0:
        return v, v, v
    # Hue sector index 0..5; the 5.999999 factor keeps h == 1.0 in sector 5.
    i = astype(h*5.999999, int)
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0-f))
    # Per-sector channel selection (standard HSV sector table).
    r = np.choose(i, [v, q, p, p, t, v])
    g = np.choose(i, [t, v, v, q, p, p])
    b = np.choose(i, [p, p, t, v, v, q])
return (r, g, b) | glance/colors/colorconversions.py | import numpy as np
def astype(obj, type):
try:
return obj.astype(type)
except AttributeError:
return type(obj)
def _n_chunks(n, seq):
step = len(seq) // n
return [seq[i:i+step] for i in range(0, step*n, step)]
def _hex_to_uint8(value):
return np.array(list(map(lambda s: int(s, 16), _n_chunks(3, value.lstrip('#')))) + [255], dtype=np.uint8)
def _uint8_to_hex(value):
return '#%02x%02x%02x' % tuple(value[:3])
def _float_to_uint8(value):
return np.array(list(map(lambda f: f*255.0, value)), dtype=np.uint8)
def _uint8_to_float(value):
return np.array(list(map(lambda i: i/255.0, value)), dtype=np.float32)
def save_hex(value):
return _uint8_to_hex(_float_to_uint8(value))
def load_hex(value):
return _uint8_to_float(_hex_to_uint8(value))
def rgb_to_hsv(r, g, b):
maxc = np.maximum(r, np.maximum(g, b))
minc = np.minimum(r, np.minimum(g, b))
v = maxc
minc_eq_maxc = np.equal(minc, maxc)
# compute the difference, but reset zeros to ones to avoid divide by zeros later.
ones = np.ones_like(r)
maxc_minus_minc = np.choose(minc_eq_maxc, (maxc-minc, ones))
s = (maxc-minc) / np.maximum(ones,maxc)
rc = (maxc-r) / maxc_minus_minc
gc = (maxc-g) / maxc_minus_minc
bc = (maxc-b) / maxc_minus_minc
maxc_is_r = np.equal(maxc, r)
maxc_is_g = np.equal(maxc, g)
maxc_is_b = np.equal(maxc, b)
h = np.zeros_like(r)
h = np.choose(maxc_is_b, (h, gc-rc+4.0))
h = np.choose(maxc_is_g, (h, rc-bc+2.0))
h = np.choose(maxc_is_r, (h, bc-gc))
h = np.mod(h/6.0, 1.0)
return (h, s, v)
def hsv_to_rgb(h, s, v):
h = np.clip(h, 0.0, 1.0)
s = np.clip(s, 0.0, 1.0)
v = np.clip(v, 0.0, 1.0)
if s == 0.0:
return v, v, v
i = astype(h*5.999999, int)
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
r = np.choose(i, [v, q, p, p, t, v])
g = np.choose(i, [t, v, v, q, p, p])
b = np.choose(i, [p, p, t, v, v, q])
return (r, g, b) | 0.542136 | 0.340403 |
from .base_node import BaseNode
class AchUsNode(BaseNode):
"""Represents an ACH-US node."""
@classmethod
def unverified_from_response(cls, user, response):
"""Create an AchUsNode instance for an ACH-US node that needs MFA.
The API record is not actually created until the MFA has been correctly
answered.
"""
return cls(user=user,
mfa_access_token=response['mfa']['access_token'],
mfa_message=response['mfa']['message'],
mfa_verified=False)
@classmethod
def create_via_bank_login(cls, user=None, bank_name=None, username=None,
password=<PASSWORD>):
"""Create a ACH-US node record in API via bank login.
Args:
user (User): user the node belongs to
bank_name (str): https://synapsepay.com/api/v3/institutions/show
username (str): the user's username with the bank
password (str): the user's password with the bank
Returns:
list: if no MFA, returns a list of AchUsNodes
AchUsNode: if MFA, returns an AchUsNode with mfa_verified=False
"""
payload = super().payload_for_create('ACH-US',
bank_name=bank_name,
username=username,
password=password,
mfa_verified=True)
response = user.client.nodes.create(user.id, payload)
if 'mfa' in response:
# create unverified node
return cls.unverified_from_response(user, response)
else:
# no mfa needed
return cls.multiple_from_response(user, response['nodes'])
@classmethod
def payload_for_create(cls, nickname, account_number, routing_number,
account_type, account_class, **kwargs):
"""Build the API 'create node' payload specific to ACH-US."""
payload = super().payload_for_create('ACH-US',
nickname=nickname,
account_number=account_number,
routing_number=routing_number,
account_type=account_type,
account_class=account_class,
**kwargs)
return payload
def verify_microdeposits(self, amount1, amount2):
"""Verify the microdeposits to activate ACH-US added by acct/routing.
After adding an ACH-US nodes via account and routing number
the user will receive two microdeposits to their account within a
a couple days. The node will not have the ability to send funds until
microdeposits are verified.
Args:
amount1 (float): the first microdeposit amount
amount2 (float): the second microdeposit amount
Returns:
AchUsNode: a new instance representing the same API record
"""
payload = {'micro': [amount1, amount2]}
response = self.user.client.nodes.update(self.user.id, self.id, payload)
return self.from_response(self.user, response)
    def answer_mfa(self, answer):
        """Answer the MFA questions presented during bank login attempt.

        This step is only necessary if the node's mfa_verified property is
        False. Present the value of the node's mfa_message property to the
        user and pass their answer to this method.

        If the user answers incorrectly, the node will remain unverified and
        the question will remain the same. If the bank requires an additional
        MFA question, the node will remain unverified but the question
        property will have a new value. Otherwise, if the user satisfies MFA,
        the method will retrieve their ACH-US node data.

        Args:
            answer (str): the user's response to the MFA question

        Returns:
            AchUsNode: (self) if still unverified, returns self
            list: if verification complete, returns a list of AchUsNodes

        NOTE(review): error codes other than '0' and '10' fall through and
        return None implicitly — confirm whether that is intended.
        """
        payload = {'access_token': self.mfa_access_token, 'mfa_answer': answer}
        response = self.user.client.nodes.create(self.user.id, payload)
        if response['error_code'] == '0':
            # Correct answer: the API created the node(s); mark verified.
            self.mfa_verified = True
            return self.multiple_from_response(self.user, response['nodes'])
        elif response['error_code'] == '10':
            # Incorrect answer, or an additional MFA question is required:
            # refresh the token/question so the caller can retry.
            self.mfa_access_token = response['mfa']['access_token']
            self.mfa_message = response['mfa']['message']
            self.json = response
return self | synapse_pay_rest/models/nodes/ach_us_node.py | from .base_node import BaseNode
class AchUsNode(BaseNode):
"""Represents an ACH-US node."""
@classmethod
def unverified_from_response(cls, user, response):
"""Create an AchUsNode instance for an ACH-US node that needs MFA.
The API record is not actually created until the MFA has been correctly
answered.
"""
return cls(user=user,
mfa_access_token=response['mfa']['access_token'],
mfa_message=response['mfa']['message'],
mfa_verified=False)
@classmethod
def create_via_bank_login(cls, user=None, bank_name=None, username=None,
password=<PASSWORD>):
"""Create a ACH-US node record in API via bank login.
Args:
user (User): user the node belongs to
bank_name (str): https://synapsepay.com/api/v3/institutions/show
username (str): the user's username with the bank
password (str): the user's password with the bank
Returns:
list: if no MFA, returns a list of AchUsNodes
AchUsNode: if MFA, returns an AchUsNode with mfa_verified=False
"""
payload = super().payload_for_create('ACH-US',
bank_name=bank_name,
username=username,
password=password,
mfa_verified=True)
response = user.client.nodes.create(user.id, payload)
if 'mfa' in response:
# create unverified node
return cls.unverified_from_response(user, response)
else:
# no mfa needed
return cls.multiple_from_response(user, response['nodes'])
@classmethod
def payload_for_create(cls, nickname, account_number, routing_number,
account_type, account_class, **kwargs):
"""Build the API 'create node' payload specific to ACH-US."""
payload = super().payload_for_create('ACH-US',
nickname=nickname,
account_number=account_number,
routing_number=routing_number,
account_type=account_type,
account_class=account_class,
**kwargs)
return payload
def verify_microdeposits(self, amount1, amount2):
"""Verify the microdeposits to activate ACH-US added by acct/routing.
After adding an ACH-US nodes via account and routing number
the user will receive two microdeposits to their account within a
a couple days. The node will not have the ability to send funds until
microdeposits are verified.
Args:
amount1 (float): the first microdeposit amount
amount2 (float): the second microdeposit amount
Returns:
AchUsNode: a new instance representing the same API record
"""
payload = {'micro': [amount1, amount2]}
response = self.user.client.nodes.update(self.user.id, self.id, payload)
return self.from_response(self.user, response)
def answer_mfa(self, answer):
"""Answer the MFA questions presented during bank login attempt.
This step is only necessary if the node's mfa_verified property is
False. Present the value of the node's mfa_message property to the user
and pass their answer to this method.
If the user answers incorrectly, the node will remain unverified and
the question will remain the same. If the bank requires an additional
MFA question, the node will remain unverified but the question
property will have a new value. Otherwise, if the user satisfies MFA,
the method will retrieve their ACH-US node data.
Args:
answer (str): the user's response to the MFA question
Returns:
AchUsNode: (self) if still unverified, returns self
list: if verification complete, returns a list of AchUsNodes
"""
payload = {'access_token': self.mfa_access_token, 'mfa_answer': answer}
response = self.user.client.nodes.create(self.user.id, payload)
if response['error_code'] == '0':
# correct answer
self.mfa_verified = True
return self.multiple_from_response(self.user, response['nodes'])
elif response['error_code'] == '10':
# incorrect answer or additional mfa answer required
self.mfa_access_token = response['mfa']['access_token']
self.mfa_message = response['mfa']['message']
self.json = response
return self | 0.897128 | 0.28549 |
import json
from botocore.vendored import requests
from connector_response import ConnectorResponse as ConnectorResponse
import traceback
import base64
default_jira_server = "https://issues.apache.org/jira"
jira_get_project_api = "/rest/api/2/project/"
default_authorization = "<configure-your-base64-endcoded-authorization>"
"""
Authorization Example
consider username = ci360connector and password=<PASSWORD>
Now encode ci360connector:<PASSWORD> using base64 encoding. It looks like "Y2kzNjBjb25uZWN0b3I6UGFzc3dvcmRAMTIz"
Insert this encoded string into default_authorization variable
"""
def lambda_handler(event, context):
    """AWS Lambda entry point: look up JIRA project info for the event.

    Returns:
        dict: the ConnectorResponse serialized via ``to_dict()`` on both
        the success and the error path.
    """
    try:
        print("Input event : ", event)
        response = perform_request(event)
        return response.to_dict()
    except Exception as e:
        cr = ConnectorResponse(500, "An error occurred. The information about the JIRA project could not be retrieved.", None, True)
        print("Error", e)
        traceback.print_exc()
        # Bug fix: the error path previously returned the ConnectorResponse
        # object itself; serialize it the same way as the success path.
        return cr.to_dict()
def perform_request(event):
    """Look up a JIRA project based on the connector event payload.

    Reads the project key from the event body's attribute groups, resolves
    the JIRA server URL and credentials from the request headers (falling
    back to the module-level defaults), and performs the REST call.

    Args:
        event (dict): API-Gateway-style event with 'body' and 'headers'.

    Returns:
        ConnectorResponse: 200 with the project JSON on success, 400 for a
        missing project key, or the upstream error status otherwise.
    """
    jira_username = ""
    jira_password = ""
    jira_url = ""
    project_key = ""
    incoming_headers = {}
    body = json.loads(event["body"])
    print("Input body : ", body)
    # Locate the attribute groups - design center objects and plan objects
    # nest them under different keys.  Bug fix: initialize groups so the
    # loop below cannot hit an unbound name when neither key is present.
    groups = []
    if "customAttributes" in body:
        groups = body["customAttributes"]["groups"]
    elif "groupedAttributesRep" in body:
        groups = body["groupedAttributesRep"]["groups"]
    print("groups : ", groups)
    for group in groups:
        for item in group["fields"]:
            # Bug fix: the original test was
            # 'if "attributeName" and "value" in item', which due to
            # operator precedence only checked for "value".
            if "attributeName" in item and "value" in item:
                if item["attributeName"] == "JiraProjectKey":
                    project_key = item["value"]
    print("Project key : ", project_key)
    if not project_key:
        return send_validation_error("You must specify a non-blank ID for the JIRA project.")
    # Resolve server and credentials from the request headers, if provided.
    if event is not None:
        if "headers" in event:
            incoming_headers = event["headers"]
    print("Incoming headers : ", incoming_headers)
    if incoming_headers is not None:
        if "jira-url" in incoming_headers:
            jira_url = incoming_headers["jira-url"] + jira_get_project_api + project_key
        if "jira-username" in incoming_headers:
            jira_username = incoming_headers["jira-username"]
        if "jira-password" in incoming_headers:
            jira_password = incoming_headers["jira-password"]
    if jira_url == "" or jira_url is None:
        jira_url = default_jira_server + jira_get_project_api + project_key
        print("using default jira server")
    print("Jira url : ", jira_url)
    print("Jira username : ", jira_username)
    if jira_url == (default_jira_server + jira_get_project_api + project_key):
        # Talking to the default server: use the preconfigured credentials.
        authorization = 'Basic %s' % default_authorization
        print("using default credentials for authorization")
    else:
        authorization = 'Basic %s' % base64.b64encode(bytes(jira_username + ':' + jira_password, "utf-8")).decode("utf-8")
        print("encoding authorization headers completed")
    print("authorization : ", authorization)
    if incoming_headers is None:
        incoming_headers = {}
    incoming_headers['Authorization'] = authorization
    incoming_headers['Content-Type'] = 'application/json'
    # NOTE(review): a GET with a JQL body is unusual and most servers
    # ignore request bodies on GET - confirm whether a search POST was
    # intended here.
    body = {
        "jql": "text ~ \"" + project_key + "\"",
        "maxResults": 5,
        "startAt": 0
    }
    response = requests.get(jira_url, data=json.dumps(body), headers=incoming_headers)
    if response.status_code == 200:
        cr = ConnectorResponse(200, json.dumps(response.json()))
    else:
        cr = ConnectorResponse(response.status_code, "The information about the JIRA project '" + project_key + "' could not be retrieved. The specified JIRA URL, user name, and the password can either be unavailable or invalid. Specify valid values.", None, True)
        print("Error", cr)
        traceback.print_exc()
    print("Response", cr.to_dict())
    return cr
def send_validation_error(error_string):
print(error_string)
return ConnectorResponse(400, error_string, None, True) | connectors/GetJiraProjectInfo.py | import json
from botocore.vendored import requests
from connector_response import ConnectorResponse as ConnectorResponse
import traceback
import base64
default_jira_server = "https://issues.apache.org/jira"
jira_get_project_api = "/rest/api/2/project/"
default_authorization = "<configure-your-base64-endcoded-authorization>"
"""
Authorization Example
consider username = ci360connector and password=<PASSWORD>
Now encode ci360connector:<PASSWORD> using base64 encoding. It looks like "Y2kzNjBjb25uZWN0b3I6UGFzc3dvcmRAMTIz"
Insert this encoded string into default_authorization variable
"""
def lambda_handler(event, context):
try:
print("Input event : ", event)
response = perform_request(event)
return response.to_dict()
except Exception as e:
cr = ConnectorResponse(500, "An error occurred. The information about the JIRA project could not be retrieved.", None, True)
print("Error", e)
traceback.print_exc()
return cr
def perform_request(event):
jira_username = ""
jira_password = ""
jira_url = ""
project_key = ""
body = ""
incoming_headers = {}
body = json.loads(event["body"]);
print("Input body : ", body)
#Retrieve user comment - is design center object or plan object
if "customAttributes" in body:
groups = body["customAttributes"]["groups"]
elif "groupedAttributesRep" in body:
groups = body["groupedAttributesRep"]["groups"]
print("groups : ", groups)
for group in groups:
for item in group["fields"]:
if "attributeName" and "value" in item:
if item["attributeName"] == "JiraProjectKey":
project_key = item["value"]
print("Project key : ", project_key)
if not project_key:
return send_validation_error("You must specify a non-blank ID for the JIRA project.")
#Retrieve headers
if event is not None :
if "headers" in event :
incoming_headers = event["headers"]
print("Incoming headers : ", incoming_headers)
if incoming_headers is not None:
if "jira-url" in incoming_headers:
jira_url = incoming_headers["jira-url"] + jira_get_project_api + project_key
if "jira-username" in incoming_headers:
jira_username = incoming_headers["jira-username"]
if "jira-password" in incoming_headers:
jira_password = incoming_headers["jira-password"]
if jira_url == "" or jira_url is None :
jira_url = default_jira_server + jira_get_project_api + project_key
print("using dafault jira server")
print("Jira url : ", jira_url)
print("Jira username : ", jira_username)
if jira_url == (default_jira_server + jira_get_project_api + project_key):
authorization = 'Basic %s' % default_authorization
print("using default credentials for authorization")
else:
authorization = 'Basic %s' % base64.b64encode(bytes(jira_username + ':' + jira_password, "utf-8")).decode("utf-8")
print("encoding authorization headers completed")
print("authorization : ", authorization)
if incoming_headers is None:
incoming_headers = {}
incoming_headers['Authorization'] = authorization
incoming_headers['Content-Type'] = 'application/json'
body = {
"jql": "text ~ \""+project_key+"\"",
"maxResults": 5,
"startAt": 0
}
response = requests.get(jira_url, data = json.dumps(body), headers=incoming_headers)
if response.status_code == 200:
cr = ConnectorResponse(200, json.dumps(response.json()))
else:
cr = ConnectorResponse(response.status_code, "The information about the JIRA project '" + project_key + "' could not be retrieved. The specified JIRA URL, user name, and the password can either be unavailable or invalid. Specify valid values.", None, True)
print("Error", cr)
traceback.print_exc()
print("Response", cr.to_dict())
return cr;
def send_validation_error(error_string):
print(error_string)
return ConnectorResponse(400, error_string, None, True) | 0.325949 | 0.101411 |
import qumulo.lib.opts
import qumulo.lib.util as util
import qumulo.rest.fs as fs
import qumulo.rest.ftp as ftp
class FtpGetStatus(qumulo.lib.opts.Subcommand):
    """CLI subcommand: print FTP server settings and status.

    (Python 2 module: uses print statements.)
    """

    NAME = "ftp_get_status"
    DESCRIPTION = "Get FTP server settings and status"

    @staticmethod
    def main(conninfo, credentials, _args):
        # Fetch the current settings/status from the REST API and print it.
        print ftp.get_status(conninfo, credentials)
class FtpModifySettings(qumulo.lib.opts.Subcommand):
    """CLI subcommand: update FTP server settings.

    All flags are optional, but at least one must be supplied (enforced in
    ``main``).  (Python 2 module: uses print statements.)
    """

    NAME = "ftp_modify_settings"
    DESCRIPTION = "Set FTP server settings"

    @staticmethod
    def options(parser):
        # Boolean flags are parsed from explicit 'true'/'false' strings so
        # that "not passed" (None) is distinguishable from "set to false".
        parser.add_argument(
            '--enabled',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--check-remote-host',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--log-operations',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--chroot-users',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--allow-unencrypted-connections',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--expand-wildcards',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        # The anonymous user may be mapped to a local user or disabled;
        # the two spellings are mutually exclusive.
        # NOTE(review): --greeting is also inside this mutually exclusive
        # group, which prevents combining it with either anonymous-user
        # flag - confirm whether that is intended.
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--anonymous-user-as-local-user',
            type=fs.LocalUser,
            required=False)
        group.add_argument(
            '--anonymous-user-none', action='store_true', required=False)
        group.add_argument('--greeting', type=str, required=False)

    @staticmethod
    def main(conninfo, credentials, args):
        anonymous_user = None
        if args.anonymous_user_none:
            # Explicit sentinel understood by the REST layer.
            anonymous_user = 'none'
        else:
            anonymous_user = args.anonymous_user_as_local_user
        # Require at least one setting; an empty modify would be a no-op.
        if args.enabled is None \
            and args.check_remote_host is None \
            and args.log_operations is None \
            and args.chroot_users is None \
            and args.allow_unencrypted_connections is None \
            and args.expand_wildcards is None \
            and anonymous_user is None \
            and args.greeting is None:
            raise ValueError("must provide at least one argument")
        print ftp.modify_settings(
            conninfo,
            credentials,
            enabled=args.enabled,
            check_remote_host=args.check_remote_host,
            log_operations=args.log_operations,
            chroot_users=args.chroot_users,
            allow_unencrypted_connections=args.allow_unencrypted_connections,
            expand_wildcards=args.expand_wildcards,
            anonymous_user=anonymous_user,
greeting=args.greeting) | qumulo/commands/ftp.py |
import qumulo.lib.opts
import qumulo.lib.util as util
import qumulo.rest.fs as fs
import qumulo.rest.ftp as ftp
class FtpGetStatus(qumulo.lib.opts.Subcommand):
    """qq subcommand that prints the FTP server's settings and status."""
    NAME = "ftp_get_status"
    DESCRIPTION = "Get FTP server settings and status"

    @staticmethod
    def main(conninfo, credentials, _args):
        # Python 2 print statement: emits the REST response returned by the API.
        print ftp.get_status(conninfo, credentials)
class FtpModifySettings(qumulo.lib.opts.Subcommand):
    """qq subcommand that updates one or more FTP server settings."""
    NAME = "ftp_modify_settings"
    DESCRIPTION = "Set FTP server settings"

    @staticmethod
    def options(parser):
        """Register the optional settings flags on the argparse parser."""
        parser.add_argument(
            '--enabled',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--check-remote-host',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--log-operations',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--chroot-users',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--allow-unencrypted-connections',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        parser.add_argument(
            '--expand-wildcards',
            type=util.bool_from_string,
            metavar='{true,false}',
            required=False)
        # Anonymous-user handling is mutually exclusive: either map anonymous
        # users to a local user, or disable anonymous access entirely.
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--anonymous-user-as-local-user',
            type=fs.LocalUser,
            required=False)
        group.add_argument(
            '--anonymous-user-none', action='store_true', required=False)
        group.add_argument('--greeting', type=str, required=False)

    @staticmethod
    def main(conninfo, credentials, args):
        """Validate that at least one setting was supplied, then apply it.

        Raises ValueError when every flag was left at its default (None),
        since that request would be a no-op.
        """
        if args.anonymous_user_none:
            anonymous_user = 'none'
        else:
            anonymous_user = args.anonymous_user_as_local_user
        new_values = [
            args.enabled,
            args.check_remote_host,
            args.log_operations,
            args.chroot_users,
            args.allow_unencrypted_connections,
            args.expand_wildcards,
            anonymous_user,
            args.greeting,
        ]
        if all(value is None for value in new_values):
            raise ValueError("must provide at least one argument")
        # Parenthesized print works identically under Python 2 and Python 3.
        print(ftp.modify_settings(
            conninfo,
            credentials,
            enabled=args.enabled,
            check_remote_host=args.check_remote_host,
            log_operations=args.log_operations,
            chroot_users=args.chroot_users,
            allow_unencrypted_connections=args.allow_unencrypted_connections,
            expand_wildcards=args.expand_wildcards,
            anonymous_user=anonymous_user,
            greeting=args.greeting))
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import torch.nn.functional as F
# Label-to-RGB color tables, loaded once at import time from tab-separated
# files of the form "<label>\t<R>\t<G>\t<B>" (one category per line).
# NOTE(review): hard-coded absolute paths — the files must exist on the
# importing machine or the import itself fails.  `file` shadows the builtin.
synthia_txtpath='/home/dut-ai/Documents/temp/synthia_encoding.txt'
cat2color_synthia={}
with open(synthia_txtpath,'r') as file:
    for line in file.readlines():
        templist = line.strip().split('\t')
        label = templist.pop(0)
        templist=[int(element) for element in templist]
        cat2color_synthia[int(label)] = templist
# Same file format for the Cityscapes label-to-RGB mapping.
cityscape_txtpath='/home/dut-ai/Documents/temp/cityscape_encoding.txt'
cat2color_cityscape={}
with open(cityscape_txtpath,'r') as file:
    for line in file.readlines():
        templist = line.strip().split('\t')
        label = templist.pop(0)
        templist=[int(element) for element in templist]
        cat2color_cityscape[int(label)] = templist
def label2im(image_tensor):
    """Render a (batched) Cityscapes label map as an RGB image array.

    :param image_tensor: torch tensor of shape (N, H, W) integer labels, or
        (N, C, H, W) per-class scores (argmax is taken over C).  Only the
        first sample of the batch is rendered.
    :return: float numpy array of shape (H, W, 3) with one color per label.
    """
    cat2color = cat2color_cityscape
    if len(image_tensor.shape) == 3:
        # Already a label map: take the first sample of the batch.
        labels = image_tensor.cpu().numpy()[0, :, :]
    else:
        # Per-class scores: reduce the channel dimension with argmax.
        labels = np.argmax(image_tensor[0, :, :, :].cpu().numpy(), 0)
    h = labels.shape[0]
    w = labels.shape[1]
    image_show = np.zeros(shape=[h, w, 3])
    for category in list(cat2color.keys()):
        try:
            x, y = np.where(labels == category)
            image_show[x, y] = np.array(cat2color[category])
        except (ValueError, IndexError):
            # Skip malformed color entries instead of aborting the render
            # (the original bare `except:` also swallowed KeyboardInterrupt).
            continue
    return image_show
def tensor2im(input_image, imtype=np.uint8):
    """Convert a torch image tensor in [-1, 1] into a numpy image array.

    :param input_image: torch tensor of shape (N, C, H, W) or (C, H, W) —
        the latter gains a singleton dim; non-tensors are returned as-is.
    :param imtype: numpy dtype of the returned array.
    :return: (H, W, 3) numpy array scaled to [0, 255], or ``input_image``
        unchanged when it is not a torch tensor.
    """
    if not isinstance(input_image, torch.Tensor):
        return input_image
    image_tensor = input_image.data
    if len(image_tensor.shape) == 3:
        image_tensor = image_tensor.unsqueeze(1)
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        # Replicate a single channel so grayscale renders as RGB.
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # Map CHW values in [-1, 1] to HWC values in [0, 255].
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Print *name* and the mean absolute gradient over all parameters."""
    grad_means = [
        torch.mean(torch.abs(param.grad.data))
        for param in net.parameters()
        if param.grad is not None
    ]
    # Parameters without gradients are ignored; with none at all, report 0.0.
    mean = sum(grad_means) / len(grad_means) if grad_means else 0.0
    print(name)
    print(mean)
def save_image(image_numpy, image_path):
    """Write a numpy image array to *image_path* via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of *x* and, optionally, its shape."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        stats = (np.mean(flat), np.min(flat), np.max(flat),
                 np.median(flat), np.std(flat))
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f'
              % stats)
def mkdirs(paths):
    """Create every directory in *paths* (a list of paths or a single path)."""
    # A bare string is treated as one path, not as a sequence of characters.
    path_list = paths if isinstance(paths, list) and not isinstance(paths, str) else [paths]
    for path in path_list:
        mkdir(path)
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    ``exist_ok=True`` avoids the check-then-create race of the original
    ``os.path.exists`` guard (another process could create the directory
    between the check and the call).
    """
    os.makedirs(path, exist_ok=True)
def scale_pyramid(img, num_scales):
    """Build a coarse-to-fine pyramid of *img* downscaled by powers of two.

    :param img: tensor of shape (N, C, H, W).
    :param num_scales: number of pyramid levels, including the original.
    :return: list of tensors ordered smallest first, original image last.
    """
    scaled_imgs = [img]
    _, _, h, w = img.size()
    for i in range(1, num_scales):
        ratio = 2 ** i
        # F.interpolate replaces the deprecated F.upsample alias.
        scaled_imgs.append(
            F.interpolate(img, size=(h // ratio, w // ratio), mode='nearest'))
    scaled_imgs.reverse()
    return scaled_imgs
import torch
import numpy as np
from PIL import Image
import os
import torch.nn.functional as F
# Label-to-RGB color tables, loaded once at import time from tab-separated
# files of the form "<label>\t<R>\t<G>\t<B>" (one category per line).
# NOTE(review): hard-coded absolute paths — the files must exist on the
# importing machine or the import itself fails.  `file` shadows the builtin.
synthia_txtpath='/home/dut-ai/Documents/temp/synthia_encoding.txt'
cat2color_synthia={}
with open(synthia_txtpath,'r') as file:
    for line in file.readlines():
        templist = line.strip().split('\t')
        label = templist.pop(0)
        templist=[int(element) for element in templist]
        cat2color_synthia[int(label)] = templist
# Same file format for the Cityscapes label-to-RGB mapping.
cityscape_txtpath='/home/dut-ai/Documents/temp/cityscape_encoding.txt'
cat2color_cityscape={}
with open(cityscape_txtpath,'r') as file:
    for line in file.readlines():
        templist = line.strip().split('\t')
        label = templist.pop(0)
        templist=[int(element) for element in templist]
        cat2color_cityscape[int(label)] = templist
def label2im(image_tensor):
    """Render a (batched) Cityscapes label map as an RGB image array.

    :param image_tensor: torch tensor of shape (N, H, W) integer labels, or
        (N, C, H, W) per-class scores (argmax is taken over C).  Only the
        first sample of the batch is rendered.
    :return: float numpy array of shape (H, W, 3) with one color per label.
    """
    cat2color = cat2color_cityscape
    if len(image_tensor.shape) == 3:
        # Already a label map: take the first sample of the batch.
        labels = image_tensor.cpu().numpy()[0, :, :]
    else:
        # Per-class scores: reduce the channel dimension with argmax.
        labels = np.argmax(image_tensor[0, :, :, :].cpu().numpy(), 0)
    h = labels.shape[0]
    w = labels.shape[1]
    image_show = np.zeros(shape=[h, w, 3])
    for category in list(cat2color.keys()):
        try:
            x, y = np.where(labels == category)
            image_show[x, y] = np.array(cat2color[category])
        except (ValueError, IndexError):
            # Skip malformed color entries instead of aborting the render
            # (the original bare `except:` also swallowed KeyboardInterrupt).
            continue
    return image_show
def tensor2im(input_image, imtype=np.uint8):
    """Convert a torch image tensor in [-1, 1] into a numpy image array.

    :param input_image: torch tensor of shape (N, C, H, W) or (C, H, W) —
        the latter gains a singleton dim; non-tensors are returned as-is.
    :param imtype: numpy dtype of the returned array.
    :return: (H, W, 3) numpy array scaled to [0, 255], or ``input_image``
        unchanged when it is not a torch tensor.
    """
    if not isinstance(input_image, torch.Tensor):
        return input_image
    image_tensor = input_image.data
    if len(image_tensor.shape) == 3:
        image_tensor = image_tensor.unsqueeze(1)
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.shape[0] == 1:
        # Replicate a single channel so grayscale renders as RGB.
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # Map CHW values in [-1, 1] to HWC values in [0, 255].
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Print *name* and the mean absolute gradient over all parameters."""
    grad_means = [
        torch.mean(torch.abs(param.grad.data))
        for param in net.parameters()
        if param.grad is not None
    ]
    # Parameters without gradients are ignored; with none at all, report 0.0.
    mean = sum(grad_means) / len(grad_means) if grad_means else 0.0
    print(name)
    print(mean)
def save_image(image_numpy, image_path):
    """Write a numpy image array to *image_path* via PIL."""
    Image.fromarray(image_numpy).save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics of *x* and, optionally, its shape."""
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        stats = (np.mean(flat), np.min(flat), np.max(flat),
                 np.median(flat), np.std(flat))
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f'
              % stats)
def mkdirs(paths):
    """Create every directory in *paths* (a list of paths or a single path)."""
    # A bare string is treated as one path, not as a sequence of characters.
    path_list = paths if isinstance(paths, list) and not isinstance(paths, str) else [paths]
    for path in path_list:
        mkdir(path)
def mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    ``exist_ok=True`` avoids the check-then-create race of the original
    ``os.path.exists`` guard (another process could create the directory
    between the check and the call).
    """
    os.makedirs(path, exist_ok=True)
def scale_pyramid(img, num_scales):
    """Build a coarse-to-fine pyramid of *img* downscaled by powers of two.

    :param img: tensor of shape (N, C, H, W).
    :param num_scales: number of pyramid levels, including the original.
    :return: list of tensors ordered smallest first, original image last.
    """
    scaled_imgs = [img]
    _, _, h, w = img.size()
    for i in range(1, num_scales):
        ratio = 2 ** i
        # F.interpolate replaces the deprecated F.upsample alias.
        scaled_imgs.append(
            F.interpolate(img, size=(h // ratio, w // ratio), mode='nearest'))
    scaled_imgs.reverse()
    return scaled_imgs
import base64
import io
import json
import posixpath
from datetime import timedelta
from typing import Any, BinaryIO, Dict, Optional
from google.cloud import storage # type: ignore
from google.oauth2 import service_account # type: ignore
from giftless.storage import ExternalStorage, StreamingStorage
from .exc import ObjectNotFound
class GoogleCloudStorage(StreamingStorage, ExternalStorage):
    """Google Cloud Storage backend supporting direct-to-cloud
    transfers.

    Objects live under ``<path_prefix>/<prefix>/<oid>``; clients receive
    short-lived V4 signed URLs for direct upload / download.
    """

    def __init__(self, project_name: str, bucket_name: str, account_key_file: Optional[str] = None,
                 account_key_base64: Optional[str] = None, path_prefix: Optional[str] = None, **_):
        self.bucket_name = bucket_name
        self.path_prefix = path_prefix
        self.credentials = self._load_credentials(account_key_file, account_key_base64)
        self.storage_client = storage.Client(project=project_name, credentials=self.credentials)

    def get(self, prefix: str, oid: str) -> BinaryIO:
        """Return the object's content as an in-memory binary stream.

        Raises ObjectNotFound when the blob does not exist.
        """
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.get_blob(self._get_blob_path(prefix, oid))
        if blob is None:
            raise ObjectNotFound('Object does not exist')
        stream = io.BytesIO()
        blob.download_to_file(stream)
        stream.seek(0)
        return stream

    def put(self, prefix: str, oid: str, data_stream: BinaryIO) -> int:
        """Upload *data_stream* and return the number of bytes consumed."""
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.blob(self._get_blob_path(prefix, oid))
        blob.upload_from_file(data_stream)
        return data_stream.tell()

    def exists(self, prefix: str, oid: str) -> bool:
        """Check whether the object exists in the bucket."""
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.blob(self._get_blob_path(prefix, oid))
        return blob.exists()  # type: ignore

    def get_size(self, prefix: str, oid: str) -> int:
        """Return the object's size in bytes; raise ObjectNotFound if absent."""
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.get_blob(self._get_blob_path(prefix, oid))
        if blob is None:
            raise ObjectNotFound("Object does not exist")
        return blob.size  # type: ignore

    def get_upload_action(self, prefix: str, oid: str, size: int, expires_in: int,
                          extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Describe a direct-to-GCS upload via a signed PUT URL."""
        return {
            "actions": {
                "upload": {
                    "href": self._get_signed_url(prefix, oid, http_method='PUT', expires_in=expires_in),
                    "header": {},
                    "expires_in": expires_in
                }
            }
        }

    def get_download_action(self, prefix: str, oid: str, size: int, expires_in: int,
                            extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Describe a direct-from-GCS download via a signed GET URL."""
        filename = extra.get('filename') if extra else None
        disposition = extra.get('disposition', 'attachment') if extra else 'attachment'
        return {
            "actions": {
                "download": {
                    "href": self._get_signed_url(
                        prefix, oid, expires_in=expires_in, filename=filename, disposition=disposition),
                    "header": {},
                    "expires_in": expires_in
                }
            }
        }

    def _get_blob_path(self, prefix: str, oid: str) -> str:
        """Get the path to a blob in storage, honoring the configured prefix.
        """
        if not self.path_prefix:
            storage_prefix = ''
        elif self.path_prefix[0] == '/':
            # Strip a leading slash so posixpath.join does not produce an
            # absolute-looking key.
            storage_prefix = self.path_prefix[1:]
        else:
            storage_prefix = self.path_prefix
        return posixpath.join(storage_prefix, prefix, oid)

    def _get_signed_url(self, prefix: str, oid: str, expires_in: int, http_method: str = 'GET',
                        filename: Optional[str] = None, disposition: Optional[str] = None) -> str:
        """Generate a V4 signed URL for the object.

        When *filename* is given it is attached to the Content-Disposition
        response header, defaulting the type to ``attachment`` when no
        *disposition* was supplied.  (The previous code overwrote the
        *disposition* argument and had corrupted f-strings.)
        """
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.blob(self._get_blob_path(prefix, oid))
        if filename:
            disposition = f'{disposition or "attachment"}; filename="{filename}"'
        url: str = blob.generate_signed_url(expiration=timedelta(seconds=expires_in), method=http_method, version='v4',
                                            response_disposition=disposition, credentials=self.credentials)
        return url

    @staticmethod
    def _load_credentials(account_key_file: Optional[str], account_key_base64: Optional[str]) \
            -> service_account.Credentials:
        """Load Google Cloud credentials from passed configuration.

        Exactly one of *account_key_file* / *account_key_base64* must be set.
        """
        if account_key_file and account_key_base64:
            raise ValueError('Provide either account_key_file or account_key_base64 but not both')
        elif account_key_file:
            return service_account.Credentials.from_service_account_file(account_key_file)
        elif account_key_base64:
            account_info = json.loads(base64.b64decode(account_key_base64))
            return service_account.Credentials.from_service_account_info(account_info)
        else:
            raise ValueError('You must provide either account_key_file or account_key_base64')
import io
import json
import posixpath
from datetime import timedelta
from typing import Any, BinaryIO, Dict, Optional
from google.cloud import storage # type: ignore
from google.oauth2 import service_account # type: ignore
from giftless.storage import ExternalStorage, StreamingStorage
from .exc import ObjectNotFound
class GoogleCloudStorage(StreamingStorage, ExternalStorage):
    """Google Cloud Storage backend supporting direct-to-cloud
    transfers.

    Objects live under ``<path_prefix>/<prefix>/<oid>``; clients receive
    short-lived V4 signed URLs for direct upload / download.
    """

    def __init__(self, project_name: str, bucket_name: str, account_key_file: Optional[str] = None,
                 account_key_base64: Optional[str] = None, path_prefix: Optional[str] = None, **_):
        self.bucket_name = bucket_name
        self.path_prefix = path_prefix
        self.credentials = self._load_credentials(account_key_file, account_key_base64)
        self.storage_client = storage.Client(project=project_name, credentials=self.credentials)

    def get(self, prefix: str, oid: str) -> BinaryIO:
        """Return the object's content as an in-memory binary stream.

        Raises ObjectNotFound when the blob does not exist.
        """
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.get_blob(self._get_blob_path(prefix, oid))
        if blob is None:
            raise ObjectNotFound('Object does not exist')
        stream = io.BytesIO()
        blob.download_to_file(stream)
        stream.seek(0)
        return stream

    def put(self, prefix: str, oid: str, data_stream: BinaryIO) -> int:
        """Upload *data_stream* and return the number of bytes consumed."""
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.blob(self._get_blob_path(prefix, oid))
        blob.upload_from_file(data_stream)
        return data_stream.tell()

    def exists(self, prefix: str, oid: str) -> bool:
        """Check whether the object exists in the bucket."""
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.blob(self._get_blob_path(prefix, oid))
        return blob.exists()  # type: ignore

    def get_size(self, prefix: str, oid: str) -> int:
        """Return the object's size in bytes; raise ObjectNotFound if absent."""
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.get_blob(self._get_blob_path(prefix, oid))
        if blob is None:
            raise ObjectNotFound("Object does not exist")
        return blob.size  # type: ignore

    def get_upload_action(self, prefix: str, oid: str, size: int, expires_in: int,
                          extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Describe a direct-to-GCS upload via a signed PUT URL."""
        return {
            "actions": {
                "upload": {
                    "href": self._get_signed_url(prefix, oid, http_method='PUT', expires_in=expires_in),
                    "header": {},
                    "expires_in": expires_in
                }
            }
        }

    def get_download_action(self, prefix: str, oid: str, size: int, expires_in: int,
                            extra: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Describe a direct-from-GCS download via a signed GET URL."""
        filename = extra.get('filename') if extra else None
        disposition = extra.get('disposition', 'attachment') if extra else 'attachment'
        return {
            "actions": {
                "download": {
                    "href": self._get_signed_url(
                        prefix, oid, expires_in=expires_in, filename=filename, disposition=disposition),
                    "header": {},
                    "expires_in": expires_in
                }
            }
        }

    def _get_blob_path(self, prefix: str, oid: str) -> str:
        """Get the path to a blob in storage, honoring the configured prefix.
        """
        if not self.path_prefix:
            storage_prefix = ''
        elif self.path_prefix[0] == '/':
            # Strip a leading slash so posixpath.join does not produce an
            # absolute-looking key.
            storage_prefix = self.path_prefix[1:]
        else:
            storage_prefix = self.path_prefix
        return posixpath.join(storage_prefix, prefix, oid)

    def _get_signed_url(self, prefix: str, oid: str, expires_in: int, http_method: str = 'GET',
                        filename: Optional[str] = None, disposition: Optional[str] = None) -> str:
        """Generate a V4 signed URL for the object.

        When *filename* is given it is attached to the Content-Disposition
        response header, defaulting the type to ``attachment`` when no
        *disposition* was supplied.  (The previous code overwrote the
        *disposition* argument and had corrupted f-strings.)
        """
        bucket = self.storage_client.bucket(self.bucket_name)
        blob = bucket.blob(self._get_blob_path(prefix, oid))
        if filename:
            disposition = f'{disposition or "attachment"}; filename="{filename}"'
        url: str = blob.generate_signed_url(expiration=timedelta(seconds=expires_in), method=http_method, version='v4',
                                            response_disposition=disposition, credentials=self.credentials)
        return url

    @staticmethod
    def _load_credentials(account_key_file: Optional[str], account_key_base64: Optional[str]) \
            -> service_account.Credentials:
        """Load Google Cloud credentials from passed configuration.

        Exactly one of *account_key_file* / *account_key_base64* must be set.
        """
        if account_key_file and account_key_base64:
            raise ValueError('Provide either account_key_file or account_key_base64 but not both')
        elif account_key_file:
            return service_account.Credentials.from_service_account_file(account_key_file)
        elif account_key_base64:
            account_info = json.loads(base64.b64decode(account_key_base64))
            return service_account.Credentials.from_service_account_info(account_info)
        else:
            raise ValueError('You must provide either account_key_file or account_key_base64')
import asm.cms.cms
import asm.cmsui.base
import asm.cmsui.interfaces
import grok
import megrok.pagelet
import zope.interface
# All views in this module are registered for the CMS application object.
grok.context(asm.cms.cms.CMS)
class SearchAndReplace(megrok.pagelet.Pagelet):
    """Present the user a form to allow entering search and replace terms."""
    # Rendered only in the CMS skin and only for users who may edit content.
    grok.layer(asm.cmsui.interfaces.ICMSSkin)
    grok.require('asm.cms.EditContent')
class ReplacePreview(megrok.pagelet.Pagelet):
    """Given a users search and replace terms show a list of all matches."""

    grok.layer(asm.cmsui.interfaces.ICMSSkin)
    grok.require('asm.cms.EditContent')

    def update(self):
        self.search = self.request.form.get('search', '')
        self.found = 0
        self.results = []
        # Depth-first walk over the page tree, starting at the CMS root.
        stack = [self.application]
        while stack:
            current = stack.pop()
            stack.extend(current.subpages)
            for edition in current.editions:
                try:
                    support = asm.cms.interfaces.IReplaceSupport(edition)
                except TypeError:
                    # This edition type cannot be adapted to search/replace.
                    continue
                matches = support.search(self.search)
                self.found += len(matches)
                if matches:
                    self.results.append(
                        {'edition': edition,
                         'occurrences': matches})
class Replace(megrok.pagelet.Pagelet):
    """Perform a replace operation given a users search and replace terms and
    a list of matches. Then display the remaining occurrences."""
    grok.layer(asm.cmsui.interfaces.ICMSSkin)
    grok.require('asm.cms.EditContent')

    def update(self):
        self.search = self.request.form.get('search', '')
        self.replace = self.request.form.get('replace')
        self.replaced = 0
        # Cache the search results per edition so each edition is searched
        # only once, however many of its occurrences were selected.
        replace_cache = {}
        # NOTE(review): zope.component / zope.intid are not imported at the
        # top of this module — confirm they are available at runtime.
        ids = zope.component.getUtility(zope.intid.interfaces.IIntIds)
        occurrences = self.request.form.get('occurrences')
        # A single selected checkbox submits a bare string rather than a
        # list (Python 2 ``basestring``).
        if isinstance(occurrences, basestring):
            occurrences = [occurrences]
        for occurrence_id in occurrences:
            # Occurrence ids look like "<intid>-<...>-<...>-<...>"; the first
            # component identifies the edition via the int-id utility.
            id, _, _, _ = occurrence_id.split('-')
            if id not in replace_cache:
                edition = ids.getObject(int(id))
                replace = asm.cms.interfaces.IReplaceSupport(edition)
                replace_cache[id] = replace.search(self.search)
            occurrences = replace_cache[id]
            for candidate in occurrences:
                if candidate.id == occurrence_id:
                    candidate.replace(self.replace)
                    self.replaced += 1

    def render(self):
        # Report the number of replacements and return to the search form.
        self.flash('Replaced %s occurrences.' % self.replaced)
        self.redirect(self.url(self.context, 'searchandreplace'))
class ReplaceActions(grok.Viewlet):
    """Viewlet adding the search-and-replace action to the navigation tools."""
    grok.template('actions')
    grok.viewletmanager(asm.cmsui.base.NavigationToolActions)
    grok.context(zope.interface.Interface)
import asm.cmsui.base
import asm.cmsui.interfaces
import grok
import megrok.pagelet
import zope.interface
# All views in this module are registered for the CMS application object.
grok.context(asm.cms.cms.CMS)
class SearchAndReplace(megrok.pagelet.Pagelet):
    """Present the user a form to allow entering search and replace terms."""
    # Rendered only in the CMS skin and only for users who may edit content.
    grok.layer(asm.cmsui.interfaces.ICMSSkin)
    grok.require('asm.cms.EditContent')
class ReplacePreview(megrok.pagelet.Pagelet):
    """Given a users search and replace terms show a list of all matches."""

    grok.layer(asm.cmsui.interfaces.ICMSSkin)
    grok.require('asm.cms.EditContent')

    def update(self):
        self.search = self.request.form.get('search', '')
        self.found = 0
        self.results = []
        # Depth-first walk over the page tree, starting at the CMS root.
        stack = [self.application]
        while stack:
            current = stack.pop()
            stack.extend(current.subpages)
            for edition in current.editions:
                try:
                    support = asm.cms.interfaces.IReplaceSupport(edition)
                except TypeError:
                    # This edition type cannot be adapted to search/replace.
                    continue
                matches = support.search(self.search)
                self.found += len(matches)
                if matches:
                    self.results.append(
                        {'edition': edition,
                         'occurrences': matches})
class Replace(megrok.pagelet.Pagelet):
    """Perform a replace operation given a users search and replace terms and
    a list of matches. Then display the remaining occurrences."""
    grok.layer(asm.cmsui.interfaces.ICMSSkin)
    grok.require('asm.cms.EditContent')

    def update(self):
        self.search = self.request.form.get('search', '')
        self.replace = self.request.form.get('replace')
        self.replaced = 0
        # Cache the search results per edition so each edition is searched
        # only once, however many of its occurrences were selected.
        replace_cache = {}
        # NOTE(review): zope.component / zope.intid are not imported at the
        # top of this module — confirm they are available at runtime.
        ids = zope.component.getUtility(zope.intid.interfaces.IIntIds)
        occurrences = self.request.form.get('occurrences')
        # A single selected checkbox submits a bare string rather than a
        # list (Python 2 ``basestring``).
        if isinstance(occurrences, basestring):
            occurrences = [occurrences]
        for occurrence_id in occurrences:
            # Occurrence ids look like "<intid>-<...>-<...>-<...>"; the first
            # component identifies the edition via the int-id utility.
            id, _, _, _ = occurrence_id.split('-')
            if id not in replace_cache:
                edition = ids.getObject(int(id))
                replace = asm.cms.interfaces.IReplaceSupport(edition)
                replace_cache[id] = replace.search(self.search)
            occurrences = replace_cache[id]
            for candidate in occurrences:
                if candidate.id == occurrence_id:
                    candidate.replace(self.replace)
                    self.replaced += 1

    def render(self):
        # Report the number of replacements and return to the search form.
        self.flash('Replaced %s occurrences.' % self.replaced)
        self.redirect(self.url(self.context, 'searchandreplace'))
class ReplaceActions(grok.Viewlet):
    """Viewlet adding the search-and-replace action to the navigation tools."""
    grok.template('actions')
    grok.viewletmanager(asm.cmsui.base.NavigationToolActions)
    grok.context(zope.interface.Interface)
import math
import numpy as np
from functools import cmp_to_key as ctk
from PIL import Image
class Point:
    """A simple 2-D point."""

    def __init__(self, x: float, y: float):
        self.x, self.y = x, y
class EndPoint(Point):
    """A segment endpoint carrying the metadata used by the angular sweep."""

    def __init__(self, x: float, y: float, begins_segment: bool = None, segment=None, angle: float = None):
        super().__init__(x, y)
        # Whether this endpoint opens its segment in sweep order.
        self.begins_segment = begins_segment
        # Back-reference to the owning Segment (set by Segment.__init__).
        self.segment = segment
        # Angle of the endpoint as seen from the light source.
        self.angle = angle
class Segment:
    """An occluding segment; both endpoints keep a back-reference to it."""

    def __init__(self, x1: float, y1: float, x2: float, y2: float, d: float = None):
        self.p1, self.p2 = EndPoint(x1, y1), EndPoint(x2, y2)
        for endpoint in (self.p1, self.p2):
            endpoint.segment = self
        # Squared distance from the viewpoint (filled in later).
        self.d = d
def calculate_end_point_angles(light_source: Point, segment: Segment) -> None:
    """Fill in, in place, the segment's squared midpoint distance and the
    angle of each endpoint as seen from *light_source*."""
    ox, oy = light_source.x, light_source.y
    mid_dx = 0.5 * (segment.p1.x + segment.p2.x) - ox
    mid_dy = 0.5 * (segment.p1.y + segment.p2.y) - oy
    segment.d = mid_dx * mid_dx + mid_dy * mid_dy
    for endpoint in (segment.p1, segment.p2):
        endpoint.angle = math.atan2(endpoint.y - oy, endpoint.x - ox)
def set_segment_beginning(segment: Segment) -> None:
    """Mark, in place, which endpoint opens the segment in sweep order."""
    sweep_delta = segment.p2.angle - segment.p1.angle
    # Wrap the angular difference into (-pi, pi].
    if sweep_delta <= -math.pi:
        sweep_delta += 2 * math.pi
    if sweep_delta > math.pi:
        sweep_delta -= 2 * math.pi
    opens_at_p1 = sweep_delta > 0
    segment.p1.begins_segment = opens_at_p1
    segment.p2.begins_segment = not opens_at_p1
def endpoint_compare(point_a: EndPoint, point_b: EndPoint):
    """cmp-style ordering: primarily by angle; at equal angles, endpoints
    that begin their segment sort before endpoints that end one."""
    if point_a.angle != point_b.angle:
        return 1 if point_a.angle > point_b.angle else -1
    if point_a.begins_segment != point_b.begins_segment:
        return -1 if point_a.begins_segment else 1
    return 0
def polygon_to_segments(polygon: np.array) -> np.array:
    """Turn a closed polygon (vertex array) into an array of vertex pairs,
    including the closing edge from the last vertex back to the first."""
    closed = np.concatenate((polygon, [polygon[0]]))
    pairs = [[closed[i], closed[i + 1]] for i in range(len(closed) - 1)]
    return np.array(pairs)
def segment_in_front_of(segment_a: Segment, segment_b: Segment, relative_point: Point):
    """Heuristically decide whether segment_a occludes segment_b as seen
    from *relative_point*.

    Each segment is sampled 1% inside its endpoints and classified against
    the other segment's supporting line; the order of the checks below
    encodes the tie-breaking rules of the visibility sweep.
    """
    def left_of(segment: Segment, point: Point):
        # Sign of the 2-D cross product of (p2 - p1) and (point - p1).
        cross = (segment.p2.x - segment.p1.x) * (point.y - segment.p1.y) - (segment.p2.y - segment.p1.y) * (
            point.x - segment.p1.x)
        return cross < 0
    def interpolate(point_a: Point, point_b: Point, f: float):
        # Linear interpolation between the two points at fraction f.
        point = Point(x=point_a.x * (1 - f) + point_b.x * f,
                      y=point_a.y * (1 - f) + point_b.y * f)
        return point
    # a1/a2: segment_b's shrunken endpoints relative to segment_a's line;
    # a3: the viewpoint relative to segment_a.  b1..b3 mirror this.
    a1 = left_of(segment_a, interpolate(segment_b.p1, segment_b.p2, 0.01))
    a2 = left_of(segment_a, interpolate(segment_b.p2, segment_b.p1, 0.01))
    a3 = left_of(segment_a, relative_point)
    b1 = left_of(segment_b, interpolate(segment_a.p1, segment_a.p2, 0.01))
    b2 = left_of(segment_b, interpolate(segment_a.p2, segment_a.p1, 0.01))
    b3 = left_of(segment_b, relative_point)
    # segment_a lies entirely on the opposite side of segment_b from viewer.
    if b1 == b2 and not (b2 == b3):
        return True
    # segment_b lies entirely on the same side of segment_a as the viewer.
    if a1 == a2 and a2 == a3:
        return True
    if a1 == a2 and not (a2 == a3):
        return False
    # NOTE(review): this branch and the fall-through both return False, so
    # the final `if` is redundant; kept to mirror the reference algorithm.
    if b1 == b2 and b2 == b3:
        return False
    return False
def line_intersection(point1: Point, point2: Point, point3: Point, point4: Point):
    """Return the intersection of line p1->p2 with line p3->p4 as a Point."""
    denom = (point4.y - point3.y) * (point2.x - point1.x) - (point4.x - point3.x) * (point2.y - point1.y)
    numer = (point4.x - point3.x) * (point1.y - point3.y) - (point4.y - point3.y) * (point1.x - point3.x)
    # Parallel lines are only tolerated when the numerator vanishes too.
    assert denom != 0 or denom == numer, "center on polygon, it not support!"
    s = 1 if denom == 0 else numer / denom
    return Point(
        point1.x + s * (point2.x - point1.x),
        point1.y + s * (point2.y - point1.y)
    )
def get_triangle_points(origin: Point, angle1: float, angle2: float, segment: Segment):
    """Compute the two far points of the visibility triangle spanned by
    *angle1* and *angle2*, clipped to *segment* (or projected to a far
    boundary when no segment blocks the rays).

    Bug fix: the parallel-ray check previously ran even when *segment* was
    None, dereferencing ``segment.p1`` and raising AttributeError; it is
    now guarded by the ``if segment`` branch.
    """
    p1 = origin
    p2 = Point(origin.x + math.cos(angle1), origin.y + math.sin(angle1))
    p3 = Point(0, 0)
    p4 = Point(0, 0)
    if segment:
        p3.x, p3.y = segment.p1.x, segment.p1.y
        p4.x, p4.y = segment.p2.x, segment.p2.y
        # Use the endpoints directly when the rays are (near) parallel to
        # the segment; intersecting would be numerically unstable.
        if abs(segment.p1.angle - segment.p2.angle) < 1e-6:
            return [p4, p3]
    else:
        # No blocking segment: project both rays far out (2000 units).
        p3.x = origin.x + math.cos(angle1) * 2000
        p3.y = origin.y + math.sin(angle1) * 2000
        p4.x = origin.x + math.cos(angle2) * 2000
        p4.y = origin.y + math.sin(angle2) * 2000
    p_begin = line_intersection(p3, p4, p1, p2)
    p2.x = origin.x + math.cos(angle2)
    p2.y = origin.y + math.sin(angle2)
    p_end = line_intersection(p3, p4, p1, p2)
    return [p_begin, p_end]
def calc_visible_polygon(center: np.array, polygon: np.array = None, segments: np.array = None, show: bool = False):
    """Compute the visibility polygon around *center* via an angular sweep.

    :param center: (x, y) viewpoint.
    :param polygon: closed polygon vertices; converted to segments if given.
    :param segments: precomputed occluding segments (takes priority).
    :param show: when True, render the result with ``visualization``.
    :return: np.array of visibility-polygon vertices.
    """
    if segments is None and polygon is not None:
        segments = polygon_to_segments(polygon)
    origin = Point(x=center[0], y=center[1])
    # Annotate every segment endpoint with its angle and sweep role.
    endpoints = []
    for s in segments:
        p1 = s[0]
        p2 = s[1]
        segment = Segment(x1=p1[0], y1=p1[1], x2=p2[0], y2=p2[1])
        calculate_end_point_angles(origin, segment)
        set_segment_beginning(segment)
        endpoints.extend([segment.p1, segment.p2])
    open_segments = []
    output = []
    begin_angle = 0
    endpoints = sorted(endpoints, key=ctk(endpoint_compare))
    # Two passes over the sorted endpoints: the first pass only brings the
    # open-segment list into a consistent state; triangles are emitted on
    # the second pass.
    for pas in range(2):
        for endpoint in endpoints:
            open_segment = open_segments[0] if len(open_segments) else None
            if endpoint.begins_segment:
                # Insert the new segment, keeping the nearest segment first.
                index = 0
                segment = open_segments[index] if index < len(open_segments) else None
                while segment and segment_in_front_of(endpoint.segment, segment, origin):
                    index += 1
                    segment = open_segments[index] if index < len(open_segments) else None
                if not segment:
                    open_segments.append(endpoint.segment)
                else:
                    open_segments.insert(index, endpoint.segment)
            else:
                if endpoint.segment in open_segments:
                    open_segments.remove(endpoint.segment)
            # When the nearest open segment changed, the current visibility
            # triangle ends at this endpoint's angle.
            if open_segment is not (open_segments[0] if len(open_segments) else None):
                if pas == 1 and open_segment:
                    triangle_points = get_triangle_points(origin, begin_angle, endpoint.angle, open_segment)
                    output.extend(triangle_points)
                begin_angle = endpoint.angle
    output_polygon = []
    # Remove duplicate
    for i, p in enumerate(output):
        q = output[(i + 1) % len(output)]
        # Points equal to ~1e-4 precision are treated as duplicates.
        if int(p.x * 10000) == int(q.x * 10000) and int(p.y * 10000) == int(q.y * 10000):
            continue
        output_polygon.append([p.x, p.y])
    output_polygon.reverse()
    output_polygon = np.array(output_polygon)
    if show:
        visualization(segments, output_polygon, center)
    return output_polygon
def visualization(segments: np.array, output_polygon: np.array, center: np.array, side_l=1000):
    """
    Render the occluding segments and the computed visibility polygon.

    :param segments: original segments
    :param output_polygon: result polygon
    :param center: visibility center
    :param side_l: side length of board
    :return: None; silently returns when cv2 or matplotlib are unavailable.
    """
    try:
        import cv2
        import matplotlib.pyplot as plt
    except ImportError:
        print("visualization need cv2 and matplotlib")
        return
    # Shift everything so the visibility center sits at the board center.
    offset = np.array([side_l / 2, side_l / 2]) - center
    segments = segments + offset
    output_polygon = output_polygon + offset
    origin = np.array([side_l / 2, side_l / 2])
    # +0.5 as board
    scale = side_l / 2.5 / np.abs(segments - origin).max()
    board = np.zeros((side_l, side_l))
    for segment in segments:
        segment = (segment - origin) * scale + origin
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        segment = segment.astype(int)
        cv2.line(board, tuple(segment[0]), tuple(segment[1]), 0.5, thickness=3)
    board = cv2.drawMarker(board, tuple(origin.astype(int)), 1, thickness=3)
    output_polygon = (output_polygon - origin) * scale + origin
    board = cv2.drawContours(board, [output_polygon.astype(int)], 0, 1, 3)
    board = cv2.drawMarker(board, tuple(origin.astype(int)), 1, thickness=3)
    plt.axis('off')
    plt.imshow(board)
    plt.show()
if __name__ == '__main__':
    # Ad-hoc demo: load one MP3D panorama, draw its floorplan, and save a
    # cropped figure.  Requires the project's dataset/visualization packages
    # plus local dataset files, so it only runs in a full repo checkout.
    import numpy as np
    from dataset.mp3d_dataset import MP3DDataset
    from utils.boundary import depth2boundaries
    from utils.conversion import uv2xyz, depth2xyz
    from visualization.boundary import draw_boundaries
    from visualization.floorplan import draw_floorplan, draw_iou_floorplan

    mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train',
                               split_list=[['e9zR4mvMWw7', '2224be23a70a475ea6daa55d4c90a91b']])
    gt = mp3d_dataset.__getitem__(0)
    gt['corners'] = gt['corners'][gt['corners'][..., 0] + gt['corners'][..., 1] != 0]  # Take effective corners
    img = draw_floorplan(depth2xyz(gt['depth'])[:, ::2], fill_color=[1, 1, 1, 0],
                         show=True, scale=1, marker_color=[0, 0, 1, 1], side_l=1024)
    # img = draw_iou_floorplan(gt_xz=uv2xyz(gt['corners'])[..., ::2],
    #                          dt_xz=calc_visible_polygon(np.array([0, 0]), uv2xyz(gt['corners'])[..., ::2]),
    #                          dt_board_color=[0, 0, 1, 0],
    #                          gt_board_color=[0, 0, 1, 0],
    #                          show=True, side_l=1024)
    result = Image.fromarray((img[250: -100, 100:-20] * 255).astype(np.uint8))
    result.save('../src/fig/sample3.png')
import numpy as np
from functools import cmp_to_key as ctk
from PIL import Image
class Point:
    """A simple 2-D point."""

    def __init__(self, x: float, y: float):
        self.x, self.y = x, y
class EndPoint(Point):
    """A segment endpoint carrying the metadata used by the angular sweep."""

    def __init__(self, x: float, y: float, begins_segment: bool = None, segment=None, angle: float = None):
        super().__init__(x, y)
        # Whether this endpoint opens its segment in sweep order.
        self.begins_segment = begins_segment
        # Back-reference to the owning Segment (set by Segment.__init__).
        self.segment = segment
        # Angle of the endpoint as seen from the light source.
        self.angle = angle
class Segment:
    """An occluding segment; both endpoints keep a back-reference to it."""

    def __init__(self, x1: float, y1: float, x2: float, y2: float, d: float = None):
        self.p1, self.p2 = EndPoint(x1, y1), EndPoint(x2, y2)
        for endpoint in (self.p1, self.p2):
            endpoint.segment = self
        # Squared distance from the viewpoint (filled in later).
        self.d = d
def calculate_end_point_angles(light_source: Point, segment: Segment) -> None:
    """Fill in, in place, the segment's squared midpoint distance and the
    angle of each endpoint as seen from *light_source*."""
    ox, oy = light_source.x, light_source.y
    mid_dx = 0.5 * (segment.p1.x + segment.p2.x) - ox
    mid_dy = 0.5 * (segment.p1.y + segment.p2.y) - oy
    segment.d = mid_dx * mid_dx + mid_dy * mid_dy
    for endpoint in (segment.p1, segment.p2):
        endpoint.angle = math.atan2(endpoint.y - oy, endpoint.x - ox)
def set_segment_beginning(segment: Segment) -> None:
    """Mark, in place, which endpoint opens the segment in sweep order."""
    sweep_delta = segment.p2.angle - segment.p1.angle
    # Wrap the angular difference into (-pi, pi].
    if sweep_delta <= -math.pi:
        sweep_delta += 2 * math.pi
    if sweep_delta > math.pi:
        sweep_delta -= 2 * math.pi
    opens_at_p1 = sweep_delta > 0
    segment.p1.begins_segment = opens_at_p1
    segment.p2.begins_segment = not opens_at_p1
def endpoint_compare(point_a: EndPoint, point_b: EndPoint):
    """cmp-style ordering: primarily by angle; at equal angles, endpoints
    that begin their segment sort before endpoints that end one."""
    if point_a.angle != point_b.angle:
        return 1 if point_a.angle > point_b.angle else -1
    if point_a.begins_segment != point_b.begins_segment:
        return -1 if point_a.begins_segment else 1
    return 0
def polygon_to_segments(polygon: np.array) -> np.array:
    """
    Convert a closed polygon (vertex list) into an array of edge segments.

    :param polygon: vertices, shape (n, 2); the closing edge is implicit
    :return: segments, shape (n, 2, 2), where segments[i] = [v_i, v_{i+1}]
    """
    # Append the first vertex so the final edge closes the loop, then pair
    # every vertex with its successor in a single vectorized step.
    closed = np.concatenate((polygon, [polygon[0]]))
    return np.stack((closed[:-1], closed[1:]), axis=1)
def segment_in_front_of(segment_a: Segment, segment_b: Segment, relative_point: Point):
    """
    Decide whether ``segment_a`` occludes ``segment_b`` as seen from
    ``relative_point``, by testing which side of each segment's supporting
    line the other segment (and the viewer) lies on.
    """
    def _left_of(segment, point):
        # Sign of the cross product (p1->p2) x (p1->point).
        cross = (segment.p2.x - segment.p1.x) * (point.y - segment.p1.y) - (segment.p2.y - segment.p1.y) * (
            point.x - segment.p1.x)
        return cross < 0

    def _interpolate(point_a, point_b, f):
        # Point a fraction f along point_a -> point_b; sampled slightly inside
        # the segment (f=0.01) so shared endpoints don't confuse the test.
        return Point(x=point_a.x * (1 - f) + point_b.x * f,
                     y=point_a.y * (1 - f) + point_b.y * f)

    b_p1_side = _left_of(segment_a, _interpolate(segment_b.p1, segment_b.p2, 0.01))
    b_p2_side = _left_of(segment_a, _interpolate(segment_b.p2, segment_b.p1, 0.01))
    viewer_side_of_a = _left_of(segment_a, relative_point)
    a_p1_side = _left_of(segment_b, _interpolate(segment_a.p1, segment_a.p2, 0.01))
    a_p2_side = _left_of(segment_b, _interpolate(segment_a.p2, segment_a.p1, 0.01))
    viewer_side_of_b = _left_of(segment_b, relative_point)
    # A lies entirely on one side of B's line and the viewer is on the other
    # side of B's line: A is between the viewer and B.
    if a_p1_side == a_p2_side and not (a_p2_side == viewer_side_of_b):
        return True
    # B lies entirely on one side of A's line, on the viewer's side of A.
    if b_p1_side == b_p2_side and b_p2_side == viewer_side_of_a:
        return True
    # All remaining cases (including the intersecting/ambiguous ones) fall
    # through to "not in front", exactly as in the reference algorithm.
    return False
def line_intersection(point1: Point, point2: Point, point3: Point, point4: Point):
    """
    Intersect the infinite line through (point1, point2) with the infinite
    line through (point3, point4) and return the intersection Point,
    parameterized along point1 -> point2.
    """
    denominator = (point4.y - point3.y) * (point2.x - point1.x) - (point4.x - point3.x) * (point2.y - point1.y)
    numerator = (point4.x - point3.x) * (point1.y - point3.y) - (point4.y - point3.y) * (point1.x - point3.x)
    # Parallel, non-coincident lines (denominator 0, numerator nonzero) mean
    # the sweep center sits on the polygon boundary — unsupported.
    assert denominator != 0 or denominator == numerator, "center on polygon, it not support!"
    # Coincident lines degenerate to s = 1 (i.e. point2 itself).
    s = numerator / denominator if denominator != 0 else 1
    return Point(
        point1.x + s * (point2.x - point1.x),
        point1.y + s * (point2.y - point1.y)
    )
def get_triangle_points(origin: Point, angle1: float, angle2: float, segment: Segment):
    """
    Build the edge of the visibility triangle spanned by two rays from
    ``origin`` at ``angle1`` and ``angle2``, clipped against ``segment``
    (or against a synthetic far boundary when no segment blocks the rays).

    :param origin: the viewpoint / light source
    :param angle1: angle of the first ray, radians
    :param angle2: angle of the second ray, radians
    :param segment: nearest blocking Segment, or None if the rays hit nothing
    :return: [begin_point, end_point] — the clipped far edge of the triangle
    """
    p1 = origin
    p2 = Point(origin.x + math.cos(angle1), origin.y + math.sin(angle1))
    p3 = Point(0, 0)
    p4 = Point(0, 0)
    if segment:
        p3.x = segment.p1.x
        p3.y = segment.p1.y
        p4.x = segment.p2.x
        p4.y = segment.p2.y
        # Use the endpoints directly when the rays are (nearly) parallel to the
        # segment — the line intersection below would be numerically unstable.
        # BUGFIX: this check used to run unconditionally, so the segment=None
        # path (prepared in the else branch) always raised AttributeError
        # before reaching line_intersection.
        if abs(segment.p1.angle - segment.p2.angle) < 1e-6:
            return [p4, p3]
    else:
        # No blocking segment: clip against a far boundary well outside
        # any practical room geometry.
        p3.x = origin.x + math.cos(angle1) * 2000
        p3.y = origin.y + math.sin(angle1) * 2000
        p4.x = origin.x + math.cos(angle2) * 2000
        p4.y = origin.y + math.sin(angle2) * 2000
    p_begin = line_intersection(p3, p4, p1, p2)
    # Reuse p2 as the direction point of the second ray.
    p2.x = origin.x + math.cos(angle2)
    p2.y = origin.y + math.sin(angle2)
    p_end = line_intersection(p3, p4, p1, p2)
    return [p_begin, p_end]
def calc_visible_polygon(center: np.array, polygon: np.array = None, segments: np.array = None, show: bool = False):
    """
    Compute the polygon visible from ``center`` given occluding segments,
    using a two-pass angular sweep over segment endpoints.

    :param center: (x, y) viewpoint; asserted (in line_intersection) not to
        lie on the boundary itself
    :param polygon: boundary vertices, shape (n, 2); converted to segments
        when ``segments`` is not given
    :param segments: precomputed segments, shape (m, 2, 2); used as-is
    :param show: when True, render segments and result via visualization()
    :return: visibility polygon vertices as an np.array of shape (k, 2)
    """
    # Derive segments from the closed polygon when not supplied directly.
    if segments is None and polygon is not None:
        segments = polygon_to_segments(polygon)
    origin = Point(x=center[0], y=center[1])
    endpoints = []
    # Wrap raw segment pairs into Segment objects and precompute, per segment,
    # its midpoint distance and the polar angles / begin flags of both ends.
    for s in segments:
        p1 = s[0]
        p2 = s[1]
        segment = Segment(x1=p1[0], y1=p1[1], x2=p2[0], y2=p2[1])
        calculate_end_point_angles(origin, segment)
        set_segment_beginning(segment)
        endpoints.extend([segment.p1, segment.p2])
    # Segments currently intersected by the sweep ray, ordered near-to-far.
    open_segments = []
    output = []
    begin_angle = 0
    # Sweep endpoints in angular order (ties: segment beginnings first).
    endpoints = sorted(endpoints, key=ctk(endpoint_compare))
    # Two passes: the first only warms up open_segments/begin_angle so the
    # state is correct across the -pi/+pi wrap-around; the second emits output.
    for pas in range(2):
        for endpoint in endpoints:
            # Front (nearest) segment before processing this endpoint.
            open_segment = open_segments[0] if len(open_segments) else None
            if endpoint.begins_segment:
                # Insert the new segment, keeping open_segments sorted by
                # "in front of" relative to the origin.
                index = 0
                segment = open_segments[index] if index < len(open_segments) else None
                while segment and segment_in_front_of(endpoint.segment, segment, origin):
                    index += 1
                    segment = open_segments[index] if index < len(open_segments) else None
                if not segment:
                    open_segments.append(endpoint.segment)
                else:
                    open_segments.insert(index, endpoint.segment)
            else:
                if endpoint.segment in open_segments:
                    open_segments.remove(endpoint.segment)
            # If the front segment changed, the angular interval
            # [begin_angle, endpoint.angle] was bounded by the old front
            # segment: clip that wedge against it and emit the two points.
            if open_segment is not (open_segments[0] if len(open_segments) else None):
                if pas == 1 and open_segment:
                    triangle_points = get_triangle_points(origin, begin_angle, endpoint.angle, open_segment)
                    output.extend(triangle_points)
                begin_angle = endpoint.angle
    output_polygon = []
    # Remove duplicate
    # (drop consecutive points that coincide to within 1e-4 in both coords)
    for i, p in enumerate(output):
        q = output[(i + 1) % len(output)]
        if int(p.x * 10000) == int(q.x * 10000) and int(p.y * 10000) == int(q.y * 10000):
            continue
        output_polygon.append([p.x, p.y])
    # Reverse to flip the winding order of the emitted vertices.
    output_polygon.reverse()
    output_polygon = np.array(output_polygon)
    if show:
        visualization(segments, output_polygon, center)
    return output_polygon
def visualization(segments: np.array, output_polygon: np.array, center: np.array, side_l=1000):
    """
    Draw the input segments and the resulting visibility polygon on a board.

    :param segments: original segments, shape (n, 2, 2)
    :param output_polygon: result polygon, shape (m, 2)
    :param center: visibility center
    :param side_l: side length of board
    :return:
    """
    try:
        import cv2
        import matplotlib.pyplot as plt
    except ImportError:
        print("visualization need cv2 and matplotlib")
        return
    # Translate everything so the visibility center sits at the board middle.
    offset = np.array([side_l / 2, side_l / 2]) - center
    segments = segments + offset
    output_polygon = output_polygon + offset
    origin = np.array([side_l / 2, side_l / 2])
    # +0.5 as board (divisor 2.5 leaves a margin so geometry stays inside)
    scale = side_l / 2.5 / np.abs(segments - origin).max()
    board = np.zeros((side_l, side_l))
    for segment in segments:
        segment = (segment - origin) * scale + origin
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement and is what the
        # original alias resolved to.
        segment = segment.astype(int)
        cv2.line(board, tuple(segment[0]), tuple(segment[1]), 0.5, thickness=3)
    board = cv2.drawMarker(board, tuple(origin.astype(int)), 1, thickness=3)
    output_polygon = (output_polygon - origin) * scale + origin
    board = cv2.drawContours(board, [output_polygon.astype(int)], 0, 1, 3)
    board = cv2.drawMarker(board, tuple(origin.astype(int)), 1, thickness=3)
    plt.axis('off')
    plt.imshow(board)
    plt.show()
if __name__ == '__main__':
    # Demo: load one MP3D panorama, project its depth map onto the floor
    # plane and save the rendered floor plan as an image.
    # NOTE(review): numpy is already imported at module level; this re-import
    # is redundant but harmless.
    import numpy as np
    from dataset.mp3d_dataset import MP3DDataset
    from utils.boundary import depth2boundaries
    from utils.conversion import uv2xyz, depth2xyz
    from visualization.boundary import draw_boundaries
    from visualization.floorplan import draw_floorplan, draw_iou_floorplan
    mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train',
                               split_list=[['e9zR4mvMWw7', '2224be23a70a475ea6daa55d4c90a91b']])
    gt = mp3d_dataset.__getitem__(0)
    gt['corners'] = gt['corners'][gt['corners'][..., 0] + gt['corners'][..., 1] != 0]  # Take effective corners
    # [:, ::2] keeps the x and z coordinates — presumably the floor-plane
    # projection of the xyz point cloud; TODO confirm against depth2xyz.
    img = draw_floorplan(depth2xyz(gt['depth'])[:, ::2], fill_color=[1, 1, 1, 0],
                         show=True, scale=1, marker_color=[0, 0, 1, 1], side_l=1024)
    # img = draw_iou_floorplan(gt_xz=uv2xyz(gt['corners'])[..., ::2],
    #                          dt_xz=calc_visible_polygon(np.array([0, 0]), uv2xyz(gt['corners'])[..., ::2]),
    #                          dt_board_color=[0, 0, 1, 0],
    #                          gt_board_color=[0, 0, 1, 0],
    #                          show=True, side_l=1024)
    # Crop the rendered board and save it as an 8-bit RGB(A) image.
    result = Image.fromarray((img[250: -100, 100:-20] * 255).astype(np.uint8))
    result.save('../src/fig/sample3.png')