input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<PASSWORD>,
'enabled': True,
}
]
},
3: {
'as_name': 'Test 3',
'max_prefixes': 0,
'sessions': [
{
'ip_address': '2001:db8::3',
'password': <PASSWORD>,
'enabled': True,
}
]
},
4: {
'as_name': 'Test 4',
'max_prefixes': 0,
'sessions': [
{
'ip_address': '2001:db8::4',
'password': <PASSWORD>,
'enabled': True,
}
]
},
5: {
'as_name': 'Test 5',
'max_prefixes': 0,
'sessions': [
{
'ip_address': '2001:db8::5',
'password': <PASSWORD>,
'enabled': True,
}
]
},
}
},
{'ip_version': 4, 'peers': {}},
]
internet_exchange = InternetExchange.objects.create(name='Test',
slug='test')
for i in range(1, 6):
PeeringSession.objects.create(
autonomous_system=AutonomousSystem.objects.create(
asn=i,
name='Test {}'.format(i)
),
internet_exchange=internet_exchange,
ip_address='2001:db8::{}'.format(i)
)
values = internet_exchange._generate_configuration_variables()
self.assertEqual(values['peering_groups'], expected)
class InternetExchangeViewsTestCase(ViewTestCase):
    """Exercises every InternetExchange view: list, add, CSV import,
    PeeringDB import, bulk delete, details, edit, delete, community
    update and peering-session listing."""

    def setUp(self):
        # One IX with a known name/slug, plus a related AS, session and
        # community used by the sub-view tests below.
        super(InternetExchangeViewsTestCase, self).setUp()
        self.model = InternetExchange
        self.name = '<NAME>'
        self.slug = 'test-ix'
        self.ix = InternetExchange.objects.create(name=self.name,
                                                  slug=self.slug)
        self.asn = AutonomousSystem.objects.create(asn=64500, name='Test')
        self.session = PeeringSession.objects.create(
            internet_exchange=self.ix, autonomous_system=self.asn,
            ip_address='2001:db8::1')
        self.community = Community.objects.create(name='Test', value='64500:1')

    def test_ix_list_view(self):
        # The list view must be reachable without authentication.
        self.get_request('peering:ix_list')

    def test_ix_add_view(self):
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_add', expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:ix_add', contains='Create')
        # Try to create an object with valid data
        ix_to_create = {
            'name': 'ix-created',
            'slug': 'ix-created',
        }
        self.post_request('peering:ix_add', data=ix_to_create)
        self.does_object_exist(ix_to_create)
        # Try to create an object with invalid data (missing slug)
        ix_not_to_create = {
            'name': 'ix-notcreated',
        }
        self.post_request('peering:ix_add', data=ix_not_to_create)
        self.does_object_not_exist(ix_not_to_create)

    def test_ix_import_view(self):
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_import', expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:ix_import', contains='Import')
        # Try to import an object with valid data
        ix_to_import = {
            'csv': '''name,slug,ipv6_address,ipv4_address,configuration_template,router,check_bgp_session_states,comment
ix-created,ix-created,,,,,,''',
        }
        self.post_request('peering:ix_import', data=ix_to_import)
        self.does_object_exist({'slug': 'ix-created'})
        # Try to create an object with invalid data (empty slug column)
        ix_to_import = {
            'csv': '''name,slug,ipv6_address,ipv4_address,configuration_template,router,check_bgp_session_states,comment
ix-not-created,,,,,,,''',
        }
        self.post_request('peering:ix_import', data=ix_to_import)
        # Bug fix: slug was misspelled 'ix-not-reated', which made this
        # nonexistence check pass regardless of the import outcome.
        self.does_object_not_exist({'slug': 'ix-not-created'})

    def test_ix_peeringdb_import_view(self):
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_peeringdb_import',
                         expected_status_code=302)

    def test_ix_bulk_delete_view(self):
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_bulk_delete', expected_status_code=302)

    def test_ix_details_view(self):
        # No slug given, view should not work
        with self.assertRaises(NoReverseMatch):
            self.get_request('peering:ix_details')
        # Using a wrong slug, status should be 404 not found
        self.get_request('peering:ix_details', params={'slug': 'not-found'},
                         expected_status_code=404)
        # Using an existing slug, status should be 200 and the name of the IX
        # should be somewhere in the HTML code
        self.get_request('peering:ix_details', params={'slug': self.slug},
                         contains=self.name)

    def test_ix_edit_view(self):
        # No slug given, view should not work
        with self.assertRaises(NoReverseMatch):
            self.get_request('peering:ix_edit')
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_edit', params={'slug': self.slug},
                         expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:ix_edit', params={'slug': self.slug},
                         contains='Update')
        # Still authenticated, wrong slug should be 404 not found
        self.get_request('peering:ix_edit', params={'slug': 'not-found'},
                         expected_status_code=404)

    def test_ix_delete_view(self):
        # No slug given, view should not work
        with self.assertRaises(NoReverseMatch):
            self.get_request('peering:ix_delete')
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_delete', params={'slug': self.slug},
                         expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:ix_delete', params={'slug': self.slug},
                         contains='Confirm')
        # Still authenticated, wrong slug should be 404 not found
        self.get_request('peering:ix_delete', params={'slug': 'not-found'},
                         expected_status_code=404)

    def test_ix_update_communities_view(self):
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:ix_update_communities',
                         params={'slug': self.slug}, expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:ix_update_communities',
                         params={'slug': self.slug})
        # IX not found
        self.get_request('peering:ix_update_communities',
                         params={'slug': 'not-found'},
                         expected_status_code=404)
        # Check if adding a community works
        self.assertFalse(self.ix.communities.all())
        self.post_request('peering:ix_update_communities',
                          params={'slug': self.slug},
                          data={'communities': self.community.pk})
        self.assertTrue(self.ix.communities.all())

    def test_ix_peering_sessions_view(self):
        # Not logged in, 200 OK but must not contain the add-session button
        self.get_request('peering:ix_peering_sessions',
                         params={'slug': self.slug},
                         notcontains='Add a Peering Session')
        # Authenticate and retry, 200 OK and should contain the add-session
        # button
        self.authenticate_user()
        self.get_request('peering:ix_peering_sessions',
                         params={'slug': self.slug},
                         contains='Add a Peering Session')
        # IX not found
        self.get_request('peering:ix_peering_sessions',
                         params={'slug': 'not-found'},
                         expected_status_code=404)
class PeeringSessionTestCase(TestCase):
    def test_does_exist(self):
        """does_exist() must return a session only when exactly one matches
        the given filters, and None otherwise (no match or several matches)."""
        # No session, must expect None
        self.assertIsNone(PeeringSession.does_exist())
        # Prepare objects and create a peering session
        autonomous_system0 = AutonomousSystem.objects.create(asn=64500,
                                                             name='Test')
        internet_exchange0 = InternetExchange.objects.create(name='Test0',
                                                             slug='test0')
        peering_session0 = PeeringSession.objects.create(
            autonomous_system=autonomous_system0,
            internet_exchange=internet_exchange0,
            ip_address='2001:db8::1')
        # Make sure that the session has been created
        self.assertIsNotNone(peering_session0)
        # Make sure that the session is returned by calling does_exist()
        # without arguments (only one session in the database)
        self.assertIsNotNone(PeeringSession.does_exist())
        # Make sure we can retrieve the session with its IP
        self.assertEqual(peering_session0,
                         PeeringSession.does_exist(ip_address='2001:db8::1'))
        # Make sure we can retrieve the session with its IX
        self.assertEqual(peering_session0,
                         PeeringSession.does_exist(
                             internet_exchange=internet_exchange0))
        # Make sure we can retrieve the session with its AS
        self.assertEqual(peering_session0,
                         PeeringSession.does_exist(
                             autonomous_system=autonomous_system0))
        # Create another peering session (same AS and IX, different IP)
        peering_session1 = PeeringSession.objects.create(
            autonomous_system=autonomous_system0,
            internet_exchange=internet_exchange0,
            ip_address='192.168.1.1')
        # Make sure that the session has been created
        self.assertIsNotNone(peering_session1)
        # More than one session now matches no-filter, must expect None
        self.assertIsNone(PeeringSession.does_exist())
        # Make sure we can retrieve the new session with its IP
        self.assertEqual(peering_session1,
                         PeeringSession.does_exist(ip_address='192.168.1.1'))
        # Make sure it returns None when using a field that the two sessions
        # have in common
        self.assertIsNone(PeeringSession.does_exist(
            internet_exchange=internet_exchange0))
        # Create a new IX
        internet_exchange1 = InternetExchange.objects.create(name='Test1',
                                                             slug='test1')
        # Make sure it returns None when there is no session for that IX
        self.assertIsNone(PeeringSession.does_exist(
            internet_exchange=internet_exchange1))
        # Create a new session reusing an IP already used in another IX
        peering_session2 = PeeringSession.objects.create(
            autonomous_system=autonomous_system0,
            internet_exchange=internet_exchange1,
            ip_address='2001:db8::1')
        # Make sure that the session has been created
        self.assertIsNotNone(peering_session2)
        # Make sure we have None, because two sessions will be found
        self.assertIsNone(PeeringSession.does_exist(ip_address='2001:db8::1'))
        # But if we narrow the search with the IX we must have the proper
        # session
        self.assertEqual(peering_session2, PeeringSession.does_exist(
            ip_address='2001:db8::1', internet_exchange=internet_exchange1))
class PeeringSessionViewsTestCase(ViewTestCase):
    """Exercises the PeeringSession views: list, details, edit, delete and
    bulk delete."""

    def setUp(self):
        # One AS/IX pair with a single session (pk=1) shared by all tests.
        super(PeeringSessionViewsTestCase, self).setUp()
        self.model = PeeringSession
        self.ip_address = '2001:db8::64:501'
        self.as64500 = AutonomousSystem.objects.create(asn=64500, name='Test')
        self.ix = InternetExchange.objects.create(name='Test', slug='test')
        self.peering_session = PeeringSession.objects.create(
            autonomous_system=self.as64500, internet_exchange=self.ix,
            ip_address=self.ip_address)

    def test_peering_session_list_view(self):
        # The list view must be reachable without authentication.
        self.get_request('peering:peering_session_list')

    def test_peering_session_details_view(self):
        # No PK given, view should not work
        with self.assertRaises(NoReverseMatch):
            self.get_request('peering:peering_session_details')
        # Using a wrong PK, status should be 404 not found
        self.get_request('peering:peering_session_details', params={'pk': 2},
                         expected_status_code=404)
        # Using an existing PK, status should be 200 and the IP address
        # should be somewhere in the HTML code
        self.get_request('peering:peering_session_details', params={'pk': 1},
                         contains=self.ip_address)

    def test_peering_session_edit_view(self):
        # No PK given, view should not work
        with self.assertRaises(NoReverseMatch):
            self.get_request('peering:peering_session_edit')
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:peering_session_edit', params={'pk': 1},
                         expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:peering_session_edit', params={'pk': 1},
                         contains='Update')
        # Still authenticated, wrong PK should be 404 not found
        self.get_request('peering:peering_session_edit', params={'pk': 2},
                         expected_status_code=404)

    def test_peering_session_delete_view(self):
        # No PK given, view should not work
        with self.assertRaises(NoReverseMatch):
            self.get_request('peering:peering_session_delete')
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:peering_session_delete', params={'pk': 1},
                         expected_status_code=302)
        # Authenticate and retry, should be OK
        self.authenticate_user()
        self.get_request('peering:peering_session_delete', params={'pk': 1},
                         contains='Confirm')
        # Still authenticated, wrong PK should be 404 not found
        self.get_request('peering:peering_session_delete', params={'pk': 2},
                         expected_status_code=404)

    def test_peering_session_bulk_delete_view(self):
        # Renamed from test_router_bulk_delete_view: this exercises the
        # peering session bulk delete view, not the router one.
        # Not logged in, no right to access the view, should be redirected
        self.get_request('peering:peering_session_bulk_delete',
                         expected_status_code=302)
class RouterTestCase(TestCase):
    def test_napalm_bgp_neighbors_to_peer_list(self):
        """_napalm_bgp_neighbors_to_peer_list() must flatten NAPALM BGP
        neighbor dicts (one entry per VRF) into a peer list whose length
        matches the expected count for each fixture below."""
        # Expected peer-list length for each NAPALM dict below.
        expected = [0, 0, 1, 2, 3, 2, 2]
        napalm_dicts_list = [
            # If None or empty dict passed, returned value must be empty list
            None,
            {},
            # List size must match peers number including VRFs
            {'global': {'peers': {'192.168.0.1': {'remote_as': 64500}}}},
            {'global': {'peers': {'192.168.0.1': {'remote_as': 64500}}},
             'vrf': {'peers': {'192.168.1.1': {'remote_as': 64501}}}},
            {'global': {'peers': {'192.168.0.1': {'remote_as': 64500}}},
             'vrf0': {'peers': {'192.168.1.1': {'remote_as': 64501}}},
             'vrf1': {'peers': {'192.168.2.1': {'remote_as': 64502}}}},
            # If peer does not have remote_as field, it must be ignored
            {'global': {'peers': {'192.168.0.1': {'remote_as': 64500}}},
             'vrf0': {'peers': {'192.168.1.1': {'remote_as': 64501}}},
             'vrf1': {'peers': {'192.168.2.1': {'not_valid': 64502}}}},
            # If an IP address appears more than one time, only the first
            # occurrence must be retained
            {'global': {'peers': {'192.168.0.1': {'remote_as': 64500}}},
             'vrf0': {'peers': {'192.168.1.1': {'remote_as': 64501}}},
             'vrf1': {'peers': {'192.168.1.1': {'remote_as': 64502}}}},
        ]
        # Create a router
        router = Router.objects.create(name='test',
                                       hostname='test.example.com',
                                       platform=PLATFORM_JUNOS)
        # Pair inputs with expectations instead of indexing by hand
        # (replaces the non-idiomatic `for i in range(len(expected))`).
        for expected_count, napalm_dict in zip(expected, napalm_dicts_list):
            self.assertEqual(
                expected_count,
                len(router._napalm_bgp_neighbors_to_peer_list(napalm_dict)))
class RouterViewsTestCase(ViewTestCase):
    def setUp(self):
        """Create a single router used by the view tests of this class."""
        super(RouterViewsTestCase, self).setUp()
        self.model = Router
        self.name = 'test.router'
        self.hostname = 'test.router.example.org'
        self.router = Router.objects.create(name=self.name,
                                            hostname=self.hostname)
    def test_router_list_view(self):
        # The list view must be reachable without authentication.
        self.get_request('peering:router_list')
def test_router_add_view(self):
# Not logged in, no right to access the view, should be redirected
self.get_request('peering:router_add', expected_status_code=302)
# Authenticate and retry, should be OK
self.authenticate_user()
self.get_request('peering:router_add', contains='Create')
# Try to create an object with valid data
router_to_create = {
'name': 'router.created',
'hostname': 'router.created.example.com',
}
self.post_request('peering:router_add', data=router_to_create)
self.does_object_exist(router_to_create)
# Try to create an object with invalid data
router_not_to_create = {
'name': 'router.notcreated',
}
self.post_request('peering:router_add', data=router_not_to_create)
self.does_object_not_exist(router_not_to_create)
def test_router_import_view(self):
# Not logged in, no right to access the view, should | |
= tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count
negative_indices = tf.random_shuffle(negative_indices)[:negative_count]
# Gather selected ROIs
positive_rois = tf.gather(proposals, positive_indices)
negative_rois = tf.gather(proposals, negative_indices)
# Assign positive ROIs to GT boxes.
positive_overlaps = tf.gather(overlaps, positive_indices)
roi_gt_box_assignment = tf.argmax(positive_overlaps, axis=1)
roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)
roi_gt_poses = tf.gather(gt_poses, roi_gt_box_assignment)
roi_gt_class_ids=[]
for i in range(config.NUM_FEATURES-2):
roi_gt_class_ids.append(tf.gather(gt_class_ids[i], roi_gt_box_assignment))
roi_gt_class_ids.append(tf.gather(tf.gather(gt_class_ids[5], roi_gt_box_assignment,axis=0),roi_gt_box_assignment,axis=1))
# Compute bbox refinement for positive ROIs
deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)
deltas /= config.BBOX_STD_DEV
# Assign positive ROIs to GT masks
# Permute masks to [N, height, width, 1]
transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)
# Pick the right mask for each ROI
roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)
# Compute mask targets
boxes = positive_rois
if config.USE_MINI_MASK:
# Transform ROI corrdinates from normalized image space
# to normalized mini-mask space.
y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)
gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)
gt_h = gt_y2 - gt_y1
gt_w = gt_x2 - gt_x1
y1 = (y1 - gt_y1) / gt_h
x1 = (x1 - gt_x1) / gt_w
y2 = (y2 - gt_y1) / gt_h
x2 = (x2 - gt_x1) / gt_w
boxes = tf.concat([y1, x1, y2, x2], 1)
box_ids = tf.range(0, tf.shape(roi_masks)[0])
masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,
box_ids,
config.MASK_SHAPE)
# Remove the extra dimension from masks.
masks = tf.squeeze(masks, axis=3)
# Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with
# binary cross entropy loss.
masks = tf.round(masks)
# Append negative ROIs and pad bbox deltas and masks that
# are not used for negative ROIs with zeros.
rois = tf.concat([positive_rois, negative_rois], axis=0)
N = tf.shape(negative_rois)[0]
P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)
rois = tf.pad(rois, [(0, P), (0, 0)])
roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])
roi_gt_poses = tf.pad(roi_gt_poses, [(0, N + P), (0, 0)])
for i in range(config.NUM_FEATURES-2):
roi_gt_class_ids[i] = tf.pad(roi_gt_class_ids[i], [(0, N + P)])
roi_gt_class_ids[5] = tf.pad(roi_gt_class_ids[5], [(0, N + P),(0, N + P)])
deltas = tf.pad(deltas, [(0, N + P), (0, 0)])
masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])
return [rois]+roi_gt_class_ids+[deltas, masks,roi_gt_poses]
class DetectionTargetLayer(KE.Layer):
    """Subsamples proposals and generates target box refinement, class_ids,
    and masks for each.

    Inputs:
    proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might
        be zero padded if there are not enough proposals.
    gt_class_ids: [batch,NUM_FEATURES, MAX_GT_INSTANCES] Integer class IDs/output feature.
    relations: [batch,MAX_GT_INSTANCES,MAX_GT_INSTANCES]
    gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized
        coordinates.
    gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type
    gt_poses: [batch, MAX_GT_INSTANCES, (tetax,tetay,tetaz,x,y,z)] in radians and cm

    Returns: Target ROIs and corresponding class IDs, bounding box shifts,
    and masks.
    rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized
        coordinates
    target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    relations: [batch,TRAIN_ROIS_PER_IMAGE,TRAIN_ROIS_PER_IMAGE]
    target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, NUM_CLASSES,
        (dy, dx, log(dh), log(dw), class_id)]
        Class-specific bbox refinements.
    target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width)
        Masks cropped to bbox boundaries and resized to neural
        network output size.
    gt_poses: [batch, TRAIN_ROIS_PER_IMAGE, (tetax,tetay,tetaz,x,y,z)] in radians and cm

    Note: Returned arrays might be zero padded if not enough target ROIs.
    """

    def __init__(self, config, **kwargs):
        super(DetectionTargetLayer, self).__init__(**kwargs)
        # Model configuration (ROI counts, mask shape, NUM_FEATURES, ...).
        self.config = config

    def call(self, inputs):
        # Positional input layout: [proposals, 6 class-id tensors (5 object
        # features + the NxN relation feature), boxes, masks, poses].
        proposals = inputs[0]
        gt_class_ids = inputs[1:7]
        gt_boxes = inputs[7]
        gt_masks = inputs[8]
        gt_poses = inputs[9]
        # Slice the batch and run a graph for each slice
        # TODO: Rename target_bbox to target_deltas for clarity
        outputs = utils.batch_slice(
            [proposals]+ gt_class_ids+[ gt_boxes, gt_masks,gt_poses],
            lambda x: detection_targets_graph(x, self.config),self.config.IMAGES_PER_GPU,parallel_processing=True)
        return outputs

    def compute_output_shape(self, input_shape):
        # One output shape per tensor returned by detection_targets_graph,
        # in the same order.
        return [
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # rois
            (None, 1),  # class_ids/category
            (None, 1),  # class_ids/color
            (None, 1),  # class_ids/shape
            (None, 1),  # class_ids/material
            (None, 1),  # class_ids/openability
            (None,self.config.TRAIN_ROIS_PER_IMAGE, self.config.TRAIN_ROIS_PER_IMAGE),  # relations
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 4),  # deltas
            (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],
             self.config.MASK_SHAPE[1]),  # masks
            (None, self.config.TRAIN_ROIS_PER_IMAGE, 6),  # poses
        ]

    def compute_mask(self, inputs, mask=None):
        # No Keras masking is propagated for any of the ten outputs.
        return [None,None, None, None, None, None, None, None,None,None]
############################################################
# Detection Layer
############################################################
def clip_to_window(window, boxes):
    """Clamp box coordinates (in place) to an image window.

    window: (y1, x1, y2, x2). The window in the image we want to clip to.
    boxes: [N, (y1, x1, y2, x2)]
    Returns the same array, mutated.
    """
    # Columns 0 and 2 hold y coordinates, columns 1 and 3 hold x
    # coordinates; clamp each against the matching window bounds.
    for col in range(4):
        if col % 2 == 0:
            low, high = window[0], window[2]
        else:
            low, high = window[1], window[3]
        boxes[:, col] = np.maximum(np.minimum(boxes[:, col], high), low)
    return boxes
def refine_detections_graph(Inputs, config):
    """Refine classified proposals, filter overlaps and return final
    detections.

    Inputs (positional list):
        rois: [N, (y1, x1, y2, x2)] in normalized coordinates
        probs: 5 tensors [N, num_classes] of per-feature class probabilities
        deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific
            bounding box deltas.
        window: (y1, x1, y2, x2) in image coordinates. The part of the image
            that contains the image excluding the padding.
        poses: [N, num_classes, (tx,ty,tz,x,y,z)]. Class-specific
            pose estimation.

    Returns [detections, best_indices] where detections is shaped
    [N, (y1, x1, y2, x2, class_ids..., scores..., poses)] with coordinates
    in image domain, and best_indices holds the kept ROI indices.

    Note: the leftover debug print() calls of the original were removed.
    """
    rois = Inputs[0]
    probs = Inputs[1:6]
    deltas = Inputs[6]
    window = Inputs[7]
    poses = Inputs[8]
    # Initialize best indices (indices of regions with the best scores).
    best_indices = KL.Lambda(lambda x: tf.range(config.POST_NMS_ROIS_INFERENCE))(rois)
    # Class IDs per ROI per feature
    class_ids = []
    rel_ids = []
    indices = []
    class_scores = []
    # Object-description features (all features except the two relation ones)
    for i in range(config.NUM_FEATURES - 2):
        class_ids.append(tf.argmax(probs[i], axis=1, output_type=tf.int32))
        # Class probability of the top class of each ROI
        indices.append(tf.stack([tf.range(probs[i].shape[0]), class_ids[i]], axis=1))
        class_scores.append(tf.gather_nd(probs[i], indices[i]))
    # Class-specific bounding box deltas (driven by feature 0, the category)
    deltas_specific = tf.gather_nd(deltas, indices[0])
    # Class-specific poses
    poses_specific = tf.gather_nd(poses, indices[0])
    # Apply bounding box deltas
    # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates
    refined_rois = apply_box_deltas_graph(
        rois, deltas_specific * config.BBOX_STD_DEV)
    # Convert coordinates to image domain
    # TODO: better to keep them normalized until later
    height, width = config.IMAGE_SHAPE[:2]
    refined_rois *= tf.constant([height, width, height, width], dtype=tf.float32)
    # Clip boxes to image window
    refined_rois = clip_boxes_graph(refined_rois, window)
    # Convert poses to image domain
    poses_specific *= tf.constant(config.MAX_OBJECT_POSES, dtype=tf.float32)
    # Clip poses to image window
    orientation_normalizer = tf.constant([0.0, config.ANGLE_NORMALIZED_FACTOR], dtype='float32', shape=[2])
    depth_normalizer = tf.constant([0.0, config.MAX_OBJECT_POSES[len(config.MAX_OBJECT_POSES) - 1]], dtype='float32', shape=[2])
    poses_specific = clip_poses_graph(poses_specific, tf.concat([orientation_normalizer, window, depth_normalizer], axis=0))
    # Round and cast to int since we're dealing with pixels now
    refined_rois = tf.to_int32(tf.rint(refined_rois))
    # TODO: Filter out boxes with zero area
    # Filter out background boxes (category class 0)
    keep = tf.where(class_ids[0] > 0)[:, 0]
    # Filter out low confidence boxes
    if config.DETECTION_MIN_CONFIDENCE:
        conf_keep = tf.where(class_scores[0] >= config.DETECTION_MIN_CONFIDENCE)[:, 0]
        keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                        tf.expand_dims(conf_keep, 0))
        keep = tf.sparse_tensor_to_dense(keep)[0]
    # Apply per-class NMS
    # 1. Prepare variables
    pre_nms_class_ids = tf.gather(class_ids[0], keep)
    pre_nms_scores = tf.gather(class_scores[0], keep)
    pre_nms_rois = tf.gather(refined_rois, keep)
    unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]

    def nms_keep_map(class_id):
        """Apply Non-Maximum Suppression on ROIs of the given class."""
        # Indices of ROIs of the given class
        ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]
        # Apply NMS
        class_keep = tf.image.non_max_suppression(
            tf.to_float(tf.gather(pre_nms_rois, ixs)),
            tf.gather(pre_nms_scores, ixs),
            max_output_size=config.DETECTION_MAX_INSTANCES,
            iou_threshold=config.DETECTION_NMS_THRESHOLD)
        # Map indices back into the `keep` index space
        class_keep = tf.gather(keep, tf.gather(ixs, class_keep))
        # Pad with -1 so returned tensors have the same shape
        gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]
        class_keep = tf.pad(class_keep, [(0, gap)],
                            mode='CONSTANT', constant_values=-1)
        # Set shape so map_fn() can infer result shape
        class_keep.set_shape([config.DETECTION_MAX_INSTANCES])
        return class_keep

    # 2. Map over class IDs
    nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,
                         dtype=tf.int64)
    # 3. Merge results into one list, and remove -1 padding
    nms_keep = tf.reshape(nms_keep, [-1])
    nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])
    # 4. Compute intersection between keep and nms_keep
    keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),
                                    tf.expand_dims(nms_keep, 0))
    keep = tf.sparse_tensor_to_dense(keep)[0]
    # Keep top detections
    roi_count = config.DETECTION_MAX_INSTANCES
    class_scores_keep = tf.gather(class_scores[0], keep)
    num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)
    top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]
    keep = tf.gather(keep, top_ids)
    # Arrange output as [N, (y1, x1, y2, x2, class_ids..., scores..., poses)]
    # Coordinates are in image domain.
    detections = tf.concat([
        tf.to_float(tf.gather(refined_rois, keep)),
        tf.to_float(tf.gather(class_ids[0], keep))[..., tf.newaxis],
        tf.to_float(tf.gather(class_ids[1], keep))[..., tf.newaxis],
        tf.to_float(tf.gather(class_ids[2], keep))[..., tf.newaxis],
        tf.to_float(tf.gather(class_ids[3], keep))[..., tf.newaxis],
        tf.to_float(tf.gather(class_ids[4], keep))[..., tf.newaxis],
        tf.gather(class_scores[0], keep)[..., tf.newaxis],
        tf.gather(class_scores[1], keep)[..., tf.newaxis],
        tf.gather(class_scores[2], keep)[..., tf.newaxis],
        tf.gather(class_scores[3], keep)[..., tf.newaxis],
        tf.gather(class_scores[4], keep)[..., tf.newaxis],
        tf.gather(poses_specific, keep)
    ], axis=1)
    best_indices = tf.gather(best_indices, keep)
    # Pad with zeros if detections < DETECTION_MAX_INSTANCES
    gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]
    detections = tf.pad(detections, [(0, gap), (0, 0)], "CONSTANT")
    best_indices = tf.pad(best_indices, [(0, gap)], "CONSTANT")
    return [detections, best_indices]
class DetectionLayer(KE.Layer):
"""Takes classified proposal boxes and their bounding box deltas and
returns the final detection boxes.
Returns:
[batch, num_detections, (y1, x1, y2, x2, class_ids, class_score,poses)],[batch,num_detections,num_detections,probs,num_channels]where
coordinates are in image domain
"""
    def __init__(self, config=None, **kwargs):
        """Store the model configuration; Keras layer kwargs pass through."""
        super(DetectionLayer, self).__init__(**kwargs)
        self.config = config
def call(self, inputs):
rois = inputs[0]
robotvqa_class = inputs[1:6]
mrcnn_bbox = inputs[6]
image_meta = inputs[7]
robotvqa_poses = inputs[8]
# Run detection refinement graph | |
>>> multi_dot((a, b, c))
tensor([[ 26, 49],
[ 80, 148]])
>>> multi_dot((a.to(torch.float), torch.empty(3, 0), torch.empty(0, 2)))
tensor([[0., 0.],
[0., 0.]])
""")
svd = _add_docstr(_linalg.linalg_svd, r"""
linalg.svd(A, full_matrices=True, *, out=None) -> (Tensor, Tensor, Tensor)
Computes the singular value decomposition (SVD) of a matrix.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **full SVD** of a matrix
:math:`A \in \mathbb{K}^{m \times n}`, if `k = min(m,n)`, is defined as
.. math::
A = U \operatorname{diag}(S) V^{\text{H}}
\mathrlap{\qquad U \in \mathbb{K}^{m \times m}, S \in \mathbb{R}^k, V \in \mathbb{K}^{n \times n}}
where :math:`\operatorname{diag}(S) \in \mathbb{K}^{m \times n}`,
:math:`V^{\text{H}}` is the conjugate transpose when :math:`V` is complex, and the transpose when :math:`V` is real-valued.
The matrices :math:`U`, :math:`V` (and thus :math:`V^{\text{H}}`) are orthogonal in the real case, and unitary in the complex case.
When `m > n` (resp. `m < n`) we can drop the last `m - n` (resp. `n - m`) columns of `U` (resp. `V`) to form the **reduced SVD**:
.. math::
A = U \operatorname{diag}(S) V^{\text{H}}
\mathrlap{\qquad U \in \mathbb{K}^{m \times k}, S \in \mathbb{R}^k, V \in \mathbb{K}^{k \times n}}
where :math:`\operatorname{diag}(S) \in \mathbb{K}^{k \times k}`.
In this case, :math:`U` and :math:`V` also have orthonormal columns.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The returned decomposition is a named tuple `(U, S, Vh)`
which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
The singular values are returned in descending order.
The parameter :attr:`full_matrices` chooses between the full (default) and reduced SVD.
Differences with `numpy.linalg.svd`:
- Unlike `numpy.linalg.svd`, this function always returns a tuple of three tensors
and it doesn't support `compute_uv` argument.
Please use :func:`torch.linalg.svdvals`, which computes only the singular values,
instead of `compute_uv=False`.
.. note:: When :attr:`full_matrices`\ `= True`, the gradients with respect to `U[..., :, min(m, n):]`
and `Vh[..., min(m, n):, :]` will be ignored, as those vectors can be arbitrary bases
of the corresponding subspaces.
.. warning:: The returned tensors `U` and `V` are not unique, nor are they continuous with
respect to :attr:`A`.
Due to this lack of uniqueness, different hardware and software may compute
different singular vectors.
This non-uniqueness is caused by the fact that multiplying any pair of singular
vectors :math:`u_k, v_k` by `-1` in the real case or by
:math:`e^{i \phi}, \phi \in \mathbb{R}` in the complex case produces another two
valid singular vectors of the matrix.
This non-uniqueness problem is even worse when the matrix has repeated singular values.
In this case, one may multiply the associated singular vectors of `U` and `V` spanning
the subspace by a rotation matrix and `the resulting vectors will span the same subspace`_.
.. warning:: Gradients computed using `U` or `Vh` will only be finite when
:attr:`A` does not have zero as a singular value or repeated singular values.
Furthermore, if the distance between any two singular values is close to zero,
the gradient will be numerically unstable, as it depends on the singular values
:math:`\sigma_i` through the computation of
:math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`.
The gradient will also be numerically unstable when :attr:`A` has small singular
values, as it also depends on the computaiton of :math:`\frac{1}{\sigma_i}`.
.. seealso::
:func:`torch.linalg.svdvals` computes only the singular values.
Unlike :func:`torch.linalg.svd`, the gradients of :func:`~svdvals` are always
numerically stable.
:func:`torch.linalg.eig` for a function that computes another type of spectral
decomposition of a matrix. The eigendecomposition works just on on square matrices.
:func:`torch.linalg.eigh` for a (faster) function that computes the eigenvalue decomposition
for Hermitian and symmetric matrices.
:func:`torch.linalg.qr` for another (much faster) decomposition that works on general
matrices.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
full_matrices (bool, optional): controls whether to compute the full or reduced
SVD, and consequently,
the shape of the returned tensors
`U` and `Vh`. Default: `True`.
Keyword args:
out (tuple, optional): output tuple of three tensors. Ignored if `None`.
Returns:
A named tuple `(U, S, Vh)` which corresponds to :math:`U`, :math:`S`, :math:`V^{\text{H}}` above.
`S` will always be real-valued, even when :attr:`A` is complex.
It will also be ordered in descending order.
`U` and `Vh` will have the same dtype as :attr:`A`. The left / right singular vectors will be given by
the columns of `U` and the rows of `Vh` respectively.
Examples::
>>> a = torch.randn(5, 3)
>>> a
tensor([[-0.3357, -0.2987, -1.1096],
[ 1.4894, 1.0016, -0.4572],
[-1.9401, 0.7437, 2.0968],
[ 0.1515, 1.3812, 1.5491],
[-1.8489, -0.5907, -2.5673]])
>>>
>>> # reconstruction in the full_matrices=False case
>>> u, s, vh = torch.linalg.svd(a, full_matrices=False)
>>> u.shape, s.shape, vh.shape
(torch.Size([5, 3]), torch.Size([3]), torch.Size([3, 3]))
>>> torch.dist(a, u @ torch.diag(s) @ vh)
tensor(1.0486e-06)
>>>
>>> # reconstruction in the full_matrices=True case
>>> u, s, vh = torch.linalg.svd(a)
>>> u.shape, s.shape, vh.shape
(torch.Size([5, 5]), torch.Size([3]), torch.Size([3, 3]))
>>> torch.dist(a, u[:, :3] @ torch.diag(s) @ vh)
tensor(1.0486e-06)
>>>
>>> # extra dimensions
>>> a_big = torch.randn(7, 5, 3)
>>> u, s, vh = torch.linalg.svd(a_big, full_matrices=False)
>>> torch.dist(a_big, u @ torch.diag_embed(s) @ vh)
tensor(3.0957e-06)
.. _the resulting vectors will span the same subspace:
https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD
""")
# Attach the public docstring to the C-implemented torch.linalg.svdvals binding.
# The raw-string segments below are user-facing documentation text and are kept verbatim.
svdvals = _add_docstr(_linalg.linalg_svdvals, r"""
linalg.svdvals(A, *, out=None) -> Tensor
Computes the singular values of a matrix.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
The singular values are returned in descending order.
.. note:: This function is equivalent to NumPy's `linalg.svd(A, compute_uv=False)`.
""" + fr"""
.. note:: {common_notes["sync_note"]}
""" + r"""
.. seealso::
:func:`torch.linalg.svd` computes the full singular value decomposition.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions.
Keyword args:
out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`.
Returns:
A real-valued tensor, even when :attr:`A` is complex.
Examples::
>>> import torch
>>> a = torch.randn(5, 3)
>>> a
tensor([[-1.3490, -0.1723, 0.7730],
[-1.6118, -0.3385, -0.6490],
[ 0.0908, 2.0704, 0.5647],
[-0.6451, 0.1911, 0.7353],
[ 0.5247, 0.5160, 0.5110]])
>>> s = torch.linalg.svdvals(a)
>>> s
tensor([2.5139, 2.1087, 1.1066])
""")
cond = _add_docstr(_linalg.linalg_cond, r"""
linalg.cond(A, p=None, *, out=None) -> Tensor
Computes the condition number of a matrix with respect to a matrix norm.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`,
the **condition number** :math:`\kappa` of a matrix
:math:`A \in \mathbb{K}^{n \times n}` is defined as
.. math::
\kappa(A) = \|A\|_p\|A^{-1}\|_p
The condition number of :attr:`A` measures the numerical stability of the linear system `AX = B`
with respect to a matrix norm.
Supports input of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if :attr:`A` is a batch of matrices then
the output has the same batch dimensions.
:attr:`p` defines the matrix norm that is computed. The following norms are supported:
========= =================================
:attr:`p` matrix norm
========= =================================
`None` `2`-norm (largest singular value)
`'fro'` Frobenius norm
`'nuc'` nuclear norm
`inf` `max(sum(abs(x), dim=1))`
`-inf` `min(sum(abs(x), dim=1))`
`1` `max(sum(abs(x), dim=0))`
`-1` `min(sum(abs(x), dim=0))`
`2` largest singular value
`-2` smallest singular value
========= =================================
where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
For :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`, this function uses
:func:`torch.linalg.norm` and :func:`torch.linalg.inv`.
As such, in this case, the matrix (or every matrix in the batch) :attr:`A` has to be square
and invertible.
For :attr:`p` in `(2, -2)`, this function can be computed in terms of the singular values
:math:`\sigma_1 \geq \ldots \geq \sigma_n`
.. math::
\kappa_2(A) = \frac{\sigma_1}{\sigma_n}\qquad \kappa_{-2}(A) = \frac{\sigma_n}{\sigma_1}
In these cases, it is computed using :func:`torch.linalg.svd`. For these norms, the matrix
(or every matrix in the batch) :attr:`A` may have any shape.
.. note :: When inputs are on a CUDA device, this function synchronizes that device with the CPU
if :attr:`p` is one of `('fro', 'nuc', inf, -inf, 1, -1)`.
.. seealso::
:func:`torch.linalg.solve` for a function that solves linear systems of square matrices.
:func:`torch.linalg.lstsq` for a function that solves linear systems of general matrices.
Args:
A (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions
for :attr:`p` in `(2, -2)`, and of shape `(*, n, n)` where every matrix
is invertible for :attr:`p` in `('fro', 'nuc', inf, -inf, 1, -1)`.
p (int, inf, -inf, 'fro', 'nuc', optional):
the type of the | |
# Repository: kstough/Pyecobee
import json
import logging
import numbers
import pprint
import pytz
import requests
import sys
from datetime import date
from datetime import datetime
from datetime import timedelta
import six
from six import reraise as raise_
from . import utilities
from .ecobee_object import EcobeeObject
from .enumerations import AckType
from .enumerations import FanMode
from .enumerations import HoldType
from .enumerations import PlugState
from .enumerations import Scope
from .enumerations import SelectionType
from .exceptions import EcobeeApiException
from .exceptions import EcobeeAuthorizationException
from .exceptions import EcobeeException
from .exceptions import EcobeeHttpException
from .exceptions import EcobeeRequestsException
from .objects.demand_management import DemandManagement
from .objects.demand_response import DemandResponse
from .objects.function import Function
from .objects.group import Group
from .objects.hierarchy_privilege import HierarchyPrivilege
from .objects.hierarchy_user import HierarchyUser
from .objects.selection import Selection
from .objects.status import Status
from .objects.thermostat import Thermostat
from .responses import EcobeeAuthorizeResponse
from .responses import EcobeeCreateRuntimeReportJobResponse
from .responses import EcobeeErrorResponse
from .responses import EcobeeGroupsResponse
from .responses import EcobeeIssueDemandResponsesResponse
from .responses import EcobeeListDemandResponsesResponse
from .responses import EcobeeListHierarchySetsResponse
from .responses import EcobeeListHierarchyUsersResponse
from .responses import EcobeeListRuntimeReportJobStatusResponse
from .responses import EcobeeMeterReportsResponse
from .responses import EcobeeRuntimeReportsResponse
from .responses import EcobeeStatusResponse
from .responses import EcobeeThermostatResponse
from .responses import EcobeeThermostatsSummaryResponse
from .responses import EcobeeTokensResponse
logger = logging.getLogger(__name__)
class EcobeeService(EcobeeObject):
    """Client for the ecobee REST API.

    Handles PIN-based OAuth authorization, access/refresh token management,
    and the thermostat / report / group / hierarchy / demand-response
    endpoints exposed by the methods below.
    """
    # Restrict instances to the OAuth/identity state managed by this class.
    __slots__ = ['_thermostat_name', '_application_key', '_authorization_token', '_access_token', '_refresh_token',
                 '_access_token_expires_on', '_refresh_token_expires_on', '_scope']
    # ecobee API endpoints.
    AUTHORIZE_URL = 'https://api.ecobee.com/authorize'
    TOKENS_URL = 'https://api.ecobee.com/token'
    THERMOSTAT_SUMMARY_URL = 'https://api.ecobee.com/1/thermostatSummary'
    THERMOSTAT_URL = 'https://api.ecobee.com/1/thermostat'
    METER_REPORT_URL = 'https://api.ecobee.com/1/meterReport'
    RUNTIME_REPORT_URL = 'https://api.ecobee.com/1/runtimeReport'
    GROUP_URL = 'https://api.ecobee.com/1/group'
    HIERARCHY_SET_URL = 'https://api.ecobee.com/1/hierarchy/set'
    HIERARCHY_USER_URL = 'https://api.ecobee.com/1/hierarchy/user'
    HIERARCHY_THERMOSTAT_URL = 'https://api.ecobee.com/1/hierarchy/thermostat'
    DEMAND_RESPONSE_URL = 'https://api.ecobee.com/1/demandResponse'
    DEMAND_MANAGEMENT_URL = 'https://api.ecobee.com/1/demandManagement'
    RUNTIME_REPORT_JOB_URL = 'https://api.ecobee.com/1/runtimeReportJob'
    # Sentinel datetimes used as open-ended interval bounds for API queries.
    BEFORE_TIME_BEGAN_DATE_TIME = pytz.utc.localize(datetime(2008, 1, 2, 0, 0, 0))
    END_OF_TIME_DATE_TIME = pytz.utc.localize(datetime(2035, 1, 1, 0, 0, 0))
    # Temperature bounds (presumably degrees Fahrenheit — confirm against the ecobee API docs).
    MINIMUM_COOLING_TEMPERATURE = -10
    MAXIMUM_COOLING_TEMPERATURE = 120
    MINIMUM_HEATING_TEMPERATURE = 45
    MAXIMUM_HEATING_TEMPERATURE = 120
    # Attribute name/type maps consumed by the EcobeeObject serialization helpers.
    attribute_name_map = {'thermostat_name': 'thermostat_name', 'application_key': 'application_key',
                          'authorization_token': 'authorization_token',
                          'access_token': 'access_token', 'refresh_token': 'refresh_token',
                          'access_token_expires_on': 'access_token_expires_on',
                          'refresh_token_expires_on': 'refresh_token_expires_on',
                          'scope': 'scope'}
    attribute_type_map = {'thermostat_name': 'six.text_type', 'application_key': 'six.text_type',
                          'authorization_token': 'six.text_type', 'access_token': 'six.text_type',
                          'refresh_token': 'six.text_type', 'access_token_expires_on': 'datetime',
                          'refresh_token_expires_on': 'datetime', 'scope': 'Scope'}
def __init__(self, thermostat_name, application_key, authorization_token=None,
access_token=None, refresh_token=None, access_token_expires_on=None,
refresh_token_expires_on=None, scope=Scope.SMART_WRITE):
"""
Construct an EcobeeService instance
:param thermostat_name: Name of the thermostat
:param application_key: The unique application key for your application
:param authorization_token: Credentials to be used to retrieve the initial access_token and refresh_token
:param access_token: Credentials to be used in all requests
:param refresh_token: Credentials to be used to refresh access_token and refresh_token
:param access_token_expires_on: When the access token expires on in UTC time
:param refresh_token_expires_on: When the refresh token expires on in UTC time
:param scope: Scope the application requests from the user. Valid values: Scope.SMART_READ,
Scope.SMART_WRITE, and Scope.EMS
"""
if not isinstance(application_key, six.string_types):
raise TypeError('application_key must be an instance of {0}'.format(six.string_types))
if len(application_key) != 32:
raise ValueError('application_key must be a 32 alphanumeric string')
self._thermostat_name = thermostat_name
self._application_key = application_key
self._authorization_token = authorization_token
self._access_token = access_token
self._refresh_token = refresh_token
self._access_token_expires_on = access_token_expires_on
self._refresh_token_expires_on = refresh_token_expires_on
self._scope = scope
@staticmethod
def __process_http_response(response, response_class):
if response.status_code == requests.codes.ok:
response_object = utilities.dictionary_to_object({response_class.__name__: response.json()},
{response_class.__name__: response_class},
{response_class.__name__: None},
is_top_level=True)
if logger.getEffectiveLevel() >= logging.DEBUG:
message_to_log = 'EcobeeResponse:\n[JSON]\n======\n{0}\n\n[Object]\n========\n{1}'.format(
json.dumps(response.json(),
sort_keys=True,
indent=2),
response_object.pretty_format())
logger.debug(message_to_log.strip())
return response_object
else:
try:
if 'error' in response.json():
error_response = utilities.dictionary_to_object({'EcobeeErrorResponse': response.json()},
{'EcobeeErrorResponse': EcobeeErrorResponse},
{'EcobeeErrorResponse': None},
is_top_level=True)
raise EcobeeAuthorizationException(
'ecobee authorization error encountered for URL => {0}\nHTTP error code => {1}\nError type => {'
'2}\nError description => {3}\nError URI => {4}'.format(response.request.url,
response.status_code,
error_response.error,
error_response.error_description,
error_response.error_uri),
error_response.error,
error_response.error_description,
error_response.error_uri)
elif 'status' in response.json():
status = utilities.dictionary_to_object({'Status': response.json()['status']},
{'Status': Status},
{'Status': None},
is_top_level=True)
raise EcobeeApiException(
'ecobee API error encountered for URL => {0}\nHTTP error code => {1}\nStatus code => {'
'2}\nStatus message => {3}'.format(response.request.url,
response.status_code,
status.code,
status.message),
status.code,
status.message)
else:
raise EcobeeHttpException(
'HTTP error encountered for URL => {0}\nHTTP error code => {1}'.format(response.request.url,
response.status_code))
except EcobeeException as ecobee_exception:
logger.exception('{0} raised:\n'.format(type(ecobee_exception).__name__), exc_info=True)
raise
@staticmethod
def __make_http_request(requests_http_method, url, headers=None, params=None, json_=None, timeout=5):
try:
if logger.getEffectiveLevel() >= logging.DEBUG:
if headers:
headers_list = []
for (index, key) in enumerate(sorted(headers)):
headers_list.append('{0:16} => {1!s}\n'.format(key, pprint.pformat(headers[key], indent=2)))
if params:
params_list = []
for (index, key) in enumerate(sorted(params)):
params_list.append('{0:16} => {1!s}\n'.format(key, params[key]))
message_to_log = 'Request:\n[Method]\n========\n{0}\n\n[URL]\n=====\n{1}\n{2}{3}{4}'.format(
requests_http_method.__name__.capitalize(),
url,
'\n[Headers]\n=========\n{0}'.format(''.join(headers_list)) if headers else '',
'\n[Parameters]\n============\n{0}'.format(''.join(params_list)) if params else '',
'\n[JSON]\n======\n{0}'.format(json.dumps(json_, sort_keys=True, indent=2)) if json_ else '')
logger.debug(message_to_log.strip())
return requests_http_method(url,
headers=headers,
params=params,
json=json_,
timeout=timeout)
except requests.exceptions.RequestException as re:
traceback = sys.exc_info()[2]
raise_(EcobeeRequestsException, re.message, traceback)
def authorize(self, response_type='ecobeePin', timeout=5):
"""
The authorize method allows a 3rd party application to obtain an authorization code and a 4 byte alphabetic
string which can be displayed to the user. The user then logs into the ecobee Portal and registers the
application using the PIN provided. Once this step is completed, the 3rd party application is able to
request the access and refresh tokens using the request_tokens method.
:param response_type: This is always "ecobeePin"
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: An AuthorizeResponse object
:rtype: EcobeeAuthorizeResponse
:raises EcobeeAuthorizationException: If the request results in a standard or extended OAuth error
response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If response_type is not a string
:raises ValueError: If response_type is not set to "ecobeePin"
"""
if not isinstance(response_type, six.string_types):
raise TypeError('response_type must be an instance of {0}'.format(six.string_types))
if response_type != 'ecobeePin':
raise ValueError('response_type must be "ecobeePin"')
response = EcobeeService.__make_http_request(requests.get,
EcobeeService.AUTHORIZE_URL,
params={'client_id': self._application_key,
'response_type': response_type,
'scope': self._scope.value},
timeout=timeout)
authorize_response = EcobeeService.__process_http_response(response, EcobeeAuthorizeResponse)
self._authorization_token = authorize_response.code
return authorize_response
def request_tokens(self, grant_type='ecobeePin', timeout=5):
"""
The request_tokens method is used to request the access and refresh tokens once the user has authorized the
application within the ecobee Web Portal.
:param grant_type: This is always "ecobeePin"
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: A TokensResponse object
:rtype: EcobeeTokensResponse
:raises EcobeeAuthorizationException: If the request results in a standard or extended OAuth error
response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If grant_type is not a string
:raises ValueError: If grant_type is not set to "ecobeePin"
"""
if not isinstance(grant_type, six.string_types):
raise TypeError('grant_type must be an instance of {0}'.format(six.string_types))
if grant_type != 'ecobeePin':
raise ValueError('grant_type must be "ecobeePin"')
now_utc = datetime.now(pytz.utc)
response = EcobeeService.__make_http_request(requests.post,
EcobeeService.TOKENS_URL,
params={'client_id': self._application_key,
'code': self._authorization_token,
'grant_type': grant_type},
timeout=timeout)
tokens_response = EcobeeService.__process_http_response(response, EcobeeTokensResponse)
self._access_token = tokens_response.access_token
self._access_token_expires_on = now_utc + timedelta(seconds=tokens_response.expires_in)
self._refresh_token = tokens_response.refresh_token
self._refresh_token_expires_on = now_utc + timedelta(days=365)
return tokens_response
def refresh_tokens(self, grant_type='refresh_token', timeout=5):
"""
All access tokens must be refreshed periodically. Token refresh reduces the potential and benefit of token
theft. Since all tokens expire, stolen tokens may only be used for a limited time. The refresh_tokens method
immediately expires the previously issued access and refresh tokens and issues brand new tokens.
:param grant_type: This is always "refresh_token"
:param timeout: Number of seconds requests will wait to establish a connection and to receive a response
:return: A TokensResponse object
:rtype: EcobeeTokensResponse
:raises EcobeeAuthorizationException: If the request results in a standard or extended OAuth error
response
:raises EcobeeRequestsException: If an exception is raised by the underlying requests module
:raises TypeError: If grant_type is not a string
:raises ValueError: If grant_type is not set to "refresh_token"
"""
if not isinstance(grant_type, six.string_types):
raise TypeError('grant_type must be an instance of {0}'.format(six.string_types))
if grant_type != 'refresh_token':
raise ValueError('grant_type must be "refresh_token"')
now_utc = datetime.now(pytz.utc)
response = EcobeeService.__make_http_request(requests.post,
EcobeeService.TOKENS_URL,
params={'client_id': self._application_key,
'code': self._refresh_token,
'grant_type': grant_type},
timeout=timeout)
tokens_response = EcobeeService.__process_http_response(response, EcobeeTokensResponse)
self._access_token = tokens_response.access_token
self._access_token_expires_on = now_utc + timedelta(seconds=tokens_response.expires_in)
self._refresh_token = tokens_response.refresh_token
self._refresh_token_expires_on = now_utc + timedelta(days=365)
return tokens_response
def request_thermostats_summary(self, selection, timeout=5):
"""
The request_thermostats_summary method retrieves a list of thermostat configuration and state
revisions. This is a light-weight polling method which will only return the revision numbers for the
significant portions of the thermostat data. It is the responsibility of the caller to store these revisions
for future determination of whether changes occurred at the next poll interval.
The intent is to permit the caller to determine whether a thermostat has changed since the last poll.
Retrieval of a whole thermostat including runtime data is expensive and impractical for large amounts of
thermostats such as a management set hierarchy, especially if nothing has changed. By storing the retrieved
revisions, the caller may determine whether to get a thermostat and which sections of the thermostat should
be retrieved.
:param selection: The selection criteria for | |
model_class = TensorboardJob
factory_class = TensorboardJobFactory
num_objects = 3
HAS_AUTH = True
    def setUp(self):
        """Create num_objects tensorboard jobs in the authenticated user's
        project, plus jobs in a second project that the list endpoint must
        filter out."""
        super().setUp()
        self.project = ProjectFactory(user=self.auth_client.user)
        self.other_project = ProjectFactory()
        self.url = '/{}/{}/{}/tensorboards/'.format(API_V1,
                                                    self.project.user.username,
                                                    self.project.name)
        self.other_url = '/{}/{}/{}/tensorboards/'.format(API_V1,
                                                          self.other_project.user.username,
                                                          self.other_project.name)
        self.objects = [self.factory_class(project=self.project) for _ in range(self.num_objects)]
        # one object that does not belong to the filter
        self.factory_class(project=self.other_project)
        # queryset only covers the primary project, in the view's default order.
        self.queryset = self.model_class.objects.filter(project=self.project)
        self.queryset = self.queryset.order_by('-updated_at')
        # A second out-of-project object, kept as an attribute for other tests.
        self.other_object = self.factory_class(project=self.other_project)
    def test_get(self):
        """Listing returns exactly the jobs of the requested project."""
        resp = self.auth_client.get(self.url)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == len(self.objects)
        data = resp.data['results']
        assert len(data) == self.queryset.count()
        assert data == self.serializer_class(self.queryset, many=True).data
        # Test other
        resp = self.auth_client.get(self.other_url)
        assert resp.status_code == status.HTTP_200_OK
        jobs_count = self.queryset.all().count()
        assert jobs_count == self.num_objects
        # Getting all jobs
        resp = self.auth_client.get(self.url)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['count'] == jobs_count
    def test_pagination(self):
        """`limit` yields a full first page and a one-item second page."""
        limit = self.num_objects - 1
        resp = self.auth_client.get("{}?limit={}".format(self.url, limit))
        assert resp.status_code == status.HTTP_200_OK
        # First page: `limit` items plus a link to the next page.
        next_page = resp.data.get('next')
        assert next_page is not None
        assert resp.data['count'] == self.queryset.count()
        data = resp.data['results']
        assert len(data) == limit
        assert data == self.serializer_class(self.queryset[:limit], many=True).data
        # Second page: the single remaining item, no further pages.
        resp = self.auth_client.get(next_page)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        data = resp.data['results']
        assert len(data) == 1
        assert data == self.serializer_class(self.queryset[limit:], many=True).data
    def test_get_order(self):
        """`sort` overrides the default '-updated_at' ordering."""
        resp = self.auth_client.get(self.url + '?sort=created_at,updated_at')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == len(self.objects)
        data = resp.data['results']
        assert len(data) == self.queryset.count()
        # Results differ from the default ordering but match the requested one.
        assert data != self.serializer_class(self.queryset, many=True).data
        assert data == self.serializer_class(self.queryset.order_by('created_at', 'updated_at'),
                                             many=True).data
    def test_get_order_pagination(self):
        """Explicit ordering and pagination compose correctly."""
        queryset = self.queryset.order_by('created_at', 'updated_at')
        limit = self.num_objects - 1
        resp = self.auth_client.get("{}?limit={}&{}".format(self.url,
                                                            limit,
                                                            'sort=created_at,updated_at'))
        assert resp.status_code == status.HTTP_200_OK
        next_page = resp.data.get('next')
        assert next_page is not None
        assert resp.data['count'] == queryset.count()
        data = resp.data['results']
        assert len(data) == limit
        assert data == self.serializer_class(queryset[:limit], many=True).data
        # Follow the pagination link; ordering must be preserved on page two.
        resp = self.auth_client.get(next_page)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        data = resp.data['results']
        assert len(data) == 1
        assert data == self.serializer_class(queryset[limit:], many=True).data
    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
    def test_get_filter(self):
        """`query` filters the listing; a malformed query is a 400."""
        # Wrong filter format raises
        resp = self.auth_client.get(self.url + '?query=created_at<2010-01-01')
        assert resp.status_code == status.HTTP_400_BAD_REQUEST
        # Valid filter that matches nothing.
        resp = self.auth_client.get(self.url + '?query=created_at:<2010-01-01')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == 0
        # Status value that no freshly-created job has.
        resp = self.auth_client.get(self.url +
                                    '?query=created_at:>=2010-01-01,status:Finished')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == 0
        # Filter matching every created job returns the full project listing.
        resp = self.auth_client.get(self.url +
                                    '?query=created_at:>=2010-01-01,status:created|running')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == len(self.objects)
        data = resp.data['results']
        assert len(data) == self.queryset.count()
        assert data == self.serializer_class(self.queryset, many=True).data
def test_get_filter_pagination(self):
limit = self.num_objects - 1
resp = self.auth_client.get("{}?limit={}&{}".format(
self.url,
limit,
'?query=created_at:>=2010-01-01,status:created|running'))
assert resp.status_code == status.HTTP_200_OK
next_page = resp.data.get('next')
assert next_page is not None
assert resp.data['count'] == self.queryset.count()
data = resp.data['results']
assert len(data) == limit
assert data == self.serializer_class(self.queryset[:limit], many=True).data
resp = self.auth_client.get(next_page)
assert resp.status_code == status.HTTP_200_OK
assert resp.data['next'] is None
data = resp.data['results']
assert len(data) == 1
assert data == self.serializer_class(self.queryset[limit:], many=True).data
@pytest.mark.plugins_mark
class TestProjectNotebookListViewV1(BaseViewTest):
    """List-view tests for a project's notebook jobs: listing, ordering,
    pagination and query filtering."""
    serializer_class = ProjectNotebookJobSerializer
    model_class = NotebookJob
    factory_class = NotebookJobFactory
    num_objects = 3
    HAS_AUTH = True

    def setUp(self):
        """Create num_objects notebook jobs in the authenticated user's
        project, plus jobs in a second project that must be filtered out."""
        super().setUp()
        self.project = ProjectFactory(user=self.auth_client.user)
        self.other_project = ProjectFactory()
        self.url = '/{}/{}/{}/notebooks/'.format(API_V1,
                                                 self.project.user.username,
                                                 self.project.name)
        self.other_url = '/{}/{}/{}/notebooks/'.format(API_V1,
                                                       self.other_project.user.username,
                                                       self.other_project.name)
        self.objects = [self.factory_class(project=self.project) for _ in range(self.num_objects)]
        # one object that does not belong to the filter
        self.factory_class(project=self.other_project)
        self.queryset = self.model_class.objects.filter(project=self.project)
        self.queryset = self.queryset.order_by('-updated_at')
        self.other_object = self.factory_class(project=self.other_project)

    def test_get(self):
        """Listing returns exactly the jobs of the requested project."""
        resp = self.auth_client.get(self.url)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == len(self.objects)
        data = resp.data['results']
        assert len(data) == self.queryset.count()
        assert data == self.serializer_class(self.queryset, many=True).data
        # Test other
        resp = self.auth_client.get(self.other_url)
        assert resp.status_code == status.HTTP_200_OK
        jobs_count = self.queryset.all().count()
        assert jobs_count == self.num_objects
        # Getting all jobs
        resp = self.auth_client.get(self.url)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['count'] == jobs_count

    def test_pagination(self):
        """`limit` yields a full first page and a one-item second page."""
        limit = self.num_objects - 1
        resp = self.auth_client.get("{}?limit={}".format(self.url, limit))
        assert resp.status_code == status.HTTP_200_OK
        next_page = resp.data.get('next')
        assert next_page is not None
        assert resp.data['count'] == self.queryset.count()
        data = resp.data['results']
        assert len(data) == limit
        assert data == self.serializer_class(self.queryset[:limit], many=True).data
        resp = self.auth_client.get(next_page)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        data = resp.data['results']
        assert len(data) == 1
        assert data == self.serializer_class(self.queryset[limit:], many=True).data

    def test_get_order(self):
        """`sort` overrides the default '-updated_at' ordering."""
        resp = self.auth_client.get(self.url + '?sort=created_at,updated_at')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == len(self.objects)
        data = resp.data['results']
        assert len(data) == self.queryset.count()
        assert data != self.serializer_class(self.queryset, many=True).data
        assert data == self.serializer_class(self.queryset.order_by('created_at', 'updated_at'),
                                             many=True).data

    def test_get_order_pagination(self):
        """Explicit ordering and pagination compose correctly."""
        queryset = self.queryset.order_by('created_at', 'updated_at')
        limit = self.num_objects - 1
        resp = self.auth_client.get("{}?limit={}&{}".format(self.url,
                                                            limit,
                                                            'sort=created_at,updated_at'))
        assert resp.status_code == status.HTTP_200_OK
        next_page = resp.data.get('next')
        assert next_page is not None
        assert resp.data['count'] == queryset.count()
        data = resp.data['results']
        assert len(data) == limit
        assert data == self.serializer_class(queryset[:limit], many=True).data
        resp = self.auth_client.get(next_page)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        data = resp.data['results']
        assert len(data) == 1
        assert data == self.serializer_class(queryset[limit:], many=True).data

    @pytest.mark.filterwarnings('ignore::RuntimeWarning')
    def test_get_filter(self):
        """`query` filters the listing; a malformed query is a 400."""
        # Wrong filter format raises
        resp = self.auth_client.get(self.url + '?query=created_at<2010-01-01')
        assert resp.status_code == status.HTTP_400_BAD_REQUEST
        resp = self.auth_client.get(self.url + '?query=created_at:<2010-01-01')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == 0
        resp = self.auth_client.get(self.url +
                                    '?query=created_at:>=2010-01-01,status:Finished')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == 0
        resp = self.auth_client.get(self.url +
                                    '?query=created_at:>=2010-01-01,status:created|running')
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        assert resp.data['count'] == len(self.objects)
        data = resp.data['results']
        assert len(data) == self.queryset.count()
        assert data == self.serializer_class(self.queryset, many=True).data

    def test_get_filter_pagination(self):
        """Filtering and pagination compose correctly.

        Bug fix: the query fragment previously started with '?query=' which,
        appended after the '&' separator, produced the bogus parameter name
        '?query' and silently skipped the filter. It now sends 'query=' so the
        filter is applied; status created|running still matches every object.
        """
        limit = self.num_objects - 1
        resp = self.auth_client.get("{}?limit={}&{}".format(
            self.url,
            limit,
            'query=created_at:>=2010-01-01,status:created|running'))
        assert resp.status_code == status.HTTP_200_OK
        next_page = resp.data.get('next')
        assert next_page is not None
        assert resp.data['count'] == self.queryset.count()
        data = resp.data['results']
        assert len(data) == limit
        assert data == self.serializer_class(self.queryset[:limit], many=True).data
        resp = self.auth_client.get(next_page)
        assert resp.status_code == status.HTTP_200_OK
        assert resp.data['next'] is None
        data = resp.data['results']
        assert len(data) == 1
        assert data == self.serializer_class(self.queryset[limit:], many=True).data
@pytest.mark.plugins_mark
class TestStartProjectTensorboardViewV1(BaseViewTest):
model_class = Project
factory_class = ProjectFactory
HAS_AUTH = True
def setUp(self):
super().setUp()
self.object = self.factory_class(user=self.auth_client.user)
self.url = '/{}/{}/{}/tensorboard/start'.format(
API_V1,
self.object.user.username,
self.object.name)
self.queryset = self.model_class.objects.all()
def test_start(self):
assert self.queryset.count() == 1
assert self.object.tensorboard is None
with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as mock_fct:
resp = self.auth_client.post(self.url)
assert mock_fct.call_count == 1
assert resp.status_code == status.HTTP_201_CREATED
assert self.queryset.count() == 1
self.object.clear_cached_properties()
assert isinstance(self.object.tensorboard, TensorboardJob)
def test_spawner_start(self):
assert self.queryset.count() == 1
with patch('scheduler.tensorboard_scheduler.start_tensorboard') as mock_fct:
resp = self.auth_client.post(self.url)
assert mock_fct.call_count == 1
assert resp.status_code == status.HTTP_201_CREATED
assert self.queryset.count() == 1
def test_start_with_updated_config(self):
with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as mock_fct:
resp = self.auth_client.post(self.url)
assert mock_fct.call_count == 1
assert resp.status_code == status.HTTP_201_CREATED
# Start with default config
self.object.clear_cached_properties()
content = self.object.tensorboard.content
# Simulate stop the tensorboard
self.object.tensorboard.delete()
# Starting the tensorboard without config should pass
with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as mock_fct:
resp = self.auth_client.post(self.url)
assert mock_fct.call_count == 1
assert resp.status_code == status.HTTP_201_CREATED
# Check that still using same config
self.object.clear_cached_properties()
assert content == self.object.tensorboard.content
# Simulate stop the tensorboard
self.object.tensorboard.delete()
self.object.save()
# Starting again the tensorboard with different config
with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as mock_fct:
resp = self.auth_client.post(
self.url,
data={'content': tensorboard_spec_parsed_content.raw_data})
assert mock_fct.call_count == 1
assert resp.status_code == status.HTTP_201_CREATED
self.object.clear_cached_properties()
# Check that the image was update
assert content != self.object.tensorboard.content
# Trying to start an already running job returns 200
# Starting again the tensorboard with different config
self.object.tensorboard.set_status(status=JobLifeCycle.BUILDING)
with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as mock_fct:
resp = self.auth_client.post(
self.url,
data={'content': tensorboard_spec_parsed_content.raw_data})
assert mock_fct.call_count == 0
assert resp.status_code == status.HTTP_200_OK
    def test_start_during_build_process(self):
        """A second start request while the job is building enqueues nothing."""
        with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as start_mock:
            self.auth_client.post(self.url)
        self.object.refresh_from_db()
        assert start_mock.call_count == 1
        assert self.object.tensorboard.last_status == JobLifeCycle.CREATED
        # Check that user cannot start a new job if it's already building
        self.object.tensorboard.set_status(status=JobLifeCycle.BUILDING)
        with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as start_mock:
            self.auth_client.post(self.url)
        assert start_mock.call_count == 0
def test_create_with_invalid_config(self):
data = {'content': 'bar'}
resp = self.auth_client.post(self.url, data)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
    def test_starting_stopping_tensorboard_creating_new_one_create_new_job(self):
        """Restarting after a stop creates a fresh TensorboardJob row.

        The stopped job is kept for history, so job and status counts both
        grow after the second start.
        """
        with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as start_mock:
            self.auth_client.post(self.url)
        self.object.refresh_from_db()
        assert start_mock.call_count == 1
        assert self.object.tensorboard.last_status == JobLifeCycle.CREATED
        self.object.tensorboard.set_status(status=JobLifeCycle.STOPPED)
        # One job with two statuses: CREATED then STOPPED.
        assert TensorboardJob.objects.count() == 1
        assert TensorboardJobStatus.objects.count() == 2
        with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as start_mock:
            self.auth_client.post(self.url)
        self.object.clear_cached_properties()
        assert start_mock.call_count == 1
        assert self.object.tensorboard.last_status == JobLifeCycle.CREATED
        # A second job was created; the old one and its statuses remain.
        assert TensorboardJob.objects.count() == 2
        assert TensorboardJobStatus.objects.count() == 3
@pytest.mark.plugins_mark
class TestStartExperimentTensorboardViewV1(BaseViewTest):
model_class = Experiment
factory_class = ExperimentFactory
HAS_AUTH = True
    def setUp(self):
        """Create a project + experiment and build the start-tensorboard URL."""
        super().setUp()
        # Project owned by the authenticated user; the experiment lives in it.
        self.project = ProjectFactory(user=self.auth_client.user)
        self.object = self.factory_class(user=self.auth_client.user, project=self.project)
        # Endpoint under test: start a tensorboard scoped to one experiment.
        self.url = '/{}/{}/{}/experiments/{}/tensorboard/start'.format(
            API_V1,
            self.project.user.username,
            self.project.name,
            self.object.id)
        self.queryset = self.model_class.objects
    def test_start(self):
        """Starting attaches a TensorboardJob to the experiment, not the project."""
        assert self.queryset.count() == 1
        assert self.object.tensorboard is None
        with patch('scheduler.tasks.tensorboards.tensorboards_start.apply_async') as mock_fct:
            resp = self.auth_client.post(self.url)
        assert mock_fct.call_count == 1
        assert resp.status_code == status.HTTP_201_CREATED
        assert self.queryset.count() == 1
        # Drop memoized attributes so `tensorboard` is re-read from the DB.
        self.object.clear_cached_properties()
        assert isinstance(self.object.tensorboard, TensorboardJob)
        assert self.project.tensorboard is None
def test_spawner_start(self):
assert self.queryset.count() == 1
with patch('scheduler.tensorboard_scheduler.start_tensorboard') as mock_fct:
resp = self.auth_client.post(self.url)
assert mock_fct.call_count == 1
assert resp.status_code | |
namespacedef_='', name_='ChartRegion', pretty_print=pretty_print)
for SeparatorRegion_ in self.SeparatorRegion:
namespaceprefix_ = self.SeparatorRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.SeparatorRegion_nsprefix_) else ''
SeparatorRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='SeparatorRegion', pretty_print=pretty_print)
for MathsRegion_ in self.MathsRegion:
namespaceprefix_ = self.MathsRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.MathsRegion_nsprefix_) else ''
MathsRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='MathsRegion', pretty_print=pretty_print)
for NoiseRegion_ in self.NoiseRegion:
namespaceprefix_ = self.NoiseRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.NoiseRegion_nsprefix_) else ''
NoiseRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='NoiseRegion', pretty_print=pretty_print)
for FrameRegion_ in self.FrameRegion:
namespaceprefix_ = self.FrameRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.FrameRegion_nsprefix_) else ''
FrameRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='FrameRegion', pretty_print=pretty_print)
for UnknownRegion_ in self.UnknownRegion:
namespaceprefix_ = self.UnknownRegion_nsprefix_ + ':' if (UseCapturedNS_ and self.UnknownRegion_nsprefix_) else ''
UnknownRegion_.export(outfile, level, namespaceprefix_, namespacedef_='', name_='UnknownRegion', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        """Populate this object from a parsed XML element node.

        Reads the node's attributes via buildAttributes, then recursively
        builds every child element via buildChildren. Returns self so calls
        can be chained.
        """
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            # Optionally keep a reference to the raw tree node for debugging.
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # Strip the namespace from the qualified tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        """Read the XML attributes of `node` into this object's fields.

        `already_processed` guards against handling the same attribute twice
        (e.g. when a subclass has processed it first).
        """
        value = find_attr_value_('imageFilename', node)
        if value is not None and 'imageFilename' not in already_processed:
            already_processed.add('imageFilename')
            self.imageFilename = value
        value = find_attr_value_('imageWidth', node)
        if value is not None and 'imageWidth' not in already_processed:
            already_processed.add('imageWidth')
            # Dimensions are declared as integers in the schema.
            self.imageWidth = self.gds_parse_integer(value, node, 'imageWidth')
        value = find_attr_value_('imageHeight', node)
        if value is not None and 'imageHeight' not in already_processed:
            already_processed.add('imageHeight')
            self.imageHeight = self.gds_parse_integer(value, node, 'imageHeight')
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'Border':
obj_ = BorderType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Border = obj_
obj_.original_tagname_ = 'Border'
elif nodeName_ == 'PrintSpace':
obj_ = PrintSpaceType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.PrintSpace = obj_
obj_.original_tagname_ = 'PrintSpace'
elif nodeName_ == 'ReadingOrder':
obj_ = ReadingOrderType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.ReadingOrder = obj_
obj_.original_tagname_ = 'ReadingOrder'
elif nodeName_ == 'Layers':
obj_ = LayersType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.Layers = obj_
obj_.original_tagname_ = 'Layers'
elif nodeName_ == 'TextRegion':
obj_ = TextRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TextRegion.append(obj_)
obj_.original_tagname_ = 'TextRegion'
elif nodeName_ == 'ImageRegion':
obj_ = ImageRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.ImageRegion.append(obj_)
obj_.original_tagname_ = 'ImageRegion'
elif nodeName_ == 'LineDrawingRegion':
obj_ = LineDrawingRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.LineDrawingRegion.append(obj_)
obj_.original_tagname_ = 'LineDrawingRegion'
elif nodeName_ == 'GraphicRegion':
obj_ = GraphicRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.GraphicRegion.append(obj_)
obj_.original_tagname_ = 'GraphicRegion'
elif nodeName_ == 'TableRegion':
obj_ = TableRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.TableRegion.append(obj_)
obj_.original_tagname_ = 'TableRegion'
elif nodeName_ == 'ChartRegion':
obj_ = ChartRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.ChartRegion.append(obj_)
obj_.original_tagname_ = 'ChartRegion'
elif nodeName_ == 'SeparatorRegion':
obj_ = SeparatorRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.SeparatorRegion.append(obj_)
obj_.original_tagname_ = 'SeparatorRegion'
elif nodeName_ == 'MathsRegion':
obj_ = MathsRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.MathsRegion.append(obj_)
obj_.original_tagname_ = 'MathsRegion'
elif nodeName_ == 'NoiseRegion':
obj_ = NoiseRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.NoiseRegion.append(obj_)
obj_.original_tagname_ = 'NoiseRegion'
elif nodeName_ == 'FrameRegion':
obj_ = FrameRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.FrameRegion.append(obj_)
obj_.original_tagname_ = 'FrameRegion'
elif nodeName_ == 'UnknownRegion':
obj_ = UnknownRegionType.factory(parent_object_=self)
obj_.build(child_, gds_collector_=gds_collector_)
self.UnknownRegion.append(obj_)
obj_.original_tagname_ = 'UnknownRegion'
# end class PageType
class TextRegionType(GeneratedsSuper):
"""Pure text is represented as a text region. This includes drop capitals,
but practically ornate text may be considered as a graphic.Individual
skew of the region in degrees (Range: -89.999,90)The nature of the text
in the regionThe text colour of the regionThe background colour of the
regionSpecifies whether the colour of the text appears reversed against
a background colourThe size of the characters in pointsThe degree of
space in points between the lines of textThe degree of space in points
between the characters in a string of textThe direction in which text
in a region should be read (within lines)The degrees by which you need
to turn your head in order to read the text when it is placed on the
horizontal (Range: -89.999,90)Defines whether a region of text is
indented or notThe primary language used in the regionThe secondary
language used in the regionThe primary script used in the regionThe
secondary script used in the region"""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, id=None, orientation=None, type_=None, textColour=None, bgColour=None, reverseVideo=None, fontSize=None, leading=None, kerning=None, readingDirection=None, readingOrientation=None, indented=None, primaryLanguage=None, secondaryLanguage=None, primaryScript=None, secondaryScript=None, Coords=None, TextLine=None, TextEquiv=None, gds_collector_=None, **kwargs_):
        """Initialize a TextRegionType with its XML attributes and children.

        All XML attributes are passed through _cast (type coercion or
        pass-through for strings); each also gets a companion
        `<name>_nsprefix_` slot used when re-exporting with namespaces.
        """
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        # --- XML attributes ---
        self.id = _cast(None, id)
        self.id_nsprefix_ = None
        self.orientation = _cast(float, orientation)
        self.orientation_nsprefix_ = None
        self.type_ = _cast(None, type_)
        self.type__nsprefix_ = None
        self.textColour = _cast(None, textColour)
        self.textColour_nsprefix_ = None
        self.bgColour = _cast(None, bgColour)
        self.bgColour_nsprefix_ = None
        self.reverseVideo = _cast(bool, reverseVideo)
        self.reverseVideo_nsprefix_ = None
        self.fontSize = _cast(float, fontSize)
        self.fontSize_nsprefix_ = None
        self.leading = _cast(int, leading)
        self.leading_nsprefix_ = None
        self.kerning = _cast(int, kerning)
        self.kerning_nsprefix_ = None
        self.readingDirection = _cast(None, readingDirection)
        self.readingDirection_nsprefix_ = None
        self.readingOrientation = _cast(float, readingOrientation)
        self.readingOrientation_nsprefix_ = None
        self.indented = _cast(bool, indented)
        self.indented_nsprefix_ = None
        self.primaryLanguage = _cast(None, primaryLanguage)
        self.primaryLanguage_nsprefix_ = None
        self.secondaryLanguage = _cast(None, secondaryLanguage)
        self.secondaryLanguage_nsprefix_ = None
        self.primaryScript = _cast(None, primaryScript)
        self.primaryScript_nsprefix_ = None
        self.secondaryScript = _cast(None, secondaryScript)
        self.secondaryScript_nsprefix_ = None
        # --- child elements ---
        self.Coords = Coords
        self.Coords_nsprefix_ = None
        if TextLine is None:
            self.TextLine = []
        else:
            self.TextLine = TextLine
        self.TextLine_nsprefix_ = None
        self.TextEquiv = TextEquiv
        self.TextEquiv_nsprefix_ = None
    def factory(*args_, **kwargs_):
        """Instantiate TextRegionType, honoring registered subclass overrides.

        Resolution order: a subclass found in CurrentSubclassModule_, then
        TextRegionType.subclass, then TextRegionType itself.
        """
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, TextRegionType)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if TextRegionType.subclass:
            return TextRegionType.subclass(*args_, **kwargs_)
        else:
            return TextRegionType(*args_, **kwargs_)
    factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_Coords(self):
return self.Coords
def set_Coords(self, Coords):
self.Coords = Coords
def get_TextLine(self):
return self.TextLine
def set_TextLine(self, TextLine):
self.TextLine = TextLine
def add_TextLine(self, value):
self.TextLine.append(value)
def insert_TextLine_at(self, index, value):
self.TextLine.insert(index, value)
def replace_TextLine_at(self, index, value):
self.TextLine[index] = value
def get_TextEquiv(self):
return self.TextEquiv
def set_TextEquiv(self, TextEquiv):
self.TextEquiv = TextEquiv
def get_id(self):
return self.id
def set_id(self, id):
self.id = id
def get_orientation(self):
return self.orientation
def set_orientation(self, orientation):
self.orientation = orientation
def get_type(self):
return self.type_
def set_type(self, type_):
self.type_ = type_
def get_textColour(self):
return self.textColour
def set_textColour(self, textColour):
self.textColour = textColour
def get_bgColour(self):
return self.bgColour
def set_bgColour(self, bgColour):
self.bgColour = bgColour
def get_reverseVideo(self):
return self.reverseVideo
def set_reverseVideo(self, reverseVideo):
self.reverseVideo = reverseVideo
def get_fontSize(self):
return self.fontSize
def set_fontSize(self, fontSize):
self.fontSize = fontSize
def get_leading(self):
return self.leading
def set_leading(self, leading):
self.leading = leading
def get_kerning(self):
return self.kerning
def set_kerning(self, kerning):
self.kerning = kerning
def get_readingDirection(self):
return self.readingDirection
def set_readingDirection(self, readingDirection):
self.readingDirection = readingDirection
def get_readingOrientation(self):
return self.readingOrientation
def set_readingOrientation(self, readingOrientation):
self.readingOrientation = readingOrientation
def get_indented(self):
return self.indented
def set_indented(self, indented):
self.indented = indented
def get_primaryLanguage(self):
return self.primaryLanguage
def set_primaryLanguage(self, primaryLanguage):
self.primaryLanguage = primaryLanguage
def get_secondaryLanguage(self):
return self.secondaryLanguage
def set_secondaryLanguage(self, secondaryLanguage):
self.secondaryLanguage = secondaryLanguage
def get_primaryScript(self):
return self.primaryScript
def set_primaryScript(self, primaryScript):
self.primaryScript = primaryScript
def get_secondaryScript(self):
return self.secondaryScript
def set_secondaryScript(self, secondaryScript):
self.secondaryScript = secondaryScript
    def validate_TextTypeSimpleType(self, value):
        # Validate type pc:TextTypeSimpleType, a restriction on string.
        # Failures are reported through gds_collector_; validation is a no-op
        # when no collector is attached or Validate_simpletypes_ is off.
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            value = value
            enumerations = ['paragraph', 'heading', 'caption', 'header', 'footer', 'page-number', 'drop-capital', 'credit', 'floating', 'signature-mark', 'catch-word', 'marginalia', 'footnote', 'footnote-continued', 'TOC-entry']
            if value not in enumerations:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on TextTypeSimpleType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                # NOTE(review): `result` is assigned but never returned — a
                # generateDS artifact; callers rely on the collector messages.
                result = False
    def validate_ColourSimpleType(self, value):
        # Validate type pc:ColourSimpleType, a restriction on string.
        # Failures are reported through gds_collector_; validation is a no-op
        # when no collector is attached or Validate_simpletypes_ is off.
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            value = value
            enumerations = ['black', 'blue', 'brown', 'cyan', 'green', 'grey', 'indigo', 'magenta', 'orange', 'pink', 'red', 'turquoise', 'violet', 'white', 'yellow']
            if value not in enumerations:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on ColourSimpleType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                # NOTE(review): `result` is assigned but never returned — a
                # generateDS artifact; callers rely on the collector messages.
                result = False
def validate_ReadingDirectionSimpleType(self, value):
# Validate type pc:ReadingDirectionSimpleType, a restriction on string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['left-to-right', 'right-to-left', 'top-to-bottom', 'bottom-to-top']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on | |
= np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
if norm is None:
self.norm = maxnorm
else:
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
    def check(self, f, x, dx):
        """Return a nonzero termination code if iteration should stop.

        Codes: 0 = continue, 1 = converged (residual exactly zero, or all
        absolute/relative tolerances met), 2 = iteration limit exceeded
        (legacy `iter` mode only).
        """
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)
        if self.f0_norm is None:
            # Remember the first residual norm for the relative criterion.
            self.f0_norm = f_norm
        if f_norm == 0:
            return 1
        if self.iter is not None:
            # backwards compatibility with Scipy 0.6.0
            return 2 * (self.iteration > self.iter)
        # NB: condition must succeed for rtol=inf even if norm == 0
        return int((f_norm <= self.f_tol
                    and f_norm/self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
    """
    Common interface for Jacobians or Jacobian approximations.
    The optional methods come useful when implementing trust region
    etc. algorithms that often require evaluating transposes of the
    Jacobian.
    Methods
    -------
    solve
        Returns J^-1 * v
    update
        Updates Jacobian to point `x` (where the function has residual `Fx`)
    matvec : optional
        Returns J * v
    rmatvec : optional
        Returns A^H * v
    rsolve : optional
        Returns A^-H * v
    matmat : optional
        Returns A * V, where V is a dense matrix with dimensions (N,K).
    todense : optional
        Form the dense Jacobian matrix. Necessary for dense trust region
        algorithms, and useful for testing.
    Attributes
    ----------
    shape
        Matrix dimensions (M, N)
    dtype
        Data type of the matrix.
    func : callable, optional
        Function the Jacobian corresponds to
    """

    def __init__(self, **kw):
        # Only the documented hooks/attributes may be injected via keywords.
        names = ["solve", "update", "matvec", "rmatvec", "rsolve",
                 "matmat", "todense", "shape", "dtype"]
        for name, value in kw.items():
            if name not in names:
                raise ValueError("Unknown keyword argument %s" % name)
            if value is not None:
                setattr(self, name, kw[name])
        if hasattr(self, 'todense'):
            # Allow np.array(jacobian) when a dense form is available.
            self.__array__ = lambda: self.todense()

    def aspreconditioner(self):
        """Return an InverseJacobian view usable as a preconditioner."""
        return InverseJacobian(self)

    def solve(self, v, tol=0):
        """Return J^-1 * v; must be provided by subclasses or via kwargs."""
        raise NotImplementedError

    def update(self, x, F):
        """Update the approximation to point `x` with residual `F` (no-op)."""
        pass

    def setup(self, x, F, func):
        """Record problem metadata and seed the approximation at (x, F)."""
        self.func = func
        self.shape = (F.size, x.size)
        self.dtype = F.dtype
        if self.__class__.setup is Jacobian.setup:
            # Call on the first point unless overridden.
            # Fixed: previously called self.update(self, x, F), which passed
            # `self` twice and raised TypeError on the first setup().
            self.update(x, F)
class InverseJacobian(object):
    """Adapter presenting a Jacobian's inverse action as a linear operator.

    Maps the wrapped object's `solve` to `matvec` (and `rsolve` to `rmatvec`
    when available), so J^-1 can be used where a matrix-like operator is
    expected, e.g. as a preconditioner.
    """
    def __init__(self, jacobian):
        self.jacobian = jacobian
        # Applying the inverse is the forward solve of the wrapped Jacobian.
        self.matvec = jacobian.solve
        self.update = jacobian.update
        if hasattr(jacobian, 'setup'):
            self.setup = jacobian.setup
        if hasattr(jacobian, 'rsolve'):
            # Hermitian-transpose inverse, when the wrapped object supports it.
            self.rmatvec = jacobian.rsolve
    @property
    def shape(self):
        # Delegate: the inverse has the same shape as the Jacobian.
        return self.jacobian.shape
    @property
    def dtype(self):
        return self.jacobian.dtype
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.

    Accepts: a Jacobian instance or subclass, a dense array, a sparse
    matrix, an object with shape/dtype/solve, a callable returning the
    Jacobian matrix at x, or a string naming a built-in approximation.
    """
    spsolve = scipy.sparse.linalg.spsolve
    if isinstance(J, Jacobian):
        # Already in the right form.
        return J
    elif inspect.isclass(J) and issubclass(J, Jacobian):
        # A Jacobian subclass: instantiate it with defaults.
        return J()
    elif isinstance(J, np.ndarray):
        # Constant dense Jacobian.
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')
        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v: solve(J, v),
                        rsolve=lambda v: solve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif scipy.sparse.isspmatrix(J):
        # Constant sparse Jacobian.
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J*v,
                        rmatvec=lambda v: J.conj().T * v,
                        solve=lambda v: spsolve(J, v),
                        rsolve=lambda v: spsolve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        # Duck-typed operator: forward whatever hooks it provides.
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)
    elif callable(J):
        # Assume it's a function J(x) that returns the Jacobian
        class Jac(Jacobian):
            def update(self, x, F):
                # Only remember x; the matrix is recomputed on demand.
                self.x = x
            def solve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")
            def matvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return m*v
                else:
                    raise ValueError("Unknown matrix type")
            def rsolve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")
            def rmatvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return m.conj().T * v
                else:
                    raise ValueError("Unknown matrix type")
        return Jac()
    elif isinstance(J, str):
        # Name of a built-in Jacobian approximation.
        return dict(broyden1=BroydenFirst,
                    broyden2=BroydenSecond,
                    anderson=Anderson,
                    diagbroyden=DiagBroyden,
                    linearmixing=LinearMixing,
                    excitingmixing=ExcitingMixing,
                    krylov=KrylovJacobian)[J]()
    else:
        raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
    """Base class for Broyden-style quasi-Newton updates.

    Tracks the most recent point/residual pair and feeds the differences to
    the subclass hook `_update`.
    """

    def setup(self, x0, f0, func):
        Jacobian.setup(self, x0, f0, func)
        self.last_f = f0
        self.last_x = x0
        if hasattr(self, 'alpha') and self.alpha is None:
            # Autoscale the initial Jacobian parameter unless x0 already
            # solves the system (zero residual).
            normf0 = norm(f0)
            self.alpha = 0.5*max(norm(x0), 1) / normf0 if normf0 else 1.0

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        """Subclass hook: incorporate one secant pair into the approximation."""
        raise NotImplementedError

    def update(self, x, f):
        # Form the secant pair from the previous iterate, then remember
        # the new one.
        df = f - self.last_f
        dx = x - self.last_x
        self._update(x, f, dx, df, norm(dx), norm(df))
        self.last_f = f
        self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
    def __init__(self, alpha, n, dtype):
        # Scalar multiplier of the identity term in alpha*I + sum c_n d_n^H.
        self.alpha = alpha
        # Rank-one update vectors (c_n and d_n), grown via append().
        self.cs = []
        self.ds = []
        # Matrix dimension: the represented matrix is n x n.
        self.n = n
        self.dtype = dtype
        # Dense fallback; set to a full ndarray once the rank reaches the
        # vector dimension (see collapse()), after which cs/ds are unused.
        self.collapsed = None
    @staticmethod
    def _matvec(v, alpha, cs, ds):
        """Evaluate (alpha*I + sum c d^H) v without forming the matrix."""
        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
                                          cs[:1] + [v])
        w = alpha * v
        for c, d in zip(cs, ds):
            # a = d^H v; then w += a * c  (BLAS axpy: w <- w + a*c).
            a = dotc(d, v)
            w = axpy(c, w, w.size, a)
        return w
    @staticmethod
    def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v

        Uses the Woodbury identity for the low-rank part, so only a small
        len(cs) x len(cs) dense system is solved.
        """
        if len(cs) == 0:
            # Pure scaled identity.
            return v/alpha
        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
        c0 = cs[0]
        # A = alpha*I + D^H C  (the small capacitance matrix).
        A = alpha * np.identity(len(cs), dtype=c0.dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i,j] += dotc(d, c)
        # q = A^-1 D^H v / alpha
        q = np.zeros(len(cs), dtype=c0.dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)
        # w = v/alpha - sum_j q_j c_j
        w = v/alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)
        return w
    def matvec(self, v):
        """Evaluate w = M v"""
        if self.collapsed is not None:
            # Dense path after collapse().
            return np.dot(self.collapsed, v)
        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
    def rmatvec(self, v):
        """Evaluate w = M^H v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed.T.conj(), v)
        # Hermitian transpose swaps the roles of cs and ds and conjugates alpha.
        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
    def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        # Same swap as rmatvec, applied to the Woodbury solve.
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
    def append(self, c, d):
        """Add one rank-one term c d^H to the matrix."""
        if self.collapsed is not None:
            # Already dense: apply the update directly.
            self.collapsed += c[:,None] * d[None,:].conj()
            return
        self.cs.append(c)
        self.ds.append(d)
        if len(self.cs) > c.size:
            # Rank exceeds the dimension: dense storage is now cheaper.
            self.collapse()
    def __array__(self):
        """Return the dense ndarray form of the matrix."""
        if self.collapsed is not None:
            return self.collapsed
        Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
        for c, d in zip(self.cs, self.ds):
            Gm += c[:,None]*d[None,:].conj()
        return Gm
    def collapse(self):
        """Collapse the low-rank matrix to a full-rank one."""
        self.collapsed = np.array(self)
        # The factored representation is no longer used after this point.
        self.cs = None
        self.ds = None
        self.alpha = None
    def restart_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping all vectors.
        """
        if self.collapsed is not None:
            # Dense form: rank reduction no longer applies.
            return
        assert rank > 0
        if len(self.cs) > rank:
            # "Restart" strategy: discard every stored update at once.
            del self.cs[:]
            del self.ds[:]
    def simple_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping oldest vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        while len(self.cs) > rank:
            # FIFO strategy: discard updates oldest-first until within rank.
            del self.cs[0]
            del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] <NAME>, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q | |
# Repository: akhilgeothom/neural-tangents
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compute the empirical NTK and approximate functions via Taylor series."""
from functools import partial
import operator
from absl import flags
from jax import random
from jax.api import eval_shape
from jax.api import jacobian
from jax.api import jvp
from jax.api import vjp
from jax.config import config
import jax.numpy as np
from jax.tree_util import tree_multimap
from jax.tree_util import tree_reduce
from neural_tangents.utils import flags as internal_flags
from neural_tangents.utils import utils
# Parse absl flags from the command line at import time.
config.parse_flags_with_absl()  # NOTE(schsam): Is this safe?
FLAGS = flags.FLAGS  # Global absl flag registry handle used in this module.
def _read_keys(keys):
if keys is None or (isinstance(keys, np.ndarray) and keys.shape == (2,)):
key1 = key2 = keys
elif isinstance(keys, tuple):
# assuming x1 and x2 using key1 and key2, resp.
key1, key2 = keys
elif isinstance(keys, np.ndarray) and keys.shape == (2, 2):
key1, key2 = keys[0], keys[1]
else:
raise ValueError('`keys` must be one of the following: `None`, a PRNG '
'key, a tuple of PRNG keys or a (2, 2) array and dtype '
'unint32')
return key1, key2
def linearize(f, params):
  """Construct the first-order Taylor expansion of `f` around `params`.

  Example:
    >>> # Compute the MSE of the first order Taylor series of a function.
    >>> f_lin = linearize(f, params)
    >>> mse = np.mean((f(new_params, x) - f_lin(new_params, x)) ** 2)

  Args:
    f: A function with signature f(params, inputs), where params and inputs
      are `np.ndarray`s, returning an `np.ndarray`.
    params: Parameters (any JAX-compatible structure) that the expansion is
      taken about.

  Returns:
    A function f_lin(new_params, inputs) with the same signature as `f` that
    evaluates the linearization of `f` about `params`.
  """
  def f_lin(p, x):
    # Displacement of the query parameters from the expansion point.
    dparams = tree_multimap(operator.sub, p, params)
    # Forward-mode JVP yields f(params, x) and the directional derivative
    # along dparams in a single pass.
    f_x, df_x = jvp(lambda param: f(param, x), (params,), (dparams,))
    return f_x + df_x
  return f_lin
def taylor_expand(f, params, degree):
  """Returns a function f_tayl, the Taylor approximation to f of degree degree.

  Example:
    >>> # Compute the MSE of the third order Taylor series of a function.
    >>> f_tayl = taylor_expand(f, params, 3)
    >>> mse = np.mean((f(new_params, x) - f_tayl(new_params, x)) ** 2)

  Args:
    f: A function that we would like to Taylor expand. It should have the
      signature f(params, inputs) where params is a PyTree, inputs is an
      `np.ndarray`, and f returns an `np.ndarray`.
    params: Initial parameters to the function that we would like to take the
      Taylor series about. This can be any structure that is compatible
      with the JAX tree operations.
    degree: The degree of the Taylor expansion.

  Returns:
    A function f_tayl(new_params, inputs) whose signature is the same as f.
    Here f_tayl implements the degree-order taylor series of f about params.
  """
  def taylorize_r(f, params, dparams, degree, current_degree):
    """Recursive function to accumulate contributions to the Taylor series."""
    if current_degree == degree:
      # Base case: highest-order term contributes f itself.
      return f(params)
    def f_jvp(p):
      # Directional derivative of f along dparams at p.
      _, val_jvp = jvp(f, (p,), (dparams,))
      return val_jvp
    # Horner-like accumulation: dividing by (k+1) at depth k reproduces the
    # 1/k! factorial weights of the Taylor series.
    df = taylorize_r(f_jvp, params, dparams, degree, current_degree+1)
    return f(params) + df / (current_degree + 1)
  def f_tayl(p, x):
    # Displacement from the expansion point, leaf-wise over the PyTree.
    dparams = tree_multimap(lambda x, y: x - y, p, params)
    return taylorize_r(lambda param: f(param, x), params, dparams, degree, 0)
  return f_tayl
# Empirical Kernel
def flatten_features(kernel):
  """Collapse an empirical kernel into a 2D `(feat*n1, feat*n2)` matrix.

  A kernel with more than two dimensions is assumed to be laid out as
  `(n1, n2) + output_shape + output_shape`; the two copies of the output axes
  are interleaved with the example axes before reshaping so that the features
  of each example stay contiguous.
  """
  if kernel.ndim == 2:
    # Already a matrix; nothing to do.
    return kernel
  assert kernel.ndim % 2 == 0
  half = (kernel.ndim - 1) // 2
  n1, n2 = kernel.shape[:2]
  feature_size = int(np.prod(kernel.shape[2 + half:]))
  # Axis order (0, out1..., 1, out2...): first copy of the output axes next
  # to axis 0, second copy next to axis 1.
  order = (0,) + tuple(range(2, 2 + half)) + (1,) + tuple(range(2 + half, 2 + 2 * half))
  return np.reshape(np.transpose(kernel, order),
                    (feature_size * n1, feature_size * n2))
def empirical_implicit_ntk_fn(f):
  """Computes the ntk without batching for inputs x1 and x2.

  The Neural Tangent Kernel is defined as J(X_1)^T J(X_2) where J is the
  jacobian df/dparams. Computing the NTK directly involves directly
  instantiating the jacobian which takes
  O(dataset_size * output_dim * parameters) memory. It turns out it is
  substantially more efficient (especially as the number of parameters grows)
  to compute the NTK implicitly.

  This involves using JAX's autograd to compute derivatives of linear functions
  (which do not depend on the inputs). Thus, we find it more efficient to refer
  to fx_dummy for the outputs of the network. fx_dummy has the same shape as
  the output of the network on a single piece of input data.

  TODO(schsam): Write up a better description of the implicit method.

  Args:
    f: The function whose NTK we are computing. f should have the signature
      f(params, inputs) and should return an `np.ndarray` of outputs with shape
      [|inputs|, output_dim].

  Returns:
    A function ntk_fn that computes the empirical ntk.
  """
  def ntk_fn(x1, x2, params, keys=None):
    """Computes the empirical ntk.

    Args:
      x1: A first `np.ndarray` of inputs, of shape [n1, ...], over which we
        would like to compute the NTK.
      x2: A second `np.ndarray` of inputs, of shape [n2, ...], over which we
        would like to compute the NTK.
      params: A PyTree of parameters about which we would like to compute the
        neural tangent kernel.
      keys: None or a PRNG key or a tuple of PRNG keys or a (2, 2) array and
        dtype uint32. If `key == None`, then the function `f` is deterministic
        and requires no PRNG key; else if `keys` is a single PRNG key, then x1
        and x2 must be the same and share the same PRNG key; else x1 and x2 use
        two different PRNG keys.

    Returns:
      A `np.ndarray` of shape [n1, n2] + output_shape + output_shape.
    """
    key1, key2 = _read_keys(keys)
    # TODO(xlc): find a good way to check utils.x1_is_x2(x1, x2) == (key1==key2)
    if x2 is None:
      x2 = x1
    # eval_shape traces f without running it, to learn the output shape/dtype.
    f_dummy = partial(f, rng=random.PRNGKey(1))
    fx2_struct = eval_shape(f_dummy, params, x2)
    # Placeholder cotangent with the network's output shape; the VJP below is
    # linear in it, so its values do not matter.
    fx_dummy = np.ones(fx2_struct.shape, fx2_struct.dtype)
    def delta_vjp_jvp(delta):
      # delta -> J(x1) J(x2)^T delta, composed from a VJP at x2 and a JVP
      # at x1; jacobian() of this linear map yields the NTK block.
      def delta_vjp(delta):
        return vjp(lambda p: f(p, x2, rng=key2), params)[1](delta)
      return jvp(lambda p: f(p, x1, rng=key1), (params,), delta_vjp(delta))[1]
    ntk = jacobian(delta_vjp_jvp)(fx_dummy)
    # Reorder axes from ([out1], [out2]) interleaving to
    # (n1, n2) + output_shape + output_shape.
    ndim = len(fx2_struct.shape)
    ordering = (0, ndim) + tuple(range(1, ndim)) + \
        tuple(x + ndim for x in range(1, ndim))
    return np.transpose(ntk, ordering)
  return ntk_fn
def empirical_direct_ntk_fn(f):
"""Computes the ntk without batching for inputs x1 and x2.
The Neural Tangent Kernel is defined as J(X_1)^T J(X_2) where J is the
jacobian df/dparams.
Args:
f: The function whose NTK we are computing. f should have the signature
f(params, inputs) and should return an `np.ndarray` of outputs with shape
[|inputs|, output_dim].
Returns:
A function `ntk_fn` that computes the empirical ntk.
"""
def sum_and_contract(j1, j2):
def contract(x, y):
param_count = int(np.prod(x.shape[2:]))
x = np.reshape(x, x.shape[:2] + (param_count,))
y = np.reshape(y, y.shape[:2] + (param_count,))
return np.dot(x, np.transpose(y, (0, 2, 1)))
return tree_reduce(operator.add, tree_multimap(contract, j1, j2))
def ntk_fn(x1, x2, params, keys=None):
"""Computes the empirical ntk.
Args:
x1: A first `np.ndarray` of inputs, of shape [n1, ...], over which we
would like to compute the NTK.
x2: A second `np.ndarray` of inputs, of shape [n2, ...], over which we
would like to compute the NTK.
params: A PyTree of parameters about which we would like to compute the
neural tangent kernel.
keys: None or a PRNG key or a tuple of PRNG keys or a (2, 2) array and
dtype uint32. If `key == None`, then the function `f` is deterministic
and requires no PRNG key; else if `keys` is a single PRNG key, then x1
and x2 share the same PRNG key; else x1 and x2 use two different PRNG
keys.
Returns:
A `np.ndarray` of shape [n1, n2] + output_shape + output_shape.
"""
key1, key2 = _read_keys(keys)
f1 = partial(f, rng=key1)
jac_fn1 = jacobian(f1)
| |
# Repository: gyger/PICwriter
# -*- coding: utf-8 -*-
"""
Default MEEP launch-file for arbitrary PICwriter components.
MCTS = "Meep Compute Transmission Spectra"
Launches a MEEP simulation to compute the transmission/reflection spectra from each of the component's ports when light
enters at the input `port`.
How this function maps the GDSII layers to the material stack is something that will be improved in the future.
Currently works well for 1 or 2 layer devices.
@author: dkita
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import meep as mp
import h5py
import argparse
import operator
def str2bool(v):
    """Parse a command-line string as a boolean for argparse.

    Accepts common spellings case-insensitively: "yes"/"no", "true"/"false",
    "t"/"f", "y"/"n", "1"/"0".

    Args:
        v (str): Raw argument value.

    Returns:
        bool: The parsed boolean.

    Raises:
        argparse.ArgumentTypeError: If *v* is not a recognized boolean string.
    """
    value = v.lower()
    # NOTE: the original also compared the lowercased string against "True"
    # and "False", which could never match; those dead alternatives are gone.
    if value in ("yes", "true", "t", "y", "1"):
        return True
    if value in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def get_prism_objects(eps_file):
    """Read an HDF5 epsilon file and build a list of prism descriptions.

    The file stores one row per polygon vertex in parallel datasets:
      * LL = GDSII layer
      * DD = datatype
      * NN = polygon index
      * VV = vertex index
      * XX = x-position
      * ZZ = z-position
      * height = height of the prism
      * eps = epsilon of the prism
      * ycenter = center (y-direction) of the prism [note: (x,y) center defaults to (0,0)]

    Args:
        eps_file (str): Path to the HDF5 geometry file.

    Returns:
        list[dict]: One ``{'height', 'eps', 'vlist'}`` entry per unique prism,
        sorted by ascending dielectric constant.

    Raises:
        ValueError: If the file is empty or in the wrong format.
    """
    with h5py.File(eps_file, "r") as hf:
        data = np.array(
            [
                np.array(hf.get("LL")),
                np.array(hf.get("DD")),
                np.array(hf.get("NN")),
                np.array(hf.get("VV")),
                np.array(hf.get("XX")),
                np.array(hf.get("ZZ")),
                np.array(hf.get("height")),
                np.array(hf.get("eps")),
                np.array(hf.get("ycenter")),
            ]
        )
    # Group vertices by the (LL, DD, NN, height, eps, ycenter) tuple that
    # identifies a unique prism.  One pass over the columns replaces the
    # original O(#prisms * #vertices) nested scan.
    vertices_by_prism = {}
    for i in range(len(data[0])):
        key = tuple(data[(0, 1, 2, 6, 7, 8), i])
        # Keep (VV, XX, ZZ) so the vertices can be ordered afterwards.
        vertices_by_prism.setdefault(key, []).append(
            [data[3, i], data[4, i], data[5, i]]
        )
    if not vertices_by_prism:
        raise ValueError(
            "Epsilon file (eps_file="
            + str(eps_file)
            + ") is empty or the wrong format."
        )
    prisms = []
    for val, vertex_list in vertices_by_prism.items():
        # Order vertices by their stored vertex index (VV).
        vertex_list.sort(key=operator.itemgetter(0))
        # val = (LL, DD, NN, height, eps, ycenter); the prism is centered
        # vertically on ycenter, so its base sits at ycenter - height / 2.
        vl = [
            mp.Vector3(
                float(vertex[1]),
                float(val[5]) - float(val[3]) / 2.0,
                float(vertex[2]),
            )
            for vertex in vertex_list
        ]
        prisms.append({"height": float(val[3]), "eps": float(val[4]), "vlist": vl})
    # Sort prisms in order of ascending dielectric constant.
    return sorted(prisms, key=operator.itemgetter("eps"))
def main(args):
    """
    Run a 3-D MEEP simulation and print the transmission/reflection fluxes
    for each port of a PICwriter component.

    Args:
       * **fields** (boolean): If true, outputs the fields at the relevant waveguide cross-sections (top-down and side-view)
       * **output_directory** (string): Name of the output directory (for storing the fields)
       * **eps_input_file** (string): Name of the hdf5 file that defines the geometry through prisms
       * **input_pol** (string): Either "TE", or "TM", corresponding to the desired input mode. Defaults to "TE"
       * **res** (int): Resolution of the MEEP simulation
       * **nfreq** (int): The number of wavelength points to record in the transmission/reflection spectra
       * **input_direction** (1 or -1): Direction of propagation for the input eigenmode. If +1, goes in +x, else if -1, goes in -x. Defaults to +1.
       * **dpml** (float): Length (in microns) of the perfectly-matched layer (PML) at simulation boundaries. Defaults to 0.5 um.
       * **wl_center** (float): Center wavelength (in microns)
       * **wl_span** (float): Wavelength span (determines the pulse width)
       * **port_vcenter** (float): Vertical center of the waveguide
       * **port_height** (float): Height of the port cross-section (flux plane)
       * **port_width** (float): Width of the port cross-section (flux plane)
       * **source_offset** (float): Offset (in x-direction) between reflection monitor and source. Defaults to 0.1 um.
       * **center_x** (float): x-coordinate of the center of the simulation region
       * **center_y** (float): y-coordinate of the center of the simulation region
       * **center_z** (float): z-coordinate of the center of the simulation region
       * **sx** (float): Size of the simulation region in x-direction
       * **sy** (float): Size of the simulation region in y-direction
       * **sz** (float): Size of the simulation region in z-direction
       * **port_coords** (list): List of the port coordinates (variable length), in the format [x1, y1, x2, y2, x3, y3, ...] (*must* be even)
    """
    # Boolean inputs
    fields = args.fields
    # String inputs
    output_directory = args.output_directory
    eps_input_file = args.eps_input_file
    input_pol = args.input_pol
    # Int inputs
    res = args.res
    nfreq = args.nfreq
    input_direction = args.input_direction
    # Float inputs
    dpml = args.dpml
    wl_center = args.wl_center
    wl_span = args.wl_span
    port_vcenter = args.port_vcenter
    port_height = args.port_height
    port_width = args.port_width
    source_offset = args.source_offset
    center_x, center_y, center_z = args.center_x, args.center_y, args.center_z
    sx, sy, sz = args.sx, args.sy, args.sz
    # List of floats: the port coordinates arrive as one space-separated
    # string "x1 y1 x2 y2 ..." in the first positional argument.
    port_coords = [float(x) for x in args.port_coords[0].split(" ")]
    ports = [
        (port_coords[2 * i], port_coords[2 * i + 1])
        for i in range(int(len(port_coords) / 2))
    ]
    # Map the requested input polarization onto a MEEP eigenmode parity.
    if input_pol == "TE":
        parity = mp.ODD_Z
    elif input_pol == "TM":
        parity = mp.EVEN_Z
    else:
        raise ValueError(
            "Warning! Improper value of 'input_pol' was passed to mcts.py (input_pol given ="
            + str(input_pol)
            + ")"
        )
    if len(port_coords) % 2 != 0:
        raise ValueError(
            "Warning! Improper port_coords was passed to `meep_compute_transmission_spectra`. Must be even number of port_coords in [x1, y1, x2, y2, ..] format."
        )
    # Setup the simulation geometries: one mp.Prism per unique prism in the
    # epsilon file, extruded along the y-axis.
    prism_objects = get_prism_objects(eps_input_file)
    geometry = []
    for p in prism_objects:
        # print('vertices='+str(p['vlist']))
        # print('axis = '+str(mp.Vector3(0,1,0)))
        # print('height = '+str(p['height']))
        print("material = " + str(p["eps"]))
        # print('\n')
        geometry.append(
            mp.Prism(
                p["vlist"],
                axis=mp.Vector3(0, 1, 0),
                height=p["height"],
                material=mp.Medium(epsilon=p["eps"]),
            )
        )
    # Setup the simulation sources.  MEEP works in units of 1/wavelength, so
    # the pulse band edges come directly from the wavelength span.
    fmax = 1.0 / (wl_center - 0.5 * wl_span)
    fmin = 1.0 / (wl_center + 0.5 * wl_span)
    fcen = (fmax + fmin) / 2.0
    df = fmax - fmin
    if abs(abs(input_direction) - 1) > 1e-6:
        print(input_direction)
        raise ValueError("Warning! input_direction is not +1 or -1.")
    # Use first port in 'ports' as the location of the eigenmode source
    sources = [
        mp.EigenModeSource(
            src=mp.GaussianSource(fcen, fwidth=df, cutoff=30),
            component=mp.ALL_COMPONENTS,
            # Source plane is oversized (3x) relative to the port cross-section.
            size=mp.Vector3(0, 3 * float(port_height), 3 * float(port_width)),
            center=mp.Vector3(
                ports[0][0] + source_offset - center_x,
                float(port_vcenter) - center_y,
                ports[0][1] - center_z,
            ),
            eig_match_freq=True,
            eig_parity=parity,
            # NOTE(review): eig_kpoint is normally a direction vector (scaled
            # arbitrarily); here it is scaled by wl_center -- confirm intent.
            eig_kpoint=mp.Vector3(float(input_direction) * wl_center, 0, 0),
            eig_resolution=2 * res if res > 16 else 32,
        )
    ]
    # Setup the simulation
    sim = mp.Simulation(
        cell_size=mp.Vector3(sx, sy, sz),
        boundary_layers=[mp.PML(dpml)],
        geometry=geometry,
        sources=sources,
        dimensions=3,
        resolution=res,
        # NOTE(review): meep documents filename_prefix as a string (or None);
        # False relies on falsy handling -- confirm with the installed version.
        filename_prefix=False,
    )
    """ Add power flux monitors """
    print("ADDING FLUX MONITORS")
    flux_plane_objects = []
    # One flux plane (normal to x) per port, recentered on the cell origin.
    for port in ports:
        flux_region = mp.FluxRegion(
            size=mp.Vector3(0, float(port_height), float(port_width)),
            center=mp.Vector3(
                float(port[0]) - center_x,
                float(port_vcenter) - center_y,
                float(port[1]) - center_z,
            ),
        )
        fpo = sim.add_flux(fcen, df, nfreq, flux_region)
        flux_plane_objects.append(fpo)
    sim.use_output_directory(str(output_directory))
    """ Run the simulation """
    """ Monitor the amplitude in the center of the structure """
    decay_pt = mp.Vector3(0, port_vcenter, 0)
    # Side-view (xy plane) and top-view (xz plane) volumes for field dumps.
    sv = mp.Volume(size=mp.Vector3(sx, sy, 0), center=mp.Vector3(0, 0, 0))
    tv = mp.Volume(size=mp.Vector3(sx, 0, sz), center=mp.Vector3(0, port_vcenter, 0))
    print("RUNNING SIMULATION")
    if fields:
        # Dump epsilon once at startup and Ez every time unit, in both views,
        # then run until the field at decay_pt has decayed by 1e-4.
        sim.run(
            mp.at_beginning(mp.output_epsilon),
            mp.at_beginning(
                mp.with_prefix(str("sideview-"), mp.in_volume(sv, mp.output_epsilon))
            ),
            mp.at_beginning(
                mp.with_prefix(str("topview-"), mp.in_volume(tv, mp.output_epsilon))
            ),
            mp.at_every(
                1.0,
                mp.to_appended(
                    str("ez-sideview"), mp.in_volume(sv, mp.output_efield_z)
                ),
            ),
            mp.at_every(
                1.0,
                mp.to_appended(str("ez-topview"), mp.in_volume(tv, mp.output_efield_z)),
            ),
            until_after_sources=mp.stop_when_fields_decayed(20, mp.Ez, decay_pt, 1e-4),
        )
    else:
        sim.run(
            until_after_sources=mp.stop_when_fields_decayed(20, mp.Ez, decay_pt, 1e-4)
        )
    # Print the recorded spectra for every port.
    sim.display_fluxes(*flux_plane_objects)
    print("FINISHED SIMULATION")
if __name__ == "__main__":
"""
Args:
* **fields** (boolean): If true, outputs the fields at the relevant waveguide cross-sections (top-down and side-view)
* **output_directory** (string): Name of the output directory (for storing the fields)
* **eps_input_file** (string): Name of the hdf5 file that defines the geometry through prisms
* **res** (int): Resolution of the MEEP simulation
* **nfreq** (int): The number of wavelength points to record in the transmission/reflection spectra
* **input_direction** (1 or -1): Direction of propagation for the input eigenmode. If +1, goes in +x, else if -1, goes in -x. Defaults to +1.
* **dpml** (float): Length (in microns) of the perfectly-matched layer (PML) at simulation boundaries. Defaults to 0.5 um.
* **wl_center** (float): Center wavelength (in microns)
* **wl_span** (float): Wavelength span (determines the pulse width)
* **port_vcenter** (float): Vertical center of the waveguide
* **port_height** (float): Height of the port cross-section (flux plane)
* **port_width** (float): Width of the port cross-section (flux plane)
* **source_offset** (float): Offset (in x-direction) between reflection monitor and source. Defaults to 0.1 um.
| |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["def"]
# # TESS Atlas fit for TOI {{{TOINUMBER}}}
#
# **Version: {{{VERSIONNUMBER}}}**
#
# **Note: This notebook was automatically generated as part of the TESS Atlas project. More information can be found on GitHub:** [github.com/dfm/tess-atlas](https://github.com/dfm/tess-atlas)
#
# In this notebook, we do a quicklook fit for the parameters of the TESS Objects of Interest (TOI) in the system number {{{TOINUMBER}}}.
# To do this fit, we use the [exoplanet](https://exoplanet.dfm.io) library and you can find more information about that project at [exoplanet.dfm.io](https://exoplanet.dfm.io).
#
# From here, you can scroll down and take a look at the fit results, or you can:
#
# - [open the notebook in Google Colab to run the fit yourself](https://colab.research.google.com/github/dfm/tess-atlas/blob/gh-pages/notebooks/{{{VERSIONNUMBER}}}/toi-{{{TOINUMBER}}}.ipynb),
# - [view the notebook on GitHub](https://github.com/dfm/tess-atlas/blob/gh-pages/notebooks/{{{VERSIONNUMBER}}}/toi-{{{TOINUMBER}}}.ipynb), or
# - [download the notebook](https://github.com/dfm/tess-atlas/raw/gh-pages/notebooks/{{{VERSIONNUMBER}}}/toi-{{{TOINUMBER}}}.ipynb).
#
#
#
# ## Caveats
#
# There are many caveats associated with this relatively simple "quicklook" type of analysis that should be kept in mind.
# Here are some of the main things that come to mind:
#
# 1. The orbits that we fit are constrained to be *circular*. One major effect of this approximation is that the fit will significantly overestimate the confidence of the impact parameter constraint, so the results for impact parameter shouldn't be taken too seriously.
#
# 2. Transit timing variations, correlated noise, and (probably) your favorite systematics are ignored. Sorry!
#
# 3. This notebook was generated automatically without human intervention. Use at your own risk!
#
# ## Table of Contents
#
# 1. [Getting started](#Getting-started)
# 2. [Downloading Data](#Downloading-Data)
# 3. [Fitting stellar parameters](#Fitting-stellar-parameters)
# 4. [Results](#Results)
# 5. [Citations](#Citations)
# 6. [Posterior constraints](#Posterior-constraints)
# 7. [Attribution](#Attribution)
#
# ## Getting started
#
# To get going, we'll need to make out plots show up inline:
# + pycharm={"name": "#%%\n"} tags=["exe"]
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# + [markdown] tags=["def"]
# Then we'll set up the plotting styles and do all of the imports:
# + pycharm={"name": "#%%\n"} tags=["def"]
from tess_atlas.utils import notebook_initalisations
notebook_initalisations()
# + pycharm={"name": "#%%\n"} tags=["exe"]
import os
import aesara_theano_fallback.tensor as tt
import exoplanet as xo
import numpy as np
import pandas as pd
import pymc3 as pm
import pymc3_ext as pmx
from arviz import InferenceData
from celerite2.theano import GaussianProcess, terms
from tess_atlas.analysis import (
calculate_eccentricity_weights,
compute_variable,
get_untransformed_varnames,
sample_prior,
)
from tess_atlas.data import TICEntry
from tess_atlas.data.inference_data_tools import summary
from tess_atlas.utils import get_notebook_logger
# + pycharm={"name": "#%%\n"} tags=["exe"]
os.environ["INTERACTIVE_PLOTS"] = "FALSE" # "TRUE" for interactive plots
from tess_atlas.plotting import (
plot_eccentricity_posteriors,
plot_folded_lightcurve,
plot_lightcurve,
plot_phase,
plot_posteriors,
plot_priors,
)
# + pycharm={"name": "#%%\n"} tags=["exe"]
# {{{TOINUMBER}}} is a template placeholder substituted with the TOI id when
# the notebook is generated; this line is not valid Python until then.
TOI_NUMBER = {{{TOINUMBER}}}
# All logs and outputs for this TOI go into its own directory.
logger = get_notebook_logger(outdir=f"toi_{TOI_NUMBER}_files")
# + [markdown] tags=["def"]
# ## Downloading Data
#
# Next, we grab some initial guesses for the TOI's parameters from [ExoFOP](https://exofop.ipac.caltech.edu/tess/) and download the TOI's lightcurve with [Lightkurve].
#
# We wrap the information in three objects, a `TIC Entry`, a `Planet Candidate` and finally a `Lightcurve Data` object.
#
# - The `TIC Entry` object holds one or more `Planet Candidate`s (each candidate associated with one TOI id number) and a `Lightcurve Data` associated with the candidates. Note that the `Lightcurve Data` object is initially the same for each candidate but may be masked according to the candidate transit's period.
#
# - The `Planet Candidate` holds information on the TOI data collected by [SPOC] (e.g. transit period, etc.)
#
# - The `Lightcurve Data` holds the lightcurve time and flux data for the planet candidates.
#
# [ExoFOP]: https://exofop.ipac.caltech.edu/tess/
# [Lightkurve]: https://docs.lightkurve.org/index.html
# [SPOC]: https://heasarc.gsfc.nasa.gov/docs/tess/pipeline.html
#
# Downloading the data (this may take a few minutes):
# + pycharm={"name": "#%%\n"} tags=["exe"]
# Fetch the ExoFOP metadata and lightcurve for this TOI.
tic_entry = TICEntry.load(toi=TOI_NUMBER)
# -
# Some of the TOIs parameters stored on ExoFOP:
# + pycharm={"name": "#%%\n"} tags=["exe"]
tic_entry.display()
# -
# Plot of the lightcurve:
# + pycharm={"name": "#%%\n"} tags=["exe"]
plot_lightcurve(tic_entry)
# -
# ## Fitting stellar parameters
# Now that we have the data, we can define a Bayesian model to fit it.
#
# ### The probabilistic model
#
# We use the probabilistic model as described in [Foreman-Mackey et al 2017] to determine the best parameters to fit the transits present in the lightcurve data.
#
# More explicitly, the stellar light curve $l(t; \vec{\theta})$ is modelled with a Gaussian Process (GP).
# A GP consists of a mean function $\mu(t;\vec{\theta})$ and a kernel function $k_\alpha(t,t';\vec{\theta})$, where $\vec{\theta}$ is the vector of parameters describing the lightcurve and $t$ is the time during which the lightcurve is under observation
#
# The 8 parameters describing the lightcurve are
# $$\vec{\theta} = \{d_i, t0_i, tmax_i, b_i, r_i, f0, u1, u2\},$$
# where
# * $d_i$ transit durations for each planet,
# * $t0_i$ time of first transit for each planet (reference time),
# * $tmax_i$ time of the last transit observed by TESS for each planet (a second reference time),
# * $b_i$ impact parameter for each planet,
# * $r_i$ planet radius in stellar radius for each planet,
# * $f0$ baseline relative flux of the light curve from star,
# * $u1$ $u2$ two parameters describing the limb-darkening profile of star.
#
# Note: if the observed data only records a single transit,
# we swap $tmax_i$ with $p_i$ (orbital periods for each planet).
#
# With this we can write
# $$l(t;\vec{\theta}) \sim \mathcal{GP} (\mu(t;\vec{\theta}), k_\alpha(t,t';\vec{\theta}))\ .$$
#
# Here the mean and kernel functions are:
# * $\mu(t;\vec{\theta})$: a limb-darkened transit light curve ([Kipping 2013])
# * $k_\alpha(t,t';\vec{\theta}))$: a stochastically-driven, damped harmonic oscillator ([SHOTterm])
#
#
# Now that we have defined our transit model, we can implement it in python:
#
# [Foreman-Mackey et al 2017]: https://arxiv.org/pdf/1703.09710.pdf
# [Kipping 2013]: https://arxiv.org/abs/1308.0009
# [SHOTterm]: https://celerite2.readthedocs.io/en/latest/api/python/?highlight=SHOTerm#celerite2.terms.SHOTerm
# + pycharm={"name": "#%%\n"} tags=["def"]
# Names of the model parameters (used as PyMC3 variable names below).
DEPTH = "depth"  # transit depth
DURATION = "dur"  # transit duration d_i
RADIUS_RATIO = "r"  # planet radius / stellar radius r_i
TIME_START = "t0"  # time of first observed transit (reference time)
TIME_END = "tmax"  # time of last observed transit (second reference time)
ORBITAL_PERIOD = "p"  # orbital period p_i
MEAN_FLUX = "f0"  # baseline relative flux from the star
LC_JITTER = "jitter"  # lightcurve white-noise jitter
GP_RHO = "rho"  # GP SHOTerm rho parameter
GP_SIGMA = "sigma"  # GP SHOTerm sigma parameter
RHO_CIRC = "rho_circ"  # stellar density at e=0
LIMB_DARKENING_PARAM = "u"  # quadratic limb-darkening parameters u1, u2
IMPACT_PARAM = "b"  # impact parameter b_i
def get_test_duration(min_durations, max_durations, durations):
    """Clip the per-planet duration guesses into the allowed prior range.

    Each duration is raised to at least twice its minimum and then capped
    at 99% of its maximum, giving a valid starting value (testval) for the
    bounded duration prior.
    """
    floored = np.amax(np.array([durations, 2 * min_durations]), axis=0)
    capped = np.amin(np.array([floored, 0.99 * max_durations]), axis=0)
    return capped
def build_planet_transit_model(tic_entry):
t = tic_entry.lightcurve.time
y = tic_entry.lightcurve.flux
yerr = tic_entry.lightcurve.flux_err
n = tic_entry.planet_count
t0s = np.array([planet.t0 for planet in tic_entry.candidates])
depths = np.array([planet.depth for planet in tic_entry.candidates])
periods = np.array([planet.period for planet in tic_entry.candidates])
tmaxs = np.array([planet.tmax for planet in tic_entry.candidates])
durations = np.array([planet.duration for planet in tic_entry.candidates])
max_durations = np.array(
[planet.duration_max for planet in tic_entry.candidates]
)
min_durations = np.array(
[planet.duration_min for planet in tic_entry.candidates]
)
test_duration = get_test_duration(min_durations, max_durations, durations)
with pm.Model() as my_planet_transit_model:
## define planet parameters
# 1) d: transit duration (duration of eclipse)
d_priors = pm.Bound(
pm.Lognormal, lower=min_durations, upper=max_durations
)(
name=DURATION,
mu=np.log(durations),
sigma=np.log(1.2),
shape=n,
testval=test_duration,
)
# 2) r: radius ratio (planet radius / star radius)
r_priors = pm.Lognormal(
name=RADIUS_RATIO, mu=0.5 * np.log(depths * 1e-3), sd=1.0, shape=n
)
# 3) b: impact parameter
b_priors = xo.distributions.ImpactParameter(
name=IMPACT_PARAM, ror=r_priors, shape=n
)
planet_priors = [r_priors, d_priors, b_priors]
## define orbit-timing parameters
# 1) t0: the time of the first transit in data (a reference time)
t0_norm = pm.Bound(
pm.Normal, lower=t0s - max_durations, upper=t0s + max_durations
)
t0_priors = t0_norm(
TIME_START, mu=t0s, sigma=0.5 * durations, shape=n, testval=t0s
)
# 2) period: the planets' orbital period
p_params, p_priors_list, tmax_priors_list = [], [], []
for n, planet in enumerate(tic_entry.candidates):
# if only one transit in data we use the period
if planet.has_data_only_for_single_transit:
p_prior = pm.Pareto(
name=f"{ORBITAL_PERIOD}_{planet.index}",
m=planet.period_min,
alpha=2.0 / 3.0,
testval=planet.period_min,
)
p_param = p_prior
tmax_prior = planet.t0
# if more than one transit in data we use a second time reference (tmax)
else:
tmax_norm = pm.Bound(
pm.Normal,
lower=planet.tmax - planet.duration_max,
upper=planet.tmax + planet.duration_max,
)
tmax_prior = tmax_norm(
name=f"{TIME_END}_{planet.index}",
mu=planet.tmax,
sigma=0.5 * planet.duration,
testval=planet.tmax,
)
p_prior = (tmax_prior - t0_priors[n]) / planet.num_periods
p_param = tmax_prior
p_params.append(p_param) # the param needed to calculate p
p_priors_list.append(p_prior)
tmax_priors_list.append(tmax_prior)
p_priors = pm.Deterministic(ORBITAL_PERIOD, tt.stack(p_priors_list))
tmax_priors = pm.Deterministic(TIME_END, tt.stack(tmax_priors_list))
## define stellar parameters
# 1) f0: the mean flux from the star
f0_prior = pm.Normal(name=MEAN_FLUX, mu=0.0, sd=10.0)
# 2) u1, u2: limb darkening parameters
u_prior = xo.distributions.QuadLimbDark("u")
stellar_priors = [f0_prior, u_prior]
## define k(t, t1; parameters)
jitter_prior = pm.InverseGamma(
name=LC_JITTER, **pmx.estimate_inverse_gamma_parameters(1.0, 5.0)
)
sigma_prior = pm.InverseGamma(
name=GP_SIGMA, **pmx.estimate_inverse_gamma_parameters(1.0, 5.0)
)
rho_prior = pm.InverseGamma(
name=GP_RHO, **pmx.estimate_inverse_gamma_parameters(0.5, 10.0)
)
kernel = terms.SHOTerm(sigma=sigma_prior, rho=rho_prior, Q=0.3)
noise_priors = [jitter_prior, sigma_prior, rho_prior]
## define the lightcurve model mu(t;paramters)
orbit = xo.orbits.KeplerianOrbit(
period=p_priors,
t0=t0_priors,
b=b_priors,
duration=d_priors,
ror=r_priors,
)
star = xo.LimbDarkLightCurve(u_prior)
lightcurve_models = star.get_light_curve(orbit=orbit, r=r_priors, t=t)
lightcurve = 1e3 * pm.math.sum(lightcurve_models, axis=-1) + | |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal_nulp
from astropy.convolution.convolve import convolve_fft, convolve
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.compat.context import nullcontext
# dtypes exercised by the convolution tests: big/little-endian float32/64.
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))

# Parameter spaces swept by the parametrized tests below.
BOUNDARY_OPTIONS = [None, 'fill', 'wrap']
NANTREATMENT_OPTIONS = ('interpolate', 'fill')
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]

"""
What does convolution mean? We use the 'same size' assumption here (i.e.,
you expect an array of the exact same size as the one you put in)
Convolving any array with a kernel that is [1] should result in the same array returned
Working example array: [1, 2, 3, 4, 5]
Convolved with [1] = [1, 2, 3, 4, 5]
Convolved with [1, 1] = [1, 3, 5, 7, 9] THIS IS NOT CONSISTENT!
Convolved with [1, 0] = [1, 2, 3, 4, 5]
Convolved with [0, 1] = [0, 1, 2, 3, 4]
"""

# NOTE: use_numpy_fft is redundant if you don't have FFTW installed
# Argument-name tuples and value cross-products fed to pytest.mark.parametrize.
option_names = ('boundary', 'nan_treatment', 'normalize_kernel')
options = list(itertools.product(BOUNDARY_OPTIONS,
                                 NANTREATMENT_OPTIONS,
                                 (True, False),
                                 ))
option_names_preserve_nan = ('boundary', 'nan_treatment',
                             'normalize_kernel', 'preserve_nan')
options_preserve_nan = list(itertools.product(BOUNDARY_OPTIONS,
                                              NANTREATMENT_OPTIONS,
                                              (True, False),
                                              (True, False)))
def expected_boundary_warning(boundary=None):
    """Return the context manager appropriate for the given *boundary*.

    ``convolve_fft`` emits an AstropyUserWarning when ``boundary=None``
    (it behaves like the convolve ``boundary='fill'``); for any other
    boundary no warning is expected, so a no-op context is returned.
    """
    if boundary is not None:
        return nullcontext()
    return pytest.warns(AstropyUserWarning,
                        match='The convolve_fft version of boundary=None '
                              'is equivalent to the convolve boundary=\'fill\'')
def assert_floatclose(x, y):
    """Assert that *x* and *y* agree to within 64-bit FFT rounding error.

    The tolerance reflects that all powers in the FFTs enter our values;
    the factor of 10 covers the Windows FFT occasionally returning an
    answer that is exactly 10 * np.spacing off.
    """
    tolerance = 10 * np.spacing(x.max())
    assert_allclose(x, y, atol=tolerance, rtol=0.)
class TestConvolve1D:
@pytest.mark.parametrize(option_names, options)
def test_unity_1_none(self, boundary, nan_treatment, normalize_kernel):
    """Convolving with a single-element unit kernel returns the input."""
    array = np.array([1., 2., 3.], dtype='float64')
    kernel = np.array([1.], dtype='float64')
    with expected_boundary_warning(boundary=boundary):
        result = convolve_fft(array, kernel, boundary=boundary,
                              nan_treatment=nan_treatment,
                              normalize_kernel=normalize_kernel)
    assert_floatclose(result, array)
@pytest.mark.parametrize(option_names, options)
def test_unity_3(self, boundary, nan_treatment, normalize_kernel):
    """A three-element delta kernel ([0, 1, 0]) leaves the array unchanged
    (except when boundary is None)."""
    array = np.array([1., 2., 3.], dtype='float64')
    kernel = np.array([0., 1., 0.], dtype='float64')
    with expected_boundary_warning(boundary=boundary):
        result = convolve_fft(array, kernel, boundary=boundary,
                              nan_treatment=nan_treatment,
                              normalize_kernel=normalize_kernel)
    assert_floatclose(result, array)
@pytest.mark.parametrize(option_names, options)
def test_uniform_3(self, boundary, nan_treatment, normalize_kernel):
    '''
    Test that the different modes are producing the correct results using
    a uniform kernel with three elements
    '''
    x = np.array([1., 0., 3.], dtype='float64')
    y = np.array([1., 1., 1.], dtype='float64')
    with expected_boundary_warning(boundary=boundary):
        z = convolve_fft(x, y, boundary=boundary,
                         nan_treatment=nan_treatment,
                         normalize_kernel=normalize_kernel)
    answer_key = (boundary, nan_treatment, normalize_kernel)
    # Hand-computed reference answers for each (boundary, normalization) regime.
    answer_dict = {
        'sum_fill_zeros': np.array([1., 4., 3.], dtype='float64'),
        'average_fill_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
        'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
        'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
    }
    result_dict = {
        # boundary, nan_treatment, normalize_kernel
        ('fill', 'interpolate', True): answer_dict['average_fill_zeros'],
        ('wrap', 'interpolate', True): answer_dict['average_wrap'],
        ('fill', 'interpolate', False): answer_dict['sum_fill_zeros'],
        ('wrap', 'interpolate', False): answer_dict['sum_wrap'],
    }
    # With no NaNs in the input, nan_treatment='fill' matches 'interpolate',
    # so alias those entries.
    for k in list(result_dict.keys()):
        result_dict[(k[0], 'fill', k[2])] = result_dict[k]
    # boundary=None behaves like boundary='fill' in convolve_fft.
    for k in list(result_dict.keys()):
        if k[0] == 'fill':
            result_dict[(None, k[1], k[2])] = result_dict[k]
    assert_floatclose(z, result_dict[answer_key])
@pytest.mark.parametrize(option_names, options)
def test_halfity_3(self, boundary, nan_treatment, normalize_kernel):
    '''
    Test that the different modes are producing the correct results using
    a uniform, non-unity kernel with three elements
    '''
    x = np.array([1., 0., 3.], dtype='float64')
    y = np.array([0.5, 0.5, 0.5], dtype='float64')
    with expected_boundary_warning(boundary=boundary):
        z = convolve_fft(x, y, boundary=boundary,
                         nan_treatment=nan_treatment,
                         normalize_kernel=normalize_kernel)
    # Reference answers; with a normalized kernel the 0.5 weights cancel,
    # so 'average*' entries match the uniform-kernel averages.
    answer_dict = {
        'sum': np.array([0.5, 2.0, 1.5], dtype='float64'),
        'sum_zeros': np.array([0.5, 2., 1.5], dtype='float64'),
        'sum_nozeros': np.array([0.5, 2., 1.5], dtype='float64'),
        'average': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
        'sum_wrap': np.array([2., 2., 2.], dtype='float64'),
        'average_wrap': np.array([4 / 3., 4 / 3., 4 / 3.], dtype='float64'),
        'average_zeros': np.array([1 / 3., 4 / 3., 1.], dtype='float64'),
        'average_nozeros': np.array([0.5, 4 / 3., 1.5], dtype='float64'),
    }
    # Build the expected-answer key from the options actually in effect.
    if normalize_kernel:
        answer_key = 'average'
    else:
        answer_key = 'sum'
    if boundary == 'wrap':
        answer_key += '_wrap'
    else:
        # average = average_zeros; sum = sum_zeros
        answer_key += '_zeros'
    assert_floatclose(z, answer_dict[answer_key])
@pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
def test_unity_3_withnan(self, boundary, nan_treatment, normalize_kernel,
                         preserve_nan):
    """Delta kernel on an array containing a NaN.

    The array should come back unchanged (except when boundary is None);
    the NaN is either preserved or replaced by zero depending on
    *preserve_nan*.
    """
    array = np.array([1., np.nan, 3.], dtype='float64')
    kernel = np.array([0., 1., 0.], dtype='float64')
    with expected_boundary_warning(boundary=boundary):
        result = convolve_fft(array, kernel, boundary=boundary,
                              nan_treatment=nan_treatment,
                              normalize_kernel=normalize_kernel,
                              preserve_nan=preserve_nan)
    if preserve_nan:
        assert np.isnan(result[1])
    result = np.nan_to_num(result)
    assert_floatclose(result, [1., 0., 3.])
# Parametrization data for test_unity_1_withnan: inputs with a non-finite
# center value and the corresponding expected (zero-filled) outputs.
inputs = (np.array([1., np.nan, 3.], dtype='float64'),
          np.array([1., np.inf, 3.], dtype='float64'))
outputs = (np.array([1., 0., 3.], dtype='float64'),
           np.array([1., 0., 3.], dtype='float64'))
# Full cross-product of boundary / nan_treatment / normalize_kernel /
# preserve_nan with every (input, output) pairing.
options_unity1withnan = list(itertools.product(BOUNDARY_OPTIONS,
                                               NANTREATMENT_OPTIONS,
                                               (True, False),
                                               (True, False),
                                               inputs, outputs))
@pytest.mark.parametrize(option_names_preserve_nan + ('inval', 'outval'),
                         options_unity1withnan)
def test_unity_1_withnan(self, boundary, nan_treatment, normalize_kernel,
                         preserve_nan, inval, outval):
    """Single-element unit kernel on arrays with a non-finite center value.

    The finite values must come back unchanged; the non-finite entry is
    either preserved as NaN or zero-filled depending on *preserve_nan*.
    """
    kernel = np.array([1.], dtype='float64')
    with expected_boundary_warning(boundary=boundary):
        result = convolve_fft(inval, kernel, boundary=boundary,
                              nan_treatment=nan_treatment,
                              normalize_kernel=normalize_kernel,
                              preserve_nan=preserve_nan)
    if preserve_nan:
        assert np.isnan(result[1])
    result = np.nan_to_num(result)
    assert_floatclose(result, outval)
    @pytest.mark.parametrize(option_names_preserve_nan, options_preserve_nan)
    def test_uniform_3_withnan(self, boundary, nan_treatment,
                               normalize_kernel, preserve_nan):
        '''
        Test that the different modes are producing the correct results using
        a uniform kernel with three elements. This version includes a NaN
        value in the original array.
        '''
        x = np.array([1., np.nan, 3.], dtype='float64')
        y = np.array([1., 1., 1.], dtype='float64')
        with expected_boundary_warning(boundary=boundary):
            z = convolve_fft(x, y, boundary=boundary,
                             nan_treatment=nan_treatment,
                             normalize_kernel=normalize_kernel,
                             preserve_nan=preserve_nan)
        if preserve_nan:
            # The NaN position must survive when preservation is requested.
            assert np.isnan(z[1])
        # Expected results keyed by '<sum|average>[_wrap|_zeros][_interpnan]',
        # mirroring the normalize_kernel / boundary / nan_treatment options.
        answer_dict = {
            'sum': np.array([1., 4., 3.], dtype='float64'),
            'sum_nozeros': np.array([1., 4., 3.], dtype='float64'),
            'sum_zeros': np.array([1., 4., 3.], dtype='float64'),
            'sum_nozeros_interpnan': np.array([1., 4., 3.], dtype='float64'),
            'average': np.array([1., 2., 3.], dtype='float64'),
            'sum_wrap': np.array([4., 4., 4.], dtype='float64'),
            'average_wrap': np.array([4/3., 4/3., 4/3.], dtype='float64'),
            'average_wrap_interpnan': np.array([2, 2, 2], dtype='float64'),
            'average_nozeros': np.array([1/2., 4/3., 3/2.], dtype='float64'),
            'average_nozeros_interpnan': np.array([1., 2., 3.], dtype='float64'),
            'average_zeros': np.array([1 / 3., 4 / 3., 3 / 3.], dtype='float64'),
            'average_zeros_interpnan': np.array([1 / 2., 4 / 2., 3 / 2.], dtype='float64'),
        }
        # NaN interpolation rescales unnormalized sums by 3/2 (one of the
        # three kernel taps falls on the interpolated-away NaN).
        for key in list(answer_dict.keys()):
            if 'sum' in key:
                answer_dict[key+"_interpnan"] = answer_dict[key] * 3./2.
        # Build the answer key from the active options.
        if normalize_kernel:
            answer_key = 'average'
        else:
            answer_key = 'sum'
        if boundary == 'wrap':
            answer_key += '_wrap'
        else:
            # average = average_zeros; sum = sum_zeros
            answer_key += '_zeros'
        if nan_treatment == 'interpolate':
            answer_key += '_interpnan'
        # Compare only finite positions (the NaN slot may be preserved).
        posns = np.isfinite(z)
        answer = answer_dict[answer_key][posns]
        # check that fill is set and that the 1'th position that was originally
        # NaN is included in the check
        if (nan_treatment == 'fill') and posns[1]:
            # we fill the center with the sum of the input array divided by
            # three, since we've now pre-filled the center value with zero
            answer[1] = 4 / (3. if normalize_kernel else 1.)
        assert_floatclose(z[posns], answer)
def test_nan_interpolate(self):
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill',
nan_treatment='interpolate',
fill_value=np.nan)
assert_floatclose(result, [1, 2, 3])
def test_nan_fill(self):
# regression for #8121
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
result = convolve_fft(array, kernel, boundary='fill',
nan_treatment='fill',
fill_value=0)
# note that, because fill_value also affects boundary='fill', the edge
# pixels are treated as zero rather than being ignored.
assert_floatclose(result, [1/3., 4/3., 1.])
def test_nan_fill_two(self):
# regression for #8121
# Test masked array
array = np.array([1., np.nan, 3.], dtype='float64')
kernel = np.array([1, 1, 1])
result = convolve_fft(array, kernel, boundary='fill',
nan_treatment='fill',
fill_value=1)
# note that, because fill_value also affects boundary='fill', the edge
# pixels are treated as fill_value=1 rather than being ignored.
assert_floatclose(result, [1., 5/3., 5/3.])
def test_masked_array(self):
"""
Check whether convolve_fft works with masked arrays.
"""
# Test masked array
array = np.array([1., 2., 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_array = np.ma.masked_array(array, mask=[0, 1, 0])
result = convolve_fft(masked_array, kernel, boundary='fill',
fill_value=0.)
assert_floatclose(result, [1./2, 2, 3./2])
# Now test against convolve()
convolve_result = convolve(masked_array, kernel, boundary='fill',
fill_value=0.)
assert_floatclose(convolve_result, result)
# Test masked kernel
array = np.array([1., 2., 3.], dtype='float64')
kernel = np.array([1, 1, 1])
masked_kernel | |
# Source file: python/resq/run/__init__.py  (extraction artifact "<gh_stars>10-100" removed)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
import asyncio
import atexit
from collections import Iterable, defaultdict
import functools
import hashlib
import itertools as it
import numpy as np
import operator
from pprint import pformat
import re
from sqlalchemy import Column, Integer, PickleType, orm
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy_utils import ScalarListType
from subprocess import call, DEVNULL, PIPE
import resq.config as config
from resq.nf import NF, NFStatus
from resq.traffic import Traffic
from resq.traffic.generator import MelvinGen
from resq.util import Singleton
import resq.util.db as db
from resq.util.log import log_debug, log_error, log_info
from resq.util import cbm_to_size, cbm_to_ways, ways_to_cbm, disable_c_state, disable_p_state, disable_nmi_watchdog, disable_core_frequency_scaling, disable_uncore_frequency_scaling, init_dpdk, init_hugepages, init_lxc, init_netmap, init_rdt, l3_flush, l3_flush_stream, configure_ddio, configure_irq_affinity, configure_pcie
class Run(db.DeclarativeBase):
__tablename__ = 'run'
instances = {}
run_id = Column(Integer(), primary_key=True)
pipelets = Column(ScalarListType(), nullable=False)
cbms = Column(ScalarListType(int), nullable=False)
traffics = Column(ScalarListType(), nullable=False, default=False)
utilizations = Column(ScalarListType(int), nullable=False, default=False)
run_number = Column(Integer, nullable=False)
results = Column(PickleType, nullable=False)
def __new__(cls,
pipelets=None,
cbms=None,
traffics=None,
utilizations=None,
run_number=0):
if pipelets is None:
return super().__new__(cls)
assert (pipelets is not None)
for i in [pipelets, cbms, traffics, utilizations]:
assert (i is None or isinstance(i, Iterable))
#assert(len(set(cbms)) <= 4)
#assert(len(set(cbms)) == len(list(it.groupby(cbms))))
if db.session is None:
db.init()
nr_pipelets = len(pipelets)
if cbms is None:
cbms = (config.cat['cbm_max'], ) * nr_pipelets
if traffics is None:
traffics = ('u100k_60', ) * nr_pipelets
if utilizations is None:
utilizations = (100, ) * nr_pipelets
run_number = run_number
# TODO: remove workaround and fix the test logic
traffics = list(traffics)
for i, (nf, traffic) in enumerate(zip(pipelets, traffics)):
pkt_size_max = NF(nf).pkt_size_max
pkt_size = Traffic(traffic).size[0][0]
if pkt_size > pkt_size_max:
traffics[i] = traffic.replace(str(pkt_size), str(pkt_size_max))
traffics = tuple(traffics)
key = (tuple(pipelets), tuple(cbms), tuple(traffics),
tuple(utilizations), run_number)
if key in Run.instances:
return Run.instances[key]
obj = db.session.query(Run).filter_by(
pipelets=pipelets,
cbms=cbms,
traffics=traffics,
utilizations=utilizations,
run_number=run_number).first()
if obj is None:
obj = super().__new__(cls)
Run.instances[key] = obj
return obj
    def __init__(self,
                 pipelets,
                 cbms=None,
                 traffics=None,
                 utilizations=None,
                 run_number=0):
        """Initialize a fresh Run; no-op for interned/loaded instances.

        __new__ may return a cached or database-backed instance whose
        `results` is already populated — in that case skip re-initialization
        so existing state is not clobbered.
        """
        if self.results is not None:
            return
        # Mirror the defaulting logic of __new__ (full cache mask, fixed
        # traffic profile, 100% utilization per pipelet).
        nr_pipelets = len(pipelets)
        if cbms is None:
            cbms = (config.cat['cbm_max'], ) * nr_pipelets
        if traffics is None:
            traffics = ('u100k_60', ) * nr_pipelets
        if utilizations is None:
            utilizations = (100, ) * nr_pipelets
        self.pipelets = pipelets
        self.cbms = cbms
        self.traffics = traffics
        self.utilizations = utilizations
        self.run_number = run_number
        self.results = {}
        # Coerce the list-typed attributes to numpy arrays.
        self.__normalizer__()
@orm.reconstructor
def __normalizer__(self):
self.pipelets = np.array(self.pipelets)
self.cbms = np.array(self.cbms)
self.traffics = np.array(self.traffics)
self.utilizations = np.array(self.utilizations)
def __eq__(self, e):
return (isinstance(e, self.__class__) and all([
hasattr(e, k) and getattr(e, k) == v for k, v in self.__dict__
if k != 'results'
]))
    def __getattr__(self, attr):
        """Dynamic attribute dispatch for metrics.

        Resolution order:
          1. pickling hooks are refused so copy/pickle work normally;
          2. keys of the `results` dict (iterables become numpy arrays);
          3. '<metric>_solos_<suffix>' -> per-solo-run first element of
             <metric>, NaN when a solo run lacks it;
          4. '<metric>_normalized_<suffix>' -> 100 * self/<solo> ratio;
          5. per-pipelet NF flags looked up on the NF objects.
        Raises AttributeError otherwise (required so hasattr() works).
        """
        if attr in ['__getstate__', '__setstate__']:
            raise AttributeError
        if 'results' in self.__dict__ and attr in self.results:
            if isinstance(self.results[attr], Iterable):
                return np.array(self.results[attr])
            return self.results[attr]
        if '_solos_' in attr:
            # e.g. 'pps_solos_same_cbm' -> metric 'pps' over runs returned
            # by the 'solos_same_cbm' attribute (defined elsewhere).
            subattr = attr[:attr.index('_solos')]
            solos = getattr(self, attr[attr.index('solos_'):])
            #return np.vectorize(lambda i: getattr(i, subattr)[0])(solos)
            return np.array([
                getattr(s, subattr)[0] if hasattr(s, subattr) else np.nan
                for s in solos
            ])
        if '_normalized_' in attr:
            # Percentage of this run's metric relative to the solo runs'.
            subattr = attr[:attr.index('_normalized')]
            self_val = getattr(self, subattr)
            solo_val = getattr(self, attr.replace('normalized', 'solos'))
            return 100 * self_val / solo_val
        if attr in ['idle_cycles', 'is_mlc', 'is_real', 'is_syn']:
            return np.array([getattr(NF(i), attr) for i in self.pipelets])
        raise AttributeError(attr)
def __hash__(self):
return int(hashlib.sha1(str(self).encode()).hexdigest(), 16)
def __ne__(self, e):
return not self.__eq__(e)
def __str__(self):
return 'Run(%s)' % ','.join([
'%s=%s' % (k, getattr(self, k))
for k in
['pipelets', 'cbms', 'traffics', 'utilizations', 'run_number']
])
def copy(self, **kwargs):
for name, value in self.__dict__.items():
if name in ['pipelets', 'cbms', 'traffics', 'utilizations', 'run_number'] and \
name not in kwargs:
kwargs[name] = value
return Run(**kwargs)
def encode(self, encoding):
return str(self).encode(encoding)
@property
def all_cbms(self):
f = filter(
lambda e: e.is_done, [
self.copy(
cbms=tuple([ways_to_cbm(nr_ways)] + list(self.cbms[1:])))
for nr_ways in range(config.cat['nr_ways_min'],
config.cat['nr_ways_max'] + 1)
])
return np.array(list(f))
@property
def all_runs(self):
f = filter(
lambda e: e.is_done,
[self.copy(run_number=r) for r in config.run_numbers])
return np.array(list(f))
@property
def all_runs_pending(self):
f = filter(
lambda e: not e.is_done,
[self.copy(run_number=r) for r in config.run_numbers])
return np.array(list(f))
@property
def all_utils(self):
f = filter(
lambda e: e.is_done, [
self.copy(
utilizations=tuple([u] + list(self.utilizations[1:])))
for u in config.utilizations
])
return np.array(list(f))
@property
def has_solos(self):
return all(self.solos_full_utilization)
@property
def is_done(self):
try:
return self.is_sane()[0]
except Exception as e:
log_error('failed to test for run sanity: %s' % e)
return False
    @property
    def is_necessary(self):
        """Heuristic: is measuring this utilization point worthwhile?

        Always keep the extreme configured utilizations, drop anything
        outside the configured range, and otherwise keep the point only
        when the neighboring RTT samples suggest the curve is not yet
        smooth around it.
        """
        u = self.utilizations[0]
        if u in [min(config.utilizations), max(config.utilizations)]:
            return True
        if u < min(config.utilizations) or u > max(config.utilizations):
            return False
        # Completed sibling runs at other utilizations.
        l = [e for e in self.all_utils if e.utilizations[0] != u]
        if len(l) < 2:
            return True
        l.sort(key=lambda e: e.utilizations[0])
        # Rows of (utilization, 95th-percentile RTT, run); mixed types make
        # this an object-dtype array — presumably intentional, verify.
        data = np.array([(e.utilizations[0], e.rtt_95[0], e) for e in l])
        idx = np.searchsorted(data.T[0], u)
        if idx in [0, len(data)]:
            return True
        # Keep the point when RTT drops sharply or jumps by more than 10
        # between the bracketing neighbors.
        rtt1, rtt2 = data[idx - 1][1], data[idx][1]
        if rtt2 + 5 < rtt1:
            return True
        return (rtt2 - rtt1) > 10
@property
def is_isolated(self):
if self.is_mlc.any():
return False
int_cmasks = [int(i, 16) for i in self.cbms]
for i, cmask in enumerate(int_cmasks):
others = int_cmasks[:]
others.pop(i)
if others:
or_ = functools.reduce(operator.or_, others)
if cmask & or_ != 0:
return False
return True
@property
def has_results(self):
return 'rx_mbps_mean' in self.results and \
'cache_references' in self.results
def is_sane(self):
assert (isinstance(self.pipelets, np.ndarray))
if not self.has_results:
return (False, ('no results', ))
valid_rx = \
np.any([self.is_mlc,
self.utilizations < 10,
self.mpps > 0.005], axis=0).all()
if not valid_rx:
RuntimeManager().tgen.stop()
if self.nr_pipelets > 1 and 'syn' in self.pipelets[1]:
return (valid_rx, ('invalid rx'))
valid_rx_cv = \
valid_rx and \
np.any([self.is_mlc,
(self.rx_mpps_std / self.mpps) < 0.1], axis=0).all()
valid_tx = \
np.any([self.is_mlc,
self.utilizations == 100,
np.isclose(self.tx_mbps_request,
self.tx_mbps_mean,
atol=1, rtol=1e-2)], axis=0).all()
valid_rxtx = \
valid_rx and valid_tx and \
np.any([self.utilizations > 80,
self.is_mlc,
np.isclose(self.mpps,
self.tx_mpps_mean,
atol=1e-2, rtol=2e-2)], axis=0).all()
valid_rtt = \
np.any([self.utilizations > 80,
self.is_mlc,
self.rtt_95 < 100], axis=0).all() and \
np.any([self.utilizations >= 95,
self.is_mlc,
self.rtt_95 < 300], axis=0).all()
valid_utilization = \
np.any([self.utilizations == 100,
self.is_mlc,
np.isclose(self.utilizations,
self.pps_normalized_full_utilization,
atol=1, rtol=3e-2)], axis=0).all()
tests = [
(valid_rx, 'invalid rx'),
(valid_rx_cv, 'invalid rx cv'),
(valid_tx, 'invalid tx'),
(valid_rxtx, 'invalid rx/tx'),
(valid_rtt, 'invalid rtt'),
(valid_utilization, 'invalid utilization %s' %
self.pps_normalized_full_utilization[0]),
]
zipped = tuple(zip(*tests))
retval = (all(zipped[0]),
tuple(it.compress(zipped[1], np.logical_not(zipped[0]))))
return retval
    # cycles per packet
    @property
    def cpp(self):
        """CPU cycles spent per received packet."""
        return self.cpu_cycles / self.pps
    # instructions per cycle
    @property
    def ipc(self):
        """Instructions retired per CPU cycle."""
        return self.instructions / self.cpu_cycles
    # instructions per packet
    @property
    def ipp(self):
        """Instructions retired per received packet."""
        return self.instructions / self.pps
    @property
    def l3missrate(self):
        """LLC miss rate as a percentage of LLC references."""
        return 100 * self.cache_misses / self.cache_references
    # l3refs per packet
    @property
    def l3pp(self):
        """LLC references per received packet."""
        return self.cache_references / self.pps
    @property
    def mpps(self):
        """Mean received rate in millions of packets per second."""
        return self.rx_mpps_mean
    @property
    def pps(self):
        """Mean received rate in packets per second."""
        return 1e6 * self.rx_mpps_mean
    @property
    def loss_rate(self):
        """Packet loss as a percentage of the transmitted rate."""
        return 100 * (
            self.tx_mpps_mean - self.rx_mpps_mean) / self.tx_mpps_mean
    @property
    def l3_sizes(self):
        """Per-pipelet LLC allocation sizes derived from the CBMs."""
        return np.array([cbm_to_size(cbm) for cbm in self.cbms])
    @property
    def l3_ways(self):
        """Per-pipelet LLC way counts derived from the CBMs."""
        return np.array([cbm_to_ways(cbm) for cbm in self.cbms])
@property
def best_run(self):
runs = self.all_runs
if len(runs) == 0:
return None
elif len(runs) == 1:
return runs[0]
pps = [i.pps[0] for i in runs]
return runs[np.argmax(pps)]
@property
def median_run(self):
runs = self.all_runs
if len(runs) == 0:
return self
elif len(runs) == 1:
return runs[0]
pps = np.vectorize(lambda e: e.pps[0])(runs)
idx_pps = np.argmin(np.abs(pps - np.median(pps)))
#rtt = np.vectorize(lambda e: e.rtt_95[0])(runs)
#idx_rtt = np.argmin(np.abs(rtt - np.median(rtt)))
return runs[idx_pps]
@property
def nr_cache_classes(self):
return len(list(it.groupby(self.cbms)))
@property
def nr_pipelets(self):
return len(self.pipelets)
    @property
    def pps_predict_cat(self):
        """CAT-based throughput prediction: each pipelet's solo run with the
        same cache allocation serves directly as the predicted rate."""
        #assert(self.is_isolated)
        return self.pps_solos_same_cbm
    @property
    def pps_predict_nsdi12(self):
        """Throughput prediction following the NSDI'12 interpolation scheme.

        For each pipelet, finds profiled runs against synthetic competitors
        whose aggregate LLC reference count brackets the competitors'
        references in this run, then linearly interpolates throughput
        between those two points.  Returns -1 entries where prediction is
        not applicable; exceptions are logged and yield a truncated result.
        """
        from resq.profile import Profile
        # Only defined for full-utilization runs.
        if any(self.utilizations != 100):
            return [-1] * self.nr_pipelets
        pps_list = []
        try:
            if self.nr_pipelets == 1:
                return self.pps
            # an estimate for the number of llc refs
            # Candidate profile runs per pipelet: synthetic-competitor runs
            # plus the pipelet's own solo run.
            syns_l = []
            for nf, cbm, traffic in \
                    zip(self.pipelets,
                        self.cbms,
                        self.traffics):
                syns_l.append(
                    Profile(nf, traffic).runs_l3(
                        other_nf_types=['syn'], isolated=False, is_done=True) +
                    [Run(pipelets=(nf, ), cbms=(cbm, ), utilizations=(100, ))])
            refs = self.cache_references_solos_same_cbm
            for index, app in enumerate(self.pipelets):
                # LLC references generated by everyone except this pipelet.
                other_refs = sum(refs) - refs[index]
                syns = syns_l[index]
                syns.sort(key=lambda e: sum(e.cache_references[1:]))
                # idx1: profiled point closest to other_refs;
                # idx2: its neighbor on the other side of other_refs.
                idx1 = np.argmin([
                    np.abs(sum(e.cache_references[1:]) - other_refs)
                    for e in syns
                ])
                idx2 = idx1 + 1 if sum(
                    syns[idx1].cache_references[1:]) < other_refs else idx1 - 1
                if idx2 >= len(syns) or idx2 < 0:
                    pps_list.append(-1)
                    continue
                # Linear interpolation of throughput vs. competitor refs.
                pps1, pps2 = syns[idx1].pps[0], syns[idx2].pps[0]
                ref1, ref2 = sum(syns[idx1].cache_references[1:]), sum(
                    syns[idx2].cache_references[1:])
                coeff = np.polyfit((ref1, ref2), (pps1, pps2), deg=1)
                ppps = np.polyval(coeff, other_refs)
                pps_list.append(ppps)
        except Exception as e:
            import sys
            log_error(e)
            log_error('line {}'.format(sys.exc_info()[-1].tb_lineno))
        return np.array(pps_list)
@property
def port_speeds(self):
# FIXME: no 40g with netmap yet
return np.array([
40000
#if i == 0 and NF(self.pipelets[i]).port_type == 'dpdk'
#and Traffic(self.traffics[i]).size[0][0] > 60
#else 10000
for i | |
<filename>hummingbot/connector/exchange/bitmax/bitmax_exchange.py
import logging
from typing import (
Dict,
List,
Optional,
Any,
AsyncIterable,
)
from decimal import Decimal
import asyncio
import json
import aiohttp
import time
from collections import namedtuple
from hummingbot.core.network_iterator import NetworkStatus
from hummingbot.logger import HummingbotLogger
from hummingbot.core.clock import Clock
from hummingbot.core.utils.async_utils import safe_ensure_future, safe_gather
from hummingbot.connector.trading_rule import TradingRule
from hummingbot.core.data_type.cancellation_result import CancellationResult
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.limit_order import LimitOrder
from hummingbot.core.event.events import (
MarketEvent,
BuyOrderCompletedEvent,
SellOrderCompletedEvent,
OrderFilledEvent,
OrderCancelledEvent,
BuyOrderCreatedEvent,
SellOrderCreatedEvent,
MarketOrderFailureEvent,
OrderType,
TradeType,
TradeFee
)
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.connector.exchange.bitmax.bitmax_order_book_tracker import BitmaxOrderBookTracker
from hummingbot.connector.exchange.bitmax.bitmax_user_stream_tracker import BitmaxUserStreamTracker
from hummingbot.connector.exchange.bitmax.bitmax_auth import BitmaxAuth
from hummingbot.connector.exchange.bitmax.bitmax_in_flight_order import BitmaxInFlightOrder
from hummingbot.connector.exchange.bitmax import bitmax_utils
from hummingbot.connector.exchange.bitmax.bitmax_constants import EXCHANGE_NAME, REST_URL, getRestUrlPriv
from hummingbot.core.data_type.common import OpenOrder
# Module-level logger cache, populated lazily by BitmaxExchange.logger().
ctce_logger = None
s_decimal_NaN = Decimal("nan")
# Lightweight records mirroring Bitmax REST payload fields.
BitmaxTradingRule = namedtuple("BitmaxTradingRule", "minNotional maxNotional")
BitmaxOrder = namedtuple("BitmaxOrder", "symbol price orderQty orderType avgPx cumFee cumFilledQty errorCode feeAsset lastExecTime orderId seqNum side status stopPrice execInst")
BitmaxBalance = namedtuple("BitmaxBalance", "asset availableBalance totalBalance")
class BitmaxExchange(ExchangeBase):
"""
BitmaxExchange connects with Bitmax exchange and provides order book pricing, user account tracking and
trading functionality.
"""
API_CALL_TIMEOUT = 10.0
SHORT_POLL_INTERVAL = 5.0
UPDATE_ORDER_STATUS_MIN_INTERVAL = 10.0
LONG_POLL_INTERVAL = 120.0
@classmethod
def logger(cls) -> HummingbotLogger:
global ctce_logger
if ctce_logger is None:
ctce_logger = logging.getLogger(__name__)
return ctce_logger
    def __init__(self,
                 bitmax_api_key: str,
                 bitmax_secret_key: str,
                 trading_pairs: Optional[List[str]] = None,
                 trading_required: bool = True
                 ):
        """
        :param bitmax_api_key: The API key to connect to private Bitmax APIs.
        :param bitmax_secret_key: The API secret.
        :param trading_pairs: The market trading pairs which to track order book data.
        :param trading_required: Whether actual trading is needed.
        """
        super().__init__()
        self._trading_required = trading_required
        self._trading_pairs = trading_pairs
        # Request signer shared by all private REST/WS calls.
        self._bitmax_auth = BitmaxAuth(bitmax_api_key, bitmax_secret_key)
        self._order_book_tracker = BitmaxOrderBookTracker(trading_pairs=trading_pairs)
        self._user_stream_tracker = BitmaxUserStreamTracker(self._bitmax_auth, trading_pairs)
        self._ev_loop = asyncio.get_event_loop()
        # Lazily-created aiohttp session shared across API calls.
        self._shared_client = None
        self._poll_notifier = asyncio.Event()
        self._last_timestamp = 0
        self._in_flight_orders = {}  # Dict[client_order_id:str, BitmaxInFlightOrder]
        self._order_not_found_records = {}  # Dict[client_order_id:str, count:int]
        self._trading_rules = {}  # Dict[trading_pair:str, TradingRule]
        self._bitmax_trading_rules = {}  # Dict[trading_pair:str, BitmaxTradingRule]
        # Background task handles; created in start_network, cancelled in
        # stop_network.
        self._status_polling_task = None
        self._user_stream_event_listener_task = None
        self._trading_rules_polling_task = None
        self._last_poll_timestamp = 0
        self._account_group = None  # required in order to make post requests
        self._account_uid = None  # required in order to produce deterministic order ids
    @property
    def name(self) -> str:
        """Connector name constant."""
        return EXCHANGE_NAME
    @property
    def order_books(self) -> Dict[str, OrderBook]:
        """Trading pair -> OrderBook, maintained by the order book tracker."""
        return self._order_book_tracker.order_books
    @property
    def trading_rules(self) -> Dict[str, TradingRule]:
        """Trading pair -> generic TradingRule parsed from /products."""
        return self._trading_rules
    @property
    def in_flight_orders(self) -> Dict[str, BitmaxInFlightOrder]:
        """client_order_id -> currently tracked in-flight order."""
        return self._in_flight_orders
    @property
    def status_dict(self) -> Dict[str, bool]:
        """
        A dictionary of statuses of various connector's components.
        When trading is not required, the account/user-stream checks are
        considered trivially satisfied.
        """
        return {
            "order_books_initialized": self._order_book_tracker.ready,
            "account_balance": len(self._account_balances) > 0 if self._trading_required else True,
            "trading_rule_initialized": len(self._trading_rules) > 0 and len(self._bitmax_trading_rules) > 0,
            "user_stream_initialized":
                self._user_stream_tracker.data_source.last_recv_time > 0 if self._trading_required else True,
            "account_data": self._account_group is not None and self._account_uid is not None
        }
@property
def ready(self) -> bool:
"""
:return True when all statuses pass, this might take 5-10 seconds for all the connector's components and
services to be ready.
"""
return all(self.status_dict.values())
@property
def limit_orders(self) -> List[LimitOrder]:
return [
in_flight_order.to_limit_order()
for in_flight_order in self._in_flight_orders.values()
]
@property
def tracking_states(self) -> Dict[str, any]:
"""
:return active in-flight orders in json format, is used to save in sqlite db.
"""
return {
key: value.to_json()
for key, value in self._in_flight_orders.items()
if not value.is_done
}
def restore_tracking_states(self, saved_states: Dict[str, any]):
"""
Restore in-flight orders from saved tracking states, this is st the connector can pick up on where it left off
when it disconnects.
:param saved_states: The saved tracking_states.
"""
self._in_flight_orders.update({
key: BitmaxInFlightOrder.from_json(value)
for key, value in saved_states.items()
})
def supported_order_types(self) -> List[OrderType]:
"""
:return a list of OrderType supported by this connector.
Note that Market order type is no longer required and will not be used.
"""
return [OrderType.LIMIT, OrderType.LIMIT_MAKER]
    def start(self, clock: Clock, timestamp: float):
        """
        This function is called automatically by the clock.
        Delegates to the base class; no Bitmax-specific start logic here.
        """
        super().start(clock, timestamp)
    def stop(self, clock: Clock):
        """
        This function is called automatically by the clock.
        Delegates to the base class; no Bitmax-specific stop logic here.
        """
        super().stop(clock)
    async def start_network(self):
        """
        This function is required by NetworkIterator base class and is called automatically.
        It starts tracking order book, polling trading rules,
        updating statuses and tracking user data.
        """
        self._order_book_tracker.start()
        # Account group/UID must be known before any private API call made
        # by the tasks below.
        await self._update_account_data()
        self._trading_rules_polling_task = safe_ensure_future(self._trading_rules_polling_loop())
        if self._trading_required:
            self._status_polling_task = safe_ensure_future(self._status_polling_loop())
            self._user_stream_tracker_task = safe_ensure_future(self._user_stream_tracker.start())
            self._user_stream_event_listener_task = safe_ensure_future(self._user_stream_event_listener())
async def stop_network(self):
"""
This function is required by NetworkIterator base class and is called automatically.
"""
self._order_book_tracker.stop()
if self._status_polling_task is not None:
self._status_polling_task.cancel()
self._status_polling_task = None
if self._trading_rules_polling_task is not None:
self._trading_rules_polling_task.cancel()
self._trading_rules_polling_task = None
if self._status_polling_task is not None:
self._status_polling_task.cancel()
self._status_polling_task = None
if self._user_stream_tracker_task is not None:
self._user_stream_tracker_task.cancel()
self._user_stream_tracker_task = None
if self._user_stream_event_listener_task is not None:
self._user_stream_event_listener_task.cancel()
self._user_stream_event_listener_task = None
async def check_network(self) -> NetworkStatus:
"""
This function is required by NetworkIterator base class and is called periodically to check
the network connection. Simply ping the network (or call any light weight public API).
"""
try:
# since there is no ping endpoint, the lowest rate call is to get BTC-USDT ticker
await self._api_request("get", "ticker")
except asyncio.CancelledError:
raise
except Exception:
return NetworkStatus.NOT_CONNECTED
return NetworkStatus.CONNECTED
async def _http_client(self) -> aiohttp.ClientSession:
"""
:returns Shared client session instance
"""
if self._shared_client is None:
self._shared_client = aiohttp.ClientSession()
return self._shared_client
    async def _trading_rules_polling_loop(self):
        """
        Periodically update trading rule.
        Runs forever: refreshes once a minute on success, retries after
        0.5 s on failure (network warnings are logged, not raised).
        """
        while True:
            try:
                await self._update_trading_rules()
                await asyncio.sleep(60)
            except asyncio.CancelledError:
                # Propagate cancellation so stop_network() can tear us down.
                raise
            except Exception as e:
                self.logger().network(f"Unexpected error while fetching trading rules. Error: {str(e)}",
                                      exc_info=True,
                                      app_warning_msg="Could not fetch new trading rules from Bitmax. "
                                                      "Check network connection.")
                await asyncio.sleep(0.5)
    async def _update_trading_rules(self):
        """Fetch /products and replace both trading-rule dictionaries."""
        instruments_info = await self._api_request("get", path_url="products")
        [trading_rules, bitmax_trading_rules] = self._format_trading_rules(instruments_info)
        # NOTE(review): the dicts are cleared *and* rebound; the clear only
        # matters to external holders of the old dict objects — confirm
        # whether that is intentional before simplifying.
        self._trading_rules.clear()
        self._trading_rules = trading_rules
        self._bitmax_trading_rules.clear()
        self._bitmax_trading_rules = bitmax_trading_rules
    def _format_trading_rules(
        self,
        instruments_info: Dict[str, Any]
    ) -> [Dict[str, TradingRule], Dict[str, Dict[str, BitmaxTradingRule]]]:
        """
        Converts json API response into a dictionary of trading rules.
        :param instruments_info: The json API response
        :return A dictionary of trading rules.
        Response Example:
        {
            "code": 0,
            "data": [
                {
                    "symbol": "BTMX/USDT",
                    "baseAsset": "BTMX",
                    "quoteAsset": "USDT",
                    "status": "Normal",
                    "minNotional": "5",
                    "maxNotional": "100000",
                    "marginTradable": true,
                    "commissionType": "Quote",
                    "commissionReserveRate": "0.001",
                    "tickSize": "0.000001",
                    "lotSize": "0.001"
                }
            ]
        }
        """
        trading_rules = {}
        bitmax_trading_rules = {}
        for rule in instruments_info["data"]:
            try:
                trading_pair = bitmax_utils.convert_from_exchange_trading_pair(rule["symbol"])
                # Generic rule: price/amount increments for the framework.
                trading_rules[trading_pair] = TradingRule(
                    trading_pair,
                    min_price_increment=Decimal(rule["tickSize"]),
                    min_base_amount_increment=Decimal(rule["lotSize"])
                )
                # Bitmax-specific notional bounds, kept separately.
                bitmax_trading_rules[trading_pair] = BitmaxTradingRule(
                    minNotional=Decimal(rule["minNotional"]),
                    maxNotional=Decimal(rule["maxNotional"])
                )
            except Exception:
                # A malformed entry must not abort the whole refresh.
                self.logger().error(f"Error parsing the trading pair rule {rule}. Skipping.", exc_info=True)
        return [trading_rules, bitmax_trading_rules]
async def _update_account_data(self):
headers = {
**self._bitmax_auth.get_headers(),
**self._bitmax_auth.get_auth_headers("info"),
}
url = f"{REST_URL}/info"
response = await aiohttp.ClientSession().get(url, headers=headers)
try:
parsed_response = json.loads(await response.text())
except Exception as e:
raise IOError(f"Error parsing data from {url}. Error: {str(e)}")
if response.status != 200:
raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}. "
f"Message: {parsed_response}")
if parsed_response["code"] != 0:
raise IOError(f"{url} API call failed, response: {parsed_response}")
self._account_group = parsed_response["data"]["accountGroup"]
self._account_uid = parsed_response["data"]["userUID"]
async def _api_request(self,
method: str,
path_url: str,
params: Dict[str, Any] = {},
is_auth_required: bool = False,
force_auth_path_url: Optional[str] = None
) -> Dict[str, Any]:
"""
Sends an aiohttp request and waits for a response.
:param method: The HTTP method, e.g. get or post
:param path_url: The path url or the API end point
:param is_auth_required: Whether an authentication is required, when True the function will add encrypted
signature to the request.
:returns A response in json format.
"""
url = None
headers = None
if is_auth_required:
if (self._account_group) is None:
await self._update_account_data()
url = f"{getRestUrlPriv(self._account_group)}/{path_url}"
headers = {
**self._bitmax_auth.get_headers(),
**self._bitmax_auth.get_auth_headers(
path_url if force_auth_path_url is None else force_auth_path_url
),
}
else:
url = f"{REST_URL}/{path_url}"
headers = self._bitmax_auth.get_headers()
client = await self._http_client()
if method == "get":
response = await client.get(
url,
headers=headers
)
elif method == "post":
response = await client.post(
url,
headers=headers,
data=json.dumps(params)
)
elif method == "delete":
response = await client.delete(
url,
headers=headers,
data=json.dumps(params)
)
else:
raise NotImplementedError
try:
parsed_response = json.loads(await response.text())
except Exception as e:
raise IOError(f"Error parsing data from {url}. Error: {str(e)}")
if response.status != 200:
raise IOError(f"Error fetching data from {url}. HTTP status is {response.status}. "
f"Message: {parsed_response}")
if parsed_response["code"] != 0:
raise IOError(f"{url} API call failed, response: {parsed_response}")
return parsed_response
def get_order_price_quantum(self, trading_pair: str, price: Decimal):
"""
Returns a price step, a minimum price increment for a given trading pair.
"""
trading_rule = self._trading_rules[trading_pair]
return trading_rule.min_price_increment
def get_order_size_quantum(self, trading_pair: str, order_size: Decimal):
"""
Returns an order amount step, a minimum amount increment for a given trading pair.
"""
trading_rule = self._trading_rules[trading_pair]
return Decimal(trading_rule.min_base_amount_increment)
def get_order_book(self, trading_pair: str) | |
# gh_stars: 1-10  (extraction artifact converted to a comment)
import tensorflow as tf
import utils
epsilon = 1e-8
def log2(x):
    """Elementwise base-2 logarithm via the change-of-base identity."""
    log_x = tf.log(x)
    two = tf.constant(2, dtype=log_x.dtype)
    return log_x / tf.log(two)
def compute_pairwise_kl_bin_loss(logits, labels):
    """Hinge-style pairwise loss over signed binary-KL logit comparisons.

    Returns (loss, weights): relu(1 - pairwise_logits) and the 0/1 pairwise
    label indicators, detached from the gradient.
    """
    labels_pw, logits_pw = _pairwise_kl_bin_loss(labels, logits)
    weights = tf.stop_gradient(labels_pw, name='weights_stop_gradient')
    return tf.nn.relu(1 - logits_pw), weights
def compute_pairwise_kl_g_loss(logits, labels):
    """Hinge-style pairwise loss over signed Gaussian-KL logit comparisons.

    Returns (loss, weights): relu(1 - pairwise_logits) and the 0/1 pairwise
    label indicators, detached from the gradient.
    """
    labels_pw, logits_pw = _pairwise_kl_g_loss(labels, logits)
    weights = tf.stop_gradient(labels_pw, name='weights_stop_gradient')
    return tf.nn.relu(1 - logits_pw), weights
def compute_pairwise_kl_loss(logits, labels):
    """Hinge-style pairwise loss over signed multinomial-KL comparisons.

    Returns (loss, weights): relu(1 - pairwise_logits) and the 0/1 pairwise
    label indicators, detached from the gradient.
    """
    labels_pw, logits_pw = _pairwise_kl(labels, logits)
    weights = tf.stop_gradient(labels_pw, name='weights_stop_gradient')
    return tf.nn.relu(1 - logits_pw), weights
def _pairwise_kl(labels, logits):
    """Pairwise multinomial-KL comparison.

    For every item pair (i, j), computes the symmetric multinomial KL
    between the logits, signed by the direction of the raw logit
    difference, plus a 0/1 label for l_i > l_j restricted to valid labels.

    Returns:
      (pairwise_labels, pairwise_logits), each shaped
      [batch_size, list_size, list_size].
    (Leftover debug print and commented-out code removed.)
    """
    pairwise_label_diff = _apply_pairwise_op(tf.subtract, labels)
    pairwise_logits = _apply_pairwise_op(simm_kl_multinomial, logits)
    # The KL value is symmetric; recover the ordering direction from the
    # sign of the plain logit difference.
    pairwise_logits_diff = _apply_pairwise_op(tf.subtract, logits)
    pairwise_logits = tf.multiply(tf.sign(pairwise_logits_diff), pairwise_logits)
    # Only keep the case when l_i > l_j.
    pairwise_labels = tf.cast(
        tf.greater(pairwise_label_diff, 0), dtype=tf.float32)
    is_valid = utils.is_label_valid(labels)
    valid_pair = _apply_pairwise_op(tf.logical_and, is_valid)
    pairwise_labels *= tf.cast(valid_pair, dtype=tf.float32)
    return pairwise_labels, pairwise_logits
def _pairwise_kl_g_loss(labels, logits):
    """Pairwise Gaussian-KL comparison.

    Same structure as _pairwise_kl but scores each pair with simm_kl_g,
    signed by the direction of the raw logit difference.

    Returns:
      (pairwise_labels, pairwise_logits), each shaped
      [batch_size, list_size, list_size].
    (Leftover debug print and commented-out code removed.)
    """
    pairwise_label_diff = _apply_pairwise_op(tf.subtract, labels)
    pairwise_logits = _apply_pairwise_op(simm_kl_g, logits)
    # Recover the ordering direction from the plain logit difference.
    pairwise_logits_diff = _apply_pairwise_op(tf.subtract, logits)
    pairwise_logits = tf.multiply(tf.sign(pairwise_logits_diff), pairwise_logits)
    # Only keep the case when l_i > l_j.
    pairwise_labels = tf.cast(
        tf.greater(pairwise_label_diff, 0), dtype=tf.float32)
    is_valid = utils.is_label_valid(labels)
    valid_pair = _apply_pairwise_op(tf.logical_and, is_valid)
    pairwise_labels *= tf.cast(valid_pair, dtype=tf.float32)
    return pairwise_labels, pairwise_logits
def _pairwise_kl_bin_loss(labels, logits):
    """Pairwise binary-KL comparison.

    Same structure as _pairwise_kl but scores each pair with simm_kl_bin,
    signed by the direction of the raw logit difference.

    Returns:
      (pairwise_labels, pairwise_logits), each shaped
      [batch_size, list_size, list_size].
    (Leftover debug print and commented-out code removed.)
    """
    pairwise_label_diff = _apply_pairwise_op(tf.subtract, labels)
    pairwise_logits = _apply_pairwise_op(simm_kl_bin, logits)
    # Recover the ordering direction from the plain logit difference.
    pairwise_logits_diff = _apply_pairwise_op(tf.subtract, logits)
    pairwise_logits = tf.multiply(tf.sign(pairwise_logits_diff), pairwise_logits)
    # Only keep the case when l_i > l_j.
    pairwise_labels = tf.cast(
        tf.greater(pairwise_label_diff, 0), dtype=tf.float32)
    is_valid = utils.is_label_valid(labels)
    valid_pair = _apply_pairwise_op(tf.logical_and, is_valid)
    pairwise_labels *= tf.cast(valid_pair, dtype=tf.float32)
    return pairwise_labels, pairwise_logits
def _compute_ranks(logits, is_valid):
    """Computes ranks of entries by sorting the valid logits.

    Args:
      logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
        ranking score of the corresponding item.
      is_valid: A boolean `Tensor` of the same shape marking valid entries.

    Returns:
      The `ranks` Tensor produced by `utils.sorted_ranks`.
    """
    _check_tensor_shapes([logits, is_valid])
    # Push invalid entries slightly below the per-list minimum so that they
    # always sort last.
    row_min = tf.reduce_min(input_tensor=logits, axis=1, keepdims=True)
    sentinel = row_min - 1e-6 * tf.ones_like(logits)
    scores = tf.compat.v1.where(is_valid, logits, sentinel)
    return utils.sorted_ranks(scores)
def _check_tensor_shapes(tensors):
    """Asserts that every tensor is rank-2 and shape-compatible with the first."""
    if not tensors:
        return
    reference = tf.convert_to_tensor(value=tensors[0]).get_shape()
    for current in tensors:
        shape = tf.convert_to_tensor(value=current).get_shape()
        shape.assert_has_rank(2)
        shape.assert_is_compatible_with(reference)
def _pairwise_comparison(labels, logits):
    r"""Returns pairwise comparison `Tensor`s.

    Given a list of n items with graded-relevance labels l_i and scores s_i,
    all n^2 ordered pairs are formed and, for each pair (i, j):

      * `pairwise_labels[-1, i, j]` is 1 when l_i > l_j and both labels are
        valid, and 0 otherwise.
      * `pairwise_logits[-1, i, j]` is s_i - s_j.

    Args:
      labels: A `Tensor` with shape [batch_size, list_size].
      logits: A `Tensor` with shape [batch_size, list_size].

    Returns:
      A tuple of (pairwise_labels, pairwise_logits) with each having the shape
      [batch_size, list_size, list_size].
    """
    # Entry [-1, i, j] of each pairwise tensor describes the pair (i, j).
    label_diff = _apply_pairwise_op(tf.subtract, labels)
    pairwise_logits = _apply_pairwise_op(tf.subtract, logits)
    # A pair contributes only when l_i > l_j and both entries are valid.
    pairwise_labels = tf.cast(tf.greater(label_diff, 0), dtype=tf.float32)
    valid_pair = _apply_pairwise_op(tf.logical_and, utils.is_label_valid(labels))
    pairwise_labels *= tf.cast(valid_pair, dtype=tf.float32)
    return pairwise_labels, pairwise_logits
def _apply_pairwise_op_ml(op, tensor):
    """Applies `op` to the tensor paired with itself (multi-label variant).

    Unlike `_apply_pairwise_op`, no broadcasting dimensions are inserted: the
    operator simply receives the tensor twice, unchanged.
    """
    return op(tensor, tensor)
def _apply_pairwise_op(op, tensor):
    """Applies `op` between all pairs of entries via broadcasting.

    Expands [batch, list] into [batch, list, 1] and [batch, 1, list] so that
    `op` yields a [batch, list, list] tensor of pairwise results.
    """
    left = tf.expand_dims(tensor, 2)
    right = tf.expand_dims(tensor, 1)
    return op(left, right)
def simm_kl_g(x, y):
    """Symmetrized Gaussian KL divergence: KL(x||y) + KL(y||x)."""
    forward = kl_div_gaussian(x, y)
    backward = kl_div_gaussian(y, x)
    return forward + backward
def kl_div_gaussian(x, y):
    """KL divergence KL(N(x, std) || N(y, std)) with a fixed std of 0.25.

    Uses the closed form for univariate Gaussians,
      KL = log(std_y / std_x) + (std_x^2 + (x - y)^2) / (2 * std_y^2) - 1/2.
    With equal standard deviations the log term vanishes, so this reduces to
    8 * (x - y)^2 elementwise.
    """
    std_x = 0.25 * tf.ones_like(x)
    std_y = 0.25 * tf.ones_like(y)
    # NOTE(review): the original scaled the log-ratio term by 0.5, which does
    # not match the closed form above; since std_x == std_y the term is zero
    # either way, so removing the 0.5 factor does not change current outputs.
    return (tf.math.log(std_y / std_x)
            + (std_x ** 2 + (x - y) ** 2) / (2 * std_y ** 2) - 0.5)
def simm_kl_bin(x, y):
    """Symmetrized binomial KL divergence: KL(x||y) + KL(y||x)."""
    forward = compute_kl_div_loss_bin(x, y)
    backward = compute_kl_div_loss_bin(y, x)
    return forward + backward
def simm_kl_multinomial(x, y):
    """Symmetrized multinomial KL divergence: KL(x||y) + KL(y||x)."""
    forward = compute_kl_div_multinomial(x, y)
    backward = compute_kl_div_multinomial(y, x)
    return forward + backward
def compute_kl_div_multinomial(x, y):
    """KL(x||y) for multinomial distributions along the last axis.

    A small epsilon guards both numerator and denominator against log(0) and
    division by zero.
    """
    ratio = (x + 1e-6) / (y + 1e-6)
    return tf.reduce_sum(x * tf.math.log(ratio), axis=-1)
def compute_kl_div_loss_bin(logits, labels, n=32):
    """Elementwise binomial KL divergence between two success probabilities.

    Args:
      logits: predicted success probabilities in [0, 1].
      labels: target success probabilities in [0, 1].
      n: number of Bernoulli trials used to scale the divergence.

    Returns:
      A `Tensor` with the same shape as the inputs (no reduction applied).
    """
    # Fixed: tf.log was removed in TF 2.x; the rest of this file already uses
    # tf.math.log. Epsilons guard against log(0).
    loss = (tf.math.log((1e-6 + labels) / (1e-6 + logits)) * n * labels
            + tf.math.log((1e-6 + 1 - labels) / (1e-6 + 1 - logits)) * n * (1 - labels))
    return loss
def simm_kl_div_bern(x, y):
    """Symmetrized Bernoulli KL divergence: KL(x||y) + KL(y||x)."""
    forward = kl_div_bern(x, y)
    backward = kl_div_bern(y, x)
    return forward + backward
def kl_div_bern(x, y):
    """Scaled Bernoulli KL divergence between success probabilities x and y.

    The divergence is multiplied by a fixed trial count of 300; epsilons
    guard against log(0).
    """
    n = 300  # fixed number of trials used to scale the divergence
    # Fixed: tf.log was removed in TF 2.x; the rest of this file already uses
    # tf.math.log.
    return (tf.math.log((1e-6 + y) / (1e-6 + x)) * n * y
            + tf.math.log((1e-6 + 1 - y) / (1e-6 + 1 - x)) * n * (1 - y))
def compute_approxNDCG_gumbel(logits, labels):
    """Mean ApproxNDCG loss over Gumbel-perturbed samples of the logits.

    Draws `n_samples` Gumbel(0, 1)-perturbed copies of `logits` via
    `gumbel_neural_sort`, computes differentiable approximate ranks for each
    sample with `utils.approx_ranks`, and averages the resulting negative
    approximate NDCG values.

    Args:
      logits: A `Tensor` with shape [batch_size, list_size] of ranking scores.
      labels: A `Tensor` with shape [batch_size, list_size] of graded
        relevance labels; invalid entries are masked out.

    Returns:
      A scalar `Tensor`: the mean (over samples and batch) negative
      approximate NDCG.
    """
    # The higher the alpha, the steeper the sigmoid approximating the rank
    # indicator function in the nDCG approximation.
    alpha = 10.0
    # Zero out invalid labels and push invalid logits far below the per-list
    # minimum so they cannot influence the approximate ranks.
    is_valid = utils.is_label_valid(labels)
    labels = tf.compat.v1.where(is_valid, labels, tf.zeros_like(labels))
    logits = tf.compat.v1.where(
        is_valid, logits, -1e3 * tf.ones_like(logits) +
        tf.reduce_min(input_tensor=logits, axis=-1, keepdims=True))
    # Lists with no relevant items get tiny uniform labels to avoid a
    # divide-by-zero inside utils.inverse_max_dcg.
    label_sum = tf.reduce_sum(input_tensor=labels, axis=1, keepdims=True)
    nonzero_mask = tf.greater(tf.reshape(label_sum, [-1]), 0.0)
    labels = tf.compat.v1.where(nonzero_mask, labels,
                                1e-10 * tf.ones_like(labels))
    gains = tf.pow(2., tf.cast(labels, dtype=tf.float32)) - 1.
    # Draw Gumbel-perturbed score lists; sampled_logits is reshaped to
    # [n_samples, batch_size, list_size] so map_fn runs per sample.
    n_samples = 8
    _, sampled_logits = gumbel_neural_sort(logits, sample_size=n_samples)
    sampled_logits = tf.reshape(sampled_logits, (n_samples, -1, labels.shape[-1]))
    ranks = tf.map_fn(lambda l: utils.approx_ranks(l, alpha=alpha), elems=sampled_logits, dtype=tf.float32)
    discounts = 1. / tf.math.log1p(tf.cast(ranks, tf.float32))
    # Negative approximate DCG per sample, normalized by the ideal DCG.
    cost = tf.map_fn(lambda d: -tf.reduce_sum(input_tensor=gains * d, axis=-1, keepdims=True) * tf.expand_dims(
        utils.inverse_max_dcg(labels), axis=-2), elems=discounts, dtype=tf.float32)
    return tf.reduce_mean(cost)
def gumbel_neural_sort(logits,
                       name=None,
                       sample_size=8,
                       temperature=1.0,
                       seed=None):
    """Generate permutation matrices from logits by stochastic neuralsort.

    By sampling logits from the Gumbel distribution,
      sampled_logits = logits + Gumbel(0, 1),
    the deterministic neural sort z of sampled_logits obeys the distribution
      Prob(z|logits) = (exp(logit_z1) / Z) * (exp(logit_z2) / Z-exp(logit_z1)) *
          ... * (exp(logit_zn) / Z-sum_i^(n-1)exp(logit_zi)),
    where Z = sum_i exp(logit_i).

    Args:
      logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
        ranking score of the corresponding item.
      name: A string used as the name for this op's scope.
      sample_size: An integer representing the number of samples drawn from the
        Concrete distribution defined by scores.
      temperature: The Gumbel-Softmax temperature.
      seed: Seed for pseudo-random number generator.

    Returns:
      A tuple (smooth_perm, sampled_logits): `smooth_perm` holds relaxed
      permutation matrices with shape [batch_size, sample_size, list_size,
      list_size]; `sampled_logits` holds the Gumbel-perturbed scores with
      shape [batch_size * sample_size, list_size].
    """
    with tf.compat.v1.name_scope(name, 'gumbel_neural_sort', [logits]):
        batch_size = tf.shape(input=logits)[0]
        list_size = tf.shape(input=logits)[1]
        # Sample logits from Concrete(logits): one independent Gumbel(0, 1)
        # draw per (batch, sample, entry), then flatten samples into the
        # batch dimension.
        sampled_logits = tf.expand_dims(logits, 1)
        sampled_logits += _sample_gumbel([batch_size, sample_size, list_size],
                                         seed=seed)
        sampled_logits = tf.reshape(sampled_logits,
                                    [batch_size * sample_size, list_size])
        # Sort by constructing the relaxed permutation matrix from the
        # sampled logits.
        smooth_perm = neural_sort(sampled_logits, name, temperature)
        smooth_perm = tf.reshape(smooth_perm,
                                 [batch_size, sample_size, list_size, list_size])
        return smooth_perm, sampled_logits
def _sample_gumbel(shape, eps=1e-20, seed=None):
    """Draws standard Gumbel(0, 1) noise of the given shape.

    Uses the inverse-CDF transform -log(-log(U)) of U ~ Uniform(0, 1); `eps`
    keeps both logarithms away from log(0).
    """
    uniform = tf.random.uniform(
        shape, minval=0, maxval=1, dtype=tf.float32, seed=seed)
    return -tf.math.log(-tf.math.log(uniform + eps) + eps)
def neural_sort(logits, name=None, temperature=1.0):
r"""Generate the permutation matrix from logits by deterministic neuralsort.
The sort on a list of logits can be approximated by a differentiable
permutation matrix using Neural Sort (https://arxiv.org/abs/1903.08850).
The approximation is achieved by constructing a list of functions on logits,
fn_i(k) = (list_size + 1 - 2*i) * logit_k - sum_j |logit_k - logit_j|,
whose value is maximal when k is at the ith largest logit.
So that the permutation matrix can be expressed as
/ 1 if j = argmax_k fn_i(k)
P_ij = | = one_hot(argmax(fn_i(j))).
\ 0 otherwise
And the differentiable approximation of the matrix is applied with softmax,
P^_ij = softmax(fn_i(j) / temperature),
where the parameter temperature tunes the smoothiness of the approximation.
#### References
[1]: <NAME>, <NAME>, <NAME>, <NAME>.
Stochastic Optimization of Sorting Networks via Continuous Relaxations.
https://arxiv.org/abs/1903.08850
Args:
logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
ranking score of the corresponding item. (We are using logits here,
noticing the original paper is using probability weights, i.e., the
exponentials of the logits).
name: A string used as the name for this loss.
temperature: The Softmax approximation temperature.
Returns:
A tensor of permutation matrices whose dimension is [batch_size, list_size,
list_size].
"""
with tf.compat.v1.name_scope(name, 'neural_sort', [logits]):
list_size = tf.shape(input=logits)[1]
logit_diff = tf.abs(tf.expand_dims(logits, 2) - tf.expand_dims(logits, 1))
# shape = [batch_size, 1, list_size].
logit_diff_sum = tf.reduce_sum(
input_tensor=logit_diff, axis=1, keepdims=True)
scaling | |
# python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Polynomial based models for finite differences and finite volumes."""
import enum
import functools
from typing import Any, Iterator, Optional, Sequence, Tuple
import numpy as np
import scipy.special
import tensorflow as tf
class Method(enum.Enum):
    """Discretization method used to derive stencil coefficients."""
    # Approximate pointwise derivatives at grid points.
    FINITE_DIFFERENCE = 1
    # Approximate cell-averaged derivatives over grid cells.
    FINITE_VOLUME = 2
def regular_stencil_1d(
    offset: int,
    derivative_order: int,
    accuracy_order: int = 1,
    grid_step: float = 1,
) -> np.ndarray:
    """Return the smallest stencil on which finite differences can be calculated.

    Args:
      offset: 0 for a stencil centered on the input grid, 1 for a stencil
        shifted by half a grid cell.
      derivative_order: integer derivative order to calculate.
      accuracy_order: integer order of polynomial accuracy to enforce. By
        default, only 1st order accuracy is guaranteed.
      grid_step: spacing between grid points/cells.

    Returns:
      1D numpy array giving positions at which to calculate finite differences.

    Raises:
      ValueError: if offset is neither 0 nor 1.
    """
    num_points = derivative_order + accuracy_order
    if offset == 0:
        # Centered stencil: 1 -> 0, 2 -> 1, 3 -> 1, 4 -> 2, ...
        half_width = num_points // 2
        return np.arange(-half_width, half_width + 1) * grid_step
    if offset == 1:
        # Staggered stencil: 1 -> 1, 2 -> 1, 3 -> 2, 4 -> 2, ...
        half_width = (num_points + 1) // 2
        return (0.5 + np.arange(-half_width, half_width)) * grid_step
    raise ValueError('unexpected offset: {}'.format(offset))  # pylint: disable=g-doc-exception
def _kronecker_product(arrays: Sequence[np.ndarray]) -> np.ndarray:
    """Left-folds `np.kron` over `arrays`, combining them into one array."""
    return functools.reduce(lambda acc, arr: np.kron(acc, arr), arrays)
def _exponents_up_to_degree(degree: int,
                            num_dimensions: int) -> Iterator[Tuple[int]]:
    """Generate all exponent tuples whose sum is at most `degree`.

    Args:
      degree: a non-negative integer representing the maximum total degree.
      num_dimensions: a non-negative integer number of dimensions.

    Yields:
      Every tuple of `num_dimensions` non-negative integers summing to at
      most `degree`, in lexicographic order. For example, degree=2 and
      num_dimensions=2 yields (0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (2, 0).
    """
    if num_dimensions == 0:
        yield ()
        return
    for leading in range(degree + 1):
        # Recurse on the remaining budget for the remaining dimensions.
        for tail in _exponents_up_to_degree(degree - leading,
                                            num_dimensions - 1):
            yield (leading,) + tail
def constraints(
    stencils: Sequence[np.ndarray],
    method: Method,
    derivative_orders: Sequence[int],
    accuracy_order: int,
    grid_step: Optional[float] = None,
) -> Tuple[np.ndarray, np.ndarray]:
    """Setup a linear equation A @ c = b for finite difference coefficients.

    Elements are returned in row-major order, e.g., if two stencils of length 2
    are provided: s00, s01, s10, s11.

    Args:
      stencils: list of arrays giving 1D stencils in each direction.
      method: discretization method (i.e., finite volumes or finite differences).
      derivative_orders: integer derivative orders to approximate in each grid
        direction.
      accuracy_order: minimum accuracy orders for the solution in each grid
        direction.
      grid_step: spacing between grid cells. Required if calculating a finite
        volume stencil.

    Returns:
      Tuple of arrays `(A, b)` where `A` is 2D and `b` is 1D providing linear
      constraints. Any vector of finite difference coefficients `c` such that
      `A @ c = b` satisfies the requested accuracy order. The matrix `A` is
      guaranteed not to have more rows than columns.

    Raises:
      ValueError: if the linear constraints are not satisfiable.

    References:
      https://en.wikipedia.org/wiki/Finite_difference_coefficient
      Fornberg (1988), "Generation of Finite Difference Formulas on
      Arbitrarily Spaced Grids", Mathematics of Computation, 51 (184): 699-706,
      doi:10.1090/S0025-5718-1988-0935077-0, ISSN 0025-5718.
    """
    # TODO(shoyer): consider supporting arbitrary non-rectangular stencils.
    # TODO(shoyer): consider support different accuracy orders in different
    # directions.
    if accuracy_order < 1:
        # Fixed a typo in the original message ("constriants").
        raise ValueError('cannot compute constraints with non-positive '
                         'accuracy_order: {}'.format(accuracy_order))
    if len(stencils) != len(derivative_orders):
        raise ValueError('mismatched lengths for stencils and derivative_orders')

    all_constraints = {}
    num_dimensions = len(stencils)
    # The solution is accurate to the requested order iff it is exact for
    # every monomial of total degree < accuracy_order + sum(derivative_orders).
    max_degree = accuracy_order + sum(derivative_orders) - 1
    for exponents in _exponents_up_to_degree(max_degree, num_dimensions):
        # Build linear constraints for a single polynomial term:
        #   \prod_i {x_i}^{m_i}
        lhs_terms = []
        rhs_terms = []
        for exponent, stencil, derivative_order in zip(exponents, stencils,
                                                       derivative_orders):
            if method is Method.FINITE_VOLUME:
                if grid_step is None:
                    raise ValueError('grid_step is required for finite volumes')
                # average value of x**m over a centered grid cell
                lhs_terms.append(
                    1 / grid_step * ((stencil + grid_step / 2)**(exponent + 1) -
                                     (stencil - grid_step / 2)**(exponent + 1)) /
                    (exponent + 1))
            elif method is Method.FINITE_DIFFERENCE:
                lhs_terms.append(stencil**exponent)
            else:
                raise ValueError('unexpected method: {}'.format(method))

            if exponent == derivative_order:
                # we get a factor of m! for m-th order derivative in each direction
                rhs_term = scipy.special.factorial(exponent)
            else:
                rhs_term = 0
            rhs_terms.append(rhs_term)

        lhs = tuple(_kronecker_product(lhs_terms))
        rhs = np.prod(rhs_terms)
        # Identical left-hand sides must agree on the right-hand side;
        # otherwise no coefficient vector can satisfy both.
        if lhs in all_constraints and all_constraints[lhs] != rhs:
            raise ValueError('conflicting constraints')
        all_constraints[lhs] = rhs

    # Ensure a deterministic order for the rows (note: could be dropped now
    # that dictionaries preserve insertion order on Python 3.6+).
    lhs_rows, rhs_rows = zip(*sorted(all_constraints.items()))
    A = np.array(lhs_rows)  # pylint: disable=invalid-name
    b = np.array(rhs_rows)
    return A, b
def _high_order_coefficients_1d(
    stencil: np.ndarray,
    method: Method,
    derivative_order: int,
    grid_step: float = None,
) -> np.ndarray:
    """Calculate highest-order coefficients in 1D.

    Uses the highest accuracy order that can be ensured in general; some
    cases (e.g., centered finite differences) actually achieve an even
    higher order in practice.
    """
    accuracy_order = stencil.size - derivative_order
    lhs, rhs = constraints(
        [stencil], method, [derivative_order], accuracy_order, grid_step)
    return np.linalg.solve(lhs, rhs)
def coefficients(
    stencils: Sequence[np.ndarray],
    method: Method,
    derivative_orders: Sequence[int],
    accuracy_order: Optional[int] = None,
    grid_step: Optional[float] = None,
) -> np.ndarray:
    """Calculate standard finite difference/volume coefficients.

    These coefficients are constructed by taking an outer product of coefficients
    along each dimension independently. The resulting coefficients have *at least*
    the requested accuracy order.

    Args:
      stencils: sequence of 1d stencils, one per grid dimension.
      method: discretization method (i.e., finite volumes or finite differences).
      derivative_orders: integer derivative orders to approximate, per grid
        dimension.
      accuracy_order: accuracy order for the solution. By default, the highest
        possible accuracy is used in each direction.
      grid_step: spacing between grid cells. Required if calculating a finite
        volume stencil.

    Returns:
      NumPy array with one-dimension per stencil giving first order finite
      difference coefficients on the grid.
    """
    slices = []
    sizes = []
    all_coefficients = []
    for stencil, derivative_order in zip(stencils, derivative_orders):
        if accuracy_order is None:
            # Use the whole stencil, giving the highest possible accuracy.
            excess = 0
        else:
            # Trim the stencil symmetrically down to the smallest sub-stencil
            # that still achieves the requested accuracy order.
            excess = stencil.size - derivative_order - accuracy_order
        start = excess // 2
        # NOTE(review): for odd `excess` this keeps one extra point
        # (size - 2*(excess//2) > size - excess) — presumably intentional to
        # keep the trim symmetric; confirm before changing.
        stop = stencil.size - excess // 2
        slice_ = slice(start, stop)
        axis_coefficients = _high_order_coefficients_1d(stencil[slice_], method,
                                                        derivative_order, grid_step)
        slices.append(slice_)
        sizes.append(stencil[slice_].size)
        all_coefficients.append(axis_coefficients)
    # Embed the outer product of the per-axis coefficients into a zero array
    # covering the full (untrimmed) stencils.
    result = np.zeros(tuple(stencil.size for stencil in stencils))
    result[tuple(slices)] = _kronecker_product(all_coefficients).reshape(sizes)
    return result
class PolynomialAccuracy(tf.keras.layers.Layer):
"""Layer to enforce polynomial accuracy for finite difference coefficients.
Attributes:
input_size: length of input vectors that are transformed into valid finite
difference coefficients.
stencil_size: size of the resulting stencil.
bias: numpy array of shape (grid_size,) to which zero vectors are mapped.
nullspace: numpy array of shape (input_size, output_size) representing the
nullspace of the constraint matrix.
"""
def __init__(
self,
stencils: Sequence[np.ndarray],
method: Method,
derivative_orders: Sequence[int],
accuracy_order: int = 1,
bias_accuracy_order: Optional[int] = 1,
grid_step: float = None,
bias: np.ndarray = None,
dtype: Any = np.float32,
):
"""Constructor.
Args:
stencils: sequence of 1d stencils, one per grid dimension.
method: discretization method (i.e., finite volumes or finite
differences).
derivative_orders: integer derivative orders to approximate, per grid
dimension.
accuracy_order: integer order of polynomial accuracy to enforce.
bias_accuracy_order: integer order of polynomial accuracy to use for the
bias term. Only used if bias is not provided.
grid_step: spacing between grid cells.
bias: np.ndarray of shape (grid_size,) to which zero-vectors will be
mapped. Must satisfy polynomial accuracy to the requested order. By
default, we use standard low-order coefficients for the given grid.
dtype: dtype to use for computing this layer.
"""
if grid_step is None:
raise TypeError('grid_step is required for PolynomialAccuracy')
A, b = constraints( # pylint: disable=invalid-name
stencils, method, derivative_orders, accuracy_order, grid_step)
if bias is None:
bias_grid = coefficients(stencils, method, derivative_orders,
bias_accuracy_order, grid_step)
bias = bias_grid.ravel()
norm = np.linalg.norm(np.dot(A, bias) - b)
if norm > 1e-8:
raise ValueError('invalid | |
###
#
# Given, in the data/output/parents/results directory:
#
# * pshtt.csv - domain-scan, based on pshtt
# * sslyze.csv - domain-scan, based on sslyze.
#
# And, in the data/output/subdomains directory:
#
# * gather/results/gathered.csv - all gathered .gov hostnames
# * scan/results/pshtt.csv - pshtt scan for all hostnames
# * scan/results/sslyze.csv - sslyze scan for live/TLS hostnames
#
###
import errno
import csv
import datetime
import logging
import pathlib
import os
import re
import subprocess
import typing
from urllib.parse import urlparse
from shutil import copyfile
import slugify
# Import all the constants from data/env.py.
from data import env
from data import logger
from data import models
LOGGER = logger.get_logger(__name__)
# domains.csv is downloaded and live-cached during the scan
SCAN_CACHE = os.path.join(env.SCAN_DATA, "cache")
SCAN_DOMAINS_CSV = os.path.join(SCAN_CACHE, "domains.csv")
MIN_HSTS_AGE = 31536000 # one year
###
# Main task flow.
# Read in data from domains.csv, and scan data from domain-scan.
# All database operations are made in the run() method.
#
# This method blows away the database and rebuilds it from the given data.
def run(date: typing.Optional[str], connection_string: str, batch_size: typing.Optional[int] = None):
    """Rebuild the tracker database from domains.csv and domain-scan output.

    Reads domain/ownership lists and pshtt/sslyze scan results, computes
    per-domain, per-organization, and government-wide HTTPS reports, then
    blows away and repopulates the database behind `connection_string`.

    Args:
      date: report date string ('%Y-%m-%d'); defaults to today when None.
      connection_string: connection string passed to `models.Connection`.
      batch_size: optional batch size for bulk database operations.
    """
    if date is None:
        date = datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d")

    # Read in domains and organizations from domains.csv.
    # Returns dicts of values ready for saving as Domain and Agency objects.
    #
    # Also returns gathered subdomains, which need more filtering to be useful.
    domains, owners = load_domain_data()
    results = {}
    acceptable_ciphers = load_compliance_data()

    # Read in domain-scan CSV data.
    scan_data = load_scan_data(domains)

    # Capture manual exclusions and pull out some high-level data from pshtt.
    for domain_name in scan_data:
        # Pull out a few pshtt.csv fields as general domain-level metadata.
        domain_data = scan_data[domain_name]
        pshtt = domain_data.get("pshtt", None)
        if pshtt is None:
            # generally means scan was on different domains.csv, but
            # invalid domains can hit this.
            LOGGER.warning("[%s] No pshtt data for domain!", domain_name)
        elif boolean_for(pshtt['Live']):
            # Only live domains make it into the results; warn about
            # redirects that point outside the tracked set.
            if boolean_for(pshtt['Redirect']):
                redirection = urlparse(pshtt["Redirect To"]).netloc
                if redirection not in domains:
                    LOGGER.warning("%s redirected to %s which is not in the domains list", domain_name, redirection)
            results[domain_name] = {
                "domain": domain_name,
                "is_owner": domain_name in owners,
                "is_parent": domain_name in owners,
                "sources": ["canada-gov"],
                "live": True,
                "redirect": boolean_for(pshtt["Redirect"]),
                "canonical": pshtt["Canonical URL"],
                "exclude": {},
            }

    # Find the parent domain for all domains in the owner list, mutating results in place
    map_subdomains(results, owners)

    # Extract organizations actually used in the set of scanned domains, and their counts
    organizations = extract_orgs(results)

    # Sort names for a deterministic insertion order below.
    sorted_domains = list(results.keys())
    sorted_domains.sort()
    sorted_organizations = list(organizations.keys())
    sorted_organizations.sort()

    # Calculate high-level per-domain conclusions for each report.
    # Overwrites `results` in place
    process_https(
        results, scan_data, acceptable_ciphers
    )

    # Totals scan data for parent domains
    total_reports(
        results, owners,
    )

    # Calculate organization-level summaries. Updates `organizations` in-place.
    update_organization_totals(organizations, results)

    # Calculate government-wide summaries.
    report = full_report(results)
    report["report_date"] = date

    # Reset the database.
    with models.Connection(connection_string) as connection:
        LOGGER.info("Clearing the domains.")
        connection.domains.clear(batch_size=batch_size)
        LOGGER.info("Creating all domains.")
        connection.domains.create_all((results[domain_name] for domain_name in sorted_domains), batch_size=batch_size)
        LOGGER.info("Clearing organizations.")
        connection.organizations.clear(batch_size=batch_size)
        LOGGER.info("Creating all organizations.")
        connection.organizations.create_all(
            (organizations[organization_name] for organization_name in sorted_organizations), batch_size=batch_size
        )
        LOGGER.info("Replacing government-wide totals.")
        connection.reports.replace({}, report)
        LOGGER.info("Signal track-web to drop cache")
        connection.flags.replace({}, {"cache": False})

    # Print and exit
    print_report(report)
def cache_file(uri: str) -> pathlib.Path:
    """Ensure `uri` is present in the scan cache and return its cached path.

    An already-cached file is returned as-is; http(s) URIs are downloaded
    with wget, anything else is copied from the local filesystem.
    """
    LOGGER.info("caching %s", uri)
    mkdir_p(SCAN_CACHE)
    basename = pathlib.Path(uri).name
    target = pathlib.Path(SCAN_CACHE) / basename
    if target.is_file():
        return target
    if uri.startswith(("http:", "https:")):
        shell_out(["wget", uri, "-O", os.path.join(SCAN_CACHE, basename)])
    else:
        copyfile(uri, str(target))
    return target
def in_cache(path: str) -> bool:
    """Return True if a file with this basename already sits in the scan cache."""
    basename = pathlib.Path(path).name
    return (pathlib.Path(SCAN_CACHE) / basename).exists()
def _load_data(path: pathlib.Path) -> typing.Set[str]:
    """Read the first CSV column into a set, skipping the header and blank rows."""
    with path.open('r', encoding='utf-8-sig', newline='') as cipherfile:
        reader = csv.reader(cipherfile)
        next(reader)  # assume csv has header column
        # Blank rows parse as empty lists; filter them out instead of
        # catching IndexError.
        return {row[0] for row in reader if row}
def load_compliance_data() -> typing.Set[str]:
    """Return the set of acceptable ciphers, caching the source file locally."""
    cipher_file = cache_file(env.CIPHER)
    return _load_data(cipher_file)
# Reads in input CSVs (domain list).
def load_domain_data() -> typing.Tuple[typing.Set, typing.Dict]:
    """Read the tracked domains and their owning organizations.

    Returns:
      A tuple (domains, owners): `domains` is the set of hostnames from
      domains.csv; `owners` maps domain name -> organization metadata
      (English/French names plus a slug) from the ownership CSV.
    """
    owners = {}
    domains = set()

    # If domains.csv wasn't cached yet, download it anew.
    if not os.path.exists(SCAN_DOMAINS_CSV):
        cache_file(env.DOMAINS)
    owner_path = cache_file(env.OWNERSHIP)
    if not os.path.exists(SCAN_DOMAINS_CSV):
        LOGGER.critical("Couldn't download domains.csv")
        exit(1)

    with owner_path.open('r', encoding='utf-8-sig', newline='') as csvfile:
        for row in csv.reader(csvfile):
            if row[0].lower().startswith("domain"):
                continue  # header row
            name = row[0].lower().strip()
            name_en = row[1].strip()
            name_fr = row[2].strip()
            # First entry for a domain wins.
            if name not in owners:
                owners[name] = {
                    "organization_name_en": name_en,
                    "organization_name_fr": name_fr,
                    "organization_slug": slugify.slugify(name_en),
                }

    with open(SCAN_DOMAINS_CSV, newline="") as csvfile:
        for row in csv.reader(csvfile):
            if row[0].lower().startswith("domain"):
                continue  # header row
            domains.add(row[0].lower().strip())

    return domains, owners
def extract_orgs(domains: typing.Dict) -> typing.Dict:
    """Build organization records, with domain counts, from domain documents.

    Args:
      domains: mapping of domain name -> document carrying
        organization_name_en / organization_name_fr / organization_slug.

    Returns:
      Mapping of organization slug -> {name_en, name_fr, slug, total_domains}.
    """
    organizations = {}
    for doc in domains.values():
        slug = doc['organization_slug']
        if slug not in organizations:
            organizations[slug] = {
                "name_en": doc['organization_name_en'],
                "name_fr": doc['organization_name_fr'],
                "slug": slug,
                "total_domains": 0,
            }
        organizations[slug]["total_domains"] += 1
    return organizations
# Load in data from the CSVs produced by domain-scan.
# The 'domains' map is used to ignore any untracked domains.
def load_scan_data(domains: typing.Set[str]) -> typing.Dict:
    """Read domain-scan pshtt/sslyze CSV results for the tracked domains.

    Hostnames absent from `domains` are skipped, as are sslyze rows whose
    scan was invalid (signalled by an empty SSLv2 field).

    Returns:
      Mapping of domain -> {"pshtt": row, "sslyze": row}; either key may be
      missing when no usable row was found.
    """
    scan_data = {name: {} for name in domains}

    with open(os.path.join(env.SCAN_RESULTS, "pshtt.csv"), newline="") as csvfile:
        for row in csv.DictReader(csvfile):
            name = row['Domain'].lower()
            if name not in domains:
                LOGGER.info("[pshtt] Skipping pshtt data for %s, not in domains.csv.", name)
                continue
            scan_data[name]["pshtt"] = row

    with open(os.path.join(env.SCAN_RESULTS, "sslyze.csv"), newline="") as csvfile:
        for row in csv.DictReader(csvfile):
            name = row['Domain'].lower()
            if name not in domains:
                LOGGER.info("[sslyze] Skipping sslyze data for %s, not in domains.csv.", name)
                continue
            # If the scan was invalid, most fields will be empty strings.
            # It'd be nice to make this more semantic on the domain-scan side.
            if row["SSLv2"] == "":
                LOGGER.info("[sslyze] Skipping sslyze data for %s, scan data was invalid.", name)
                continue
            scan_data[name]["sslyze"] = row

    return scan_data
def map_subdomains(domains, owners):
    """Attach base-domain and organization metadata to every domain, in place.

    Owned domains become their own parent. Other domains walk up their label
    chain until an owning ancestor is found; if none exists they are treated
    as stand-alone parents under the government-wide organization. Discovered
    subdomains are appended to their owner's "subdomains" list.
    """
    for name in domains:
        record = domains[name]
        if record["is_owner"]:
            record.update({
                "base_domain": name,
                "is_parent": True,
                "organization_slug": owners[name]["organization_slug"],
                "organization_name_en": owners[name]["organization_name_en"],
                "organization_name_fr": owners[name]["organization_name_fr"],
            })
            continue

        # Strip leading labels until we reach a domain in the owner list.
        labels = name.split('.')
        ancestor = name
        while labels and ancestor not in owners:
            labels = labels[1:]
            ancestor = '.'.join(labels)

        if not labels:
            # No owning ancestor: show the domain as its own parent under
            # the government-wide umbrella organization.
            record.update({
                "base_domain": name,
                "is_parent": True,
                "organization_name_en": 'Government of Canada',
                "organization_name_fr": 'Gouvernement du Canada',
                "organization_slug": 'government-of-canada',
            })
            continue

        owners[ancestor].setdefault("subdomains", []).append(name)
        record.update({
            "base_domain": ancestor,
            # If the owner was not scanned, let all subdomains become
            # 'parents' so they are displayed.
            "is_parent": ancestor not in domains,
            "organization_slug": owners[ancestor]["organization_slug"],
            "organization_name_en": owners[ancestor]["organization_name_en"],
            "organization_name_fr": owners[ancestor]["organization_name_fr"],
        })
# Given the domain data loaded in from CSVs, draw conclusions,
# and filter/transform data into form needed for display.
def process_https(domains, scan_data, acceptable_ciphers):
    """Attach an "https" report to every domain based on its scan data.

    For each domain, determines eligibility and, if eligible, uses the pshtt
    and sslyze rows to draw HTTPS/HSTS conclusions via `https_behavior_for`.
    Mutates `domains` in place; `acceptable_ciphers` is forwarded to the
    behavior calculation.
    """
    for domain_name in domains:
        ### HTTPS
        #
        # For HTTPS, we calculate individual reports for every subdomain.
        https_parent = {
            "eligible": False,  # domain eligible itself (is it live?)
            "eligible_zone": False,  # zone eligible (itself or any live subdomains?)
        }

        # No matter what, put the preloaded state onto the parent,
        # since even an unused domain can always be preloaded.
        try:
            # NOTE(review): for parent domains this is 0 rather than the
            # domain's own preload state — confirm that is intended.
            parent_preloaded = preloaded_or_not(
                scan_data[domains[domain_name]['base_domain']]["pshtt"]
            ) if not domains[domain_name]["is_parent"] else 0
        except KeyError:
            # The parent domain wasn't in the list of domains to scan; assume
            # it is not preloaded.
            parent_preloaded = 0

        if eligible_for_https(domains[domain_name]):
            https_parent = {
                **https_parent,
                **https_behavior_for(
                    scan_data[domain_name]["pshtt"],
                    scan_data[domain_name].get("sslyze"),
                    acceptable_ciphers,
                    parent_preloaded
                )
            }
            https_parent["eligible_zone"] = True

        domains[domain_name]["https"] = https_parent
def total_reports(domains, owners):
    """Roll subdomain HTTPS/crypto results up into each parent's totals.

    Mutates `domains` in place: every parent domain gets a "totals" dict with
    "https" (HTTPS/HSTS behavior for the pshtt-eligible parent + subdomains)
    and "crypto" (SSLv2/SSLv3/RC4/3DES posture for sslyze-scanned hosts).
    """
    for domain_name in (domain for domain in domains if domains[domain]["is_parent"]):
        https_parent = domains[domain_name]["https"]
        subdomain_names = owners.get(domain_name, {}).get("subdomains", [])
        eligible_children = {
            name for name in subdomain_names if eligible_for_https(domains[name])
        }

        # Totals based on summing up eligible reports within this domain.
        totals = {}
        # A zone is eligible when the parent itself or any subdomain is.
        # (Simplified from `|= True if eligible_children else False`.)
        https_parent["eligible_zone"] |= bool(eligible_children)

        # For HTTPS/HSTS, pshtt-eligible parent + subdomains.
        eligible_reports = [domains[name]["https"] for name in eligible_children]
        if https_parent["eligible"]:
            eligible_reports = [https_parent] + eligible_reports
        totals["https"] = total_https_report(eligible_reports)

        # For SSLv2/SSLv3/RC4/3DES, sslyze-eligible parent + subdomains. A
        # present "rc4" key marks hosts with a valid sslyze scan.
        eligible_reports = [
            domains[name]["https"]
            for name in subdomain_names
            if domains[name].get("https")
            and domains[name]["https"].get("rc4") is not None
        ]
        if https_parent.get("rc4") is not None:
            eligible_reports = [https_parent] + eligible_reports
        totals["crypto"] = total_crypto_report(eligible_reports)

        domains[domain_name]["totals"] = totals
# Given a list of domains or subdomains, quick filter to which
# are eligible for this report, optionally for an organization.
def eligible_for(report, hosts, organization=None):
    """Collect the `report` sections of hosts eligible for that report.

    Optionally restricted to hosts belonging to `organization` (matched by
    slug). Hosts missing the report, or marked ineligible, are skipped.
    """
    selected = []
    for host in hosts.values():
        section = host.get(report)
        if not section or not section["eligible"]:
            continue
        if organization is not None and host["organization_slug"] != organization["slug"]:
            continue
        selected.append(section)
    return selected
# Go through each report type and add organization totals for each type.
def update_organization_totals(organizations, domains):
# For each organization, update their report counts for every domain they have.
for organization_slug in organizations.keys():
organization = organizations[organization_slug]
# HTTPS. Parent and subdomains.
# LOGGER.info("[%s][%s] Totalling report." % (organization['slug'], | |
<gh_stars>10-100
# -*- coding: utf-8 -*-
import re
from galaxy.utils import ontology
def my_clean_text(text):
    """Put spaces around sentence-internal periods so they tokenize cleanly."""
    spaced = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text)  # 'abc.xyz' -> 'abc . xyz'
    return re.sub(r'(\w+)\.\.? ', r'\1 . ', spaced)           # 'abc. ' -> 'abc . '
def clean_text(text):
    """Normalize an utterance: lowercase, canonical punctuation and spellings.

    Applies, in order: whitespace/case normalization, quote and punctuation
    substitutions, time normalization (`clean_time`), a table of known-bad
    string fixes (postcodes, phone numbers, typos, contractions), period
    spacing, and finally the token replacements listed in
    'tools/mapping.pair' (tab-separated "from<TAB>to" lines, read from the
    current working directory on every call).
    """
    text = text.strip()
    text = text.lower()
    # Normalize unicode quotes and stray punctuation.
    text = text.replace(u"’", "'")
    text = text.replace(u"‘", "'")
    text = text.replace(';', ',')
    text = text.replace('"', ' ')
    text = text.replace('/', ' and ')
    text = text.replace("don't", "do n't")
    text = clean_time(text)
    # Known-bad strings -> canonical forms. Keys are regex patterns, so the
    # un-escaped dots in several entries match any character by design of
    # the original data (e.g. 'c.b. 1 7 d.y').
    baddata = { r'c\.b (\d), (\d) ([a-z])\.([a-z])': r'cb\1\2\3\4',
                'c.b. 1 7 d.y': 'cb17dy',
                'c.b.1 7 d.y': 'cb17dy',
                'c.b 25, 9 a.q': 'cb259aq',
                'isc.b 25, 9 a.q': 'is cb259aq',
                'c.b2, 1 u.f': 'cb21uf',
                'c.b 1,2 q.a':'cb12qa',
                '0-122-336-5664': '01223365664',
                'postcodecb21rs': 'postcode cb21rs',
                r'i\.d': 'id',
                ' i d ': 'id',
                'Telephone:01223358966': 'Telephone: 01223358966',
                'depature': 'departure',
                'depearting': 'departing',
                '-type': ' type',
                r"b[\s]?&[\s]?b": "bed and breakfast",
                "b and b": "bed and breakfast",
                r"guesthouse[s]?": "guest house",
                r"swimmingpool[s]?": "swimming pool",
                "wo n\'t": "will not",
                " \'d ": " would ",
                " \'m ": " am ",
                " \'re' ": " are ",
                " \'ll' ": " will ",
                " \'ve ": " have ",
                r'^\'': '',
                r'\'$': '',
                }
    for tmpl, good in baddata.items():
        text = re.sub(tmpl, good, text)
    text = re.sub(r'([a-zT]+)\.([a-z])', r'\1 . \2', text)  # 'abc.xyz' -> 'abc . xyz'
    text = re.sub(r'(\w+)\.\.? ', r'\1 . ', text)  # if 'abc. ' -> 'abc . '
    # Apply the external token-mapping table, padding with a space on each
    # side so that whole-token matches line up at the string boundaries.
    with open('tools/mapping.pair', 'r') as fin:
        for line in fin.readlines():
            fromx, tox = line.replace('\n', '').split('\t')
            text = ' ' + text + ' '
            text = text.replace(' ' + fromx + ' ', ' ' + tox + ' ')[1:-1]
    return text
def clean_time(utter):
    """Normalise clock-time mentions in *utter* to zero-padded 24-hour 'HH:MM'.

    E.g. '9 am' -> '09:00', '3:30pm' -> '15:30', '5pm' -> '17:00'.
    """
    def _to_24h(match):
        # Shift an am/pm-free hour into the afternoon: 3 -> 15.
        return str(int(match.group(1)) + 12)

    # Order matters: later rules consume the output of earlier ones.
    rewrite_rules = [
        (r'(\d+) ([ap]\.?m)', lambda m: m.group(1) + m.group(2)),  # '9 am' -> '9am'
        (r'((?<!\d)\d:\d+)(am)?', r'0\1'),                         # '3:30' -> '03:30'
        (r'((?<!\d)\d)am', r'0\1:00'),                             # '9am' -> '09:00'
        (r'((?<!\d)\d)pm', lambda m: _to_24h(m) + ':00'),          # '5pm' -> '17:00'
        (r'(\d+)(:\d+)pm', lambda m: _to_24h(m) + m.group(2)),     # '03:30pm' -> '15:30'
        (r'(\d+)a\.?m', r'\1'),                                    # strip leftover am suffix
    ]
    for pattern, replacement in rewrite_rules:
        utter = re.sub(pattern, replacement, utter)
    return utter
def clean_slot_values(domain, slot, value):
    """Normalise one (domain, slot, value) dialogue-state annotation.

    The value is first run through ``clean_text`` and then corrected with a
    hand-curated, per-domain table of known annotation errors (typos,
    abbreviations, venue-name variants, unpadded clock times).  Slot names
    are canonicalised as well: 'price range' -> 'pricerange',
    'arriveBy'/'arrive by' -> 'arriveby', 'leaveAt'/'leave at' -> 'leaveat'.

    Returns the (possibly renamed) slot and the cleaned value; an empty
    string marks a value that should be dropped.

    Fix over the previous revision: the final line was the bare expression
    ``value`` (a no-op), so the function returned None and the slot
    renamings above were lost; it now returns ``slot, value``.
    """
    value = clean_text(value)
    if not value:
        value = ''
    elif value == 'not mentioned':
        value = ''
        # value = 'not mentioned' # if in DST setting
    elif domain == 'attraction':
        if slot == 'name':
            if value == 't':
                value = ''
            if value == 'trinity':
                value = 'trinity college'
        elif slot == 'area':
            if value in ['town centre', 'cent', 'center', 'ce']:
                value = 'centre'
            elif value in ['ely', 'in town', 'museum', 'norwich', 'same area as hotel']:
                value = ""
            elif value in ['we']:
                value = "west"
        elif slot == 'type':
            if value in ['m', 'mus', 'musuem']:
                value = 'museum'
            elif value in ['art', 'architectural']:
                value = "architecture"
            elif value in ['churches']:
                value = "church"
            elif value in ['coll']:
                value = "college"
            elif value in ['concert', 'concerthall']:
                value = 'concert hall'
            elif value in ['night club']:
                value = 'nightclub'
            elif value in ['mutiple sports', 'mutliple sports', 'sports', 'galleria']:
                value = 'multiple sports'
            elif value in ['ol', 'science', 'gastropub', 'la raza']:
                value = ''
            elif value in ['swimmingpool', 'pool']:
                value = 'swimming pool'
            elif value in ['fun']:
                value = 'entertainment'
    elif domain == 'hotel':
        if slot == 'area':
            if value in ['cen', 'centre of town', 'near city center', 'center']:
                value = 'centre'
            elif value in ['east area', 'east side']:
                value = 'east'
            elif value in ['in the north', 'north part of town']:
                value = 'north'
            elif value in ['we']:
                value = "west"
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot == 'name':
            if value == 'uni':
                value = 'university arms hotel'
            elif value == 'university arms':
                value = 'university arms hotel'
            elif value == 'acron':
                value = 'acorn guest house'
            elif value == 'ashley':
                value = 'ashley hotel'
            elif value == 'arbury lodge guesthouse':
                value = 'arbury lodge guest house'
            elif value == 'la':
                value = 'la margherit'
            elif value == 'no':
                value = ''
        elif slot == 'internet':
            if value == 'does not':
                value = 'no'
            elif value in ['y', 'free', 'free internet']:
                value = 'yes'
            elif value in ['4']:
                value = ''
        elif slot == 'parking':
            if value == 'n':
                value = 'no'
            elif value in ['free parking']:
                value = 'yes'
            elif value in ['y']:
                value = 'yes'
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value == 'moderately':
                value = 'moderate'
            elif value in ['any']:
                value = "do n't care"
            elif value in ['inexpensive']:
                value = "cheap"
            elif value in ['2', '4']:
                value = ''
        elif slot == 'stars':
            if value == 'two':
                value = '2'
            elif value == 'three':
                value = '3'
            elif value in ['4-star', '4 stars', '4 star', 'four star', 'four stars']:
                value = '4'
        elif slot == 'type':
            if value == '0 star rarting':
                value = ''
            elif value == 'guesthouse':
                value = 'guest house'
            elif value not in ['hotel', 'guest house', "do n't care"]:
                value = ''
    elif domain == 'restaurant':
        if slot == "area":
            if value in ["center", 'scentre', "center of town", "city center", "cb30aq", "town center", 'centre of cambridge', 'city centre']:
                value = "centre"
            elif value == "west part of town":
                value = "west"
            elif value == "n":
                value = "north"
            elif value in ['the south']:
                value = 'south'
            elif value not in ['centre', 'south', "do n't care", 'west', 'east', 'north']:
                value = ''
        elif slot == "day":
            if value == "monda":
                value = "monday"
            elif value == "t":
                value = "tuesday"
        elif slot in ['pricerange', 'price range']:
            slot = 'pricerange'
            if value in ['moderately', 'mode', 'mo']:
                value = 'moderate'
            elif value in ['not']:
                value = ''
            elif value in ['inexpensive', 'ch']:
                value = "cheap"
        elif slot == "food":
            if value == "barbecue":
                value = "barbeque"
        # NOTE(review): unreachable — 'pricerange' is already caught by the
        # ['pricerange', 'price range'] branch above; kept for fidelity.
        elif slot == "pricerange":
            if value == "moderately":
                value = "moderate"
        elif slot == "time":
            if value == "9:00":
                value = "09:00"
            elif value == "9:45":
                value = "09:45"
            elif value == "1330":
                value = "13:30"
            elif value == "1430":
                value = "14:30"
            elif value == "9:15":
                value = "09:15"
            elif value == "9:30":
                value = "09:30"
            elif value == "1830":
                value = "18:30"
            elif value == "9":
                value = "09:00"
            elif value == "2:00":
                value = "14:00"
            elif value == "1:00":
                value = "13:00"
            elif value == "3:00":
                value = "15:00"
    elif domain == 'taxi':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1530':
                value = '15:30'
            elif value == '15 minutes':
                value = ''
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '1:00':
                value = '01:00'
            elif value == '21:4':
                value = '21:04'
            elif value == '4:15':
                value = '04:15'
            elif value == '5:45':
                value = '05:45'
            elif value == '0700':
                value = '07:00'
            elif value == '4:45':
                value = '04:45'
            elif value == '8:30':
                value = '08:30'
            elif value == '9:30':
                value = '09:30'
            # Times were sometimes annotated with '.' instead of ':'.
            value = value.replace(".", ":")
    elif domain == 'train':
        if slot in ['arriveBy', 'arrive by']:
            slot = 'arriveby'
            if value == '1':
                value = '01:00'
            elif value in ['does not care', 'doesnt care', "doesn't care"]:
                value = "do n't care"
            elif value == '8:30':
                value = '08:30'
            elif value == 'not 15:45':
                value = ''
            value = value.replace(".", ":")
        elif slot == 'day':
            if value == 'doesnt care' or value == "doesn't care":
                value = "do n't care"
        elif slot in ['leaveAt', 'leave at']:
            slot = 'leaveat'
            if value == '2:30':
                value = '02:30'
            elif value == '7:54':
                value = '07:54'
            elif value == 'after 5:45 pm':
                value = '17:45'
            elif value in ['early evening', 'friday', 'sunday', 'tuesday', 'afternoon']:
                value = ''
            elif value == '12':
                value = '12:00'
            elif value == '1030':
                value = '10:30'
            elif value == '1700':
                value = '17:00'
            elif value in ['does not care', 'doesnt care', 'do nt care', "doesn't care"]:
                value = "do n't care"
    return slot, value
<gh_stars>1-10
"""
********************************************
test_generator_modul_test_einstellungen.py
@digitalfellowship - Stand 07/2021
Autor: <NAME>
********************************************
Dieses Modul dient der Erstellung von Testeinstellungen für einen ILIAS-Test
Es sind nicht alle Einstellmöglichkeiten aus ILIAS verfügbar, sondern
lediglich die Einstellungen unter "Allgemeine Einstellungen" im ILIAS
"""
from tkinter import *
from tkscrolledframe import ScrolledFrame #Bewegbares Fesnter (Scrollbalken)
import sqlite3
import xml.etree.ElementTree as ET
from datetime import datetime
import datetime
import os
class Test_Einstellungen_GUI:
def __init__(self, project_root_path, test_qti_file_path_output):
# Projekt-Pfad
self.project_root_path = project_root_path
# Pfad für qti_(XML)-Datei für erstellten Test
self.test_qti_file_path_output = test_qti_file_path_output
# Name für Datenbank und Tabelle
self.settings_database = "test_settings_profiles_db.db"
self.settings_database_table = "my_profiles_table"
# Pfad für die Datenbank
self.settings_database_path = os.path.normpath(os.path.join(self.project_root_path, "Test_Generator_Datenbanken", self.settings_database))
# New Window must be "Toplevel" not "Tk()" in order to get Radiobuttons to work properly
self.test_settings_window = Toplevel()
self.test_settings_window.title("Test Einstellungen verwalten")
# Create a ScrolledFrame widget
self.sf_test_settings = ScrolledFrame(self.test_settings_window, width=300,
height=300)
self.sf_test_settings.pack(expand=1, fill="both")
# Bind the arrow keys and scroll wheel
### Bind the arrow keys and scroll wheel
### Funktion hat keine auswirkungen, erzeugt jedoch (vernachlässigbare) Fehler
# self.sf_test_settings.bind_arrow_keys(app)
# self.sf_test_settings.bind_scroll_wheel(app)
# Create a frame within the ScrolledFrame
self.test_settings = self.sf_test_settings.display_widget(Frame)
self.frame1 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame1.grid(row=0, column=0, padx=20, pady=10, sticky=NW)
self.frame2 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame2.grid(row=0, column=1, padx=20, pady=10, sticky=NW)
self.frame3 = LabelFrame(self.test_settings, text="Test Einstellungen", padx=5, pady=5)
self.frame3.grid(row=0, column=2, padx=20, pady=10, sticky=NW)
self.res12_min_listbox_label = Label(self.frame1, text="EINSTELLUNGEN DES TESTS",
font=('Helvetica', 10, 'bold'))
self.res12_min_listbox_label.grid(row=0, column=0, sticky=W, padx=10, pady=(20, 0))
self.res90_min_listbox_label = Label(self.frame1, text="Test-Titel")
self.res90_min_listbox_label.grid(row=1, column=0, sticky=W, padx=10)
self.res91_max_listbox_label = Label(self.frame1, text="Beschreibung")
self.res91_max_listbox_label.grid(row=2, column=0, sticky=W, padx=10)
self.res1_max_listbox_label = Label(self.frame1, text="Auswahl der Testfragen")
self.res1_max_listbox_label.grid(row=4, column=0, sticky=W, padx=10)
self.res1_prec_listbox_label = Label(self.frame1, text="Datenschutz")
self.res1_prec_listbox_label.grid(row=7, column=0, sticky=W, padx=10)
self.res1_tol_listbox_label = Label(self.frame1, text="VERFÜGBARKEIT", font=('Helvetica', 10, 'bold'))
self.res1_tol_listbox_label.grid(row=9, column=0, sticky=W, padx=10, pady=(20, 0))
self.res1_points_listbox_label = Label(self.frame1, text="Online --- not working")
self.res1_points_listbox_label.grid(row=10, column=0, sticky=W, padx=10)
self.res13_points_listbox_label = Label(self.frame1,
text="Zeitlich begrenzte Verfügbarkeit --- not working")
self.res13_points_listbox_label.grid(row=11, column=0, sticky=W, padx=10)
self.res22_tol_listbox_label = Label(self.frame1, text="INFORMATIONEN ZUM EINSTIEG",
font=('Helvetica', 10, 'bold'))
self.res22_tol_listbox_label.grid(row=14, column=0, sticky=W, padx=10, pady=(20, 0))
self.res23_points_listbox_label = Label(self.frame1, text="Einleitung")
self.res23_points_listbox_label.grid(row=15, column=0, sticky=W, padx=10)
self.res24_points_listbox_label = Label(self.frame1, text="Testeigenschaften anzeigen")
self.res24_points_listbox_label.grid(row=16, column=0, sticky=W, padx=10)
self.res31_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: ZUGANG", font=('Helvetica', 10, 'bold'))
self.res31_tol_listbox_label.grid(row=17, column=0, sticky=W, padx=10, pady=(20, 0))
self.test_time_year_label = Label(self.frame1, text="Jahr")
self.test_time_year_label.grid(row=17, column=1, sticky=W)
self.test_time_month_label = Label(self.frame1, text="Mon.")
self.test_time_month_label.grid(row=17, column=1, sticky=W, padx=35)
self.test_time_day_label = Label(self.frame1, text="Tag")
self.test_time_day_label.grid(row=17, column=1, sticky=W, padx=70)
self.test_time_hour_label = Label(self.frame1, text="Std.")
self.test_time_hour_label.grid(row=17, column=1, sticky=W, padx=105)
self.test_time_minute_label = Label(self.frame1, text="Min.")
self.test_time_minute_label.grid(row=17, column=1, sticky=W, padx=140)
self.res32_points_listbox_label = Label(self.frame1, text="Test-Start")
self.res32_points_listbox_label.grid(row=18, column=0, sticky=W, padx=10)
self.res33_points_listbox_label = Label(self.frame1, text="Test-Ende")
self.res33_points_listbox_label.grid(row=19, column=0, sticky=W, padx=10)
self.res34_tol_listbox_label = Label(self.frame1, text="Test-Passwort")
self.res34_tol_listbox_label.grid(row=20, column=0, sticky=W, padx=10)
self.res35_points_listbox_label = Label(self.frame1, text="Nur ausgewählte Teilnehmer")
self.res35_points_listbox_label.grid(row=21, column=0, sticky=W, padx=10)
self.res36_points_listbox_label = Label(self.frame1, text="Anzahl gleichzeitiger Teilnehmer begrenzen")
self.res36_points_listbox_label.grid(row=22, column=0, sticky=W, padx=10)
self.res37_points_listbox_label = Label(self.frame1, text="Inaktivitätszeit der Teilnehmner (in Sek.)")
self.res37_points_listbox_label.grid(row=23, column=0, sticky=W, padx=30)
self.res41_tol_listbox_label = Label(self.frame1, text="DURCHFÜHRUNG: STEUERUNG TESTDURCHLAUF",
font=('Helvetica', 10, 'bold'))
self.res41_tol_listbox_label.grid(row=24, column=0, sticky=W, padx=10, pady=(20, 0))
self.res42_points_listbox_label = Label(self.frame1, text="Anzahl von Testdurchläufen begrenzen")
self.res42_points_listbox_label.grid(row=25, column=0, sticky=W, padx=10)
self.res43_points_listbox_label = Label(self.frame1, text="Wartezeit zwischen Durchläufen erzwingen")
self.res43_points_listbox_label.grid(row=26, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer begrenzen")
self.res44_tol_listbox_label.grid(row=27, column=0, sticky=W, padx=10)
self.res44_tol_listbox_label = Label(self.frame1, text="Bearbeitungsdauer (in Min).")
self.res44_tol_listbox_label.grid(row=28, column=0, sticky=W, padx=30)
self.res44_tol_listbox_label = Label(self.frame1, text="Max. Bearbeitungsdauer für jeden Testlauf zurücksetzen")
self.res44_tol_listbox_label.grid(row=29, column=0, sticky=W, padx=30)
self.res45_points_listbox_label = Label(self.frame1, text="Prüfungsansicht")
self.res45_points_listbox_label.grid(row=30, column=0, sticky=W, padx=10)
self.res45_1_points_listbox_label = Label(self.frame1, text="Titel des Tests")
self.res45_1_points_listbox_label.grid(row=31, column=0, sticky=W, padx=30)
self.res45_2_points_listbox_label = Label(self.frame1, text="Name des Teilnehmers")
self.res45_2_points_listbox_label.grid(row=32, column=0, sticky=W, padx=30)
self.res46_points_listbox_label = Label(self.frame1, text="ILIAS-Prüfungsnummer anzeigen")
self.res46_points_listbox_label.grid(row=33, column=0, sticky=W, padx=10)
self.res51_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: VERHALTEN DER FRAGE",
font=('Helvetica', 10, 'bold'))
self.res51_tol_listbox_label.grid(row=0, column=2, sticky=W, padx=10, pady=(20, 0))
self.res52_points_listbox_label = Label(self.frame2, text="Anzeige der Fragentitel")
self.res52_points_listbox_label.grid(row=1, column=2, sticky=W, padx=10)
self.res53_points_listbox_label = Label(self.frame2, text="Automatisches speichern")
self.res53_points_listbox_label.grid(row=4, column=2, sticky=W, padx=10)
self.res54_tol_listbox_label = Label(self.frame2, text="Fragen mischen")
self.res54_tol_listbox_label.grid(row=5, column=2, sticky=W, padx=10)
self.res55_points_listbox_label = Label(self.frame2, text="Lösungshinweise")
self.res55_points_listbox_label.grid(row=6, column=2, sticky=W, padx=10)
self.res56_points_listbox_label = Label(self.frame2, text="Direkte Rückmeldung --- not working")
self.res56_points_listbox_label.grid(row=7, column=2, sticky=W, padx=10)
self.res57_tol_listbox_label = Label(self.frame2, text="Teilnehmerantworten")
self.res57_tol_listbox_label.grid(row=8, column=2, sticky=W, padx=10)
self.res58_points_listbox_label = Label(self.frame2, text="Verpflichtende Fragen")
self.res58_points_listbox_label.grid(row=12, column=2, sticky=W, padx=10)
self.res61_tol_listbox_label = Label(self.frame2, text="DURCHFÜHRUNG: FUNKTIONEN FÜR TEILNEHMER",
font=('Helvetica', 10, 'bold'))
self.res61_tol_listbox_label.grid(row=13, column=2, sticky=W, padx=10, pady=(20, 0))
self.res62_points_listbox_label = Label(self.frame2, text="Verwendung vorheriger Lösungen")
self.res62_points_listbox_label.grid(row=14, column=2, sticky=W, padx=10)
self.res63_points_listbox_label = Label(self.frame2, text="\"Test unterbrechen\" anzeigen")
self.res63_points_listbox_label.grid(row=15, column=2, sticky=W, padx=10)
self.res64_tol_listbox_label = Label(self.frame2, text="Nicht beantwortete Fragen")
self.res64_tol_listbox_label.grid(row=16, column=2, sticky=W, padx=10)
self.res65_points_listbox_label = Label(self.frame2, text="Fragenliste und Bearbeitungsstand anzeigen")
self.res65_points_listbox_label.grid(row=18, column=2, sticky=W, padx=10)
self.res66_points_listbox_label = Label(self.frame2, text="Fragen markieren")
self.res66_points_listbox_label.grid(row=19, column=2, sticky=W, padx=10)
self.res71_tol_listbox_label = Label(self.frame2, text="TEST ABSCHLIESSEN", font=('Helvetica', 10, 'bold'))
self.res71_tol_listbox_label.grid(row=20, column=2, sticky=W, padx=10, pady=(20, 0))
self.res72_points_listbox_label = Label(self.frame2, text="Übersicht gegebener Antworten")
self.res72_points_listbox_label.grid(row=21, column=2, sticky=W, padx=10)
self.res73_points_listbox_label = Label(self.frame2, text="Abschließende Bemerkung")
self.res73_points_listbox_label.grid(row=22, column=2, sticky=W, padx=10)
self.res74_tol_listbox_label = Label(self.frame2, text="Weiterleitung")
self.res74_tol_listbox_label.grid(row=23, column=2, sticky=W, padx=10)
self.res75_points_listbox_label = Label(self.frame2, text="Benachrichtigung")
self.res75_points_listbox_label.grid(row=24, column=2, sticky=W, padx=10)
# --------------------------- DEFINE CHECKBOXES WITH ENTRYS ---------------------------------------
# --------------------------- CHECKBOXES ---------------------------------------
self.var_online = IntVar()
self.check_online = Checkbutton(self.frame1, text="", variable=self.var_online, onvalue=1, offvalue=0)
self.check_online.deselect()
self.check_online.grid(row=10, column=1, sticky=W)
self.var_time_limited = IntVar()
self.time_limited_start_label = Label(self.frame1, text="Start")
self.time_limited_start_day_label = Label(self.frame1, text="Tag")
self.time_limited_start_day_entry = Entry(self.frame1, width=3)
self.time_limited_start_month_label = Label(self.frame1, text="Mo")
self.time_limited_start_month_entry = Entry(self.frame1, width=3)
self.time_limited_start_year_label = Label(self.frame1, text="Jahr")
self.time_limited_start_year_entry = Entry(self.frame1, width=4)
self.time_limited_start_hour_label = Label(self.frame1, text="Std")
self.time_limited_start_hour_entry = Entry(self.frame1, width=3)
self.time_limited_start_minute_label = Label(self.frame1, text="Min")
self.time_limited_start_minute_entry = Entry(self.frame1, width=3)
self.time_limited_end_label = Label(self.frame1, text="Ende")
self.time_limited_end_day_label = Label(self.frame1, text="Tag")
self.time_limited_end_day_entry = Entry(self.frame1, width=3)
self.time_limited_end_month_label = Label(self.frame1, text="Mo")
self.time_limited_end_month_entry = Entry(self.frame1, width=3)
self.time_limited_end_year_label = Label(self.frame1, text="Jahr")
self.time_limited_end_year_entry = Entry(self.frame1, width=4)
self.time_limited_end_hour_label = Label(self.frame1, text="Std")
self.time_limited_end_hour_entry = Entry(self.frame1, width=3)
self.time_limited_end_minute_label = Label(self.frame1, text="Min")
self.time_limited_end_minute_entry = Entry(self.frame1, width=3)
# self.entry.grid(row=11, column=1, sticky=W, padx=20)
self.check_time_limited = Checkbutton(self.frame1, text="", variable=self.var_time_limited, onvalue=1,
offvalue=0,
command=lambda
v=self.var_time_limited: Test_Einstellungen_GUI.show_entry_time_limited_start(
self, v))
self.check_time_limited.deselect()
self.check_time_limited.grid(row=11, column=1, sticky=W)
self.var_introduction = IntVar()
self.check_introduction = Checkbutton(self.frame1, text="", variable=self.var_introduction, onvalue=1,
offvalue=0,
command=lambda
v=self.var_introduction: Test_Einstellungen_GUI.show_introduction_textfield(
self, v))
self.check_introduction.deselect()
self.check_introduction.grid(row=15, column=1, sticky=W)
self.var_test_prop = IntVar()
self.check_test_prop = Checkbutton(self.frame1, text="", variable=self.var_test_prop, onvalue=1, offvalue=0)
self.check_test_prop.deselect()
self.check_test_prop.grid(row=16, column=1, sticky=W)
# self.var_test_password = IntVar()
# self.check_test_password = Checkbutton(self.frame1, text="", variable=self.var_test_password, onvalue=1, offvalue=0)
# self.check_test_password.deselect()
# self.check_test_password.grid(row=20, column=1, sticky=W)
self.var_specific_users = IntVar()
self.check_specific_users = Checkbutton(self.frame1, text="", variable=self.var_specific_users, onvalue=1,
offvalue=0)
self.check_specific_users.deselect()
self.check_specific_users.grid(row=21, column=1, sticky=W)
# self.var_fixed_users = IntVar()
# self.check_fixed_users = Checkbutton(self.frame1, text="", variable=self.var_fixed_users, onvalue=1, offvalue=0)
# self.check_fixed_users.deselect()
# self.check_fixed_users.grid(row=22, column=1, sticky=W)
# self.var_limit_test_runs = IntVar()
# self.check_limit_test_runs = Checkbutton(self.frame1, text="", variable=self.var_limit_test_runs, onvalue=1, offvalue=0)
# self.check_limit_test_runs.deselect()
# self.check_limit_test_runs.grid(row=22, column=1, sticky=W)
# self.var_time_betw_test_runs = IntVar()
# self.check_time_betw_test_runs = Checkbutton(self.frame1, text="", variable=self.var_time_betw_test_runs, onvalue=1, offvalue=0)
# self.check_time_betw_test_runs.deselect()
# self.check_time_betw_test_runs.grid(row=25, column=1, sticky=W)
self.var_processing_time = IntVar()
self.check_processing_time = Checkbutton(self.frame1, text="", variable=self.var_processing_time, onvalue=1,
offvalue=0)
self.check_processing_time.deselect()
self.check_processing_time.grid(row=27, column=1, sticky=W)
self.var_processing_time_reset = IntVar()
self.check_processing_time_reset = Checkbutton(self.frame1, text="", variable=self.var_processing_time_reset,
onvalue=1, offvalue=0)
self.check_processing_time_reset.deselect()
self.check_processing_time_reset.grid(row=29, column=1, sticky=W)
self.var_examview = IntVar()
self.check_examview = Checkbutton(self.frame1, text="", variable=self.var_examview, onvalue=1, offvalue=0)
self.check_examview.deselect()
self.check_examview.grid(row=30, column=1, sticky=W)
self.var_examview_test_title = IntVar()
self.check_examview_test_title = Checkbutton(self.frame1, text="", variable=self.var_examview_test_title,
onvalue=1, offvalue=0)
self.check_examview_test_title.deselect()
self.check_examview_test_title.grid(row=31, column=1, sticky=W)
self.var_examview_user_name = IntVar()
self.check_examview_user_name = Checkbutton(self.frame1, text="", variable=self.var_examview_user_name,
onvalue=1, offvalue=0)
self.check_examview_user_name.deselect()
self.check_examview_user_name.grid(row=32, column=1, sticky=W)
self.var_show_ilias_nr = IntVar()
self.check_show_ilias_nr = Checkbutton(self.frame1, text="", variable=self.var_show_ilias_nr, onvalue=1,
offvalue=0)
self.check_show_ilias_nr.deselect()
self.check_show_ilias_nr.grid(row=33, column=1, sticky=W)
self.var_autosave = IntVar()
self.check_autosave = Checkbutton(self.frame2, text="", variable=self.var_autosave, onvalue=1, offvalue=0,
command=lambda v=self.var_autosave: Test_Einstellungen_GUI.enable_autosave(self,
v))
self.check_autosave_interval_label = Label(self.frame2, text="Speicherintervall (in Sek.):")
self.check_autosave_interval_entry = Entry(self.frame2, width=10)
self.check_autosave.deselect()
self.check_autosave.grid(row=4, column=3, sticky=W)
self.var_mix_questions = IntVar()
self.check_mix_questions = Checkbutton(self.frame2, text="", variable=self.var_mix_questions, onvalue=1,
offvalue=0)
self.check_mix_questions.deselect()
self.check_mix_questions.grid(row=5, column=3, sticky=W)
self.var_show_solution_notes = IntVar()
self.check_show_solution_notes = Checkbutton(self.frame2, text="", variable=self.var_show_solution_notes,
onvalue=1, offvalue=0)
self.check_show_solution_notes.deselect()
self.check_show_solution_notes.grid(row=6, column=3, sticky=W)
self.var_direct_response = IntVar()
self.check_direct_response = Checkbutton(self.frame2, text="", variable=self.var_direct_response, onvalue=1,
offvalue=0)
self.check_direct_response.deselect()
self.check_direct_response.grid(row=7, column=3, sticky=W)
self.var_mandatory_questions = IntVar()
self.check_mandatory_questions = Checkbutton(self.frame2, text="", variable=self.var_mandatory_questions,
onvalue=1, offvalue=0)
self.check_mandatory_questions.deselect()
self.check_mandatory_questions.grid(row=12, column=3, sticky=W)
self.var_use_previous_solution = IntVar()
self.check_use_previous_solution = Checkbutton(self.frame2, text="", variable=self.var_use_previous_solution,
onvalue=1, offvalue=0)
self.check_use_previous_solution.deselect()
self.check_use_previous_solution.grid(row=14, column=3, sticky=W)
self.var_show_test_cancel = IntVar()
self.check_show_test_cancel = Checkbutton(self.frame2, text="", variable=self.var_show_test_cancel, onvalue=1,
offvalue=0)
self.check_show_test_cancel.deselect()
self.check_show_test_cancel.grid(row=15, column=3, sticky=W)
self.var_show_question_list_process_status = IntVar()
self.check_show_question_list_process_status = Checkbutton(self.frame2, text="",
variable=self.var_show_question_list_process_status,
onvalue=1, offvalue=0)
self.check_show_question_list_process_status.deselect()
self.check_show_question_list_process_status.grid(row=18, column=3, sticky=W)
self.var_question_mark = IntVar()
self.check_question_mark = Checkbutton(self.frame2, text="", variable=self.var_question_mark, onvalue=1,
offvalue=0)
self.check_question_mark.deselect()
self.check_question_mark.grid(row=19, column=3, sticky=W)
self.var_overview_answers = IntVar()
self.check_overview_answers = Checkbutton(self.frame2, text="", variable=self.var_overview_answers, onvalue=1,
offvalue=0)
self.check_overview_answers.grid(row=21, column=3, sticky=W)
self.var_show_end_comment = IntVar()
self.check_show_end_comment = Checkbutton(self.frame2, text="", variable=self.var_show_end_comment, onvalue=1,
offvalue=0,
command=lambda
v=self.var_show_end_comment: Test_Einstellungen_GUI.show_concluding_remarks(
self, v))
self.check_show_end_comment.deselect()
self.check_show_end_comment.grid(row=22, column=3, sticky=W)
self.var_forwarding = IntVar()
self.check_forwarding = Checkbutton(self.frame2, text="", variable=self.var_forwarding, onvalue=1, offvalue=0)
self.check_forwarding.deselect()
self.check_forwarding.grid(row=23, column=3, sticky=W)
self.var_notification = IntVar()
self.check_notification = Checkbutton(self.frame2, text="", variable=self.var_notification, onvalue=1,
offvalue=0)
self.check_notification.deselect()
self.check_notification.grid(row=24, column=3, sticky=W)
# --------------------------- RADIO BUTTONS ---------------------------------------
self.select_question = IntVar()
self.select_question.set(0)
self.select_question_radiobtn1 = Radiobutton(self.frame1, | |
# -*- coding: utf-8 -*-
# Python GUI code generated with wxFormBuilder (version Oct 26 2018)
# http://www.wxformbuilder.org/
import wx
class kb_test(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title="Keyboard Test Utility", pos=wx.DefaultPosition,
size=wx.Size(710, 370), style=wx.CLOSE_BOX | wx.TAB_TRAVERSAL)
self.SetSizeHints(wx.DefaultSize, wx.DefaultSize)
# attach the key bind event to accelerator table (to use cmd+q keys to close app)
randomId = wx.NewIdRef(count=1)
self.Bind(wx.EVT_MENU, self.onkeycombo, id=randomId)
accel_tbl = wx.AcceleratorTable([(wx.ACCEL_CTRL, ord('q'), randomId)])
self.SetAcceleratorTable(accel_tbl)
bSizer1 = wx.BoxSizer(wx.VERTICAL)
self.m_panel1 = wx.Panel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL)
bSizer2 = wx.BoxSizer(wx.VERTICAL)
bSizer3 = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText_empty1 = wx.StaticText(self.m_panel1, wx.ID_ANY, u" ", wx.DefaultPosition, wx.DefaultSize,
)
self.m_staticText = wx.StaticText(self.m_panel1, wx.ID_ANY, u"MacBook Pro / Air", wx.DefaultPosition,
wx.DefaultSize, 0)
self.m_staticText.SetFont(
wx.Font(20, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, "Arial"))
self.m_staticText_empty2 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize,
)
bSizer3.Add(self.m_staticText_empty1, 0, wx.ALL, 5)
bSizer3.Add(self.m_staticText, 0, wx.ALL, 5)
# bSizer3.Add(self.m_staticText_empty2, 0, wx.ALL, 5)
bSizer2.Add(bSizer3, 0, wx.ALL, 10)
bSizer4 = wx.BoxSizer(wx.VERTICAL)
self.m_staticline1 = wx.StaticLine(self.m_panel1, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize,
wx.LI_HORIZONTAL)
self.m_staticline1.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNSHADOW))
bSizer4.Add(self.m_staticline1, 0, wx.EXPAND | wx.ALL, 0)
# self.m_staticText_empty = wx.StaticText(self.m_panel1, wx.ID_ANY, u"", wx.DefaultPosition, wx.DefaultSize,
# )
# bSizer4.Add(self.m_staticText_empty, 0, wx.ALL, 5)
bSizer5 = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText_esc = wx.StaticText(self.m_panel1, wx.ID_ANY, u"esc", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_esc.Wrap(-1)
self.m_staticText_esc.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_esc.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_esc, 0, wx.ALL, 5)
self.m_staticText_f1 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F1", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f1.Wrap(-1)
self.m_staticText_f1.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f1.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f1, 0, wx.ALL, 5)
self.m_staticText_f2 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F2", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f2.Wrap(-1)
self.m_staticText_f2.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f2.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f2, 0, wx.ALL, 5)
self.m_staticText_f3 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F3", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f3.Wrap(-1)
self.m_staticText_f3.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f3.SetBackgroundColour(wx.Colour(76, 75, 77))
bSizer5.Add(self.m_staticText_f3, 0, wx.ALL, 5)
self.m_staticText_f4 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F4", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f4.Wrap(-1)
self.m_staticText_f4.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f4.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f4, 0, wx.ALL, 5)
self.m_staticText_f5 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F5", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f5.Wrap(-1)
self.m_staticText_f5.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f5.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f5, 0, wx.ALL, 5)
self.m_staticText_f6 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F6", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f6.Wrap(-1)
self.m_staticText_f6.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f6.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f6, 0, wx.ALL, 5)
self.m_staticText_f7 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F7", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f7.Wrap(-1)
self.m_staticText_f7.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f7.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f7, 0, wx.ALL, 5)
self.m_staticText_f8 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F8", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f8.Wrap(-1)
self.m_staticText_f8.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f8.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f8, 0, wx.ALL, 5)
self.m_staticText_f9 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F9", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f9.Wrap(-1)
self.m_staticText_f9.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f9.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f9, 0, wx.ALL, 5)
self.m_staticText_f10 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F10", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f10.Wrap(-1)
self.m_staticText_f10.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f10.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f10, 0, wx.ALL, 5)
self.m_staticText_f11 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F11", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f11.Wrap(-1)
self.m_staticText_f11.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f11.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f11, 0, wx.ALL, 5)
self.m_staticText_f12 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F12", wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f12.Wrap(-1)
self.m_staticText_f12.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f12.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_f12, 0, wx.ALL, 5)
self.m_staticText_eject = wx.StaticText(self.m_panel1, wx.ID_ANY, u'\u23CF', wx.DefaultPosition, wx.Size(40, 20),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_eject.Wrap(-1)
self.m_staticText_eject.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_eject.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer5.Add(self.m_staticText_eject, 0, wx.ALL, 5)
bSizer4.Add(bSizer5, 0, wx.EXPAND, 5)
bSizer6 = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText_tilde = wx.StaticText(self.m_panel1, wx.ID_ANY, u"~\n`", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_tilde.Wrap(-1)
self.m_staticText_tilde.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_tilde.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_tilde, 0, wx.ALL, 5)
self.m_staticText_1 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"!\n1", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_1.Wrap(-1)
self.m_staticText_1.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_1.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_1, 0, wx.ALL, 5)
self.m_staticText_2 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"@\n2", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_2.Wrap(-1)
self.m_staticText_2.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_2.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_2, 0, wx.ALL, 5)
self.m_staticText_3 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"#\n3", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_3.Wrap(-1)
self.m_staticText_3.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_3.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_3, 0, wx.ALL, 5)
self.m_staticText_4 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"$\n4", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_4.Wrap(-1)
self.m_staticText_4.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_4.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_4, 0, wx.ALL, 5)
self.m_staticText_5 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"%\n5", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_5.Wrap(-1)
self.m_staticText_5.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_5.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_5, 0, wx.ALL, 5)
self.m_staticText_6 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"^\n6", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_6.Wrap(-1)
self.m_staticText_6.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_6.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_6, 0, wx.ALL, 5)
self.m_staticText_7 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"&&\n7", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_7.Wrap(-1)
self.m_staticText_7.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_7.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_7, 0, wx.ALL, 5)
self.m_staticText_8 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"*\n8", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_8.Wrap(-1)
self.m_staticText_8.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_8.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_8, 0, wx.ALL, 5)
self.m_staticText_9 = wx.StaticText(self.m_panel1, wx.ID_ANY, u"(\n9", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_9.Wrap(-1)
self.m_staticText_9.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_9.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_9, 0, wx.ALL, 5)
self.m_staticText_0 = wx.StaticText(self.m_panel1, wx.ID_ANY, u")\n0", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_0.Wrap(-1)
self.m_staticText_0.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_0.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_0, 0, wx.ALL, 5)
self.m_staticText_minus = wx.StaticText(self.m_panel1, wx.ID_ANY, u"_\n-", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_minus.Wrap(-1)
self.m_staticText_minus.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_minus.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_minus, 0, wx.ALL, 5)
self.m_staticText_plus = wx.StaticText(self.m_panel1, wx.ID_ANY, u"+\n=", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_plus.Wrap(-1)
self.m_staticText_plus.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_plus.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_plus, 0, wx.ALL, 5)
self.m_staticText_delete = wx.StaticText(self.m_panel1, wx.ID_ANY, u"delete", wx.DefaultPosition,
wx.Size(67, 38),
wx.ALIGN_RIGHT)
self.m_staticText_delete.Wrap(-1)
self.m_staticText_delete.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_delete.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer6.Add(self.m_staticText_delete, 0, wx.ALL, 5)
bSizer4.Add(bSizer6, 0, wx.EXPAND, 5)
bSizer7 = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText_tab = wx.StaticText(self.m_panel1, wx.ID_ANY, u"tab", wx.DefaultPosition, wx.Size(67, 38), 0)
self.m_staticText_tab.Wrap(-1)
self.m_staticText_tab.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_tab.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_tab, 0, wx.ALL, 5)
self.m_staticText_q = wx.StaticText(self.m_panel1, wx.ID_ANY, u"Q", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_q.Wrap(-1)
self.m_staticText_q.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_q.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_q, 0, wx.ALL, 5)
self.m_staticText_w = wx.StaticText(self.m_panel1, wx.ID_ANY, u"W", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_w.Wrap(-1)
self.m_staticText_w.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_w.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_w, 0, wx.ALL, 5)
self.m_staticText_e = wx.StaticText(self.m_panel1, wx.ID_ANY, u"E", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_e.Wrap(-1)
self.m_staticText_e.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_e.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_e, 0, wx.ALL, 5)
self.m_staticText_r = wx.StaticText(self.m_panel1, wx.ID_ANY, u"R", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_r.Wrap(-1)
self.m_staticText_r.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_r.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_r, 0, wx.ALL, 5)
self.m_staticText_t = wx.StaticText(self.m_panel1, wx.ID_ANY, u"T", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_t.Wrap(-1)
self.m_staticText_t.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_t.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_t, 0, wx.ALL, 5)
self.m_staticText_y = wx.StaticText(self.m_panel1, wx.ID_ANY, u"Y", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_y.Wrap(-1)
self.m_staticText_y.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_y.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_y, 0, wx.ALL, 5)
self.m_staticText_u = wx.StaticText(self.m_panel1, wx.ID_ANY, u"U", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_u.Wrap(-1)
self.m_staticText_u.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_u.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_u, 0, wx.ALL, 5)
self.m_staticText_i = wx.StaticText(self.m_panel1, wx.ID_ANY, u"I", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_i.Wrap(-1)
self.m_staticText_i.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_i.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_i, 0, wx.ALL, 5)
self.m_staticText_o = wx.StaticText(self.m_panel1, wx.ID_ANY, u"O", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_o.Wrap(-1)
self.m_staticText_o.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_o.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_o, 0, wx.ALL, 5)
self.m_staticText_p = wx.StaticText(self.m_panel1, wx.ID_ANY, u"P", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_p.Wrap(-1)
self.m_staticText_p.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_p.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_p, 0, wx.ALL, 5)
self.m_staticText_curly_left = wx.StaticText(self.m_panel1, wx.ID_ANY, u"{\n[", wx.DefaultPosition,
wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_curly_left.Wrap(-1)
self.m_staticText_curly_left.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_curly_left.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_curly_left, 0, wx.ALL, 5)
self.m_staticText_curly_right = wx.StaticText(self.m_panel1, wx.ID_ANY, u"}\n]", wx.DefaultPosition,
wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_curly_right.Wrap(-1)
self.m_staticText_curly_right.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_curly_right.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_curly_right, 0, wx.ALL, 5)
self.m_staticText_pipe = wx.StaticText(self.m_panel1, wx.ID_ANY, u"|\n\\", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_pipe.Wrap(-1)
self.m_staticText_pipe.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_pipe.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer7.Add(self.m_staticText_pipe, 0, wx.ALL, 5)
bSizer4.Add(bSizer7, 0, wx.EXPAND, 5)
bSizer8 = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText_caps = wx.StaticText(self.m_panel1, wx.ID_ANY, u"caps lock", wx.DefaultPosition,
wx.Size(77, 38), wx.ALIGN_LEFT)
self.m_staticText_caps.Wrap(-1)
self.m_staticText_caps.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_caps.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_caps, 0, wx.ALL, 5)
self.m_staticText_a = wx.StaticText(self.m_panel1, wx.ID_ANY, u"A", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_a.Wrap(-1)
self.m_staticText_a.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_a.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_a, 0, wx.ALL, 5)
self.m_staticText_s = wx.StaticText(self.m_panel1, wx.ID_ANY, u"S", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_s.Wrap(-1)
self.m_staticText_s.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_s.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_s, 0, wx.ALL, 5)
self.m_staticText_d = wx.StaticText(self.m_panel1, wx.ID_ANY, u"D", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_d.Wrap(-1)
self.m_staticText_d.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_d.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_d, 0, wx.ALL, 5)
self.m_staticText_f = wx.StaticText(self.m_panel1, wx.ID_ANY, u"F", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_f.Wrap(-1)
self.m_staticText_f.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_f.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_f, 0, wx.ALL, 5)
self.m_staticText_g = wx.StaticText(self.m_panel1, wx.ID_ANY, u"G", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_g.Wrap(-1)
self.m_staticText_g.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_g.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_g, 0, wx.ALL, 5)
self.m_staticText_h = wx.StaticText(self.m_panel1, wx.ID_ANY, u"H", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_h.Wrap(-1)
self.m_staticText_h.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_h.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_h, 0, wx.ALL, 5)
self.m_staticText_j = wx.StaticText(self.m_panel1, wx.ID_ANY, u"J", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_j.Wrap(-1)
self.m_staticText_j.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_j.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_j, 0, wx.ALL, 5)
self.m_staticText_k = wx.StaticText(self.m_panel1, wx.ID_ANY, u"K", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_k.Wrap(-1)
self.m_staticText_k.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_k.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_k, 0, wx.ALL, 5)
self.m_staticText_l = wx.StaticText(self.m_panel1, wx.ID_ANY, u"L", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_l.Wrap(-1)
self.m_staticText_l.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_l.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_l, 0, wx.ALL, 5)
self.m_staticText_colon = wx.StaticText(self.m_panel1, wx.ID_ANY, u":\n;", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_colon.Wrap(-1)
self.m_staticText_colon.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_colon.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_colon, 0, wx.ALL, 5)
self.m_staticText_quote = wx.StaticText(self.m_panel1, wx.ID_ANY, u"\"\n'", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_quote.Wrap(-1)
self.m_staticText_quote.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_quote.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_quote, 0, wx.ALL, 5)
self.m_staticText_enter = wx.StaticText(self.m_panel1, wx.ID_ANY, u"enter\nreturn", wx.DefaultPosition,
wx.Size(77, 38), wx.ALIGN_RIGHT)
self.m_staticText_enter.Wrap(-1)
self.m_staticText_enter.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_enter.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer8.Add(self.m_staticText_enter, 0, wx.ALL, 5)
bSizer4.Add(bSizer8, 0, wx.EXPAND, 5)
bSizer9 = wx.BoxSizer(wx.HORIZONTAL)
self.m_staticText_left_shift = wx.StaticText(self.m_panel1, wx.ID_ANY, u"shift", wx.DefaultPosition,
wx.Size(101, 38),
0)
self.m_staticText_left_shift.Wrap(-1)
self.m_staticText_left_shift.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_left_shift.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer9.Add(self.m_staticText_left_shift, 0, wx.ALL, 5)
self.m_staticText_z = wx.StaticText(self.m_panel1, wx.ID_ANY, u"Z", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_z.Wrap(-1)
self.m_staticText_z.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_z.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer9.Add(self.m_staticText_z, 0, wx.ALL, 5)
self.m_staticText_x = wx.StaticText(self.m_panel1, wx.ID_ANY, u"X", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_x.Wrap(-1)
self.m_staticText_x.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_x.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer9.Add(self.m_staticText_x, 0, wx.ALL, 5)
self.m_staticText_c = wx.StaticText(self.m_panel1, wx.ID_ANY, u"C", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_c.Wrap(-1)
self.m_staticText_c.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_c.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer9.Add(self.m_staticText_c, 0, wx.ALL, 5)
self.m_staticText_v = wx.StaticText(self.m_panel1, wx.ID_ANY, u"V", wx.DefaultPosition, wx.Size(38, 38),
wx.ALIGN_CENTER_HORIZONTAL)
self.m_staticText_v.Wrap(-1)
self.m_staticText_v.SetForegroundColour(wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
self.m_staticText_v.SetBackgroundColour(wx.Colour(76, 76, 76))
bSizer9.Add(self.m_staticText_v, 0, wx.ALL, 5)
self.m_staticText_b = wx.StaticText(self.m_panel1, wx.ID_ANY, u"B", | |
#!/usr/bin/env python
"""
Simple desktop dialogue box support for Python.
Copyright (C) 2007, 2009 <NAME> <<EMAIL>>
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 3 of the License, or (at your option) any
later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU Lesser General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
--------
Opening Dialogue Boxes (Dialogs)
--------------------------------
To open a dialogue box (dialog) in the current desktop environment, relying on
the automatic detection of that environment, use the appropriate dialogue box
class:
question = desktop.dialog.Question("Are you sure?")
result = question.open()
To override the detected desktop, specify the desktop parameter to the open
function as follows:
question.open("KDE") # Insists on KDE
question.open("GNOME") # Insists on GNOME
question.open("MATE") # Insists on MATE
The dialogue box options are documented in each class's docstring.
Available dialogue box classes are listed in the desktop.dialog.available
attribute.
Supported desktop environments are listed in the desktop.dialog.supported
attribute.
"""
from desktop import use_desktop, _run, _readfrom, _status
class _wrapper:
def __init__(self, handler):
self.handler = handler
class _readvalue(_wrapper):
    "Return the handler's output with surrounding whitespace removed."
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell)
        return output.strip()
class _readinput(_wrapper):
    "Return the handler's output minus its final character."
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell)
        return output[:-1]
class _readvalues_kdialog(_wrapper):
    "Split quoted kdialog output into a list of selected values."
    def __call__(self, cmd, shell):
        # kdialog emits selections as '"a" "b" ...'; strip the outer quotes
        # and split on the inter-value quoting.
        output = self.handler(cmd, shell).strip().strip('"')
        return output.split('" "') if output else []
class _readvalues_zenity(_wrapper):
    "Split pipe-separated zenity output into a list of selected values."
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell).strip()
        return output.split("|") if output else []
class _readvalues_Xdialog(_wrapper):
    "Split slash-separated Xdialog output into a list of selected values."
    def __call__(self, cmd, shell):
        output = self.handler(cmd, shell).strip()
        return output.split("/") if output else []
# Dialogue parameter classes.
class String:
    "A generic parameter."
    def __init__(self, name):
        # Attribute name looked up on the dialogue object by Dialogue.open.
        self.name = name
    def convert(self, value, program):
        # A missing value is rendered as a single empty argument.
        if value:
            return [value]
        return [""]
class Strings(String):
    "Multiple string parameters."
    def convert(self, value, program):
        # A missing value contributes no arguments at all.
        if not value:
            return []
        return value
class StringPairs(String):
    "Multiple string parameters duplicated to make identifiers."
    def convert(self, value, program):
        # Emit each value twice (identifier followed by display text).
        # A missing (None) value is treated as an empty sequence, matching
        # Strings.convert, instead of raising TypeError on iteration.
        result = []
        for v in value or []:
            result.append(v)
            result.append(v)
        return result
class StringKeyword:
    "A keyword parameter."
    def __init__(self, keyword, name):
        # keyword: literal option prefix (e.g. "--text");
        # name: attribute looked up on the dialogue object.
        self.keyword = keyword
        self.name = name
    def convert(self, value, program):
        # Render as a single "keyword=value" argument; None becomes empty.
        text = value or ""
        return [self.keyword + "=" + text]
class StringKeywords:
    "Multiple keyword parameters."
    def __init__(self, keyword, name):
        self.keyword = keyword
        self.name = name
    def convert(self, value, program):
        # One "keyword=value" argument per supplied value; None yields none.
        prefix = self.keyword + "="
        return [prefix + v for v in (value or [])]
class Integer(String):
    "An integer parameter."
    # Fallback sizes used when the caller supplies no value, keyed by
    # parameter name.
    defaults = {
        "width" : 40,
        "height" : 15,
        "list_height" : 10
    }
    # Multiplier applied when the dimension is expressed in pixels rather
    # than characters.
    scale = 8
    def __init__(self, name, pixels=0):
        String.__init__(self, name)
        self.factor = self.scale if pixels else 1
    def convert(self, value, program):
        if value is None:
            value = self.defaults[self.name]
        scaled = int(value) * self.factor
        return [str(scaled)]
class IntegerKeyword(Integer):
    "An integer keyword parameter."
    def __init__(self, keyword, name, pixels=0):
        Integer.__init__(self, name, pixels)
        # Literal option prefix emitted before the numeric value.
        self.keyword = keyword
    def convert(self, value, program):
        if value is None:
            value = self.defaults[self.name]
        scaled = int(value) * self.factor
        return ["%s=%d" % (self.keyword, scaled)]
class Boolean(String):
    "A boolean parameter."
    # Per-program spellings of [false, true].
    values = {
        "kdialog" : ["off", "on"],
        "zenity" : ["FALSE", "TRUE"],
        "Xdialog" : ["off", "on"]
    }
    def convert(self, value, program):
        off, on = self.values[program]
        return [on] if value else [off]
class MenuItemList(String):
    "A menu item list parameter."
    def convert(self, value, program):
        # Emit a value/text argument pair for each menu item. A missing
        # (None) item list is treated as empty, matching Strings.convert,
        # instead of raising TypeError on iteration.
        result = []
        for item in value or []:
            result.append(item.value)
            result.append(item.text)
        return result
class ListItemList(String):
    "A radiolist/checklist item list parameter."
    def __init__(self, name, status_first=0):
        String.__init__(self, name)
        # Some programs expect the on/off status before the value/text
        # pair; others expect it after.
        self.status_first = status_first
    def convert(self, value, program):
        # Hoisted loop invariant: one Boolean converter serves every item
        # (the original constructed a fresh Boolean(None) per iteration).
        boolean = Boolean(None)
        result = []
        # A missing (None) item list is treated as empty, matching
        # Strings.convert, instead of raising TypeError.
        for item in value or []:
            status = boolean.convert(item.status, program)
            if self.status_first:
                result += status
            result.append(item.value)
            result.append(item.text)
            if not self.status_first:
                result += status
        return result
# Dialogue argument values.
class MenuItem:
    "A menu item which can also be used with radiolists and checklists."
    def __init__(self, value, text, status=0):
        # value: identifier reported when the item is chosen;
        # text: label shown to the user;
        # status: initial on/off state (used by radiolists/checklists).
        self.value = value
        self.text = text
        self.status = status
# Dialogue classes.
class Dialogue:
    # Maps a detected desktop environment to the external program used to
    # render the dialogue box.
    commands = {
        "KDE" : "kdialog",
        "GNOME" : "zenity",
        "MATE" : "zenity",
        "XFCE" : "zenity", # NOTE: Based on observations with Xubuntu.
        "X11" : "Xdialog"
    }
    def open(self, desktop=None):
        """
        Open a dialogue box (dialog) with a program suited to the desktop
        environment currently in use.
        The optional 'desktop' parameter forces the use of a particular
        desktop environment's mechanisms instead of guessing or detecting
        which environment is being used. Suggested values are "standard",
        "KDE", "GNOME", "MATE", "Mac OS X", "Windows".
        Depending on the dialogue class, the result of the interaction is a
        string of user input (Input, Password, Menu, Pulldown), a list of
        strings naming one or more selected items (RadioList, CheckList),
        or a value indicating true or false (Question, Warning, Message,
        Error).
        Where a string value may be expected but no choice is made, an
        empty string may be returned; likewise an empty list where a list
        of values is expected.
        """
        # Determine which desktop environment we are targeting.
        desktop_in_use = use_desktop(desktop)
        try:
            program = self.commands[desktop_in_use]
        except KeyError:
            raise OSError("Desktop '%s' not supported (no known dialogue box command could be suggested)" % desktop_in_use)
        # Each per-program recipe pairs a result handler (communicating
        # with the subprocess; some return booleans, others strings) with
        # an argument template of literal strings and parameter objects.
        handler, options = self.info[program]
        argv = [program]
        for option in options:
            if isinstance(option, str):
                argv.append(option)
            else:
                argv.extend(option.convert(getattr(self, option.name, None), program))
        return handler(argv, 0)
class Simple(Dialogue):
    "Base class for dialogues configured by a text plus optional dimensions."
    def __init__(self, text, width=None, height=None):
        # width/height are in characters; None lets Integer.defaults apply.
        self.text = text
        self.width = width
        self.height = height
class Question(Simple):
    """
    A dialogue asking a question and showing yes/no response buttons.
    Options: text, width (in characters), height (in characters)
    Response: a boolean value indicating an affirmative response (true) or a
              negative response (false)
    """
    name = "question"
    # Backend recipes consumed by Dialogue.open: program -> (result handler,
    # argument template). The height/width integers apply to Xdialog only.
    info = {
        "kdialog" : (_status, ["--yesno", String("text")]),
        "zenity" : (_status, ["--question", StringKeyword("--text", "text")]),
        "Xdialog" : (_status, ["--stdout", "--yesno", String("text"), Integer("height"), Integer("width")]),
    }
class Warning(Simple):
    """
    A warning dialogue asking for confirmation and showing response buttons.
    Options: text, width (in characters), height (in characters)
    Response: a boolean value indicating an affirmative response (true) or a
    negative response
    """

    name = "warning"
    # (handler, argument template) per dialogue program; the exit status is
    # the user's yes/no answer. Note Xdialog has no warning variant and
    # falls back to a plain yes/no box.
    info = {
        "kdialog" : (_status, ["--warningyesno", String("text")]),
        "zenity" : (_status, ["--warning", StringKeyword("--text", "text")]),
        "Xdialog" : (_status, ["--stdout", "--yesno", String("text"), Integer("height"), Integer("width")]),
    }
class Message(Simple):
    """
    A message dialogue.
    Options: text, width (in characters), height (in characters)
    Response: a boolean value indicating an affirmative response (true) or a
    negative response
    """

    name = "message"
    # (handler, argument template) per dialogue program; _status reports
    # whether the box was dismissed normally.
    info = {
        "kdialog" : (_status, ["--msgbox", String("text")]),
        "zenity" : (_status, ["--info", StringKeyword("--text", "text")]),
        "Xdialog" : (_status, ["--stdout", "--msgbox", String("text"), Integer("height"), Integer("width")]),
    }
class Error(Simple):
    """
    An error dialogue.
    Options: text, width (in characters), height (in characters)
    Response: a boolean value indicating an affirmative response (true) or a
    negative response
    """

    name = "error"
    # (handler, argument template) per dialogue program; _status reports
    # whether the box was dismissed normally.
    info = {
        "kdialog" : (_status, ["--error", String("text")]),
        "zenity" : (_status, ["--error", StringKeyword("--text", "text")]),
        "Xdialog" : (_status, ["--stdout", "--msgbox", String("text"), Integer("height"), Integer("width")]),
    }
class Menu(Simple):
    """
    A menu of options, one of which being selectable.
    Options: text, width (in characters), height (in characters),
    list_height (in items), items (MenuItem objects)
    Response: a value corresponding to the chosen item
    """

    name = "menu"
    # (handler, argument template) per dialogue program. All three programs
    # print the chosen item's value on standard output, which
    # _readvalue(_readfrom) turns into a single string result.
    info = {
        "kdialog" : (_readvalue(_readfrom), ["--menu", String("text"), MenuItemList("items")]),
        "zenity" : (_readvalue(_readfrom), ["--list", StringKeyword("--text", "text"), StringKeywords("--column", "titles"),
            MenuItemList("items")]
            ),
        "Xdialog" : (_readvalue(_readfrom), ["--stdout", "--menubox",
            String("text"), Integer("height"), Integer("width"), Integer("list_height"), MenuItemList("items")]
            ),
        }
    # Item class used by add(); subclasses may override both of these.
    item = MenuItem
    number_of_titles = 2

    def __init__(self, text, titles, items=None, width=None, height=None, list_height=None):
        """
        Initialise a menu with the given heading 'text', column 'titles', and
        optional 'items' (which may be added later), 'width' (in characters),
        'height' (in characters) and 'list_height' (in items).
        """
        Simple.__init__(self, text, width, height)
        # Left-pad with empty strings then keep the rightmost
        # number_of_titles entries, so the column count is always exact
        # regardless of how many titles the caller supplied.
        self.titles = ([""] * self.number_of_titles + titles)[-self.number_of_titles:]
        self.items = items or []
        self.list_height = list_height

    def add(self, *args, **kw):
        """
        Add an item, passing the given arguments to the appropriate item class.
        """
        self.items.append(self.item(*args, **kw))
class RadioList(Menu):
"""
A list of radio buttons, one of which being selectable.
Options: text, width (in characters), height (in characters),
list_height (in items), items (MenuItem objects), titles
Response: a list of values corresponding to chosen items | |
... 'L': [1, 1, 2, 2]
... })
... }
... )
>>> repartitioned_mp = mp.partition_on(['P', 'L'])
>>> assert [mp["label"] for mp in repartitioned_mp.metapartitions] == [
... "P=1/L=1/partition_label",
... "P=1/L=2/partition_label",
... "P=2/L=1/partition_label",
... "P=2/L=2/partition_label"
... ]
Parameters
----------
partition_on: list or str
"""
if partition_on == self.partition_keys:
return self
for partition_column in partition_on:
if partition_column in self.indices:
raise ValueError(
"Trying to `partition_on` on a column with an explicit index!"
)
new_mp = self.as_sentinel().copy(partition_keys=partition_on)
if isinstance(partition_on, str):
partition_on = [partition_on]
partition_on = self._ensure_compatible_partitioning(partition_on)
new_data = self._partition_data(partition_on)
for label, data_dct in new_data.items():
tmp_mp = MetaPartition(
label=label,
files=self.files,
data=data_dct,
dataset_metadata=self.dataset_metadata,
metadata_version=self.metadata_version,
indices={},
table_meta={
table: normalize_column_order(schema, partition_on).with_origin(
"{}/{}".format(table, label)
)
for table, schema in self.table_meta.items()
},
partition_keys=partition_on,
)
new_mp = new_mp.add_metapartition(tmp_mp)
if self.indices:
new_mp = new_mp.build_indices(columns=self.indices.keys())
return new_mp
def _ensure_compatible_partitioning(self, partition_on):
if (
not self.partition_keys
or self.partition_keys
and (len(partition_on) >= len(self.partition_keys))
and (self.partition_keys == partition_on[: len(self.partition_keys)])
):
return partition_on[len(self.partition_keys) :]
else:
raise ValueError(
"Incompatible partitioning encountered. `partition_on` needs to include the already "
"existing partition keys and must preserve their order.\n"
"Current partition keys: `{}`\n"
"Partition on called with: `{}`".format(
self.partition_keys, partition_on
)
)
    def _partition_data(self, partition_on):
        """
        Split every table of this partition into sub-frames grouped by the
        values of the `partition_on` columns.

        Returns a dict mapping the new partition label (existing quoted
        indices + quoted `partition_on` values + the original base label) to
        a dict of table name -> dataframe with the partition columns dropped.
        Empty input tables are propagated into every produced partition so no
        table disappears from the table set.
        """
        # Recover indices already encoded in the current label plus the plain
        # base label; both are reused when composing the new labels below.
        existing_indices, base_label = decode_key("uuid/table/{}".format(self.label))[
            2:
        ]
        dct = dict()
        empty_tables = []
        for table, df in self.data.items():
            # Implementation from pyarrow
            # See https://github.com/apache/arrow/blob/b33dfd9c6bd800308bb1619b237dbf24dea159be/python/pyarrow/parquet.py#L1030 # noqa: E501
            # column sanity checks
            data_cols = set(df.columns).difference(partition_on)
            missing_po_cols = set(partition_on).difference(df.columns)
            if missing_po_cols:
                raise ValueError(
                    "Partition column(s) missing: {}".format(
                        ", ".join(sorted(missing_po_cols))
                    )
                )
            if len(data_cols) == 0:
                raise ValueError("No data left to save outside partition columns")
            # To be aligned with open source tooling we drop the index columns and recreate
            # them upon reading as it is done by fastparquet and pyarrow
            partition_keys = [df[col] for col in partition_on]
            # The handling of empty dfs is not part of the arrow implementation
            if df.empty:
                empty_tables.append((table, df))
            data_df = df.drop(partition_on, axis="columns")
            for value, group in data_df.groupby(by=partition_keys, sort=False):
                partitioning_info = []
                # groupby yields a scalar key when grouping on one column,
                # a tuple otherwise; normalise to a list for zip() below
                if pd.api.types.is_scalar(value):
                    value = [value]
                if existing_indices:
                    partitioning_info.extend(quote_indices(existing_indices))
                partitioning_info.extend(quote_indices(zip(partition_on, value)))
                partitioning_info.append(base_label)
                new_label = "/".join(partitioning_info)
                if new_label not in dct:
                    dct[new_label] = {}
                dct[new_label][table] = group
        # Re-attach empty tables (minus the partition columns) to every
        # produced partition so each partition keeps the full table set.
        for label, table_dct in dct.items():
            for empty_table, df in empty_tables:
                if empty_table not in table_dct:
                    table_dct[empty_table] = df.drop(labels=partition_on, axis=1)
        return dct
@staticmethod
def merge_indices(metapartitions):
list_of_indices = []
for mp in metapartitions:
for sub_mp in mp:
if sub_mp.indices:
list_of_indices.append(sub_mp.indices)
return merge_indices_algo(list_of_indices)
@staticmethod
def _merge_labels(metapartitions, label_merger=None):
# Use the shortest of available labels since this has to be the partition
# label prefix
new_label = None
# FIXME: This is probably not compatible with >= v3
if label_merger is None:
for mp in metapartitions:
label = mp.label
if new_label is None or len(label) < len(new_label):
new_label = label
continue
else:
new_label = label_merger([mp.label for mp in metapartitions])
return new_label
@staticmethod
def _merge_metadata(metapartitions, metadata_merger=None):
if metadata_merger is None:
metadata_merger = combine_metadata
new_ds_meta = metadata_merger([mp.dataset_metadata for mp in metapartitions])
return new_ds_meta
    @staticmethod
    def merge_metapartitions(metapartitions, label_merger=None, metadata_merger=None):
        """
        Merge several metapartitions into a single one: dataframes are pooled
        per label (colliding labels get an ``_<ix>`` suffix), labels and
        dataset metadata are merged via the given mergers, and the maximum
        metadata version wins.
        """
        LOGGER.debug("Merging metapartitions")
        data = defaultdict(list)
        new_metadata_version = -1
        logical_conjunction = None
        for mp in metapartitions:
            new_metadata_version = max(new_metadata_version, mp.metadata_version)
            for label, df in mp.data.items():
                data[label].append(df)
            # NOTE(review): `logical_conjunction` starts as None, so the first
            # mp carrying any logical_conjunction makes the inequality below
            # true and raises; the assignment branch looks unreachable for
            # non-None values. Confirm whether mixed/identical conjunctions
            # were ever meant to be accepted here.
            if mp.logical_conjunction or logical_conjunction:
                if logical_conjunction != mp.logical_conjunction:
                    raise TypeError(
                        "Can only merge metapartitions belonging to the same logical partition."
                    )
                else:
                    logical_conjunction = mp.logical_conjunction
        new_data = {}
        for label in data:
            if len(data[label]) == 1:
                # Unique label: keep the dataframe as-is.
                new_data[label] = data[label][0]
            else:
                # Label collision: disambiguate with a positional suffix.
                for ix, idf in enumerate(data[label]):
                    new_label = "{}_{}".format(label, ix)
                    new_data[new_label] = idf
        new_label = MetaPartition._merge_labels(metapartitions, label_merger)
        new_ds_meta = MetaPartition._merge_metadata(metapartitions, metadata_merger)
        new_mp = MetaPartition(
            label=new_label,
            data=new_data,
            dataset_metadata=new_ds_meta,
            metadata_version=new_metadata_version,
            logical_conjunction=logical_conjunction,
        )
        return new_mp
@staticmethod
def concat_metapartitions(metapartitions, label_merger=None, metadata_merger=None):
LOGGER.debug("Concatenating metapartitions")
data = defaultdict(list)
schema = defaultdict(list)
new_metadata_version = -1
for mp in metapartitions:
new_metadata_version = max(new_metadata_version, mp.metadata_version)
for table in mp.data:
data[table].append(mp.data[table])
schema[table].append(mp.table_meta[table])
# Don't care about the partition_keys. If we try to merge
# MetaPartitions without alignment the schemas won't match.
partition_keys = mp.partition_keys
new_data = {}
new_schema = {}
for table in data:
if len(data[table]) == 1:
new_data[table] = data[table][0]
else:
new_data[table] = pd.concat(data[table])
new_schema[table] = validate_compatible(schema[table])
new_label = MetaPartition._merge_labels(metapartitions, label_merger)
new_ds_meta = MetaPartition._merge_metadata(metapartitions, metadata_merger)
new_mp = MetaPartition(
label=new_label,
data=new_data,
dataset_metadata=new_ds_meta,
metadata_version=new_metadata_version,
table_meta=new_schema,
partition_keys=partition_keys,
)
return new_mp
@_apply_to_list
def delete_from_store(self, dataset_uuid, store):
# Delete data first
for file_key in self.files.values():
store.delete(file_key)
return self.copy(files={}, data={}, metadata={})
def _unique_label(label_list):
label = os.path.commonprefix(label_list)
if len(label) == 0:
label = "_".join(label_list)
while len(label) > 0 and not label[-1].isalnum():
label = label[:-1]
return label
def partition_labels_from_mps(mps):
    """
    Get a list of partition labels, flattening any nested meta partitions in
    the input and ignoring sentinels.

    Parameters
    ----------
    mps: List[MetaPartition]

    Returns
    -------
    partition_labels: List[str]
    """
    labels = []
    for mp in mps:
        # A nested MetaPartition reports len > 1 and iterates its children;
        # a plain one is treated as its own single entry.
        candidates = list(mp) if len(mp) > 1 else [mp]
        labels.extend(sub.label for sub in candidates if not sub.is_sentinel)
    return labels
def parse_input_to_metapartition(
    obj, metadata_version=None, expected_secondary_indices=False
) -> MetaPartition:
    """
    Parse user input into a MetaPartition.

    Supported input modes:

    1. dict with partition information. Keys:

       * **label** - (optional) unique partition label; a UUID is generated
         via :func:`kartothek.core.uuid.gen_uuid` when absent.
       * **data** - a dict or a list of ``(table_name, pandas.DataFrame)``
         tuples mapping table names to payload data.
       * **indices** - deprecated; use the ``secondary_indices`` keyword of
         the "write"/"update" functions instead. Partition level indices are
         finally merged via
         :func:`kartothek.io_components.metapartition.MetaPartition.merge_indices`.

    2. ``pandas.DataFrame`` - stored under the table name :data:`SINGLE_TABLE`
       with a generated UUID label.
    3. :class:`~kartothek.io_components.metapartition.MetaPartition` - passed
       through unchanged.
    4. a list of any of the above - parsed element-wise into a nested
       MetaPartition where each element is one physical partition.

    Parameters
    ----------
    obj : Union[Dict, pd.DataFrame, kartothek.io_components.metapartition.MetaPartition]
    metadata_version : int, optional
        The kartothek dataset specification version
    expected_secondary_indices : Optional[Union[Iterable[str], Literal[False]]]
        Iterable of expected index columns; an empty iterable means "no
        indices expected", while `False` (default) or `None` disables the
        check. Only consulted for dict input.

    Raises
    ------
    ValueError
        In case the given input is not understood

    Returns
    -------
    MetaPartition
    """
    if obj is None:
        obj = []
    if isinstance(obj, list):
        if not obj:
            return MetaPartition(label=None, metadata_version=metadata_version)
        # Parse each element on its own, then chain them into one nested MP.
        parsed = [
            parse_input_to_metapartition(
                obj=element,
                metadata_version=metadata_version,
                expected_secondary_indices=expected_secondary_indices,
            )
            for element in obj
        ]
        mp = parsed[0]
        for other in parsed[1:]:
            mp = mp.add_metapartition(other)
        return mp
    if isinstance(obj, dict):
        data = dict(obj["data"]) if isinstance(obj["data"], list) else obj["data"]
        indices = obj.get("indices", {})
        if indices:
            warnings.warn(
                "The explicit input of indices using the `indices` key is deprecated."
                'Use the `secondary_indices` keyword argument of "write" and "update" functions instead.',
                DeprecationWarning,
            )
        if expected_secondary_indices not in (False, None):
            # Validate explicit input of indices
            _ensure_valid_indices(
                secondary_indices=expected_secondary_indices, mp_indices=indices
            )
        return MetaPartition(
            # TODO: Deterministic hash for the input?
            label=obj.get("label", gen_uuid()),
            data=data,
            indices=indices,
            metadata_version=metadata_version,
        )
    if isinstance(obj, pd.DataFrame):
        return MetaPartition(
            label=gen_uuid(),
            data={SINGLE_TABLE: obj},
            metadata_version=metadata_version,
        )
    if isinstance(obj, MetaPartition):
        return obj
    raise ValueError("Unexpected type: {}".format(type(obj)))
def _ensure_valid_indices(secondary_indices, mp_indices):
if not secondary_indices:
if mp_indices:
raise ValueError(
"Incorrect indices provided for dataset.\n"
f"Expected index columns: {secondary_indices}"
f"Provided index: {mp_indices}"
)
else:
secondary_indices = set(secondary_indices)
# If the dataset has `secondary_indices` defined, then these indices will be build later so there is no need to
# ensure that they are also defined here (on a | |
axis=2)[:config.num_encoding_dim_as_attention_weight]
for logit in list_of_logits:
# print(logit.get_shape().as_list())
logit_tmp = tf.expand_dims(logit, axis=2)
# print(logit_tmp.get_shape().as_list())
wgt_tmp = exp_mask(logit_tmp, mask)
# print(wgt_tmp.get_shape().as_list())
weighted_sum = tf.reduce_sum(tf.nn.softmax(wgt_tmp, dim=1) * matrix, axis=1)
features.append(weighted_sum)
features = [tf.expand_dims(f, axis=1) for f in features]
return tf.concat(features, axis=1)
if len(features) == 0:
return matrix
else:
features = [tf.expand_dims(f, axis=1) for f in features]
ft = tf.concat(features, axis=1)
return tf.concat([ft, matrix], axis=1)
def multi_perspective_merge(self, config, lhs, rhs, scope = None):
with tf.variable_scope(scope or "multi_perspective_merge"):
features = []
if config.MPM_max_pool:
l = tf.reduce_max(lhs, axis=1)
r = tf.reduce_max(rhs, axis=1)
features.append(self.multi_perspective_generation(config, l, r, 16, "MPM_max_pool"))
if len(features) == 0:
return lhs
else:
ftr = tf.concat(features, axis=1)
print("{} out shape".format(scope))
print(ftr.get_shape().as_list())
return ftr
def multi_perspective_generation(self, config, lhs, rhs, perspectives, scope):
with tf.variable_scope(scope or "multi_perspective_matching"):
dim = lhs.get_shape().as_list()[-1]
comm = lhs * rhs #
comm_aug = tf.tile(tf.expand_dims(comm, axis=1), [1, perspectives, 1])
perspect_weight = tf.get_variable("perspect_weight", shape=[perspectives, dim])
return comm_aug * perspect_weight
def conv_blocks(config, arg, filter_size, name, is_train, tensor_dict=None):
    """
    Deep residual CNN feature extractor: a stack of `residual` blocks with
    three down-sampling stages (max pooling or learnable strided convolution),
    an optional final pooling, and a flatten to a [N, D] feature vector.
    Many stages are toggled by config flags to shrink/grow the network.
    """
    with tf.variable_scope(name or "conv_blocks"):
        def conv_pooling(res, name):
            # Learnable 2x2 stride-2 convolution used in place of max pooling.
            with tf.variable_scope(name or "conv_pooling"):
                chan = res.get_shape().as_list()[-1]
                filters = tf.get_variable("filter", shape=[2,2,chan,chan],dtype='float')
                bias = tf.get_variable("bias", shape=[chan], dtype='float')
                return tf.nn.conv2d(res, filters, [1,2,2,1], "VALID", name='conv_pooling') + bias
        # Activation selection: first truthy config flag wins.
        if config.use_elu:
            act = tf.nn.elu
        elif config.conv_use_tanh_act:
            act = tf.tanh
        elif config.use_selu:
            act = selu
        elif config.use_PRelu:
            act = PRelu
        else:
            act = tf.nn.relu
        if config.conv_layer_norm:
            norm=tf.contrib.layers.layer_norm
        else:
            norm=tf.contrib.layers.batch_norm
        init_dim = arg.get_shape().as_list()[-1]
        if config.transitioning_conv_blocks:
            res = residual(config, arg, init_dim, 336, filter_size, "res_transition_1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            # BUG FIX: the two blocks below previously re-consumed `arg`, so
            # their declared input channel counts (336 / 224) could never match
            # the actual tensor; chain `res` through the transition stack.
            res = residual(config, res, 336, 224, filter_size, "res_transition_2", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            res = residual(config, res, 224, config.res_conv_1_chan, filter_size, "res1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        else:
            res = residual(config, arg, init_dim, config.res_conv_1_chan, filter_size, "res1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        print(res.get_shape().as_list())
        # N * 48 * 48 * config.res_conv_1_chan
        res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res2", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        print(res.get_shape().as_list())
        # N * 48 * 48 * config.res_conv_1_chan
        if not config.rm_1_chan1_conv:
            res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res3", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        #try more poolings (MAX here) [N, 24, 24, config.res_conv_1_chan]
        if config.use_stride2_conv_replace_max_pooling:
            res = conv_pooling(res, "first_conv_pool")
        else:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        if not config.even_smaller_CNN:
            res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res4", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res5", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # N * 24 * 24 * config.res_conv_2_chan
        res = residual(config, res, config.res_conv_1_chan, config.res_conv_2_chan, filter_size, "res6", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if config.use_stride2_conv_replace_max_pooling:
            res = conv_pooling(res, "second_conv_pool")
        else:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        res = residual(config, res, config.res_conv_2_chan, config.res_conv_2_chan, filter_size, "res7", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if not config.even_smaller_CNN:
            res = residual(config, res, config.res_conv_2_chan, config.res_conv_2_chan, filter_size, "res8", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if config.add_1_chan2_conv:
            res = residual(config, res, config.res_conv_2_chan, config.res_conv_2_chan, filter_size, "res8_1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        res = residual(config, res, config.res_conv_2_chan, config.res_conv_3_chan, filter_size, "res9", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if config.use_stride2_conv_replace_max_pooling:
            res = conv_pooling(res, "third_conv_pool")
        else:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        res = residual(config, res, config.res_conv_3_chan, config.res_conv_3_chan, filter_size, "res13", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if not config.rm_1_chan3_conv:
            res = residual(config, res, config.res_conv_3_chan, config.res_conv_3_chan, filter_size, "res14", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        res = residual(config, res, config.res_conv_3_chan, config.res_conv_3_chan, filter_size, "res15", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # Final pooling: global average, average+max, plain 2x2 max, or none.
        if config.last_avg_pooling:
            res = tf.nn.avg_pool(res, [1,6,6,1],[1,1,1,1],"VALID")
        elif config.last_avg_max_pooling:
            max_pool = tf.nn.max_pool(res, [1,6,6,1],[1,1,1,1], "VALID")
            avg_pool = tf.nn.avg_pool(res, [1,6,6,1],[1,1,1,1], "VALID")
            res = tf.concat([max_pool, avg_pool], axis=3)
        elif not config.wo_last_max_pool:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        shape_list = res.get_shape().as_list()
        print(shape_list)
        # Collapse spatial dims and channels into a single feature vector.
        out_final = tf.reshape(res, [-1, shape_list[1]*shape_list[2]*shape_list[3]])
        if config.add_tensor_to_tensor_dict:
            tensor_dict['conv_out_before_reshape'] = res
            tensor_dict['conv_out_after_reshape'] = out_final
        return out_final
def shuffle_add(config, dense_tensor):
    """
    Randomly pair up the channel slices of `dense_tensor` and sum each pair,
    halving the channel count. Pairing is random per graph construction
    (np.random.shuffle), not per step.
    """
    list_of_logits = tf.unstack(dense_tensor, axis=3)
    np.random.shuffle(list_of_logits)
    list_of_new_logits = []
    # BUG FIX: `len(...) / 2` yields a float under Python 3 and range()
    # rejects it; use integer floor division. An odd trailing channel is
    # dropped, as before.
    for i in range(len(list_of_logits) // 2):
        list_of_new_logits.append(list_of_logits[2 * i] + list_of_logits[2 * i + 1])
    list_of_new_logits = [tf.expand_dims(tensor, axis=3) for tensor in list_of_new_logits]
    new_logit = tf.concat(list_of_new_logits, axis=3)
    return new_logit
def add_features(config, dense_attention, p_mask, h_mask):
    """
    Augment the dense attention cube with pooled summary maps (max / mean /
    min / sum over channels), each optionally with softmax variants, then
    concatenate everything with the original cube on the channel axis.
    """
    PL = dense_attention.get_shape().as_list()[1]
    HL = dense_attention.get_shape().as_list()[2]
    p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, HL, 1]), tf.bool), axis=3)
    h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, PL, 1, 1]), tf.bool), axis=3)
    # ph_mask = p_mask_aug & h_mask_aug TODO
    ph_mask = None
    collected = []
    # Each helper appends its pooled map (plus optional softmax variants)
    # to `collected` in place.
    if config.dense_attention_max_feature:
        dense_attention_max_feature(config, dense_attention, collected, ph_mask)
    if config.dense_attention_mean_feature:
        dense_attention_mean_feature(config, dense_attention, collected, ph_mask)
    if config.dense_attention_min_feature:
        dense_attention_min_feature(config, dense_attention, collected, ph_mask)
    if config.dense_attention_sum_feature:
        dense_attention_sum_feature(config, dense_attention, collected, ph_mask)
    collected.append(dense_attention)
    return tf.concat(collected, axis=3)
def dense_attention_max_feature(config, bi_att_mx, collection, ph_mask):
    """Append the channel-wise max map and its optional softmax variants to `collection`."""
    pooled = tf.reduce_max(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(pooled, axis=3))
    # [row-wise softmax enabled, column-wise softmax enabled]
    switch = [
        bool(config.dense_attention_max_row_wise_softmax_feature),
        bool(config.dense_attention_max_column_wise_softmax_feature),
    ]
    dense_logits_softmax_features(config, pooled, collection, ph_mask, switch, scope='max_features')
def dense_attention_mean_feature(config, bi_att_mx, collection, ph_mask):
    """Append the channel-wise mean map and its optional softmax variants to `collection`."""
    pooled = tf.reduce_mean(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(pooled, axis=3))
    # [row-wise softmax enabled, column-wise softmax enabled]
    switch = [
        bool(config.dense_attention_mean_row_wise_feature),
        bool(config.dense_attention_mean_column_wise_feature),
    ]
    dense_logits_softmax_features(config, pooled, collection, ph_mask, switch, scope='mean_features')
def dense_attention_min_feature(config, bi_att_mx, collection, ph_mask):
    """
    Append the channel-wise minimum map of the attention cube to `collection`,
    plus optional row-/column-wise softmax variants.
    """
    min_feature = tf.reduce_min(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(min_feature, axis=3))
    switch = [False, False]
    if config.dense_attention_min_row_wise_feature:
        switch[0] = True
    if config.dense_attention_min_column_wise_feature:
        switch[1] = True
    # BUG FIX: scope was 'mean_features' (copy-paste from the mean variant),
    # which filed the min features under a misleading variable scope. Note
    # this renames the ops, which matters if checkpoints reference them.
    dense_logits_softmax_features(config, min_feature, collection, ph_mask, switch, scope='min_features')
def dense_attention_sum_feature(config, bi_att_mx, collection, ph_mask):
    """
    Append the channel-wise sum map of the attention cube to `collection`,
    plus optional row-/column-wise softmax variants.
    """
    sum_feature = tf.reduce_sum(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(sum_feature, axis=3))
    switch = [False, False]
    if config.dense_attention_sum_row_wise_feature:
        switch[0] = True
    if config.dense_attention_sum_column_wise_feature:
        switch[1] = True
    # BUG FIX: scope was 'mean_features' (copy-paste from the mean variant),
    # which filed the sum features under a misleading variable scope. Note
    # this renames the ops, which matters if checkpoints reference them.
    dense_logits_softmax_features(config, sum_feature, collection, ph_mask, switch, scope='sum_features')
def bi_attention_mx(config, is_train, p, h, p_mask=None, h_mask=None, scope=None, tensor_dict=None): #[N, L, 2d]
    """Build dense pairwise attention logits between sequences p and h."""
    with tf.variable_scope(scope or "dense_logit_bi_attention"):
        p_len = p.get_shape().as_list()[1]
        h_len = h.get_shape().as_list()[1]
        # Broadcast both sequences so every (p_i, h_j) pair lines up.
        p_tiled = tf.tile(tf.expand_dims(p, 2), [1, 1, h_len, 1])
        h_tiled = tf.tile(tf.expand_dims(h, 1), [1, p_len, 1, 1])  # [N, PL, HL, 2d]
        # Masking of the pair grid is currently disabled (was p_mask & h_mask).
        ph_mask = None
        if config.super_dense_attention:
            logits = p_tiled * h_tiled
        elif config.super_dense_attention_linear:
            projected = linear(p_tiled, p_tiled.get_shape().as_list()[-1] ,True, bias_start=0.0, scope="super_dense_attention_linear", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
            logits = projected * h_tiled
        elif config.super_super_dense_attention:
            logits = tf.concat([p_tiled, h_tiled, p_tiled * h_tiled], axis=3)
        else:
            logits = dense_logits(config, [p_tiled, h_tiled], config.dense_logit_features_num, True, wd=config.wd, mask=ph_mask, is_train=is_train, func=config.dense_att_logit_func, scope='h_logits') # [N, PL, HL]
        return logits
def dense_logits_softmax_features(config, dense_logit_feature, collection, ph_mask, switch , scope=None):
    """
    Append row-wise and/or column-wise softmax variants of a pooled attention
    map to `collection`.

    switch is a two-element flag list: [row-wise softmax, column-wise softmax].
    """
    with tf.variable_scope(scope or "dense_logits_softmax_features"):
        # NOTE(review): callers currently pass ph_mask=None — this assumes
        # exp_mask tolerates a None mask, and the tf.cast(ph_mask, ...) below
        # would fail for None; confirm against exp_mask and the intended
        # (currently disabled) masking in the callers.
        dense_logit_with_exp_mask = exp_mask(dense_logit_feature, ph_mask) #[N, PL, HL, 20]
        dense_logit_softmax_col = None
        dense_logit_softmax_row = None
        dense_logit_with_exp_mask = tf.expand_dims(dense_logit_with_exp_mask, axis=3)
        if switch[0]:
            print("dense logit with exp mask size")
            print(dense_logit_with_exp_mask.get_shape().as_list())
            # Normalise over the HL axis (per premise position).
            dense_logit_softmax_row = tf.nn.softmax(dense_logit_with_exp_mask, dim=2, name='softmax_row')
        if switch[1]:
            # Normalise over the PL axis (per hypothesis position).
            dense_logit_softmax_col = tf.nn.softmax(dense_logit_with_exp_mask, dim=1, name='softmax_col')
        mask = tf.expand_dims(tf.cast(ph_mask,tf.float32), axis=3)
        # Zero out masked cells after the softmax before collecting.
        if dense_logit_softmax_row is not None:
            dense_logit_softmax_row = mask * dense_logit_softmax_row
            print("mask shape")
            print(mask.get_shape().as_list())
            print("single layer feature")
            print(dense_logit_softmax_row.get_shape().as_list())
            collection.append(dense_logit_softmax_row)
        if dense_logit_softmax_col is not None:
            dense_logit_softmax_col = mask * dense_logit_softmax_col
            collection.append(dense_logit_softmax_col)
def self_attention(config, is_train, p, p_mask=None, scope=None, tensor_dict=None): #[N, L, 2d]
with tf.variable_scope(scope or "self_attention"):
PL = p.get_shape().as_list()[1]
dim = p.get_shape().as_list()[-1]
# HL = tf.shape(h)[1]
p_aug_1 = tf.tile(tf.expand_dims(p, 2), [1,1,PL,1])
p_aug_2 = tf.tile(tf.expand_dims(p, 1), [1,PL,1,1]) #[N, PL, HL, | |
'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('v_6', 't_2'), ('v_4', 't_3'), ('v_7', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('v_9', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_4'), ('t_3', 'v_8'),
('v_5', 't_2'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'), ('v_6', 't_2'),
('v_4', 't_3'), ('v_8', 't_3'), ('v_7', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('v_9', 'c_1'), ('t_3', 'c_1'), ('v_8', 't_2'), ('t_3', 'v_4'), ('v_5', 't_2'),
('t_2', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_4', 't_3'),
('v_7', 't_2'), ('t_2', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('v_9', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('v_5', 't_2'),
('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_4', 't_3'), ('v_7', 't_2'),
('v_8', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('v_9', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_8', 't_2'), ('t_3', 'v_4'),
('v_5', 't_2'), ('t_2', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_5'), ('t_3', 'v_6'),
('v_4', 't_3'), ('v_7', 't_3'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('v_8', 't_2'), ('v_9', 'c_1'), ('t_3', 'v_4'),
('t_2', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_6'), ('v_6', 't_2'), ('v_4', 't_3'),
('v_7', 't_2'), ('v_5', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('v_9', 'c_1'), ('t_3', 'v_4'), ('t_3', 'v_8'),
('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_6'), ('v_6', 't_2'), ('v_4', 't_3'), ('v_7', 't_2'),
('v_8', 't_3'), ('v_5', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_8', 't_2'), ('t_3', 'v_4'),
('v_9', 'c_1'), ('t_2', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_2', 'v_6'), ('v_6', 't_2'),
('v_4', 't_3'), ('v_5', 't_3'), ('v_7', 't_3'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('v_8', 't_2'), ('v_9', 'c_1'), ('t_3', 'v_4'),
('t_2', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_6'), ('v_4', 't_3'), ('v_7', 't_2'),
('v_5', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_4'),
('c_1', 't_2'), ('v_5', 't_2'), ('t_2', 'v_5'), ('t_2', 'v_6'), ('v_6', 't_2'), ('v_7', 't_3'),
('v_8', 't_3'), ('v_9', 't_3'), ('v_4', 't_2'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_4'), ('c_1', 't_2'),
('v_5', 't_2'), ('t_2', 'v_5'), ('v_4', 't_2'), ('t_3', 'v_6'), ('v_7', 't_2'), ('v_8', 't_3'),
('v_9', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('v_8', 't_2'), ('v_5', 't_2'), ('t_3', 'v_9'),
('t_2', 'v_4'), ('c_1', 't_2'), ('t_2', 'v_8'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_7', 't_3'),
('v_9', 't_3'), ('v_4', 't_2'), ('c_1', 't_3'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'c_1'), ('t_3', 'v_7'), ('t_3', 'v_8'), ('v_5', 't_2'), ('t_2', 'v_4'),
('c_1', 't_2'), ('t_2', 'v_5'), ('t_3', 'v_6'), ('v_7', 't_3'), ('v_9', 't_2'), ('v_8', 't_3'),
('v_4', 't_2'), ('c_1', 't_3'), ('t_2', 'v_9'), ('v_6', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), ('c_1', 'v_7'), ('c_1', 'v_6'), ('c_1', 't_3'), ('t_2', 'v_9'), ('c_1', 'v_4'),
('c_1', 'v_8'), ('c_1', 't_2'), ('c_1', 'v_9'), ('t_3', 'v_5'), ('t_3', 'v_7'), ('t_2', 'v_4'),
('t_2', 'v_8'), ('t_3', 'v_6')}},
{'DOMAIN': {'v_5', 't_3', 'v_6', 'v_7', 't_2', 'v_9', 'v_4', 'v_8', 'c_1'}, 'City': {'c_1'},
'Town': {'t_3', 't_2'}, 'Village': {'v_5', 'v_6', 'v_7', 'v_9', 'v_8', 'v_4'},
'Road': {('t_2', 'c_1'), ('t_3', 'v_5'), ('t_3', 'c_1'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_4'),
('c_1', 't_2'), ('t_2', 'v_6'), ('v_4', 't_2'), ('v_6', 't_2'), ('v_7', 't_2'), ('v_8', 't_3'),
('v_9', 't_3'), ('v_5', 't_3'), ('t_2', 'v_7'), ('c_1', 't_3')},
'>': {('c_1', 'v_5'), ('t_3', 'v_4'), ('t_3', 'v_8'), ('t_3', 'v_9'), ('t_2', 'v_5'), ('t_2', 'v_6'),
('t_2', 'v_7'), | |
INTERFACE
- name: Create vPC interfaces
cisco.dcnm.dcnm_interface: &vpc_merge
fabric: mmudigon-fabric
state: merged # only choose from [merged, replaced, deleted, overridden, query]
config:
- name: vpc750 # should be of the form vpc<port-id>
type: vpc # choose from this list [pc, vpc, sub_int, lo, eth]
switch: # provide switches of vPC pair
- ["192.168.127.12",
"192.168.127.12"]
deploy: true # choose from [true, false]
profile:
admin_state: true # choose from [true, false]
mode: trunk # choose from [trunk, access]
peer1_pcid: 100 # choose between [Min:1, Max:4096], if not given, will be VPC port-id
peer2_pcid: 100 # choose between [Min:1, Max:4096], if not given, will be VPC port-id
peer1_members: # member interfaces on peer 1
- e1/24
peer2_members: # member interfaces on peer 2
- e1/24
pc_mode: 'active' # choose from ['on', 'active', 'passive']
bpdu_guard: true # choose from [true, false, 'no']
port_type_fast: true # choose from [true, false]
mtu: jumbo # choose from [default, jumbo]
peer1_allowed_vlans: none # choose from [none, all, vlan range]
peer2_allowed_vlans: none # choose from [none, all, vlan range]
peer1_description: "VPC acting as trunk peer1"
peer2_description: "VPC acting as trunk peer2"
- name: Replace vPC interfaces
cisco.dcnm.dcnm_interface:
fabric: mmudigon-fabric
state: replaced # only choose from [merged, replaced, deleted, overridden, query]
config:
- name: vpc750 # should be of the form vpc<port-id>
type: vpc # choose from this list [pc, vpc, sub_int, lo, eth]
switch: # provide switches of vPC pair
- ["192.168.127.12",
"192.168.127.12"]
deploy: true # choose from [true, false]
profile:
admin_state: false ## choose from [true, false]
mode: trunk # choose from [trunk, access]
peer1_pcid: 100 # choose between [Min:1, Max:4096], if not given, will be VPC port-id
peer2_pcid: 100 # choose between [Min:1, Max:4096], if not given, will be VPC port-id
peer1_members: ## member interfaces on peer 1
- e1/26
peer2_members: ## member interfaces on peer 2
- e1/26
pc_mode: 'active' ## choose from ['on', 'active', 'passive']
bpdu_guard: false ## choose from [true, false, 'no']
port_type_fast: false ## choose from [true, false]
mtu: default ## choose from [default, jumbo]
peer1_allowed_vlans: all ## choose from [none, all, vlan range]
peer2_allowed_vlans: all ## choose from [none, all, vlan range]
peer1_description: "VPC acting as trunk peer1 - modified"
peer2_description: "VPC acting as trunk peer2 - modified"
peer1_cmds: # Freeform config
- no shutdown
peer2_cmds: # Freeform config
- no shutdown
# To delete or reset a particular interface on a specific switch in the fabric
- name: Delete vPC interfaces
cisco.dcnm.dcnm_interface:
fabric: mmudigon-fabric
state: deleted # only choose from [merged, replaced, deleted, overridden, query]
config:
- name: vpc750 # should be of the form vpc<port-id>
switch: # provide switches of vPC pair
- ["192.168.127.12",
"192.168.127.12"]
- name: Override vPC interfaces
cisco.dcnm.dcnm_interface:
fabric: mmudigon-fabric
state: overridden # only choose from [merged, replaced, deleted, overridden, query]
config:
- name: vpc752 # should be of the form vpc<port-id>
type: vpc # choose from this list [pc, vpc, sub_int, lo, eth]
switch: # provide switches of vPC pair
- ["192.168.127.12",
"192.168.127.12"]
deploy: true # choose from [true, false]
profile:
admin_state: true # choose from [true, false]
mode: trunk # choose from [trunk, access]
peer1_pcid: 752 # choose between [Min:1, Max:4096], if not given, will be VPC port-id
#peer2_pcid: 1 # choose between [Min:1, Max:4096], if not given, will be VPC port-id
peer1_members: # member interfaces on peer 1
- e1/26
peer2_members: # member interfaces on peer 2
- e1/27
pc_mode: 'on' # choose from ['on', 'active', 'passive']
bpdu_guard: true # choose from [true, false, no]
port_type_fast: true # choose from [true, false]
mtu: jumbo # choose from [default, jumbo]
peer1_allowed_vlans: none # choose from [none, all, vlan range]
peer2_allowed_vlans: none # choose from [none, all, vlan range]
peer1_description: "VPC acting as trunk peer1"
peer2_description: "VPC acting as trunk peer2"
peer1_cmds: # Freeform config
- no shutdown
- no shutdown
peer2_cmds: # Freeform config
- no shutdown
- no shutdown
QUERY
- name: Query interface details
cisco.dcnm.dcnm_interface:
fabric: mmudigon-fabric
state: query # only choose from [merged, replaced, deleted, overridden, query]
config:
- switch:
- "192.168.127.12" # provide the switch information where the config is to be deployed
- name: po350
switch:
- "192.168.127.12" # provide the switch information where the config is to be deployed
- name: lo450
switch:
- "192.168.127.12" # provide the switch information where the config is to be deployed
- name: eth1/1
switch:
- "192.168.127.12" # provide the switch information where the config is to be deployed
- name: eth1/15.2
switch:
- "192.168.127.12" # provide the switch information where the config is to be deployed
- name: vpc750
switch:
- "192.168.127.12" # provide the switch information where the config is to be deployed
'''
import time
import json
import re
import copy
import sys
import socket
from textwrap import dedent
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.cisco.dcnm.plugins.module_utils.network.dcnm.dcnm import \
dcnm_send, get_fabric_inventory_details, dcnm_get_ip_addr_info, validate_list_of_dicts, get_ip_sn_dict
import datetime
# Log verbosity levels compared against self.log_verbosity (higher = chattier).
LOG_ERROR = 0
LOG_DEBUG = 4
LOG_VERBOSE = 5
class DcnmIntf:
    def __init__(self, module):
        """Initialize working state for one module run.

        Captures the Ansible module handle and its parameters, sets up the
        empty want/have/diff accumulators used by the merge/replace/delete/
        override/query flows, and fetches the fabric inventory from DCNM so
        switch IPs can be mapped to serial numbers.

        NOTE(review): get_fabric_inventory_details() performs a DCNM REST
        call, so constructing this object requires connectivity.
        """
        self.module = module
        self.params = module.params
        self.fabric = module.params['fabric']
        # Deep-copied so later normalization never mutates the caller's input.
        self.config = copy.deepcopy(module.params.get('config'))
        self.pb_input = []
        self.check_mode = False
        self.intf_info = []
        # Desired state (from playbook) vs. current state (from DCNM).
        self.want = []
        self.have = []
        self.have_all = []
        self.have_all_list = []
        # Diff buckets consumed when pushing changes to DCNM.
        self.diff_create = []
        self.diff_replace = []
        # One delete list per interface type, indexed via self.int_index.
        self.diff_delete = [[],[],[],[],[]]
        self.diff_deploy = []
        self.diff_query = []
        self.log_verbosity = 0
        # Lazily-opened debug log file handle (see log_msg).
        self.fd = None
        self.vpc_ip_sn = {}
        self.changed_dict = [{'merged' : [], 'deleted' : [], 'replaced' : [], 'overridden' : [], 'deploy' : [], 'query' : []}]
        self.inventory_data = get_fabric_inventory_details(self.module, self.fabric)
        self.ip_sn, self.hn_sn = get_ip_sn_dict(self.inventory_data)
        self.dcnm_intf_facts = {
            'fabric' : module.params['fabric'],
            'config' : module.params['config'],
        }
        self.result = dict(
            changed = False,
            diff = [],
            response = []
        )
        # New Interfaces
        # To map keys from self.have to keys from config
        self.keymap = {
            "policy" : "policy",
            "ifName" : "ifname",
            "serialNumber" : "sno",
            "fabricName" : "fabric",
            "IP" : "ipv4_addr",
            "INTF_VRF" : "int_vrf",
            "V6IP" : "ipv6_addr",
            "IPv6" : "ipv6_addr",
            "IPv6_PREFIX" : "ipv6_mask_len",
            "ROUTING_TAG" : "route_tag",
            "ROUTE_MAP_TAG" : "route_tag",
            "CONF" : "cmds",
            "DESC" : "description",
            "VLAN" : "vlan",
            "ADMIN_STATE" : "admin_state",
            "MEMBER_INTERFACES" : "members",
            "PC_MODE" : "pc_mode",
            "BPDUGUARD_ENABLED" : "bpdu_guard",
            "PORTTYPE_FAST_ENABLED" : "port_type_fast",
            "MTU" : "mtu",
            "SPEED" : "speed",
            "ALLOWED_VLANS" : "allowed_vlans",
            "ACCESS_VLAN" : "access_vlan",
            "PREFIX" : "ipv4_mask_len",
            "INTF_NAME" : "ifname",
            "PO_ID" : "ifname",
            "PEER1_PCID" : "peer1_pcid",
            "PEER2_PCID" : "peer2_pcid",
            "PEER1_MEMBER_INTERFACES" : "peer1_members",
            "PEER2_MEMBER_INTERFACES" : "peer2_members",
            "PEER1_ALLOWED_VLANS" : "peer1_allowed_vlans",
            "PEER2_ALLOWED_VLANS" : "peer2_allowed_vlans",
            "PEER1_PO_DESC" : "peer1_description",
            "PEER2_PO_DESC" : "peer2_description",
            "PEER1_PO_CONF" : "peer1_cmds",
            "PEER2_PO_CONF" : "peer2_cmds",
            "PEER1_ACCESS_VLAN" : "peer1_access_vlan",
            "PEER2_ACCESS_VLAN" : "peer2_access_vlan",
        }
        # New Interfaces
        # Maps "<type>_<mode>" keys to DCNM policy template names.
        self.pol_types = {
            "pc_monitor" : "int_monitor_port_channel_11_1",
            "pc_trunk" : "int_port_channel_trunk_host_11_1",
            "pc_access" : "int_port_channel_access_host_11_1",
            "pc_l3" : "int_l3_port_channel",
            "sub_int_subint" : "int_subif_11_1",
            "lo_lo" : "int_loopback_11_1",
            "eth_trunk" : "int_trunk_host_11_1",
            "eth_access" : "int_access_host_11_1",
            "eth_routed" : "int_routed_host_11_1",
            "eth_monitor" : "int_monitor_ethernet_11_1",
            "eth_epl_routed" : "epl_routed_intf",
            "vpc_trunk" : "int_vpc_trunk_host_11_1",
            "vpc_access" : "int_vpc_access_host_11_1"
        }
        # New Interfaces
        # Maps playbook short type names to DCNM interface type strings.
        self.int_types = {
            "pc" : "INTERFACE_PORT_CHANNEL",
            "vpc" : "INTERFACE_VPC",
            "sub_int" : "SUBINTERFACE",
            "lo" : "INTERFACE_LOOPBACK",
            "eth" : "INTERFACE_ETHERNET"
        }
        # New Interfaces
        # Index of each interface type into self.diff_delete's sub-lists.
        self.int_index = {
            "INTERFACE_PORT_CHANNEL" : 0,
            "INTERFACE_VPC" : 1,
            "INTERFACE_ETHERNET" : 2,
            "INTERFACE_LOOPBACK" : 3,
            "SUBINTERFACE" : 4
        }
def log_msg (self, msg):
if (self.fd is None):
self.fd = open("interface.log", "w+")
if (self.fd is not None):
self.fd.write (msg)
# New Interfaces
def dcnm_intf_get_if_name (self, name, if_type):
if ('pc' == if_type):
port_id = re.findall(r'\d+', name)
return ("Port-channel" + str(port_id[0]), port_id[0])
if ('vpc' == if_type):
port_id = re.findall(r'\d+', name)
return ("vPC" + str(port_id[0]), port_id[0])
if ('sub_int' == if_type):
port_id = re.findall(r'\d+\/\d+.\d+', name)
return ("Ethernet" + str(port_id[0]), port_id[0])
if ('lo' == if_type):
port_id = re.findall(r'\d+', name)
return ("Loopback" + str(port_id[0]), port_id[0])
if ('eth' == if_type):
port_id = re.findall(r'\d+\/\d+', name)
return ("Ethernet" + str(port_id[0]), port_id[0])
def dcnm_intf_get_vpc_serial_number(self, sw):
path = '/rest/interface/vpcpair_serial_number?serial_number=' + self.ip_sn[sw]
resp = dcnm_send (self.module, 'GET', path)
if (resp and resp['RETURN_CODE'] == 200):
return resp['DATA']['vpc_pair_sn']
else:
return ''
# Flatten the incoming config database and have the required fileds updated.
# This modified config DB will be used while creating payloads. To avoid
# messing up the incoming config make a copy of it.
def dcnm_intf_copy_config(self):
if (None is self.config):
return
for | |
<reponame>attilabukor/kudu
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#################################################################
# WARNING: This code is not stable and is intended for internal testing only.
#
# Backup and Restore performance test driver.
#
# Example invocation:
# backup-perf.py \
# --spark-submit-command spark2-submit \
# --kudu-spark-tools-jar kudu-spark2-tools_2.11-1.9.0-SNAPSHOT.jar
# --kudu-backup-jar kudu-backup2_2.11-1.9.0-SNAPSHOT.jar
# --num-tasks 20 \
# --master-addresses a123.example.com \
# --impalad-address a123.example.com \
# --backup-path hdfs:///user/foo/backups \
# --partitions 450 \
# --table-data-size-mb 500000 \
# test_table_1
#################################################################
import argparse
import datetime
import json
import subprocess
import sys
import timeit
from collections import OrderedDict
class TickingTimer:
    """Tracks the wall-clock period between successive tick() calls."""

    def __init__(self):
        # Seed the reference point at construction time.
        self.last_tick_ = timeit.default_timer()

    def tick(self):
        """Reset the timer and return the seconds elapsed since the last
        tick() (or construction, whichever happened most recently)."""
        now = timeit.default_timer()
        elapsed = now - self.last_tick_
        self.last_tick_ = now
        return elapsed

    def last_tick_time(self):
        """Return the clock reading of the most recent tick (or of
        construction, if tick() has never been called)."""
        return self.last_tick_
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def parse_bool(s):
    """Parse a case-insensitive 'true'/'false' string into a bool.

    Raises argparse.ArgumentTypeError for any other value, so it can be
    used directly as an argparse `type=` callback.
    """
    normalized = s.lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    raise argparse.ArgumentTypeError('value must be true or false')
def timestamp():
    """Return the current local time as an ISO-8601 formatted string."""
    now = datetime.datetime.now()
    return now.isoformat()
def run_command(opts, cmd):
    """Echo *cmd*, then (unless --dryrun) run it in a shell and print output."""
    print(cmd)
    if opts.dryrun:
        # Dry-run mode only prints what would have been executed.
        return
    print(check_output(cmd, shell=True))
def ensure_commands_available(opts):
    """Fail fast if the required CLI tools are missing from PATH."""
    for probe in ('which impala-shell', 'which kudu'):
        run_command(opts, probe)
def get_restored_table_name(opts):
    """Return the restore-target table name: source name plus restore suffix."""
    return '{}{}'.format(opts.table_name, opts.table_restore_suffix)
# TODO: Change this to use the Kudu python API.
# It's good dog-fooding and removes the Impala requirement.
def create_table(opts, stats):
    """Create the source Kudu table via impala-shell.

    Builds a CREATE TABLE statement with opts.columns columns — the first
    (opts.columns - opts.num_string_columns) are BIGINTs (f0 is the primary
    key), the rest STRINGs — hash-partitioned on f0, and pipes it to
    impala-shell on opts.impalad_address.

    Raises:
        ValueError: if the column configuration leaves no BIGINT column to
            serve as the primary key.
    """
    print("--------------------------------------")
    print("Creating table %s" % (opts.table_name,))
    print("--------------------------------------")
    print(timestamp())
    num_bigint_cols = opts.columns - opts.num_string_columns
    # An `assert` would be stripped under `python -O`; validate explicitly.
    if num_bigint_cols <= 0:
        raise ValueError("--columns must exceed --num-string-columns "
                         "so a BIGINT primary key column exists")
    create_table_ddl = "CREATE TABLE %s (" % (opts.table_name,)
    for i in range(opts.columns):
        coltype = 'STRING'
        if i < num_bigint_cols: coltype = 'BIGINT'
        if i > 0: create_table_ddl += ', '
        create_table_ddl += "f%d %s" % (i, coltype)
        if i == 0: create_table_ddl += ' PRIMARY KEY'
    create_table_ddl += ") PARTITION BY HASH(f0) PARTITIONS %d STORED AS KUDU " % \
        (opts.partitions, )
    create_table_ddl += "TBLPROPERTIES ('kudu.num_tablet_replicas' = '%d')" % \
        (opts.replication_factor, )
    cmd = 'echo "%s" | impala-shell -i %s -f -' % (create_table_ddl, opts.impalad_address)
    run_command(opts, cmd)
def drop_created_table(opts, stats):
    """Drop the table created for the test run by piping DDL to impala-shell."""
    banner = "--------------------------------------"
    print(banner)
    print("Dropping created table %s" % (opts.table_name, ))
    print(banner)
    print(timestamp())
    statement = "DROP TABLE %s" % (opts.table_name, )
    shell_cmd = 'echo "%s" | impala-shell -i %s -f -' % (statement, opts.impalad_address)
    run_command(opts, shell_cmd)
def drop_restored_table(opts, stats):
    """Delete the restored table with the `kudu table delete` CLI tool."""
    # TODO: This may no longer be needed if and when we integrate
    # restoring HMS metadata and the table is restored as "Impala-managed".
    banner = "--------------------------------------"
    print(banner)
    print("Dropping restored table %s" % (get_restored_table_name(opts), ))
    print(banner)
    print(timestamp())
    target = opts.table_prefix + get_restored_table_name(opts)
    cmd = 'kudu table delete %s %s' % (opts.master_addresses, target)
    run_command(opts, cmd)
def load_table(opts, stats):
    """Load the table with generated data via the DistributedDataGenerator
    Spark job.

    Computes an approximate per-row size from the column configuration,
    derives the row count needed to reach opts.table_data_size_mb, records
    both in *stats*, and launches the Spark data generator.

    Example invocation:
      spark-submit --class org.apache.kudu.spark.tools.DistributedDataGenerator \
        kudu-spark2-tools_2.11-1.8.0-SNAPSHOT.jar \
        --type random --num-rows 10000000 --num-tasks 20 \
        impala::default.foo_test3 m123.example.com
    """
    print("--------------------------------------")
    print("Loading table %s" % (opts.table_name,))
    print("--------------------------------------")
    print(timestamp())
    CLASS_NAME = 'org.apache.kudu.spark.tools.DistributedDataGenerator'
    # TODO: Non-string columns are assumed to be 8 bytes.
    row_size_bytes = opts.num_string_columns * opts.string_field_len + \
                     (opts.columns - opts.num_string_columns) * 8
    # Integer division: under Python 3 '/' yields a float, which would leak
    # into the recorded stats and be truncated oddly by the %d below.
    num_rows = opts.table_data_size_mb * 1024 * 1024 // row_size_bytes
    print("INFO: Inserting %d rows of %d bytes each" % (num_rows, row_size_bytes))
    stats['row_size_bytes'] = row_size_bytes
    stats['num_rows'] = num_rows
    cmd = "%s --class %s %s --type %s --num-rows %d --num-tasks %d %s %s" % \
          (opts.spark_submit_command, CLASS_NAME, opts.kudu_spark_tools_jar,
           opts.load_policy, num_rows, opts.load_num_tasks, opts.table_prefix + opts.table_name,
           opts.master_addresses)
    run_command(opts, cmd)
def backup_table(opts, stats):
    """Back up the table by launching the KuduBackup Spark job."""
    banner = "--------------------------------------"
    print(banner)
    print("Backing up table %s" % (opts.table_name,))
    print(banner)
    print(timestamp())
    CLASS_NAME = "org.apache.kudu.backup.KuduBackup"
    spark_cmd = ("%s --class %s %s --kuduMasterAddresses %s "
                 "--scanRequestTimeoutMs %d --path %s %s") % \
        (opts.spark_submit_command, CLASS_NAME, opts.kudu_backup_jar,
         opts.master_addresses, opts.scan_request_timeout_ms,
         opts.backup_path, opts.table_prefix + opts.table_name)
    run_command(opts, spark_cmd)
def restore_table(opts, stats):
    """Restore the backed-up table (under its restore-suffixed name) by
    launching the KuduRestore Spark job."""
    banner = "--------------------------------------"
    print(banner)
    print("Restoring table %s as %s" % (opts.table_name, get_restored_table_name(opts)))
    print(banner)
    print(timestamp())
    CLASS_NAME = "org.apache.kudu.backup.KuduRestore"
    spark_cmd = ("%s --class %s %s --tableSuffix %s "
                 "--kuduMasterAddresses %s --path %s %s") % \
        (opts.spark_submit_command, CLASS_NAME, opts.kudu_backup_jar,
         opts.table_restore_suffix, opts.master_addresses, opts.backup_path,
         opts.table_prefix + opts.table_name)
    run_command(opts, spark_cmd)
def parse_args():
    """Parse command-line arguments for the backup/restore perf test.

    Returns the argparse Namespace. --master-addresses, --impalad-address,
    and the positional table_name are required; everything else has a
    default shown in --help.
    """
    parser = argparse.ArgumentParser(description='Run a Kudu backup and restore performance test',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Kudu Configuration
    parser.add_argument('--master-addresses', required=True, help='The Kudu master addresses')
    # Impala Configuration
    parser.add_argument('--impalad-address', required=True, help='The Impala daemon address')
    parser.add_argument('--table-prefix', default='impala::default.',
                        help='Kudu table name prefix in the Hive metastore')
    # Spark Job Configuration
    parser.add_argument('--spark-submit-command', default='spark-submit',
                        help='The name of the spark-submit binary')
    ## Spark Loader Configuration
    parser.add_argument('--kudu-spark-tools-jar', default='kudu-spark*-tools*.jar',
                        help='The path to the kudu-spark-tools jar (for --load-table)')
    parser.add_argument('--load-num-tasks', type=int, default=20,
                        help='Number of Spark tasks to create when loading data')
    parser.add_argument('--load-policy', default='sequential', choices=['sequential', 'random'],
                        help='The data loading policy for the data generator')
    parser.add_argument('--string-field-len', type=int, default=128,
                        help='The length, in bytes, of generated string column values')
    ## Spark Backup/Restore Job Configuration
    parser.add_argument('--kudu-backup-jar', default='kudu-backup*.jar',
                        help='The path to the kudu-backup jar')
    parser.add_argument('--backup-path', default='hdfs:///kudu-backup-tests',
                        help='The Hadoop-compatible path at which to store the backup')
    # choices enforces what the help text promises instead of silently
    # accepting an unsupported format.
    parser.add_argument('--backup-file-format', default='parquet', choices=['parquet'],
                        help='The file format of the backup: must be parquet')
    parser.add_argument('--scan-request-timeout-ms', type=int, default=30000,
                        help='The default scan timeout for backup, in milliseconds')
    parser.add_argument('--table-restore-suffix', default='-restore',
                        help='Kudu table name suffix to append on restore')
    # Table Configuration
    parser.add_argument('--columns', type=int, default=10,
                        help='The number of columns in the Kudu table')
    parser.add_argument('--num-string-columns', type=int, default=9,
                        help='The number of string columns in the table; the rest will be bigints')
    parser.add_argument('--partitions', type=int, default=10,
                        help='The number of hash partitions of the table. '
                             'This script only supports hash partitions')
    parser.add_argument('--table-data-size-mb', type=int, default=1024,
                        help='The uncompressed data size of the table, in MB')
    parser.add_argument('--replication-factor', type=int, default=3,
                        help='The replication factor of the table')
    # Positional
    parser.add_argument('table_name', help='The name of the Kudu table to create/backup')
    # Actions
    parser.add_argument('--create-table', type=parse_bool, choices=[True, False], default=False,
                        help='Whether to create the table for loading')
    parser.add_argument('--drop-created-table', type=parse_bool, choices=[True, False], default=False,
                        help='Whether to drop the created table after a successful test run')
    parser.add_argument('--load-table', type=parse_bool, choices=[True, False], default=False,
                        help='Whether to load the table with data')
    parser.add_argument('--backup-table', type=parse_bool, choices=[True, False], default=False,
                        help='Whether to back up the table')
    parser.add_argument('--restore-table', type=parse_bool, choices=[True, False], default=False,
                        help='Whether to restore the table')
    parser.add_argument('--drop-restored-table', type=parse_bool, choices=[True, False], default=False,
                        help='Whether to drop the restored table after a successful test run')
    # Utility
    parser.add_argument('--dryrun', action='store_true',
                        help='Do not execute any commands, only print what would be executed')
    return parser.parse_args()
def main():
start_timestamp = timestamp()
print(start_timestamp)
print("Starting perf test...")
opts = parse_args()
stats = OrderedDict()
stats['start_timestamp'] = start_timestamp
stats['columns'] = opts.columns
stats['num_string_columns'] = opts.num_string_columns
stats['partitions'] = opts.partitions
stats['table_data_size_mb'] = opts.table_data_size_mb
stats['replication_factor'] = opts.replication_factor
stats['load_num_tasks'] = opts.load_num_tasks
stats['load_policy'] = opts.load_policy
stats['string_field_len'] = opts.load_policy
timer = TickingTimer()
start = timer.last_tick_time()
if opts.create_table:
create_table(opts, stats)
stats['create_table_duration_sec'] = timer.tick()
if opts.load_table:
load_table(opts, stats)
stats['load_table_duration_sec'] = timer.tick()
if opts.backup_table:
backup_table(opts, stats)
stats['backup_table_duration_sec'] = timer.tick()
if opts.restore_table:
restore_table(opts, stats)
stats['restore_table_duration_sec'] = timer.tick()
if opts.drop_created_table:
drop_created_table(opts, stats)
stats['drop_created_table_duration_sec'] = timer.tick()
if opts.drop_restored_table:
drop_restored_table(opts, stats)
stats['drop_restored_table_duration_sec'] = timer.tick()
end = timer.last_tick_time()
stats['end_timestamp'] = timestamp()
print(stats['end_timestamp'])
print("Ending perf test")
total_duration = end - start
stats['total_duration_sec'] = total_duration
print("Total time elapsed: %s s" % (total_duration, ))
print("")
print("--------------------------------------")
print("[ BEGIN | |
"""
======================================================
Monolayer (:mod:`graphene.monolayer`)
======================================================
Functions
=========
Band structure
--------------
.. toctree::
:maxdepth: 1
graphene.monolayer.Hamiltonian
graphene.monolayer.CarrierDispersion
graphene.monolayer.DensityOfStates
graphene.monolayer.FermiWavenumber
graphene.monolayer.CarrierDensity
graphene.monolayer.ChemicalPotential
Optical Properties
------------------
.. toctree::
:maxdepth: 1
graphene.monolayer.Polarizibility
graphene.monolayer.ScalarOpticalConductivity
graphene.monolayer.Permittivity
graphene.monolayer.FresnelReflection
Plasmonics
----------
.. toctree::
:maxdepth: 1
graphene.monolayer.PlasmonDispersion
"""
import numpy as np
import scipy.constants as sc
from scipy import special, optimize, integrate
import graphenemodeling.graphene._constants as _c
import graphenemodeling.statistical_distributions as sd
############
# Geometry #
############
def UnitCell(m,n):
    '''Real-space position of the unit cell with indices (m, n).

    The position is the integer combination m*a1 + n*a2 of the two
    graphene lattice vectors.

    Parameters
    ----------
    m, n: Unit cell indices.

    References
    ----------
    [1] The electronic properties of graphene. Rev. Mod. Phys. 81, 109-162.
        https://link.aps.org/doi/10.1103/RevModPhys.81.109.
    '''
    lattice_vec_1 = np.array(_c.a1)
    lattice_vec_2 = np.array(_c.a2)
    return m * lattice_vec_1 + n * lattice_vec_2
def AtomicPosition(m,n,i):
    '''Real-space position of basis atom i in unit cell (m, n).

    Parameters
    ----------
    m, n: Unit cell indices.
    i: Basis (sublattice) index; the i-th multiple of the basis offset
        is added to the unit-cell position.
    '''
    # Offset between the two basis atoms: (a/2) * (1, sqrt(3)).
    basis_offset = (_c.a / 2) * np.array([1, np.sqrt(3)])
    return UnitCell(m, n) + i * basis_offset
##################
# Band Structure #
##################
def Hamiltonian(k,model,g0prime=0):
    '''Tight-binding Hamiltonian in momentum space.

    Parameters
    ----------
    k: array-like, complex, rad/m
        Wavevector of carrier. Use complex ``k=kx + 1j*ky`` for 2D wavevectors.

    model: string
        ``'LowEnergy'``, ``'FullTightBinding'``

    g0prime: scalar, J
        The particle-hole asymmetry parameter :math:`\\gamma_0'`. Typically :math:`0.02\\gamma_0\\leq\\gamma_0'\\leq 0.2\\gamma_0`.

    Returns
    ----------
    H: 2x2 complex ndarray
        Tight-binding Hamiltonian evaluated at k.

    Raises
    ------
    ValueError
        if `model` is not 'LowEnergy' or 'FullTightBinding'

    Notes
    -----
    Let :math:`k=k_x+ik_y`. Then the ``model=FullTightBinding`` expression is given by

    .. math::

        H = \\left(\\array{
                -\\gamma_0' & \\gamma_0f(k) \n
                \\gamma_0f(k)^* & -\\gamma_0'
            } \\right)

    where :math:`f(k)= e^{ik_x a/2} + 2 e^{-i k_x a/ 2}\\cos(k_y a \\sqrt{3}/2)`

    The more common ``model=LowEnergy`` approximation is

    .. math::

        H = \\hbar v_F\\left(\\array{
                0 & k \n
                k^* & 0
            } \\right)

    References
    ----------
    [1] The Band Theory of Graphite. Phys. Rev. 71, 622-634.
        https://link.aps.org/doi/10.1103/PhysRev.71.622

    [2] Band Structure of Graphite. Phys. Rev. 109, 272-279.
        https://link.aps.org/doi/10.1103/PhysRev.109.272.

    [3] Space-time dispersion of graphene conductivity. Eur. Phys. J. B 56, 281-284.
        https://link.springer.com/article/10.1140/epjb/e2007-00142-3.
    '''

    if model!='LowEnergy' and model!='FullTightBinding':
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")

    if model == 'LowEnergy':
        H11 = 0
        H12 = sc.hbar * _c.vF * k
        H21 = np.conj(H12)
        H22 = 0

    if model == 'FullTightBinding':
        kx = np.real(k)
        ky = np.imag(k)
        H11 = -g0prime
        H12 = _c.g0 * ( np.exp(1j*kx*_c.a/2)
                        + 2*np.exp(-1j*kx*_c.a/2)*np.cos(ky*_c.a*np.sqrt(3)/2) )
        H21 = np.conj(H12)
        H22 = -g0prime

    # BUG FIX: the lower-left entry must be H21 = conj(H12) so that H is
    # Hermitian, matching the matrices in the docstring. The original code
    # placed H12 in both off-diagonal slots, discarding the computed H21.
    H = np.array( [[H11, H12],
                   [H21, H22] ])

    return H
def CarrierDispersion(k,model,eh=1,g0prime=_c.g0prime):
    '''The dispersion of Dirac fermions in monolayer graphene.

    These are the eigenvalues of the Hamiltonian.
    However, in both the ``LowEnergy`` model and the ``FullTightBinding`` model, we use closed form solutions rather than solving for the eigenvalues directly.
    This saves time and make broadcasting easier.

    Parameters
    ----------
    k: array-like, complex, rad/m
        Wavevector of Dirac fermion relative to K vector.
        For 2D wavevectors, use :math:`k= k_x + i k_y`.

    model: string
        ``'LowEnergy'``: Linear approximation of dispersion.
        ``'FullTightBinding'``: Eigenvalues of tight-binding approximation. We use a closed form rather than finding eigenvalues of Hamiltonian to save time and avoid broadcasting issues.

    eh: int
        Band index:
        ``eh=1`` returns conduction band,
        ``eh=-1`` returns valence band

    g0prime: scalar, J
        Particle-hole asymmetry parameter, used only by the
        'FullTightBinding' model.

    Returns
    ----------
    dispersion: complex ndarray
        Dispersion relation evaluated at k.

    Raises
    ------
    ValueError
        if `model` is not 'LowEnergy' or 'FullTightBinding'.

    ValueError
        if `eh` not 1 or -1.

    Notes
    -----
    When ``model='LowEnergy'``,

    .. math::

        E =\\pm\\hbar v_F |k|

    When ``model=FullTightBinding``,

    .. math::

        E = \\pm \\gamma_0 \\sqrt{3 + f(k)} - \\gamma_0'f(k)

    where :math:`f(k)= 2 \\cos(\\sqrt{3}k_y a) + 4 \\cos(\\sqrt{3}k_y a/2)\\cos(3k_xa/2)`.

    Both expressions are equivalent to diagonalizing the Hamiltonian of the corresponding ``model``.

    Examples
    --------
    Plot the Fermion dispersion relation.

    .. plot::

        >>> import matplotlib.pyplot as plt
        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from scipy.constants import elementary_charge as eV
        >>> eF = 0.4*eV
        >>> kF = mlg.FermiWavenumber(eF,model='LowEnergy')
        >>> k = np.linspace(-2*kF,2*kF,num=100)
        >>> conduction_band = mlg.CarrierDispersion(k,model='LowEnergy')
        >>> valence_band = mlg.CarrierDispersion(k,model='LowEnergy',eh=-1)
        >>> fig, ax = plt.subplots(figsize=(5,6))
        >>> ax.plot(k/kF,conduction_band/eF,'k')
        [...
        >>> ax.plot(k/kF,valence_band/eF, 'k')
        [...
        >>> ax.plot(k/kF,np.zeros_like(k),color='gray')
        [...
        >>> ax.axvline(x=0,ymin=0,ymax=1,color='gray')
        <...
        >>> ax.set_axis_off()
        >>> plt.show()

    Plot the full multi-dimensional dispersion relation with a particle-hole asymmetry. Replicates Figure 3 in Ref. [1].

    .. plot::

        >>> from graphenemodeling.graphene import monolayer as mlg
        >>> from graphenemodeling.graphene import _constants as _c
        >>> import matplotlib.pyplot as plt
        >>> from mpl_toolkits import mplot3d # 3D plotting
        >>> kmax = np.abs(_c.K)
        >>> emax = mlg.CarrierDispersion(0,model='FullTightBinding',g0prime=-0.2*_c.g0)
        >>> kx = np.linspace(-kmax,kmax,num=100)
        >>> ky = np.copy(kx)
        >>> k = (kx + 1j*ky[:,np.newaxis]) + _c.K # k is relative to K. Add K to move to center of Brillouin zone
        >>> conduction_band = mlg.CarrierDispersion(k,model='FullTightBinding',eh=1,g0prime=-0.2*_c.g0)
        >>> valence_band = mlg.CarrierDispersion(k,model='FullTightBinding',eh=-1,g0prime=-0.2*_c.g0)
        >>> fig = plt.figure(figsize=(8,8))
        >>> fullax = plt.axes(projection='3d')
        >>> fullax.view_init(20,35)
        >>> KX, KY = np.meshgrid(kx,ky)
        >>> fullax.plot_surface(KX/kmax,KY/kmax,conduction_band/_c.g0,rstride=1,cstride=1,cmap='viridis',edgecolor='none')
        <...
        >>> fullax.plot_surface(KX/kmax,KY/kmax,valence_band/_c.g0,rstride=1,cstride=1,cmap='viridis',edgecolor='none')
        <...
        >>> fullax.set_xlabel('$k_x/|K|$')
        Text...
        >>> fullax.set_ylabel('$k_y/|K|$')
        Text...
        >>> fullax.set_zlabel('$\\epsilon/\\gamma_0$')
        Text...
        >>> fullax.set_title('Brillouin Zone of Graphene')
        Text...
        >>> plt.show()

    References
    ----------
    [1] The Band Theory of Graphite. Phys. Rev. 71, 622-634.
        https://link.aps.org/doi/10.1103/PhysRev.71.622

    [2] Band Structure of Graphite. Phys. Rev. 109, 272-279.
        https://link.aps.org/doi/10.1103/PhysRev.109.272.

    [3] Space-time dispersion of graphene conductivity. Eur. Phys. J. B 56, 281-284.
        https://link.springer.com/article/10.1140/epjb/e2007-00142-3.

    [4] The electronic properties of graphene. Rev. Mod. Phys. 81, 109-162.
        https://link.aps.org/doi/10.1103/RevModPhys.81.109.
    '''

    if model!='LowEnergy' and model!='FullTightBinding':
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")

    if eh!=1 and eh!=-1:
        raise ValueError('eh must be either 1 or -1')

    if model == 'LowEnergy':
        # Linear Dirac-cone approximation: E = +/- hbar * vF * |k|
        dispersion = eh*sc.hbar*_c.vF*np.abs(k)

    if model == 'FullTightBinding':
        # k is supplied relative to the K point; shift back before applying
        # the closed-form tight-binding expression.
        k = k - _c.K
        f = lambda k: (2*np.cos(np.sqrt(3)*np.imag(k)*_c.a)
                        + 4*np.cos((np.sqrt(3)*np.imag(k)/2)*_c.a)*np.cos((3/2)*np.real(k)*_c.a) )

        # [sic] eh only applies to first term
        dispersion = eh*_c.g0*np.sqrt(3+ f(k)) - g0prime*f(k)

    return dispersion
def FermiWavenumber(FermiLevel,model,g0prime=_c.g0prime):
    '''
    The Fermi wavenumber, i.e. the wavenumber of the state at
    the Fermi energy.

    Parameters
    ----------
    FermiLevel: array-like, J
        Fermi level

    model: string
        'LowEnergy' or 'FullTightBinding'.

    g0prime: scalar, J
        Particle-hole asymmetry parameter, used only by the
        'FullTightBinding' model.

    Returns
    -------
    array-like, rad/m
        Fermi wavenumber.

    Raises
    ------
    ValueError
        if `model` is not 'LowEnergy' or 'FullTightBinding'.

    Examples
    --------
    Confirm energy of Fermi wavevector is equal to Fermi level.

    >>> from graphenemodeling.graphene import monolayer as mlg
    >>> from scipy.constants import elementary_charge as eV
    >>> FermiLevel = 0.4 * eV
    >>> kF = mlg.FermiWavenumber(FermiLevel, model='LowEnergy')
    >>> mlg.CarrierDispersion(kF,model='LowEnergy')/eV
    0.4
    '''

    # BUG FIX: previously an unknown model fell through and silently
    # returned None; raise instead, consistent with Hamiltonian and
    # CarrierDispersion.
    if model!='LowEnergy' and model!='FullTightBinding':
        raise ValueError("Argument model must be 'LowEnergy' or 'FullTightBinding'")

    if model == 'LowEnergy':
        # Invert E = hbar * vF * |k| directly.
        return np.abs(FermiLevel) / (sc.hbar*_c.vF)

    if model == 'FullTightBinding':
        # TODO: root-finding procedure below still needs to be finished /
        # validated (carried over from the original implementation note).
        eh = np.sign(FermiLevel)

        # f is zero when kf is correct value
        f = lambda kf: FermiLevel - CarrierDispersion(kf, model='FullTightBinding',eh=eh,g0prime=g0prime)

        # Choose LowEnergy answer for initial starting point
        kf0 = FermiWavenumber(FermiLevel,model='LowEnergy',g0prime=g0prime)

        # Secant iteration seeded with kf0 and a nearby second point.
        result = optimize.root_scalar(f,x0=kf0,x1=kf0*.9,rtol=1e-10).root

        return result
def DensityOfStates(E,model,g0prime=_c.g0prime):
'''
The density of states per square meter of graphene at energy :math:`E`.
Parameters
----------
E: array-like, J
Energy :math:`E` at which to evaluate density of states.
model: string
``'LowEnergy'``or ``'FullTightBinding'``
g0prime: scalar, J
The particle-hole asymmetry parameter :math:`\\gamma_0'`. Typically :math:`0.02\\gamma_0\\leq\\gamma_0'\\leq 0.2\\gamma_0`.
Returns
-------
array-like
Density of states, units are states per J-m^2
Notes
-----
For ``model==LowEnergy``, the form is simply
.. math::
\\rho(E)=\\frac{2}{\\pi}\\frac{|E|}{\\hbar^2 v_F^2}
whereas the ``FullTightBinding`` model has a much more complicated form (eqn. 14 of [2])
.. math::
\\rho(E)=\\frac{4}{\\pi^2}\\frac{|E|}{\\gamma_0^2}\\frac{1}{\\sqrt{Z_0}}\\mathbf{F}\\left(\\frac{\\pi}{2},\\sqrt{\\frac{Z_1}{Z_0}}\\right)
where :math:`\\mathbf{F}(\\pi/2,x)` is the complete elliptic integral of the first kind (see `scipy.special.ellipk`_) and
.. _scipy.special.ellipk: https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.ellipk.html
.. math::
Z_0 = \\left\\{\\array{
(1 + |E/\\gamma_0|)^2 - \\frac{[(E/\\gamma_0)^2-1]^2}{4}, & |E|\\leq \\gamma_0 \n
4|E/\\gamma_0|, & -3\\gamma_0\\leq E \\leq -\\gamma_0,\\gamma_0\\leq E\\leq 3\\gamma_0
}\\right.
Z_1 = \\left\\{\\array{
4|E/\\gamma_0|, & |E|\\leq \\gamma_0 \n
(1 + |E/\\gamma_0|)^2 - \\frac{[(E/\\gamma_0)^2-1]^2}{4}, & -3\\gamma_0\\leq E \\leq -\\gamma_0,\\gamma_0\\leq E\\leq 3\\gamma_0
}\\right.
Examples
--------
Plot the density of states for ``model=LowEnergy`` approximation and ``model=FullTightBinding`` model. Replicates Fig. 5 in Ref. [2].
.. plot::
>>> from graphenemodeling.graphene import monolayer as mlg
>>> from graphenemodeling.graphene import _constants as _c
>>> import matplotlib.pyplot as plt
>>> E = np.linspace(-3,3,num=200) * _c.g0
>>> DOS_low = mlg.DensityOfStates(E,model='LowEnergy')
>>> DOS_full = mlg.DensityOfStates(E,model='FullTightBinding')
>>> plt.plot(E/_c.g0,DOS_full/np.max(DOS_full),'k-',label='FullTightBinding')
[<...
>>> plt.plot(E/_c.g0,DOS_low/np.max(DOS_full),'k-.',label='LowEnergy')
[<...
>>> plt.xlabel('$E/\\gamma_0$')
Text...
>>> plt.ylabel('DOS (a.u.)')
Text...
>>> plt.legend()
<...
>>> plt.show()
References
----------
[1] <NAME>., and <NAME>. (1953). The Statistics of a Two-Dimensional, Hexagonal Net. Phys. Rev. 89, 662–662. https://link.aps.org/doi/10.1103/PhysRev.89.662.
[2] <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. (2009).
The electronic properties of graphene. Rev. Mod. Phys. 81, 109–162.
https://link.aps.org/doi/10.1103/RevModPhys.81.109.
'''
if model=='LowEnergy':
E = np.abs(E)
DOS = 2 * E / (sc.pi*(sc.hbar*_c.vF)**2)
return DOS
elif model=='FullTightBinding':
if g0prime!=0:
raise Exception('Not supported for g0prime!=0.\nSetting g0prime=0')
g0prime=0
prefactor = 4*np.abs(E) / (sc.pi*_c.g0)**2
def fZ0(E):
if np.abs(E)<np.abs(_c.g0):
term1 = (1+np.abs(E/_c.g0))**2
term2 = -((E/_c.g0)**2 - 1)**2 / 4
| |
# (C) Copyright 2019-2021 Hewlett Packard Enterprise Development LP.
# Apache License 2.0
import json
import logging
import re
from random import randint
from pyaoscx.utils import util as utils
from pyaoscx.exceptions.response_error import ResponseError
from pyaoscx.exceptions.generic_op_error import GenericOperationError
from pyaoscx.pyaoscx_module import PyaoscxModule
from pyaoscx.utils.list_attributes import ListDescriptor
class ACL(PyaoscxModule):
"""
Provide configuration management for ACL on AOS-CX devices.
"""
base_uri = "system/acls"
resource_uri_name = "acls"
indices = ["name", "list_type"]
cfg_aces = ListDescriptor("cfg_aces")
    def __init__(self, session, name, list_type, uri=None, **kwargs):
        """
        Initialize an ACL object.

        :param session: pyaoscx.Session object used to represent a logical
            connection to the device.
        :param name: String name of the ACL (first index).
        :param list_type: String ACL type (second index).
        :param uri: Optional String URI of this ACL.
        :param kwargs: Extra configuration attributes applied to the object.
        """
        self.session = session
        # Assign IDs
        self.name = name
        self.list_type = list_type
        self._uri = uri
        # List used to determine attributes related to the ACL configuration
        self.config_attrs = []
        # Not materialized until data is loaded from the device via get()
        self.materialized = False
        # Attribute dictionary used to manage the original data
        # obtained from the GET
        self.__original_attributes = {}
        # Set arguments needed for correct creation
        utils.set_creation_attrs(self, **kwargs)
        # Use to manage ACL Entries
        self.cfg_aces = []
        # Attribute used to know if object was changed recently
        self.__modified = False
        # Set an initial random version
        self._update_version()
    @PyaoscxModule.connected
    def get(self, depth=None, selector=None):
        """
        Perform a GET call to retrieve data for an ACL table entry and fill
        the object with the incoming attributes

        :param depth: Integer deciding how many levels into the API JSON that
            references will be returned.
        :param selector: Alphanumeric option to select specific information to
            return.
        :return: Returns True if there is not an exception raised
        """
        logging.info("Retrieving the switch ACLs")

        # Fall back to the session's API defaults when not specified
        depth = self.session.api.default_depth\
            if depth is None else depth
        selector = self.session.api.default_selector\
            if selector is None else selector

        if not self.session.api.valid_depth(depth):
            depths = self.session.api.valid_depths
            raise Exception("ERROR: Depth should be {}".format(depths))

        if selector not in self.session.api.valid_selectors:
            selectors = " ".join(self.session.api.valid_selectors)
            raise Exception(
                "ERROR: Selector should be one of {}".format(selectors))

        payload = {"depth": depth, "selector": selector}

        # ACLs are addressed by the compound index name<separator>list_type
        uri = "{base_url}{class_uri}/{id1}{separator}{id2}".format(
            base_url=self.session.base_url,
            class_uri=ACL.base_uri,
            id1=self.name,
            separator=self.session.api.compound_index_separator,
            id2=self.list_type)
        try:
            response = self.session.s.get(uri,
                                          verify=False,
                                          params=payload,
                                          proxies=self.session.proxy)
        except Exception as e:
            raise ResponseError("GET", e)

        if not utils._response_ok(response, "GET"):
            raise GenericOperationError(response.text, response.status_code)

        data = json.loads(response.text)

        # Remove fields because they are not needed for the PUT request
        if "name" in data:
            data.pop("name")
        if "list_type" in data:
            data.pop("list_type")
        # Delete unwanted data
        if "cfg_aces" in data:
            data.pop("cfg_aces")

        # Add dictionary as attributes for the object
        utils.create_attrs(self, data)

        # Determines if the ACL is configurable
        if selector in self.session.api.configurable_selectors:
            # Set self.config_attrs and delete ID from it
            utils.set_config_attrs(self, data, "config_attrs",
                                   ["name", "list_type"])

        # Set original attributes
        self.__original_attributes = data

        # Sets object as materialized
        # Information is loaded from the Device
        self.materialized = True

        # Clean ACL Entries settings
        if self.cfg_aces == []:
            # Set ACL Entries if any
            # Adds ACL Entries to parent ACL already
            # (local import avoids a circular module dependency)
            from pyaoscx.acl_entry import AclEntry
            AclEntry.get_all(self.session, self)
        return True
@classmethod
def get_all(cls, session):
"""
Perform a GET call to retrieve all system ACLs,
and create a dictionary containing them
:param cls: Object's class
:param session: pyaoscx.Session object used to represent a logical
connection to the device
:return: Dictionary containing ACLs IDs as keys and a
Acl objects as values
"""
logging.info("Retrieving the switch ACL")
uri = "{base_url}{class_uri}".format(base_url=session.base_url,
class_uri=ACL.base_uri)
try:
response = session.s.get(uri, verify=False, proxies=session.proxy)
except Exception as e:
raise ResponseError("GET", e)
if not utils._response_ok(response, "GET"):
raise GenericOperationError(response.text, response.status_code)
data = json.loads(response.text)
acl_dict = {}
# Get all URI elements in the form of a list
uri_list = session.api.get_uri_from_data(data)
for uri in uri_list:
# Create a Acl object
indices, acl = ACL.from_uri(session, uri)
acl_dict[indices] = acl
return acl_dict
@PyaoscxModule.connected
def apply(self):
"""
Main method used to either create or update an existing
ACL table entry.
Checks whether the ACL exists in the switch
Calls self.update() if ACL being updated
Calls self.create() if a new ACL is being created
:return modified: Boolean, True if object was created or modified
False otherwise
"""
modified = False
if self.materialized:
modified = self.update()
else:
modified = self.create()
# Set internal attribute
self.__modified = modified
return modified
@PyaoscxModule.connected
def update(self):
"""
Perform a PUT call to apply changes to an existing ACL table entry
:return modified: True if Object was modified and a PUT request
was made.False otherwise
"""
# Variable returned
modified = False
acl_data = utils.get_attrs(self, self.config_attrs)
uri = "{base_url}{class_uri}/{id1}{separator}{id2}".format(
base_url=self.session.base_url,
class_uri=ACL.base_uri,
id1=self.name,
separator=self.session.api.compound_index_separator,
id2=self.list_type)
# Compare dictionaries
if acl_data == self.__original_attributes:
# Object was not modified
modified = False
else:
# The version should change every time the ACL (or any of
# its entries) change so that it is written to hardware
self._update_version()
acl_data["cfg_version"] = self.cfg_version
post_data = json.dumps(acl_data, sort_keys=True, indent=4)
try:
response = self.session.s.put(uri,
verify=False,
data=post_data,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("PUT", e)
if not utils._response_ok(response, "PUT"):
raise GenericOperationError(response.text,
response.status_code)
else:
logging.info("SUCCESS: Update ACL table entry {} succeeded")
# Set new original attributes
self.__original_attributes = acl_data
modified = True
return modified
@PyaoscxModule.connected
def create(self):
"""
Perform a POST call to create a new ACL table entry
Only returns if an exception is not raise
:return modified: Boolean, True if entry was created.
"""
acl_data = {}
acl_data = utils.get_attrs(self, self.config_attrs)
acl_data["name"] = self.name
acl_data["list_type"] = self.list_type
uri = "{base_url}{class_uri}".format(base_url=self.session.base_url,
class_uri=ACL.base_uri)
post_data = json.dumps(acl_data, sort_keys=True, indent=4)
try:
response = self.session.s.post(uri,
verify=False,
data=post_data,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("POST", e)
if not utils._response_ok(response, "POST"):
raise GenericOperationError(response.text, response.status_code)
else:
logging.info("SUCCESS: Adding ACL table entry {} succeeded\
".format(self.name))
# Get all object's data
self.get()
# Object was modified, as it was created
return True
@PyaoscxModule.connected
def delete(self):
"""
Perform DELETE call to delete ACL table entry.
"""
uri = "{base_url}{class_uri}/{id1}{separator}{id2}".format(
base_url=self.session.base_url,
class_uri=ACL.base_uri,
id1=self.name,
separator=self.session.api.compound_index_separator,
id2=self.list_type)
try:
response = self.session.s.delete(uri,
verify=False,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("DELETE", e)
if not utils._response_ok(response, "DELETE"):
raise GenericOperationError(response.text, response.status_code)
else:
logging.info("SUCCESS: Delete ACL table entry {} succeeded\
".format(self.name))
# Delete object attributes
utils.delete_attrs(self, self.config_attrs)
@classmethod
def from_response(cls, session, response_data):
"""
Create a Acl object given a response_data
:param cls: Object's class
:param session: pyaoscx.Session object used to represent a logical
connection to the device
:param response_data: The response can be either a
dictionary: {
id: "/rest/v10.04/system/
/acls/{name},{list_type}"
}
or a
string: "/rest/v10.04/system/acls/{name},{list_type}"
:return: Acl object
"""
acl_arr = session.api.get_keys(response_data,
ACL.resource_uri_name)
list_type = acl_arr[1]
name = acl_arr[0]
return ACL(session, name, list_type)
@classmethod
def from_uri(cls, session, uri):
"""
Create a Acl object given a URI
:param cls: Object's class
:param session: pyaoscx.Session object used to represent a logical
connection to the device
:param uri: a String with a URI
:return indices, acl: tuple containing both the indices and
Acl object
"""
# Obtain ID from URI
index_pattern = \
re.compile(
r"(.*)acls/(?P<index1>.+)[,./-](?P<index2>.+)")
name = index_pattern.match(uri).group("index1")
list_type = index_pattern.match(uri).group("index2")
# Create Acl object
acl = ACL(session, name, list_type)
indices = "{},{}".format(name, list_type)
return indices, acl
def __str__(self):
return "ACL name:{}, list_type:{}".format(self.name, self.list_type)
def get_uri(self):
"""
Method used to obtain the specific ACL URI
return: Object's URI
"""
if self._uri is None:
self._uri = \
"{resource_prefix}{class_uri}/{id1}{separator}{id2}".format(
resource_prefix=self.session.resource_prefix,
class_uri=ACL.base_uri,
id1=self.name,
separator=(
self.session.api.compound_index_separator),
id2=self.list_type
)
return self._uri
def get_info_format(self):
"""
Method used to obtain correct object format for referencing inside
other objects
return: Object format depending on the API Version
"""
return self.session.api.get_index(self)
def was_modified(self):
"""
Getter method for the __modified attribute
:return: Boolean True if the object was recently modified,
False otherwise.
"""
return self.__modified
def _update_version(self):
"""
Whenever the ACL (or any of its entries) change,the version should
be updated so that it gets written to hardware. If the version
doesn't change, the new configuration won't get to the hardware
"""
new_cfg_version = randint(-9007199254740991, 9007199254740991)
if self.materialized:
if hasattr(self, "cfg_version"):
logging.warning(
"ACL %s was modified, but the version wasn't, "
"so the version was changed automatically to %d",
str(self),
new_cfg_version
)
else:
logging.warning(
"ACL %s didn't have a version configured. %d was added",
str(self),
new_cfg_version
)
self.cfg_version = new_cfg_version
####################################################################
# IMPERATIVE FUNCTIONS
####################################################################
def add_acl_entry(self,
sequence_num,
action,
count=None,
protocol=None,
src_ip=None,
dst_ip=None,
dst_l4_port_min=None,
dst_l4_port_max=None,
src_mac=None,
dst_mac=None,
ethertype=None):
"""
Create an AclEntry object, ACL Entry already exists, value passed
won't update the entry
:param sequence_num: Integer number of the sequence
:param action: Action should be either "permit" or | |
{function_with_interval_param(0,1, interval=pd.Interval(tstwodaysago, tstoday))}'
)
print('==== 3rd pass ===')
print("request for data from three days to yesterday")
print("expected split in two intervals")
print(f'Final result:\n {function_with_interval_param(0,1, interval=pd.Interval(tsthreedaysago, tsyesterday))}' )
print('==== 4th pass ===')
print("request for data from three days to tomorrow")
print("expected split in three intervals")
print(f'Final result:\n\
{function_with_interval_param(0,1, interval1= pd.Interval(tsthreedaysago, tstomorrow))}' )
print('==== 5th pass ===')
print("request for data from two days ago to today with different first argument")
print("No caching expected and one interval")
print( f'Final result:\n{function_with_interval_param(1, 1, interval=pd.Interval(tstwodaysago, tstoday))}' )
print('==== 6th pass ===')
print("request for data from three days ago to today with different first argument")
print("Two intervals expected")
print( f'Final result: {function_with_interval_param(1, 1, interval=pd.Interval(tsthreedaysago, tstoday))}' )
# Testing with an interval as position argument and one interval as keyword argument
if False:
    # Demo is disabled by default; flip to True to run it. Positional
    # argument 0 and keyword 'interval1' are both treated as interval
    # parameters; per-subinterval results are aggregated into a list and
    # the cache is persisted to a per-day HDF5 archive.
    @MemoizationWithIntervals(
        [0], ['interval1'],
        aggregation=list,
        debug=True,
        memoization=klepto.lru_cache(
            maxsize=200,
            cache=klepto.archives.hdf_archive(
                f'{pdl.today().to_date_string()}_memoisation.hdf5'),
            keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
    def function_with_interval_params(interval0,
                                      interval1=pd.Interval(tstwodaysago, tstomorrow)):
        # Sleep makes cache hits visibly faster than misses.
        time.sleep(1)
        print('***')
        print(f'interval0: {interval0}')
        print(f'interval1: {interval1}')
        return (interval0, interval1)

    print('=*=*=*=* DEMONSTRATION WITH TWO INTERVAL PARAMETERS =*=*=*=*')
    print('==== First pass ===')
    print(f'Initialisation: first interval:\nyest to tday - second interval: two days ago to tomorrow')
    print(f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}')
    print('==== Second pass ===')
    print(f'Call with first interval:\n3 days ago to tday - second interval: unchanged')
    print('Expected caching and split of first interval in two')
    print( f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday))}' )
    print('==== 3rd pass ===')
    print(f'Call with first interval:\nunchanged - second interval: yest to today')
    print('Expected only cached results and previous split of first interval')
    print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 = pd.Interval(tsyesterday, tstoday))}' )
    print('==== 4th pass ===')
    print(f'Call with first interval:\n3 days ago to today - second interval: yest to today')
    print('Expected only cached results and only split of first interval')
    print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 = pd.Interval(tsyesterday, tstoday))}' )
    print('==== 5th pass ===')
    print(f'Call with first interval:\n3 days ago to yesterday - second interval: 3 days ago to tomorrow')
    print('Expected no split of first interval and split of second interval in two. Only one none-cached call')
    print(f'Final result:\n\
{function_with_interval_params(pd.Interval(tsthreedaysago, tsyesterday), interval1= pd.Interval(tsthreedaysago, tstomorrow))}'
    )
    print('==== 6th pass ===')
    print(f'Call with first interval:\n3 days ago to today - second interval: 3 days ago to tomorrow')
    print('Expected split of first interval in two and split of second interval in two. One non-cached call: today - tomorrow x ')
    print(f'Final result:\n\
{function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1=pd.Interval(tsthreedaysago, tstomorrow))}'
    )
# Showing the issue with the current version
if False:
    # NOTE(review): this disabled demo is intended to exhibit a known
    # shortcoming of the current implementation (see passes below).
    @MemoizationWithIntervals(None,
                              ['interval'],
                              aggregation=list,
                              debug=True,
                              memoization=klepto.lru_cache(
                                  maxsize=200,
                                  keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
    def function_with_interval_param(valint,
                                     interval=pd.Interval(tstwodaysago, tstomorrow)):
        # Sleep so cache misses are visibly slower than hits.
        time.sleep(1)
        print('**********************************')
        print(f'valint: {valint}')
        print(f'interval: {interval}')
        return (valint, interval)

    print('==== First pass ===')
    print( f'Final result:\n{function_with_interval_param(2, interval=pd.Interval(tsyesterday, tstoday))}')
    print('==== Second pass ===')
    print(f'Final result: {function_with_interval_param(2, interval=pd.Interval(tsthreedaysago, tstoday))}')
    print('==== 3rd pass ===')
    print( f'Final result:\n {function_with_interval_param(3, interval=pd.Interval(tsthreedaysago, tstoday))}')
    print('==== 4th pass ===')
    # NOTE(review): the '\n\ ' below emits a literal backslash in the
    # output; probably a typo for '\n' — confirm intended output.
    print(f'Final result:\n\ {function_with_interval_param(3, interval=pd.Interval(tsthreedaysago, tstomorrow))}')
# testing getting back the memoized function from MemoizationWithIntervals
if False:
    # Disabled demo: retrieve the underlying klepto-wrapped function and
    # inspect / dump its live and file caches (pickle file archive).
    @MemoizationWithIntervals(
        [0], ['interval1'],
        aggregation=list,
        debug=True,
        memoization=klepto.lru_cache(
            maxsize=200,
            cache=klepto.archives.file_archive(
                f'{pdl.today().to_date_string()}_memoisation.pkl'),
            keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
    def function_with_interval_params(interval0,
                                      interval1=pd.Interval(
                                          tstwodaysago,
                                          tstomorrow)):
        time.sleep(1)
        print('**********************************')
        print(f'interval0: {interval0}')
        print(f'interval1: {interval1}')
        return (interval0, interval1)

    print('==== First pass ===')
    # function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
    #                               interval1 = pd.Interval(pdl.yesterday().add(days=0),
    #                                                       pdl.today(), closed='both')
    #                               )
    # Passing get_function_cachedQ=True returns the memoized function
    # object itself instead of invoking the demo function.
    f_mzed = function_with_interval_params(get_function_cachedQ=True)
    print(
        f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}'
    )
    print(f'==============\nf_memoized live cache: {f_mzed.__cache__()}')
    print(f'f_memoized live cache type: {type(f_mzed.__cache__())}')
    print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
    print(f'f_memoized live cache: {f_mzed.info()}')
    # dump() flushes the in-memory cache to the file archive.
    f_mzed.__cache__().dump()
    print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
    # print('==== Second pass ===')
    # print(f'Final result: {function_with_interval_params(pd.Interval(pdl.yesterday().add(days=-2), pdl.today()))}')
    # print('==== 3rd pass ===')
    # print(f'Final result:\n\
    # {function_with_interval_params(pd.Interval(pdl.yesterday().add(days=-2), pdl.yesterday()), interval1 = pd.Interval(pdl.yesterday().add(days=0), pdl.today()))}')
    # print('==== 4th pass ===')
    # print(f'Final result:\n\
    # {function_with_interval_params(pd.Interval(pdl.yesterday().add(days=-2), pdl.yesterday()), interval1= pd.Interval(pdl.yesterday().add(days=-2), pdl.tomorrow()))}')
# testing serialization with HDF5 memoized function from MemoizationWithIntervals
if False:
    # Disabled demo: same as the file-archive demo above, but persisting to
    # an HDF5 archive with serialized=True and cached=False (writes go
    # straight to the archive rather than a local in-memory dict).
    @MemoizationWithIntervals(
        [0], ['interval1'],
        aggregation=list,
        debug=True,
        memoization=klepto.lru_cache(
            maxsize=200,
            cache=klepto.archives.hdf_archive(
                f'{pdl.today().to_date_string()}_memoisation.hdf5',
                serialized=True,
                cached=False,
                meta=False),
            keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
    def function_with_interval_params(interval0,
                                      interval1=pd.Interval(
                                          tstwodaysago,
                                          tstomorrow)):
        time.sleep(1)
        print('*********** function called *******************')
        print(f'interval0: {interval0}')
        print(f'interval1: {interval1}')
        return (interval0, interval1)

    print('==== First pass ===')
    # function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
    #                               interval1 = pd.Interval(pdl.yesterday().add(days=0),
    #                                                       pdl.today(), closed='both')
    #                               )
    # Retrieve the memoized function object for cache inspection.
    f_mzed = function_with_interval_params(get_function_cachedQ=True)
    print(
        f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}'
    )
    print(f'==============\nf_memoized live cache: {f_mzed.__cache__()}')
    print(f'f_memoized live cache type: {type(f_mzed.__cache__())}')
    print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
    print(f'f_memoized live cache: {f_mzed.info()}')
    f_mzed.__cache__().dump()
    print(f'f_memoized file cache: {f_mzed.__cache__().archive}')
if False:
@MemoizationWithIntervals([0], aggregation=list, debug=False)
def function_with_interval_params(interval0):
time.sleep(1)
print('**********************************')
print(f'interval0: {interval0}')
return (interval0)
print('==== First pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsyesterday, tstoday))}'
)
print('==== Second pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday) )}'
)
print('==== 3rd pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tsyesterday))}'
)
print('==== 4th pass ===')
print(
f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstomorrow))}'
)
# Testing kwargs only
if False:
@MemoizationWithIntervals([], ['period'],
aggregation=list,
debug=False)
def function_with_interval_params(array=['USD/JPY'],
period=pd.Interval( tsyesterday, pd.Timestamp.now('UTC'))):
time.sleep(1)
print('************* function called *********************')
print(f'interval0: {period}')
return (array, period)
print('==== First pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"], period = pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC")))}'
)
print('==== Second pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"],period = pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC")) )}'
)
print('==== 3rd pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"],period = pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC")))}'
)
# Testing tolerance
if False:
timenow = pdl.now()
timenowplus5s = timenow.add(seconds=5)
fiveseconds = timenowplus5s - timenow
@MemoizationWithIntervals([], ['period'],
aggregation=list,
debug=False,
rounding=fiveseconds)
def function_with_interval_params(array=['USD/JPY'],
period=pd.Interval(tsyesterday, pd.Timestamp.now(tz="UTC"))
):
time.sleep(1)
print('************* function called *********************')
print(f'interval0: {period}')
return (period)
print('==== First pass ===')
print(
f'Final result: {function_with_interval_params(array=["USD/JPY"], period=pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
print('==== Second pass ===')
time.sleep(1)
print(
f'Final result: {function_with_interval_params(["USD/JPY"], period=pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
time.sleep(6)
print('==== 3rd pass ===')
print(
f'Final result: {function_with_interval_params(["USD/JPY"], period=pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
print('==== 4th pass ===')
print(
f'Final result: {function_with_interval_params(["USD/JPY"], period = pd.Interval(tstoday, pd.Timestamp.now(tz="UTC")))}'
)
if False:
itvals = RecordIntervalsPandas()
calls = itvals(pd.Interval(pdl.yesterday(), pdl.today()))
print(
list(
map(
lambda i:
(i.left.to_date_string(), i.right.to_date_string()),
calls)))
calls = itvals(pd.Interval(pdl.yesterday().add(days=-2), pdl.today()))
print(
list(
map(
lambda i:
(i.left.to_date_string(), i.right.to_date_string()),
calls)))
calls = itvals(
pd.Interval(pdl.yesterday().add(days=-2), pdl.yesterday()))
print(
list(
map(
lambda i:
(i.left.to_date_string(), i.right.to_date_string()),
calls)))
calls = itvals(
pd.Interval(pdl.yesterday().add(days=-4), pdl.tomorrow()))
print(
list(
map(
lambda i:
(i.left.to_date_string(), i.right.to_date_string()),
calls)))
# Scratch check of a list-filtering helper (disabled).
if False:
    def solution(data, n):
        """Return the items of `data` whose total number of occurrences in
        `data` is at most `n`, preserving order (all copies of a kept
        value are kept)."""
        # Count every value in one O(len(data)) pass. The original built
        # an empty dict, immediately overwrote it, and called
        # data.count(x) once per element (O(len(data)**2)).
        counts = {}
        for x in data:
            counts[x] = counts.get(x, 0) + 1
        return [x for x in data if counts[x] <= n]
    print(solution([1, 2, 3], 0))
    print(solution([1, 2, 2, 3, 3, 4, 5, 5], 1))
if False:
    # Scratch check of an argument-expansion scheme: `cont` maps an
    # argument position to a list of replacement values, and `calls_args`
    # builds one argument list per (position, replacement-index) pair.
    # NOTE(review): the inner expression substitutes cont[i][j] at EVERY
    # replaced position i, not only at position p — confirm intended.
    cont = {0: [0, 1], 2: [3, 4]}
    argsorg = [5, 6, 7]
    calls_args = [[
        arg if i not in cont.keys() else cont[i][j]
        for i, arg in enumerate(argsorg)
    ] for p in cont.keys() for j in range(len(cont[p]))]
if True:
    # Live comparison of the three sub-interval strategies on plain
    # integer-endpoint intervals: no splitting, splitting required, and
    # splitting with the minimal-sub-interval variant.
    print("Testing subintervals and strategies")
    print("1. No sub")
    itvals_nosub = RecordIntervalsPandas(subintervals_requiredQ=False, subinterval_minQ=False)
    print("-6->-1")
    calls = itvals_nosub(pd.Interval(-6, -1))
    print("-3->0")
    calls = itvals_nosub(pd.Interval(-3, 0 ))
    print_calls(calls)
    print("2. No sub first strategy")
    itvals_sub = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
    print("-6->-1")
    calls = itvals_sub(pd.Interval(-6,-1))
    print("-3->0")
    calls = itvals_sub(pd.Interval(-3,0))
    print_calls(calls)
    print("3. Sub second strategy")
    itvals_sub2 = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
    print("-6->-1")
    calls = itvals_sub2(pd.Interval(-6,-1))
    print("-3->0")
    calls = itvals_sub2(pd.Interval(-3,0))
    print_calls(calls)
# Test ok
if False:
    # Same three-strategy comparison as above, but on timestamp endpoints.
    print("Testing subintervals and strategies")
    print("1. No sub")
    itvals_nosub = RecordIntervalsPandas(subintervals_requiredQ=False, subinterval_minQ=False)
    print("-6->-1")
    calls = itvals_nosub(pd.Interval(tssixdaysago, tsyesterday))
    print("-3->0")
    calls = itvals_nosub(pd.Interval(tsthreedaysago, tstoday ))
    print_calls(calls)
    print("2. No sub first strategy")
    itvals_sub = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
    print("-6->-1")
    calls = itvals_sub(pd.Interval(tssixdaysago, tsyesterday))
    print("-3->0")
    calls = itvals_sub(pd.Interval(tsthreedaysago, tstoday ))
    print_calls(calls)
    print("3. Sub second strategy")
    itvals_sub2 = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
    print("-6->-1")
    calls = itvals_sub2(pd.Interval(tssixdaysago, tsyesterday))
    print("-3->0")
    calls = itvals_sub2(pd.Interval(tsthreedaysago, tstoday))
    print_calls(calls)
if False:
    # First splitting strategy: expected decomposition noted in the print.
    print("Testing subinterval and first strategy")
    itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
    calls = itvals(pd.Interval(tsfourdaysago, tsthreedaysago))
    print(list(map(lambda i: (i.left, i.right), calls)))
    calls = itvals(pd.Interval(tstwodaysago, tstoday))
    print(list(map(lambda i: (i.left, i.right), calls)))
    calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
    print(list(map(lambda i: (i.left, i.right), calls)))
    calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
    print("should be broken in 3 intervals: -5->-4 | -4->-3 | -3->-1")
    print(sorted(list(map(lambda i: (i.left, i.right), calls))))
if False:
    # Second (minimal-sub-interval) splitting strategy on the same inputs.
    print("Testing subinterval and second strategy")
    itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
    calls = itvals(pd.Interval(tsfourdaysago, tsthreedaysago))
    print(list(map(lambda i: (i.left, i.right), calls)))
    calls = itvals(pd.Interval(tstwodaysago, tstoday))
    print(list(map(lambda i: (i.left, i.right), calls)))
    calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
    print(sorted(list(map(lambda i: (i.left, i.right), calls))))
    calls = itvals(pd.Interval(tssixdaysago, tsyesterday))
    print("should be broken in 3 intervals: -5->-4 | -4->-3 | -3->-1")
    print(sorted(list(map(lambda i: (i.left, i.right), calls))))
if False:
    # Integer-endpoint variant of the first splitting strategy.
    print("Testing subinterval and first strategy")
    itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=False)
    calls = itvals(pd.Interval(-2, 0))
    print_calls(calls)
    calls = itvals(pd.Interval(-4, -3))
    print_calls(calls)
    calls = itvals(pd.Interval(-6, 1))
    print("should be broken in 3 intervals: -6->-4 | -4->-3 | -3->-2 | -2->0 | 0->1")
    print_calls(calls)
if False:
print("Testing subinterval and second strategy")
itvals = RecordIntervalsPandas(subintervals_requiredQ=True, subinterval_minQ=True)
calls | |
# -*- coding: utf-8 -*-
"""
This Python 3.3 module implements the HEALPix map projection as described in [CaRo2007]_.
.. [CaRo2007] <NAME> and <NAME>, Mapping on the healpix grid, Monthly Notices of the Royal Astronomical Society 381 (2007), no. 2, 865--872.
CHANGELOG:
- <NAME> (AR), 2013-01-26: Refactored code from release 0.3.
- AR, 2013-03-05: In in_healpix_image() increased eps to 1e-10 to decrease out-of-bounds errors i was getting when drawing figures.
- AR, 2013-07-23: Ported to Python 3.3.
NOTE:
All lengths are measured in meters and all angles are measured in radians
unless indicated otherwise.
By 'ellipsoid' below, i mean an oblate ellipsoid of revolution.
"""
#*****************************************************************************
# Copyright (C) 2013 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU Lesser General Public License (LGPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
# Import third-party modules.
from numpy import pi, floor, sqrt, sin, arcsin, sign, array, deg2rad, rad2deg
# Import my modules.
from .utils import my_round, auth_lat, auth_rad
def healpix_sphere(lam, phi):
    r"""
    Project the point with geodetic longitude-latitude `(lam, phi)`
    (radians, assuming -pi <= `lam` < pi and -pi/2 <= `phi` <= pi/2)
    from the unit sphere to the plane via the HEALPix projection.

    EXAMPLES::
        >>> print(healpix_sphere(0, arcsin(2.0/3)) == (0, pi/4))
        True
    """
    equatorial_limit = arcsin(2.0/3)
    if abs(phi) <= equatorial_limit:
        # Equatorial belt: longitude passes through unchanged.
        return lam, 3*pi/8*sin(phi)
    # Polar caps: collapse each quarter-turn cap toward its center.
    sigma = sqrt(3*(1 - abs(sin(phi))))
    cap = floor(2*lam/pi + 2)
    if cap >= 4:
        cap = 3  # lam == pi can land here through rounding error
    cap_center = -3*pi/4 + (pi/2)*cap
    x = cap_center + (lam - cap_center)*sigma
    y = sign(phi)*pi/4*(2 - sigma)
    return x, y
def healpix_sphere_inverse(x, y):
    r"""
    Map planar coordinates `(x, y)` in the image of the HEALPix
    projection of the unit sphere back to geodetic longitude-latitude
    `(lam, phi)` in radians. Out-of-bounds input is reported and mapped
    to ``(inf, inf)``.

    EXAMPLES::
        >>> print(healpix_sphere_inverse(0, pi/4) == (0, arcsin(2.0/3)))
        True
    """
    if not in_healpix_image(x, y):
        print("Error: input coordinates (%f, %f) are out of bounds" %
              (x, y))
        return float("inf"), float("inf")
    if abs(y) <= pi/4:
        # Equatorial belt.
        return x, arcsin(8*y/(3*pi))
    if abs(y) < pi/2:
        # Polar caps, poles excluded.
        cap = floor(2*x/pi + 2)
        if cap >= 4:
            cap = 3  # x == pi can land here through rounding error
        cap_center = -3*pi/4 + (pi/2)*cap
        tau = 2 - 4*abs(y)/pi
        lam = cap_center + (x - cap_center)/tau
        # Clamp longitude rounding error back into [-pi, pi].
        if lam < -pi:
            lam = -pi
        elif lam > pi:
            lam = pi
        return lam, sign(y)*arcsin(1 - tau**2/3)
    # The poles themselves map to the longitude cut.
    return -pi, sign(y)*pi/2
def healpix_ellipsoid(lam, phi, e=0):
    r"""
    HEALPix projection of an oblate ellipsoid of eccentricity `e` whose
    authalic sphere is the unit sphere; `e=0` reduces to the spherical
    case. `(lam, phi)` are geodetic longitude-latitude in radians with
    -pi <= `lam` < pi and -pi/2 <= `phi` <= pi/2.

    EXAMPLES::
        >>> print(my_round(healpix_ellipsoid(0, pi/7), 15))
        (0, 0.51115723774642197)
    """
    # Convert geodetic to authalic latitude, then reuse the spherical map.
    authalic_lat = auth_lat(phi, e, radians=True)
    return healpix_sphere(lam, authalic_lat)
def healpix_ellipsoid_inverse(x, y, e=0):
    r"""
    Compute the inverse of healpix_ellipsoid().

    INPUT:
    - `x, y` - Planar coordinates in the image of the HEALPix projection.
    - `e` - Eccentricity of the oblate ellipsoid.

    OUTPUT:
    - Geodetic longitude-latitude `(lam, phi)` in radians, or
      ``(float('inf'), float('inf'))`` if `(x, y)` is out of bounds.

    EXAMPLES::
        >>> p = (0, pi/7)
        >>> q = healpix_ellipsoid(*p)
        >>> print(my_round(healpix_ellipsoid_inverse(*q), 15))
        (0, 0.44879895051282798)
    """
    # Report out-of-bounds input and return the same (inf, inf) sentinel
    # as healpix_sphere_inverse(); the previous bare `return` yielded
    # None here, so the two inverses disagreed on their error value.
    if not in_healpix_image(x, y):
        print("Error: input coordinates (%f, %f) are out of bounds" %
              (x, y))
        return float("inf"), float("inf")
    lam, beta = healpix_sphere_inverse(x, y)
    # Undo the authalic-latitude substitution used by the forward map.
    phi = auth_lat(beta, e, radians=True, inverse=True)
    return lam, phi
def in_healpix_image(x, y):
    r"""
    Return True if and only if `(x, y)` lies in the image of the HEALPix
    projection of the unit sphere (boundary points count as inside).

    EXAMPLES::
        >>> in_healpix_image(0, 0)
        True
        >>> in_healpix_image(0, pi/4 + 0.1)
        False
    """
    # matplotlib is a third-party module.
    from matplotlib.path import Path
    # Fuzz factor expanding the boundary slightly so that points exactly
    # on it are classified as inside.
    eps = 1e-10
    # Zigzag upper boundary from the left edge to the right edge; the
    # lower boundary is its image under point reflection in the origin.
    upper = [
        (-pi - eps, pi/4),
        (-3*pi/4, pi/2 + eps),
        (-pi/2, pi/4 + eps),
        (-pi/4, pi/2 + eps),
        (0, pi/4 + eps),
        (pi/4, pi/2 + eps),
        (pi/2, pi/4 + eps),
        (3*pi/4, pi/2 + eps),
        (pi + eps, pi/4),
    ]
    lower = [(-u, -v) for (u, v) in upper]
    boundary = Path(upper + lower)
    return bool(boundary.contains_point([x, y]))
def healpix_vertices():
    r"""
    Return the list of planar vertices of the HEALPix projection of the
    unit sphere, starting at (pi, pi/4) and ending at (pi, -pi/4).
    """
    # Northern zigzag from x = pi down to x = -pi; the southern half is
    # its image under point reflection through the origin.
    north = [
        (pi, pi/4),
        (3*pi/4, pi/2),
        (pi/2, pi/4),
        (pi/4, pi/2),
        (0, pi/4),
        (-pi/4, pi/2),
        (-pi/2, pi/4),
        (-3*pi/4, pi/2),
        (-pi, pi/4),
    ]
    south = [(-x, -y) for (x, y) in north]
    return north + south
def healpix(a=1, e=0):
    r"""
    Return a function object of the form f(u, v, radians=False,
    inverse=False) wrapping the HEALPix projection (and its inverse) of
    the ellipsoid with major radius `a` and eccentricity `e`.

    EXAMPLES::
        >>> f = healpix(a=2, e=0)
        >>> print(my_round(f(0, pi/3, radians=True), 15))
        (0.57495135977821499, 2.1457476865731109)
        >>> p = (0, 60)
        >>> q = f(*p, radians=False); print(my_round(q, 15))
        (0.57495135977821499, 2.1457476865731109)
    """
    authalic_radius = auth_rad(a, e)
    def f(u, v, radians=False, inverse=False):
        if inverse:
            # Undo the authalic-radius scaling, invert on the unit
            # authalic sphere, then convert to degrees if requested.
            x, y = array((u, v))/authalic_radius
            lam, phi = array(healpix_ellipsoid_inverse(x, y, e=e))
            if not radians:
                lam, phi = rad2deg([lam, phi])
            return lam, phi
        # Forward projection: radians in, scaled planar coordinates out.
        lam, phi = (u, v) if radians else deg2rad([u, v])
        return tuple(authalic_radius*array(healpix_ellipsoid(lam, phi, e=e)))
    return f
def healpix_diagram(a=1, e=0, shade_polar_region=True):
r"""
Return a Sage Graphics object diagramming the HEALPix projection
boundary and polar triangles for the ellipsoid with major radius `a`
and eccentricity `e`.
Inessential graphics method.
Requires Sage graphics methods.
"""
from sage.all import Graphics, line2d, point, polygon, text, RealNumber, Integer
# Make Sage types compatible with Numpy.
RealNumber = float
Integer = int
R = auth_rad(a, e)
g = Graphics()
color = 'black' # Boundary color.
shade_color = 'blue' # Polar triangles color.
dl = array((R*pi/2,0))
lu = [(-R*pi, R*pi/4),(-R*3*pi/4, R*pi/2)]
ld = [(-R*3*pi/4, R*pi/2),(-R*pi/2, R*pi/4)]
g += line2d([(-R*pi, -R*pi/4),(-R*pi, R*pi/4)], color=color)
g += line2d([(R*pi, R*pi/4),(R*pi, -R*pi/4)], linestyle = '--',
color=color)
for k in range(4):
g += line2d([array(p) + k*dl for p in lu], color=color)
g += line2d([array(p) + k*dl for p in ld], linestyle = '--',
color=color)
g += line2d([array(p) + array((k*R*pi/2 - R*pi/4, -R*3*pi/4))
for p in ld], color=color)
g += line2d([array(p) + array((k*R*pi/2 + R*pi/4, -R*3*pi/4))
for p in lu], linestyle = '--', color=color)
pn = array((-R*3*pi/4, R*pi/2))
ps = array((-R*3*pi/4, -R*pi/2))
g += point([pn + k*dl for k in range(4)] +
[ps + k*dl for k in range(4)], size=20, color=color)
g += point([pn + k*dl for | |
<reponame>anoppa/Proyecto-IA-Sim-Comp<gh_stars>1-10
from numpy import string_
from ..non_terminal import NonTerminal
from ..terminal import Terminal
from ..production import Production
from ..Node.declaration_nodes import *
from ..Node.expression_nodes import *
from ..Node.statement_nodes import *
from ..Node.program_node import ProgramNode
from ..Grammar import Grammar
def get_grammar():
# non terminals
program, elements_list = NonTerminal.get_non_terminals("<program> <elements-list>")
statement, simple_statement = NonTerminal.get_non_terminals(
"<statement> <simple-statement>"
)
def_class, body_statements = NonTerminal.get_non_terminals(
"<def-class> <body-statements>"
)
(
def_func,
def_var,
assignment,
) = NonTerminal.get_non_terminals("<def-func> <def-var> <assignment>")
def_agent = NonTerminal("<def-agent>")
param, param_list = NonTerminal.get_non_terminals("<param> <param-list>")
instance = NonTerminal("<instance>")
def_rand_var, def_effect, def_effect_list = NonTerminal.get_non_terminals(
"<def-rand-var> <def-effect> <def-effect-list>"
)
expr, arith, term, factor, atom = NonTerminal.get_non_terminals(
"<expr> <arith> <term> <factor> <atom>"
)
func_call, arg_list = NonTerminal.get_non_terminals("<func-call> <arg-list>")
prob_func, prob_func_list, rule = NonTerminal.get_non_terminals(
"<prob-func> <prob-func-list> <rule>"
)
for_, if_, if_else = NonTerminal.get_non_terminals("<for> <if> <if-else>")
or_expr, and_expr, not_expr, compare_expr = NonTerminal.get_non_terminals(
"<or-expr> <and-expr> <not-expr> <compare-factor>"
)
dict_, dict_items_list, dict_item = NonTerminal.get_non_terminals(
"<dict> <dict-items-list> <dict-item>"
)
return_ = NonTerminal("<return>")
tuple_, list_ = NonTerminal.get_non_terminals("<tuple> <list>")
string = NonTerminal("<string>")
idxs = NonTerminal("<idxs>")
nums = NonTerminal("<nums>")
non_terminals = [
idxs,
string,
program,
elements_list,
statement,
return_,
simple_statement,
def_class,
body_statements,
def_func,
def_var,
assignment,
def_agent,
param,
param_list,
instance,
def_rand_var,
def_effect,
def_effect_list,
expr,
arith,
term,
factor,
atom,
or_expr,
and_expr,
not_expr,
arg_list,
prob_func_list,
rule,
if_,
if_else,
func_call,
for_,
dict_,
dict_items_list,
dict_item,
tuple_,
list_,
prob_func,
compare_expr,
nums,
]
# terminals
epsilon = Terminal("epsilon")
effect = Terminal("effect")
return_kw = Terminal("return")
in_, on, is_ = Terminal.get_terminals("in on is")
rule_operator, arrow = Terminal.get_terminals("=> ->")
(
semi,
colon,
comma,
dot,
opar,
cpar,
ocur,
ccur,
quotation_marks,
) = Terminal.get_terminals('; : , . ( ) { } " ')
equal, plus, minus, star, div = Terminal.get_terminals("= + - * /")
gt, lt, equals_b, not_equals_b, not_, and_, or_ = Terminal.get_terminals(
"> < == != not and or"
)
idx, int_, double_, class_, function_ = Terminal.get_terminals(
"idx int double class function"
)
for_kw, if_kw, else_kw = Terminal.get_terminals("for if else")
activation_condition, effect_time, repetition, action = Terminal.get_terminals(
"activationCondition effectTime repetition action"
)
supply = Terminal("supply")
type_ = Terminal("type")
new_ = Terminal("new")
osquare_br, csquare_br = Terminal.get_terminals("[ ]")
false_, true_ = Terminal.get_terminals("false true")
terminals = [
false_,
true_,
type_,
new_,
epsilon,
effect,
in_,
rule_operator,
quotation_marks,
semi,
equal,
gt,
idx,
for_kw,
activation_condition,
arrow,
colon,
return_kw,
comma,
dot,
opar,
cpar,
ocur,
ccur,
plus,
minus,
star,
div,
lt,
equals_b,
not_equals_b,
not_,
and_,
or_,
on,
class_,
function_,
if_kw,
else_kw,
effect_time,
repetition,
action,
osquare_br,
csquare_br,
supply,
int_,
double_,
]
rules = {}
productions = {}
# <program>
p_77 = Production(program, [elements_list])
rules[p_77] = lambda _, s: ProgramNode(s[1])
productions[program] = [p_77]
# <elements-list>
p_0 = Production(elements_list, [statement, elements_list])
rules[p_0] = lambda _, s: [s[1]] + s[2]
p_49 = Production(elements_list, [statement])
rules[p_49] = lambda _, s: [s[1]]
productions[elements_list] = [p_0, p_49]
# <statement>
p_1 = Production(statement, [def_class])
rules[p_1] = lambda _, s: s[1]
p_2 = Production(statement, [def_func])
rules[p_2] = lambda _, s: s[1]
p_75 = Production(statement, [def_agent])
rules[p_75] = lambda _, s: s[1]
p_54 = Production(statement, [simple_statement])
rules[p_54] = lambda _, s: s[1]
productions[statement] = [p_54, p_2, p_1, p_75]
# <simple-statement>
p_51 = Production(simple_statement, [if_])
rules[p_51] = lambda _, s: s[1]
p_52 = Production(simple_statement, [if_else])
rules[p_52] = lambda _, s: s[1]
p_53 = Production(simple_statement, [for_])
rules[p_53] = lambda _, s: s[1]
p_97 = Production(simple_statement, [func_call])
rules[p_97] = lambda _, s: s[1]
p_56 = Production(simple_statement, [def_var])
rules[p_56] = lambda _, s: s[1]
p_57 = Production(simple_statement, [assignment])
rules[p_57] = lambda _, s: s[1]
productions[simple_statement] = [p_51, p_52, p_53, p_97, p_57, p_56]
# <def_class>
p_4 = Production(def_class, [class_, idx, ocur, statement, ccur])
rules[p_4] = lambda _, s: ClassNode(s[2].expression, s[4])
p_5 = Production(def_class, [class_, idx, colon, idx, ocur, statement, ccur])
rules[p_5] = lambda _, s: ClassNode(s[2].expression, s[3], s[4])
productions[def_class] = [p_4, p_5]
# <def-agent>
p_76 = Production(
def_agent,
[
type_,
idx,
equal,
ocur,
activation_condition,
colon,
list_,
semi,
effect_time,
colon,
nums,
semi,
repetition,
colon,
nums,
semi,
action,
colon,
idx,
semi,
ccur,
],
)
rules[p_76] = lambda _, s: AgentDefNode(
s[1].expression, s[2].expression, s[7], s[11], s[15], s[19].expression
)
p_109 = Production(
def_agent,
[
type_,
idx,
equal,
ocur,
activation_condition,
colon,
list_,
semi,
effect_time,
colon,
nums,
semi,
repetition,
colon,
nums,
semi,
action,
colon,
idx,
semi,
supply,
colon,
nums,
semi,
ccur,
],
)
rules[p_109] = lambda _, s: AgentDefNode(
s[1].expression, s[2].expression, s[7], s[11], s[15], s[19].expression, s[23]
)
productions[def_agent] = [p_76, p_109]
# <def-func>
p_9 = Production(
def_func,
[
function_,
idx,
opar,
param_list,
cpar,
arrow,
type_,
ocur,
body_statements,
ccur,
],
)
rules[p_9] = lambda _, s: FuncDeclarationNode(
s[2].expression, s[4], s[7].expression, s[9]
)
productions[def_func] = [p_9]
# <def-var>
p_15 = Production(def_var, [type_, idx, equal, expr, semi])
rules[p_15] = lambda _, s: VarDeclarationNode(
s[1].expression, s[2].expression, s[4]
)
productions[def_var] = [p_15]
# <assignment>
p_16 = Production(assignment, [idx, equal, expr, semi])
rules[p_16] = lambda _, s: AssignmentNode(s[1].expression, s[3])
productions[assignment] = [p_16]
# <param>
p_10 = Production(param, [type_, idx])
rules[p_10] = lambda _, s: (s[1].expression, s[2].expression)
productions[param] = [p_10]
# <param-list>
p_11 = Production(param_list, [param, comma, param_list])
rules[p_11] = lambda _, s: [s[1]] + s[3]
p_98 = Production(param_list, [param])
rules[p_98] = lambda _, s: [s[1]]
p_12 = Production(param_list, [epsilon])
rules[p_12] = lambda h, s: []
productions[param_list] = [p_11, p_12, p_98]
# <expr>
p_64 = Production(expr, [or_expr])
rules[p_64] = lambda _, s: s[1]
p_17 = Production(expr, [rule])
rules[p_17] = lambda _, s: s[1]
p_63 = Production(expr, [instance])
rules[p_63] = lambda _, s: s[1]
p_88 = Production(expr, [def_rand_var])
rules[p_88] = lambda _, s: s[1]
productions[expr] = [p_64, p_63, p_17, p_88]
# <arith>
p_20 = Production(arith, [term])
rules[p_20] = lambda _, s: s[1]
p_21 = Production(arith, [term, plus, arith])
rules[p_21] = lambda _, s: PlusNode(s[1], s[3])
p_22 = Production(arith, [term, minus, arith])
rules[p_22] = lambda _, s: MinusNode(s[1], s[3])
productions[arith] = [p_20, p_21, p_22]
# <term>
p_80 = Production(term, [factor])
rules[p_80] = lambda _, s: s[1]
p_81 = Production(term, [factor, star, term])
rules[p_81] = lambda _, s: ByNode(s[1], s[3])
p_23 = Production(term, [factor, div, term])
rules[p_23] = lambda _, s: DivideNode(s[1], s[3])
productions[term] = [p_80, p_81, p_23]
# <factor>
p_24 = Production(factor, [atom])
rules[p_24] = lambda _, s: s[1]
p_25 = Production(factor, [opar, or_expr, cpar])
rules[p_25] = lambda _, s: s[2]
productions[factor] = [p_24, p_25]
# <string>
p_115 = Production(string, [quotation_marks, idxs, quotation_marks])
rules[p_115] = lambda _, s: StringNode(s[2])
productions[string] = [p_115]
# <idxs>
p_113 = Production(idxs, [idx, idxs])
rules[p_113] = lambda _, s: s[1].expression + s[2]
p_114 = Production(idxs, [epsilon])
rules[p_114] = lambda _, s: ""
productions[idxs] = [p_113, p_114]
# <atom>
p_28 = Production(atom, [idx])
rules[p_28] = lambda _, s: VariableNode(s[1].expression)
p_108 = Production(atom, [nums])
rules[p_108] = lambda _, s: s[1]
p_112 = Production(atom, [string])
rules[p_112] = lambda _, s: s[1]
p_29 = Production(atom, [func_call])
rules[p_29] = lambda _, s: s[1]
p_100 = Production(atom, [dict_])
rules[p_100] = lambda _, s: s[1]
p_101 = Production(atom, [tuple_])
rules[p_101] = lambda _, s: s[1]
p_102 = Production(atom, [list_])
rules[p_102] = lambda _, s: s[1]
p_110 = Production(atom, [false_])
rules[p_110] = lambda _, s: BooleanNode(s[1])
p_111 = Production(atom, [true_])
rules[p_111] = lambda _, s: BooleanNode(s[1])
productions[atom] = [p_28, p_29, p_100, p_102, p_101, p_108, p_110, p_111, p_112]
# <or-expr>
p_65 = Production(or_expr, [and_expr, or_, or_expr])
rules[p_65] = lambda _, s: OrNode(s[1], s[3])
p_82 = Production(or_expr, [and_expr])
rules[p_82] = lambda _, s: s[1]
productions[or_expr] = [p_65, p_82]
# <and-expr>
p_89 = Production(and_expr, [not_expr, and_, and_expr])
rules[p_89] = lambda _, s: AndNode(s[1], s[3])
p_93 = Production(and_expr, [not_expr])
rules[p_93] = lambda _, s: s[1]
productions[and_expr] = [p_89, p_93]
# <not-expr>
p_91 = Production(not_expr, [not_, compare_expr])
rules[p_91] = lambda _, s: NotNode(s[2])
p_92 = Production(not_expr, [compare_expr])
rules[p_92] = lambda _, s: s[1]
productions[not_expr] = [p_91, p_92]
# <compare-expr>
p_67 = Production(compare_expr, [arith, equals_b, compare_expr])
rules[p_67] = lambda _, s: EqualsNode(s[1], s[3])
p_68 = Production(compare_expr, [arith, not_equals_b, compare_expr])
rules[p_68] = lambda _, s: NotEqualsNode(s[1], s[3])
p_69 = Production(compare_expr, [arith, lt, compare_expr])
rules[p_69] = lambda _, s: LesserNode(s[1], s[3])
p_83 = Production(compare_expr, [arith, gt, compare_expr])
rules[p_83] = lambda _, s: GreaterNode(s[1], s[3])
p_94 = Production(compare_expr, [arith])
rules[p_94] = lambda _, s: s[1]
productions[compare_expr] = [p_67, p_68, p_69, p_83, p_94]
# <func-call>
p_31 = Production(func_call, [idx, opar, arg_list, cpar])
| |
"""
Utility methods for data processing.
"""
import os
from glob import glob
import itertools
import csv
import pandas as pd
from constants import NUM, NUMBERREGEX, UNK, WORD_START, WORD_END, EMBEDS_FILES, FULL_LANG, LABELS, MODIFIED_LABELS
def print_task_labels(task_name, label2id, id_sequence, file):
    """Append one ';'-separated CSV column with the labels of one task.

    Parameters
    ----------
    task_name : str
        Task whose labels are written; 'target', 'group' and 'directness'
        are single-label, any other task is treated as multi-label.
    label2id : dict
        Mapping task -> {label_name: label_id}.
    id_sequence : list of dict
        One dict per example mapping task -> label-id vector; single-label
        tasks hold ``[label_id]``, multi-label tasks a 0/1 indicator
        vector with one entry per label id.
    file : str
        Path of the CSV file to append to (a header row is written on
        every call).

    Improvements over the original: the two identical ``if/else``
    branches for 'target'/'group' are merged, the duplicated
    'annotator_sentiment'/'sentiment' bodies are merged, the label
    mapping is inverted once instead of being scanned per example, and
    the redundant ``f.close()`` inside the ``with`` block is gone.
    """
    # Invert the mapping once: label_id -> label_name for this task.
    id2label = {idx: label
                for label, idx in label2id.get(task_name, {}).items()}
    with open(file, 'a+') as f:
        writer = csv.writer(f, delimiter=';', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['ID', task_name])
        count = 1
        for label_idx_seq in id_sequence:
            for task, label_idx in label_idx_seq.items():
                if task != task_name:
                    continue
                if task in ('target', 'group', 'directness'):
                    # Single-label task: the id list holds one label id.
                    val = id2label[label_idx[0]]
                else:
                    # Multi-label task: collect the name of every label
                    # whose indicator entry is positive.
                    val = [id2label[j]
                           for j in range(len(label_idx))
                           if label_idx[j] > 0 and j in id2label]
                writer.writerow([count, val])
                # The row counter advances only when a row is written,
                # matching the original behaviour.
                count += 1
#write functions for studying correlations
def save_generated_labels_in_csv_file(label2id, id_sequence, file):
    """Append all generated labels to a CSV file, one row per example.

    Parameters
    ----------
    label2id : dict
        Mapping task -> {label_name: label_id}.
    id_sequence : list of dict
        One dict per example mapping task -> label-id vector
        (single-label: ``[label_id]``; multi-label: 0/1 indicator vector).
    file : str
        Path of the CSV file to append to (header written on every call).

    Bug fixes over the original: the per-row values are now reset for
    every example (previously the resets were commented out, so a row
    could silently reuse the previous example's values or raise
    NameError on the first row), and the row order now matches the
    header ('ID', 'annotator_sentiment', 'sentiment', 'group', 'target'),
    which the original wrote in a different order than the data columns.
    """
    # Invert each task's label mapping once: label_id -> label_name.
    id2label = {task: {idx: label for label, idx in labels.items()}
                for task, labels in label2id.items()}
    with open(file, 'a+') as f:
        writer = csv.writer(f, delimiter=',', quotechar='|',
                            quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['ID', 'annotator_sentiment', 'sentiment',
                         'group', 'target'])
        for count, label_idx_seq in enumerate(id_sequence, start=1):
            # Per-row defaults so values never leak between examples.
            target_val = ''
            group_val = ''
            annotator_val = []
            sentiment_val = []
            for task, label_idx in label_idx_seq.items():
                if task == 'target':
                    target_val = id2label[task][label_idx[0]]
                elif task == 'group':
                    group_val = id2label[task][label_idx[0]]
                elif task == 'annotator_sentiment':
                    annotator_val = [id2label[task][j]
                                     for j in range(len(label_idx))
                                     if label_idx[j] > 0
                                     and j in id2label[task]]
                elif task == 'sentiment':
                    sentiment_val = [id2label[task][j]
                                     for j in range(len(label_idx))
                                     if label_idx[j] > 0
                                     and j in id2label[task]]
            writer.writerow([count, annotator_val, sentiment_val,
                             group_val, target_val])
def get_label(label2id, id_sequence, file):
    """Append one tab-separated line per example with its decoded labels.

    Each appended line has the form ``<row>.<TAB><name,name,...>``:
    single-label tasks ('target', 'group') contribute the name of their
    single label id, while multi-label tasks ('annotator_sentiment',
    'sentiment') contribute one name per positive entry of their
    indicator vector. Label ids with no known name are skipped, as in
    the original implementation.

    Improvements over the original: the unused ``label_list``
    accumulation is removed, each task's mapping is inverted once
    instead of scanning ``label2id`` per example, and the redundant
    ``f.close()`` inside the ``with`` block is gone.
    """
    # Invert each task's label mapping once: label_id -> label_name.
    id2label = {task: {idx: label for label, idx in labels.items()}
                for task, labels in label2id.items()}
    with open(file, 'a+') as f:
        for count, label_idx_seq in enumerate(id_sequence, start=1):
            label_seq = []
            for task, label_idx in label_idx_seq.items():
                if task in ('target', 'group'):
                    # Single-label: [label_id]; unknown ids are skipped.
                    name = id2label[task].get(label_idx[0])
                    if name is not None:
                        label_seq.append(name)
                elif task in ('annotator_sentiment', 'sentiment'):
                    # Multi-label indicator vector.
                    names = id2label[task]
                    label_seq.extend(names[j]
                                     for j in range(len(label_idx))
                                     if label_idx[j] > 0 and j in names)
            f.write(str(count) + '.\t' + ','.join(label_seq) + '\n')
def normalize(word):
    """Normalize a word: numbers collapse to the NUM token, all else is lower-cased."""
    if NUMBERREGEX.match(word):
        return NUM
    return word.lower()
def average_by_task(score_dict):
    """Return the unweighted mean of micro and macro F1 over all tasks.

    :param score_dict: dict mapping task name -> {'micro_f1': float, 'macro_f1': float}
    :return: average of all micro/macro F1 values; 0.0 for an empty dict
        (the previous version raised ZeroDivisionError on empty input)
    """
    if not score_dict:
        return 0.0
    total = sum(scores['micro_f1'] + scores['macro_f1']
                for scores in score_dict.values())
    # each task contributes exactly two metrics
    return total / (2 * len(score_dict))
def average_by_lang(score_list, data_size_list, total_data_size):
    """Return the data-size-weighted average of per-language scores.

    Each score is weighted by its language's share of the total data size.
    """
    weighted_sum = 0
    for score, size in zip(score_list, data_size_list):
        weighted_sum += (float(size) / total_data_size) * score
    return weighted_sum
def load_embeddings_file(embeds, languages, sep=" ", lower=False):
    """Load pre-trained word embeddings for the given languages.

    :param embeds: key into the EMBEDS_FILES directory map
    :param languages: language codes; only files ending in '<lang>.vec' for
        one of these codes are read
    :param sep: field separator inside the embedding files
    :param lower: whether to lower-case the words
    :return: (word2vec dict mapping word -> list of floats, embedding dimension)
    """
    embed_dir = EMBEDS_FILES[embeds]
    file_name_list = [os.path.join(embed_dir, f) for f in os.listdir(embed_dir)
                      if any(f.endswith(lang + '.vec') for lang in languages)]
    word2vec = {}
    total_num_words = 0
    embed_dim = 0
    for file_name in file_name_list:
        print('\n\n Loading {}.....\n\n'.format(file_name))
        # BUG FIX: `encoding` used to be a loop-carried variable, so once one
        # *ar.vec / *fr.vec file switched it to utf-8, every later file was
        # also decoded as utf-8. Decide the encoding per file instead.
        if file_name.endswith('ar.vec') or file_name.endswith('fr.vec'):
            encoding = 'utf-8'
        else:
            encoding = None
        with open(file=file_name, mode='r', encoding=encoding) as f:
            # first line: "<num_words> <embed_dim>"
            (num_words, embed_dim) = (int(x) for x in f.readline().rstrip('\n').split(' '))
            total_num_words += num_words
            for idx, line in enumerate(f):
                if (idx + 1) % (1e+5) == 0:
                    print('Loading {}/{} words'.format(idx + 1, num_words))
                fields = line.rstrip('\n').split(sep)
                vec = [float(x) for x in fields[1:]]
                word = fields[0]
                if lower:
                    word = word.lower()
                word2vec[word] = vec
    print('Loaded pre-trained embeddings of dimension: {}, size: {}, lower: {}'
          .format(embed_dim, total_num_words, lower))
    return word2vec, embed_dim
def get_data(languages, task_names, word2id=None, task2label2id=None, data_dir=None,
             train=True, verbose=False):
    """
    Read the tweet CSV file of every language and convert it to id sequences.

    :param languages: a list of language codes from which to obtain the data
    :param task_names: a list of task names (not used directly here; the tasks
        actually read are the keys of task2label2id)
    :param word2id: a mapping of words to their ids; required when train=False
    :param task2label2id: a mapping of tasks to a label-to-id dictionary
    :param data_dir: directory containing one sub-directory per language, each
        expected to hold a single CSV file with columns HITId, tweet,
        sentiment, directness, annotator_sentiment, target, group
    :param train: whether data is used for training (default: True); during
        training the word2id mapping is (re)built and grown with every new word
    :param verbose: whether to print per-language sentence/token counts
    :return X: a list of word-index sequences, one per tweet;
            Y: a list of dicts mapping a task to its label indices
               (single-label tasks: [idx]; *sentiment* tasks: binary vector);
            word2id: the word-to-id mapping.
    """
    # NOTE: the previous docstring promised org_X/org_Y/char2id returns that
    # were never produced; the dead locals backing them have been removed.
    X = []
    Y = []
    # for training, we initialize the mapping; for testing, we require it
    if train:
        word2id = {}
        # index 0 is reserved for unknown / out-of-vocabulary words
        word2id[UNK] = 0
    for language in languages:
        num_sentences = 0
        num_tokens = 0
        full_lang = FULL_LANG[language]
        language_path = os.path.join(data_dir, full_lang)
        assert os.path.exists(language_path), ('language path %s does not exist.'
                                               % language_path)
        # each language directory is expected to hold a single CSV file
        csv_file = os.path.join(language_path, os.listdir(language_path)[0])
        df = pd.read_csv(csv_file)
        for _, instance in df.iterrows():
            num_sentences += 1
            sentence = instance['tweet'].split()
            sentence_word_indices = []  # sequence of word indices
            # label indices for each task, for this tweet
            sentence_task2label_indices = {}
            for word in sentence:
                num_tokens += 1
                if train and word not in word2id:
                    word2id[word] = len(word2id)
                sentence_word_indices.append(word2id.get(word, word2id[UNK]))
            for task in task2label2id.keys():
                if 'sentiment' in task:
                    # multi-label task: labels are '_'-separated in the CSV;
                    # encode as a binary vector with one column per label
                    labels = instance[task].split('_')
                    sentence_task2label_indices[task] = [0] * len(task2label2id[task])
                    for label in labels:
                        label_idx = task2label2id[task][label]
                        sentence_task2label_indices[task][label_idx] = 1
                else:
                    # single-label task: a one-element list with the label id
                    sentence_task2label_indices[task] = [task2label2id[task][instance[task]]]
            X.append(sentence_word_indices)
            Y.append(sentence_task2label_indices)
        if verbose:
            print('{}: {} sentences, {} tokens'.format(language, num_sentences, num_tokens))
    assert len(X) == len(Y)
    return X, Y, word2id
#Log the training process
def log_fit(log_dir, epoch, languages, test_lang, task_names, train_score, dev_score):
if(len(task_names) ==1):
task_name = task_names[0]
if(len(languages) == 1):
task_directory = os.path.join(log_dir,'STSL/')
if not os.path.exists(task_directory):
os.mkdir(task_directory)
file = os.path.join(log_dir, 'STSL/{}_{}.csv'.format(languages[0],task_names[0]))
else:
task_directory = os.path.join(log_dir,'STML/')
if not os.path.exists(task_directory):
os.mkdir(task_directory)
file = os.path.join(log_dir, 'STML/{}.csv'.format(task_names[0]))
#This function needs to be changed
if(os.path.exists(file)):
with open(file, 'a') as f:
writer = csv.writer(f,delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow([epoch, test_lang, train_score[task_name]['micro_f1'], train_score[task_name]['macro_f1'],
dev_score[task_name]['micro_f1'], dev_score[task_name]['macro_f1']])
else:
with open(file, 'a') as f:
writer = csv.writer(f,delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['epoch', 'test_lang', task_name+'-train-micro-f1', task_name+'-train-macro-f1',
task_name+'-dev-micro-f1', task_name+'-dev-macro-f1'])
writer.writerow([epoch, test_lang, train_score[task_name]['micro_f1'], train_score[task_name]['macro_f1'],
dev_score[task_name]['micro_f1'], dev_score[task_name]['macro_f1']])
f.close()
else:
if(len(languages) ==1):
task_directory = os.path.join(log_dir,'MTSL/')
if not os.path.exists(task_directory):
os.mkdir(task_directory)
file = os.path.join(log_dir, 'MTSL/{}.csv'.format(languages[0]))
else:
task_directory = os.path.join(log_dir,'MTML/')
if not os.path.exists(task_directory):
os.mkdir(task_directory)
file = os.path.join(log_dir, 'MTML/log.csv')
task_name_list = []
task_f1_list = []
#changed for task_name in task_names to for task_name in task_names:
for task_name in task_names:
task_name_list+=[task_name+'-train-micro-f1', task_name+'-train-macro-f1',
task_name+'-dev-micro-f1', task_name+'-dev-macro-f1']
task_f1_list | |
the reference to the literature
"""
dir = os.environ["XUVTOP"]
ioneqdir = os.path.join(dir,'ioneq')
ioneqInfo = util.findFileTypes(ioneqdir, type = '*.ioneq')
ioneqNames = ioneqInfo['fileName']
shortNames = []
for aname in ioneqNames:
shortNames.append(os.path.splitext(aname)[0])
ioneqFullNames = ioneqInfo['fullName']
if ioneqName not in shortNames:
# the user will select an ioneq file
gIoneq = chgui.gui.selectorDialog(ioneqNames,label='Select one', multiChoice=False)
gIoneq_idx = gIoneq.selectedIndex
if len(gIoneq_idx) > 0:
ioneqFileName = ioneqFullNames[gIoneq_idx[0]]
else:
print(' no file chosen')
return
else:
index = shortNames.index(ioneqName)
ioneqFileName = ioneqFullNames[index]
#
with open(ioneqFileName,'r') as input:
s1 = input.readlines()
ntemp,nele = s1[0].split()
if verbose:
print((' ntemp, nele = %5i %5i'%(ntemp, nele)))
nTemperature = int(ntemp)
nElement = int(nele)
#
header_linet = FortranRecordReader(str(nTemperature)+'f6.2')
ioneqTemperature = header_linet.read(s1[1])
ioneqTemperature = np.asarray(ioneqTemperature[:],np.float64)
ioneqTemperature = 10.**ioneqTemperature
nlines = 0
idx = -1
while idx < 0:
aline = s1[nlines][0:5]
idx = aline.find('-1')
nlines += 1
nlines -= 1
#
#
header_lineq = FortranRecordReader('2i3,'+str(nTemperature)+'e10.2')
#
ioneqAll = np.zeros((nElement,nElement+1,nTemperature),np.float64)
for iline in range(2,nlines):
out = header_lineq.read(s1[iline])
iz = out[0]
ion = out[1]
ioneqAll[iz-1,ion-1].put(list(range(nTemperature)),np.asarray(out[2:],np.float64))
ioneqAll = np.where(ioneqAll > minIoneq, ioneqAll, 0.)
ioneqRef = []
for one in s1[nlines+1:]:
ioneqRef.append(one[:-1]) # gets rid of the \n
del s1
return {'ioneqname':ioneqName,'ioneqAll':ioneqAll,'ioneqTemperature':ioneqTemperature,'ioneqRef':ioneqRef}
def ipRead(verbose=False):
    """
    Read the CHIANTI ionization potential file.

    Parameters
    ----------
    verbose : `bool`
        If True, print the highest atomic number found in the file.

    Returns
    -------
    ip : array-like
        Ionization potential (in eV), indexed as [Z-1, ion-1].
    """
    topdir = os.environ["XUVTOP"]
    ipname = os.path.join(topdir, 'ip', 'chianti.ip')
    # context manager guarantees the file is closed even on error
    with open(ipname) as ipfile:
        data = ipfile.readlines()
    # the data section consists of lines with at least two fields (z, ion, ip);
    # it is terminated by a line with fewer fields (the '-1' marker)
    nip = 0
    maxz = 0
    for line in data:
        fields = line.split()
        if len(fields) < 2:
            break
        nip += 1
        maxz = max(maxz, int(fields[0]))
    if verbose:
        print((' maxz = %5i' % (maxz)))
    ip = np.zeros((maxz, maxz), np.float64)
    for aline in data[0:nip]:
        s2 = aline.split()
        iz = int(s2[0])
        ion = int(s2[1])
        ip[iz - 1, ion - 1] = float(s2[2])
    # convert from inverse cm to eV
    return ip * const.invCm2Ev
def masterListRead():
    """
    Read a CHIANTI masterlist file.

    Returns
    -------
    masterlist : `list`
        All ion names in the CHIANTI database, with any trailing
        ';'-delimited comment stripped from each line.
    """
    # avoid shadowing the builtins `dir` and `input`, close the file via a
    # context manager, and strip whitespace robustly (the old `line[:-1]`
    # dropped the last character of a final line lacking a newline)
    topdir = os.environ["XUVTOP"]
    fname = os.path.join(topdir, 'masterlist', 'masterlist.ions')
    with open(fname, 'r') as mlfile:
        return [line.split(';')[0].strip() for line in mlfile]
def masterListInfo(force=False, verbose=False):
    """
    Get information about ions in the CHIANTI masterlist.

    Parameters
    ----------
    force : `bool`
        If True, rebuild the cached pickle even if it already exists.
    verbose : `bool`
        If True, print each ion name while building.

    Returns
    -------
    masterListInfo : `dict`
        Per-ion {'wmin', 'wmax', 'tmin', 'tmax'}: minimum and maximum
        wavelengths in the wgfa file, and minimum and maximum temperatures
        for which the ionization balance is nonzero.

    Notes
    -----
    This function speeds up multi-ion spectral calculations.
    The information is stored in a pickled file 'masterlist_ions.pkl'.
    If the file is not found, one will be created when possible.
    """
    topdir = os.environ["XUVTOP"]
    infoPath = os.path.join(topdir, 'masterlist')
    infoName = os.path.join(topdir, 'masterlist', 'masterlist_ions.pkl')
    makeNew = force or not os.path.isfile(infoName)
    if not makeNew:
        # a cached pickle exists and a rebuild was not forced.
        # BUG FIX: the handle was previously never closed (`pfile.close`
        # without parentheses); `with` closes it reliably.
        with open(infoName, 'rb') as pfile:
            masterListInfo = pickle.load(pfile)
    elif os.access(infoPath, os.W_OK):
        # the file does not exist but we have write access and will create it
        defaults = defaultsRead()
        print((' defaults = %s' % (str(defaults))))
        ioneqName = defaults['ioneqfile']
        ioneq = ioneqRead(ioneqName=ioneqName)
        masterList = masterListRead()
        masterListInfo = {}
        haveZ = [0] * 31
        haveStage = np.zeros((31, 31), np.int64)
        haveDielectronic = np.zeros((31, 31), np.int64)
        for one in masterList:
            if verbose:
                print((' ion = %s' % (one)))
            ionInfo = convertName(one)
            z = ionInfo['Z']
            stage = ionInfo['Ion']
            haveZ[z] = 1
            dielectronic = ionInfo['Dielectronic']
            if dielectronic:
                haveDielectronic[z, stage] = 1
            else:
                haveStage[z, stage] = 1
            # temperature range over which the ionization balance is nonzero
            thisIoneq = ioneq['ioneqAll'][z - 1, stage - 1 + dielectronic]
            good = thisIoneq > 0.
            goodTemp = ioneq['ioneqTemperature'][good]
            tmin = float(goodTemp.min())
            tmax = float(goodTemp.max())
            # temperature of the ionization-equilibrium maximum
            vgood = thisIoneq == thisIoneq.max()
            vgoodTemp = float(ioneq['ioneqTemperature'][vgood][0])
            wgfa = wgfaRead(one)
            # two-photon transitions are denoted by a wavelength of zero (0.)
            nZeros = wgfa['wvl'].count(0.)
            while nZeros > 0:
                wgfa['wvl'].remove(0.)
                nZeros = wgfa['wvl'].count(0.)
            # unobserved lines are denoted with a negative wavelength
            wvl = np.abs(np.asarray(wgfa['wvl'], np.float64))
            wmin = float(wvl.min())
            wmax = float(wvl.max())
            masterListInfo[one] = {'wmin': wmin, 'wmax': wmax, 'tmin': tmin,
                                   'tmax': tmax, 'tIoneqMax': vgoodTemp}
        masterListInfo['haveZ'] = haveZ
        masterListInfo['haveStage'] = haveStage
        masterListInfo['haveDielectronic'] = haveDielectronic
        # now do the bare ions from H thru Zn
        # these are only involved in the continuum
        for iz in range(1, 31):
            ions = zion2name(iz, iz + 1)
            thisIoneq = ioneq['ioneqAll'][iz - 1, iz]
            good = thisIoneq > 0.
            goodTemp = ioneq['ioneqTemperature'][good]
            tmin = float(goodTemp.min())
            tmax = float(goodTemp.max())
            masterListInfo[ions] = {'wmin': 0., 'wmax': 1.e+30,
                                    'tmin': tmin, 'tmax': tmax}
        # BUG FIX: the pickle handle was previously never closed
        with open(infoName, 'wb') as pfile:
            pickle.dump(masterListInfo, pfile)
    else:
        # the file does not exist and we do NOT have write access to create it,
        # so build an inefficient, catch-all version in memory only.
        # BUG FIXES: `masterList` was previously undefined in this branch; the
        # branch also tried to pickle into a directory it has no write access
        # to (which would raise) and then discarded everything it computed by
        # returning {'noInfo': 'none'}. Return the in-memory catch-all instead.
        masterList = masterListRead()
        masterListInfo = {}
        for one in masterList:
            masterListInfo[one] = {'wmin': 0., 'wmax': 1.e+30,
                                   'tmin': 1.e+4, 'tmax': 1.e+9}
        # now do the bare ions from H thru Zn
        # these are only involved in the continuum
        for iz in range(1, 31):
            ions = zion2name(iz, iz + 1)
            masterListInfo[ions] = {'wmin': 0., 'wmax': 1.e+30,
                                    'tmin': 1.e+4, 'tmax': 1.e+9}
    return masterListInfo
def photoxRead(ions):
    r"""
    Read CHIANTI photoionization .photox files.

    Parameters
    ----------
    ions : `str`
        Ion name, e.g. 'c_5' for C V.

    Returns
    -------
    {'lvl1', 'lvl2', 'energy', 'cross', 'ref'} : `dict`
        Energy (in Rydbergs) and cross section (in :math:`\mathrm{cm}^{-2}`)

    Notes
    -----
    The photox files are not in any released version of the CHIANTI database.
    """
    zion = util.convertName(ions)
    if zion['Z'] < zion['Ion']:
        print((' this is a bare nucleus that has no ionization rate'))
        return
    fname = util.ion2filename(ions)
    paramname = fname + '.photox'
    # BUG FIX: the handle was previously never closed (`input.close` without
    # parentheses, and `input` shadowed the builtin); `with` closes it.
    with open(paramname, 'r') as photoxfile:
        lines = photoxfile.readlines()
    dataEnd = 0
    lvl1 = []
    lvl2 = []
    energy = []
    cross = []
    icounter = 0
    while not dataEnd:
        # each record is a pair of lines -- energies then cross sections --
        # both prefixed by the same (lvl1, lvl2) level indices
        lvl11 = int(lines[icounter][:8])
        lvl21 = int(lines[icounter][8:15])
        ener = lines[icounter][15:].split()
        energy1 = np.asarray(ener, np.float64)
        icounter += 1
        irsl = int(lines[icounter][:8])
        ind0 = int(lines[icounter][8:15])
        if irsl != lvl11 or ind0 != lvl21:
            # this only happens if the file was written incorrectly
            print((' lvl1, lvl2 = %7i %7i' % (lvl11, lvl21)))
            print((' irsl, indo = %7i %7i' % (irsl, ind0)))
            return
        crs = lines[icounter][15:].split()
        cross1 = np.asarray(crs, np.float64)
        lvl1.append(lvl11)
        lvl2.append(lvl21)
        energy.append(energy1)
        cross.append(cross1)
        icounter += 1
        # a line containing '-1' terminates the data section
        dataEnd = lines[icounter].count('-1')
    ref = lines[icounter + 1:-1]
    cross = np.asarray(cross, np.float64)
    energy = np.asarray(energy, np.float64)
    return {'lvl1': lvl1, 'lvl2': lvl2, 'energy': energy, 'cross': cross, 'ref': ref}
def rrRead(ions, filename=None):
    """
    Read CHIANTI radiative recombination .rrparams files.

    Parameters
    ----------
    ions : `str`
        Ion name, e.g. 'fe_14'; used to locate the file when `filename` is
        not given.
    filename : `str`, optional
        Explicit path to a .rrparams file, overriding `ions`.

    Returns
    -------
    {'rrtype', 'params', 'ref'} : `dict`
        {'rrtype': -1} when the file does not exist; `None` (with a printed
        message) for an unrecognized rrtype.
    """
    if filename:
        paramname = filename
    else:
        paramname = util.ion2filename(ions) + '.rrparams'
    if not os.path.isfile(paramname):
        return {'rrtype': -1}
    with open(paramname, 'r') as rrfile:
        lines = rrfile.readlines()
    rrtype = int(lines[0])
    ref = lines[3:-1]
    # Fortran formats of the single parameter line, keyed by fit type:
    # types 1 and 2 are Badnell-type fits, type 3 is a Shull-type fit.
    # (collapses three previously duplicated branches)
    formats = {1: '3i5,e12.4,f10.5,2e12.4',
               2: '3i5,e12.4,f10.5,2e11.4,f10.5,e12.4',
               3: '2i5,2e12.4'}
    if rrtype not in formats:
        print((' for ion %5s unknown RR type = %5i' % (ions, rrtype)))
        return None
    params = FortranRecordReader(formats[rrtype]).read(lines[1])
    return {'rrtype': rrtype, 'params': params, 'ref': ref}
def rrLossRead():
    ''' Read the Mao 2017 radiative-recombination loss parameters [12]_.

    Returns
    -------
    `dict`
        Parallel lists keyed by 'iso', 'z', 'a0', 'b0', 'c0', 'a1', 'b1',
        'a2', 'b2' and 'mdp', one entry per data line of the file.

    References
    ----------
    .. [12] Mao J., Kaastra J., Badnell N.R., `2017 Astron. Astrophys. 599, A10
        <http://adsabs.harvard.edu/abs/2017A%26A...599A..10M>`_
    '''
    filename = os.path.join(os.environ['XUVTOP'], 'continuum', 'rrloss_mao_2017_pars.dat')
    with open(filename, 'r') as inpt:
        lines = inpt.readlines()
    keys = ('iso', 'z', 'a0', 'b0', 'c0', 'a1', 'b1', 'a2', 'b2', 'mdp')
    data = {key: [] for key in keys}
    for aline in lines:
        # split each line only once (previously each line was split 10 times)
        fields = aline.split()
        if not fields:
            # tolerate blank lines (previously an IndexError)
            continue
        # first two columns are integers, the remaining eight are floats
        data['iso'].append(int(fields[0]))
        data['z'].append(int(fields[1]))
        for key, field in zip(keys[2:], fields[2:]):
            data[key].append(float(field))
    return data
def scupsRead(ions, filename=None, verbose=False):
'''
Read the new format v8 scups file containing the scaled temperature and upsilons from [8]_.
Parameters
----------
ions : `str`
Ion, e.g. 'c_5' for C V
filename : `str`, optional
Custom filename, will override that specified by `ions`
verbose : `bool`
'''
#
if filename:
scupsFileName = filename
bname = os.path.basename(scupsFileName)
ions = bname.split('.')[0]
else:
fname = util.ion2filename(ions)
scupsFileName = fname+'.scups'
if not os.path.isfile(scupsFileName):
print((' elvlc file does not exist: %s'%(scupsFileName)))
| |
among_var == 60:
if not self.slice_from(u"na\u0161"):
return False
elif among_var == 61:
if not self.slice_from(u"ja\u0161"):
return False
elif among_var == 62:
if not self.slice_from(u"ka\u0161"):
return False
elif among_var == 63:
if not self.slice_from(u"ba\u0161"):
return False
elif among_var == 64:
if not self.slice_from(u"ga\u0161"):
return False
elif among_var == 65:
if not self.slice_from(u"va\u0161"):
return False
elif among_var == 66:
if not self.slice_from(u"e\u0161"):
return False
elif among_var == 67:
if not self.slice_from(u"i\u0161"):
return False
elif among_var == 68:
if not self.slice_from(u"ikat"):
return False
elif among_var == 69:
if not self.slice_from(u"lat"):
return False
elif among_var == 70:
if not self.slice_from(u"et"):
return False
elif among_var == 71:
if not self.slice_from(u"est"):
return False
elif among_var == 72:
if not self.slice_from(u"ist"):
return False
elif among_var == 73:
if not self.slice_from(u"kst"):
return False
elif among_var == 74:
if not self.slice_from(u"ost"):
return False
elif among_var == 75:
if not self.slice_from(u"i\u0161t"):
return False
elif among_var == 76:
if not self.slice_from(u"ova"):
return False
elif among_var == 77:
if not self.slice_from(u"av"):
return False
elif among_var == 78:
if not self.slice_from(u"ev"):
return False
elif among_var == 79:
if not self.slice_from(u"iv"):
return False
elif among_var == 80:
if not self.slice_from(u"ov"):
return False
elif among_var == 81:
if not self.slice_from(u"mov"):
return False
elif among_var == 82:
if not self.slice_from(u"lov"):
return False
elif among_var == 83:
if not self.slice_from(u"el"):
return False
elif among_var == 84:
if not self.slice_from(u"anj"):
return False
elif among_var == 85:
if not self.slice_from(u"enj"):
return False
elif among_var == 86:
if not self.slice_from(u"\u0161nj"):
return False
elif among_var == 87:
if not self.slice_from(u"en"):
return False
elif among_var == 88:
if not self.slice_from(u"\u0161n"):
return False
elif among_var == 89:
if not self.slice_from(u"\u010Din"):
return False
elif among_var == 90:
if not self.slice_from(u"ro\u0161i"):
return False
elif among_var == 91:
if not self.slice_from(u"o\u0161"):
return False
elif among_var == 92:
if not self.slice_from(u"evit"):
return False
elif among_var == 93:
if not self.slice_from(u"ovit"):
return False
elif among_var == 94:
if not self.slice_from(u"ast"):
return False
elif among_var == 95:
if not self.slice_from(u"k"):
return False
elif among_var == 96:
if not self.slice_from(u"eva"):
return False
elif among_var == 97:
if not self.slice_from(u"ava"):
return False
elif among_var == 98:
if not self.slice_from(u"iva"):
return False
elif among_var == 99:
if not self.slice_from(u"uva"):
return False
elif among_var == 100:
if not self.slice_from(u"ir"):
return False
elif among_var == 101:
if not self.slice_from(u"a\u010D"):
return False
elif among_var == 102:
if not self.slice_from(u"a\u010Da"):
return False
elif among_var == 103:
if not self.slice_from(u"ni"):
return False
elif among_var == 104:
if not self.slice_from(u"a"):
return False
elif among_var == 105:
if not self.slice_from(u"ur"):
return False
elif among_var == 106:
if not self.slice_from(u"astaj"):
return False
elif among_var == 107:
if not self.slice_from(u"istaj"):
return False
elif among_var == 108:
if not self.slice_from(u"ostaj"):
return False
elif among_var == 109:
if not self.slice_from(u"aj"):
return False
elif among_var == 110:
if not self.slice_from(u"asta"):
return False
elif among_var == 111:
if not self.slice_from(u"ista"):
return False
elif among_var == 112:
if not self.slice_from(u"osta"):
return False
elif among_var == 113:
if not self.slice_from(u"ta"):
return False
elif among_var == 114:
if not self.slice_from(u"inj"):
return False
elif among_var == 115:
if not self.slice_from(u"as"):
return False
elif among_var == 116:
if not self.slice_from(u"i"):
return False
elif among_var == 117:
if not self.slice_from(u"lu\u010D"):
return False
elif among_var == 118:
if not self.slice_from(u"jeti"):
return False
elif among_var == 119:
if not self.slice_from(u"e"):
return False
elif among_var == 120:
if not self.slice_from(u"at"):
return False
elif among_var == 121:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"luc"):
return False
elif among_var == 122:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"snj"):
return False
elif among_var == 123:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"os"):
return False
elif among_var == 124:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ac"):
return False
elif among_var == 125:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ec"):
return False
elif among_var == 126:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"uc"):
return False
elif among_var == 127:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"rosi"):
return False
elif among_var == 128:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"aca"):
return False
elif among_var == 129:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"jas"):
return False
elif among_var == 130:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"tas"):
return False
elif among_var == 131:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"gas"):
return False
elif among_var == 132:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"nas"):
return False
elif among_var == 133:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"kas"):
return False
elif among_var == 134:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"vas"):
return False
elif among_var == 135:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"bas"):
return False
elif among_var == 136:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"as"):
return False
elif among_var == 137:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"cin"):
return False
elif among_var == 138:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"astaj"):
return False
elif among_var == 139:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"istaj"):
return False
elif among_var == 140:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ostaj"):
return False
elif among_var == 141:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"asta"):
return False
elif among_var == 142:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ista"):
return False
elif among_var == 143:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"osta"):
return False
elif among_var == 144:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ava"):
return False
elif among_var == 145:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"eva"):
return False
elif among_var == 146:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"iva"):
return False
elif among_var == 147:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"uva"):
return False
elif among_var == 148:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ova"):
return False
elif among_var == 149:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"jeti"):
return False
elif among_var == 150:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"inj"):
return False
elif among_var == 151:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ist"):
return False
elif among_var == 152:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"es"):
return False
elif among_var == 153:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"et"):
return False
elif among_var == 154:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"is"):
return False
elif among_var == 155:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ir"):
return False
elif among_var == 156:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ur"):
return False
elif among_var == 157:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"uj"):
return False
elif among_var == 158:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ni"):
return False
elif among_var == 159:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"sn"):
return False
elif among_var == 160:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"ta"):
return False
elif among_var == 161:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"a"):
return False
elif among_var == 162:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"i"):
return False
elif among_var == 163:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"e"):
return False
else:
if not self.B_no_diacritics:
return False
if not self.slice_from(u"n"):
return False
return True
    def __r_Step_3(self):
        # Stemming step 3 (generated Snowball code): match one of the
        # suffixes in SerbianStemmer.a_3 working backwards from the cursor
        # and, subject to the __r_R1 region check, delete it (replace with
        # the empty string). Returns False when nothing matched or a
        # sub-step failed.
        self.ket = self.cursor
        if self.find_among_b(SerbianStemmer.a_3) == 0:
            return False
        self.bra = self.cursor
        if not self.__r_R1():
            return False
        if not self.slice_from(u""):
            return False
        return True
    def _stem(self):
        # Main stemming entry point (generated Snowball code).
        # Pipeline: cyrillic-to-latin transliteration, prelude, region
        # marking, then backward-mode suffix removal.
        self.__r_cyr_to_lat()
        self.__r_prelude()
        self.__r_mark_regions()
        # switch to backward mode: the cursor now walks from the word's end
        self.limit_backward = self.cursor
        self.cursor = self.limit
        # v_4/v_5/v_6 save cursor offsets so each step can be undone
        v_4 = self.limit - self.cursor
        self.__r_Step_1()
        self.cursor = self.limit - v_4
        v_5 = self.limit - self.cursor
        # try Step_2; if it fails, restore the cursor and try Step_3 instead
        # (the labN exceptions emulate Snowball's labelled-jump control flow)
        try:
            try:
                v_6 = self.limit - self.cursor
                try:
                    if not self.__r_Step_2():
                        raise lab2()
                    raise lab1()
                except lab2: pass
                self.cursor = self.limit - v_6
                if not self.__r_Step_3():
                    raise lab0()
            except lab1: pass
        except lab0: pass
        self.cursor = self.limit - v_5
        self.cursor = self.limit_backward
        return True
# Marker exception classes used by the generated Snowball stemmer code above
# to emulate Snowball's labelled jumps: each is raised and immediately caught
# within a single method to break out of one specific nesting level. They
# carry no data and are never propagated to callers.
class lab0(BaseException): pass
class lab1(BaseException): pass
class lab2(BaseException): pass
class lab3(BaseException): pass
class lab4(BaseException): pass
class lab5(BaseException): pass
class lab6(BaseException): pass
class lab7(BaseException): pass
class lab8(BaseException): pass
class lab9(BaseException): pass
class | |
# <gh_stars>0  -- repository-metadata artifact; commented out because the bare tag is not valid Python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest.mock import Mock, patch
from azext_aks_preview.__init__ import register_aks_preview_resource_type
from azext_aks_preview._client_factory import CUSTOM_MGMT_AKS_PREVIEW
from azext_aks_preview._consts import CONST_WORKLOAD_RUNTIME_OCI_CONTAINER
from azext_aks_preview.agentpool_decorator import (
AKSPreviewAgentPoolAddDecorator,
AKSPreviewAgentPoolContext,
AKSPreviewAgentPoolModels,
AKSPreviewAgentPoolUpdateDecorator,
)
from azext_aks_preview.tests.latest.utils import get_test_data_file_path
from azure.cli.command_modules.acs._consts import (
CONST_DEFAULT_NODE_OS_TYPE,
CONST_DEFAULT_NODE_VM_SIZE,
CONST_NODEPOOL_MODE_SYSTEM,
CONST_NODEPOOL_MODE_USER,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_VIRTUAL_MACHINE_SCALE_SETS,
AgentPoolDecoratorMode,
DecoratorMode,
)
from azure.cli.command_modules.acs.agentpool_decorator import AKSAgentPoolParamDict
from azure.cli.command_modules.acs.tests.latest.mocks import MockCLI, MockClient, MockCmd
from azure.cli.core.azclierror import CLIInternalError, InvalidArgumentValueError, MutuallyExclusiveArgumentError
class AKSPreviewAgentPoolContextCommonTestCase(unittest.TestCase):
def _remove_defaults_in_agentpool(self, agentpool):
self.defaults_in_agentpool = {}
for attr_name, attr_value in vars(agentpool).items():
if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None:
self.defaults_in_agentpool[attr_name] = attr_value
setattr(agentpool, attr_name, None)
return agentpool
def _restore_defaults_in_agentpool(self, agentpool):
for key, value in self.defaults_in_agentpool.items():
if getattr(agentpool, key, None) is None:
setattr(agentpool, key, value)
return agentpool
def create_initialized_agentpool_instance(
self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs
):
"""Helper function to create a properly initialized agentpool instance.
:return: the AgentPool object
"""
if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name)
else:
agentpool = self.models.UnifiedAgentPoolModel()
agentpool.name = nodepool_name
# remove defaults
if remove_defaults:
self._remove_defaults_in_agentpool(agentpool)
# set properties
for key, value in kwargs.items():
setattr(agentpool, key, value)
# resote defaults
if restore_defaults:
self._restore_defaults_in_agentpool(agentpool)
return agentpool
def common_get_zones(self):
# default
ctx_1 = AKSPreviewAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"zone": None, "node_zones": "test_node_zones"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_zones(), "test_node_zones")
agentpool_1 = self.create_initialized_agentpool_instance(
availability_zones=["test_mc_zones1", "test_mc_zones2"]
)
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(ctx_1.get_zones(), ["test_mc_zones1", "test_mc_zones2"])
# custom value
ctx_2 = AKSPreviewAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"zones": "test_zones", "node_zones": "test_node_zones"}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_2.get_zones(), "test_zones")
def common_get_host_group_id(self):
# default
ctx_1 = AKSPreviewAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"host_group_id": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_host_group_id(), None)
agentpool_1 = self.create_initialized_agentpool_instance(host_group_id="test_host_group_id")
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(ctx_1.get_host_group_id(), "test_host_group_id")
def common_get_crg_id(self):
# default
ctx_1 = AKSPreviewAgentPoolContext(
self.cmd,
AKSAgentPoolParamDict({"crg_id": None}),
self.models,
DecoratorMode.CREATE,
self.agentpool_decorator_mode,
)
self.assertEqual(ctx_1.get_crg_id(), None)
agentpool_1 = self.create_initialized_agentpool_instance(
capacity_reservation_group_id="test_capacity_reservation_group_id"
)
ctx_1.attach_agentpool(agentpool_1)
self.assertEqual(ctx_1.get_crg_id(), "test_capacity_reservation_group_id")
def common_get_message_of_the_day(self):
    """Exercise get_message_of_the_day: default, agentpool-attached, file-backed and bad-path cases."""
    # default: no raw parameter -> None
    ctx_1 = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"message_of_the_day": None}),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    self.assertEqual(ctx_1.get_message_of_the_day(), None)
    # a value already present on the attached agentpool is returned as-is
    agentpool_1 = self.create_initialized_agentpool_instance(message_of_the_day="test_message_of_the_day")
    ctx_1.attach_agentpool(agentpool_1)
    self.assertEqual(ctx_1.get_message_of_the_day(), "test_message_of_the_day")
    # custom: raw parameter points at an existing test-data file (motd.txt)
    ctx_2 = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"message_of_the_day": get_test_data_file_path("motd.txt")}),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    self.assertEqual(
        ctx_2.get_message_of_the_day(),
        # NOTE(review): the expected literal below is garbled ("<KEY") — presumably the
        # encoded contents of motd.txt; restore from version control and confirm.
        "<KEY>
    )
    # custom: a non-existent path must raise InvalidArgumentValueError
    ctx_3 = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"message_of_the_day": "fake-path"}),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    with self.assertRaises(InvalidArgumentValueError):
        ctx_3.get_message_of_the_day()
def common_get_gpu_instance_profile(self):
    """get_gpu_instance_profile: None by default, attached-agentpool value otherwise."""
    ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"gpu_instance_profile": None}),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    self.assertEqual(ctx.get_gpu_instance_profile(), None)
    # A value stored on the attached agentpool is surfaced by the getter.
    pool = self.create_initialized_agentpool_instance(gpu_instance_profile="test_gpu_instance_profile")
    ctx.attach_agentpool(pool)
    self.assertEqual(ctx.get_gpu_instance_profile(), "test_gpu_instance_profile")
def common_get_workload_runtime(self):
    """get_workload_runtime: OCI-container constant by default, agentpool value when attached."""
    ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"workload_runtime": None}),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    # With no raw parameter the getter falls back to the OCI container runtime constant.
    self.assertEqual(ctx.get_workload_runtime(), CONST_WORKLOAD_RUNTIME_OCI_CONTAINER)
    pool = self.create_initialized_agentpool_instance(workload_runtime="test_workload_runtime")
    ctx.attach_agentpool(pool)
    self.assertEqual(ctx.get_workload_runtime(), "test_workload_runtime")
def common_get_enable_custom_ca_trust(self):
    """get_enable_custom_ca_trust across CREATE/UPDATE modes plus the conflicting-flag case."""
    # CREATE mode: the attached agentpool's value overrides the raw parameter.
    create_ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"enable_custom_ca_trust": True}),
        self.models,
        DecoratorMode.CREATE,
        self.agentpool_decorator_mode,
    )
    self.assertEqual(create_ctx.get_enable_custom_ca_trust(), True)
    create_ctx.attach_agentpool(self.create_initialized_agentpool_instance(enable_custom_ca_trust=False))
    self.assertEqual(create_ctx.get_enable_custom_ca_trust(), False)
    # UPDATE mode: the raw parameter wins even when the agentpool disagrees.
    update_ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"enable_custom_ca_trust": True}),
        self.models,
        DecoratorMode.UPDATE,
        self.agentpool_decorator_mode,
    )
    self.assertEqual(update_ctx.get_enable_custom_ca_trust(), True)
    update_ctx.attach_agentpool(self.create_initialized_agentpool_instance(enable_custom_ca_trust=False))
    self.assertEqual(update_ctx.get_enable_custom_ca_trust(), True)
    # Enabling and disabling at the same time is rejected.
    conflict_ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"enable_custom_ca_trust": True, "disable_custom_ca_trust": True}),
        self.models,
        DecoratorMode.UPDATE,
        self.agentpool_decorator_mode,
    )
    with self.assertRaises(MutuallyExclusiveArgumentError):
        conflict_ctx.get_enable_custom_ca_trust()
def common_get_disable_custom_ca_trust(self):
    """get_disable_custom_ca_trust: raw flag wins in UPDATE mode; conflicting flags raise."""
    ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"disable_custom_ca_trust": True}),
        self.models,
        DecoratorMode.UPDATE,
        self.agentpool_decorator_mode,
    )
    self.assertEqual(ctx.get_disable_custom_ca_trust(), True)
    # The attached agentpool's enable flag does not flip the answer.
    ctx.attach_agentpool(self.create_initialized_agentpool_instance(enable_custom_ca_trust=True))
    self.assertEqual(ctx.get_disable_custom_ca_trust(), True)
    # Asking for enable and disable together is a user error.
    conflict_ctx = AKSPreviewAgentPoolContext(
        self.cmd,
        AKSAgentPoolParamDict({"enable_custom_ca_trust": True, "disable_custom_ca_trust": True}),
        self.models,
        DecoratorMode.UPDATE,
        self.agentpool_decorator_mode,
    )
    with self.assertRaises(MutuallyExclusiveArgumentError):
        conflict_ctx.get_disable_custom_ca_trust()
class AKSPreviewAgentPoolContextStandaloneModeTestCase(AKSPreviewAgentPoolContextCommonTestCase):
    """Run the shared context test bodies in STANDALONE agentpool decorator mode."""

    def setUp(self):
        # manually register CUSTOM_MGMT_AKS_PREVIEW
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.resource_type = CUSTOM_MGMT_AKS_PREVIEW
        self.agentpool_decorator_mode = AgentPoolDecoratorMode.STANDALONE
        self.models = AKSPreviewAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)

    # Each test_* method simply dispatches to the corresponding common_* body
    # inherited from the base class, so both modes exercise identical logic.
    def test_get_zones(self):
        self.common_get_zones()

    def test_get_host_group_id(self):
        self.common_get_host_group_id()

    def test_get_crg_id(self):
        self.common_get_crg_id()

    def test_get_message_of_the_day(self):
        self.common_get_message_of_the_day()

    def test_get_gpu_instance_profile(self):
        self.common_get_gpu_instance_profile()

    def test_get_workload_runtime(self):
        self.common_get_workload_runtime()

    def test_get_enable_custom_ca_trust(self):
        self.common_get_enable_custom_ca_trust()

    def test_get_disable_custom_ca_trust(self):
        self.common_get_disable_custom_ca_trust()
class AKSPreviewAgentPoolContextManagedClusterModeTestCase(AKSPreviewAgentPoolContextCommonTestCase):
    """Run the shared context test bodies in MANAGED_CLUSTER agentpool decorator mode."""

    def setUp(self):
        # manually register CUSTOM_MGMT_AKS_PREVIEW
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.resource_type = CUSTOM_MGMT_AKS_PREVIEW
        self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER
        self.models = AKSPreviewAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)

    # Each test_* method simply dispatches to the corresponding common_* body
    # inherited from the base class, so both modes exercise identical logic.
    def test_get_zones(self):
        self.common_get_zones()

    def test_get_host_group_id(self):
        self.common_get_host_group_id()

    def test_get_crg_id(self):
        self.common_get_crg_id()

    def test_get_message_of_the_day(self):
        self.common_get_message_of_the_day()

    def test_get_gpu_instance_profile(self):
        self.common_get_gpu_instance_profile()

    def test_get_workload_runtime(self):
        self.common_get_workload_runtime()

    def test_get_enable_custom_ca_trust(self):
        self.common_get_enable_custom_ca_trust()

    def test_get_disable_custom_ca_trust(self):
        self.common_get_disable_custom_ca_trust()
class AKSPreviewAgentPoolAddDecoratorCommonTestCase(unittest.TestCase):
    """Shared test bodies for AKSPreviewAgentPoolAddDecorator.

    Mode-specific subclasses supply setUp (cmd, client, models, resource type and
    decorator mode) and dispatch to the common_* methods defined here.
    """

    def _remove_defaults_in_agentpool(self, agentpool):
        # Record and blank out every populated, non-private attribute except "name",
        # stashing the removed values in self.defaults_in_agentpool for later restore.
        self.defaults_in_agentpool = {}
        for attr_name, attr_value in vars(agentpool).items():
            if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None:
                self.defaults_in_agentpool[attr_name] = attr_value
                setattr(agentpool, attr_name, None)
        return agentpool

    def _restore_defaults_in_agentpool(self, agentpool):
        # Put the recorded default values back onto attributes that are still None.
        for key, value in self.defaults_in_agentpool.items():
            if getattr(agentpool, key, None) is None:
                setattr(agentpool, key, value)
        return agentpool

    def create_initialized_agentpool_instance(
        self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs
    ):
        """Helper function to create a properly initialized agentpool instance.

        :return: the AgentPool object
        """
        # In MANAGED_CLUSTER mode the model takes the name via its constructor;
        # in standalone mode it is assigned as a plain attribute afterwards.
        if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
            agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name)
        else:
            agentpool = self.models.UnifiedAgentPoolModel()
            agentpool.name = nodepool_name
        # remove defaults
        if remove_defaults:
            self._remove_defaults_in_agentpool(agentpool)
        # set properties
        for key, value in kwargs.items():
            setattr(agentpool, key, value)
        # restore defaults
        if restore_defaults:
            self._restore_defaults_in_agentpool(agentpool)
        return agentpool

    def common_set_up_preview_vm_properties(self):
        """set_up_preview_vm_properties maps host_group_id and crg_id raw params onto the agentpool."""
        dec_1 = AKSPreviewAgentPoolAddDecorator(
            self.cmd,
            self.client,
            {"host_group_id": "test_host_group_id", "crg_id": "test_crg_id"},
            self.resource_type,
            self.agentpool_decorator_mode,
        )
        # fail on passing the wrong agentpool object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_preview_vm_properties(None)
        agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
        dec_1.context.attach_agentpool(agentpool_1)
        dec_agentpool_1 = dec_1.set_up_preview_vm_properties(agentpool_1)
        dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
        # crg_id lands on the capacity_reservation_group_id model field
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            host_group_id="test_host_group_id", capacity_reservation_group_id="test_crg_id"
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)

    def common_set_up_motd(self):
        """set_up_motd loads the message-of-the-day file given in raw params onto the agentpool."""
        dec_1 = AKSPreviewAgentPoolAddDecorator(
            self.cmd,
            self.client,
            {"message_of_the_day": get_test_data_file_path("motd.txt")},
            self.resource_type,
            self.agentpool_decorator_mode,
        )
        # fail on passing the wrong agentpool object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_motd(None)
        agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
        dec_1.context.attach_agentpool(agentpool_1)
        dec_agentpool_1 = dec_1.set_up_motd(agentpool_1)
        dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            # NOTE(review): expected literal below is garbled ("<KEY") — presumably the
            # encoded contents of motd.txt; restore from version control and confirm.
            message_of_the_day="<KEY>
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)

    def common_set_up_gpu_propertes(self):
        """set_up_gpu_properties maps gpu_instance_profile and workload_runtime onto the agentpool.

        NOTE(review): "propertes" is a typo for "properties"; the name is kept because
        both mode subclasses call this method by its current name.
        """
        dec_1 = AKSPreviewAgentPoolAddDecorator(
            self.cmd,
            self.client,
            {"gpu_instance_profile": "test_gpu_instance_profile", "workload_runtime": "test_workload_runtime"},
            self.resource_type,
            self.agentpool_decorator_mode,
        )
        # fail on passing the wrong agentpool object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_gpu_properties(None)
        agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
        dec_1.context.attach_agentpool(agentpool_1)
        dec_agentpool_1 = dec_1.set_up_gpu_properties(agentpool_1)
        dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            gpu_instance_profile="test_gpu_instance_profile",
            workload_runtime="test_workload_runtime",
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)

    def common_set_up_custom_ca_trust(self):
        """set_up_custom_ca_trust maps the enable_custom_ca_trust raw flag onto the agentpool."""
        dec_1 = AKSPreviewAgentPoolAddDecorator(
            self.cmd,
            self.client,
            {"enable_custom_ca_trust": True},
            self.resource_type,
            self.agentpool_decorator_mode,
        )
        # fail on passing the wrong agentpool object
        with self.assertRaises(CLIInternalError):
            dec_1.set_up_custom_ca_trust(None)
        agentpool_1 = self.create_initialized_agentpool_instance(restore_defaults=False)
        dec_1.context.attach_agentpool(agentpool_1)
        dec_agentpool_1 = dec_1.set_up_custom_ca_trust(agentpool_1)
        dec_agentpool_1 = self._restore_defaults_in_agentpool(dec_agentpool_1)
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            enable_custom_ca_trust=True,
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
class AKSPreviewAgentPoolAddDecoratorStandaloneModeTestCase(AKSPreviewAgentPoolAddDecoratorCommonTestCase):
    """Run the shared add-decorator test bodies in STANDALONE mode (`aks nodepool add`)."""

    def setUp(self):
        # manually register CUSTOM_MGMT_AKS_PREVIEW
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.resource_type = CUSTOM_MGMT_AKS_PREVIEW
        self.agentpool_decorator_mode = AgentPoolDecoratorMode.STANDALONE
        self.models = AKSPreviewAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
        self.client = MockClient()

    def test_set_up_preview_vm_properties(self):
        self.common_set_up_preview_vm_properties()

    def test_set_up_motd(self):
        self.common_set_up_motd()

    def test_set_up_gpu_propertes(self):
        self.common_set_up_gpu_propertes()

    def test_set_up_custom_ca_trust(self):
        self.common_set_up_custom_ca_trust()

    def test_construct_agentpool_profile_preview(self):
        import inspect

        from azext_aks_preview.custom import aks_agentpool_add

        # Split the `aks nodepool add` signature into positional and optional
        # parameters so this test stays in sync with the CLI command surface.
        optional_params = {}
        positional_params = []
        for _, v in inspect.signature(aks_agentpool_add).parameters.items():
            if v.default != v.empty:
                optional_params[v.name] = v.default
            else:
                positional_params.append(v.name)
        ground_truth_positional_params = [
            "cmd",
            "client",
            "resource_group_name",
            "cluster_name",
            "nodepool_name",
        ]
        self.assertEqual(positional_params, ground_truth_positional_params)
        # prepare a dictionary of default parameters
        raw_param_dict = {
            "resource_group_name": "test_rg_name",
            "cluster_name": "test_cluster_name",
            "nodepool_name": "test_nodepool_name",
        }
        raw_param_dict.update(optional_params)
        # default value in `aks nodepool add`
        dec_1 = AKSPreviewAgentPoolAddDecorator(
            self.cmd,
            self.client,
            raw_param_dict,
            self.resource_type,
            self.agentpool_decorator_mode,
        )
        # Stub the agent-pool client factory so no real service call is made.
        with patch(
            "azext_aks_preview.agentpool_decorator.cf_agent_pools",
            return_value=Mock(list=Mock(return_value=[])),
        ):
            dec_agentpool_1 = dec_1.construct_agentpool_profile_preview()
        ground_truth_upgrade_settings_1 = self.models.AgentPoolUpgradeSettings()
        # Expected profile built purely from the command's default parameter values.
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            nodepool_name="test_nodepool_name",
            vm_size=CONST_DEFAULT_NODE_VM_SIZE,
            os_type=CONST_DEFAULT_NODE_OS_TYPE,
            enable_node_public_ip=False,
            enable_auto_scaling=False,
            count=3,
            node_taints=[],
            os_disk_size_gb=0,
            upgrade_settings=ground_truth_upgrade_settings_1,
            type_properties_type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            enable_fips=False,
            mode=CONST_NODEPOOL_MODE_USER,
            scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
            workload_runtime=CONST_WORKLOAD_RUNTIME_OCI_CONTAINER,
            enable_custom_ca_trust=False,
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
        dec_1.context.raw_param.print_usage_statistics()
class AKSPreviewAgentPoolAddDecoratorManagedClusterModeTestCase(AKSPreviewAgentPoolAddDecoratorCommonTestCase):
    """Run the shared add-decorator test bodies in MANAGED_CLUSTER mode (`aks create`)."""

    def setUp(self):
        # manually register CUSTOM_MGMT_AKS_PREVIEW
        register_aks_preview_resource_type()
        self.cli_ctx = MockCLI()
        self.cmd = MockCmd(self.cli_ctx)
        self.resource_type = CUSTOM_MGMT_AKS_PREVIEW
        self.agentpool_decorator_mode = AgentPoolDecoratorMode.MANAGED_CLUSTER
        self.models = AKSPreviewAgentPoolModels(self.cmd, self.resource_type, self.agentpool_decorator_mode)
        self.client = MockClient()

    def test_set_up_preview_vm_properties(self):
        self.common_set_up_preview_vm_properties()

    def test_set_up_motd(self):
        self.common_set_up_motd()

    def test_set_up_gpu_propertes(self):
        self.common_set_up_gpu_propertes()

    def test_set_up_custom_ca_trust(self):
        self.common_set_up_custom_ca_trust()

    def test_construct_agentpool_profile_preview(self):
        import inspect

        from azext_aks_preview.custom import aks_create

        # Split the `aks create` signature into positional and optional parameters
        # so this test stays in sync with the CLI command surface.
        optional_params = {}
        positional_params = []
        for _, v in inspect.signature(aks_create).parameters.items():
            if v.default != v.empty:
                optional_params[v.name] = v.default
            else:
                positional_params.append(v.name)
        ground_truth_positional_params = [
            "cmd",
            "client",
            "resource_group_name",
            "name",
            "ssh_key_value",
        ]
        self.assertEqual(positional_params, ground_truth_positional_params)
        # prepare a dictionary of default parameters
        raw_param_dict = {
            "resource_group_name": "test_rg_name",
            "name": "test_cluster_name",
            "ssh_key_value": None,
        }
        raw_param_dict.update(optional_params)
        # default value in `aks_create`
        dec_1 = AKSPreviewAgentPoolAddDecorator(
            self.cmd,
            self.client,
            raw_param_dict,
            self.resource_type,
            self.agentpool_decorator_mode,
        )
        # Stub the (core acs) agent-pool client factory so no real service call is made.
        with patch(
            "azure.cli.command_modules.acs.agentpool_decorator.cf_agent_pools",
            return_value=Mock(list=Mock(return_value=[])),
        ):
            dec_agentpool_1 = dec_1.construct_agentpool_profile_preview()
        upgrade_settings_1 = self.models.AgentPoolUpgradeSettings()
        # Expected profile built purely from `aks create` default parameter values;
        # note the SYSTEM mode and the `type` (not type_properties_type) field here.
        ground_truth_agentpool_1 = self.create_initialized_agentpool_instance(
            nodepool_name="nodepool1",
            orchestrator_version="",
            vm_size=CONST_DEFAULT_NODE_VM_SIZE,
            os_type=CONST_DEFAULT_NODE_OS_TYPE,
            enable_node_public_ip=False,
            enable_auto_scaling=False,
            count=3,
            node_taints=[],
            os_disk_size_gb=0,
            upgrade_settings=upgrade_settings_1,
            type=CONST_VIRTUAL_MACHINE_SCALE_SETS,
            enable_encryption_at_host=False,
            enable_ultra_ssd=False,
            enable_fips=False,
            mode=CONST_NODEPOOL_MODE_SYSTEM,
            workload_runtime=CONST_WORKLOAD_RUNTIME_OCI_CONTAINER,
            enable_custom_ca_trust=False,
        )
        self.assertEqual(dec_agentpool_1, ground_truth_agentpool_1)
        dec_1.context.raw_param.print_usage_statistics()
class AKSPreviewAgentPoolUpdateDecoratorCommonTestCase(unittest.TestCase):
def _remove_defaults_in_agentpool(self, agentpool):
self.defaults_in_agentpool = {}
for attr_name, attr_value in vars(agentpool).items():
if not attr_name.startswith("_") and attr_name != "name" and attr_value is not None:
self.defaults_in_agentpool[attr_name] = attr_value
setattr(agentpool, attr_name, None)
return agentpool
def _restore_defaults_in_agentpool(self, agentpool):
for key, value in self.defaults_in_agentpool.items():
if getattr(agentpool, key, None) is None:
setattr(agentpool, key, value)
return agentpool
def create_initialized_agentpool_instance(
    self, nodepool_name="nodepool1", remove_defaults=True, restore_defaults=True, **kwargs
):
    """Helper function to create a properly initialized agentpool instance.

    :return: the AgentPool object
    """
    # In MANAGED_CLUSTER mode the model accepts the name via its constructor;
    # standalone mode assigns it as a plain attribute afterwards.
    if self.agentpool_decorator_mode == AgentPoolDecoratorMode.MANAGED_CLUSTER:
        agentpool = self.models.UnifiedAgentPoolModel(name=nodepool_name)
    else:
        agentpool = self.models.UnifiedAgentPoolModel()
        agentpool.name = nodepool_name
    # Optionally strip SDK-populated defaults so only explicit values remain.
    if remove_defaults:
        agentpool = self._remove_defaults_in_agentpool(agentpool)
    # Apply caller-requested property values.
    for prop_name, prop_value in kwargs.items():
        setattr(agentpool, prop_name, prop_value)
    # Optionally put the stripped defaults back where nothing was set.
    if restore_defaults:
        agentpool = self._restore_defaults_in_agentpool(agentpool)
    return agentpool
def common_update_custom_ca_trust(self):
dec_1 = AKSPreviewAgentPoolUpdateDecorator(
self.cmd,
self.client,
{"enable_custom_ca_trust": True, "disable_custom_ca_trust": False},
self.resource_type,
self.agentpool_decorator_mode,
)
# fail on passing the wrong agentpool object
with self.assertRaises(CLIInternalError):
dec_1.update_vm_properties(None)
agentpool_1 = self.create_initialized_agentpool_instance(
| |
"https://discord.gg/3GvT66U",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"CLP": {
"symbol": "CLP",
"address": "0x7FCE2856899a6806eeEf70807985fc7554C66340",
"decimals": 9,
"name": "CryptoLending",
"ens_address": "",
"website": "https://cryptolending.org",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/CLPcoin-125929678066347",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"CXC": {
"symbol": "CXC",
"address": "0x2134057C0b461F898D375Cead652Acae62b59541",
"decimals": 18,
"name": "CoxxxCoin",
"ens_address": "",
"website": "http://coxxxcoin.com",
"logo": {
"src": "http://www.coxxxcoin.com/CoxxxCoin.256.png",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/coxxxcoin/smart_contract",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/CoxxxCoin/",
"slack": "https://coxxxcoin.slack.com",
"telegram": "",
"twitter": "https://twitter.com/coxxxcoin",
"youtube": ""
}
},
"ALTS": {
"symbol": "ALTS",
"address": "0x638AC149eA8EF9a1286C41B977017AA7359E6Cfa",
"decimals": 18,
"name": "ALTS Token",
"ens_address": "",
"website": "http://www.altcoinstalks.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/AltcoinsTalks",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/joinchat/FVTLFELkfHI6_CKz7AbIpQ",
"twitter": "https://twitter.com/AltcoinsTalks",
"youtube": ""
}
},
"PLASMA": {
"symbol": "PLASMA",
"address": "0x59416A25628A76b4730eC51486114c32E0B582A1",
"decimals": 6,
"name": "PLASMA",
"ens_address": "",
"website": "https://plasma.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"RNDR": {
"symbol": "RNDR",
"address": "0x0996bFb5D057faa237640E2506BE7B4f9C46de0B",
"decimals": 18,
"name": "<NAME>",
"ens_address": "",
"website": "https://rendertoken.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "https://rendertoken.rocket.chat",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/RenderToken",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/rendertoken",
"youtube": ""
}
},
"1ST": {
"symbol": "1ST",
"address": "0xAf30D2a7E90d7DC361c8C4585e9BB7D2F6f15bc7",
"decimals": 18,
"name": "FirstBlood",
"ens_address": "",
"website": "https://firstblood.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://slack.firstblood.io",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"ARXT": {
"symbol": "ARXT",
"address": "0xb0D926c1BC3d78064F3e1075D5bD9A24F35Ae6C5",
"decimals": 18,
"name": "Assistive Reality ARX",
"ens_address": "",
"website": "https://aronline.io",
"logo": {
"src": "https://aronline.io/wp-content/uploads/2018/01/favicon.png",
"width": "100",
"height": "100",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "https://aronline.io/"
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/AssistiveReality/",
"forum": "",
"github": "https://github.com/va109/Artex",
"gitter": "",
"instagram": "https://www.instagram.com/AssistiveReality/",
"linkedin": "https://www.linkedin.com/in/assistive-reality/",
"reddit": "",
"slack": "",
"telegram": "https://t.me/AssistiveReality_ARX",
"twitter": "https://twitter.com/aronline_io/",
"youtube": ""
}
},
"CCS": {
"symbol": "CCS",
"address": "0x315cE59FAFd3A8d562b7Ec1C8542382d2710b06c",
"decimals": 18,
"name": "CacaoShares",
"ens_address": "cacaoshares.eth",
"website": "http://www.cacaoshares.com",
"logo": {
"src": "http://cacaoshares.com/wp-content/uploads/2017/12/cropped-logo-cherry-2018-1-e1513046302595.png",
"width": "28",
"height": "28",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": "http://www.cacaoshares.com"
},
"social": {
"blog": "http://www.cacaoshares.com/blog",
"chat": "",
"facebook": "",
"forum": "",
"github": "https://github.com/AnalemaTechnologies/CacaoShares",
"gitter": "",
"instagram": "http://www.instagram.com/cacaoshares",
"linkedin": "https://www.linkedin.com/company/cacaoshares",
"reddit": "",
"slack": "",
"telegram": "https://t.me/joinchat/FoJjLkP1Qxh9yZbCZ5mC9A",
"twitter": "https://twitter.com/cacaoshares",
"youtube": ""
}
},
"LA": {
"symbol": "LA",
"address": "0xE50365f5D679CB98a1dd62D6F6e58e59321BcdDf",
"decimals": 18,
"name": "LATOKEN",
"ens_address": "",
"website": "https://latoken.com/",
"logo": {
"src": "https://cdn.latoken.com/common/img/logo.svg",
"width": 512,
"height": 512,
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://blog.latoken.com/",
"chat": "",
"facebook": "https://www.facebook.com/LiquidAssetToken/",
"forum": "",
"github": "https://github.com/latoken",
"gitter": "",
"instagram": "https://www.instagram.com/latokens/",
"linkedin": "https://www.linkedin.com/company/latoken",
"reddit": "https://www.reddit.com/r/LAToken/",
"slack": "",
"telegram": "https://t.me/la_token",
"twitter": "https://twitter.com/LATokens",
"youtube": "https://www.youtube.com/channel/UCvTfsRYJYD2X26VXbqDVgTQ/featured"
}
},
"HB": {
"symbol": "HB",
"name": "HeartBout",
"type": "ERC20",
"address": "0xE2492F8D2A2618d8709Ca99b1d8d75713Bd84089",
"ens_address": "",
"decimals": 18,
"website": "https://heartbout.com",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/HeartBout",
"youtube": ""
}
},
"TDH": {
"symbol": "TDH",
"address": "0x2a1dbabe65c595B0022e75208C34014139d5d357",
"decimals": 18,
"name": "TrustedHealth",
"ens_address": "",
"website": "https://trustedhealth.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "https://medium.com/trustedhealth-io",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/trustedhealth-io/",
"reddit": "https://www.reddit.com/r/TrustedHealth/",
"slack": "",
"telegram": "https://t.me/TrustedHealth_io",
"twitter": "https://twitter.com/_trustedhealth",
"youtube": ""
}
},
"TIME": {
"symbol": "TIME",
"address": "0x6531f133e6DeeBe7F2dcE5A0441aA7ef330B4e53",
"decimals": 8,
"name": "Chronobank",
"ens_address": "",
"website": "https://chronobank.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": "https://chronobank.io/faq"
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://chronobank.herokuapp.com",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"EKO": {
"symbol": "EKO",
"name": "EchoLink",
"type": "ERC20",
"address": "0xa6a840E50bCaa50dA017b91A0D86B8b2d41156EE",
"ens_address": "",
"decimals": 18,
"website": "https://echolink.info",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "https://medium.com/@EchoLinkInfo",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/EchoLinkInfo",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/EchoLinkInfo1",
"youtube": ""
}
},
"2DC": {
"symbol": "2DC",
"address": "0x9fC0583220eB44fAeE9e2dc1E63F39204DDD9090",
"decimals": 18,
"name": "DualChain",
"ens_address": "",
"website": "",
"logo": {
"src": "https://image.ibb.co/iniG4c/282541262323.png",
"width": "400",
"height": "400",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/CompraBitcoin-1453383194773687/",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"SKE": {
"symbol": "SKE",
"name": "Super Keep Token",
"type": "ERC20",
"address": "0x13DB74B3cf512F65C4b91683940B4f3955E05085",
"ens_address": "",
"decimals": 8,
"website": "http://superkeep.pro/",
"logo": {
"src": "http://app.superkeep.cn/DataCenter/upload/admin/image/currency/0x13db74b3cf512f65c4b91683940b4f3955e05085.png",
"width": "28",
"height": "28",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"MTRc": {
"symbol": "MTRc",
"address": "0x1e49fF77c355A3e38D6651ce8404AF0E48c5395f",
"decimals": 18,
"name": "MTRCToken",
"ens_address": "",
"website": "https://modultrade.io",
"logo": {
"src": "https://en.modultrade.io/img/new_set/modultrade_logo.svg",
"width": "60",
"height": "20",
"ipfs_hash": ""
},
"support": {
"email": "<EMAIL>",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/modultrade/",
"forum": "https://bitcointalk.org/index.php?topic=2240518",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "https://www.linkedin.com/company/16259600/",
"reddit": "https://www.reddit.com/r/SandCoin",
"slack": "",
"telegram": "https://t.me/ModulTradeIO",
"twitter": "https://twitter.com/ModulTrade",
"youtube": ""
}
},
"GUP": {
"symbol": "GUP",
"address": "0xf7B098298f7C69Fc14610bf71d5e02c60792894C",
"decimals": 3,
"name": "Matchpool",
"ens_address": "",
"website": "https://matchpool.co",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "http://community.matchpool.com/",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/Matchpool",
"slack": "",
"telegram": "",
"twitter": "https://twitter.com/matchpool",
"youtube": ""
}
},
"IND": {
"symbol": "IND",
"address": "0xf8e386EDa857484f5a12e4B5DAa9984E06E73705",
"decimals": 18,
"name": "Indorse",
"ens_address": "",
"website": "https://indorse.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "https://www.reddit.com/r/indorse",
"slack": "https://slack.indorse.io",
"telegram": "",
"twitter": "https://twitter.com/joinindorse",
"youtube": ""
}
},
"ROCK2PAY": {
"symbol": "ROCK2PAY",
"type": "ERC20",
"address": "0x0E3de3B0E3D617FD8D1D8088639bA877feb4d742",
"decimals": 18,
"name": "ICE ROCK MINING",
"ens_address": "",
"website": "https://icerockmining.io",
"logo": {
"src": "https://rockmining.blob.core.windows.net/logo/fullLogo.png",
"width": 132,
"height": 132,
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "https://www.facebook.com/IceRockMiningICO",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "",
"telegram": "https://t.me/icerockmining",
"twitter": "https://twitter.com/icerockmining",
"youtube": "https://www.youtube.com/channel/UCxQCi2z51-LU9vgiBeuuGLg"
}
},
"EDG": {
"symbol": "EDG",
"address": "0x08711D3B02C8758F2FB3ab4e80228418a7F8e39c",
"decimals": 0,
"name": "Edgeless",
"ens_address": "",
"website": "https://edgeless.io",
"logo": {
"src": "",
"width": "",
"height": "",
"ipfs_hash": ""
},
"support": {
"email": "",
"url": ""
},
"social": {
"blog": "",
"chat": "",
"facebook": "",
"forum": "",
"github": "",
"gitter": "",
"instagram": "",
"linkedin": "",
"reddit": "",
"slack": "https://edgelessethcasino.signup.team",
"telegram": "",
"twitter": "",
"youtube": ""
}
},
"JetCoins": {
"symbol": "JetCoins",
"address": "0x773450335eD4ec3DB45aF74f34F2c85348645D39",
"decimals": 18,
"name": "JetCoins",
"ens_address": "",
"website": "",
"logo": {
| |
# brainbox/task/trials.py
from iblutil.numerical import ismember
from brainbox.processing import bincount2D
import numpy as np
def find_trial_ids(trials, side='all', choice='all', order='trial num', sort='idx',
                   contrast=(1, 0.5, 0.25, 0.125, 0.0625, 0), event=None):
    """
    Finds trials that match criterion
    :param trials: trials object. Must contain attributes contrastLeft, contrastRight and
    feedbackType (and response_times/goCue_times when order='reaction time')
    :param side: stimulus side, options are 'all', 'left' or 'right'
    :param choice: trial choice, options are 'all', 'correct' or 'incorrect'
    :param contrast: contrast of stimulus, pass in list/tuple of all contrasts that want to be
    considered e.g [1, 0.5] would only look for trials with 100 % and 50 % contrast
    :param order: how to order the trials within each group, options are 'trial num' or
    'reaction time'
    :param sort: how to sort the trials, options are 'idx' (default, no split), 'side' (split
    left right trials), 'choice' (split correct incorrect trials), 'choice and side'
    (split left right and correct incorrect)
    :param event: trial event to align to (in order to remove nan trials for this event)
    :return: np.array of trial ids, list of dividers to indicate how trials are sorted

    NOTE(review): when `event` is given, the returned ids index into the event-filtered
    arrays (trials kept by the nan mask), not the full original trial list.
    """
    # Mask of trials to keep: drop trials whose alignment event is nan
    if event:
        idx = ~np.isnan(trials[event])
    else:
        idx = np.ones_like(trials['feedbackType'], dtype=bool)
    # Find trials that have specified contrasts (match on either side)
    cont = np.bitwise_or(ismember(trials['contrastLeft'][idx], np.array(contrast))[0],
                         ismember(trials['contrastRight'][idx], np.array(contrast))[0])
    # Find different permutations of trials
    # correct right (feedbackType == 1 and a finite right-side contrast)
    cor_r = np.where(
        np.bitwise_and(cont, np.bitwise_and(trials['feedbackType'][idx] == 1,
                                            np.isfinite(trials['contrastRight'][idx]))))[0]
    # correct left
    cor_l = np.where(
        np.bitwise_and(cont, np.bitwise_and(trials['feedbackType'][idx] == 1,
                                            np.isfinite(trials['contrastLeft'][idx]))))[0]
    # incorrect right (feedbackType == -1)
    incor_r = np.where(
        np.bitwise_and(cont, np.bitwise_and(trials['feedbackType'][idx] == -1,
                                            np.isfinite(trials['contrastRight'][idx]))))[0]
    # incorrect left
    incor_l = np.where(
        np.bitwise_and(cont, np.bitwise_and(trials['feedbackType'][idx] == -1,
                                            np.isfinite(trials['contrastLeft'][idx]))))[0]
    # Per-trial reaction time; only consumed when order == 'reaction time'
    reaction_time = trials['response_times'][idx] - trials['goCue_times'][idx]
    def _order_by(_trials, order):
        # Returns subset of trials either ordered by trial number or by reaction time
        # NOTE(review): implicitly returns None for any other `order` value
        sorted_trials = np.sort(_trials)
        if order == 'trial num':
            return sorted_trials
        elif order == 'reaction time':
            sorted_reaction = np.argsort(reaction_time[sorted_trials])
            return sorted_trials[sorted_reaction]
    # Dividers are cumulative group boundaries into the returned trial_id array
    dividers = []
    # Find the trial id for all possible combinations
    if side == 'all' and choice == 'all':
        if sort == 'idx':
            trial_id = _order_by(np.r_[cor_r, cor_l, incor_r, incor_l], order)
        elif sort == 'choice':
            # correct trials first, then incorrect; one divider at the boundary
            trial_id = np.r_[_order_by(np.r_[cor_l, cor_r], order),
                             _order_by(np.r_[incor_l, incor_r], order)]
            dividers.append(np.r_[cor_l, cor_r].shape[0])
        elif sort == 'side':
            # left trials first, then right; one divider at the boundary
            trial_id = np.r_[_order_by(np.r_[cor_l, incor_l], order),
                             _order_by(np.r_[cor_r, incor_r], order)]
            dividers.append(np.r_[cor_l, incor_l].shape[0])
        elif sort == 'choice and side':
            # group order: left correct, left incorrect, right correct, right incorrect;
            # three dividers mark the cumulative boundaries between the four groups
            trial_id = np.r_[_order_by(cor_l, order), _order_by(incor_l, order),
                             _order_by(cor_r, order), _order_by(incor_r, order)]
            dividers.append(cor_l.shape[0])
            dividers.append(np.r_[cor_l, incor_l].shape[0])
            dividers.append(np.r_[cor_l, incor_l, cor_r].shape[0])
    if side == 'left' and choice == 'all':
        if sort in ['idx', 'side']:
            trial_id = _order_by(np.r_[cor_l, incor_l], order)
        elif sort in ['choice', 'choice and side']:
            trial_id = np.r_[_order_by(cor_l, order), _order_by(incor_l, order)]
            dividers.append(cor_l.shape[0])
    if side == 'right' and choice == 'all':
        if sort in ['idx', 'side']:
            trial_id = _order_by(np.r_[cor_r, incor_r], order)
        elif sort in ['choice', 'choice and side']:
            trial_id = np.r_[_order_by(cor_r, order), _order_by(incor_r, order)]
            dividers.append(cor_r.shape[0])
    if side == 'all' and choice == 'correct':
        if sort in ['idx', 'choice']:
            trial_id = _order_by(np.r_[cor_l, cor_r], order)
        elif sort in ['side', 'choice and side']:
            trial_id = np.r_[_order_by(cor_l, order), _order_by(cor_r, order)]
            dividers.append(cor_l.shape[0])
    if side == 'all' and choice == 'incorrect':
        if sort in ['idx', 'choice']:
            trial_id = _order_by(np.r_[incor_l, incor_r], order)
        elif sort in ['side', 'choice and side']:
            trial_id = np.r_[_order_by(incor_l, order), _order_by(incor_r, order)]
            dividers.append(incor_l.shape[0])
    if side == 'left' and choice == 'correct':
        trial_id = _order_by(cor_l, order)
    if side == 'left' and choice == 'incorrect':
        trial_id = _order_by(incor_l, order)
    if side == 'right' and choice == 'correct':
        trial_id = _order_by(cor_r, order)
    if side == 'right' and choice == 'incorrect':
        trial_id = _order_by(incor_r, order)
    # NOTE(review): trial_id is unbound (UnboundLocalError) for unrecognised
    # side/choice combinations — no explicit validation is performed
    return trial_id, dividers
def get_event_aligned_raster(times, events, tbin=0.02, values=None, epoch=(-0.4, 1), bin=True):
    """
    Get event aligned raster.

    :param times: array of times e.g spike times or dlc points
    :param events: array of events to epoch around (nan events are dropped)
    :param tbin: bin size to over which to count events
    :param values: values to scale counts by
    :param epoch: (start, end) window around each event. Default is now an immutable
        tuple; the previous list default was a shared mutable default argument.
    :param bin: whether to bin times in tbin windows or not. NOTE: parameter name kept
        for backward compatibility even though it shadows the builtin ``bin``.
    :return: event_raster (n_events x n_bins float array; rows are nan for events whose
        window falls before the first / after the last time stamp — out-of-range events
        are appended at the end, as before), and t, the bin times relative to the event
    """
    if bin:
        # Count (optionally weighted) events in tbin-sized windows
        vals, bin_times, _ = bincount2D(times, np.ones_like(times), xbin=tbin, weights=values)
        vals = vals[0]
        t = np.arange(epoch[0], epoch[1] + tbin, tbin)
        nbin = t.shape[0]
    else:
        vals = values
        bin_times = times
        # Infer sampling interval from the (assumed regular) time stamps
        tbin = np.mean(np.diff(bin_times))
        t = np.arange(epoch[0], epoch[1], tbin)
        nbin = t.shape[0]
    # remove nan trials
    events = events[~np.isnan(events)]
    intervals = np.c_[events + epoch[0], events + epoch[1]]
    # Remove any trials that are later than the last value in bin_times
    out_intervals = intervals[:, 1] > bin_times[-1]
    epoch_idx = np.searchsorted(bin_times, intervals)[np.invert(out_intervals)]
    # Gather all bins for all kept events with a single vectorised fancy-index.
    # This replaces the original per-bin python loop of np.c_ concatenations
    # (quadratic copying) and also yields a well-defined (n, 0) result when
    # nbin == 0, where the loop left event_raster unbound.
    event_raster = vals[epoch_idx[:, 0][:, np.newaxis] + np.arange(nbin)].astype(float)
    # Find any trials that are less than the first value time and fill with nans (case for
    # example where spiking of cluster doesn't start till after start of first trial due to
    # settling of brain)
    event_raster[intervals[np.invert(out_intervals), 0] < bin_times[0]] = np.nan
    # Add back in the trials that were later than last value with nans
    if np.sum(out_intervals) > 0:
        event_raster = np.r_[event_raster, np.full((np.sum(out_intervals),
                                                    event_raster.shape[1]), np.nan)]
    assert (event_raster.shape[0] == intervals.shape[0])
    return event_raster, t
def get_psth(raster, trial_ids=None):
    """
    Compute the peri-event time histogram averaged over the chosen trials.

    :param raster: output from event aligned raster, window of activity around event
    :param trial_ids: the trials from the raster to average over; all trials when None
    :return: tuple (mean, standard error) across trials, each of length n_bins
    """
    selected = raster if trial_ids is None else filter_by_trial(raster, trial_ids)
    n_trials = selected.shape[0]
    mean = np.nanmean(selected, axis=0)
    err = np.nanstd(selected, axis=0) / np.sqrt(n_trials)
    return mean, err
def filter_by_trial(raster, trial_id):
    """
    Select the rows of the raster corresponding to the trials of interest.

    :param raster: 2D array of shape (n_trials, n_bins)
    :param trial_id: indices of the trials to keep (order is preserved)
    :return: raster restricted to the requested trials
    """
    # Row selection on a 2D array: raster[trial_id] is equivalent to raster[trial_id, :]
    return raster[trial_id]
def filter_correct_incorrect_left_right(trials, event_raster, event, order='trial num'):
    """
    Return psth for left correct, left incorrect, right correct, right incorrect and raster
    sorted by these four groups.

    :param trials: trials object
    :param event_raster: output from get_event_aligned_activity
    :param event: event to align to e.g 'goCue_times', 'stimOn_times'
    :param order: order to sort trials by either 'trial num' or 'reaction time'
    :return: raster dict ('vals', 'dividers') and psth dict keyed by condition
    """
    trials_sorted, div = find_trial_ids(trials, sort='choice and side', event=event,
                                        order=order)
    # (psth key, stimulus side, trial choice, matplotlib line style)
    conditions = (
        ('left_correct', 'left', 'correct', {'color': 'r'}),
        ('left_incorrect', 'left', 'incorrect', {'color': 'r', 'linestyle': 'dashed'}),
        ('right_correct', 'right', 'correct', {'color': 'b'}),
        ('right_incorrect', 'right', 'incorrect', {'color': 'b', 'linestyle': 'dashed'}),
    )
    psth = {}
    for key, stim_side, trial_choice, style in conditions:
        ids, _ = find_trial_ids(trials, side=stim_side, choice=trial_choice, event=event,
                                order=order)
        mean, err = get_psth(event_raster, ids)
        psth[key] = {'vals': mean, 'err': err, 'linestyle': style}
    raster = {'vals': filter_by_trial(event_raster, trials_sorted), 'dividers': div}
    return raster, psth
def filter_correct_incorrect(trials, event_raster, event, order='trial num'):
    """
    Return psth for correct and incorrect trials and raster sorted by correct incorrect.

    :param trials: trials object
    :param event_raster: output from get_event_aligned_activity
    :param event: event to align to e.g 'goCue_times', 'stimOn_times'
    :param order: order to sort trials by either 'trial num' or 'reaction time'
    :return: raster dict ('vals', 'dividers') and psth dict with 'correct'/'incorrect'
    """
    trials_sorted, div = find_trial_ids(trials, sort='choice', event=event, order=order)
    psth = {}
    # (psth key, choice filter, matplotlib line style)
    for key, trial_choice, style in (('correct', 'correct', {'color': 'r'}),
                                     ('incorrect', 'incorrect', {'color': 'b'})):
        ids, _ = find_trial_ids(trials, side='all', choice=trial_choice, event=event,
                                order=order)
        mean, err = get_psth(event_raster, ids)
        psth[key] = {'vals': mean, 'err': err, 'linestyle': style}
    raster = {'vals': filter_by_trial(event_raster, trials_sorted), 'dividers': div}
    return raster, psth
def filter_left_right(trials, event_raster, event, order='trial num'):
    """
    Return psth for left and right trials and raster sorted by left right.

    :param trials: trials object
    :param event_raster: output from get_event_aligned_activity
    :param event: event to align to e.g 'goCue_times', 'stimOn_times'
    :param order: order to sort trials by either 'trial num' or 'reaction time'
    :return: raster dict ('vals' sorted left-then-right, 'dividers') and psth dict with
        'left' and 'right' entries ({'vals', 'err', 'linestyle'})
    """
    # Fix: sort the raster by stimulus side. The previous sort='choice' grouped raster rows
    # by correct/incorrect, contradicting this function's left/right psth and its docstring
    # (compare filter_correct_incorrect, which legitimately uses sort='choice').
    trials_sorted, div = find_trial_ids(trials, sort='side', event=event, order=order)
    trials_l, _ = find_trial_ids(trials, side='left', choice='all', event=event, order=order)
    trials_r, _ = find_trial_ids(trials, side='right', choice='all', event=event, order=order)
    psth = dict()
    mean, err = get_psth(event_raster, trials_l)
    psth['left'] = {'vals': mean, 'err': err, 'linestyle': {'color': 'r'}}
    mean, err = get_psth(event_raster, trials_r)
    psth['right'] = {'vals': mean, 'err': err, 'linestyle': {'color': 'b'}}
    raster = {}
    raster['vals'] = filter_by_trial(event_raster, trials_sorted)
    raster['dividers'] = div
    return raster, psth
def filter_trials(trials, event_raster, event, order='trial num', | |
import theano.tensor as T
from .base import MergeLayer
__all__ = [
"autocrop",
"autocrop_array_shapes",
"ConcatLayer",
"concat",
"ElemwiseMergeLayer",
"ElemwiseSumLayer",
]
def autocrop(inputs, cropping):
    """
    Crops the given input arrays so they can be merged element-wise.

    For every axis whose cropping mode is not ``None``, the minimum size of that
    axis over all inputs is computed and each input is cut down to that size.
    The per-axis cropping modes are:

    ``None``: this axis is not cropped, inputs are unchanged in this axis
    ``'lower'``: keep the lower portion (``a[:crop_size, ...]``)
    ``'upper'``: keep the upper portion (``a[-crop_size:, ...]``)
    ``'center'``: keep the central portion
        (``a[offset:offset + crop_size, ...]`` where
        ``offset = (a.shape[axis] - crop_size) // 2``)

    Parameters
    ----------
    inputs : list of Theano expressions
        The input arrays in the form of a list of Theano expressions
    cropping : list of cropping modes or None
        One mode per axis; padded with `None` when shorter than the number of
        axes. If `cropping` is None, `inputs` is returned as is.

    Returns
    -------
    list of Theano expressions
        each expression is the cropped version of the corresponding input

    Raises
    ------
    ValueError
        if the inputs do not all share the same dimensionality, or if an
        unknown cropping mode is given
    """
    if cropping is None:
        # No cropping in any dimension
        return inputs
    ndim = inputs[0].ndim
    # All inputs must have the same rank for per-axis cropping to make sense
    if not all(inp.ndim == ndim for inp in inputs):
        raise ValueError("Not all inputs are of the same "
                         "dimensionality. Got {0} inputs of "
                         "dimensionalities {1}.".format(
                             len(inputs),
                             [inp.ndim for inp in inputs]))
    # Symbolic shapes of each input, and the per-axis minimum over all inputs
    shapes = [inp.shape for inp in inputs]
    min_shape = T.min(T.as_tensor_variable(shapes), axis=0)
    # Pad the cropping spec with None so there is one mode per axis
    modes = list(cropping) + [None] * max(0, ndim - len(cropping))
    cropped = []
    for inp, shape in zip(inputs, shapes):
        # Build one slice per axis for this input
        slices = []
        for axis, mode in enumerate(modes):
            if mode is None:
                # Axis left untouched
                slices.append(slice(None))
            elif mode == 'lower':
                # First min_shape[axis] elements
                slices.append(slice(None, min_shape[axis]))
            elif mode == 'upper':
                # Last min_shape[axis] elements
                slices.append(slice(-min_shape[axis], None))
            elif mode == 'center':
                # Central min_shape[axis] elements; offset depends on this
                # particular input's size along the axis
                offset = (shape[axis] - min_shape[axis]) // 2
                slices.append(slice(offset, offset + min_shape[axis]))
            else:
                raise ValueError(
                    'Unknown crop mode \'{0}\''.format(mode))
        cropped.append(inp[slices])
    return cropped
def autocrop_array_shapes(input_shapes, cropping):
    """
    Computes the shapes of the given arrays after auto-cropping is applied.

    For more information on cropping, see the :func:`autocrop` function
    documentation.

    Parameters
    ----------
    input_shapes : list of tuples
        the shapes of the input arrays prior to cropping
    cropping : list of cropping modes or None
        one mode per axis, padded with `None` when shorter than the number of
        axes; if `cropping` is None, `input_shapes` is returned as is. See
        :func:`autocrop` for the meaning of each mode.

    Returns
    -------
    list of tuples
        each tuple is a cropped version of the corresponding input shape

    Raises
    ------
    ValueError
        if the shapes do not all have the same length, or if an unknown
        cropping mode is given
    """
    if cropping is None:
        return input_shapes
    ndim = len(input_shapes[0])
    # All shapes must have the same rank
    if not all(len(shape) == ndim for shape in input_shapes):
        raise ValueError("Not all inputs are of the same "
                         "dimensionality. Got {0} inputs of "
                         "dimensionalities {1}.".format(
                             len(input_shapes),
                             [len(shape) for shape in input_shapes]))
    # Pad the cropping spec with None so there is one mode per axis
    modes = list(cropping) + [None] * max(0, ndim - len(cropping))
    # Work axis-by-axis: each entry of `axes` holds the sizes of one axis
    # across all inputs, cropped to the minimum where cropping is active
    axes = []
    for sizes, mode in zip(zip(*input_shapes), modes):
        if mode is None:
            axes.append(sizes)
        elif mode in {'lower', 'center', 'upper'}:
            axes.append([min(sizes)] * len(sizes))
        else:
            raise ValueError('Unknown crop mode \'{0}\''.format(mode))
    # Transpose back from per-axis lists to per-input shape tuples
    return [tuple(shape) for shape in zip(*axes)]
class ConcatLayer(MergeLayer):
"""
Concatenates multiple inputs along the specified axis. Inputs should have
the same shape except for the dimension specified in axis, which can have
different sizes.
Parameters
-----------
incomings : a list of :class:`Layer` instances or tuples
The layers feeding into this layer, or expected input shapes
axis : int
Axis which inputs are joined over
cropping : None or [crop]
Cropping for each input axis. Cropping is described in the docstring
for :func:`autocrop`. Cropping is always disabled for `axis`.
"""
    def __init__(self, incomings, axis=1, cropping=None, **kwargs):
        # incomings: layers (or shape tuples) to concatenate; axis: the axis
        # inputs are joined over; cropping: per-axis crop modes (see `autocrop`)
        # used to reconcile the non-join axes of the inputs.
        super(ConcatLayer, self).__init__(incomings, **kwargs)
        self.axis = axis
        if cropping is not None:
            # If cropping is enabled, don't crop on the selected axis —
            # sizes are allowed to differ there. Copy first so the caller's
            # list is not mutated.
            cropping = list(cropping)
            cropping[axis] = None
        self.cropping = cropping
def get_output_shape_for(self, input_shapes):
input_shapes = autocrop_array_shapes(input_shapes, self.cropping)
# Infer the output shape by grabbing, for each axis, the first
# input size that is not `None` (if there is any)
output_shape = [next((s for s in sizes if s is not None), None)
for sizes in zip(*input_shapes)]
def match(shape1, shape2):
return (len(shape1) == len(shape2) and
all(i == self.axis or s1 is None or s2 is None or s1 == s2
for i, (s1, s2) in enumerate(zip(shape1, shape2))))
# Check for compatibility with inferred output shape
if not all(match(shape, output_shape) for shape in input_shapes):
raise ValueError("Mismatch: input shapes must be the same except "
"in the concatenation axis")
# Infer output shape on concatenation axis and return
sizes = [input_shape[self.axis] for input_shape in input_shapes]
concat_size = | |
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeAutoScalingInstancesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeAutoScalingInstances(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateAutoScalingGroupFromInstance(argv, arglist):
    """CLI handler for the CreateAutoScalingGroupFromInstance API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateAutoScalingGroupFromInstance", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "AutoScalingGroupName": argv.get("--AutoScalingGroupName"),
        "InstanceId": argv.get("--InstanceId"),
        "MinSize": Utils.try_to_json(argv, "--MinSize"),
        "MaxSize": Utils.try_to_json(argv, "--MaxSize"),
        "DesiredCapacity": Utils.try_to_json(argv, "--DesiredCapacity"),
        "InheritInstanceTag": Utils.try_to_json(argv, "--InheritInstanceTag"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].CreateAutoScalingGroupFromInstanceRequest()
    request.from_json_string(json.dumps(param))
    result = client.CreateAutoScalingGroupFromInstance(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doCreateLifecycleHook(argv, arglist):
    """CLI handler for the CreateLifecycleHook API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateLifecycleHook", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "AutoScalingGroupId": argv.get("--AutoScalingGroupId"),
        "LifecycleHookName": argv.get("--LifecycleHookName"),
        "LifecycleTransition": argv.get("--LifecycleTransition"),
        "DefaultResult": argv.get("--DefaultResult"),
        "HeartbeatTimeout": Utils.try_to_json(argv, "--HeartbeatTimeout"),
        "NotificationMetadata": argv.get("--NotificationMetadata"),
        "NotificationTarget": Utils.try_to_json(argv, "--NotificationTarget"),
        "LifecycleTransitionType": argv.get("--LifecycleTransitionType"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].CreateLifecycleHookRequest()
    request.from_json_string(json.dumps(param))
    result = client.CreateLifecycleHook(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doUpgradeLifecycleHook(argv, arglist):
    """CLI handler for the UpgradeLifecycleHook API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("UpgradeLifecycleHook", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "LifecycleHookId": argv.get("--LifecycleHookId"),
        "LifecycleHookName": argv.get("--LifecycleHookName"),
        "LifecycleTransition": argv.get("--LifecycleTransition"),
        "DefaultResult": argv.get("--DefaultResult"),
        "HeartbeatTimeout": Utils.try_to_json(argv, "--HeartbeatTimeout"),
        "NotificationMetadata": argv.get("--NotificationMetadata"),
        "NotificationTarget": Utils.try_to_json(argv, "--NotificationTarget"),
        "LifecycleTransitionType": argv.get("--LifecycleTransitionType"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].UpgradeLifecycleHookRequest()
    request.from_json_string(json.dumps(param))
    result = client.UpgradeLifecycleHook(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDisableAutoScalingGroup(argv, arglist):
    """CLI handler for the DisableAutoScalingGroup API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DisableAutoScalingGroup", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "AutoScalingGroupId": argv.get("--AutoScalingGroupId"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DisableAutoScalingGroupRequest()
    request.from_json_string(json.dumps(param))
    result = client.DisableAutoScalingGroup(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDescribeLaunchConfigurations(argv, arglist):
    """CLI handler for the DescribeLaunchConfigurations API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeLaunchConfigurations", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "LaunchConfigurationIds": Utils.try_to_json(argv, "--LaunchConfigurationIds"),
        "Filters": Utils.try_to_json(argv, "--Filters"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeLaunchConfigurationsRequest()
    request.from_json_string(json.dumps(param))
    result = client.DescribeLaunchConfigurations(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDescribePaiInstances(argv, arglist):
    """CLI handler for the DescribePaiInstances API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribePaiInstances", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "InstanceIds": Utils.try_to_json(argv, "--InstanceIds"),
        "Filters": Utils.try_to_json(argv, "--Filters"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribePaiInstancesRequest()
    request.from_json_string(json.dumps(param))
    result = client.DescribePaiInstances(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doCreateScalingPolicy(argv, arglist):
    """CLI handler for the CreateScalingPolicy API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateScalingPolicy", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "AutoScalingGroupId": argv.get("--AutoScalingGroupId"),
        "ScalingPolicyName": argv.get("--ScalingPolicyName"),
        "AdjustmentType": argv.get("--AdjustmentType"),
        "AdjustmentValue": Utils.try_to_json(argv, "--AdjustmentValue"),
        "MetricAlarm": Utils.try_to_json(argv, "--MetricAlarm"),
        "Cooldown": Utils.try_to_json(argv, "--Cooldown"),
        "NotificationUserGroupIds": Utils.try_to_json(argv, "--NotificationUserGroupIds"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].CreateScalingPolicyRequest()
    request.from_json_string(json.dumps(param))
    result = client.CreateScalingPolicy(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDeleteLaunchConfiguration(argv, arglist):
    """CLI handler for the DeleteLaunchConfiguration API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteLaunchConfiguration", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "LaunchConfigurationId": argv.get("--LaunchConfigurationId"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DeleteLaunchConfigurationRequest()
    request.from_json_string(json.dumps(param))
    result = client.DeleteLaunchConfiguration(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDeleteLifecycleHook(argv, arglist):
    """CLI handler for the DeleteLifecycleHook API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteLifecycleHook", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "LifecycleHookId": argv.get("--LifecycleHookId"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DeleteLifecycleHookRequest()
    request.from_json_string(json.dumps(param))
    result = client.DeleteLifecycleHook(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDescribeAutoScalingGroupLastActivities(argv, arglist):
    """CLI handler for the DescribeAutoScalingGroupLastActivities API action."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeAutoScalingGroupLastActivities", g_param[OptionsDefine.Version])
        return
    # Request payload assembled from command line flags
    param = {
        "AutoScalingGroupIds": Utils.try_to_json(argv, "--AutoScalingGroupIds"),
    }
    version = g_param[OptionsDefine.Version]
    cred = credential.Credential(g_param[OptionsDefine.SecretId],
                                 g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    client = CLIENT_MAP[version].AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    request = MODELS_MAP[version].DescribeAutoScalingGroupLastActivitiesRequest()
    request.from_json_string(json.dumps(param))
    result = client.DescribeAutoScalingGroupLastActivities(request).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python 3.3: to_json_string may return bytes
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output],
                        g_param[OptionsDefine.Filter])
def doDescribeLifecycleHooks(argv, arglist):
    """Handle the DescribeLifecycleHooks CLI action.

    Parses global options from ``argv``, builds the request model from the
    action-specific flags, calls the API, and prints the formatted response.

    :param argv: dict of parsed command-line options for this action
    :param arglist: raw argument list (unused; kept for a uniform handler signature)
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeLifecycleHooks", g_param[OptionsDefine.Version])
        return
    param = {
        "LifecycleHookIds": Utils.try_to_json(argv, "--LifecycleHookIds"),
        "Filters": Utils.try_to_json(argv, "--Filters"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        # default request timeout is 60s unless overridden on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeLifecycleHooksRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeLifecycleHooks(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python3.3: to_json_string may return bytes; decode before parsing
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doEnableAutoScalingGroup(argv, arglist):
    """Handle the EnableAutoScalingGroup CLI action.

    Parses global options from ``argv``, builds the request model from the
    action-specific flags, calls the API, and prints the formatted response.

    :param argv: dict of parsed command-line options for this action
    :param arglist: raw argument list (unused; kept for a uniform handler signature)
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("EnableAutoScalingGroup", g_param[OptionsDefine.Version])
        return
    param = {
        "AutoScalingGroupId": argv.get("--AutoScalingGroupId"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        # default request timeout is 60s unless overridden on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.EnableAutoScalingGroupRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.EnableAutoScalingGroup(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python3.3: to_json_string may return bytes; decode before parsing
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeScheduledActions(argv, arglist):
    """Handle the DescribeScheduledActions CLI action.

    Parses global options from ``argv``, builds the request model from the
    action-specific flags, calls the API, and prints the formatted response.

    :param argv: dict of parsed command-line options for this action
    :param arglist: raw argument list (unused; kept for a uniform handler signature)
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeScheduledActions", g_param[OptionsDefine.Version])
        return
    param = {
        "ScheduledActionIds": Utils.try_to_json(argv, "--ScheduledActionIds"),
        "Filters": Utils.try_to_json(argv, "--Filters"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        # default request timeout is 60s unless overridden on the command line
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.AutoscalingClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeScheduledActionsRequest()
    model.from_json_string(json.dumps(param))
    rsp = client.DescribeScheduledActions(model)
    result = rsp.to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        # python3.3: to_json_string may return bytes; decode before parsing
        jsonobj = json.loads(result.decode('utf-8'))
    FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeAutoScalingGroups(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeAutoScalingGroups", g_param[OptionsDefine.Version])
return
param = {
"AutoScalingGroupIds": Utils.try_to_json(argv, "--AutoScalingGroupIds"),
"Filters": Utils.try_to_json(argv, "--Filters"),
"Limit": Utils.try_to_json(argv, "--Limit"),
"Offset": Utils.try_to_json(argv, "--Offset"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 15:44:58 2018
@author: joachim
"""
import os
import re
import yaml
import logging
import warnings
from tunacell.io.text import (TextParsingError, MissingFileError,
MissingFolderError, MismatchFileError,
CorruptedFileError)
from tunacell.base.observable import Observable, FunctionalObservable
# import a bunch of filters to be able to load them using eval
from tunacell.filters.main import (FilterAND, FilterOR, FilterNOT, FilterTRUE,
FilterSet)
from tunacell.filters.cells import (FilterCellAny,
FilterCellIDparity,
FilterCompleteCycle,
FilterCycleFrames,
FilterCycleSpanIncluded,
FilterData,
FilterDaughters,
FilterHasParent,
FilterLengthIncrement,
FilterObservableBound,
FilterSymmetricDivision,
FilterTimeInCycle)
from tunacell.filters.trees import (FilterTreeAny,
FilterTreeDepth,
FilterTreeTimeIntersect)
from tunacell.filters.lineages import (FilterLineageAny,
FilterLineageData,
FilterLineageLength,
FilterLineageTimeBound,
FilterLineageTimeIntersect,
FilterLineageTimeLength,
FilterLineageWithCellProperty)
from tunacell.filters.containers import (FilterContainerAny,
FilterContainerMetadataEquals)
logger = logging.getLogger(__name__)
# EXPORTING ANALYSIS FILES/FOLDERS
def get_analysis_path(exp, user_abspath=None, write=True):
    """Returns path to analysis folder.

    Parameters
    ----------
    exp : :class:`Experiment` instance
    user_abspath : str (default None)
        if given, will search within this path
    write : bool (default True)
        when True, create the folder on disk if it does not exist yet

    Returns
    -------
    analysis_path : str
        path to analysis folder corresponding to exp

    Raises
    ------
    ValueError
        when ``exp.filetype`` is not one of the supported kinds
        (previously this fell through and raised a confusing NameError)
    """
    # user defined main folder
    if user_abspath is not None:
        analysis = os.path.join(user_abspath, 'analysis')
    # canonical analysis folder, location depends on the experiment filetype
    elif exp.filetype == 'text' or exp.filetype == 'supersegger':
        analysis = os.path.join(exp.abspath, 'analysis')
    elif exp.filetype == 'h5':
        # h5: store the analysis folder next to the file, not inside it
        folder_up = os.path.split(exp.abspath)[0]
        analysis = os.path.join(folder_up, 'analysis')
    elif exp.filetype == 'simu':
        # simulations are written under a temporary folder in the user home
        analysis = os.path.join(os.path.expanduser('~'), 'tmptunacell',
                                exp.label, 'analysis')
    else:
        raise ValueError('Unknown filetype: {}'.format(exp.filetype))
    if write and not os.path.exists(analysis):
        os.makedirs(analysis)
    return analysis
def _get_collections(path, basename='filterset'):
"""Build dict of folders with name starting with basename
"""
if not os.path.exists(path):
raise MissingFolderError('{} is missing'.format(path))
p = re.compile(basename + '_(\d+)')
ls = os.listdir(path)
collec = {} # dic repr(filt): (index, folder_path)
# loop through directories and inspect each filterset directory
for item in ls:
path_to_item = os.path.join(path, item)
if os.path.isdir(path_to_item):
m = p.match(item)
if m:
sindex, = m.groups()
index = int(sindex)
rep = ''
with open(os.path.join(path_to_item, item + '.txt'), 'r') as f:
rep = f.readline().rstrip()
if rep:
collec[rep] = (index, path_to_item)
return collec
def _get_item_path(folder, item, kind='filterset', write=True):
    """Returns path to corresponding item

    Parameters
    ----------
    folder : str
        absolute path of folder to look in
    item : :class:`FilterSet` to look for in folder
    kind : str {'filterset', 'condition'}
    write : bool {True, False}
        whether to write corresponding path on disk when it does not exist yet

    Returns
    -------
    index, path
        index : int
            integer label associated to item
        path : str
            absolute path to item
    """
    collec = _get_collections(folder, basename=kind)
    key = repr(item)
    if key in collec:
        # item already registered: reuse its index and folder
        return collec[key]
    # not registered yet: pick the next free index and build the folder name
    used_indices = [entry[0] for entry in collec.values()]
    index = _get_new_index(used_indices, start_index=1)
    basename = '{}_{:02d}'.format(kind, index)
    if item.label is not None:
        basename += '_{}'.format(item.label)
    path = os.path.join(folder, basename)
    if write:
        os.makedirs(path)
        # describe the item in a text file: repr on the first line,
        # human-readable description below
        text_file = os.path.join(path, basename + '.txt')
        with open(text_file, 'w') as f:
            f.write('{}\n\n{}'.format(repr(item), str(item)))
    return index, path
def get_filters(analysis_path):
    """List filterset collections stored under the analysis folder."""
    return _get_collections(analysis_path, basename='filterset')
def get_conditions(obs_path):
    """List condition collections stored under an observable folder."""
    return _get_collections(obs_path, basename='condition')
def get_filter_path(analysis_path, fset, write=True):
    """Return (index, path) of the folder associated to filterset ``fset``."""
    return _get_item_path(analysis_path, fset, kind='filterset', write=write)
def get_condition_path(obs_path, condition, write=True):
    """Return (index, path) of the folder associated to ``condition``.

    The 'master' condition (or None) maps to a fixed 'master' folder with
    index 0; any other condition goes through the regular item lookup.
    """
    if condition is not None and condition != 'master':
        return _get_item_path(obs_path, condition, kind='condition',
                              write=write)
    # specific case for master : no further filter
    master_path = os.path.join(obs_path, 'master')
    if write and not os.path.exists(master_path):
        os.makedirs(master_path)
    return 0, master_path
def get_observable_path(filter_path, obs, write=True):
    """Return (and possibly create) the folder associated to observable ``obs``.

    Parameters
    ----------
    filter_path : str
        path to the parent FilterSet folder
    obs : :class:`Observable` or :class:`FunctionalObservable` instance
    write : bool (default True)
        when True, create the folder and its description file if needed;
        when False, check that the stored description matches ``obs``

    Returns
    -------
    path : str
        path to the observable folder

    Raises
    ------
    MissingFolderError
        when ``filter_path`` does not exist
    MismatchFileError
        in read mode, when the stored repr differs from ``repr(obs)``
    """
    if not os.path.exists(filter_path):
        raise MissingFolderError('filter-folder')
    basename = obs.name
    path = os.path.join(filter_path, basename)
    if write and not os.path.exists(path):
        logger.debug('Creating path {}'.format(path))
        os.makedirs(path)
        text_file = os.path.join(path, basename + '.txt')
        with open(text_file, 'w') as f:
            if isinstance(obs, Observable):
                f.write('{}\n\n{}\n\n{}\n\ncodestring: {}'.format(repr(obs),
                                                                  obs.as_latex_string,
                                                                  obs.as_string_table(),
                                                                  str(obs)))
            elif isinstance(obs, FunctionalObservable):
                names = ', '.join([arg.name for arg in obs.observables])
                msg = '{}: FunctionalObservable({})'.format(basename, names)
                msg += '\n\n'
                for var_obs in obs.observables:
                    msg += '{}\n'.format(repr(var_obs))
                # bug fix: str.rstrip returns a new string -- the result was
                # previously discarded, leaving a trailing newline in the file
                msg = msg.rstrip()
                f.write(msg)
                # save serialized function
                source_file = os.path.join(path, basename + '_source.txt')
                with open(source_file, 'w') as sf:
                    sf.write('{}'.format(obs.source_f))
    # force write
    elif write and os.path.exists(path):
        logger.debug('May write existing file folders')
    # read mode: check that Observable representations match
    elif (not write) and os.path.exists(path):
        text_file = os.path.join(path, basename + '.txt')
        with open(text_file, 'r') as f:
            read_repr = f.readline().strip()
        if read_repr != repr(obs):
            if isinstance(obs, Observable):
                logger.debug('Obs path {} does not match argument {}'.format(path, obs))
                logger.debug('Changing name by appending a letter')
                raise MismatchFileError(level='observable')
            elif isinstance(obs, FunctionalObservable):
                # NOTE(review): the stored first line for FunctionalObservable
                # is not its repr, so a mismatch here is expected -- warn only
                msg = ('Impossible to check whether FunctionalObservable '
                       'matches since the export of its combining function is '
                       'not set up.')
                warnings.warn(msg)
        else:
            logger.debug('Reading matching observable path {}'.format(path))
    return path
def get_biobservable_path(filter_path, obss, write=True):
    """Get folder for bivariate analysis

    Parameters
    ----------
    filter_path : str
        parent folder, should be a filterset path
    obss : couple of :class:`Observable` instances
    write : bool {True, False}
        whether to write new path or not

    Returns
    -------
    path : str
        path to the bivariate analysis folder
    """
    if not os.path.exists(filter_path):
        raise MissingFolderError('filter-folder')
    # folder name concatenates the two observable names
    names = [obs.name for obs in obss]
    path = os.path.join(filter_path, '---'.join(names))
    if write and not os.path.exists(path):
        os.makedirs(path)
    # no text description is written: the univariate analysis already did it
    return path
def read_count_file(filter_path):
    """Read yaml file for count

    Parameters
    ----------
    filter_path : str
        path to a FilterSet folder

    Returns
    -------
    counts : dict
        keys are cells, lineages, colonies, containers

    Raises
    ------
    MissingFileError
        when the count file does not exist
    CorruptedFileError
        when one of the expected keys is missing
    """
    count_file = os.path.join(filter_path, '.counts.yml')
    if not os.path.exists(count_file):
        raise MissingFileError
    with open(count_file, 'r') as f:
        # safe_load: the file only holds plain counts, and yaml.load without
        # an explicit Loader is unsafe and removed in PyYAML >= 6
        counts = yaml.safe_load(f)
    # check that information is correctly stored
    expected = ('cells', 'lineages', 'colonies', 'containers')
    if not all(key in counts for key in expected):
        raise CorruptedFileError
    return counts
def write_count_file(filter_path, counts):
    """Dump the counts dict as a yaml file in the FilterSet folder."""
    target = os.path.join(filter_path, '.counts.yml')
    with open(target, 'w') as stream:
        yaml.dump(counts, stream=stream, default_flow_style=False)
# PRINTING STUFF FROM ANALYSIS TEXT FILES
def _print_collections(parent_folder, kind='filterset'):
    """Print list of filtersets/conditions

    Parameters
    ----------
    parent_folder : str
        parent folder in which to look for filtersets/conditions
    kind : str {'filterset', 'condition'}
    """
    msg = 'Looking for {}s under {} ...'.format(kind, parent_folder)
    collec = _get_collections(parent_folder, basename=kind)
    # order items using index
    as_list = sorted([(index, path_to_item, rep)
                      for rep, (index, path_to_item) in collec.items()],
                     key=lambda x: x[0])
    if len(as_list) == 0:
        msg += '\n\n Nothing here. Move along.'
    for (index, path_to_item, rep) in as_list:
        basename = os.path.basename(path_to_item)
        # raw string: '\S' is an invalid escape sequence in a plain literal;
        # folders without a label lack the trailing part, handled below
        consensus = r'{}_{:02d}_(\S*)'.format(kind, index)
        chain = re.compile(consensus)
        m = chain.match(basename)
        name = ''
        if m:
            name, = m.groups()
        if not name:
            name = '(none)'
        fname = os.path.join(path_to_item, basename + '.txt')
        if not os.path.exists(fname):
            raise MissingFileError('Missing description file under {}'.format(path_to_item))
        rep, human = _read_first_remaining(fname)
        msg += '\n\n{}. name: {} path: {}'.format(index, name, path_to_item)
        msg += '\nrep: {}'.format(rep)
        if human:
            msg += '\n{}'.format(human)
    print(msg)
    print()
    return
def print_filtersets(exp):
    """Print out filtersets saved in analysis folder

    Parameters
    ----------
    exp : :class:`Experiment` instance
    """
    analysis_path = get_analysis_path(exp, write=False)
    if not os.path.exists(analysis_path):
        print('There is no analysis folder. Compute, export, and come back later')
        # bug fix: bail out here -- _print_collections would otherwise raise
        # MissingFolderError right after the friendly message
        return
    _print_collections(analysis_path, kind='filterset')
    return
def print_conditions(exp, fset, obs):
    """Print out conditions used for input observable

    Parameters
    ----------
    exp : :class:`Experiment` instance
    fset : :class:`FilterSet` instance
    obs : :class:`Observable` instance
    """
    # walk down the folder hierarchy: analysis -> filterset -> observable
    analysis_folder = get_analysis_path(exp, write=False)
    filter_folder = get_filter_path(analysis_folder, fset, write=False)[1]
    observable_folder = get_observable_path(filter_folder, obs, write=False)
    _print_collections(observable_folder, kind='condition')
    return
def print_observables(exp, fset):
    """Print out observables that have been analyzed

    Parameters
    ----------
    exp : :class:`Experiment` instance
    fset : :class:`FilterSet` instance
    """
    analysis_path = get_analysis_path(exp, write=False)
    _, filter_path = get_filter_path(analysis_path, fset, write=False)
    msg = 'Looking for observables under {} ...'.format(filter_path)
    items = os.listdir(filter_path)
    candidates = [item for item in items
                  if os.path.isdir(os.path.join(filter_path, item))]
    valids = []
    for name in candidates:
        abs_path = os.path.join(filter_path, name)
        fname = os.path.join(abs_path, name + '.txt')
        # a valid observable folder carries a description file whose first
        # line is the repr of an (Functional)Observable
        if os.path.exists(fname):
            rep, human = _read_first_remaining(fname)
            if 'Observable' in rep or 'FunctionalObservable' in rep:
                valids.append(name)
                msg += '\n\n{} path: {}'.format(name, abs_path)
                msg += '\nrep: {}'.format(rep)
                if human:
                    msg += '\n\n{}'.format(human)
    if len(valids) == 0:
        # separate from the header line (previously glued to it), matching
        # the formatting used by _print_collections
        msg += '\n\n Nothing there. Move along'
    print(msg)
    print()
    return
# LOADING STUFF FROM TEXT FILES
class ImpossibleToLoad(ValueError):
    """Raised when a stored analysis item cannot be rebuilt from its text
    description (currently the case for FunctionalObservable items)."""
    pass
def load_item_from_path(path):
    """Returns an evaluated object from path

    Parameters
    ----------
    path : str
        folder of a stored item; the first line of its description file
        holds the repr of the object to rebuild

    Returns
    -------
    the object obtained by evaluating the stored repr

    Raises
    ------
    MissingFileError
        when the description file is absent
    ImpossibleToLoad
        for FunctionalObservable items, whose combining function cannot be
        recovered from text
    """
    basename = os.path.basename(path)
    fname = os.path.join(path, basename + '.txt')
    if not os.path.exists(fname):
        raise MissingFileError('Missing description file under {}'.format(path))
    rep, _ = _read_first_remaining(fname)
    # so far, only FunctionalObservable are not loadable (TO FIX)
    if 'FunctionalObservable' in rep:
        raise ImpossibleToLoad('FunctionalObservable are not loadable')
    # SECURITY NOTE: eval executes arbitrary code read from the description
    # file; only load analysis folders from trusted sources.
    return eval(rep)
# other functions
def _read_first_remaining(filename):
"""Get first line of file, and remaining content as couple.
Parameters
----------
filename : str
absolute path to file
Returns
-------
(first line, remaining content): str, str
"""
with open(filename, 'r') as f:
first = f.readline()
msg = ''
for line in f.readlines():
msg += line
return | |
= kwargs.get("flags", None)
self.stopwords = kwargs.get("stopwords", None)
def _to_generated(self):
if not self.flags:
flags = None
else:
flags = DELIMITER.join(self.flags)
return _PatternAnalyzer(
name=self.name,
lower_case_terms=self.lower_case_terms,
pattern=self.pattern,
flags=flags,
stopwords=self.stopwords,
)
@classmethod
def _from_generated(cls, pattern_analyzer):
if not pattern_analyzer:
return None
if not pattern_analyzer.flags:
flags = None
else:
flags = pattern_analyzer.flags.split(DELIMITER)
return cls(
name=pattern_analyzer.name,
lower_case_terms=pattern_analyzer.lower_case_terms,
pattern=pattern_analyzer.pattern,
flags=flags,
stopwords=pattern_analyzer.stopwords,
)
class PatternTokenizer(LexicalTokenizer):
    """Tokenizer that uses regex pattern matching to construct distinct tokens.

    Implemented using Apache Lucene. All required parameters must be populated
    in order to send to Azure.

    :keyword name: Required. The name of the tokenizer. It must only contain letters,
     digits, spaces, dashes or underscores, can only start and end with alphanumeric
     characters, and is limited to 128 characters.
    :paramtype name: str
    :keyword pattern: A regular expression to match token separators. Default is an
     expression that matches one or more white space characters.
    :paramtype pattern: str
    :keyword flags: List of regular expression flags. Possible values of each flag
     include: 'CANON_EQ', 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL',
     'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'.
    :paramtype flags: list[str] or list[~search_service_client.models.RegexFlags]
    :keyword group: The zero-based ordinal of the matching group in the regular
     expression to extract into tokens. Use -1 if you want to use the entire pattern
     to split the input into tokens, irrespective of matching groups. Default is -1.
    :paramtype group: int
    """

    _validation = {"odata_type": {"required": True}, "name": {"required": True}}
    _attribute_map = {
        "odata_type": {"key": "@odata\\.type", "type": "str"},
        "name": {"key": "name", "type": "str"},
        "pattern": {"key": "pattern", "type": "str"},
        "flags": {"key": "flags", "type": "[str]"},
        "group": {"key": "group", "type": "int"},
    }

    def __init__(self, **kwargs):
        super(PatternTokenizer, self).__init__(**kwargs)
        self.odata_type = "#Microsoft.Azure.Search.PatternTokenizer"
        # default pattern splits on runs of non-word characters
        self.pattern = kwargs.get("pattern", r"\W+")
        self.flags = kwargs.get("flags", None)
        self.group = kwargs.get("group", -1)

    def _to_generated(self):
        # the service model stores flags as one delimiter-joined string
        joined_flags = DELIMITER.join(self.flags) if self.flags else None
        return _PatternTokenizer(
            name=self.name,
            pattern=self.pattern,
            flags=joined_flags,
            group=self.group,
        )

    @classmethod
    def _from_generated(cls, pattern_tokenizer):
        if not pattern_tokenizer:
            return None
        raw_flags = pattern_tokenizer.flags
        return cls(
            name=pattern_tokenizer.name,
            pattern=pattern_tokenizer.pattern,
            flags=raw_flags.split(DELIMITER) if raw_flags else None,
            group=pattern_tokenizer.group,
        )
class SearchResourceEncryptionKey(msrest.serialization.Model):
    """A customer-managed encryption key in Azure Key Vault.

    Keys that you create and manage can be used to encrypt or decrypt
    data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.
    All required parameters must be populated in order to send to Azure.

    :keyword key_name: Required. The name of your Azure Key Vault key to be used to
     encrypt your data at rest.
    :paramtype key_name: str
    :keyword key_version: Required. The version of your Azure Key Vault key to be used
     to encrypt your data at rest.
    :paramtype key_version: str
    :keyword vault_uri: Required. The URI of your Azure Key Vault, also referred to as
     DNS name, that contains the key to be used to encrypt your data at rest. An example
     URI might be https://my-keyvault-name.vault.azure.net.
    :paramtype vault_uri: str
    :keyword application_id: Required. An AAD Application ID that was granted the
     required access permissions to the Azure Key Vault that is to be used when
     encrypting your data at rest. The Application ID should not be confused with the
     Object ID for your AAD Application.
    :paramtype application_id: str
    :keyword application_secret: The authentication key of the specified AAD application.
    :paramtype application_secret: str
    """

    _validation = {
        "key_name": {"required": True},
        "key_version": {"required": True},
        "vault_uri": {"required": True},
    }
    _attribute_map = {
        "key_name": {"key": "keyVaultKeyName", "type": "str"},
        "key_version": {"key": "keyVaultKeyVersion", "type": "str"},
        "vault_uri": {"key": "keyVaultUri", "type": "str"},
        "application_id": {"key": "applicationId", "type": "str"},
        "application_secret": {"key": "applicationSecret", "type": "str"},
    }

    def __init__(self, **kwargs):
        super(SearchResourceEncryptionKey, self).__init__(**kwargs)
        # required fields: indexing raises KeyError when absent
        self.key_name = kwargs["key_name"]
        self.key_version = kwargs["key_version"]
        self.vault_uri = kwargs["vault_uri"]
        self.application_id = kwargs.get("application_id", None)
        self.application_secret = kwargs.get("application_secret", None)

    def _to_generated(self):
        # both AAD fields must be present to build the credentials object
        access_credentials = None
        if self.application_id and self.application_secret:
            access_credentials = AzureActiveDirectoryApplicationCredentials(
                application_id=self.application_id,
                application_secret=self.application_secret,
            )
        return _SearchResourceEncryptionKey(
            key_name=self.key_name,
            key_version=self.key_version,
            vault_uri=self.vault_uri,
            access_credentials=access_credentials,
        )

    @classmethod
    def _from_generated(cls, search_resource_encryption_key):
        if not search_resource_encryption_key:
            return None
        credentials = search_resource_encryption_key.access_credentials
        app_id = credentials.application_id if credentials else None
        app_secret = credentials.application_secret if credentials else None
        return cls(
            key_name=search_resource_encryption_key.key_name,
            key_version=search_resource_encryption_key.key_version,
            vault_uri=search_resource_encryption_key.vault_uri,
            application_id=app_id,
            application_secret=app_secret,
        )
class SynonymMap(msrest.serialization.Model):
    """Represents a synonym map definition.

    Variables are only populated by the server, and will be ignored when sending a
    request. All required parameters must be populated in order to send to Azure.

    :keyword name: Required. The name of the synonym map.
    :paramtype name: str
    :ivar format: Required. The format of the synonym map. Only the 'solr' format is
     currently supported. Default value: "solr".
    :vartype format: str
    :keyword synonyms: Required. A series of synonym rules in the specified synonym map
     format. The rules must be separated by newlines.
    :paramtype synonyms: list[str]
    :keyword encryption_key: A description of an encryption key that you create in
     Azure Key Vault. This key is used to provide an additional level of
     encryption-at-rest for your data when you want full assurance that no one, not
     even Microsoft, can decrypt your data in Azure Cognitive Search. Once you have
     encrypted your data, it will always remain encrypted. Azure Cognitive Search will
     ignore attempts to set this property to null. You can change this property as
     needed if you want to rotate your encryption key; Your data will be unaffected.
     Encryption with customer-managed keys is not available for free search services,
     and is only available for paid services created on or after January 1, 2019.
    :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
    :keyword e_tag: The ETag of the synonym map.
    :paramtype e_tag: str
    """

    _validation = {
        "name": {"required": True},
        "format": {"required": True, "constant": True},
        "synonyms": {"required": True},
    }
    _attribute_map = {
        "name": {"key": "name", "type": "str"},
        "format": {"key": "format", "type": "str"},
        "synonyms": {"key": "synonyms", "type": "[str]"},
        "encryption_key": {
            "key": "encryptionKey",
            "type": "SearchResourceEncryptionKey",
        },
        "e_tag": {"key": "@odata\\.etag", "type": "str"},
    }
    format = "solr"

    def __init__(self, **kwargs):
        super(SynonymMap, self).__init__(**kwargs)
        # required fields: indexing raises KeyError when absent
        self.name = kwargs["name"]
        self.synonyms = kwargs["synonyms"]
        self.encryption_key = kwargs.get("encryption_key", None)
        self.e_tag = kwargs.get("e_tag", None)

    def _to_generated(self):
        key = self.encryption_key
        generated_key = key._to_generated() if key else None  # pylint:disable=protected-access
        return _SynonymMap(
            name=self.name,
            # the generated model expects one newline-separated string
            synonyms="\n".join(self.synonyms),
            encryption_key=generated_key,
            e_tag=self.e_tag,
        )

    @classmethod
    def _from_generated(cls, synonym_map):
        if not synonym_map:
            return None
        # pylint:disable=protected-access
        key = SearchResourceEncryptionKey._from_generated(synonym_map.encryption_key)
        return cls(
            name=synonym_map.name,
            synonyms=synonym_map.synonyms.split("\n"),
            encryption_key=key,
            e_tag=synonym_map.e_tag,
        )
class SearchIndexerDataSourceConnection(msrest.serialization.Model):
"""Represents a datasource connection definition, which can be used to configure an indexer.
All required parameters must be populated in order to send to Azure.
:keyword name: Required. The name of the datasource connection.
:paramtype name: str
:keyword description: The description of the datasource connection.
:paramtype description: str
:keyword type: Required. The type of the datasource connection. Possible values include: "azuresql",
"cosmosdb", "azureblob", "azuretable", "mysql", "adlsgen2".
:paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType
:keyword connection_string: The connection string for the datasource connection.
:paramtype connection_string: str
:keyword container: Required. The data container for the datasource connection.
:paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer
:keyword data_change_detection_policy: The data change detection policy for the datasource connection.
:paramtype data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy
:keyword data_deletion_detection_policy: The data deletion detection policy for the datasource connection.
:paramtype data_deletion_detection_policy:
~azure.search.documents.models.DataDeletionDetectionPolicy
:keyword e_tag: The ETag of the data source.
:paramtype e_tag: str
:keyword identity: An explicit managed identity to use for this datasource. If not specified and
the connection string is a managed identity, the system-assigned managed identity is used. If
not specified, the value remains unchanged. If "none" is specified, the value of this property
is cleared.
:paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity
:keyword encryption_key: A description of an encryption key that you create in Azure Key Vault.
This key is used to provide an additional level of encryption-at-rest for your datasource
definition when you want full assurance that no one, not even Microsoft, can decrypt your data
source definition in Azure Cognitive Search. Once you have encrypted your data source
definition, it will always remain encrypted. Azure Cognitive Search will ignore attempts to set
this property to null. You can change this property as needed if you want to rotate your
encryption key; Your datasource definition will be unaffected. Encryption with customer-managed
keys is not available for free search services, and is only available for paid services created
on or after January 1, 2019.
:paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey
"""
_validation = {
"name": {"required": True},
"type": {"required": True},
"connection_string": {"required": True},
"container": {"required": | |
= cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
ignorelist = ignorelist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
aux_tokens = aux_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~ignorelist) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
new_ignorelist, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update ignorelist to ignore any finalized hypos
ignorelist = new_ignorelist.ge(cand_size)[:, :beam_size]
assert (~ignorelist).any(dim=1).all()
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
aux_tokens[:, : step + 1] = torch.index_select(
aux_tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
aux_tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
# for sent in range(len(finalized)):
# # make into beam container
# BCList = [
# BeamContainer(elem["score"].item(), elem) for elem in finalized[sent]
# ]
# BCList.sort()
# BCList.reverse()
# finalized[sent] = torch.jit.annotate(
# List[Dict[str, Tensor]], [x.elem for x in BCList]
# )
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int, pad, eos
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def finalize_aux_hypos(
    self,
    step: int,
    bbsz_idx,
    eos_scores,
    tokens,
    encoder_outs,
    decoder_out,
    scores,
    finalized: List[List[Dict[str, Tensor]]],
    finished: List[bool],
    beam_size: int,
    attn: Optional[Tensor],
    src_lengths,
    max_len: int,
    eos,
):
    """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.

    Each entry of `bbsz_idx` is a flat (batch * beam) index whose hypothesis
    ends with `eos` at this `step`; the matching entry of `eos_scores` is its
    cumulative score. Besides tokens/scores/attention, this variant also
    stores the auxiliary decoder output and the reordered encoder outputs
    with every finalized hypothesis.

    Returns the list of `unfin_idx` values of sentences that became finished
    at this step (a list of indices, not a count).

    Args:
        bbsz_idx (Tensor):
    """
    assert bbsz_idx.numel() == eos_scores.numel()
    # clone relevant token and attention tensors
    tokens_clone = tokens.index_select(0, bbsz_idx)[
        :, 1 : step + 2
    ]  # skip the first index, which is EOS
    # NOTE(review): assumes decoder_out is time-major (T x bbsz x ...) — confirm
    decoder_out_clone = decoder_out.index_select(1, bbsz_idx)[: step + 1].transpose(0, 1)
    encoder_outs_clone = self.model.reorder_encoder_out(encoder_outs, bbsz_idx)
    # overwrite the last position with the EOS symbol
    tokens_clone[:, step] = eos
    attn_clone = (
        attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
        if attn is not None
        else None
    )
    # compute scores per token position
    pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
    pos_scores[:, step] = eos_scores
    # convert from cumulative to per-position scores
    pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
    # normalize sentence-level scores
    if self.normalize_scores:
        eos_scores /= (step + 1) ** self.len_penalty
    # cum_unfin[i] = number of sentences already finished before the i-th
    # still-unfinished sentence; maps an unfinished-batch index back to the
    # original sentence index.
    cum_unfin: List[int] = []
    prev = 0
    for f in finished:
        if f:
            prev += 1
        else:
            cum_unfin.append(prev)
    # set() is not supported in script export
    sents_seen: Dict[str, Optional[Tensor]] = {}
    for i in range(bbsz_idx.size()[0]):
        idx = bbsz_idx[i]
        score = eos_scores[i]
        # index among the currently unfinished sentences
        unfin_idx = idx // beam_size
        # original sentence index in the full batch
        sent = unfin_idx + cum_unfin[unfin_idx]
        # Cannot create dict for key type '(int, int)' in torchscript.
        # The workaround is to cast int to string
        seen = str(sent.item()) + "_" + str(unfin_idx.item())
        if seen not in sents_seen:
            sents_seen[seen] = None
        if self.match_source_len and step > src_lengths[unfin_idx]:
            # hypotheses longer than the source are disallowed in this mode
            score = torch.tensor(-math.inf).to(score)
        if len(finalized[sent]) < beam_size:
            if attn_clone is not None:
                # remove padding tokens from attn scores
                hypo_attn = attn_clone[i]
            else:
                hypo_attn = torch.empty(0)
            finalized[sent].append(
                {
                    "tokens": tokens_clone[i],
                    "auxiliary_out": decoder_out_clone[i],
                    # re-reorder so each stored encoder output covers only this hypothesis
                    "encoder_outs": self.model.reorder_encoder_out(
                        encoder_outs_clone, torch.tensor([i], dtype=torch.long).to(tokens_clone.device)),
                    "score": score,
                    "attention": hypo_attn,  # src_len x tgt_len
                    "alignment": torch.empty(0),
                    "positional_scores": pos_scores[i],
                }
            )
    newly_finished: List[int] = []
    for seen in sents_seen.keys():
        # check termination conditions for this sentence
        sent: int = int(float(seen.split("_")[0]))
        unfin_idx: int = int(float(seen.split("_")[1]))
        if not finished[sent] and self.is_finished(
            step, unfin_idx, max_len, len(finalized[sent]), beam_size
        ):
            finished[sent] = True
            newly_finished.append(unfin_idx)
    return newly_finished
def finalize_hypos(
    self,
    step: int,
    bbsz_idx,
    eos_scores,
    tokens,
    src_tokens,
    scores,
    finalized: List[List[Dict[str, Tensor]]],
    finished: List[bool],
    beam_size: int,
    attn: Optional[Tensor],
    src_lengths,
    max_len: int,
):
    """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.

    Each entry of `bbsz_idx` is a flat (batch * beam) index whose hypothesis
    ends with EOS at this `step`; the matching entry of `eos_scores` is its
    cumulative score. The hypothesis' source tokens (minus padding) are
    stored under the "aux_tokens" key.

    Returns the list of `unfin_idx` values of sentences that became finished
    at this step (a list of indices, not a count).

    Args:
        bbsz_idx (Tensor):
    """
    assert bbsz_idx.numel() == eos_scores.numel()
    # clone relevant token and attention tensors
    tokens_clone = tokens.index_select(0, bbsz_idx)[
        :, 1 : step + 2
    ]  # skip the first index, which is EOS
    src_tokens_clone = src_tokens.index_select(0, bbsz_idx)
    # overwrite the last position with the EOS symbol
    tokens_clone[:, step] = self.eos
    attn_clone = (
        attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
        if attn is not None
        else None
    )
    # compute scores per token position
    pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
    pos_scores[:, step] = eos_scores
    # convert from cumulative to per-position scores
    pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
    # normalize sentence-level scores
    if self.normalize_scores:
        eos_scores /= (step + 1) ** self.len_penalty
    # cum_unfin[i] = number of sentences already finished before the i-th
    # still-unfinished sentence; maps an unfinished-batch index back to the
    # original sentence index.
    cum_unfin: List[int] = []
    prev = 0
    for f in finished:
        if f:
            prev += 1
        else:
            cum_unfin.append(prev)
    # set() is not supported in script export
    sents_seen: Dict[str, Optional[Tensor]] = {}
    for i in range(bbsz_idx.size()[0]):
        idx = bbsz_idx[i]
        score = eos_scores[i]
        # index among the currently unfinished sentences
        unfin_idx = idx // beam_size
        # original sentence index in the full batch
        sent = unfin_idx + cum_unfin[unfin_idx]
        # Cannot create dict for key type '(int, int)' in torchscript.
        # The workaround is to cast int to string
        seen = str(sent.item()) + "_" + str(unfin_idx.item())
        if seen not in sents_seen:
            sents_seen[seen] = None
        if self.match_source_len and step > src_lengths[unfin_idx]:
            # hypotheses longer than the source are disallowed in this mode
            score = torch.tensor(-math.inf).to(score)
        if len(finalized[sent]) < beam_size:
            if attn_clone is not None:
                # remove padding tokens from attn scores
                hypo_attn = attn_clone[i]
            else:
                hypo_attn = torch.empty(0)
            # keep only the non-pad source tokens for this hypothesis
            src_mask = src_tokens_clone[i] != self.src_pad
            finalized[sent].append(
                {
                    "tokens": tokens_clone[i],
                    "score": score,
                    "aux_tokens": src_tokens_clone[i].masked_select(src_mask),
                    "attention": hypo_attn,  # src_len x tgt_len
                    "alignment": torch.empty(0),
                    "positional_scores": pos_scores[i],
                }
            )
    newly_finished: List[int] = []
    for seen in sents_seen.keys():
        # check termination conditions for this sentence
        sent: int = int(float(seen.split("_")[0]))
        unfin_idx: int = int(float(seen.split("_")[1]))
        if not finished[sent] and self.is_finished(
            step, unfin_idx, max_len, len(finalized[sent]), beam_size
        ):
            finished[sent] = True
            newly_finished.append(unfin_idx)
    return newly_finished
class EnsembleTwoPhaseModel(EnsembleModel):
"""A wrapper around an ensemble of models."""
auxiliary_incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]]
def __init__(self, models):
    """Wrap `models` and allocate one empty auxiliary incremental-state
    dict per model (TorchScript-annotated so the attribute is scriptable)."""
    super().__init__(models)
    fresh_states = [
        torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
        for _ in range(self.models_size)
    ]
    self.auxiliary_incremental_states = torch.jit.annotate(
        List[Dict[str, Dict[str, Optional[Tensor]]]], fresh_states
    )
def reset_incremental_state(self):
    """Clear the base ensemble's incremental state and re-allocate the
    auxiliary per-model state dicts to fresh empty ones."""
    super().reset_incremental_state()
    fresh_states = [
        torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
        for _ in range(self.models_size)
    ]
    self.auxiliary_incremental_states = torch.jit.annotate(
        List[Dict[str, Dict[str, Optional[Tensor]]]], fresh_states
    )
@torch.jit.export
def forward_decoder(
self, tokens,
encoder_outs: List[EncoderOut],
aux_tokens: Tensor,
aux_decoder_out: Tensor,
temperature: float = 1.0
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[EncoderOut] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.forward_decoder(
tokens,
auxiliary_out=aux_decoder_out,
auxiliary_tokens=aux_tokens,
encoder_out=encoder_out,
incremental_state=self.incremental_states[i],
)
else:
decoder_out = model.forward_decoder(
tokens,
auxiliary_out=aux_decoder_out,
auxiliary_tokens=aux_tokens,
encoder_out=encoder_out)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn | |
initialize=0)
# Bulk declaration of the model's decision variables instead of one
# assignment statement per variable:
#   x302..x594 — non-negative continuous variables,
#   b595..b620 — binary (0/1) variables.
# setattr(m, name, Var(...)) is equivalent to `m.<name> = Var(...)` on a
# Pyomo Block, so component names and ordering are unchanged.
for _i in range(302, 595):
    setattr(m, "x{}".format(_i), Var(within=Reals, bounds=(0, None), initialize=0))
for _i in range(595, 621):
    setattr(m, "b{}".format(_i), Var(within=Binary, bounds=(0, 1), initialize=0))
m.b621 = Var(within=Binary, bounds=(0,1), | |
{POS: DET},
"<nil>|<dem>|<-sam>|DET|M|S|@>N": {POS: DET},
"<nil>|<sam->|PRP|@<ADVL": {POS: ADP},
"<nil>|ADV|@<ADVL": {POS: ADV},
"<nil>|ADV|@>N": {POS: ADV},
"<nil>|PERS|F|3S|ACC|@ACC>": {POS: PRON},
"<nil>|PRP|@ADVL>": {POS: ADP},
"<nil>|PRP|@N<": {POS: ADP},
"<nil>|PRP|@P<": {POS: ADP},
"<nil>|X|@X": {POS: X},
"<np-def>|ADJ|F|S|@P<": {POS: ADJ},
"<np-def>|ADJ|M|S|@N<": {POS: ADJ},
"<np-def>|ADJ|M|S|@N<PRED": {POS: ADJ},
"<np-def>|DET|F|P|@P<": {POS: PRON},
"<np-def>|DET|M|P|@SUBJ>": {POS: PRON},
"<np-def>|DET|M|S|@P<": {POS: PRON},
"<np-def>|DET|M|S|@SUBJ>": {POS: PRON},
"<np-def>|N|@P<": {POS: NOUN},
"<np-def>|N|F|P|@<ACC": {POS: NOUN},
"<np-def>|N|F|P|@<ADVL": {POS: NOUN},
"<np-def>|N|F|P|@<SA": {POS: NOUN},
"<np-def>|N|F|P|@<SC": {POS: NOUN},
"<np-def>|N|F|P|@<SUBJ": {POS: NOUN},
"<np-def>|N|F|P|@>A": {POS: NOUN},
"<np-def>|N|F|P|@ADVL>": {POS: NOUN},
"<np-def>|N|F|P|@APP": {POS: NOUN},
"<np-def>|N|F|P|@KOMP<": {POS: NOUN},
"<np-def>|N|F|P|@N<": {POS: NOUN},
"<np-def>|N|F|P|@N<PRED": {POS: NOUN},
"<np-def>|N|F|P|@NPHR": {POS: NOUN},
"<np-def>|N|F|P|@P<": {POS: NOUN},
"<np-def>|N|F|P|@SUBJ>": {POS: NOUN},
"<np-def>|N|F|P|@TOP": {POS: NOUN},
"<np-def>|N|F|S|@<ACC": {POS: NOUN},
"<np-def>|N|F|S|@<ADVL": {POS: NOUN},
"<np-def>|N|F|S|@<OC": {POS: NOUN},
"<np-def>|N|F|S|@<PIV": {POS: NOUN},
"<np-def>|N|F|S|@<SA": {POS: NOUN},
"<np-def>|N|F|S|@<SC": {POS: NOUN},
"<np-def>|N|F|S|@<SUBJ": {POS: NOUN},
"<np-def>|N|F|S|@<sUBJ": {POS: NOUN},
"<np-def>|N|F|S|@>A": {POS: NOUN},
"<np-def>|N|F|S|@ACC>": {POS: NOUN},
"<np-def>|N|F|S|@ADVL>": {POS: NOUN},
"<np-def>|N|F|S|@APP": {POS: NOUN},
"<np-def>|N|F|S|@AUX<": {POS: NOUN},
"<np-def>|N|F|S|@FS-N<": {POS: NOUN},
"<np-def>|N|F|S|@FS-N<PRED": {POS: NOUN},
"<np-def>|N|F|S|@ICL-<ACC": {POS: NOUN},
"<np-def>|N|F|S|@N<": {POS: NOUN},
"<np-def>|N|F|S|@N<PRED": {POS: NOUN},
"<np-def>|N|F|S|@NPHR": {POS: NOUN},
"<np-def>|N|F|S|@P<": {POS: NOUN},
"<np-def>|N|F|S|@SC>": {POS: NOUN},
"<np-def>|N|F|S|@SUBJ>": {POS: NOUN},
"<np-def>|N|M/F|P|@P<": {POS: NOUN},
"<np-def>|N|M/F|P|@SUBJ>": {POS: NOUN},
"<np-def>|N|M/F|S|@P<": {POS: NOUN},
"<np-def>|N|M/F|S|@SUBJ>": {POS: NOUN},
"<np-def>|N|M|P|@<ACC": {POS: SYM},
"<np-def>|N|M|P|@<ADVL": {POS: SYM},
"<np-def>|N|M|P|@<SA": {POS: NOUN},
"<np-def>|N|M|P|@<SC": {POS: SYM},
"<np-def>|N|M|P|@<SUBJ": {POS: SYM},
"<np-def>|N|M|P|@>A": {POS: SYM},
"<np-def>|N|M|P|@A<": {POS: NOUN},
"<np-def>|N|M|P|@ACC>": {POS: NOUN},
"<np-def>|N|M|P|@ADVL>": {POS: NOUN},
"<np-def>|N|M|P|@APP": {POS: NOUN},
"<np-def>|N|M|P|@ICL-P<": {POS: NOUN},
"<np-def>|N|M|P|@N<": {POS: SYM},
"<np-def>|N|M|P|@N<PRED": {POS: SYM},
"<np-def>|N|M|P|@NPHR": {POS: SYM},
"<np-def>|N|M|P|@P<": {POS: SYM},
"<np-def>|N|M|P|@SC>": {POS: NOUN},
"<np-def>|N|M|P|@SUBJ>": {POS: SYM},
"<np-def>|N|M|P|@TOP": {POS: NOUN},
"<np-def>|N|M|S/P|@P<": {POS: NOUN},
"<np-def>|N|M|S|@<ACC": {POS: NOUN},
"<np-def>|N|M|S|@<ADVL": {POS: NOUN},
"<np-def>|N|M|S|@<OC": {POS: NOUN},
"<np-def>|N|M|S|@<SC": {POS: NOUN},
"<np-def>|N|M|S|@<SUBJ": {POS: NOUN},
"<np-def>|N|M|S|@>A": {POS: NOUN},
"<np-def>|N|M|S|@A<": {POS: NOUN},
"<np-def>|N|M|S|@ACC>": {POS: NOUN},
"<np-def>|N|M|S|@ADVL>": {POS: NOUN},
"<np-def>|N|M|S|@APP": {POS: NOUN},
"<np-def>|N|M|S|@AS<": {POS: NOUN},
"<np-def>|N|M|S|@AUX<": {POS: NOUN},
"<np-def>|N|M|S|@N<": {POS: NOUN},
"<np-def>|N|M|S|@N<PRED": {POS: NOUN},
"<np-def>|N|M|S|@NPHR": {POS: NOUN},
"<np-def>|N|M|S|@P<": {POS: NOUN},
"<np-def>|N|M|S|@SC>": {POS: NOUN},
"<np-def>|N|M|S|@SUBJ>": {POS: NOUN},
"<np-def>|PROP|<UPOS:NOUN>|<UF:Gender=Masc>|N|M|S|@SUBJ>": {POS: PROPN},
"<np-def>|PROP|M|S|@P<": {POS: PROPN},
"<np-idf>|<cjt>|N|F|P|@APP": {POS: NOUN},
"<np-idf>|<cjt>|N|F|P|@N<PRED": {POS: NOUN},
"<np-idf>|<cjt>|N|F|S|@<ACC": {POS: NOUN},
"<np-idf>|<cjt>|N|M|P|@N<PRED": {POS: NOUN},
"<np-idf>|<cjt>|N|M|P|@P<": {POS: NOUN},
"<np-idf>|ADJ|F|S|@N<": {POS: ADJ},
"<np-idf>|ADJ|F|S|@N<PRED": {POS: ADJ},
"<np-idf>|ADJ|M|P|@N<": {POS: ADJ},
"<np-idf>|ADJ|M|P|@N<PRED": {POS: ADJ},
"<np-idf>|ADJ|M|S|@N<": {POS: ADJ},
"<np-idf>|ADJ|M|S|@N<PRED": {POS: ADJ},
"<np-idf>|N|@N<PRED": {POS: NOUN},
"<np-idf>|N|@NPHR": {POS: NOUN},
"<np-idf>|N|@P<": {POS: NOUN},
"<np-idf>|N|F|P|@<ACC": {POS: NOUN},
"<np-idf>|N|F|P|@<ADVL": {POS: NOUN},
"<np-idf>|N|F|P|@<OC": {POS: NOUN},
"<np-idf>|N|F|P|@<SC": {POS: NOUN},
"<np-idf>|N|F|P|@<SUBJ": {POS: NOUN},
"<np-idf>|N|F|P|@>N": {POS: NOUN},
"<np-idf>|N|F|P|@ACC>": {POS: NOUN},
"<np-idf>|N|F|P|@ADVL>": {POS: NOUN},
"<np-idf>|N|F|P|@APP": {POS: NOUN},
"<np-idf>|N|F|P|@N<": {POS: NOUN},
"<np-idf>|N|F|P|@N<PRED": {POS: NOUN},
"<np-idf>|N|F|P|@NPHR": {POS: NOUN},
"<np-idf>|N|F|P|@P<": {POS: NOUN},
"<np-idf>|N|F|P|@S<": {POS: NOUN},
"<np-idf>|N|F|P|@SUBJ>": {POS: NOUN},
"<np-idf>|N|F|P|@VOK": {POS: NOUN},
"<np-idf>|N|F|S|@<ACC": {POS: NOUN},
"<np-idf>|N|F|S|@<ADVL": {POS: NOUN},
"<np-idf>|N|F|S|@<OC": {POS: NOUN},
"<np-idf>|N|F|S|@<PRED": {POS: NOUN},
"<np-idf>|N|F|S|@<SA": {POS: NOUN},
"<np-idf>|N|F|S|@<SC": {POS: NOUN},
"<np-idf>|N|F|S|@<SUBJ": {POS: NOUN},
"<np-idf>|N|F|S|@<sC": {POS: NOUN},
"<np-idf>|N|F|S|@<sUBJ": {POS: NOUN},
"<np-idf>|N|F|S|@>N": {POS: NOUN},
"<np-idf>|N|F|S|@A<": {POS: NOUN},
"<np-idf>|N|F|S|@ACC>": {POS: NOUN},
"<np-idf>|N|F|S|@ADVL>": {POS: NOUN},
"<np-idf>|N|F|S|@APP": {POS: NOUN},
"<np-idf>|N|F|S|@N<": {POS: NOUN},
"<np-idf>|N|F|S|@N<PRED": {POS: NOUN},
"<np-idf>|N|F|S|@NPHR": {POS: NOUN},
"<np-idf>|N|F|S|@P<": {POS: NOUN},
"<np-idf>|N|F|S|@PRED>": {POS: NOUN},
"<np-idf>|N|F|S|@S<": {POS: NOUN},
"<np-idf>|N|F|S|@SC>": {POS: NOUN},
"<np-idf>|N|F|S|@SUBJ>": {POS: NOUN},
"<np-idf>|N|F|S|@UTT": {POS: NOUN},
"<np-idf>|N|F|S|@VOK": {POS: NOUN},
"<np-idf>|N|M/F|P|@<ACC": {POS: NOUN},
"<np-idf>|N|M/F|P|@<SUBJ": {POS: NOUN},
"<np-idf>|N|M/F|P|@NPHR": {POS: NOUN},
"<np-idf>|N|M/F|P|@P<": {POS: NOUN},
"<np-idf>|N|M/F|P|@SUBJ>": {POS: NOUN},
"<np-idf>|N|M/F|S|@NPHR": {POS: NOUN},
"<np-idf>|N|M/F|S|@P<": {POS: NOUN},
"<np-idf>|N|M|P|@<ACC": {POS: NOUN},
"<np-idf>|N|M|P|@<ADVL": {POS: NOUN},
"<np-idf>|N|M|P|@<OC": {POS: NOUN},
"<np-idf>|N|M|P|@<PIV": {POS: NOUN},
"<np-idf>|N|M|P|@<SA": {POS: NOUN},
"<np-idf>|N|M|P|@<SC": {POS: NOUN},
"<np-idf>|N|M|P|@<SUBJ": {POS: NOUN},
"<np-idf>|N|M|P|@>A": {POS: NOUN},
"<np-idf>|N|M|P|@>N": {POS: NOUN},
"<np-idf>|N|M|P|@A<": {POS: NOUN},
"<np-idf>|N|M|P|@ACC>": {POS: NOUN},
"<np-idf>|N|M|P|@ADVL>": {POS: NOUN},
"<np-idf>|N|M|P|@APP": {POS: NOUN},
"<np-idf>|N|M|P|@AUX<": {POS: NOUN},
"<np-idf>|N|M|P|@ICL-<ACC": {POS: NOUN},
"<np-idf>|N|M|P|@ICL-P<": {POS: NOUN},
"<np-idf>|N|M|P|@N<": {POS: NOUN},
"<np-idf>|N|M|P|@N<PRED": {POS: NOUN},
"<np-idf>|N|M|P|@NPHR": {POS: NOUN},
"<np-idf>|N|M|P|@P<": {POS: NOUN},
"<np-idf>|N|M|P|@PRED>": {POS: NOUN},
"<np-idf>|N|M|P|@SUBJ>": {POS: PROPN},
"<np-idf>|N|M|P|@TOP": {POS: NOUN},
"<np-idf>|N|M|R|@<ACC": {POS: NOUN},
"<np-idf>|N|M|S|@<ACC": {POS: NOUN},
"<np-idf>|N|M|S|@<ADVL": {POS: NOUN},
"<np-idf>|N|M|S|@<OC": {POS: NOUN},
"<np-idf>|N|M|S|@<PRED": {POS: NOUN},
"<np-idf>|N|M|S|@<SA": {POS: NOUN},
"<np-idf>|N|M|S|@<SC": {POS: NOUN},
"<np-idf>|N|M|S|@<SUBJ": {POS: NOUN},
"<np-idf>|N|M|S|@<sC": {POS: NOUN},
"<np-idf>|N|M|S|@<sUBJ": {POS: NOUN},
"<np-idf>|N|M|S|@>A": {POS: NOUN},
"<np-idf>|N|M|S|@>N": {POS: NOUN},
"<np-idf>|N|M|S|@A<": {POS: NOUN},
"<np-idf>|N|M|S|@ACC>": {POS: NOUN},
"<np-idf>|N|M|S|@ADVL": {POS: NOUN},
"<np-idf>|N|M|S|@ADVL>": {POS: NOUN},
"<np-idf>|N|M|S|@APP": {POS: NOUN},
"<np-idf>|N|M|S|@CO": {POS: NOUN},
"<np-idf>|N|M|S|@ICL-<SC": {POS: NOUN},
"<np-idf>|N|M|S|@ICL-P<": {POS: NOUN},
"<np-idf>|N|M|S|@KOMP<": {POS: NOUN},
"<np-idf>|N|M|S|@N<": {POS: NOUN},
"<np-idf>|N|M|S|@N<PRED": {POS: NOUN},
"<np-idf>|N|M|S|@NPHR": {POS: NOUN},
"<np-idf>|N|M|S|@P<": {POS: NOUN},
"<np-idf>|N|M|S|@PRED>": {POS: NOUN},
"<np-idf>|N|M|S|@S<": {POS: NOUN},
"<np-idf>|N|M|S|@SC>": {POS: NOUN},
"<np-idf>|N|M|S|@SUBJ>": {POS: NOUN},
"<np-idf>|N|M|S|@VOC": {POS: NOUN},
"<np-idf>|N|M|S|@VOK": {POS: NOUN},
"<np-idf>|N|M|s|@P<": {POS: NOUN},
"<np-idf>|PROP|<UPOS:NOUN>|<UF:Gender=Fem>|N|F|S|@APP": {POS: PROPN},
"<np-idf>|PROP|M|S|@P<": {POS: PROPN},
"<obj>|PERS|F|3P|ACC|@<ACC": {POS: PRON},
"<obj>|PERS|F|3P|ACC|@ACC>": {POS: PRON},
"<obj>|PERS|F|3S|ACC|@<ACC": {POS: PRON},
"<obj>|PERS|F|3S|ACC|@ACC>": {POS: PRON},
"<obj>|PERS|M/F|3S|ACC|@<ACC": {POS: PRON},
"<obj>|PERS|M|3P|ACC|@<ACC": {POS: PRON},
"<obj>|PERS|M|3P|ACC|@ACC>": {POS: PRON},
"<obj>|PERS|M|3S|ACC|@<ACC": {POS: PRON},
"<obj>|PERS|M|3S|ACC|@ACC>": {POS: PRON},
"<parkc-1>|ADV|@CO": {POS: ADV},
"<parkc-1>|KC|@CO": {POS: CCONJ},
"<parkc-2>|<co-subj>|KC|@CO": {POS: CCONJ},
"<parkc-2>|KC|@CO": {POS: CCONJ},
"<pass>|<cjt>|<mv>|V|PCP|F|P|@ICL-AUX<": {POS: VERB},
"<pass>|<cjt>|<mv>|V|PCP|F|S|@ICL-AUX<": {POS: VERB},
"<pass>|<cjt>|<mv>|V|PCP|M|S|@ICL-AUX<": {POS: VERB},
"<pass>|<mv>|V|PCP|F|P|@ICL-AUX<": {POS: VERB},
"<pass>|<mv>|V|PCP|F|S|@ICL-AUX<": {POS: VERB},
"<pass>|<mv>|V|PCP|MVF|S|@ICL-AUX<": {POS: VERB},
"<pass>|<mv>|V|PCP|M|P|@ICL-AUX<": {POS: VERB},
"<pass>|<mv>|V|PCP|M|S|@ICL-AUX<": {POS: VERB},
"<poss>|<np-def>|DET|M|P|@P<": {POS: PRON},
"<poss>|<np-def>|DET|M|P|@SUBJ>": {POS: PRON},
"<poss>|<np-def>|DET|M|S|@N<": {POS: DET},
"<poss>|<np-idf>|DET|F|S|@N<": {POS: DET},
"<poss>|<si>|<np-def>|DET|M|P|@>A": {POS: DET},
"<poss>|<si>|DET|F|P|@>N": {POS: DET},
"<poss>|<si>|DET|F|S|@>N": {POS: DET},
"<poss>|<si>|DET|M|P|@>N": {POS: DET},
"<poss>|<si>|DET|M|S|@>N": {POS: DET},
"<poss>|DET|F|P|@>N": {POS: DET},
"<poss>|DET|F|P|@N<": {POS: DET},
"<poss>|DET|F|S|@<SC": {POS: PRON},
"<poss>|DET|F|S|@>N": {POS: DET},
"<poss>|DET|F|S|@N<": {POS: DET},
"<poss>|DET|M|P|@>N": {POS: DET},
"<poss>|DET|M|P|@N<": {POS: DET},
"<poss>|DET|M|S|@<SC": {POS: PRON},
"<poss>|DET|M|S|@>N": {POS: DET},
"<poss>|DET|M|S|@N<": {POS: DET},
"<prd>|ADV|@<ADVL": {POS: ADV},
"<prd>|ADV|@<OC": {POS: ADV},
"<prd>|ADV|@<SC": {POS: ADV},
"<prd>|PRP|@<ADVL": {POS: ADP},
"<prd>|PRP|@<OC": {POS: ADP},
"<prd>|PRP|@<SC": {POS: ADP},
"<prd>|PRP|@ADVL>": {POS: ADP},
"<prd>|PRP|@COM": {POS: ADP},
"<prd>|PRP|@N<": {POS: ADP},
"<prd>|PRP|@N<ARG": {POS: ADP},
"<prd>|PRP|@N<PRED": {POS: ADP},
"<prd>|PRP|@OC": {POS: ADP},
"<prd>|PRP|@OC>": {POS: ADP},
"<prd>|PRP|@P<": {POS: ADP},
"<prd>|PRP|@PRED>": {POS: ADP},
"<premod>|<kc>|ADV|@ADVL>": {POS: ADV},
"<premod>|<quant>|ADV|@ADVL>": {POS: ADV},
"<premod>|ADV|@<ADVL": {POS: ADV},
"<premod>|ADV|@ADVL>": {POS: ADV},
"<prop>|<Eg>|<n>|<np-def>|ADJ|M|S|@<ACC": {POS: ADJ},
"<prop>|<Eg>|<n>|<np-def>|ADJ|M|S|@<ADVL": {POS: ADJ},
"<prop>|<Eg>|<n>|<np-def>|ADJ|M|S|@P<": {POS: ADJ},
"<prop>|<NUM-ord>|ADJ|M|S|@N<": {POS: ADJ},
"<prop>|<card>|<np-def>|NUM|M|P|@P<": {POS: NUM},
"<prop>|<card>|NUM|F|P|@>N": {POS: NUM},
"<prop>|<card>|NUM|M|P|@>N": {POS: NUM},
"<prop>|<n>|<card>|<np-def>|NUM|M|P|@P<": {POS: NUM},
"<prop>|<n>|<np-def>|ADJ|M|S|@<SUBJ": {POS: ADJ},
"<prop>|<n>|<np-def>|ADJ|M|S|@P<": {POS: ADJ},
"<prop>|<n>|<np-def>|ADJ|M|S|@SUBJ>": {POS: ADJ},
"<prop>|<n>|<np-idf>|ADJ|M|S|@P<": {POS: ADJ},
"<prop>|<n>|ADJ|M|S|@P<": {POS: ADJ},
"<prop>|<np-def>|N|F|P|@<ACC": {POS: NOUN},
"<prop>|<np-def>|N|F|P|@P<": {POS: NOUN},
"<prop>|<np-def>|N|F|P|@SUBJ>": {POS: NOUN},
"<prop>|<np-def>|N|F|S|@<ACC": {POS: NOUN},
"<prop>|<np-def>|N|F|S|@<SUBJ": {POS: NOUN},
"<prop>|<np-def>|N|F|S|@N<PRED": {POS: NOUN},
"<prop>|<np-def>|N|F|S|@P<": {POS: NOUN},
"<prop>|<np-def>|N|F|S|@SUBJ>": {POS: NOUN},
"<prop>|<np-def>|N|M|P|@<ACC": {POS: NOUN},
"<prop>|<np-def>|N|M|P|@<SUBJ": {POS: NOUN},
"<prop>|<np-def>|N|M|P|@P<": {POS: NOUN},
"<prop>|<np-def>|N|M|P|@SUBJ>": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@<ACC": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@<SC": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@<SUBJ": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@APP": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@NPHR": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@P<": {POS: NOUN},
"<prop>|<np-def>|N|M|S|@SUBJ>": {POS: NOUN},
"<prop>|<np-idf>|N|@P<": {POS: NOUN},
"<prop>|<np-idf>|N|F|P|@P<": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@<ACC": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@<SC": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@<SUBJ": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@N<": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@N<PRED": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@NPHR": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@P<": {POS: NOUN},
"<prop>|<np-idf>|N|F|S|@SUBJ>": {POS: NOUN},
"<prop>|<np-idf>|N|M/F|S|@P<": {POS: NOUN},
"<prop>|<np-idf>|N|M|P|@<OC": {POS: NOUN},
"<prop>|<np-idf>|N|M|P|@<SUBJ": {POS: NOUN},
"<prop>|<np-idf>|N|M|P|@P<": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@<ACC": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@<SC": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@<SUBJ": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@N<": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@N<PRED": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@NPHR": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@P<": {POS: NOUN},
"<prop>|<np-idf>|N|M|S|@SUBJ>": {POS: NOUN},
"<prop>|ADJ|F|P|@N<": {POS: ADJ},
"<prop>|ADJ|F|S|@N<": {POS: ADJ},
"<prop>|ADJ|M/F|S|@N<": {POS: ADJ},
"<prop>|ADJ|M|P|@N<": {POS: ADJ},
"<prop>|ADJ|M|S|@N<": {POS: ADJ},
"<prp>|<cjt>|ADV|@<ADVL": {POS: ADV},
"<prp>|<first-cjt>|PRP|@<ADVL": {POS: ADP},
"<prp>|<rel>|<first-cjt>|PRP|@<SC": {POS: ADP},
"<prp>|<rel>|PRP|@<ADVL": {POS: ADP},
"<prp>|<rel>|PRP|@ADVL>": {POS: ADP},
"<prp>|PRP|@<OC": {POS: ADP},
"<prp>|PRP|@<SC": {POS: ADP},
"<prp>|PRP|@A<": {POS: ADP},
"<quant>|<KOMP>|<COMP>|ADV|@<ACC": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@<ADVL": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@<SC": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@>A": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@>N": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@A<": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@ACC>": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@ADVL": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@ADVL>": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@CO": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@N<": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@N<PRED": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|@P<": {POS: ADV},
"<quant>|<KOMP>|<COMP>|ADV|F|P|@<ADVL": {POS: ADV},
"<quant>|<KOMP>|<COMP>|DET|F|P|@<ACC": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|F|P|@<SC": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|F|P|@>N": {POS: DET},
"<quant>|<KOMP>|<COMP>|DET|F|S|@>N": {POS: DET},
"<quant>|<KOMP>|<COMP>|DET|F|S|@SUBJ>": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M/F|S/P|@<ACC": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M/F|S/P|@P<": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M|P|@>N": {POS: DET},
"<quant>|<KOMP>|<COMP>|DET|M|P|@P<": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M|P|@SUBJ>": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M|S/P|@<SC": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M|S|@<SC": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M|S|@>N": {POS: DET},
"<quant>|<KOMP>|<COMP>|DET|M|S|@P<": {POS: PRON},
"<quant>|<KOMP>|<COMP>|DET|M|S|@SUBJ>": {POS: PRON},
"<quant>|<KOMP>|<np-def>|<COMP>|DET|M|P|@P<": {POS: PRON},
"<quant>|<KOMP>|ADV|@>A": {POS: ADV},
"<quant>|<KOMP>|ADV|@CO": {POS: ADV},
"<quant>|<KOMP>|DET|M/F|S/P|@>A": {POS: DET},
"<quant>|<KOMP>|DET|M|P|@>A": {POS: DET},
"<quant>|<KOMP>|DET|M|S|@>A": {POS: DET},
"<quant>|<SUP>|DET|M|S|@>N": {POS: DET},
"<quant>|<det>|ADV|@<ACC": {POS: ADV},
"<quant>|<det>|ADV|@>A": {POS: ADV},
"<quant>|<det>|ADV|@>N": {POS: ADV},
"<quant>|<det>|ADV|@ADVL>": {POS: ADV},
"<quant>|<det>|ADV|@P<": {POS: ADV},
"<quant>|<det>|INDP|M|S|@P<": {POS: PRON},
"<quant>|<interr>|ADV|@>N": {POS: ADV},
"<quant>|<interr>|ADV|@P<": {POS: ADV},
"<quant>|<np-def>|DET|M|P|@<ACC": {POS: PRON},
"<quant>|<np-def>|DET|M|S|@P<": {POS: PRON},
"<quant>|<np-def>|DET|M|S|@SUBJ>": {POS: PRON},
"<quant>|<np-idf>|DET|F|S|@SUBJ>": {POS: PRON},
"<quant>|<np-idf>|DET|M|P|@P<": {POS: PRON},
"<quant>|<np-idf>|DET|M|P|@SUBJ>": {POS: PRON},
"<quant>|<np-idf>|DET|M|S|@ACC>": {POS: PRON},
"<quant>|<np-idf>|DET|M|S|@P<": {POS: PRON},
"<quant>|<parkc-1>|<KOMP>|ADV|@CO": {POS: ADV},
"<quant>|<parkc-1>|<KOMP>|KC|@CO": {POS: CCONJ},
"<quant>|ADJ|F|P|@N<": {POS: ADJ},
"<quant>|ADJ|F|S|@<SC": {POS: ADJ},
"<quant>|ADJ|F|S|@>N": {POS: ADJ},
"<quant>|ADJ|F|S|@N<": {POS: ADJ},
"<quant>|ADJ|M|P|@<OC": {POS: ADJ},
"<quant>|ADJ|M|P|@N<": {POS: ADJ},
"<quant>|ADJ|M|S|@<SC": {POS: ADJ},
"<quant>|ADV|@<ACC": {POS: ADV},
"<quant>|ADV|@<ADVL": {POS: ADV},
"<quant>|ADV|@<SA": {POS: ADV},
"<quant>|ADV|@<SC": {POS: ADV},
"<quant>|ADV|@>A": {POS: ADV},
"<quant>|ADV|@>N": {POS: ADV},
"<quant>|ADV|@>P": {POS: ADV},
"<quant>|ADV|@A<": {POS: ADV},
"<quant>|ADV|@ADVL>": {POS: ADV},
"<quant>|ADV|@CO": {POS: ADV},
"<quant>|ADV|@FS-STA": {POS: ADV},
"<quant>|ADV|@N<": {POS: ADV},
"<quant>|ADV|@N<|<MWE2>": {POS: ADV},
"<quant>|ADV|@P<": {POS: ADV},
"<quant>|ART|F|S|@>N": {POS: DET},
"<quant>|ART|M|S|@>N": {POS: DET},
"<quant>|DET|@>A": {POS: DET},
"<quant>|DET|F|P|@<ACC": {POS: PRON},
"<quant>|DET|F|P|@<SC": {POS: PRON},
"<quant>|DET|F|P|@<SUBJ": {POS: PRON},
"<quant>|DET|F|P|@>N": {POS: DET},
"<quant>|DET|F|P|@N<": {POS: DET},
"<quant>|DET|F|P|@N<PRED": {POS: DET},
"<quant>|DET|F|P|@NPHR": {POS: DET},
| |
<reponame>faradaymahe/VASP_band_plot<gh_stars>1-10
"""
This module contains standardized plots as well as more complex plots,
such as band structures and density of states put together, and spin
projected plots.
"""
from vaspvis.band import Band
from vaspvis.dos import Dos
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
def _figure_setup(ax, fontsize=6, ylim=[-6, 6]):
ax.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax.set_ylim(ylim[0], ylim[1])
ax.tick_params(labelsize=fontsize, length=2.5)
ax.tick_params(axis='x', length=0)
def _figure_setup_dos(ax, fontsize=6, energyaxis='y'):
ax.tick_params(labelsize=fontsize, length=2.5)
if energyaxis == 'y':
ax.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax.set_xlabel('Density of States', fontsize=fontsize)
if energyaxis == 'x':
ax.set_xlabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax.set_ylabel('Density of States', fontsize=fontsize)
def _figure_setup_band_dos(ax, fontsize, ylim):
ax1 = ax[0]
ax2 = ax[1]
ax2.tick_params(axis='y', length=0)
ax2.tick_params(axis='x', length=0, labelsize=fontsize)
ax2.set_xlabel('Density of States', fontsize=fontsize)
ax1.tick_params(labelsize=fontsize)
ax1.tick_params(axis='x', length=0)
ax1.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax1.set_xlabel('Wave Vector', fontsize=fontsize)
ax1.set_ylim(ylim[0], ylim[1])
return ax1, ax2
def _figure_setup_band_dos_spin_polarized(ax, fontsize, ylim):
ax_band_up = ax[0, 0]
ax_dos_up = ax[0, 1]
ax_band_down = ax[1, 0]
ax_dos_down = ax[1, 1]
ax_dos_up.tick_params(axis='y', length=0)
ax_dos_up.tick_params(axis='x', length=0,
labelsize=fontsize, labelbottom=False)
ax_band_up.tick_params(labelsize=fontsize)
ax_band_up.tick_params(axis='x', length=0, labelbottom=False)
ax_band_up.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax_band_up.set_ylim(ylim[0], ylim[1])
ax_dos_down.tick_params(axis='y', length=0)
ax_dos_down.tick_params(axis='x', length=0, labelsize=fontsize)
ax_dos_down.set_xlabel('Density of States', fontsize=fontsize)
ax_band_down.tick_params(labelsize=fontsize)
ax_band_down.tick_params(axis='x', length=0)
ax_band_down.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax_band_down.set_xlabel('Wave Vector', fontsize=fontsize)
ax_band_down.set_ylim(ylim[0], ylim[1])
return ax_band_up, ax_dos_up, ax_band_down, ax_dos_down
def _figure_setup_layer_dos(ax, fontsize=6, energyaxis='y'):
ax.tick_params(labelsize=fontsize)
if energyaxis == 'y':
ax.set_ylabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax.set_xlabel('Layers', fontsize=fontsize)
if energyaxis == 'x':
ax.set_xlabel('$E - E_{F}$ $(eV)$', fontsize=fontsize)
ax.set_ylabel('Layers', fontsize=fontsize)
def band_plain(
    folder,
    output='band_plain.png',
    spin='up',
    color='black',
    linewidth=1.25,
    linestyle='-',
    figsize=(4, 3),
    erange=(-6, 6),
    hse=False,
    kpath=None,
    n=None,
    fontsize=7,
    save=True,
):
    """
    This function generates a plain band structure
    Parameters:
        folder (str): This is the folder that contains the VASP files
        output (str): File name of the resulting plot.
        spin (str): Choose which spin direction to parse. ('up' or 'down')
        color (str): Color of the band structure lines
        linewidth (float): Line width of the band structure lines
        linestyle (str): Line style of the bands
        figsize (list / tuple): Desired size of the image in inches (width, height)
        erange (list / tuple): Range of energy to show in the plot [low, high]
        hse (bool): Whether the band structure calculation used HSE. If True,
            kpath and n must also be supplied (see below).
        kpath (str): High symmetry k-point path of band structure calculation
            Due to the nature of the KPOINTS file for HSE calculations this
            information is a required input for proper labeling of the figure
            for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
        n (int): Number of points between each high symmetry points.
            This is also only required for HSE calculations. This number should be
            known by the user, as it was used to generate the KPOINTS file.
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are return for further manipulation.
    Returns:
        If save == True, this function will return nothing and directly save the image as
        the output name. If save == False, the function will return the matplotlib figure
        and axis for further editing.
    """
    band = Band(
        folder=folder,
        spin=spin,
        hse=hse,
        kpath=kpath,
        n=n,
    )
    fig = plt.figure(figsize=figsize, dpi=400)
    ax = fig.add_subplot(111)
    _figure_setup(ax=ax, fontsize=fontsize, ylim=[erange[0], erange[1]])
    band.plot_plain(
        ax=ax,
        color=color,
        linewidth=linewidth,
        linestyle=linestyle,
    )
    fig.tight_layout(pad=0.2)
    if save:
        fig.savefig(output)
        # Close the figure so repeated calls do not accumulate open figures.
        plt.close(fig)
    else:
        return fig, ax
def band_spd(
    folder,
    output='band_spd.png',
    spin='up',
    scale_factor=6,
    order=None,
    color_dict=None,
    legend=True,
    linewidth=0.75,
    band_color='black',
    figsize=(4, 3),
    erange=(-6, 6),
    hse=False,
    kpath=None,
    n=None,
    fontsize=7,
    save=True,
):
    """
    This function generates a s, p, d projected band structure.
    Parameters:
        folder (str): This is the folder that contains the VASP files
        output (str): File name of the resulting plot.
        spin (str): Choose which spin direction to parse. ('up' or 'down')
        scale_factor (float): Factor to scale weights. This changes the size of the
            points in the scatter plot
        order (list): This determines the order in which the points are plotted on the
            graph. This is an option because sometimes certain orbitals can be hidden
            under others because they have a larger weight. For example, if the
            weights of the d orbitals are greater than that of the s orbitals, it
            might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
            plotted over the d orbitals. Defaults to ['s', 'p', 'd'].
        color_dict (dict[str][str]): This option allows the colors of the s, p, and d
            orbitals to be specified. Should be in the form of:
            {'s': <s color>, 'p': <p color>, 'd': <d color>}
        legend (bool): Determines if the legend should be included or not.
        linewidth (float): Line width of the plain band structure plotted in the background
        band_color (string): Color of the plain band structure
        figsize (list / tuple): Desired size of the image in inches (width, height)
        erange (list / tuple): Range of energy to show in the plot [low, high]
        hse (bool): Whether the band structure calculation used HSE. If True,
            kpath and n must also be supplied (see below).
        kpath (str): High symmetry k-point path of band structure calculation
            Due to the nature of the KPOINTS file for HSE calculations this
            information is a required input for proper labeling of the figure
            for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
        n (int): Number of points between each high symmetry points.
            This is also only required for HSE calculations. This number should be
            known by the user, as it was used to generate the KPOINTS file.
        fontsize (float): Font size of the text in the figure.
        save (bool): Determines whether to automatically save the figure or not. If not
            the figure and axis are return for further manipulation.
    Returns:
        If save == True, this function will return nothing and directly save the image as
        the output name. If save == False, the function will return the matplotlib figure
        and axis for further editing.
    """
    # Use a None sentinel to avoid a mutable default argument; the
    # historical default ordering is ['s', 'p', 'd'].
    if order is None:
        order = ['s', 'p', 'd']
    band = Band(
        folder=folder,
        spin=spin,
        projected=True,
        hse=hse,
        kpath=kpath,
        n=n,
    )
    fig = plt.figure(figsize=figsize, dpi=400)
    ax = fig.add_subplot(111)
    _figure_setup(ax=ax, fontsize=fontsize, ylim=[erange[0], erange[1]])
    band.plot_spd(
        ax=ax,
        scale_factor=scale_factor,
        order=order,
        color_dict=color_dict,
        legend=legend,
        linewidth=linewidth,
        band_color=band_color,
    )
    fig.tight_layout(pad=0.2)
    if save:
        fig.savefig(output)
        # Close the figure so repeated calls do not accumulate open figures.
        plt.close(fig)
    else:
        return fig, ax
def band_atom_orbitals(
folder,
atom_orbital_pairs,
output='band_atom_orbitals.png',
spin='up',
scale_factor=6,
color_list=None,
legend=True,
linewidth=0.75,
band_color='black',
figsize=(4, 3),
erange=[-6, 6],
hse=False,
kpath=None,
n=None,
fontsize=7,
save=True,
):
"""
This function generates a projected band structure on specific [atom, orbital] pairs.
Parameters:
folder (str): This is the folder that contains the VASP files
output (str): File name of the resulting plot.
spin (str): Choose which spin direction to parse. ('up' or 'down')
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
atom_orbital_pairs (list[list]): Selected orbitals on selected atoms to plot.
This should take the form of [[atom index, orbital_index], ...].
color_list (list): List of colors of the same length as the atom_orbital_pairs
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (string): Color of the plain band structure
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
files for non-HSE calculations. (G is automaticall converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
band = Band(
folder=folder,
spin=spin,
| |
"guda": 29432,
"save": 29433,
"û": 29434,
"▁Alanya": 29435,
"▁HOME": 29436,
"▁Intens": 29437,
"▁Lyrics": 29438,
"▁Mogu": 29439,
"▁Oude": 29440,
"▁Presenta": 29441,
"▁Seminar": 29442,
"▁Zadar": 29443,
"▁construction": 29444,
"▁lini": 29445,
"▁pia": 29446,
"▁publish": 29447,
"▁roman": 29448,
"▁scene": 29449,
"湖": 29450,
"LÉ": 29451,
"aysan": 29452,
"gö": 29453,
"installer": 29454,
"photography": 29455,
"rā": 29456,
"teil": 29457,
"torul": 29458,
"ukku": 29459,
"yā": 29460,
"zt": 29461,
"ędzi": 29462,
"▁(42)": 29463,
"▁Está": 29464,
"▁Oulu": 29465,
"▁Qaz": 29466,
"▁hour": 29467,
"▁join": 29468,
"▁lawr": 29469,
"▁nich": 29470,
"▁province": 29471,
"▁rit": 29472,
"▁tena": 29473,
"▁wedding": 29474,
"▁České": 29475,
"▁Şi": 29476,
"ILI": 29477,
"INTA": 29478,
"arg": 29479,
"broek": 29480,
"eaza": 29481,
"pewa": 29482,
"thach": 29483,
"untur": 29484,
"weni": 29485,
"zeit": 29486,
"zgl": 29487,
"▁8.30": 29488,
"▁Aina": 29489,
"▁Baix": 29490,
"▁Bloemfontein": 29491,
"▁Dixit": 29492,
"▁ONLINE": 29493,
"▁Relativ": 29494,
"▁Szcze": 29495,
"▁dum": 29496,
"▁marina": 29497,
"▁reli": 29498,
"▁sad": 29499,
"▁tape": 29500,
"よ": 29501,
"伊": 29502,
"杨": 29503,
"格": 29504,
"리": 29505,
"LOW": 29506,
"ably": 29507,
"ationis": 29508,
"diyi": 29509,
"koski": 29510,
"ngkang": 29511,
"offen": 29512,
"pato": 29513,
"▁Atlet": 29514,
"▁Helsingborg": 29515,
"▁Tantra": 29516,
"▁Template": 29517,
"▁Waffen": 29518,
"▁alma": 29519,
"▁communication": 29520,
"▁dil": 29521,
"▁fusion": 29522,
"▁marine": 29523,
"▁offshore": 29524,
"▁probably": 29525,
"▁protection": 29526,
"▁send": 29527,
"▁ulei": 29528,
"▁videos": 29529,
"堂": 29530,
"夫": 29531,
"自治": 29532,
"虎": 29533,
"mbet": 29534,
"volo": 29535,
"yyar": 29536,
"ās": 29537,
"▁Benar": 29538,
"▁Iya": 29539,
"▁Marija": 29540,
"▁Orde": 29541,
"▁Republican": 29542,
"▁Schü": 29543,
"▁Sibiu": 29544,
"▁Tü": 29545,
"▁Yavuz": 29546,
"▁blow": 29547,
"▁emo": 29548,
"▁example": 29549,
"▁gondol": 29550,
"▁granat": 29551,
"▁progressive": 29552,
"▁rule": 29553,
"▁websites": 29554,
"佐": 29555,
"EGO": 29556,
"INI": 29557,
"NOM": 29558,
"duit": 29559,
"jne": 29560,
"kaus": 29561,
"kene": 29562,
"lighting": 29563,
"mr": 29564,
"mung": 29565,
"pne": 29566,
"qal": 29567,
"ryd": 29568,
"spray": 29569,
"tlen": 29570,
"öld": 29571,
"şar": 29572,
"•": 29573,
"▁(41)": 29574,
"▁Bred": 29575,
"▁Béla": 29576,
"▁DIE": 29577,
"▁Plugin": 29578,
"▁Rä": 29579,
"▁Speicher": 29580,
"▁Tampere": 29581,
"▁UNA": 29582,
"▁bok": 29583,
"▁dă": 29584,
"▁esse": 29585,
"▁kalo": 29586,
"▁mh": 29587,
"▁monde": 29588,
"▁sino": 29589,
"▁trust": 29590,
"고": 29591,
"bair": 29592,
"bhava": 29593,
"erius": 29594,
"fata": 29595,
"ographie": 29596,
"▁Doppel": 29597,
"▁Garten": 29598,
"▁Győr": 29599,
"▁Segura": 29600,
"▁Siya": 29601,
"▁capture": 29602,
"▁corsi": 29603,
"▁cv": 29604,
"▁insu": 29605,
"▁issue": 29606,
"▁released": 29607,
"▁remember": 29608,
"▁yan": 29609,
"▁на": 29610,
"Chandigarh": 29611,
"SEL": 29612,
"UJ": 29613,
"dagi": 29614,
"dā": 29615,
"fac": 29616,
"metre": 29617,
"ndere": 29618,
"serv": 29619,
"topic": 29620,
"vaj": 29621,
"vno": 29622,
"▁Gdy": 29623,
"▁Helt": 29624,
"▁Méndez": 29625,
"▁bha": 29626,
"▁gest": 29627,
"▁maki": 29628,
"▁muffin": 29629,
"▁ratio": 29630,
"▁vesper": 29631,
"▁zoo": 29632,
"▁по": 29633,
"サ": 29634,
"华": 29635,
"OKI": 29636,
"OWA": 29637,
"PUR": 29638,
"alen": 29639,
"altı": 29640,
"lishi": 29641,
"owned": 29642,
"rados": 29643,
"ína": 29644,
"ît": 29645,
"▁Academia": 29646,
"▁Gambar": 29647,
"▁Krea": 29648,
"▁Ranta": 29649,
"▁Traum": 29650,
"▁comarca": 29651,
"▁dama": 29652,
"▁devices": 29653,
"▁editor": 29654,
"▁hear": 29655,
"▁held": 29656,
"▁mano": 29657,
"▁operate": 29658,
"▁pede": 29659,
"▁popular": 29660,
"▁russe": 29661,
"▁shared": 29662,
"▁signal": 29663,
"▁styr": 29664,
"▁wet": 29665,
"タ": 29666,
"楚": 29667,
"樂": 29668,
"국": 29669,
"CEST": 29670,
"ELO": 29671,
"byen": 29672,
"linis": 29673,
"olva": 29674,
"ovine": 29675,
"pitan": 29676,
"roep": 29677,
"ufu": 29678,
"vagn": 29679,
"ört": 29680,
"ıç": 29681,
"▁Apotheke": 29682,
"▁Kä": 29683,
"▁Magistr": 29684,
"▁ame": 29685,
"▁difficile": 29686,
"▁others": 29687,
"▁pell": 29688,
"▁radica": 29689,
"▁wid": 29690,
"烈": 29691,
"자": 29692,
"OMI": 29693,
"STAV": 29694,
"activa": 29695,
"grád": 29696,
"lifting": 29697,
"lyg": 29698,
"malı": 29699,
"posto": 29700,
"pré": 29701,
"ramai": 29702,
"sasa": 29703,
"»,": 29704,
"▁According": 29705,
"▁Actually": 29706,
"▁Bayi": 29707,
"▁Kalle": 29708,
"▁Migra": 29709,
"▁Platz": 29710,
"▁brune": 29711,
"▁grow": 29712,
"▁subter": 29713,
"▁tako": 29714,
"▁tat": 29715,
"▁tee": 29716,
"▁三": 29717,
"LEY": 29718,
"etter": 29719,
"iamo": 29720,
"rla": 29721,
"tón": 29722,
"▁......": 29723,
"▁MER": 29724,
"▁Priorit": 29725,
"▁Riks": 29726,
"▁Tisza": 29727,
"▁ferro": 29728,
"▁kb": 29729,
"▁label": 29730,
"▁limit": 29731,
"▁torre": 29732,
"靖": 29733,
"ahana": 29734,
"krov": 29735,
"kū": 29736,
"llisia": 29737,
"portu": 29738,
"zato": 29739,
"▁Boha": 29740,
"▁Podemos": 29741,
"▁Stran": 29742,
"▁Voda": 29743,
"▁carro": 29744,
"▁mill": 29745,
"▁nep": 29746,
"▁sous": 29747,
"▁tant": 29748,
"▁tenue": 29749,
"合": 29750,
"衛": 29751,
".06.2018": 29752,
"IFU": 29753,
"LOV": 29754,
"fero": 29755,
"manns": 29756,
"massa": 29757,
"memor": 29758,
"merci": 29759,
"ukia": 29760,
"umus": 29761,
"vih": 29762,
"ṇa": 29763,
"▁Akta": 29764,
"▁Basket": 29765,
"▁Boğaz": 29766,
"▁Dunaj": 29767,
"▁Hru": 29768,
"▁Júnior": 29769,
"▁Rasool": 29770,
"▁influence": 29771,
"▁lacus": 29772,
"▁livet": 29773,
"▁pelo": 29774,
"▁raja": 29775,
"▁©": 29776,
"▁Če": 29777,
"UNT": 29778,
"blé": 29779,
"likt": 29780,
"lä": 29781,
"с": 29782,
"х": 29783,
"▁Popula": 29784,
"▁Position": 29785,
"▁Tisch": 29786,
"▁Veszprém": 29787,
"▁abrupt": 29788,
"▁dissi": 29789,
"▁gastro": 29790,
"▁loo": 29791,
"▁naka": 29792,
"▁nice": 29793,
"▁personal": 29794,
"▁pudi": 29795,
"▁sati": 29796,
"▁smo": 29797,
"▁stel": 29798,
"京": 29799,
"令": 29800,
"TING": 29801,
"bax": 29802,
"bule": 29803,
"cubi": 29804,
"frau": 29805,
"gatta": 29806,
"gniti": 29807,
"▁Bistri": 29808,
"▁Figueres": 29809,
"▁Fuld": 29810,
"▁Péter": 29811,
"▁Wohl": 29812,
"▁battery": 29813,
"▁counter": 29814,
"▁degli": 29815,
"▁done": 29816,
"▁gear": 29817,
"▁mouse": 29818,
"▁package": 29819,
"▁pig": 29820,
"▁pé": 29821,
"▁quas": 29822,
"▁rank": 29823,
"▁strength": 29824,
"▁term": 29825,
"▁tic": 29826,
"▁Çarşamba": 29827,
"▁―": 29828,
"も": 29829,
"杜": 29830,
"군": 29831,
"Industri": 29832,
"Prof": 29833,
"bjerg": 29834,
"boken": 29835,
"closure": 29836,
"fara": 29837,
"flug": 29838,
"hulu": 29839,
"mille": 29840,
"nij": 29841,
"premi": 29842,
"syk": 29843,
"zami": 29844,
"▁Bundestag": 29845,
"▁Cultura": 29846,
"▁Diri": 29847,
"▁Kaduna": 29848,
"▁Madal": 29849,
"▁Noen": 29850,
"▁PhD": 29851,
"▁Vasar": 29852,
"▁colour": 29853,
"▁dru": 29854,
"▁fiber": 29855,
"▁juba": 29856,
"▁margine": 29857,
"▁matu": 29858,
"▁mission": 29859,
"▁oma": 29860,
"▁orchestr": 29861,
"▁pavo": 29862,
"▁perfor": 29863,
"▁speaker": 29864,
"▁Ča": 29865,
"▁İm": 29866,
"め": 29867,
"裕": 29868,
"ESH": 29869,
"OTE": 29870,
"Youtube": 29871,
"ensa": 29872,
"kontakt": 29873,
"krom": 29874,
"leef": 29875,
"njo": 29876,
"olas": 29877,
"szak": 29878,
"tów": 29879,
"urdu": 29880,
"⁄": 29881,
"▁Fuck": 29882,
"▁Ghulam": 29883,
"▁Herbst": 29884,
"▁Poetry": 29885,
"▁Supplement": 29886,
"▁Wirtschafts": 29887,
"▁Zahn": 29888,
"▁divisa": 29889,
"▁lua": 29890,
"▁osteo": 29891,
"▁remain": 29892,
"▁thre": 29893,
"▁trim": 29894,
"▁trombo": 29895,
"▁weather": 29896,
"▁Ș": 29897,
"RAK": 29898,
"bhal": 29899,
"blan": 29900,
"corre": 29901,
"cré": 29902,
"iyor": 29903,
"ledi": 29904,
"nuo": 29905,
"reiz": 29906,
"rumah": 29907,
"siz": 29908,
"stico": 29909,
"usius": 29910,
"vek": 29911,
"vén": 29912,
"zun": 29913,
"öv": 29914,
"ğu": 29915,
"▁Motel": 29916,
"▁Siria": 29917,
"▁bambu": 29918,
"▁gaudi": 29919,
"▁heat": 29920,
"▁imi": 29921,
"▁oss": 29922,
"▁troch": 29923,
"▁İki": 29924,
"▁о": 29925,
"ブ": 29926,
"記": 29927,
"[24]": 29928,
"ciclo": 29929,
"drog": 29930,
"drow": 29931,
"▁Bintang": 29932,
"▁Björk": 29933,
"▁Hajduk": 29934,
"▁Istri": 29935,
"▁Nachrichten": 29936,
"▁Pek": 29937,
"▁Principal": 29938,
"▁Svenska": 29939,
"▁bone": 29940,
"▁built": 29941,
"▁compose": 29942,
"▁kurash": 29943,
"▁romance": 29944,
"▁roulette": 29945,
"▁vita": 29946,
"▁weight": 29947,
"▁İz": 29948,
"夜": 29949,
"für": 29950,
"issons": 29951,
"qam": 29952,
"▁España": 29953,
"▁Núñez": 29954,
"▁Permanent": 29955,
"▁Ratu": 29956,
"▁Tule": 29957,
"▁europa": 29958,
"▁fact": 29959,
"▁jam": 29960,
"▁municipal": 29961,
"▁object": 29962,
"▁patent": 29963,
"▁samsung": 29964,
"▁sent": 29965,
"▁strip": 29966,
"▁template": 29967,
"▁teret": 29968,
"▁vertical": 29969,
"利": 29970,
"将": 29971,
"ANZA": 29972,
"avit": 29973,
"contact": 29974,
"depend": 29975,
"goj": 29976,
"ibili": 29977,
"tunnel": 29978,
"şin": 29979,
"șa": 29980,
"▁BEL": 29981,
"▁Ditt": 29982,
"▁Egy": 29983,
"▁Kuş": 29984,
"▁Lugar": 29985,
"▁Meine": 29986,
"▁Motiv": 29987,
"▁Tuma": 29988,
"▁Tyto": 29989,
"▁aru": 29990,
"▁least": 29991,
"▁names": 29992,
"▁partner": 29993,
"▁shr": 29994,
"▁tama": 29995,
"▁vom": 29996,
"け": 29997,
"宇": 29998,
"燕": 29999,
"重": 30000,
"++": 30001,
"SHOP": 30002,
"dilo": 30003,
"gati": 30004,
"ndhu": 30005,
"ngkul": 30006,
"rzu": 30007,
"ssima": 30008,
"uté": 30009,
"yf": 30010,
"ة": 30011,
"▁Aika": 30012,
"▁Benefici": 30013,
"▁Carballo": 30014,
"▁Dön": 30015,
"▁Frauen": 30016,
"▁Hobby": 30017,
"▁Mondial": 30018,
"▁Prev": 30019,
"▁Vester": 30020,
"▁]": 30021,
"▁acc": 30022,
"▁bron": 30023,
"▁pali": 30024,
"▁preti": 30025,
"▁ride": 30026,
"▁shift": 30027,
"宁": 30028,
"HOL": 30029,
"HUS": 30030,
"IZA": 30031,
"för": 30032,
"lândia": 30033,
"region": 30034,
"▁ITALIA": 30035,
"▁Szo": 30036,
"▁kre": 30037,
"▁kroner": 30038,
"▁length": 30039,
"▁mort": 30040,
"▁secund": 30041,
"▁teu": 30042,
"百": 30043,
"09.20": 30044,
"DIM": 30045,
"EIN": 30046,
"bite": 30047,
"centre": 30048,
"gué": 30049,
"sever": 30050,
"urb": 30051,
"uses": 30052,
"⌘": 30053,
"▁Budějovic": 30054,
"▁Intr": 30055,
"▁Poste": 30056,
"▁Rek": 30057,
"▁Rii": 30058,
"▁above": 30059,
"▁beli": 30060,
"▁bli": 30061,
"▁component": 30062,
"▁controller": 30063,
"▁halli": 30064,
"▁products": 30065,
"▁solar": 30066,
"▁tl": 30067,
"▁wake": 30068,
"军": 30069,
"Kamp": 30070,
"RIG": 30071,
| |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import nlpregex.abs_graph.double_link
import nlpregex.abs_graph.node
import nlpregex.abs_graph.edge
import nlpregex.abs_graph.graph
from graphviz import Digraph
import re
import subprocess
import os
SYM_EPSILON = "<eps>"
# @class FA
#
# Represents a finite-state automaton FA=(A, S, s, D, F) where
# A: set of alphabets
# S: set of states
# s: start state
# F: set of final states
# D: S x A -> S: Transition from a state to the next upon
# receiving an alphabet.
#
# It is represented by directed graph FA=(N,E).
# N: set of states in FANode.
# E: represents transition D in FAEdge.
#
# The transition is defined as in_token and out_token.
# Epsilon transition is represented by empty string ''.
#
#
# Standalone functions
# --------------------
#
# - DFA_from_NFA( nfa , optimize = False)
#
# generates an equivalent DFA from given NFA
# It assumes the availability of the following commands and libs from OpenFST.
# - fstcompile
# - fstdeterminize
# - fstrmepsilon
# - fstminimize
# - fstprint
#
# @param nfa : input NFA
# @param optimize : True if you want to run fstminimize
# it will change the relative position of output token
# relative to the input token.
# @return DFA in FA.
#
#
#
# # Following functions are provided to generate a primitive automata.
#
#
# - make_unit_epsilon()#
#
# creates an FA that has the unit epsilon transition only
# s ---[eps]---> f
#
#
# - make_unit_transition(in_token, out_token):
#
# creates an FA that accepts a single token.
#
# s --- in_token/out_token ---> f
#
#
# - ttsp_concatenate( S )
#
# creates an FA which accepts the sequence serially in the
# FAs specified in S in that order.
# Let S = [Sstart, S2, S3, ..., Send]
# Then the resultant FA's start state is Start's start, and
# the final states correspond to the final states of Send.
#
# Sstart.s ---<Sstart>---> n1 ---<S2>---> n2 ... ---<Send>--->Send.F
#
#
# - ttsp_bundle( P )
#
# creates an FA which accepts any sequence by FA specified in P.
# Let P = [P1, P2, ..., PN]
#
# +---<P1>---+
# | |
# s-+---<P2>---+-F
# | ... ... |
# | |
# +---<Pn>---+
#
#
# - enclose_in_cycle( g, plus_or_star )
#
# Make the FA a Kleene closure or a positive closure.
#
# @param g : FA to be processed
# @param plus_or_star : '+' - add a backward edge from f to s.
# '*' - add a backward edge and forward edge
#
# Add a backward edge from the finish node to the start node
#
# ________ ________
# / \ / \
# s----------f => s_new -<eps>-> s----------f -<eps>-> f_new
# \________/ | ^\________/| ^
# | | | |
# | --<eps>--- |
# --------------<eps>----------------
# ('*')
#
# # Conversion to/from OpenFST
#
# FA has a functionality to convert to/from OpenFST text format files.
#
# FST's text representation consists of three files.
#
# - FST file
#    This describes the graph structure and its edges.
# It consists of three parts.
#
# The first part has the following line.
#
# 0 dst_node_id in_sym_id [out_sym_id] [weight]
#
#    The first 0 represents the initial node ID and it is reserved for
# the initial state.
#
#    The second part has the following format per line.
#
# src_node_id dst_node_id in_sym_id [out_sym_id] [weight]
#
# They are all decimal numbers.
# The src_node_id in the first line designates the start state.
#
# The third part has the following format per line.
#
# final_node_id
#
# - Input symbol file
#    This describes the mapping from in_sym_id in the FST file to an actual symbol.
# Each line has the following format.
# in_sym_id in_sym_str
#
# 0 is reserved for epsilon and it is by customary the first line as follows.
#
# 0 <eps>
#
# - Output symbol file
#    This describes the mapping from out_sym_id in the FST file to an actual symbol.
# It has the same format as the input symbol file.
#
# To avoid potential problems caused by certain characters in unicode in in_token and out_token of FAEdge,
# we make a mapping from the strings in in_token/out_token to in_sym_str/out_sym_str.
#
# The mapping from in_token/out_token to in_sym_str/out_sym_str is as follows
#
# in_token/out_token <===> in_sym{:08d}".format(id) / out_sym{:08d}".format(id)
#
#
# The conversion is provided by the following two member functions of FA.
#
#
# Conversion to OpenFST:
# ( str0, str1, str2, str3, str4 ) = FA.to_open_fst()
#
# str0 : contents of FST file for OpenFST
# str1 : contents of Input symbol file for OpenFST
# str2 : contents of Output symbol file for OpenFST
# str3 : contents of mapping from in_sym_str to real in_token
# str4 : contents of mapping from out_sym_str to real out_token
#
#
# Conversion from OpenFST:
# FA.from_open_fst( fst, sym_in, sym_out )
#
# FA must be empty before calling this.
#     @param fst     : contents of OpenFST file (text version, not compiled binary version.)
# @param sym_in : mapping from in_sym_str to real in_token
# @param sym_out : mapping from out_sym_str to real out_token
#
class FANode(nlpregex.abs_graph.node.BaseNode):
    """A single state of a finite-state automaton.

    The role flags and the display id are plain attributes maintained by
    the owning FA (see FA.set_start_node / add_final_node /
    reassign_node_ids).
    """

    def __init__( self ):
        super().__init__()
        # Display/serialization id, (re)assigned by FA.reassign_node_ids().
        self.node_id = 0
        # Role flags: maintained by the owning FA.
        self.start_node = False
        self.final_node = False
class FAEdge(nlpregex.abs_graph.edge.BaseEdge):
    """A transition of an FA, labeled with an input and an output token.

    An epsilon transition is encoded by an empty in_token.
    """

    def __init__(self):
        super().__init__()
        self.out_token = ""
        self.in_token = ""

    def is_epsilon(self):
        """Return True iff this edge consumes no input symbol."""
        return not self.in_token
class FA(nlpregex.abs_graph.graph.BaseGraph):
    # Finite-state automaton represented as a directed graph; see the
    # module-level comment above for the full description.
    def __init__(self):
        """Create an empty FA: no start state and no final states."""
        super().__init__()
        self.start_node = None
        self.final_nodes = []
        # Matches balanced-marker pseudo-tokens of the form "[PRE 3]" /
        # "[POST 12]".
        # NOTE(review): the attribute name keeps the original misspelling
        # 'patten'; code outside this view may reference it, so it is not
        # renamed here.
        self.re_balanced_token_patten = re.compile( r'^\[(PRE|POST) [0-9]+\]$' )
def set_start_node( self, n ):
self.start_node = n
n.srart_node = True
def get_start_node( self ):
return self.start_node
def remove_start_node( self ):
if self.start_node:
n = self.start_node
n.srart_node = False
return n
else:
return None
def add_final_node( self, n ):
self.final_nodes.append( n )
n.final_node = True
def first_final_node( self ):
if len( self.final_nodes ) != 0:
return self.final_nodes[0]
else:
return None
def remove_final_nodes( self ):
final_nodes_copy = self.final_nodes
self.final_nodes = []
for n in final_nodes_copy:
n.final_node = False
return final_nodes_copy
def draw( self, comment_string, show_out_token = False, view_now = True, out_format = "svg" ):
self.reassign_node_ids()
g_dot = Digraph( comment = comment_string )
g_dot.graph_attr['rankdir'] = 'LR'
for n in self.nodes():
label_out = str(n.node_id)
if n in self.final_nodes:
g_dot.node( str(n.node_id), label=label_out, shape='doublecircle' )
else:
g_dot.node( str(n.node_id), label=label_out, shape='circle' )
for e in self.directed_edges():
e.in_token
in_token = e.in_token.replace('"', '')
out_token = e.out_token.replace('"', '')
if in_token == "":
in_token = "ε"
if out_token == "":
out_token = "ε"
if show_out_token:
label_out = '"' +in_token + " / " + out_token + '"'
else:
label_out = '"' + in_token + '"'
g_dot.edge( str(e.src_node.node_id), str(e.dst_node.node_id), label=label_out )
for n in self.nodes():
delattr( n, 'node_id' )
g_dot.render( comment_string, view=view_now, format=out_format )
def reassign_node_ids(self):
node_id = 1
for n in self.nodes():
if n == self.start_node:
n.node_id = 0
else:
n.node_id = node_id
node_id += 1
# This FA must be empty before calling this.
# @param
# lines : List of phrases
def from_flat_list( self, lines ):
s_f_created = False
lines = lines.split("\n")
for line in lines:
fields = line.strip().split()
fields_cleaned = []
for e in fields:
if len(e)>0:
fields_cleaned.append(e)
if len(fields_cleaned)== 1:
if not s_f_created:
s = FANode()
s.add_to_graph(self)
self.set_start_node(s)
f = FANode()
f.add_to_graph(self)
self.add_final_node(f)
s_f_created = True
e = FAEdge()
e.in_token = fields[0]
e.out_token = fields[0]
e.add_to_graph( self, s, f, "directed" )
elif len(fields_cleaned) > 1:
if not s_f_created:
s = FANode()
s.add_to_graph(self)
self.set_start_node(s)
f = FANode()
f.add_to_graph(self)
self.add_final_node(f)
s_f_created = True
prev_node = s
for i in range(0, len(fields_cleaned)-1):
n = FANode()
n.add_to_graph(self)
e = FAEdge()
e.in_token = fields_cleaned[i]
e.out_token = fields_cleaned[i]
e.add_to_graph( self, prev_node, n, "directed" )
prev_node = n
e = FAEdge()
e.in_token = fields_cleaned[-1]
e.out_token = fields_cleaned[-1]
e.add_to_graph( self, prev_node, f, "directed" )
self.reassign_node_ids()
#
# Generates file contents that describes this FA for OpenFST.
#
# @param retain_balanced_in_token :
# True : in_token is passed as is.
# False : if in_token matches '^\[(PRE|POST) [0-9]+\]$'
# then it will be replaced with ''
# This must be set to False for decoder.
#
# @return
#
# fst_str : file content for OpenFST in AT&T FSM format
# It consists of three parts. (Optional weights are not shown.)
#
# First part. (Initial state and its transitions)
# 0 <new state id> <open fst in_token> <open fst out_token>
#
# Second part.
# <cur state id> <new state id> <open fst in_token> <open fst out_token>
#
# Third part
# <final state>
# NOTE: Id 0 is reserved for the initial state.
#
# fst_sym_in_str : file content for OpenFST symbol table for in tokens.
# fst_sym_out_str : | |
{variable}
return cls(variable, ctrl(typ=type_t))
class ConditionSet(BindingOp):
    """A set represented as a condition on a variable.
    The body must be of type t."""
    canonical_name = "Set"
    op_name_uni="Set"
    op_name_latex="Set"
    def __init__(self, var_or_vtype, body, varname=None, assignment=None,
                 type_check=True):
        # Coerce the body to a TypedExpr up front; BindingOp enforces that
        # it has type t (body_type below).
        body = self.ensure_typed_expr(body, assignment=assignment)
        super().__init__(var_or_vtype=var_or_vtype, typ=None, body=body,
            varname=varname, body_type=types.type_t, assignment=assignment,
            type_check=type_check)
        # The expression's overall type: a set of the bound variable's type.
        self.type = types.SetType(self.vartype)
    def structural_singleton(self):
        # Placeholder: a condition set is never a structural singleton
        # (implicitly returns None).
        pass
    def term(self):
        # A condition set is a complex expression, not a term.
        return False
    def latex_str(self, parens=True, **kwargs):
        # Render as {x_type | body}.
        return ensuremath("\\{%s_{%s}\\:|\\: "
                        % (self.varname, self.vartype.latex_str())
                        + self.body.latex_str(**kwargs) + "\\}")
    def __lshift__(self, i):
        """Use the `<<` operator for set membership."""
        return SetContains(i, self)
    def to_characteristic(self):
        """Return a LFun based on the condition used to describe the set."""
        return LFun(self.vartype, self.body, self.varname)
    def try_adjust_type_local(self, unified_type, derivation_reason,
                              assignment, env):
        # Adjust the member type by re-deriving the condition: apply the
        # characteristic function to a variable of the new inner type.
        inner_type = unified_type.content_type
        char = self.to_characteristic()
        sub_var = TypedTerm(self.varname, inner_type)
        new_condition = char.apply(sub_var)
        return self.copy_local(sub_var, new_condition)

# Register so the metalanguage parser recognizes the Set binding operator.
BindingOp.add_op(ConditionSet)
class ListedSet(TypedExpr):
    """A listed set is a set that simply lists members."""
    canonical_name = "ListedSet"
    op_name_uni="ListedSet"
    op_name_latex="ListedSet"
    def __init__(self, iterable, typ=None, assignment=None, type_check=True):
        s = set(iterable) # remove duplicates, flatten order
        args = [self.ensure_typed_expr(a,assignment=assignment) for a in s]
        args = sorted(args, key=repr) # for a canonical ordering
        # Infer the member type when not given: a fresh type variable for
        # the empty set, otherwise the first member's type.
        if len(args) == 0 and typ is None:
            typ = types.VariableType("X") # could be a set of anything
        elif typ is None:
            typ = args[0].type
        for i in range(len(args)):
            # type checking TODO: this isn't right, would need to pick the
            # strongest type
            args[i] = self.ensure_typed_expr(args[i], typ)
        super().__init__("Set", *args)
        #self.op = "Set"
        self.type = types.SetType(typ)
    def subst(self, i, s):
        # Substitution is only safe for sets of size <= 1; see the TODO
        # below about unstable member ordering.
        if len(self.args) < 2:
            return super().subst(i, s)
        else:
            raise NotImplementedError(
                "Beta reduction into a set of size>1 not currently supported.")
            # TODO deal with this
            # the problem is the same as usual -- set order isn't stable so we
            # need to do this all at once rather than member-by-member.
    def copy(self):
        return ListedSet(self.args)
    def copy_local(self, *args, type_check=True):
        # NOTE(review): type_check is accepted for interface compatibility
        # but not forwarded; the constructor re-checks members anyway.
        return ListedSet(args)
    def term(self):
        return False
    def __lshift__(self, i):
        """Use the `<<` operator for set membership."""
        return SetContains(i, self)
    def set(self):
        """Return a python `set` version of the ListedSet.
        Note that this isn't guaranteed to be defined for anything with a set
        type."""
        return set(self.args)
    def cardinality(self):
        # Number of (distinct) listed members.
        return len(self.args)
    def to_condition_set(self):
        """Convert to a condition set by disjoining members."""
        # ensure that we build a condition set from a variable that is not free
        # in any of the members
        varname = self.find_safe_variable(starting="x")
        conditions = [BinaryGenericEqExpr(TypedTerm(varname, a.type), a)
                                                        for a in self.args]
        return ConditionSet(self.type.content_type,
                            BinaryOrExpr.join(*conditions), varname=varname)
    def reduce_all(self):
        """Special-cased reduce_all for listed sets. There are two problems.
        First, the reduction may actually result in a change in the size of the
        set, something generally not true of reduction elsewhere. Second,
        because the constructor calls `set`, `copy` is not guaranteed to return
        an object with a stable order. Therefore we must batch the reductions
        (where the TypedExpr version doesn't).
        Note that currently this produces non-ideal derivation sequences."""
        dirty = False
        accum = list()
        result = self
        for i in range(len(result.args)):
            new_arg_i = result.args[i].reduce_all()
            if new_arg_i is not result.args[i]:
                dirty = True
                reason = "Recursive reduction of set member %s" % (i+1)
                # TODO: this isn't quite right but I can't see what else to do
                # right now
                result = derived(result, result, desc=reason,
                                subexpression=new_arg_i, allow_trivial=True)
                accum.append(new_arg_i)
            else:
                accum.append(new_arg_i)
        if dirty:
            # Rebuild the set once, from all reduced members at the same time.
            new_result = ListedSet(accum)
            new_result = derived(new_result, result,
                desc="Construction of set from reduced set members")
            result = new_result
        return result
    def __repr__(self):
        return repr(set(self.args))
    def latex_str(self, **kwargs):
        # Render as {m1, m2, ...}.
        inner = ", ".join([a.latex_str(**kwargs) for a in self.args])
        return ensuremath("\\{" + inner + "\\}")
    def try_adjust_type_local(self, unified_type, derivation_reason, assignment,
                              env):
        # Adjust every member to the new inner type, then rebuild.
        inner_type = unified_type.content_type
        content = [a.try_adjust_type(inner_type,
                            derivation_reason=derivation_reason,
                            assignment=assignment) for a in self.args]
        result = self.copy_local(*content)
        return result
    @classmethod
    # NOTE(review): the first parameter of a classmethod is conventionally
    # named `cls`; kept as-is since it is positional-only here.
    def random(self, ctrl, max_type_depth=1, max_members=6, allow_empty=True):
        typ = get_type_system().random_type(max_type_depth, 0.5)
        if allow_empty:
            r = range(max_members+1)
        else:
            # drop 0 so at least one member is generated
            r = range(max_members+1)[1:]
        length = random.choice(r)
        members = [ctrl(typ=typ) for i in range(length)]
        return ListedSet(members)
class ForallUnary(BindingOp):
    """The universal quantifier over a single variable (body of type t)."""
    canonical_name = "Forall"
    op_name_uni = "∀"
    op_name_latex = "\\forall{}"

    def __init__(self, var_or_vtype, body, varname=None, assignment=None,
                 type_check=True):
        super().__init__(var_or_vtype, types.type_t, body, varname=varname,
                         assignment=assignment, type_check=type_check)

    def copy(self):
        return ForallUnary(self.vartype, self.body, self.varname)

    def copy_local(self, var, arg, type_check=True):
        return ForallUnary(var, arg, type_check=type_check)

    def simplify(self):
        # Drop vacuous quantification: when the bound variable does not
        # occur free in the body, the quantifier is redundant.  (Not valid
        # if the domain of individuals is completely empty — that case
        # would return True.)
        vacuous = self.varname not in self.body.free_variables()
        return self.body if vacuous else self

BindingOp.add_op(ForallUnary)
class ExistsUnary(BindingOp):
    """The existential quantifier over a single variable (body of type t)."""
    canonical_name = "Exists"
    op_name_uni="∃"
    op_name_latex="\\exists{}"

    def __init__(self, var_or_vtype, body, varname=None, assignment=None,
                 type_check=True):
        super().__init__(var_or_vtype, types.type_t, body, varname=varname,
                         assignment=assignment, type_check=type_check)

    def copy(self):
        return ExistsUnary(self.vartype, self.body, self.varname)

    def copy_local(self, var, arg, type_check=True):
        return ExistsUnary(var, arg, type_check=type_check)

    def simplify(self):
        # Drop vacuous quantification: when the bound variable does not
        # occur free in the body, the quantifier is redundant.  (Not valid
        # if the domain of individuals is completely empty — that case
        # would return False.)
        vacuous = self.varname not in self.body.free_variables()
        return self.body if vacuous else self

BindingOp.add_op(ExistsUnary)
class ExistsExact(BindingOp):
    """Unique-existence ('there is exactly one') unary quantifier."""
    canonical_name = "ExistsExact"
    op_name_uni="∃!"
    op_name_latex="\\exists{}!"

    def __init__(self, var_or_vtype, body, varname=None, assignment=None,
                 type_check=True):
        super().__init__(var_or_vtype, types.type_t, body, varname=varname,
                         assignment=assignment, type_check=type_check)

    def copy(self):
        return ExistsExact(self.vartype, self.body, self.varname)

    def copy_local(self, var, arg, type_check=True):
        return ExistsExact(var, arg, type_check=type_check)

BindingOp.add_op(ExistsExact)
class IotaUnary(BindingOp):
    """Iota operator. This is best construed as Russellian."""
    canonical_name = "Iota"
    op_name_uni = "ι"
    op_name_latex="\\iota{}"
    secondary_names = {"ι"}
    def __init__(self, var_or_vtype, body, varname=None, assignment=None,
                 type_check=True):
        # typ=None: the result type is not known until the superclass has
        # parsed var_or_vtype; it is the bound variable's type (set below).
        super().__init__(var_or_vtype=var_or_vtype, typ=None, body=body,
            varname=varname, body_type=types.type_t, assignment=assignment,
            type_check=type_check)
        self.type = self.vartype
    def copy(self):
        return IotaUnary(self.vartype, self.body, self.varname)
    def copy_local(self, var, arg, type_check=True):
        return IotaUnary(var, arg, type_check=type_check)
    def to_test(self, x):
        """Return the iota's descriptive condition applied to x (the body
        wrapped in an LFun over the bound variable, applied to x)."""
        return LFun(self.vartype, self.body, self.varname).apply(x)
    def try_adjust_type_local(self, unified_type, derivation_reason, assignment,
                              env):
        # Rebuild the condition around a variable of the adjusted type.
        sub_var = TypedTerm(self.varname, unified_type)
        # TODO: does this need to pass in assignment?
        new_condition = self.to_test(sub_var)
        result = self.copy_local(sub_var, new_condition)
        return result
class IotaPartial(IotaUnary):
    """Iota variant that, via calculate_partiality, factors its uniqueness
    requirement out into a partiality (presupposition) condition rather
    than leaving it implicit in place."""
    canonical_name = "IotaPartial"
    op_name_uni = "ι"
    op_name_latex="\\iota{}"
    secondary_names = {}
    def __init__(self, var_or_vtype, body, varname=None, assignment=None,
                 type_check=True):
        super().__init__(var_or_vtype, body, varname, assignment, type_check)
    def copy(self):
        return IotaPartial(self.vartype, self.body, self.varname)
    def copy_local(self, var, arg, type_check=True):
        return IotaPartial(var, arg, type_check=type_check)
    def calculate_partiality(self, vars=None):
        """Convert to a plain IotaUnary paired with a Partial condition
        (an ExistsExact presupposition when the iota variable is involved).

        @param vars : set of variable names bound further out, or None at
                      the top level.
        """
        new_body = self.body.calculate_partiality(vars=vars)
        # Defer any further calculation if bound variables occur free in the
        # body.  BUGFIX: the original tested `vars | new_body.free_variables()`
        # (union), which is truthy whenever EITHER set is non-empty and so
        # deferred essentially always; the intersection is what the deferral
        # condition above describes.
        if vars is not None:
            if vars & new_body.free_variables():
                return derived(self.copy_local(self.var_instance, new_body),
                                            self, "Partiality simplification")
        # Fold an inner Partial's condition into the body before splitting.
        if isinstance(new_body, Partial):
            new_body = new_body.body & new_body.condition
        new_condition = new_body.copy()
        new_body = IotaUnary(self.var_instance, new_body)
        # The uniqueness presupposition must bind the iota variable if it is
        # still free in the condition.
        if self.varname in new_condition.free_variables():
            new_condition = ExistsExact(self.var_instance, new_condition)
        return derived(Partial(new_body, new_condition), self,
                                            "Partiality simplification")
BindingOp.add_op(IotaUnary)
BindingOp.add_op(IotaPartial)
class LFun(BindingOp):
    """A typed function. Can itself be used as an operator in a TypedExpr.
    """
    canonical_name = "Lambda"
    secondary_names = {"L", "λ", "lambda"}
    op_name_uni="λ"
    op_name_latex="\\lambda{}"
    def __init__(self, var_or_vtype, body, varname=None, let=False,
                 assignment=None, type_check=True):
        # Use placeholder typ argument of None. This is because the input type
        # won't be known until the var_or_vtype argument is parsed, which is
        # done in the superclass constructor.
        # sort of a hack, this could potentially cause odd side effects if
        # BindingOp.__init__ is changed without taking this into account.
        super().__init__(var_or_vtype=var_or_vtype, typ=None, body=body,
            varname=varname, body_type=body.type, assignment=assignment,
            type_check=type_check)
        # Function type <vartype, body type>.
        self.type = FunType(self.vartype, body.type)
        # let-polymorphism flag; carried over by copy()/copy_local().
        self.let = let
    @property
    def argtype(self):
        # Domain (input) type: the left side of the function type.
        return self.type.left
    @property
    def returntype(self):
        # Range (output) type: the right side of the function type.
        return self.type.right
    def functional(self):
        return True # no need to do any calculations
def copy(self):
r = LFun(self.argtype, self.body, self.varname, type_check=False)
r.let = self.let
return r
def copy_local(self, var, arg, type_check=True):
r = LFun(var, arg, type_check=type_check)
r.let = self.let
return r
    def try_adjust_type_local(self, unified_type, derivation_reason, assignment,
                              env):
        # Adjust this function's type to `unified_type` by adjusting the
        # argument variable and the body separately; returns None when the
        # argument type cannot be unified.
        # NOTE(review): `vacuous` is assigned but never used below.
        vacuous = False
        # env will not start with bound variable in it
        env.add_var_mapping(self.varname, self.argtype)
        # update mapping with new type
        left_principal = env.try_add_var_mapping(self.varname,
                                                            unified_type.left)
        if left_principal is None:
            return None
        new_body = self.body
        if self.argtype != left_principal:
            # arg type needs to be adjusted.
            new_var = TypedTerm(self.varname, left_principal)
        else:
            new_var = self.var_instance
        if self.type.right != unified_type.right:
            new_body = new_body.try_adjust_type(unified_type.right,
                                        derivation_reason=derivation_reason,
                                        assignment=assignment)
        new_fun = self.copy_local(new_var, new_body)
        # Propagate the body's type environment, then remove the bound
        # variable from scope again before applying the type assignment.
        env.merge(new_body.get_type_env())
        if self.varname in env.var_mapping:
            del env.var_mapping[self.varname]
        new_fun = new_fun.under_type_assignment(env.type_mapping)
        return new_fun
def apply(self,arg):
"""Apply an argument directly to the function.
`__call__` plus `reduce` is (almost) equivalent to `apply`, but using
`apply` directly will not generate a derivations."""
# do I really want flexible equality here??
# TODO: return to this. Right now a type mismatch still gets raised
# during beta reduction.
ts = get_type_system()
if ts.eq_check(self.argtype, arg.type):
# first check for potential variable name collisions when
# substituting, and the substitute
#TODO: do | |
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_with_compatibility
    def test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name says 'Tdc' but enable_dc_asgd is
        # False below — confirm which is intended.
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name says 'Tdc' but enable_dc_asgd is
        # False below — confirm which is intended.
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name says 'Tdc' but enable_dc_asgd is
        # False below — confirm which is intended.
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name says 'Tdc' but enable_dc_asgd is
        # False below — confirm which is intended.
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': True,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': False,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': True,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': True,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    def test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name says 'Tdc' but enable_dc_asgd is
        # False below — confirm which is intended.
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': True,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': True,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
    @run_by_freq(freq="MONTH")
    def test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25(
            self):
        """test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25."""
        # Re-initialize the shared fixture for a 2-pserver / 2-trainer cluster.
        TestFleetBase.__init__(self, pservers=2, trainers=2)
        # NOTE(review): the method name says 'Tdc' but enable_dc_asgd is
        # False below — confirm which is intended.
        self.run_params = {
            'sync_mode': 'half_async',
            'cpu_num': 2,
            'num_threads': 2,
            'slice_var_up': False,
            'enable_dc_asgd': False,
            'split_method': False,
            'runtime_split_send_recv': True,
            'geo_sgd': False,
            'wait_port': True,
            'use_hierarchical_allreduce': True,
            'push_nums': 25
        }
        # Publish the run parameters through the docstring so they show up
        # in test reports.
        self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Fsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
            self.run_params)
        train_data_list1 = self.get_result(
            self._model_file, update_method='pserver')
        train_data_list2 = self.get_result(
            self._model_file, update_method='pserver')
        # Each training result list must contain exactly two entries.
        assert len(train_data_list1) == 2
        assert len(train_data_list2) == 2
        # The losses of the two runs may differ only slightly.
        self.check_data(
            train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
        # The loss must match the single-CPU baseline.
        self.check_data(
            train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
def test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'half_async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': True,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Fdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
def test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'half_async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Fdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
def test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'half_async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': True,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_hasync_2thread_Tslice_Tdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
@run_by_freq(freq="MONTH")
def test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25(
self):
"""test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25."""
TestFleetBase.__init__(self, pservers=2, trainers=2)
self.run_params = {
'sync_mode': 'half_async',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': True,
'runtime_split_send_recv': True,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': True,
'push_nums': 25
}
self.test_ctr_2ps_2tr_hasync_2thread_Fslice_Tdc_Tsm_Tsr_Fgeo_Twp_Tha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=3e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[1], delta=3e-0, expect=self.single_cpu_data)
"""sync"""
def test_ctr_1ps_1tr_sync_1thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
self):
"""test_ctr_1ps_1tr_sync_1thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
TestFleetBase.__init__(self, pservers=1, trainers=1)
self.run_params = {
'sync_mode': 'sync',
'cpu_num': 1,
'num_threads': 1,
'slice_var_up': True,
'enable_dc_asgd': False,
'split_method': False,
'runtime_split_send_recv': True,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': False,
'push_nums': 25
}
self.test_ctr_1ps_1tr_sync_1thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 1
assert len(train_data_list2) == 1
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=1e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[0], delta=1e-0, expect=self.single_cpu_data)
@run_with_compatibility
def test_ctr_1ps_1tr_sync_1thread_Fslice_Fdc_Fsm_Fsr_Fgeo_Twp_Fha_pn25(
self):
"""test_ctr_1ps_1tr_sync_1thread_Fslice_Fdc_Fsm_Fsr_Fgeo_Twp_Fha_pn25."""
TestFleetBase.__init__(self, pservers=1, trainers=1)
self.run_params = {
'sync_mode': 'sync',
'cpu_num': 1,
'num_threads': 1,
'slice_var_up': False,
'enable_dc_asgd': False,
'split_method': False,
'runtime_split_send_recv': False,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': False,
'push_nums': 25
}
self.test_ctr_1ps_1tr_sync_1thread_Fslice_Fdc_Fsm_Fsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 1
assert len(train_data_list2) == 1
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=1e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[0], delta=1e-0, expect=self.single_cpu_data)
def test_ctr_1ps_2tr_sync_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25(
self):
"""test_ctr_1ps_2tr_sync_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25."""
TestFleetBase.__init__(self, pservers=1, trainers=2)
self.run_params = {
'sync_mode': 'sync',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': True,
'enable_dc_asgd': False,
'split_method': False,
'runtime_split_send_recv': True,
'geo_sgd': False,
'wait_port': True,
'use_hierarchical_allreduce': False,
'push_nums': 25
}
self.test_ctr_1ps_2tr_sync_2thread_Tslice_Fdc_Fsm_Tsr_Fgeo_Twp_Fha_pn25.__func__.__doc__ = json.dumps(
self.run_params)
train_data_list1 = self.get_result(
self._model_file, update_method='pserver')
train_data_list2 = self.get_result(
self._model_file, update_method='pserver')
# 判断两个list输出是否为2
assert len(train_data_list1) == 2
assert len(train_data_list2) == 2
# 两个train的loss值存在微小差距
self.check_data(
train_data_list1[0], delta=1e-0, expect=train_data_list2[0])
# loss值与预期相符
self.check_data(
train_data_list1[0], delta=1e-0, expect=self.single_cpu_data)
@run_with_compatibility
def test_ctr_1ps_2tr_sync_2thread_Fslice_Fdc_Fsm_Fsr_Fgeo_Twp_Fha_pn25(
self):
"""test_ctr_1ps_2tr_sync_2thread_Fslice_Fdc_Fsm_Fsr_Fgeo_Twp_Fha_pn25."""
TestFleetBase.__init__(self, pservers=1, trainers=2)
self.run_params = {
'sync_mode': 'sync',
'cpu_num': 2,
'num_threads': 2,
'slice_var_up': False,
'enable_dc_asgd': | |
import atexit
import cPickle as pickle
import errno
import fnmatch
import io
import os
import os.path
import pygame
import stat
import threading
import time
import sys
import pytweening
from pygame.locals import *
from subprocess import call
# UI classes ---------------------------------------------------------------
# Icon is a very simple bitmap class, just associates a name and a pygame
# image (PNG loaded from icons directory) for each.
# There isn't a globally-declared fixed list of Icons. Instead, the list
# is populated at runtime from the contents of the 'icons' directory.
class Icon:
    """A named bitmap: pairs an icon name with a pygame image.

    Icons are loaded from PNG files in the global `iconPath` directory;
    the global `icons` list is populated at runtime, not declared statically.
    """
    def __init__(self, name):
        self.name = name
        # Load the PNG and keep a 24-bpp original copy.
        self.originalbitmap = pygame.image.load(iconPath + '/' + name + '.png').convert(24)
        #self.bitmap = pygame.transform.smoothscale(self.originalbitmap, (self.originalbitmap.get_width(),self.originalbitmap.get_height()))
        # Working copy converted to 16 bpp (presumably for faster blits — TODO confirm).
        self.bitmap = self.originalbitmap.convert(16)
# Button is a simple tappable screen region. Each has:
# - bounding rect ((X,Y,W,H) in pixels)
# - optional background color and/or Icon (or None), always centered
# - optional foreground Icon, always centered
# - optional single callback function
# - optional single value passed to callback
# Occasionally Buttons are used as a convenience for positioning Icons
# but the taps are ignored. Stacking order is important; when Buttons
# overlap, lowest/first Button in list takes precedence when processing
# input, and highest/last Button is drawn atop prior Button(s). This is
# used, for example, to center an Icon by creating a passive Button the
# width of the full screen, but with other buttons left or right that
# may take input precedence (e.g. the Effect labels & buttons).
# After Icons are loaded at runtime, a pass is made through the global
# buttons[] list to assign the Icon objects (from names) to each Button.
class Button:
    """A tappable screen region with optional background/foreground icons.

    Attributes are filled from **kwargs in __init__.  NOTE(review):
    `rect`, `w` and `h` are read by selected()/draw() but never assigned
    here — presumably the layout code sets them externally; verify.
    """
    def __init__(self, **kwargs):
        self.key = None # Key this button represents (pygame.K_* code)
        self.color = None # Background fill color, if any
        self.iconBg = None # Background Icon (atop color fill)
        self.staticBg = None # Cached pre-scaled background surface
        self.animating= False # When True, draw() rescales every frame
        self.iconFg = None # Foreground Icon (atop background)
        self.bg = None # Background Icon name
        self.fg = None # Foreground Icon name
        self.callback = None # Callback function
        self.value = None # Value passed to callback
        self.w = None # Drawn width (assigned externally)
        self.h = None # Drawn height (assigned externally)
        self.shift = None # Icon name to show while shift is active
        self.shiftimg = None # Lazily-loaded, scaled shift image cache
        # Python 2 dict iteration; map recognized kwargs onto attributes.
        for key, value in kwargs.iteritems():
            if key == 'color': self.color = value
            elif key == 'bg' : self.bg = value
            elif key == 'fg' : self.fg = value
            elif key == 'cb' : self.callback = value
            elif key == 'value': self.value = value
            elif key == 'key' : self.key = value
            elif key == 'shift': self.shift = value
    def selected(self, pos):
        # Hit-test `pos` against this button's rect; on a hit, fire the
        # callback (passing self.value when set) and return True.
        x1 = self.rect[0]
        y1 = self.rect[1]
        x2 = x1 + self.rect[2] - 1
        y2 = y1 + self.rect[3] - 1
        if ((pos[0] >= x1) and (pos[0] <= x2) and
            (pos[1] >= y1) and (pos[1] <= y2)):
            if self.callback:
                if self.value is None: self.callback()
                else: self.callback(self.value)
            return True
        return False
    def draw(self, screen):
        # Lazily load and scale the shift-variant image on first use.
        if self.shiftimg is None and self.shift is not None:
            self.shiftimg = pygame.image.load(iconPath + '/' + self.shift + '.png').convert(16)
            self.shiftimg = pygame.transform.scale(self.shiftimg, (self.w,self.h))
        if self.color:
            screen.fill(self.color, self.rect)
        if self.iconBg:
            # `shift` here is the module-level shift state, not self.shift.
            if shift and self.shift is not None:
                img = self.shiftimg
            else:
                if self.staticBg is None:
                    # Cache a smooth-scaled copy for the common static case.
                    self.staticBg = pygame.transform.smoothscale(self.iconBg.bitmap.convert(24), (self.w,self.h)).convert(16)
                if self.animating:
                    # Cheap (non-smooth) scaling while animating.
                    img = pygame.transform.scale(self.iconBg.bitmap, (self.w,self.h))
                else:
                    img = self.staticBg
            #img = self.iconBg.bitmap
            #img.set_alpha(255)
            screen.blit(img,(self.rect[0],self.rect[1]))
        if self.iconFg:
            img = pygame.transform.scale(self.iconFg.bitmap, (self.w,self.h))
            #img.set_alpha(255)
            screen.blit(img,
                        (self.rect[0],
                         self.rect[1]))
    def setBg(self, name):
        # Swap the background icon by name; None clears it.
        if name is None:
            self.iconBg = None
        else:
            for i in icons:
                if name == i.name:
                    self.iconBg = i
                    break
# UI callbacks -------------------------------------------------------------
# These are defined before globals because they're referenced by items in
# the global buttons[] list.
# Global stuff -------------------------------------------------------------
screenMode = 9 # Current screen mode; default = viewfinder
screenModePrior = -1 # Prior screen mode (for detecting changes)
settingMode = 4 # Last-used settings mode (default = storage)
storeMode = 0 # Storage mode; default = Photos folder
storeModePrior = -1 # Prior storage mode (for detecting changes)
sizeMode = 0 # Image size; default = Large
fxMode = 0 # Image effect; default = Normal
isoMode = 0 # ISO setting; default = Auto
iconPath = 'icons' # Subdirectory containing UI bitmaps (PNG format)
saveIdx = -1 # Image index for saving (-1 = none set yet)
loadIdx = -1 # Image index for loading
scaled = None # pygame Surface w/last-loaded image
shift = False # Shift state; read as a global by Button.draw()
icons = [] # This list gets populated at startup
# buttons[] is a list of lists; each top-level list element corresponds
# to one screen mode (e.g. viewfinder, image playback, storage settings),
# and each element within those lists corresponds to one UI button.
# There's a little bit of repetition (e.g. prev/next buttons are
# declared for each settings screen, rather than a single reusable
# set); trying to reuse those few elements just made for an ugly
# tangle of code elsewhere.
# NOTE(review): `buttonsold` appears to be a legacy layout kept alongside
# the active `buttons` list below — confirm it is unused before removing.
buttonsold = [
    #row 1
    [Button(bg='adobe_Ai'),
     Button(bg='adobe_Dw'),
     Button(bg='adobe_Fl'),
     Button(bg='adobe_Fw'),
     Button(bg='adobe_Id'),
     Button(bg='adobe_Ps'),
     Button(bg='axialis'),
     Button(bg='adobe_Ai'),
     Button(bg='adobe_Dw'),
     Button(bg='adobe_Fl'),
     Button(bg='adobe_Fw'),
     Button(bg='adobe_Id'),
     Button(bg='adobe_Ps'),
     Button(bg='axialis')
    ],
    #row 2
    [Button(bg='chrome'),
     Button(bg='dropbox', key=pygame.K_1),
     Button(bg='email', key=pygame.K_2),
     Button(bg='explorer', key=pygame.K_3),
     Button(bg='firefox', key=pygame.K_4),
     Button(bg='flashget', key=pygame.K_5),
     Button(bg='foobar', key=pygame.K_6),
     Button(bg='chrome', key=pygame.K_7),
     Button(bg='dropbox', key=pygame.K_8),
     Button(bg='email', key=pygame.K_9),
     Button(bg='explorer', key=pygame.K_0),
    ],
    #row 3
    [Button(bg='games'),
     Button(bg='googleEarth', key=pygame.K_q),
     Button(bg='handbrake', key=pygame.K_w),
     Button(bg='mediaPlayer', key=pygame.K_e),
     Button(bg='notepad', key=pygame.K_r),
     Button(bg='opera', key=pygame.K_t),
     Button(bg='safari', key=pygame.K_y),
     Button(bg='games', key=pygame.K_u),
     Button(bg='googleEarth', key=pygame.K_i),
     Button(bg='handbrake', key=pygame.K_o),
     Button(bg='mediaPlayer', key=pygame.K_p),
     Button(bg='notepad'),
    ],
    #row 4
    [Button(bg='sonyericsson'),
     Button(bg='totalCommander', key=pygame.K_a),
     Button(bg='uTorrent', key=pygame.K_s),
     Button(bg='vlcPlayer', key=pygame.K_d),
     Button(bg='webcam', key=pygame.K_f),
     Button(bg='xbmc', key=pygame.K_g),
     Button(bg='safari', key=pygame.K_h),
     Button(bg='sonyericsson', key=pygame.K_j),
     Button(bg='totalCommander', key=pygame.K_k),
     Button(bg='uTorrent', key=pygame.K_l),
     Button(bg='vlcPlayer'),
     Button(bg='webcam',),
    ],
    #row 5
    [Button(bg='adobe_Ai'),
     Button(bg='adobe_Dw', key=pygame.K_z),
     Button(bg='adobe_Fl', key=pygame.K_x),
     Button(bg='adobe_Fw', key=pygame.K_c),
     Button(bg='adobe_Id', key=pygame.K_v),
     Button(bg='adobe_Ps', key=pygame.K_b),
     Button(bg='axialis', key=pygame.K_n),
     Button(bg='adobe_Ai', key=pygame.K_m),
     Button(bg='adobe_Dw'),
     Button(bg='adobe_Fl'),
     Button(bg='adobe_Fw'),
     Button(bg='adobe_Id'),
    ],
    #row 6
    [Button(bg='chrome'),
     Button(bg='dropbox'),
     Button(bg='email'),
     Button(bg='explorer'),
     Button(bg='firefox'),
     Button(bg='flashget'),
     Button(bg='foobar'),
     Button(bg='chrome'),
    ]
]
# Active keyboard layout: one inner list per keyboard row.  `key` is the
# pygame key code emitted for the button; `shift` names the icon variant
# drawn while the global shift state is active.
buttons = [
    #row 1
    [Button(bg='escape'),
     Button(bg='f1'),
     Button(bg='f2'),
     Button(bg='f3'),
     Button(bg='f4'),
     Button(bg='f5'),
     Button(bg='f6'),
     Button(bg='f7'),
     Button(bg='f8'),
     Button(bg='f9'),
     Button(bg='f10'),
     Button(bg='f11'),
     Button(bg='f12'),
     Button(bg='printscreen')
    ],
    #row 2
    [
     Button(bg='~'),
     Button(bg='1', key=pygame.K_1),
     Button(bg='2', key=pygame.K_2),
     Button(bg='3', key=pygame.K_3),
     Button(bg='4', key=pygame.K_4),
     Button(bg='5', key=pygame.K_5),
     Button(bg='6', key=pygame.K_6),
     Button(bg='7', key=pygame.K_7),
     Button(bg='8', key=pygame.K_8),
     Button(bg='9', key=pygame.K_9),
     Button(bg='0', key=pygame.K_0),
     Button(bg='-'),
     Button(bg='+'),
     Button(bg='oemclear'),
    ],
    #row 3
    [Button(bg='tab'),
     Button(bg='q', shift='qu', key=pygame.K_q),
     Button(bg='w', shift='wu', key=pygame.K_w),
     Button(bg='e', shift='eu', key=pygame.K_e),
     Button(bg='r', shift='ru', key=pygame.K_r),
     Button(bg='t', shift='tu', key=pygame.K_t),
     Button(bg='y', shift='yu', key=pygame.K_y),
     Button(bg='u', shift='uu', key=pygame.K_u),
     Button(bg='i', shift='iu', key=pygame.K_i),
     Button(bg='o', shift='ou', key=pygame.K_o),
     Button(bg='p', shift='pu', key=pygame.K_p),
     Button(bg='['),
     Button(bg=']'),
     Button(bg='return'),
    ],
    #row 4
    [Button(bg='capital'),
     Button(bg='a', shift='au', key=pygame.K_a),
     Button(bg='s', shift='su', key=pygame.K_s),
     Button(bg='d', shift='du', key=pygame.K_d),
     Button(bg='f', shift='fu', key=pygame.K_f),
     Button(bg='g', shift='gu', key=pygame.K_g),
     Button(bg='h', shift='hu', key=pygame.K_h),
     Button(bg='j', shift='ju', key=pygame.K_j),
     Button(bg='k', shift='ku', key=pygame.K_k),
     Button(bg='l', shift='lu', key=pygame.K_l),
     Button(bg=';'),
     Button(bg='#'),
     Button(bg='#'),
     Button(bg='#'),
    ],
    #row 5
    [Button(bg='lshiftkey'),
     Button(bg='z', shift='zu', key=pygame.K_z),
     Button(bg='x', shift='xu', key=pygame.K_x),
     Button(bg='c', shift='cu', key=pygame.K_c),
     Button(bg='v', shift='vu', key=pygame.K_v),
     Button(bg='b', shift='bu', key=pygame.K_b),
     Button(bg='n', shift='nu', key=pygame.K_n),
     Button(bg='m', shift='mu', key=pygame.K_m),
     Button(bg=','),
     Button(bg='rshiftkey'),
     Button(bg='up'),
     Button(bg='forwardslash'),
    ],
    #row 6
    [Button(bg='lcontrolkey'),
     Button(bg='lwin'),
     Button(bg='alt'),
     Button(bg='space'),
     Button(bg='alt'),
     Button(bg='left'),
     Button(bg='down'),
     Button(bg='right'),
    ]
]
# Scan files in a directory, locating JPEGs with names matching the
# software's convention (IMG_XXXX.JPG), returning a tuple with the
# lowest and highest indices (or None if no matching files).
def imgRange(path):
min = 9999
max = 0
try:
for file in os.listdir(path):
if fnmatch.fnmatch(file, 'IMG_[0-9][0-9][0-9][0-9].JPG'):
i = int(file[4:8])
if(i < min): min = i
if(i > max): max = i
finally:
return None if | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 12:12:41 2021
@author: sjguo
"""
import pennylane as qml
import autograd.numpy as np
from time import time
import matplotlib.pyplot as plt
dev = qml.device('default.qubit', wires=2) # Stand-in for the noisy two-qubit device (noise is injected manually via noisy_CNot)
sim = qml.device('default.qubit', wires=2) # Perfect two-qubit simulation
def noisy_CNot(noise):
    '''
    Noisy CNOT gate (aka source gate): a CNOT sandwiched between unknown
    single-qubit rotations.
    noise.shape = (4,3), angles in [-noise_level/2, noise_level/2]:
    rows 0-1 are the pre-CNOT rotations on wires 0-1, rows 2-3 the
    post-CNOT rotations.
    '''
    for wire in (0, 1):
        qml.Rot(*noise[wire], wires=wire)
    qml.CNOT(wires=[0, 1])
    for wire in (0, 1):
        qml.Rot(*noise[wire + 2], wires=wire)
def random_state_ansatz(state_params, noise=None, improve=None): #TODO: For real quantum device, may need to use: arXiv:1608.00263
    '''
    Ansatz that generates a random two-qubit state from pre-randomized
    state_params.
    state_params.shape = (4, 3), angles in [-np.pi, np.pi].
    noise = noise array for noisy_CNot, or None/False. If None or False,
            apply the true CNOT (the docstring always promised this for
            False, but the code previously only checked `is None`, so
            passing False crashed inside noisy_CNot).
    improve = params for improving_CNot, or None.
    '''
    qml.Rot(*state_params[0], wires=0)
    qml.Rot(*state_params[1], wires=1)
    # BUG FIX: treat noise=False like noise=None (true CNOT), as documented.
    if noise is None or noise is False:
        qml.CNOT(wires=[0,1])
    elif improve is None:
        noisy_CNot(noise)
    else:
        improving_CNot(improve, noise)
    qml.Rot(*state_params[2], wires=0)
    qml.Rot(*state_params[3], wires=1)
def improving_CNot(params, noise):
    '''
    Variational ansatz that tries to correct noisy_CNot.
    params.shape = (2*d + 2, 3), angles in [-np.pi, np.pi]; the first two
    rows are initial rotations, then each of the d layers applies
    noisy_CNot followed by one rotation per wire.
    noise = noise array forwarded to noisy_CNot.
    '''
    depth = (params.shape[0] - 2) // 2
    qml.Rot(*params[0], wires=0)
    qml.Rot(*params[1], wires=1)
    for layer in range(depth):
        noisy_CNot(noise)
        for wire in (0, 1):
            qml.Rot(*params[2 * layer + 2 + wire], wires=wire)
def bias_test(noise=None):
    '''
    Test if the amplitude/phase distribution of the random state ansatz is
    biased, by histogramming 10000 sampled states.
    noise = noise array for noisy_CNot, or None for the perfect simulator.
    NOTE(review): passing noise=False reaches the noisy branch and crashes
    in noisy_CNot (False is not subscriptable) — call with None/default for
    the noiseless baseline.
    '''
    sample_size = 10000
    state_params = np.random.uniform(low=-np.pi, high=np.pi, size=(sample_size, 4, 3))
    # Build the sampling QNode on the perfect simulator or the noisy device.
    if noise is None:
        @qml.qnode(sim)
        def random_state(state_params, noise):
            random_state_ansatz(state_params)
            return qml.state()
    else:
        @qml.qnode(dev)
        def random_state(state_params, noise):
            random_state_ansatz(state_params, noise=noise)
            return qml.state()
    state_amp = []
    state_phase = []
    for p in state_params:
        state = np.array(random_state(p, noise))
        state_amp.append(np.abs(state)**2)
        state_phase.append(np.angle(state))
    state_amp = np.array(state_amp)
    state_phase = np.array(state_phase)
    # print(state_amp.mean(axis=0))
    # print(state_phase.mean(axis=0))
    # One column per basis state: amplitude histograms on top, phases below.
    fig, axs = plt.subplots(2, 4,figsize=(20,6))
    for i in range(4):
        axs[0,i].hist(state_amp[:,i], bins=20, range=(0,1))
        axs[1,i].hist(state_phase[:,i], bins=20, range=(-np.pi,np.pi))
        axs[0,i].set_ylim([0,sample_size/3])
        axs[1,i].set_ylim([0,sample_size/15])
def GramSchmidt(A):
    """
    Orthonormalize the columns of a square matrix using the modified
    Gram-Schmidt procedure.
    :param A: a square matrix of column vectors
    :return: a matrix of orthonormal column vectors
    :raises ValueError: if the columns are (numerically) linearly dependent
    """
    n = A.shape[0]
    Q = np.zeros(A.shape, dtype=A.dtype)
    for col in range(n):
        v = A[:, col]
        # Remove the projections onto every already-orthonormalized column.
        for prev in range(col):
            v = v - np.vdot(Q[:, prev], v) * Q[:, prev]
        norm = np.linalg.norm(v, ord=2)
        if np.isclose(norm, 0.0):
            raise ValueError("invalid input matrix")
        Q[:, col] = v / norm
    return Q
def find_unitary(target_state):
    """
    Find a unitary that transforms the |00> state to target_state.
    The target is written into the column of its largest-magnitude
    amplitude (keeping the starting matrix non-singular), that column is
    swapped to position 0, and the matrix is orthonormalized.
    """
    A = np.identity(4, dtype=complex)
    # BUG FIX: select the pivot by |amplitude|.  np.argmax on a complex
    # vector compares real parts first, so states whose dominant amplitude
    # is negative or complex picked the wrong column, producing a singular
    # matrix and a ValueError inside GramSchmidt.
    a = np.argmax(np.abs(target_state))
    A[:, a] = target_state
    A[:, [a, 0]] = A[:, [0, a]]
    return GramSchmidt(A)
def get_agi(params, noise, state_prep=True, alpha=0.0): #TODO: For real quantum device, may use: arXiv:1104.4695
    '''
    Calculate the Average Gate Infidelity, with measurement on prepared state.
    params, noise = params, noise in improving_CNot function
    If params = None, Calculate AGI of noisy_CNot
    state_prep: If True, do random state preparation with real CNOT gates.
                If False, do experimental state preparation with noisy_CNot.
                If params-like: do experimental state preparation with improving_CNot.
    alpha: Parameter related to number of shots measuring the prepared state.
           Range in [0, 1), higher alpha requires more shots. alpha = 1 means infinite shots.
           Defined as: simulated_state = sqrt(1-alpha) * expected state + sqrt(alpha) * prepared_state
           If alpha = 0.0: Execute original get_agi described in VQGO paper.
    '''
    # Reference ("true") circuits; which ones exist depends on alpha.
    if alpha == 0: #For Original VQGO
        # Perfect preparation followed by a perfect CNOT on the simulator.
        @qml.qnode(sim)
        def CNOT_true(state_params):
            random_state_ansatz(state_params)
            qml.CNOT(wires=[0,1])
            return qml.state()
    else: #For Improved VQGO
        # State actually prepared on the (noisy) device.
        @qml.qnode(dev)
        def state_prepared(state_params):
            # Ramdom state preparation
            if type(state_prep) == bool:
                if state_prep == True:
                    random_state_ansatz(state_params)
                else:
                    random_state_ansatz(state_params, noise=noise)
            else:
                random_state_ansatz(state_params, noise=noise, improve=state_prep)
            # Measurements
            return qml.state()
        # Ideal state expected from perfect preparation.
        @qml.qnode(sim)
        def state_expected(state_params):
            random_state_ansatz(state_params)
            # Measurements
            return qml.state()
        # Perfect CNOT applied after preparing the mixed reference state
        # via the unitary U (built by find_unitary from the alpha-blend).
        @qml.qnode(sim)
        def CNOT_true_updated(U):
            qml.QubitUnitary(U,wires=[0,1])
            qml.CNOT(wires=[0,1])
            return qml.state()
    # Circuit under test: same preparation, then the candidate CNOT.
    @qml.qnode(dev)
    def CNot_test(state_params):
        # Ramdom state preparation
        if type(state_prep) == bool:
            if state_prep == True:
                random_state_ansatz(state_params)
            else:
                random_state_ansatz(state_params, noise=noise)
        else:
            random_state_ansatz(state_params, noise=noise, improve=state_prep)
        # Apply testing CNot
        if params is None:
            noisy_CNot(noise)
        else:
            improving_CNot(params, noise)
        return qml.state()
    # Average the state fidelity over 100 random input states.
    sample_size = 100
    state_params = np.random.uniform(low=-np.pi, high=np.pi, size=(sample_size, 4, 3))
    fidelities = []
    if alpha != 0:
        # Blend weights for expected vs. actually-prepared state.
        a1 = np.sqrt(1-alpha)
        a2 = np.sqrt(alpha)
    for p in state_params:
        if alpha == 0:
            state_true = CNOT_true(p)
        else:
            # NOTE(review): the alpha-blend is not renormalized before
            # find_unitary; GramSchmidt normalizes the pivot column, which
            # presumably absorbs this — confirm.
            u = find_unitary(a1*state_expected(p) + a2*state_prepared(p))
            state_true = CNOT_true_updated(u)
        state_test = CNot_test(p)
        # Fidelity |<true|test>|^2 between reference and test output states.
        fidelities.append(np.abs(np.dot(state_true.conj(),state_test))**2)
    # AGI = 1 - mean fidelity.
    return 1 - np.mean(np.array(fidelities))
def vqgo(prep_params, noise, previous_params=None, get_history=False, alpha=0.0, start_time=None, depth=2):
    '''
    VQGO algorithm from arXiv:1810.12745. With state preparation params for QSI.

    prep_params: state-preparation mode forwarded to get_agi's state_prep
        (True = perfect, False = noisy CNOT, params-like = improving_CNot).
    noise: noise array for noisy_CNot.
    previous_params: warm-start parameters; random init in [-pi, pi] when None.
    get_history: if True, also return the true-AGI trace of every iterate.
    alpha: measurement-mixing parameter forwarded to get_agi.
    start_time: epoch seconds used only for progress printing.
    depth: number of noisy-CNOT layers in the correction ansatz.
    Returns the iterate with the lowest cost AGI (plus the history array
    when get_history is True).
    '''
    def cost_fn(params):
        return get_agi(params, noise, state_prep=prep_params, alpha=alpha)
    if previous_params is None:
        params = np.random.uniform(low=-np.pi, high=np.pi, size=(2*depth+2, 3))
    else:
        params = previous_params
    max_iterations = 250
    opt = qml.AdamOptimizer(stepsize=0.3)
    agi_list = []
    params_list = []
    if get_history:
        agi_true_list = []
    for n in range(max_iterations+1):
        params = opt.step(cost_fn, params)
        params_list.append(params)
        agi_list.append(cost_fn(params))
        if get_history:
            agi_true_list.append(get_agi(params, noise))
        if n % 10 == 0:
            if start_time is None:
                print("VQGO: Iteration = {:}, cost_AGI = {:.8f} ".format(n, agi_list[-1]))
            else:
                # BUG FIX: elapsed time previously used the module-global
                # `t` (only defined when run as a script — NameError on
                # import); use the start_time argument instead.
                print("VQGO: Iteration = {:}, cost_AGI = {:.8f}, Time = {:.0f} ".format(n, agi_list[-1], time() - start_time))
    if get_history:
        return params_list[np.argmin(agi_list)], np.array(agi_true_list)
    else:
        return params_list[np.argmin(agi_list)]
def plot_result(vqgo_agi, qsi_agi, qsi_agi_exp=None, title=''):
    """Plot AGI traces for VQGO/QSI runs on a log-scaled y axis.

    vqgo_agi: 1-D array for a single VQGO run, or 2-D array with one VQGO
    trace per QSI iteration.  qsi_agi holds the true-AGI checkpoints and
    qsi_agi_exp the measured ones.  `title` is accepted but unused.
    """
    if vqgo_agi.ndim == 1:  # a single VQGO run
        plt.plot(vqgo_agi, color='orange', label='VQGO')
        plt.plot(0, qsi_agi[0], '*', color='red', markersize=10, label='Intitial')
        plt.plot(len(vqgo_agi), qsi_agi[1], '*', color='green', markersize=10, label='Optimized')
        if qsi_agi_exp is not None:
            plt.plot(0, qsi_agi_exp[0], '*', color='orangered', markersize=10, label='Intitial (Measured)')
            plt.plot(len(vqgo_agi), qsi_agi_exp[1], '*', color='springgreen', markersize=10, label='Optimized (Measured)')
        plt.xlabel('VQGO iteration')
    elif vqgo_agi.ndim == 2:  # one VQGO trace per QSI iteration
        x_axis = np.arange(0, len(qsi_agi)-1, 1/len(vqgo_agi[0]))
        plt.plot(x_axis, vqgo_agi.flatten(), alpha=0.5, color='orange', label='VQGO')
        plt.plot(0, qsi_agi[0], '*', color='red', markersize=10, label='Intitial', zorder=10)
        plt.plot(qsi_agi, '*-', color='green', markersize=10, label='QSI')
        if qsi_agi_exp is not None:
            plt.plot(0, qsi_agi_exp[0], '*', color='red', markersize=10, alpha=0.5, label='Intitial (Measured)', zorder=11)
            plt.plot(np.arange(0, 2), qsi_agi_exp[:2], '-', color='green', markersize=10, alpha=0.5)
            plt.plot(np.arange(1, len(qsi_agi_exp)), qsi_agi_exp[1:], '*-', color='green', markersize=10, alpha=0.5, label='QSI (Measured)')
        plt.xlabel('QSI iteration')
    plt.ylabel('AGI')
    plt.yscale('log')
    plt.legend()
#%% Test random state preparation
if __name__ == "__main__":
    noise = np.random.uniform(low=-np.pi/10, high=np.pi/10, size=(4,3))
    bias_test(noise)
    # Noiseless baseline.  BUG FIX: this was bias_test(False), which took
    # the noisy branch (noise is not None) and crashed inside
    # noisy_CNot(False); the default None selects the perfect simulator.
    bias_test()
    # BUG FIX: get_agi takes (params, noise); the old get_agi(noise) call
    # bound noise to params and raised TypeError for the missing argument.
    # params=None measures the AGI of the bare noisy CNOT.
    print(get_agi(None, noise))
#%% VQGO (Assume perfect state preparation)
if __name__ == "__main__":
    t = time()
    # Moderate Gaussian noise on the source gate.
    noise = np.random.normal(loc=0.0, scale=0.15, size=(4,3))
    agi_noise = get_agi(None, noise)
    print("Initial AGI = {:.8f} ".format(agi_noise))
    # state_prep=True: random inputs prepared with perfect CNOTs.
    params, vqgo_agi = vqgo(True, noise, get_history=True, start_time=t)
    agi_improved = get_agi(params, noise)
    print("Optimized AGI = {:.8f} ".format(agi_improved))
    plot_result(vqgo_agi, (agi_noise, agi_improved))
#%% Improved VQGO with Noisy preparation (One iteration of QSI)
if __name__ == "__main__":
    t = time()
    # Small Gaussian noise; preparation itself uses the noisy CNOT.
    noise = np.random.normal(loc=0.0, scale=0.01, size=(4,3))
    agi_noise = get_agi(None, noise)
    print("Initial AGI = {:.8f} ".format(agi_noise))
    # state_prep=False + alpha: measured-state blend, deeper ansatz.
    params, vqgo_agi = vqgo(False, noise, get_history=True, alpha=0.1, start_time=t, depth=5)
    agi_improved = get_agi(params, noise)
    print("Optimized AGI = {:.8f} ".format(agi_improved))
    plot_result(vqgo_agi, (agi_noise, agi_improved))
#%% QSI (Testing)
if __name__ == "__main__":
    t = time()
    noise = np.random.normal(loc=0.0, scale=0.15, size=(4,3))
    iteration = 3
    alpha = 0.1
    agi = np.zeros((iteration+1)) # True AGI
    agi_exp = np.ones((iteration+1)) # Experimental AGI
    vqgo_agi_list = []
    #AGI of Noisy_CNot
    agi[0] = get_agi(None, noise)
    agi_exp[0] = get_agi(None, noise, state_prep=False)
    print("QSI: Iteration = {:}, AGI = {:.8f}, Exp_AGI = {:.8f}, Time = {:.0f} ".format(0, agi[0], agi_exp[0], time()-t))
    # First Iteration: Use noisy_CNot to prepare
    prep_params, vqgo_agi = vqgo(False, noise, get_history=True, alpha=alpha, start_time=t)
    agi[1] = get_agi(prep_params, noise)
    agi_exp[1] = get_agi(prep_params, noise, state_prep=prep_params)
    vqgo_agi_list.append(vqgo_agi)
    print("QSI: Iteration = {:}, True_AGI = {:.8f}, Exp_AGI = {:.8f}, Time = {:.0f} ".format(1, agi[1], agi_exp[1], time()-t))
    # Second Iteration and on: Use improved_CNot to prepare
    for i in range(iteration-1):
        prep_params_temp, vqgo_agi = vqgo(prep_params, noise, previous_params=prep_params, get_history=True, alpha=alpha, start_time=t)
        # NOTE(review): both evaluations below use the OLD prep_params, not
        # the freshly optimized prep_params_temp, so agi[i+2]/agi_exp[i+2]
        # re-measure the previous iterate — confirm whether prep_params_temp
        # was intended here.
        agi[i+2] = get_agi(prep_params, noise)
        agi_exp[i+2] = get_agi(prep_params, noise, state_prep=prep_params)
        if np.argmin(agi_exp) == i+2: #Update params only if getting better
            prep_params = prep_params_temp
        vqgo_agi_list.append(vqgo_agi)
        print("QSI: Iteration = {:}, True_AGI = {:.8f}, Exp_AGI = {:.8f}, Time = {:.0f} ".format(i+2, agi[i+2], agi_exp[i+2], time()-t))
    plot_result(np.array(vqgo_agi_list), agi, qsi_agi_exp=agi_exp)
#%% Integrated QSI (Compare with Improved VQGO) (Testing)
def qsi(noise, previous_params=None, get_history=False, alpha=0.0, start_time=None):
'''
VQGO algorithm from arXiv:1810.12745. With state preparation params for QSI.
'''
def find_cost_fn(prep_params):
def cost_fn(params):
return get_agi(params, noise, state_prep=prep_params, alpha=alpha)
return cost_fn
if previous_params is None:
params = np.random.uniform(low=-np.pi, high=np.pi, size=(6,3))
else:
params = previous_params
max_iterations = 150
opt = qml.AdamOptimizer(stepsize=0.3)
agi_list = []
agi_exp_list = np.ones(16)
agi_exp_list[0] = get_agi(None, noise, state_prep=False)
params_list = []
if get_history:
agi_true_list = []
cost_fn = find_cost_fn(False)
for n in | |
pipeline_manager.pipelines[0].publishers[1]
self.assertEqual(1, len(new_publisher.samples))
self.assertEqual('a_update',
getattr(new_publisher.samples[0], 'name'))
def test_multiple_counter_pipeline(self):
self._set_pipeline_cfg('counters', ['a', 'b'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
with pipeline_manager.publisher(None) as p:
p([self.test_counter,
sample.Sample(
name='b',
type=self.test_counter.type,
volume=self.test_counter.volume,
unit=self.test_counter.unit,
user_id=self.test_counter.user_id,
project_id=self.test_counter.project_id,
resource_id=self.test_counter.resource_id,
timestamp=self.test_counter.timestamp,
resource_metadata=self.test_counter.resource_metadata,
)])
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(2, len(publisher.samples))
self.assertEqual('a_update', getattr(publisher.samples[0], 'name'))
self.assertEqual('b_update', getattr(publisher.samples[1], 'name'))
    def test_flush_pipeline_cache(self):
        """The cache transformer releases samples only once it holds CACHE_SIZE."""
        CACHE_SIZE = 10
        # cache transformer ahead of a second rename ('_new') transformer.
        extra_transformer_cfg = [
            {
                'name': 'cache',
                'parameters': {
                    'size': CACHE_SIZE,
                }
            },
            {
                'name': 'update',
                'parameters':
                {
                    'append_name': '_new'
                }
            },
        ]
        self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, self.test_counter)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        # Nothing is published while the cache is below its size threshold.
        self.assertEqual(0, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(0, len(publisher.samples))
        pipe.publish_data(None, self.test_counter)
        pipe.flush(None)
        self.assertEqual(0, len(publisher.samples))
        # Fill the cache up to exactly CACHE_SIZE samples in total.
        for i in range(CACHE_SIZE - 2):
            pipe.publish_data(None, self.test_counter)
        pipe.flush(None)
        # A full cache flushes everything, renamed by both transformers.
        self.assertEqual(CACHE_SIZE, len(publisher.samples))
        self.assertEqual('a_update_new', getattr(publisher.samples[0], 'name'))
    def test_flush_pipeline_cache_multiple_counter(self):
        """A size-3 cache releases all buffered samples once it fills.

        Two samples (meters 'a' and 'b') are published first and stay in
        the cache; the third publish reaches the cache size, at which
        point all three transformed samples are released together.
        """
        CACHE_SIZE = 3
        extra_transformer_cfg = [
            {
                'name': 'cache',
                'parameters': {
                    'size': CACHE_SIZE
                }
            },
            {
                'name': 'update',
                'parameters':
                {
                    'append_name': '_new'
                }
            },
        ]
        self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
        self._set_pipeline_cfg('counters', ['a', 'b'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        # First publish: two samples cached, nothing reaches the publisher.
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter,
               sample.Sample(
                   name='b',
                   type=self.test_counter.type,
                   volume=self.test_counter.volume,
                   unit=self.test_counter.unit,
                   user_id=self.test_counter.user_id,
                   project_id=self.test_counter.project_id,
                   resource_id=self.test_counter.resource_id,
                   timestamp=self.test_counter.timestamp,
                   resource_metadata=self.test_counter.resource_metadata,
               )])
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(0, len(publisher.samples))
        # Third sample fills the cache; the whole batch is emitted with
        # both rename transformers applied ('_update' then '_new').
        with pipeline_manager.publisher(None) as p:
            p([self.test_counter])
        self.assertEqual(CACHE_SIZE, len(publisher.samples))
        self.assertEqual('a_update_new',
                         getattr(publisher.samples[0], 'name'))
        self.assertEqual('b_update_new',
                         getattr(publisher.samples[1], 'name'))
def test_flush_pipeline_cache_before_publisher(self):
extra_transformer_cfg = [{
'name': 'cache',
'parameters': {}
}]
self._extend_pipeline_cfg('transformers', extra_transformer_cfg)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
publisher = pipe.publishers[0]
pipe.publish_data(None, self.test_counter)
self.assertEqual(0, len(publisher.samples))
pipe.flush(None)
self.assertEqual(1, len(publisher.samples))
self.assertEqual('a_update',
getattr(publisher.samples[0], 'name'))
    def test_global_unit_conversion(self):
        """unit_conversion with an empty source spec rescales every sample.

        A cumulative cpu sample of 1 200 000 000 is divided by
        (10**6 * 60), so the published cpu_mins sample must carry
        volume 20 while keeping its cumulative type.
        """
        scale = 'volume / ((10**6) * 60)'
        transformer_cfg = [
            {
                'name': 'unit_conversion',
                'parameters': {
                    # Empty source: match all samples of the pipeline.
                    'source': {},
                    'target': {'name': 'cpu_mins',
                               'unit': 'min',
                               'scale': scale},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        counters = [
            sample.Sample(
                name='cpu',
                type=sample.TYPE_CUMULATIVE,
                volume=1200000000,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        # The conversion is immediate; flushing must not add anything.
        self.assertEqual(1, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        cpu_mins = publisher.samples[-1]
        self.assertEqual('cpu_mins', getattr(cpu_mins, 'name'))
        self.assertEqual('min', getattr(cpu_mins, 'unit'))
        self.assertEqual(sample.TYPE_CUMULATIVE, getattr(cpu_mins, 'type'))
        # 1200000000 / (10**6 * 60) == 20
        self.assertEqual(20, getattr(cpu_mins, 'volume'))
def test_unit_identified_source_unit_conversion(self):
transformer_cfg = [
{
'name': 'unit_conversion',
'parameters': {
'source': {'unit': '°C'},
'target': {'unit': '°F',
'scale': '(volume * 1.8) + 32'},
}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['core_temperature',
'ambient_temperature'])
counters = [
sample.Sample(
name='core_temperature',
type=sample.TYPE_GAUGE,
volume=36.0,
unit='°C',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
sample.Sample(
name='ambient_temperature',
type=sample.TYPE_GAUGE,
volume=88.8,
unit='°F',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={}
),
]
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
publisher = pipeline_manager.pipelines[0].publishers[0]
self.assertEqual(2, len(publisher.samples))
core_temp = publisher.samples[0]
self.assertEqual('core_temperature', getattr(core_temp, 'name'))
self.assertEqual('°F', getattr(core_temp, 'unit'))
self.assertEqual(96.8, getattr(core_temp, 'volume'))
amb_temp = publisher.samples[1]
self.assertEqual('ambient_temperature', getattr(amb_temp, 'name'))
self.assertEqual('°F', getattr(amb_temp, 'unit'))
self.assertEqual(88.8, getattr(amb_temp, 'volume'))
self.assertEqual(96.8, getattr(core_temp, 'volume'))
    def _do_test_rate_of_change_conversion(self, prev, curr, type, expected,
                                           offset=1, weight=None):
        """Drive the rate_of_change transformer over paired cpu samples.

        Publishes two samples (*prev* then *curr*, *offset* minutes apart)
        for each of two resources and checks the derived ``cpu_util``
        gauge.  'test_resource' reports 4 CPUs and is expected to yield
        *expected*; 'test_resource2' reports 2 CPUs and is expected to
        yield ``expected * 2``.

        Args:
            prev: earlier cumulative cpu volume, in ns.
            curr: later cumulative cpu volume, in ns.
            type: sample type fed in (cumulative or gauge).
            expected: expected cpu_util volume for the 4-CPU resource.
            offset: minutes between the two samples.
            weight: optional autoscaling weight stored in user metadata.
        """
        # Scale expression combines: the optional user-supplied weight, a
        # deliberately non-existent metadata path (exercises the
        # "or <default>" fallback), and the ns -> % conversion per CPU.
        s = ("(resource_metadata.user_metadata.autoscaling_weight or 1.0)"
             "* (resource_metadata.non.existent or 1.0)"
             "* (100.0 / (10**9 * (resource_metadata.cpu_number or 1)))")
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_util',
                               'unit': '%',
                               'type': sample.TYPE_GAUGE,
                               'scale': s},
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        now = timeutils.utcnow()
        later = now + datetime.timedelta(minutes=offset)
        um = {'autoscaling_weight': weight} if weight else {}
        # Two samples per resource: both 'prev' values at `now`, both
        # 'curr' values at `later`.
        counters = [
            sample.Sample(
                name='cpu',
                type=type,
                volume=prev,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 4,
                                   'user_metadata': um},
            ),
            sample.Sample(
                name='cpu',
                type=type,
                volume=prev,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource2',
                timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 2,
                                   'user_metadata': um},
            ),
            sample.Sample(
                name='cpu',
                type=type,
                volume=curr,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=later.isoformat(),
                resource_metadata={'cpu_number': 4,
                                   'user_metadata': um},
            ),
            sample.Sample(
                name='cpu',
                type=type,
                volume=curr,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource2',
                timestamp=later.isoformat(),
                resource_metadata={'cpu_number': 2,
                                   'user_metadata': um},
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        # One rate sample per resource; flushing adds nothing further.
        self.assertEqual(2, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(2, len(publisher.samples))
        cpu_util = publisher.samples[0]
        self.assertEqual('cpu_util', getattr(cpu_util, 'name'))
        self.assertEqual('test_resource', getattr(cpu_util, 'resource_id'))
        self.assertEqual('%', getattr(cpu_util, 'unit'))
        self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type'))
        self.assertEqual(expected, getattr(cpu_util, 'volume'))
        cpu_util = publisher.samples[1]
        self.assertEqual('cpu_util', getattr(cpu_util, 'name'))
        self.assertEqual('test_resource2', getattr(cpu_util, 'resource_id'))
        self.assertEqual('%', getattr(cpu_util, 'unit'))
        self.assertEqual(sample.TYPE_GAUGE, getattr(cpu_util, 'type'))
        # Half the CPUs -> twice the utilisation for the same delta.
        self.assertEqual(expected * 2, getattr(cpu_util, 'volume'))
def test_rate_of_change_conversion(self):
self._do_test_rate_of_change_conversion(120000000000,
180000000000,
sample.TYPE_CUMULATIVE,
25.0)
def test_rate_of_change_conversion_weight(self):
self._do_test_rate_of_change_conversion(120000000000,
180000000000,
sample.TYPE_CUMULATIVE,
27.5,
weight=1.1)
def test_rate_of_change_conversion_negative_cumulative_delta(self):
self._do_test_rate_of_change_conversion(180000000000,
120000000000,
sample.TYPE_CUMULATIVE,
50.0)
def test_rate_of_change_conversion_negative_gauge_delta(self):
self._do_test_rate_of_change_conversion(180000000000,
120000000000,
sample.TYPE_GAUGE,
-25.0)
def test_rate_of_change_conversion_zero_delay(self):
self._do_test_rate_of_change_conversion(120000000000,
120000000000,
sample.TYPE_CUMULATIVE,
0.0,
offset=0)
    def test_rate_of_change_no_predecessor(self):
        """A lone sample yields no rate: there is nothing to diff against."""
        s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_util',
                               'unit': '%',
                               'type': sample.TYPE_GAUGE,
                               'scale': s}
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        now = timeutils.utcnow()
        counters = [
            sample.Sample(
                name='cpu',
                type=sample.TYPE_CUMULATIVE,
                volume=120000000000,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        # No prior sample for this resource, so nothing is emitted...
        self.assertEqual(0, len(publisher.samples))
        pipe.flush(None)
        # ...and flushing does not change that.
        self.assertEqual(0, len(publisher.samples))
    @mock.patch('ceilometer.transformer.conversions.LOG')
    def test_rate_of_change_out_of_order(self, the_log):
        """Samples older than the last seen one are dropped with a warning.

        Three samples are published in the order now / earlier / later:
        the stale 'earlier' sample must be discarded (and logged), so the
        single emitted rate comes from the now -> later pair.
        """
        s = "100.0 / (10**9 * (resource_metadata.cpu_number or 1))"
        transformer_cfg = [
            {
                'name': 'rate_of_change',
                'parameters': {
                    'source': {},
                    'target': {'name': 'cpu_util',
                               'unit': '%',
                               'type': sample.TYPE_GAUGE,
                               'scale': s}
                }
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['cpu'])
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        now = timeutils.utcnow()
        earlier = now - datetime.timedelta(seconds=10)
        later = now + datetime.timedelta(seconds=10)
        counters = [
            sample.Sample(
                name='cpu',
                type=sample.TYPE_CUMULATIVE,
                volume=125000000000,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=now.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
            sample.Sample(
                name='cpu',
                type=sample.TYPE_CUMULATIVE,
                volume=120000000000,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=earlier.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
            sample.Sample(
                name='cpu',
                type=sample.TYPE_CUMULATIVE,
                volume=130000000000,
                unit='ns',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=later.isoformat(),
                resource_metadata={'cpu_number': 4}
            ),
        ]
        pipe.publish_data(None, counters)
        publisher = pipe.publishers[0]
        self.assertEqual(1, len(publisher.samples))
        pipe.flush(None)
        self.assertEqual(1, len(publisher.samples))
        cpu_util_sample = publisher.samples[0]
        # 5e9 ns delta over the 10 s now->later window, 4 CPUs -> 12.5 %.
        self.assertEqual(12.5, cpu_util_sample.volume)
        # The stale sample must be reported through the module logger.
        the_log.warning.assert_called_with(
            'dropping out of time order sample: %s',
            (counters[1],)
        )
def test_resources(self):
resources = ['test1://', 'test2://']
self._set_pipeline_cfg('resources', resources)
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(resources,
pipeline_manager.pipelines[0].resources)
def test_no_resources(self):
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
self.assertEqual(0, len(pipeline_manager.pipelines[0].resources))
def _do_test_rate_of_change_mapping(self, pipe, meters, units):
now = timeutils.utcnow()
base = 1000
offset = 7
rate = 42
later = now + datetime.timedelta(minutes=offset)
counters = []
for v, ts in [(base, now.isoformat()),
(base + (offset * 60 * rate), later.isoformat())]:
for n, u, r in [(meters[0], units[0], 'resource1'),
(meters[1], units[1], 'resource2')]:
s = sample.Sample(
name=n,
type=sample.TYPE_CUMULATIVE,
volume=v,
unit=u,
user_id='test_user',
project_id='test_proj',
resource_id=r,
timestamp=ts,
resource_metadata={},
)
counters.append(s)
pipe.publish_data(None, counters)
publisher = pipe.publishers[0]
self.assertEqual(2, len(publisher.samples))
pipe.flush(None)
self.assertEqual(2, len(publisher.samples))
bps = publisher.samples[0]
self.assertEqual('%s.rate' % meters[0], getattr(bps, 'name'))
self.assertEqual('resource1', getattr(bps, 'resource_id'))
self.assertEqual('%s/s' % units[0], getattr(bps, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(bps, 'type'))
self.assertEqual(rate, getattr(bps, 'volume'))
rps = publisher.samples[1]
self.assertEqual('%s.rate' % meters[1], getattr(rps, 'name'))
self.assertEqual('resource2', getattr(rps, 'resource_id'))
self.assertEqual('%s/s' % units[1], getattr(rps, 'unit'))
self.assertEqual(sample.TYPE_GAUGE, getattr(rps, 'type'))
self.assertEqual(rate, getattr(rps, 'volume'))
def test_rate_of_change_mapping(self):
map_from = {'name': 'disk\\.(read|write)\\.(bytes|requests)',
'unit': '(B|request)'}
map_to = {'name': 'disk.\\1.\\2.rate',
'unit': '\\1/s'}
transformer_cfg = [
{
'name': 'rate_of_change',
'parameters': {
'source': {
'map_from': map_from
},
'target': {
'map_to': map_to,
'type': sample.TYPE_GAUGE
},
},
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters', ['disk.read.bytes',
'disk.write.requests'])
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
meters = ('disk.read.bytes', 'disk.write.requests')
units = ('B', 'request')
self._do_test_rate_of_change_mapping(pipe, meters, units)
    def _do_test_aggregator(self, parameters, expected_length):
        """Run the aggregator transformer over a fixed set of six samples.

        The samples span two users, two projects and three metadata
        versions, so different grouping parameters collapse them into
        different numbers of aggregates.  Volumes per (user, project):
        (test_user, test_proj) 26+16; (test_user_bis, test_proj_bis)
        53+42; (test_user, test_proj_bis) 15; (test_user_bis, test_proj) 2.

        Args:
            parameters: aggregator transformer parameter dict.
            expected_length: number of aggregated samples expected.

        Returns:
            the published samples, sorted by volume for stable indexing.
        """
        transformer_cfg = [
            {
                'name': 'aggregator',
                'parameters': parameters,
            },
        ]
        self._set_pipeline_cfg('transformers', transformer_cfg)
        self._set_pipeline_cfg('counters', ['storage.objects.incoming.bytes'])
        counters = [
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA,
                volume=26,
                unit='B',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA,
                volume=16,
                unit='B',
                user_id='test_user',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA,
                volume=53,
                unit='B',
                user_id='test_user_bis',
                project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '1.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA,
                volume=42,
                unit='B',
                user_id='test_user_bis',
                project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA,
                volume=15,
                unit='B',
                user_id='test_user',
                project_id='test_proj_bis',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '2.0'}
            ),
            sample.Sample(
                name='storage.objects.incoming.bytes',
                type=sample.TYPE_DELTA,
                volume=2,
                unit='B',
                user_id='test_user_bis',
                project_id='test_proj',
                resource_id='test_resource',
                timestamp=timeutils.utcnow().isoformat(),
                resource_metadata={'version': '3.0'}
            ),
        ]
        pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
                                                    self.transformer_manager)
        pipe = pipeline_manager.pipelines[0]
        pipe.publish_data(None, counters)
        # Flush to force the aggregator to emit whatever it accumulated.
        pipe.flush(None)
        publisher = pipeline_manager.pipelines[0].publishers[0]
        self.assertEqual(expected_length, len(publisher.samples))
        # Sort so callers can index deterministically regardless of
        # aggregation order.
        return sorted(publisher.samples, key=lambda s: s.volume)
def test_aggregator_meter_type(self):
volumes = [1.0, 2.0, 3.0]
transformer_cfg = [
{
'name': 'aggregator',
'parameters': {'size': len(volumes) * len(sample.TYPES)}
},
]
self._set_pipeline_cfg('transformers', transformer_cfg)
self._set_pipeline_cfg('counters',
['testgauge', 'testcumulative', 'testdelta'])
counters = []
for sample_type in sample.TYPES:
for volume in volumes:
counters.append(sample.Sample(
name='test' + sample_type,
type=sample_type,
volume=volume,
unit='B',
user_id='test_user',
project_id='test_proj',
resource_id='test_resource',
timestamp=timeutils.utcnow().isoformat(),
resource_metadata={'version': '1.0'}
))
pipeline_manager = pipeline.PipelineManager(self.pipeline_cfg,
self.transformer_manager)
pipe = pipeline_manager.pipelines[0]
pipe.publish_data(None, counters)
pipe.flush(None)
publisher = pipeline_manager.pipelines[0].publishers[0]
actual = sorted(s.volume for s in publisher.samples)
self.assertEqual([2.0, 3.0, 6.0], actual)
    def test_aggregator_metadata(self):
        """Aggregates keep the first or last sample's resource metadata.

        With 'last' the version-2.0 metadata of the later samples wins
        for merged (user, project) groups; with 'first' the 1.0 version
        does.  Single-sample groups keep their own metadata either way.
        """
        for conf, expected_version in [('last', '2.0'), ('first', '1.0')]:
            samples = self._do_test_aggregator({
                'resource_metadata': conf,
                'target': {'name': 'aggregated-bytes'}
            }, expected_length=4)
            # Singleton group: the lone 3.0-version sample (volume 2).
            s = samples[0]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(2, s.volume)
            self.assertEqual('test_user_bis', s.user_id)
            self.assertEqual('test_proj', s.project_id)
            self.assertEqual({'version': '3.0'},
                             s.resource_metadata)
            # Singleton group: the lone 2.0-version sample (volume 15).
            s = samples[1]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(15, s.volume)
            self.assertEqual('test_user', s.user_id)
            self.assertEqual('test_proj_bis', s.project_id)
            self.assertEqual({'version': '2.0'},
                             s.resource_metadata)
            # Merged group 26+16: metadata depends on first/last policy.
            s = samples[2]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(42, s.volume)
            self.assertEqual('test_user', s.user_id)
            self.assertEqual('test_proj', s.project_id)
            self.assertEqual({'version': expected_version},
                             s.resource_metadata)
            # Merged group 53+42: metadata depends on first/last policy.
            s = samples[3]
            self.assertEqual('aggregated-bytes', s.name)
            self.assertEqual(95, s.volume)
            self.assertEqual('test_user_bis', s.user_id)
            self.assertEqual('test_proj_bis', s.project_id)
            self.assertEqual({'version': expected_version},
                             s.resource_metadata)
def test_aggregator_user_last_and_metadata_last(self):
samples = self._do_test_aggregator({
'resource_metadata': 'last',
'user_id': 'last',
'target': {'name': 'aggregated-bytes'}
}, expected_length=2)
s = samples[0]
self.assertEqual('aggregated-bytes', | |
<reponame>pedrocamargo/fast-trips
from __future__ import division
from builtins import str
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import os
import pandas as pd
from .Error import NetworkInputError
from .Logger import FastTripsLogger
class Transfer(object):
"""
Transfer class.
One instance represents all of the Transfer links.
Stores transfer link information in :py:attr:`Transfer.transfers_df`, an
instance of :py:class:`pandas.DataFrame`.
"""
#: File with fasttrips transfer information (this extends the
#: `gtfs transfers <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/transfers.md>`_ file).
#: See `transfers_ft specification <https://github.com/osplanning-data-standards/GTFS-PLUS/blob/master/files/transfers_ft.md>`_.
INPUT_TRANSFERS_FILE = "transfers_ft.txt"
#: gtfs Transfers column name: Origin stop identifier
TRANSFERS_COLUMN_FROM_STOP = 'from_stop_id'
#: gtfs Transfers column name: Destination stop identifier
TRANSFERS_COLUMN_TO_STOP = 'to_stop_id'
#: gtfs Transfers column name: Transfer Type
TRANSFERS_COLUMN_TRANSFER_TYPE = 'transfer_type'
#: gtfs Transfers column name: Minimum transfer time for transfer_type=2. Float, seconds.
TRANSFERS_COLUMN_MIN_TRANSFER_TIME = 'min_transfer_time'
#: fasttrips Transfers column name: Link walk distance, in miles. This is a float.
TRANSFERS_COLUMN_DISTANCE = 'dist'
#: fasttrips Transfers column name: Origin route identifier
TRANSFERS_COLUMN_FROM_ROUTE = 'from_route_id'
#: fasttrips Transfers column name: Destination route identifier
TRANSFERS_COLUMN_TO_ROUTE = 'to_route_id'
#: fasttrips Transfers column name: Schedule precedence
TRANSFERS_COLUMN_SCHEDULE_PRECEDENCE = 'schedule_precedence'
#: fasttrips Transfers column name: Elevation Gain, feet gained along link. Integer.
TRANSFERS_COLUMN_ELEVATION_GAIN = 'elevation_gain'
#: fasttrips Transfers column name: Population Density, people per square mile. Float.
TRANSFERS_COLUMN_POPULATION_DENSITY = 'population_density'
#: fasttrips Transfers column name: Retail Density, employees per square mile. Float.
TRANSFERS_COLUMN_RETAIL_DENSITY = 'retail_density'
#: fasttrips Transfers column name: Auto Capacity, vehicles per hour per mile. Float.
TRANSFERS_COLUMN_AUTO_CAPACITY = 'auto_capacity'
#: fasttrips Transfers column name: Indirectness, ratio of Manhattan distance to crow-fly distance. Float.
TRANSFERS_COLUMN_INDIRECTNESS = 'indirectness'
# ========== Added by fasttrips =======================================================
#: fasttrips Transfers column name: Is this a stop-to-stop transfer? (e.g. from transfers.txt, and not involving a lot)
TRANSFERS_COLUMN_STOP_TO_STOP = "stop2stop"
#: fasttrips Transfers column name: Origin Stop Numerical Identifier. Int.
TRANSFERS_COLUMN_FROM_STOP_NUM = 'from_stop_id_num'
#: fasttrips Transfers column name: Destination Stop Numerical Identifier. Int.
TRANSFERS_COLUMN_TO_STOP_NUM = 'to_stop_id_num'
#: gtfs Transfers column name: Minimum transfer time for transfer_type=2. Float, min.
TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN = 'min_transfer_time_min'
#: Transfer walk speed, in miles per hour
#:
#: .. todo:: Make this configurable?
#:
WALK_SPEED_MILES_PER_HOUR = 2.7
#: Transfers column name: Link walk time. This is a TimeDelta.
#:
#: .. todo:: Remove these? Maybe weights should be distance based? Walk speed is configured how?
#:
TRANSFERS_COLUMN_TIME = 'time'
#: Transfers column name: Link walk time in minutes. This is a float.
TRANSFERS_COLUMN_TIME_MIN = 'time_min'
#: Transfers column name: Link generic cost. Float.
TRANSFERS_COLUMN_PENALTY = 'transfer_penalty'
#: File with transfer links for C++ extension
#: It's easier to pass it via file rather than through the
#: initialize_fasttrips_extension() because of the strings involved
OUTPUT_TRANSFERS_FILE = "ft_intermediate_transfers.txt"
    def __init__(self, input_archive, output_dir, gtfs_feed):
        """
        Constructor. Reads the gtfs data from the transitfeed schedule, and the additional
        fast-trips transfers data from the input files in *input_archive*.

        Args:
            input_archive: network input location (the supplemental file is
                actually read through *gtfs_feed*; this argument is not
                referenced in this constructor).
            output_dir: directory where intermediate files for the C++
                extension are written.
            gtfs_feed: feed object exposing the gtfs ``transfers`` table and
                a ``get()`` accessor for supplemental files.

        Raises:
            NetworkInputError: if a transfer specifies only one of
                from_route_id/to_route_id, or a transfer link is implausibly
                long (walk time over 780 minutes).
        """
        self.output_dir = output_dir
        # Combine all gtfs Transfer objects to a single pandas DataFrame
        self.transfers_df = gtfs_feed.transfers
        # make it zero if transfer_type != 2, since that's the only time it applies
        self.transfers_df.loc[self.transfers_df[Transfer.TRANSFERS_COLUMN_TRANSFER_TYPE] != 2, \
                              Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME] = 0
        # these are from transfers.txt so they don't involve lots
        self.transfers_df[Transfer.TRANSFERS_COLUMN_STOP_TO_STOP] = True
        # Read the fast-trips supplemental transfers data file
        transfers_ft_df = gtfs_feed.get(Transfer.INPUT_TRANSFERS_FILE)
        # verify required columns are present
        transfer_ft_cols = list(transfers_ft_df.columns.values)
        assert (Transfer.TRANSFERS_COLUMN_FROM_STOP in transfer_ft_cols)
        assert (Transfer.TRANSFERS_COLUMN_TO_STOP in transfer_ft_cols)
        assert (Transfer.TRANSFERS_COLUMN_DISTANCE in transfer_ft_cols)
        # join to the transfers dataframe -- need to use the transfers_ft as the primary because
        # it may have PNR lot id to/from stop transfers (while gtfs transfers does not),
        # and we don't want to drop them
        if len(transfers_ft_df) > 0:
            self.transfers_df = pd.merge(left=self.transfers_df, right=transfers_ft_df,
                                         how='right',
                                         on=[Transfer.TRANSFERS_COLUMN_FROM_STOP,
                                             Transfer.TRANSFERS_COLUMN_TO_STOP])
            # fill in NAN
            # (rows that only exist in transfers_ft have no gtfs attributes)
            self.transfers_df.fillna(value={Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME: 0,
                                            Transfer.TRANSFERS_COLUMN_TRANSFER_TYPE: 0,
                                            Transfer.TRANSFERS_COLUMN_STOP_TO_STOP: False},
                                     inplace=True)
        if Transfer.TRANSFERS_COLUMN_FROM_ROUTE not in self.transfers_df.columns.values:
            self.transfers_df[Transfer.TRANSFERS_COLUMN_FROM_ROUTE] = None
        if Transfer.TRANSFERS_COLUMN_TO_ROUTE not in self.transfers_df.columns.values:
            self.transfers_df[Transfer.TRANSFERS_COLUMN_TO_ROUTE] = None
        # support BOTH TRANSFERS_COLUMN_FROM_ROUTE and TRANSFERS_COLUMN_TO_ROUTE but not one
        # (XOR of the two notnull masks selects rows with exactly one set)
        one_route_specified_df = self.transfers_df.loc[
            self.transfers_df[Transfer.TRANSFERS_COLUMN_FROM_ROUTE].notnull() ^
            self.transfers_df[Transfer.TRANSFERS_COLUMN_TO_ROUTE].notnull()]
        if len(one_route_specified_df):
            error_msg = "Only one of %s or %s specified for transfer: need both or neither:\n%s" % \
                        (Transfer.TRANSFERS_COLUMN_FROM_ROUTE, Transfer.TRANSFERS_COLUMN_TO_ROUTE,
                         str(one_route_specified_df))
            FastTripsLogger.fatal(error_msg)
            raise NetworkInputError(Transfer.INPUT_TRANSFERS_FILE, error_msg)
        # SPECIAL -- we rely on this in the extension
        self.transfers_df[Transfer.TRANSFERS_COLUMN_PENALTY] = 1.0
        FastTripsLogger.debug("=========== TRANSFERS ===========\n" + str(self.transfers_df.head()))
        FastTripsLogger.debug("\n" + str(self.transfers_df.dtypes))
        # TODO: this is to be consistent with original implementation. Remove?
        if len(self.transfers_df) > 0:
            self.transfers_df[Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN] = \
                self.transfers_df[Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME] / 60.0
            # fill in null dist
            null_dist = self.transfers_df.loc[self.transfers_df[Transfer.TRANSFERS_COLUMN_DISTANCE].isnull()]
            if len(null_dist) > 0:
                FastTripsLogger.warn("Filling in %d transfers with null dist" % len(null_dist))
                self.transfers_df.loc[self.transfers_df[Transfer.TRANSFERS_COLUMN_DISTANCE].isnull(),
                                      Transfer.TRANSFERS_COLUMN_DISTANCE] = 0.0
            # transfer time is based on distance
            self.transfers_df[Transfer.TRANSFERS_COLUMN_TIME_MIN] = \
                self.transfers_df[Transfer.TRANSFERS_COLUMN_DISTANCE] * 60.0 / Transfer.WALK_SPEED_MILES_PER_HOUR
            # Sanity check transfer times. A 13 hour-long walk transfer is suspicious.
            # TODO: make this less arbitrary? It's based on the max SFCTA xfer link but it is too high
            too_long_transfers = self.transfers_df.loc[self.transfers_df[Transfer.TRANSFERS_COLUMN_TIME_MIN] > 780]
            if len(too_long_transfers) > 0:
                error_msg = "Found %d excessively long transfer links out of %d total transfer links. Expected distances are in miles. Unit problem?" % \
                            (len(too_long_transfers), len(self.transfers_df))
                FastTripsLogger.fatal(error_msg)
                FastTripsLogger.fatal("\n%s\n" % str(too_long_transfers.head()))
                raise NetworkInputError(Transfer.INPUT_TRANSFERS_FILE, error_msg)
            # enforce the gtfs minimum transfer time as a floor on the walk time
            self.transfers_df.loc[ \
                self.transfers_df[Transfer.TRANSFERS_COLUMN_TIME_MIN] < self.transfers_df[
                    Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN], \
                Transfer.TRANSFERS_COLUMN_TIME_MIN] = self.transfers_df[Transfer.TRANSFERS_COLUMN_MIN_TRANSFER_TIME_MIN]
            # convert time column from float to timedelta
            self.transfers_df[Transfer.TRANSFERS_COLUMN_TIME] = \
                self.transfers_df[Transfer.TRANSFERS_COLUMN_TIME_MIN].map(lambda x: datetime.timedelta(minutes=x))
        FastTripsLogger.debug("Final\n" + str(self.transfers_df))
        FastTripsLogger.debug("\n" + str(self.transfers_df.dtypes))
        FastTripsLogger.info("Read %7d %15s from %25s, %25s" %
                             (len(self.transfers_df), "transfers", "transfers.txt", Transfer.INPUT_TRANSFERS_FILE))
def add_numeric_stop_id(self, stops):
"""
Stops are now equipped to add numeric ID (DAPs are in) so grab them
"""
# Add the numeric stop ids to transfers
if len(self.transfers_df) > 0:
self.transfers_df = stops.add_numeric_stop_id(self.transfers_df,
id_colname=Transfer.TRANSFERS_COLUMN_FROM_STOP,
numeric_newcolname=Transfer.TRANSFERS_COLUMN_FROM_STOP_NUM,
warn=True,
warn_msg="Numeric stop id not found for transfer from_stop_id")
self.transfers_df = stops.add_numeric_stop_id(self.transfers_df,
id_colname=Transfer.TRANSFERS_COLUMN_TO_STOP,
numeric_newcolname=Transfer.TRANSFERS_COLUMN_TO_STOP_NUM,
warn=True,
warn_msg="Numeric stop id not found for transfer to_stop_id")
# We're ready to write it
self.write_transfers_for_extension()
def add_transfer_attributes(self, transfer_links_df, all_links_df):
"""
Adds transfer attributes for transfer links and returns those transfer links with the additional columns.
Pass all_links_df in order to get the from_route_id and to_route_id for the transfers.
"""
from .Passenger import Passenger
len_transfer_links_df = len(transfer_links_df)
transfer_links_cols = list(transfer_links_df.columns.values)
FastTripsLogger.debug(
"add_transfer_attributes: transfer_links_df(%d) head(20)=\n%s\ntransfers_df head(20)=\n%s" % \
(len_transfer_links_df, transfer_links_df.head(20).to_string(), self.transfers_df.head(20).to_string()))
# nothing to do
if len_transfer_links_df == 0:
return transfer_links_df
if len(self.transfers_df) == 0:
return transfer_links_df
# these will be filled for route matches
transfer_links_done = pd.DataFrame()
# match on both from route and to route
if Transfer.TRANSFERS_COLUMN_FROM_ROUTE not in self.transfers_df.columns.values:
transfers_with_routes_df = pd.DataFrame()
transfers_wo_routes_df = self.transfers_df
else:
transfers_with_routes_df = self.transfers_df.loc[
self.transfers_df[Transfer.TRANSFERS_COLUMN_FROM_ROUTE].notnull()]
transfers_wo_routes_df = self.transfers_df.loc[
self.transfers_df[Transfer.TRANSFERS_COLUMN_FROM_ROUTE].isnull()]
FastTripsLogger.debug("add_transfer_attributes: have %d transfers with routes and %d transfers without routes" % \
(len(transfers_with_routes_df), len(transfers_wo_routes_df)))
if len(transfers_with_routes_df) > 0:
# this is what we need of the trips
trip_links_df = all_links_df.loc[all_links_df[Passenger.PF_COL_ROUTE_ID].notnull(),
[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM,
Passenger.PF_COL_ROUTE_ID]
]
# FastTripsLogger.debug("trip_links_df head(20)=\n%s" % trip_links_df.head().to_string())
# match transfer with trip's next link to get from route_id
trip_links_df["next_link_num"] = trip_links_df[Passenger.PF_COL_LINK_NUM] + 1
transfer_links_df = pd.merge(left=transfer_links_df,
left_on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM],
right=trip_links_df[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
"next_link_num",
Passenger.PF_COL_ROUTE_ID]],
right_on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
"next_link_num"],
suffixes=["", " from"],
how="left")
transfer_links_df.rename(
columns={"%s from" % Passenger.PF_COL_ROUTE_ID: Transfer.TRANSFERS_COLUMN_FROM_ROUTE}, inplace=True)
# match transfer with trip's prev link to get to route_id
trip_links_df["prev_link_num"] = trip_links_df[Passenger.PF_COL_LINK_NUM] - 1
transfer_links_df = pd.merge(left=transfer_links_df,
left_on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
Passenger.PF_COL_LINK_NUM],
right=trip_links_df[[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
"prev_link_num",
Passenger.PF_COL_ROUTE_ID]],
right_on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.PF_COL_PATH_NUM,
"prev_link_num"],
suffixes=["", " to"],
how="left")
transfer_links_df.rename(columns={"%s to" % Passenger.PF_COL_ROUTE_ID: Transfer.TRANSFERS_COLUMN_TO_ROUTE},
inplace=True)
transfer_links_df.drop(["prev_link_num", "next_link_num"], axis=1, inplace=True)
# FastTripsLogger.debug("transfer_links_df after adding route info:\n%s" % transfer_links_df.head(20).to_string())
# match on transfer attributes
transfer_links_df = pd.merge(left=transfer_links_df,
left_on=["A_id_num", "B_id_num",
Transfer.TRANSFERS_COLUMN_FROM_ROUTE,
Transfer.TRANSFERS_COLUMN_TO_ROUTE],
right=transfers_with_routes_df,
right_on=[Transfer.TRANSFERS_COLUMN_FROM_STOP_NUM,
Transfer.TRANSFERS_COLUMN_TO_STOP_NUM,
Transfer.TRANSFERS_COLUMN_FROM_ROUTE,
Transfer.TRANSFERS_COLUMN_TO_ROUTE],
how="left",
indicator=True)
# FastTripsLogger.debug("transfer_links_df _merge: \n%s" % str(transfer_links_df["_merge"].value_counts()))
transfer_links_df.drop([Transfer.TRANSFERS_COLUMN_FROM_STOP_NUM, Transfer.TRANSFERS_COLUMN_TO_STOP_NUM],
axis=1, inplace=True)
# now some of these have attributes, some still need
| |
(('species', ), )
ordering = ('species',)
frozen_columns = 1
def serialize(self):
    """ Generate string representation

    Returns:
        :obj:`str`: serialized species wrapped in ``CONC[...]``
    """
    inner = self.species.serialize()
    return 'CONC[' + inner + ']'
class SpeciesTypeCoefficient(obj_tables.Model):
    """ A tuple of a species type and a coefficient

    Attributes:
        species_type (:obj:`SpeciesType`): species_type
        coefficient (:obj:`float`): coefficient

    Related attributes:
        complex (:obj:`ComplexSpeciesType`): complex
    """
    species_type = ManyToOneAttribute(SpeciesType, related_name='species_type_coefficients')
    # stoichiometric count of the species type; negative values are rejected
    coefficient = FloatAttribute(min=0.)

    class Meta(obj_tables.Model.Meta):
        attribute_order = ('species_type', 'coefficient')
        frozen_columns = 1
        # rendered inline inside the parent cell rather than on its own sheet
        table_format = TableFormat.cell
        ordering = ('species_type',)

    def serialize(self):
        """ Serialize related object

        Returns:
            :obj:`str`: string representation of a species type and a coefficient
        """
        return self._serialize(self.species_type, self.coefficient)

    @staticmethod
    def _serialize(species_type, coefficient):
        """ Serialize values

        Args:
            species_type (:obj:`SpeciesType`): species_type
            coefficient (:obj:`float`): coefficient

        Returns:
            :obj:`str`: string representation of a species type and a coefficient
        """
        coefficient = float(coefficient)
        if coefficient == 1:
            # a coefficient of one is implicit and omitted from the text form
            coefficient_str = ''
        elif coefficient % 1 == 0 and abs(coefficient) < 1000:
            # small integral coefficients render as plain integers, e.g. '(2) '
            coefficient_str = '({:.0f}) '.format(coefficient)
        else:
            # everything else uses scientific notation, e.g. '(2.500000e+00) '
            coefficient_str = '({:e}) '.format(coefficient)
        return '{}{}'.format(coefficient_str, species_type.get_primary_attribute())

    @classmethod
    def deserialize(cls, attribute, value, objects):
        """ Deserialize value

        Args:
            attribute (:obj:`Attribute`): attribute
            value (:obj:`str`): String representation
            objects (:obj:`dict`): dictionary of objects, grouped by model

        Returns:
            :obj:`tuple` of `list` of `SpeciesTypeCoefficient`, `InvalidAttribute` or `None`: tuple of cleaned value
                and cleaning error
        """
        parts = []
        errors = []
        # strip the ^...$ anchors from the species type id pattern so it can
        # be embedded in a larger regular expression
        st_id = SpeciesType.id.pattern[1:-1]
        # '(<number>)' stoichiometry prefix; accepts optional exponent notation
        stoch = r'\(((\d*\.?\d+|\d+\.)(e[\-\+]?\d+)?)\)'
        gbl_part = r'({} )*({})'.format(stoch, st_id)
        gbl_side = r'{}( \+ {})*'.format(gbl_part, gbl_part)
        gbl_pattern = r'^({})$'.format(gbl_side)
        global_match = re.match(gbl_pattern, value, flags=re.I)
        if global_match:
            subunits_str = global_match.group(1)
        else:
            attr = cls.Meta.attributes['species_type']
            return (None, InvalidAttribute(attr, ['Incorrectly formatted participants: {}'.format(value)]))
        # NOTE(review): part[1]/part[4] depend on the capture-group layout of
        # `stoch` and `st_id`; if SpeciesType.id.pattern gains capture groups
        # the indices below would shift -- confirm before changing either pattern.
        for part in re.findall(gbl_part, subunits_str, flags=re.I):
            species_type = None
            # search every registered SpeciesType subclass for the parsed id
            for species_type_cls in get_subclasses(SpeciesType):
                if species_type_cls in objects and part[4] in objects[species_type_cls]:
                    species_type = objects[species_type_cls][part[4]]
                    break
            if not species_type:
                errors.append('Undefined species type "{}"'.format(part[4]))
            # a missing stoichiometry prefix means a coefficient of 1
            coefficient = float(part[1] or 1.)
            if not errors:
                if cls not in objects:
                    objects[cls] = {}
                # reuse an existing coefficient object with the same serialization
                serialized_value = cls._serialize(species_type, coefficient)
                if serialized_value in objects[cls]:
                    subunit_part = objects[cls][serialized_value]
                else:
                    subunit_part = cls(species_type=species_type, coefficient=coefficient)
                    objects[cls][serialized_value] = subunit_part
                parts.append(subunit_part)
        if errors:
            return (None, InvalidAttribute(cls, errors))
        return (parts, None)
class SpeciesCoefficient(obj_tables.Model):
    """ A tuple of a species and a coefficient

    Attributes:
        species (:obj:`Species`): species
        coefficient (:obj:`float`): coefficient

    Related attributes:
        reaction (:obj:`Reaction`): reaction
    """
    species = ManyToOneAttribute(Species, related_name='species_coefficients')
    # reaction stoichiometry; may be negative (reactants) but never NaN
    coefficient = FloatAttribute(nan=False)

    class Meta(obj_tables.Model.Meta):
        attribute_order = ('species', 'coefficient')
        frozen_columns = 1
        # rendered inline in the parent cell rather than on its own sheet
        table_format = TableFormat.cell
        ordering = ('species',)

    def serialize(self, show_compartment=True, show_coefficient_sign=True):
        """ Serialize related object

        Args:
            show_compartment (:obj:`bool`, optional): if true, show compartment
            show_coefficient_sign (:obj:`bool`, optional): if true, show coefficient sign

        Returns:
            :obj:`str`: string representation of a species and a coefficient
        """
        return self._serialize(self.species, self.coefficient,
                               show_compartment=show_compartment,
                               show_coefficient_sign=show_coefficient_sign)

    @staticmethod
    def _serialize(species, coefficient, show_compartment=True, show_coefficient_sign=True):
        """ Serialize values

        Args:
            species (:obj:`Species`): species
            coefficient (:obj:`float`): coefficient
            show_compartment (:obj:`bool`, optional): if true, show compartment
            show_coefficient_sign (:obj:`bool`, optional): if true, show coefficient sign

        Returns:
            :obj:`str`: string representation of a species and a coefficient
        """
        coefficient = float(coefficient)
        if not show_coefficient_sign:
            coefficient = abs(coefficient)
        if coefficient == 1:
            # a coefficient of 1 is implicit in the text form
            coefficient_str = ''
        elif coefficient % 1 == 0 and abs(coefficient) < 1000:
            # small integral coefficients print as plain integers, e.g. '(2) '
            coefficient_str = '({:.0f}) '.format(coefficient)
        else:
            # otherwise scientific notation, e.g. '(2.500000e+00) '
            coefficient_str = '({:e}) '.format(coefficient)
        if show_compartment:
            return '{}{}'.format(coefficient_str, species.serialize())
        else:
            return '{}{}'.format(coefficient_str, species.species_type.get_primary_attribute())

    @classmethod
    def deserialize(cls, attribute, value, objects, compartment=None):
        """ Deserialize value

        Args:
            attribute (:obj:`Attribute`): attribute
            value (:obj:`str`): String representation
            objects (:obj:`dict`): dictionary of objects, grouped by model
            compartment (:obj:`Compartment`, optional): compartment; when given,
                `value` must omit the compartment suffix and this compartment is
                used to build the species id

        Returns:
            :obj:`tuple` of `list` of `SpeciesCoefficient`, `InvalidAttribute` or `None`: tuple of cleaned value
                and cleaning error
        """
        errors = []
        # strip the ^...$ anchors so the id patterns can be embedded below
        st_id = SpeciesType.id.pattern[1:-1]
        comp_id = Compartment.id.pattern[1:-1]
        if compartment:
            # compartment supplied externally: match '(<coef>) <species_type>'
            pattern = r'^(\(((\-?\d*\.?\d+|\d+\.)(e[\-\+]?\d+)?)\) )*({})$'.format(st_id)
        else:
            # compartment inline: match '(<coef>) <species_type>[<compartment>]'
            pattern = r'^(\(((\-?\d*\.?\d+|\d+\.)(e[\-\+]?\d+)?)\) )*({}\[{}\])$'.format(st_id, comp_id)
        match = re.match(pattern, value, flags=re.I)
        if match:
            # NOTE(review): this rebind of `errors` is never used afterwards
            errors = []
            # NOTE(review): group numbers 2 and 5 assume st_id/comp_id contain
            # no extra capture groups -- confirm before changing the id patterns.
            coefficient = float(match.group(2) or 1.)
            if compartment:
                species_id = Species.gen_id(match.group(
                    5), compartment.get_primary_attribute())
            else:
                species_id = match.group(5)
            species, error = Species.deserialize(
                attribute, species_id, objects)
            if error:
                return (None, error)
            # reuse an existing coefficient object with the same serialization
            serial_val = cls._serialize(species, coefficient)
            if cls in objects and serial_val in objects[cls]:
                return (objects[cls][serial_val], None)
            obj = cls(species=species, coefficient=coefficient)
            if cls not in objects:
                objects[cls] = {}
            objects[cls][obj.serialize()] = obj
            return (obj, None)
        else:
            attr = cls.Meta.attributes['species']
            return (None, InvalidAttribute(attr, ['Invalid species coefficient']))
class PolymerSpeciesType(SpeciesType):
    """ Knowledge of a polymer

    Attributes:
        circular (:obj:`bool`): is the polymer circular
        double_stranded (:obj:`bool`): is the polymer double stranded

    Related attributes:
        loci (:obj:`list` of :obj:`PolymerLocus`): loci
    """
    circular = obj_tables.BooleanAttribute()
    double_stranded = obj_tables.BooleanAttribute()

    class Meta(obj_tables.Model.Meta):
        attribute_order = ('id', 'name', 'circular', 'double_stranded',
                           'comments', 'references', 'identifiers')

    @abc.abstractmethod
    def get_seq(self):
        """ Get the polymer sequence

        Returns:
            :obj:`Bio.Seq.Seq`: sequence
        """
        pass  # pragma: no cover

    def get_len(self):
        """ Get the polymer length

        Returns:
            :obj:`int`: length
        """
        return len(self.get_seq())

    def get_subseq(self, start, end, strand=PolymerStrand.positive):
        """ Get a subsequence

        Args:
            start (:obj:`int`): start coordinate (1-indexed)
            end (:obj:`int`): end coordinate (1-indexed, inclusive)
            strand (:obj:`PolymerStrand`, optional): strand

        Returns:
            :obj:`Bio.Seq.Seq`: sequence

        Raises:
            :obj:`ValueError`: if the polymer is linear and the start or end coordinates
                are less than 1 or greater than the length of the sequence
        """
        seq = self.get_seq()
        seq_len = len(seq)
        # convert to zero-based indexing
        start -= 1
        if self.circular:
            # shift both coordinates by whole turns so `start` lands in
            # [0, seq_len); `end` may still extend beyond the sequence
            n_wrap = int(math.floor(start / seq_len))
            start = start - seq_len * n_wrap
            end = end - seq_len * n_wrap
        elif start < 0 or end > seq_len:
            raise ValueError('Start and end coordinates for linear polymers must be at '
                             'least 1 and less than the length of the sequence')
        if end <= seq_len:
            # subsequence does not cross the origin
            pos_seq = seq[start:end]
        else:
            # wraps around the origin: tail + whole copies + head
            # NOTE(review): the middle copies are built from str(seq); relies on
            # Bio.Seq supporting concatenation with str -- confirm.
            pos_seq = seq[start:] + \
                str(seq) * (int(math.floor(end / seq_len)) - 1) + \
                seq[0:end % seq_len]
        if strand == PolymerStrand.positive:
            return pos_seq
        else:
            return pos_seq.reverse_complement()
class PolymerLocus(KnowledgeBaseObject):
    """ Knowledge about a locus of a polymer

    Attributes:
        polymer (:obj:`PolymerSpeciesType`): polymer
        start (:obj:`int`): start position (1-indexed, inclusive)
        end (:obj:`int`): end position (1-indexed, inclusive)
        strand (:obj:`PolymerStrand`): strand
        references (:obj:`list` of :obj:`Reference`): references
        identifiers (:obj:`list` of :obj:`Identifier`): identifiers
    """
    cell = obj_tables.ManyToOneAttribute(Cell, related_name='loci')
    polymer = obj_tables.ManyToOneAttribute(PolymerSpeciesType, related_name='loci')
    start = obj_tables.IntegerAttribute()
    end = obj_tables.IntegerAttribute()
    references = obj_tables.ManyToManyAttribute(Reference, related_name='loci')
    identifiers = IdentifierAttribute(related_name='loci')
    strand = obj_tables.EnumAttribute(
        PolymerStrand, default=PolymerStrand.positive)

    class Meta(obj_tables.Model.Meta):
        attribute_order = ('id', 'name', 'polymer', 'strand', 'start', 'end', 'identifiers', 'references', 'comments')

    def get_seq(self):
        """ Get the sequence

        Returns:
            :obj:`Bio.Seq.Seq`: sequence
        """
        return self.polymer.get_subseq(self.start, self.end, strand=self.strand)

    def get_len(self):
        """ Get the length

        Returns:
            :obj:`int`: length
        """
        # coordinates are inclusive, so the length is |start - end| + 1
        return abs(self.start - self.end) + 1

    def get_direction(self):
        """ Returns the direction of the polymer feature defined by its strand and start/end coordinates

        Returns:
            :obj:`PolymerDirection`: direction (in ['forward', 'reverse'])

        Raises:
            :obj:`ValueError`: start and end coordinate of chromosome feature can not be the same
            :obj:`Exception`: strand is not member of PolymerStrand
        """
        # the reading direction flips when the coordinates run backwards
        # (start > end) relative to the strand
        if self.start < self.end:
            if self.strand == PolymerStrand.positive:
                return PolymerDirection.forward
            elif self.strand == PolymerStrand.negative:
                return PolymerDirection.reverse
            else:
                raise Exception('Unrecognized polymer strand ({}) found for {}.'.format(self.strand, self.id))
        elif self.start > self.end:
            if self.strand == PolymerStrand.positive:
                return PolymerDirection.reverse
            elif self.strand == PolymerStrand.negative:
                return PolymerDirection.forward
            else:
                raise Exception('Unrecognized polymer strand ({}) found for {}.'.format(self.strand, self.id))
        elif self.start == self.end:
            raise ValueError('Start and end position of chromosome feature can not be the same (Chrom feature id: {}).'.format(self.id))
class ObservableExpression(obj_tables.Model, Expression):
    """ A mathematical expression of Observables and Species

    The expression used by a `Observable`.

    Attributes:
        expression (:obj:`str`): mathematical expression for an Observable
        species (:obj:`list` of :obj:`Species`): Species used by this Observable expression
        observables (:obj:`list` of :obj:`Observable`): other Observables used by this Observable expression

    Related attributes:
        observable (:obj:`Observable`): observable
    """
    expression = LongStringAttribute(primary=True, unique=True, default='')
    species = ManyToManyAttribute(Species, related_name='observable_expressions')
    observables = ManyToManyAttribute('Observable', related_name='observable_expressions')

    class Meta(obj_tables.Model.Meta, Expression.Meta):
        table_format = TableFormat.cell
        # models whose instances may appear as terms in the expression
        expression_term_models = ('Species', 'Observable')
        # observables are restricted to linear combinations of their terms
        expression_is_linear = True
        expression_unit_registry = unit_registry

    def serialize(self):
        """ Generate string representation

        Returns:
            :obj:`str`: string representation
        """
        # delegate to the shared Expression implementation
        return Expression.serialize(self)

    @classmethod
    def deserialize(cls, value, objects):
        """ Deserialize value

        Args:
            value (:obj:`str`): String representation
            objects (:obj:`dict`): dictionary of objects, grouped by model

        Returns:
            :obj:`tuple` of :obj:`ObservableExpression`, `InvalidAttribute` or `None`:
                tuple of cleaned value and cleaning error
        """
        return Expression.deserialize(cls, value, objects)
class Observable(KnowledgeBaseObject):
""" Observable: a linear function of other Observables and Species
Attributes:
cell (:obj:`Cell`): cell
expression (:obj:`ObservableExpression`): mathematical expression for an Observable
units (:obj:`unit_registry.Unit`): units of expression
references (:obj:`list` of :obj:`Reference`): references
identifiers (:obj:`list` of :obj:`Identifier`): identifiers
Related attributes:
observable_expressions (:obj:`list` of :obj:`ObservableExpression`): observable expressions
rate_law_expressions (:obj:`list` of :obj:`RateLawExpression`): rate law expressions
"""
cell = ManyToOneAttribute(Cell, related_name='observables')
expression = ManyToOneExpressionAttribute(ObservableExpression, related_name='observable',
min_related=1, min_related_rev=1)
# --- file boundary (concatenation artifact) ---
# source repo: lygztq/gomoku (was a scaffolding tag, not valid Python)
import abc
import copy
import numpy as np
import six
from pygomoku.Board import Board
from pygomoku.mcts import policy_fn
from pygomoku.mcts.progressbar import ProgressBar
def softmax(x):
    """Numerically stable softmax over the elements of `x`."""
    # Shifting by the maximum keeps exp() from overflowing; the result is
    # mathematically unchanged.
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)
def action_prob_via_vis_times(vis_times):
    """Convert MCTS visit counts into a probability distribution.

    Args:
        vis_times: array-like of non-negative per-action visit counts.

    Returns:
        :obj:`numpy.ndarray`: probabilities proportional to the visit counts.
    """
    # Bug fix: the previous implementation subtracted the maximum before
    # normalizing (without exp, unlike softmax), which assigned the
    # most-visited action probability 0 and divided by zero whenever all
    # counts were equal. Visit counts are already non-negative, so
    # normalize them directly.
    vis_times = np.asarray(vis_times, dtype=float)
    probs = vis_times / np.sum(vis_times)
    return probs
@six.add_metaclass(abc.ABCMeta)
class TreeNode(object):
    """
    The abstract class for a tree node in the search tree.
    """
    @abc.abstractmethod
    def expand(self, action_priors):
        """Expand tree node by creating new children.

        Args:
            action_priors:
                a list of available actions (for UCT), or
                a list of tuples of available actions and their prior probs.
        """
        pass

    @abc.abstractmethod
    def select(self, weight_c):
        """Select an action among the children of the current node.

        Return:
            A tuple, (action, next_node)
        """
        pass

    @abc.abstractmethod
    def update(self, bp_value):
        """Update node value from leaf.
        """
        pass

    @abc.abstractmethod
    def backPropagation(self, bp_value):
        """Backpropagate the final result from the leaf to the root.
        """
        pass

    @abc.abstractmethod
    def evaluate(self, weight_c):
        """Calculate and return the UCB value for this node.

        Args:
            weight_c: a number in (0, inf) controlling the relative impact of
                value Q, and prior probability P, on this node's score.
        """
        pass

    @abc.abstractmethod
    def is_root(self):
        # True iff this node has no parent
        pass

    @abc.abstractmethod
    def is_leaf(self):
        # True iff this node has no children
        pass
@six.add_metaclass(abc.ABCMeta)
class TreeSearch(object):
    """The abstract class for tree search.
    """
    @abc.abstractmethod
    def _playout(self, state):
        """Run a single playout from the root to the leaf, getting a value at
        the leaf and propagating it back through its parents.

        State is modified in-place, so a copy must be provided.
        """
        pass

    # @abc.abstractmethod
    # def _evaluateRollout(self, state, limit):
    #     """Use the rollout policy to play until the end of the game,
    #     returning +1 if the current player wins, -1 if the opponent wins,
    #     and 0 if it is a tie.
    #     """
    #     pass

    @abc.abstractmethod
    def getMove(self, state):
        """Runs all playouts sequentially and returns the most visited action.

        Args:
            state: the current game state

        Return: the selected action
        """
        pass

    @abc.abstractmethod
    def updateWithMove(self, last_move):
        """Step forward in the tree, keeping everything we already know
        about the subtree.
        """
        pass

    @abc.abstractmethod
    def reset(self):
        # drop all accumulated search statistics
        pass

    @abc.abstractmethod
    def __str__(self):
        pass
class MCTSTreeNode(TreeNode):
    """A node in the MCTS tree. Each node keeps track of its own value Q,
    prior probability P, and its visit-count-adjusted prior score u.

    Attributes:
        parent: The parent node for current node. Root's parent is None.
        children: A dict mapping action -> corresponding child node.
        _vis_times: Number of times this node has been visited.
        _Q: Quality value; measures the value of exploitation for the node.
        _U: Exploration value; shrinks as the node accumulates visits.
        _P: Prior probability of exploring this node (i.e. of taking its
            corresponding action).
    """

    def __init__(self, parent, prior_prob):
        self.parent = parent
        self.children = {}  # a map from action to node
        self._vis_times = 0
        self._Q = 0  # Q = sum_{all rollouts}(rollout_result) / vis_times
        self._U = 0  # U = prior_prob / (1 + vis_times)
        self._P = prior_prob

    def expand(self, action_priors):
        """Expand this node by creating all its children.

        Args:
            action_priors: iterable of (action, prior probability) pairs,
                one per child to create.
        """
        for action, prob in action_priors:
            if action not in self.children:
                self.children[action] = MCTSTreeNode(self, prob)

    def select(self, weight_c):
        """Select the child with maximum action value Q plus bonus u(P).

        Return: A tuple of (action, next_node)
        """
        return max(self.children.items(),
                   key=lambda act_node: act_node[1].evaluate(weight_c))

    def update(self, bp_value):
        """Update node values from a leaf evaluation.

        Args:
            bp_value: the value of the subtree evaluation from the current
                player's perspective.
        """
        self._vis_times += 1
        # Running-average form of Q:
        #   Q_{N+1} = Q_N + (v_{N+1} - Q_N) / (N + 1)
        self._Q += float(bp_value - self._Q) / self._vis_times

    def backPropagation(self, bp_value):
        """Backpropagate the final result from this node up to the root."""
        self.update(bp_value)
        if self.parent:  # if has parent, not root
            # Sign flip: a good result for one player is bad for the other.
            self.parent.backPropagation(-bp_value)

    def evaluate(self, weight_c):
        """Calculate and return the selection score for this node.

        Combines the exploitation term Q with the exploration term U (the
        prior scaled by the parent's visits, damped by this node's visits).

        Args:
            weight_c: a number in (0, inf) controlling the relative impact of
                value Q, and prior probability P, on this node's score.
        """
        # NOTE(review): assumes a non-None parent; only children are scored
        # via select(), so the root should never reach this code path.
        self._U = self._P * \
            np.sqrt(self.parent._vis_times) / (1 + self._vis_times)
        return self._Q + weight_c * self._U

    def is_leaf(self):
        # Idiomatic truth test (was: `False if self.children else True`).
        return not self.children

    def is_root(self):
        return self.parent is None

    @property
    def vis_times(self):
        return self._vis_times

    @property
    def Q_value(self):
        return self._Q
class MCTS(TreeSearch):
"""
The Monte Carlo Tree Search.
Attributes:
root: The root node for search tree.
_expand_policy: A function that takes in a board state and outputs
a list of (action, probability) tuples which used for node expanding
and also a score between in [-1,1] (i.e. The expected value of the end
game score from the current player's perspective, in pure MCTS without
Neural network, this value will be 0) for the current player.
_rollout_policy: A function similar to expand_policy, used for random play
in rollout phase.
_weight_c: a number in (0, inf) that controls how quickly exploration
converges to the maximum-value policy. A higher value means
relying on the prior less and prefer to try new node.
_compute_budget: How many times will we search in this tree (Num of playout).
_silent: If True, MCTS will not print log informations.
_expand_bound: Only expand a leaf node when its vis_times >= expand_bound
"""
def __init__(self, expand_policy, rollout_policy, weight_c=5, compute_budget=10000, expand_bound=1, silent=False):
    """Build a Monte Carlo tree search driver.

    Args:
        expand_policy: maps a board state to ((action, prior) list, value);
            used when expanding a leaf.
        rollout_policy: like expand_policy, used for random play in the
            rollout phase.
        weight_c: exploration/exploitation trade-off constant in (0, inf).
        compute_budget: number of playouts per move decision.
        expand_bound: a leaf is only expanded once its visit count reaches
            this bound.
        silent: if True, suppress progress and log output.
    """
    self.root = MCTSTreeNode(None, 1.0)  # fresh root with neutral prior
    self._expand_policy = expand_policy
    self._rollout_policy = rollout_policy
    self._weight_c = weight_c
    self._compute_budget = int(compute_budget)
    self._silent = silent
    # NOTE(review): compares against the raw (possibly float) compute_budget
    # rather than the int() version stored above -- confirm this is intended.
    self._expand_bound = min(expand_bound, compute_budget)
def reset(self):
    # Discard the whole search tree; the next search starts from scratch.
    self.root = MCTSTreeNode(None, 1.0)
def _playout(self, state):
    """Run a single playout from the root to the leaf, getting a value at
    the leaf and propagating it back through its parents.

    State is modified in-place, so a copy must be provided.
    """
    node = self.root
    # 1) selection: walk down the tree greedily by each child's UCB score
    while True:
        if node.is_leaf():  # if leaf or only root in tree.
            break
        action, node = node.select(self._weight_c)
        state.play(action)
    # 2) expansion: only grow the leaf once it has been visited enough times
    action_probs, _ = self._expand_policy(state)
    # Check for end of game
    is_end, _ = state.gameEnd()
    if not is_end and node.vis_times >= self._expand_bound:
        node.expand(action_probs)
    # 3) simulation: evaluate the leaf node by random rollout
    bp_value = self._evaluateRollout(state)
    # 4) backpropagation: the rollout value is negated for the leaf; each
    # parent level flips the sign again (see MCTSTreeNode.backPropagation)
    node.backPropagation(-bp_value)
def _evaluateRollout(self, state, limit=1000):
    """Use the rollout policy to play until the end of the game,
    returning +1 if the current player wins, -1 if the opponent wins,
    and 0 if it is a tie.

    Args:
        state: current board state
        limit: usually in gomoku we don't need this. The upper bound for
            rollout times.
    """
    # player color of the leaf node; the return value is from this
    # player's perspective
    player_color = state.current_player
    for _ in range(limit):
        is_end, winner_color = state.gameEnd()
        if is_end:
            break
        action_probs = self._rollout_policy(state)  # (action, prob)
        # greedy rollout: always take the highest-probability action
        next_action = max(action_probs, key=lambda x: x[1])[0]
        state.play(next_action)
    else:
        # for-else: runs only when the loop exhausted `limit` without a break
        if not self._silent:
            print("[Warning]: rollout exceeds the limit({})".format(limit))
    # NOTE(review): if limit <= 0 the loop body never runs and winner_color
    # is unbound (NameError) -- confirm limit is always positive.
    if winner_color is None:
        return 0
    else:
        return 1 if winner_color == player_color else -1
def getMove(self, state):
    """Runs all playouts sequentially and returns the most visited action.

    Args:
        state: the current game state

    Return: the selected action
    """
    # if at the beginning of game, we should put stone at center.
    # (assumes state.availables spans the full board when the board is empty)
    if state.is_empty:
        return len(state.availables) // 2
    if self._silent:
        for _ in range(self._compute_budget):
            # every playout mutates the state, so always search on a copy
            state_copy = copy.deepcopy(state)
            self._playout(state_copy)
    else:
        print("Thinking...")
        pb = ProgressBar(self._compute_budget, total_sharp=20)
        for _ in range(self._compute_budget):
            pb.iterStart()
            state_copy = copy.deepcopy(state)
            self._playout(state_copy)
            pb.iterEnd()
    # choose the root child with the highest visit count ("robust child")
    return max(self.root.children.items(),
               key=lambda act_node: act_node[1].vis_times)[0]
    # return max(self.root.children.items(),
    #            key=lambda act_node: act_node[1].Q_value)[0]
def testOut(self):
    """Debugging helper: the root's children ordered by visit count.

    Returns:
        list of (action, node) tuples sorted by each child's visit count,
        ascending.
    """
    # Bug fix: the previous key (`lambda x: x[-1]`) sorted by the node
    # object itself; MCTSTreeNode defines no ordering, so any root with
    # more than one child raised TypeError. Sort by visit count instead
    # (the same statistic getMove uses).
    return sorted(self.root.children.items(), key=lambda item: item[1].vis_times)
def think(self, state, decay_level=100):
"""Consider the current board state and give a suggested move.
| |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import hashlib
import itertools
import pickle as c_pickle
import shutil
import time
import typing
import uuid
from collections import Iterable
from concurrent.futures import ProcessPoolExecutor as Executor
from contextlib import ExitStack
from functools import partial
from heapq import heapify, heappop, heapreplace
from operator import is_not
from pathlib import Path
import cloudpickle as f_pickle
import lmdb
import numpy as np
from fate_arch.common import Party, file_utils
from fate_arch.common.log import getLogger
LOGGER = getLogger()
# noinspection PyPep8Naming
class Table(object):
    """Partitioned key/value table backed by one LMDB environment per partition.

    Keys are hashed (on their pickled bytes) to pick a partition. The
    computation methods (map/reduce/join/...) delegate to the owning Session,
    which runs one task per partition in a process pool.
    """

    def __init__(
        self,
        session: "Session",
        namespace: str,
        name: str,
        partitions,
        need_cleanup=True,
    ):
        # when True, the backing storage is destroyed when this object is GC'd
        self._need_cleanup = need_cleanup
        self._namespace = namespace
        self._name = name
        self._partitions = partitions
        self._session = session

    @property
    def partitions(self):
        # number of LMDB partitions
        return self._partitions

    @property
    def name(self):
        return self._name

    @property
    def namespace(self):
        return self._namespace

    def __del__(self):
        # NOTE(review): destroying storage from __del__ runs at unpredictable
        # times (GC / interpreter shutdown); failures here are hard to surface.
        if self._need_cleanup:
            self.destroy()

    def __str__(self):
        return f"<Table {self._namespace}|{self._name}|{self._partitions}|{self._need_cleanup}>"

    def __repr__(self):
        return self.__str__()

    def destroy(self):
        """Drop every partition database, the meta entry, and the storage dir."""
        for p in range(self._partitions):
            with self._get_env_for_partition(p, write=True) as env:
                db = env.open_db()
                with env.begin(write=True) as txn:
                    txn.drop(db)
        table_key = f"{self._namespace}.{self._name}"
        _get_meta_table().delete(table_key)
        path = _get_storage_dir(self._namespace, self._name)
        shutil.rmtree(path, ignore_errors=True)

    def take(self, n, **kwargs):
        """Return the first `n` (key, value) pairs from collect()."""
        if n <= 0:
            raise ValueError(f"{n} <= 0")
        return list(itertools.islice(self.collect(**kwargs), n))

    def count(self):
        """Total number of entries summed over all partitions."""
        cnt = 0
        for p in range(self._partitions):
            with self._get_env_for_partition(p) as env:
                cnt += env.stat()["entries"]
        return cnt

    # noinspection PyUnusedLocal
    def collect(self, **kwargs):
        """Yield all (key, value) pairs, k-way merged across partitions.

        LMDB cursors iterate in sorted byte order, so a heap merge of the
        per-partition cursors yields pairs sorted by serialized key.
        """
        iterators = []
        with ExitStack() as s:
            for p in range(self._partitions):
                env = s.enter_context(self._get_env_for_partition(p))
                txn = s.enter_context(env.begin())
                iterators.append(s.enter_context(txn.cursor()))
            # Merge sorted
            entries = []
            for _id, it in enumerate(iterators):
                if it.next():
                    key, value = it.item()
                    entries.append([key, value, _id, it])
            heapify(entries)
            while entries:
                key, value, _, it = entry = entries[0]
                yield c_pickle.loads(key), c_pickle.loads(value)
                if it.next():
                    # advance this cursor and restore the heap invariant
                    entry[0], entry[1] = it.item()
                    heapreplace(entries, entry)
                else:
                    # cursor exhausted; remove it from the merge
                    _, _, _, it = heappop(entries)

    def reduce(self, func):
        """Reduce all values with `func`; returns None for an empty table."""
        # noinspection PyProtectedMember
        rs = self._session._submit_unary(
            func, _do_reduce, self._partitions, self._name, self._namespace
        )
        # drop partitions that produced no partial result
        rs = [r for r in filter(partial(is_not, None), rs)]
        if len(rs) <= 0:
            return None
        rtn = rs[0]
        for r in rs[1:]:
            rtn = func(rtn, r)
        return rtn

    def map(self, func):
        """Apply `func` to every entry, producing a new table."""
        return self._unary(func, _do_map)

    def mapValues(self, func):
        """Apply `func` to every value, keys unchanged."""
        return self._unary(func, _do_map_values)

    def flatMap(self, func):
        """Apply `func` producing multiple pairs per entry, then re-shuffle.

        The save_as re-hashes output keys onto their correct partitions.
        """
        _flat_mapped = self._unary(func, _do_flat_map)
        return _flat_mapped.save_as(
            name=str(uuid.uuid1()),
            namespace=_flat_mapped.namespace,
            partition=self._partitions,
            need_cleanup=True,
        )

    def applyPartitions(self, func):
        """Apply `func` once per partition (partition iterator in, value out)."""
        return self._unary(func, _do_apply_partitions)

    def mapPartitions(self, func, preserves_partitioning=False):
        """Map each partition's iterator; optionally skip the re-shuffle.

        Args:
            func: callable taking a partition iterator.
            preserves_partitioning: if True, trust that `func` kept keys on
                their original partitions and return the raw result.
        """
        un_shuffled = self._unary(func, _do_map_partitions)
        if preserves_partitioning:
            return un_shuffled
        return un_shuffled.save_as(
            name=str(uuid.uuid1()),
            namespace=un_shuffled.namespace,
            partition=self._partitions,
            need_cleanup=True,
        )

    def mapReducePartitions(self, mapper, reducer):
        """Map each partition, pre-reduce locally, then merge with `reducer`."""
        dup = _create_table(
            self._session,
            str(uuid.uuid1()),
            self.namespace,
            self._partitions,
            need_cleanup=True,
        )

        def _dict_reduce(a: dict, b: dict):
            # merge b into a, combining colliding keys with `reducer`
            for k, v in b.items():
                if k not in a:
                    a[k] = v
                else:
                    a[k] = reducer(a[k], v)
            return a

        def _local_map_reduce(it):
            # map one partition and pre-reduce duplicate keys locally
            ret = {}
            for _k, _v in mapper(it):
                if _k not in ret:
                    ret[_k] = _v
                else:
                    ret[_k] = reducer(ret[_k], _v)
            return ret

        dup.put_all(
            self.applyPartitions(_local_map_reduce).reduce(_dict_reduce).items()
        )
        return dup

    def glom(self):
        """Gather each partition's entries into a single value."""
        return self._unary(None, _do_glom)

    def sample(self, fraction, seed=None):
        """Randomly sample roughly `fraction` of the entries."""
        return self._unary((fraction, seed), _do_sample)

    def filter(self, func):
        """Keep only entries for which `func` is truthy."""
        return self._unary(func, _do_filter)

    def join(self, other: "Table", func):
        """Join on keys; `func(v1, v2)` combines matching values."""
        return self._binary(other, func, _do_join)

    def subtractByKey(self, other: "Table"):
        """Entries of self whose keys do not occur in `other`."""
        # NOTE(review): `func` here is a descriptive string, not a callable --
        # presumably only used as a task identifier by _do_subtract_by_key;
        # confirm against its implementation.
        func = f"{self._namespace}.{self._name}-{other._namespace}.{other._name}"
        return self._binary(other, func, _do_subtract_by_key)

    def union(self, other: "Table", func=lambda v1, v2: v1):
        """Union of both tables; `func` resolves key collisions (default: left wins)."""
        return self._binary(other, func, _do_union)

    # noinspection PyProtectedMember
    def _map_reduce(self, mapper, reducer):
        # NOTE(review): Session defines _submit_map_reduce_in_partition, not
        # _submit_map_reduce -- calling this would raise AttributeError; confirm.
        results = self._session._submit_map_reduce(
            mapper, reducer, self._partitions, self._name, self._namespace
        )
        result = results[0]
        # noinspection PyProtectedMember
        return _create_table(
            session=self._session,
            name=result.name,
            namespace=result.namespace,
            partitions=self._partitions,
        )

    def _unary(self, func, do_func):
        """Run `do_func` on every partition; wrap the output as a new Table."""
        # noinspection PyProtectedMember
        results = self._session._submit_unary(
            func, do_func, self._partitions, self._name, self._namespace
        )
        result = results[0]
        # noinspection PyProtectedMember
        return _create_table(
            session=self._session,
            name=result.name,
            namespace=result.namespace,
            partitions=self._partitions,
        )

    def _binary(self, other: "Table", func, do_func):
        """Run `do_func` over aligned partition pairs of self and `other`.

        If partition counts differ, the smaller table is re-saved with the
        larger table's partition count first so partitions line up.
        """
        session_id = self._session.session_id
        left, right = self, other
        if left._partitions != right._partitions:
            if other.count() > self.count():
                left = left.save_as(
                    str(uuid.uuid1()), session_id, partition=right._partitions
                )
            else:
                right = other.save_as(
                    str(uuid.uuid1()), session_id, partition=left._partitions
                )
        # noinspection PyProtectedMember
        results = self._session._submit_binary(
            func,
            do_func,
            left._partitions,
            left._name,
            left._namespace,
            right._name,
            right._namespace,
        )
        result: _Operand = results[0]
        # noinspection PyProtectedMember
        return _create_table(
            session=self._session,
            name=result.name,
            namespace=result.namespace,
            partitions=left._partitions,
        )

    def save_as(self, name, namespace, partition=None, need_cleanup=True):
        """Copy this table under a new (namespace, name), re-hashing keys."""
        if partition is None:
            partition = self._partitions
        # noinspection PyProtectedMember
        dup = _create_table(self._session, name, namespace, partition, need_cleanup)
        dup.put_all(self.collect())
        return dup

    def _get_env_for_partition(self, p: int, write=False):
        # open the LMDB environment backing partition `p`
        return _get_env(self._namespace, self._name, str(p), write=write)

    def put(self, k, v):
        """Store one key/value pair; returns LMDB's success flag."""
        k_bytes, v_bytes = _kv_to_bytes(k=k, v=v)
        p = _hash_key_to_partition(k_bytes, self._partitions)
        with self._get_env_for_partition(p, write=True) as env:
            with env.begin(write=True) as txn:
                return txn.put(k_bytes, v_bytes)

    def put_all(self, kv_list: Iterable):
        """Store many pairs: one write transaction per partition, committed
        only if every put succeeded, otherwise all aborted (all-or-nothing)."""
        txn_map = {}
        is_success = True
        with ExitStack() as s:
            for p in range(self._partitions):
                env = s.enter_context(self._get_env_for_partition(p, write=True))
                txn_map[p] = env, env.begin(write=True)
            for k, v in kv_list:
                try:
                    k_bytes, v_bytes = _kv_to_bytes(k=k, v=v)
                    p = _hash_key_to_partition(k_bytes, self._partitions)
                    is_success = is_success and txn_map[p][1].put(k_bytes, v_bytes)
                except Exception as e:
                    is_success = False
                    LOGGER.exception(f"put_all for k={k} v={v} fail. exception: {e}")
                    break
            for p, (env, txn) in txn_map.items():
                txn.commit() if is_success else txn.abort()

    def get(self, k):
        """Fetch the value for `k`, or None when absent."""
        k_bytes = _k_to_bytes(k=k)
        p = _hash_key_to_partition(k_bytes, self._partitions)
        with self._get_env_for_partition(p) as env:
            # NOTE(review): opens a write transaction for a pure read -- a
            # read-only txn would contend less; confirm this is intentional.
            with env.begin(write=True) as txn:
                old_value_bytes = txn.get(k_bytes)
                return (
                    None if old_value_bytes is None else c_pickle.loads(old_value_bytes)
                )

    def delete(self, k):
        """Remove `k`; return its previous value, or None if nothing was deleted."""
        k_bytes = _k_to_bytes(k=k)
        p = _hash_key_to_partition(k_bytes, self._partitions)
        with self._get_env_for_partition(p, write=True) as env:
            with env.begin(write=True) as txn:
                old_value_bytes = txn.get(k_bytes)
                if txn.delete(k_bytes):
                    return (
                        None
                        if old_value_bytes is None
                        else c_pickle.loads(old_value_bytes)
                    )
                return None
# noinspection PyMethodMayBeStatic
class Session(object):
    """Standalone in-process computing session.

    Owns a process pool and fans per-partition tasks out to it; every Table
    operation ultimately funnels through one of the _submit_* helpers.
    """

    def __init__(self, session_id, max_workers=None):
        # session_id doubles as the default namespace for temporary tables
        self.session_id = session_id
        self._pool = Executor(max_workers=max_workers)

    def __getstate__(self):
        # Deliberately returns None: the session (and its process pool)
        # must never travel through pickle to worker processes.
        pass

    def load(self, name, namespace):
        """Open an existing table in this session."""
        return _load_table(session=self, name=name, namespace=namespace)

    def create_table(self, name, namespace, partitions, need_cleanup, error_if_exist):
        """Create a table bound to this session."""
        return _create_table(
            session=self,
            name=name,
            namespace=namespace,
            partitions=partitions,
            need_cleanup=need_cleanup,
            error_if_exist=error_if_exist,
        )

    # noinspection PyUnusedLocal
    def parallelize(
        self, data: Iterable, partition: int, include_key: bool = False, **kwargs
    ):
        """Load an iterable into a fresh table with `partition` partitions."""
        if not include_key:
            # synthesize integer keys for bare values
            data = enumerate(data)
        fresh = _create_table(
            session=self,
            name=str(uuid.uuid1()),
            namespace=self.session_id,
            partitions=partition,
        )
        fresh.put_all(data)
        return fresh

    def cleanup(self, name, namespace):
        """Remove on-disk directories of tables matching `name` under `namespace`."""
        root = _get_data_dir()
        if not root.is_dir():
            LOGGER.error(f"illegal data dir: {root}")
            return
        # e.g.: '/fate/data/202109081519036144070_reader_0_0_host_10000'
        ns_dir = root.joinpath(namespace)
        if not ns_dir.is_dir():
            # remove role and party_id
            # e.g.: '202109081519036144070_reader_0_0'
            trimmed = '_'.join(ns_dir.stem.split('_')[:-2])
            # TODO: find where the dir was created
            ns_dir = ns_dir.with_name(trimmed)
            if not ns_dir.is_dir():
                # TODO: find the reason
                LOGGER.warning(f"namespace dir {ns_dir} does not exist")
                return
        for hit in ns_dir.glob(name):
            shutil.rmtree(hit)

    def stop(self):
        self._pool.shutdown()

    def kill(self):
        self._pool.shutdown()

    def _submit_unary(self, func, _do_func, partitions, name, namespace):
        """Run `_do_func` on every partition of (namespace, name); gather results."""
        info = _TaskInfo(
            self.session_id,
            function_id=str(uuid.uuid1()),
            function_bytes=f_pickle.dumps(func),
        )
        pending = [
            self._pool.submit(
                _do_func, _UnaryProcess(info, _Operand(namespace, name, p))
            )
            for p in range(partitions)
        ]
        return [fut.result() for fut in pending]

    def _submit_map_reduce_in_partition(
        self, mapper, reducer, partitions, name, namespace
    ):
        """Run the in-partition map-reduce task on every partition."""
        info = _MapReduceTaskInfo(
            self.session_id,
            function_id=str(uuid.uuid1()),
            map_function_bytes=f_pickle.dumps(mapper),
            reduce_function_bytes=f_pickle.dumps(reducer),
        )
        pending = [
            self._pool.submit(
                _do_map_reduce_in_partitions,
                _MapReduceProcess(info, _Operand(namespace, name, p)),
            )
            for p in range(partitions)
        ]
        return [fut.result() for fut in pending]

    def _submit_binary(
        self, func, do_func, partitions, name, namespace, other_name, other_namespace
    ):
        """Run `do_func` over aligned partition pairs of two tables."""
        info = _TaskInfo(
            self.session_id,
            function_id=str(uuid.uuid1()),
            function_bytes=f_pickle.dumps(func),
        )
        pending = []
        for p in range(partitions):
            lhs = _Operand(namespace, name, p)
            rhs = _Operand(other_namespace, other_name, p)
            pending.append(self._pool.submit(do_func, _BinaryProcess(info, lhs, rhs)))
        return [fut.result() for fut in pending]
class Federation(object):
    """Table-backed data exchange between federated parties.

    Each party owns a status table and an object table (created in __init__);
    the _put_* helpers write into the tables owned by the destination party.
    NOTE(review): the transport semantics are inferred from the table names
    and helper methods visible here -- confirm against the module's get/remote
    implementation (not visible in this chunk).
    """
    def _federation_object_key(self, name, tag, s_party, d_party):
        # Unique key for one transferred object: session, name, tag, and the
        # (source, destination) party identities.
        return f"{self._session_id}-{name}-{tag}-{s_party.role}-{s_party.party_id}-{d_party.role}-{d_party.party_id}"
    def __init__(self, session, session_id, party: Party):
        self._session_id = session_id
        self._party: Party = party
        self._loop = asyncio.get_event_loop()
        self._session = session
        # This party's own tables: need_cleanup=True so they are removed with
        # the session.
        self._federation_status_table = _create_table(
            session=session,
            name=self._get_status_table_name(self._party),
            namespace=self._session_id,
            partitions=1,
            need_cleanup=True,
            error_if_exist=False,
        )
        self._federation_object_table = _create_table(
            session=session,
            name=self._get_object_table_name(self._party),
            namespace=self._session_id,
            partitions=1,
            need_cleanup=True,
            error_if_exist=False,
        )
        # Lazily-built caches of other parties' tables (see _get_other_*).
        self._other_status_tables = {}
        self._other_object_tables = {}
    @staticmethod
    def _get_status_table_name(party):
        return f"__federation_status__.{party.role}_{party.party_id}"
    @staticmethod
    def _get_object_table_name(party):
        return f"__federation_object__.{party.role}_{party.party_id}"
    def _get_other_status_table(self, party):
        """Return (and cache) a handle to the status table owned by `party`."""
        if party in self._other_status_tables:
            return self._other_status_tables[party]
        # need_cleanup=False: the owning party is responsible for removal.
        table = _create_table(
            self._session,
            name=self._get_status_table_name(party),
            namespace=self._session_id,
            partitions=1,
            need_cleanup=False,
            error_if_exist=False,
        )
        self._other_status_tables[party] = table
        return table
    def _get_other_object_table(self, party):
        """Return (and cache) a handle to the object table owned by `party`."""
        if party in self._other_object_tables:
            return self._other_object_tables[party]
        table = _create_table(
            self._session,
            name=self._get_object_table_name(party),
            namespace=self._session_id,
            partitions=1,
            need_cleanup=False,
            error_if_exist=False,
        )
        self._other_object_tables[party] = table
        return table
    # noinspection PyProtectedMember
    def _put_status(self, party, _tagged_key, value):
        # Record `value` under `_tagged_key` in the destination party's
        # status table.
        self._get_other_status_table(party).put(_tagged_key, value)
# noinspection PyProtectedMember
def _put_object(self, party, _tagged_key, | |
#%%
import os
cwd = os.getcwd()
# Run relative to this file's directory so data/checkpoint/log paths resolve
# regardless of the invocation directory.
dir_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(dir_path)
import argparse
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torchvision.utils
import numpy as np
import os.path
from scipy.io import loadmat
from model import *
from utils import *
from args_python import *
from matplotlib import pyplot as plt
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from sklearn.model_selection import train_test_split
import hdf5storage
# Label vector layout used throughout: [EulerN Euler angles | space scale | gain]
# (see the output[:, EulerN] / output[:, EulerN+1] indexing in train/validate/test).
EulerN=3
QuaternionN=4
ScaleSpaceAndGainN=2
class CustomDataset(Dataset):
    """Dataset over a tuple of aligned tensors with an optional transform.

    `tensors` is (inputs, labels); the transform, when given, is applied to
    the input tensor of each item only.
    """

    def __init__(self, tensors, transform=None):
        sample_count = tensors[0].size(0)
        # All tensors must agree on the first (sample) dimension.
        assert all(t.size(0) == sample_count for t in tensors)
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        item = self.tensors[0][index]
        if self.transform:
            item = self.transform(item)
        label = self.tensors[1][index]
        return item, label

    def __len__(self):
        return self.tensors[0].size(0)
#%%
def train(args, model, device, train_loader, optimizer, epoch, writer, Rbeta, zipped_vals, scheduler):
    """Train `model` for one epoch over `train_loader`.

    Loss = rotation loss (Euler angles converted to rotation matrices) +
    space-scale loss + gain loss. Every `args.log_interval` batches the
    running loss, a model graph, and parameter histograms are written to
    TensorBoard.

    Note: `Rbeta` and `zipped_vals` are unused here; they are kept for
    signature symmetry with validate()/test(). The scheduler is stepped once
    per batch (matches the OneCycleLR steps_per_epoch setup in main()).
    """
    model.train()
    run_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # Columns [0:EulerN] are Euler angles; convert both to rotation matrices.
        R_est = euler2R(output[:,0:EulerN])
        R_target = euler2R(target[:,0:EulerN])
        gt, pred, rot_loss = getlossrotation(False, R_est, R_target)
        # Column EulerN is the space scale, EulerN+1 the gain.
        gain_scale_loss = getlossspacescale(output[:,EulerN],target[:,EulerN]) + getlossgain(output[:,EulerN+1],target[:,EulerN+1])
        loss = rot_loss + gain_scale_loss
        if args.test:
            # Test mode: print one batch of predictions and skip optimization.
            print("Ground truth : {} \n Predicted values : {}".format(torch.transpose(gt,1,2), pred))
            break
        run_loss += loss.item()
        loss.backward()
        optimizer.step()
        scheduler.step()
        if (batch_idx+1) % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.8f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx * len(data) / len(train_loader.dataset), run_loss/args.log_interval)) #
            # grid = torchvision.utils.make_grid(data)
            writer.add_scalar('training_loss', run_loss/args.log_interval, epoch*len(train_loader)+batch_idx)
            # writer.add_image('images', grid)
            writer.add_graph(model, data)
            for tag, value in model.named_parameters():
                tag = tag.replace('.', '/')
                writer.add_histogram(tag, value.detach().cpu().numpy(), batch_idx+1)
            run_loss = 0.0
def validate(args, model, device, val_loader, Rbeta, zipped_vals):
    """Run one evaluation pass over `val_loader` and return the mean loss.

    In args.test mode, also prints ground-truth vs predicted rotations from
    the final batch. `Rbeta` and `zipped_vals` are unused (kept for signature
    symmetry with train()/test()).
    """
    model.eval()
    running = 0.0
    with torch.no_grad():
        for inputs, labels in val_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            preds = model(inputs)
            # Euler angles -> rotation matrices for the rotation loss.
            R_pred = euler2R(preds[:, 0:EulerN])
            R_true = euler2R(labels[:, 0:EulerN])
            gt, pred, rot_loss = getlossrotation(False, R_pred, R_true)
            scale_gain_loss = getlossspacescale(preds[:, EulerN], labels[:, EulerN]) \
                + getlossgain(preds[:, EulerN + 1], labels[:, EulerN + 1])
            running = running + (rot_loss + scale_gain_loss)
    running /= len(val_loader)
    print('\nValidation set: Average loss: {:.8f}\n'.format(running.item()))
    if args.test:
        print("Ground truth : {} \n\n Predicted values : {} \n".format(torch.transpose(gt, 1, 2), pred))
    return running
def test(args, model, device, test_loader, Rbeta, zipped_vals, data_stat):
    """Evaluate `model` on the test set.

    With --get_pred_only, runs inference on unlabeled images and saves the
    raw network outputs to <pred_folder>/pred_labels.mat; otherwise computes
    and prints the average test loss plus the final batch's ground-truth vs
    predicted rotations.

    Note: `Rbeta`, `zipped_vals` and `data_stat` are unused in this body;
    kept for signature symmetry with the callers.
    """
    if args.get_pred_only:
        model.eval()
        test_out_list = []
        with torch.no_grad():
            for data in test_loader:
                # A single-tensor TensorDataset yields 1-tuples.
                data = data[0].to(device)
                output = model(data)
                test_out_list.append(output.cpu().numpy())
        save_mat = np.concatenate(test_out_list)
        hdf5storage.savemat(args.pred_folder+'/pred_labels.mat', {'labeldata':save_mat})
    else:
        model.eval()
        test_loss = 0.0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                R_est = euler2R(output[:,0:EulerN])
                R_target = euler2R(target[:,0:EulerN])
                gt, pred, rot_loss = getlossrotation(True, R_est, R_target)
                gain_scale_loss = getlossspacescale(output[:,EulerN],target[:,EulerN]) + getlossgain(output[:,EulerN+1],target[:,EulerN+1])
                loss_test = rot_loss + gain_scale_loss
                test_loss += loss_test
        test_loss /= len(test_loader)
        print('\nTest set: Average loss: {:.8f}\n'.format(test_loss.item()))
        print("Ground truth : {} \n\n Predicted values : {} \n".format(torch.transpose(gt,1,2), pred))
        # value = torch.add(torch.matmul(pred,gt),-1*torch.eye(3))
        # print("Loss value for these sample {}".format(torch.norm(value,p='fro',dim=(2, 3))))
def main():
    """Command-line entry point: parse args, load data, then train or test.

    Side effects: reads .mat files from --RootDirectory4Data (or
    --pred_folder), writes TensorBoard logs, model checkpoints (training),
    or prediction .mat files (--get_pred_only).
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch 3D angle regression from 2D images')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    # Help text corrected to match the actual default (was "default: 1000").
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    # Help text corrected to match the actual default (was "default: 30").
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    # BUG FIX: was action='store_false' with default=False, so passing
    # --no-cuda could only ever re-assign False and the flag was a no-op.
    # 'store_true' makes the flag actually disable CUDA.
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--UseQuaternionNotEuler', action='store_true', default=False, help='give this flag in order to use the Quaternion representation, otherwise the Euler angles representation will be used')
    parser.add_argument('--ScaleSpaceMin', type=float, default=0.8, help='minimum value of the space scaling')
    parser.add_argument('--ScaleSpaceMax', type=float, default=1.2, help='maximum value of the space scaling')
    parser.add_argument('--GainMin', type=float, default=0.8, help='minimum value of the gain')
    parser.add_argument('--GainMax', type=float, default=1.2, help='maximum value of the gain')
    parser.add_argument('--RootDirectory4Data', default='./', help='the name of the root director for the data')
    parser.add_argument('--arch', default='VGG',help='the architecture to use. options are VGG, MLP for now. Can add more')
    # NOTE(review): action='store_false' means passing --carve_val DISABLES
    # carving (the default True carves). The flag name is misleading, but it
    # is kept as-is so existing launch scripts keep working.
    parser.add_argument('--carve_val', action='store_false', default=True, help='Whether validation set has to be carved out from the training set. Default is true')
    parser.add_argument('--test', action='store_true', default=False, help='Whether train or test mode. Default is train mode.')
    parser.add_argument('--get_pred_only', action='store_true', default=False, help='Get only predictions from images')
    parser.add_argument('--pred_folder', default='./', help='Directory of file with test images.')
    args = parser.parse_args()
    # args=Args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    # Load the training data; labels follow the [euler(3), space scale, gain]
    # layout used by train()/validate()/test().
    trainingdirectory = args.RootDirectory4Data+"/"+"training"
    trainingimagefile="imagefile.mat"
    traininglabelfile="labelfile.mat"
    train_images = hdf5storage.loadmat(os.path.join(trainingdirectory, trainingimagefile))['imagedata']
    train_labels = hdf5storage.loadmat(os.path.join(trainingdirectory, traininglabelfile))['labeldata']
    if args.carve_val:
        print("Carving out validation set from training set")
        train_images, val_images, train_labels, val_labels = train_test_split(train_images, train_labels, test_size=0.1, random_state=42)
    else:
        print("Loading validation set")
        validationdirectory = args.RootDirectory4Data+"/"+"validation"
        validationimagefile="imagefile.mat"
        validationlabelfile="labelfile.mat"
        val_images = hdf5storage.loadmat(os.path.join(validationdirectory, validationimagefile))['imagedata']
        val_labels = hdf5storage.loadmat(os.path.join(validationdirectory, validationlabelfile))['labeldata']
    # Add a singleton channel dimension; normalize everything with the
    # TRAINING mean/std (also applied to the test images further down).
    train_images = np.expand_dims(train_images,1)
    val_images = np.expand_dims(val_images,1)
    mean = np.mean(train_images)
    std = np.std(train_images)
    data_stat = [mean, std]
    print("Dataset mean is {}".format(mean))
    print("Dataset std is {}".format(std))
    norm_train_images = (train_images - mean)/std
    norm_val_images = (val_images - mean)/std
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_dataset = torch.utils.data.TensorDataset(torch.Tensor(norm_train_images), torch.Tensor(train_labels))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
    val_dataset = torch.utils.data.TensorDataset(torch.Tensor(norm_val_images), torch.Tensor(val_labels))
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # torch.autograd.set_detect_anomaly(True)
    # Model/optimizer/scheduler selection. OneCycleLR is stepped once per
    # batch in train(), hence steps_per_epoch=len(train_loader).
    if args.arch == "EulerGainVGG":
        model = EulerGainVGG(args).to(device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=5e-4, amsgrad=True)
        # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=1e-5, max_lr=1e-4, cycle_momentum=False, mode='exp_range')
        scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-5, cycle_momentum=False, steps_per_epoch=len(train_loader), epochs=args.epochs)
    elif args.arch == "EulerGainMLP":
        model = EulerGainMLP(args).to(device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-3, amsgrad=True)
        # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=1e-5, max_lr=1e-4, cycle_momentum=False, mode='exp_range')
        scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-5, cycle_momentum=False, steps_per_epoch=len(train_loader), epochs=args.epochs)
    elif args.arch == "QuaternionGainMLP":
        model = QuaternionGainMLP(args).to(device)
        optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-3, amsgrad=True)
        # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=1e-5, max_lr=1e-4, cycle_momentum=False, mode='exp_range')
        scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-5, cycle_momentum=False, steps_per_epoch=len(train_loader), epochs=args.epochs)
    else:
        # BUG FIX: previously an unrecognized --arch fell through silently and
        # crashed later with NameError on 'model'; fail fast with a clear
        # message instead.
        raise ValueError("Unknown --arch {!r}; expected EulerGainVGG, EulerGainMLP or QuaternionGainMLP".format(args.arch))
    if args.UseQuaternionNotEuler:
        ckpts_dir_name = f"checkpoints{args.RootDirectory4Data[7:]}/Quaternion_{args.epochs}/{args.arch}"
        log_dir = f"runs{args.RootDirectory4Data[7:]}/Quaternion_{args.epochs}/{args.arch}"
    else:
        ckpts_dir_name = f"checkpoints{args.RootDirectory4Data[7:]}/Euler_{args.epochs}/{args.arch}"
        log_dir = f"runs{args.RootDirectory4Data[7:]}/Euler_{args.epochs}/{args.arch}"
    # load data
    if args.get_pred_only:
        # Prediction-only path: load unlabeled images and dump model outputs.
        testingdirectory = args.pred_folder
        testingimagefile="imagefile.mat"
        test_images = hdf5storage.loadmat(os.path.join(testingdirectory, testingimagefile))['imagedata']
        test_images = np.expand_dims(test_images,1)
        norm_test_images = (test_images - mean)/std
        test_dataset = torch.utils.data.TensorDataset(torch.Tensor(norm_test_images))
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
        # NOTE(review): checkpoint name is hard-coded to epoch 100 -- confirm
        # it matches the last checkpoint written for args.epochs.
        model.load_state_dict(torch.load(f"{ckpts_dir_name}/angle_regress_100.pt")) # make sure to load from latest checkpoint
        print("Test set predictions\n")
        zipped_vals = None
        Rbeta = None
        test(args, model, device, test_loader, Rbeta, zipped_vals, data_stat)
    else:
        testingdirectory = args.RootDirectory4Data+"/"+"testing"
        testingimagefile="imagefile.mat"
        testinglabelfile="labelfile.mat"
        test_images = hdf5storage.loadmat(os.path.join(testingdirectory, testingimagefile))['imagedata']
        test_labels = hdf5storage.loadmat(os.path.join(testingdirectory, testinglabelfile))['labeldata']
        test_images = np.expand_dims(test_images,1)
        norm_test_images = (test_images - mean)/std
        test_dataset = torch.utils.data.TensorDataset(torch.Tensor(norm_test_images), torch.Tensor(test_labels))
        test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
        # model.load_state_dict(torch.load(f"{ckpts_dir_name}/angle_regress_100.pt"))
        os.makedirs(ckpts_dir_name, exist_ok=True)
        writer = SummaryWriter(log_dir=log_dir,flush_secs=10)
        Rbeta=None
        zipped_vals=None
        if not args.test:
            # Training loop: validate every epoch, checkpoint every 10 epochs.
            for epoch in range(1, args.epochs + 1):
                train(args, model, device, train_loader, optimizer, epoch, writer, Rbeta, zipped_vals, scheduler)
                v_loss = validate(args, model, device, val_loader, Rbeta, zipped_vals)
                writer.add_scalar('validation_loss', v_loss, epoch)
                if epoch%10==0:
                    torch.save(model.state_dict(),f"{ckpts_dir_name}/angle_regress_{epoch}.pt")
            writer.close()
        else:
            # NOTE(review): checkpoint name is hard-coded to epoch 100.
            model.load_state_dict(torch.load(f"{ckpts_dir_name}/angle_regress_100.pt")) # make sure to load from latest checkpoint
            print("Test set predictions\n")
            test(args, model, device, test_loader, Rbeta, zipped_vals, data_stat)
# Script entry point when executed directly (e.g. `python <script>.py ...`).
if __name__ == '__main__':
    main()
#%%
#####################################
# visualize few samples of the data
#####################################
# trainingdirectory="./data_big_Haar0.2/training"
# testingdirectory="./data_big_Haar0.2/testing"
# trainingimagefile="imagefile.mat"
# testingimagefile="imagefile.mat"
# traininglabelfile="labelfile.mat"
# testinglabelfile="labelfile.mat"
# #read the Matlab .mat files
# train_images = hdf5storage.loadmat(os.path.join(trainingdirectory, trainingimagefile))['imagedata']
# train_labels = hdf5storage.loadmat(os.path.join(trainingdirectory, traininglabelfile))['labeldata']
# test_images = hdf5storage.loadmat(os.path.join(testingdirectory, testingimagefile))['imagedata']
# test_labels = hdf5storage.loadmat(os.path.join(testingdirectory, testinglabelfile))['labeldata']
# train_images = np.expand_dims(train_images,1)
# test_images = np.expand_dims(test_images,1)
# use_cuda = False
# kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# train_dataset = CustomDataset(tensors=(torch.Tensor(train_images), torch.Tensor(train_labels)))
# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=24, shuffle=True, **kwargs, drop_last=False)
# for batch_idx, (data, target) in enumerate(train_loader):
# # print(data.shape)
# grid = torchvision.utils.make_grid(data)
# matplotlib_imshow(grid, one_channel=True)
# # print(target.numpy())
# break
# %%
###########################
# train test split of data
###########################
# import numpy as np
# import hdf5storage
# from sklearn.model_selection import train_test_split
# images = hdf5storage.loadmat("imagefile1.mat")["imagedata"]
# labels = hdf5storage.loadmat("labelfile.mat")["labeldata"]
# train_images, test_images, train_labels, test_labels = train_test_split(images, labels, test_size=0.1, random_state=42)
# print(train_images.shape)
# print(test_images.shape)
# print(train_labels.shape)
# print(test_labels.shape)
# hdf5storage.savemat('./training/imagefile.mat',{"imagedata":train_images})
# hdf5storage.savemat('./training/labelfile.mat',{"labeldata":train_labels})
# hdf5storage.savemat('./testing/imagefile.mat',{"imagedata":test_images})
# hdf5storage.savemat('./testing/labelfile.mat',{"labeldata":test_labels})
# test_images, val_images, test_labels, val_labels = train_test_split(val_images, val_labels, test_size=0.5, random_state=42)
# hdf5storage.savemat('./validation/imagefile.mat',{"imagedata":val_images})
# hdf5storage.savemat('./validation/labelfile.mat',{"labeldata":val_labels})
# %%
###########################
# read from tf events file
###########################
# import numpy as np
# from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
# import matplotlib as mpl
# import matplotlib.pyplot as plt
# import hdf5storage
# tf_size_guidance = {'scalars':10000}
# event_acc = EventAccumulator('./events.out.tfevents.1582062336.superman.11982.0', tf_size_guidance)
# event_acc.Reload()
# training_accuracies = event_acc.Scalars('training_loss')
# validation_accuracies = event_acc.Scalars('validation_loss')
# steps_train = len(training_accuracies)
# y_train = np.zeros([steps_train, 1])
# steps_val = len(validation_accuracies)
# y_val = np.zeros([steps_val, 1])
# for i in range(steps_train):
# y_train[i, 0] = training_accuracies[i][2] # value
# for i in range(steps_val):
# y_val[i, 0] = validation_accuracies[i][2] # value
# hdf5storage.savemat('./training_curve.mat',{'values':y_train})
# hdf5storage.savemat('./validation_curve.mat',{'values':y_val})
#%%
######################################################
# plot train val curves with x, y labels and title
######################################################
# import numpy as np
# from matplotlib import pyplot as plt
# train_file_name = 'Euler_train_noise05.csv'
# val_file_name = 'Euler_val_noise05.csv'
# train_data = np.genfromtxt(train_file_name, delimiter=',')
# train_data = train_data[1:,:]
# val_data = np.genfromtxt(val_file_name, delimiter=',')
# val_data = val_data[1:,:]
# plt.figure()
# plt.plot(train_data[:,1], train_data[:,2])
# plt.title('Training loss curve')
# plt.xlabel("Iterations")
# plt.ylabel("Loss")
# plt.savefig('train_loss.png')
# plt.figure()
# plt.plot(val_data[:,1], val_data[:,2])
# plt.title('Validation | |
''' This module contains the program that runs the numerical experiments.'''
from bayescg_k import bayescg_k
from bayescg import bayescg, bayescg_post_cov
from cgqs import cgqs
from utilities import mv_normal
import numpy as np
from scipy import linalg
from scipy.special import erfinv
import matplotlib.pyplot as plt
def bayescg_convergence(AVec, b, x0, SigVec, NormA, xTrue, it, delay=5, \
        samples=10, reorth=True, plt_title=True):
    """ Creates convergence plots for BayesCG

    This program must be used in a Jupyter notebook

    Parameters
    ----------
    AVec : function
        Function that computes matvec of matrix A
    b : numpy array
        Vector b
    x0 : numpy array
        Initial guess for x
    SigVec : function or None
        Function that computes matvec of prior covariance OR
        If None, then the Krylov prior (bayescg_k) is used
    NormA : float
        2-norm of A
    xTrue : numpy array,
        True solution of linear system
    it : int
        Number of iterations to perform
    delay : int or None, optional, default is 5
        Delay for computing error estimates (None is replaced by len(b)
        under the Krylov prior)
    samples : int or None, optional, default is 10
        Number of S-statistic samples to compute each iteration
        NOTE(review): the sampling block below calls SigVec and uses xm/SigASm,
        which only exist when SigVec is not None -- confirm samples is meant
        to be None whenever SigVec is None
    reorth : bool, optional, default is True
        Whether to reorthogonalize
    plt_title : bool, optional, default is True
        Whether to have the plot titles display

    Returns
    -------
    Plots with the following figure numbers
    1 -- Convergence of posterior mean and covariance
    2 -- Convergence of posterior mean only
    3 -- Convergence of posterior covariance only
    4 -- Convergence of posterior mean and covariance with error estimate
         computed with samples from posterior
    """
    N = len(b)
    # Run BayesCG under the Krylov prior (SigVec None) or the supplied prior.
    if SigVec is None:
        if delay is None:
            delay = N
        _,_,_,info = bayescg_k(AVec,b,x0,delay,it,None,1e-128,samples,reorth,\
                NormA,xTrue)
        trace = info['sExp']
    else:
        xm,SigASm,info = bayescg(AVec,b,x0,SigVec,it,None,reorth,NormA,xTrue)
        trace = info['trace']
    if samples is not None:
        # Draw S-statistic samples from each iteration's posterior:
        # S = (X - xm)' A (X - xm) with X ~ N(xm, SigPost).
        Sample_matrix = np.zeros((samples,it+1))
        for i in range(it+1):
            if i == 0:
                SigPost = SigVec(np.eye(N))
            else:
                SigPost = bayescg_post_cov(SigVec,SigASm[:,:i])
            # Symmetric square root of the posterior covariance via SVD.
            U,S,VH = linalg.svd(SigPost)
            SqrtSigPost = VH.T*(S**(1/2))
            for j in range(samples):
                Xrv = mv_normal(xm,SqrtSigPost)
                Sample_matrix[j,i] = np.inner(Xrv-xm,AVec(Xrv-xm))
        info['samples'] = Sample_matrix
    # Figure 1: error and trace together.
    plt.figure(1)
    plt.semilogy(info['err'], 'k', label = 'Error')
    plt.semilogy(trace, '--r', label = 'Trace')
    plt.ylabel('Error and Trace')
    plt.xlabel('Iteration $m$')
    plt.legend()
    if plt_title:
        plt.title('Convergence of BayesCG')
    plt.tight_layout()
    # Figure 2: error only.
    plt.figure(2)
    plt.semilogy(info['err'], 'k', label = 'Error')
    plt.ylabel('Error')
    plt.xlabel('Iteration $m$')
    if plt_title:
        plt.title('Convergence of BayesCG')
    plt.tight_layout()
    # Figure 3: trace only (linear y-axis).
    plt.figure(3)
    plt.plot(trace, '--k', label = 'Trace')
    plt.ylabel('Trace')
    plt.xlabel('Iteration $m$')
    if plt_title:
        plt.title('Convergence of BayesCG')
    plt.tight_layout()
    # Figure 4: posterior samples overlaid on error and trace.
    if samples is not None:
        plt.figure(4)
        plt.semilogy(info['samples'][0,:],'.r',markersize=1,label = 'Samples')
        for i in range(1,samples):
            plt.semilogy(info['samples'][i,:],'.r',markersize=1)
        plt.semilogy(trace, '--k', label = 'Trace')
        plt.semilogy(info['err'], 'k', label = 'Error')
        plt.legend()
        plt.ylabel('Error and Trace')
        plt.xlabel('Iteration $m$')
        if plt_title:
            plt.title('Convergence of BayesCG')
        plt.tight_layout()
def bayescg_exp_plots(AVec, b, x0, NormA, xTrue, GR, it, it_z=None, L=5,\
samples=10, pct=95, MA=20, reorth=False,
cgq=True, plt_title=True):
""" Computes plots of numerical experiments of BayesCG under Krylov prior
This program must be used in a Jupyter notebook
Parameters
----------
A : function
Function that computes matvec of matrix A
b : numpy array
Vector b
x0 : numpy array
Initial guess for x
NormA : float or None
2-norm of A
If supplied, residual is ||r||/(||A|| ||x_m||)
If not, residual is ||r||/||b||
xTrue : numpy array,
True solution of linear system
GR : float or None
Lower bound for smallest eigenvalue of A
If mu is supplied the Gauss-Radau error bound [1] is computed
If mu is None, the Gauss-Radau approximation [2] is computed
it : int
Number of iterations to perform
it_z : int or None, optional, default is None
Number of iterations to zoom in on
L : int, optional, default is 5
Delay for computing error estimates
samples : int or None, optional, default is 10
Number of S-statistic samples to compute each iteration
pct : float, 0<pct<100, optional, default is 95
Percent credible interval to plot
MA : int, optional, default is 20
Number of iteration moving average to plot
reorth : bool, optional, default is False
Whether to reorthogonalize
cgq : bool, optional, default is True
Whether to run CGQ algorithm
plt_title : bool, optional, default is True
Whether to have the plot titles display
Returns
-------
Plots with the following figure numbers
1 -- Emperical samples from S statistic (Only if samples is not None)
2 -- Emperical S statistic credible interval (If samples is not None)
3 -- S statistic credible interval and Gauss-Radau estimate
4 -- Relative accuracy of error estimates
5 -- Relative accuracy of error estimates, MA iteration moving average
6 -- BayesCG vs CG error (Only if Samples is not None)
7 -- BayesCG vs CG residual (Only if Samples is not None)
8 -- First it_z iterations of Figure 1
9 -- First it_z iterations of Figure 2
10 -- First it_z iterations of Figure 3
11 -- First it_z iterations of Figure 4
12 -- First it_z iterations of Figure 5
"""
pct_mult = erfinv(pct/100)*np.sqrt(2)
if (not cgq) or (samples is not None):
x,_,_,info = bayescg_k(AVec,b,x0,L,it,None,1e-128,samples,reorth,\
NormA,xTrue)
if cgq:
_,info2 = cgqs(AVec, b, x0, GR, None, it, L, reorth, NormA, xTrue)
Emu = info2['GRApprox']
SExp = info2['sExp']
SVar = info2['sSD']
err2 = info2['err']
res2 = info2['res']
comp_color = 'b'
if GR is not None:
Emu2 = info2['GaussRadau']
else:
Emu = None
SExp = info['sExp']
SVar = info['sSD']
err2 = info['err']
res2 = info['res']
comp_color = 'r'
if samples is not None:
scatter_color = 'r'
S = info['samples']
err = info['err']
res = info['res']
SSD = np.std(S,0,ddof=1)
SAvg = np.mean(S,0)
SMax = SAvg+pct_mult*SSD
plt.figure(1)
plt.semilogy(S[0], '.', markersize=1, label='S Stat Sample',
color=scatter_color)
for i in range(1,S.shape[0]):
plt.semilogy(S[i], '.', markersize=1, color=scatter_color)
plt.semilogy(err,'k',label = 'Error')
plt.legend()
Axes = plt.axis()
plt.xlabel('Iteration')
plt.ylabel('Squared A-norm Error')
if plt_title:
plt.title('BayesCG S Statistic Samples')
plt.tight_layout()
plt.figure(2)
plt.fill_between(range(len(err2)),SAvg,SMax,color=scatter_color,\
alpha=.2,label='Emperical S '+str(pct)+'% Cred Int')
plt.semilogy(err,'k', label = 'Error')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Squared A-norm Error')
if plt_title:
plt.title('BayesCG S Statistic Mean and Max')
plt.tight_layout()
plt.figure(3)
plt.fill_between(range(len(err2)),SExp,SExp+pct_mult*SVar,\
color = 'b',alpha = .2,label = 'S '+str(pct)\
+'% Cred Int')
if Emu is not None:
plt.semilogy(Emu,':k',label = 'G-R Approx')
if GR is not None:
plt.semilogy(Emu2,'-.',color = (0,.5,0),label = 'G-R Bound')
plt.semilogy(err2,'k',label = 'Error')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Squared A-norm Error')
if plt_title:
plt.title('CG Error Estimate')
plt.tight_layout()
if samples is not None:
AxesCG = plt.axis()
Axes = (min(Axes[0],AxesCG[0]),
max(Axes[1],AxesCG[1]),
min(Axes[2],AxesCG[2]),
max(Axes[3],AxesCG[3]))
plt.figure(1)
plt.axis(Axes)
plt.figure(2)
plt.axis(Axes)
plt.figure(3)
plt.axis(Axes)
plt.figure(4)
plt.semilogy(np.abs(err2-SExp)/np.min([err2,SExp],0),\
'-b',label = 'S Mean')
plt.semilogy(np.abs(err2-(SExp+pct_mult*SVar))/\
np.min([err2,SExp+pct_mult*SVar],0),'--r',\
label = 'S('+str(pct)+')')
if Emu is not None:
plt.semilogy(np.abs(err2-Emu)/np.min([err2,Emu],0),\
':k',label = 'G-R Approx')
if GR is not None:
plt.semilogy(np.abs(err2-Emu2)/np.min([err2,Emu2],0),\
'-.',color = (0,.5,0),label = 'G-R Bound')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Relative Accuracy')
if plt_title:
plt.title('Relative Distance Between CG Error and Estimates')
plt.tight_layout()
MA1 = np.zeros(len(err2)-MA)
MA2 = np.copy(MA1)
MA3 = np.copy(MA1)
MA4 = np.copy(MA1)
for i in range(len(err2)-MA):
MA1[i] = np.mean(np.abs(err2[i:i+MA]\
-(SExp[i:i+MA]+pct_mult\
*SVar[i:i+MA]))\
/np.min([err2[i:i+MA],SExp[i:i+MA]+pct_mult\
*SVar[i:i+MA]],0))
MA2[i] = np.mean(np.abs(err2[i:i+MA]\
-SExp[i:i+MA])\
/np.min([err2[i:i+MA],SExp[i:i+MA]],0))
if Emu is not None:
MA3[i] = np.mean(np.abs(err2[i:i+MA]\
-Emu[i:i+MA])\
/np.min([err2[i:i+MA],Emu[i:i+MA]],0))
if GR is not None:
MA4[i] = np.mean(np.abs(err2[i:i+MA]\
-Emu2[i:i+MA])\
/np.min([err2[i:i+MA],Emu2[i:i+MA]],0))
plt.figure(5)
plt.semilogy(MA2,'-b',label = 'S Mean')
plt.semilogy(MA1,'--r',label = 'S('+str(pct)+')')
if Emu is not None:
plt.semilogy(MA3,':k',label = 'G-R Approx')
if GR is not None:
plt.semilogy(MA4,'-.',color=(0,.5,0),label = 'G-R Bound')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Relative Accuracy')
if plt_title:
plt.title('Distance Between Error and Estimates, '\
+str(MA)+' Iteration Avg')
plt.tight_layout()
if samples is not None:
plt.figure(6)
plt.semilogy(err2, '-', label='CG Error', color=comp_color)
plt.semilogy(err, '--', label='BayesCG Error', color=scatter_color)
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Squared A-norm Error')
if plt_title:
plt.title('CG and BayesCG Error')
plt.tight_layout()
plt.figure(7)
plt.semilogy(res2, '-', label='CG Residual', color=comp_color)
plt.semilogy(res, '--', label='BayesCG Residual', color=scatter_color)
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Relative Residual')
if plt_title:
plt.title('CG and BayesCG Residual')
plt.tight_layout()
if it_z is not None:
if samples is not None:
plt.figure(8)
plt.semilogy(S[0,:it_z],'.r',markersize = 1,\
label = 'S Stat Sample')
for i in range(1,S.shape[0]):
plt.semilogy(S[i,:it_z],'.r',markersize = 1)
plt.semilogy(err[:it_z],'k',label = 'Error')
plt.legend()
Axes2 = plt.axis()
plt.xlabel('Iteration')
plt.ylabel('Squared A-norm Error')
if plt_title:
plt.title('BayesCG S Statistic Samples')
plt.tight_layout()
plt.figure(9)
plt.fill_between(range(len(err2[:it_z])),SAvg[:it_z],\
SMax[:it_z],color = 'r',alpha = .2,\
label = 'S '+str(pct)+'% Cred Int')
plt.semilogy(err[:it_z],'k', label = 'Error')
plt.legend()
plt.xlabel('Iteration')
plt.ylabel('Squared A-norm Error')
if plt_title:
plt.title('BayesCG S Statistic Mean and Max')
plt.tight_layout()
plt.figure(10)
plt.fill_between(range(len(err2[:it_z])),SExp[:it_z],\
SExp[:it_z]+pct_mult*SVar[:it_z],color = 'b',\
alpha = .2,label = 'S '+str(pct)+'% Cred Int')
if Emu is not None:
plt.semilogy(Emu[:it_z],':k',label = 'G-R Approx')
if GR is not None:
plt.semilogy(Emu2[:it_z],'-.',color=(0,.5,0),\
label='G-R Bound')
| |
# <gh_stars>1-10
#FOUR Ultrasonic sensors (HC-SR04) FUNCTION
#Version 1
#Goal: Assign four sensors to the four directions depending on the configuration
#When turn occurs, the sensor that aligns with each direction will change = FOUR CONFIGURATIONS
#Four sensors: sensor_1, sensor_2, sensor_3, sensor_4
#Four directions: sensor_N, sensor_W, sensor_S, sensor_E
#Starting orientation: S1=S_N, S2=S_W, S3=S_S, S4=S_E
#if moving straight, will remember configuration (variables sent back from function) and jump straight to determining squareLoc
#if turning right, add one to rightIndex and match leftIndex - remember both right and leftIndex
#if turning left, add one to leftIndex and match rightIndex- remember both right and leftIndex
#Main function (mainFn) will be part of FSM code --> will call determineLoc
#determineLoc: sends back (location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S, sensor_E)
#i.e. remembers config, indexes, and sends back square location (i.e. location)
#determineLoc will be called every 0.5 seconds and square location will be sent to display station via bluetooth
#will also be called if washer is found (these square locations will be remembered in order to print out on LCD at end)
def mainFn():  # This will all be in FSM main code
    """Demo driver: configure GPIO, then query the current square location once."""
    # Imports live inside the function to mirror how this code will be
    # embedded in the FSM main loop.
    import RPi.GPIO as GPIO
    import time
    import numpy as np
    # Use Broadcom (BCM) pin numbering and silence channel-reuse warnings.
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    # Motion flags (normally supplied by the encoder logic) and the starting
    # orientation indexes -- these will change at runtime.
    movingforward = 0
    turningright = 0
    turningleft = 0
    rightIndex = 0
    leftIndex = 0
    # determineLoc returns the square location plus the (possibly rotated)
    # sensor-to-direction configuration, so a straight-moving robot can keep
    # reusing the same configuration until a turn occurs.
    (location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S,
     sensor_E) = determineLoc(movingforward, turningright, turningleft,
                              rightIndex, leftIndex)
    # LOCATION is the square location; indexes and sensor directions are
    # passed back so the caller can remember them.
    return (location, rightIndex, leftIndex, sensor_N, sensor_W, sensor_S,
            sensor_E)
"""
"""
def determineLoc(movingforward, turningright, turningleft, rightIndex, leftIndex):
    """Read the four ultrasonic sensors and return the car's square location.

    Updates the sensor/direction configuration when a turn has occurred,
    configures the GPIO pins for the four HC-SR04 sensors, measures the
    four distances and maps them to a grid square via squareLoc().

    Args:
        movingforward (int): 1 if the car is driving straight.
        turningright (int): 1 if a right turn just occurred.
        turningleft (int): 1 if a left turn just occurred.
        rightIndex (int): cumulative right-turn count (0-3).
        leftIndex (int): cumulative left-turn count (0-3).

    Returns:
        tuple: (location, rightIndex, leftIndex,
                sensor_N, sensor_W, sensor_S, sensor_E)
    """
    # Bug fix: GPIO was referenced here but never imported in this scope.
    import RPi.GPIO as GPIO

    # Fixed physical wiring: sensor number -> (TRIG pin, ECHO pin).
    PINS = {
        1: (5, 6),    # sensor 1, initially north
        2: (19, 26),  # sensor 2, initially west
        3: (8, 7),    # sensor 3, initially south
        4: (20, 21),  # sensor 4, initially east
    }
    # Sensor assignment (N, W, S, E) for each orientation index; matches
    # the right-turn table in assignDirection.
    ROTATIONS = {
        0: (1, 2, 3, 4),
        1: (2, 3, 4, 1),
        2: (3, 4, 1, 2),
        3: (4, 1, 2, 3),
    }

    # Determine configuration, accounting for any turn that just occurred.
    if turningright == 1 and turningleft == 0 and movingforward == 0:
        rightIndex = rightIndex + 1
        # assignDirection also matches leftIndex to the new rightIndex config.
        (rightIndex, leftIndex, sensor_N, sensor_W, sensor_S,
         sensor_E) = assignDirection(rightIndex, leftIndex,
                                     turningright, turningleft)
    elif turningleft == 1 and turningright == 0 and movingforward == 0:
        leftIndex = leftIndex + 1
        # assignDirection also matches rightIndex to the new leftIndex config.
        (rightIndex, leftIndex, sensor_N, sensor_W, sensor_S,
         sensor_E) = assignDirection(rightIndex, leftIndex,
                                     turningright, turningleft)
    else:
        if turningright == 1 and turningleft == 1:
            # Error: cannot turn both ways at the same time!
            print("ERROR!!!! Cannot turn right and left at same time")
        elif movingforward == 1 and turningright == 0 and turningleft == 0:
            # No turning, keep the remembered configuration.
            print("Continue Moving Forward!")
        else:
            print("ERROR!")
        # Bug fix: the original left sensor_N..sensor_E undefined on this
        # path; reconstruct the remembered configuration from rightIndex.
        (sensor_N, sensor_W, sensor_S, sensor_E) = ROTATIONS[rightIndex % 4]

    (TRIG_N, TRIG_W, TRIG_S, TRIG_E,
     config_N, config_W, config_S, config_E) = declareSensor(
        sensor_N, sensor_W, sensor_S, sensor_E,
        PINS[1][0], PINS[1][1], PINS[2][0], PINS[2][1],
        PINS[3][0], PINS[3][1], PINS[4][0], PINS[4][1])

    # Bug fix: declareSensor does not return the ECHO pins, but the original
    # code used ECHO_N..ECHO_E; derive them from the same wiring table.
    ECHO_N = PINS[sensor_N][1]
    ECHO_W = PINS[sensor_W][1]
    ECHO_S = PINS[sensor_S][1]
    ECHO_E = PINS[sensor_E][1]

    # Configure one output (TRIG) and one input (ECHO) pin per direction.
    for trig, echo in ((TRIG_N, ECHO_N), (TRIG_W, ECHO_W),
                       (TRIG_S, ECHO_S), (TRIG_E, ECHO_E)):
        GPIO.setup(trig, GPIO.OUT)
        GPIO.setup(echo, GPIO.IN)

    # NOTE(review): readDistance() is passed the constant sensor number 1-4,
    # not the direction's currently-assigned sensor -- confirm it still maps
    # directions correctly after a turn.
    distance_N = readDistance(1)  # determine north distance
    distance_W = readDistance(2)  # determine west distance
    distance_S = readDistance(3)  # determine south distance
    distance_E = readDistance(4)  # determine east distance

    # Bug fix: the original call ended with a stray ':' (SyntaxError).
    (dim1, dim2) = assignLengthWidth(config_N, config_S, config_W, config_E)

    location = squareLoc(distance_N, distance_W, distance_S, distance_E,
                         dim1, dim2)
    # Square location is sent to the display station via Bluetooth upstream.
    print("Square:", location)
    return (location, rightIndex, leftIndex,
            sensor_N, sensor_W, sensor_S, sensor_E)
"""
"""
def declareSensor(sensor_N, sensor_W, sensor_S, sensor_E, TRIG_sensor1, ECHO_sensor1, TRIG_sensor2, ECHO_sensor2, TRIG_sensor3, ECHO_sensor3, TRIG_sensor4, ECHO_sensor4):
    """Map the four logical directions (N/W/S/E) onto physical sensors.

    Each ``sensor_X`` argument is the sensor number (1-4) currently facing
    direction X; the TRIG/ECHO arguments are the fixed pin numbers of the
    four physical sensors. (The ECHO pins are accepted for interface
    compatibility but, as in the original, are not part of the result.)

    Returns:
        tuple: (TRIG_N, TRIG_W, TRIG_S, TRIG_E,
                config_N, config_W, config_S, config_E) -- the TRIG pin per
        direction plus a 0/1 flag per direction used downstream to pick the
        car's length vs. width dimension.
    """
    # Physical TRIG pin for each sensor number.
    trig_by_sensor = {
        1: TRIG_sensor1,
        2: TRIG_sensor2,
        3: TRIG_sensor3,
        4: TRIG_sensor4,
    }

    def axis_flag(sensor, ns_direction):
        # Sensors 1 & 3 sit on one body axis, 2 & 4 on the other; the flag
        # is 1 exactly when the sensor's axis matches the direction's axis.
        on_ns_axis = sensor in (1, 3)
        return 1 if on_ns_axis == ns_direction else 0

    TRIG_N = trig_by_sensor[sensor_N]
    TRIG_W = trig_by_sensor[sensor_W]
    TRIG_S = trig_by_sensor[sensor_S]
    TRIG_E = trig_by_sensor[sensor_E]

    config_N = axis_flag(sensor_N, True)
    config_W = axis_flag(sensor_W, False)
    config_S = axis_flag(sensor_S, True)
    config_E = axis_flag(sensor_E, False)

    return (TRIG_N, TRIG_W, TRIG_S, TRIG_E,
            config_N, config_W, config_S, config_E)  # to main function
""""
""""
#Function to read from encoder, assign N,W,S,E sensors
def assignDirection(rightIndex, leftIndex, turningright, turningleft):
#First check to reset to original orientation, loop complete
if (rightIndex == 4):
rightIndex = 0
#Set leftIndex to match rightIndex
leftIndex = 0
elif (leftIndex == 4):
leftIndex = 0
#Set rightIndex to match leftIndex
rightIndex = 0
#Turning right initiated
if (turningright == 1): #only rightIndex should have changed
print ("Turned right")
if (rightIndex == 0):
#Assign sensors
sensor_N = 1 #sensor_1
sensor_W = 2 #sensor_2
sensor_S = 3 #sensor_3
sensor_E = 4 #sensor_4
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 0
elif (rightIndex == 1):
#Assign sensors
sensor_N = 2 #sensor_2
sensor_W = 3 #sensor_3
sensor_S = 4 #sensor_4
sensor_E = 1 #sensor_1
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 3
elif (rightIndex == 2):
#Assign sensors
sensor_N = 3 #sensor_3
sensor_W = 4 #sensor_4
sensor_S = 1 #sensor_1
sensor_E = 2 #sensor_2
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 2
elif (rightIndex == 3):
#Assign sensors
sensor_N = 4 #sensor_4
sensor_W = 1 #sensor_1
sensor_S = 2 #sensor_2
sensor_E = 3 #sensor_3
#Match not-changed leftIndex with changed rightIndex config
leftIndex = 1
else: #already checked for value of 4!
print("ERROR!!!") #should only be values 1, 2, 3
elif (turningleft == 1): #only leftIndex should have changed
print ("Turned left")
if (leftIndex == 0):
#Assign sensors
sensor_N = 1 #sensor_1
sensor_W = 2 #sensor_2
sensor_S = 3 #sensor_3
sensor_E = 4 #sensor_4
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 0
elif (leftIndex == 1):
#Assign sensors
sensor_N = 4 #sensor_4
sensor_W = 1 #sensor_1
sensor_S = 2 #sensor_2
sensor_E = 3 #sensor_3
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 3
elif (leftIndex == 2):
#Assign sensors
sensor_N = 3 #sensor_3
sensor_W = 4 #sensor_4
sensor_S = 1 #sensor_1
sensor_E = 2 #sensor_2
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 2
elif (leftIndex == 3):
#Assign sensors
sensor_N = 2 #sensor_2
sensor_W = 3 #sensor_3
sensor_S = 4 #sensor_4
sensor_E = 1 #sensor_1
#Match not-changed rightIndex with changed leftIndex config
rightIndex = 1
else: #already checked for | |
match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.2"
action="mod_dl_dst:"+exp.GetMacByVf("enp3s0f4")+","+exp.VfsMatch["enp3s0f3"]
exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
#Flow Rules (2)
#---------------------------------------------------------
match="in_port="+exp.VfsMatch["enp3s1f3"]
action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
'''
OVS Flow Rules for tenant-2 :
+ (1) in: enp3s0f2 ,dest IP 10.0.0.3 --> out= enp3s0f5, change Mac to enp3s0f6 mac
+ (2) in: enp3s1f5 --> out= enp3s1f2, change Mac to 00:00:00:00:30:56
'''
#Flow Rules (1)
#---------------------------------------------------------
match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.3"
action="mod_dl_dst:"+exp.GetMacByVf("enp3s0f6")+","+exp.VfsMatch["enp3s0f5"]
exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
#Flow Rules (2)
#---------------------------------------------------------
match="in_port="+exp.VfsMatch["enp3s1f5"]
action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
#show Flow rules of br0
exp.showFlowRules("vswitch-vm", exp.OVS_PATH,"br0")
#Start DPDK App in The tenantVM
#exp.StartDpdkApp("tenant-green-1")
EmailNotify(msg, "is ready", logTimeStamp)
return True
def phy2phy_SRIOV_MultiOvs_NoDPDK(cnx_server, config):
    """phy2phy scenario: SR-IOV VFs, two OVS vswitch VMs, kernel datapath.

    Configures two vswitch VMs, each bridging one VF pair on br0, installs
    one destination-IP flow rule per bridge (with destination-MAC rewrite)
    and sends e-mail notifications before and after setup.

    Returns:
        bool: always True on completion.
    """
    # ---- experiment-wide state ----------------------------------------
    exp.NicType = "mlx"
    exp.isSRIOV = True
    exp.IsDPDK = False
    exp.OVS_PATH = exp.nodpdk_path
    exp.Server_cnx = cnx_server
    exp.scsName = "phy2phy_SRIOV_MultiOvs_NoDPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    # ---- CPU and port layout ------------------------------------------
    cpu_config = CpuAllocation(config)
    exp.PhyPorts = [("enp3s0f0", "8"), ("enp3s0f1", "8")]
    exp.MyVfs = [
        ("enp3s0f2", "0", "off", "vswitch-vm"),
        ("enp3s1f2", "0", "off", "vswitch-vm"),
        ("enp3s0f5", "0", "off", "vswitch-vm-2"),
        ("enp3s1f5", "0", "off", "vswitch-vm-2"),
    ]
    exp.usedVms = [
        ("vswitch-vm", cpu_config[0][0], "4G"),
        ("vswitch-vm-2", cpu_config[0][1], "4G"),
    ]
    ovs_cpu = cpu_config[2]

    # Each OVS VM bridges one VF pair (kernel datapath: no PMD core field).
    vm1_ports = [("enp3s0f2", False), ("enp3s1f2", False)]
    vm2_ports = [("enp3s0f5", False), ("enp3s1f5", False)]

    msg = GetScenarioSummary([vm1_ports, vm2_ports], ovs_cpu, [], "")
    EmailNotify(msg, "is beeing prepared", logTimeStamp)  # sic (original text)
    Logs(msg, config, logTimeStamp)

    exp.InitialConfig()
    exp.Vfsconfig()
    exp.ConfigOVS("vswitch-vm", "br0", vm1_ports, ovs_cpu)
    exp.ConfigOVS("vswitch-vm-2", "br0", vm2_ports, ovs_cpu)

    # One flow rule per bridge: match ingress VF + tenant IP, rewrite the
    # destination MAC and forward out of the egress VF; then dump the rules.
    for vm_name, in_vf, dst_ip, out_vf in (
        ("vswitch-vm", "enp3s0f2", "10.0.0.2", "enp3s1f2"),
        ("vswitch-vm-2", "enp3s0f5", "10.0.0.3", "enp3s1f5"),
    ):
        rule_match = "in_port=" + exp.VfsMatch[in_vf] + ",ip,nw_dst=" + dst_ip
        rule_action = "mod_dl_dst:" + outDestMac + "," + exp.VfsMatch[out_vf]
        exp.addFlowRule(vm_name, exp.OVS_PATH, "br0", rule_match, rule_action)
        exp.showFlowRules(vm_name, exp.OVS_PATH, "br0")

    EmailNotify(msg, "is ready", logTimeStamp)
    return True
def phy2phy_SRIOV_MultiOvs_DPDK(cnx_server, config, dpdkMemory="1024,0"):
    """phy2phy scenario: SR-IOV VFs, two OVS vswitch VMs, DPDK datapath.

    Args:
        cnx_server: connection handle for the server under test.
        config: CPU layout passed to CpuAllocation().
        dpdkMemory (str, optional): DPDK socket memory as
            "MB_socket0,MB_socket1". Default: "1024,0".

    Returns:
        bool: True after both VMs are configured and flow rules installed.
    """
    #----------------------------------------#
    exp.NicType= "mlx"
    exp.isSRIOV= True
    exp.IsDPDK= True
    exp.OVS_PATH= exp.dpdk_path  # DPDK build of OVS
    exp.Server_cnx= cnx_server
    exp.scsName= "phy2phy_SRIOV_MultiOvs_DPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    #----------------------------------------#
    cpu_config= CpuAllocation(config)
    # Physical ports, each split into 8 VFs.
    exp.PhyPorts= [
        ("enp3s0f0", "8"),
        ("enp3s0f1", "8")
    ]
    # (VF name, VLAN, spoof-check, owning VM)
    exp.MyVfs= [
        ("enp3s0f2", "0", "off", "vswitch-vm"),
        ("enp3s1f2", "0", "off", "vswitch-vm"),
        ("enp3s0f5", "0", "off", "vswitch-vm-2"),
        ("enp3s1f5", "0", "off", "vswitch-vm-2")]
    exp.usedVms=[
        ("vswitch-vm", cpu_config[0][0], "4G"),
        ("vswitch-vm-2", cpu_config[0][1], "4G")]
    OvsCpu= cpu_config[2]
    DpdkCpu= cpu_config[3]
    DpdkMem= dpdkMemory
    #----------------- OVS-VM_1------------------
    # Pin each DPDK port to its own PMD core when two are available,
    # otherwise share the single core.
    if len(DpdkCpu[1]) >1:
        OvsVmPorts1= [
            ("enp3s0f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f2", True, str(DpdkCpu[1][1]))]
    else:
        OvsVmPorts1= [
            ("enp3s0f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f2", True, str(DpdkCpu[1][0]))]
    #----------------- OVS-VM_2------------------
    if len(DpdkCpu[1]) >1:
        OvsVmPorts2= [
            ("enp3s0f5", True, str(DpdkCpu[1][0])),
            ("enp3s1f5", True, str(DpdkCpu[1][1]))]
    else:
        OvsVmPorts2= [
            ("enp3s0f5", True, str(DpdkCpu[1][0])),
            ("enp3s1f5", True, str(DpdkCpu[1][0]))]
    msg= GetScenarioSummary([], OvsCpu, DpdkCpu, DpdkMem)
    EmailNotify(msg, "is beeing prepared", logTimeStamp)
    Logs(msg,config, logTimeStamp)
    #-------------------------------------------
    exp.InitialConfig()
    exp.Vfsconfig()
    exp.ConfigOVS("vswitch-vm", "br0", OvsVmPorts1, OvsCpu, DpdkMem, DpdkCpu)
    # NOTE(review): IsCpusIsolated appears to be a module-level flag; when
    # cores are not isolated the second VM's PMD falls back to [2, [0]]
    # (core 0) -- confirm that is intentional.
    if IsCpusIsolated:
        exp.ConfigOVS("vswitch-vm-2", "br0", OvsVmPorts2, OvsCpu, DpdkMem, DpdkCpu)
    else:
        exp.ConfigOVS("vswitch-vm-2", "br0", OvsVmPorts2, OvsCpu, DpdkMem, [2,[0]])
    '''
    OVS Flow Rules:
       + (1) in: enp3s0f2 with ip:10.0.0.2--> out= enp3s1f2, change Mac to 00:00:00:00:30:56
    '''
    #Flow Rules (1)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.2"
    action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #show Flow rules of br0
    exp.showFlowRules("vswitch-vm", exp.OVS_PATH,"br0")
    '''
    OVS Flow Rules for OVS-VM-2:
       + (1) in: enp3s0f3 with ip:10.0.0.3--> out= enp3s1f3, change Mac to 00:00:00:00:30:56
    '''
    # NOTE(review): the note above says enp3s0f3/enp3s1f3 but the rule
    # below uses enp3s0f5/enp3s1f5 -- the code, not the note, is applied.
    #Flow Rules (1)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s0f5"]+",ip,nw_dst=10.0.0.3"
    action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f5"]
    exp.addFlowRule("vswitch-vm-2" , exp.OVS_PATH, "br0", match, action)
    #show Flow rules of br0
    exp.showFlowRules("vswitch-vm-2", exp.OVS_PATH,"br0")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
def phy2phy_Baseline_MultiTenant_NoDPDK(cnx_server, config):
    """Baseline multi-tenant phy2phy: host OVS only, kernel datapath.

    No VMs and no VFs: both physical ports join br0 on the host and two
    flow rules steer the tenant IPs straight from port 0 to port 1.

    Returns:
        bool: always True on completion.
    """
    # ---- experiment-wide state ----------------------------------------
    exp.NicType = "mlx"
    exp.isSRIOV = False
    exp.IsDPDK = False
    exp.OVS_PATH = exp.nodpdk_path
    exp.Server_cnx = cnx_server
    exp.scsName = "phy2phy_Baseline_MultiTenant_NoDPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    # ---- CPU and port layout ------------------------------------------
    cpu_config = CpuAllocation(config)
    exp.PhyPorts = [("enp3s0f0", "br0"), ("enp3s0f1", "br0")]
    # Baseline topology: no VMs, VFs or extra virtual ports are needed.
    exp.VirtualPorts = []
    exp.usedVms = []
    bridge_ports = []
    ovs_cpu = cpu_config[2]

    msg = GetScenarioSummary([], ovs_cpu, [], "")
    EmailNotify(msg, "is beeing prepared", logTimeStamp)  # sic (original text)
    Logs(msg, config, logTimeStamp)

    # ---- configure and install flow rules -----------------------------
    exp.InitialConfig()
    exp.ConfigOVS(exp.Server_cnx, "br0", bridge_ports, ovs_cpu)

    # Steer both tenant destination IPs from port 0 straight out of port 1.
    for tenant_ip in ("10.0.0.2", "10.0.0.3"):
        exp.addFlowRule(exp.Server_cnx, exp.OVS_PATH, "br0",
                        "in_port=enp3s0f0,ip,nw_dst=" + tenant_ip,
                        "enp3s0f1")

    exp.showFlowRules(exp.Server_cnx, exp.OVS_PATH, "br0")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
# Not tested !!!
def phy2phy_Baseline_MultiTenant_DPDK(cnx_server, config, dpdkMemory="1024,0"):
    """Baseline multi-tenant phy2phy: host OVS-DPDK only, no VMs or VFs.

    Args:
        cnx_server: connection handle for the server under test.
        config: CPU layout passed to CpuAllocation().
        dpdkMemory (str, optional): DPDK socket memory as
            "MB_socket0,MB_socket1". Default: "1024,0".

    Returns:
        bool: True after the host bridge and flow rules are configured.
    """
    #----------------------------------------#
    exp.NicType= "mlx"
    exp.isSRIOV= False
    exp.IsDPDK= True
    exp.OVS_PATH= exp.dpdk_path  # DPDK build of OVS
    exp.Server_cnx= cnx_server
    exp.scsName= "phy2phy_Baseline_MultiTenant_DPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    #----------------------------------------#
    cpu_config= CpuAllocation(config)
    exp.VirtualPorts=[]
    # no VM is used in this topology
    exp.usedVms=[]
    # no VFs or extra virtual ports are needed in this topology
    Ports=[]
    OvsCpu= cpu_config[2]
    DpdkCpu= cpu_config[3]
    DpdkMem= dpdkMemory
    # Attach both physical ports to br0 as DPDK ports; pin each to its own
    # PMD core when two are available, otherwise share one.
    if len(DpdkCpu[1]) >1:
        exp.PhyPorts= [
            ("enp3s0f0", "br0", True, str(DpdkCpu[1][0])),
            ("enp3s0f1", "br0", True, str(DpdkCpu[1][1]))]
    else:
        exp.PhyPorts= [
            ("enp3s0f0", "br0", True, str(DpdkCpu[1][0])),
            ("enp3s0f1", "br0", True, str(DpdkCpu[1][0]))]
    msg= GetScenarioSummary([], OvsCpu, DpdkCpu, DpdkMem)
    EmailNotify(msg, "is beeing prepared", logTimeStamp)
    Logs(msg,config, logTimeStamp)
    #----------------------------------------#
    exp.InitialConfig(True)  # True: DPDK-specific initial configuration
    exp.ConfigOVS(exp.Server_cnx, "br0", Ports, OvsCpu, DpdkMem, DpdkCpu)
    '''
    OVS Flow Rules:
       + (1) in: enp3s0f0 with ip 10.0.0.2 --> out= enp3s0f1
       + (2) in: enp3s0f0 with ip 10.0.0.3 --> out= enp3s0f1
    '''
    #Flow Rules (1)
    #---------------------------------------------------------
    match="in_port=enp3s0f0,ip,nw_dst=10.0.0.2"
    action="enp3s0f1"
    exp.addFlowRule(exp.Server_cnx , exp.OVS_PATH, "br0", match, action)
    #Flow Rules (2)
    #---------------------------------------------------------
    match="in_port=enp3s0f0,ip,nw_dst=10.0.0.3"
    action="enp3s0f1"
    exp.addFlowRule(exp.Server_cnx , exp.OVS_PATH, "br0", match, action)
    #show Flow rules of br0
    exp.showFlowRules(exp.Server_cnx, exp.OVS_PATH, "br0")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
def phy2phy_SRIOV_MultiTenant_DPDK(cnx_server, config, dpdkMemory="1024,0"):
    """Multi-tenant phy2phy through one SR-IOV vswitch VM running OVS-DPDK.

    Both tenant destination IPs (10.0.0.2 / 10.0.0.3) enter on VF enp3s0f2
    and leave on VF enp3s1f2 with the destination MAC rewritten.

    Args:
        cnx_server: connection handle for the server under test.
        config: CPU layout passed to CpuAllocation().
        dpdkMemory (str, optional): DPDK socket memory as
            "MB_socket0,MB_socket1". Default: "1024,0".

    Returns:
        bool: True after the VM is configured and flow rules installed.
    """
    #----------------------------------------#
    exp.NicType= "mlx"
    exp.isSRIOV= True
    exp.IsDPDK= True
    exp.OVS_PATH= exp.dpdk_path  # DPDK build of OVS
    exp.Server_cnx= cnx_server
    exp.scsName= "phy2phy_SRIOV_MultiTenant_DPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    #----------------------------------------#
    cpu_config= CpuAllocation(config)
    # Physical ports, each split into 8 VFs.
    exp.PhyPorts= [
        ("enp3s0f0", "8"),
        ("enp3s0f1", "8")
    ]
    # (VF name, VLAN, spoof-check, owning VM)
    exp.MyVfs= [
        ("enp3s0f2", "0", "off", "vswitch-vm"),
        ("enp3s1f2", "0", "off", "vswitch-vm")]
    exp.usedVms=[
        ("vswitch-vm", cpu_config[0][0], "4G")]
    #----------------- OVS-VM_1------------------
    OvsCpu= cpu_config[2]
    DpdkCpu= cpu_config[3]
    DpdkMem= dpdkMemory
    # Pin each DPDK port to its own PMD core when two are available,
    # otherwise share the single core.
    if len(DpdkCpu[1]) >1:
        OvsVmPorts1= [
            ("enp3s0f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f2", True, str(DpdkCpu[1][1]))]
    else:
        OvsVmPorts1= [
            ("enp3s0f2", True, str(DpdkCpu[1][0])),
            ("enp3s1f2", True, str(DpdkCpu[1][0]))]
    msg= GetScenarioSummary([OvsVmPorts1], OvsCpu, DpdkCpu, DpdkMem)
    EmailNotify(msg, "is beeing prepared", logTimeStamp)
    Logs(msg,config, logTimeStamp)
    #-------------------------------------------
    exp.InitialConfig()
    exp.Vfsconfig()
    exp.ConfigOVS("vswitch-vm", "br0", OvsVmPorts1, OvsCpu, DpdkMem, DpdkCpu)
    '''
    OVS Flow Rules:
       + (1) in: enp3s0f2 with ip:10.0.0.2--> out= enp3s1f2, change Mac to 00:00:00:00:30:56
       + (2) in: enp3s0f2 with ip:10.0.0.3--> out= enp3s1f2, change Mac to 00:00:00:00:30:56
    '''
    #Flow Rules (1)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.2"
    action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #Flow Rules (2)
    #---------------------------------------------------------
    match="in_port="+exp.VfsMatch["enp3s0f2"]+",ip,nw_dst=10.0.0.3"
    action="mod_dl_dst:"+outDestMac+","+exp.VfsMatch["enp3s1f2"]
    exp.addFlowRule("vswitch-vm" , exp.OVS_PATH, "br0", match, action)
    #show Flow rules of br0
    exp.showFlowRules("vswitch-vm", exp.OVS_PATH,"br0")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
def phy2phy_SRIOV_MultiTenant_NoDPDK(cnx_server, config):
    """Multi-tenant phy2phy through one SR-IOV vswitch VM, kernel datapath.

    One VM bridges VF pair enp3s0f2/enp3s1f2 on br0; both tenant IPs enter
    on enp3s0f2 and leave on enp3s1f2 with the destination MAC rewritten.

    Returns:
        bool: always True on completion.
    """
    # ---- experiment-wide state ----------------------------------------
    exp.NicType = "mlx"
    exp.isSRIOV = True
    exp.IsDPDK = False
    exp.OVS_PATH = exp.nodpdk_path
    exp.Server_cnx = cnx_server
    exp.scsName = "phy2phy_SRIOV_MultiTenant_NoDPDK"
    logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")

    # ---- CPU and port layout ------------------------------------------
    cpu_config = CpuAllocation(config)
    exp.PhyPorts = [("enp3s0f0", "8"), ("enp3s0f1", "8")]
    exp.MyVfs = [
        ("enp3s0f2", "0", "off", "vswitch-vm"),
        ("enp3s1f2", "0", "off", "vswitch-vm"),
    ]
    exp.usedVms = [("vswitch-vm", cpu_config[0][0], "4G")]

    ovs_cpu = cpu_config[2]
    # Kernel datapath: no PMD core field on the port tuples.
    vm_ports = [("enp3s0f2", False), ("enp3s1f2", False)]

    msg = GetScenarioSummary([vm_ports], ovs_cpu, [], "")
    EmailNotify(msg, "is beeing prepared", logTimeStamp)  # sic (original text)
    Logs(msg, config, logTimeStamp)

    # ---- configure and install flow rules -----------------------------
    exp.InitialConfig()
    exp.Vfsconfig()
    exp.ConfigOVS("vswitch-vm", "br0", vm_ports, ovs_cpu)

    # Both tenant flows share ingress and egress VFs; only the matched
    # destination IP differs.
    for tenant_ip in ("10.0.0.2", "10.0.0.3"):
        exp.addFlowRule(
            "vswitch-vm", exp.OVS_PATH, "br0",
            "in_port=" + exp.VfsMatch["enp3s0f2"] + ",ip,nw_dst=" + tenant_ip,
            "mod_dl_dst:" + outDestMac + "," + exp.VfsMatch["enp3s1f2"])

    exp.showFlowRules("vswitch-vm", exp.OVS_PATH, "br0")
    EmailNotify(msg, "is ready", logTimeStamp)
    return True
###########################################################################################
###################---Custom Scenarios (No CPU array is needed)---#########################
###########################################################################################
def phy2vm2vm2phy_SRIOV_MultiOvs_DPDK_Custom(cnx_server, dpdkMemory="1024,0"):
#----------------------------------------#
exp.NicType= "mlx"
exp.isSRIOV= True
exp.IsDPDK= True
exp.OVS_PATH= exp.dpdk_path
exp.Server_cnx= cnx_server
exp.scsName= "phy2vm2vm2phy_SRIOV_MultiOvs_DPDK_Custom"
logTimeStamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
#----------------------------------------#
exp.HOST_CPU="custom"
exp.PhyPorts= [
("enp3s0f0", "8"),
("enp3s0f1", "8")
]
exp.MyVfs= [
("enp3s0f2", "0", "off", "vswitch-vm"),
("enp3s1f2", "0", "off", "vswitch-vm"),
("enp3s1f3", "10", "off", "vswitch-vm"),
("enp3s0f3", "10", "off", "vswitch-vm"),
("enp3s0f4", "10", "off", "tenant-green-1"),
("enp3s1f4", "10", "off", "tenant-green-1"),
("enp3s0f5", "0", "off", "vswitch-vm-2"),
("enp3s1f5", "0", "off", "vswitch-vm-2"),
("enp3s1f6", "20", "off", "vswitch-vm-2"),
("enp3s0f6", "20", "off", "vswitch-vm-2"),
("enp3s0f7", "20", "off", "tenant-green-2"),
("enp3s1f7", "20", "off", "tenant-green-2")
]
exp.usedVms=[
("vswitch-vm", | |
<reponame>theNewFlesh/hidebound<filename>python/hidebound/core/database.py
from typing import Any, Dict, List, Union
from copy import deepcopy
from importlib import import_module
from pathlib import Path
import json
import os
import shutil
import sys
from pandas import DataFrame
import jsoncomment as jsonc
import numpy as np
import pandasql
import requests
from hidebound.core.config import Config
from hidebound.core.specification_base import SpecificationBase
from hidebound.exporters.girder_exporter import GirderExporter
from hidebound.exporters.local_disk_exporter import LocalDiskExporter
from hidebound.exporters.s3_exporter import S3Exporter
import hidebound.core.database_tools as db_tools
import hidebound.core.tools as tools
from hidebound.core.logging import ProgressLogger
# ------------------------------------------------------------------------------
class Database:
'''
Generates a DataFrame using the files within a given directory as rows.
'''
@staticmethod
def from_config(config):
    # type: (Dict[str, Any]) -> "Database"
    '''
    Constructs a Database instance given a valid config.

    Args:
        config (dict): Dictionary that meets Config class standards.

    Raises:
        DataError: If config is invalid.

    Returns:
        Database: Database instance.
    '''
    # Validate the config and fill in default values.
    cfg = Config(deepcopy(config))
    cfg.validate()
    cfg = cfg.to_primitive()

    # Import each specification module and collect its SPECIFICATIONS,
    # deduplicating the combined list.
    specs = []
    for spec_path in cfg['specification_files']:
        sys.path.append(spec_path)
        path_obj = Path(spec_path)
        module = import_module(path_obj.stem, path_obj)  # type: ignore
        specs.extend(module.SPECIFICATIONS)  # type: ignore
    specs = list(set(specs))
    cfg['specifications'] = specs

    return Database(
        cfg['root_directory'],
        cfg['hidebound_directory'],
        specifications=specs,
        include_regex=cfg['include_regex'],
        exclude_regex=cfg['exclude_regex'],
        write_mode=cfg['write_mode'],
        exporters=cfg['exporters'],
        webhooks=cfg['webhooks'],
    )
@staticmethod
def from_json(filepath):
    # type: (Union[str, Path]) -> "Database"
    '''
    Constructs a Database instance from a given json file.

    Args:
        filepath (str or Path): Filepath of json config file.

    Returns:
        Database: Database instance.
    '''
    # jsoncomment tolerates comments inside the JSON config file.
    with open(filepath) as stream:
        config = jsonc.JsonComment().load(stream)
    return Database.from_config(config)
def __init__(
    self,
    root_dir,
    hidebound_dir,
    specifications=[],
    include_regex='',
    exclude_regex=r'\.DS_Store',
    write_mode='copy',
    exporters={},
    webhooks=[],
):
    # type: (Union[str, Path], Union[str, Path], List[SpecificationBase], str, str, str, Dict[str, Any], List[Dict]) -> None # noqa E501
    # NOTE(review): mutable default arguments ([], {}) are shared across
    # calls if ever mutated; the body only reads them, but confirm.
    r'''
    Creates an instance of Database but does not populate it with data.

    Args:
        root_dir (str or Path): Root directory to recurse.
        hidebound_dir (str or Path): Directory where hidebound data will be
            saved.
        specifications (list[SpecificationBase], optional): List of asset
            specifications. Default: [].
        include_regex (str, optional): Include filenames that match this
            regex. Default: None.
        exclude_regex (str, optional): Exclude filenames that match this
            regex. Default: '\.DS_Store'.
        write_mode (str, optional): How assets will be extracted to
            hidebound/content directory. Default: copy.
        exporters (dict, optional): Dictionary of exporter configs, where
            the key is the exporter name and the value is its config.
            Default: {}.
        webhooks (list[dict], optional): List of webhooks to call.
            Default: [].

    Raises:
        TypeError: If specifications contains a non-SpecificationBase
            object.
        ValueError: If write_mode not is not "copy" or "move".
        FileNotFoundError: If root is not a directory or does not exist.
        FileNotFoundError: If hidebound_dir is not directory or does not
            exist.
        NameError: If hidebound_dir is not named "hidebound".

    Returns:
        Database: Database instance.
    '''
    # Validate the hidebound directory first: the logger lives inside it,
    # so nothing can be logged until it is known to exist.
    hb_root = Path(hidebound_dir)
    if not hb_root.is_dir():
        msg = f'{hb_root} is not a directory or does not exist.'
        raise FileNotFoundError(msg)
    if Path(hb_root).name != 'hidebound':
        msg = f'{hb_root} directory is not named hidebound.'
        raise NameError(msg)

    # Set up the progress logger; later validation failures are logged
    # before their exception is raised.
    logpath = Path(hb_root, 'logs', 'progress', 'hidebound-progress.log')
    self._logger = ProgressLogger(__name__, logpath)

    # Validate that every specification is a SpecificationBase subclass.
    bad_specs = list(filter(
        lambda x: not issubclass(x, SpecificationBase), specifications
    ))
    if len(bad_specs) > 0:
        msg = 'SpecificationBase may only contain subclasses of '
        msg += f'SpecificationBase. Found: {bad_specs}.'
        self._logger.error(msg)
        raise TypeError(msg)

    # Validate the root directory to be recursed.
    root = Path(root_dir)
    if not root.is_dir():
        msg = f'{root} is not a directory or does not exist.'
        self._logger.error(msg)
        raise FileNotFoundError(msg)

    # Validate write mode: assets are either copied or moved into
    # hidebound/content.
    modes = ['copy', 'move']
    if write_mode not in modes:
        msg = f'Invalid write mode: {write_mode} not in {modes}.'
        self._logger.error(msg)
        raise ValueError(msg)

    self._root = root                       # directory to recurse
    self._hb_root = hb_root                 # hidebound output directory
    self._include_regex = include_regex     # filename include filter
    self._exclude_regex = exclude_regex     # filename exclude filter
    self._write_mode = write_mode           # 'copy' or 'move'
    # Map lowercased class name -> specification class.
    self._specifications = {
        x.__name__.lower(): x for x in specifications
    }  # type: Dict[str, SpecificationBase]
    self._exporters = exporters
    self._webhooks = webhooks
    # Populated by update(); create()/read() require it to be non-None.
    self.data = None
    # needed for testing
    self.__exporter_lut = None
    self._logger.info('Database initialized')
def create(self):
    # type: () -> "Database"
    '''
    Extract valid assets as data and metadata within the hidebound
    directory.

    Writes:

        * file content to hb_parent/hidebound/content - under same directory
          structure
        * asset metadata as json to hb_parent/hidebound/metadata/asset
        * file metadata as json to hb_parent/hidebound/metadata/file
        * asset log as json to hb_parent/hidebound/logs/asset
        * file log as json to hb_parent/hidebound/logs/file

    Raises:
        RunTimeError: If data has not been initialized.

    Returns:
        Database: self.
    '''
    total = 7  # number of progress-log steps reported below
    if self.data is None:
        msg = 'Data not initialized. Please call update.'
        raise RuntimeError(msg)

    def write_json(obj, filepath):
        # Serialize a metadata object to a JSON file.
        with open(filepath, 'w') as f:
            json.dump(obj, f)

    def write_log(log, filepath):
        # Log payloads are pre-rendered strings; write them verbatim.
        with open(filepath, 'w') as f:
            f.write(log)

    temp = db_tools._get_data_for_write(
        self.data, self._root, self._hb_root
    )
    self._logger.info('create: get data', step=1, total=total)
    # None means there are no valid assets to extract; nothing to do.
    if temp is None:
        return self
    file_data, asset_meta, file_meta, asset_log, file_log = temp

    # Make the parent directory for every target path before writing.
    for item in temp:
        item.target.apply(lambda x: os.makedirs(Path(x).parent, exist_ok=True))
    self._logger.info('create: make directories', step=2, total=total)

    # Write file content: move clears the source tree, copy preserves it.
    if self._write_mode == 'move':
        file_data.apply(lambda x: shutil.move(x.source, x.target), axis=1)
        # Moving files can leave empty source directories behind.
        tools.delete_empty_directories(self._root)
    else:
        file_data.apply(lambda x: shutil.copy2(x.source, x.target), axis=1)
    self._logger.info('create: write file data', step=3, total=total)

    # Write asset metadata.
    asset_meta.apply(lambda x: write_json(x.metadata, x.target), axis=1)
    self._logger.info('create: write asset metadata', step=4, total=total)

    # Write file metadata.
    file_meta.apply(lambda x: write_json(x.metadata, x.target), axis=1)
    self._logger.info('create: write file metadata', step=5, total=total)

    # Write asset log.
    asset_log.apply(lambda x: write_log(x.metadata, x.target), axis=1)
    self._logger.info('create: write asset log', step=6, total=total)

    # Write file log.
    file_log.apply(lambda x: write_log(x.metadata, x.target), axis=1)
    self._logger.info('create: write file log', step=7, total=total)

    self._logger.info('create: complete', step=7, total=total)
    return self
def read(self, group_by_asset=False):
    # type: (bool) -> "DataFrame"
    '''
    Return a DataFrame which can be easily be queried and has only cells
    with scalar values.

    Args:
        group_by_asset (bool, optional): Whether to group the data by asset.
            Default: False.

    Raises:
        RunTimeError: If data has not been initialized.

    Returns:
        DataFrame: Formatted data.
    '''
    total = 5  # number of progress-log steps reported below
    if self.data is None:
        msg = 'Data not initialized. Please call update.'
        raise RuntimeError(msg)

    def coordinate_to_dict(item):
        # Flatten a 'coordinate' list into coordinate_x/y/z scalar keys
        # so every cell of the output holds a scalar value.
        if 'coordinate' in item.keys():
            keys = ['coordinate_x', 'coordinate_y', 'coordinate_z']
            coords = dict(zip(keys, item['coordinate']))
            del item['coordinate']
            item.update(coords)
        return item

    data = self.data.copy()
    self._logger.info('read: copy data', step=1, total=total)

    # Pick the traits column; when grouping, collapse to one row per asset
    # (the first file row of each asset group).
    col = 'file_traits'
    if group_by_asset:
        col = 'asset_traits'
        data = data.groupby('asset_path', as_index=False).first()
    self._logger.info('read: group by asset', step=2, total=total)

    data[col] = data[col].apply(coordinate_to_dict)
    # Expand the traits dicts into real columns, writing only where the
    # trait is present so existing column values are not clobbered.
    traits = DataFrame(data[col].tolist())
    for col in traits.columns:
        if col not in data.columns:
            data[col] = np.nan
        mask = traits[col].notnull()
        data.loc[mask, col] = traits.loc[mask, col]
    self._logger.info('read: filter traits', step=3, total=total)

    # Keep only columns whose every cell is a legal scalar type.
    # NOTE(review): when self.data is empty this falls back to self.data's
    # columns, which may differ from data's columns after the group-by and
    # trait expansion above -- confirm intended.
    cols = self.data.columns.tolist()
    if len(self.data) > 0:
        cols = data \
            .applymap(type) \
            .apply(lambda x: x.unique().tolist())
        legal_cols = set([int, float, str, bool, None])
        cols = cols.apply(lambda x: set(x).difference(legal_cols) == set())
        cols = cols[cols].index.tolist()
    self._logger.info('read: filter legal types', step=4, total=total)

    # Nicely order columns: well-known columns first, the rest sorted.
    head_cols = [
        'project',
        'specification',
        'descriptor',
        'version',
        'coordinate_x',
        'coordinate_y',
        'coordinate_z',
        'frame',
        'extension',
        'filename',
        'filepath',
        'file_error',
        'asset_name',
        'asset_path',
        'asset_type',
        'asset_error',
        'asset_valid',
    ]
    head_cols = list(filter(lambda x: x in cols, head_cols))
    tail_cols = sorted(list(set(cols).difference(head_cols)))
    cols = head_cols + tail_cols
    data = data[cols]
    self._logger.info('read: order columns', step=5, total=total)

    self._logger.info('read: complete', step=5, total=total)
    return data
def update(self):
    # type: () -> "Database"
    '''
    Recurse root directory, populate self.data with its files, locate and
    validate assets.

    Returns:
        Database: self.
    '''
    total = 12
    self._logger.info('update', step=0, total=total)

    exclude_re = '|'.join([self._exclude_regex, 'hidebound/logs/progress'])
    data = tools.directory_to_dataframe(
        self._root,
        include_regex=self._include_regex,
        exclude_regex=exclude_re
    )
    self._logger.info(f'update: parsed {self._root}', step=1, total=total)

    if len(data) > 0:
        # Ordered enrichment/validation pipeline. Each stage mutates `data`
        # in place and is logged under its step number (2..11).
        pipeline = [
            ('add_specification',
             lambda: db_tools._add_specification(data, self._specifications)),
            ('validate_filepath', lambda: db_tools._validate_filepath(data)),
            ('add_file_traits', lambda: db_tools._add_file_traits(data)),
            ('add_relative_path',
             lambda: db_tools._add_relative_path(data, 'filepath', self._root)),
            ('add_asset_name', lambda: db_tools._add_asset_name(data)),
            ('add_asset_path', lambda: db_tools._add_asset_path(data)),
            ('add_relative_path',
             lambda: db_tools._add_relative_path(data, 'asset_path', self._root)),
            ('add_asset_type', lambda: db_tools._add_asset_type(data)),
            ('add_asset_traits', lambda: db_tools._add_asset_traits(data)),
            ('validate_assets', lambda: db_tools._validate_assets(data)),
        ]
        for step, (label, stage) in enumerate(pipeline, start=2):
            stage()
            self._logger.info('update: ' + label, step=step, total=total)

    data = db_tools._cleanup(data)
    self.data = data
    self._logger.info('update: cleanup', step=12, total=total)
    self._logger.info('update: complete', step=12, total=total)
    return self
def delete(self):
    # type: () -> "Database"
    '''
    Deletes hidebound/content and hidebound/metadata directories and all their
    contents.

    Returns:
        Database: self.
    '''
    total = 2
    # Remove each hidebound output directory (if present) and log the step
    # regardless of whether the directory existed.
    targets = [('content', 'data directory'), ('metadata', 'metadata directory')]
    for step, (subdir, label) in enumerate(targets, start=1):
        target = Path(self._hb_root, subdir)
        if target.exists():
            shutil.rmtree(target)
        self._logger.info('delete: ' + label, step=step, total=total)
    self._logger.info('delete: complete', step=2, total=total)
    return self
def call_webhooks(self):
# type () -> requests.Response
'''
Calls webhooks defined in config.
Yields:
requests.Response: Webhook response.
'''
total = len(self._webhooks)
for i, hook in enumerate(self._webhooks):
url = hook['url']
headers = hook.get('headers', None)
method = hook['method']
| |
jspectra[:, freq_dc, :] = (
jspectra[:, ind_vel[1], :] + jspectra[:, ind_vel[2], :]) / 2 # CORRECCION
if jcspectraExist:
jcspectra[:, freq_dc, :] = (
jcspectra[:, ind_vel[1], :] + jcspectra[:, ind_vel[2], :]) / 2
if mode == 2:
vel = numpy.array([-2, -1, 1, 2])
xx = numpy.zeros([4, 4])
for fil in range(4):
xx[fil, :] = vel[fil]**numpy.asarray(list(range(4)))
xx_inv = numpy.linalg.inv(xx)
xx_aux = xx_inv[0, :]
for ich in range(num_chan):
yy = jspectra[ich, ind_vel, :]
jspectra[ich, freq_dc, :] = numpy.dot(xx_aux, yy)
junkid = jspectra[ich, freq_dc, :] <= 0
cjunkid = sum(junkid)
if cjunkid.any():
jspectra[ich, freq_dc, junkid.nonzero()] = (
jspectra[ich, ind_vel[1], junkid] + jspectra[ich, ind_vel[2], junkid]) / 2
if jcspectraExist:
for ip in range(num_pairs):
yy = jcspectra[ip, ind_vel, :]
jcspectra[ip, freq_dc, :] = numpy.dot(xx_aux, yy)
self.dataOut.data_spc = jspectra
self.dataOut.data_cspc = jcspectra
return self.dataOut
class removeInterference(Operation):
    """Operation that removes interference lines from power and cross spectra.

    ``run`` dispatches on ``mode``: mode 1 uses :meth:`removeInterference`
    (height/profile statistics), mode 2 uses :meth:`removeInterference2`
    (cross-spectra line-power thresholding).
    """

    def removeInterference2(self):
        # Estimate interference from the low-power heights of each
        # cross-spectrum channel and blank (NaN) profiles flagged as
        # interference-dominated.
        cspc = self.dataOut.data_cspc
        spc = self.dataOut.data_spc  # NOTE(review): read but never used below
        Heights = numpy.arange(cspc.shape[2])
        realCspc = numpy.abs(cspc)

        for i in range(cspc.shape[0]):
            # Total magnitude per height (summed over profiles) for this pair.
            LinePower= numpy.sum(realCspc[i], axis=0)
            # Threshold relative to the ~90th-percentile line power.
            Threshold = numpy.amax(LinePower)-numpy.sort(LinePower)[len(Heights)-int(len(Heights)*0.1)]
            SelectedHeights = Heights[ numpy.where( LinePower < Threshold ) ]
            # Interference profile: magnitude summed over the quiet heights.
            InterferenceSum = numpy.sum( realCspc[i,:,SelectedHeights], axis=0 )
            # 98th / 99th percentile bounds of that estimate.
            InterferenceThresholdMin = numpy.sort(InterferenceSum)[int(len(InterferenceSum)*0.98)]
            InterferenceThresholdMax = numpy.sort(InterferenceSum)[int(len(InterferenceSum)*0.99)]
            InterferenceRange = numpy.where( ([InterferenceSum > InterferenceThresholdMin]))# , InterferenceSum < InterferenceThresholdMax]) )
            #InterferenceRange = numpy.where( ([InterferenceRange < InterferenceThresholdMax]))
            # Blank the flagged profiles only when fewer than ~30% of the
            # profiles are affected (otherwise the detection is distrusted).
            if len(InterferenceRange)<int(cspc.shape[1]*0.3):
                cspc[i,InterferenceRange,:] = numpy.NaN

        self.dataOut.data_cspc = cspc

    def removeInterference(self, interf = 2, hei_interf = None, nhei_interf = None, offhei_interf = None):
        """Statistical interference removal on the power (and cross) spectra.

        :param interf: algorithm variant flag; 2 enables per-profile cleanup.
        :param hei_interf: heights used to estimate interference; defaults to
            the upper half of the height range.
        :param nhei_interf: number of lowest-power heights averaged for the
            interference estimate (clamped to [1, count_hei]).
        :param offhei_interf: offset into the power-sorted height list.
        """
        jspectra = self.dataOut.data_spc
        jcspectra = self.dataOut.data_cspc
        jnoise = self.dataOut.getNoise()
        num_incoh = self.dataOut.nIncohInt

        num_channel = jspectra.shape[0]
        num_prof = jspectra.shape[1]
        num_hei = jspectra.shape[2]

        # hei_interf: default to the top half of the height range.
        if hei_interf is None:
            count_hei = int(num_hei / 2)
            hei_interf = numpy.asmatrix(list(range(count_hei))) + num_hei - count_hei
            hei_interf = numpy.asarray(hei_interf)[0]
        # nhei_interf: default 5, clamped to [1, count_hei].
        # NOTE(review): count_hei is only assigned when hei_interf was None;
        # passing hei_interf explicitly makes the clamp below raise NameError.
        if (nhei_interf == None):
            nhei_interf = 5
        if (nhei_interf < 1):
            nhei_interf = 1
        if (nhei_interf > count_hei):
            nhei_interf = count_hei
        if (offhei_interf == None):
            offhei_interf = 0

        ind_hei = list(range(num_hei))
        # mask_prof = numpy.asarray(range(num_prof - 2)) + 1
        # mask_prof[range(num_prof/2 - 1,len(mask_prof))] += 1
        mask_prof = numpy.asarray(list(range(num_prof)))
        num_mask_prof = mask_prof.size
        comp_mask_prof = [0, num_prof / 2]

        # noise_exist: determines whether jnoise was defined and contains the
        # noise level of each channel.
        if (jnoise.size < num_channel or numpy.isnan(jnoise).any()):
            jnoise = numpy.nan
        noise_exist = jnoise[0] < numpy.Inf
        # NOTE(review): if jnoise was just replaced by the scalar numpy.nan,
        # the jnoise[0] above raises TypeError — presumably unreachable in
        # practice; verify against callers of getNoise().

        # Interference-removal subroutine, applied per channel.
        for ich in range(num_channel):
            # Order the heights by their total power (ascending).
            power = jspectra[ich, mask_prof, :]
            power = power[:, hei_interf]
            power = power.sum(axis=0)
            psort = power.ravel().argsort()

            # Estimate the average interference from the lowest-power heights.
            junkspc_interf = jspectra[ich, :, hei_interf[psort[list(range(
                offhei_interf, nhei_interf + offhei_interf))]]]

            if noise_exist:
                # tmp_noise = jnoise[ich] / num_prof
                tmp_noise = jnoise[ich]
                junkspc_interf = junkspc_interf - tmp_noise
                #junkspc_interf[:,comp_mask_prof] = 0

            # Average interference spectrum across the selected heights.
            jspc_interf = junkspc_interf.sum(axis=0) / nhei_interf
            jspc_interf = jspc_interf.transpose()

            # Split profiles into noise-like and interference-like bins.
            noiseid = numpy.where(
                jspc_interf <= tmp_noise / numpy.sqrt(num_incoh))
            noiseid = noiseid[0]
            cnoiseid = noiseid.size
            interfid = numpy.where(
                jspc_interf > tmp_noise / numpy.sqrt(num_incoh))
            interfid = interfid[0]
            cinterfid = interfid.size

            if (cnoiseid > 0):
                jspc_interf[noiseid] = 0

            # Expand the set of profiles to clean by one on each side (wrapped).
            if (cinterfid > 0):
                new_interfid = (
                    numpy.r_[interfid - 1, interfid, interfid + 1] + num_prof) % num_prof
                new_interfid = numpy.asarray(new_interfid)
                new_interfid = {x for x in new_interfid}
                new_interfid = numpy.array(list(new_interfid))
                new_cinterfid = new_interfid.size
            else:
                new_cinterfid = 0

            for ip in range(new_cinterfid):
                # Use the median-ranked height as a robust estimate.
                ind = junkspc_interf[:, new_interfid[ip]].ravel().argsort()
                jspc_interf[new_interfid[ip]
                            ] = junkspc_interf[ind[nhei_interf // 2], new_interfid[ip]]

            # Subtract the interference estimate from all heights.
            jspectra[ich, :, ind_hei] = jspectra[ich, :,
                                                 ind_hei] - jspc_interf  # Fix indices

            # Remove interference at the strongest interference profile.
            ListAux = jspc_interf[mask_prof].tolist()
            maxid = ListAux.index(max(ListAux))

            if cinterfid > 0:
                for ip in range(cinterfid * (interf == 2) - 1):
                    ind = (jspectra[ich, interfid[ip], :] < tmp_noise *
                           (1 + 1 / numpy.sqrt(num_incoh))).nonzero()
                    cind = len(ind)

                    if (cind > 0):
                        # Replace sub-noise samples with jittered noise.
                        jspectra[ich, interfid[ip], ind] = tmp_noise * \
                            (1 + (numpy.random.uniform(cind) - 0.5) /
                             numpy.sqrt(num_incoh))

                # Interpolate the strongest profile from its neighbours at
                # offsets -2, -1, +1, +2 via a cubic fit (Vandermonde inverse).
                ind = numpy.array([-2, -1, 1, 2])
                xx = numpy.zeros([4, 4])

                for id1 in range(4):
                    xx[:, id1] = ind[id1]**numpy.asarray(list(range(4)))

                xx_inv = numpy.linalg.inv(xx)
                xx = xx_inv[:, 0]
                ind = (ind + maxid + num_mask_prof) % num_mask_prof
                yy = jspectra[ich, mask_prof[ind], :]
                jspectra[ich, mask_prof[maxid], :] = numpy.dot(
                    yy.transpose(), xx)

            # Clamp anything that fell below the noise floor.
            indAux = (jspectra[ich, :, :] < tmp_noise *
                      (1 - 1 / numpy.sqrt(num_incoh))).nonzero()
            jspectra[ich, indAux[0], indAux[1]] = tmp_noise * \
                (1 - 1 / numpy.sqrt(num_incoh))

        # Interference removal in the cross spectra.
        if jcspectra is None:
            return jspectra, jcspectra

        num_pairs = int(jcspectra.size / (num_prof * num_hei))
        jcspectra = jcspectra.reshape(num_pairs, num_prof, num_hei)

        for ip in range(num_pairs):
            #-------------------------------------------
            cspower = numpy.abs(jcspectra[ip, mask_prof, :])
            cspower = cspower[:, hei_interf]
            cspower = cspower.sum(axis=0)

            cspsort = cspower.ravel().argsort()
            junkcspc_interf = jcspectra[ip, :, hei_interf[cspsort[list(range(
                offhei_interf, nhei_interf + offhei_interf))]]]
            junkcspc_interf = junkcspc_interf.transpose()
            jcspc_interf = junkcspc_interf.sum(axis=1) / nhei_interf

            ind = numpy.abs(jcspc_interf[mask_prof]).ravel().argsort()

            # Replace the profiles in comp_mask_prof with the median complex
            # value of the lowest-power three quarters of profiles.
            median_real = int(numpy.median(numpy.real(
                junkcspc_interf[mask_prof[ind[list(range(3 * num_prof // 4))]], :])))
            median_imag = int(numpy.median(numpy.imag(
                junkcspc_interf[mask_prof[ind[list(range(3 * num_prof // 4))]], :])))
            comp_mask_prof = [int(e) for e in comp_mask_prof]
            # NOTE(review): numpy.complex is removed in NumPy >= 1.24; on
            # modern NumPy this needs the builtin complex(...) instead.
            junkcspc_interf[comp_mask_prof, :] = numpy.complex(
                median_real, median_imag)

            for iprof in range(num_prof):
                ind = numpy.abs(junkcspc_interf[iprof, :]).ravel().argsort()
                jcspc_interf[iprof] = junkcspc_interf[iprof, ind[nhei_interf // 2]]

            # Removing the interference.
            jcspectra[ip, :, ind_hei] = jcspectra[ip,
                                                  :, ind_hei] - jcspc_interf

            # Interpolate across the strongest interference profile, as above.
            ListAux = numpy.abs(jcspc_interf[mask_prof]).tolist()
            maxid = ListAux.index(max(ListAux))

            ind = numpy.array([-2, -1, 1, 2])
            xx = numpy.zeros([4, 4])

            for id1 in range(4):
                xx[:, id1] = ind[id1]**numpy.asarray(list(range(4)))

            xx_inv = numpy.linalg.inv(xx)
            xx = xx_inv[:, 0]

            ind = (ind + maxid + num_mask_prof) % num_mask_prof
            yy = jcspectra[ip, mask_prof[ind], :]
            jcspectra[ip, mask_prof[maxid], :] = numpy.dot(yy.transpose(), xx)

        # Save results.
        self.dataOut.data_spc = jspectra
        self.dataOut.data_cspc = jcspectra

        return 1

    def run(self, dataOut, interf = 2,hei_interf = None, nhei_interf = None, offhei_interf = None, mode=1):
        # Dispatch on mode.
        # NOTE(review): mode 1 forwards literal defaults rather than the
        # user-supplied interf/hei_interf/nhei_interf/offhei_interf arguments,
        # so those parameters are effectively ignored — likely unintended.
        self.dataOut = dataOut

        if mode == 1:
            self.removeInterference(interf = 2,hei_interf = None, nhei_interf = None, offhei_interf = None)
        elif mode == 2:
            self.removeInterference2()

        return self.dataOut
class IncohInt(Operation):
    # State for incoherent integration of spectra. Buffers accumulate sums
    # across profiles until either `n` profiles (byProfiles mode) or
    # `__integrationtime` seconds (byTime mode) have been collected.
    __profIndex = 0            # number of profiles accumulated so far
    __withOverapping = False   # (sic) overlapping-mode flag; unused in the visible code
    __byTime = False           # True -> integrate by elapsed time, not count

    __initime = None           # timestamp of the first profile in the window
    __lastdatatime = None      # timestamp of the most recent profile
    __integrationtime = None   # window length for byTime mode

    __buffer_spc = None        # running sum of power spectra
    __buffer_cspc = None       # running sum of cross spectra
    __buffer_dc = None         # running sum of DC channels

    __dataReady = False        # True once an integrated block is available
    __timeInterval = None

    n = None                   # integration count for byProfiles mode
def __init__(self):
    # No per-instance state is created here; buffers and counters are
    # (re)initialized lazily by setup().
    Operation.__init__(self)
def setup(self, n=None, timeInterval=None, overlapping=False):
    """
    Configure the integrator and reset all accumulation state.

    Inputs:
        n : Number of coherent integrations
        timeInterval : Time of integration. If the parameter "n" is selected this one does not work
        overlapping :
    """
    # Reset counters and buffers before choosing the integration mode.
    self.__dataReady = False
    self.__byTime = False
    self.__initime = None
    self.__lastdatatime = 0
    self.__profIndex = 0
    self.__buffer_spc = 0
    self.__buffer_cspc = 0
    self.__buffer_dc = 0

    if n is not None:
        # Integrate a fixed number of profiles.
        self.n = int(n)
    elif timeInterval is not None:
        # Integrate by elapsed time instead of profile count.
        self.__integrationtime = int(timeInterval)
        self.n = None
        self.__byTime = True
    else:
        raise ValueError("n or timeInterval should be specified ...")
def putData(self, data_spc, data_cspc, data_dc):
    """
    Accumulate one spectra profile into the running sums and advance the
    profile counter. A None cross-spectra or DC input disables that buffer.
    """
    self.__buffer_spc += data_spc

    if data_cspc is None:
        self.__buffer_cspc = None
    else:
        self.__buffer_cspc = self.__buffer_cspc + data_cspc

    if data_dc is None:
        self.__buffer_dc = None
    else:
        self.__buffer_dc = self.__buffer_dc + data_dc

    self.__profIndex += 1
def pushData(self):
    """
    Return the accumulated sums and how many profiles went into them, then
    reset the accumulators.

    Affected:
        self.__profileIndex
    """
    result = (self.__buffer_spc, self.__buffer_cspc, self.__buffer_dc,
              self.__profIndex)

    # Clear state for the next integration window.
    self.__buffer_spc = 0
    self.__buffer_cspc = 0
    self.__buffer_dc = 0
    self.__profIndex = 0

    return result
def byProfiles(self, *args):
    """Accumulate one profile; emit averages once `n` profiles are buffered."""
    self.__dataReady = False
    avg_spc, avg_cspc, avg_dc = None, None, None

    self.putData(*args)

    if self.__profIndex == self.n:
        avg_spc, avg_cspc, avg_dc, n = self.pushData()
        self.n = n
        self.__dataReady = True

    return avg_spc, avg_cspc, avg_dc
def byTime(self, datatime, *args):
    """Accumulate one profile; emit averages once the time window elapses."""
    self.__dataReady = False
    avg_spc, avg_cspc, avg_dc = None, None, None

    self.putData(*args)

    if (datatime - self.__initime) >= self.__integrationtime:
        avg_spc, avg_cspc, avg_dc, n = self.pushData()
        # Record how many profiles the window actually contained.
        self.n = n
        self.__dataReady = True

    return avg_spc, avg_cspc, avg_dc
def integrate(self, datatime, *args):
    """
    Feed one profile into the active integration mode.

    Returns (init_time, spc, cspc, dc) when an integrated block is ready,
    otherwise (None, None, None, None).
    """
    # First profile of a new window marks its start time.
    if self.__profIndex == 0:
        self.__initime = datatime

    if self.__byTime:
        averages = self.byTime(datatime, *args)
    else:
        averages = self.byProfiles(*args)

    if not self.__dataReady:
        return None, None, None, None

    return (self.__initime,) + averages
def run(self, dataOut, n=None, timeInterval=None, overlapping=False):
if n == 1:
return dataOut
dataOut.flagNoData = True
if not self.isConfig:
self.setup(n, | |
# coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class EdFiLearningObjective(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared Swagger type for each Python attribute; used by the generated
    # (de)serialization machinery.
    swagger_types = {
        'id': 'str',
        'academic_subjects': 'list[EdFiLearningObjectiveAcademicSubject]',
        'learning_objective_id': 'str',
        'namespace': 'str',
        'parent_learning_objective_reference': 'EdFiLearningObjectiveReference',
        'content_standard': 'EdFiLearningObjectiveContentStandard',
        'description': 'str',
        'grade_levels': 'list[EdFiLearningObjectiveGradeLevel]',
        'learning_standards': 'list[EdFiLearningObjectiveLearningStandard]',
        'nomenclature': 'str',
        'objective': 'str',
        'success_criteria': 'str',
        'etag': 'str'
    }

    # Maps each Python attribute name to its JSON property name in the API
    # payload (note 'etag' maps to the underscored '_etag').
    attribute_map = {
        'id': 'id',
        'academic_subjects': 'academicSubjects',
        'learning_objective_id': 'learningObjectiveId',
        'namespace': 'namespace',
        'parent_learning_objective_reference': 'parentLearningObjectiveReference',
        'content_standard': 'contentStandard',
        'description': 'description',
        'grade_levels': 'gradeLevels',
        'learning_standards': 'learningStandards',
        'nomenclature': 'nomenclature',
        'objective': 'objective',
        'success_criteria': 'successCriteria',
        'etag': '_etag'
    }
def __init__(self, id=None, academic_subjects=None, learning_objective_id=None, namespace=None, parent_learning_objective_reference=None, content_standard=None, description=None, grade_levels=None, learning_standards=None, nomenclature=None, objective=None, success_criteria=None, etag=None, _configuration=None):  # noqa: E501
    """EdFiLearningObjective - a model defined in Swagger"""  # noqa: E501
    # The configuration controls whether property setters perform
    # client-side validation.
    if _configuration is None:
        _configuration = Configuration()
    self._configuration = _configuration

    # Backing fields for the declared properties.
    self._id = None
    self._academic_subjects = None
    self._learning_objective_id = None
    self._namespace = None
    self._parent_learning_objective_reference = None
    self._content_standard = None
    self._description = None
    self._grade_levels = None
    self._learning_standards = None
    self._nomenclature = None
    self._objective = None
    self._success_criteria = None
    self._etag = None
    self.discriminator = None

    # Fields assigned unconditionally go through setters that may reject
    # None; optional fields are only assigned when supplied, leaving the
    # backing field None otherwise.
    if id is not None:
        self.id = id
    self.academic_subjects = academic_subjects
    self.learning_objective_id = learning_objective_id
    self.namespace = namespace
    if parent_learning_objective_reference is not None:
        self.parent_learning_objective_reference = parent_learning_objective_reference
    if content_standard is not None:
        self.content_standard = content_standard
    if description is not None:
        self.description = description
    if grade_levels is not None:
        self.grade_levels = grade_levels
    if learning_standards is not None:
        self.learning_standards = learning_standards
    if nomenclature is not None:
        self.nomenclature = nomenclature
    self.objective = objective
    if success_criteria is not None:
        self.success_criteria = success_criteria
    if etag is not None:
        self.etag = etag
@property
def id(self):
    """Gets the id of this EdFiLearningObjective.  # noqa: E501

    :return: The id of this EdFiLearningObjective.  # noqa: E501
    :rtype: str
    """
    # Plain accessor; the backing field is assigned by the `id` setter.
    return self._id
@id.setter
def id(self, id):
    """Sets the id of this EdFiLearningObjective.

    :param id: The id of this EdFiLearningObjective.  # noqa: E501
    :type: str
    """
    # No client-side validation is defined for this field.
    self._id = id
@property
def academic_subjects(self):
    """Gets the academic_subjects of this EdFiLearningObjective.  # noqa: E501

    An unordered collection of learningObjectiveAcademicSubjects. The description of the content or subject area (e.g., arts, mathematics, reading, stenography, or a foreign language) of an assessment.  # noqa: E501

    :return: The academic_subjects of this EdFiLearningObjective.  # noqa: E501
    :rtype: list[EdFiLearningObjectiveAcademicSubject]
    """
    # Plain accessor for the required academic-subjects collection.
    return self._academic_subjects
@academic_subjects.setter
def academic_subjects(self, academic_subjects):
    """Sets the academic_subjects of this EdFiLearningObjective.

    An unordered collection of learningObjectiveAcademicSubjects. The description of the content or subject area (e.g., arts, mathematics, reading, stenography, or a foreign language) of an assessment.  # noqa: E501

    :param academic_subjects: The academic_subjects of this EdFiLearningObjective.  # noqa: E501
    :type: list[EdFiLearningObjectiveAcademicSubject]
    """
    # Required collection: reject None whenever client-side validation is on.
    if self._configuration.client_side_validation:
        if academic_subjects is None:
            raise ValueError("Invalid value for `academic_subjects`, must not be `None`")  # noqa: E501
    self._academic_subjects = academic_subjects
@property
def learning_objective_id(self):
    """Gets the learning_objective_id of this EdFiLearningObjective.  # noqa: E501

    The identifier for the specific learning objective in the context of a standard (e.g., 111.15.3.1.A).  # noqa: E501

    :return: The learning_objective_id of this EdFiLearningObjective.  # noqa: E501
    :rtype: str
    """
    # Plain accessor for the required learning-objective identifier.
    return self._learning_objective_id
@learning_objective_id.setter
def learning_objective_id(self, learning_objective_id):
    """Sets the learning_objective_id of this EdFiLearningObjective.

    The identifier for the specific learning objective in the context of a standard (e.g., 111.15.3.1.A).  # noqa: E501

    :param learning_objective_id: The learning_objective_id of this EdFiLearningObjective.  # noqa: E501
    :type: str
    """
    if self._configuration.client_side_validation:
        # Required field, capped at 60 characters by the API spec.
        if learning_objective_id is None:
            raise ValueError("Invalid value for `learning_objective_id`, must not be `None`")  # noqa: E501
        if len(learning_objective_id) > 60:
            raise ValueError("Invalid value for `learning_objective_id`, length must be less than or equal to `60`")  # noqa: E501
    self._learning_objective_id = learning_objective_id
@property
def namespace(self):
    """Gets the namespace of this EdFiLearningObjective.  # noqa: E501

    Namespace for the LearningObjective.  # noqa: E501

    :return: The namespace of this EdFiLearningObjective.  # noqa: E501
    :rtype: str
    """
    # Plain accessor for the required namespace string.
    return self._namespace
@namespace.setter
def namespace(self, namespace):
    """Sets the namespace of this EdFiLearningObjective.

    Namespace for the LearningObjective.  # noqa: E501

    :param namespace: The namespace of this EdFiLearningObjective.  # noqa: E501
    :type: str
    """
    if self._configuration.client_side_validation:
        # Required field, capped at 255 characters by the API spec.
        if namespace is None:
            raise ValueError("Invalid value for `namespace`, must not be `None`")  # noqa: E501
        if len(namespace) > 255:
            raise ValueError("Invalid value for `namespace`, length must be less than or equal to `255`")  # noqa: E501
    self._namespace = namespace
@property
def parent_learning_objective_reference(self):
    """Gets the parent_learning_objective_reference of this EdFiLearningObjective.  # noqa: E501

    :return: The parent_learning_objective_reference of this EdFiLearningObjective.  # noqa: E501
    :rtype: EdFiLearningObjectiveReference
    """
    # Plain accessor for the optional parent-objective reference.
    return self._parent_learning_objective_reference
@parent_learning_objective_reference.setter
def parent_learning_objective_reference(self, parent_learning_objective_reference):
    """Sets the parent_learning_objective_reference of this EdFiLearningObjective.

    :param parent_learning_objective_reference: The parent_learning_objective_reference of this EdFiLearningObjective.  # noqa: E501
    :type: EdFiLearningObjectiveReference
    """
    # Optional reference; no client-side validation is defined.
    self._parent_learning_objective_reference = parent_learning_objective_reference
@property
def content_standard(self):
    """Gets the content_standard of this EdFiLearningObjective.  # noqa: E501

    :return: The content_standard of this EdFiLearningObjective.  # noqa: E501
    :rtype: EdFiLearningObjectiveContentStandard
    """
    # Plain accessor for the optional content-standard object.
    return self._content_standard
@content_standard.setter
def content_standard(self, content_standard):
    """Sets the content_standard of this EdFiLearningObjective.

    :param content_standard: The content_standard of this EdFiLearningObjective.  # noqa: E501
    :type: EdFiLearningObjectiveContentStandard
    """
    # Optional field; no client-side validation is defined.
    self._content_standard = content_standard
@property
def description(self):
    """Gets the description of this EdFiLearningObjective.  # noqa: E501

    The description of the LearningObjective.  # noqa: E501

    :return: The description of this EdFiLearningObjective.  # noqa: E501
    :rtype: str
    """
    # Plain accessor for the optional description string.
    return self._description
@description.setter
def description(self, description):
    """Sets the description of this EdFiLearningObjective.

    The description of the LearningObjective.  # noqa: E501

    :param description: The description of this EdFiLearningObjective.  # noqa: E501
    :type: str
    """
    # Optional field; only its length is validated (max 1024 characters).
    if self._configuration.client_side_validation and description is not None:
        if len(description) > 1024:
            raise ValueError("Invalid value for `description`, length must be less than or equal to `1024`")  # noqa: E501
    self._description = description
@property
def grade_levels(self):
    """Gets the grade_levels of this EdFiLearningObjective.  # noqa: E501

    An unordered collection of learningObjectiveGradeLevels. The grade level for which the LearningObjective is targeted. The semantics of null is assumed to mean that the learning objective is not associated with any grade level.  # noqa: E501

    :return: The grade_levels of this EdFiLearningObjective.  # noqa: E501
    :rtype: list[EdFiLearningObjectiveGradeLevel]
    """
    # Plain accessor for the optional grade-levels collection.
    return self._grade_levels
@grade_levels.setter
def grade_levels(self, grade_levels):
    """Sets the grade_levels of this EdFiLearningObjective.

    An unordered collection of learningObjectiveGradeLevels. The grade level for which the LearningObjective is targeted. The semantics of null is assumed to mean that the learning objective is not associated with any grade level.  # noqa: E501

    :param grade_levels: The grade_levels of this EdFiLearningObjective.  # noqa: E501
    :type: list[EdFiLearningObjectiveGradeLevel]
    """
    # Optional collection; no client-side validation is defined.
    self._grade_levels = grade_levels
@property
def learning_standards(self):
    """Gets the learning_standards of this EdFiLearningObjective.  # noqa: E501

    An unordered collection of learningObjectiveLearningStandards. LearningStandard(s) included in this objective.  # noqa: E501

    :return: The learning_standards of this EdFiLearningObjective.  # noqa: E501
    :rtype: list[EdFiLearningObjectiveLearningStandard]
    """
    # Plain accessor for the optional learning-standards collection.
    return self._learning_standards
@learning_standards.setter
def learning_standards(self, learning_standards):
    """Sets the learning_standards of this EdFiLearningObjective.

    An unordered collection of learningObjectiveLearningStandards. LearningStandard(s) included in this objective.  # noqa: E501

    :param learning_standards: The learning_standards of this EdFiLearningObjective.  # noqa: E501
    :type: list[EdFiLearningObjectiveLearningStandard]
    """
    # Optional collection; no client-side validation is defined.
    self._learning_standards = learning_standards
@property
def nomenclature(self):
    """Gets the nomenclature of this EdFiLearningObjective.  # noqa: E501

    Reflects the specific nomenclature used for the LearningObjective.  # noqa: E501

    :return: The nomenclature of this EdFiLearningObjective.  # noqa: E501
    :rtype: str
    """
    # Plain accessor for the optional nomenclature string.
    return self._nomenclature
@nomenclature.setter
def nomenclature(self, nomenclature):
"""Sets the nomenclature of this EdFiLearningObjective.
Reflects the specific nomenclature used for the LearningObjective. # noqa: E501
:param nomenclature: The nomenclature of this EdFiLearningObjective. # noqa: E501
| |
self,
query,
options,
fetch_function=fetch_fn,
collection_link=database_or_container_link,
page_iterator_class=query_iterable.QueryIterable
)
def QueryItemsChangeFeed(self, collection_link, options=None, response_hook=None, **kwargs):
    """Queries documents change feed in a collection.

    :param str collection_link:
        The link to the document collection.
    :param dict options:
        The request options for the request.
        options may also specify partition key range id.
    :param response_hook:
        A callable invoked with the response metadata
    :return:
        Query Iterable of Documents.
    :rtype:
        query_iterable.QueryIterable
    """
    # Pull the optional partition key range id out of the request options;
    # absent options (or a missing key) yield None.
    partition_key_range_id = (options or {}).get("partitionKeyRangeId")

    return self._QueryChangeFeed(
        collection_link,
        "Documents",
        options,
        partition_key_range_id,
        response_hook=response_hook,
        **kwargs
    )
def _QueryChangeFeed(
    self, collection_link, resource_type, options=None, partition_key_range_id=None, response_hook=None, **kwargs
):
    """Queries change feed of a resource in a collection.

    :param str collection_link:
        The link to the document collection.
    :param str resource_type:
        The type of the resource.
    :param dict options:
        The request options for the request.
    :param str partition_key_range_id:
        Specifies partition key range id.
    :param response_hook:
        A callable invoked with the response metadata
    :return:
        Query Iterable of Documents.
    :rtype:
        query_iterable.QueryIterable
    """
    if options is None:
        options = {}
    options["changeFeed"] = True

    # For now, change feed only supports the Documents resource type.
    resource_key_map = {"Documents": "docs"}
    if resource_type not in resource_key_map:
        raise NotImplementedError(resource_type + " change feed query is not supported.")
    resource_key = resource_key_map[resource_type]

    path = base.GetPathFromLink(collection_link, resource_key)
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)

    def feed_fetcher(options):
        # One page of the change feed plus the headers from that call.
        page = self.__QueryFeed(
            path,
            resource_key,
            collection_id,
            lambda r: r[resource_type],
            lambda _, b: b,
            None,
            options,
            partition_key_range_id,
            response_hook=response_hook,
            **kwargs
        )
        return page, self.last_response_headers

    return ItemPaged(
        self,
        None,
        options,
        fetch_function=feed_fetcher,
        collection_link=collection_link,
        page_iterator_class=query_iterable.QueryIterable
    )
def _ReadPartitionKeyRanges(self, collection_link, feed_options=None, **kwargs):
"""Reads Partition Key Ranges.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of PartitionKeyRanges.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self._QueryPartitionKeyRanges(collection_link, None, feed_options, **kwargs)
def _QueryPartitionKeyRanges(self, collection_link, query, options=None, **kwargs):
    """Queries Partition Key Ranges in a collection.

    :param str collection_link:
        The link to the document collection.
    :param (str or dict) query:
    :param dict options:
        The request options for the request.
    :return:
        Query Iterable of PartitionKeyRanges.
    :rtype:
        query_iterable.QueryIterable
    """
    if options is None:
        options = {}

    path = base.GetPathFromLink(collection_link, "pkranges")
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)

    def fetch_pkranges(options):
        # One page of partition key ranges plus the headers from that call.
        page = self.__QueryFeed(
            path, "pkranges", collection_id, lambda r: r["PartitionKeyRanges"],
            lambda _, b: b, query, options, **kwargs
        )
        return page, self.last_response_headers

    return ItemPaged(
        self, query, options, fetch_function=fetch_pkranges, page_iterator_class=query_iterable.QueryIterable
    )
def CreateItem(self, database_or_container_link, document, options=None, **kwargs):
    """Creates a document in a collection.

    :param str database_or_container_link:
        The link to the database when using partitioning, otherwise link to the document collection.
    :param dict document:
        The Azure Cosmos document to create.
    :param dict options:
        The request options for the request.
    :param bool options['disableAutomaticIdGeneration']:
        Disables the automatic id generation. If id is missing in the body and this
        option is true, an error will be returned.
    :return:
        The created Document.
    :rtype:
        dict
    """
    # A mutable default ({}) would be shared across calls, so the default is
    # None and a fresh dict is created per invocation.
    if options is None:
        options = {}

    # The link can be a database link when client-side partitioning is used;
    # only add the partition key for an actual container link.
    if base.IsItemContainerLink(database_or_container_link):
        options = self._AddPartitionKey(database_or_container_link, document, options)

    collection_id, document, path = self._GetContainerIdWithPathForItem(
        database_or_container_link, document, options
    )
    return self.Create(document, path, "docs", collection_id, None, options, **kwargs)
def UpsertItem(self, database_or_container_link, document, options=None, **kwargs):
    """Upserts a document in a collection.

    :param str database_or_container_link:
        The link to the database when using partitioning, otherwise link to the document collection.
    :param dict document:
        The Azure Cosmos document to upsert.
    :param dict options:
        The request options for the request.
    :param bool options['disableAutomaticIdGeneration']:
        Disables the automatic id generation. If id is missing in the body and this
        option is true, an error will be returned.
    :return:
        The upserted Document.
    :rtype:
        dict
    """
    # A mutable default ({}) would be shared across calls, so the default is
    # None and a fresh dict is created per invocation.
    if options is None:
        options = {}

    # The link can be a database link when client-side partitioning is used;
    # only add the partition key for an actual container link.
    if base.IsItemContainerLink(database_or_container_link):
        options = self._AddPartitionKey(database_or_container_link, document, options)

    collection_id, document, path = self._GetContainerIdWithPathForItem(
        database_or_container_link, document, options
    )
    return self.Upsert(document, path, "docs", collection_id, None, options, **kwargs)
# Raised (as a ValueError message) when no partition resolver was registered
# for the database link handed to a create/upsert call.
PartitionResolverErrorMessage = (
    "Couldn't find any partition resolvers for the database link provided. "
    "Ensure that the link you used when registering the partition resolvers "
    "matches the link provided or you need to register both types of database "
    "link(self link as well as ID based link)."
)
# Gets the collection id and path for the document
def _GetContainerIdWithPathForItem(self, database_or_container_link, document, options):
    """Resolve the collection id and request path for a document write.

    Returns a (collection_id, document, path) tuple; the returned document is
    a copy of the input, possibly with a generated GUID id.
    """
    if not database_or_container_link:
        raise ValueError("database_or_container_link is None or empty.")
    if document is None:
        raise ValueError("document is None.")

    CosmosClientConnection.__ValidateResource(document)

    # Work on a copy so the caller's dict is never mutated.
    document = document.copy()
    # Generate an id unless the caller supplied one or opted out.
    if not document.get("id") and not options.get("disableAutomaticIdGeneration"):
        document["id"] = base.GenerateGuidId()

    collection_link = database_or_container_link
    if base.IsDatabaseLink(database_or_container_link):
        # Client-side partitioning: resolve the concrete collection link.
        partition_resolver = self.GetPartitionResolver(database_or_container_link)
        if partition_resolver is None:
            raise ValueError(CosmosClientConnection.PartitionResolverErrorMessage)
        collection_link = partition_resolver.ResolveForCreate(document)

    path = base.GetPathFromLink(collection_link, "docs")
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
    return collection_id, document, path
def ReadItem(self, document_link, options=None, **kwargs):
"""Reads a document.
:param str document_link:
The link to the document.
:param dict options:
The request options for the request.
:return:
The read Document.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(document_link)
document_id = base.GetResourceIdOrFullNameFromLink(document_link)
return self.Read(path, "docs", document_id, None, options, **kwargs)
def ReadTriggers(self, collection_link, options=None, **kwargs):
"""Reads all triggers in a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
Query Iterable of Triggers.
:rtype:
query_iterable.QueryIterable
"""
if options is None:
options = {}
return self.QueryTriggers(collection_link, None, options, **kwargs)
    def QueryTriggers(self, collection_link, query, options=None, **kwargs):
        """Queries triggers in a collection.

        :param str collection_link:
            The link to the document collection.
        :param (str or dict) query:
            The query to run; ``None`` reads the entire trigger feed.
        :param dict options:
            The request options for the request.
        :return:
            Query Iterable of Triggers.
        :rtype:
            query_iterable.QueryIterable
        """
        if options is None:
            options = {}
        path = base.GetPathFromLink(collection_link, "triggers")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        # Page-fetch callback handed to ItemPaged; it closes over path/query
        # and returns (results, response headers) for one page.
        def fetch_fn(options):
            return (
                self.__QueryFeed(
                    path, "triggers", collection_id, lambda r: r["Triggers"], lambda _, b: b, query, options, **kwargs
                ),
                self.last_response_headers,
            )
        return ItemPaged(
            self, query, options, fetch_function=fetch_fn, page_iterator_class=query_iterable.QueryIterable
        )
def CreateTrigger(self, collection_link, trigger, options=None, **kwargs):
"""Creates a trigger in a collection.
:param str collection_link:
The link to the document collection.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The created Trigger.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, trigger = self._GetContainerIdWithPathForTrigger(collection_link, trigger)
return self.Create(trigger, path, "triggers", collection_id, None, options, **kwargs)
def UpsertTrigger(self, collection_link, trigger, options=None, **kwargs):
"""Upserts a trigger in a collection.
:param str collection_link:
The link to the document collection.
:param dict trigger:
:param dict options:
The request options for the request.
:return:
The upserted Trigger.
:rtype:
dict
"""
if options is None:
options = {}
collection_id, path, trigger = self._GetContainerIdWithPathForTrigger(collection_link, trigger)
return self.Upsert(trigger, path, "triggers", collection_id, None, options, **kwargs)
    def _GetContainerIdWithPathForTrigger(self, collection_link, trigger):  # pylint: disable=no-self-use
        """Validate *trigger*, coerce its script to a string body and resolve
        the trigger feed path and collection id for *collection_link*.

        The caller's dict is not mutated; a normalized copy is returned in the
        ``(collection_id, path, trigger)`` tuple.
        """
        CosmosClientConnection.__ValidateResource(trigger)
        trigger = trigger.copy()
        # "serverScript" wins over "body"; either way the script ends up as a
        # string under the "body" key.
        if trigger.get("serverScript"):
            trigger["body"] = str(trigger.pop("serverScript", ""))
        elif trigger.get("body"):
            trigger["body"] = str(trigger["body"])
        path = base.GetPathFromLink(collection_link, "triggers")
        collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
        return collection_id, path, trigger
def ReadTrigger(self, trigger_link, options=None, **kwargs):
"""Reads | |
If possible inspect the ordering on the data we were given and
# update the table to reflect that.
order_by = self.data.ordering
if order_by is not None:
self.order_by = order_by
else:
self.order_by = order_by
if template is not None:
self.template_name = template
warnings.warn('template argument to Table is deprecated. Use template_name instead.', DeprecationWarning)
else:
self.template_name = template_name
# If a request is passed, configure for request
if request:
RequestConfig(request).configure(self)
self._counter = count()
def get_top_pinned_data(self):
'''
Return data for top pinned rows containing data for each row.
Iterable type like: queryset, list of dicts, list of objects.
Having a non-zero number of pinned rows
will not result in an empty resultset message being rendered,
even if there are no regular data rows
Returns:
`None` (default) no pinned rows at the top, iterable, data for pinned rows at the top.
Note:
To show pinned row this method should be overridden.
Example:
>>> class TableWithTopPinnedRows(Table):
... def get_top_pinned_data(self):
... return [{
... 'column_a' : 'some value',
... 'column_c' : 'other value',
... }]
'''
return None
def get_bottom_pinned_data(self):
'''
Return data for bottom pinned rows containing data for each row.
Iterable type like: queryset, list of dicts, list of objects.
Having a non-zero number of pinned rows
will not result in an empty resultset message being rendered,
even if there are no regular data rows
Returns:
`None` (default) no pinned rows at the bottom, iterable, data for pinned rows at the bottom.
Note:
To show pinned row this method should be overridden.
Example:
>>> class TableWithBottomPinnedRows(Table):
... def get_bottom_pinned_data(self):
... return [{
... 'column_a' : 'some value',
... 'column_c' : 'other value',
... }]
'''
return None
def before_render(self, request):
'''
A way to hook into the moment just before rendering the template.
Can be used to hide a column.
Arguments:
request: contains the `WGSIRequest` instance, containing a `user` attribute if
`.django.contrib.auth.middleware.AuthenticationMiddleware` is added to
your `MIDDLEWARE_CLASSES`.
Example::
class Table(tables.Table):
name = tables.Column(orderable=False)
country = tables.Column(orderable=False)
def before_render(self, request):
if request.user.has_perm('foo.delete_bar'):
self.columns.hide('country')
else:
self.columns.show('country')
'''
return
def as_html(self, request):
'''
Render the table to an HTML table, adding `request` to the context.
'''
# reset counter for new rendering
self._counter = count()
template = get_template(self.template_name)
context = {
'table': self,
'request': request
}
self.before_render(request)
return template.render(context)
def as_values(self, exclude_columns=None):
'''
Return a row iterator of the data which would be shown in the table where
the first row is the table headers.
arguments:
exclude_columns (iterable): columns to exclude in the data iterator.
This can be used to output the table data as CSV, excel, for example using the
`~.export.ExportMixin`.
If a column is defined using a :ref:`table.render_FOO`, the returned value from
that method is used. If you want to differentiate between the rendered cell
and a value, use a `value_Foo`-method::
class Table(tables.Table):
name = tables.Column()
def render_name(self, value):
return format_html('<span class="name">{}</span>', value)
def value_name(self, value):
return value
will have a value wrapped in `<span>` in the rendered HTML, and just returns
the value when `as_values()` is called.
'''
if exclude_columns is None:
exclude_columns = ()
def excluded(column):
if column.column.exclude_from_export:
return True
return column.name in exclude_columns
yield [
force_text(column.header, strings_only=True)
for column in self.columns if not excluded(column)
]
for row in self.rows:
yield [
force_text(row.get_cell_value(column.name), strings_only=True)
for column in row.table.columns if not excluded(column)
]
def has_footer(self):
'''
Returns True if any of the columns define a ``_footer`` attribute or a
``render_footer()`` method
'''
return self.show_footer and any(column.has_footer() for column in self.columns)
@property
def show_header(self):
return (self._show_header if self._show_header is not None
else self._meta.show_header)
@show_header.setter
def show_header(self, value):
self._show_header = value
@property
def order_by(self):
return self._order_by
@order_by.setter
def order_by(self, value):
'''
Order the rows of the table based on columns.
Arguments:
value: iterable or comma separated string of order by aliases.
'''
# collapse empty values to ()
order_by = () if not value else value
# accept string
order_by = order_by.split(',') if isinstance(order_by, six.string_types) else order_by
valid = []
# everything's been converted to a iterable, accept iterable!
for alias in order_by:
name = OrderBy(alias).bare
if name in self.columns and self.columns[name].orderable:
valid.append(alias)
self._order_by = OrderByTuple(valid)
self.data.order_by(self._order_by)
@property
def order_by_field(self):
return (self._order_by_field if self._order_by_field is not None
else self._meta.order_by_field)
@order_by_field.setter
def order_by_field(self, value):
self._order_by_field = value
@property
def page_field(self):
return (self._page_field if self._page_field is not None
else self._meta.page_field)
@page_field.setter
def page_field(self, value):
self._page_field = value
def paginate(self, klass=Paginator, per_page=None, page=1, *args, **kwargs):
'''
Paginates the table using a paginator and creates a ``page`` property
containing information for the current page.
Arguments:
klass (`~django.core.paginator.Paginator`): A paginator class to
paginate the results.
per_page (int): Number of records to display on each page.
page (int): Page to display.
Extra arguments are passed to the paginator.
Pagination exceptions (`~django.core.paginator.EmptyPage` and
`~django.core.paginator.PageNotAnInteger`) may be raised from this
method and should be handled by the caller.
'''
per_page = per_page or self._meta.per_page
self.paginator = klass(self.rows, per_page, *args, **kwargs)
self.page = self.paginator.page(page)
@property
def per_page_field(self):
return (self._per_page_field if self._per_page_field is not None
else self._meta.per_page_field)
@per_page_field.setter
def per_page_field(self, value):
self._per_page_field = value
@property
def prefix(self):
return (self._prefix if self._prefix is not None
else self._meta.prefix)
@prefix.setter
def prefix(self, value):
self._prefix = value
@property
def prefixed_order_by_field(self):
return '%s%s' % (self.prefix, self.order_by_field)
@property
def prefixed_page_field(self):
return '%s%s' % (self.prefix, self.page_field)
@property
def prefixed_per_page_field(self):
return '%s%s' % (self.prefix, self.per_page_field)
@property
def sequence(self):
return self._sequence
@sequence.setter
def sequence(self, value):
if value:
value = Sequence(value)
value.expand(self.base_columns.keys())
self._sequence = value
@property
def orderable(self):
if self._orderable is not None:
return self._orderable
else:
return self._meta.orderable
@orderable.setter
def orderable(self, value):
self._orderable = value
@property
def template_name(self):
if self._template is not None:
return self._template
else:
return self._meta.template_name
@template_name.setter
def template_name(self, value):
self._template = value
@property
def paginated_rows(self):
'''
Return the rows for the current page if the table is paginated, else all rows.
'''
if hasattr(self, 'page'):
return self.page.object_list
return self.rows
def get_column_class_names(self, classes_set, bound_column):
'''
Returns a set of HTML class names for cells (both td and th) of a
**bound column** in this table.
By default this returns the column class names defined in the table's
attributes, and additionally the bound column's name.
This method can be overridden to change the default behavior, for
example to simply `return classes_set`.
Arguments:
classes_set(set of string): a set of class names to be added
to the cell, retrieved from the column's attributes. In the case
of a header cell (th), this also includes ordering classes.
To set the classes for a column, see `.Column`.
To configure ordering classes, see :ref:`ordering-class-name`
bound_column(`.BoundColumn`): the bound column the class names are
determined for. Useful for accessing `bound_column.name`.
Returns:
A set of class names to be added to cells of this column
'''
classes_set.add(bound_column.name)
return classes_set
# Python 2/3 compatible way to enable the metaclass
@six.add_metaclass(DeclarativeColumnsMetaclass)
class Table(TableBase):
    # Ensure the concrete Table class carries the docstring written on
    # TableBase.
    __doc__ = TableBase.__doc__
# Py2-era equivalent kept for reference:
# Table = DeclarativeColumnsMetaclass(str('Table'), (TableBase, ), {})
def table_factory(model, table=Table, fields=None, exclude=None,
localize=None):
"""
Returns Table class for given `model`, equivalent to defining a custom table class::
class MyTable(tables.Table):
class Meta:
model = model
Arguments:
model (`~django.db.models.Model`): Model associated with the new table
table (`.Table`): Base Table class used to create the new one
fields (list of str): Fields displayed in tables
exclude (list of str): Fields exclude in tables
localize (list of str): Fields to localize
"""
attrs = {'model': model}
if fields is not None:
attrs['fields'] = fields
if exclude is not None:
attrs['exclude'] = exclude
if localize is not None:
attrs['localize'] = localize
# If parent form class already has an inner Meta, the Meta we're
# creating needs to inherit from the parent's inner meta.
parent = (object,)
if hasattr(table, 'Meta'):
parent = (table.Meta, object)
Meta = type(str('Meta'), parent, attrs)
# Give this new table class a reasonable name.
class_name = model.__name__ + str('Table')
# Class attributes for the new table class.
table_class_attrs = {
'Meta': Meta,
}
return type(table)(class_name, | |
"dtype": dtype,
"strides": strides,
"padding": padding,
"dimension_numbers": dim_nums,
"precision": precision
}
for dtype in all_dtypes
for lhs_shape, filter_shape, strides, padding, dim_nums in [
((2, 5), (), (), [], ("NC", "OI", "CN")),
((2, 3, 4), (2,), (2,), [(0, 2)], ("CNH", "OHI", "HNC")),
((3, 1, 4, 5), (1, 3), (1, 3), [(3, 1), (2, 2)],
("NCHW", "OIHW", "NCHW")),
((3, 2, 5, 6), (4, 3), (4, 3), [(5, 2), (2, 4)],
None),
((1, 2, 3, 4), (1, 1), (1, 1), [(0, 0), (0, 0)],
("NCWH", "OHWI", "CNHW")),
((1, 2, 3, 4), (3, 2), (1, 1), [(0, 0), (0, 0)],
("CWHN", "HOWI", "NCHW")),
((2, 3, 4, 5, 6), (2, 1, 3), (2, 1, 3), [(1, 2), (5, 3), (3, 5)],
("NHWDC", "HDIWO", "DCWNH"))
]
for precision in [None,
lax.Precision.DEFAULT,
lax.Precision.HIGH,
lax.Precision.HIGHEST]
))
def testConvGeneralDilatedPatchesNonOverlapping(self,
lhs_shape,
filter_shape,
dtype,
strides,
padding,
dimension_numbers,
precision):
if np.issubdtype(dtype, np.integer) or np.issubdtype(dtype, np.bool_):
# TODO(b/183565702): Support integer convolutions on CPU/GPU.
if jtu.device_under_test() == "gpu":
raise SkipTest("Integer convolution not yet supported on GPU")
rng = jtu.rand_small(self.rng())
lhs = rng(lhs_shape, dtype)
if dimension_numbers is None:
lhs_spec, rhs_spec, out_spec = "NCHW", "OIHW", "NCHW"
else:
lhs_spec, rhs_spec, out_spec = dimension_numbers
filter_spec = ''.join(c for c in rhs_spec if c not in ('I', 'O'))
patches_spec = out_spec.replace('C', 'C' + filter_spec.lower())
full_padding = []
for c in lhs_spec:
if c in ('N', 'C'):
full_padding += [(0, 0)]
else:
full_padding += [padding[filter_spec.index(c)]]
lhs_padded = np.pad(lhs, full_padding, 'constant')
out = lax.transpose(lhs_padded, [lhs_spec.index(c) for c in out_spec])
patches = lax.conv_general_dilated_patches(
lhs=lhs,
filter_shape=filter_shape,
window_strides=strides,
padding=padding,
dimension_numbers=dimension_numbers,
precision=precision
)
source = []
# Test that output spatial shape is factored into `#patches x patch_size`.
for c in out_spec:
out_c = out.shape[out_spec.index(c)]
patch_c = patches.shape[out_spec.index(c)]
if c == 'N':
self.assertEqual(out_c, patch_c)
elif c == 'C':
self.assertEqual(out_c * np.prod(filter_shape), patch_c)
else:
self.assertEqual(out_c, patch_c * filter_shape[filter_spec.index(c)])
source += [patches_spec.index(c), patches_spec.index(c.lower())]
# Test that stacking patches together gives the source image, padded.
c = out_spec.index('C')
patches = patches.reshape(patches.shape[:c] +
(lhs_shape[lhs_spec.index('C')],) +
filter_shape +
patches.shape[c + 1:]
)
patches = np.moveaxis(patches, source, range(len(source)))
for i in range(len(filter_shape)):
patches = patches.reshape(patches.shape[:i] + (-1,) +
patches.shape[2 + i:])
patches = np.moveaxis(
patches,
range(len(filter_shape)),
[out_spec.index(c) for c in out_spec if c not in ('N', 'C')])
self.assertAllClose(out, patches)
# TODO(mattjj): test conv_general_dilated against numpy
def testConv0DIsDot(self):
rng = jtu.rand_default(self.rng())
def args_maker():
return [rng((10, 5), np.float32), rng((5, 7), np.float32)]
jnp_fun = partial(lax.conv_general_dilated, window_strides=(),
padding='VALID', dimension_numbers=('NC', 'IO', 'NC'))
self._CompileAndCheck(jnp_fun, args_maker)
self._CheckAgainstNumpy(np.dot, jnp_fun, args_maker, tol=.1)
def testGradConv0D(self):
# Reproduces a failure in neural_tangents not caught in our presubmit tests
# See cl/367416742.
lhs = np.ones((2, 5), dtype=np.float32)
rhs = np.ones((5, 10), dtype=np.float32)
def f_jax(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, window_strides=(),
padding=(), lhs_dilation=(), rhs_dilation=(),
dimension_numbers=lax.ConvDimensionNumbers((0, 1), (1, 0), (0, 1)),
batch_group_count=1, feature_group_count=1, precision=None,
preferred_element_type=None)
res, pullback = jax.vjp(f_jax, lhs, rhs)
grad = pullback(np.ones_like(res))
self.assertAllClose((lhs * 10., rhs * 2.), grad)
  @staticmethod
  def _conv_transpose_via_grad(data, kernel, strides, padding,
                               rhs_dilation=None, dimension_numbers=None):
    """Helper method: calculates conv transpose via grad for testing.

    Builds a placeholder output of the shape a forward convolution would
    produce, then returns the VJP of that convolution applied to *data* —
    i.e. the lhs-gradient — which is the reference for conv_transpose.
    """
    assert len(data.shape) == len(kernel.shape)
    nspatial = len(data.shape) - 2
    one = (1,) * nspatial
    rhs_dilation = rhs_dilation or one
    dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
                                    dimension_numbers)
    in_shape = np.take(data.shape, dn.lhs_spec)
    in_sdims = in_shape[2:]
    k_shape = np.take(kernel.shape, dn.rhs_spec)
    k_sdims = k_shape[2:]
    # Effective kernel spatial size after rhs (kernel) dilation.
    e_k_sdims = [(k-1) * r + 1 for k, r in zip(k_sdims, rhs_dilation)]
    if padding == 'VALID':
      o_sdims = [in_sdims[i]*strides[i] + max(e_k_sdims[i]-strides[i],0)
                 for i in range(nspatial)]
    elif padding == 'SAME':
      o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]
    o_shape = [in_shape[0], k_shape[1]] + o_sdims
    # Permute the canonical (N, C, spatial...) shape back into out_spec order.
    out_spec_inv = [x[0] for x in
                    sorted(enumerate(dn.out_spec), key=lambda x: x[1])]
    o_layout = np.take(np.array(o_shape), out_spec_inv)
    placeholder = np.ones(o_layout, data.dtype)
    conv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,
                                              one, rhs_dilation, dn)
    _, g = jax.vjp(conv, placeholder)
    return g(data)[0]
  @staticmethod
  def _transpose_conv_kernel(data, kernel, dimension_numbers):
    """Flip the kernel along all spatial axes and swap its I/O channel axes."""
    dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
                                    dimension_numbers)
    spatial_axes = np.array(dn.rhs_spec)[2:]
    for axis in spatial_axes:
      kernel = np.flip(kernel, axis)
    # rhs_spec[0]/rhs_spec[1] are the output/input feature axes.
    kernel = np.swapaxes(kernel, dn.rhs_spec[0], dn.rhs_spec[1])
    return kernel
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
       "dspec": dspec}
      for lhs_shape, rhs_shape in [
          ((b, 9, 10, i), (k, k, j, i))  # NB: i,j flipped in RHS for transpose
          for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
      for dtype in float_dtypes
      for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]
      for padding in ["VALID", "SAME"]
      for dspec in [('NHWC', 'HWIO', 'NHWC'),]
      for rhs_dilation in [None, (2, 2)]))
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,
                           padding, dspec, rhs_dilation):
    """2D conv_transpose with transpose_kernel=True matches the lhs-grad of conv."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    # NB: this test calculates conv_transpose performing identically to the
    # lhs-grad of conv.
    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                rhs_dilation=rhs_dilation,
                                dimension_numbers=dspec,
                                transpose_kernel=True)
    # Reference implementation: the VJP of the forward convolution.
    def fun_via_grad(lhs, rhs):
      return self._conv_transpose_via_grad(lhs, rhs, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)
    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
       "dspec": dspec}
      for lhs_shape, rhs_shape in [
          ((b, 9, 10, i), (k, k, i, j))
          for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
      for dtype in float_dtypes
      for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]
      for padding in ["VALID", "SAME"]
      for dspec in [('NHWC', 'HWIO', 'NHWC'),]
      for rhs_dilation in [None, (2, 2)]))
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,
                          padding, dspec, rhs_dilation):
    """2D conv_transpose (untransposed kernel) matches grad with a flipped kernel."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                rhs_dilation=rhs_dilation,
                                dimension_numbers=dspec,
                                transpose_kernel=False)
    # Reference: flip/swap the kernel, then take the VJP of the forward conv.
    def fun_via_grad(lhs, rhs):
      rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
      return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)
    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
       "dspec": dspec}
      for lhs_shape, rhs_shape in [
          ((b, 10, i), (k, i, j))
          for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
      for dtype in float_dtypes
      for strides in [(1,), (2,), (3,)]
      for padding in ["VALID", "SAME"]
      for dspec in [('NHC', 'HIO', 'NHC'),]
      for rhs_dilation in [None, (2,)]))
  def testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,
                          padding, dspec, rhs_dilation):
    """1D conv_transpose matches the gradient-based reference implementation."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                dimension_numbers=dspec,
                                rhs_dilation=rhs_dilation,
                                transpose_kernel=False)
    # Reference: flip/swap the kernel, then take the VJP of the forward conv.
    def fun_via_grad(lhs, rhs):
      rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
      return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)
    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name":
       "_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
           jtu.format_shape_dtype_string(lhs_shape, dtype),
           jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
       "dspec": dspec}
      for lhs_shape, rhs_shape in [
          ((b, i), (i, j))
          for b, i, j in itertools.product([2,3],[2,3],[2,3])]
      for dtype in float_dtypes
      for strides in [()]
      for padding in ["VALID", "SAME"]
      for dspec in [('NC', 'IO', 'NC'),]
      for rhs_dilation in [None, ()]))
  def testConvTranspose0D(self, lhs_shape, rhs_shape, dtype, strides,
                          padding, dspec, rhs_dilation):
    """0-spatial-dim conv_transpose matches the gradient-based reference."""
    rng = jtu.rand_small(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    def fun(lhs, rhs):
      return lax.conv_transpose(lhs, rhs, strides, padding,
                                dimension_numbers=dspec,
                                rhs_dilation=rhs_dilation,
                                transpose_kernel=False)
    # Reference: flip/swap the kernel, then take the VJP of the forward conv.
    def fun_via_grad(lhs, rhs):
      rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
      return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
                                           rhs_dilation=rhs_dilation,
                                           dimension_numbers=dspec)
    # NB: below just checks for agreement, we're not calling numpy.
    self._CheckAgainstNumpy(fun_via_grad, fun, args_maker)
def testConvTransposePaddingList(self):
# Regression test for https://github.com/google/jax/discussions/8695
a = jnp.ones((28,28))
b = jnp.ones((3,3))
c = lax.conv_general_dilated(a[None, None], b[None, None], (1,1), [(0,0),(0,0)], (1,1))
self.assertArraysEqual(c, 9 * jnp.ones((1, 1, 26, 26)))
  @parameterized.named_parameters(jtu.cases_from_list(
      {"testcase_name": "_lhs_shape={}_rhs_shape={}_precision={}".format(
          jtu.format_shape_dtype_string(lhs_shape, dtype),
          jtu.format_shape_dtype_string(rhs_shape, dtype),
          precision),
       "lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
       "precision": precision}
      for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
      for dtype in all_dtypes
      for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH,
                        lax.Precision.HIGHEST,
                        (lax.Precision.DEFAULT, lax.Precision.HIGHEST)]))
  def testDot(self, lhs_shape, rhs_shape, dtype, precision):
    # Compile-and-check smoke test of lax.dot over vector/matrix shapes,
    # all dtypes, and every precision setting (including mixed tuples).
    rng = jtu.rand_default(self.rng())
    args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
    self._CompileAndCheck(partial(lax.dot, precision=precision), args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_preferred_element_type={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
jtu.format_shape_dtype_string((), preferred_element_type)
),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype, "preferred_element_type": preferred_element_type
}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), | |
<= 0)
# Machine-generated linking constraints over the binary variables.  Each row
# has the form -b_i + b_j - b_k <= 0, i.e. b_k >= b_j - b_i: whenever b_j is 1
# and b_i is 0 the third variable b_k is forced to 1.  The i/j pairs sweep all
# ordered pairs within a group while k walks a dedicated block of pair
# variables (appears to encode pairwise/transitivity coupling — confirm
# against the generating model).  Do not edit by hand.
m.c1309 = Constraint(expr= - m.b130 + m.b131 - m.b155 <= 0)
m.c1310 = Constraint(expr= - m.b130 + m.b132 - m.b156 <= 0)
m.c1311 = Constraint(expr= - m.b130 + m.b133 - m.b157 <= 0)
m.c1312 = Constraint(expr= - m.b130 + m.b134 - m.b158 <= 0)
m.c1313 = Constraint(expr= - m.b130 + m.b135 - m.b159 <= 0)
m.c1314 = Constraint(expr= - m.b130 + m.b136 - m.b160 <= 0)
m.c1315 = Constraint(expr= - m.b130 + m.b137 - m.b161 <= 0)
m.c1316 = Constraint(expr= - m.b131 + m.b132 - m.b162 <= 0)
m.c1317 = Constraint(expr= - m.b131 + m.b133 - m.b163 <= 0)
m.c1318 = Constraint(expr= - m.b131 + m.b134 - m.b164 <= 0)
m.c1319 = Constraint(expr= - m.b131 + m.b135 - m.b165 <= 0)
m.c1320 = Constraint(expr= - m.b131 + m.b136 - m.b166 <= 0)
m.c1321 = Constraint(expr= - m.b131 + m.b137 - m.b167 <= 0)
m.c1322 = Constraint(expr= - m.b132 + m.b133 - m.b168 <= 0)
m.c1323 = Constraint(expr= - m.b132 + m.b134 - m.b169 <= 0)
m.c1324 = Constraint(expr= - m.b132 + m.b135 - m.b170 <= 0)
m.c1325 = Constraint(expr= - m.b132 + m.b136 - m.b171 <= 0)
m.c1326 = Constraint(expr= - m.b132 + m.b137 - m.b172 <= 0)
m.c1327 = Constraint(expr= - m.b133 + m.b134 - m.b173 <= 0)
m.c1328 = Constraint(expr= - m.b133 + m.b135 - m.b174 <= 0)
m.c1329 = Constraint(expr= - m.b133 + m.b136 - m.b175 <= 0)
m.c1330 = Constraint(expr= - m.b133 + m.b137 - m.b176 <= 0)
m.c1331 = Constraint(expr= - m.b134 + m.b135 - m.b177 <= 0)
m.c1332 = Constraint(expr= - m.b134 + m.b136 - m.b178 <= 0)
m.c1333 = Constraint(expr= - m.b134 + m.b137 - m.b179 <= 0)
m.c1334 = Constraint(expr= - m.b135 + m.b136 - m.b180 <= 0)
m.c1335 = Constraint(expr= - m.b135 + m.b137 - m.b181 <= 0)
m.c1336 = Constraint(expr= - m.b136 + m.b137 - m.b182 <= 0)
# Same pattern for the next group of binaries (b138-b146).
m.c1337 = Constraint(expr= - m.b138 + m.b139 - m.b147 <= 0)
m.c1338 = Constraint(expr= - m.b138 + m.b140 - m.b148 <= 0)
m.c1339 = Constraint(expr= - m.b138 + m.b141 - m.b149 <= 0)
m.c1340 = Constraint(expr= - m.b138 + m.b142 - m.b150 <= 0)
m.c1341 = Constraint(expr= - m.b138 + m.b143 - m.b151 <= 0)
m.c1342 = Constraint(expr= - m.b138 + m.b144 - m.b152 <= 0)
m.c1343 = Constraint(expr= - m.b138 + m.b145 - m.b153 <= 0)
m.c1344 = Constraint(expr= - m.b138 + m.b146 - m.b154 <= 0)
m.c1345 = Constraint(expr= - m.b139 + m.b140 - m.b155 <= 0)
m.c1346 = Constraint(expr= - m.b139 + m.b141 - m.b156 <= 0)
m.c1347 = Constraint(expr= - m.b139 + m.b142 - m.b157 <= 0)
m.c1348 = Constraint(expr= - m.b139 + m.b143 - m.b158 <= 0)
m.c1349 = Constraint(expr= - m.b139 + m.b144 - m.b159 <= 0)
m.c1350 = Constraint(expr= - m.b139 + m.b145 - m.b160 <= 0)
m.c1351 = Constraint(expr= - m.b139 + m.b146 - m.b161 <= 0)
m.c1352 = Constraint(expr= - m.b140 + m.b141 - m.b162 <= 0)
m.c1353 = Constraint(expr= - m.b140 + m.b142 - m.b163 <= 0)
m.c1354 = Constraint(expr= - m.b140 + m.b143 - m.b164 <= 0)
m.c1355 = Constraint(expr= - m.b140 + m.b144 - m.b165 <= 0)
m.c1356 = Constraint(expr= - m.b140 + m.b145 - m.b166 <= 0)
m.c1357 = Constraint(expr= - m.b140 + m.b146 - m.b167 <= 0)
m.c1358 = Constraint(expr= - m.b141 + m.b142 - m.b168 <= 0)
m.c1359 = Constraint(expr= - m.b141 + m.b143 - m.b169 <= 0)
m.c1360 = Constraint(expr= - m.b141 + m.b144 - m.b170 <= 0)
m.c1361 = Constraint(expr= - m.b141 + m.b145 - m.b171 <= 0)
m.c1362 = Constraint(expr= - m.b141 + m.b146 - m.b172 <= 0)
m.c1363 = Constraint(expr= - m.b142 + m.b143 - m.b173 <= 0)
m.c1364 = Constraint(expr= - m.b142 + m.b144 - m.b174 <= 0)
m.c1365 = Constraint(expr= - m.b142 + m.b145 - m.b175 <= 0)
m.c1366 = Constraint(expr= - m.b142 + m.b146 - m.b176 <= 0)
m.c1367 = Constraint(expr= - m.b143 + m.b144 - m.b177 <= 0)
m.c1368 = Constraint(expr= - m.b143 + m.b145 - m.b178 <= 0)
m.c1369 = Constraint(expr= - m.b143 + m.b146 - m.b179 <= 0)
m.c1370 = Constraint(expr= - m.b144 + m.b145 - m.b180 <= 0)
m.c1371 = Constraint(expr= - m.b144 + m.b146 - m.b181 <= 0)
m.c1372 = Constraint(expr= - m.b145 + m.b146 - m.b182 <= 0)
# Same pattern for the group b147-b154.
m.c1373 = Constraint(expr= - m.b147 + m.b148 - m.b155 <= 0)
m.c1374 = Constraint(expr= - m.b147 + m.b149 - m.b156 <= 0)
m.c1375 = Constraint(expr= - m.b147 + m.b150 - m.b157 <= 0)
m.c1376 = Constraint(expr= - m.b147 + m.b151 - m.b158 <= 0)
m.c1377 = Constraint(expr= - m.b147 + m.b152 - m.b159 <= 0)
m.c1378 = Constraint(expr= - m.b147 + m.b153 - m.b160 <= 0)
m.c1379 = Constraint(expr= - m.b147 + m.b154 - m.b161 <= 0)
m.c1380 = Constraint(expr= - m.b148 + m.b149 - m.b162 <= 0)
m.c1381 = Constraint(expr= - m.b148 + m.b150 - m.b163 <= 0)
m.c1382 = Constraint(expr= - m.b148 + m.b151 - m.b164 <= 0)
m.c1383 = Constraint(expr= - m.b148 + m.b152 - m.b165 <= 0)
m.c1384 = Constraint(expr= - m.b148 + m.b153 - m.b166 <= 0)
m.c1385 = Constraint(expr= - m.b148 + m.b154 - m.b167 <= 0)
m.c1386 = Constraint(expr= - m.b149 + m.b150 - m.b168 <= 0)
m.c1387 = Constraint(expr= - m.b149 + m.b151 - m.b169 <= 0)
m.c1388 = Constraint(expr= - m.b149 + m.b152 - m.b170 <= 0)
m.c1389 = Constraint(expr= - m.b149 + m.b153 - m.b171 <= 0)
m.c1390 = Constraint(expr= - m.b149 + m.b154 - m.b172 <= 0)
m.c1391 = Constraint(expr= - m.b150 + m.b151 - m.b173 <= 0)
m.c1392 = Constraint(expr= - m.b150 + m.b152 - m.b174 <= 0)
m.c1393 = Constraint(expr= - m.b150 + m.b153 - m.b175 <= 0)
m.c1394 = Constraint(expr= - m.b150 + m.b154 - m.b176 <= 0)
m.c1395 = Constraint(expr= - m.b151 + m.b152 - m.b177 <= 0)
m.c1396 = Constraint(expr= - m.b151 + m.b153 - m.b178 <= 0)
m.c1397 = Constraint(expr= - m.b151 + m.b154 - m.b179 <= 0)
m.c1398 = Constraint(expr= - m.b152 + m.b153 - m.b180 <= 0)
m.c1399 = Constraint(expr= - m.b152 + m.b154 - m.b181 <= 0)
m.c1400 = Constraint(expr= - m.b153 + m.b154 - m.b182 <= 0)
# Same pattern for the group b155-b161.
m.c1401 = Constraint(expr= - m.b155 + m.b156 - m.b162 <= 0)
m.c1402 = Constraint(expr= - m.b155 + m.b157 - m.b163 <= 0)
m.c1403 = Constraint(expr= - m.b155 + m.b158 - m.b164 <= 0)
m.c1404 = Constraint(expr= - m.b155 + m.b159 - m.b165 <= 0)
m.c1405 = Constraint(expr= - m.b155 + m.b160 - m.b166 <= 0)
m.c1406 = Constraint(expr= - m.b155 + m.b161 - m.b167 <= 0)
m.c1407 = Constraint(expr= - m.b156 + m.b157 - m.b168 <= 0)
m.c1408 = Constraint(expr= - m.b156 + m.b158 - m.b169 <= 0)
m.c1409 = Constraint(expr= - m.b156 + m.b159 - m.b170 <= 0)
m.c1410 = Constraint(expr= - m.b156 + m.b160 - m.b171 <= 0)
m.c1411 = Constraint(expr= - m.b156 + m.b161 - m.b172 <= 0)
m.c1412 = Constraint(expr= - m.b157 + m.b158 - m.b173 <= 0)
m.c1413 = Constraint(expr= - m.b157 + m.b159 - m.b174 <= 0)
m.c1414 = Constraint(expr= - m.b157 + m.b160 - m.b175 <= 0)
m.c1415 = Constraint(expr= - m.b157 + m.b161 - m.b176 <= 0)
m.c1416 = Constraint(expr= - m.b158 + m.b159 - m.b177 <= 0)
m.c1417 = Constraint(expr= - m.b158 + m.b160 - m.b178 <= 0)
m.c1418 = Constraint(expr= - m.b158 + m.b161 - m.b179 <= 0)
m.c1419 = Constraint(expr= - m.b159 + m.b160 - m.b180 <= 0)
m.c1420 = Constraint(expr= - m.b159 + m.b161 - m.b181 <= 0)
m.c1421 = Constraint(expr= - m.b160 + m.b161 - m.b182 <= 0)
# Same pattern for the group b162-b167.
m.c1422 = Constraint(expr= - m.b162 + m.b163 - m.b168 <= 0)
m.c1423 = Constraint(expr= - m.b162 + m.b164 - m.b169 <= 0)
m.c1424 = Constraint(expr= - m.b162 + m.b165 - m.b170 <= 0)
m.c1425 = Constraint(expr= - m.b162 + m.b166 - m.b171 <= 0)
m.c1426 = Constraint(expr= - m.b162 + m.b167 - m.b172 <= 0)
m.c1427 = Constraint(expr= - m.b163 + m.b164 - m.b173 <= 0)
m.c1428 = Constraint(expr= - m.b163 + m.b165 - m.b174 <= 0)
m.c1429 = Constraint(expr= - m.b163 + m.b166 - m.b175 <= 0)
m.c1430 = Constraint(expr= - m.b163 + m.b167 - m.b176 <= 0)
m.c1431 = Constraint(expr= - m.b164 + m.b165 - m.b177 <= 0)
m.c1432 = Constraint(expr= - m.b164 + m.b166 - m.b178 <= 0)
m.c1433 = Constraint(expr= - m.b164 + m.b167 - m.b179 <= 0)
m.c1434 = Constraint(expr= - m.b165 + m.b166 - m.b180 <= 0)
m.c1435 = Constraint(expr= - m.b165 + m.b167 - m.b181 <= 0)
m.c1436 = Constraint(expr= - m.b166 + m.b167 - m.b182 | |
# pyCGM_Single/pycgmKinetics.py (pyCGM)
#pyCGM
# This module was contributed by <NAME>
# the CoM calculation is not an exact clone of PiG,
# but the differences are not clinically significant.
# We will add updates accordingly.
#
from __future__ import division
import os
import numpy as np
import sys
if sys.version_info[0]==2:
pyver = 2
else:
pyver = 3
#helper functions useful for dealing with frames of data, i.e. 1d arrays of (x,y,z)
#coordinate. Also in Utilities but need to clean everything up somewhat!
def f(p, x):
    """Evaluate the linear model ``p[0] * x + p[1]``.

    Parameters
    ----------
    p : list
        Coefficients with a length of at least 2: ``p[0]`` is the slope
        and ``p[1]`` the intercept.
    x : int or float
        Point at which the line is evaluated.

    Returns
    -------
    int or float
        The first value in p scaled by x, added to the second value in p.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import f
    >>> f([1, 2], 10)
    12
    >>> np.around(f(np.array([5.16312215, 8.79307163]), 2.0), 8)
    19.11931593
    """
    slope, intercept = p[0], p[1]
    return slope * x + intercept
def dot(v, w):
    """Compute the dot product of two 3D vectors.

    Parameters
    ----------
    v : list
        First 3-element list.
    w : list
        Second 3-element list.

    Returns
    -------
    int or float
        The dot product of vectors v and w.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import dot
    >>> dot([1, 2, 3], [4, 5, 6])
    32
    """
    # Pairwise multiply the components and accumulate the products.
    return sum(a * b for a, b in zip(v, w))
def length(v):
    """Compute the Euclidean length of a 3D vector.

    Parameters
    ----------
    v : list
        A 3-element list.

    Returns
    -------
    float
        The magnitude ``sqrt(x**2 + y**2 + z**2)`` of v.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import length
    >>> np.around(length([1, 2, 3]), 8)
    3.74165739
    """
    # Sum of squared components, then square root.
    return np.sqrt(sum(c * c for c in v))
def vector(b, e):
    """Compute the difference vector from point b to point e.

    Parameters
    ----------
    b : list
        First 3D point (the origin of the difference).
    e : list
        Second 3D point (the tip of the difference).

    Returns
    -------
    tuple
        The component-wise difference ``e - b``.

    Examples
    --------
    >>> from .pycgmKinetics import vector
    >>> vector([1, 2, 3], [4, 5, 6])
    (3, 3, 3)
    """
    # Subtract start components from end components.
    return tuple(tip - base for base, tip in zip(b, e))
def unit(v):
    """Normalize a 3D vector to unit length.

    Parameters
    ----------
    v : list
        A 3-element list (must be non-zero).

    Returns
    -------
    tuple
        The vector v scaled to magnitude 1.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import unit
    >>> np.around(unit([1, 2, 3]), 8)
    array([0.26726124, 0.53452248, 0.80178373])
    """
    # Divide every component by the vector's magnitude.
    mag = np.sqrt(sum(c * c for c in v))
    return tuple(c / mag for c in v)
def distance(p0, p1):
    """Compute the Euclidean distance between two 3D points.

    Parameters
    ----------
    p0 : list
        First x,y,z coordinate point.
    p1 : list
        Second x,y,z coordinate point.

    Returns
    -------
    float
        The distance between points p0 and p1.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import distance
    >>> np.around(distance([1, 2, 3], [4, 5, 6]), 8)
    5.19615242
    """
    # Square the component deltas, sum them, take the root.
    deltas = (b - a for a, b in zip(p0, p1))
    return np.sqrt(sum(d * d for d in deltas))
def scale(v, sc):
    """Scale a 3D vector by a scalar factor.

    Parameters
    ----------
    v : list
        A 3-element list.
    sc : int or float
        The scaling factor.

    Returns
    -------
    tuple
        The vector v with each component multiplied by sc.

    Examples
    --------
    >>> from .pycgmKinetics import scale
    >>> scale([1, 2, 3], 2)
    (2, 4, 6)
    """
    # Multiply every component by the scalar.
    return tuple(component * sc for component in v)
def add(v, w):
    """Add two 3D vectors component-wise.

    Parameters
    ----------
    v : list
        First 3-element list.
    w : list
        Second 3-element list.

    Returns
    -------
    tuple
        The component-wise sum of v and w.

    Examples
    --------
    >>> from .pycgmKinetics import add
    >>> add([1, 2, 3], [4, 5, 6])
    (5, 7, 9)
    """
    # Pair corresponding components and sum each pair.
    return tuple(a + b for a, b in zip(v, w))
def pnt2line(pnt, start, end):
    """Compute the distance from a point to a line segment.

    The segment is given by two endpoints, ``start`` and ``end``. The
    point is projected onto the segment; the projection parameter is
    clamped to [0, 1] so the nearest point never falls outside the
    segment.

    Parameters
    ----------
    pnt : list
        The x,y,z point to measure from.
    start : list
        First x,y,z endpoint of the segment.
    end : list
        Second x,y,z endpoint of the segment.

    Returns
    -------
    dist, nearest, pnt : tuple
        dist is the shortest distance from pnt to the segment,
        nearest is the closest point on the segment to pnt,
        pnt is the original input point, returned unchanged.

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import pnt2line
    >>> [np.around(arr, 8) for arr in pnt2line([1, 2, 3], [4, 5, 6], [7, 8, 9])]
    [5.19615242, array([4., 5., 6.]), array([1, 2, 3])]
    """
    # Segment direction and the point, both expressed relative to start.
    seg = tuple(e - s for s, e in zip(start, end))
    rel = tuple(p - s for s, p in zip(start, pnt))
    seg_len = np.sqrt(sum(c * c for c in seg))
    # Projection parameter: dot(seg, rel) / |seg|^2, computed via the
    # unit direction and the start-relative point scaled by 1/|seg|.
    t = sum((c / seg_len) * (r / seg_len) for c, r in zip(seg, rel))
    # Clamp so the nearest point stays on the segment.
    t = max(0.0, min(1.0, t))
    proj = tuple(c * t for c in seg)
    dist = np.sqrt(sum((r - a) ** 2 for a, r in zip(proj, rel)))
    nearest = tuple(a + s for a, s in zip(proj, start))
    return dist, nearest, pnt
#def norm3d(v):
# try:
# return np.asarray(sqrt((v[0]*v[0]+v[1]*v[1]+v[2]*v[2])))
# except:
# return np.nan
def findL5_Pelvis(frame):
    """Estimate the L5 marker position from the pelvis.

    Markers used: LHip, RHip, Pelvis_axis.

    L5 is placed at the hip midpoint, offset along the pelvis z-axis by
    0.925 times the inter-hip distance (the ratio from hip-joint-centre
    level to the top of lumbar 5).

    Parameters
    ----------
    frame : dict
        Dictionary of marker positions; must contain the keys
        'LHip', 'RHip', and 'Pelvis_axis'.

    Returns
    -------
    midHip, L5 : tuple
        The x,y,z positions of the hip midpoint (1x3 array) and the
        estimated L5 marker (1x3 array).

    Examples
    --------
    >>> import numpy as np
    >>> from .pycgmKinetics import findL5_Pelvis
    >>> Pelvis_axis = [np.array([251.60830688, 391.74131775, 1032.89349365]),
    ...                np.array([[251.74063624, 392.72694721, 1032.78850073],
    ...                          [250.61711554, 391.87232862, 1032.8741063],
    ...                          [251.60295336, 391.84795134, 1033.88777762]]),
    ...                np.array([231.57849121, 210.25262451, 1052.24969482])]
    >>> frame = {'Pelvis_axis': Pelvis_axis,
    ...          'RHip': np.array([182.57097863, 339.43231855, 935.529000126]),
    ...          'LHip': np.array([308.38050472, 322.80342417, 937.98979061])}
    >>> np.around(findL5_Pelvis(frame), 8)
    array([[ 245.47574168,  331.11787136,  936.75939537],
           [ 271.52716019,  371.69050709, 1043.80997977]])
    """
    lhjc = frame['LHip']
    rhjc = frame['RHip']
    mid_hip = (lhjc + rhjc) / 2
    # Inter-hip distance scaled by the 0.925 hip-to-L5 ratio.
    hip_sep = np.sqrt(sum((a - b) ** 2 for a, b in zip(rhjc, lhjc)))
    offset = hip_sep * 0.925
    # Unit direction of the pelvis z-axis (third row of the axis matrix).
    z_axis = frame['Pelvis_axis'][1][2]
    mag = np.sqrt(sum(c * c for c in z_axis))
    norm_dir = np.array([c / mag for c in z_axis])
    l5 = mid_hip + offset * norm_dir
    return mid_hip, l5
def findL5_Thorax(frame):
"""Calculate L5 Markers Given Thorax function
Markers used: C7, RHip, LHip, Thorax_axis
Parameters
----------
frame : dict
Dictionaries of marker lists.
Returns
-------
L5 : array
Returns the x,y,z marker positions of the L5 in a 1x3 array.
Examples
--------
>>> from .pycgmKinetics import findL5_Thorax
>>> import numpy as np
>>> Thorax_axis = [[[256.3454633226447, 365.7223958512035, 1461.920891187948], | |
S(3)/2), (1, 2, S(3)/2)) )/5 + \
sqrt(10)*JzKetCoupled(S(5)/2, -S(1)/2, (S(1)/2, 1, 1), ((1,
3, S(3)/2), (1, 2, S(5)/2)) )/10
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
-sqrt(2)*JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(1)/2)) )/3 - \
JzKetCoupled(S(1)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(1)/2)) )/3 - \
2*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)) )/3 + \
sqrt(5)*JzKetCoupled(S(3)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(3)/2)) )/15 + \
sqrt(5)*JzKetCoupled(S(
5)/2, S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(5)/2)) )/5
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(1, 0)), ((1, 3), (1, 2)) ) == \
JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(1)/2)) )/3 - \
sqrt(2)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(1)/2)) )/3 - \
sqrt(2)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)) )/3 - \
sqrt(10)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(3)/2)) )/15 + \
sqrt(10)*JzKetCoupled(S(5)/2, -S(1)/2, (S(1)/2, 1, 1), ((1,
3, S(3)/2), (1, 2, S(5)/2)) )/5
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(1, 0), JzKet(1, -1)), ((1, 3), (1, 2)) ) == \
-sqrt(15)*JzKetCoupled(S(3)/2, -S(3)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(3)/2)) )/5 + \
sqrt(10)*JzKetCoupled(S(5)/2, -S(3)/2, (S(1)/2, 1, 1), ((1,
3, S(3)/2), (1, 2, S(5)/2)) )/5
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
-2*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(1)/2)) )/3 + \
sqrt(2)*JzKetCoupled(S(1)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(1)/2)) )/6 - \
sqrt(2)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)) )/3 + \
2*sqrt(10)*JzKetCoupled(S(3)/2, -S(1)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(3)/2)) )/15 + \
sqrt(10)*JzKetCoupled(S(5)/2, -S(1)/2, (S(1)/2, 1, 1), ((1,
3, S(3)/2), (1, 2, S(5)/2)) )/10
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(1, 0)), ((1, 3), (1, 2)) ) == \
-sqrt(3)*JzKetCoupled(S(3)/2, -S(3)/2, (S(1)/2, 1, 1), ((1, 3, S(1)/2), (1, 2, S(3)/2)) )/3 + \
2*sqrt(15)*JzKetCoupled(S(3)/2, -S(3)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(3)/2)) )/15 + \
sqrt(10)*JzKetCoupled(S(5)/2, -S(3)/2, (S(1)/2, 1, 1), ((1,
3, S(3)/2), (1, 2, S(5)/2)) )/5
assert couple(TensorProduct(JzKet(S(1)/2, S(-1)/2), JzKet(1, -1), JzKet(1, -1)), ((1, 3), (1, 2)) ) == \
JzKetCoupled(S(
5)/2, -S(5)/2, (S(1)/2, 1, 1), ((1, 3, S(3)/2), (1, 2, S(5)/2)) )
# j1=1, 1, 1
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
JzKetCoupled(3, 3, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, 0)), ((1, 3), (1, 2)) ) == \
sqrt(2)*JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/2 - \
sqrt(6)*JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/6 + \
sqrt(3)*JzKetCoupled(3, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/3
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 1), JzKet(1, -1)), ((1, 3), (1, 2)) ) == \
sqrt(3)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 0), (1, 2, 1)) )/3 - \
JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1)) )/2 + \
sqrt(15)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/30 + \
JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/2 - \
sqrt(3)*JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/6 + \
sqrt(15)*JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/15
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
sqrt(6)*JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/3 + \
sqrt(3)*JzKetCoupled(3, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/3
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, 0)), ((1, 3), (1, 2)) ) == \
JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1)) )/2 - \
sqrt(15)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/10 + \
JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/2 + \
sqrt(3)*JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/6 + \
2*sqrt(15)*JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/15
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, 0), JzKet(1, -1)), ((1, 3), (1, 2)) ) == \
-sqrt(6)*JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0)) )/6 + \
sqrt(3)*JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 0), (1, 2, 1)) )/3 - \
sqrt(15)*JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/15 + \
sqrt(3)*JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/3 + \
sqrt(10)*JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/10
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
sqrt(15)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/5 + \
sqrt(3)*JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/3 + \
sqrt(15)*JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/15
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, 0)), ((1, 3), (1, 2)) ) == \
sqrt(6)*JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0)) )/6 + \
JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1)) )/2 + \
sqrt(15)*JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/10 + \
sqrt(3)*JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/6 + \
JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/2 + \
sqrt(10)*JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/10
assert couple(TensorProduct(JzKet(1, 1), JzKet(1, -1), JzKet(1, -1)), ((1, 3), (1, 2)) ) == \
sqrt(3)*JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 0), (1, 2, 1)) )/3 + \
JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 1)) )/2 + \
sqrt(15)*JzKetCoupled(1, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/30 + \
JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/2 + \
sqrt(3)*JzKetCoupled(2, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/6 + \
sqrt(15)*JzKetCoupled(3, -1, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/15
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
-sqrt(2)*JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/2 - \
sqrt(6)*JzKetCoupled(2, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/6 + \
sqrt(3)*JzKetCoupled(3, 2, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/3
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, 0)), ((1, 3), (1, 2)) ) == \
-sqrt(3)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 0), (1, 2, 1)) )/3 + \
sqrt(15)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/15 - \
sqrt(3)*JzKetCoupled(2, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/3 + \
2*sqrt(15)*JzKetCoupled(3, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/15
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, 1), JzKet(1, -1)), ((1, 3), (1, 2)) ) == \
sqrt(6)*JzKetCoupled(0, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 0)) )/6 - \
JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 1)) )/2 + \
sqrt(15)*JzKetCoupled(1, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/10 + \
sqrt(3)*JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 1), (1, 2, 2)) )/6 - \
JzKetCoupled(2, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 2)) )/2 + \
sqrt(10)*JzKetCoupled(3, 0, (1, 1, 1), ((1, 3, 2), (1, 2, 3)) )/10
assert couple(TensorProduct(JzKet(1, 0), JzKet(1, 0), JzKet(1, 1)), ((1, 3), (1, 2)) ) == \
-JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 1), (1, 2, 1)) )/2 - \
sqrt(15)*JzKetCoupled(1, 1, (1, 1, 1), ((1, 3, 2), (1, 2, 1)) )/10 - \
JzKetCoupled(2, 1, (1, | |
24.4, 0.8)
model.createNode(2540, 0, 24.4, 0.4)
model.createNode(2541, 0, 24, 2.4)
model.createNode(2542, 0, 24, 2)
model.createNode(2543, 0, 24, 1.6)
model.createNode(2544, 0, 24, 1.2)
model.createNode(2545, 0, 24, 0.8)
model.createNode(2546, 0, 24, 0.4)
model.createNode(2547, 0, 23.6, 2.4)
model.createNode(2548, 0, 23.6, 2)
model.createNode(2549, 0, 23.6, 1.6)
model.createNode(2550, 0, 23.6, 1.2)
model.createNode(2551, 0, 23.6, 0.8)
model.createNode(2552, 0, 23.6, 0.4)
model.createNode(2553, 0, 23.2, 2.4)
model.createNode(2554, 0, 23.2, 2)
model.createNode(2555, 0, 23.2, 1.6)
model.createNode(2556, 0, 23.2, 1.2)
model.createNode(2557, 0, 23.2, 0.8)
model.createNode(2558, 0, 23.2, 0.4)
model.createNode(2559, 0, 22.8, 2.4)
model.createNode(2560, 0, 22.8, 2)
model.createNode(2561, 0, 22.8, 1.6)
model.createNode(2562, 0, 22.8, 1.2)
model.createNode(2563, 0, 22.8, 0.8)
model.createNode(2564, 0, 22.8, 0.4)
model.createNode(2565, 0, 22.4, 2.4)
model.createNode(2566, 0, 22.4, 2)
model.createNode(2567, 0, 22.4, 1.6)
model.createNode(2568, 0, 22.4, 1.2)
model.createNode(2569, 0, 22.4, 0.8)
model.createNode(2570, 0, 22.4, 0.4)
model.createNode(2571, 0, 22, 2.4)
model.createNode(2572, 0, 22, 2)
model.createNode(2573, 0, 22, 1.6)
model.createNode(2574, 0, 22, 1.2)
model.createNode(2575, 0, 22, 0.8)
model.createNode(2576, 0, 22, 0.4)
model.createNode(2577, 0, 21.6, 2.4)
model.createNode(2578, 0, 21.6, 2)
model.createNode(2579, 0, 21.6, 1.6)
model.createNode(2580, 0, 21.6, 1.2)
model.createNode(2581, 0, 21.6, 0.8)
model.createNode(2582, 0, 21.6, 0.4)
model.createNode(2583, 0, 21.2, 2.4)
model.createNode(2584, 0, 21.2, 2)
model.createNode(2585, 0, 21.2, 1.6)
model.createNode(2586, 0, 21.2, 1.2)
model.createNode(2587, 0, 21.2, 0.8)
model.createNode(2588, 0, 21.2, 0.4)
model.createNode(2589, 0, 20.8, 2.4)
model.createNode(2590, 0, 20.8, 2)
model.createNode(2591, 0, 20.8, 1.6)
model.createNode(2592, 0, 20.8, 1.2)
model.createNode(2593, 0, 20.8, 0.8)
model.createNode(2594, 0, 20.8, 0.4)
model.createNode(2595, 0, 20.4, 2.4)
model.createNode(2596, 0, 20.4, 2)
model.createNode(2597, 0, 20.4, 1.6)
model.createNode(2598, 0, 20.4, 1.2)
model.createNode(2599, 0, 20.4, 0.8)
model.createNode(2600, 0, 20.4, 0.4)
model.createNode(2601, 0, 20, 2.4)
model.createNode(2602, 0, 20, 2)
model.createNode(2603, 0, 20, 1.6)
model.createNode(2604, 0, 20, 1.2)
model.createNode(2605, 0, 20, 0.8)
model.createNode(2606, 0, 20, 0.4)
model.createNode(2607, 0, 19.6, 2.4)
model.createNode(2608, 0, 19.6, 2)
model.createNode(2609, 0, 19.6, 1.6)
model.createNode(2610, 0, 19.6, 1.2)
model.createNode(2611, 0, 19.6, 0.8)
model.createNode(2612, 0, 19.6, 0.4)
model.createNode(2613, 0, 19.2, 2.4)
model.createNode(2614, 0, 19.2, 2)
model.createNode(2615, 0, 19.2, 1.6)
model.createNode(2616, 0, 19.2, 1.2)
model.createNode(2617, 0, 19.2, 0.8)
model.createNode(2618, 0, 19.2, 0.4)
model.createNode(2619, 0, 18.8, 2.4)
model.createNode(2620, 0, 18.8, 2)
model.createNode(2621, 0, 18.8, 1.6)
model.createNode(2622, 0, 18.8, 1.2)
model.createNode(2623, 0, 18.8, 0.8)
model.createNode(2624, 0, 18.8, 0.4)
model.createNode(2625, 0, 18.4, 2.4)
model.createNode(2626, 0, 18.4, 2)
model.createNode(2627, 0, 18.4, 1.6)
model.createNode(2628, 0, 18.4, 1.2)
model.createNode(2629, 0, 18.4, 0.8)
model.createNode(2630, 0, 18.4, 0.4)
model.createNode(2631, 0, 18, 2.4)
model.createNode(2632, 0, 18, 2)
model.createNode(2633, 0, 18, 1.6)
model.createNode(2634, 0, 18, 1.2)
model.createNode(2635, 0, 18, 0.8)
model.createNode(2636, 0, 18, 0.4)
model.createNode(2637, 0, 17.6, 2.4)
model.createNode(2638, 0, 17.6, 2)
model.createNode(2639, 0, 17.6, 1.6)
model.createNode(2640, 0, 17.6, 1.2)
model.createNode(2641, 0, 17.6, 0.8)
model.createNode(2642, 0, 17.6, 0.4)
model.createNode(2643, 0, 17.2, 2.4)
model.createNode(2644, 0, 17.2, 2)
model.createNode(2645, 0, 17.2, 1.6)
model.createNode(2646, 0, 17.2, 1.2)
model.createNode(2647, 0, 17.2, 0.8)
model.createNode(2648, 0, 17.2, 0.4)
model.createNode(2649, 0, 16.8, 2.4)
model.createNode(2650, 0, 16.8, 2)
model.createNode(2651, 0, 16.8, 1.6)
model.createNode(2652, 0, 16.8, 1.2)
model.createNode(2653, 0, 16.8, 0.8)
model.createNode(2654, 0, 16.8, 0.4)
model.createNode(2655, 0, 16.4, 2.4)
model.createNode(2656, 0, 16.4, 2)
model.createNode(2657, 0, 16.4, 1.6)
model.createNode(2658, 0, 16.4, 1.2)
model.createNode(2659, 0, 16.4, 0.8)
model.createNode(2660, 0, 16.4, 0.4)
model.createNode(2661, 2.14807, 32, 2.06104)
model.createNode(2662, 2.30487, 32, 1.77366)
model.createNode(2663, 2.45007, 32, 1.44983)
model.createNode(2664, 2.56799, 32, 1.10429)
model.createNode(2665, 2.65359, 32, 0.744414)
model.createNode(2666, 2.70873, 32, 0.375008)
model.createNode(2667, 2.14575, 31.6, 2.05906)
model.createNode(2668, 2.30506, 31.6, 1.77108)
model.createNode(2669, 2.45187, 31.6, 1.44713)
model.createNode(2670, 2.57045, 31.6, 1.10191)
model.createNode(2671, 2.65588, 31.6, 0.742705)
model.createNode(2672, 2.71005, 31.6, 0.374193)
model.createNode(2673, 2.14352, 31.2, 2.05772)
model.createNode(2674, 2.30365, 31.2, 1.7694)
model.createNode(2675, 2.4511, 31.2, 1.44541)
model.createNode(2676, 2.57015, 31.2, 1.10041)
model.createNode(2677, 2.65589, 31.2, 0.741639)
model.createNode(2678, 2.71015, 31.2, 0.373678)
model.createNode(2679, 2.1417, 30.8, 2.05649)
model.createNode(2680, 2.30173, 30.8, 1.76799)
model.createNode(2681, 2.44927, 30.8, 1.44402)
model.createNode(2682, 2.56862, 30.8, 1.09922)
model.createNode(2683, 2.65485, 30.8, 0.740805)
model.createNode(2684, 2.70967, 30.8, 0.373267)
model.createNode(2685, 2.14006, 30.4, 2.05528)
model.createNode(2686, 2.29968, 30.4, 1.76664)
model.createNode(2687, 2.4471, 30.4, 1.44272)
model.createNode(2688, 2.56666, 30.4, 1.09813)
model.createNode(2689, 2.65342, 30.4, 0.740041)
model.createNode(2690, 2.70897, 30.4, 0.372887)
model.createNode(2691, 2.13847, 30, 2.05408)
model.createNode(2692, 2.29761, 30, 1.76531)
model.createNode(2693, 2.44483, 30, 1.44144)
model.createNode(2694, 2.56456, 30, 1.09707)
model.createNode(2695, 2.65185, 30, 0.739295)
model.createNode(2696, 2.70817, 30, 0.372516)
model.createNode(2697, 2.1369, 29.6, 2.05288)
model.createNode(2698, 2.29554, 29.6, 1.76398)
model.createNode(2699, 2.44253, 29.6, 1.44016)
model.createNode(2700, 2.56241, 29.6, 1.096)
model.createNode(2701, 2.65024, 29.6, 0.738553)
model.createNode(2702, 2.70735, 29.6, 0.372145)
model.createNode(2703, 2.13533, 29.2, 2.05169)
model.createNode(2704, 2.29346, 29.2, 1.76265)
model.createNode(2705, 2.44023, 29.2, 1.43888)
model.createNode(2706, 2.56026, 29.2, 1.09494)
model.createNode(2707, 2.64862, 29.2, 0.737811)
model.createNode(2708, 2.70652, 29.2, 0.371775)
model.createNode(2709, 2.13376, 28.8, 2.05049)
model.createNode(2710, 2.29139, 28.8, 1.76132)
model.createNode(2711, 2.43793, 28.8, 1.43761)
model.createNode(2712, 2.55811, 28.8, 1.09388)
model.createNode(2713, 2.64699, 28.8, 0.737069)
model.createNode(2714, 2.70569, 28.8, 0.371404)
model.createNode(2715, 2.1322, 28.4, 2.04929)
model.createNode(2716, 2.28932, 28.4, 1.75999)
model.createNode(2717, 2.43562, 28.4, 1.43633)
model.createNode(2718, 2.55595, 28.4, 1.09281)
model.createNode(2719, 2.64536, 28.4, 0.736327)
model.createNode(2720, 2.70486, 28.4, 0.371033)
model.createNode(2721, 2.13063, 28, 2.0481)
model.createNode(2722, 2.28724, 28, 1.75866)
model.createNode(2723, 2.43332, 28, 1.43505)
model.createNode(2724, 2.55379, 28, 1.09175)
model.createNode(2725, 2.64373, 28, 0.735585)
model.createNode(2726, 2.70402, 28, 0.370663)
model.createNode(2727, 2.12907, 27.6, 2.0469)
model.createNode(2728, 2.28517, 27.6, 1.75733)
model.createNode(2729, 2.43102, 27.6, 1.43378)
model.createNode(2730, 2.55164, 27.6, 1.09069)
model.createNode(2731, 2.64211, 27.6, 0.734843)
model.createNode(2732, 2.70319, 27.6, 0.370292)
model.createNode(2733, 2.1275, 27.2, 2.0457)
model.createNode(2734, 2.28309, 27.2, 1.756)
model.createNode(2735, 2.42871, 27.2, 1.4325)
model.createNode(2736, 2.54948, 27.2, 1.08962)
model.createNode(2737, 2.64048, 27.2, 0.734101)
model.createNode(2738, 2.70236, 27.2, 0.369921)
model.createNode(2739, 2.12594, 26.8, 2.0445)
model.createNode(2740, 2.28102, 26.8, 1.75467)
model.createNode(2741, 2.42641, 26.8, 1.43122)
model.createNode(2742, 2.54732, 26.8, 1.08856)
model.createNode(2743, 2.63885, 26.8, 0.733359)
model.createNode(2744, 2.70152, 26.8, 0.369551)
model.createNode(2745, 2.12437, 26.4, 2.04331)
model.createNode(2746, 2.27895, 26.4, 1.75334)
model.createNode(2747, 2.4241, 26.4, 1.42994)
model.createNode(2748, 2.54517, 26.4, 1.0875)
model.createNode(2749, 2.63722, 26.4, 0.732617)
model.createNode(2750, 2.70069, 26.4, 0.36918)
model.createNode(2751, 2.12281, 26, 2.04211)
model.createNode(2752, 2.27687, 26, 1.75201)
model.createNode(2753, 2.4218, 26, 1.42867)
model.createNode(2754, 2.54301, 26, 1.08644)
model.createNode(2755, 2.63559, 26, 0.731875)
model.createNode(2756, 2.69986, 26, 0.368809)
model.createNode(2757, 2.12124, 25.6, 2.04091)
model.createNode(2758, 2.2748, 25.6, 1.75068)
model.createNode(2759, 2.4195, 25.6, 1.42739)
model.createNode(2760, 2.54085, 25.6, 1.08537)
model.createNode(2761, 2.63396, 25.6, 0.731133)
model.createNode(2762, 2.69902, 25.6, 0.368438)
model.createNode(2763, 2.11968, 25.2, 2.03972)
model.createNode(2764, 2.27273, 25.2, 1.74935)
model.createNode(2765, 2.41719, 25.2, 1.42611)
model.createNode(2766, 2.5387, 25.2, 1.08431)
model.createNode(2767, 2.63234, 25.2, 0.730391)
model.createNode(2768, 2.69819, 25.2, 0.368068)
model.createNode(2769, 2.11811, 24.8, 2.03852)
model.createNode(2770, 2.27065, 24.8, 1.74802)
model.createNode(2771, 2.41489, 24.8, 1.42484)
model.createNode(2772, 2.53654, 24.8, 1.08325)
model.createNode(2773, 2.63071, 24.8, 0.729649)
model.createNode(2774, 2.69736, 24.8, 0.367697)
model.createNode(2775, 2.11655, 24.4, 2.03732)
model.createNode(2776, 2.26858, 24.4, 1.74669)
model.createNode(2777, 2.41258, 24.4, 1.42356)
model.createNode(2778, 2.53438, 24.4, 1.08218)
model.createNode(2779, 2.62908, 24.4, 0.728907)
model.createNode(2780, 2.69652, 24.4, 0.367326)
model.createNode(2781, 2.11498, 24, 2.03613)
model.createNode(2782, 2.26651, 24, 1.74536)
model.createNode(2783, 2.41028, 24, 1.42228)
model.createNode(2784, 2.53223, 24, 1.08112)
model.createNode(2785, 2.62745, 24, 0.728165)
model.createNode(2786, 2.69569, 24, 0.366956)
model.createNode(2787, 2.11342, 23.6, 2.03493)
model.createNode(2788, 2.26443, 23.6, 1.74403)
model.createNode(2789, 2.40797, 23.6, 1.421)
model.createNode(2790, 2.53007, 23.6, 1.08006)
model.createNode(2791, 2.62582, 23.6, 0.727424)
model.createNode(2792, 2.69486, 23.6, 0.366585)
model.createNode(2793, 2.11185, 23.2, 2.03373)
model.createNode(2794, 2.26236, 23.2, 1.7427)
model.createNode(2795, 2.40567, 23.2, 1.41973)
model.createNode(2796, 2.52791, 23.2, 1.07899)
model.createNode(2797, 2.6242, 23.2, 0.726682)
model.createNode(2798, 2.69402, 23.2, 0.366214)
model.createNode(2799, 2.11028, 22.8, 2.03254)
model.createNode(2800, 2.26029, 22.8, 1.74137)
model.createNode(2801, 2.40337, 22.8, 1.41845)
model.createNode(2802, 2.52576, 22.8, 1.07793)
model.createNode(2803, 2.62257, 22.8, 0.72594)
model.createNode(2804, 2.69319, 22.8, 0.365844)
model.createNode(2805, 2.10872, 22.4, 2.03134)
model.createNode(2806, 2.25821, 22.4, 1.74004)
model.createNode(2807, 2.40106, 22.4, 1.41717)
model.createNode(2808, 2.5236, 22.4, 1.07687)
model.createNode(2809, 2.62094, 22.4, 0.725198)
model.createNode(2810, 2.69236, 22.4, 0.365473)
model.createNode(2811, 2.10715, 22, 2.03014)
model.createNode(2812, 2.25614, 22, 1.73871)
model.createNode(2813, 2.39876, 22, 1.4159)
model.createNode(2814, 2.52145, 22, 1.0758)
model.createNode(2815, 2.61931, 22, 0.724456)
model.createNode(2816, 2.69152, 22, 0.365102)
model.createNode(2817, 2.10559, 21.6, 2.02895)
model.createNode(2818, 2.25407, 21.6, 1.73738)
model.createNode(2819, 2.39645, 21.6, 1.41462)
model.createNode(2820, 2.51929, 21.6, 1.07474)
model.createNode(2821, 2.61768, 21.6, 0.723714)
model.createNode(2822, 2.69069, 21.6, 0.364732)
model.createNode(2823, 2.10402, 21.2, 2.02775)
model.createNode(2824, 2.25199, 21.2, 1.73605)
model.createNode(2825, 2.39415, 21.2, 1.41334)
model.createNode(2826, 2.51713, 21.2, 1.07368)
model.createNode(2827, 2.61606, 21.2, 0.722972)
model.createNode(2828, 2.68985, 21.2, 0.364361)
model.createNode(2829, 2.10246, 20.8, 2.02655)
model.createNode(2830, 2.24992, 20.8, 1.73472)
model.createNode(2831, 2.39185, 20.8, 1.41206)
model.createNode(2832, 2.51498, 20.8, 1.07261)
model.createNode(2833, 2.61443, 20.8, 0.72223)
model.createNode(2834, 2.68902, 20.8, 0.36399)
model.createNode(2835, 2.10089, 20.4, 2.02536)
model.createNode(2836, 2.24785, 20.4, 1.73339)
model.createNode(2837, 2.38954, 20.4, 1.41079)
model.createNode(2838, 2.51282, 20.4, 1.07155)
model.createNode(2839, 2.6128, 20.4, 0.721488)
model.createNode(2840, 2.68819, 20.4, 0.36362)
model.createNode(2841, 2.09933, 20, 2.02416)
model.createNode(2842, 2.24577, 20, 1.73206)
model.createNode(2843, 2.38724, 20, 1.40951)
model.createNode(2844, 2.51066, 20, 1.07049)
model.createNode(2845, 2.61117, 20, 0.720746)
model.createNode(2846, 2.68735, 20, 0.363249)
model.createNode(2847, 2.09776, 19.6, 2.02296)
model.createNode(2848, 2.2437, 19.6, 1.73073)
model.createNode(2849, 2.38493, 19.6, 1.40823)
model.createNode(2850, 2.50851, 19.6, 1.06943)
model.createNode(2851, 2.60954, 19.6, 0.720004)
model.createNode(2852, 2.68652, 19.6, 0.362878)
model.createNode(2853, 2.0962, 19.2, 2.02176)
model.createNode(2854, 2.24163, 19.2, 1.7294)
model.createNode(2855, 2.38263, 19.2, 1.40696)
model.createNode(2856, 2.50635, 19.2, 1.06836)
model.createNode(2857, 2.60792, 19.2, 0.719262)
model.createNode(2858, 2.68569, 19.2, 0.362507)
model.createNode(2859, 2.09463, 18.8, 2.02057)
model.createNode(2860, 2.23955, 18.8, 1.72807)
model.createNode(2861, 2.38033, 18.8, 1.40568)
model.createNode(2862, 2.50419, 18.8, 1.0673)
model.createNode(2863, 2.60629, 18.8, 0.71852)
model.createNode(2864, 2.68485, 18.8, 0.362137)
model.createNode(2865, 2.09307, 18.4, 2.01937)
model.createNode(2866, 2.23748, 18.4, 1.72674)
model.createNode(2867, 2.37802, 18.4, 1.4044)
model.createNode(2868, 2.50204, 18.4, 1.06624)
model.createNode(2869, 2.60466, 18.4, 0.717778)
model.createNode(2870, 2.68402, 18.4, 0.361766)
model.createNode(2871, 2.0915, 18, 2.01817)
model.createNode(2872, 2.2354, 18, 1.72541)
model.createNode(2873, 2.37572, 18, 1.40312)
model.createNode(2874, 2.49988, 18, 1.06517)
model.createNode(2875, 2.60303, 18, 0.717036)
model.createNode(2876, 2.68319, 18, 0.361395)
model.createNode(2877, 2.08993, 17.6, 2.01698)
model.createNode(2878, 2.23333, 17.6, 1.72408)
model.createNode(2879, 2.37341, 17.6, 1.40185)
model.createNode(2880, 2.49772, 17.6, 1.06411)
model.createNode(2881, 2.6014, 17.6, 0.716294)
model.createNode(2882, 2.68235, 17.6, 0.361025)
model.createNode(2883, 2.08827, 17.2, 2.01597)
model.createNode(2884, 2.23128, 17.2, 1.72312)
model.createNode(2885, 2.37126, 17.2, 1.401)
model.createNode(2886, 2.49581, 17.2, 1.06345)
model.createNode(2887, 2.60005, 17.2, 0.715879)
model.createNode(2888, 2.68175, 17.2, 0.360877)
model.createNode(2889, 2.08699, 16.8, 2.0164)
model.createNode(2890, 2.22995, 16.8, 1.72393)
model.createNode(2891, 2.37015, 16.8, 1.40193)
model.createNode(2892, 2.49511, 16.8, 1.06434)
model.createNode(2893, 2.59981, 16.8, 0.716624)
model.createNode(2894, 2.68172, 16.8, 0.361276)
model.createNode(2895, 2.0874, 16.4, 2.01931)
model.createNode(2896, 2.23057, 16.4, 1.72753)
model.createNode(2897, 2.37119, 16.4, 1.40553)
model.createNode(2898, 2.49648, 16.4, 1.06743)
model.createNode(2899, 2.60107, 16.4, 0.7188)
model.createNode(2900, 2.68239, 16.4, 0.362308)
model.createNode(2901, 0.457143, 32, 0)
model.createNode(2902, 0.457143, 31.6, 0)
model.createNode(2903, 0.457143, 31.2, 0)
model.createNode(2904, 0.457143, 30.8, 0)
model.createNode(2905, 0.457143, 30.4, 0)
model.createNode(2906, 0.457143, 30, 0)
model.createNode(2907, 0.457143, 29.6, 0)
model.createNode(2908, 0.457143, 29.2, 0)
model.createNode(2909, 0.457143, 28.8, 0)
model.createNode(2910, 0.457143, 28.4, 0)
model.createNode(2911, 0.457143, 28, 0)
model.createNode(2912, 0.457143, 27.6, 0)
model.createNode(2913, 0.457143, 27.2, 0)
model.createNode(2914, 0.457143, 26.8, 0)
model.createNode(2915, 0.457143, 26.4, 0)
model.createNode(2916, 0.457143, 26, 0)
model.createNode(2917, 0.457143, 25.6, 0)
model.createNode(2918, 0.457143, 25.2, 0)
model.createNode(2919, 0.457143, 24.8, 0)
model.createNode(2920, 0.457143, 24.4, 0)
model.createNode(2921, 0.457143, 24, 0)
model.createNode(2922, 0.457143, 23.6, 0)
model.createNode(2923, 0.457143, 23.2, 0)
model.createNode(2924, 0.457143, 22.8, 0)
model.createNode(2925, 0.457143, 22.4, 0)
model.createNode(2926, 0.457143, 22, 0)
model.createNode(2927, 0.457143, 21.6, 0)
model.createNode(2928, 0.457143, 21.2, 0)
model.createNode(2929, 0.457143, 20.8, 0)
model.createNode(2930, 0.457143, 20.4, 0)
model.createNode(2931, 0.457143, 20, 0)
model.createNode(2932, 0.457143, 19.6, 0)
model.createNode(2933, 0.457143, 19.2, 0)
model.createNode(2934, 0.457143, 18.8, 0)
model.createNode(2935, 0.457143, 18.4, 0)
model.createNode(2936, 0.457143, 18, 0)
model.createNode(2937, 0.457143, 17.6, 0)
model.createNode(2938, 0.457143, 17.2, 0)
model.createNode(2939, 0.457143, 16.8, 0)
model.createNode(2940, 0.457143, 16.4, 0)
model.createNode(2941, 0.914286, 32, 0)
model.createNode(2942, 0.914286, 31.6, 0)
model.createNode(2943, 0.914286, 31.2, 0)
model.createNode(2944, 0.914286, 30.8, 0)
model.createNode(2945, 0.914286, 30.4, 0)
model.createNode(2946, 0.914286, 30, 0)
model.createNode(2947, 0.914286, 29.6, 0)
model.createNode(2948, 0.914286, 29.2, 0)
model.createNode(2949, 0.914286, 28.8, 0)
model.createNode(2950, 0.914286, 28.4, 0)
model.createNode(2951, 0.914286, 28, 0)
model.createNode(2952, 0.914286, 27.6, 0)
model.createNode(2953, 0.914286, 27.2, 0)
model.createNode(2954, 0.914286, 26.8, 0)
model.createNode(2955, 0.914286, 26.4, 0)
model.createNode(2956, 0.914286, 26, 0)
model.createNode(2957, 0.914286, 25.6, 0)
model.createNode(2958, 0.914286, 25.2, 0)
model.createNode(2959, 0.914286, 24.8, 0)
model.createNode(2960, 0.914286, 24.4, 0)
model.createNode(2961, 0.914286, 24, 0)
model.createNode(2962, 0.914286, 23.6, 0)
model.createNode(2963, 0.914286, 23.2, 0)
model.createNode(2964, 0.914286, 22.8, 0)
model.createNode(2965, 0.914286, 22.4, 0)
model.createNode(2966, | |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import mock
import netaddr
import six
import yaml
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy import models
from nailgun.network.manager import NetworkManager
from nailgun import objects
from nailgun.orchestrator import stages
from nailgun.test import base
from nailgun.utils import reverse
from nailgun.orchestrator.deployment_graph import AstuteGraph
from nailgun.orchestrator.deployment_serializers import \
get_serializer_for_cluster
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkDeploymentSerializer70
from nailgun.orchestrator.neutron_serializers import \
NeutronNetworkTemplateSerializer70
from nailgun.test.integration.test_orchestrator_serializer import \
BaseDeploymentSerializer
from nailgun.test.integration.test_orchestrator_serializer import \
TestDeploymentHASerializer61
from nailgun.test.integration.test_orchestrator_serializer import \
TestNovaOrchestratorSerializer
from nailgun.test.integration.test_orchestrator_serializer import \
TestSerializeInterfaceDriversData
class PrepareDataMixin(object):
    """Test mixin that supplies a release patched with the fixed 7.0-style
    network roles metadata below.

    Expects the host test class to provide ``self.env``, ``self.db`` and
    ``self.env_version`` (the standard nailgun base-test attributes).
    """
    def patch_net_roles_for_release(self):
        """Create a release for ``self.env_version`` and overwrite its
        ``network_roles_metadata`` with a static role list.

        :returns: the patched ``models.Release`` DB object (flushed to the
                  session, not committed)
        """
        rel_id = self.env.create_release(version=self.env_version).id
        rel_db = self.db.query(models.Release).filter_by(id=rel_id).one()
        # Most roles share {subnet: true, gateway: false, vip: []}; the YAML
        # anchor &default_network_roles_metadata_properties is reused via
        # aliases to keep the literal short.
        to_patch = yaml.safe_load("""
            -
                id: "keystone/api"
                default_mapping: "management"
                properties: &default_network_roles_metadata_properties
                    subnet: true
                    gateway: false
                    vip: []
            -
                id: "admin/pxe"
                default_mapping: "fuelweb_admin"
                properties:
                    subnet: true
                    gateway: true
                    vip: []
            -
                id: "swift/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "neutron/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "sahara/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "ceilometer/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "cinder/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "glance/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "heat/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "nova/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "murano/api"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "horizon"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "mgmt/memcache"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "mgmt/database"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "mgmt/messaging"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "mgmt/corosync"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "mgmt/vip"
                default_mapping: "management"
                properties:
                    subnet: true
                    gateway: false
                    vip:
                        -
                            name: "vrouter"
                            namespace: "vrouter"
                            alias: "management_vrouter_vip"
                        -
                            name: "management"
                            namespace: "haproxy"
                            alias: "management_vip"
            -
                id: "public/vip"
                default_mapping: "public"
                properties:
                    subnet: true
                    gateway: true
                    vip:
                        -
                            name: "vrouter_pub"
                            namespace: "vrouter"
                            alias: "public_vrouter_vip"
                        -
                            name: "public"
                            namespace: "haproxy"
                            alias: "public_vip"
            -
                id: "neutron/private"
                default_mapping: "private"
                properties:
                    subnet: false
                    gateway: false
                    vip: []
            -
                id: "neutron/mesh"
                default_mapping: "private"
                properties: *default_network_roles_metadata_properties
            -
                id: "neutron/floating"
                default_mapping: "public"
                properties:
                    subnet: false
                    gateway: false
                    vip: []
            -
                id: "swift/replication"
                default_mapping: "storage"
                properties: *default_network_roles_metadata_properties
            -
                id: "ceph/public"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "ceph/radosgw"
                default_mapping: "public"
                properties: *default_network_roles_metadata_properties
            -
                id: "ceph/replication"
                default_mapping: "storage"
                properties: *default_network_roles_metadata_properties
            -
                id: "cinder/iscsi"
                default_mapping: "storage"
                properties: *default_network_roles_metadata_properties
            -
                id: "mongo/db"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "fw-admin"
                default_mapping: "fuelweb_admin"
                properties:
                    subnet: true
                    gateway: true
                    vip: []
            -
                id: "management"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
            -
                id: "ex"
                default_mapping: "public"
                properties:
                    subnet: true
                    gateway: true
                    vip: []
            -
                id: "storage"
                default_mapping: "storage"
                properties: *default_network_roles_metadata_properties
            -
                id: "nova/migration"
                default_mapping: "management"
                properties: *default_network_roles_metadata_properties
        """)
        rel_db.network_roles_metadata = to_patch
        self.db.flush()
        return rel_db
class BaseTestDeploymentAttributesSerialization70(BaseDeploymentSerializer,
                                                  PrepareDataMixin):
    """Shared fixture for 7.0 deployment-attribute serialization tests.

    Builds an HA cluster (one controller, one compute), serializes it for
    Astute in setUp, and exposes the expected network-role -> network
    grouping as class attributes.
    """
    # Network roles expected to be mapped onto each logical network.
    management = ['keystone/api', 'neutron/api', 'swift/api', 'sahara/api',
                  'ceilometer/api', 'cinder/api', 'glance/api', 'heat/api',
                  'nova/api', 'murano/api', 'horizon', 'management',
                  'mgmt/database', 'mgmt/messaging', 'mgmt/corosync',
                  'mgmt/memcache', 'mgmt/vip', 'mongo/db',
                  'ceph/public', 'nova/migration']
    fuelweb_admin = ['admin/pxe', 'fw-admin']
    neutron = ['neutron/private', 'neutron/floating']
    storage = ['storage', 'ceph/replication', 'swift/replication',
               'cinder/iscsi']
    public = ['ex', 'public/vip', 'ceph/radosgw']
    private = ['neutron/mesh']
    networks = ['fuelweb_admin', 'storage', 'management', 'public', 'private']
    # Must be set in subclasses
    segmentation_type = None
    env_version = '2015.1.0-7.0'
    def setUp(self):
        """Create the cluster and serialize it once for all tests."""
        super(BaseTestDeploymentAttributesSerialization70, self).setUp()
        self.cluster = self.create_env(consts.CLUSTER_MODES.ha_compact)
        # prepare_for_deployment must run before serialization so that IPs
        # and other deployment data are assigned.
        objects.Cluster.prepare_for_deployment(self.env.clusters[-1])
        self.cluster_db = self.db.query(models.Cluster).get(self.cluster['id'])
        serializer_type = get_serializer_for_cluster(self.cluster_db)
        self.serializer = serializer_type(AstuteGraph(self.cluster_db))
        self.serialized_for_astute = self.serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        self.vm_data = self.env.read_fixtures(['vmware_attributes'])
    def create_env(self, mode):
        """Create a neutron cluster (controller + compute) on the patched
        release; ``segmentation_type`` comes from the subclass."""
        release = self.patch_net_roles_for_release()
        return self.env.create(
            cluster_kwargs={
                'release_id': release.id,
                'mode': mode,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': self.segmentation_type},
            nodes_kwargs=[
                {'roles': ['controller'],
                 'pending_addition': True},
                {'roles': ['compute'],
                 'pending_addition': True}])
    def check_vips_serialized(self, node_data):
        """Assert VIPs live only under network_metadata/vips and carry the
        expected per-VIP keys."""
        vips_names = ['vrouter', 'management', 'vrouter_pub', 'public']
        # check that vip-related info is not in root
        self.assertTrue(all(vip_name not in node_data
                            for vip_name in vips_names))
        vips_data = node_data['network_metadata']['vips']
        self.assertItemsEqual(vips_data,
                              vips_names)
        for vip in vips_names:
            self.assertItemsEqual(
                vips_data[vip],
                ['network_role', 'namespace', 'ipaddr', 'node_roles']
            )
class TestDeploymentAttributesSerialization70(
    BaseTestDeploymentAttributesSerialization70
):
    """Serialization checks for neutron clusters with VLAN segmentation."""
    segmentation_type = consts.NEUTRON_SEGMENT_TYPES.vlan
    # Extra plugin-style network used by the custom-network tests below.
    custom_network = {
        'name': 'custom',
        'role': 'plugin/custom',
        'cidr': '192.168.3.0/24',
        'vlan_start': 50,
        'bridge': 'br-custom',
    }
    # Network-role template rendered with the custom_network values above.
    plugin_network_roles = yaml.safe_load("""
- id: "{role}"
  default_mapping: "{name}"
  properties:
    subnet: true
    gateway: false
    vip:
       - name: "{name}"
         namespace: "haproxy"
    """.format(**custom_network))
    def test_non_default_bridge_mapping(self):
        """Bridge names derived from long/colliding network names must be
        truncated consistently and remain unique per network."""
        # keys: network names; values: expected (possibly truncated)
        # bridge-name prefixes before any uniquifying suffix
        expected_mapping = {
            u'test': u'br-test',
            u'testnetwork1': u'br-testnetwork',
            u'testnetwork13': u'br-testnetwork',
            u'my-super-network': u'br-my-super-net',
            u'uplink-network-east': u'br-uplink-netw',
            u'uplink-network-west': u'br-uplink-netw',
            u'uplink-network-south': u'br-uplink-netw',
            u'12345uplink-network-south': u'br-12345uplink',
            u'fw-admin': u'br-fw-admi'
        }
        cluster = self.env.create(
            cluster_kwargs={
                'release_id': self.env.releases[0].id,
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': self.segmentation_type})
        self.cluster_db = objects.Cluster.get_by_uid(cluster['id'])
        for name in expected_mapping:
            self.env._create_network_group(cluster=self.cluster_db,
                                           name=name)
        self.env.create_node(
            api=True,
            cluster_id=cluster['id'],
            pending_roles=['controller'],
            pending_addition=True)
        net_serializer = self.serializer.get_net_provider_serializer(
            self.cluster_db)
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        mapping = net_serializer.get_node_non_default_bridge_mapping(
            self.cluster_db.nodes[0])
        # since we have a suffix generation for bridges, they may have
        # different suffix based on PYTHONHASHSEED. hence, we can't
        # come up with a normal dictionary comparison. so let's
        # compare that all bridges are unique, and they are unique
        # for networks which may have bridge collision.
        br_collision = collections.defaultdict(list)
        self.assertEqual(len(mapping), len(expected_mapping))
        self.assertEqual(len(expected_mapping), len(set(mapping.values())))
        for netname in expected_mapping:
            # check that bridge name has been generated from the network
            self.assertTrue(
                mapping[netname].startswith(expected_mapping[netname]))
            br_collision[expected_mapping[netname]].append(netname)
        # check that there's no collision between networks
        for bridge, netnames in six.iteritems(br_collision):
            bridges = set((mapping[netname] for netname in netnames))
            self.assertEqual(len(bridges), len(netnames))
    def test_network_scheme_custom_networks(self):
        """A plugin-defined network must surface in roles, VIPs, bridges and
        VLAN ports of every serialized node."""
        cluster = self.env.create(
            cluster_kwargs={
                'release_id': self.env.releases[0].id,
                'mode': consts.CLUSTER_MODES.ha_compact,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
                'net_segment_type': self.segmentation_type})
        self.cluster_db = objects.Cluster.get_by_uid(cluster['id'])
        self.env._create_network_group(cluster=self.cluster_db,
                                       name=self.custom_network['name'],
                                       cidr=self.custom_network['cidr'],
                                       vlan_start=self.custom_network[
                                           'vlan_start'
                                       ])
        self.env._add_plugin_network_roles(self.cluster_db,
                                           self.plugin_network_roles)
        self.env.create_node(
            api=True,
            cluster_id=cluster['id'],
            pending_roles=['controller'],
            pending_addition=True)
        objects.Cluster.prepare_for_deployment(self.cluster_db)
        serializer_type = get_serializer_for_cluster(self.cluster_db)
        serializer = serializer_type(AstuteGraph(self.cluster_db))
        serialized_for_astute = serializer.serialize(
            self.cluster_db, self.cluster_db.nodes)
        for node in serialized_for_astute:
            vips = node['network_metadata']['vips']
            roles = node['network_scheme']['roles']
            transformations = node['network_scheme']['transformations']
            node_network_roles = (node['network_metadata']['nodes']
                                  ['node-' + node['uid']]['network_roles'])
            custom_ip = node_network_roles.get(self.custom_network['role'],
                                               '0.0.0.0')
            # NOTE(review): filter() returns a list on Python 2 only; the
            # len()/indexing below would fail on the Python 3 iterator —
            # wrap in list() when porting.
            custom_brs = filter(lambda t: t.get('name') ==
                                self.custom_network['bridge'],
                                transformations)
            custom_ports = filter(lambda t: t.get('name') ==
                                  ("eth0.%s" %
                                   self.custom_network['vlan_start']),
                                  transformations)
            self.assertEqual(roles.get(self.custom_network['role']),
                             self.custom_network['bridge'])
            self.assertEqual(vips.get(self.custom_network['name'],
                                      {}).get('network_role'),
                             self.custom_network['role'])
            self.assertTrue(netaddr.IPAddress(custom_ip) in
                            netaddr.IPNetwork(self.custom_network['cidr']))
            self.assertEqual(len(custom_brs), 1)
            self.assertEqual(len(custom_ports), 1)
            self.assertEqual(custom_ports[0]['bridge'],
                             self.custom_network['bridge'])
    def test_network_scheme(self):
        """Each node's role -> bridge map must match the expected mapping
        for its role set and segmentation type."""
        for node in self.serialized_for_astute:
            roles = node['network_scheme']['roles']
            node = objects.Node.get_by_uid(node['uid'])
            # NOTE(review): zip() returns a list on Python 2 only; the +=
            # concatenations require list(zip(...)) on Python 3.
            expected_roles = zip(
                self.management, ['br-mgmt'] * len(self.management))
            expected_roles += zip(
                self.fuelweb_admin, ['br-fw-admin'] * len(self.fuelweb_admin))
            expected_roles += zip(
                self.storage, ['br-storage'] * len(self.storage))
            if objects.Node.should_have_public(node):
                expected_roles += zip(
                    self.public, ['br-ex'] * len(self.public))
                expected_roles += [('neutron/floating', 'br-floating')]
            if node.cluster.network_config.segmentation_type == \
                    consts.NEUTRON_SEGMENT_TYPES.vlan:
                expected_roles += [('neutron/private', 'br-prv')]
            if node.cluster.network_config.segmentation_type in \
                    (consts.NEUTRON_SEGMENT_TYPES.gre,
                     consts.NEUTRON_SEGMENT_TYPES.tun):
                expected_roles += [('neutron/mesh', 'br-mesh')]
            self.assertEqual(roles, dict(expected_roles))
    def test_offloading_modes_serialize(self):
        """Serialized interfaces carry an ethtool/offload block matching the
        NetworkManager-modified offloading modes."""
        meta = self.env.default_metadata()
        changed_offloading_modes = {}
        for interface in meta['interfaces']:
            changed_offloading_modes[interface['name']] = \
                NetworkManager._get_modified_offloading_modes(
                    interface.get('offloading_modes'))
        for node in self.serialized_for_astute:
            interfaces = node['network_scheme']['interfaces']
            for iface_name in interfaces:
                ethtool_blk = interfaces[iface_name].get('ethtool', None)
                self.assertIsNotNone(
                    ethtool_blk,
                    "There is no 'ethtool' block in deployment data")
                offload_blk = ethtool_blk.get('offload', None)
                self.assertIsNotNone(
                    offload_blk,
                    "There is no 'offload' block in deployment data")
                self.assertDictEqual(offload_blk,
                                     changed_offloading_modes[iface_name])
    def test_network_metadata(self):
        """network_metadata must describe every node (identity, swift zone,
        per-role IPs) and the serialized VIPs."""
        neutron_serializer = self.serializer.get_net_provider_serializer(
            self.cluster_db)
        for node_data in self.serialized_for_astute:
            self.assertItemsEqual(
                node_data['network_metadata'], ['nodes', 'vips'])
            for k, v in six.iteritems(node_data['network_metadata']['nodes']):
                self.assertItemsEqual(
                    v,
                    ['uid', 'fqdn', 'name', 'user_node_name',
                     'swift_zone', 'node_roles', 'network_roles']
                )
                node = objects.Node.get_by_uid(v['uid'])
                ip_by_net = neutron_serializer.get_network_to_ip_mapping(node)
                self.assertEqual(objects.Node.get_slave_name(node), k)
                self.assertEqual(v['uid'], node.uid)
                self.assertEqual(v['fqdn'], objects.Node.get_node_fqdn(node))
                self.assertEqual(v['name'], k)
                self.assertEqual(v['user_node_name'], node.name)
                self.assertEqual(v['swift_zone'], node.uid)
                # NOTE(review): zip() usage is Python-2-only here as well.
                network_roles = zip(self.management,
                                    [ip_by_net['management']] * len(
                                        self.management))
                network_roles += zip(self.fuelweb_admin,
                                     [ip_by_net['fuelweb_admin']] * len(
                                         self.fuelweb_admin))
                network_roles += zip(
                    self.storage, [ip_by_net['storage']] * len(self.storage))
                # neutron roles carry no host IP
                network_roles += zip(self.neutron, [None] * len(self.neutron))
                if objects.Node.should_have_public(node):
                    network_roles += zip(
                        self.public, [ip_by_net['public']] * len(self.public))
                if node.cluster.network_config.segmentation_type in \
                        (consts.NEUTRON_SEGMENT_TYPES.gre,
                         consts.NEUTRON_SEGMENT_TYPES.tun):
                    network_roles += zip(
                        self.private,
                        [ip_by_net['private']] * len(self.private))
                self.assertEqual(v['network_roles'], dict(network_roles))
            self.check_vips_serialized(node_data)
    def test_generate_vmware_attributes_data(self):
        """compute-vmware serialization pins target_node per compute entry."""
        self.check_generate_vmware_attributes_data()
        result = self.serializer.serialize_node(
            self.env.nodes[0], 'compute-vmware')
        self.assertEqual(
            result['vcenter']['computes'][0]['target_node'],
            "test_target_node")
        self.assertEqual(
            result['vcenter']['computes'][2]['target_node'],
            "controllers")
class TestDeploymentAttributesSerializationSegmentationGre70(
    TestDeploymentAttributesSerialization70
):
    """Re-runs the full VLAN suite above with GRE tenant segmentation."""
    segmentation_type = consts.NEUTRON_SEGMENT_TYPES.gre
class TestDeploymentAttributesSerializationSegmentationTun70(
    TestDeploymentAttributesSerialization70
):
    """Re-runs the full VLAN suite above with tunnel (VXLAN) segmentation."""
    segmentation_type = consts.NEUTRON_SEGMENT_TYPES.tun
class TestDeploymentSerializationForNovaNetwork70(
BaseTestDeploymentAttributesSerialization70
):
    def create_env(self, mode):
        """Create a nova-network cluster with a single controller node.

        Overrides the neutron-based base implementation; the release is
        patched with the 7.0 network roles first.
        """
        release = self.patch_net_roles_for_release()
        # NOTE(review): self.node_name is expected to come from the base
        # serializer test class — confirm against BaseDeploymentSerializer.
        return self.env.create(
            cluster_kwargs={
                'release_id': release.id,
                'mode': mode,
                'net_provider': consts.CLUSTER_NET_PROVIDERS.nova_network},
            nodes_kwargs=[
                {'roles': ['controller'],
                 'pending_addition': True,
                 'name': self.node_name,
                 }
            ])
    def test_network_scheme(self):
        """Every serialized node must expose exactly the nova-network
        role -> bridge mapping (no neutron-specific roles)."""
        for node in self.serialized_for_astute:
            roles = node['network_scheme']['roles']
            expected_roles = {
                'admin/pxe': 'br-fw-admin',
                'keystone/api': 'br-mgmt',
                'swift/api': 'br-mgmt',
                'sahara/api': 'br-mgmt',
                'ceilometer/api': 'br-mgmt',
                'cinder/api': 'br-mgmt',
                'glance/api': 'br-mgmt',
                'heat/api': 'br-mgmt',
                'nova/api': 'br-mgmt',
                'murano/api': 'br-mgmt',
                'horizon': 'br-mgmt',
                'mgmt/database': 'br-mgmt',
                'mgmt/messaging': 'br-mgmt',
                'mgmt/corosync': 'br-mgmt',
                'mgmt/memcache': 'br-mgmt',
                'mgmt/vip': 'br-mgmt',
                'public/vip': 'br-ex',
                'swift/replication': 'br-storage',
                'ceph/public': 'br-mgmt',
                'ceph/radosgw': 'br-ex',
                'ceph/replication': 'br-storage',
                'cinder/iscsi': 'br-storage',
                'mongo/db': 'br-mgmt',
                # fixed network rides a tagged sub-interface, not a bridge
                # (103 is presumably the default fixed-network VLAN id)
                'novanetwork/fixed': 'eth0.103',
                # deprecated
                'fw-admin': 'br-fw-admin',
                'management': 'br-mgmt',
                'ex': 'br-ex',
                'storage': 'br-storage',
            }
            self.assertEqual(roles, expected_roles)
def test_network_metadata(self):
nm = objects.Cluster.get_network_manager(self.env.clusters[0])
ip_by_net = {
'fuelweb_admin': None,
'storage': None,
'management': None,
'public': None
}
node = self.env.nodes[0]
networks = nm.get_node_networks(node)
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import re
import time
from collections import namedtuple
# Root logger of the agent process; handlers are configured elsewhere.
logger = logging.getLogger()
# create a named tuple to return both the concrete URI and SSL flag
AlertUri = namedtuple('AlertUri', 'uri is_ssl_enabled')
class BaseAlert(object):
  """Base class for alert checks run by the monitoring agent.

  Subclasses implement _collect() (and _get_reporting_text()); collect()
  drives the check and publishes the result to the collector.
  """
  # Possible alert outcome states reported to the collector.
  RESULT_OK = "OK"
  RESULT_WARNING = "WARNING"
  RESULT_CRITICAL = "CRITICAL"
  RESULT_UNKNOWN = "UNKNOWN"
  RESULT_SKIPPED = "SKIPPED"
  # Placeholder tokens substituted when building HA URIs.
  HA_NAMESERVICE_PARAM = "{{ha-nameservice}}"
  HA_ALIAS_PARAM = "{{alias}}"
def __init__(self, alert_meta, alert_source_meta):
self.alert_meta = alert_meta
self.alert_source_meta = alert_source_meta
self.cluster_name = ''
self.host_name = ''
def interval(self):
""" gets the defined interval this check should run """
if not self.alert_meta.has_key('interval'):
return 1
else:
interval = self.alert_meta['interval']
return 1 if interval < 1 else interval
def is_enabled(self):
"""
gets whether the definition is enabled
"""
return self.alert_meta['enabled']
def get_name(self):
"""
gets the unique name of the alert definition
"""
return self.alert_meta['name']
def get_uuid(self):
"""
gets the unique has of the alert definition
"""
return self.alert_meta['uuid']
def set_helpers(self, collector, cluster_configuration):
"""
sets helper objects for alerts without having to use them in a constructor
"""
self.collector = collector
self.cluster_configuration = cluster_configuration
def set_cluster(self, cluster_name, host_name):
""" sets cluster information for the alert """
self.cluster_name = cluster_name
self.host_name = host_name
def _get_alert_meta_value_safely(self, meta_key):
"""
safe way to get a value when outputting result json. will not throw an exception
"""
if self.alert_meta.has_key(meta_key):
return self.alert_meta[meta_key]
else:
return None
def collect(self):
""" method used for collection. defers to _collect() """
res = (BaseAlert.RESULT_UNKNOWN, [])
res_base_text = None
try:
res = self._collect()
result_state = res[0]
reporting_state = result_state.lower()
# if the alert reports that it should be SKIPPED, then skip it
# this is useful for cases where the alert might run on multiple hosts
# but only 1 host should report the data
if result_state == BaseAlert.RESULT_SKIPPED:
logger.debug('[Alert][{0}] Skipping UUID {1}.'.format(self.get_name(),
self.get_uuid()))
return
# it's possible that the alert definition doesn't have reporting; safely
# check for it and fallback to default text if it doesn't exist
if ('reporting' in self.alert_source_meta) and \
(reporting_state in self.alert_source_meta['reporting']) and \
('text' in self.alert_source_meta['reporting'][reporting_state]):
res_base_text = self.alert_source_meta['reporting'][reporting_state]['text']
if res_base_text is None:
res_base_text = self._get_reporting_text(result_state)
except Exception as exception:
message = "[Alert][{0}] Unable to execute alert. {1}".format(
self.get_name(), str(exception))
# print the exception if in DEBUG, otherwise just log the warning
if logger.isEnabledFor(logging.DEBUG):
logger.exception(message)
else:
logger.warning(message)
res = (BaseAlert.RESULT_UNKNOWN, [str(exception)])
res_base_text = "{0}"
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] result = {1}".format(self.get_name(), str(res)))
data = {}
data['name'] = self._get_alert_meta_value_safely('name')
data['label'] = self._get_alert_meta_value_safely('label')
data['state'] = res[0]
data['text'] = res_base_text.format(*res[1])
data['cluster'] = self.cluster_name
data['service'] = self._get_alert_meta_value_safely('serviceName')
data['component'] = self._get_alert_meta_value_safely('componentName')
data['timestamp'] = long(time.time() * 1000)
data['uuid'] = self._get_alert_meta_value_safely('uuid')
data['enabled'] = self._get_alert_meta_value_safely('enabled')
if logger.isEnabledFor(logging.DEBUG):
logger.debug("[Alert][{0}] text = {1}".format(self.get_name(), data['text']))
self.collector.put(self.cluster_name, data)
def _get_configuration_value(self, key):
"""
Gets the value of the specified configuration key from the cache. The key
should be of the form {{foo-bar/baz}}. If the key is not a lookup key
and is instead a constant, such as "foo" or "5", then the constant is
returned.
:return:
"""
if key is None:
return None
# parse {{foo-bar/baz}}
placeholder_keys = re.findall("{{([\S]+)}}", key)
# if none found, then return the original
if len(placeholder_keys) == 0:
return key
# this is a lookup key, so transform it into a value from the config cache
placeholder_key = placeholder_keys[0]
return self.cluster_configuration.get_configuration_value(
self.cluster_name, placeholder_key)
def _lookup_uri_property_keys(self, uri_structure):
"""
Loads the configuration lookup keys that the URI structure needs. This
will return a named tuple that contains the keys needed to lookup
parameterized URI values from the cached configuration.
The URI structure looks something like:
"uri":{
"http": foo,
"https": bar,
...
}
"""
if uri_structure is None:
return None
http_key = None
https_key = None
https_property_key = None
https_property_value_key = None
default_port = None
kerberos_keytab = None
kerberos_principal = None
ha_nameservice = None
ha_alias_key = None
ha_http_pattern = None
ha_https_pattern = None
if 'http' in uri_structure:
http_key = uri_structure['http']
if 'https' in uri_structure:
https_key = uri_structure['https']
if 'https_property' in uri_structure:
https_property_key = uri_structure['https_property']
if 'https_property_value' in uri_structure:
https_property_value_key = uri_structure['https_property_value']
if 'default_port' in uri_structure:
default_port = uri_structure['default_port']
if 'kerberos_keytab' in uri_structure:
kerberos_keytab = uri_structure['kerberos_keytab']
if 'kerberos_principal' in uri_structure:
kerberos_principal = uri_structure['kerberos_principal']
if 'high_availability' in uri_structure:
ha = uri_structure['high_availability']
if 'nameservice' in ha:
ha_nameservice = ha['nameservice']
if 'alias_key' in ha:
ha_alias_key = ha['alias_key']
if 'http_pattern' in ha:
ha_http_pattern = ha['http_pattern']
if 'https_pattern' in ha:
ha_https_pattern = ha['https_pattern']
AlertUriLookupKeys = namedtuple('AlertUriLookupKeys',
'http https https_property https_property_value default_port '
'kerberos_keytab kerberos_principal '
'ha_nameservice ha_alias_key ha_http_pattern ha_https_pattern')
alert_uri_lookup_keys = AlertUriLookupKeys(http=http_key, https=https_key,
https_property=https_property_key,
https_property_value=https_property_value_key, default_port=default_port,
kerberos_keytab=kerberos_keytab, kerberos_principal=kerberos_principal,
ha_nameservice=ha_nameservice, ha_alias_key=ha_alias_key,
ha_http_pattern=ha_http_pattern, ha_https_pattern=ha_https_pattern
)
return alert_uri_lookup_keys
def _get_uri_from_structure(self, alert_uri_lookup_keys):
"""
Gets the URI to use by examining the URI structure from the definition.
This will return a named tuple that has the uri and the SSL flag. The
URI structure looks something like:
"uri":{
"http": foo,
"https": bar,
...
}
"""
if alert_uri_lookup_keys is None:
return None
http_uri = None
https_uri = None
# first thing is first; if there are HA keys then try to dynamically build
# the property which is used to get the actual value of the uri
# (ie dfs.namenode.http-address.c1ha.nn2)
if alert_uri_lookup_keys.ha_nameservice is not None or alert_uri_lookup_keys.ha_alias_key is not None:
alert_uri = self._get_uri_from_ha_structure(alert_uri_lookup_keys)
if alert_uri is not None:
return alert_uri
# attempt to parse and parameterize the various URIs; properties that
# do not exist int he lookup map are returned as None
if alert_uri_lookup_keys.http is not None:
http_uri = self._get_configuration_value(alert_uri_lookup_keys.http)
if alert_uri_lookup_keys.https is not None:
https_uri = self._get_configuration_value(alert_uri_lookup_keys.https)
# without a URI, there's no way to create the structure we need - return
# the default port if specified, otherwise throw an exception
if http_uri is None and https_uri is None:
if alert_uri_lookup_keys.default_port is not None:
alert_uri = AlertUri(uri=alert_uri_lookup_keys.default_port, is_ssl_enabled=False)
return alert_uri
else:
raise Exception("Could not determine result. Either the http or https URI must be specified.")
# start out assuming plaintext
uri = http_uri
is_ssl_enabled = False
if https_uri is not None:
# https without http implies SSL, otherwise look it up based on the properties
if http_uri is None:
is_ssl_enabled = True
uri = https_uri
elif self._check_uri_ssl_property(alert_uri_lookup_keys):
is_ssl_enabled = True
uri = https_uri
alert_uri = AlertUri(uri=uri, is_ssl_enabled=is_ssl_enabled)
return alert_uri
def _get_uri_from_ha_structure(self, alert_uri_lookup_keys):
"""
Attempts to parse the HA URI structure in order to build a dynamic key
that represents the correct host URI to check.
:param alert_uri_lookup_keys:
:return: the AlertUri named tuple if there is a valid HA URL, otherwise None
"""
if alert_uri_lookup_keys is None:
return None
logger.debug("[Alert][{0}] HA URI structure detected in definition, attempting to lookup dynamic HA properties".format(self.get_name()))
ha_nameservice = self._get_configuration_value(alert_uri_lookup_keys.ha_nameservice)
ha_alias_key = alert_uri_lookup_keys.ha_alias_key
ha_http_pattern = alert_uri_lookup_keys.ha_http_pattern
ha_https_pattern = alert_uri_lookup_keys.ha_https_pattern
# at least one of these keys is needed
if ha_nameservice is None and ha_alias_key is None:
return None
# convert dfs.ha.namenodes.{{ha-nameservice}} into
# dfs.ha.namenodes.c1ha
if ha_nameservice is not None:
ha_alias_key = ha_alias_key.replace(self.HA_NAMESERVICE_PARAM, ha_nameservice)
# grab the alias value which should be like nn1, nn2
ha_nameservice_alias = self._get_configuration_value(ha_alias_key)
if ha_nameservice_alias is None:
logger.warning("[Alert][{0}] HA nameservice value is present but there are no aliases for {1}".format(
self.get_name(), ha_alias_key))
return None
# determine which pattern to use (http or https)
ha_pattern = ha_http_pattern
is_ssl_enabled = self._check_uri_ssl_property(alert_uri_lookup_keys)
if is_ssl_enabled:
ha_pattern = ha_https_pattern
# no pattern found
if ha_pattern is None:
logger.warning("[Alert][{0}] | |
# <gh_stars>1-10  (stray repository-metadata marker; commented out so the module parses)
import math
from functools import partial
from typing import Callable, Dict, List
import torch
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.layers import ShapeSpec
from detectron2.modeling import (
META_ARCH_REGISTRY,
SEM_SEG_HEADS_REGISTRY,
Backbone,
build_backbone,
build_sem_seg_head,
)
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import ImageList, Instances
from detectron2.utils.events import get_event_storage
from detectron2.utils.registry import Registry
from mgnet.geometry import inv2depth
from mgnet.postprocessing import (
get_depth_prediction,
get_instance_predictions,
get_panoptic_prediction,
)
from torch import nn
from torch.cuda.amp import custom_fwd
from torch.nn import functional as F
from .layers import GlobalContextModule, MGNetDecoder, MGNetHead, PoseCNN
from .loss import DeepLabCE, MultiViewPhotometricLoss, OhemCE
# Public API of this module; the build_* helpers and ExportableMGNet are
# defined later in the file.
__all__ = [
    "MGNet",
    "INS_EMBED_HEADS_REGISTRY",
    "build_ins_embed_head",
    "DEPTH_HEADS_REGISTRY",
    "build_depth_head",
    "ExportableMGNet",
]
# Registries let config files select head implementations by name,
# mirroring detectron2's SEM_SEG_HEADS_REGISTRY pattern.
INS_EMBED_HEADS_REGISTRY = Registry("INS_EMBED_BRANCHES")
INS_EMBED_HEADS_REGISTRY.__doc__ = """
Registry for instance embedding heads, which make instance embedding predictions from feature maps.
"""
DEPTH_HEADS_REGISTRY = Registry("DEPTH_BRANCHES")
DEPTH_HEADS_REGISTRY.__doc__ = """
Registry for depth heads, which make depth predictions from feature maps.
"""
@META_ARCH_REGISTRY.register()
class MGNet(nn.Module):
"""
MGNet model described in the paper
https://openaccess.thecvf.com/content/ICCV2021/papers/Schon_MGNet_Monocular_Geometric_Scene_Understanding_for_Autonomous_Driving_ICCV_2021_paper.pdf # noqa
"""
    @configurable
    def __init__(
        self,
        *,
        size_divisibility: int,
        pixel_mean: List[float],
        pixel_std: List[float],
        backbone: Backbone,
        global_context: nn.Module,
        sem_seg_head: nn.Module,
        ins_embed_head: nn.Module,
        depth_head: nn.Module,
        pose_net: nn.Module,
        with_panoptic: bool,
        with_depth: bool,
        with_uncertainty: bool,
        msc_flip_eval: bool,
        predict_instances: bool,
        instance_post_proc_func: Callable,
        panoptic_post_proc_func: Callable,
        depth_post_proc_func: Callable,
        panoptic_post_proc_threshold: int,
        panoptic_post_proc_nms_kernel: int,
    ):
        """Assemble the MGNet model from its pre-built components.

        All arguments are supplied by ``from_config`` via detectron2's
        ``@configurable`` mechanism. Heads that are disabled
        (``with_panoptic``/``with_depth``) are passed as None.
        """
        super().__init__()
        self.size_divisibility = size_divisibility
        # Normalization stats are divided by 255, i.e. cfg provides them on
        # a 0-255 scale while images are presumably fed in [0, 1] — the
        # trailing False registers them as non-persistent buffers (kept off
        # the state_dict per torch's register_buffer API).
        self.register_buffer(
            "pixel_mean", torch.tensor([x / 255.0 for x in pixel_mean]).view(-1, 1, 1), False
        )
        self.register_buffer(
            "pixel_std", torch.tensor([x / 255.0 for x in pixel_std]).view(-1, 1, 1), False
        )
        self.backbone = backbone
        # Feature-map names produced by the backbone, in its output order.
        self.bb_features = [k for k, v in self.backbone.output_shape().items()]
        self.global_context = global_context
        self.sem_seg_head = sem_seg_head
        self.ins_embed_head = ins_embed_head
        self.depth_head = depth_head
        self.pose_net = pose_net
        self.with_panoptic = with_panoptic
        self.with_depth = with_depth
        self.with_uncertainty = with_uncertainty
        if self.with_uncertainty:
            # 5 learnable log-variances, presumably one per loss term for
            # uncertainty-based task weighting — confirm against the loss code.
            self.register_parameter(
                "log_vars", torch.nn.Parameter(torch.zeros(5), requires_grad=True)
            )
        self.msc_flip_eval = msc_flip_eval
        self.predict_instances = predict_instances
        self.instance_post_proc_func = instance_post_proc_func
        self.panoptic_post_proc_func = panoptic_post_proc_func
        self.depth_post_proc_func = depth_post_proc_func
        # Used for ExportableMGNet model.
        self.panoptic_post_proc_threshold = panoptic_post_proc_threshold
        self.panoptic_post_proc_nms_kernel = panoptic_post_proc_nms_kernel
@classmethod
def from_config(cls, cfg):
pixel_mean = cfg.MODEL.PIXEL_MEAN
pixel_std = cfg.MODEL.PIXEL_STD
backbone = build_backbone(cfg)
global_context = GlobalContextModule(
in_channels=[x[1].channels for x in backbone.output_shape().items()][-1],
out_channels=cfg.MODEL.GCM.GCM_CHANNELS,
init_method=cfg.MODEL.GCM.INIT_METHOD,
)
with_panoptic = cfg.WITH_PANOPTIC
with_depth = cfg.WITH_DEPTH
with_uncertainty = cfg.WITH_UNCERTAINTY
sem_seg_head, ins_embed_head, depth_head, pose_net = None, None, None, None
if with_panoptic:
sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape())
ins_embed_head = build_ins_embed_head(cfg, backbone.output_shape())
if with_depth:
depth_head = build_depth_head(cfg, backbone.output_shape())
pose_net = PoseCNN(cfg)
msc_flip_eval = cfg.TEST.MSC_FLIP_EVAL
predict_instances = cfg.TEST.EVAL_INSTANCE
meta = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
instance_post_proc_func = None
if predict_instances:
instance_post_proc_func = partial(
get_instance_predictions,
thing_ids=list(meta.thing_dataset_id_to_contiguous_id.values()),
label_divisor=meta.label_divisor,
)
panoptic_post_proc_func = None
if with_panoptic:
# We cannot use partial here due to the torch.cuda.amp.custom_fwd used
def panoptic_post_proc_func(sem_seg, center_heatmap, offsets):
return get_panoptic_prediction(
sem_seg,
center_heatmap,
offsets,
num_thing_classes=len(meta.thing_dataset_id_to_contiguous_id.values()),
last_stuff_id=max(meta.stuff_dataset_id_to_contiguous_id.values()),
label_divisor=meta.label_divisor,
stuff_area=cfg.MODEL.POST_PROCESSING.STUFF_AREA,
void_label=-1,
threshold=cfg.MODEL.POST_PROCESSING.CENTER_THRESHOLD,
nms_kernel=cfg.MODEL.POST_PROCESSING.NMS_KERNEL,
)
depth_post_proc_func = None
if with_depth:
road_class_id = next(
(
item["trainId"] * meta.label_divisor
for item in meta.categories
if item["name"] == "road"
),
None,
)
depth_ignore_ids = []
if with_depth:
for cat in meta.categories:
if cat["name"] in cfg.INPUT.IGNORED_CATEGORIES_IN_DEPTH:
depth_ignore_ids.append(cat["trainId"] * meta.label_divisor)
depth_post_proc_func = partial(
get_depth_prediction,
use_dgc_scaling=cfg.MODEL.POST_PROCESSING.USE_DGC_SCALING,
road_class_id=road_class_id,
depth_filter_class_ids=depth_ignore_ids,
)
return {
"size_divisibility": cfg.MODEL.SIZE_DIVISIBILITY,
"pixel_mean": pixel_mean,
"pixel_std": pixel_std,
"backbone": backbone,
"global_context": global_context,
"sem_seg_head": sem_seg_head,
"ins_embed_head": ins_embed_head,
"depth_head": depth_head,
"pose_net": pose_net,
"with_panoptic": with_panoptic,
"with_depth": with_depth,
"with_uncertainty": with_uncertainty,
"msc_flip_eval": msc_flip_eval,
"predict_instances": predict_instances,
"instance_post_proc_func": instance_post_proc_func,
"panoptic_post_proc_func": panoptic_post_proc_func,
"depth_post_proc_func": depth_post_proc_func,
"panoptic_post_proc_threshold": cfg.MODEL.POST_PROCESSING.CENTER_THRESHOLD,
"panoptic_post_proc_nms_kernel": cfg.MODEL.POST_PROCESSING.NMS_KERNEL,
}
    @property
    def device(self):
        # pixel_mean is a registered buffer, so its device tracks the module's
        # device after .to()/.cuda(); use it as the canonical model device.
        return self.pixel_mean.device
def forward(self, batched_inputs):
"""
Args:
batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
Each item in the list contains the inputs for one image.
For now, each item in the list is a dict that contains:
* "image": Tensor, image in [C, H, W] format
* "image_prev": Tensor, previous image in video sequence in [C, H, W] format
* "image_next": Tensor, next image in video sequence in [C, H, W] format
* "*_orig": Unjittered image, image_prev and image_next for photometric loss calc
* "sem_seg": semantic segmentation ground truth
* "center": center points heatmap ground truth
* "offset": pixel offsets to center points ground truth
* "*_weights": pixel-wise loss weight maps for sem_seg, center and offset
* "camera_matrix": The [3, 3] intrinsic camera_matrix for image
* "reprojection_mask": bool tensor to mask out pixels in photometric loss
* Other information that's included in the original dicts, such as:
"height", "width" (int): the output resolution of the model (may be different
from input resolution), used in inference.
Returns:
list[dict]:
each dict is the results for one image. The dict contains the following keys:
* "panoptic_seg", "sem_seg": see documentation
:doc:`/tutorials/models` for the standard output format
* "depth": Tensor in [H, W] format, pixel-wise depth prediction
* "instances": available if ``predict_instances is True``. see documentation
:doc:`/tutorials/models` for the standard output format
"""
inputs, outputs, targets = {}, {}, {}
images = [x["image"].to(self.device).float() / 255.0 for x in batched_inputs]
images = ImageList.from_tensors(images, self.size_divisibility)
inputs["image"] = (images.tensor - self.pixel_mean) / self.pixel_std
# Process images through pose network during training
if self.training and self.with_depth:
images_prev = [x["image_prev"].to(self.device).float() / 255.0 for x in batched_inputs]
images_prev = ImageList.from_tensors(images_prev, self.size_divisibility)
inputs["image_prev"] = (images_prev.tensor - self.pixel_mean) / self.pixel_std
images_next = [x["image_next"].to(self.device).float() / 255.0 for x in batched_inputs]
images_next = ImageList.from_tensors(images_next, self.size_divisibility)
inputs["image_next"] = (images_next.tensor - self.pixel_mean) / self.pixel_std
outputs["poses"] = self.pose_net(torch.cat(list(inputs.values()), 1))
# Process images through MGNet
if self.msc_flip_eval and not self.training:
outputs.update(self.forward_multi_scale_flip(inputs["image"]))
else:
features = self.backbone(inputs["image"])
features["global_context"] = self.global_context(features[self.bb_features[-1]])
if self.with_panoptic:
outputs["sem_seg"] = self.sem_seg_head(features)
outputs["center"], outputs["offset"] = self.ins_embed_head(features)
if self.with_depth:
outputs["depth"] = self.depth_head(features)
if self.training:
# Add panoptic targets
if self.with_panoptic:
sem_seg_targets = [x["sem_seg"].to(self.device) for x in batched_inputs]
sem_seg_targets = ImageList.from_tensors(
sem_seg_targets, self.size_divisibility
).tensor
sem_seg_weights = [x["sem_seg_weights"].to(self.device) for x in batched_inputs]
sem_seg_weights = ImageList.from_tensors(
sem_seg_weights, self.size_divisibility
).tensor
center_targets = [x["center"].to(self.device) for x in batched_inputs]
center_targets = ImageList.from_tensors(
center_targets, self.size_divisibility
).tensor.unsqueeze(1)
center_weights = [x["center_weights"].to(self.device) for x in batched_inputs]
center_weights = ImageList.from_tensors(
center_weights, self.size_divisibility
).tensor
offset_targets = [x["offset"].to(self.device) for x in batched_inputs]
offset_targets = ImageList.from_tensors(
offset_targets, self.size_divisibility
).tensor
offset_weights = [x["offset_weights"].to(self.device) for x in batched_inputs]
offset_weights = ImageList.from_tensors(
offset_weights, self.size_divisibility
).tensor
targets.update(
{
"sem_seg": sem_seg_targets,
"sem_seg_weights": sem_seg_weights,
"center": center_targets,
"center_weights": center_weights,
"offset": offset_targets,
"offset_weights": offset_weights,
}
)
# Add depth targets
if self.with_depth:
# Add (non color jittered) images to targets for photometric loss calculation
images_orig = [
x["image_orig"].to(self.device).float() / 255.0 for x in batched_inputs
]
images_orig = ImageList.from_tensors(images_orig, self.size_divisibility).tensor
images_prev_orig = [
x["image_prev_orig"].to(self.device).float() / 255.0 for x in batched_inputs
]
images_prev_orig = ImageList.from_tensors(
images_prev_orig, self.size_divisibility
).tensor
images_next_orig = [
x["image_next_orig"].to(self.device).float() / 255.0 for x in batched_inputs
]
images_next_orig = ImageList.from_tensors(
images_next_orig, self.size_divisibility
).tensor
camera_matrices = [x["camera_matrix"] for x in batched_inputs]
camera_matrices = torch.stack(camera_matrices, dim=0).to(self.device)
masks = [x["reprojection_mask"].to(self.device) for x in batched_inputs]
masks = ImageList.from_tensors(masks, self.size_divisibility).tensor.unsqueeze_(1)
targets.update(
{
"image_orig": images_orig,
"image_prev_orig": images_prev_orig,
"image_next_orig": images_next_orig,
"camera_matrix": camera_matrices,
"reprojection_mask": masks,
}
)
# Calculate losses
losses = {}
if self.with_panoptic:
losses.update(self.sem_seg_head.losses(outputs, targets))
losses.update(self.ins_embed_head.losses(outputs, targets))
if self.with_depth:
losses.update(self.depth_head.losses(outputs, targets))
# Multiply loss values with task specific homoscedastic uncertainty
if self.with_uncertainty:
idx = 0
storage = get_event_storage()
for key, value in losses.items():
storage.put_scalar(key + "_raw", value.detach().item())
tau = 1.0 if key == "loss_sem_seg" else 0.5
losses[key] = (
tau * torch.exp(-self.log_vars[idx]) * value + 0.5 * self.log_vars[idx]
)
storage.put_scalar(
key + "_uncertainty", math.exp(self.log_vars[idx].detach().item())
)
idx = idx + 1
return losses
# Post-processing does not support batched inputs, hence process each input separately.
processed_results = []
for idx in range(len(batched_inputs)):
height = batched_inputs[idx].get("height")
width = batched_inputs[idx].get("width")
image_size = images.image_sizes[idx]
if self.with_panoptic:
r = sem_seg_postprocess(outputs["sem_seg"][idx], image_size, height, width)
c = sem_seg_postprocess(outputs["center"][idx], image_size, height, width)
o = sem_seg_postprocess(outputs["offset"][idx], image_size, height, width)
# Post-processing to get panoptic segmentation.
panoptic_prediction = self.panoptic_post_proc_func(
sem_seg=r.argmax(dim=0, keepdim=True),
center_heatmap=c,
offsets=o,
)
processed_results.append(
{"sem_seg": r, "panoptic_seg": (panoptic_prediction, None)}
)
if self.predict_instances:
# For instance segmentation evaluation. Disabled by default.
instances = self.instance_post_proc_func(
sem_seg=r,
center_heatmap=c,
panoptic_image=panoptic_prediction,
)
if len(instances) > 0:
processed_results[-1]["instances"] = Instances.cat(instances)
if self.with_depth:
d = sem_seg_postprocess(outputs["depth"][idx], image_size, height, width)
# Post-processing to get metric depth prediction.
depth_prediction, xyz_points = self.depth_post_proc_func(
depth_logits=d.unsqueeze(0),
camera_matrix=batched_inputs[0]["camera_matrix"].unsqueeze(0)
if "camera_matrix" in batched_inputs[0]
else None,
real_camera_height=batched_inputs[0]["camera_height"]
if "camera_height" in batched_inputs[0]
else None,
panoptic_seg=processed_results[-1]["panoptic_seg"][0]
if self.with_panoptic
else None,
)
if self.with_panoptic:
processed_results[-1]["depth"] = (depth_prediction, xyz_points)
else:
processed_results.append({"depth": (depth_prediction, xyz_points)})
return processed_results
def forward_multi_scale_flip(self, norm_images, scales=None, flip=True):
"""
Process norm_images through MGNet by
1. augmenting norm_images using multiple scales and horizontal flipping
2. averaging the raw predictions from each augmented forward pass.
Args:
norm_images: Tensor of shape [N, 3, H, W], normalized input image batch
scales: List of float values, scale factors | |
<filename>stats/utils.py<gh_stars>0
import argparse
from collections import Counter
import csv
import logging
import os
import math
import sqlite3
log = logging.getLogger()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(sum(map(ord, "aesthetics")))
import seaborn as sns
FONT_SIZE = 18
def set_sns_style(font_size=FONT_SIZE):
    """Apply the project's seaborn "white" style with the given font size."""
    overrides = {
        'font.family': 'serif',
        'grid.color': '0.95',
        'legend.frameon': True,
        'legend.fontsize': font_size,
        'xtick.labelsize': font_size,
        'ytick.labelsize': font_size,
        'xtick.major.size': 0,
    }
    sns.set_style("white", overrides)
# Year range covered by the data set and used as default plot x-limits.
EARLIEST_YEAR = 1900
LATEST_YEAR = 2014
# Default matplotlib figure dimensions (inches) and plot line width.
HEIGHT = 5
WIDTH = 10
LINE_WIDTH = 3.5
# Output directory for generated figures, next to this module; must exist.
FIGS = os.path.join(os.path.dirname(__file__), 'figs')
assert os.path.isdir(FIGS)
# Twenty-year buckets as (inclusive start, exclusive end) pairs.
DECADES = [
    (1900, 1920),
    (1920, 1940),
    (1940, 1960),
    (1960, 1980),
    (1980, 2000),
    (2000, 2020),
]
def argparser_factory():
    """Return an ArgumentParser preloaded with the shared plotting options.

    Side effect: configures root logging at INFO level.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--db')
    parser.add_argument('--width', type=int, default=WIDTH)
    parser.add_argument('--height', type=int, default=HEIGHT)
    parser.add_argument('--font-size', type=float, default=FONT_SIZE)
    logging.basicConfig(level=logging.INFO)
    return parser
def db_factory(db_filename):
    """Open and return a sqlite3 connection to *db_filename*."""
    connection = sqlite3.connect(db_filename)
    return connection
def z_proportion(n1, n2, x1, x2, lower_x=5):
    """Two-sample z statistic for comparing proportions x1/n1 vs x2/n2.

    Returns None (after logging a warning) when any success/failure cell is
    below *lower_x*, i.e. when the normal approximation is unreliable.
    """
    assert x1 <= n1, 'Count 1 must be lower than total 1\t{}\t{}'.format(x1, n1)
    assert x2 <= n2, 'Count 2 must be lower than total 2\t{}\t{}'.format(x2, n2)
    inv_x1 = n1 - x1
    inv_x2 = n2 - x2
    # All four cells of the 2x2 table must be large enough.
    if min(x1, x2, inv_x1, inv_x2) < lower_x:
        logging.warning('Invalid z proportion test\tx1/inv\t{}/{}\tx2/inv\t{}/{}'.format(x1, inv_x1, x2, inv_x2))
        return None
    p1 = x1 / float(n1)
    p2 = x2 / float(n2)
    pooled = (x1 + x2) / float(n1 + n2)
    std_err = math.sqrt((pooled * (1 - pooled)) * (1 / float(n1) + 1 / float(n2)))
    return (p1 - p2) / std_err
def set_axis_font_size(a, size=FONT_SIZE):
    """Set the font size of an axes' title, axis labels and tick labels."""
    targets = [a.title, a.xaxis.label, a.yaxis.label]
    targets += a.get_xticklabels()
    targets += a.get_yticklabels()
    for element in targets:
        element.set_fontsize(size)
def plot_role_counts(counts, label, fname, window=5,
                     width=WIDTH, height=HEIGHT, font_size=FONT_SIZE):
    """Plot a rolling mean of yearly role counts and save the figure to *fname*.

    *label* is accepted for signature parity with the other plot helpers but
    is not used here.
    """
    smoothed = pd.rolling_mean(pd.DataFrame(counts), window)
    set_sns_style(font_size)
    fig, axis = plt.subplots(figsize=(width, height))
    axis.plot(smoothed.index, smoothed['Count'], lw=LINE_WIDTH)
    set_axis_font_size(axis, font_size)
    # Axis cosmetics.
    axis.set_ylabel('Count', rotation=90)
    axis.set_xlabel('Year')
    axis.set_ylim(0)
    axis.set_xlim(left=EARLIEST_YEAR, right=LATEST_YEAR)
    plt.savefig(fname)
# Plot styling per series: (color, primary line style, secondary line style).
# The secondary style is only used by the per-medium plots for the p(f|medium)
# series drawn on the twin axis; None where no secondary style is needed.
STYLES = {
    'M': ('g', '-', None),
    'F': ('b', '--', None),
    'p(F)': ('r', ':', None),
    'TV': ('g', '-', '--'),
    'FILM': ('r', ':', '-.'),
}
def plot_gender_counts_pf(counts, label, window=5,
                          earliest_year=EARLIEST_YEAR,
                          width=WIDTH, height=HEIGHT,
                          font_size=FONT_SIZE):
    """Plot rolling-mean F/M counts (left axis) and p(F) (right axis) by year.

    Saves the figure under FIGS as '<label>.rm-<window>.pdf'.
    """
    set_sns_style(font_size)
    df = pd.DataFrame(counts)
    rm = pd.rolling_mean(df, window)
    lines = []
    labels = []
    fig, ax1 = plt.subplots(figsize=(width, height))
    # Plot the raw count series on the left axis.
    for c in ['F', 'M']:
        color, style, _ = STYLES[c]
        lines.append(ax1.plot(rm.index, getattr(rm, c), color=color,
                              ls=style, lw=LINE_WIDTH)[0])
        labels.append(c)
    # Plot gender series (female proportion) on a twin right-hand axis.
    ax2 = ax1.twinx()
    c = 'p(F)'
    color, style, _ = STYLES[c]
    lines.append(ax2.plot(rm.index, rm[c], color=color,
                          ls=style, lw=LINE_WIDTH)[0])
    labels.append(c)
    # Legend, labels and axis limits.
    ax1.legend(lines, labels, loc=2, fontsize=font_size)
    ax1.set_ylabel('Count', rotation=90)
    ax1.set_xlabel('Year')
    ax1.set_ylim(0)
    ax1.set_xlim(left=earliest_year, right=LATEST_YEAR)
    ax2.set_ylim(0, 1)
    ax2.set_ylabel('p(F)', rotation=90)
    ax2.set_xlim(left=earliest_year, right=LATEST_YEAR)
    set_axis_font_size(ax1, font_size)
    set_axis_font_size(ax2, font_size)
    plt.savefig(os.path.join(FIGS, '{}.rm-{}.pdf'.format(label.replace(' ', '_'), window)))
def plot_gender_counts_pf_by_medium(counts, p_females, label, width=WIDTH,
                                    height=HEIGHT, window=5, font_size=FONT_SIZE,
                                    earliest_year=EARLIEST_YEAR):
    """Plot per-medium rolling-mean counts and female proportions by year.

    *counts* columns are media (e.g. 'TV', 'FILM'); *p_females* holds the
    corresponding female proportions, drawn on a twin axis using each
    medium's secondary line style.  Saves '<label>.rm-<window>.pdf' to FIGS.
    """
    set_sns_style(font_size)
    df = pd.DataFrame(counts)
    rm = pd.rolling_mean(df, window)
    # Plot medium series.
    lines = []
    labels = []
    fig, ax1 = plt.subplots(figsize=(width, height))
    for c in rm.columns:
        color, style1, style2 = STYLES[c]
        lines.append(ax1.plot(rm.index, getattr(rm, c), color=color,
                              ls=style1, lw=LINE_WIDTH)[0])
        labels.append(c)
    set_axis_font_size(ax1, font_size)
    # Plot gender series on a twin right-hand axis, reusing each medium's
    # color but its secondary line style.
    ax2 = ax1.twinx()
    df = pd.DataFrame(p_females)
    rm = pd.rolling_mean(df, window)
    for c in rm.columns:
        color, style1, style2 = STYLES[c]
        lines.append(ax2.plot(rm.index, getattr(rm, c), color=color,
                              ls=style2, lw=LINE_WIDTH)[0])
        labels.append('p(f|{})'.format(c))
    set_axis_font_size(ax2, font_size)
    # Legend, labels and axis limits.
    ax1.legend(lines, labels, loc=2, fontsize=font_size)
    ax1.set_ylabel('Count', rotation=90)
    ax1.set_xlabel('Year')
    ax1.set_ylim(0)
    ax1.set_xlim(left=earliest_year, right=LATEST_YEAR)
    ax2.set_ylim(0, 1)
    ax2.set_ylabel('p(F)', rotation=90)
    ax2.set_xlim(left=earliest_year, right=LATEST_YEAR)
    plt.savefig(os.path.join(FIGS, '{}.rm-{}.pdf'.format(label.replace(' ', '_'), window)))
def read_imdb_census_mapping(f):
    """Parse a tab-separated (label, IMDb role, census label) mapping file.

    Rows whose first field starts with '#' are treated as comments.
    Returns (labels, label -> set of IMDb roles, label -> set of census labels).
    """
    labels = set()
    label_to_imdb = {}
    label_to_census = {}
    reader = csv.reader(f, delimiter='\t')
    for row in reader:
        label, imdb_role, census_label = row
        if label.startswith('#'):
            continue
        labels.add(label)
        label_to_imdb.setdefault(label, set()).add(imdb_role)
        label_to_census.setdefault(label, set()).add(census_label)
    return labels, label_to_imdb, label_to_census
def read_census_data(f):
    """Parse tab-separated census data into count and p(F) DataFrames.

    Each row has a 'Role' column plus one column per year; each non-empty
    cell looks like "(123, 45.6)": an employment count in thousands followed
    by the female percentage, where -1 marks a missing percentage.

    Returns:
        (counts, p_fs): DataFrames with roles as index and years as columns;
        counts are absolute (thousands * 1000), p_fs are in [0, 1].
    """
    counts = {}
    p_fs = {}
    for row in csv.DictReader(f, delimiter='\t'):
        # Remove blanks.  (.items() replaces the py2-only .iteritems().)
        row = {k: v for k, v in row.items() if v}
        role = row.pop('Role')
        for year, raw in row.items():
            year = int(year)
            count, percent_f = raw.lstrip('(').rstrip(')').split()
            count = float(count.rstrip(',')) * 1000
            # BUG FIX: percent_f is a string, so the old comparison
            # `percent_f != -1` was always true and -1 sentinels leaked
            # through as p_f == -0.01; compare numerically instead.
            p_f = float(percent_f) / 100.0 if float(percent_f) != -1 else 0
            counts.setdefault(year, {})[role] = count
            p_fs.setdefault(year, {})[role] = p_f
    return pd.DataFrame(counts), pd.DataFrame(p_fs)
# SQL WHERE fragments restricting the `parts` table to a medium; keyed by the
# medium name used throughout this module (None applies no filter).
MEDIA = {
    None: '',
    'TV': "medium = 'TV'",
    'FILM': "(medium = 'FILM' OR medium = 'VIDEO')",
    'FILM+TV': "(medium = 'FILM' OR medium = 'VIDEO' OR medium = 'TV')",
}
# Default medium filter used by the query helpers below.
DEFAULT_MEDIUM = 'FILM+TV'
def get_all_role_counts(cursor, medium=DEFAULT_MEDIUM):
    """Return ({'Count': {year: count}}, grand_total) of parts per year."""
    query = '''
        SELECT year, SUM(count), SUM(count * female) / SUM(count)
        FROM parts
        WHERE {}
        GROUP BY year;'''.format(MEDIA[medium])
    cursor.execute(query)
    counts = {}
    total = 0
    # The per-year female proportion is returned by the query but not used here.
    for year, count, _p_female in cursor.fetchall():
        counts.setdefault('Count', {})[year] = count
        total += count
    return counts, total
def get_gender_roles_for_year(cursor, year, medium=DEFAULT_MEDIUM, country=None):
    """Map role name -> (total count, female count) for one year.

    Optionally restricts the query to a single *country*.
    """
    bindings = (year,) if country is None else (year, country)
    country_q = '' if country is None else ' AND country = ?'
    q = '''
        SELECT name, SUM(count), SUM(CAST(count*female AS INT))
        FROM parts
        WHERE year = ?
        AND {}
        {}
        GROUP BY name'''.format(MEDIA[medium], country_q)
    cursor.execute(q, bindings)
    return {name: (total, females) for name, total, females in cursor.fetchall()}
def get_gender_counts_for_year(cursor, role=None, medium=DEFAULT_MEDIUM):
    """Per-year female/male counts and female proportion.

    When *role* is given, restricts to names containing it (SQL LIKE).
    Returns {'M': {year: n}, 'F': {year: n}, 'p(F)': {year: p}}.
    """
    query = '''
        SELECT year, SUM(count * female), SUM(count * (1-female)), SUM(count * female) / SUM(count)
        FROM parts
        WHERE {}'''.format(MEDIA[medium])
    if role is None:
        binding = tuple()
    else:
        query += ' AND name LIKE ?'
        binding = ('%{}%'.format(role),)
    query += ' GROUP BY year'
    cursor.execute(query, binding)
    counts = {}
    for year, f_count, m_count, p_female in cursor.fetchall():
        counts.setdefault('M', {})[year] = m_count
        counts.setdefault('F', {})[year] = f_count
        counts.setdefault('p(F)', {})[year] = p_female
    return counts
def plot_census_comparison(data, year, medium, font_size=FONT_SIZE):
    """Scatter IMDb vs census female proportion per role for one year.

    *data* is a list of dicts with keys 'role', 'z', 'IMDb', 'Census'
    (as produced by get_census_comparisons).  Saves the figure to FIGS as
    'IMDbVsCensus.<year>.<medium>.pdf'.
    """
    set_sns_style(font_size)
    labels_z = {i['role']: i['z'] for i in data}
    labels = [i['role'] for i in data]
    imdb = [i['IMDb'] for i in data]
    census = [i['Census'] for i in data]
    fig, ax = plt.subplots(figsize=(7, 7))
    ax.set_xlabel('p(F|OES @ {})'.format(year))
    if medium:
        ax.set_ylabel('p(F|IMDb {} @ {})'.format(medium, year))
    else:
        ax.set_ylabel('p(F|IMDb @ {})'.format(year))
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.scatter(census, imdb)
    # Diagonal reference line: points on it agree between the two sources.
    ax.plot([0, 1], [0, 1], ls='--', color='gray', alpha=0.6)
    set_axis_font_size(ax, font_size)
    for l, x, y in zip(labels, census, imdb):
        z = labels_z.get(l)
        # Mark roles with a significant difference (|z| > 1.96, i.e. p < .05).
        if z is not None and abs(z) > 1.96:
            l += '+'
        xytext = (-1, 1)
        va = 'bottom'
        ha = 'right'
        # NOTE(review): disabled per-label placement tweaks kept as dead code.
        '''
        if l.startswith('nurse'):
            va = 'top'
        if l.startswith('pilot'):
            ha = 'left'
        '''
        ax.annotate(l, xy=(x, y), xytext=xytext,
                    textcoords='offset points', ha=ha, va=va,
                    fontsize=font_size,
                    )
    plt.savefig(os.path.join(FIGS, 'IMDbVsCensus.{}.{}.pdf'.format(year, medium)))
def get_census_comparisons(mapping, census, cursor, focus_year=None, medium=DEFAULT_MEDIUM):
    """Cross-reference census gender proportions with IMDb role data.

    Args:
        mapping: path to the tab-separated label/IMDb/census mapping file.
        census: path to the census data file (see read_census_data).
        cursor: sqlite3 cursor over the `parts` table.
        focus_year: if set, only this census year is processed.
        medium: IMDb medium filter key (see MEDIA).

    Returns:
        {year: [{'role', 'z', 'IMDb', 'Census'}, ...]} for years with data.

    Compatibility fix: all py2-only ``iteritems()`` calls (plain dicts and
    pandas objects — ``DataFrame.iteritems`` was removed in pandas 2.0) are
    replaced with ``items()``.
    """
    # Read mappings.
    labels, label_to_imdb, label_to_census = read_imdb_census_mapping(open(mapping))
    # Invert label -> census-labels into census-label -> label, warning on dupes.
    census_to_label = {}
    for label, c_labels in label_to_census.items():
        for c_label in c_labels:
            if c_label in census_to_label:
                log.warning('Census label "{}" already mapped!'.format(c_label))
            else:
                census_to_label[c_label] = label
    # Read census data.
    counts, p_fs = read_census_data(open(census))
    # Collected while scanning but never read afterwards (diagnostic only).
    unmapped_census, unmapped_census_years = Counter(), Counter()
    all_plot_data = {}
    # Scan each known year (DataFrame columns are years).
    for year, data in counts.items():
        if focus_year and year != focus_year:
            continue
        log.info('Fetching roles for {}'.format(year))
        mapped = total = compared = 0
        plot_data = []
        imdb_roles = get_gender_roles_for_year(cursor, year, medium, country='us')
        # Cross-reference.
        mapped_counts, mapped_f_counts = Counter(), Counter()
        for census_role, census_count in data.items():
            if np.isnan(census_count):
                continue
            # Try to match from census to label mapping.
            label = census_to_label.get(census_role)
            total += 1
            if label is None:
                unmapped_census[census_role] += census_count
                unmapped_census_years[census_role] += 1
                continue
            else:
                mapped += 1
            mapped_counts[label] += census_count
            mapped_f_counts[label] += p_fs[year][census_role] * census_count
        for label, total_census_count in sorted(mapped_counts.items()):
            total_census_f_count = mapped_f_counts[label]
            # Try to map from label to IMDb.
            imdb_count = imdb_f_count = 0
            for imdb_role in label_to_imdb[label]:
                role_count, role_f_count = imdb_roles.get(imdb_role, (0, 0))
                log.info('Matched "{}" to "{}"\t{}\t{}'.format(label, imdb_role, role_count, role_f_count))
                imdb_count += role_count
                imdb_f_count += role_f_count
            z = z_proportion(imdb_count, total_census_count, imdb_f_count, total_census_f_count)
            if z is None:
                log.warning('Invalid z for "{}"\timdb {}\ttotal_census {}\tf_imdb {}\tf_census {}'.format(
                    label, imdb_count, total_census_count, imdb_f_count, total_census_f_count))
            if not imdb_count:
                continue
            compared += 1
            plot_data.append({'role': label, 'z': z, 'IMDb': imdb_f_count / float(imdb_count),
                              'Census': total_census_f_count / float(total_census_count)})
        # Guard the percentage against total == 0 (previously a ZeroDivisionError).
        mapped_pct = 100 * mapped / float(total) if total else 0.0
        log.info('Mapped {} of {}\t{:.1f}%\tCompared {}'.format(mapped, total, mapped_pct, compared))
        if plot_data:
            all_plot_data[year] = plot_data
    return all_plot_data
def plot_census_comparison_line(df, annotations, fname, font_size=FONT_SIZE, width=WIDTH, height=HEIGHT):
    """Plot the 'Census' and 'IMDb' columns of *df* over time and save to *fname*.

    *annotations* is an iterable of (x, y) positions marked with a bold 'o'.
    """
    set_sns_style(font_size)
    fig, axis = plt.subplots(figsize=(width, height))
    lines, labels = [], []
    for column, dashes in zip(['Census', 'IMDb'], ['--', '-']):
        line = axis.plot(df.index, getattr(df, column), ls=dashes, lw=5)[0]
        lines.append(line)
        labels.append(column)
    # Overlay 'o' markers at the requested (x, y) positions.
    for x, y in annotations:
        axis.annotate('o', (x, y), fontsize=font_size, weight='bold')
    axis.legend(lines, labels, fontsize=font_size)
    axis.set_ylabel('p(F)', rotation=90)
    axis.set_xlabel('Year')
    set_axis_font_size(axis, font_size)
    plt.savefig(fname)
def plot_rolling_hellinger(df, fname, earliest_year, font_size=FONT_SIZE,
                           width=WIDTH, height=HEIGHT):
    """Plot per-medium inter-year distance series from *df* and save to *fname*.

    Column names of *df* must appear in the local STYLES mapping below.
    """
    # Local style table; deliberately shadows the module-level STYLES since
    # this plot uses its own set of series names.
    STYLES = {
        'TV': ('g', '-'),
        'TV count': ('g', '--'),
        'TV Count Correlation': ('g', '--'),
        'FILM': ('r', ':'),
        'FILM count': ('r', '-.'),
        'FILM Count Correlation': ('r', '-.'),
    }
    set_sns_style(font_size)
    fig, ax1 = plt.subplots(figsize=(width, height))
    lines, labels = [], []
    a = ax1
    set_axis_font_size(a, font_size)
    # One line per column, styled by name.
    for c in df.columns:
        color, style = STYLES[c]
        lines.append(a.plot(df.index, getattr(df, c), color=color,
                            ls=style, lw=LINE_WIDTH)[0])
        labels.append(c)
    ax1.legend(lines, labels, fontsize=font_size)
    ax1.set_ylabel('Inter-year distance', rotation=90)
    ax1.set_xlabel('Year')
    ax1.set_xlim(left=earliest_year, right=LATEST_YEAR-1)
    plt.savefig(fname)
"""
load list of proper names
"""
def properNames(paths=['../data/malenames.txt','../data/femalenames.txt']):
names = set()
for path | |
# This file is auto-generated from a Python script that parses a PhysiCell configuration (.xml) file.
#
# Edit at your own risk.
#
import os
from ipywidgets import Label,Text,Checkbox,Button,HBox,VBox,FloatText,IntText,BoundedIntText,BoundedFloatText,Layout,Box
class UserTab(object):
def __init__(self):
micron_units = Label('micron') # use "option m" (Mac, for micro symbol)
constWidth = '180px'
tab_height = '500px'
stepsize = 10
#style = {'description_width': '250px'}
style = {'description_width': '25%'}
layout = {'width': '400px'}
name_button_layout={'width':'25%'}
widget_layout = {'width': '15%'}
units_button_layout ={'width':'15%'}
desc_button_layout={'width':'45%'}
divider_button_layout={'width':'40%'}
param_name1 = Button(description='random_seed', disabled=True, layout=name_button_layout)
param_name1.style.button_color = 'lightgreen'
self.random_seed = IntText(
value=0,
step=1,
style=style, layout=widget_layout)
div_row1 = Button(description='---Initialization settings---', disabled=True, layout=divider_button_layout)
param_name2 = Button(description='number_of_A', disabled=True, layout=name_button_layout)
param_name2.style.button_color = 'tan'
self.number_of_A = IntText(
value=25,
step=1,
style=style, layout=widget_layout)
param_name3 = Button(description='number_of_B', disabled=True, layout=name_button_layout)
param_name3.style.button_color = 'lightgreen'
self.number_of_B = IntText(
value=25,
step=1,
style=style, layout=widget_layout)
param_name4 = Button(description='number_of_C', disabled=True, layout=name_button_layout)
param_name4.style.button_color = 'tan'
self.number_of_C = IntText(
value=25,
step=1,
style=style, layout=widget_layout)
param_name5 = Button(description='max_distance_from_origin', disabled=True, layout=name_button_layout)
param_name5.style.button_color = 'lightgreen'
self.max_distance_from_origin = FloatText(
value=150,
step=10,
style=style, layout=widget_layout)
div_row2 = Button(description='---Coloring settings---', disabled=True, layout=divider_button_layout)
param_name6 = Button(description='A_color', disabled=True, layout=name_button_layout)
param_name6.style.button_color = 'tan'
self.A_color = Text(
value='magenta',
style=style, layout=widget_layout)
param_name7 = Button(description='B_color', disabled=True, layout=name_button_layout)
param_name7.style.button_color = 'lightgreen'
self.B_color = Text(
value='green',
style=style, layout=widget_layout)
param_name8 = Button(description='C_color', disabled=True, layout=name_button_layout)
param_name8.style.button_color = 'tan'
self.C_color = Text(
value='cyan',
style=style, layout=widget_layout)
param_name9 = Button(description='standard_plots', disabled=True, layout=name_button_layout)
param_name9.style.button_color = 'lightgreen'
self.standard_plots = Checkbox(
value=True,
style=style, layout=widget_layout)
div_row3 = Button(description='---Overall signaling settings---', disabled=True, layout=divider_button_layout)
param_name10 = Button(description='hill_power', disabled=True, layout=name_button_layout)
param_name10.style.button_color = 'tan'
self.hill_power = FloatText(
value=5,
step=0.1,
style=style, layout=widget_layout)
param_name11 = Button(description='half_max', disabled=True, layout=name_button_layout)
param_name11.style.button_color = 'lightgreen'
self.half_max = FloatText(
value=0.1,
step=0.01,
style=style, layout=widget_layout)
div_row4 = Button(description='---cell type A settings---', disabled=True, layout=divider_button_layout)
param_name12 = Button(description='A_base_cycle', disabled=True, layout=name_button_layout)
param_name12.style.button_color = 'tan'
self.A_base_cycle = FloatText(
value=0.00072,
step=0.0001,
style=style, layout=widget_layout)
param_name13 = Button(description='A_max_cycle', disabled=True, layout=name_button_layout)
param_name13.style.button_color = 'lightgreen'
self.A_max_cycle = FloatText(
value=0.0072,
step=0.001,
style=style, layout=widget_layout)
param_name14 = Button(description='A_cycle_A', disabled=True, layout=name_button_layout)
param_name14.style.button_color = 'tan'
self.A_cycle_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name15 = Button(description='A_cycle_B', disabled=True, layout=name_button_layout)
param_name15.style.button_color = 'lightgreen'
self.A_cycle_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name16 = Button(description='A_cycle_C', disabled=True, layout=name_button_layout)
param_name16.style.button_color = 'tan'
self.A_cycle_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name17 = Button(description='A_cycle_pressure_threshold', disabled=True, layout=name_button_layout)
param_name17.style.button_color = 'lightgreen'
self.A_cycle_pressure_threshold = FloatText(
value=2.0,
step=0.1,
style=style, layout=widget_layout)
param_name18 = Button(description='A_base_death', disabled=True, layout=name_button_layout)
param_name18.style.button_color = 'tan'
self.A_base_death = FloatText(
value=5.31667e-05,
step=1e-05,
style=style, layout=widget_layout)
param_name19 = Button(description='A_max_death', disabled=True, layout=name_button_layout)
param_name19.style.button_color = 'lightgreen'
self.A_max_death = FloatText(
value=5.31667e-04,
step=0.0001,
style=style, layout=widget_layout)
param_name20 = Button(description='A_death_A', disabled=True, layout=name_button_layout)
param_name20.style.button_color = 'tan'
self.A_death_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name21 = Button(description='A_death_B', disabled=True, layout=name_button_layout)
param_name21.style.button_color = 'lightgreen'
self.A_death_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name22 = Button(description='A_death_C', disabled=True, layout=name_button_layout)
param_name22.style.button_color = 'tan'
self.A_death_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name23 = Button(description='A_death_R', disabled=True, layout=name_button_layout)
param_name23.style.button_color = 'lightgreen'
self.A_death_R = Text(
value='neutral',
style=style, layout=widget_layout)
param_name24 = Button(description='A_apoptosis_pressure_threshold', disabled=True, layout=name_button_layout)
param_name24.style.button_color = 'tan'
self.A_apoptosis_pressure_threshold = FloatText(
value=100.0,
step=10,
style=style, layout=widget_layout)
param_name25 = Button(description='A_necrosis_threshold', disabled=True, layout=name_button_layout)
param_name25.style.button_color = 'lightgreen'
self.A_necrosis_threshold = FloatText(
value=0.4,
step=0.1,
style=style, layout=widget_layout)
param_name26 = Button(description='A_base_speed', disabled=True, layout=name_button_layout)
param_name26.style.button_color = 'tan'
self.A_base_speed = FloatText(
value=0.1,
step=0.01,
style=style, layout=widget_layout)
param_name27 = Button(description='A_max_speed', disabled=True, layout=name_button_layout)
param_name27.style.button_color = 'lightgreen'
self.A_max_speed = FloatText(
value=1,
step=0.1,
style=style, layout=widget_layout)
param_name28 = Button(description='A_speed_A', disabled=True, layout=name_button_layout)
param_name28.style.button_color = 'tan'
self.A_speed_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name29 = Button(description='A_speed_B', disabled=True, layout=name_button_layout)
param_name29.style.button_color = 'lightgreen'
self.A_speed_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name30 = Button(description='A_speed_C', disabled=True, layout=name_button_layout)
param_name30.style.button_color = 'tan'
self.A_speed_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name31 = Button(description='A_speed_R', disabled=True, layout=name_button_layout)
param_name31.style.button_color = 'lightgreen'
self.A_speed_R = Text(
value='neutral',
style=style, layout=widget_layout)
param_name32 = Button(description='A_base_secretion', disabled=True, layout=name_button_layout)
param_name32.style.button_color = 'tan'
self.A_base_secretion = FloatText(
value=1,
step=0.1,
style=style, layout=widget_layout)
param_name33 = Button(description='A_max_secretion', disabled=True, layout=name_button_layout)
param_name33.style.button_color = 'lightgreen'
self.A_max_secretion = FloatText(
value=10,
step=1,
style=style, layout=widget_layout)
param_name34 = Button(description='A_signal_A', disabled=True, layout=name_button_layout)
param_name34.style.button_color = 'tan'
self.A_signal_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name35 = Button(description='A_signal_B', disabled=True, layout=name_button_layout)
param_name35.style.button_color = 'lightgreen'
self.A_signal_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name36 = Button(description='A_signal_C', disabled=True, layout=name_button_layout)
param_name36.style.button_color = 'tan'
self.A_signal_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name37 = Button(description='A_signal_R', disabled=True, layout=name_button_layout)
param_name37.style.button_color = 'lightgreen'
self.A_signal_R = Text(
value='neutral',
style=style, layout=widget_layout)
div_row5 = Button(description='---cell type B settings---', disabled=True, layout=divider_button_layout)
param_name38 = Button(description='B_base_cycle', disabled=True, layout=name_button_layout)
param_name38.style.button_color = 'tan'
self.B_base_cycle = FloatText(
value=0.00072,
step=0.0001,
style=style, layout=widget_layout)
param_name39 = Button(description='B_max_cycle', disabled=True, layout=name_button_layout)
param_name39.style.button_color = 'lightgreen'
self.B_max_cycle = FloatText(
value=0.0072,
step=0.001,
style=style, layout=widget_layout)
param_name40 = Button(description='B_cycle_A', disabled=True, layout=name_button_layout)
param_name40.style.button_color = 'tan'
self.B_cycle_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name41 = Button(description='B_cycle_B', disabled=True, layout=name_button_layout)
param_name41.style.button_color = 'lightgreen'
self.B_cycle_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name42 = Button(description='B_cycle_C', disabled=True, layout=name_button_layout)
param_name42.style.button_color = 'tan'
self.B_cycle_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name43 = Button(description='B_cycle_pressure_threshold', disabled=True, layout=name_button_layout)
param_name43.style.button_color = 'lightgreen'
self.B_cycle_pressure_threshold = FloatText(
value=2.0,
step=0.1,
style=style, layout=widget_layout)
param_name44 = Button(description='B_base_death', disabled=True, layout=name_button_layout)
param_name44.style.button_color = 'tan'
self.B_base_death = FloatText(
value=5.31667e-05,
step=1e-05,
style=style, layout=widget_layout)
param_name45 = Button(description='B_max_death', disabled=True, layout=name_button_layout)
param_name45.style.button_color = 'lightgreen'
self.B_max_death = FloatText(
value=5.31667e-04,
step=0.0001,
style=style, layout=widget_layout)
param_name46 = Button(description='B_death_A', disabled=True, layout=name_button_layout)
param_name46.style.button_color = 'tan'
self.B_death_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name47 = Button(description='B_death_B', disabled=True, layout=name_button_layout)
param_name47.style.button_color = 'lightgreen'
self.B_death_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name48 = Button(description='B_death_C', disabled=True, layout=name_button_layout)
param_name48.style.button_color = 'tan'
self.B_death_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name49 = Button(description='B_death_R', disabled=True, layout=name_button_layout)
param_name49.style.button_color = 'lightgreen'
self.B_death_R = Text(
value='neutral',
style=style, layout=widget_layout)
param_name50 = Button(description='B_apoptosis_pressure_threshold', disabled=True, layout=name_button_layout)
param_name50.style.button_color = 'tan'
self.B_apoptosis_pressure_threshold = FloatText(
value=100.0,
step=10,
style=style, layout=widget_layout)
param_name51 = Button(description='B_necrosis_threshold', disabled=True, layout=name_button_layout)
param_name51.style.button_color = 'lightgreen'
self.B_necrosis_threshold = FloatText(
value=0.4,
step=0.1,
style=style, layout=widget_layout)
param_name52 = Button(description='B_base_speed', disabled=True, layout=name_button_layout)
param_name52.style.button_color = 'tan'
self.B_base_speed = FloatText(
value=0.1,
step=0.01,
style=style, layout=widget_layout)
param_name53 = Button(description='B_max_speed', disabled=True, layout=name_button_layout)
param_name53.style.button_color = 'lightgreen'
self.B_max_speed = FloatText(
value=1,
step=0.1,
style=style, layout=widget_layout)
param_name54 = Button(description='B_speed_A', disabled=True, layout=name_button_layout)
param_name54.style.button_color = 'tan'
self.B_speed_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name55 = Button(description='B_speed_B', disabled=True, layout=name_button_layout)
param_name55.style.button_color = 'lightgreen'
self.B_speed_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name56 = Button(description='B_speed_C', disabled=True, layout=name_button_layout)
param_name56.style.button_color = 'tan'
self.B_speed_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name57 = Button(description='B_speed_R', disabled=True, layout=name_button_layout)
param_name57.style.button_color = 'lightgreen'
self.B_speed_R = Text(
value='neutral',
style=style, layout=widget_layout)
param_name58 = Button(description='B_base_secretion', disabled=True, layout=name_button_layout)
param_name58.style.button_color = 'tan'
self.B_base_secretion = FloatText(
value=1,
step=0.1,
style=style, layout=widget_layout)
param_name59 = Button(description='B_max_secretion', disabled=True, layout=name_button_layout)
param_name59.style.button_color = 'lightgreen'
self.B_max_secretion = FloatText(
value=10,
step=1,
style=style, layout=widget_layout)
param_name60 = Button(description='B_signal_A', disabled=True, layout=name_button_layout)
param_name60.style.button_color = 'tan'
self.B_signal_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name61 = Button(description='B_signal_B', disabled=True, layout=name_button_layout)
param_name61.style.button_color = 'lightgreen'
self.B_signal_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name62 = Button(description='B_signal_C', disabled=True, layout=name_button_layout)
param_name62.style.button_color = 'tan'
self.B_signal_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name63 = Button(description='B_signal_R', disabled=True, layout=name_button_layout)
param_name63.style.button_color = 'lightgreen'
self.B_signal_R = Text(
value='neutral',
style=style, layout=widget_layout)
div_row6 = Button(description='---cell type C settings---', disabled=True, layout=divider_button_layout)
param_name64 = Button(description='C_base_cycle', disabled=True, layout=name_button_layout)
param_name64.style.button_color = 'tan'
self.C_base_cycle = FloatText(
value=0.00072,
step=0.0001,
style=style, layout=widget_layout)
param_name65 = Button(description='C_max_cycle', disabled=True, layout=name_button_layout)
param_name65.style.button_color = 'lightgreen'
self.C_max_cycle = FloatText(
value=0.0072,
step=0.001,
style=style, layout=widget_layout)
param_name66 = Button(description='C_cycle_A', disabled=True, layout=name_button_layout)
param_name66.style.button_color = 'tan'
self.C_cycle_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name67 = Button(description='C_cycle_B', disabled=True, layout=name_button_layout)
param_name67.style.button_color = 'lightgreen'
self.C_cycle_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name68 = Button(description='C_cycle_C', disabled=True, layout=name_button_layout)
param_name68.style.button_color = 'tan'
self.C_cycle_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name69 = Button(description='C_cycle_pressure_threshold', disabled=True, layout=name_button_layout)
param_name69.style.button_color = 'lightgreen'
self.C_cycle_pressure_threshold = FloatText(
value=2.0,
step=0.1,
style=style, layout=widget_layout)
param_name70 = Button(description='C_base_death', disabled=True, layout=name_button_layout)
param_name70.style.button_color = 'tan'
self.C_base_death = FloatText(
value=5.31667e-05,
step=1e-05,
style=style, layout=widget_layout)
param_name71 = Button(description='C_max_death', disabled=True, layout=name_button_layout)
param_name71.style.button_color = 'lightgreen'
self.C_max_death = FloatText(
value=5.31667e-04,
step=0.0001,
style=style, layout=widget_layout)
param_name72 = Button(description='C_death_A', disabled=True, layout=name_button_layout)
param_name72.style.button_color = 'tan'
self.C_death_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name73 = Button(description='C_death_B', disabled=True, layout=name_button_layout)
param_name73.style.button_color = 'lightgreen'
self.C_death_B = Text(
value='neutral',
style=style, layout=widget_layout)
param_name74 = Button(description='C_death_C', disabled=True, layout=name_button_layout)
param_name74.style.button_color = 'tan'
self.C_death_C = Text(
value='neutral',
style=style, layout=widget_layout)
param_name75 = Button(description='C_death_R', disabled=True, layout=name_button_layout)
param_name75.style.button_color = 'lightgreen'
self.C_death_R = Text(
value='neutral',
style=style, layout=widget_layout)
param_name76 = Button(description='C_apoptosis_pressure_threshold', disabled=True, layout=name_button_layout)
param_name76.style.button_color = 'tan'
self.C_apoptosis_pressure_threshold = FloatText(
value=100.0,
step=10,
style=style, layout=widget_layout)
param_name77 = Button(description='C_necrosis_threshold', disabled=True, layout=name_button_layout)
param_name77.style.button_color = 'lightgreen'
self.C_necrosis_threshold = FloatText(
value=0.4,
step=0.1,
style=style, layout=widget_layout)
param_name78 = Button(description='C_base_speed', disabled=True, layout=name_button_layout)
param_name78.style.button_color = 'tan'
self.C_base_speed = FloatText(
value=0.1,
step=0.01,
style=style, layout=widget_layout)
param_name79 = Button(description='C_max_speed', disabled=True, layout=name_button_layout)
param_name79.style.button_color = 'lightgreen'
self.C_max_speed = FloatText(
value=1,
step=0.1,
style=style, layout=widget_layout)
param_name80 = Button(description='C_speed_A', disabled=True, layout=name_button_layout)
param_name80.style.button_color = 'tan'
self.C_speed_A = Text(
value='neutral',
style=style, layout=widget_layout)
param_name81 = Button(description='C_speed_B', disabled=True, layout=name_button_layout)
param_name81.style.button_color = 'lightgreen'
| |
# distil/utils/data_handler.py
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
from torchvision import transforms
import torch
class DataHandler_Points(Dataset):
    """
    Dataset wrapper around raw feature points.

    Extends :class:`torch.utils.data.Dataset` so plain numpy arrays can be
    consumed by a DataLoader, with or without labels.

    Parameters
    ----------
    X: numpy array
        Data to be loaded (cast to float32)
    Y: numpy array, optional
        Labels to be loaded (default: None); only stored when select is False
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Unused for raw points; kept for interface parity with other handlers
    return_index: bool, optional
        When True, __getitem__ also yields the sample index (default: True)
    """

    def __init__(self, X, Y=None, select=True, use_test_transform=False, return_index=True):
        """Store the float32-cast features and, in labeled mode, the labels."""
        self.select = select
        self.X = X.astype(np.float32)
        if not select:
            self.Y = Y
        self.return_index = return_index

    def __getitem__(self, index):
        """Return the sample at *index*, with label and/or index as configured."""
        sample = self.X[index]
        if self.select:
            # Unlabeled mode: features only (plus index when requested).
            return (sample, index) if self.return_index else sample
        label = self.Y[index]
        return (sample, label, index) if self.return_index else (sample, label)

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_SVHN(Dataset):
    """
    Data Handler to load the SVHN dataset.

    Extends :class:`torch.utils.data.Dataset` to handle loading data even
    without labels.

    Parameters
    ----------
    X: numpy array
        Images stored channel-first (transposed to HWC before PIL conversion)
    Y: numpy array, optional
        Labels to be loaded (default: None); only stored when select is False
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Use test transform without augmentations like crop, flip, etc. (default: False)
    return_index: bool, optional
        When True, __getitem__ also yields the sample index (default: True)
    """

    # ImageNet normalization statistics used by both pipelines.
    _MEAN = (0.485, 0.456, 0.406)
    _STD = (0.229, 0.224, 0.225)

    def __init__(self, X, Y=None, select=True, use_test_transform=False, return_index=True):
        """Store data/labels and build the train/test transform pipelines."""
        self.select = select
        self.use_test_transform = use_test_transform
        normalize = transforms.Normalize(self._MEAN, self._STD)
        self.training_gen_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.test_gen_transform = transforms.Compose([transforms.ToTensor(), normalize])
        self.X = X
        if not select:
            self.Y = Y
        self.return_index = return_index

    def _prepare(self, arr):
        """CHW array -> PIL image -> normalized tensor via the active pipeline."""
        img = Image.fromarray(np.transpose(arr, (1, 2, 0)))
        pipeline = self.test_gen_transform if self.use_test_transform else self.training_gen_transform
        return pipeline(img)

    def __getitem__(self, index):
        """Return (x[, y][, index]) depending on the select/return_index flags."""
        x = self._prepare(self.X[index])
        if self.select:
            return (x, index) if self.return_index else x
        y = self.Y[index]
        return (x, y, index) if self.return_index else (x, y)

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_MNIST(Dataset):
    """
    Data Handler to load MNIST dataset.

    This class extends :class:`torch.utils.data.Dataset` to handle
    loading data even without labels.

    Parameters
    ----------
    X: numpy array
        Data to be loaded
    Y: numpy array, optional
        Labels to be loaded (default: None)
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Use test transform without augmentations like crop, flip, etc. (default: False)
    image_dim: int, optional
        dimension of the input image (32 for LeNet, 28 for MNISTNet)
    duplicateChannels: bool, optional
        Duplicate channels for black and white images
    return_index: bool, optional
        When True, __getitem__ also returns the sample index (default: True)
    """

    def __init__(self, X, Y=None, select=True, use_test_transform=False, image_dim=28,
                 duplicateChannels=False, return_index=True):
        """
        Constructor
        """
        self.select = select
        self.use_test_transform = use_test_transform
        self.duplicateChannels = duplicateChannels
        self.training_gen_transform = transforms.Compose([transforms.RandomCrop(image_dim, padding=4), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        self.test_gen_transform = transforms.Compose([transforms.Resize((image_dim, image_dim)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
        self.X = X
        if not self.select:
            self.Y = Y
        # BUG FIX: `return_index` was previously read here without being a
        # parameter of __init__, raising NameError; it is now accepted with
        # the same default as the other handlers in this module.
        self.return_index = return_index

    def __getitem__(self, index):
        """Return the transformed image (+ label when labeled, + index when requested)."""
        if not self.select:
            x, y = self.X[index], self.Y[index]
            x = Image.fromarray(x)
            if self.use_test_transform:
                x = self.test_gen_transform(x)
            else:
                x = self.training_gen_transform(x)
            if x.shape[0] == 1 and self.duplicateChannels:
                # Replicate the single grayscale channel to three channels.
                x = torch.repeat_interleave(x, 3, 0)
            # Honor return_index like the sibling handlers (default True keeps
            # the previous always-return-index behavior).
            return (x, y, index) if self.return_index else (x, y)
        else:
            x = self.X[index]
            x = Image.fromarray(x)
            if self.use_test_transform:
                x = self.test_gen_transform(x)
            else:
                x = self.training_gen_transform(x)
            if x.shape[0] == 1 and self.duplicateChannels:
                x = torch.repeat_interleave(x, 3, 0)
            return (x, index) if self.return_index else x

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_KMNIST(Dataset):
    """
    Data Handler to load the KMNIST dataset.

    Extends :class:`torch.utils.data.Dataset` to handle loading data even
    without labels.

    Parameters
    ----------
    X: numpy array
        Data to be loaded
    Y: numpy array, optional
        Labels to be loaded (default: None); only stored when select is False
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Use test transform without augmentations like crop, flip, etc. (default: False)
    return_index: bool, optional
        When True, __getitem__ also yields the sample index (default: True)
    """

    # MNIST normalization statistics, reused for KMNIST.
    _MEAN = (0.1307,)
    _STD = (0.3081,)

    def __init__(self, X, Y=None, select=True, use_test_transform=False, return_index=True):
        """Store data/labels and build the train/test transform pipelines."""
        self.select = select
        self.use_test_transform = use_test_transform
        normalize = transforms.Normalize(self._MEAN, self._STD)
        self.training_gen_transform = transforms.Compose([
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.test_gen_transform = transforms.Compose([transforms.ToTensor(), normalize])
        self.X = X
        if not select:
            self.Y = Y
        self.return_index = return_index

    def _prepare(self, arr):
        """Grayscale array -> PIL image -> normalized tensor via the active pipeline."""
        img = Image.fromarray(arr)
        pipeline = self.test_gen_transform if self.use_test_transform else self.training_gen_transform
        return pipeline(img)

    def __getitem__(self, index):
        """Return (x[, y][, index]) depending on the select/return_index flags."""
        x = self._prepare(self.X[index])
        if self.select:
            return (x, index) if self.return_index else x
        y = self.Y[index]
        return (x, y, index) if self.return_index else (x, y)

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_FASHION_MNIST(Dataset):
    """
    Data Handler to load the FASHION_MNIST dataset.

    Extends :class:`torch.utils.data.Dataset` to handle loading data even
    without labels.

    Parameters
    ----------
    X: numpy array
        Data to be loaded
    Y: numpy array, optional
        Labels to be loaded (default: None); only stored when select is False
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Use test transform without augmentations like crop, flip, etc. (default: False)
    return_index: bool, optional
        When True, __getitem__ also yields the sample index (default: True)
    """

    # MNIST normalization statistics, reused for Fashion-MNIST.
    _MEAN = (0.1307,)
    _STD = (0.3081,)

    def __init__(self, X, Y=None, select=True, use_test_transform=False, return_index=True):
        """Store data/labels and build the train/test transform pipelines."""
        self.select = select
        self.use_test_transform = use_test_transform
        normalize = transforms.Normalize(self._MEAN, self._STD)
        self.training_gen_transform = transforms.Compose([
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.test_gen_transform = transforms.Compose([transforms.ToTensor(), normalize])
        self.X = X
        if not select:
            self.Y = Y
        self.return_index = return_index

    def _prepare(self, arr):
        """Grayscale array -> PIL image -> normalized tensor via the active pipeline."""
        img = Image.fromarray(arr)
        pipeline = self.test_gen_transform if self.use_test_transform else self.training_gen_transform
        return pipeline(img)

    def __getitem__(self, index):
        """Return (x[, y][, index]) depending on the select/return_index flags."""
        x = self._prepare(self.X[index])
        if self.select:
            return (x, index) if self.return_index else x
        y = self.Y[index]
        return (x, y, index) if self.return_index else (x, y)

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_CIFAR10(Dataset):
    """
    Data Handler to load the CIFAR10 dataset.

    Extends :class:`torch.utils.data.Dataset` to handle loading data even
    without labels.

    Parameters
    ----------
    X: numpy array
        Data to be loaded
    Y: numpy array, optional
        Labels to be loaded (default: None); only stored when select is False
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Use test transform without augmentations like crop, flip, etc. (default: False)
    return_index: bool, optional
        When True, __getitem__ also yields the sample index (default: True)
    """

    # CIFAR10 normalization statistics used by both pipelines.
    _MEAN = (0.4914, 0.4822, 0.4465)
    _STD = (0.2023, 0.1994, 0.2010)

    def __init__(self, X, Y=None, select=True, use_test_transform=False, return_index=True):
        """Store data/labels and build the train/test transform pipelines."""
        self.select = select
        self.use_test_transform = use_test_transform
        normalize = transforms.Normalize(self._MEAN, self._STD)
        self.training_gen_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.test_gen_transform = transforms.Compose([transforms.ToTensor(), normalize])
        self.X = X
        if not select:
            self.Y = Y
        self.return_index = return_index

    def _prepare(self, arr):
        """HWC array -> PIL image -> normalized tensor via the active pipeline."""
        img = Image.fromarray(arr)
        pipeline = self.test_gen_transform if self.use_test_transform else self.training_gen_transform
        return pipeline(img)

    def __getitem__(self, index):
        """Return (x[, y][, index]) depending on the select/return_index flags."""
        x = self._prepare(self.X[index])
        if self.select:
            return (x, index) if self.return_index else x
        y = self.Y[index]
        return (x, y, index) if self.return_index else (x, y)

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_CIFAR100(Dataset):
    """
    Data Handler to load the CIFAR100 dataset.

    Extends :class:`torch.utils.data.Dataset` to handle loading data even
    without labels.

    Parameters
    ----------
    X: numpy array
        Data to be loaded
    Y: numpy array, optional
        Labels to be loaded (default: None); only stored when select is False
    select: bool
        True if loading data without labels, False otherwise
    use_test_transform: bool, optional
        Use test transform without augmentations like crop, flip, etc. (default: False)
    return_index: bool, optional
        When True, __getitem__ also yields the sample index (default: True)
    """

    # CIFAR100 normalization statistics used by both pipelines.
    _MEAN = (0.5071, 0.4867, 0.4408)
    _STD = (0.2675, 0.2565, 0.2761)

    def __init__(self, X, Y=None, select=True, use_test_transform=False, return_index=True):
        """Store data/labels and build the train/test transform pipelines."""
        self.select = select
        self.use_test_transform = use_test_transform
        normalize = transforms.Normalize(self._MEAN, self._STD)
        self.training_gen_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
        self.test_gen_transform = transforms.Compose([transforms.ToTensor(), normalize])
        self.X = X
        if not select:
            self.Y = Y
        self.return_index = return_index

    def _prepare(self, arr):
        """HWC array -> PIL image -> normalized tensor via the active pipeline."""
        img = Image.fromarray(arr)
        pipeline = self.test_gen_transform if self.use_test_transform else self.training_gen_transform
        return pipeline(img)

    def __getitem__(self, index):
        """Return (x[, y][, index]) depending on the select/return_index flags."""
        x = self._prepare(self.X[index])
        if self.select:
            return (x, index) if self.return_index else x
        y = self.Y[index]
        return (x, y, index) if self.return_index else (x, y)

    def __len__(self):
        """Number of stored samples."""
        return len(self.X)
class DataHandler_STL10(Dataset):
"""
Data Handler to load STL10 dataset.
This class extends :class:`torch.utils.data.Dataset` to handle
loading data even without labels
Parameters
----------
X: numpy array
Data to be loaded
y: numpy array, optional
Labels to be loaded (default: None)
select: bool
| |
# MNN expression API wrappers (origin: zhangzan1997/MNN)
# Aliases to the builtin scalar types; the public names `int`/`float` are
# presumably shadowed by MNN dtype constants pulled in via the wildcard
# import below (the code later uses _F.int/_F.float) — hence the aliases.
_Int = int
_Float = float
from _mnncengine._expr import *
import _mnncengine._expr as _F
# numpy is optional: when available, array/list inputs can be converted
# to MNN vars by _to_var(); otherwise only Python scalars are converted.
_numpy_supported = False
try:
    import numpy as np
    _numpy_supported = True
except Exception:
    print ("Numpy not found. Using MNN without numpy.")
def _to_var(x, to_float=True):
    """
    Best-effort conversion of *x* to an MNN ``Var``.

    numpy arrays and non-empty lists/tuples become const vars (float32 when
    *to_float*, int32 otherwise).  Plain Python ints/floats become 0-D const
    vars.  Anything else (including an existing ``Var``) is returned
    unchanged, so callers validate with ``isinstance(x, Var)``.

    Args:
        x: value to convert.
        to_float: cast array-like input to float32 when True, int32 otherwise.

    Returns:
        An MNN ``Var`` when conversion applies, otherwise *x* itself.
    """
    if _numpy_supported:
        if isinstance(x, (list, tuple)) and x:
            # Non-empty sequences go through numpy for dtype/shape handling.
            x = np.array(x)
        if isinstance(x, np.ndarray):
            if to_float:
                if x.dtype != np.float32:
                    x = x.astype(np.float32)
                return _F.const(x, x.shape)
            if x.dtype != np.int32:
                x = x.astype(np.int32)
            return _F.const(x, x.shape, dtype=_F.int)
    # Scalars are handled uniformly whether or not numpy is available.
    # (Previously, with numpy installed, ints/floats fell through
    # unconverted, making e.g. sign(3) raise "parameter x is not valid".)
    if isinstance(x, _Int):
        return _F.const(x, [], dtype=_F.int)
    if isinstance(x, _Float):
        return _F.const(x, [], dtype=_F.float)
    return x
def scalar(value):
    """
    Create a 0-D const var (NCHW) holding a single int or float.

    Note: the comparisons use ``type(1)``/``type(1.)`` rather than the names
    ``int``/``float``, which may be shadowed by MNN dtype constants from the
    wildcard import — TODO confirm.

    Raises:
        NotImplementedError: when *value* is neither an int nor a float.
    """
    kind = type(value)
    if kind == type(1):
        return _F.const([value], [], _F.NCHW, _F.int)
    if kind == type(1.):
        return _F.const([value], [], _F.NCHW, _F.float)
    raise NotImplementedError("not supported data type for creating scalar variable")
def _checked_unary(x, op):
    """Coerce *x* to a Var and apply the unary backend *op*, validating first."""
    x = _to_var(x)
    if not isinstance(x, Var):
        raise RuntimeError("parameter x is not valid")
    return op(x)

def sign(x):
    """Apply ``_F.sign`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.sign)

def floor(x):
    """Apply ``_F.floor`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.floor)

def ceil(x):
    """Apply ``_F.ceil`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.ceil)

def square(x):
    """Apply ``_F.square`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.square)

def sqrt(x):
    """Apply ``_F.sqrt`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.sqrt)

def rsqrt(x):
    """Apply ``_F.rsqrt`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.rsqrt)

def exp(x):
    """Apply ``_F.exp`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.exp)

def log(x):
    """Apply ``_F.log`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.log)

def sin(x):
    """Apply ``_F.sin`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.sin)

def cos(x):
    """Apply ``_F.cos`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.cos)

def tan(x):
    """Apply ``_F.tan`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.tan)

def asin(x):
    """Apply ``_F.asin`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.asin)

def acos(x):
    """Apply ``_F.acos`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.acos)

def atan(x):
    """Apply ``_F.atan`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.atan)

def log1p(x):
    """Apply ``_F.log1p`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.log1p)

def tanh(x):
    """Apply ``_F.tanh`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.tanh)

def sigmoid(x):
    """Apply ``_F.sigmoid`` to *x* (converted to a Var)."""
    return _checked_unary(x, _F.sigmoid)
def _checked_binary(x, y, op):
    """Coerce both operands to Vars (x first, then y) and apply *op*."""
    x = _to_var(x)
    y = _to_var(y)
    if not isinstance(x, Var):
        raise RuntimeError("parameter x is not valid")
    if not isinstance(y, Var):
        raise RuntimeError("parameter y is not valid")
    return op(x, y)

def minimum(x, y):
    """Apply ``_F.minimum`` to *x* and *y* (converted to Vars)."""
    return _checked_binary(x, y, _F.minimum)

def maximum(x, y):
    """Apply ``_F.maximum`` to *x* and *y* (converted to Vars)."""
    return _checked_binary(x, y, _F.maximum)
def bias_add(value, bias):
    """
    Adds bias to value.

    This is (mostly) a special case of add where bias is restricted to 1-D.
    Broadcasting is supported, so value may have any number of dimensions;
    the size of bias must match the last dimension of value.

    Example usage:
    >>> MNN.expr.bias_add(np.eye(3,3), np.ones(3))
    array([[2., 1., 1.],
           [1., 2., 1.],
           [1., 1., 2.]], dtype=float32)

    Args:
        value: A variable with type dtype.float or dtype.int.
        bias: A 1-D variable with size matching the last dimension of value.

    Returns:
        A variable with the same type as value.

    Raises:
        RuntimeError: when either argument is invalid or shapes mismatch.
    """
    value = _to_var(value)
    bias = _to_var(bias)
    for name, var in (("value", value), ("bias", bias)):
        if not isinstance(var, Var):
            raise RuntimeError("parameter %s is not valid" % name)
    if len(bias.shape) != 1:
        raise RuntimeError("parameter bias must be 1-D in bias_add")
    if value.shape[-1] != bias.shape[-1]:
        raise RuntimeError("parameter bias's dim must match parameter value's dim in bias_add")
    return _F.bias_add(value, bias)
def unravel_index(indices, dims):
    """Apply ``_F.unravel_index`` to int-converted *indices* and *dims*."""
    indices = _to_var(indices, to_float=False)
    if not isinstance(indices, Var):
        raise RuntimeError("parameter indices is not valid")
    dims = _to_var(dims, to_float=False)
    if not isinstance(dims, Var):
        raise RuntimeError("parameter dims is not valid")
    return _F.unravel_index(indices, dims)
def one_hot(indices, depth, on_value=1., off_value=0., axis=-1):
    """One-hot encode int-converted *indices* to *depth* entries along *axis*."""
    converted = _to_var(indices, to_float=False)
    if isinstance(converted, Var):
        return _F.one_hot(converted, depth, on_value, off_value, axis)
    raise RuntimeError("parameter indices is not valid")
def broadcast_to(input, shape):
    """
    Broadcast *input* to the shape described by the int var *shape*.

    Args:
        input: the variable (or array-like, converted via _to_var) to broadcast.
        shape: the target shape as an int variable (or array-like).

    Returns:
        The broadcast variable.

    Raises:
        RuntimeError: when either argument cannot be converted to a Var.
    """
    # Consistency fix: coerce `input` like every other wrapper in this
    # module (cf. zeros_like), so numpy arrays / lists are accepted;
    # previously only an existing Var was.
    input = _to_var(input)
    shape = _to_var(shape, to_float=False)
    if not isinstance(input, Var):
        raise RuntimeError("parameter input is not valid")
    if not isinstance(shape, Var):
        raise RuntimeError("parameter shape is not valid")
    return _F.broadcast_to(input, shape)
def zeros_like(input):
    """Return ``_F.zeros_like`` of *input* (converted to a Var)."""
    var = _to_var(input)
    if isinstance(var, Var):
        return _F.zeros_like(var)
    raise RuntimeError("parameter input is not valid")
def range(start, limit, delta):
    """
    Build an arithmetic-sequence var from *start* to *limit* stepping *delta*.

    All three operands must share the same dtype (all int or all float).
    NOTE: intentionally shadows the builtin ``range`` to mirror the MNN API.
    """
    start = _to_var(start)
    limit = _to_var(limit)
    delta = _to_var(delta)
    for name, var in (("start", start), ("limit", limit), ("delta", delta)):
        if not isinstance(var, Var):
            raise RuntimeError("parameter %s is not valid" % name)
    if limit.dtype != start.dtype or delta.dtype != start.dtype:
        raise RuntimeError("parameter start/limit/delta must use same data type, either all int or all float")
    return _F.range(start, limit, delta)
def rank(input):
    """Return ``_F.rank`` of *input* (converted to a Var)."""
    var = _to_var(input)
    if isinstance(var, Var):
        return _F.rank(var)
    raise RuntimeError("parameter input is not valid")
def space_to_batch_nd(input, block_shape, paddings):
    """
    Rearrange spatial blocks of a 4-D NC4HW4 *input* into the batch dimension.

    Args:
        input: 4-D Var in NC4HW4 format.
        block_shape: 1-D int Var of shape [M] with the block sizes.
        paddings: 2-D int Var of shape [M, 2] with per-dimension paddings.

    Returns:
        The space-to-batch result.

    Raises:
        RuntimeError: on invalid argument types, dtypes, or shapes.
    """
    input = _to_var(input)
    block_shape = _to_var(block_shape, to_float=False)
    paddings = _to_var(paddings, to_float=False)
    if not isinstance(input, Var):
        raise RuntimeError("parameter input is not valid")
    if not isinstance(block_shape, Var):
        raise RuntimeError("parameter block_shape is not valid")
    if not isinstance(paddings, Var):
        raise RuntimeError("parameter paddings is not valid")
    if len(input.shape) != 4 or input.data_format != _F.NC4HW4:
        raise RuntimeError("parameter input must be 4-D w/ NC4HW4 format")
    if block_shape.dtype != _F.int or paddings.dtype != _F.int:
        raise RuntimeError("parameter block_shape/paddings must be int type")
    if len(block_shape.shape) != 1:
        raise RuntimeError("parameter block_shape must be 1-D w/ shape [M]")
    # Consistency with batch_to_space_nd: also require the row count of
    # `paddings` to equal M (previously unchecked and left to the backend).
    if len(paddings.shape) != 2 or paddings.shape[-1] != 2 or paddings.shape[0] != block_shape.shape[0]:
        raise RuntimeError("parameter paddings must be 2-D w/ shape [M, 2]")
    return _F.space_to_batch_nd(input, block_shape, paddings)
def batch_to_space_nd(input, block_shape, crops):
    """
    Move batch blocks of a 4-D NC4HW4 *input* back into the spatial dims.

    Args:
        input: 4-D Var in NC4HW4 format.
        block_shape: 1-D int Var of shape [M] with the block sizes.
        crops: 2-D int Var of shape [M, 2] with per-dimension crops.

    Raises:
        RuntimeError: on invalid argument types, dtypes, or shapes.
    """
    input = _to_var(input)
    block_shape = _to_var(block_shape, to_float=False)
    crops = _to_var(crops, to_float=False)
    for name, var in (("input", input), ("block_shape", block_shape), ("crops", crops)):
        if not isinstance(var, Var):
            raise RuntimeError("parameter %s is not valid" % name)
    if len(input.shape) != 4 or input.data_format != _F.NC4HW4:
        raise RuntimeError("parameter input must be 4-D w/ NC4HW4 format")
    if block_shape.dtype != _F.int or crops.dtype != _F.int:
        raise RuntimeError("parameter block_shape/crops must be int type")
    if len(block_shape.shape) != 1:
        raise RuntimeError("parameter block_shape must be 1-D w/ shape [M]")
    if len(crops.shape) != 2 or crops.shape[-1] != 2 or crops.shape[0] != block_shape.shape[0]:
        raise RuntimeError("parameter crops must be 2-D w/ shape [M, 2]")
    return _F.batch_to_space_nd(input, block_shape, crops)
def setdiff1d(x, y):
    """Apply ``_F.setdiff1d`` to two 1-D vars (both converted via _to_var)."""
    x = _to_var(x)
    y = _to_var(y)
    for name, var in (("x", x), ("y", y)):
        if not isinstance(var, Var):
            raise RuntimeError("parameter %s is not valid" % name)
    if len(x.shape) != 1 or len(y.shape) != 1:
        raise RuntimeError("parameter x/y must be 1-D")
    return _F.setdiff1d(x, y)
def moments(x, axes=[2, 3], shift=None, keep_dims=True):
    """Compute ``_F.moments`` of a 4-D NC4HW4 var over axes [2, 3].

    NOTE(review): only ``axes == [2, 3]`` is accepted; the *shift* argument
    is ignored and replaced by a fixed zero var, and *keep_dims* is also
    ignored — the backend is always called with ``True``. Confirm whether
    the backend supports other values before changing this.
    """
    x = _to_var(x)
    if not isinstance(x, Var):
        raise RuntimeError("parameter x is not valid")
    if len(x.shape) != 4 or x.data_format != _F.NC4HW4:
        raise RuntimeError("parameter x must be 4-D w/ NC4HW4 format")
    if axes != [2, 3] and axes != (2, 3):
        raise RuntimeError("parameter axes must be [2, 3] in current implementation")
    shift = _F.const([0.], [1]) #though it's not used, it's preserved
    return _F.moments(x, axes, shift, True)
def matrix_band_part(input, num_lower, num_upper):
input = _to_var(input)
num_lower = _to_var(num_lower)
num_upper = _to_var(num_upper)
if not isinstance(input, Var):
raise RuntimeError("parameter input is not valid")
if not isinstance(num_lower, Var):
raise RuntimeError("parameter num_lower is not valid")
if not isinstance(num_upper, Var):
raise RuntimeError("parameter num_upper is not valid")
if len(num_lower.shape) != 0 or num_lower.dtype != _F.int:
raise RuntimeError("parameter num_lower must be 0-D int")
if len(num_upper.shape) != 0 or num_upper.dtype != _F.int:
raise RuntimeError("parameter num_upper must be 0-D int")
return | |
pixels
bad_mask = is_dark_mask == 0
if im.mask is not None:
bad_mask = bad_mask | im.mask
# Flip bad mask around so we get only the dark pixels in the
# linear range
tim = im.data[bad_mask == 0]
std = np.std(tim)
#std = np.asscalar(std.data )
med = np.median(tim)
#med = np.asscalar(med.data )
mean = np.mean(tim)
#mean = np.asscalar(mean.data )
tmin = np.min(tim)
tmax = np.max(tim)
rdnoise = np.sqrt(np.median((tim[1:] - tim[0:-1])**2))
print('combined dark statistics for ' + outbase)
print('std, rdnoise, mean, med, min, max, n_dark_pix')
print(std, rdnoise, mean, med, tmin, tmax, n_dark_pix)
im.meta['STD'] = (std, 'Standard deviation of image (electron)')
im.meta['MEDIAN'] = (med, 'Median of image (electron)')
im.meta['MEAN'] = (mean, 'Mean of image (electron)')
im.meta['MIN'] = (tmin, 'Min of image (electron)')
im.meta['MAX'] = (tmax, 'Max of image (electron)')
im.meta['NCOMBINE'] = (len(out_fnames), 'Number of darks combined')
add_history(im.meta,
'Combining NCOMBINE biases indicated in FILENN')
im.meta['HIERARCH MASK_THRESHOLD'] \
= (mask_threshold, '*RDNOISE (electron)')
add_history(im.meta,
'Setting pixes below MASK_THRESHOLD to zero; prevents subtraction noise')
# Record each filename
for i, f in enumerate(fnames):
im.meta['FILE{0:02}'.format(i)] = f
# Prepare to write
if not os.path.exists(outdir):
os.mkdir(outdir)
outbase = os.path.join(outdir, outbase)
out_fname = outbase + '_dark_combined.fits'
# Leave these large for fast calculations downstream and make
# final results that primarily sit on disk in bulk small
#im.data = im.data.astype('float32')
#im.uncertainty.array = im.uncertainty.array.astype('float32')
im.write(out_fname, overwrite=True)
if show:
impl = plt.imshow(im, origin='lower', cmap=plt.cm.gray,
filternorm=0, interpolation='none',
vmin=med-std, vmax=med+std)
plt.show()
plt.close()
discard_intermediate(out_fnames, sdir,
calibration_scratch, keep_intermediate)
def dark_combine(directory=None,
                 collection=None,
                 subdirs=CALIBRATION_SUBDIRS,
                 glob_include=DARK_GLOB,
                 dccdt_tolerance=DCCDT_TOLERANCE,
                 num_processes=MAX_NUM_PROCESSES,
                 mem_frac=MAX_MEM_FRAC,
                 num_calibration_files=NUM_CALIBRATION_FILES,
                 naxis1=sx694.naxis1,
                 naxis2=sx694.naxis2,
                 bitpix=MAX_CCDDATA_BITPIX,
                 process_expand_factor=COR_PROCESS_EXPAND_FACTOR,
                 **kwargs):
    """Find and combine dark frames, one group per CCD temperature bin.

    Groups darks with ``bias_dark_fdict_creator``, sizes a worker pool so
    the per-group memory footprint fits in ``mem_frac`` of available RAM,
    and dispatches each group to ``dark_combine_one_fdict``.

    :param directory: directory to search (ignored when *collection* given)
    :param collection: pre-built ImageFileCollection; overrides *directory*
    :returns: False when no usable darks are found, otherwise None
    """
    fdict_list = \
        fdict_list_collector(bias_dark_fdict_creator,
                             directory=directory,
                             collection=collection,
                             subdirs=subdirs,
                             imagetyp='DARK',
                             glob_include=glob_include,
                             dccdt_tolerance=dccdt_tolerance)
    if collection is not None:
        # Make sure 'directory' is a valid variable
        directory = collection.location
    nfdicts = len(fdict_list)
    # Use nfdicts consistently (flat_combine performs the same check);
    # previously this re-evaluated len(fdict_list)
    if nfdicts == 0:
        log.debug('No usable darks found in: ' + directory)
        return False
    # Estimated memory footprint of processing one group of darks
    one_fdict_size = (num_calibration_files
                      * naxis1 * naxis2
                      * bitpix/8
                      * process_expand_factor)
    our_num_processes = num_can_process(nfdicts,
                                        num_processes=num_processes,
                                        mem_frac=mem_frac,
                                        process_size=one_fdict_size)
    # Split the total process/memory budget evenly among top-level workers
    num_subprocesses = int(num_processes / our_num_processes)
    subprocess_mem_frac = mem_frac / our_num_processes
    log.debug('dark_combine: {} num_processes = {}, mem_frac = {}, our_num_processes = {}, num_subprocesses = {}, subprocess_mem_frac = {}'.format(directory, num_processes, mem_frac, our_num_processes, num_subprocesses, subprocess_mem_frac))
    wwk = WorkerWithKwargs(dark_combine_one_fdict,
                           num_processes=num_subprocesses,
                           mem_frac=subprocess_mem_frac,
                           **kwargs)
    if nfdicts == 1:
        # Single group: skip the multiprocessing pool overhead entirely
        for fdict in fdict_list:
            wwk.worker(fdict)
    else:
        with NestablePool(processes=our_num_processes) as p:
            p.map(wwk.worker, fdict_list)
def flat_fdict_creator(collection,
                       imagetyp=None):
    """Build one file-dict per filter for the flats of the given imagetyp.

    Each dict carries the directory, the new and old filter names, and
    the list of matching file paths.
    """
    directory = collection.location
    # Narrow the collection down to the requested image type
    collection = collection.filter(imagetyp=imagetyp)
    # NOTE: filtering here on naxis1/naxis2 would recycle wrong-sized
    # flats that are better rejected later in the pipeline, so we
    # deliberately do not narrow by frame size.
    if 'filter' not in collection.keywords:
        log.error(f'filter not found in any {imagetyp} files in {directory}')
        return []
    fdict_list = []
    # Keep in mind the FITS headers carry our *old* filter names
    for old_filt in collection.values('filter', unique=True):
        # regexp_match=True is needed for the H2O+ filter for some reason;
        # re.escape protects the [] characters in some older filter names,
        # though it is not certain that is strictly necessary.
        subset = collection.filter(filter=re.escape(old_filt),
                                   regex_match=True)
        file_list = subset.files_filtered(include_path=True)
        obs_dates = subset.values('date-obs')
        # Associate the old on-disk name with the new filter designation
        new_filt = get_filt_name(old_filt, obs_dates[0])
        fdict_list.append({'directory': directory,
                           'filter': new_filt,
                           'ofilter': old_filt,
                           'fnames': file_list})
    return fdict_list
def flat_process(ccd, bmp_meta=None,
                 init_threshold=100, # units of readnoise
                 nd_edge_expand=ND_EDGE_EXPAND,
                 in_name=None,
                 **kwargs):
    """Process one flat frame for combination.

    Rejects pathological frames (too dim, too bright), masks the ND
    filter and near-readnoise pixels, then normalizes by the smoothed
    background maximum.

    :param ccd: the flat frame to process
    :param bmp_meta: optional dict; on success receives the frame's JD
        under key ``'jd'``
    :param init_threshold: low-signal mask threshold, in units of readnoise
    :param in_name: original filename, used only for log messages
    :returns: the normalized ccd, or None when the frame is rejected
    :raises ValueError: if the flat was already normalized (FLATDIV set)
    """
    if ccd.meta.get('flatdiv') is not None:
        raise ValueError('Trying to reprocess a processed flat')
    # Use basic patch medians to spot pathological cases.  The first
    # return value was an unused local (presumably the other patch
    # median — TODO confirm im_med_min_max's return contract).
    _, mlp = im_med_min_max(ccd)
    if mlp < 1000:
        log.warning(f'flat median of {mlp} {ccd.unit} too low {in_name}')
        return None
    if mlp > ccd.meta['NONLIN']:
        log.warning(f'flat median of {mlp} {ccd.unit} too high {in_name}')
        return None
    # Use photutils.Background2D to smooth each flat and get a good
    # maximum value.  Mask edges and ND filter so as to increase the
    # quality of the background map.
    mask = np.zeros(ccd.shape, bool)
    # Make a copy of ccd with the edge_mask property adjusted; done this
    # way to keep ccd's own ND filter parameters intact
    emccd = RedCorData(ccd, edge_mask=-nd_edge_expand)
    try:
        mask[emccd.ND_coords] = True
    except Exception as e:
        # We should have caught all nasty cases above
        log.error(f'ND_coords gave error: {e} for {in_name}')
        return None
    del emccd
    rdnoise = ccd.meta['RDNOISE']
    # Mask pixels too close to the readnoise floor to carry flat signal
    mask[ccd.data < rdnoise * init_threshold] = True
    ccd.mask = mask
    bkg_estimator = MedianBackground()
    b = Background2D(ccd, 20, mask=mask, filter_size=5,
                     bkg_estimator=bkg_estimator)
    max_flat = np.max(b.background)
    if max_flat > ccd.meta['NONLIN']*ccd.unit:
        log.debug(f'flat max value of {max_flat.value} {max_flat.unit} too bright: {in_name}')
        return None
    ccd.mask = None
    ccd = ccd.divide(max_flat, handle_meta='first_found')
    # --> This will get better if Card units are implemented
    ccd.meta['FLATDIV'] = (max_flat.value, f'Normalization value (smoothed max) ({max_flat.unit})')
    # Capture the observation time so the caller can compute a mean DATE-OBS
    tm = Time(ccd.meta['DATE-OBS'], format='fits')
    if bmp_meta is not None:
        bmp_meta['jd'] = tm.jd
    return ccd
def flat_combine_one_fdict(fdict,
                           outdir=CALIBRATION_ROOT,
                           calibration_scratch=CALIBRATION_SCRATCH,
                           keep_intermediate=False,
                           min_num_flats=MIN_NUM_FLATS,
                           num_processes=MAX_NUM_PROCESSES,
                           mem_frac=MAX_MEM_FRAC,
                           naxis1=sx694.naxis1,
                           naxis2=sx694.naxis2,
                           bitpix=MAX_CCDDATA_BITPIX,
                           show=False,
                           nd_edge_expand=ND_EDGE_EXPAND,
                           flat_cut=FLAT_CUT,
                           **kwargs):
    """Combine the flats described by one fdict into a master flat.

    Pipeline: run each flat through ``flat_process``, average-combine the
    survivors with sigma clipping, interpolate over the ND filter,
    smooth/renormalize, and write ``<date>_<filter>_flat.fits`` into
    *outdir*.  When too few usable flats exist, a
    ``<date>_<filter>_flat_bad.fits`` marker file is touched instead.

    :param fdict: dict with keys 'fnames', 'filter', 'directory'
    :returns: False on failure; None on success
    """
    fnames = fdict['fnames']
    this_filter = fdict['filter']
    directory = fdict['directory']
    # Read the first file once to derive the (nominal) date used in
    # output names.  (A second, redundant read of the same file that
    # used to sit below has been removed.)
    tmp = RedCorData.read(fnames[0])
    tm = tmp.meta['DATE-OBS']
    this_dateb1, _ = tm.split('T')
    outbase = os.path.join(outdir, this_dateb1)
    bad_fname = outbase + '_' + this_filter + '_flat_bad.fits'
    if len(fnames) < min_num_flats:
        log.warning(f"Not enough good flats found for filter {this_filter} in {directory}")
        Path(bad_fname).touch()
        return False
    # Make a scratch directory that is the date of the first file.
    # Not as fancy as the biases, but, hey, it is a scratch directory
    sdir = os.path.join(calibration_scratch, this_dateb1)
    cmp = CorMultiPipe(num_processes=num_processes,
                       mem_frac=mem_frac,
                       naxis1=naxis1,
                       naxis2=naxis2,
                       bitpix=bitpix,
                       outdir=sdir,
                       create_outdir=True,
                       overwrite=True,
                       post_process_list=[flat_process, jd_meta],
                       **kwargs)
    pout = cmp.pipeline(fnames, **kwargs)
    pout, fnames = prune_pout(pout, fnames)
    if len(pout) == 0:
        log.warning(f"Not enough good flats found for filter {this_filter} in {directory}")
        Path(bad_fname).touch()
        return False
    out_fnames, pipe_meta = zip(*pout)
    if len(out_fnames) < min_num_flats:
        log.warning(f"Not enough good flats found for filter {this_filter} in {directory}")
        discard_intermediate(out_fnames, sdir,
                             calibration_scratch, keep_intermediate)
        Path(bad_fname).touch()
        return False
    jds = [m['jd'] for m in pipe_meta]
    # Combine our flats
    mem = psutil.virtual_memory()
    im = \
        ccdp.combine(list(out_fnames),
                     method='average',
                     sigma_clip=True,
                     sigma_clip_low_thresh=5,
                     sigma_clip_high_thresh=5,
                     sigma_clip_func=np.ma.median,
                     sigma_clip_dev_func=mad_std,
                     mem_limit=mem.available*mem_frac)
    im.meta['NCOMBINE'] = (len(out_fnames), 'Number of flats combined')
    # Record each filename
    for i, f in enumerate(fnames):
        im.meta['FILE{0:02}'.format(i)] = f
    # Fixed copy-paste from the bias code: these are flats, not biases
    add_history(im.meta,
                'Combining NCOMBINE flats indicated in FILENN')
    # Interpolate over our ND filter
    emccd = RedCorData(im, edge_mask=-nd_edge_expand)
    good_mask = np.ones(im.shape, bool)
    good_mask[emccd.ND_coords] = False
    points = np.nonzero(good_mask)
    values = im[points]
    xi = emccd.ND_coords
    # NOTE(review): `mem` was snapshotted before combining, so these
    # "mem available" figures only reflect the pre-combine baseline.
    log.debug(f'flat_combine_one_filt post CorObsData: mem available: {mem.available/2**20}')
    # Linear behaved much better than cubic here
    nd_replacement = interpolate.griddata(points,
                                          values,
                                          xi,
                                          method='linear')
    log.debug(f'flat_combine_one_filt post interpolate.griddata mem available: {mem.available/2**20}')
    im.data[xi] = nd_replacement
    # Do one last smoothing and renormalization
    bkg_estimator = MedianBackground()
    b = Background2D(im, 20, mask=(im.data<flat_cut), filter_size=5,
                     bkg_estimator=bkg_estimator)
    max_flat = np.max(b.background)
    log.debug(f'flat_combine_one_filt post Background2D mem available: {mem.available/2**20}')
    im = im.divide(max_flat, handle_meta='first_found')
    im.mask = im.data < flat_cut
    im.meta['FLAT_CUT'] = (flat_cut, 'Value below which flat is masked')
    # Prepare to write, naming after the mean observation date
    tm = Time(np.mean(jds), format='jd')
    this_date = tm.fits
    this_dateb = this_date.split('T')[0]
    if this_dateb != this_dateb1:
        log.warning(f"first flat is on {this_dateb1} but average is {this_dateb}")
    outbase = '{}_{}'.format(this_dateb, this_filter)
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    outbase = os.path.join(outdir, outbase)
    out_fname = outbase + '_flat.fits'
    im.write(out_fname, overwrite=True)
    if show:
        impl = plt.imshow(im, origin='upper', cmap=plt.cm.gray)
        plt.show()
        plt.close()
    discard_intermediate(out_fnames, sdir,
                         calibration_scratch, keep_intermediate)
def flat_combine(directory=None,
collection=None,
subdirs=CALIBRATION_SUBDIRS,
glob_include=FLAT_GLOB,
num_processes=MAX_NUM_PROCESSES,
mem_frac=MAX_MEM_FRAC,
num_calibration_files=NUM_CALIBRATION_FILES,
naxis1=sx694.naxis1,
naxis2=sx694.naxis2,
bitpix=64, # uncertainty and mask not used in griddata
griddata_expand_factor=GRIDDATA_EXPAND_FACTOR,
**kwargs):
print(f'flat_combine directory: {directory}')
fdict_list = \
fdict_list_collector(flat_fdict_creator,
directory=directory,
collection=collection,
subdirs=subdirs,
imagetyp='FLAT',
glob_include=glob_include)
if collection is not None:
# Make sure 'directory' is a valid variable
directory = collection.location
nfdicts = len(fdict_list)
if nfdicts == 0:
log.debug('No usable flats found in: ' + directory)
return False
one_filt_size = (num_calibration_files
* naxis1 * naxis2
* bitpix/8
* griddata_expand_factor)
our_num_processes = num_can_process(nfdicts,
num_processes=num_processes,
mem_frac=mem_frac,
process_size=one_filt_size,
error_if_zero=False)
our_num_processes = max(1, our_num_processes)
# Combining files is the slow part, so we want the maximum of
# processes doing that in parallel
log.debug(f'flat_combine: {directory}, nfdicts = {nfdicts}, our_num_processes = {our_num_processes}')
# Number of sub-processes in each process we will | |
from krrt.utils import read_file
from krrt.sat import CNF
class Node:
    """Base class for nodes in an NNF/d-DNNF circuit.

    ``children`` holds either ``Node`` instances or the Python booleans
    ``True``/``False``.  Traversals (condition/forget/simplify/smooth)
    memoize their result in ``_replacement`` so shared sub-DAGs are
    visited only once; call :meth:`reset` before starting a new traversal.
    """

    def __init__(self, childs=None):
        # Fresh list per node: the previous ``childs=[]`` default was a
        # single shared mutable aliased by every default-constructed node.
        childs = [] if childs is None else childs
        assert None not in childs, str(childs)
        self.children = childs
        self._count = -1          # memoized count map (-1 = not computed)
        self._nnf_index = -1      # index in serialized NNF (-1 = unassigned)
        self._replacement = None  # memoized traversal result

    @property
    def usedvars(self):
        """Set of variables appearing in this sub-circuit (memoized)."""
        if self._replacement is None:
            self._replacement = set()
            for ch in self.children:
                if bool != type(ch):
                    self._replacement = self._replacement | ch.usedvars
        return self._replacement

    @property
    def nnf_index(self):
        # Only valid after assign_nnf_indices has run
        assert -1 != self._nnf_index
        return self._nnf_index

    def reset(self):
        """Clear memoized state here and, recursively, in all children."""
        if (-1 != self._count) or (self._replacement is not None) or (self._nnf_index != -1):
            for ch in self.children:
                if ch not in [True, False]:
                    ch.reset()
            self._count = -1
            self._nnf_index = -1
            self._replacement = None

    def crawl(self, seen):
        """Accumulate every distinct node of this sub-DAG into *seen*."""
        if self not in seen:
            seen.add(self)
            for ch in self.children:
                if ch not in [True, False]:
                    ch.crawl(seen)

    def _rebuild(self, visit):
        # Shared body of condition/forget/simplify: apply *visit* to each
        # non-boolean child, pass booleans through, then compress via the
        # subclass-specific _compress (And/Or simplification rules).
        vals = [ch if ch in [True, False] else visit(ch) for ch in self.children]
        return self._compress(vals)

    def condition(self, lits):
        """Return this sub-circuit with the literals in *lits* asserted."""
        if self._replacement is None:
            self._replacement = self._rebuild(lambda ch: ch.condition(lits))
        return self._replacement

    def forget(self, vars):
        """Return this sub-circuit with the variables in *vars* forgotten."""
        if self._replacement is None:
            self._replacement = self._rebuild(lambda ch: ch.forget(vars))
        return self._replacement

    def simplify(self):
        """Return a compressed (simplified) copy of this sub-circuit."""
        if self._replacement is None:
            self._replacement = self._rebuild(lambda ch: ch.simplify())
        return self._replacement

    def dual_children(self, vals):
        """True iff *vals* contains a literal together with its negation."""
        lits = set()
        for v in filter(lambda x: isinstance(x, Lit), vals):
            if (-1 * v.num) in lits:
                return True
            lits.add(v.num)
        return False

    def assign_nnf_indices(self, nlist):
        """Post-order traversal assigning each node its index into *nlist*."""
        if self._nnf_index != -1:
            return
        for ch in self.children:
            ch.assign_nnf_indices(nlist)
        self._nnf_index = len(nlist)
        nlist.append(self)

    def ensure_vars(self, vars):
        """Smooth this node so every variable in *vars* appears beneath it.

        Pads with tautological (v | ~v) disjuncts under a conjunction.
        """
        if 0 == len(vars):
            return self
        elif isinstance(self, And):
            parent = self
        elif isinstance(self, Lit) or isinstance(self, Or):
            parent = And([self])
            parent = parent.smooth()
        else:
            assert False, "Not sure how to ensure variables for node type %s" % str(type(self))
        new_children = []
        for v in vars:
            new_children.append(Or([Lit(v), Lit(v.negate())]))
        new_parent = And(parent.children + new_children)
        new_parent = new_parent.smooth()
        return new_parent

    def gen_nnf(self):
        # Subclasses must emit their own NNF serialization line
        assert False, "Must override gen_nnf!"
class And(Node):
    """Conjunction node of the circuit."""

    def count(self, vars):
        """Model counts: an And node multiplies its children's counts."""
        if -1 == self._count:
            totals = {v: 1 for v in vars}
            totals[-1] = 1
            # Keyed by child so a repeated (shared) child contributes once
            per_child = {ch: ch.count(vars) for ch in self.children}
            for key in totals:
                for child_counts in per_child.values():
                    totals[key] *= child_counts[key]
            self._count = totals
        return self._count

    def _compress(self, vals):
        """Simplify conjunction children: drop True, detect False and
        contradictory literals, unwrap singletons, flatten nested Ands."""
        remaining = [v for v in vals if v != True]
        if False in remaining:
            return False
        if not remaining:
            return True
        if 1 == len(remaining):
            return remaining[0]
        if self.dual_children(remaining):
            return False
        flattened = []
        for child in remaining:
            if isinstance(child, And):
                flattened.extend(child.children)
            else:
                flattened.append(child)
        return And(flattened)

    def smooth(self):
        """Return a smoothed copy (memoized); records _vars on the copy."""
        if self._replacement is None:
            smoothed = []
            combined = set()
            for ch in self.children:
                smoothed.append(ch.smooth())
                combined = combined | smoothed[-1]._vars
            self._replacement = And(smoothed)
            self._replacement._vars = combined
        return self._replacement

    def is_smooth(self):
        """Check all children are smooth; stash the var union in _replacement."""
        if self._replacement is None:
            real_children = [ch for ch in self.children if ch not in [True, False]]
            if False in [ch.is_smooth() for ch in real_children]:
                return False
            var_union = real_children[0]._replacement
            for ch in real_children:
                var_union = var_union | ch._replacement
            self._replacement = var_union
        return True

    def gen_nnf(self):
        """Serialize as an NNF 'A' line: A <#children> <child indices>."""
        idx_str = ' '.join(str(ch.nnf_index) for ch in self.children)
        return "A %d %s" % (len(self.children), idx_str)
class Or(Node):
    """Disjunction node; ``switch_var`` records the decision variable
    from the .nnf input (0 means none was given)."""

    def __init__(self, childs=None, switch_var=0):
        # Fresh list per node: the previous ``childs=[]`` default was a
        # single shared mutable aliased by every default-constructed Or.
        # Normalized here (not just in the base class) so this fix stands
        # on its own.
        childs = [] if childs is None else childs
        super().__init__(childs=childs)
        self.switch_var = switch_var

    def count(self, vars):
        """Model counts: an Or node sums its children's counts."""
        if -1 == self._count:
            self._count = {v: 0 for v in vars}
            self._count[-1] = 0
            counts = {ch: ch.count(vars) for ch in self.children}
            for v in self._count:
                for ch in counts:
                    self._count[v] += counts[ch][v]
        return self._count

    def _compress(self, vals):
        """Simplify disjunction children: drop False, detect True and
        tautological literal pairs, unwrap singletons, flatten nested Ors."""
        new_vals = list(filter(lambda x: x != False, vals))
        if True in new_vals:
            return True
        elif 0 == len(new_vals):
            return False
        elif 1 == len(new_vals):
            return new_vals[0]
        elif self.dual_children(new_vals):
            return True
        else:
            final_vals = []
            for ch in new_vals:
                if isinstance(ch, Or):
                    final_vals.extend(ch.children)
                else:
                    final_vals.append(ch)
            return Or(final_vals)

    def smooth(self):
        """Smooth: pad each child so all children mention the same vars.

        NOTE(review): the rebuilt Or does not carry switch_var forward —
        confirm whether smoothing is expected to preserve it.
        """
        if self._replacement is None:
            new_vals = []
            new_vars = set()
            for ch in self.children:
                new_vals.append(ch.smooth())
                new_vars = new_vars | new_vals[-1]._vars
            final_vals = [ch.ensure_vars(new_vars - ch._vars) for ch in new_vals]
            self._replacement = Or(final_vals)
            self._replacement._vars = new_vars
        return self._replacement

    def is_smooth(self):
        """Smoothness check: every child must be smooth AND mention exactly
        the same variable set; stashes that set in _replacement."""
        if self._replacement is None:
            chs = list(filter(lambda x: x not in [True, False], self.children))
            chvals = [ch.is_smooth() for ch in chs]
            if False in chvals:
                return False
            subvars = chs[0]._replacement
            for ch in chs:
                if (len(ch._replacement) != len(subvars)) or \
                   (len(ch._replacement | subvars) != len(subvars)):
                    return False
            self._replacement = subvars
        return True

    def gen_nnf(self):
        """Serialize as an NNF 'O' line: O <switch> <#children> <indices>."""
        return "O %d %d %s" % (self.switch_var,
                               len(self.children),
                               ' '.join(map(str, [ch.nnf_index for ch in self.children])))
class Lit(Node):
    """Leaf node wrapping a single CNF literal."""

    def __init__(self, lit):
        super().__init__(childs=[])
        self.lit = lit
        self.reset()

    @property
    def usedvars(self):
        """The single variable this literal mentions."""
        return set([self.lit.var])

    @property
    def num(self):
        """DIMACS-style integer form (negation rendered as a minus sign)."""
        return int(str(self.lit).replace('~', '-'))

    def reset(self):
        # Leaves have no children, so just clear the memo fields directly
        self._count = -1
        self._nnf_index = -1
        self._replacement = None

    def crawl(self, seen):
        seen.add(self)

    def count(self, vars):
        """A literal admits one model; its own negation admits none."""
        result = {v: 1 for v in vars}
        result[-1] = 1
        neg = self.lit.negate()
        if neg in vars:
            result[neg] = 0
        return result

    def condition(self, lits):
        """True if asserted, False if contradicted, else a fresh copy."""
        if self._replacement is None:
            if self.lit in lits:
                outcome = True
            elif self.lit.negate() in lits:
                outcome = False
            else:
                outcome = Lit(self.lit)
            self._replacement = outcome
        return self._replacement

    def forget(self, vars):
        """A forgotten variable becomes True; others copy through."""
        if self._replacement is None:
            self._replacement = True if self.lit.var in vars else Lit(self.lit)
        return self._replacement

    def simplify(self):
        """Literals are already simple; return a fresh copy (memoized)."""
        if self._replacement is None:
            self._replacement = Lit(self.lit)
        return self._replacement

    def smooth(self):
        """A literal is trivially smooth over its own variable."""
        if self._replacement is None:
            copy = Lit(self.lit)
            copy._vars = set([self.lit.var])
            self._replacement = copy
        return self._replacement

    def is_smooth(self):
        self._replacement = set([self.lit.var])
        return True

    def gen_nnf(self):
        """Serialize as an NNF 'L' line."""
        return "L %d" % self.num
class dDNNF:
    """Wrapper for a d-DNNF circuit rooted at *root* over variables *allvars*.

    *root* may also be a plain bool, representing the trivially
    true/false circuit.
    """

    def __init__(self, root, allvars):
        self.root = root
        self.allvars = allvars
        self.usedvars = set()
        if not isinstance(self.root, bool):
            self.root.reset()
            self.usedvars = self.root.usedvars
            self.root.reset()

    def size(self):
        """Number of distinct nodes in the circuit DAG."""
        seen = set()
        if not isinstance(self.root, bool):
            self.root.crawl(seen)
        return len(seen)

    def count(self, vars=set()):
        """Total model count over allvars, or per-variable counts for *vars*."""
        if isinstance(self.root, bool):
            total = int(self.root) * (2 ** len(self.allvars))
            if vars:
                return {v: (total / 2) for v in vars}
            return total
        self.root.reset()
        counts = self.root.count(vars)
        if not vars:
            return counts[-1]
        assert vars <= self.allvars
        # Variables absent from the circuit double the count freely
        scale = 2 ** len(self.allvars - self.root.usedvars)
        result = {v: scale * counts[v] for v in vars}
        # A variable never mentioned splits the models exactly in half
        for var in (vars - self.usedvars):
            result[var] = counts[-1] / 2
        return result

    def condition(self, lits):
        """Assert the literals in *lits* and drop their variables."""
        remaining = self.allvars - set([l.var for l in lits])
        if isinstance(self.root, bool):
            return dDNNF(self.root, remaining)
        self.root.reset()
        return dDNNF(self.root.condition(lits), remaining)

    def forget(self, vars):
        """Existentially quantify away the variables in *vars*."""
        remaining = self.allvars - set(vars)
        if isinstance(self.root, bool):
            return dDNNF(self.root, remaining)
        self.root.reset()
        return dDNNF(self.root.forget(vars), remaining)

    def simplify(self):
        """Return a compressed copy of the circuit."""
        if isinstance(self.root, bool):
            return dDNNF(self.root, self.allvars)
        self.root.reset()
        return dDNNF(self.root.simplify(), self.allvars)

    def smooth(self):
        """Return an equivalent smooth circuit covering all of allvars."""
        if isinstance(self.root, bool):
            return dDNNF(self.root, self.allvars)
        # Simplify first, then smooth the simplified root
        self.root.reset()
        simp_root = self.root.simplify()
        simp_root.reset()
        smooth_root = simp_root.smooth()
        smooth_root = smooth_root.ensure_vars(self.allvars - smooth_root._vars)
        smooth_root.reset()
        assert len(self.allvars) == len(smooth_root._vars)
        return dDNNF(smooth_root, self.allvars)

    def is_smooth(self):
        """True iff every Or node's children span identical variable sets."""
        if isinstance(self.root, bool):
            return True
        self.root.reset()
        return self.root.is_smooth()

    def gen_nnf(self):
        """Serialize the circuit to c2d-style .nnf text."""
        ordering = []
        self.root.reset()
        self.root.assign_nnf_indices(ordering)
        assert self.root == ordering[-1]
        for i in range(len(ordering)):
            assert ordering[i].nnf_index == i
        edge_total = sum(len(n.children) for n in ordering)
        text = "nnf %d %d %d\n" % (len(ordering), edge_total, len(self.usedvars))
        for n in ordering:
            text += "%s\n" % n.gen_nnf()
        return text
def parseNNF(fname):
lines = read_file(fname)
(nNodes, nEdges, nVars) = map(int, lines.pop(0).split()[1:])
assert nNodes == len(lines)
# TODO: Double check that this badlist isn't included in the final d-DNNF
badlist = set()
allvars = set()
nodes = []
for line in lines:
parts = line.split()
if 'A 0' == line:
nodes.append(True)
elif 'O 0 0' == line:
nodes.append(False)
elif 'L' == parts[0]:
num = int(parts[1])
lit = CNF.Variable(abs(num))
allvars.add(lit)
if num < 0:
lit = CNF.Not(lit)
nodes.append(Lit(lit))
elif 'A' == parts[0]:
nCh = int(parts[1])
children = list(map(int, parts[2:]))
assert nCh == len(children), "Bad line? %s" % line
assert max(children) < len(nodes)
nodes.append(And([nodes[i] for i in children]))
elif 'O' == parts[0]:
switch = int(parts[1])
nCh = int(parts[2])
children = list(map(int, parts[3:]))
assert nCh == len(children), "Bad line? %s" % line
assert max(children) < len(nodes)
nodes.append(Or([nodes[i] for i in children], switch))
if 0 == switch:
badlist.add(len(nodes) - 1)
else:
assert False, "Unrecognized line: %s" % line
return dDNNF(nodes[-1], | |
#!/usr/bin/python3
from flask import Flask, render_template
from bluepy import btle
from bluepy.btle import Scanner, DefaultDelegate
import sys
import time
import math
import os
import signal
###############################################################################
#
# Reverie Powerbase Control API - <NAME>
#
# Version 20211211-001
#
# This is a fork based on the Flask Purple Powerbase project by <NAME>
# https://github.com/jbyerline/flask-purple-powerbase
#
# I used Jacob's project as a skeleton, and expanded it to handle positioning
# more cleanly, and handle some exceptions more gracefully. I also renamed
# the variables and functions, so it will not drop in to the homebridge
# plugin he also wrote without modification.
#
# I have the Reverie R650 bed frame. That is what I am testing against.
# The original project is tested against the "Purple Powerbase". It seems
# to be the same controller, so should work with most Reverie bases.
#
# One major caveat, and a TODO is to handle lumbar controls as well as tilt.
# With tilt, the flat position is in the middle (36%), whereas with lumbar,
# it is zero. Anyone who has a 650 and has tried to use the official App
# will know what I am talking about, as it does not properly recognize or
# handle tilt.
#
# It's still not perfect. When homekit spams it with requests, it usually
# works, but sometimes will miss a setting, or misfire one (for example,
# sometimes when I adjust the foot massage, the head massage will randomly
# start). Using light dimmers is not the best way. If I can find a better
# integration method on the homebridge side, I will update accordingly.
#
# This is also my very first python program. Much thanks to <NAME>
# for his assistance in helping me understand the "python way" and for some
# of the math magic.
#
###############################################################################
###############################################################################
#
# # # #####
# # # #### ###### ##### # # #### # # ###### # ####
# # # # # # # # # # ## # # # # #
# # # #### ##### # # # # # # # # ##### # #
# # # # # ##### # # # # # # # # # ###
# # # # # # # # # # # # # ## # # # #
# ##### #### ###### # # ##### #### # # # # ####
#
###############################################################################
###############################################################################
# These values should be defined in /etc/default/reverie-powerbase. Systemd
# will make them available to the script. I have included some reasonable
# defaults in case the script is run manually, or not defined externally.
###############################################################################
# The script will attempt to locate and connect to a Reverie Powerbase
# automatically. If you have more than one, you will need to find the MAC
# address of the bed you want to use and set DEVICE_MAC to the correct bed. If
# you are not sure which is which, pick one and test it. Repeat until you find
# the right one.
#
# To find your bed, you can use hcitool from the bluez package:
#
# hcitool lescan | grep RevCB_A1
#
# If you don't find anything, adjust your grep to maybe "Rev" instead (and, of
# course, make sure the bed has power).
# "Auto" triggers a BLE scan (findBed) instead of using a fixed MAC address.
DEVICE_MAC = os.environ.get("DEVICE_MAC", "Auto")
print("Using device MAC address " + DEVICE_MAC)
# If you are going to run this on the same device as homebridge, use 127.0.0.1
# If you are running this on its own device, set 0.0.0.0 to have it listen
# on the public interfaces
RPI_LOCAL_IP = os.environ.get("RPI_LOCAL_IP", "127.0.0.1")
if RPI_LOCAL_IP == "0.0.0.0":
    print("Listening interface set to all interfaces.")
else:
    print("Listening interface set to " + RPI_LOCAL_IP)
# This is the TCP port that the service will listen on.  Any unused port
# works; it just needs to be set to the same value here and in the
# homebridge plugin.  Kept as a string — Flask accepts it, and it arrives
# from the environment as a string anyway.
RPI_LISTEN_PORT = os.environ.get("RPI_LISTEN_PORT", "8001")
print("Listening on port " + RPI_LISTEN_PORT)
# The factory set the fastest massage speed to 40% of what the motor will
# actually do.  That limit is kept because it is unknown whether exceeding
# it can damage the bed or is merely a comfort issue.  The bed really has
# 10 discrete settings; the remote control sends these values:
#   1-0x04  2-0x08  3-0x0c  4-0x10  5-0x14
#   6-0x18  7-0x1c  8-0x20  9-0x24  10-0x28
# Set the maximum motor speed (decimal, positive integer) here.  Increase
# at your own risk.
MAX_MASSAGE_SPEED = int(os.environ.get("MAX_MASSAGE_SPEED", 40))
print("Maximum massage speed set to " + str(MAX_MASSAGE_SPEED))
# My bed has 4 massage wave speeds.  Perhaps some other bases have more.
# Adjust to match your bed.
MAX_WAVES = int(os.environ.get("MAX_WAVES", 4))
print("Number of wave settings is " + str(MAX_WAVES))
# Set to True when the base has the tilt function rather than lumbar
# support; False for lumbar adjustment.
# BUGFIX: environment variables are strings, so the old assignment left
# USE_TILT="False" as a truthy string.  Parse the common false spellings
# instead; the default remains True.
USE_TILT = str(os.environ.get("USE_TILT", True)).strip().lower() not in (
    "false", "0", "no", "off")
print("Using tilt function rather than lumbar: " + str(USE_TILT))
# This is the (decimal) tilt position that represents when the bed is flat.
# On the Reverie R650, flat is 36 (0x24).
TILT_FLAT = int(os.environ.get("TILT_FLAT", 36))
print("Bed's flat position tilt is " + str(TILT_FLAT))
###############################################################################
# End User Config
###############################################################################
###############################################################################
# Function/Service Declarations
###############################################################################
def findBed():
    """Scan BLE for 10 seconds and return the MAC address of the first
    device advertising the name "RevCB_A1", or the string "None"."""
    class ScanDelegate(DefaultDelegate):
        def __init__(self):
            DefaultDelegate.__init__(self)
    print("Scanning for Reverie Powerbases...")
    devices = Scanner().withDelegate(ScanDelegate()).scan(10.0)
    for device in devices:
        # Collect the advertised names for this device, then match
        names = [value for (adtype, desc, value) in device.getScanData()
                 if desc == "Complete Local Name"]
        if "RevCB_A1" in names:
            print("Detected Reverie Powerbase: %s" % (device.addr))
            return device.addr
    return "None"
# Take the individual position values, and construct the HEX string needed to
# send the command as one string.
def MakePosition(position):
    """Assemble the 22-character hex command frame from the three
    two-digit hex position values (head, feet, tilt)."""
    head, feet, tilt = position[0], position[1], position[2]
    return "".join(["00", head, feet, tilt, "00000000000000"])
def getBedValue(getBedValue):
    """Read a BLE characteristic and return its integer value as a string.

    NOTE: the parameter shadows the function name; kept for
    backward compatibility with keyword callers.
    """
    raw = getBedValue.read()
    return str(int.from_bytes(raw, byteorder=sys.byteorder))
def setBedPosition(setBedPosition, position):
    """Write a full position frame (head/feet/tilt hex strings) to the
    bed's position characteristic."""
    payload = MakePosition(position)
    setBedPosition.write(bytes.fromhex(payload))
    return
def setBedValue(setBedValue, percentage):
    """Write a single percentage value (hex-encoded) to a bed
    characteristic."""
    encoded = percent2hex(percentage)
    setBedValue.write(bytes.fromhex(encoded))
    return
# Convert a percentage (0-100 decimal) to Hex (0x00-0x64 hex)
def percent2hex(percentage):
    """Convert a percentage to a zero-padded two-character hex string.

    The input may be anything ``int()`` accepts (e.g. a numeric string
    from a URL segment); values are clamped into [0, 100].
    """
    # Clamp rather than error: callers pass user-supplied route values
    percentage = max(0, min(100, int(percentage)))
    # Two hex digits, zero padded (not two bytes — the old comment misled)
    return "{value:02x}".format(value=percentage)
# This waits for the bed to read the desired postion (ish) by polling
# the position repeatedly until it gets close to the desired position. Since
# the bed sometimes misses by 1 or 2, I couldn't test for the exact
# set value. I just wait until it's within 2, and call it good.
# If it hasn't reached the value after TOOLONG probes, assume it's never going to
# and stop polling.
def moveWait(service,desired):
    """Intended to poll *service* until it reads within 2 of *desired*.

    Currently disabled: homekit's light-object timeout is very short, so
    this returns immediately ("fire and forget") and reports the desired
    value as reached.  The polling code below the early return is kept,
    unreachable, for when a better homekit integration allows re-enabling.
    """
    TOOLONG=512       # give up after this many probes
    deadmancheck=1
    # The timeout for the light object in homekit is VERY short, and if
    # you wait for the bed to reach its position, it causes homekit
    # to be unhappy, so we need to "fire and forget" and just tell it
    # that it made it to keep homekit happy. When I find a better method
    # of interacting with homekit, I'll adjust this accordingly.
    return int(desired)
    # --- everything below is intentionally unreachable (see above) ---
    def readService(service):
        return int.from_bytes(service.read(), byteorder=sys.byteorder)
    check=readService(service)
    while not math.isclose(check,int(desired),abs_tol=2):
        if deadmancheck > TOOLONG:
            break
        check=readService(service)
        deadmancheck += 1
###############################################################################
# Web API (flask) event loop definition
#
# All the @app.route() functions are URL calls to get or set values with the
# bed. Flask creates and event loop that will wait for a call to the defined
# path, and return the appropriate value (the current setting, or what it did).
###############################################################################
# Flask application instance; the @app.route handlers below attach to it.
app = Flask(__name__)
# # For the main API page, present a brief help message on usage.
# # The html is stored in a "templates" directory in the same location as
# # this script runs from.
@app.route('/')
@app.route('/index')
@app.route('/help')
def index():
    """Render the brief usage/help page (templates/help.html)."""
    return render_template('help.html', title='Reverie Controller')
###############################################################################
# Functions to control vendor named fixed positions
###############################################################################
@app.route("/flat")
def setFlat():
global position
# head, feet, tilt
position=FLAT
setBedPosition(PositionBed, position)
# Since the moveWait function will pass through if the
# position is already reached, I use all three here so
# that it will wait on whichever one takes the longest.
#
# The 16 means base 16 i.e. hex
moveWait(PositionHead,int(position[0],16))
moveWait(PositionFeet,int(position[1],16))
moveWait(PositionTilt,int(position[2],16))
return 'Position Set to Flat'
@app.route("/zeroG")
def setZeroG():
global position
# head, feet, tilt
position=ZEROG
setBedPosition(PositionBed, position)
# Since the moveWait function will pass through if the
# position is already reached, I use all | |
belongs
:param table: the wrapped table
"""
self._schema = schema
self._wrapped_table = table
self._name = table.name
self._uname = urlquote(table.name)
self._fqname = "%s:%s" % (urlquote(self._schema._name), self._uname)
self._instancename = '*'
self._projection_name = self._instancename
self._fromname = self._fqname
self.column_definitions = {
v.name: _ColumnWrapper(self, v)
for v in table.column_definitions
}
self._identifiers = _make_identifier_to_name_mapping(
self.column_definitions.keys(),
super(_TableWrapper, self).__dir__())
def __dir__(self):
return itertools.chain(
super(_TableWrapper, self).__dir__(),
self._identifiers.keys()
)
def __getattr__(self, a):
if a in self._identifiers:
return self.column_definitions[self._identifiers[a]]
else:
return getattr(super(_TableWrapper, self), a)
@deprecated
def describe(self):
"""Provides a description of the model element.
:return: a user-friendly string representation of the model element.
"""
s = "_TableWrapper name: '%s'\nList of columns:\n" % self._name
if len(self.column_definitions) == 0:
s += "none"
else:
s += "\n".join(" %s" % col._name for col in self.column_definitions.values())
return s
    @deprecated
    def _repr_html_(self):
        # IPython/Jupyter rich-display hook; delegates to the plain-text
        # description (no actual HTML markup is produced).
        return self.describe()
    @property
    def path(self):
        """Always a new DataPath instance that is rooted at this table.
        Note that this table will be automatically aliased using its own table name.
        """
        # A fresh DataPath is built on every access; callers that need a
        # stable path object should capture the returned value once.
        return DataPath(self.alias(self._name))
    @property
    def _contextualized_path(self):
        """Returns the path as contextualized for this table instance.
        Conditionally updates the context of the path to which this table instance is bound.
        """
        # A bare (un-aliased) table gets a fresh path per access, so no
        # context adjustment is needed; _TableAlias overrides this property.
        return self.path
    @property
    @deprecated
    def uri(self):
        # Deprecated accessor kept for backward compatibility; use
        # `table.path.uri` directly instead.
        return self.path.uri
    def alias(self, alias_name):
        """Returns a table alias object.

        :param alias_name: a string to use as the alias name
        """
        # The alias wraps this table; it does not copy any model state.
        return _TableAlias(self, alias_name)
    def filter(self, filter_expression):
        """See the docs for this method in `DataPath` for more information."""
        # Delegate to the contextualized path so the filter applies here.
        return self._contextualized_path.filter(filter_expression)
    def link(self, right, on=None, join_type=''):
        """See the docs for this method in `DataPath` for more information."""
        # Delegate to the contextualized path so the link starts from here.
        return self._contextualized_path.link(right, on, join_type)
    def _query(self, mode='entity', projection=[], group_key=[], context=None):
        """Invokes query on the path for this table."""
        # NOTE(review): the mutable default arguments are safe only because
        # they are never mutated here -- they are passed straight through.
        return self.path._query(mode, projection, group_key=group_key, context=context)
    def entities(self):
        """Returns a results set of whole entities from this data path's current context.
        See the docs for this method in `DataPath` for more information.
        """
        # The default query mode ('entity') fetches whole rows.
        return self._query()
    def aggregates(self, *functions):
        """Returns a results set of computed aggregates from this data path.
        See the docs for this method in `DataPath` for more information.
        """
        # The aggregate functions become the query's projection list.
        return self._query(mode=_Project.AGGREGATE, projection=list(functions))
    def attributes(self, *attributes):
        """Returns a results set of attributes projected and optionally renamed from this data path.
        See the docs for this method in `DataPath` for more information.
        """
        # The requested attributes become the query's projection list.
        return self._query(mode=_Project.ATTRIBUTE, projection=list(attributes))
    def groupby(self, *keys):
        """Returns an attribute group object.
        See the docs for this method in `DataPath` for more information.
        """
        # The group object is handed this table's _query for later execution.
        return _AttributeGroup(self, self._query, keys)
def insert(self, entities, defaults=set(), nondefaults=set(), add_system_defaults=True):
"""Inserts entities into the table.
:param entities: an iterable collection of entities (i.e., rows) to be inserted into the table.
:param defaults: optional, set of column names to be assigned the default expression value.
:param nondefaults: optional, set of columns names to override implicit system defaults
:param add_system_defaults: flag to add system columns to the set of default columns.
:return a collection of newly created entities.
"""
# empty entities will be accepted but results are therefore an empty entity set
if not entities:
return _ResultSet(self.path.uri, lambda ignore1, ignore2: [])
options = []
if defaults or add_system_defaults:
defaults_enc = {urlquote(cname) for cname in defaults}
if add_system_defaults:
defaults_enc |= _system_defaults - nondefaults
options.append("defaults={cols}".format(cols=','.join(defaults_enc)))
if nondefaults:
nondefaults_enc = {urlquote(cname) for cname in nondefaults}
options.append("nondefaults={cols}".format(cols=','.join(nondefaults_enc)))
path = '/entity/' + self._fqname
if options:
path += "?" + "&".join(options)
logger.debug("Inserting entities to path: {path}".format(path=path))
# JSONEncoder does not handle general iterable objects, so we have to make sure its an acceptable collection
if not hasattr(entities, '__iter__'):
raise TypeError('entities is not iterable')
entities = entities if isinstance(entities, (list, tuple)) else list(entities)
# test the first entity element to make sure that it looks like a dictionary
if not hasattr(entities[0], 'keys'):
raise TypeError('entities[0] does not look like a dictionary -- does not have a "keys()" method')
try:
resp = self._schema._catalog._wrapped_catalog.post(path, json=entities, headers={'Content-Type': 'application/json'})
return _ResultSet(self.path.uri, lambda ignore1, ignore2: resp.json())
except HTTPError as e:
logger.debug(e.response.text)
if 400 <= e.response.status_code < 500:
raise DataPathException(_http_error_message(e), e)
else:
raise e
def update(self, entities, correlation={'RID'}, targets=None):
"""Update entities of a table.
For more information see the ERMrest protocol for the `attributegroup` interface. By default, this method will
correlate the input data (entities) based on the `RID` column of the table. By default, the method will use all
column names found in the first row of the `entities` input, which are not found in the `correlation` set and
not defined as 'system columns' by ERMrest, as the targets if `targets` is not set.
:param entities: an iterable collection of entities (i.e., rows) to be updated in the table.
:param correlation: an iterable collection of column names used to correlate input set to the set of rows to be
updated in the catalog. E.g., `{'col name'}` or `{mytable.mycolumn}` will work if you pass a _ColumnWrapper object.
:param targets: an iterable collection of column names used as the targets of the update operation.
:return: a collection of updated entities as returned by the corresponding ERMrest interface.
"""
# empty entities will be accepted but results are therefore an empty entity set
if not entities:
return _ResultSet(self.path.uri, lambda ignore1, ignore2: [])
# JSONEncoder does not handle general iterable objects, so we have to make sure its an acceptable collection
if not hasattr(entities, '__iter__'):
raise TypeError('entities is not iterable')
entities = entities if isinstance(entities, (list, tuple)) else list(entities)
# test the first entity element to make sure that it looks like a dictionary
if not hasattr(entities[0], 'keys'):
raise TypeError('entities[0] does not look like a dictionary -- does not have a "keys()" method')
# Form the correlation keys and the targets
correlation_cnames = {urlquote(str(c)) for c in correlation}
if targets:
target_cnames = {urlquote(str(t)) for t in targets}
else:
exclusions = correlation_cnames | _system_defaults
target_cnames = {urlquote(str(t)) for t in entities[0].keys() if urlquote(str(t)) not in exclusions}
# test if there are any targets after excluding for correlation keys and system columns
if not target_cnames:
raise ValueError('No "targets" for the update. There must be at least one column as a target of the update,'
' and targets cannot overlap with "correlation" keys and system columns.')
# Form the path
path = '/attributegroup/{table}/{correlation};{targets}'.format(
table=self._fqname,
correlation=','.join(correlation_cnames),
targets=','.join(target_cnames)
)
try:
resp = self._schema._catalog._wrapped_catalog.put(path, json=entities, headers={'Content-Type': 'application/json'})
return _ResultSet(self.path.uri, lambda ignore1, ignore2: resp.json())
except HTTPError as e:
logger.debug(e.response.text)
if 400 <= e.response.status_code < 500:
raise DataPathException(_http_error_message(e), e)
else:
raise e
class _TableAlias (_TableWrapper):
"""Represents a table alias in datapath expressions.
"""
    def __init__(self, base_table, alias_name):
        """Initializes the table alias.

        :param base_table: the base table to be given an alias name
        :param alias_name: the alias name
        """
        assert isinstance(base_table, _TableWrapper)
        # Inherit the wrapped-table state (columns, identifiers) from the base
        super(_TableAlias, self).__init__(base_table._schema, base_table._wrapped_table)
        self._parent = None  # lazily created/bound DataPath; see `path`/`_bind`
        self._base_table = base_table
        # Override the naming fields so URL fragments refer to the alias
        self._name = alias_name
        self._uname = urlquote(alias_name)
        self._fqname = self._base_table._fqname
        self._instancename = self._uname + ":*"
        self._projection_name = self._instancename
        # e.g. "MyAlias:=schema:table" as used in a FROM position
        self._fromname = "%s:=%s" % (self._uname, self._base_table._fqname)
    def __deepcopy__(self, memodict={}):
        # deep copy implementation of a table alias should not make copies of model objects (ie, the base table)
        # NOTE: the fresh alias is unbound (its _parent is None) by construction.
        return _TableAlias(self._base_table, self._name)
def _equivalent(self, alias):
"""Equivalence comparison between table aliases.
:param alias: another table alias
:return: True, if the base table and alias name match, else False
"""
if not isinstance(alias, _TableAlias):
raise TypeError("'alias' must be an instance of '%s'" % type(self).__name__)
return self._name == alias._name and self._base_table == alias._base_table
    @property
    def path(self):
        """Returns the parent path for this alias.
        """
        # Unlike _TableWrapper.path, the alias caches its DataPath so that
        # subsequent operations accumulate on a single shared path instance.
        if not self._parent:
            self._parent = DataPath(self)
        return self._parent
def _bind(self, parent_path):
"""Binds this table instance to the given parent path."""
if self._parent:
raise ValueError("Cannot bind a table instance that has already been bound.")
elif not isinstance(parent_path, DataPath):
raise TypeError("value must be a DataPath instance.")
self._parent = parent_path
    @property
    def _contextualized_path(self):
        """Returns the path as contextualized for this table instance.
        Conditionally updates the context of the path to which this table instance is bound.
        """
        path = self.path
        # Only reassign the context when it actually differs, to avoid
        # redundant context-change work on the shared path.
        if path.context != self:
            path.context = self
        return path
    @property
    @deprecated
    def uri(self):
        # Deprecated accessor: emits an explicit DeprecationWarning, then
        # renders the URI in this alias's context.
        warnings.warn("'uri' has been deprecated", DeprecationWarning, stacklevel=2)
        return self.path._contextualized_uri(self)
def _query(self, mode='entity', projection=[], group_key=[], context=None):
"""Overridden method to set context of query | |
# Source: oldarmyc/ae_preflight -- tests/tests_reporting.py
from __future__ import absolute_import
from .fixtures import reporting_returns
from system_profile import profile
import glob
import sys
import os
if sys.version_info[:2] >= (2, 7):
from unittest import TestCase
else:
from unittest2 import TestCase
try:
from unittest import mock
except ImportError:
import mock
class TestReporting(TestCase):
    def setUp(self):
        # No per-test fixtures are required; everything is mocked inline.
        pass
def tearDown(self):
files = glob.glob('results.txt')
for item in files:
os.remove(item)
@mock.patch('system_profile.profile.argparse')
def test_reporting_ubuntu(self, mock_args):
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('ubuntu')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability()
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu()
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts()
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf()
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports()
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents()
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules()
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl()
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/ubuntu_pass.txt', 'r') as ubuntu:
expected = ubuntu.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_suse(self, mock_args):
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('suse')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability()
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu()
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts()
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf()
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports()
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents()
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules()
)
with mock.patch(
'system_profile.profile.'
'suse_infinity_check'
) as infinity:
infinity.return_value = (
reporting_returns.infinity()
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl()
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/suse_pass.txt', 'r') as suse:
expected = suse.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_rhel(self, mock_args):
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('rhel')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability()
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu()
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts()
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf()
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports()
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents()
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules()
)
with mock.patch(
'system_profile.profile.selinux'
) as selinux:
selinux.return_value = (
reporting_returns.selinux()
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl()
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/centos_pass.txt', 'r') as centos:
expected = centos.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_fail_suse(self, mock_args):
test_pass = False
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('suse')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability(
test_pass
)
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu(test_pass)
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts(
test_pass
)
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf(test_pass)
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports(test_pass)
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents(test_pass)
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules(
test_pass
)
)
with mock.patch(
'system_profile.profile.'
'suse_infinity_check'
) as infinity:
infinity.return_value = (
reporting_returns.infinity(
test_pass
)
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl(
test_pass
)
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/suse_fail.txt', 'r') as suse:
expected = suse.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_fail_rhel(self, mock_args):
test_pass = False
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('rhel')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability(
test_pass
)
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu(test_pass)
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts(
test_pass
)
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf(test_pass)
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports(test_pass)
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents(test_pass)
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules(
test_pass
)
)
with mock.patch(
'system_profile.profile.selinux'
) as selinux:
selinux.return_value = (
reporting_returns.selinux(
test_pass
)
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl(
test_pass
)
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/centos_fail.txt', 'r') as centos:
expected = centos.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_ubuntu_trigger_warn_on_fs(self, mock_args):
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('ubuntu')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability()
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu()
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts(
test_pass=False
)
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf()
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports()
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents()
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules()
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl()
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/fs_warn.txt', 'r') as ubuntu:
expected = ubuntu.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_ubuntu_trigger_warn_resolve(self, mock_args):
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('ubuntu')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability()
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu()
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts()
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf_warn()
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports()
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents()
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules()
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl()
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
with open('tests/fixtures/resolv_warn.txt', 'r') as ubuntu:
expected = ubuntu.readlines()
differences = []
with open('results.txt', 'r') as results:
for line in results:
if line not in expected:
differences.append(line)
self.assertEquals(
differences,
[],
'Differences were found in the results from what is expected'
)
@mock.patch('system_profile.profile.argparse')
def test_reporting_ubuntu_trigger_warn_on_interface(self, mock_args):
with mock.patch('system_profile.profile.get_os_info') as os:
os.return_value = reporting_returns.os_return('ubuntu')
with mock.patch(
'system_profile.profile.check_system_type'
) as system:
system.return_value = reporting_returns.system_compatability()
with mock.patch(
'system_profile.profile.system_requirements'
) as req:
req.return_value = reporting_returns.memory_cpu()
with mock.patch(
'system_profile.profile.mounts_check'
) as mount:
mount.return_value = reporting_returns.mounts()
with mock.patch(
'system_profile.profile.inspect_resolv_conf'
) as resolv:
resolv.return_value = (
reporting_returns.resolv_conf()
)
with mock.patch(
'system_profile.profile.check_open_ports'
) as port:
port.return_value = (
reporting_returns.ports(test_pass=False)
)
with mock.patch(
'system_profile.profile.check_for_agents'
) as agent:
agent.return_value = (
reporting_returns.agents()
)
with mock.patch(
'system_profile.profile.check_modules'
) as module:
module.return_value = (
reporting_returns.modules()
)
with mock.patch(
'system_profile.profile.'
'check_sysctl'
) as sysctl:
sysctl.return_value = (
reporting_returns.sysctl()
)
profile.main()
results_file = glob.glob('results.txt')
self.assertEqual(
len(results_file),
1,
'Did not find results file'
)
expected = []
| |
'LocalVertexPool'
newObject['NumberOfVertices'] = self._readUInt(fromChunk = True)
newObject['AttributeMask'] = self._readUInt(fromChunk = True)
mask = 0x01
Flags = [False] * 12
# Now process the attribute mask:
for idx in range(12):
if newObject['AttributeMask'] & mask > 0:
Flags[idx] = True
# Shift the mask left by one
mask <<= 1
if Flags[1] and Flags[2]:
raise Exception("Unable to determine colour for vertex. Both colour index and RGBA colour are set.")
varNames = ['UVBase']
varNames.extend(['UV' + str(idx) for idx in range(1, 8)])
# Now only take those variable names that have been enabled
varNames = [filt[0] for filt in zip(varNames, Flags[4:]) if filt[1]]
newObject['LocalVertexPool'] = []
for idx in range(newObject['NumberOfVertices']):
tempDict = dict()
if Flags[0]:
tempDict['Coordinate'] = np.zeros((1, 3))
for colIdx in range(3):
tempDict['Coordinate'][0, colIdx] = self._readDouble(fromChunk = True)
if Flags[1] or Flags[2]:
# Whilst the flags mean different things, they have similar construction
tempDict['Colour'] = np.zeros((1, 4))
for colIdx in range(4):
tempDict['Colour'][0, colIdx] = self._readUChar(fromChunk = True)
if Flags[3]:
tempDict['Normal'] = np.zeros((1, 3))
for colIdx in range(3):
tempDict['Normal'][0, colIdx] = self._readFloat(fromChunk = True)
for varName in varNames:
tempDict[varName] = np.zeros((1, 2))
for colIdx in range(2):
tempDict[varName][0, colIdx] = self._readFloat(fromChunk = True)
newObject['LocalVertexPool'].append(tempDict)
tempDict = None
# The data chunk should be processed. Reset the variable to None:
self._Chunk = None
self._addObject(newObject)
def _opMeshPrim(self):
# Opcode 86
newObject = dict()
newObject['Datatype'] = 'MeshPrimitive'
# Read the data to memory and extract data as normal with modified
# read functions
self._readChunk()
newObject['PrimitiveType'] = self._readShort(fromChunk = True)
indexSize = self._readUShort(fromChunk = True)
if indexSize not in [1, 2, 4]:
raise Exception("Unable to determine the index size.")
functions = {1: self._readSChar, 2: self._readShort, 4: self._readInt}
readFunction = functions[indexSize]
newObject['VertexCount'] = self._readUInt(fromChunk = True)
newObject['VertexIndex'] = []
for idx in range(newObject['VertexCount']):
newObject['VertexIndex'].append(readFunction(fromChunk = True))
# The data chunk should be processed. Reset the variable to None:
self._Chunk = None
self._addObject(newObject)
    def _opRoadSeg(self):
        # Opcode 87: road segment -- only the 8-byte ASCII ID is stored.
        newObject = dict()
        newObject['Datatype'] = "RoadSegment"
        newObject['ASCIIID'] = self._readString(8)
        self._addObject(newObject)
def _opRoadZone(self):
# Opcode 88
newObject = dict()
newObject['Datatype'] = 'RoadZone'
newObject['ZoneFilename'] = self._readString(120)
self._skip(4)
varNames = ['LowerLeft', 'UpperRight']
coordTypes = ['x', 'y']
for varName in varNames:
for coordType in coordTypes:
newObject[coordType + varName] = self._readDouble()
newObject['GridInterval'] = self._readDouble()
newObject['NoPostsX'] = self._readUInt()
newObject['NoPostsY'] = self._readUInt()
self._addObject(newObject)
def _opMorphVertex(self):
# Opcode 89
newObject = dict()
newObject['Datatype'] = 'MorphVertexList'
# Read the data to memory and extract data as normal with modified
# read fucntions
self._readChunk()
newObject['Offset0'] = []
newObject['Offset100'] = []
for idx in range(len(self._Chunk) / 8):
newObject['Offset0'].append(self._readInt(fromChunk = True))
newObject['Offset100'].append(self._readInt(fromChunk = True))
# The data chunk should be processed. Reset the variable to None:
self._Chunk = None
self._addObject(newObject)
def _opLinkPalette(self):
# Opcode 90
newObject = dict()
newObject['Datatype'] = 'LinkagePalette'
RecordLength = self._readUShort()
# Next read the subtype
subtype = self._readInt()
if subtype == 1:
newObject['Subtype'] = 'KeyTableHeader'
varNames = ['MaxNumber', 'ActualNumber', 'TotalLength']
for varName in varNames:
newObject[varName] = self._readInt()
# Skip over the reserved area:
self._skip(12)
newObject['Records'] = []
varNames = ['KeyValue', 'KeyDatatype', 'DataOffset']
for idx in range(newObject['ActualNumber']):
tempDict = dict()
for varName in varNames:
tempDict[varName] = self._readInt()
if varName['KeyDatatype'] not in [0x12120001, 0x12120002, 0x12120004]:
raise Exception('Unable to determine data type for record ' + str(idx))
# Append this record to the record list:
newObject.append(varName)
if subtype == 2:
newObject['Subtype'] = 'KeyDataRecord'
newObject['DataLength'] = self._readInt()
newobject['PackedData'] = self.f.read(RecordLength - 12)
# Finally, add this object to the stack:
self._addObject(newObject)
    def _opSound(self):
        # Opcode 91: sound record -- palette index, offset/direction vectors
        # and playback parameters.
        newObject = dict()
        newObject['Datatype'] = 'Sound'
        newObject['ASCIIID'] = self._readString(8)
        self._skip(4)  # reserved
        newObject['IndexIntoSoundPalette'] = self._readUInt()
        self._skip(4)  # reserved
        # Offset coordinate: three doubles stored as a 1x3 row vector.
        newObject['OffsetCoordinate'] = np.zeros((1, 3))
        for colIdx in range(3):
            newObject['OffsetCoordinate'][0, colIdx] = self._readDouble()
        # Direction vector: three floats stored as a 1x3 row vector.
        newObject['SoundDirection'] = np.zeros((1, 3))
        for colIdx in range(3):
            newObject['SoundDirection'][0, colIdx] = self._readFloat()
        varNames = ['Amplitude', 'PitchBend', 'Priority', 'Falloff', 'Width']
        for varName in varNames:
            newObject[varName] = self._readFloat()
        newObject['Flags'] = self._readUInt()
        # Skip over reserved space
        self._skip(4)
        self._addObject(newObject)
    def _opRoadPath(self):
        # Opcode 92: road path record.
        newObject = dict()
        newObject['Datatype'] = 'RoadPath'
        newObject['ASCIIID'] = self._readString(8)
        self._skip(4)  # reserved
        newObject['PathName'] = self._readString(120)
        newObject['SpeedLimit'] = self._readDouble()
        # No passing should be a *4 byte* boolean. I will read this as an integer instead.
        newObject['NoPassing'] = self._readUInt()
        newObject['VertexType'] = self._readUInt()
        # Reject any vertex type other than the two accepted codes.
        if newObject['VertexType'] not in [1, 2]:
            raise Exception("Unable to determine vertex type.")
        self._skip(480)  # reserved block
        self._addObject(newObject)
    def _opSoundPalette(self):
        # Opcode 93: sound palette data -- either a header (subtype 1) or a
        # packed-filename data record (subtype 2).
        newObject = dict()
        newObject['Datatype'] = 'SoundPaletteData'
        RecordLength = self._readUShort()
        # This can be of two types based on the subtype value:
        Subtype = self._readUInt()
        if Subtype == 1:
            # This is a sound palette header record
            newObject['Subtype'] = "Header"
            varNames = ['MaxNumber', 'ActualNumber']
            for varName in varNames:
                newObject[varName] = self._readUInt()
            # Skip over reserved area
            self._skip(12)
            # One sub-dict per palette entry, keyed "Sound<N>".
            for soundNo in range(newObject['ActualNumber']):
                SoundName = "Sound" + str(soundNo)
                newObject[SoundName] = dict()
                newObject[SoundName]['SoundIndex'] = self._readUInt()
                # Reserved space for this entry
                self._skip(4)
                newObject[SoundName]['FilenameOffset'] = self._readUInt()
        elif Subtype == 2:
            # This is a sound palette data record
            newObject['Subtype'] = "Data"
            newObject['TotalLength'] = self._readUInt()
            # Remaining payload; 12 bytes of the record were already consumed.
            newObject['PackedFilenames'] = self._readString(RecordLength - 12)
        else:
            # This is not recognised.
            raise Exception("Unable to determine sound record subtype.")
        self._addObject(newObject)
def _opGenMatrix(self):
# Opcode 94
# This is the same as the matrix command, so call the matrix function
self._opMatrix(fileName)
def _opText(self):
# Opcode 95
newObject = dict()
newObject['Datatype'] = 'Text'
newObject['ASCIIID'] = self._readString(8)
self._skip(8)
newObject['Type'] = self._readUInt()
if newObject['Type'] not in [-1, 0, 1, 2]:
raise Exception("Unable to determine type.")
newObject['DrawType'] = self._readUInt()
if newObject['DrawType'] not in [0, 1, 2, 3]:
raise Exception("Unable to determine draw type.")
newObject['Justification'] = self._readUInt()
if newObject['Justification'] not in [-1, 0, 1, 2]:
raise Exception("Unable to determine justification.")
newObject['FloatingPointValue'] = self._readDouble()
newObject['IntegerValue'] = self._readInt()
self._skip(20)
varNames = ['Flags', 'Colour', 'Colour2', 'Material', None, 'MaxLines', 'MaxCharacters', 'CurrentLength', 'NextLineNumber', 'LineNumberAtTop', 'LowInteger', 'HighInteger']
for varName in varNames:
if varNames is None:
self._skip(4)
else:
newObject[varName] = self._readUInt()
newObject['LowFloat'] = self._readDouble()
newObject['HighFloat'] = self._readDouble()
varNames = ['LowerLeftCorner', 'UpperRightCorner']
for varName in varNames:
newObject[varName] = np.zeros((1, 3))
for colIdx in range(3):
newObject[varName][0, colIdx] = self._readDouble()
newObject['FontName'] = self._readString(120)
varNames = ['DrawVertical', 'DrawItalic', 'DrawBold', 'DrawUnderline', 'LineStyle']
for varName in varNames:
newObject[varName] = self._readUInt()
self._skip(4)
self._addObject(newObject)
def _opSwitch(self):
# Opcode 96
newObject = dict()
newObject['Datatype'] = 'Switch'
RecordLength = self._readUShort()
newObject['ASCIIID'] = self._readString(8)
self._skip(4)
varNames = ['CurrentMask', 'NumberOfMasks', 'NumberOfWordsPerMask']
for varName in varNames:
newObject[varName] = self._readUInt()
newObject['MaskWords'] = []
for idx in range(varNames['NumberOfMasks'] * varNames['NumberOfWordsPerMask']):
newObject['MaskWords'].append(self._readUInt())
self._addObject(newObject)
def _opLineStylePalette(self):
# Opcode 97
newObject = dict()
newObject['Datatype'] = 'LineStylePalette'
newObject['LineStyleIdx'] = self._readUShort()
newObject['PatternMask'] = self._readUShort()
newObject['LineWidth'] = self._readUInt()
self._addObject(newObject)
def _opClipRegion(self):
# Opcode 98
newObject = dict()
newObject['Datatype'] = 'ClipRegion'
newObject['ASCIIID'] = self._readString(8)
self._skip(6)
newObject['Flags'] = []
for flagNo in range(5):
newObject['Flags'].append(self._readChar())
self._skip(1)
for regionIdx in range(1, 5):
newObject['Region' + str(regionIdx)] = np.zeros((1, 3))
for colIdx in range(3):
newObject['Region' + str(regionIdx)][0, colIdx] = self._readDouble()
varNames = ['CoeffsA', 'CoeffsB', 'CoeffsC', 'CoeffsD']
for varName in varNames:
newObject[varName] = []
for colIdx in range(5):
newObject[varName].append(struct.unpack('>d', self.f.read(8))[0])
self._addObject(newObject)
def _opExtension(self):
# Opcode 100
newObject = dict()
# Read the data to memory and extract data as normal with modified
# read functions
self._readChunk()
newObject['Datatype'] = 'Extension'
varNames = ['ASCIIID', 'SiteID']
for varName in varNames:
newObject[varName] = self._readString(8, fromChunk = True)
self._skip(1, fromChunk = True)
newObject['Revision'] = self._readSChar(fromChunk = True)
newObject['RecordCode'] = self._readUShort(fromChunk = True)
newObject['ExtendedData'] = self._readString(len(self._Chunk), fromChunk = True)
# The data chunk should be processed. Reset the variable to None:
| |
to -1
stdout = chan.makefile('r', -1)
stderr = chan.makefile_stderr('r', -1)
try:
stdoutdata = stdout.read()
exit_status = stdin.channel.recv_exit_status()
if exit_status != 0:
if not allow_fail:
raise RemoteCommandError(self, cmd, exit_status, stderr.read())
else:
self._log(logging.DEBUG, "Running ssh command '%s' exited with status %d and message: %s" %
(cmd, exit_status, stderr.read()))
finally:
stdin.close()
stdout.close()
stderr.close()
return stdoutdata
def alive(self, pid):
"""Return True if and only if process with given pid is alive."""
try:
self.ssh("kill -0 %s" % str(pid), allow_fail=False)
return True
except Exception:
return False
def signal(self, pid, sig, allow_fail=False):
cmd = "kill -%s %s" % (str(sig), str(pid))
self.ssh(cmd, allow_fail=allow_fail)
def kill_process(self, process_grep_str, clean_shutdown=True, allow_fail=False):
cmd = """ps ax | grep -i """ + process_grep_str + """ | grep -v grep | awk '{print $1}'"""
pids = [pid for pid in self.ssh_capture(cmd, allow_fail=True)]
if clean_shutdown:
sig = signal.SIGTERM
else:
sig = signal.SIGKILL
for pid in pids:
self.signal(pid, sig, allow_fail=allow_fail)
def java_pids(self, match):
"""
Get all the Java process IDs matching 'match'.
:param match: The AWK expression to match
"""
cmd = """jcmd | awk '/%s/ { print $1 }'""" % match
return [int(pid) for pid in self.ssh_capture(cmd, allow_fail=True)]
def kill_java_processes(self, match, clean_shutdown=True, allow_fail=False):
"""
Kill all the java processes matching 'match'.
:param match: The AWK expression to match
:param clean_shutdown: True if we should shut down cleanly with SIGTERM;
false if we should shut down with SIGKILL.
:param allow_fail: True if we should throw exceptions if the ssh commands fail.
"""
cmd = """jcmd | awk '/%s/ { print $1 }'""" % match
pids = [pid for pid in self.ssh_capture(cmd, allow_fail=True)]
if clean_shutdown:
sig = signal.SIGTERM
else:
sig = signal.SIGKILL
for pid in pids:
self.signal(pid, sig, allow_fail=allow_fail)
def copy_between(self, src, dest, dest_node):
"""Copy src to dest on dest_node
:param src: Path to the file or directory we want to copy
:param dest: The destination path
:param dest_node: The node to which we want to copy the file/directory
Note that if src is a directory, this will automatically copy recursively.
"""
# TODO: if dest is an existing file, what is the behavior?
temp_dir = tempfile.mkdtemp()
try:
# TODO: deal with very unlikely case that src_name matches temp_dir name?
# TODO: I think this actually works
local_dest = self._re_anchor_basename(src, temp_dir)
self.copy_from(src, local_dest)
dest_node.account.copy_to(local_dest, dest)
finally:
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
def scp_from(self, src, dest, recursive=False):
warnings.warn("scp_from is now deprecated. Please use copy_from")
self.copy_from(src, dest)
def _re_anchor_basename(self, path, directory):
"""Anchor the basename of path onto the given directory
Helper for the various copy_* methods.
:param path: Path to a file or directory. Could be on the driver machine or a worker machine.
:param directory: Path to a directory. Could be on the driver machine or a worker machine.
Example::
path/to/the_basename, another/path/ -> another/path/the_basename
"""
path_basename = path
# trim off path separator from end of path
# this is necessary because os.path.basename of a path ending in a separator is an empty string
# For example:
# os.path.basename("the/path/") == ""
# os.path.basename("the/path") == "path"
if path_basename.endswith(os.path.sep):
path_basename = path_basename[:-len(os.path.sep)]
path_basename = os.path.basename(path_basename)
return os.path.join(directory, path_basename)
def copy_from(self, src, dest):
if os.path.isdir(dest):
# dest is an existing directory, so assuming src looks like path/to/src_name,
# in this case we'll copy as:
# path/to/src_name -> dest/src_name
dest = self._re_anchor_basename(src, dest)
if self.isfile(src):
self.sftp_client.get(src, dest)
elif self.isdir(src):
# we can now assume dest path looks like: path_that_exists/new_directory
os.mkdir(dest)
# for obj in `ls src`, if it's a file, copy with copy_file_from, elif its a directory, call again
for obj in self.sftp_client.listdir(src):
obj_path = os.path.join(src, obj)
if self.isfile(obj_path) or self.isdir(obj_path):
self.copy_from(obj_path, dest)
else:
# TODO what about uncopyable file types?
pass
def scp_to(self, src, dest, recursive=False):
warnings.warn("scp_to is now deprecated. Please use copy_to")
self.copy_to(src, dest)
def copy_to(self, src, dest):
if self.isdir(dest):
# dest is an existing directory, so assuming src looks like path/to/src_name,
# in this case we'll copy as:
# path/to/src_name -> dest/src_name
dest = self._re_anchor_basename(src, dest)
if os.path.isfile(src):
# local to remote
self.sftp_client.put(src, dest)
elif os.path.isdir(src):
# we can now assume dest path looks like: path_that_exists/new_directory
self.mkdir(dest)
# for obj in `ls src`, if it's a file, copy with copy_file_from, elif its a directory, call again
for obj in os.listdir(src):
obj_path = os.path.join(src, obj)
if os.path.isfile(obj_path) or os.path.isdir(obj_path):
self.copy_to(obj_path, dest)
else:
# TODO what about uncopyable file types?
pass
def islink(self, path):
try:
# stat should follow symlinks
path_stat = self.sftp_client.lstat(path)
return stat.S_ISLNK(path_stat.st_mode)
except Exception:
return False
def isdir(self, path):
try:
# stat should follow symlinks
path_stat = self.sftp_client.stat(path)
return stat.S_ISDIR(path_stat.st_mode)
except Exception:
return False
def exists(self, path):
"""Test that the path exists, but don't follow symlinks."""
try:
# stat follows symlinks and tries to stat the actual file
self.sftp_client.lstat(path)
return True
except IOError:
return False
def isfile(self, path):
"""Imitates semantics of os.path.isfile
:param path: Path to the thing to check
:return: True if path is a file or a symlink to a file, else False. Note False can mean path does not exist.
"""
try:
# stat should follow symlinks
path_stat = self.sftp_client.stat(path)
return stat.S_ISREG(path_stat.st_mode)
except Exception:
return False
    def open(self, path, mode='r'):
        """Open the remote file at ``path`` via SFTP and return a file-like object."""
        return self.sftp_client.open(path, mode)
def create_file(self, path, contents):
"""Create file at path, with the given contents.
If the path already exists, it will be overwritten.
"""
# TODO: what should semantics be if path exists? what actually happens if it already exists?
# TODO: what happens if the base part of the path does not exist?
with self.sftp_client.open(path, "w") as f:
f.write(contents)
def mkdir(self, path, mode=0755):
self.sftp_client.mkdir(path, mode)
def mkdirs(self, path, mode=0755):
self.ssh("mkdir -p %s && chmod %o %s" % (path, mode, path))
def remove(self, path, allow_fail=False):
"""Remove the given file or directory"""
if allow_fail:
cmd = "rm -rf %s" % path
else:
cmd = "rm -r %s" % path
self.ssh(cmd, allow_fail=allow_fail)
@contextmanager
def monitor_log(self, log):
"""
Context manager that returns an object that helps you wait for events to
occur in a log. This checks the size of the log at the beginning of the
block and makes a helper object available with convenience methods for
checking or waiting for a pattern to appear in the log. This will commonly
be used to start a process, then wait for a log message indicating the
process is in a ready state.
See ``LogMonitor`` for more usage information.
"""
try:
offset = int(self.ssh_output("wc -c %s" % log).split()[0])
except Exception:
offset = 0
yield LogMonitor(self, log, offset)
class SSHOutputIter(object):
    """Helper class that wraps around an iterable object to provide has_next() in addition to next()
    """
    def __init__(self, iter_obj_func, channel_file=None):
        """
        :param iter_obj_func: A generator that returns an iterator over stdout from the remote process
        :param channel_file: A paramiko ``ChannelFile`` object
        """
        self.iter_obj_func = iter_obj_func
        self.iter_obj = iter_obj_func()
        self.channel_file = channel_file
        # sentinel is used as an indicator that there is currently nothing cached.
        # If self.cached is self.sentinel, then the next object from iter_obj is not yet cached.
        self.sentinel = object()
        self.cached = self.sentinel

    def __iter__(self):
        return self

    def next(self):
        """Return the cached lookahead item if present, else pull from the iterator."""
        if self.cached is self.sentinel:
            return next(self.iter_obj)
        next_obj = self.cached
        self.cached = self.sentinel
        return next_obj

    # Python 3 iterator protocol; keeps iteration (and builtin next()) working
    # on both Python 2 and 3. Backward compatible: ``next`` is unchanged.
    __next__ = next

    def has_next(self, timeout_sec=None):
        """Return True if next(iter_obj) would return another object within timeout_sec, else False.
        If timeout_sec is None, next(iter_obj) may block indefinitely.
        """
        assert timeout_sec is None or self.channel_file is not None, "should have descriptor to enforce timeout"
        prev_timeout = None
        if self.cached is self.sentinel:
            if self.channel_file is not None:
                prev_timeout = self.channel_file.channel.gettimeout()
                # when timeout_sec is None, next(iter_obj) will block indefinitely
                self.channel_file.channel.settimeout(timeout_sec)
            try:
                self.cached = next(self.iter_obj, self.sentinel)
            except socket.timeout:
                # The read timed out: restart the iterator so a later call can
                # retry, and leave nothing cached.
                self.iter_obj = self.iter_obj_func()
                self.cached = self.sentinel
            finally:
                if self.channel_file is not None:
                    # restore preexisting timeout
                    self.channel_file.channel.settimeout(prev_timeout)
        return self.cached is not self.sentinel
class LogMonitor(object):
"""
Helper class returned by monitor_log. Should be used as::
with remote_account.monitor_log("/path/to/log") as monitor:
remote_account.ssh("/command/to/start")
monitor.wait_until("pattern.*to.*grep.*for", timeout_sec=5)
to run the command and then wait for the pattern to appear in the log.
"""
def __init__(self, acct, log, offset):
self.acct = acct
self.log = log
| |
return 'Using unknown version found on system.'
status, output = getstatusoutput("libpng-config --version")
if status == 0:
version = output
else:
version = None
try:
return self._check_for_pkg_config(
'libpng', 'png.h',
min_version='1.2', version=version)
except CheckFailed as e:
if has_include_file(get_include_dirs(), 'png.h'):
return str(e) + ' Using unknown version found on system.'
raise
def get_extension(self):
sources = [
'src/_png.cpp',
'src/mplutils.cpp'
]
ext = make_extension('matplotlib._png', sources)
pkg_config.setup_extension(
ext, 'libpng', default_libraries=['png', 'z'],
alt_exec='libpng-config --ldflags')
Numpy().add_flags(ext)
return ext
class Qhull(SetupPackage):
    """The qhull computational-geometry library (system copy or bundled)."""
    name = "qhull"

    def check(self):
        # Assume an external qhull until a failed probe proves otherwise.
        self.__class__.found_external = True
        try:
            return self._check_for_pkg_config(
                'qhull', 'qhull/qhull_a.h', min_version='2003.1')
        except CheckFailed as e:
            self.__class__.found_pkgconfig = False
            # Qhull may not be in the pkg-config system but may still be
            # present on this system, so check if the header files can be
            # found.
            include_dirs = [
                os.path.join(x, 'qhull') for x in get_include_dirs()]
            if has_include_file(include_dirs, 'qhull_a.h'):
                return 'Using system Qhull (version unknown, no pkg-config info)'
            self.__class__.found_external = False
            return str(e) + ' Using local copy.'

    def add_flags(self, ext):
        # Link the system library when present, else compile the bundled copy.
        if self.found_external:
            pkg_config.setup_extension(ext, 'qhull',
                                       default_libraries=['qhull'])
        else:
            ext.include_dirs.append('extern')
            ext.sources.extend(glob.glob('extern/qhull/*.c'))
class TTConv(SetupPackage):
    """The bundled ttconv TrueType-conversion extension."""
    name = "ttconv"

    def get_extension(self):
        ext = make_extension('matplotlib.ttconv', [
            'src/_ttconv.cpp',
            'extern/ttconv/pprdrv_tt.cpp',
            'extern/ttconv/pprdrv_tt2.cpp',
            'extern/ttconv/ttutil.cpp',
        ])
        Numpy().add_flags(ext)
        # The ttconv sources live under the bundled extern/ tree.
        ext.include_dirs.append('extern')
        return ext
class Path(SetupPackage):
    """The _path extension (path geometry routines)."""
    name = "path"

    def get_extension(self):
        ext = make_extension('matplotlib._path', [
            'src/py_converters.cpp',
            'src/_path_wrapper.cpp',
        ])
        Numpy().add_flags(ext)
        LibAgg().add_flags(ext)
        return ext
class Image(SetupPackage):
    """The _image extension (image support routines)."""
    name = "image"

    def get_extension(self):
        ext = make_extension('matplotlib._image', [
            'src/_image.cpp',
            'src/mplutils.cpp',
            'src/_image_wrapper.cpp',
        ])
        Numpy().add_flags(ext)
        LibAgg().add_flags(ext)
        return ext
class ContourLegacy(SetupPackage):
    """The legacy _cntr contouring extension."""
    name = "contour_legacy"

    def get_extension(self):
        ext = make_extension('matplotlib._cntr', ["src/cntr.c"])
        Numpy().add_flags(ext)
        return ext
class Contour(SetupPackage):
    """The _contour extension."""
    name = "contour"

    def get_extension(self):
        ext = make_extension('matplotlib._contour', [
            "src/_contour.cpp",
            "src/_contour_wrapper.cpp",
        ])
        Numpy().add_flags(ext)
        return ext
class Delaunay(SetupPackage):
    """The matplotlib.delaunay triangulation package and its extension."""
    name = "delaunay"

    def get_packages(self):
        return ['matplotlib.delaunay']

    def get_extension(self):
        base = 'lib/matplotlib/delaunay'
        filenames = ["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
                     "delaunay_utils.cpp", "natneighbors.cpp"]
        ext = make_extension('matplotlib._delaunay',
                             [os.path.join(base, f) for f in filenames])
        Numpy().add_flags(ext)
        return ext
class QhullWrap(SetupPackage):
    """Python wrapper around the qhull library (_qhull extension)."""
    name = "qhull_wrap"

    def get_extension(self):
        ext = make_extension('matplotlib._qhull', ['src/qhull_wrap.c'],
                             define_macros=[('MPL_DEVNULL', os.devnull)])
        Numpy().add_flags(ext)
        Qhull().add_flags(ext)
        return ext
class Tri(SetupPackage):
    """The _tri extension (triangulation helpers)."""
    name = "tri"

    def get_extension(self):
        ext = make_extension('matplotlib._tri', [
            "lib/matplotlib/tri/_tri.cpp",
            "lib/matplotlib/tri/_tri_wrapper.cpp",
            "src/mplutils.cpp",
        ])
        Numpy().add_flags(ext)
        return ext
class Externals(SetupPackage):
    """The vendored modules shipped as the matplotlib.externals package."""
    name = "externals"

    def get_packages(self):
        return ['matplotlib.externals']
class Pytz(SetupPackage):
    """Runtime dependency: pytz."""
    name = "pytz"

    def check(self):
        try:
            import pytz
        except ImportError:
            raise CheckFailed("could not be found")
        else:
            return "using pytz version %s" % pytz.__version__

    def get_install_requires(self):
        return ['pytz']
class Cycler(SetupPackage):
    """Runtime dependency: cycler."""
    name = "cycler"

    def check(self):
        try:
            import cycler
        except ImportError:
            raise CheckFailed("could not be found")
        else:
            return "using cycler version %s" % cycler.__version__

    def get_install_requires(self):
        return ['cycler']
class Dateutil(SetupPackage):
    """Runtime dependency: python-dateutil."""
    name = "dateutil"

    def __init__(self, version=None):
        # Optional version specifier (e.g. '!=2.1') appended to the
        # install_requires entry.
        self.version = version

    def check(self):
        try:
            import dateutil
        except ImportError:
            raise CheckFailed("could not be found")
        major, minor1, _, _, _ = sys.version_info
        if dateutil.__version__ == '2.1' and (major, minor1) == (3, 3):
            # dateutil 2.1 has a file encoding bug that breaks installation
            # on python 3.3:
            # https://github.com/matplotlib/matplotlib/issues/2373
            # Bug fix: the two message fragments previously concatenated to
            # "breaks installationon python"; a space was missing.
            raise CheckFailed(
                "dateutil v. 2.1 has a bug that breaks installation "
                "on python 3.3.x, use another dateutil version")
        return "using dateutil version %s" % dateutil.__version__

    def get_install_requires(self):
        dateutil = 'python-dateutil'
        if self.version is not None:
            dateutil += self.version
        return [dateutil]
class Tornado(SetupPackage):
    """Optional dependency: tornado."""
    name = "tornado"

    def check(self):
        try:
            import tornado
        except ImportError:
            raise CheckFailed("could not be found")
        else:
            return "using tornado version %s" % tornado.version
class Pyparsing(SetupPackage):
    """Runtime dependency: pyparsing."""
    name = "pyparsing"
    # pyparsing 2.0.4 has broken python 3 support.
    # pyparsing 2.1.2 is broken in python3.4/3.3.

    def is_ok(self):
        # pyparsing 2.0.0 bug, but it may be patched in distributions
        try:
            import pyparsing
            f = pyparsing.Forward()
            f <<= pyparsing.Literal('a')
            return f is not None
        except (ImportError, TypeError):
            return False

    def check(self):
        try:
            import pyparsing
        except ImportError:
            raise CheckFailed("could not be found")
        required = [1, 5, 6]
        found = [int(x) for x in pyparsing.__version__.split('.')]
        if found < required:
            raise CheckFailed(
                "matplotlib requires pyparsing >= {0}".format(
                    '.'.join(str(x) for x in required)))
        if not self.is_ok():
            return (
                "Your pyparsing contains a bug that will be monkey-patched by "
                "matplotlib. For best results, upgrade to pyparsing 2.0.1 or "
                "later.")
        return "using pyparsing version %s" % pyparsing.__version__

    def get_install_requires(self):
        versionstring = 'pyparsing>=1.5.6,!=2.0.4,!=2.1.2'
        if self.is_ok():
            return [versionstring]
        # Also exclude the buggy 2.0.0 release when the installed copy
        # exhibits the Forward/<<= bug.
        return [versionstring + ',!=2.0.0']
class BackendAgg(OptionalBackendPackage):
    """The Agg raster backend; always built (force = True)."""
    name = "agg"
    force = True

    def get_extension(self):
        ext = make_extension('matplotlib.backends._backend_agg', [
            "src/mplutils.cpp",
            "src/py_converters.cpp",
            "src/_backend_agg.cpp",
            "src/_backend_agg_wrapper.cpp",
        ])
        Numpy().add_flags(ext)
        LibAgg().add_flags(ext)
        FreeType().add_flags(ext)
        return ext
class BackendTkAgg(OptionalBackendPackage):
    """The TkAgg backend; Tcl/Tk is loaded at run time rather than build time."""
    name = "tkagg"
    force = True

    def check(self):
        return "installing; run-time loading from Python Tcl / Tk"

    def get_extension(self):
        ext = make_extension('matplotlib.backends._tkagg', [
            'src/py_converters.cpp',
            'src/_tkagg.cpp',
        ])
        self.add_flags(ext)
        Numpy().add_flags(ext)
        LibAgg().add_flags(ext, add_sources=False)
        return ext

    def add_flags(self, ext):
        ext.include_dirs.extend(['src'])
        if sys.platform == 'win32':
            # PSAPI library needed for finding Tcl / Tk at run time
            ext.libraries.extend(['psapi'])
class BackendGtk(OptionalBackendPackage):
    # Build support for the legacy GTK+ 2 / pygtk backend.
    name = "gtk"
    def check_requirements(self):
        """Verify pygtk >= 2.2.0 is importable and the GTK+/pygtk headers exist."""
        try:
            import gtk
        except ImportError:
            raise CheckFailed("Requires pygtk")
        except RuntimeError:
            # pygtk raises RuntimeError when it is installed but cannot
            # initialize (e.g. no display).
            raise CheckFailed('pygtk present, but import failed.')
        else:
            version = (2, 2, 0)
            if gtk.pygtk_version < version:
                raise CheckFailed(
                    "Requires pygtk %d.%d.%d or later. "
                    "Found %d.%d.%d" % (version + gtk.pygtk_version))
        # Confirm the C headers the extension needs can actually be located.
        ext = self.get_extension()
        self.add_flags(ext)
        check_include_file(ext.include_dirs,
                           os.path.join("gtk", "gtk.h"),
                           'gtk')
        check_include_file(ext.include_dirs,
                           os.path.join("pygtk", "pygtk.h"),
                           'pygtk')
        return 'Gtk: %s pygtk: %s' % (
            ".".join(str(x) for x in gtk.gtk_version),
            ".".join(str(x) for x in gtk.pygtk_version))
    def get_package_data(self):
        """Glade UI definitions shipped with the GTK backend."""
        return {'matplotlib': ['mpl-data/*.glade']}
    def get_extension(self):
        """Build the _backend_gdk extension."""
        sources = [
            'src/_backend_gdk.c'
            ]
        ext = make_extension('matplotlib.backends._backend_gdk', sources)
        self.add_flags(ext)
        Numpy().add_flags(ext)
        return ext
    def add_flags(self, ext):
        """Add GTK include/library flags to ext (hard-coded C:/GTK paths on win32)."""
        if sys.platform == 'win32':
            def getoutput(s):
                ret = os.popen(s).read().strip()
                return ret
            if 'PKG_CONFIG_PATH' not in os.environ:
                # If Gtk+ is installed, pkg-config is required to be installed
                os.environ['PKG_CONFIG_PATH'] = 'C:\\GTK\\lib\\pkgconfig'
            # popen broken on my win32 plaform so I can't use pkgconfig
            ext.library_dirs.extend(
                ['C:/GTK/bin', 'C:/GTK/lib'])
            ext.include_dirs.extend(
                ['win32_static/include/pygtk-2.0',
                 'C:/GTK/include',
                 'C:/GTK/include/gobject',
                 'C:/GTK/include/gext',
                 'C:/GTK/include/glib',
                 'C:/GTK/include/pango',
                 'C:/GTK/include/atk',
                 'C:/GTK/include/X11',
                 'C:/GTK/include/cairo',
                 'C:/GTK/include/gdk',
                 'C:/GTK/include/gdk-pixbuf',
                 'C:/GTK/include/gtk',
                 ])
            pygtkIncludes = getoutput(
                'pkg-config --cflags-only-I pygtk-2.0').split()
            gtkIncludes = getoutput(
                'pkg-config --cflags-only-I gtk+-2.0').split()
            includes = pygtkIncludes + gtkIncludes
            # Strip the leading '-I' from each include flag.
            ext.include_dirs.extend([include[2:] for include in includes])
            pygtkLinker = getoutput('pkg-config --libs pygtk-2.0').split()
            gtkLinker = getoutput('pkg-config --libs gtk+-2.0').split()
            linkerFlags = pygtkLinker + gtkLinker
            # Split pkg-config link flags into libraries (-l), search
            # directories (-L), and everything else.
            ext.libraries.extend(
                [flag[2:] for flag in linkerFlags if flag.startswith('-l')])
            ext.library_dirs.extend(
                [flag[2:] for flag in linkerFlags if flag.startswith('-L')])
            ext.extra_link_args.extend(
                [flag for flag in linkerFlags if not
                 (flag.startswith('-l') or flag.startswith('-L'))])
            # visual studio doesn't need the math library
            if (sys.platform == 'win32' and
                win32_compiler == 'msvc' and
                'm' in ext.libraries):
                ext.libraries.remove('m')
        elif sys.platform != 'win32':
            pkg_config.setup_extension(ext, 'pygtk-2.0')
            pkg_config.setup_extension(ext, 'gtk+-2.0')
class BackendGtkAgg(BackendGtk):
    """The GTK+ 2 backend rendered through Agg."""
    name = "gtkagg"

    def check(self):
        # The previous try/except block merely re-raised every exception,
        # so delegate to the base-class check directly.
        return super(BackendGtkAgg, self).check()

    def get_package_data(self):
        """Glade UI definitions shipped with the GTK backends."""
        return {'matplotlib': ['mpl-data/*.glade']}

    def get_extension(self):
        """Build the _gtkagg extension."""
        sources = [
            'src/py_converters.cpp',
            'src/_gtkagg.cpp',
            'src/mplutils.cpp'
        ]
        ext = make_extension('matplotlib.backends._gtkagg', sources)
        self.add_flags(ext)
        LibAgg().add_flags(ext)
        Numpy().add_flags(ext)
        return ext
def backend_gtk3agg_internal_check(x):
    """Probe whether the pygobject/GTK3 stack is importable.

    Intended to run in a child process (the argument ``x`` is ignored; it
    exists so the function can be used with ``Pool.map``-style APIs).

    :return: a ``(success, message)`` tuple.
    """
    try:
        import gi
    except ImportError:
        return (False, "Requires pygobject to be installed.")
    try:
        gi.require_version("Gtk", "3.0")
    except ValueError:
        return (False, "Requires gtk3 development files to be installed.")
    except AttributeError:
        return (False, "pygobject version too old.")
    try:
        from gi.repository import Gtk, Gdk, GObject
    except (ImportError, RuntimeError):
        return (False, "Requires pygobject to be installed.")
    # Bug fix: minor and micro were previously swapped, reporting e.g.
    # "3.2.14" for GTK 3.14.2.
    return (True, "version %s.%s.%s" % (
        Gtk.get_major_version(),
        Gtk.get_minor_version(),
        Gtk.get_micro_version()))
class BackendGtk3Agg(OptionalBackendPackage):
    # Build support for the GTK3 backend rendered through Agg.
    name = "gtk3agg"
    def check_requirements(self):
        """Probe for a usable pygobject/GTK3 stack in a worker process."""
        if 'TRAVIS' in os.environ:
            raise CheckFailed("Can't build with Travis")
        # This check needs to be performed out-of-process, because
        # importing gi and then importing regular old pygtk afterward
        # segfaults the interpreter.
        try:
            p = multiprocessing.Pool()
        except:
            # Pool creation itself can fail on some platforms; report
            # "unknown" rather than failing the whole check.
            return "unknown (can not use multiprocessing to determine)"
        try:
            res = p.map_async(backend_gtk3agg_internal_check, [0])
            success, msg = res.get(timeout=10)[0]
        except multiprocessing.TimeoutError:
            p.terminate()
            # No result returned. Probably hanging, terminate the process.
            success = False
            raise CheckFailed("Check timed out")
        except:
            p.close()
            # Some other error.
            success = False
            msg = "Could not determine"
            raise
        else:
            p.close()
        finally:
            # Always reap the pool's worker processes.
            p.join()
        if success:
            return msg
        else:
            raise CheckFailed(msg)
    def get_package_data(self):
        """Glade UI definitions shipped with the GTK3 backends."""
        return {'matplotlib': ['mpl-data/*.glade']}
def backend_gtk3cairo_internal_check(x):
try:
import cairocffi
except ImportError:
try:
import cairo
except ImportError:
return (False, "Requires cairocffi or pycairo to be installed.")
try:
import gi
except ImportError:
return (False, "Requires pygobject to be installed.")
try:
gi.require_version("Gtk", "3.0")
except ValueError:
return (False, "Requires gtk3 development files to be installed.")
except AttributeError:
return (False, "pygobject version too | |
is then used by its `OutputPort(s) <Mechanism_OutputPorts>` to assign
items to its `output_values <Mechanism_Base.output_values>` attribute.
.. _Mechanism_Ports:
*Ports*
~~~~~~~~
Every Mechanism has one or more of each of three types of Ports: `InputPort(s) <InputPort>`,
`ParameterPort(s) <ParameterPort>`, and `OutputPort(s) <OutputPort>`. Generally, these are created automatically
when the Mechanism is created. InputPorts and OutputPorts (but not ParameterPorts) can also be specified explicitly
for a Mechanism, or added to an existing Mechanism using its `add_ports <Mechanism_Base.add_ports>` method, as
described `above <Mechanism_Port_Specification>`.
.. _Mechanism_Figure:
The three types of Ports are shown schematically in the figure below, and described briefly in the following sections.
.. figure:: _static/Mechanism_Ports_fig.svg
:alt: Mechanism Ports
:scale: 75 %
:align: left
**Schematic of a Mechanism showing its three types of Ports** (`InputPort`, `ParameterPort` and `OutputPort`).
Every Mechanism has at least one (`primary <InputPort_Primary>`) InputPort and one (`primary
<OutputPort_Primary>`) OutputPort, but can have additional ports of each type. It also has one
`ParameterPort` for each of its parameters and the parameters of its `function <Mechanism_Base.function>`.
The `value <InputPort.value>` of each InputPort is assigned as an item of the Mechanism's `variable
<Mechanism_Base.variable>`, and the result of its `function <Mechanism_Base.function>` is assigned as the Mechanism's
`value <Mechanism_Base.value>`, the items of which are referenced by its OutputPorts to determine their own
`value <OutputPort.value>`\\s (see `Mechanism_Variable_and_Value` above, and more detailed descriptions below).
.. _Mechanism_InputPorts:
InputPorts
^^^^^^^^^^^
These receive, potentially combine, and represent the input to a Mechanism, and provide this to the Mechanism's
`function <Mechanism_Base.function>`. Usually, a Mechanism has only one (`primary <InputPort_Primary>`) `InputPort`,
identified in its `input_port <Mechanism_Base.input_port>` attribute. However some Mechanisms have more than one
InputPort. For example, a `ComparatorMechanism` has one InputPort for its **SAMPLE** and another for its **TARGET**
input. All of the Mechanism's InputPorts (including its `primary InputPort <InputPort_Primary>`) are listed in its
`input_ports <Mechanism_Base.input_ports>` attribute (note the plural). The `input_ports
<Mechanism_Base.input_ports>` attribute is a ContentAddressableList -- a PsyNeuLink-defined subclass of the Python
class `UserList <https://docs.python.org/3.6/library/collections.html?highlight=userlist#collections.UserList>`_ --
that allows a specific InputPort in the list to be accessed using its name as the index for the list (e.g.,
``my_mechanism['InputPort name']``).
.. _Mechanism_Variable_and_InputPorts:
The `value <InputPort.value>` of each InputPort for a Mechanism is assigned to a different item of the Mechanism's
`variable <Mechanism_Base.variable>` attribute (a 2d np.array), as well as to a corresponding item of its `input_values
<Mechanism_Base.input_values>` attribute (a list). The `variable <Mechanism_Base.variable>` provides the input to the
Mechanism's `function <Mechanism_Base.function>`, while its `input_values <Mechanism_Base.input_values>` provides a
convenient way of accessing the value of its individual items. Because there is a one-to-one correspondence between
a Mechanism's InputPorts and the items of its `variable <Mechanism_Base.variable>`, their size along their outermost
dimension (axis 0) must be equal; that is, the number of items in the Mechanism's `variable <Mechanism_Base.variable>`
attribute must equal the number of InputPorts in its `input_ports <Mechanism_Base.input_ports>` attribute. A
Mechanism's constructor does its best to ensure this: if its **default_variable** and/or its **size** argument is
specified, it constructs a number of InputPorts (and each with a `value <InputPort.value>`) corresponding to the
items specified for the Mechanism's `variable <Mechanism_Base.variable>`, as in the examples below::
my_mech_A = pnl.TransferMechanism(default_variable=[[0],[0,0]])
print(my_mech_A.input_ports)
> [(InputPort InputPort-0), (InputPort InputPort-1)]
print(my_mech_A.input_ports[0].value)
> [ 0.]
print(my_mech_A.input_ports[1].value)
> [ 0. 0.]
my_mech_B = pnl.TransferMechanism(default_variable=[[0],[0],[0]])
print(my_mech_B.input_ports)
> [(InputPort InputPort-0), (InputPort InputPort-1), (InputPort InputPort-2)]
Conversely, if the **input_ports** argument is used to specify InputPorts for the Mechanism, they are used to format
the Mechanism's variable::
my_mech_C = pnl.TransferMechanism(input_ports=[[0,0], 'Hello'])
print(my_mech_C.input_ports)
> [(InputPort InputPort-0), (InputPort Hello)]
print(my_mech_C.variable)
> [array([0, 0]) array([0])]
If both the **default_variable** (or **size**) and **input_ports** arguments are specified, then the number and format
of their respective items must be the same (see `Port <Port_Examples>` for additional examples of specifying Ports).
If InputPorts are added using the Mechanism's `add_ports <Mechanism_Base.add_ports>` method, then its
`variable <Mechanism_Base.variable>` is extended to accommodate the number of InputPorts added (note that this must
be coordinated with the Mechanism's `function <Mechanism_Base.function>`, which takes the Mechanism's `variable
<Mechanism_Base.variable>` as its input (see `note <Mechanism_Add_InputPorts_Note>`).
The order in which `InputPorts are specified <Mechanism_InputPort_Specification>` in the Mechanism's constructor,
and/or `added <Mechanism_Add_InputPorts>` using its `add_ports <Mechanism_Base.add_ports>` method, determines the
order of the items to which they are assigned in the Mechanism's `variable <Mechanism_Base.variable>`,
and are listed in its `input_ports <Mechanism_Base.input_ports>` and `input_values <Mechanism_Base.input_values>`
attribute. Note that a Mechanism's `input_values <Mechanism_Base.input_values>` attribute has the same information as
the Mechanism's `variable <Mechanism_Base.variable>`, but in the form of a list rather than an ndarray.
.. _Mechanism_InputPort_Specification:
**Specifying InputPorts and a Mechanism's** `variable <Mechanism_Base.variable>` **Attribute**
When a Mechanism is created, the number and format of the items in its `variable <Mechanism_Base.variable>`
attribute, as well as the number of InputPorts it has and their `variable <InputPort.variable>` and `value
<InputPort.value>` attributes, are determined by one of the following arguments in the Mechanism's constructor:
* **default_variable** (at least 2d ndarray) -- determines the number and format of the items of the Mechanism's
`variable <Mechanism_Base.variable>` attribute. The number of items in its outermost dimension (axis 0) determines
the number of InputPorts created for the Mechanism, and the format of each item determines the format for the
`variable <InputPort.variable>` and `value <InputPort.value>` attributes of the corresponding InputPort.
If any InputPorts are specified in the **input_ports** argument or an *INPUT_PORTS* entry of
a specification dictionary assigned to the **params** argument of the Mechanism's constructor, then the number
must match the number of items in **default_variable**, or an error is generated. The format of the items in
**default_variable** are used to specify the format of the `variable <InputPort.variable>` or `value
<InputPort.value>` of the corresponding InputPorts for any that are not explicitly specified in the
**input_ports** argument or *INPUT_PORTS* entry (see below).
..
* **size** (int, list or ndarray) -- specifies the number and length of items in the Mechanism's variable,
if **default_variable** is not specified. For example, the following mechanisms are equivalent::
T1 = TransferMechanism(size = [3, 2])
T2 = TransferMechanism(default_variable = [[0, 0, 0], [0, 0]])
The relationship to any specifications in the **input_ports** argument or
*INPUT_PORTS* entry of a **params** dictionary is the same as for the **default_variable** argument,
with the latter taking precedence (see above).
..
* **input_ports** (list) -- this can be used to explicitly `specify the InputPorts <InputPort_Specification>`
created for the Mechanism. Each item must be an `InputPort specification <InputPort_Specification>`, and the number
of items must match the number of items in the **default_variable** argument or **size** argument
if either of those is specified. If the `variable <InputPort.variable>` and/or `value <InputPort.value>`
is `explicitly specified for an InputPort <InputPort_Variable_and_Value>` in the **input_ports** argument or
*INPUT_PORTS* entry of a **params** dictionary, it must be compatible with the value of the corresponding
item of **default_variable**; otherwise, the format of the item in **default_variable** corresponding to the
InputPort is used to specify the format of the InputPort's `variable <InputPort.variable>` (e.g., the InputPort is
`specified using an OutputPort <InputPort_Projection_Source_Specification>` to project to it). If
**default_variable** is not specified, a default value is specified by the Mechanism. InputPorts can also be
specified that `shadow the inputs <InputPort_Shadow_Inputs>` of other InputPorts and/or Mechanisms; that is, receive
Projections from all of the same `senders <Projection_Base.sender>` as those specified.
COMMENT:
*** ADD SOME EXAMPLES HERE (see `examples <XXX>`)
COMMENT
COMMENT:
*** ADD THESE TO ABOVE WHEN IMPLEMENTED:
If more InputPorts are specified than there are items in `variable <Mechanism_Base.variable>`,
the latter is extended to match the former.
If the Mechanism's `variable <Mechanism_Base.variable>` has more than one item, it may still be assigned
a single InputPort; in that case, the `value <InputPort.value>` of that InputPort must have the same
number of items as the Mechanism's `variable <Mechanism_Base.variable>`.
COMMENT
..
* *INPUT_PORTS* entry of a params dict (list) -- specifications are treated in the same manner as those in the
**input_ports** argument, and take precedence over those.
.. _Mechanism_Add_InputPorts:
**Adding InputPorts**
InputPorts can be added to a Mechanism using its `add_ports <Mechanism_Base.add_ports>` method; this extends its
`variable <Mechanism_Base.variable>` by a number of items equal to the number of InputPorts added, and each new item
is assigned a format compatible with the `value <InputPort.value>` of the corresponding InputPort added; if the
InputPort's `variable <InputPort.variable>` is not specified, it is assigned the default format for an item of the
owner's `variable <Mechanism_Base.variable>` attribute. The InputPorts are appended to the end of the list in | |
<filename>phonology-augmented_transliterator/syl_splitter/syl_struct_generator/SylStructGenerator.py
# Generate all possible syllable structure hypothesis of the entries in an input file
import os
import sys
import time
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from LangAssets.LangAssets import LangAssets
from LangAssets.Syllable import Syllable
from LangAssets.Word import Word
from LangAssets.WordHyp import WordHyp
#---------------- IMPORT ALL VALID ENGLISH CONSONANTS/ VOWELS ---------------
# All possible roles of an alphabet
ONSET = "O"
NUCLEUS = "N"
CODA = "Cd"
ONSET_NUCLEUS = "O_N"
ONSET_CODA = "O_Cd"
NUCLEUS_ONSET = "N_O"
NUCLEUS_NUCLEUS = "N_N"
NUCLEUS_CODA = "N_Cd"
CODA_ONSET = "Cd_O"
CODA_NUCLEUS = "Cd_N"
CODA_CODA = "Cd_Cd"
REMOVE = "R"
lang_assets = LangAssets()
VOWEL = LangAssets.VOWEL
CONSONANT = LangAssets.CONSONANT
GENERIC_VOWEL = LangAssets.GENERIC_VOWEL
PossibleRoles = LangAssets.POSSIBLE_ROLES
ValidEnConsos = lang_assets.valid_en_consos
ValidEnVowels = lang_assets.valid_en_vowels
ValidVieConsos = lang_assets.valid_vie_consos
ValidVieVowels = lang_assets.valid_vie_vowels
ValidConsoRoles = LangAssets.VALID_CONSO_ROLES
ValidVowelRoles = LangAssets.VALID_VOWEL_ROLES
ValidSubSylUnit = {
ONSET: lang_assets.valid_en_onsets,
NUCLEUS: lang_assets.valid_en_nuclei,
CODA: lang_assets.valid_en_codas
}
# Each role can only be assigned to more than a specific ratio of all the letters
# TO-DO estimate the maximum ratio of each role from the Vietnamese entries in training data
MAX_ROLES_RATIOS = {
ONSET: 0.3,
NUCLEUS: 0.4,
CODA: 0.3,
ONSET_NUCLEUS: 0.2,
NUCLEUS_ONSET: 0.2,
NUCLEUS_NUCLEUS: 0.2,
NUCLEUS_CODA: 0.2,
CODA_ONSET: 0.2,
CODA_NUCLEUS: 0.2,
CODA_CODA: 0.2,
REMOVE: 0.2,
}
#print ValidEnConsos
#print ValidEnVowels
#print ValidSubSylUnit[ONSET]
#print ValidSubSylUnit[NUCLEUS]
#print ValidSubSylUnit[CODA]
#---------------- LABEL ALL LETTERS IN A WORD ------------------#
def label_letters(word):
    """Label each letter of `word` as VOWEL or CONSONANT.

    Returns a list, parallel to `word`, of VOWEL/CONSONANT markers.
    Exits the program if a letter is neither a valid English vowel nor a
    valid English consonant (same behavior as before).
    """
    labels = [None] * len(word)
    for pos, letter in enumerate(word):
        # Vowel membership is checked first, matching the original priority.
        if letter in ValidEnVowels:
            labels[pos] = VOWEL
        elif letter in ValidEnConsos:
            labels[pos] = CONSONANT
        else:
            # print() call form works under both Python 2 and 3; the old
            # `print "..."` statement was a syntax error on Python 3.
            print("ERROR: Unlabeled letter %s in word %s" % (letter, word))
            sys.exit(1)
    return labels
#---------------- ENCODE A VIETNAMESE ENTRY IN SUBSYLLABIC UNITS ------------------#
def encode_vie_word(word):
    """Encode a Vietnamese entry as a sequence of subsyllabic-unit roles.

    `word` is a dot-separated list of syllables, each syllable being a
    space-separated list of phones whose last token is the tone marker.
    Returns the role strings (ONSET/NUCLEUS/CODA) joined by " . ".
    Exits the program on an unknown phone.
    """
    encoded_units = []
    for raw_syl in word.split("."):
        # Drop the trailing tone marker, then split back into phones.
        toneless = " ".join(raw_syl.strip().split(" ")[:-1])
        phons = toneless.split(" ")
        structure = [None] * len(phons)
        # First pass: mark the first vowel as the nucleus.
        for idx, phon in enumerate(phons):
            if phon in ValidVieVowels:
                structure[idx] = NUCLEUS
                break
        # Second pass: validate phones and mark onset/coda around the nucleus.
        for idx, phon in enumerate(phons):
            if phon not in ValidVieVowels and phon not in ValidVieConsos:
                print("Error: unaccounted phon %s" % phons[idx])
                sys.exit(1)
            if phon in ValidVieConsos:
                if idx + 1 < len(structure) and structure[idx + 1] == NUCLEUS:
                    structure[idx] = ONSET
                elif idx - 1 >= 0 and structure[idx - 1] == NUCLEUS:
                    structure[idx] = CODA
        encoded_units.append(" ".join(structure))
    return " . ".join(encoded_units)
#---------------- GENERATE ALL POSSIBLE ROLES FOR ALPHABETS IN A WORD ------------------#
# Global tally of how many times each role is currently assigned to a letter
# of the word being processed; generate_roles() increments/decrements it while
# recursing so the MAX_ROLES_RATIOS caps can be enforced.
role_count = {}
for role in PossibleRoles:
    role_count[role] = 0
def generate_roles(word, labels, roles, pos, encoded_vie_units, best_word_hyps_list):
    """Recursively enumerate all role assignments for the letters of `word`.

    `labels` marks each letter VOWEL/CONSONANT; `roles[0:pos]` holds the
    roles chosen so far. Complete assignments are turned into syllable
    hypotheses and scored into `best_word_hyps_list`. Uses the module-level
    `role_count` tally to cap how often each role appears (MAX_ROLES_RATIOS).
    """
    # Once the boundary after position pos-1 is fixed, validate the
    # subsyllabic unit ending there — unless that letter was REMOVEd
    # (a removed letter does not close a unit, except at the word end).
    if pos-1 >= 0 and (roles[pos-1] != REMOVE or pos == len(roles)):
        if not is_valid_subsyllabic_unit(word, labels, roles, pos-1):
            return
    # All letters assigned: build the syllable hypothesis and score it.
    if pos >= len(labels):
        hyp_word = construct_syls(word, labels, roles)
        update_best_hyp_words_list(word, encoded_vie_units, roles, hyp_word, best_word_hyps_list)
        return
    # Pick the candidate role inventory for the current letter class.
    # (The two branches previously duplicated the identical loop body.)
    if labels[pos] == CONSONANT:
        candidate_roles = ValidConsoRoles
    elif labels[pos] == VOWEL:
        candidate_roles = ValidVowelRoles
    else:
        return
    for role in candidate_roles:
        # Skip a role once it would exceed its per-word usage cap.
        if role_count[role] >= (len(labels) * MAX_ROLES_RATIOS[role] + 1):
            continue
        role_count[role] = role_count[role] + 1
        generate_roles(word, labels, roles[0:pos] + [role] + roles[pos+1:len(labels)], pos + 1, encoded_vie_units, best_word_hyps_list)
        role_count[role] = role_count[role] - 1
#---------------- CHECK IF A SUBSYLLABIC UNIT IS VALID ------------------#
# From position pos, look backwards along the string to check if the current
# subsyllabic unit is valid
def is_valid_subsyllabic_unit(word, labels, roles, pos):
    """Check whether the subsyllabic unit ending at position `pos` is valid.

    Looks backwards from `pos` along `roles` (one role string per letter of
    `word`, `labels` marking each letter VOWEL/CONSONANT), reassembles the
    letters of the just-completed unit, and tests it against the valid
    onset/nucleus/coda inventories. Returns True if still acceptable.
    """
    # If pos is the first non-REMOVE letter of the word, it can only start a
    # syllable: its role must be ONSET or NUCLEUS.
    word_beginning = 0
    for i in range(len(roles)):
        if roles[i] != REMOVE:
            word_beginning = i
            break
    if pos == word_beginning:
        # roles[pos] can only possibly be ONSET or NUCLEUS
        if roles[pos] != ONSET and roles[pos] != NUCLEUS:
            return False
    # If pos is at the end of the word, the final non-REMOVE letter must not
    # end on an onset (a syllable cannot end with an onset).
    if pos == len(roles) - 1:
        word_ending = -1
        for i in reversed(range(0, len(roles))):
            if roles[i] != REMOVE:
                word_ending = i
                break
        #print "word_ending: " + str(word_ending)
        if word_ending >= 0:
            curr_role = roles[word_ending].split("_")[-1]
            if roles[word_ending].split("_")[-1] == ONSET:
                return False
    last_non_R_pos = -1
    end_of_unit = -1
    curr_role = ""
    subsyl_unit = ""
    # Get the last position before `pos` that is not assigned the REMOVE role;
    # REMOVEd letters are transparent when looking for the previous unit.
    for i in reversed(range(0, pos)):
        if roles[i] != REMOVE:
            last_non_R_pos = i
            break
    #if pos == len(roles)-1 and roles[pos] != REMOVE:
    #    last_non_R_pos = pos
    # if end of the word is reached
    if pos == len(roles)-1 and roles[pos] != REMOVE:
        if roles[pos] == NUCLEUS_CODA and labels[pos] == VOWEL:
            # the syllable is one single vowel
            return True
        #print ("last_non_R_pos: %d" % last_non_R_pos)
        if last_non_R_pos >= 0:
            if roles[last_non_R_pos] == NUCLEUS_CODA and labels[last_non_R_pos] == VOWEL:
                # the syllable is one single vowel
                return True
    # Single role (ONSET, CODA or NUCLEUS): the current letter starts or
    # continues a unit; the previous unit ends at last_non_R_pos when the
    # role changes.
    if len(roles[pos].split("_")) == 1:
        if last_non_R_pos >= 0:
            if roles[pos].split("_")[0] == CODA:
                # A coda may not directly follow an onset, nor a consonantal nucleus.
                if roles[last_non_R_pos].split("_")[-1] == ONSET or \
                   (roles[last_non_R_pos].split("_")[-1] == NUCLEUS and labels[last_non_R_pos] == CONSONANT):
                    return False
            if roles[pos] != roles[last_non_R_pos]:
                end_of_unit = last_non_R_pos
                #print ("roles[last_non_R_pos]: %s" % roles[last_non_R_pos])
                curr_role = roles[last_non_R_pos].split("_")[-1]
    else:
        # Combined role ("X_Y"): this letter closes a unit of type X and
        # opens a unit of type Y.
        if last_non_R_pos >= 0:
            if roles[pos].split("_")[0] == CODA and \
               not (roles[pos] == NUCLEUS_CODA and labels[pos] == VOWEL):
                if roles[last_non_R_pos].split("_")[-1] == ONSET or \
                   (roles[last_non_R_pos].split("_")[-1] == NUCLEUS and labels[last_non_R_pos] == CONSONANT):
                    return False
                #print "CODA follows ONSET"
            if roles[pos].split("_")[0] == NUCLEUS and \
               roles[last_non_R_pos] == ONSET_NUCLEUS and labels[last_non_R_pos] == CONSONANT:
                return False
            #print "NUCLEUS follows GENERIC_VOWEL"
            if roles[pos].split("_")[0] == roles[last_non_R_pos].split("_")[-1]:
                # The unit continues through this letter, so it is part of
                # the unit being validated.
                end_of_unit = last_non_R_pos
                curr_role = roles[pos].split("_")[0]
                subsyl_unit = word[pos]
            else:
                if word[pos] not in ValidSubSylUnit[roles[pos].split("_")[0]]:
                    return False
                else:
                    end_of_unit = last_non_R_pos
                    curr_role = roles[last_non_R_pos].split("_")[-1]
    #print ("end_of_unit: %d" % end_of_unit)
    # Reassemble the subsyllabic unit by prepending letters sharing curr_role.
    if end_of_unit >= 0:
        for i in reversed(range(0, end_of_unit+1)):
            # if the role assigned to the letter at position i is REMOVE, skip the letter
            #print "i: " + str(i)
            if roles[i] == REMOVE:
                continue
            # Exception: if the current role of consideration is nucleus,
            # a consonant assigned a "*_N" role starts a new nucleus and is
            # not prepended to subsyl_unit.
            elif curr_role == NUCLEUS and labels[i] == CONSONANT \
                 and (roles[i] == NUCLEUS_NUCLEUS or roles[i] == CODA_NUCLEUS or roles[i] == ONSET_NUCLEUS):
                break
            # if the role assigned to the letter at position i is the same as
            # curr_role, prepend the letter to the subsyllabic unit of consideration
            elif roles[i].split("_")[-1] == curr_role:
                #print "roles[i][-1]: " + roles[i].split("_")[-1]
                subsyl_unit = word[i] + subsyl_unit
            # otherwise, a subsyllabic boundary is hit
            # check if the letter can still be prepended to the subsyllabic unit of consideration
            else:
                break
    #print ("Curr role: %s" % curr_role)
    #print ("Subsyllabic unit: %s" % subsyl_unit)
    # A unit spelled like a valid English consonant that plays the nucleus
    # role is accepted as-is.
    if subsyl_unit in ValidEnConsos and curr_role == NUCLEUS:
        return True
    # Check if the subsyl_unit is a valid subsyllabic unit of the role curr_role (onset, nucleus, coda)
    if subsyl_unit != "" and \
       subsyl_unit not in ValidSubSylUnit[curr_role]:
        return False
    return True
#---------------- CONSTRUCT SYLLABLES ------------------#
def construct_syls(word, labels, roles):
reconstructed_word = Word()
# Search for the first | |
<gh_stars>10-100
""" SamplingVolume_Widget module. """
# ISC License
#
# Copyright (c) 2020–2021, <NAME>, <NAME>. <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from typing import Optional, List, Tuple
import numpy as np
import qtawesome as qta
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QHBoxLayout, QLabel, QPushButton, QSpinBox, QComboBox, QSizePolicy, QWidget
from magneticalc.Constraint import Constraint
from magneticalc.Constraint_Editor import Constraint_Editor
from magneticalc.Debug import Debug
from magneticalc.IconLabel import IconLabel
from magneticalc.Groupbox import Groupbox
from magneticalc.HLine import HLine
from magneticalc.ModelAccess import ModelAccess
from magneticalc.OverridePadding_Dialog import OverridePadding_Dialog
from magneticalc.SamplingVolume import SamplingVolume
from magneticalc.Theme import Theme
class SamplingVolume_Widget(Groupbox):
    """ SamplingVolume_Widget class. """

    # Display settings: fixed pixel width of the unit labels.
    UnitsLabelWidth = 26

    # Spinbox limits. QSpinBox is integer-valued, so these must be ints:
    # the previous float literals (-1e+3 / +1e+3) relied on sip's deprecated
    # implicit float-to-int conversion in setMinimum()/setMaximum().
    PaddingMin = -1000
    PaddingMax = +1000

    # Resolution options: combobox label -> base-2 resolution exponent.
    ResolutionOptionsDict = {
        "256"     : 8,
        "128"     : 7,
        "64"      : 6,
        "32"      : 5,
        "16"      : 4,
        "8"       : 3,
        "4"       : 2,
        "2"       : 1,
        "1"       : 0,
        "1 / 2"   : -1,
        "1 / 4"   : -2,
        "1 / 8"   : -3,
        "1 / 16"  : -4,
        "1 / 32"  : -5,
        "1 / 64"  : -6,
        "1 / 128" : -7,
        "1 / 256" : -8,
    }
def __init__(self, gui):
"""
Populates the widget.
@param gui: GUI
"""
Groupbox.__init__(self, "Sampling Volume")
self.gui = gui
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Insert constraint editor, but don't fully initialize it yet
self.constraint_editor = Constraint_Editor(self.gui)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
padding_icon_label = IconLabel("mdi.arrow-expand-all", "Padding")
padding_override_button = QPushButton(" Override … ") # Leading and trailing spaces for padding
padding_override_button.clicked.connect(self.override_padding)
padding_override_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
padding_icon_label.addWidget(padding_override_button)
padding_clear_button = QPushButton()
padding_clear_button.setIcon(qta.icon("fa.eraser"))
padding_clear_button.clicked.connect(self.clear_padding)
padding_icon_label.addWidget(padding_clear_button)
padding_units_label = QLabel("cm")
padding_units_label.setAlignment(Qt.AlignRight)
padding_units_label.setFixedWidth(self.UnitsLabelWidth)
padding_icon_label.addWidget(padding_units_label)
self.addWidget(padding_icon_label)
self.padding_widget = QWidget()
padding_layout = QHBoxLayout()
padding_label = [None, None, None]
self.padding_spinbox = [None, None, None]
for i in range(3):
self.padding_spinbox[i] = QSpinBox(self.gui)
self.padding_spinbox[i].setMinimum(self.PaddingMin)
self.padding_spinbox[i].setMaximum(self.PaddingMax)
self.padding_spinbox[i].valueChanged.connect(self.update_padding)
padding_label[i] = QLabel(["X", "Y", "Z"][i] + ":")
padding_label[i].setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
padding_layout.addWidget(padding_label[i], alignment=Qt.AlignVCenter)
padding_layout.addWidget(self.padding_spinbox[i], alignment=Qt.AlignVCenter)
self.padding_widget.setLayout(padding_layout)
self.addWidget(self.padding_widget)
total_extent_layout = QHBoxLayout()
total_extent_label_left = QLabel("Total extent:")
total_extent_label_left.setStyleSheet(f"color: {Theme.LightColor}; font-style: italic;")
self.total_extent_label = QLabel("N/A")
self.total_extent_label.setStyleSheet(f"color: {Theme.PrimaryColor};")
self.total_extent_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
total_extent_layout.addWidget(total_extent_label_left, alignment=Qt.AlignVCenter)
total_extent_layout.addWidget(self.total_extent_label, alignment=Qt.AlignVCenter)
self.addLayout(total_extent_layout)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.addWidget(HLine())
constraints_icon_label = IconLabel("mdi.playlist-edit", "Constraints")
constraint_shortcut_label = QLabel("⟨F3⟩")
constraint_shortcut_label.setStyleSheet(f"font-size: 13px; color: {Theme.LightColor}")
constraints_icon_label.addWidget(constraint_shortcut_label)
constraint_edit_button = QPushButton("Edit …")
constraint_edit_button.clicked.connect(self.open_constraint_editor)
constraints_icon_label.addWidget(constraint_edit_button)
self.addWidget(constraints_icon_label)
total_constraints_layout = QHBoxLayout()
total_constraints_label_left = QLabel("Total constraints:")
total_constraints_label_left.setStyleSheet(f"color: {Theme.LightColor}; font-style: italic;")
self.total_constraints_label = QLabel("N/A")
self.total_constraints_label.setStyleSheet(f"color: {Theme.PrimaryColor};")
self.total_constraints_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
total_constraints_layout.addWidget(total_constraints_label_left, alignment=Qt.AlignVCenter)
total_constraints_layout.addWidget(self.total_constraints_label, alignment=Qt.AlignVCenter)
self.addLayout(total_constraints_layout)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.addWidget(HLine())
self.addWidget(IconLabel("fa.th", "Resolution"))
self.resolution_combobox = QComboBox()
resolution_layout = QHBoxLayout()
resolution_layout.addWidget(self.resolution_combobox, alignment=Qt.AlignVCenter)
self.resolution_units_label = QLabel(" Points / cm")
self.resolution_units_label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
resolution_layout.addWidget(self.resolution_units_label, alignment=Qt.AlignVCenter)
self.addLayout(resolution_layout)
# Populate resolution combobox
for i, value in enumerate(self.ResolutionOptionsDict):
self.resolution_combobox.addItem(str(value))
self.resolution_combobox.currentIndexChanged.connect(
lambda: self.set_sampling_volume(
resolution_exponent=self.ResolutionOptionsDict.get(
self.resolution_combobox.currentText(),
0
)
)
)
total_points_layout = QHBoxLayout()
total_points_label_left = QLabel("Total sampling points:")
total_points_label_left.setStyleSheet(f"color: {Theme.LightColor}; font-style: italic;")
self.total_points_label = QLabel("N/A")
self.total_points_label.setStyleSheet(f"color: {Theme.PrimaryColor};")
self.total_points_label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
total_points_layout.addWidget(total_points_label_left, alignment=Qt.AlignVCenter)
total_points_layout.addWidget(self.total_points_label, alignment=Qt.AlignVCenter)
self.addLayout(total_points_layout)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
self.reinitialize()
def reinitialize(self):
"""
Re-initializes the widget and the constraint editor
"""
Debug(self, ".reinitialize()")
self.blockSignals(True)
for i in range(3):
self.padding_spinbox[i].setValue(self.gui.config.get_point("sampling_volume_padding")[i])
# Set default resolution if it is not available anymore
target = self.gui.config.get_int("sampling_volume_resolution_exponent")
if target not in self.ResolutionOptionsDict.values():
Debug(
self,
f": Invalid: sampling_volume_resolution_exponent = {target}",
color=Theme.WarningColor,
force=True
)
self.gui.config.set_int("sampling_volume_resolution_exponent", 0)
# Select the resolution
target = self.gui.config.get_int("sampling_volume_resolution_exponent")
for i, value in enumerate(self.ResolutionOptionsDict.values()):
if value == target:
self.resolution_combobox.setCurrentIndex(i)
self.blockSignals(False)
# Re-initialize the constraint editor
self.constraint_editor.reinitialize()
# Initially load sampling volume from configuration
self.set_sampling_volume(recalculate=False, invalidate_self=False)
# ------------------------------------------------------------------------------------------------------------------
def update_padding(self):
"""
Updates padding.
"""
if self.signalsBlocked():
return
self.set_sampling_volume(padding=[self.padding_spinbox[i].value() for i in range(3)])
def clear_padding(self):
"""
Clears the padding values.
"""
self.gui.config.set_bool("sampling_volume_override_padding", False)
self.gui.config.set_points("sampling_volume_bounding_box", [np.zeros(3), np.zeros(3)])
self.set_sampling_volume(padding=[0, 0, 0])
self.blockSignals(True)
for i in range(3):
self.padding_spinbox[i].setValue(0)
self.blockSignals(False)
def override_padding(self) -> None:
"""
Override padding, setting the bounding box directly.
"""
dialog = OverridePadding_Dialog(self.gui)
dialog.show()
if not dialog.success:
return
# This needlessly also clears the override padding and bounding box config, but that's acceptable.
self.clear_padding()
self.set_sampling_volume(
override_padding=True,
bounding_box=[
[dialog.bounds_min_spinbox[i].value() for i in range(3)],
[dialog.bounds_max_spinbox[i].value() for i in range(3)]
]
)
self.update_controls()
# ------------------------------------------------------------------------------------------------------------------
def set_sampling_volume(
self,
resolution_exponent: Optional[int] = None,
label_resolution_exponent: Optional[int] = None,
padding: Optional[List] = None,
override_padding: Optional[bool] = None,
bounding_box: List[List] = None,
recalculate: bool = True,
invalidate_self: bool = True
):
"""
Sets the sampling volume. This will overwrite the currently set sampling volume in the model.
Parameters may be left set to None in order to load their default value.
@param resolution_exponent: Sampling volume resolution exponent
@param label_resolution_exponent: Sampling volume label resolution exponent
@param padding: Padding (3D point)
@param override_padding: Enable to override padding instead, setting the bounding box directly
@param bounding_box: Bounding box (used in conjunction with override_padding)
@param recalculate: Enable to trigger final re-calculation
@param invalidate_self: Enable to invalidate the old sampling volume before setting a new one
"""
if self.signalsBlocked():
return
with ModelAccess(self.gui, recalculate):
resolution_exponent = self.gui.config.set_get_int(
"sampling_volume_resolution_exponent",
resolution_exponent
)
label_resolution_exponent = self.gui.config.set_get_int(
"sampling_volume_label_resolution_exponent",
label_resolution_exponent
)
resolution = np.power(2.0, resolution_exponent)
label_resolution = np.power(2.0, label_resolution_exponent)
self.gui.model.set_sampling_volume(
SamplingVolume(resolution=resolution, label_resolution=label_resolution),
invalidate_self=invalidate_self
)
self.readjust(padding, override_padding, bounding_box)
# Load sampling volume constraints from configuration
for i, constraint in enumerate(self.constraint_editor.get_constraints()):
constraint = self.gui.config.set_get_dict(
prefix=f"constraint_",
suffix=f"_{i}",
types=self.constraint_editor.Constraint_Types,
values=None
)
self.gui.model.sampling_volume.add_constraint(
Constraint(
constraint["norm"],
constraint["comparison"],
constraint["min"],
constraint["max"],
constraint["permeability"]
)
)
if recalculate:
# The display widget depends on the sampling volume
self.gui.sidebar_right.display_widget.update()
# ------------------------------------------------------------------------------------------------------------------
def readjust(
self,
padding: Optional[List] = None,
override_padding: Optional[bool] = None,
bounding_box: Optional[Tuple[np.ndarray, np.ndarray]] = None,
):
"""
Readjusts the sampling volume to the currently set wire bounds and padding.
This also readjusts the minimum padding bounds in case the wire bounds have shrunk too far.
Padding may also be overridden instead, setting the bounding box directly.
Parameters may be left set to None in order to load their default value.
@param padding: Padding (3D point)
@param override_padding: Enable to override padding instead, setting the bounding box directly
@param bounding_box: Bounding box (used in conjunction with override_padding)
"""
Debug(self, ".readjust()")
override_padding = self.gui.config.set_get_bool("sampling_volume_override_padding", override_padding)
bounding_box = self.gui.config.set_get_points("sampling_volume_bounding_box", bounding_box)
if override_padding:
self.gui.model.sampling_volume.set_bounds_nearest(*bounding_box)
else:
self.gui.model.sampling_volume.set_bounds_nearest(*self.gui.model.wire.get_bounds())
bounds_min, | |
#!/usr/bin/python
##########################################################################
# Copyright (c) 2015, Salesforce.com, Inc.
# All rights reserved.
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of Salesforce.com nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##########################################################################
"""
**diskstats.py**
This plug-in reads /proc/diskstats and collects stats
for a sub-set of devices, such as sda, sdb, ..., fio,
md0, md1, md2, md3, etc.
In addition to collecting raw stats, plugin can cache
current value to determine time series derivatives (such as, iops, device
utilization, etc.) at next collection interval.
/proc/diskstats output shows one device stats per line and
each line looks like this:
252 1 dm-1 321244 0 12631594 480236 1104086 0 29567992 46637332 0 1841812 47117632
Meaning of these lines is explained in linux kernel
Documentation/iostat.txt (ignores first 3 fileds):
- Field 1 -- # of reads completed
This is the total number of reads completed successfully.
- Field 2 -- # of reads merged, field 6 -- # of writes merged
Reads and writes which are adjacent to each other may be merged for
efficiency. Thus two 4K reads may become one 8K read before it is
ultimately handed to the disk, and so it will be counted (and queued)
as only one I/O. This field lets you know how often this was done.
- Field 3 -- # of sectors read
This is the total number of sectors read successfully.
- Field 4 -- # of milliseconds spent reading
This is the total number of milliseconds spent by all reads (as
measured from __make_request() to end_that_request_last()).
- Field 5 -- # of writes completed
This is the total number of writes completed successfully.
- Field 7 -- # of sectors written
This is the total number of sectors written successfully.
- Field 8 -- # of milliseconds spent writing
This is the total number of milliseconds spent by all writes (as
measured from __make_request() to end_that_request_last()).
- Field 9 -- # of I/Os currently in progress
The only field that should go to zero. Incremented as requests are
given to appropriate struct request_queue and decremented as they finish.
- Field 10 -- # of milliseconds spent doing I/Os
This field is increases so long as field 9 is nonzero.
- Field 11 -- weighted # of milliseconds spent doing I/Os
This field is incremented at each I/O start, I/O completion, I/O
merge, or read of these stats by the number of I/Os in progress
(field 9) times the number of milliseconds spent doing I/O since the
last update of this field. This can provide an easy measure of both
I/O completion time and the backlog that may be accumulating.
"""
import collectd
import platform
import socket
import time
import re
### Globals ###
OS_NAME = platform.system()
# Fully-qualified host name, resolved once at import time.
HOST_NAME = socket.gethostbyaddr(socket.gethostname())[0]
# Column names of a /proc/diskstats line; the first 3 identify the device,
# the rest are the 11 raw counters described in the module docstring.
diskstat_fields = ['major', 'minor', 'device',
                   'reads_completed', 'reads_merged',
                   'sectors_read', 'time_spent_reading_ms',
                   'writes_completed', 'writes_merged',
                   'sectors_written', 'time_spent_writing_ms',
                   'inflight_ios', 'io_time_ms',
                   'weighted_time_spent_io']
# Derived (time-series) metric names reported by this plugin.
diskstat_metrics = ['iops_read', 'iops_write', 'iops_rw',
                    'bytes_ps_read', 'bytes_ps_write', 'bytes_ps_rw',
                    'bytes_per_read', 'bytes_per_write', 'bytes_per_rw',
                    'await_read', 'await_write', 'await_rw',
                    'util_pct', 'avgqu_sz', 'svc_tm']
config = {}
# Device names selected for monitoring (populated by get_dev_list()).
dev_list = []
# Optional user-configured regexes limiting which devices are monitored.
device_filter_regexes = []
filtered_metrics = []
DISKSTATS_FNAME = '/proc/diskstats'
METRIC_PLUGIN = 'diskstats_telemetry'
METRIC_TYPE = 'gauge'
DISK_FILTER = 'DiskFilter'
METRIC_FILTER = 'Filter'
# previous and current stats for derivative metrics
dev_stats_cache = {}
dev_stats_current = {}
# we should get it from /sys/block/fioa/queue/pysical_block_size
dev_blk_sz = 512
one_K = 1024
def get_dev_list():
    """Populate the global dev_list, honoring configured device filters.

    Delegates to get_filtered_dev_list() when any filter regexes were
    configured, otherwise discovers the default device set.
    """
    # An empty (or unset) filter list means "monitor the default devices".
    if device_filter_regexes:
        get_filtered_dev_list()
    else:
        get_default_dev_list()
def get_filtered_dev_list():
    """Populate the global dev_list with devices matching a filter regex.

    Reads /proc/diskstats and appends every device whose name matches at
    least one of the configured device_filter_regexes.
    """
    with open(DISKSTATS_FNAME) as f:
        for line in f:
            fields = [fl.strip() for fl in line.split()]
            devname = fields[2]
            for regex in device_filter_regexes:
                if re.match(regex, devname):
                    dev_list.append(devname)
                    # BUGFIX: was 'continue' (a no-op as the last statement
                    # of the loop), which appended the same device once per
                    # matching regex; 'break' adds it exactly once.
                    break
def get_default_dev_list():
    """
    This function determines a device list for this plugin monitoring.
    There are a number of non-storage devices in /proc/diskstats output
    that need to be skipped. These include loop, ram, and possibly
    other devices and partitions.
    Args:
        None
    Returns:
        Updated global dev_list
    """
    # Names that are not whole physical/virtual disks: loop/ram/sr
    # pseudo-devices, disk partitions (e.g. sda1), and md/dm RAID devices.
    # Raw strings avoid invalid-escape warnings for '\d' in Python 3.
    skip_patterns = (r'^loop', r'^ram', r'^sr',
                     r'^[hs]d[a-z]+\d+', r'^md[0-9]+', r'^dm-[0-9]+')
    with open(DISKSTATS_FNAME) as f:
        for line in f:
            fields = [fl.strip() for fl in line.split()]
            devname = fields[2]
            if not any(re.match(pattern, devname) for pattern in skip_patterns):
                dev_list.append(devname)
    # (the redundant f.close() inside the 'with' block was removed;
    # the context manager already closes the file)
    collectd.info('diskstats get_default_dev_list: dev_list: --- %s\n'
                  % (dev_list))
def init_dev_stats_cache():
    """Seed the stats cache with a first sample so derivative metrics can
    be computed on the next collection interval."""
    global dev_stats_cache
    dev_stats_cache = collect_diskstats()
def collect_diskstats():
    """
    Collect statistics for devices in global dev_list from /proc/diskstats
    Args: None
    Returns: A dictionary collection of device specific raw statistics that
             includes timestamps, keyed by (device_name, field_name)
    """
    device_stats = {}
    with open(DISKSTATS_FNAME) as f:
        for line in f:
            fields = [fl.strip() for fl in line.split()]
            dev_name = fields[2]
            # BUGFIX: was any(dev_name in s for s in dev_list) — a substring
            # test that also matched unrelated devices (e.g. 'md1' inside
            # 'md10'). Exact membership is the intent.
            if dev_name in dev_list:
                # Fields 3..13 are the 11 raw counters; store each under
                # its symbolic name from diskstat_fields.
                for i in range(3, 14):
                    device_stats[(dev_name, diskstat_fields[i])] = fields[i]
                # Timestamp each sample for time-delta calculations.
                device_stats[(dev_name, 'ts')] = time.time()
    # (redundant f.close() inside the 'with' block removed)
    return device_stats
def swap_current_cache():
    """Promote the current sample to the cache (previous sample) before a
    new collection overwrites dev_stats_current."""
    global dev_stats_cache
    dev_stats_cache = dev_stats_current.copy()
def calc_del_t(dev):
    """Return the elapsed time in seconds between the cached (previous)
    and current samples for *dev*."""
    current_ts = float(dev_stats_current[(dev, 'ts')])
    previous_ts = float(dev_stats_cache[(dev, 'ts')])
    return current_ts - previous_ts
def calc_metrics(dev):
# time_delta is in seconds
time_delta = calc_del_t(dev)
# read and write operation that actually complete
cur_r = int(dev_stats_current[(dev, 'reads_completed')])
pre_r = int(dev_stats_cache[(dev, 'reads_completed')])
cur_w = int(dev_stats_current[(dev, 'writes_completed')])
pre_w = int(dev_stats_cache[(dev, 'writes_completed')])
# read and write blocks
cur_sec_r = int(dev_stats_current[(dev, 'sectors_read')])
pre_sec_r = int(dev_stats_cache[(dev, 'sectors_read')])
cur_sec_w = int(dev_stats_current[(dev, 'sectors_written')])
pre_sec_w = int(dev_stats_cache[(dev, 'sectors_written')])
# read and write times in msec. These are 32-bit counters and
# monotonically increase except on overflow, when they restart
# from zero.
cur_t_r = float(dev_stats_current[(dev, 'time_spent_reading_ms')])
pre_t_r = float(dev_stats_cache[(dev, 'time_spent_reading_ms')])
cur_t_w = float(dev_stats_current[(dev, 'time_spent_writing_ms')])
pre_t_w = float(dev_stats_cache[(dev, 'time_spent_writing_ms')])
# note that io_time_ms is NOT equal to time_spent_reading +
# time_spent_writing.
# io_time_ms measures the time device is in use
cur_t_io = float(dev_stats_current[(dev, 'io_time_ms')])
pre_t_io = float(dev_stats_cache[(dev, 'io_time_ms')])
# weighted io times for calculating backlog.
# Will go to zero on 32-bit counter overflow.
cur_t_rq = float(dev_stats_current[(dev, 'weighted_time_spent_io')])
pre_t_rq = float(dev_stats_cache[(dev, 'weighted_time_spent_io')])
# number of reads and writes
nr_r = cur_r - pre_r
nr_w = cur_w - pre_w
nr_rw = nr_r + nr_w
# number of sectors read and written
nr_sec_r = cur_sec_r - pre_sec_r
nr_sec_w = cur_sec_w - pre_sec_w
nr_sec_rw = nr_sec_r + nr_sec_w
# read and write times in seconds
t_r = (cur_t_r - pre_t_r)/1000.0
t_w = (cur_t_w - pre_t_w)/1000.0
t_rw = t_r + t_w
t_io = (cur_t_io - pre_t_io)/1000.0
t_rq = (cur_t_rq - pre_t_rq)/1000.0
# iops
iops_r = nr_r/time_delta if (nr_r >= 0 and time_delta > 0.0) else None
iops_w = nr_w/time_delta if (nr_w >= 0 and time_delta > 0.0) else None
try:
iops = iops_r + iops_w
except:
iops = None
# read/write bytes/second
bps_r = (nr_sec_r/time_delta)*dev_blk_sz if (nr_sec_r >= 0 and
time_delta > 0.0) else None
bps_w = (nr_sec_w/time_delta)*dev_blk_sz if (nr_sec_w >= 0 and
time_delta > 0.0) else None
try:
bps = bps_r + bps_w
except:
bps = None
# request sizes in bytes per read, write, and all operations
sz_r = (nr_sec_r * dev_blk_sz)/(nr_r) if (nr_sec_r >= 0 and
nr_r > 0) else None
sz_w = (nr_sec_w * dev_blk_sz)/(nr_w) if (nr_sec_w >= 0 and
nr_w > 0) else None
sz = (nr_sec_rw * dev_blk_sz)/(nr_rw) if (nr_sec_rw >= 0 and
nr_rw > 0) else None
# average time for read and write operations
await_r | |
<gh_stars>1-10
import math
from collections import namedtuple
import AppKit
import defcon
from booleanOperations.booleanGlyph import BooleanGlyph
from lib.tools import bezierTools
from mojo.events import installTool, BaseEventTool, extractNSEvent, addObserver
from mojo.UI import getDefault
from mojo.extensions import getExtensionDefault
from .editor import GuidelineEditorController
from .images import guideToolToolbarIcon, guideToolCursor
from .defaults import extensionIdentifier
# Key characters that should trigger deletion of selected guidelines.
deleteKeys = [
    AppKit.NSBackspaceCharacter,
    AppKit.NSDeleteFunctionKey,
    AppKit.NSDeleteCharacter,
    chr(0x007F),  # ASCII DEL
]
# Guideline angles (degrees) treated as horizontal / vertical.
horizontalAngles = (0, 180)
verticalAngles = (90, 270)
rightAngles = horizontalAngles + verticalAngles
class GuidelineTool(BaseEventTool):
    # Selected guideline -> state captured at selection time (used as the
    # drag reference). NOTE(review): states come from getGuidelineState,
    # defined outside this view — confirm its exact fields.
    selectedGuidelines = {}
    # Selected guideline -> highlight layer currently shown for it.
    selectedGuidelineLayers = {}
    # True while a marquee (rect) selection drag is in progress.
    inRectSelection = False
    # True while selected guidelines are being dragged.
    isDraggingGuidelines = False
    # Location of the last mouse-down; None when no drag is active.
    mouseDownPoint = None
    # Points currently snapped to during a drag.
    snappingToThesePoints = []
    snapToPointSymbolColor = None
    # Parents that already received prepareUndo during the current mouse
    # sequence (reset on each mouse-down).
    mouseSequenceUndoCoalescing = None
    # Fallback settings; overwritten by loadDefaults() from the extension
    # defaults.
    wantItalicAngle = True
    wantsSnapToPoint = True
    wantsSnapToFuturePoints = True
    highlightAlphaScale = 0.15
    def setup(self):
        """Create the tool's background layers and subscribe to extension
        notifications when the tool is installed."""
        self.loadDefaults()
        container = self.extensionContainer(
            identifier=extensionIdentifier + ".background",
            location="background",
            clear=True
        )
        # Marquee rectangle shown during rect selection.
        self.marqueeLayer = container.appendRectangleSublayer(
            name="marquee",
            fillColor=self.colorMarquee
        )
        # Parent layer for per-guideline selection highlights.
        self.selectionIndicatorLayer = container.appendBaseSublayer(
            name="selectionIndicator"
        )
        # Parent layer for snap-to-point symbols shown while dragging.
        self.snapToPointsLayer = container.appendBaseSublayer(
            name="snapIndicator"
        )
        addObserver(
            self,
            "extensionDefaultsChanged",
            extensionIdentifier + ".defaultsChanged"
        )
        addObserver(
            self,
            "guidelineEditedInEditor",
            extensionIdentifier + ".guidelineEditedInEditor"
        )
    def loadDefaults(self):
        """Cache editor defaults and this extension's settings as instance
        attributes; called from setup() and whenever defaults change."""
        self.colorMarquee = getDefault("glyphViewSelectionMarqueColor")
        self.roundValuesTo = getDefault("glyphViewRoundValues", defaultClass=int)
        self.arrowIncrement = float(getDefault("glyphViewIncrement"))
        self.arrowShiftIncrement = float(getDefault("glyphViewShiftIncrement"))
        self.arrowCommandShiftIncrement = float(getDefault("glyphViewCommandShiftIncrement"))
        self.colorGlyphGuideline = getDefault("glyphViewLocalGuidesColor")
        self.colorFontGuideline = getDefault("glyphViewGlobalGuidesColor")
        # Highlight/symbol sizes are derived from the editor stroke width.
        self.highlightStrokeWidth = getDefault("glyphViewStrokeWidth") * 10
        self.snapToPointSymbolSize = getDefault("glyphViewStrokeWidth") * 40
        self.snapToPointSymbolSettings = dict(
            name="star",
            size=(self.snapToPointSymbolSize, self.snapToPointSymbolSize),
            pointCount=10,
            inner=0.2
        )
        self.wantItalicAngle = getExtensionDefault(extensionIdentifier + ".wantItalicAngle")
        self.wantsSnapToPoint = getExtensionDefault(extensionIdentifier + ".snapToPoint")
        self.wantsSnapToFuturePoints = getExtensionDefault(extensionIdentifier + ".snapToFuturePoint")
        self.wantsHapticFeedbackOnSnapTo = getExtensionDefault(extensionIdentifier + ".hapticFeedbackOnSnapTo")
        self.highlightAlphaScale = getExtensionDefault(extensionIdentifier + ".highlightAlphaScale")
    def getToolbarIcon(self):
        """Return the toolbar icon for this tool."""
        return guideToolToolbarIcon
    def getDefaultCursor(self):
        """Return the cursor shown while this tool is active."""
        return guideToolCursor
    def preferencesChanged(self):
        """Editor preferences changed: reload defaults and re-apply the
        marquee fill color."""
        self.loadDefaults()
        self.marqueeLayer.setFillColor(self.colorMarquee)
    def extensionDefaultsChanged(self, event):
        """Extension settings changed: reload defaults and refresh the
        highlight colors of selected guidelines."""
        self.loadDefaults()
        self.updateSelectedGuidelineColors()
    def currentGlyphChanged(self):
        """Drop any guideline selection when the edited glyph changes."""
        self.deselectAll()
def didUndo(self, notification):
glyph = self.getGlyph()
font = glyph.font
glyphGuidelines = glyph.guidelines
fontGuidelines = font.guidelines
remove = []
for guideline in self.selectedGuidelines.keys():
if guideline in glyphGuidelines:
continue
if guideline in fontGuidelines:
continue
remove.append(guideline)
for guideline in remove:
del self.selectedGuidelines[guideline]
self.displaySelectedGuidelines()
    def becomeActive(self):
        """Tool activated; nothing to do beyond what setup() prepared."""
        pass
    def becomeInactive(self):
        """Tool deactivated: clear any guideline selection."""
        self.deselectAll()
# Display
    def displaySnapToPoints(self):
        """Show a symbol at every point currently being snapped to.

        Sublayers are diffed incrementally against the desired point set;
        haptic feedback fires when a new snap point appears (if enabled).
        """
        container = self.snapToPointsLayer
        if not self.snappingToThesePoints:
            container.clearSublayers()
        else:
            imageSettings = self.snapToPointSymbolSettings
            imageSettings["size"] = (self.snapToPointSymbolSize, self.snapToPointSymbolSize)
            imageSettings["fillColor"] = self.snapToPointSymbolColor
            # Diff: 'needed' ends up as points with no symbol yet; 'remove'
            # collects symbols whose point is no longer snapped to.
            needed = list(self.snappingToThesePoints)
            remove = []
            for symbol in container.getSublayers():
                point = symbol.getPosition()
                if point in needed:
                    needed.remove(point)
                else:
                    remove.append(symbol)
            if needed or remove:
                with container.sublayerGroup():
                    for point in needed:
                        container.appendSymbolSublayer(
                            position=point,
                            imageSettings=imageSettings
                        )
                    for symbol in remove:
                        container.removeSublayer(symbol)
            if needed and self.wantsHapticFeedbackOnSnapTo:
                performer = AppKit.NSHapticFeedbackManager.defaultPerformer()
                performer.performFeedbackPattern_performanceTime_(
                    AppKit.NSHapticFeedbackPatternAlignment,
                    AppKit.NSHapticFeedbackPerformanceTimeDefault
                )
    def displaySelectedGuidelines(self):
        """Sync highlight layers with self.selectedGuidelines.

        Removes layers for deselected guidelines, updates geometry/color of
        layers that remain selected, and appends layers for new selections.
        """
        container = self.selectionIndicatorLayer
        # clear all
        if not self.selectedGuidelines:
            container.clearSublayers()
            self.selectedGuidelineLayers = {}
            return
        # clear subset
        remove = [
            guideline
            for guideline in self.selectedGuidelineLayers.keys()
            if guideline not in self.selectedGuidelines
        ]
        if remove:
            with container.sublayerGroup():
                for guideline in remove:
                    layer = self.selectedGuidelineLayers.pop(guideline)
                    container.removeSublayer(layer)
        add = []
        for guideline in self.selectedGuidelines.keys():
            if guideline in self.selectedGuidelineLayers:
                # Existing layer: refresh geometry and color in place.
                layer = self.selectedGuidelineLayers[guideline]
                with layer.propertyGroup():
                    startPoint, endPoint = getGuidelinePathPoints(guideline)
                    color = getGuidelineHighlightColor(
                        guideline,
                        self.colorFontGuideline,
                        self.colorGlyphGuideline,
                        self.highlightAlphaScale
                    )
                    layer.setStartPoint(startPoint)
                    layer.setEndPoint(endPoint)
                    layer.setStrokeColor(color)
                    layer.setPosition((guideline.x, guideline.y))
            else:
                add.append(guideline)
        if add:
            with container.sublayerGroup():
                for guideline in add:
                    startPoint, endPoint = getGuidelinePathPoints(guideline)
                    color = getGuidelineHighlightColor(
                        guideline,
                        self.colorFontGuideline,
                        self.colorGlyphGuideline,
                        self.highlightAlphaScale
                    )
                    self.selectedGuidelineLayers[guideline] = container.appendLineSublayer(
                        position=(guideline.x, guideline.y),
                        startPoint=startPoint,
                        endPoint=endPoint,
                        strokeColor=color,
                        strokeWidth=self.highlightStrokeWidth
                    )
    def guidelineEditedInEditor(self, event):
        """Refresh the selection display when a selected guideline was
        modified in the external guideline editor."""
        guideline = event["guideline"]
        if guideline in self.selectedGuidelines:
            self.displaySelectedGuidelines()
def updateSelectedGuidelineColors(self):
for guideline, layer in self.selectedGuidelineLayers.items():
color = getGuidelineHighlightColor(
guideline,
self.colorFontGuideline,
self.colorGlyphGuideline,
self.highlightAlphaScale
)
layer.setStrokeColor(color)
def displayMarquee(self, point=None):
skip = False
if not self.inRectSelection:
skip = True
elif self.mouseDownPoint is None:
skip = True
elif point is None:
skip = True
if skip:
self.marqueeLayer.setVisible(False)
return
x1, y1 = self.mouseDownPoint
x2, y2 = point
x1, x2 = sorted((x1, x2))
y1, y2 = sorted((y1, y2))
w = x2 - x1
h = y2 - y1
with self.marqueeLayer.propertyGroup():
self.marqueeLayer.setVisible(True)
self.marqueeLayer.setPosition((x1, y1))
self.marqueeLayer.setSize((w, h))
# Editor
    def openGuidelineEditor(self):
        """Open the guideline editor sheet for the selected guideline.

        Does nothing unless exactly one guideline is selected.
        """
        if len(self.selectedGuidelines) != 1:
            return
        # NOTE(review): self.selectedGuideline is defined outside this view;
        # presumably a property returning the sole selected guideline.
        guideline = self.selectedGuideline
        glyph = self.getGlyph()
        glyphEditor = self.getNSView()
        GuidelineEditorController(guideline, glyph, glyphEditor)
# Interaction
def getVisibleRect(self):
(vX, vY), (vW, vH) = self.getNSView().scaledVisibleRect()
xO, yO = self.getNSView()._offset
vX -= xO
vY -= yO
return ((vX, vY), (vW, vH))
    def mouseDown(self, point, clickCount):
        """Handle a mouse-down in the glyph view.

        Depending on what is under the cursor and the active modifiers this
        selects/deselects guidelines, starts a guideline drag, creates a new
        guideline (double-click, or a drag out of a ruler strip), or begins
        a marquee selection.
        """
        # Parents that already received prepareUndo during this mouse
        # sequence; avoids stacking one undo state per event.
        self.mouseSequenceUndoCoalescing = set()
        # NOTE(review): assigned but never read in the visible code —
        # possibly vestigial.
        shouldIgnoreFollowingMouseEvents = False
        self.mouseDownPoint = point
        self.isDraggingGuidelines = False
        self.inRectSelection = False
        modifiers = self.getModifiers()
        shiftDown = modifiers["shiftDown"]
        capLockDown = modifiers["capLockDown"]
        commandDown = modifiers["commandDown"]
        optionDown = modifiers["optionDown"]
        controlDown = modifiers["controlDown"]
        hitGuideline = self.findGuidelineAtPoint(point)
        if hitGuideline:
            # open editor
            if clickCount == 2:
                self.selectedGuidelines = {
                    hitGuideline : getGuidelineState(hitGuideline)
                }
                self.openGuidelineEditor()
            # modifying selected guides
            elif shiftDown:
                # remove
                if hitGuideline in self.selectedGuidelines:
                    del self.selectedGuidelines[hitGuideline]
                # add
                else:
                    self.selectedGuidelines[hitGuideline] = getGuidelineState(hitGuideline)
                    self.isDraggingGuidelines = True
            # initiating drag
            elif hitGuideline in self.selectedGuidelines:
                self.isDraggingGuidelines = True
            # selecting new
            else:
                self.selectedGuidelines = {
                    hitGuideline : getGuidelineState(hitGuideline)
                }
                self.isDraggingGuidelines = True
        else:
            isInRuler, rulerData = self.isInRuler(point)
            # add new
            if clickCount == 2:
                x, y = point
                if optionDown:
                    # option double-click: vertical guideline at the click x
                    angle = self.getVerticalAngle()
                    y = 0
                else:
                    # plain double-click: horizontal guideline at the click y
                    angle = 0
                    x = 0
                x = bezierTools.roundValue(x, self.roundValuesTo)
                y = bezierTools.roundValue(y, self.roundValuesTo)
                dest = self.getGlyph()
                if commandDown:
                    # command: create a font-level guideline instead
                    dest = dest.font
                    # XXX can't put an undo state for this on the font
                if not commandDown:
                    if dest not in self.mouseSequenceUndoCoalescing:
                        dest.prepareUndo("Add guideline")
                        self.mouseSequenceUndoCoalescing.add(dest)
                guideline = dest.appendGuideline(
                    position=(x, y),
                    angle=angle
                )
                self.selectedGuidelines = {
                    guideline : getGuidelineState(guideline)
                }
            # drag from ruler
            elif isInRuler:
                x, y = rulerData["point"]
                angle = rulerData["angle"]
                x = bezierTools.roundValue(x, self.roundValuesTo)
                y = bezierTools.roundValue(y, self.roundValuesTo)
                dest = self.getGlyph()
                if commandDown:
                    dest = dest.font
                    # XXX can't put an undo state for this on the font
                if not commandDown:
                    if dest not in self.mouseSequenceUndoCoalescing:
                        dest.prepareUndo("Move guideline")
                        self.mouseSequenceUndoCoalescing.add(dest)
                guideline = dest.appendGuideline(
                    position=(x, y),
                    angle=angle
                )
                self.selectedGuidelines = {
                    guideline : getGuidelineState(guideline)
                }
                # the new guideline follows the cursor immediately
                self.isDraggingGuidelines = True
            # deselect and start marquee
            else:
                self.selectedGuidelines = {}
                self.inRectSelection = True
        self.displaySelectedGuidelines()
def isInRuler(self, point):
ruler = 20
scale = self.getNSView().inverseScale()
ruler *= scale
x, y = point
(vX, vY), (vW, vH) = self.getVisibleRect()
leftMin = vX
leftMax = vX + ruler
rightMax = vX + vW
rightMin = rightMax - ruler
topMax = vY + vH
topMin = topMax - ruler
bottomMin = vY
bottomMax = vY + ruler
angle = None
# top
if y >= topMin and y <= topMax:
angle = 0
x = 0
# bottom
elif y >= bottomMin and y <= bottomMax:
angle = 0
x = 0
# left
elif x >= leftMin and x <= leftMax:
angle = self.getVerticalAngle()
if angle == 90:
y = 0
# right
elif x >= rightMin and x <= rightMax:
angle = self.getVerticalAngle()
if angle == 90:
y = 0
isInRuler = angle is not None
data = dict(
angle=angle,
point=(x, y)
)
return isInRuler, data
def mouseDragged(self, point, delta):
modifiers = self.getModifiers()
shiftDown = modifiers["shiftDown"]
capLockDown = modifiers["capLockDown"]
commandDown = modifiers["commandDown"]
optionDown = modifiers["optionDown"]
controlDown = modifiers["controlDown"]
# marquee selection
if self.inRectSelection:
self.selectedGuidelines = self.findGuidelinesIntersectedBySelectionRect()
# editing
elif self.isDraggingGuidelines:
# origin editing
if commandDown and len(self.selectedGuidelines) == 1:
guideline = self.selectedGuideline
parent = getGuidelineParentForUndo(guideline)
if parent not in self.mouseSequenceUndoCoalescing:
self.mouseSequenceUndoCoalescing.add(parent)
parent.prepareUndo("Move Guideline")
x, y = point
x = bezierTools.roundValue(x, self.roundValuesTo)
y = bezierTools.roundValue(y, self.roundValuesTo)
guideline.x = x
guideline.y = y
# angle editing
elif optionDown and len(self.selectedGuidelines) == 1:
x, y = point
guideline = self.selectedGuideline
if bezierTools.distanceFromPointToPoint((guideline.x, guideline.y), (x, y)) > 5:
parent = getGuidelineParentForUndo(guideline)
if parent not in self.mouseSequenceUndoCoalescing:
self.mouseSequenceUndoCoalescing.add(parent)
parent.prepareUndo("Change Guideline Angle")
guideline.angle = bezierTools.calculateAngle(
(guideline.x, guideline.y),
(x, y)
)
# dragging
else:
# dragging 1
if len(self.selectedGuidelines) == 1:
x, y = point
guideline = self.selectedGuideline
state = self.selectedGuidelines[guideline]
dx = x - state.x
dy = y - state.y
# dragging > 1
else:
sx, sy = self.mouseDownPoint
x, y = point
dx = x - sx
dy = y - sy
self.snappingToThesePoints = set()
snapTo = self.wantsSnapToPoint
if snapTo:
snapTo = len(self.selectedGuidelines) == 1
for guideline, state in self.selectedGuidelines.items():
parent = getGuidelineParentForUndo(guideline)
if parent not in self.mouseSequenceUndoCoalescing:
self.mouseSequenceUndoCoalescing.add(parent)
parent.prepareUndo("Move Guideline")
x = state.x
y = state.y
angle = state.angle
if angle in horizontalAngles:
y += dy
elif angle in verticalAngles:
x += dx
else:
x += dx
y += dy
x = bezierTools.roundValue(x, self.roundValuesTo)
y = bezierTools.roundValue(y, self.roundValuesTo)
if snapTo:
self.snappingToThesePoints = self.findSnapToPoints((x, y), angle)
self.snapToPointSymbolColor = getGuidelineHighlightColor(
guideline,
self.colorFontGuideline,
self.colorGlyphGuideline,
self.highlightAlphaScale
)
if self.snappingToThesePoints:
snapTo = list(self.snappingToThesePoints)[0]
if angle in horizontalAngles:
y = snapTo[1]
elif angle in verticalAngles:
x = snapTo[0]
else:
x, y = snapTo
| |
<reponame>hubo1016/vlcp
import logging
import copy
from uuid import uuid1
from vlcp.server.module import Module,depend,api,call_api
from vlcp.event.runnable import RoutineContainer
from vlcp.utils.networkmodel import *
from vlcp.utils.dataobject import watch_context,set_new,dump,ReferenceObject,\
request_context, create_new
from vlcp.utils.ethernet import ip4_addr
from vlcp.utils.netutils import format_network_cidr,check_ip_address, parse_ip4_network, ip_in_network,\
parse_ip4_address
import vlcp.service.kvdb.objectdb as objectdb
from vlcp.config.config import defaultconfig
from pychecktype.checked import checked
from pychecktype import tuple_
from vlcp.utils.typelib import ip_address_type, cidr_nonstrict_type
from collections import OrderedDict
from contextlib import suppress
from vlcp.utils.exceptions import WalkKeyNotRetrieved
@depend(objectdb.ObjectDB)
@defaultconfig
class VRouterApi(Module):
"""
Standard network model for L3 SDN
"""
    def __init__(self,server):
        """Create the module and register the virtual-router APIs on its
        routine container."""
        super(VRouterApi,self).__init__(server)
        self.app_routine = RoutineContainer(self.scheduler)
        # Monotonically increasing counter used to build unique ObjectDB
        # request IDs.
        self._reqid = 0
        self.createAPI(api(self.createvirtualrouter,self.app_routine),
                       api(self.createvirtualrouters,self.app_routine),
                       api(self.updatevirtualrouter,self.app_routine),
                       api(self.updatevirtualrouters,self.app_routine),
                       api(self.deletevirtualrouter,self.app_routine),
                       api(self.deletevirtualrouters,self.app_routine),
                       api(self.listvirtualrouters,self.app_routine),
                       api(self.addrouterinterface,self.app_routine),
                       api(self.addrouterinterfaces,self.app_routine),
                       api(self.removerouterinterface,self.app_routine),
                       api(self.removerouterinterfaces,self.app_routine),
                       api(self.listrouterinterfaces,self.app_routine))
    async def _dumpkeys(self, keys, filter=None):
        """Fetch the objects for *keys* from ObjectDB and dump them to dicts.

        :param keys: ObjectDB keys to retrieve
        :param filter: optional dict of attribute/value pairs; when given,
                       only objects whose attributes all match are returned
        :return: list of dumped dictionaries
        """
        self._reqid += 1
        reqid = ('virtualrouter', self._reqid)
        with request_context(reqid, self.app_routine):
            retobjs = await call_api(self.app_routine,'objectdb','mget',{'keys':keys,'requestid':reqid})
            if filter is None:
                return [dump(o) for o in retobjs]
            else:
                return [dump(o) for o in retobjs if o is not None and all(getattr(o, k, None) == v for k, v in filter.items())]
    async def _dumpone(self, key, filter):
        """Dump the single object identified by *key*, applying *filter*."""
        return await self._dumpkeys([key], filter)
async def createvirtualrouter(self, id: (str, None) = None,
**kwargs: {"?routes": [tuple_((cidr_nonstrict_type, ip_address_type))]}):
"""
Create a virtual router
:param id: Virtual router id. If omitted, an UUID is generated.
:param \*\*kwargs: extra attributes for creation.
:return: A dictionary of information of the virtual router.
"""
if not id:
id = str(uuid1())
router = {"id":id}
router.update(kwargs)
return await self.createvirtualrouters([router])
    @checked
    async def createvirtualrouters(self, routers: [{"?id": str,
                                                    "?routes": [tuple_((cidr_nonstrict_type, ip_address_type))]}]):
        """
        Create multiple virtual routers in a transaction

        :param routers: list of router descriptions; each may carry an
                        'id' (a UUID is generated when omitted) and optional
                        'routes' as (prefix, nexthop) pairs.
        :return: list of dumped dictionaries, one per created router.
        :raises ValueError: when the same explicit ID appears twice.
        """
        idset = set()
        newrouters = []
        for router in routers:
            # Deep copy so the caller's dicts are never mutated.
            router = copy.deepcopy(router)
            if "id" not in router:
                router["id"] = str(uuid1())
            else:
                if router["id"] in idset:
                    raise ValueError("Repeated ID: " + router['id'])
                else:
                    idset.add(router['id'])
            newrouters.append(router)
        routerkeys = [VRouter.default_key(r['id']) for r in newrouters]
        routersetkey = [VRouterSet.default_key()]
        routerobjects = [self._createvirtualrouter(**r) for r in newrouters ]
        def createrouterdb(keys,values):
            # Transaction updater: values[0] is the VRouterSet index; the
            # remaining slots (in routerkeys order) receive the new routers.
            routerset = values[0]
            for i in range(0,len(routerkeys)):
                values[i+1] = set_new(values[i+1],routerobjects[i])
                routerset.set.dataset().add(routerobjects[i].create_weakreference())
            return keys,values
        await call_api(self.app_routine,"objectdb","transact",
                       {"keys":routersetkey+routerkeys,"updater":createrouterdb})
        return await self._dumpkeys(routerkeys)
def _createvirtualrouter(self,id,**kwargs):
router = VRouter.create_instance(id)
# [(prefix, nexthop),(prefix, nexthop)]
if 'routes' in kwargs:
router.routes.extend(kwargs['routes'])
for k,v in kwargs.items():
if k != "routes":
setattr(router,k,v)
return router
async def updatevirtualrouter(self, id: str, **kwargs: {"?routes": [tuple_((cidr_nonstrict_type, ip_address_type))]}):
"""
Update virtual router
"""
if not id:
raise ValueError("must specify id")
router = {"id":id}
router.update(kwargs)
return await self.updatevirtualrouters([router])
    @checked
    async def updatevirtualrouters(self, routers: [{"id": str,
                                                    "?routes": [tuple_((cidr_nonstrict_type, ip_address_type))]}]):
        """Update multiple virtual routers in one transaction.

        :param routers: list of dicts; 'id' is mandatory, the other keys are
                        attributes to set ('routes' replaces the route list).
        :return: list of dumped router dictionaries after the update.
        :raises ValueError: on a missing/repeated ID or an unknown router.
        """
        idset = set()
        for router in routers:
            if 'id' not in router:
                raise ValueError("must specify id")
            else:
                if router['id'] in idset:
                    raise ValueError("Repeated ID: " + router['id'])
                else:
                    idset.add(router['id'])
        routerkeys = [VRouter.default_key(r['id']) for r in routers]
        def updaterouter(keys,values):
            # Transaction updater: values[i] is the stored VRouter for
            # routers[i]; a falsy value means the router does not exist.
            for i in range(0,len(routers)):
                if values[i]:
                    for k,v in routers[i].items():
                        if k == 'routes':
                            # Replace the route list wholesale; deep copy
                            # keeps the stored object independent of the
                            # caller's data.
                            values[i].routes[:] = copy.deepcopy(v)
                        else:
                            setattr(values[i], k, v)
                else:
                    raise ValueError("Virtual router not exists: " + routers[i]['id'])
            return keys,values
        await call_api(self.app_routine,'objectdb','transact',
                       {'keys':routerkeys,'updater':updaterouter})
        return await self._dumpkeys(routerkeys)
async def deletevirtualrouter(self, id: str):
"Delete virtual router"
if not id:
raise ValueError("must specify id")
router = {"id":id}
return await self.deletevirtualrouters([router])
@checked
async def deletevirtualrouters(self, routers: [{"id": str}]):
"Delete multiple virtual routers"
idset = set()
for router in routers:
if 'id' not in router:
raise ValueError("must specify id")
else:
if router['id'] in idset:
raise ValueError("Repeated ID: " + router['id'])
else:
idset.add(router['id'])
routersetkey = [VRouterSet.default_key()]
routerkeys = [VRouter.default_key(v['id']) for v in routers]
def deletevrouter(keys,values):
routerset = values[0]
for i in range(0,len(routers)):
if values[i+1].interfaces.dataset():
raise ValueError("there are still interface(s) in this router, delete them first")
routerset.set.dataset().discard(values[i+1].create_weakreference())
return keys,[routerset] + [None] * len(routers)
await call_api(self.app_routine,"objectdb","transact",
{"keys":routersetkey + routerkeys,"updater":deletevrouter})
return {"status":"OK"}
    async def listvirtualrouters(self, id: (str, None) = None, **kwargs):
        """
        Query virtual router

        :param id: if specified, only return virtual router with this ID
        :param \*\*kwargs: customized filter
        :return: a list of dictionaries each stands for a matched virtual router.
        """
        if id:
            return await self._dumpone(VRouter.default_key(id), kwargs)
        else:
            # No ID given: walk the VRouterSet index and collect every
            # router whose attributes match all filter values.
            routersetkey = VRouterSet.default_key()
            self._reqid += 1
            reqid = ("virtualrouter",self._reqid)
            def set_walker(key,set,walk,save):
                # Follow each weak reference in the set; save the keys of
                # routers that satisfy the filter.
                for weakobj in set.dataset():
                    routerkey = weakobj.getkey()
                    try:
                        router = walk(routerkey)
                    except KeyError:
                        # Referenced object not retrieved yet; the walk is
                        # retried by ObjectDB, so skip for now.
                        pass
                    else:
                        if all(getattr(router,k,None) == v for k,v in kwargs.items()):
                            save(routerkey)
            def walker_func(set_func):
                def walker(key,obj,walk,save):
                    if obj is None:
                        return
                    set_walker(key,set_func(obj),walk,save)
                return walker
            with request_context(reqid, self.app_routine):
                _, values = await call_api(self.app_routine,"objectdb","walk",
                                           {"keys":[routersetkey],"walkerdict":{routersetkey:walker_func(lambda x:x.set)},
                                            "requestid":reqid})
                return [dump(r) for r in values]
async def addrouterinterface(self, router: str, subnet: str, id: (str, None) = None,
**kwargs: {'?ip_address': ip_address_type}):
"""
Connect virtual router to a subnet
:param router: virtual router ID
:param subnet: subnet ID
:param id: router port ID
:param \*\*kwargs: customized options
:return: A dictionary of information of the created router port
"""
if not id:
id = str(uuid1())
if not router:
raise ValueError("must specify virtual router ID")
if not subnet:
raise ValueError("must specify subnet ID")
interface = {"id":id,"router":router,"subnet":subnet}
interface.update(kwargs)
return await self.addrouterinterfaces([interface])
    @checked
    async def addrouterinterfaces(self, interfaces: [{"router": str,
                                                      "subnet": str,
                                                      "?id": str,
                                                      '?ip_address': ip_address_type}]):
        """
        Create multiple router interfaces

        Each interface dict attaches one virtual router to one subnet through a
        new RouterPort. A missing ``id`` is generated. All interfaces are
        created in a single objectdb "writewalk" transaction.
        """
        # All object keys the transaction walker may need to read or write
        keys = set()
        # Maps each new RouterPort key to the parameters used to create it
        parameter_dict = OrderedDict()
        for interface in interfaces:
            # Deep copy so the caller's input dicts are never mutated
            interface = copy.deepcopy(interface)
            if 'id' not in interface:
                interface['id'] = str(uuid1())
            key = RouterPort.default_key(interface['id'])
            if key in parameter_dict:
                raise ValueError("Repeated ID: "+interface['id'])
            parameter_dict[key] = interface
            keys.add(key)
            keys.add(VRouter.default_key(interface['router']))
            keys.add(SubNet.default_key(interface['subnet']))
            keys.add(SubNetMap.default_key(interface['subnet']))
        def walker(walk, write):
            # Transaction walker: objectdb may call this multiple times; reads
            # go through walk(), staged modifications through write()
            for key, parameters in parameter_dict.items():
                with suppress(WalkKeyNotRetrieved):
                    value = walk(key)
                    # Raises if a RouterPort with this ID already exists
                    value = create_new(RouterPort, value, parameters['id'])
                    subnet = walk(SubNet.default_key(parameters['subnet']))
                    if subnet is None:
                        raise ValueError("Subnet " + parameters['subnet'] + " not exists")
                    subnet_map = walk(SubNetMap.default_key(parameters['subnet']))
                    router = walk(VRouter.default_key(parameters['router']))
                    if router is None:
                        raise ValueError("Virtual router " + parameters['router'] + " not exists")
                    if hasattr(subnet, 'router'):
                        # normal subnet can only have one router
                        _, (rid,) = VRouter._getIndices(subnet.router.getkey())
                        raise ValueError("Subnet %r is already in virtual router %r", parameters['subnet'], rid)
                    if hasattr(subnet_map, 'routers'):
                        # Reject a duplicate attachment of the same router
                        if router.create_weakreference() in subnet_map.routers.dataset():
                            raise ValueError("Subnet %r is already in virtual router %r", parameters['subnet'],
                                             parameters['router'])
                    if 'ip_address' in parameters:
                        if getattr(subnet, 'isexternal', False):
                            raise ValueError("Cannot specify IP address when add external subnet to virtual router")
                        # Check IP address in CIDR
                        nip = parse_ip4_address(parameters['ip_address'])
                        cidr, prefix = parse_ip4_network(subnet.cidr)
                        if not ip_in_network(nip, cidr, prefix):
                            raise ValueError("IP address " + parameters['ip_address'] + " not in subnet CIDR")
                        # Check IP not allocated
                        if str(nip) in subnet_map.allocated_ips:
                            raise ValueError("IP address " + parameters['ip_address'] + " has been used")
                        else:
                            # Save to allocated map
                            subnet_map.allocated_ips[str(nip)] = (value.create_weakreference(), router.create_weakreference())
                            write(subnet_map.getkey(), subnet_map)
                    else:
                        # Use gateway
                        if not hasattr(subnet, 'gateway'):
                            raise ValueError("Subnet " + subnet.id + " does not have a gateway, IP address on router port must be specified explicitly")
                    # Register the router and the new port in the subnet map
                    if not hasattr(subnet_map, 'routers'):
                        subnet_map.routers = DataObjectSet()
                    subnet_map.routers.dataset().add(router.create_weakreference())
                    if not hasattr(subnet_map, 'routerports'):
                        subnet_map.routerports = {}
                    subnet_map.routerports[router.id] = value.create_weakreference()
                    write(subnet_map.getkey(), subnet_map)
                    if not getattr(subnet, 'isexternal', False):
                        subnet.router = value.create_weakreference()
                        write(subnet.getkey(), subnet)
                    # Save port to router
                    router.interfaces.dataset().add(value.create_weakreference())
                    write(router.getkey(), router)
                    value.router = router.create_reference()
                    value.subnet = subnet.create_reference()
                    # Store any remaining customized options on the port object
                    for k, v in parameters.items():
                        if k not in ('router', 'subnet', 'id'):
                            setattr(value, k, v)
                    write(key, value)
        await call_api(self.app_routine,'objectdb','writewalk',
                       {"keys":keys, 'walker':walker})
        return await self._dumpkeys(parameter_dict)
async def removerouterinterface(self, router: str, subnet: str):
"""
Remote a subnet from the router
:param router: virtual router ID
:param subnet: subnet ID
:return: ``{"status": "OK"}``
"""
if not router:
raise ValueError("must specify router id")
if not subnet:
raise ValueError("must specify subnet id")
interface = {"router": router, "subnet": subnet}
await self.removerouterinterfaces([interface])
@checked
async def removerouterinterfaces(self, interfaces: [{"router": str,
"subnet": str}]):
"""
Remote multiple subnets from routers
"""
for interface in interfaces:
if 'router' not in interface:
raise ValueError("must specify router ID")
if "subnet" not in interface:
raise ValueError("must specify subnet ID")
keys = set()
keys.update(VRouter.default_key(interface['router']) for interface in interfaces)
keys.update(SubNet.default_key(interface['subnet']) for interface in interfaces)
keys.update(SubNetMap.default_key(interface['subnet']) for interface in interfaces)
def walker(walk, write):
for interface in interfaces:
with suppress(WalkKeyNotRetrieved):
router = walk(VRouter.default_key(interface['router']))
if router is None:
raise ValueError("Virtual router " + interface['router'] + " not exists")
subnet = walk(SubNet.default_key(interface['subnet']))
if subnet is None:
raise ValueError("Subnet " + interface['subnet'] + " not exists")
subnet_map = walk(SubNetMap.default_key(interface['subnet']))
if router.create_weakreference() not in subnet_map.routers.dataset():
raise ValueError("Subnet %r not in virtual router %r" % (subnet.id, router.id))
port = subnet_map.routerports.pop(router.id)
| |
i in range(0, self.number_of_directions):
offset = number_of_input_convolutions_per_direction * i
input_columns_for_direction = \
input_columns[offset:offset + number_of_input_convolutions_per_direction]
input_columns_lists.append(input_columns_for_direction)
return MultiDirectionalMultiDimensionalLSTMParametersParallelWithSeparateInputConvolution. \
concatenate_elements_list_of_lists_along_dimension(input_columns_lists, 1)
def prepare_input_convolutions(self, skewed_images_variable):
self.skewed_images_variable = skewed_images_variable
# print("self.skewed_images_variable.size(): " +
# str(self.skewed_images_variable.size()))
return
    def prepare_computation_next_column_functions(self, previous_hidden_state_column,
                                                  previous_memory_state_column, mask: torch.Tensor):
        """Prepare the per-gate state and input columns for the next column, for
        all directions at once.

        The previous hidden and memory state columns stack all directions on the
        channel dimension (dim 1); they are chunked back into per-direction parts,
        combined with the next input column taken from the stored skewed images,
        and fed through the two parallel convolution computations. The resulting
        columns are cached on ``self`` for the ``get_*_column`` accessors, and
        ``self.next_input_column_index`` is advanced by one.

        :param previous_hidden_state_column: size(1) must equal
            hidden_states_size * number_of_directions
        :param previous_memory_state_column: same channel layout as above
        :param mask: mask tensor forwarded to the hidden/memory state computation
        :raises RuntimeError: on a channel-dimension mismatch
        """
        # print("Entered MultiDirectionalMultiDimensionalLSTMParametersFullyParallel." +
        #       "prepare_computation_next_column_functions...")
        if previous_hidden_state_column.size(1) != self.hidden_states_size * self.number_of_directions:
            raise RuntimeError("Error: the size of the second dimension of" +
                               " previous_hidden_state_column should match the number of directions" +
                               "times the number of hidden states")
        if previous_memory_state_column.size(1) != self.hidden_states_size * self.number_of_directions:
            raise RuntimeError("Error: the size of the second dimension of" +
                               " memory_state_column should match the number of directions " +
                               "times the number of hidden states")
        # Split the stacked columns back into one chunk per direction (dim 1)
        previous_hidden_state_columns_split_by_direction = torch.chunk(previous_hidden_state_column,
                                                                       self.number_of_directions, 1)
        previous_memory_state_columns_split_by_direction = torch.chunk(previous_memory_state_column,
                                                                       self.number_of_directions, 1)
        # print("multi_dimensional_lstm_parameters - self.next_input_column_index: " +
        #       str(self.next_input_column_index))
        # print("multi_dimensional_lstm_parameters - self.bladie_input_column_index: "
        #       + str(self.bladie_input_column_index))
        input_column = self.skewed_images_variable[:, :, :, self.next_input_column_index]
        if input_column.size(1) < self.number_of_directions:
            raise RuntimeError("Error: input_column.size(1) = " + str(input_column.size(1)) +
                               ", but it should be a multiple of self.number_of_directions = " +
                               str(self.number_of_directions))
        # print("prepare_computation_next_column_functions - input_column.size(): " + str(input_column.size()))
        input_columns_split_by_direction = torch.chunk(input_column,
                                                       self.number_of_directions, 1)
        # print(">>> self.number_of_directions: " + str(self.number_of_directions))
        # print(">>> len(input_columns_split_by_direction): " + str(len(input_columns_split_by_direction)))
        # Interleave hidden and memory state chunks per direction, as expected by
        # the parallel column computation below
        computation_arguments_list = list([])
        for i in range(0, self.number_of_directions):
            computation_arguments_list.append(previous_hidden_state_columns_split_by_direction[i])
            computation_arguments_list.append(previous_memory_state_columns_split_by_direction[i])
        # The computation of node_hidden_and_memory_state_columns appears to be the
        # first call in this function that causes a memory leak. With everything
        # after it commented out, the memory leak does not seem to appear. With
        # everything after it commented out, but itself not commented out, the
        # memory leak appears.
        node_hidden_and_memory_state_columns = \
            self.parallel_hidden_and_memory_state_column_computation.\
            compute_result_and_split_into_pairs_with_second_pair_element_shifted_multiple_groups(
                computation_arguments_list,
                mask)
        input_convolution_result_columns = \
            self.parallel_input_column_computation.\
            compute_result_and_split_into_columns_multiple_groups(
                input_columns_split_by_direction)
        # Sanity check that the number of output pairs is as expected
        if len(node_hidden_and_memory_state_columns) != \
                (MultiDirectionalMultiDimensionalLSTMParametersParallelWithSeparateInputConvolution.
                 num_paired_hidden_and_memory_state_weightings() * self.number_of_directions):
            raise RuntimeError("Error: there are " + str(self.number_of_directions) + " directions, " +
                               "therefore expected " +
                               str(MultiDirectionalMultiDimensionalLSTMParametersParallelWithSeparateInputConvolution.
                                   num_paired_hidden_and_memory_state_weightings()) + " * " +
                               str(self.number_of_directions) + " output pairs, but got" +
                               str(len(node_hidden_and_memory_state_columns)))
        # Cache the results for retrieval through the get_*_column accessors
        self.node_hidden_state_columns = self.get_next_node_hidden_state_columns(node_hidden_and_memory_state_columns)
        self.node_memory_state_columns = self.get_next_node_memory_state_columns(node_hidden_and_memory_state_columns)
        self.input_columns = self.get_next_input_columns(input_convolution_result_columns)
        self.previous_memory_state_column = previous_memory_state_column
        # print("finished prepare_computation_next_column_functions")
        # Increment the next input column index
        self.next_input_column_index += 1
        # self.bladie_input_column_index += 1
def compute_output_gate_memory_state_weighted_input(self, previous_memory_state_column):
if TensorUtils.number_of_dimensions(previous_memory_state_column) != 3:
raise RuntimeError("Error: prepare_input_convolution requires 3 dimensional input"
+ " got size: " + str(previous_memory_state_column.size()))
if previous_memory_state_column.size(1) != self.hidden_states_size * self.number_of_directions:
raise RuntimeError("Error: the size of the first dimension of" +
" memory_state_column should match the number of directions" +
" times the number of hidden states")
# previous_memory_state_columns_split_by_direction = torch.split(previous_memory_state_column, 1, 0)
# previous_memory_state_column_catted_on_channel_dimension = \
# torch.cat(previous_memory_state_columns_split_by_direction, 1)
#
# result_catted_on_channel_dimension = StateUpdateBlock.compute_weighted_state_input_state_one(
# self.output_gate_memory_state_convolution,
# previous_memory_state_column_catted_on_channel_dimension)
# # print("result_catted_on_channel_dimension.size(): " + str(result_catted_on_channel_dimension.size()))
# result_split_into_directions = torch.chunk(result_catted_on_channel_dimension, self.number_of_directions, 1)
# # Re-concatenate the direction results on the batch dimension
# result = torch.cat(result_split_into_directions, 0)
# # print("result.size(): " + str(result.size()))
# return result
result_catted_on_channel_dimension = StateUpdateBlock.compute_weighted_state_input_state_one(
self.output_gate_memory_state_convolution,
previous_memory_state_column)
return result_catted_on_channel_dimension
def get_input_input_column(self, column_index):
result = self.input_columns[0]
# print("get_input_input_colum - result" + str(result))
# print("get_input_input_colum - result.size()" + str(result.size()))
return result
def get_input_gate_input_column(self, column_index):
return self.input_columns[1]
def get_forget_gate_one_input_column(self, column_index):
return self.input_columns[2]
def get_forget_gate_two_input_column(self, column_index):
return self.input_columns[3]
def get_output_gate_input_column(self, column_index):
return self.input_columns[4]
def get_input_hidden_state_column(self):
return self.node_hidden_state_columns[0]
def get_input_gate_hidden_state_column(self):
return self.node_hidden_state_columns[1]
def get_forget_gate_one_hidden_state_column(self):
return self.node_hidden_state_columns[2]
def get_forget_gate_two_hidden_state_column(self):
return self.node_hidden_state_columns[3]
def get_output_gate_hidden_state_column(self):
return self.node_hidden_state_columns[4]
def get_input_gate_memory_state_column(self):
input_gate_memory_state_column_part_pair = \
self.node_memory_state_columns[0]
input_gate_memory_state_column = input_gate_memory_state_column_part_pair[0] + \
input_gate_memory_state_column_part_pair[1]
return input_gate_memory_state_column
def get_forget_gate_one_memory_state_column(self):
forget_gate_memory_state_column_part_pair = \
self.node_memory_state_columns[1]
forget_gate_memory_state_column = forget_gate_memory_state_column_part_pair[0]
return forget_gate_memory_state_column
def get_forget_gate_two_memory_state_column(self):
forget_gate_memory_state_column_part_pair = \
self.node_memory_state_columns[1]
forget_gate_memory_state_column = forget_gate_memory_state_column_part_pair[1]
return forget_gate_memory_state_column
def number_of_hidden_and_memory_state_weights_per_direction(self):
return self.hidden_states_size * 2 * MultiDirectionalMultiDimensionalLSTMParametersParallelWithSeparateInputConvolution.\
num_paired_hidden_and_memory_state_weightings()
def number_of_input_weighting_weights_per_direction(self):
return self.hidden_states_size * \
MultiDirectionalMultiDimensionalLSTMParametersParallelWithSeparateInputConvolution.\
NUMBER_OF_INPUT_CONVOLUTIONS_PER_DIRECTION
def set_bias_forget_gates_hidden_states_input(self):
relative_start_index = self.hidden_states_size * 2 * 2
relative_end_index = self.hidden_states_size * 2 * 4
# print("start index: " + str(relative_start_index) + " end index: " + str(relative_end_index))
for direction_index in range(0, self.number_of_directions):
offset = self.number_of_hidden_and_memory_state_weights_per_direction() * direction_index
for index in range(offset + relative_start_index, offset + relative_end_index):
self.parallel_hidden_and_memory_state_column_computation.parallel_convolution.bias.data[index] = \
OneDirectionalMultiDimensionalLSTMParametersBase.FORGET_GATE_BIAS_INIT
# print("after: self.parallel_hidden_state_column_computation.parallel_convolution.bias.data: " +
# str(self.parallel_hidden_state_column_computation.parallel_convolution.bias.data))
def set_bias_forget_gates_memory_states_input(self):
# self.forget_gate_one_input_convolution.bias.data.fill_(FORGET_GATE_BIAS_INIT)
# self.forget_gate_one_hidden_state_update_block.set_bias_for_convolutions(FORGET_GATE_BIAS_INIT)
# self.forget_gate_one_memory_state_convolution.bias.data.fill_(FORGET_GATE_BIAS_INIT)
# print("before: self.parallel_memory_state_column_computation.parallel_convolution.bias.data: " +
# str(self.parallel_memory_state_column_computation.parallel_convolution.bias.data))
relative_start_index = self.hidden_states_size * 2 * 6
relative_end_index = self.number_of_hidden_and_memory_state_weights_per_direction()
# print("start index: " + str(relative_start_index) + " end index: " + str(relative_end_index))
for direction_index in range(0, self.number_of_directions):
offset = self.number_of_hidden_and_memory_state_weights_per_direction() * direction_index
for index in range(offset + relative_start_index, offset + relative_end_index):
self.parallel_hidden_and_memory_state_column_computation.parallel_convolution.bias.data[index] = \
OneDirectionalMultiDimensionalLSTMParametersBase.FORGET_GATE_BIAS_INIT
# print("after: self.parallel_memory_state_column_computation.parallel_convolution.bias.data: " +
# str(self.parallel_memory_state_column_computation.parallel_convolution.bias.data))
def set_bias_everything_to_zero(self):
"""
For Leaky LP Cell networks, rather than doing complicated initialization of some of
the gate biases to one, simply set all bias values to zero.
Importantly, for Leaky LP cells, initializing bias for lambda gates to one is a
bad idea. Since a lambda gate is a switch, and a-priori both switch options should
be equally likely, so bias zero is appropriate for such gates. A bias of one,
tells that one of the outputs of the switch is preferred strongly, but there is
no ground for that. Initialiation to one only makes sense for normal (MD)LSTM
forget gates.
)
:return: Nothing, the bias is set in place
"""
print(">>> Multi_dimensional_lstm_parameters: Initializing the bias of everything to zero!")
# Set bias to zero
with torch.no_grad():
self.output_gate_memory_state_convolution.bias.zero_()
self.parallel_hidden_and_memory_state_column_computation.parallel_convolution.bias.zero_()
def set_bias_forget_gates_to_one(self):
print(">>> Multi_dimensional_lstm_parameters: Set bias forget gates to one")
# self.set_bias_forget_gates_image_input()
self.set_bias_forget_gates_memory_states_input()
self.set_bias_forget_gates_hidden_states_input()
def set_training(self, training):
self.parallel_hidden_and_memory_state_column_computation.set_training(training)
def forward(self, x):
raise NotImplementedError
    def copy_weights_parallel_hidden_and_memory_states_convolution_to_one_directional_mdlstm_parameters(
            self, mdlstm_parameters_one_direction, direction_index):
        """Copy the hidden/memory-state convolution weights and biases of one
        direction from this multi-directional object into a one-directional
        mdlstm parameters object.

        :param mdlstm_parameters_one_direction: destination one-directional parameters
        :param direction_index: index of the direction whose weight slice is copied
        """
        relative_start_index = 0
        relative_end_index = self.number_of_hidden_and_memory_state_weights_per_direction()
        for one_directional_mdlstm_index in range(relative_start_index, relative_end_index):
            # Index of the same output channel inside the concatenated
            # multi-directional weight/bias tensors
            multi_directional_mdlstm_index = \
                one_directional_mdlstm_index + \
                self.number_of_hidden_and_memory_state_weights_per_direction() * direction_index
            mdlstm_parameters_one_direction.parallel_hidden_and_memory_state_column_computation.\
                parallel_convolution.bias.data[one_directional_mdlstm_index] =\
                self.parallel_hidden_and_memory_state_column_computation.\
                parallel_convolution.bias.data[multi_directional_mdlstm_index]
            mdlstm_parameters_one_direction.parallel_hidden_and_memory_state_column_computation.\
                parallel_convolution.weight.data[one_directional_mdlstm_index, :, :] = \
                self.parallel_hidden_and_memory_state_column_computation.parallel_convolution.\
                weight.data[multi_directional_mdlstm_index, :, :]
def copy_parallel_multiple_input_convolutions_computation_to_one_directional_mdlstm_parameters(
self, mdlstm_parameters_one_direction, direction_index):
# print(">>> copy_parallel_multiple_input_convolutions_computation_to_one_directional_mdlstm_parameters...")
# mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation = \
# self.parallel_multiple_input_convolutions_computations[direction_index]
# print(">>> direction_index: " + str(direction_index))
# print(">>> self.number_of_input_weighting_weights_per_direction(): "
# + str(self.number_of_input_weighting_weights_per_direction()))
relative_start_index = 0
relative_end_index = self.number_of_input_weighting_weights_per_direction()
for one_directional_mdlstm_index in range(relative_start_index, relative_end_index):
multi_directional_mdlstm_index = \
one_directional_mdlstm_index + \
self.number_of_input_weighting_weights_per_direction() * direction_index
mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
parallel_convolution.bias.data[one_directional_mdlstm_index] = \
self.parallel_input_column_computation.\
parallel_convolution.bias.data[multi_directional_mdlstm_index]
# print("one_directional_mdlstm_index: " + str(one_directional_mdlstm_index))
# print("multi_directional_mdlstm_index: " + str(multi_directional_mdlstm_index))
#
# print(" mdlstm_parameters_one_direction.parallel_hidden_and_memory_state_column_computation.\
# parallel_convolution.weight.data.size()" +
# str(mdlstm_parameters_one_direction.parallel_hidden_and_memory_state_column_computation.\
# parallel_convolution.weight.data.size()))
# print(" mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
# parallel_convolution.weight.data.size()" +
# str(mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation. \
# parallel_convolution.weight.data.size()))
# print(" mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
# parallel_convolution.weight.data[one_directional_mdlstm_index, :, :].size()" +
# str( mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
# parallel_convolution.weight.data[one_directional_mdlstm_index, :, :].size()))
# print("self.parallel_input_and_hidden_and_memory_state_column_computation.parallel_convolution. \
# weight.data[multi_directional_mdlstm_index, :, :].size(): " +
# str(self.parallel_hidden_and_memory_state_column_computation.parallel_convolution. \
# weight.data[multi_directional_mdlstm_index, :, :].size()))
# print(" mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
# parallel_convolution.weight.data[begin_index:end_index, :, :, :].size()" +
# str(mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
# parallel_convolution.weight.data[begin_index:end_index, :, :, :].size()))
# print("self.parallel_input_column_computation.parallel_convolution.\
# weight.data.size(): " + str(self.parallel_input_column_computation.
# parallel_convolution.weight.data.size()))
mdlstm_parameters_one_direction.parallel_multiple_input_convolutions_computation.\
parallel_convolution.weight.data[one_directional_mdlstm_index, :, 0, 0] = \
self.parallel_input_column_computation.parallel_convolution. \
weight.data[multi_directional_mdlstm_index, :, :]
    def copy_output_gate_memory_state_convolution_to_one_directional_mdlstm_parameters(
            self, mdlstm_parameters_one_direction, direction_index):
        """Copy the output-gate memory-state convolution weights and bias of one
        direction into a one-directional mdlstm parameters object.

        :param mdlstm_parameters_one_direction: destination one-directional parameters
        :param direction_index: index of the direction whose weight slice is copied
        """
        relative_start_index = 0
        relative_end_index = self.hidden_states_size
        for one_directional_mdlstm_index in range(relative_start_index, relative_end_index):
            # Channels for direction d occupy
            # [d * hidden_states_size, (d + 1) * hidden_states_size)
            multi_directional_mdlstm_index = \
                self.hidden_states_size * direction_index + one_directional_mdlstm_index
            mdlstm_parameters_one_direction.output_gate_memory_state_convolution.bias[one_directional_mdlstm_index] = \
                self.output_gate_memory_state_convolution.bias[multi_directional_mdlstm_index]
            mdlstm_parameters_one_direction.output_gate_memory_state_convolution.\
                weight[one_directional_mdlstm_index, :, :] = \
                self.output_gate_memory_state_convolution.weight[multi_directional_mdlstm_index, :, :]
"""
This methods extracts/creates a list of one-directional MDLSTM parameters based on the
current weights of the multi-directional MDLSTM parameters. This is mainly useful for
testing purposes. Creating (one-directional) MDLSTMs from the thus extracted parameters,
it can be tested whether separately executed one-directional MDLSTMs produce the same results
as the corresponding outputs for that direction of the multi-directional MDLSTM from which
the parameters were taken. This kind of testing is useful, since it is very hard to test
directly whether the multi-directional MDLSTM "works". This is actually a general problem
for testing neural networks: finding and fixing problems with mismatching tensor sizes etc
is easy enough. But once something runs without errors, it can be very hard to determine
whether it is actually computing the right thing.
"""
def create_one_directional_mdlstm_parameters_each_direction_using_current_weights(self):
result = list([])
for i in range(0, self.number_of_directions):
mdlstm_parameters_one_direction = MultiDimensionalLSTMParametersOneDirectionFullyParallel.\
create_multi_dimensional_lstm_parameters_one_direction_fully_parallel(
self.hidden_states_size, self.input_channels, False, self.use_dropout)
self.copy_weights_parallel_hidden_and_memory_states_convolution_to_one_directional_mdlstm_parameters(
mdlstm_parameters_one_direction, i)
self.copy_parallel_multiple_input_convolutions_computation_to_one_directional_mdlstm_parameters(
mdlstm_parameters_one_direction, i)
self.copy_output_gate_memory_state_convolution_to_one_directional_mdlstm_parameters(
mdlstm_parameters_one_direction, i)
result.append(mdlstm_parameters_one_direction)
return result
class MultiDimensionalLSTMParametersCreator:
# Needs to be implemented in the subclasses
@abstractmethod
def create_multi_dimensional_lstm_parameters_one_direction(self,
hidden_states_size, input_channels,
use_dropout: bool,
clamp_gradients: bool):
raise RuntimeError("not implemented")
@abstractmethod
def create_multi_directional_multi_dimensional_lstm_parameters(self,
hidden_states_size, input_channels,
use_dropout: bool,
| |
#/*
# * Copyright 2016 -- 2021 IBM Corporation
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *
# *****************************************************************************
# * @file : tc_TcpEcho.py
# * @brief : A multi-threaded script to send and receive traffic on the
# * TCP connection of an FPGA module.
# *
# * System: : cloudFPGA
# * Component : cFp_BringUp/ROLE
# * Language : Python 3
# *
# *****************************************************************************
# ### REQUIRED PYTHON PACKAGES ################################################
import argparse
import datetime
import errno
import filecmp
import socket
import threading
import time
# ### REQUIRED TESTCASE MODULES ###############################################
from tc_utils import *
# ### GLOBAL VARIABLES ########################################################
# Path of the file that records the data stream received back from the FPGA.
gEchoRxPath = './echoRx.dat'
# Path of the reference file that records the transmitted data stream.
gEchoTxPath = './echoTx.dat'
def tcp_tx(sock, message, count, verbose=False):
    """TCP Tx Thread.
    :param sock:    The socket to send to.
    :param message: The random byte string to send.
    :param count:   The number of segments to send.
    :param verbose: Enables verbosity.
    :return: None
    """
    if verbose:
        print("The following message of %d bytes will be sent out %d times:\n Message=%s\n" %
              (len(message), count, message.decode('ascii')))
    # Create a Tx Reference File. 'with' guarantees the file is closed even on
    # error (the original code leaked the handle on exception).
    with open(gEchoTxPath, 'w') as echoTxFile:
        if count <= 1000:
            # Only mirror the payload for small runs, to keep the file small
            for _ in range(count):
                echoTxFile.write(message.decode('ascii'))
        # Start Data Transmission
        startTime = datetime.datetime.now()
        for _ in range(count):
            sock.sendall(message)
        endTime = datetime.datetime.now()
    elapseTime = endTime - startTime
    # Guard against a zero-length interval on very fast runs
    seconds = elapseTime.total_seconds() or 1e-9
    bandwidth = len(message) * 8 * count * 1.0 / (seconds * 1024 * 1024)
    print("##################################################")
    print("#### TCP TX DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("##################################################")
    print()
    # Push a few more bytes to force the FPGA to flush its buffers
    sock.sendall(message)
def tcp_rx(sock, message, count, verbose):
    """TCP Rx Thread.
    :param sock:    The socket to receive from.
    :param message: The expected byte string to be received.
    :param count:   The number of segments to receive.
    :param verbose: Enables verbosity.
    :return: None
    """
    loop = 0
    rxBytes = 0
    expectedBytes = count * len(message)
    # Create an Rx Test File. 'with' guarantees the file is closed even on
    # error (the original code leaked the handle on exception).
    with open(gEchoRxPath, 'w') as echoRxFile:
        # Start Data Reception
        startTime = datetime.datetime.now()
        while rxBytes < expectedBytes:
            try:
                data = sock.recv(expectedBytes - rxBytes)
            except socket.error as exc:
                print("[EXCEPTION] Socket error while receiving :: %s" % exc)
            else:
                if not data:
                    # Peer closed the connection: stop instead of looping
                    # forever (the original code never terminated here)
                    print("[ERROR] Connection closed by peer after %d bytes." % rxBytes)
                    break
                rxBytes += len(data)
                if count <= 1000:
                    echoRxFile.write(data.decode('ascii'))
                if verbose:
                    print("Loop=%d | RxBytes=%d" % (loop, rxBytes))
                loop += 1
        endTime = datetime.datetime.now()
    elapseTime = endTime - startTime
    # Guard against a zero-length interval on very fast runs
    seconds = elapseTime.total_seconds() or 1e-9
    bandwidth = len(message) * 8 * count * 1.0 / (seconds * 1024 * 1024)
    print("##################################################")
    print("#### TCP RX DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("##################################################")
    print()
def waitUntilSocketPairCanBeReused(ipFpga, portFpga):
    """Check and wait until the socket pair can be reused.
    [INFO] When a client or a server initiates an active close, the same destination socket
           (i.e. the same IP address / TCP port number) cannot be re-used immediately for
           security reasons. A closed connection must therefore linger in a 'TIME_WAIT' or
           'FIN_WAIT' state for as long as 2xMSL (Maximum Segment Lifetime), which corresponds
           to twice the time a TCP segment might exist in the internet system. The MSL is
           arbitrarily defined to be 2 minutes long.
    :param ipFpga:   the IP address of FPGA.
    :param portFpga: the TCP port of the FPGA.
    :return: nothing
    """
    # NETSTAT example: rc = os.system("netstat | grep '10.12.200.163:8803' | grep TIME_WAIT")
    cmdStr = "netstat | grep \'" + str(ipFpga) + ":" + str(portFpga) + "\' | grep \'TIME_WAIT\|FIN_WAIT\' "
    # grep exits with 0 while a matching lingering connection is still listed
    while os.system(cmdStr) == 0:
        print("[INFO] Cannot reuse this socket as long as it is in the \'TIME_WAIT\' or \'FIN_WAIT\' state.")
        print("       Let's sleep for 5 sec...")
        time.sleep(5)
def tcp_txrx_loop(sock, message, count, verbose=False):
    """TCP Tx-Rx Single-Thread Loop.
    Sends 'message' 'count' times over 'sock', reads back the echoed data and
    verifies that the received stream equals the transmitted stream.
    :param sock:    The socket to send/receive to/from.
    :param message: The message byte string to send.
    :param count:   The number of segments to send.
    :param verbose: Enables verbosity.
    :return: None
    """
    if verbose:
        print("[INFO] The following message of %d bytes will be sent out %d times:\n Message=%s\n" %
              (len(message), count, message.decode('ascii')))
    nrErr = 0
    txMssgCnt = 0
    rxMssgCnt = 0
    rxByteCnt = 0
    rxStream = ""
    # Init the Tx reference stream
    txStream = message.decode('ascii') * count
    startTime = datetime.datetime.now()
    while rxByteCnt < (count * len(message)):
        if txMssgCnt < count:
            # Send a new message
            # BUGFIX: use the 'sock' parameter; the original referenced the
            # global 'tcpSock' and silently ignored its argument.
            sock.sendall(message)
            txMssgCnt += 1
        # Receive a segment
        try:
            data = sock.recv(len(message))
        except IOError as e:
            # On non-blocking connections, EAGAIN / EWOULDBLOCK simply mean
            # that no data has arrived yet; any other error code is fatal.
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                print('[ERROR] Socket reading error: {}'.format(str(e)))
                exit(1)
            # We just did not receive anything
            continue
        except socket.error as exc:
            # Any other exception; skip this iteration so 'data' is never
            # used unbound (the original fell through and could crash here)
            print("[EXCEPTION] Socket error while receiving :: %s" % exc)
            continue
        rxByteCnt += len(data)
        rxMssgCnt += 1
        if verbose:
            print("%d:%s" % (rxMssgCnt, data.decode('ascii')))
        rxStream = rxStream + data.decode('ascii')
    endTime = datetime.datetime.now()
    if verbose:
        print("\n")
    # Compare Tx and Rx stream
    if rxStream != txStream:
        # BUGFIX: report the full received stream (not just the last segment)
        # and the expected (transmitted) stream, which were swapped before.
        print(" KO | Received stream = %s" % rxStream)
        print("    | Expected stream = %s" % txStream)
        nrErr += 1
    elif verbose:
        print(" OK | Received %d bytes in %d messages." % (rxByteCnt, rxMssgCnt))
    elapseTime = endTime - startTime
    # Guard against a zero-length interval on very fast runs
    seconds = elapseTime.total_seconds() or 1e-9
    bandwidth = len(message) * 8 * count * 1.0 / (seconds * 1024 * 1024)
    print("[INFO] Transferred a total of %d bytes." % rxByteCnt)
    print("#####################################################")
    print("#### TCP Tx/Rx DONE with bandwidth = %6.1f Mb/s ####" % bandwidth)
    print("#####################################################")
    print()
def tcp_txrx_ramp(sock, message, count, verbose=False):
"""TCP Tx-Rx Single-Thread Ramp.
:param sock The socket to send/receive to/from.
:param message The message string to sent.
:param count The number of segments to send.
:param verbose Enables verbosity.
:return None"""
if verbose:
print("[INFO] The following message of %d bytes will be sent out incrementally %d times:\n Message=%s\n" %
(len(message), count, message.decode('ascii')))
nrErr = 0
loop = 0
rxByteCnt = 0
startTime = datetime.datetime.now()
while loop < count:
i = 1
while i <= len(message):
subMsg = message[0:i]
# Send datagram
# -------------------
try:
tcpSock.sendall(subMsg)
finally:
pass
# Receive datagram
# -------------------
try:
data = tcpSock.recv(len(subMsg))
rxByteCnt += len(data)
if data == subMsg:
if verbose:
print("Loop=%d | RxBytes=%d" % (loop, len(data)))
else:
print("Loop=%d | RxBytes=%d" % (loop, len(data)))
print(" KO | Received Message=%s" % data.decode('ascii'))
print(" | Expecting Message=%s" % subMsg)
nrErr += 1
except IOError as e:
# On non blocking connections - when there are no incoming data, error is going to be raised
# Some operating systems will indicate that using AGAIN, and some using WOULDBLOCK error code
# We are going to check for both - if one of them - that's expected, means no incoming data,
# continue as normal. If we got different error code - something happened
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print('[ERROR] Socket reading error: {}'.format(str(e)))
exit(1)
# We just did not receive anything
continue
except socket.error as exc:
# Any other exception
print("[EXCEPTION] Socket error while receiving :: %s" % exc)
# exit(1)
finally:
pass
i += 1
loop += 1
endTime = datetime.datetime.now()
elapseTime = endTime - startTime
bandwidth = (rxByteCnt * 8 | |
# Source: manliu1225/Facebook_crawler -- ven2/lib/python2.7/site-packages/persistent/picklecache.py
##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import gc
from weakref import WeakValueDictionary
from zope.interface import implementer
from zope.interface import classImplements
from persistent._compat import use_c_impl
from persistent._compat import PYPY
from persistent.interfaces import GHOST
from persistent.interfaces import IPickleCache
from persistent.interfaces import IExtendedPickleCache
from persistent.interfaces import OID_TYPE
from persistent.interfaces import UPTODATE
from persistent.persistence import PersistentPy
from persistent.persistence import _estimated_size_in_24_bits
from persistent.ring import Ring
__all__ = [
'PickleCache',
'PickleCachePy',
]
# We're tightly coupled to the PersistentPy implementation and access
# its internals.
# pylint:disable=protected-access
_OGA = object.__getattribute__
_OSA = object.__setattr__
def _sweeping_ring(f):
# A decorator for functions in the PickleCache
# that are sweeping the entire ring (mutating it);
# serves as a pseudo-lock to not mutate the ring further
# in other functions
def locked(self, *args, **kwargs):
self._is_sweeping_ring = True
try:
return f(self, *args, **kwargs)
finally:
self._is_sweeping_ring = False
return locked
class _WeakValueDictionary(object):
    # Maps from OID -> Persistent object, but
    # only weakly references the Persistent object. This is similar
    # to ``weakref.WeakValueDictionary``, but is customized depending on the
    # platform. On PyPy, all objects can cheaply use a WeakRef, so that's
    # what we actually use. On CPython, though, ``PersistentPy`` cannot be weakly
    # referenced, so we rely on the fact that the ``id()`` of an object is its
    # memory location, and we use ``ctypes`` to cast that integer back to
    # the object.
    #
    # To remove stale addresses, we rely on the ``ffi.gc()`` object with the exact
    # same lifetime as the ``PersistentPy`` object. It calls us, we get the ``id``
    # back out of the CData, and clean up.
    if PYPY: # pragma: no cover
        def __init__(self):
            # On PyPy every object supports weak references cheaply, so a
            # standard WeakValueDictionary does all the work.
            self._data = WeakValueDictionary()
        def _from_addr(self, addr):
            # PyPy stores the object itself, so the "address" IS the object.
            return addr
        def _save_addr(self, oid, obj):
            return obj
        # No deallocation callback needed: WeakValueDictionary drops
        # entries automatically when the referent dies.
        cleanup_hook = None
    else:
        def __init__(self):
            # careful not to require ctypes at import time; most likely the
            # C implementation is in use.
            import ctypes
            self._data = {}           # OID -> integer address (id of object)
            self._addr_to_oid = {}    # reverse map used by cleanup_hook
            # Bind the ctypes helpers once; _from_addr is a hot path.
            self._cast = ctypes.cast
            self._py_object = ctypes.py_object
        def _save_addr(self, oid, obj):
            # On CPython id() is the memory address; remember which oid
            # lives at that address so cleanup_hook can find it later.
            i = id(obj)
            self._addr_to_oid[i] = oid
            return i
        def _from_addr(self, addr):
            # Cast the stored address back to the live object.
            # NOTE(review): only valid while the object is alive; relies on
            # cleanup_hook having removed addresses of dead objects first.
            return self._cast(addr, self._py_object).value
        def cleanup_hook(self, cdata):
            # Called when the companion CData (see class comment) is
            # collected; drop the now-stale address and oid entries.
            oid = self._addr_to_oid.pop(cdata.pobj_id, None)
            self._data.pop(oid, None)
    def __contains__(self, oid):
        return oid in self._data
    def __len__(self):
        return len(self._data)
    def __setitem__(self, key, value):
        # Store the platform-specific "address", not a strong reference.
        addr = self._save_addr(key, value)
        self._data[key] = addr
    def pop(self, oid):
        return self._from_addr(self._data.pop(oid))
    def items(self):
        # Generator of (oid, object) pairs; hoist the bound method lookup.
        from_addr = self._from_addr
        for oid, addr in self._data.items():
            yield oid, from_addr(addr)
    def get(self, oid, default=None):
        # ``self`` doubles as a sentinel to distinguish "missing" from a
        # stored falsy value.
        addr = self._data.get(oid, self)
        if addr is self:
            return default
        return self._from_addr(addr)
    def __getitem__(self, oid):
        addr = self._data[oid]
        return self._from_addr(addr)
@use_c_impl
# We actually implement IExtendedPickleCache, but
# the C version does not, and our interface declarations are
# copied over by the decorator. So we make the declaration
# of IExtendedPickleCache later.
@implementer(IPickleCache)
class PickleCache(object):
# Tests may modify this to add additional types
_CACHEABLE_TYPES = (type, PersistentPy)
_SWEEPABLE_TYPES = (PersistentPy,)
total_estimated_size = 0
cache_size_bytes = 0
# Set by functions that sweep the entire ring (via _sweeping_ring)
# Serves as a pseudo-lock
_is_sweeping_ring = False
    def __init__(self, jar, target_size=0, cache_size_bytes=0):
        """Create a pickle cache bound to ``jar`` (typically a Connection).

        :param jar: The data manager; gets ``jar._cache = self`` set on it
            when possible (a deliberate reference cycle).
        :param target_size: Soft limit on the number of non-ghost objects.
        :param cache_size_bytes: Soft limit on total estimated object size.
        """
        # TODO: forward-port Dieter's bytes stuff
        self.jar = jar
        # We expect the jars to be able to have a pointer to
        # us; this is a reference cycle, but certain
        # aspects of invalidation and accessing depend on it.
        # The actual Connection objects we're used with do set this
        # automatically, but many test objects don't.
        # TODO: track this on the persistent objects themself?
        try:
            jar._cache = self
        except AttributeError:
            # Some ZODB tests pass in an object that cannot have an _cache
            pass
        self.cache_size = target_size
        self.drain_resistance = 0
        self.non_ghost_count = 0
        # OID -> persistent *class* (strongly held, never swept).
        self.persistent_classes = {}
        # OID -> persistent instance, weakly held (see _WeakValueDictionary).
        self.data = _WeakValueDictionary()
        # LRU ring of non-ghost objects; the cleanup hook (CPython only)
        # purges entries when instances are deallocated.
        self.ring = Ring(self.data.cleanup_hook)
        self.cache_size_bytes = cache_size_bytes
# IPickleCache API
def __len__(self):
""" See IPickleCache.
"""
return (len(self.persistent_classes) +
len(self.data))
def __getitem__(self, oid):
""" See IPickleCache.
"""
value = self.data.get(oid, self)
if value is not self:
return value
return self.persistent_classes[oid]
    def __setitem__(self, oid, value):
        """ See IPickleCache.

        Store *value* under *oid*, enforcing the same constraints as the
        C implementation: the value must be a persistent object (or a
        persistent class), its ``_p_oid`` must equal *oid*, it must have a
        jar, and it may live in at most one cache.
        """
        # The order of checks matters for C compatibility;
        # the ZODB tests depend on this
        # The C impl requires either a type or a Persistent subclass
        if not isinstance(value, self._CACHEABLE_TYPES):
            raise TypeError("Cache values must be persistent objects.")
        value_oid = value._p_oid
        if not isinstance(oid, OID_TYPE) or not isinstance(value_oid, OID_TYPE):
            raise TypeError('OID must be %s: key=%s _p_oid=%s' % (OID_TYPE, oid, value_oid))
        if value_oid != oid:
            raise ValueError("Cache key does not match oid")
        if oid in self.persistent_classes or oid in self.data:
            # Have to be careful here, a GC might have just run
            # and cleaned up the object
            existing_data = self.get(oid)
            if existing_data is not None and existing_data is not value:
                # Raise the same type of exception as the C impl with the same
                # message.
                raise ValueError('A different object already has the same oid')
        # Match the C impl: it requires a jar. Let this raise AttributeError
        # if no jar is found.
        jar = value._p_jar
        if jar is None:
            raise ValueError("Cached object jar missing")
        # It also requires that it cannot be cached more than one place
        existing_cache = getattr(jar, '_cache', None) # type: PickleCache
        if (existing_cache is not None
            and existing_cache is not self
            and oid in existing_cache.data):
            raise ValueError("Cache values may only be in one cache.")
        if isinstance(value, type): # ZODB.persistentclass.PersistentMetaClass
            self.persistent_classes[oid] = value
        else:
            self.data[oid] = value
            # Non-ghosts join the LRU ring and are counted.
            if _OGA(value, '_p_state') != GHOST and value not in self.ring:
                self.ring.add(value)
                self.non_ghost_count += 1
            elif self.data.cleanup_hook:
                # Ensure we begin monitoring for ``value`` to
                # be deallocated.
                self.ring.ring_node_for(value)
def __delitem__(self, oid):
""" See IPickleCache.
"""
if not isinstance(oid, OID_TYPE):
raise TypeError('OID must be %s: %s' % (OID_TYPE, oid))
if oid in self.persistent_classes:
del self.persistent_classes[oid]
else:
pobj = self.data.pop(oid)
self.ring.delete(pobj)
def get(self, oid, default=None):
""" See IPickleCache.
"""
value = self.data.get(oid, self)
if value is not self:
return value
return self.persistent_classes.get(oid, default)
def mru(self, oid):
""" See IPickleCache.
"""
if self._is_sweeping_ring:
# accessess during sweeping, such as with an
# overridden _p_deactivate, don't mutate the ring
# because that could leave it inconsistent
return False # marker return for tests
value = self.data[oid]
was_in_ring = value in self.ring
if not was_in_ring:
if _OGA(value, '_p_state') != GHOST:
self.ring.add(value)
self.non_ghost_count += 1
else:
self.ring.move_to_head(value)
return None
    def ringlen(self):
        """ See IPickleCache.

        Return the number of non-ghost objects currently in the LRU ring.
        """
        return len(self.ring)
    def items(self):
        """ See IPickleCache.

        Iterate ``(oid, object)`` pairs for cached persistent instances
        (persistent classes are excluded; see ``klass_items``).
        """
        return self.data.items()
def lru_items(self):
""" See IPickleCache.
"""
return [
(obj._p_oid, obj)
for obj in self.ring
]
    def klass_items(self):
        """ See IPickleCache.

        Iterate ``(oid, class)`` pairs for cached persistent classes.
        """
        return self.persistent_classes.items()
def incrgc(self, ignored=None):
""" See IPickleCache.
"""
target = self.cache_size
if self.drain_resistance >= 1:
size = self.non_ghost_count
target2 = size - 1 - (size // self.drain_resistance)
if target2 < target:
target = target2
# return value for testing
return self._sweep(target, self.cache_size_bytes)
    def full_sweep(self, target=None):
        """ See IPickleCache.

        Sweep the entire ring, ghosting everything that can be ghosted.
        The *target* parameter is accepted for interface compatibility
        but is not consulted: the sweep always drives the ring to zero.
        """
        # return value for testing
        return self._sweep(0)
    # Historical alias kept for API compatibility with the C implementation.
    minimize = full_sweep
    def new_ghost(self, oid, obj):
        """ See IPickleCache.

        Adopt *obj* into this cache as a ghost registered under *oid*.
        *obj* must not already have an oid or a jar, and *oid* must not
        already be cached.
        """
        if obj._p_oid is not None:
            raise ValueError('Object already has oid')
        if obj._p_jar is not None:
            raise ValueError('Object already has jar')
        if oid in self.persistent_classes or oid in self.data:
            raise KeyError('Duplicate OID: %s' % oid)
        obj._p_oid = oid
        obj._p_jar = self.jar
        if not isinstance(obj, type):
            if obj._p_state != GHOST:
                # The C implementation sets this stuff directly,
                # but we delegate to the class. However, we must be
                # careful to avoid broken _p_invalidate and _p_deactivate
                # that don't call the super class. See ZODB's
                # testConnection.doctest_proper_ghost_initialization_with_empty__p_deactivate
                obj._p_invalidate_deactivate_helper(False)
        self[oid] = obj
def reify(self, to_reify):
""" See IPickleCache.
"""
if isinstance(to_reify, OID_TYPE): #bytes
to_reify = [to_reify]
for oid in to_reify:
value = self[oid]
if value._p_state == GHOST:
value._p_activate()
self.non_ghost_count += 1
self.mru(oid)
def invalidate(self, to_invalidate):
""" See IPickleCache.
"""
if isinstance(to_invalidate, OID_TYPE):
| |
# Source: dkn22/findatapy -- findatapy/timeseries/calculations.py
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
#
import datetime
import functools
import math
import numpy
import pandas
import pandas.tseries.offsets
try:
from pandas.stats.api import ols
except:
# temporary fix to get compilation, need to rewrite regression code to get this to work
# later versions of pandas no longer support OLS
#
# fails with SciPy 1.3.0 unless we have the very latest version of StatsModels pip install statsmodels==0.10.0rc2 --pre
#
try:
from statsmodels.formula.api import ols
except:
pass
from findatapy.timeseries.filter import Filter
from findatapy.timeseries.filter import Calendar
from pandas import compat
import copy
from datetime import timedelta
class Calculations(object):
"""Calculations on time series, such as calculating strategy returns and various wrappers on pandas for rolling sums etc.
"""
##### calculate
def calculate_signal_tc(self, signal_data_frame, tc, period_shift=1):
"""Calculates the transaction costs for a particular signal
Parameters
----------
signal_data_frame : DataFrame
contains trading signals
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return (signal_data_frame.shift(period_shift) - signal_data_frame).abs().multiply(tc)
def calculate_entry_tc(self, entry_data_frame, tc, period_shift=1):
"""Calculates the transaction costs for defined trading points
Parameters
----------
entry_data_frame : DataFrame
contains points where we enter/exit trades
tc : float
transaction costs
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
return entry_data_frame.abs().multiply(tc)
def calculate_signal_returns(self, signal_data_frame, returns_data_frame, period_shift=1):
"""Calculates the trading startegy returns for given signal and asset
Parameters
----------
signal_data_frame : DataFrame
trading signals
returns_data_frame: DataFrame
returns of asset to be traded
period_shift : int
number of periods to shift signal
Returns
-------
DataFrame
"""
# can cause issues, if the names of the columns are not identical
return signal_data_frame.shift(period_shift) * returns_data_frame
def calculate_signal_returns_as_matrix(self, signal_data_frame, returns_data_frame, period_shift=1):
return pandas.DataFrame(
signal_data_frame.shift(period_shift).values * returns_data_frame.values, index=returns_data_frame.index,
columns=returns_data_frame.columns)
    def calculate_individual_trade_gains(self, signal_data_frame, strategy_returns_data_frame):
        """Calculates profits on every trade (experimental code)

        Parameters
        ----------
        signal_data_frame : DataFrame
            trading signals
        strategy_returns_data_frame: DataFrame
            returns of strategy to be tested

        Returns
        -------
        DataFrame contains the P&L for every trade
        """
        # signal need to be aligned to NEXT period for returns
        # signal_data_frame_pushed = signal_data_frame.shift(1)
        # find all the trade points
        trade_points = ((signal_data_frame - signal_data_frame.shift(1)).abs())
        cumulative = self.create_mult_index(strategy_returns_data_frame)
        indices = trade_points > 0
        indices.columns = cumulative.columns
        # get P&L for every trade (from the end point - start point)
        trade_returns = numpy.nan * cumulative
        trade_points_cumulative = cumulative[indices]
        # for each set of signals/returns, calculate the trade returns - where there isn't a trade
        # assign a NaN
        # TODO do in one vectorised step without for loop
        for col_name in trade_points_cumulative:
            col = trade_points_cumulative[col_name]
            col = col.dropna()
            # percentage change of the cumulative index between trade points
            col = col / col.shift(1) - 1
            # TODO experiment with quicker ways of writing below?
            # for val in col.index:
            # trade_returns.set_value(val, col_name, col[val])
            # trade_returns.ix[val, col_name] = col[val]
            # NOTE(review): searchsorted returns *positional* indices; using
            # them with .loc assumes those positions are also valid labels on
            # trade_returns' index -- confirm this works on a DatetimeIndex.
            date_indices = trade_returns.index.searchsorted(col.index)
            trade_returns.loc[date_indices, col_name] = col
        return trade_returns
    def calculate_cum_rets_trades(self, signal_data_frame, strategy_returns_data_frame):
        """Calculates cumulative returns resetting at each new trade

        NOTE(review): ``strategy_returns_data_frame`` is modified in place
        (its columns are temporarily renamed and its values replaced by the
        per-trade cumulative sums) and is also returned.

        Parameters
        ----------
        signal_data_frame : DataFrame
            trading signals
        strategy_returns_data_frame: DataFrame
            returns of strategy to be tested

        Returns
        -------
        DataFrame
        """
        # signal need to be aligned to NEXT period for returns
        signal_data_frame_pushed = signal_data_frame.shift(1)
        # find all the trade points
        reset_points = ((signal_data_frame_pushed - signal_data_frame_pushed.shift(1)).abs())
        # cumsum turns each trade-change marker into a monotonically
        # increasing "trade id" usable as a groupby key
        reset_points = reset_points.cumsum()
        # make sure they have the same column names (otherwise issues around pandas calc - assume same ordering for cols)
        old_cols = strategy_returns_data_frame.columns
        strategy_returns_data_frame.columns = signal_data_frame_pushed.columns
        for c in reset_points.columns:
            strategy_returns_data_frame[c + 'cumsum'] = reset_points[c]
            # cumulative sum of returns within each trade id
            strategy_returns_data_frame[c] = strategy_returns_data_frame.groupby([c + 'cumsum'])[c].cumsum()
            strategy_returns_data_frame = strategy_returns_data_frame.drop([c + 'cumsum'], axis=1)
        strategy_returns_data_frame.columns = old_cols
        return strategy_returns_data_frame
def calculate_trade_no(self, signal_data_frame):
####### how many trades have there been (ignore size of the trades)
trades = abs(signal_data_frame - signal_data_frame.shift(-1))
trades = trades[trades > 0].count()
signal_data_frame = pandas.DataFrame(index=trades.index, columns=['Trades'], data=trades)
return signal_data_frame
    def calculate_trade_duration(self, signal_data_frame):
        """Calculates cumulative trade durations

        NOTE(review): not implemented yet -- the sketched implementation
        below is entirely commented out, so this method currently returns
        ``None``. See ``calculate_final_trade_duration`` for a working
        variant of the same logic.

        Parameters
        ----------
        signal_data_frame : DataFrame
            trading signals

        Returns
        -------
        DataFrame (currently None)
        """
        # TODO
        # # signal need to be aligned to NEXT period for returns
        # signal_data_frame_pushed = signal_data_frame.shift(1)
        #
        # # find all the trade points
        # reset_points = ((signal_data_frame_pushed - signal_data_frame_pushed.shift(1)).abs())
        #
        # reset_points = reset_points.cumsum()
        #
        # time_data_frame = pandas.DataFrame(index = signal_data_frame.index, columns = signal_data_frame.columns,
        # data=numpy.ones([len(signal_data_frame.index), len(signal_data_frame.columns)]))
        #
        # # make sure they have the same column names (otherwise issues around pandas calc - assume same ordering for cols)
        # old_cols = time_data_frame.columns
        # time_data_frame.columns = signal_data_frame_pushed.columns
        #
        # for c in reset_points.columns:
        # time_data_frame[c + 'cumperiods'] = reset_points[c]
        # time_data_frame[c] = time_data_frame.groupby([c + 'cumperiods'])[c].cumsum()
        # time_data_frame = time_data_frame.drop([c + 'cumperiods'], axis=1)
        #
        # time_data_frame.columns = old_cols
        #
        # return time_data_frame
    def calculate_final_trade_duration(self, signal_data_frame):
        """Calculates cumulative trade durations

        For each column, counts (cumulatively) the number of periods spent
        inside each trade, resetting whenever the lagged signal changes.

        Parameters
        ----------
        signal_data_frame : DataFrame
            trading signals

        Returns
        -------
        DataFrame
        """
        # signal need to be aligned to NEXT period for returns
        signal_data_frame_pushed = signal_data_frame.shift(1)
        # find all the trade points
        reset_points = ((signal_data_frame_pushed - signal_data_frame_pushed.shift(1)).abs())
        # cumsum turns each trade-change marker into a monotonically
        # increasing "trade id" usable as a groupby key
        reset_points = reset_points.cumsum()
        # a frame of ones; cumsum'ing it per trade id yields period counts
        time_data_frame = pandas.DataFrame(index=signal_data_frame.index, columns=signal_data_frame.columns,
                                           data=numpy.ones(
                                               [len(signal_data_frame.index), len(signal_data_frame.columns)]))
        # make sure they have the same column names (otherwise issues around pandas calc - assume same ordering for cols)
        old_cols = time_data_frame.columns
        time_data_frame.columns = signal_data_frame_pushed.columns
        for c in reset_points.columns:
            time_data_frame[c + 'cumperiods'] = reset_points[c]
            time_data_frame[c] = time_data_frame.groupby([c + 'cumperiods'])[c].cumsum()
            time_data_frame = time_data_frame.drop([c + 'cumperiods'], axis=1)
        time_data_frame.columns = old_cols
        return time_data_frame
    def calculate_risk_stop_signals(self, signal_data_frame, cum_rets_trades, stop_loss, take_profit):
        """
        Amend trading signals to flatten positions once a stop loss or take
        profit level is breached.

        NOTE(review): ``signal_data_frame`` is modified in place and also
        returned.

        Parameters
        ----------
        signal_data_frame : DataFrame
            Contains all the trade signals (typically mix of 0, +1 and +1
        cum_rets_trades : DataFrame
            Cumulative returns of strategy reset at every new trade
        stop_loss : float (or DataFrame)
            Stop loss level eg. -0.02
        take_profit : float (or DataFrame)
            Take profit level eg. +0.03

        Returns
        -------
        DataFrame containing amended signals that take into account stops and take profits
        """
        signal_data_frame_pushed = signal_data_frame # signal_data_frame.shift(1)
        # non-zero wherever the signal changed, ie. a new trade started
        reset_points = ((signal_data_frame_pushed - signal_data_frame_pushed.shift(1)).abs())
        # points where the per-trade cumulative return breached a limit
        ind = (cum_rets_trades > take_profit) | (cum_rets_trades < stop_loss)
        # to allow indexing, need to match column names
        ind.columns = signal_data_frame.columns
        # flatten the position at every breach, and treat the breach as a
        # new "trade point" so the flat signal is carried forward
        signal_data_frame[ind] = 0
        reset_points[ind] = 1
        # between trade points, forward-fill the last established signal
        signal_data_frame[reset_points == 0] = numpy.nan
        signal_data_frame = signal_data_frame.ffill()
        # signal_data_frame = signal_data_frame.shift(-1)
        return signal_data_frame
def calculate_risk_stop_dynamic_signals(self, signal_data_frame, asset_data_frame, stop_loss_df, take_profit_df):
"""
Parameters
----------
signal_data_frame : DataFrame
Contains all the trade signals (typically mix of 0, +1 and +1
stop_loss_df : DataFrame
Continuous stop losses in the asset (in price amounts eg +2, +2.5, +2.6 USD - as opposed to percentages)
take_profit_df : DataFrame
Continuous take profits in the asset (in price amounts eg -2, -2.1, -2.5 USD - as opposed to percentages)
Returns
-------
DataFrame containing amended signals that take into account stops and take profits
"""
signal_data_frame_pushed = signal_data_frame # signal_data_frame.shift(1)
reset_points = ((signal_data_frame_pushed - signal_data_frame_pushed.shift(1)).abs())
# ensure all the inputs are pandas DataFrames (rather than mixture of Series and DataFrames)
asset_data_frame = pandas.DataFrame(asset_data_frame)
signal_data_frame = pandas.DataFrame(signal_data_frame)
stop_loss_df = pandas.DataFrame(stop_loss_df)
take_profit_df = pandas.DataFrame(take_profit_df)
non_trades = reset_points == 0
# need to temporarily change column names to allow indexing (ASSUMES: columns in same order!)
non_trades.columns = take_profit_df.columns
# where we don't have a trade fill with NaNs
take_profit_df[non_trades] = numpy.nan
non_trades.columns = stop_loss_df.columns
stop_loss_df[non_trades] = numpy.nan
asset_df_copy = asset_data_frame.copy(deep=True)
non_trades.columns = asset_df_copy.columns
asset_df_copy[non_trades] = numpy.nan
take_profit_df = take_profit_df.ffill()
stop_loss_df = stop_loss_df.ffill()
asset_df_copy = asset_df_copy.ffill()
# take profit for buys
ind1 = (asset_data_frame.values > (asset_df_copy.values + take_profit_df.values)) & (
signal_data_frame.values > 0)
# take profit for sells
ind2 = (asset_data_frame.values < (asset_df_copy.values - take_profit_df.values)) | |
#!/usr/bin/env python3
# Copyright 2019 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :train.py
# @author :jvo
# @contact :<EMAIL>
# @created :07/08/2019
# @version :1.0
# @python_version :3.6.8
"""
Continual learning of splitMNIST with hypernetworks.
-----------------------------------------------------
The module :mod:`mnist.train_splitMNIST` implements all training logic
for the MNIST experiments (splitMNIST, permutedMNIST).
See :ref:`README <mnist-readme-reference-label>`
for an overview how to use this script.
"""
# Do not delete the following import for all executable scripts!
import matplotlib
matplotlib.use('Agg')
import torch
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import os
import copy
from mnist.replay.train_gan import sample as sample_gan, train_gan_one_t
from mnist.replay.train_replay import run as replay_model
from mnist.replay.train_replay import train_vae_one_t, init_plotting_embedding
from mnist.replay.train_replay import sample as sample_vae
from mnist.train_args_default import _set_default
from mnist import train_utils
from mnist import train_args
from mnist.plotting import _plotImages
import mnist.hp_search_splitMNIST as hpsearch
from mnets.classifier_interface import Classifier
from utils import misc
import utils.optim_step as opstep
import utils.hnet_regularizer as hreg
def _save_performance_summary(config, train_iter=None):
    """Save a summary of the test results achieved so far in an easy-to-parse
    file (for humans and subsequent programs).

    Args:
        config: Command-line arguments.
        train_iter: (optional) The current training iteration. Note that the
            results written in the file have their own training iteration
            assigned.
    """
    if train_iter is None:
        train_iter = config.n_iter
    tp = dict()
    # When no replay model is used, zero out its weight statistics so the
    # corresponding summary columns stay well-defined.
    if config.upper_bound or (config.infer_task_id and config.cl_scenario == 1):
        config.num_weights_rp_net = 0
        config.num_weights_rp_hyper_net = 0
        config.compression_ratio_rp = 0
    tp["acc_after_list"] = misc.list_to_str(config.overall_acc_list)
    tp["acc_during_list"] = misc.list_to_str(config.during_accs_final)
    tp["acc_after_mean"] = config.acc_mean
    tp["acc_during_mean"] = sum(config.during_accs_final) / config.num_tasks
    tp["num_weights_class_net"] = config.num_weights_class_net
    tp["num_weights_rp_net"] = config.num_weights_rp_net
    tp["num_weights_rp_hyper_net"] = config.num_weights_rp_hyper_net
    tp["num_weights_class_hyper_net"] = config.num_weights_class_hyper_net
    tp["compression_ratio_rp"] = config.compression_ratio_rp
    tp["compression_ratio_class"] = config.compression_ratio_class
    tp["overall_task_infer_accuracy_list"] = \
        misc.list_to_str(config.overall_task_infer_accuracy_list)
    tp["acc_task_infer_mean"] = config.acc_task_infer_mean
    # Note, the keywords of this dictionary are defined by the array:
    # hpsearch._SUMMARY_KEYWORDS
    with open(os.path.join(config.out_dir,
                           hpsearch._SUMMARY_FILENAME), 'w') as f:
        assert ('num_train_iter' in hpsearch._SUMMARY_KEYWORDS)
        for kw in hpsearch._SUMMARY_KEYWORDS:
            if kw == 'num_train_iter':
                f.write('%s %d\n' % ('num_train_iter', train_iter))
                continue
            if kw == 'finished':
                # This flag is written by the hpsearch post-processing itself.
                continue
            try:
                # Most entries are scalars and are written as floats.
                f.write('%s %f\n' % (kw, tp[kw]))
            except (TypeError, ValueError):
                # List-valued entries (e.g. the accuracy lists) can't be
                # formatted with %f; fall back to their string form.
                # (The original used a bare ``except:``, which would also
                # have swallowed KeyboardInterrupt/SystemExit.)
                f.write('%s %s\n' % (kw, tp[kw]))
def test(dhandlers, class_nets, infer_net, device, config, writer,
task_id=None):
""" Test continual learning experiments on MNIST dataset. This can either
be splitMNIST or permutedMNIST.
Depending on the method and cl scenario used, this methods manages
to measure the test accuracy of a given task or all tasks after
training. In order to do so, correct targets need to be constructed
and output heads need to be set (or inferred).
Furthermore, this method distinguises between classification accuracy
on a task or on the accuracy to infer task id's if applicable.
Args:
(....): See docstring of function :func:`train_tasks`.
task_id: (optional) If not None, the method will compute and return
test acc for the the given task id, not all tasks.
Returns:
Scalar represting the test accuracy for the given task id.
If ``task_id`` is None, the accuracy of the last task of the cl
experiment is returned.
"""
# get hnet if this option is given
if class_nets is not None:
if config.training_with_hnet:
c_net_hnet = class_nets[1]
c_net = class_nets[0]
c_net.eval()
c_net_hnet.eval()
else:
c_net = class_nets
if infer_net is not None:
infer_net.eval()
with torch.no_grad():
overall_acc = 0
overall_acc_list = []
overall_task_infer_accuracy = 0
overall_task_infer_accuracy_list = []
# choose tasks to test
if task_id is not None:
task_range = range(task_id, task_id + 1)
else:
task_range = range(config.num_tasks)
# iterate through all old tasks
for t in task_range:
print("Testing task: ", t)
# reset data
if task_id is not None:
dhandler = dhandlers[0]
else:
dhandler = dhandlers[t]
# create some variables
N_processed = 0
test_size = dhandler.num_test_samples
# is task id has to be inferred, for every x we have to do that
# and therefore have one h(e) = W per data point - this is only
# possible with batch size one, for now
if (config.infer_task_id and infer_net is not None) or \
config.infer_with_entropy:
curr_bs = 1
else:
curr_bs = config.test_batch_size
classifier_accuracy = 0
task_infer_accuracy = 0
Y_hat_all = []
T_all = []
# go through test set
while N_processed < test_size:
# test size of tasks might be "arbitrary"
if N_processed + curr_bs > test_size:
curr_bs = test_size - N_processed
N_processed += curr_bs
# get data
real_batch = dhandler.next_test_batch(curr_bs)
X_real = dhandler.input_to_torch_tensor(real_batch[0], device,
mode='inference')
T_real = dhandler.output_to_torch_tensor(real_batch[1], device,
mode='inference')
# get short version of output dim
od = config.out_dim
#######################################
# SET THE OUTPUT HEAD / COMPUTE TARGETS
#######################################
# get dummy for easy access to the output dim of our main
# network as a dummy, only needed for the first iteration
if class_nets is not None:
if config.training_with_hnet:
weights_dummy = c_net_hnet.forward(0)
Y_dummies = c_net.forward(X_real, weights_dummy)
else:
Y_dummies = c_net.forward(X_real)
else:
Y_dummies = infer_net.forward(X_real)
# build one hots if this option was chosen
# here we build targets if only have one neuron per task
# which we set to 1
if config.class_incremental:
task_out = [0, config.num_tasks]
T_real = torch.zeros((Y_dummies.shape[0],
config.num_tasks)).to(device)
T_real[:, t] = 1
# compute targets - this is a bit unelegant, cl 3 requires hacks
elif config.cl_scenario == 1 or config.cl_scenario == 2:
if config.cl_scenario == 1:
# take the task specific output neuron
task_out = [t * od, t * od + od]
else:
# always all output neurons (only one head is used)
task_out = [0, od]
else:
# This here is the classic CL 3 scenario
# first we get the predictions, this is over all neurons
task_out = [0, config.num_tasks * od]
# Here we build the targets, this is zero everywhere
# except for the current task - here the correct target
# is inserted
# build the two zero tensors that surround the targets
zeros1 = torch.zeros(Y_dummies[:, 0:t * od].shape). \
to(device)
zeros2 = torch.zeros(Y_dummies[:, 0:(config.num_tasks \
- 1 - t) * od].shape).to(device)
T_real = torch.cat([zeros1, T_real, zeros2], dim=-1)
#################
# TASK PREDICTION
#################
# get task predictions
if config.cl_scenario != 1:
if infer_net is not None:
# get infer net to predict the apparent task id
task_pred = infer_net.forward(X_real)
task_pred = task_pred[:, 0:config.num_tasks]
task_pred = torch.sigmoid(task_pred)
_, inf_task_id = torch.max(task_pred, 1)
# measure acc of prediction
task_infer_accuracy += (inf_task_id == t).float()
elif config.infer_with_entropy and class_nets is not None \
and config.training_with_hnet:
entropies = []
if task_id is not None:
entrop_to_test = range(0, task_id + 1)
else:
entrop_to_test = range(config.num_tasks)
# infer task id through entropy of softmax outputs of
# different models
for e in entrop_to_test:
weights_c = c_net_hnet.forward(e)
Y_hat_logits = c_net.forward(X_real, weights_c)
if config.cl_scenario == 2:
task_out = [0, od]
else:
task_out = [e * od, e * od + od]
Y_hat = F.softmax(Y_hat_logits[:,
task_out[0]:task_out[1]] / config.soft_temp, -1)
entropy = -1 * torch.sum(Y_hat * torch.log(Y_hat))
entropies.append(entropy)
inf_task_id = torch.argmin(torch.stack(entropies))
task_infer_accuracy += (inf_task_id == t).float()
if config.cl_scenario == 3 and config.infer_output_head:
task_out = [inf_task_id * od, inf_task_id * od + od]
else:
# if task id is known, task inference acc is 100%
task_infer_accuracy += 1
inf_task_id = t
if class_nets is not None:
# from the given inf_task_id we try to produce the
# correct model for that tasks
if config.training_with_hnet:
weights_c = c_net_hnet.forward(inf_task_id)
Y_hat_logits = c_net.forward(X_real, weights_c)
else:
Y_hat_logits = c_net.forward(X_real)
#################
# CLASSIFICATION
#################
if class_nets is not None:
# save predictions of current batch
Y_hat_logits = Y_hat_logits[:, task_out[0]:task_out[1]]
Y_hat = F.softmax(Y_hat_logits, dim=1)
if config.cl_scenario == 3 and config.infer_output_head:
# this is the special case where the output head is
# inferred. Here we compute the argmax of the single
# head and add the number of previous neurons such that
# it coincides with the argmax of | |
0.5 or min_consensus > 1:
raise ValueError(
"Minimal consensus should be above 0.5 and under 1")
# Get lineages in REVERSED order
lineages = [Lineage(self[str(txd)], ascending=False)
for txd in taxid_list]
# Extend lineages so that they all are same size
maxlen = max([len(lin) for lin in lineages])
for lin in lineages:
if len(lin) < maxlen:
lin.extend([DummyNode()] * (maxlen - len(lin)))
# Iterate over ranks descending to find last node above consensus level
total = len(taxid_list)
i = 0
last = None
while i < maxlen:
count = Counter([lin[i] for lin in lineages])
mostCommon = count.most_common(1)
if mostCommon[0][1] / total >= min_consensus:
if not(isinstance(mostCommon[0][0], DummyNode)):
# save current succesful consensus, and check the next one
last = mostCommon[0][0]
i += 1
else:
break
return last
def lca(self, taxid_list: list[Union[str, int]]) -> Node:
"""
Get lowest common node of a bunch of taxids
Parameters
----------
taxid_list:
list of taxonomic identification numbers
See Also
--------
Taxonomy.consensus
Examples
--------
>>> node0 = Node(taxid = 0, name = "root",
rank = "root", parent = None)
>>> node1 = Node(taxid = 1, name = "node1",
rank = "rank1", parent = node0)
>>> node2 = Node(taxid = 2, name = "node2",
rank = "rank1", parent = node0)
>>> node11 = Node(taxid = 11, name = "node11",
rank = "rank2", parent = node1)
>>> node12 = Node(taxid = 12, name = "node12",
rank = "rank2", parent = node1)
>>> tax = Taxonomy.from_list([node0, node1, node2, node11, node12])
>>> tax.lca([11, 12, 2])
Node(0)
"""
return self.consensus(taxid_list, 1)
def distance(self, taxid1: Union[str, int],
taxid2: Union[str, int]) -> int:
"""
Measures the distance between two nodes.
Parameters
----------
taxid1:
Taxonomic identification number
taxid2:
Taxonomic identification number
Examples
--------
>>> node0 = Node(taxid = 0, name = "root",
rank = "root", parent = None)
>>> node1 = Node(taxid = 1, name = "node1",
rank = "rank1", parent = node0)
>>> node2 = Node(taxid = 2, name = "node2",
rank = "rank1", parent = node0)
>>> node11 = Node(taxid = 11, name = "node11",
rank = "rank2", parent = node1)
>>> node12 = Node(taxid = 12, name = "node12",
rank = "rank2", parent = node1)
>>> tax = Taxonomy.from_list([node0, node1, node2, node11, node12])
>>> tax.distance(11, 2)
3
>>> tax.distance(11, 12)
2
"""
lca = self.lca([str(taxid1), str(taxid2)]).taxid
d1 = len(Lineage(self[str(taxid1)])) - 1
d2 = len(Lineage(self[str(taxid2)])) - 1
dlca = len(Lineage(self[lca])) - 1
return d1 + d2 - 2 * dlca
def listDescendant(self, taxid: Union[str, int],
ranks: Optional[list] = None) -> set[Node]:
"""
List all descendant of a node
Parameters
----------
taxid:
Taxonomic identification number
ranks:
list of ranks for which to return nodes
Examples
--------
>>> node0 = Node(taxid = 0, name = "root",
rank = "root", parent = None)
>>> node1 = Node(taxid = 1, name = "node1",
rank = "rank1", parent = node0)
>>> node2 = Node(taxid = 2, name = "node2",
rank = "rank1", parent = node0)
>>> node11 = Node(taxid = 11, name = "node11", #
rank = "rank2", parent = node1)
>>> node12 = Node(taxid = 12, name = "node12",
rank = "rank2", parent = node1)
>>> tax = Taxonomy.from_list([node0, node1, node2, node11, node12])
>>> tax.listDescendant(1)
[Node(11), Node(12)]
>>> tax.listDescendant(2)
[]
"""
current = copy(self[str(taxid)].children)
# dont't want to update the original set!
next = _flatten([child.children for child in current])
all = current
while next:
all.update(next)
current = next
next = _flatten([child.children for child in current])
return all
def prune(self, taxid: Union[str, int]) -> None:
"""
Prune the Taxonomy at the given taxid
Nodes not in the lineage (upwards and downwards)
of the given taxid will be discarded.
The Ancestors of the given taxid will be kept!
Parameters
----------
taxid:
taxid whose Lineage to keep
Examples
--------
>>> node0 = Node(taxid = 0, name = "root",
rank = "root", parent = None)
>>> node1 = Node(taxid = 1, name = "node1",
rank = "rank1", parent = node0)
>>> node2 = Node(taxid = 2, name = "node2",
rank = "rank1", parent = node0)
>>> node11 = Node(taxid = 11, name = "node11",
rank = "rank2", parent = node1)
>>> node12 = Node(taxid = 12, name = "node12",
rank = "rank2", parent = node1)
>>> tax = Taxonomy.from_list([node0, node1, node2, node11, node12])
>>> tax.prune(1)
Ancestry is kept_
>>> tax.getAncestry(11)
Lineage([Node(11), Node(1), Node(0)])
But other branches are gone
>>> tax.get('2')
KeyError: '2'
"""
# Getting upstream nodes
nodes = self.getAncestry(taxid)
# Unlinking other branches from upstream nodes
# No need to change parents of the other nodes,
# they will be removed from Taxonomy
for i in range(1, len(nodes)):
nodes[i].children = [nodes[i - 1]]
# Adding all downstream nodes
nodes.extend(self.listDescendant(taxid))
# Update taxonomy
self.data = {node.taxid: node for node in nodes}
def filterRanks(self, ranks: list[str] = linne()) -> None:
"""
Filter a Taxonomy to keep only the ranks provided as arguments.
Modifies Taxonomy in-place to keep only the Nodes at the requested
ranks. Nodes will be modified to conserve linkage in the Taxonomy.
Parameters
----------
ranks:
List of ranks to keep. Must be sorted by ascending ranks.
Notes
-----
In order to enforce ankering of the Taxonomy, the root node will
always be kept.
Examples
--------
>>> node1 = Node(1, rank = "root")
>>> node11 = Node(11, rank = "rank1", parent = node1)
>>> node111 = Node(111, rank = "rank2", parent = node11)
>>> node001 = Node('001', rank = "rank2", parent = node1)
>>> tax = Taxonomy.from_list([node1, node11, node111, node001])
>>> tax.filterRanks(['rank2', 'rank1', 'root'])
>>> tax
{Node(1), Node(11), DummyNode(tO841ymu), Node(111), Node(001)}
DummyNodes are created s placeholders
for missing ranks in the taxonomy:
>>> node001.parent
DummyNode(tO841ymu)
Note that the root will be kept regardless of the input:
>>> node1 = Node(1, rank = "root")
>>> node11 = Node(11, rank = "rank1", parent = node1)
>>> node111 = Node(111, rank = "rank2", parent = node11)
>>> node001 = Node('001', rank = "rank2", parent = node1)
>>> tax = Taxonomy.from_list([node1, node11, node111, node001])
>>> tax.filterRanks(['rank2', 'rank1'])
>>> tax
{DummyNode(wmnar5QT), Node(001), Node(1), Node(11), Node(111)}
"""
# Create a list of nodes that will be used to update self
new_nodes = []
# First step, reduce tree
# Remove unwanted nodes
for node in self.values():
if node.rank in ranks:
new_nodes.append(node)
else:
try:
node._relink()
except TypeError:
# relinking a parent-less node raises TypeError
# The root will be kept whatever is asked to keep coherence
new_nodes.append(node)
# Second step, expand tree
# Reccursively add DummyNode to fill gaps
root = self.root
if ranks[-1] == self.root:
ranks = ranks[:-1]
new_nodes.extend(_insert_nodes_recc(root, ranks))
# Update self
self.data = {node.taxid: node for node in new_nodes}
def write(self, path: str) -> None:
"""
Write taxonomy to a JSON file.
Parameters
----------
path:
File path for the output
"""
writer = json.dumps([node._to_dict()
for node in self.values()],
indent=4)
with open(path, 'w') as fi:
fi.write(writer)
@property
def root(self) -> Node:
"""
Returns the root Node, assumes a single root shared by all Nodes
"""
anynode = next(iter(self.values()))
return Lineage(anynode)[-1]
def __repr__(self):
return f"{set(self.values())}"
def load(path: str) -> Taxonomy:
    """
    Load a Taxonomy from a previously exported json file.

    Thin module-level convenience wrapper around ``Taxonomy.from_json``.

    Parameters
    ----------
    path:
        Path of file to load

    See Also
    --------
    Taxonomy.write
    """
    taxonomy = Taxonomy.from_json(path)
    return taxonomy
def _parse_dump(filepath: str) -> Iterator:
    """
    Iterate over a dump file, yielding the stripped '|'-separated fields
    of each line as a list of strings.
    """
    with open(filepath, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.split("|")
            yield [field.strip() for field in fields]
def | |
= - model.ztilde[model.slayer - 1] / (4 * np.pi)
amp_tm_ez = (model.ry - model.sy) / (4 * np.pi \
* model.ytilde[model.rlayer - 1] * model.r)
amp_tm_hx_g_1 = (model.ry - model.sy) ** 2 \
/ (4 * np.pi * model.r ** 2)
amp_tm_hx_g_2 = - (2 * (model.ry - model.sy) ** 2 / model.r ** 3 \
- 1 / model.r) / (4 * np.pi)
amp_te_hx_g_1 = + (model.ry - model.sy) ** 2 \
/ (4 * np.pi * model.r ** 2) \
* model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1]
amp_te_hx_g_2 = - (2 * (model.ry - model.sy) ** 2 / model.r ** 3 \
- 1 / model.r) / (4 * np.pi) \
* model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1]
amp_te_hx_line = -model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1] / (4 * np.pi)
amp_tm_hy_g_1 = - (model.rx - model.sx) * (model.ry - model.sy) \
/ (4 * np.pi * model.r ** 2)
amp_tm_hy_g_2 = (model.rx - model.sx) * (model.ry - model.sy) \
/ (2 * np.pi * model.r ** 3)
amp_te_hy_g_1 = - (model.rx - model.sx) * (model.ry - model.sy) \
/ (4 * np.pi * model.r ** 2) \
* model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1]
amp_te_hy_g_2 = (model.rx - model.sx) * (model.ry - model.sy) \
/ (2 * np.pi * model.r ** 3) \
* model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1]
amp_te_hz_line = -model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1] \
* (model.rx - model.sx) / (4 * np.pi * model.r)
ans["e_x"] = amp_tm_ex_g_1 * tm_er_1 + amp_tm_ex_g_2 * tm_er_2 \
+ amp_te_ex_g_1 * te_er_1 + amp_te_ex_g_2 * te_er_2
ans["e_y"] = amp_tm_ey_g_1 * tm_er_1 + amp_tm_ey_g_2 * tm_er_2 \
+ amp_te_ey_g_1 * te_er_1 + amp_te_ey_g_2 * te_er_2 \
+ amp_te_ey_line * te_er_1
ans["e_z"] = amp_tm_ez * tm_ez
ans["h_x"] = amp_tm_hx_g_1 * tm_hr_1 + amp_tm_hx_g_2 * tm_hr_2 \
+ amp_te_hx_g_1 * te_hr_1 + amp_te_hx_g_2 * te_hr_2 \
+ amp_te_hx_line * te_hr_1
ans["h_y"] = amp_tm_hy_g_1 * tm_hr_1 + amp_tm_hy_g_2 * tm_hr_2 \
+ amp_te_hy_g_1 * te_hr_1 + amp_te_hy_g_2 * te_hr_2
ans["h_z"] = amp_te_hz_line * te_hz
return ans
@staticmethod
def circular_loop(model, omega):
"""
"""
y_base, wt0, wt1 = filters.load_hankel_filter(model.hankel_filter)
model.filter_length = len(y_base)
model.lambda_ = y_base / model.src.radius
kernel = kernels.compute_kernel_circular(model, omega)
ans = {}
e_phai = np.dot(wt1, kernel[0]) / model.src.radius
h_r = np.dot(wt1, kernel[1]) / model.src.radius
h_z = np.dot(wt1, kernel[2]) / model.src.radius
ans["e_x"] = model.ztilde[model.slayer - 1] * model.src.radius\
* model.sin_phi / 2 * e_phai
ans["e_y"] = -model.ztilde[model.slayer - 1] * model.src.radius\
* model.cos_phi / 2 * e_phai
ans["e_z"] = 0
ans["h_x"] = -model.src.radius * model.ztilde[model.slayer - 1]\
/ model.ztilde[model.rlayer - 1] \
* model.cos_phi / 2 * h_r
ans["h_y"] = -model.src.radius * model.ztilde[model.slayer - 1]\
/ model.ztilde[model.rlayer - 1] \
* model.sin_phi / 2 * h_r
ans["h_z"] = model.src.radius * model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1] / 2 * h_z
return ans
@staticmethod
def coincident_loop(model, omega):
"""
"""
y_base, wt0, wt1 = filters.load_hankel_filter(model.hankel_filter)
model.filter_length = len(y_base)
model.lambda_ = y_base / model.r
kernel = kernels.compute_kernel_coincident(model, omega)
ans = {}
h_z_co = np.dot(wt1, kernel[0]) / model.src.radius
ans["e_x"] = 0
ans["e_y"] = 0
ans["e_z"] = 0
ans["h_x"] = 0
ans["h_y"] = 0
ans["h_z"] = (1 * np.pi * model.src.radius ** 2 * h_z_co)
return ans
    @staticmethod
    def grounded_wire(model, omega):
        """
        Compute the EM field of a grounded-wire source at angular frequency
        ``omega``. The wire is split into ``model.src.nsplit`` dipole
        segments; end-point (electrode) terms and along-wire line integrals
        are superposed, then rotated from the wire frame to the model frame.
        """
        y_base, wt0, wt1 = filters.load_hankel_filter(model.hankel_filter)
        model.filter_length = len(y_base)
        # one column of filter abscissae per wire segment
        y_base_wire = np.ones((model.filter_length, model.src.nsplit)) \
            * np.array([y_base]).T
        lambda_ = y_base_wire / model.rn
        kernel = np.zeros((6, model.filter_length, model.src.nsplit), dtype=complex)
        # evaluate the HED kernel once per segment, then restore the full grid
        for i in range(model.src.nsplit):
            model.lambda_ = lambda_[:,i]
            kernel[:,:,i] = kernels.compute_kernel_hed(model, omega)
        model.lambda_ = lambda_
        # Hankel integrals at the first and last segments (the electrodes)
        tm_er_g_first = np.dot(wt1, kernel[0][:, 0]) / model.rn[0]
        tm_er_g_end = np.dot(wt1, kernel[0][:, model.src.nsplit - 1]) \
            / model.rn[model.src.nsplit - 1]
        te_er_g_first = np.dot(wt1, kernel[1][:, 0]) / model.rn[0]
        te_er_g_end = np.dot(wt1, kernel[1][:, model.src.nsplit - 1]) \
            / model.rn[model.src.nsplit - 1]
        tm_ez_1 = np.dot(wt0, kernel[2][:, 0] * model.lambda_[:, 0]) \
            / model.rn[0]
        tm_ez_2 = np.dot(wt0, kernel[2][:, model.src.nsplit - 1] \
                         * model.lambda_[:, model.src.nsplit - 1]) \
            / model.rn[model.src.nsplit - 1]
        tm_hr_g_first = np.dot(wt1, kernel[3][:, 0]) / model.rn[0]
        tm_hr_g_end = np.dot(wt1, kernel[3][:, model.src.nsplit - 1]) \
            / model.rn[model.src.nsplit - 1]
        te_hr_g_first = np.dot(wt1, kernel[4][:, 0]) / model.rn[0]
        te_hr_g_end = np.dot(wt1, kernel[4][:, model.src.nsplit - 1]) \
            / model.rn[model.src.nsplit - 1]
        # line-integral terms evaluated at every segment
        te_hz_l = np.dot(wt1, kernel[5] * model.lambda_ ** 2) / model.rn
        te_ex_l = np.dot(wt0, kernel[1] * model.lambda_) / model.rn
        te_hy_l = np.dot(wt0, kernel[4] * model.lambda_) / model.rn
        # geometric amplitude factors for each field component;
        # "_1" terms use the first electrode, "_2" the last
        amp_tm_ex_1 = (model.xx[0] / model.rn[0]) \
            / (4 * np.pi * model.ytilde[model.rlayer - 1])
        amp_tm_ex_2 = (-model.xx[model.src.nsplit-1] \
                       / model.rn[model.src.nsplit-1]) \
            / (4 * np.pi * model.ytilde[model.rlayer - 1])
        amp_te_ex_1 = (model.xx[0] / model.rn[0]) \
            * model.ztilde[model.slayer - 1] / (4 * np.pi)
        amp_te_ex_2 = (-model.xx[model.src.nsplit-1] \
                       / model.rn[model.src.nsplit-1]) \
            * model.ztilde[model.slayer - 1] / (4 * np.pi)
        te_ex_line = -model.ztilde[model.slayer - 1] / (4 * np.pi)
        amp_tm_ey_1 = (model.yy[0] / model.rn[0]) \
            / (4 * np.pi * model.ytilde[model.rlayer - 1])
        amp_tm_ey_2 = (-model.yy[model.src.nsplit-1] \
                       / model.rn[model.src.nsplit-1]) \
            / (4 * np.pi * model.ytilde[model.rlayer - 1])
        amp_te_ey_1 = (model.yy[0] / model.rn[0]) \
            * model.ztilde[model.slayer - 1] / (4 * np.pi)
        amp_te_ey_2 = (-model.yy[model.src.nsplit-1] \
                       / model.rn[model.src.nsplit-1]) \
            * model.ztilde[model.slayer - 1] / (4 * np.pi)
        amp_tm_ez_1 = 1 / (4 * np.pi * model.ytilde[model.rlayer - 1])
        amp_tm_ez_2 = -1 / (4 * np.pi * model.ytilde[model.rlayer - 1])
        amp_tm_hx_1 = 1 / (4 * np.pi) * model.yy[0] / model.rn[0]
        amp_tm_hx_2 = - 1 / (4 *np.pi) * model.yy[model.src.nsplit-1] \
            / model.rn[model.src.nsplit-1]
        amp_te_hx_1 = model.ztilde[model.slayer - 1] \
            / model.ztilde[model.rlayer - 1] / (4 * np.pi) \
            * model.yy[0] / model.rn[0]
        amp_te_hx_2 = - model.ztilde[model.slayer - 1] \
            / model.ztilde[model.rlayer - 1] / (4 *np.pi) \
            * model.yy[model.src.nsplit-1] \
            / model.rn[model.src.nsplit-1]
        amp_tm_hy_1 = -1 / (4 * np.pi) * model.xx[0] / model.rn[0]
        amp_tm_hy_2 = 1 / ( 4 *np.pi) * model.xx[model.src.nsplit-1] \
            / model.rn[model.src.nsplit-1]
        amp_te_hy_1 = -model.ztilde[model.slayer - 1] \
            / model.ztilde[model.rlayer - 1] / (4 * np.pi) \
            * model.xx[0] / model.rn[0]
        amp_te_hy_2 = model.ztilde[model.slayer - 1] \
            / model.ztilde[model.rlayer - 1] / (4 * np.pi) \
            * model.xx[model.src.nsplit-1] \
            / model.rn[model.src.nsplit-1]
        te_hy_line = model.ztilde[model.slayer - 1] \
            / model.ztilde[model.rlayer - 1] / (4 * np.pi)
        # fields assembled in the wire-aligned coordinate frame
        rot_ans = {}
        rot_ans["e_x"] = (amp_tm_ex_1 * tm_er_g_first \
                          + amp_tm_ex_2 * tm_er_g_end \
                          + amp_te_ex_1 * te_er_g_first \
                          + amp_te_ex_2 * te_er_g_end) \
            + te_ex_line * model.ds \
            * np.dot(te_ex_l, np.ones((model.src.nsplit)))
        rot_ans["e_y"] = amp_tm_ey_1 * tm_er_g_first + amp_tm_ey_2 * tm_er_g_end \
            + amp_te_ey_1 * te_er_g_first \
            + amp_te_ey_2 * te_er_g_end
        rot_ans["e_z"] = amp_tm_ez_1 * tm_ez_1 + amp_tm_ez_2 * tm_ez_2
        rot_ans["h_x"] = (amp_tm_hx_1 * tm_hr_g_first \
                          + amp_tm_hx_2 * tm_hr_g_end \
                          + amp_te_hx_1 * te_hr_g_first \
                          + amp_te_hx_2 * te_hr_g_end)
        rot_ans["h_y"] = amp_tm_hy_1 * tm_hr_g_first \
            + amp_tm_hy_2 * tm_hr_g_end \
            + amp_te_hy_1 * te_hr_g_first \
            + amp_te_hy_2 * te_hr_g_end \
            + te_hy_line * model.ds \
            * np.dot(te_hy_l, np.ones((model.src.nsplit)))
        rot_ans["h_z"] = np.dot(model.ztilde[model.slayer - 1] \
                                / model.ztilde[model.rlayer - 1] \
                                * model.yy / model.rn * model.ds / (4*np.pi) \
                                ,te_hz_l.T)
        # rotate from the wire frame back to the model frame
        ans = {}
        ans["e_x"] = model.cos_theta * rot_ans["e_x"] - model.sin_theta * rot_ans["e_y"]
        ans["e_y"] = model.cos_theta * rot_ans["e_y"] + model.sin_theta * rot_ans["e_x"]
        ans["e_z"] = rot_ans["e_z"]
        ans["h_x"] = model.cos_theta * rot_ans["h_x"] - model.sin_theta * rot_ans["h_y"]
        ans["h_y"] = model.cos_theta * rot_ans["h_y"] + model.sin_theta * rot_ans["h_x"]
        ans["h_z"] = rot_ans["h_z"]
        return ans
@staticmethod
def loop_source(model, omega):
"""
"""
y_base, wt0, wt1 = filters.load_hankel_filter(model.hankel_filter)
model.filter_length = len(y_base)
model.lambda_ = y_base / model.r
kernel = kernels.compute_kernel_hed(model, omega)
ans = {}
te_ex_l = np.dot(wt0, kernel[1] * model.lambda_) / model.rn
te_hy_l = np.dot(wt0, kernel[4] * model.lambda_) / model.rn
te_hz_l = np.dot(wt1, kernel[5] * model.lambda_ ** 2) / model.rn
te_ex_line = -model.ztilde[model.slayer - 1] / (4 * np.pi)
te_hy_line = model.ztilde[model.slayer - 1] \
/ model.ztilde[model.rlayer - 1] / (4 * np.pi)
ans["e_x"] = te_ex_line * model.ds \
* np.dot(te_ex_l, np.ones((model.src.num_dipole,1)))
ans["e_y"] = 0
ans["e_z"] = 0
ans["h_x"] = 0
ans["h_y"] = te_hy_line * model.ds \
* np.dot(te_hy_l, np.ones((model.src.num_dipole,1)))
ans["h_z"] | |
<gh_stars>0
import json
import numpy as np
from PyQt5 import QtCore, QtGui, QtWidgets
from sscanss.core.instrument import Link, circle_point_analysis, generate_description
from sscanss.core.math import clamp
from sscanss.core.util import create_scroll_area
from .widgets import ScriptWidget, JawsWidget, PositionerWidget, DetectorWidget
class Controls(QtWidgets.QDialog):
    """Creates a widget that creates and manages the instrument control widgets.
    The widget creates instrument controls if the instrument description file is correct,
    otherwise the widget will be blank.
    :param parent: main window instance
    :type parent: MainWindow
    """
    def __init__(self, parent):
        super().__init__(parent)
        self.parent = parent
        self.setWindowTitle('Instrument Control')
        layout = QtWidgets.QVBoxLayout()
        self.setLayout(layout)
        self.tabs = QtWidgets.QTabWidget()
        self.tabs.setMinimumWidth(600)
        self.tabs.setMinimumHeight(600)
        self.tabs.tabBarClicked.connect(self.updateTabs)
        layout.addWidget(self.tabs)
        # UI state remembered across createWidgets() calls so the dialog can
        # restore the active tab/stack/collimators after a rebuild
        self.last_tab_index = 0
        self.last_stack_name = ''
        self.last_collimator_name = {}
    def createWidgets(self):
        """Creates widgets for positioner, jaws, detector, and script"""
        self.tabs.clear()
        positioner_widget = PositionerWidget(self.parent)
        # restore the previously selected stack if it still exists
        if self.last_stack_name and self.last_stack_name in self.parent.instrument.positioning_stacks.keys():
            positioner_widget.changeStack(self.last_stack_name)
        positioner_widget.stack_combobox.activated[str].connect(self.setStack)
        self.tabs.addTab(create_scroll_area(positioner_widget), 'Positioner')
        self.tabs.addTab(create_scroll_area(JawsWidget(self.parent)), 'Jaws')
        collimator_names = {}
        for name in self.parent.instrument.detectors:
            pretty_name = name if name.lower() == 'detector' else f'{name} Detector'
            detector_widget = DetectorWidget(self.parent, name)
            self.tabs.addTab(create_scroll_area(detector_widget), pretty_name)
            # restore the previously selected collimator for this detector
            collimator_name = self.last_collimator_name.get(name, '')
            if collimator_name:
                collimator_names[name] = collimator_name
                detector_widget.combobox.setCurrentText(collimator_name)
                detector_widget.changeCollimator()
            # signal connected after the programmatic restore above
            detector_widget.collimator_changed.connect(self.setCollimator)
        self.last_collimator_name = collimator_names
        self.script_widget = ScriptWidget(self.parent)
        self.tabs.addTab(create_scroll_area(self.script_widget), 'Script')
        # NOTE(review): the upper clamp bound is count(), one past the last
        # valid tab index — confirm count() - 1 was not intended here
        self.tabs.setCurrentIndex(clamp(self.last_tab_index, 0, self.tabs.count()))
    def reset(self):
        """Resets stored states"""
        self.last_tab_index = 0
        self.last_stack_name = ''
        self.last_collimator_name = {}
    def setStack(self, stack_name):
        """Stores the last loaded positioning stack. This preserves the active stack
        between description file modifications
        :param stack_name: name of active positioning stack
        :type stack_name: str
        """
        self.last_stack_name = stack_name
    def setCollimator(self, detector, collimator_name):
        """Stores the last loaded collimator on a detector. This preserves the active
        collimator between description file modifications
        :param detector: name of detector
        :type detector: str
        :param collimator_name: name of active collimator
        :type collimator_name: str
        """
        self.last_collimator_name[detector] = collimator_name
    def updateTabs(self, index):
        """Stores the last open tab.
        :param index: tab index
        :type index: int
        """
        self.last_tab_index = index
        # regenerate the script preview whenever the Script tab is opened
        if self.tabs.tabText(index) == 'Script':
            self.script_widget.updateScript()
class CalibrationWidget(QtWidgets.QDialog):
    """Creates a widget for performing kinematic calibration and displaying the residual errors.
    :param parent: main window instance
    :type parent: MainWindow
    :param points: measured 3D points for each joint
    :type points: List[numpy.ndarray]
    :param joint_types: types of each joint
    :type joint_types: List[Link.Type]
    :param joint_offsets: measured offsets for each measurement
    :type joint_offsets: List[numpy.ndarray]
    :param joint_homes: home position for each measurement
    :type joint_homes: List[float]
    """
    def __init__(self, parent, points, joint_types, joint_offsets, joint_homes):
        super().__init__(parent)
        self.points = points
        self.offsets = joint_offsets
        self.robot_name = 'Positioning System'
        # default display order and names; editable through the form
        self.order = list(range(len(points)))
        self.names = [f'Joint {i + 1}' for i in range(len(points))]
        self.homes = joint_homes
        self.types = joint_types
        main_layout = QtWidgets.QVBoxLayout()
        self.setLayout(main_layout)
        # create stacked layout
        # page 0: calibration form, page 1: result tables
        self.stack = QtWidgets.QStackedLayout()
        main_layout.addLayout(self.stack)
        self.stack1 = QtWidgets.QWidget()
        self.stack2 = QtWidgets.QWidget()
        self.stack.addWidget(self.stack1)
        self.stack.addWidget(self.stack2)
        self.createCalibrationForm()
        self.createResultTable()
        # NOTE(review): redundant — setLayout(main_layout) was already called above
        self.setLayout(main_layout)
        self.setMinimumSize(800, 720)
        self.setWindowTitle('Kinematic Calibration')
    def calibrate(self):
        """Creates kinematic model of a robot from measurement and displays result"""
        self.results = circle_point_analysis(self.points, self.types, self.offsets, self.homes)
        self.json = generate_description(self.robot_name, self.results.base, self.results.tool, self.order, self.names,
                                         self.types, self.results.joint_axes, self.results.joint_origins, self.homes,
                                         self.offsets)
        self.displayResiduals()
        # switch to the result page
        self.stack.setCurrentIndex(1)
    def changeRobotName(self, value):
        """Changes name of the robot
        :param value: name of robot
        :type value: str
        """
        self.robot_name = value
        self.validateForm()
    def changeOrder(self, values):
        """Changes the display order of joints
        :param values: joint indices arranged in the display order
        :type values: List[int]
        """
        size = len(self.points)
        try:
            # user input is 1-based and comma separated; convert to 0-based
            order = [int(value) - 1 for value in values.split(',')]
            # the order must be a permutation of all joint indices
            if len(set(order)) != size:
                raise ValueError
            if min(order) != 0 or max(order) != size - 1:
                raise ValueError
            self.order = order
        except ValueError:
            # an empty order marks the input as invalid for validateForm
            self.order = []
        self.validateForm()
    def changeJointNames(self, index, value):
        """Changes the name of the joint at given index
        :param index: joint index
        :type index: int
        :param value: joint name
        :type value: str
        """
        self.names[index] = value
        self.validateForm()
    def changeHome(self, index, value):
        """Changes the home position of the joint at given index
        :param index: joint index
        :type index: int
        :param value: joint home position
        :type value: str
        """
        self.homes[index] = value
    def changeType(self, index, value):
        """Changes the type of the joint at given index
        :param index: joint index
        :type index: int
        :param value: joint type
        :type value: str
        """
        self.types[index] = Link.Type(value.lower())
    def createCalibrationForm(self):
        """Creates inputs for the calibration arguments"""
        layout = QtWidgets.QVBoxLayout()
        layout.setSpacing(5)
        layout.setContentsMargins(0, 0, 0, 0)
        row_layout = QtWidgets.QHBoxLayout()
        self.error_text = QtWidgets.QLabel()
        self.error_text.setWordWrap(True)
        self.error_text.setStyleSheet('color: red;')
        self.calibrate_button = QtWidgets.QPushButton('Generate Model')
        self.calibrate_button.clicked.connect(self.calibrate)
        row_layout.addStretch(1)
        row_layout.addWidget(self.error_text, 4)
        row_layout.addStretch(1)
        row_layout.addWidget(self.calibrate_button)
        layout.addLayout(row_layout)
        layout.addSpacing(10)
        row_layout = QtWidgets.QHBoxLayout()
        row_layout.addWidget(QtWidgets.QLabel('Name of Positioner:\t'))
        name_line_edit = QtWidgets.QLineEdit(self.robot_name)
        name_line_edit.textChanged.connect(self.changeRobotName)
        row_layout.addWidget(name_line_edit, 2)
        row_layout.addStretch(1)
        layout.addLayout(row_layout)
        row_layout = QtWidgets.QHBoxLayout()
        order_line_edit = QtWidgets.QLineEdit(','.join(str(x + 1) for x in self.order))
        order_line_edit.textChanged.connect(self.changeOrder)
        row_layout.addWidget(QtWidgets.QLabel('Custom Order:\t'))
        row_layout.addWidget(order_line_edit, 2)
        row_layout.addStretch(1)
        layout.addLayout(row_layout)
        divider = QtWidgets.QFrame()
        divider.setFrameShape(QtWidgets.QFrame.HLine)
        divider.setFrameShadow(QtWidgets.QFrame.Sunken)
        layout.addSpacing(10)
        layout.addWidget(QtWidgets.QLabel('<p style="font-size:14px">Joint Information</p>'))
        layout.addWidget(divider)
        layout.addSpacing(5)
        # Define Scroll Area
        scroll_area = QtWidgets.QScrollArea()
        scroll_area.setWidgetResizable(True)
        scroll_area.setFrameShape(QtWidgets.QFrame.NoFrame)
        widget = QtWidgets.QWidget()
        sub_layout = QtWidgets.QVBoxLayout()
        sub_layout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        widget.setLayout(sub_layout)
        sub_layout.setSpacing(5)
        sub_layout.setContentsMargins(10, 0, 10, 0)
        scroll_area.setWidget(widget)
        layout.addWidget(scroll_area)
        # one name/type/home input group per joint;
        # `index=i` freezes the loop variable in each lambda
        # (avoids the late-binding closure pitfall)
        for i in range(len(self.points)):
            name_line_edit = QtWidgets.QLineEdit(self.names[i])
            name_line_edit.textChanged.connect(lambda value, index=i: self.changeJointNames(index, value))
            joint_type_combobox = QtWidgets.QComboBox()
            joint_type_combobox.setView(QtWidgets.QListView())
            joint_type_combobox.addItems([t.value.title() for t in Link.Type])
            joint_type_combobox.setCurrentText(self.types[i].value.title())
            joint_type_combobox.currentTextChanged.connect(lambda value, index=i: self.changeType(index, value))
            joint_home_spinbox = QtWidgets.QDoubleSpinBox()
            joint_home_spinbox.setDecimals(3)
            joint_home_spinbox.setRange(-10000, 10000)
            joint_home_spinbox.setValue(self.homes[i])
            joint_home_spinbox.valueChanged.connect(lambda value, index=i: self.changeHome(index, value))
            row_layout = QtWidgets.QHBoxLayout()
            column_layout = QtWidgets.QVBoxLayout()
            column_layout.addWidget(QtWidgets.QLabel(f'Name of Joint {i + 1}:\t'))
            column_layout.addWidget(name_line_edit)
            column_layout.addStretch(1)
            row_layout.addLayout(column_layout, 4)
            row_layout.addStretch(1)
            sub_layout.addLayout(row_layout)
            row_layout = QtWidgets.QHBoxLayout()
            column_layout = QtWidgets.QVBoxLayout()
            column_layout.addWidget(QtWidgets.QLabel(f'Type of Joint {i + 1}:\t'))
            column_layout.addWidget(joint_type_combobox)
            column_layout.addStretch(1)
            row_layout.addLayout(column_layout, 2)
            column_layout = QtWidgets.QVBoxLayout()
            column_layout.addWidget(QtWidgets.QLabel(f'Home for Joint {i + 1}:\t'))
            column_layout.addWidget(joint_home_spinbox)
            column_layout.addStretch(1)
            row_layout.addLayout(column_layout, 2)
            row_layout.addStretch(1)
            sub_layout.addLayout(row_layout)
            divider = QtWidgets.QFrame()
            divider.setFrameShape(QtWidgets.QFrame.HLine)
            divider.setFrameShadow(QtWidgets.QFrame.Sunken)
            sub_layout.addWidget(divider)
        sub_layout.addStretch(1)
        self.stack1.setLayout(layout)
    def copyModel(self):
        """Copies json description of robot to the clipboard"""
        QtWidgets.QApplication.clipboard().setText(json.dumps(self.json, indent=2))
    def saveModel(self):
        """Saves json description of the robot to file"""
        filename, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Choose a file name", '.', "JSON File (*.json)")
        if not filename:
            return
        with open(filename, 'w') as json_file:
            json.dump(self.json, json_file, indent=2)
    def createResultTable(self):
        """Creates widget to show calibration errors"""
        layout = QtWidgets.QVBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        sub_layout = QtWidgets.QHBoxLayout()
        self.filter_combobox = QtWidgets.QComboBox()
        self.filter_combobox.setView(QtWidgets.QListView())
        # first entry shows every joint, then one entry per joint
        self.filter_combobox.addItems(['All', *[f'{i + 1}' for i in range(len(self.points))]])
        self.filter_combobox.currentIndexChanged.connect(self.displayResiduals)
        self.copy_model_button = QtWidgets.QPushButton('Copy Model')
        self.copy_model_button.clicked.connect(self.copyModel)
        self.save_model_button = QtWidgets.QPushButton('Save Model')
        self.save_model_button.clicked.connect(self.saveModel)
        sub_layout.addWidget(QtWidgets.QLabel('Show Joint: '))
        sub_layout.addWidget(self.filter_combobox)
        sub_layout.addStretch(1)
        sub_layout.addWidget(self.copy_model_button)
        sub_layout.addWidget(self.save_model_button)
        layout.addLayout(sub_layout)
        layout.addSpacing(10)
        self.result_label = QtWidgets.QLabel()
        self.tabs = QtWidgets.QTabWidget()
        self.tabs.setTabPosition(QtWidgets.QTabWidget.West)
        self.model_error_table = QtWidgets.QTableWidget()
        self.model_error_table.setColumnCount(4)
        self.model_error_table.setHorizontalHeaderLabels(['X', 'Y', 'Z', 'Norm'])
        self.model_error_table.setAlternatingRowColors(True)
        self.model_error_table.setMinimumHeight(300)
        self.model_error_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.model_error_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.model_error_table.horizontalHeader().setMinimumSectionSize(40)
        self.model_error_table.horizontalHeader().setDefaultSectionSize(40)
        self.tabs.addTab(self.model_error_table, 'Model Error')
        self.fit_error_table = QtWidgets.QTableWidget()
        self.fit_error_table.setColumnCount(4)
        self.fit_error_table.setHorizontalHeaderLabels(['X', 'Y', 'Z', 'Norm'])
        self.fit_error_table.setAlternatingRowColors(True)
        self.fit_error_table.setMinimumHeight(300)
        self.fit_error_table.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.fit_error_table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
        self.fit_error_table.horizontalHeader().setMinimumSectionSize(40)
        self.fit_error_table.horizontalHeader().setDefaultSectionSize(40)
        self.tabs.addTab(self.fit_error_table, 'Fitting Error')
        self.tabs.currentChanged.connect(self.displayResiduals)
        layout.addWidget(self.result_label)
        layout.addWidget(self.tabs)
        self.stack2.setLayout(layout)
    def displayResiduals(self):
        """Populates table widgets with residual error"""
        tol = 0.1  # residual tolerance used for colour coding
        active_tab = self.tabs.currentIndex()
        # combobox index 0 is 'All'; -1 therefore means "show every joint"
        joint_to_show = self.filter_combobox.currentIndex() - 1
        if active_tab == 0:
            table = self.model_error_table
            residuals = self.results.model_errors
        else:
            table = self.fit_error_table
            residuals = self.results.fit_errors
        residuals = np.vstack(residuals) if joint_to_show == -1 else residuals[joint_to_show]
        norm = np.linalg.norm(residuals, axis=1)
        result_text = '<p style="font-size:14px">The Average {} is ' \
                      '<span style="color:{};font-weight:500;">{:.3f}</span> mm</p>'
        mean = np.mean(norm)
        colour = 'Tomato' if mean > tol else 'SeaGreen'
        self.result_label.setText(result_text.format(self.tabs.tabText(active_tab), colour, mean))
        table.setRowCount(residuals.shape[0])
        # highlight any component (or norm) exceeding the tolerance in red
        for row, vector in enumerate(residuals):
            x = QtWidgets.QTableWidgetItem(f'{vector[0]:.3f}')
            x.setTextAlignment(QtCore.Qt.AlignCenter)
            y = QtWidgets.QTableWidgetItem(f'{vector[1]:.3f}')
            y.setTextAlignment(QtCore.Qt.AlignCenter)
            z = QtWidgets.QTableWidgetItem(f'{vector[2]:.3f}')
            z.setTextAlignment(QtCore.Qt.AlignCenter)
            n = QtWidgets.QTableWidgetItem(f'{norm[row]:.3f}')
            n.setTextAlignment(QtCore.Qt.AlignCenter)
            tomato = QtGui.QBrush(QtGui.QColor('Tomato'))
            if abs(vector[0]) > tol:
                x.setData(QtCore.Qt.BackgroundRole, tomato)
            if abs(vector[1]) > tol:
                y.setData(QtCore.Qt.BackgroundRole, tomato)
            if abs(vector[2]) > tol:
                z.setData(QtCore.Qt.BackgroundRole, tomato)
            if norm[row] > tol:
                n.setData(QtCore.Qt.BackgroundRole, tomato)
            table.setItem(row, 0, x)
            table.setItem(row, 1, y)
            table.setItem(row, 2, z)
            table.setItem(row, 3, n)
    def validateForm(self):
        """Validates calibration inputs"""
        error = []
        size = len(self.names)
        if not self.robot_name:
            error.append('"Name of Positioner" cannot be blank')
        if not self.order:
            error.append(f'"Custom order" should contain comma separated indices for joints 1 to {size}.')
        for index, name in enumerate(self.names):
            if not name:
                error.append(f'"Name of Joint {index + 1}" cannot be blank')
        if len(set(self.names)) != len(self.names):
            error.append('Joint names must be unique')
        self.error_text.setText('\n'.join(error))
        # calibration only enabled when every input is valid
        self.calibrate_button.setDisabled(len(error) != 0)
class FindWidget(QtWidgets.QDialog):
"""Creates a widget that searches the Instrument file text and highlights the next occurrence.
Can chose to match case, or require search to be the whole word
:param parent: main window instance
:type parent: MainWindow
"""
def __init__(self, parent):
    """Build the Find dialog and wire its widgets to the search logic.

    :param parent: main window instance
    :type parent: MainWindow
    """
    super().__init__(parent)
    self.setWindowTitle('Find')

    self.search_box = QtWidgets.QLineEdit()
    self.search_box.setPlaceholderText("Search..")
    self.match_case = QtWidgets.QCheckBox()
    self.match_case.setText("Match Case")
    self.whole_word = QtWidgets.QCheckBox()
    self.whole_word.setText("Match whole word")
    self.find_button = QtWidgets.QPushButton("Find")
    self.status_box = QtWidgets.QLabel()

    # Fix: search() reads this flag on every click, but nothing set it before
    # the first textChanged/stateChanged signal fired, so pressing "Find"
    # immediately after opening the dialog raised AttributeError. The spelling
    # deliberately matches the existing 'fist_search_flag' name used by
    # search() so both sides stay consistent.
    self.fist_search_flag = True

    # Any change to the query or options restarts the search from the top.
    self.search_box.textChanged.connect(self.resetSearch)
    self.match_case.stateChanged.connect(self.resetSearch)
    self.whole_word.stateChanged.connect(self.resetSearch)
    self.find_button.clicked.connect(self.search)

    layout = QtWidgets.QGridLayout()
    layout.addWidget(self.search_box, 0, 0)
    layout.addWidget(self.match_case, 1, 0)
    layout.addWidget(self.whole_word, 2, 0)
    layout.addWidget(self.find_button, 2, 1)
    layout.addWidget(self.status_box, 3, 0)
    self.setLayout(layout)
def search(self):
"""Performs a search for the input_text in the editor window"""
input_text = self.search_box.text()
case = self.match_case.isChecked()
whole_word = self.whole_word.isChecked()
if self.fist_search_flag:
findable = self.parent().editor.findFirst(input_text, False, case, whole_word, False, True, 0, 0)
| |
HAIR_CURLY: 'CommonGameTag' = 314
HAIR_LENGTH_LONG: 'CommonGameTag' = 664
HAIR_LENGTH_MEDIUM: 'CommonGameTag' = 820
HAIR_LENGTH_SHORT: 'CommonGameTag' = 662
HAIR_LENGTH_UPDO: 'CommonGameTag' = 2173
HAIR_LONG: 'CommonGameTag' = 151
HAIR_MEDIUM: 'CommonGameTag' = 150
HAIR_SHORT: 'CommonGameTag' = 149
HAIR_STRAIGHT: 'CommonGameTag' = 313
HAIR_TEXTURE_BALD: 'CommonGameTag' = 12391
HAIR_TEXTURE_CURLY: 'CommonGameTag' = 821
HAIR_TEXTURE_STRAIGHT: 'CommonGameTag' = 822
HAIR_TEXTURE_WAVY: 'CommonGameTag' = 663
HAIR_WAVY: 'CommonGameTag' = 315
HAT_BRIM: 'CommonGameTag' = 371
HAT_BRIMLESS: 'CommonGameTag' = 372
HAT_CAP: 'CommonGameTag' = 373
HAT_PAPER_BAG: 'CommonGameTag' = 2428
HOUSEHOLD_MEMBER_1: 'CommonGameTag' = 642
HOUSEHOLD_MEMBER_2: 'CommonGameTag' = 643
HOUSEHOLD_MEMBER_3: 'CommonGameTag' = 644
HOUSEHOLD_MEMBER_4: 'CommonGameTag' = 645
HOUSEHOLD_MEMBER_5: 'CommonGameTag' = 646
HOUSEHOLD_MEMBER_6: 'CommonGameTag' = 647
HOUSEHOLD_MEMBER_7: 'CommonGameTag' = 648
HOUSEHOLD_MEMBER_8: 'CommonGameTag' = 649
INSTRUMENT_VIOLIN: 'CommonGameTag' = 401
INTERACTION_ADOPTION: 'CommonGameTag' = 57441
INTERACTION_ADVENTUROUS_ONE_SHOT: 'CommonGameTag' = 69723
INTERACTION_ALL: 'CommonGameTag' = 462
INTERACTION_ARGUMENT: 'CommonGameTag' = 43015
INTERACTION_ASK_TO_LEAVE_LOT: 'CommonGameTag' = 689
INTERACTION_BAR_VENUE: 'CommonGameTag' = 1599
INTERACTION_BASKETBALL_PLAY: 'CommonGameTag' = 2127
INTERACTION_BATHTUB: 'CommonGameTag' = 2348
INTERACTION_BATUU_IGNORE_REPUTATION: 'CommonGameTag' = 51246
INTERACTION_BE_READ_TO: 'CommonGameTag' = 863
INTERACTION_BONFIRE: 'CommonGameTag' = 24590
INTERACTION_BROWSE_RESEARCH: 'CommonGameTag' = 757
INTERACTION_CAMPFIRE: 'CommonGameTag' = 10262
INTERACTION_CAREER_WORK_RABBIT_HOLE: 'CommonGameTag' = 2490
INTERACTION_CHARITY: 'CommonGameTag' = 750
INTERACTION_CHAT: 'CommonGameTag' = 342
INTERACTION_CLEAN: 'CommonGameTag' = 781
INTERACTION_CLIMBING_ROUTE: 'CommonGameTag' = 69691
INTERACTION_COLLECT: 'CommonGameTag' = 1309
INTERACTION_COMEDY_MIC: 'CommonGameTag' = 1613
INTERACTION_COMPUTER: 'CommonGameTag' = 439
INTERACTION_COMPUTER_TYPING: 'CommonGameTag' = 1367
INTERACTION_CONSUME: 'CommonGameTag' = 394
INTERACTION_COOK: 'CommonGameTag' = 358
INTERACTION_CURIO_SHOP_BROWSE_BUY: 'CommonGameTag' = 47134
INTERACTION_DEATH: 'CommonGameTag' = 425
INTERACTION_DOCTOR_TREAT_PATIENT: 'CommonGameTag' = 12337
INTERACTION_DRINK: 'CommonGameTag' = 654
INTERACTION_ECO_FOOTPRINT_GREEN: 'CommonGameTag' = 67603
INTERACTION_EXAM_TABLE_EXAM: 'CommonGameTag' = 57391
INTERACTION_EXTREME_SPORTS: 'CommonGameTag' = 69727
INTERACTION_FASHION_BLOG: 'CommonGameTag' = 2131
INTERACTION_FESTIVE: 'CommonGameTag' = 2058
INTERACTION_FOOSBALL_TABLE_PLAY: 'CommonGameTag' = 24581
INTERACTION_FRIENDLY: 'CommonGameTag' = 431
INTERACTION_FUNNY: 'CommonGameTag' = 432
INTERACTION_GAME_CONSOLE: 'CommonGameTag' = 55384
INTERACTION_GO_JOGGING: 'CommonGameTag' = 926
INTERACTION_GREEN_UPGRADED: 'CommonGameTag' = 67589
INTERACTION_GREETING: 'CommonGameTag' = 453
INTERACTION_GROUP_DANCE_TOGETHER: 'CommonGameTag' = 24607
INTERACTION_GROUP_WORKOUT: 'CommonGameTag' = 71683
INTERACTION_HACK: 'CommonGameTag' = 435
INTERACTION_HUG: 'CommonGameTag' = 1990
INTERACTION_IGNORE_GROUNDING: 'CommonGameTag' = 43028
INTERACTION_INFECT_HOUSE: 'CommonGameTag' = 47125
INTERACTION_INSTRUMENT_LISTEN: 'CommonGameTag' = 639
INTERACTION_INTELLIGENCE_RESEARCH: 'CommonGameTag' = 746
INTERACTION_INVENTION_CONSTRUCTOR_UPGRADE: 'CommonGameTag' = 12368
INTERACTION_INVITE_TO_STAY: 'CommonGameTag' = 417
INTERACTION_JOKE: 'CommonGameTag' = 871
INTERACTION_JUICE_KEG: 'CommonGameTag' = 2347
INTERACTION_KARAOKE_VENUE: 'CommonGameTag' = 1600
INTERACTION_KISS: 'CommonGameTag' = 350
INTERACTION_KNITTING: 'CommonGameTag' = 83984
INTERACTION_LAUNDRY_GENERATE_NO_PILE: 'CommonGameTag' = 2035
INTERACTION_LAUNDRY_PUT_AWAY_FINISHED_LAUNDRY: 'CommonGameTag' = 2034
INTERACTION_LEAVE: 'CommonGameTag' = 420
INTERACTION_LEAVE_MUST_RUN: 'CommonGameTag' = 419
INTERACTION_LIFESTYLES_ADRENALINE_SEEKER_DISCOURAGE_AUTONOMY: 'CommonGameTag' = 69730
INTERACTION_LIFESTYLES_ADRENALINE_SEEKER_FLEXIBLE_LENGTH: 'CommonGameTag' = 69655
INTERACTION_LIFESTYLES_ADRENALINE_SEEKER_MUNDANE: 'CommonGameTag' = 69712
INTERACTION_LIFESTYLES_ADRENALINE_SEEKER_ONE_SHOT: 'CommonGameTag' = 69656
INTERACTION_LIFESTYLES_ELECTRONICS: 'CommonGameTag' = 69651
INTERACTION_LIFESTYLES_ELECTRONICS_REPAIR: 'CommonGameTag' = 69652
INTERACTION_LIFESTYLES_ENERGETIC_FLEXIBLE_LENGTH: 'CommonGameTag' = 69634
INTERACTION_LIFESTYLES_ENERGETIC_ONE_SHOT: 'CommonGameTag' = 69690
INTERACTION_LIFESTYLES_ENERGETIC_AUTONOMY: 'CommonGameTag' = 69737
INTERACTION_LIFESTYLES_FREQUENT_TRAVELER_FLEXIBLE_LENGTH: 'CommonGameTag' = 69636
INTERACTION_LIFESTYLES_FREQUENT_TRAVELER_ONE_SHOT: 'CommonGameTag' = 69635
INTERACTION_LIFESTYLES_INDOORSY_FLEXIBLE_LENGTH: 'CommonGameTag' = 69657
INTERACTION_LIFESTYLES_INDOORSY_ONE_SHOT: 'CommonGameTag' = 69658
INTERACTION_LIFESTYLES_INDOORSY_AUTONOMY: 'CommonGameTag' = 69731
INTERACTION_LIFESTYLES_OUTDOORSY_FLEXIBLE_LENGTH: 'CommonGameTag' = 69659
INTERACTION_LIFESTYLES_OUTDOORSY_ONE_SHOT: 'CommonGameTag' = 69660
INTERACTION_LIFESTYLES_OUTDOORSY_AUTONOMY: 'CommonGameTag' = 69734
INTERACTION_LIFESTYLES_ROMANTIC_MEDIA: 'CommonGameTag' = 69713
INTERACTION_LIFESTYLES_SEDENTARY_FLEXIBLE_LENGTH: 'CommonGameTag' = 69633
INTERACTION_LIFESTYLES_SEDENTARY_ONE_SHOT: 'CommonGameTag' = 69689
INTERACTION_LIFESTYLES_SEDENTARY_AUTONOMY: 'CommonGameTag' = 69736
INTERACTION_LIFESTYLES_TECH_CAREER: 'CommonGameTag' = 69663
INTERACTION_LIFESTYLES_TECHIE_FLEXIBLE_LENGTH: 'CommonGameTag' = 69638
INTERACTION_LIFESTYLES_TECHIE_ONE_SHOT: 'CommonGameTag' = 69639
INTERACTION_LIFESTYLES_TECHIE_AUTONOMY: 'CommonGameTag' = 69735
INTERACTION_LIFESTYLES_TECHNOPHOBE_FLEXIBLE_LENGTH: 'CommonGameTag' = 69641
INTERACTION_LIFESTYLES_TECHNOPHOBE_ONE_SHOT: 'CommonGameTag' = 69640
INTERACTION_LIFESTYLES_TECHNOPHOBE_SABOTAGE: 'CommonGameTag' = 69704
INTERACTION_LISTEN_MUSIC: 'CommonGameTag' = 444
INTERACTION_MAKE_APP: 'CommonGameTag' = 683
INTERACTION_MAKE_COFFEE_OR_TEA: 'CommonGameTag' = 1028
INTERACTION_MARKET_STALL_TEND: 'CommonGameTag' = 55400
INTERACTION_MARKET_STALLS_TEND: 'CommonGameTag' = 1934
INTERACTION_MASSAGE_TABLE: 'CommonGameTag' = 18439
INTERACTION_MEAN: 'CommonGameTag' = 433
INTERACTION_MENTOR: 'CommonGameTag' = 455
INTERACTION_MENTOR_MUSIC: 'CommonGameTag' = 695
INTERACTION_MISCHIEVOUS: 'CommonGameTag' = 434
INTERACTION_MIXER: 'CommonGameTag' = 461
INTERACTION_NAP: 'CommonGameTag' = 591
INTERACTION_NESTING_BLOCKS: 'CommonGameTag' = 1698
INTERACTION_NOISY_ELECTRONICS: 'CommonGameTag' = 1628
INTERACTION_OBSERVATORY: 'CommonGameTag' = 1598
INTERACTION_OLD_DAY_FINE: 'CommonGameTag' = 67638
INTERACTION_PAINT: 'CommonGameTag' = 694
INTERACTION_PAINT_BY_REFERENCE: 'CommonGameTag' = 1372
INTERACTION_PAINT_MURAL: 'CommonGameTag' = 55359
INTERACTION_PARK_VENUE: 'CommonGameTag' = 1601
INTERACTION_PARTY: 'CommonGameTag' = 2061
INTERACTION_PERFORM_COMEDY_ROUTINE: 'CommonGameTag' = 469
INTERACTION_PET_MISBEHAVIOR: 'CommonGameTag' = 57397
INTERACTION_PETS_FRIENDLY: 'CommonGameTag' = 57370
INTERACTION_PETS_GREETING: 'CommonGameTag' = 57372
INTERACTION_PETS_MEAN: 'CommonGameTag' = 57371
INTERACTION_PETS_SS3_ALLOWED: 'CommonGameTag' = 2015
INTERACTION_PHOTO_STUDIO_TAKE_PICTURE: 'CommonGameTag' = 1942
INTERACTION_PLAY_DJ_BOOTH: 'CommonGameTag' = 1618
INTERACTION_PLAY_GAME: 'CommonGameTag' = 640
INTERACTION_PLAY_GUITAR: 'CommonGameTag' = 1615
INTERACTION_PLAY_GUITAR_FOR_TIPS: 'CommonGameTag' = 1024
INTERACTION_PLAY_INSTRUMENT: 'CommonGameTag' = 442
INTERACTION_PLAY_INSTRUMENT_FOR_TIPS: 'CommonGameTag' = 443
INTERACTION_PLAY_INSTRUMENT_OR_COMEDY_FOR_TIPS: 'CommonGameTag' = 606
INTERACTION_PLAY_PIANO: 'CommonGameTag' = 690
INTERACTION_PLAY_PIANO_FOR_TIPS: 'CommonGameTag' = 1025
INTERACTION_PLAY_TOY: 'CommonGameTag' = 1339
INTERACTION_PLAY_VIDEO_GAMES: 'CommonGameTag' = 685
INTERACTION_PLAY_VIOLIN: 'CommonGameTag' = 1616
INTERACTION_PLAY_VIOLIN_FOR_TIPS: 'CommonGameTag' = 1026
INTERACTION_PLAY_WITH_CAT: 'CommonGameTag' = 57362
INTERACTION_PLAY_WITH_DOG: 'CommonGameTag' = 57363
INTERACTION_PRACTICE_ACTING: 'CommonGameTag' = 61552
INTERACTION_PRACTICE_CODING: 'CommonGameTag' = 693
INTERACTION_PRACTICE_DEBATE: 'CommonGameTag' = 65648
INTERACTION_PRACTICE_WRITING: 'CommonGameTag' = 692
INTERACTION_PRANK: 'CommonGameTag' = 583
INTERACTION_PRANK_OBJECT: 'CommonGameTag' = 752
INTERACTION_PROGRAMMING: 'CommonGameTag' = 751
INTERACTION_PUBLISH_BOOK: 'CommonGameTag' = 660
INTERACTION_READ_TO_CHILD: 'CommonGameTag' = 931
INTERACTION_REPAIR: 'CommonGameTag' = 464
INTERACTION_RESTAURANT_WAIT_TO_PLACE_ORDER: 'CommonGameTag' = 2151
INTERACTION_RETAIL: 'CommonGameTag' = 12347
INTERACTION_ROCKET: 'CommonGameTag' = 465
INTERACTION_ROCKET_SHIP_LAUNCH: 'CommonGameTag' = 438
INTERACTION_ROCKET_SHIP_UPGRADE: 'CommonGameTag' = 437
INTERACTION_RUN_AWAY: 'CommonGameTag' = 57443
INTERACTION_SCHOOL_WORK: 'CommonGameTag' = 43026
INTERACTION_SCIENCE_TABLE: 'CommonGameTag' = 786
INTERACTION_SEASON_FALL: 'CommonGameTag' = 59420
INTERACTION_SEASON_SPRING: 'CommonGameTag' = 59418
INTERACTION_SEASON_SUMMER: 'CommonGameTag' = 59419
INTERACTION_SEASON_WINTER: 'CommonGameTag' = 59421
INTERACTION_SELL_ART: 'CommonGameTag' = 661
INTERACTION_SHOWER: 'CommonGameTag' = 1447
INTERACTION_SHOWOFF: 'CommonGameTag' = 427
INTERACTION_SIM_TV: 'CommonGameTag' = 55362
INTERACTION_SITUATION_PHOTOGRAPHY: 'CommonGameTag' = 79876
INTERACTION_SKATING_ICE_SKATING: 'CommonGameTag' = 59395
INTERACTION_SKATING_ROLLER_SKATING: 'CommonGameTag' = 59396
INTERACTION_SKATING_ROUTINE: 'CommonGameTag' = 59397
INTERACTION_SKATING_SKATING: 'CommonGameTag' = 59394
INTERACTION_SKATING_TRICK: 'CommonGameTag' = 59401
INTERACTION_SKETCH: 'CommonGameTag' = 2132
INTERACTION_SKIING: 'CommonGameTag' = 69726
INTERACTION_SKILL_ACTING: 'CommonGameTag' = 2340
INTERACTION_SKILL_BAKING: 'CommonGameTag' = 2346
INTERACTION_SKILL_BARTENDING: 'CommonGameTag' = 835
INTERACTION_SKILL_CHARISMA: 'CommonGameTag' = 837
INTERACTION_SKILL_CHILD_CREATIVITY: 'CommonGameTag' = 853
INTERACTION_SKILL_CHILD_MENTAL: 'CommonGameTag' = 854
INTERACTION_SKILL_CHILD_MOTOR: 'CommonGameTag' = 855
INTERACTION_SKILL_CHILD_SOCIAL: 'CommonGameTag' = 856
INTERACTION_SKILL_COMEDY: 'CommonGameTag' = 838
INTERACTION_SKILL_DANCING: 'CommonGameTag' = 2343
INTERACTION_SKILL_DJ_MIXING: 'CommonGameTag' = 2342
INTERACTION_SKILL_DOG_TRAINING: 'CommonGameTag' = 57373
INTERACTION_SKILL_FABRICATION: 'CommonGameTag' = 2434
INTERACTION_SKILL_FISHING: 'CommonGameTag' = 839
INTERACTION_SKILL_FITNESS: 'CommonGameTag' = 836
INTERACTION_SKILL_FLOWER_ARRANGEMENT: 'CommonGameTag' = 2344
INTERACTION_SKILL_GARDENING: 'CommonGameTag' = 834
INTERACTION_SKILL_GOURMET_COOKING: 'CommonGameTag' = 840
INTERACTION_SKILL_GUITAR: 'CommonGameTag' = 841
INTERACTION_SKILL_HANDINESS: 'CommonGameTag' = 842
INTERACTION_SKILL_HERBALISM: 'CommonGameTag' = 2339
INTERACTION_SKILL_HOME_STYLE_COOKING: 'CommonGameTag' = 843
INTERACTION_SKILL_JUICE_FIZZING: 'CommonGameTag' = 2424
INTERACTION_SKILL_KNITTING: 'CommonGameTag' = 2461
INTERACTION_SKILL_LOGIC: 'CommonGameTag' = 844
INTERACTION_SKILL_MEDIA_PRODUCTION: 'CommonGameTag' = 2338
INTERACTION_SKILL_MISCHIEF: 'CommonGameTag' = 845
INTERACTION_SKILL_PAINTING: 'CommonGameTag' = 846
INTERACTION_SKILL_PHOTOGRAPHY: 'CommonGameTag' = 1938
INTERACTION_SKILL_PIANO: 'CommonGameTag' = 847
INTERACTION_SKILL_PIPE_ORGAN: 'CommonGameTag' = 2341
INTERACTION_SKILL_PROGRAMMING: 'CommonGameTag' = 848
INTERACTION_SKILL_ROBOTICS: 'CommonGameTag' = 2345
INTERACTION_SKILL_ROCKET_SCIENCE: 'CommonGameTag' = 849
INTERACTION_SKILL_SINGING: 'CommonGameTag' = 55364
INTERACTION_SKILL_SINGING_KARAOKE: 'CommonGameTag' = 1617
INTERACTION_SKILL_VIDEO_GAMING: 'CommonGameTag' = 850
INTERACTION_SKILL_VIOLIN: 'CommonGameTag' = 851
INTERACTION_SKILL_WELLNESS: 'CommonGameTag' = 18465
INTERACTION_SKILL_WELLNESS_BG: 'CommonGameTag' = 2337
INTERACTION_SKILL_WRITING: 'CommonGameTag' = 852
INTERACTION_SLEDDING: 'CommonGameTag' = 69725
INTERACTION_SLEEP: 'CommonGameTag' = 451
INTERACTION_SLEEP_GROUP: 'CommonGameTag' = 2094
INTERACTION_SLEEP_NAP: 'CommonGameTag' = 59477
INTERACTION_SNIFF_NEW_OBJECTS: 'CommonGameTag' = 2093
INTERACTION_SNOWBOARDING: 'CommonGameTag' = 69724
INTERACTION_SOCIAL_ALL: 'CommonGameTag' = 2161
INTERACTION_SOCIAL_CONTAGIOUS: 'CommonGameTag' = 2041
INTERACTION_SOCIAL_MEDIA_CHECK_IN: 'CommonGameTag' = 1619
INTERACTION_SOCIAL_MEDIA_PERSUADE_TO: 'CommonGameTag' = 55319
INTERACTION_SOCIAL_MIXER: 'CommonGameTag' = 2162
INTERACTION_SOCIAL_NETWORK: 'CommonGameTag' = 1595
INTERACTION_SOCIAL_SUPER: 'CommonGameTag' = 454
INTERACTION_SOCIAL_TOUCHING: 'CommonGameTag' = 2163
INTERACTION_SPRAY_GRAFFITI: 'CommonGameTag' = 55361
INTERACTION_STEREO_DANCE: 'CommonGameTag' = 876
INTERACTION_STEREO_LISTEN: 'CommonGameTag' = 638
INTERACTION_STUFFED_ANIMAL_BABBLE: 'CommonGameTag' = 1723
INTERACTION_SUPER: 'CommonGameTag' = 460
INTERACTION_SURGERY_STATION_EXAM: 'CommonGameTag' = 57392
INTERACTION_SWIM: 'CommonGameTag' = 1591
INTERACTION_TAKE_PHOTO: 'CommonGameTag' = 1939
INTERACTION_TAKE_PIZZA: 'CommonGameTag' = 1640
INTERACTION_TALK_LIKE_A_PIRATE_DAY: 'CommonGameTag' = 59439
INTERACTION_TEEN_CAREER_RABBIT_HOLE: 'CommonGameTag' = 1719
INTERACTION_TELESCOPE: 'CommonGameTag' = 436
INTERACTION_TELL_STORY: 'CommonGameTag' = 466
INTERACTION_TENT_SLEEP: 'CommonGameTag' = 2477
INTERACTION_THROWING: 'CommonGameTag' = 2488
INTERACTION_THROWING_MUD: 'CommonGameTag' = 59425
INTERACTION_THROWING_SNOWBALL: 'CommonGameTag' = 2489
INTERACTION_THROWING_WATER_BALLOON: 'CommonGameTag' = 59426
INTERACTION_TOURNAMENT: 'CommonGameTag' = 749
INTERACTION_TRANSFER_FIRELEAF_RASH: 'CommonGameTag' = 2479
INTERACTION_TREADMILL: 'CommonGameTag' = 353
INTERACTION_TRY_FOR_BABY: 'CommonGameTag' = 452
INTERACTION_UNIVERSITY_STUDY_WITH: 'CommonGameTag' = 65609
INTERACTION_UPGRADE: 'CommonGameTag' = 658
INTERACTION_USE_TOILET: 'CommonGameTag' = 396
INTERACTION_VIDEO_GAME_LIVESTREAM: 'CommonGameTag' = 1641
INTERACTION_VIDEO_GAME_MONEY: 'CommonGameTag' = 655
INTERACTION_VIDEO_GAME_STREAM_LETS_PLAY: 'CommonGameTag' = 1642
INTERACTION_VIEW_ART: 'CommonGameTag' = 758
INTERACTION_VISIT_LOT: 'CommonGameTag' = 449
INTERACTION_VOODOO: 'CommonGameTag' = 426
INTERACTION_WAIT_IN_LINE: 'CommonGameTag' = 2497
INTERACTION_WAITSTAFF_IDLE: 'CommonGameTag' = 26634
INTERACTION_WATCH_PERFORMER: 'CommonGameTag' = 1597
INTERACTION_WATCH_TV: 'CommonGameTag' = 450
INTERACTION_WATCH_TV_COOKING: 'CommonGameTag' = 55320
INTERACTION_WATCH_TV_ROM_COM_ACT: 'CommonGameTag' = 55321
INTERACTION_WEATHER_RAIN: 'CommonGameTag' = 59423
INTERACTION_WEATHER_SNOW: 'CommonGameTag' = 59422
INTERACTION_WOODWORKING: 'CommonGameTag' = 1612
INTERACTION_WORKOUT: 'CommonGameTag' = 463
INTERACTION_WORKOUT_MACHINE: 'CommonGameTag' = 354
INTERACTION_WORKOUT_PUSH_THE_LIMITS: 'CommonGameTag' = 1171
INTERACTION_WRITE: 'CommonGameTag' = 55360
INTERACTION_WRITE_ARTICLE: 'CommonGameTag' = 665
INTERACTION_WRITE_JOKES: 'CommonGameTag' = 696
INTERACTION_YOGA_CLASS_MEMBER: 'CommonGameTag' = 18461
INVENTORY_BOOKS_FUN: 'CommonGameTag' = 2350
INVENTORY_BOOKS_OTHER: 'CommonGameTag' = 2352
INVENTORY_BOOKS_SKILL: 'CommonGameTag' = 2351
INVENTORY_COLLECTIBLE_CREATURE: 'CommonGameTag' = 2353
INVENTORY_COLLECTIBLE_DECORATION: 'CommonGameTag' = 2354
INVENTORY_COLLECTIBLE_NATURAL: 'CommonGameTag' = 2355
INVENTORY_COLLECTIBLE_OTHER: 'CommonGameTag' = 2356
INVENTORY_CONSUMABLE_DRINK: 'CommonGameTag' = 2358
INVENTORY_CONSUMABLE_FOOD: 'CommonGameTag' = 2357
INVENTORY_CONSUMABLE_OTHER: 'CommonGameTag' = 2359
INVENTORY_GARDENING_OTHER: 'CommonGameTag' = 2360
INVENTORY_HOME_SKILL_DECORATION: 'CommonGameTag' = 2362
INVENTORY_HOME_SKILL_HOME: 'CommonGameTag' = 2363
INVENTORY_HOME_SKILL_LITTLE_ONES: 'CommonGameTag' = 2364
INVENTORY_HOME_SKILL_SKILL: 'CommonGameTag' = 2361
INVENTORY_PLOPSY_ALL: 'CommonGameTag' = 2459
INVENTORY_PLOPSY_LISTED: 'CommonGameTag' = 2457
INVENTORY_PLOPSY_PENDING_SALE: 'CommonGameTag' = 2458
INVENTORY_PLOPSY_UNAVAILABLE: 'CommonGameTag' = 83989
INVENTORY_SCRAPS_JUNK: 'CommonGameTag' = 2371
INVENTORY_SCRAPS_PARTS: 'CommonGameTag' = 2370
INVENTORY_SIM_CRAFTED_ARTWORK: 'CommonGameTag' = 2368
INVENTORY_SIM_CRAFTED_OTHER: 'CommonGameTag' = 2369
INVENTORY_SPECIAL_CAREER_ACTIVITY: 'CommonGameTag' = 2365
INVENTORY_SPECIAL_EDUCATION: 'CommonGameTag' = 2366
INVENTORY_SPECIAL_STORY: 'CommonGameTag' = 2367
JOB_BATUU_NPC: 'CommonGameTag' = 2512
| |
"""
_prefix = 'l2vpn-cfg'
_revision = '2015-11-09'
def __init__(self):
    """Initialise this attachment-circuit node.

    Scalar leaves start as ``None`` (unset); container children are created
    eagerly and given ``self`` as parent so each can derive its XPath via
    ``_common_path``.
    """
    self.parent = None
    # List key leaf — presumably the attachment-circuit interface name; confirm
    # against the generated class docstring upstream.
    self.name = None
    # Container children (each parented back to this node):
    self.bdac_storm_control_types = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes()
    self.bdac_storm_control_types.parent = self
    self.interface_dai = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai()
    self.interface_dai.parent = self
    # Scalar leaves (unset until configured):
    self.interface_flooding = None
    self.interface_flooding_unknown_unicast = None
    self.interface_igmp_snoop = None
    self.interface_ip_source_guard = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceIpSourceGuard()
    self.interface_ip_source_guard.parent = self
    self.interface_mac = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceMac()
    self.interface_mac.parent = self
    self.interface_mld_snoop = None
    self.interface_profile = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceProfile()
    self.interface_profile.parent = self
    self.split_horizon = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon()
    self.split_horizon.parent = self
    self.static_mac_addresses = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses()
    self.static_mac_addresses.parent = self
class InterfaceIpSourceGuard(object):
    """IP Source Guard settings for an attachment circuit.

    Attributes (all optional, ``None`` when unset):

    - ``disable``: disable L2 interface dynamic IP source guard
      (``ydk.types.Empty``)
    - ``enable``: enable IP Source Guard (``ydk.types.Empty``)
    - ``logging``: logging type (``L2VpnLoggingEnum``)
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.disable = None
        self.enable = None
        self.logging = None

    @property
    def _common_path(self):
        # Path is derived from the parent node; a detached node has no path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:interface-ip-source-guard'

    def is_config(self):
        """Return True: this subtree holds configuration (not state) data."""
        return True

    def _has_data(self):
        # Node carries data when any leaf has been set.
        if not self.is_config():
            return False
        return any(
            leaf is not None
            for leaf in (self.disable, self.enable, self.logging)
        )

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceIpSourceGuard']['meta_info']
class InterfaceDai(object):
    """L2 interface Dynamic ARP Inspection (DAI) settings.

    Attributes:

    - ``disable``: disable DAI on the interface (``ydk.types.Empty``)
    - ``enable``: enable DAI on the interface (``ydk.types.Empty``)
    - ``interface_dai_address_validation``: address-validation container
      (``InterfaceDaiAddressValidation``)
    - ``logging``: logging type (``L2VpnLoggingEnum``)
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.disable = None
        self.enable = None
        self.logging = None
        # Child container is created up front and parented to this node.
        self.interface_dai_address_validation = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai.InterfaceDaiAddressValidation()
        self.interface_dai_address_validation.parent = self

    class InterfaceDaiAddressValidation(object):
        """Address-validation options for dynamic ARP inspection.

        Attributes (all optional, ``None`` when unset):

        - ``destination_mac_verification`` (``L2VpnVerificationEnum``)
        - ``enable``: enable address validation (``ydk.types.Empty``)
        - ``ipv4_verification`` (``L2VpnVerificationEnum``)
        - ``source_mac_verification`` (``L2VpnVerificationEnum``)
        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.destination_mac_verification = None
            self.enable = None
            self.ipv4_verification = None
            self.source_mac_verification = None

        @property
        def _common_path(self):
            # Path is derived from the parent node; a detached node has no path.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:interface-dai-address-validation'

        def is_config(self):
            """Return True: this subtree holds configuration (not state) data."""
            return True

        def _has_data(self):
            # Node carries data when any leaf has been set.
            if not self.is_config():
                return False
            leaves = (
                self.destination_mac_verification,
                self.enable,
                self.ipv4_verification,
                self.source_mac_verification,
            )
            return any(leaf is not None for leaf in leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai.InterfaceDaiAddressValidation']['meta_info']

    @property
    def _common_path(self):
        # Path is derived from the parent node; a detached node has no path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:interface-dai'

    def is_config(self):
        """Return True: this subtree holds configuration (not state) data."""
        return True

    def _has_data(self):
        # Data is present when any leaf is set or the child container has data.
        if not self.is_config():
            return False
        if self.disable is not None or self.enable is not None or self.logging is not None:
            return True
        validation = self.interface_dai_address_validation
        return validation is not None and validation._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceDai']['meta_info']
class InterfaceProfile(object):
    """DHCP profile attachment for the interface.

    Attributes (all optional, ``None`` when unset):

    - ``dhcp_snooping_id``: disable DHCP snooping (str)
    - ``profile_id``: the snooping profile (``InterfaceProfileEnum``)
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.dhcp_snooping_id = None
        self.profile_id = None

    @property
    def _common_path(self):
        # Path is derived from the parent node; a detached node has no path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:interface-profile'

    def is_config(self):
        """Return True: this subtree holds configuration (not state) data."""
        return True

    def _has_data(self):
        # Node carries data when either leaf has been set.
        if not self.is_config():
            return False
        return any(
            leaf is not None
            for leaf in (self.dhcp_snooping_id, self.profile_id)
        )

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.InterfaceProfile']['meta_info']
class BdacStormControlTypes(object):
    """Storm-control configuration: a keyed list of per-type entries.

    Attributes:

    - ``bdac_storm_control_type``: list of ``BdacStormControlType``
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YDK-managed list; each appended child is parented automatically.
        self.bdac_storm_control_type = YList()
        self.bdac_storm_control_type.parent = self
        self.bdac_storm_control_type.name = 'bdac_storm_control_type'

    class BdacStormControlType(object):
        """One storm-control list entry.

        Attributes:

        - ``sctype`` (key): storm control type (``StormControlEnum``)
        - ``storm_control_unit``: units container (``StormControlUnit``)
        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.sctype = None
            # Child container is created up front and parented to this node.
            self.storm_control_unit = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType.StormControlUnit()
            self.storm_control_unit.parent = self

        class StormControlUnit(object):
            """Units for storm-control configuration.

            Attributes (mutually exclusive per the model description):

            - ``kbits_per_sec``: int, range 64..1280000, unit kbit/s
            - ``pkts_per_sec``: int, range 1..160000, unit packet/s
            """

            _prefix = 'l2vpn-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.kbits_per_sec = None
                self.pkts_per_sec = None

            @property
            def _common_path(self):
                # Path is derived from the parent node; a detached node has no path.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:storm-control-unit'

            def is_config(self):
                """Return True: this subtree holds configuration (not state) data."""
                return True

            def _has_data(self):
                # Node carries data when either leaf has been set.
                if not self.is_config():
                    return False
                return self.kbits_per_sec is not None or self.pkts_per_sec is not None

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
                return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType.StormControlUnit']['meta_info']

        @property
        def _common_path(self):
            # A keyed list entry needs both a parent and its key to form a path.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            if self.sctype is None:
                raise YPYModelError('Key property sctype is None')
            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:bdac-storm-control-type[Cisco-IOS-XR-l2vpn-cfg:sctype = ' + str(self.sctype) + ']'

        def is_config(self):
            """Return True: this subtree holds configuration (not state) data."""
            return True

        def _has_data(self):
            # Data is present when the key is set or the child container has data.
            if not self.is_config():
                return False
            if self.sctype is not None:
                return True
            return self.storm_control_unit is not None and self.storm_control_unit._has_data()

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes.BdacStormControlType']['meta_info']

    @property
    def _common_path(self):
        # Path is derived from the parent node; a detached node has no path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:bdac-storm-control-types'

    def is_config(self):
        """Return True: this subtree holds configuration (not state) data."""
        return True

    def _has_data(self):
        # Data is present when any list entry has data.
        if not self.is_config():
            return False
        if self.bdac_storm_control_type is not None:
            return any(child._has_data() for child in self.bdac_storm_control_type)
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.BdacStormControlTypes']['meta_info']
class SplitHorizon(object):
    """Split-horizon settings for the attachment circuit.

    Attributes:

    - ``split_horizon_group_id``: group-id container (``SplitHorizonGroupId``)
    """

    _prefix = 'l2vpn-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Child container is created up front and parented to this node.
        self.split_horizon_group_id = L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon.SplitHorizonGroupId()
        self.split_horizon_group_id.parent = self

    class SplitHorizonGroupId(object):
        """Split-horizon group ID container.

        Attributes:

        - ``enable``: enable the split horizon group (``ydk.types.Empty``)
        """

        _prefix = 'l2vpn-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.enable = None

        @property
        def _common_path(self):
            # Path is derived from the parent node; a detached node has no path.
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:split-horizon-group-id'

        def is_config(self):
            """Return True: this subtree holds configuration (not state) data."""
            return True

        def _has_data(self):
            # Node carries data when its single leaf has been set.
            if not self.is_config():
                return False
            return self.enable is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
            return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon.SplitHorizonGroupId']['meta_info']

    @property
    def _common_path(self):
        # Path is derived from the parent node; a detached node has no path.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path +'/Cisco-IOS-XR-l2vpn-cfg:split-horizon'

    def is_config(self):
        """Return True: this subtree holds configuration (not state) data."""
        return True

    def _has_data(self):
        # Data is present when the child container has data.
        if not self.is_config():
            return False
        group = self.split_horizon_group_id
        return group is not None and group._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_l2vpn_cfg as meta
        return meta._meta_table['L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.SplitHorizon']['meta_info']
class StaticMacAddresses(object):
"""
Static Mac Address Table
.. attribute:: static_mac_address
Static Mac Address Configuration
**type**\: list of :py:class:`StaticMacAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_l2vpn_cfg.L2Vpn.Database.BridgeDomainGroups.BridgeDomainGroup.BridgeDomains.BridgeDomain.BdAttachmentCircuits.BdAttachmentCircuit.StaticMacAddresses.StaticMacAddress>`
"""
_prefix = 'l2vpn-cfg'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.