input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
**units**\: second
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
    """Initialize the list-entry skeleton: leafs, child container, keys and paths."""
    super(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute, self).__init__()
    self.yang_name = "incomplete-traceroute"
    self.yang_parent_name = "incomplete-traceroutes"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # YANG list keys, in key order; they form the predicate in _segment_path.
    self.ylist_key_names = ['domain','service','mep_id','interface','transaction_id']
    self._child_classes = OrderedDict([("traceroute-information", ("traceroute_information", Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation))])
    # Leaf registry: python name -> (YLeaf(yang type, yang name), [python types]).
    self._leafs = OrderedDict([
        ('domain', (YLeaf(YType.str, 'domain'), ['str'])),
        ('service', (YLeaf(YType.str, 'service'), ['str'])),
        ('mep_id', (YLeaf(YType.uint32, 'mep-id'), ['int'])),
        ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
        ('transaction_id', (YLeaf(YType.uint32, 'transaction-id'), ['int'])),
        ('time_left', (YLeaf(YType.uint64, 'time-left'), ['int'])),
    ])
    self.domain = None
    self.service = None
    self.mep_id = None
    self.interface = None
    self.transaction_id = None
    self.time_left = None
    # Presence child container, parented to this entity.
    self.traceroute_information = Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation()
    self.traceroute_information.parent = self
    self._children_name_map["traceroute_information"] = "traceroute-information"
    self._segment_path = lambda: "incomplete-traceroute" + "[domain='" + str(self.domain) + "']" + "[service='" + str(self.service) + "']" + "[mep-id='" + str(self.mep_id) + "']" + "[interface='" + str(self.interface) + "']" + "[transaction-id='" + str(self.transaction_id) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/incomplete-traceroutes/%s" % self._segment_path()
    # Freeze the entity: from here on, __setattr__ only accepts declared leafs.
    self._is_frozen = True
def __setattr__(self, name, value):
    # Route every attribute write through YDK validation so that only the
    # declared leaf names can be set once the entity is frozen.
    self._perform_setattr(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute, ['domain', 'service', 'mep_id', 'interface', 'transaction_id', 'time_left'], name, value)
class TracerouteInformation(Entity):
    """
    Information about the traceroute operation

    .. attribute:: options

        Options affecting traceroute behavior

        **type**\: :py:class:`Options <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options>`

    .. attribute:: domain

        Maintenance domain name

        **type**\: str

    .. attribute:: service

        Service name

        **type**\: str

    .. attribute:: level

        Maintenance level

        **type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>`

    .. attribute:: source_mep_id

        Source MEP ID

        **type**\: int

        **range:** 0..65535

    .. attribute:: source_interface

        Source interface

        **type**\: str

        **pattern:** [a\-zA\-Z0\-9.\_/\-]+

    .. attribute:: source_mac_address

        Source MAC address

        **type**\: str

        **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

    .. attribute:: target_mac_address

        Target MAC address

        **type**\: str

        **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

    .. attribute:: directed_mac_address

        Directed MAC address

        **type**\: str

        **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}

    .. attribute:: target_mep_id

        Target MEP ID

        **type**\: int

        **range:** 0..65535

    .. attribute:: timestamp

        Timestamp of initiation time (seconds)

        **type**\: int

        **range:** 0..18446744073709551615

        **units**\: second

    .. attribute:: ttl

        Time to live

        **type**\: int

        **range:** 0..255

    .. attribute:: transaction_id

        Transaction ID

        **type**\: int

        **range:** 0..4294967295

    """
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation, self).__init__()
        self.yang_name = "traceroute-information"
        self.yang_parent_name = "incomplete-traceroute"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("options", ("options", Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options))])
        # Leaf registry: python name -> (YLeaf(yang type, yang name), [python types]).
        self._leafs = OrderedDict([
            ('domain', (YLeaf(YType.str, 'domain'), ['str'])),
            ('service', (YLeaf(YType.str, 'service'), ['str'])),
            ('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])),
            ('source_mep_id', (YLeaf(YType.uint16, 'source-mep-id'), ['int'])),
            ('source_interface', (YLeaf(YType.str, 'source-interface'), ['str'])),
            ('source_mac_address', (YLeaf(YType.str, 'source-mac-address'), ['str'])),
            ('target_mac_address', (YLeaf(YType.str, 'target-mac-address'), ['str'])),
            ('directed_mac_address', (YLeaf(YType.str, 'directed-mac-address'), ['str'])),
            ('target_mep_id', (YLeaf(YType.uint16, 'target-mep-id'), ['int'])),
            ('timestamp', (YLeaf(YType.uint64, 'timestamp'), ['int'])),
            ('ttl', (YLeaf(YType.uint8, 'ttl'), ['int'])),
            ('transaction_id', (YLeaf(YType.uint32, 'transaction-id'), ['int'])),
        ])
        self.domain = None
        self.service = None
        self.level = None
        self.source_mep_id = None
        self.source_interface = None
        self.source_mac_address = None
        self.target_mac_address = None
        self.directed_mac_address = None
        self.target_mep_id = None
        self.timestamp = None
        self.ttl = None
        self.transaction_id = None
        # Child container, parented to this entity.
        self.options = Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options()
        self.options.parent = self
        self._children_name_map["options"] = "options"
        self._segment_path = lambda: "traceroute-information"
        # Freeze the entity: from here on, __setattr__ only accepts declared leafs.
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK leaf validation.
        self._perform_setattr(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation, ['domain', 'service', 'level', 'source_mep_id', 'source_interface', 'source_mac_address', 'target_mac_address', 'directed_mac_address', 'target_mep_id', 'timestamp', 'ttl', 'transaction_id'], name, value)

    class Options(Entity):
        """
        Options affecting traceroute behavior

        .. attribute:: basic_options

            Options for a basic IEEE 802.1ag Linktrace

            **type**\: :py:class:`BasicOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.BasicOptions>`

        .. attribute:: exploratory_options

            Options for an Exploratory Linktrace

            **type**\: :py:class:`ExploratoryOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.ExploratoryOptions>`

        .. attribute:: mode

            Mode

            **type**\: :py:class:`CfmPmLtMode <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmLtMode>`

        """
        _prefix = 'ethernet-cfm-oper'
        _revision = '2017-10-06'

        def __init__(self):
            super(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options, self).__init__()
            self.yang_name = "options"
            self.yang_parent_name = "traceroute-information"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            self._child_classes = OrderedDict([("basic-options", ("basic_options", Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.BasicOptions)), ("exploratory-options", ("exploratory_options", Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.ExploratoryOptions))])
            self._leafs = OrderedDict([
                ('mode', (YLeaf(YType.enumeration, 'mode'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmLtMode', '')])),
            ])
            self.mode = None
            # Child containers, parented to this entity.
            self.basic_options = Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.BasicOptions()
            self.basic_options.parent = self
            self._children_name_map["basic_options"] = "basic-options"
            self.exploratory_options = Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.ExploratoryOptions()
            self.exploratory_options.parent = self
            self._children_name_map["exploratory_options"] = "exploratory-options"
            self._segment_path = lambda: "options"
            self._is_frozen = True

        def __setattr__(self, name, value):
            # Route attribute writes through YDK leaf validation.
            self._perform_setattr(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options, ['mode'], name, value)

        class BasicOptions(Entity):
            """
            Options for a basic IEEE 802.1ag Linktrace

            .. attribute:: is_auto

                Traceroute was initiated automatically

                **type**\: bool

            .. attribute:: fdb_only

                Only use the Filtering Database for forwarding lookups

                **type**\: bool

            """
            _prefix = 'ethernet-cfm-oper'
            _revision = '2017-10-06'

            def __init__(self):
                super(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.BasicOptions, self).__init__()
                self.yang_name = "basic-options"
                self.yang_parent_name = "options"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('is_auto', (YLeaf(YType.boolean, 'is-auto'), ['bool'])),
                    ('fdb_only', (YLeaf(YType.boolean, 'fdb-only'), ['bool'])),
                ])
                self.is_auto = None
                self.fdb_only = None
                self._segment_path = lambda: "basic-options"
                self._is_frozen = True

            def __setattr__(self, name, value):
                # Route attribute writes through YDK leaf validation.
                self._perform_setattr(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.BasicOptions, ['is_auto', 'fdb_only'], name, value)

        class ExploratoryOptions(Entity):
            """
            Options for an Exploratory Linktrace

            .. attribute:: delay_model

                Delay model for delay calculations

                **type**\: :py:class:`CfmPmEltDelayModel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmEltDelayModel>`

            .. attribute:: delay_constant_factor

                Constant Factor for delay calculations

                **type**\: int

                **range:** 0..4294967295

            .. attribute:: reply_filter

                Reply Filtering mode used by responders

                **type**\: :py:class:`CfmPmElmReplyFilter <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmElmReplyFilter>`

            """
            _prefix = 'ethernet-cfm-oper'
            _revision = '2017-10-06'

            def __init__(self):
                super(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.ExploratoryOptions, self).__init__()
                self.yang_name = "exploratory-options"
                self.yang_parent_name = "options"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('delay_model', (YLeaf(YType.enumeration, 'delay-model'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmEltDelayModel', '')])),
                    ('delay_constant_factor', (YLeaf(YType.uint32, 'delay-constant-factor'), ['int'])),
                    ('reply_filter', (YLeaf(YType.enumeration, 'reply-filter'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmElmReplyFilter', '')])),
                ])
                self.delay_model = None
                self.delay_constant_factor = None
                self.reply_filter = None
                self._segment_path = lambda: "exploratory-options"
                self._is_frozen = True

            def __setattr__(self, name, value):
                # Route attribute writes through YDK leaf validation.
                self._perform_setattr(Cfm.Global.IncompleteTraceroutes.IncompleteTraceroute.TracerouteInformation.Options.ExploratoryOptions, ['delay_model', 'delay_constant_factor', 'reply_filter'], name, value)
class MaintenancePoints(Entity):
"""
Maintenance Points table
.. attribute:: maintenance_point
Information about a particular Maintenance Point
**type**\: list of :py:class:`MaintenancePoint <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.MaintenancePoints.MaintenancePoint>`
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
    """Initialize the Maintenance Points table container."""
    super(Cfm.Global.MaintenancePoints, self).__init__()
    self.yang_name = "maintenance-points"
    self.yang_parent_name = "global"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    self.ylist_key_names = []
    self._child_classes = OrderedDict([("maintenance-point", ("maintenance_point", Cfm.Global.MaintenancePoints.MaintenancePoint))])
    # Pure container: no leafs of its own, only the keyed child list.
    self._leafs = OrderedDict()
    self.maintenance_point = YList(self)
    self._segment_path = lambda: "maintenance-points"
    self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/%s" % self._segment_path()
    # Freeze the entity: from here on, __setattr__ only accepts declared names.
    self._is_frozen = True
def __setattr__(self, name, value):
    # No leafs on this container; validation still guards unknown attributes.
    self._perform_setattr(Cfm.Global.MaintenancePoints, [], name, value)
class MaintenancePoint(Entity):
"""
Information about a particular Maintenance
Point
.. attribute:: domain (key)
Maintenance Domain
**type**\: str
**length:** 1..79
.. attribute:: service (key)
Service (Maintenance Association)
**type**\: str
**length:** 1..79
.. attribute:: interface (key)
Interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: maintenance_point
Maintenance Point
**type**\: :py:class:`MaintenancePoint_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.MaintenancePoints.MaintenancePoint.MaintenancePoint_>`
.. attribute:: mep_has_error
MEP error flag
**type**\: bool
.. attribute:: mac_address
MAC Address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
    """Initialize the keyed list entry: leafs, child container and paths."""
    super(Cfm.Global.MaintenancePoints.MaintenancePoint, self).__init__()
    self.yang_name = "maintenance-point"
    self.yang_parent_name = "maintenance-points"
    self.is_top_level_class = False
    self.has_list_ancestor = False
    # YANG list keys, in key order; they form the predicate in _segment_path.
    self.ylist_key_names = ['domain','service','interface']
    self._child_classes = OrderedDict([("maintenance-point", ("maintenance_point", Cfm.Global.MaintenancePoints.MaintenancePoint.MaintenancePoint_))])
    self._leafs = OrderedDict([
        ('domain', (YLeaf(YType.str, 'domain'), ['str'])),
        ('service', (YLeaf(YType.str, 'service'), ['str'])),
        ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
        ('mep_has_error', (YLeaf(YType.boolean, 'mep-has-error'), ['bool'])),
        ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
    ])
    self.domain = None
    self.service = None
    self.interface = None
    self.mep_has_error = None
    self.mac_address = None
    # Child container (note: same YANG name as this list entry).
    self.maintenance_point = Cfm.Global.MaintenancePoints.MaintenancePoint.MaintenancePoint_()
    self.maintenance_point.parent = self
    self._children_name_map["maintenance_point"] = "maintenance-point"
    self._segment_path = lambda: "maintenance-point" + "[domain='" + str(self.domain) + "']" + "[service='" + str(self.service) + "']" + "[interface='" + str(self.interface) + "']"
    self._absolute_path = lambda: "Cisco-IOS-XR-ethernet-cfm-oper:cfm/global/maintenance-points/%s" % self._segment_path()
    # Freeze the entity: from here on, __setattr__ only accepts declared leafs.
    self._is_frozen = True
def __setattr__(self, name, value):
    """Route attribute writes through YDK leaf validation."""
    # Dropped the stray u'' prefixes on the last two names so the list matches
    # the plain-str style of every sibling __setattr__ (identical behavior on
    # Python 3, where u'x' == 'x').
    self._perform_setattr(Cfm.Global.MaintenancePoints.MaintenancePoint, ['domain', 'service', 'interface', 'mep_has_error', 'mac_address'], name, value)
class MaintenancePoint_(Entity):
"""
Maintenance Point
.. attribute:: domain_name
Domain name
**type**\: str
.. attribute:: level
Domain level
**type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>`
.. attribute:: service_name
Service name
**type**\: str
.. attribute:: interface
Interface
**type**\: str
**pattern:** [a\-zA\-Z0\-9.\_/\-]+
.. attribute:: maintenance_point_type
Type of Maintenance Point
**type**\: :py:class:`CfmMaMpVariety <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmMaMpVariety>`
.. attribute:: mep_id
MEP ID
**type**\: int
**range:** 0..65535
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
    """Initialize the inner maintenance-point detail container."""
    super(Cfm.Global.MaintenancePoints.MaintenancePoint.MaintenancePoint_, self).__init__()
    self.yang_name = "maintenance-point"
    self.yang_parent_name = "maintenance-point"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    self._child_classes = OrderedDict([])
    self._leafs = OrderedDict([
        ('domain_name', (YLeaf(YType.str, 'domain-name'), ['str'])),
        ('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])),
        ('service_name', (YLeaf(YType.str, 'service-name'), ['str'])),
        ('interface', (YLeaf(YType.str, 'interface'), ['str'])),
        ('maintenance_point_type', (YLeaf(YType.enumeration, 'maintenance-point-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmMaMpVariety', '')])),
        ('mep_id', (YLeaf(YType.uint16, 'mep-id'), ['int'])),
    ])
    self.domain_name = None
    self.level = None
    self.service_name = None
    self.interface = None
    self.maintenance_point_type = None
    self.mep_id = None
    self._segment_path = lambda: "maintenance-point"
    # Freeze the entity: from here on, __setattr__ only accepts declared leafs.
    self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.MaintenancePoints.MaintenancePoint.MaintenancePoint_, [u'domain_name', u'level', u'service_name', u'interface', u'maintenance_point_type', | |
#!/usr/bin/env python
"""
CMSC733 Spring 2020: Classical and Deep Learning Approaches for
Geometric Computer Vision
Homework 0: Alohomora: Phase 1 Starter Code
Author(s):
<NAME> (<EMAIL>)
PhD Candidate in Computer Science,
University of Maryland, College Park
<NAME> (<EMAIL>)
PhD Student in Computer Science,
University of Maryland, College Park
<NAME> (<EMAIL>)
Graduate Student pursuing Masters in Robotics,
University of Maryland, College Park
"""
# Code starts here:
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import os
import glob
# Helper Functions.
def rotateImage(image, angle):
    """Rotate an image about its centre.

    Args:
        image: 2-D (or HxWxC) array-like image.
        angle: rotation angle in degrees, counter-clockwise.

    Returns:
        The rotated image with the same width and height as the input.
    """
    # Normalise to an ndarray once; the original converted twice
    # (np.array(np.array(image))) and would still fail on plain lists at
    # image.shape below.
    img = np.asarray(image)
    # cv2 expects the centre in (x, y) order, hence shape[1::-1] == (w, h).
    image_center = tuple(np.array(img.shape[1::-1]) / 2)
    rotation_matrix = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    return cv2.warpAffine(img, rotation_matrix, img.shape[1::-1], flags=cv2.INTER_LINEAR)
def GaussianKernel(n, sigma):
    """First y-derivative of a Gaussian sampled on an n x n grid.

    Despite the name this is a *derivative* of Gaussian (the -y factor),
    used as the base kernel for the oriented DoG filter bank.  Scaled by
    1 / (2*pi*sigma^4) as in the original implementation.
    """
    variance = sigma ** 2
    half = int((n - 1) / 2)
    coords = range(-half, half + 1)
    rows = []
    for y in coords:
        rows.append([-y * np.exp(-1 * (x ** 2 + y ** 2) / (2 * variance)) for x in coords])
    deriv = np.asarray(rows)
    return deriv / (2 * np.pi * variance * variance)
def Gaussian1D(sigma, mu, x, order):
    """1-D Gaussian centred at mu, or one of its derivatives.

    Args:
        sigma: standard deviation.
        mu: mean (centre).
        x: sample point(s), array-like.
        order: 0 -> Gaussian, 1 -> first derivative, else second derivative.
    """
    offset = np.array(x) - mu
    variance = sigma ** 2
    base = (1 / np.sqrt(2 * np.pi * variance)) * (np.exp((-1 * offset * offset) / (2 * variance)))
    if order == 0:
        return base
    if order == 1:
        return - base * ((offset) / (variance))
    # Second derivative (any order other than 0 or 1).
    return base * (((offset * offset) - variance) / (variance ** 2))
def Gaussian2D(k, sigma):
    """Isotropic Gaussian sampled on a k x k grid centred at the origin.

    Note: normalised by 1/sqrt(2*pi*sigma^2) (the 1-D constant), exactly as
    the original implementation.
    """
    half = int((k - 1) / 2)
    variance = sigma ** 2
    axis = np.arange(-half, half + 1)
    xx, yy = np.meshgrid(axis, axis)
    dist_sq = xx ** 2 + yy ** 2
    return (1 / np.sqrt(2 * np.pi * variance)) * np.exp(-dist_sq / (2 * variance))
def LOG2D(k, sigma):
    """Laplacian-of-Gaussian kernel sampled on a k x k grid.

    Built from the same (1-D-constant) Gaussian as Gaussian2D, multiplied by
    (r^2 - sigma^2) / sigma^4.
    """
    half = int((k - 1) / 2)
    variance = sigma ** 2
    axis = np.arange(-half, half + 1)
    xx, yy = np.meshgrid(axis, axis)
    dist_sq = xx ** 2 + yy ** 2
    gauss = (1 / np.sqrt(2 * np.pi * variance)) * np.exp(-dist_sq / (2 * variance))
    return gauss * (dist_sq - variance) / (variance ** 2)
def makefilter(scale, PhaseX, PhaseY, Points, k):
    """Compose one LM filter as a separable product of 1-D Gaussians.

    The x profile uses 3*scale and derivative order PhaseX; the y profile
    uses scale and order PhaseY.  Points holds the (rotated) sample
    coordinates as a 2 x (k*k) array; the product is reshaped to k x k.
    """
    profile_x = Gaussian1D(3 * scale, 0, Points[0, ...], PhaseX)
    profile_y = Gaussian1D(scale, 0, Points[1, ...], PhaseY)
    return np.reshape(profile_x * profile_y, (k, k))
def binary(image, bins):
    """Indicator map: 1 where ``image == bins``, 0 elsewhere.

    Vectorised replacement for the original per-pixel double loop — one
    C-level comparison instead of O(H*W) Python iterations, with the same
    output values and the same dtype as ``image * 0`` produced.

    Args:
        image: 2-D array of labels.
        bins: label value to select.

    Returns:
        Array of the same shape and dtype as ``image`` containing 0/1.
    """
    image = np.asarray(image)
    return (image == bins).astype(image.dtype)
def compute_gradient(maps, numbins, mask_1, mask_2):
    """Chi-square gradient of a label map over pairs of half-disc masks.

    Generalised: the mask count was hard-coded to 12; it is now taken from
    ``len(mask_1)``, which is backward-compatible with all existing callers
    that pass 12 masks.

    Args:
        maps: 2-D label map (e.g. texton / brightness / color map).
        numbins: number of distinct labels; labels 1..numbins-1 are compared
            (label 0 is skipped — preserves the original behaviour).
        mask_1: sequence of "left" half-disc filters.
        mask_2: matching sequence of opposite half-disc filters.

    Returns:
        H x W x len(mask_1) array of chi-square distances.
    """
    maps = maps.astype(np.float64)
    num_masks = len(mask_1)
    gradient = np.zeros((maps.shape[0], maps.shape[1], num_masks))
    for m in range(0, num_masks):
        chi_square = np.zeros((maps.shape))
        for i in range(1, numbins):
            tmp = binary(maps, i)
            g_i = cv2.filter2D(tmp, -1, mask_1[m])
            h_i = cv2.filter2D(tmp, -1, mask_2[m])
            # The 0.0001 term guards against division by zero where both
            # half-disc responses are empty.
            chi_square = chi_square + ((g_i - h_i) ** 2) / (g_i + h_i + 0.0001)
        gradient[:, :, m] = chi_square
    return gradient
#Half-Disk.
def half_disk(radius):
    """Binary upper half-disc mask of shape (2*radius, 2*radius).

    Vectorised replacement for the per-pixel double loop (also drops a stray
    semicolon).  A cell (i, j) is 1 iff i < radius (upper half) and
    (i - radius)^2 + (j - radius)^2 < radius^2, exactly as before.

    Args:
        radius: disc radius in pixels.

    Returns:
        float64 array of 0.0 / 1.0 values.
    """
    yy, xx = np.mgrid[0:2 * radius, 0:2 * radius]
    inside = (yy - radius) ** 2 + (xx - radius) ** 2 < radius ** 2
    # Keep only rows above the centre line, matching the original loop bound.
    inside[radius:, :] = False
    return inside.astype(float)
#Filters.
def Oriented_DoG():
    """Build and display the oriented Derivative-of-Gaussian filter bank.

    Two sigmas x 16 orientations = 32 filters of size 7x7, returned as a
    flat list ordered sigma-major, orientation-minor.
    """
    sigmas = [1, 3]
    num_orients = 16
    angles = np.arange(0, 360, 360 / num_orients)
    plt.figure(figsize=(25, 5))
    bank = []
    for s_idx, s in enumerate(sigmas):
        base = GaussianKernel(7, s)
        for a_idx in range(num_orients):
            bank.append(rotateImage(base, angles[a_idx]))
            slot = num_orients * s_idx + a_idx
            plt.suptitle("OrientedDoG")
            plt.subplot(len(sigmas), num_orients, slot + 1)
            plt.axis('off')
            plt.imshow(bank[slot], cmap='gray')
    plt.show()
    return bank
def LML():
    """Build and display the Leung-Malik (Large) filter bank.

    48 filters of size 49x49: 18 edge (1st-derivative) + 18 bar
    (2nd-derivative) filters at 3 scales x 6 orientations, then 4 Gaussians,
    4 LoG and 4 larger-scale LoG filters.  Returns a (49, 49, 48) array.
    """
    k = 49
    scaleX = np.sqrt(2) ** np.array([1, 2, 3])
    Orientation = 6
    Rotation_ = 12
    Bar_ = len(scaleX) * Orientation
    Edge_ = len(scaleX) * Orientation
    nF = Bar_ + Edge_ + Rotation_
    F = np.zeros([k, k, nF])
    hK = (k - 1) / 2
    # Flattened (x, y) support coordinates, rotated per orientation below.
    x = [np.arange(-hK, hK + 1)]
    y = [np.arange(-hK, hK + 1)]
    [x, y] = np.meshgrid(x, y)
    orgPts = [x.flatten(), y.flatten()]
    orgPts = np.array(orgPts)
    count = 0
    for scale in range(len(scaleX)):
        for orient in range(Orientation):
            # Rotate the sample points rather than the rendered filter.
            angle = (np.pi * orient) / Orientation
            cosine_ = np.cos(angle)
            sin_ = np.sin(angle)
            rotPts = [[cosine_, -sin_], [sin_, cosine_]]
            rotPts = np.array(rotPts)
            rotPts = np.dot(rotPts, orgPts)
            # print(rotPts)
            # Edge filter (order 1) in slot `count`, bar filter (order 2)
            # offset by Edge_ — interleaved block layout.
            F[:, :, count] = makefilter(scaleX[scale], 0, 1, rotPts, k)
            F[:, :, count + Edge_] = makefilter(scaleX[scale], 0, 2, rotPts, k)
            count = count + 1
    # Remaining 12 rotationally-symmetric filters.
    count = Bar_ + Edge_
    scales = np.sqrt(2) ** np.array([1, 2, 3, 4])
    for i in range(len(scales)):
        F[:, :, count] = Gaussian2D(k, scales[i])
        count = count + 1
    for i in range(len(scales)):
        F[:, :, count] = LOG2D(k, scales[i])
        count = count + 1
    for i in range(len(scales)):
        F[:, :, count] = LOG2D(k, 3 * scales[i])
        count = count + 1
    plt.figure(figsize=(12, 8))
    for i in range(0, 48):
        plt.subplot(6, 8, i + 1)
        plt.axis('off')
        plt.imshow(F[:, :, i], cmap='gray')
    plt.suptitle("LML")
    plt.show()
    return F
def LMS():
    """Build and display the Leung-Malik (Small) filter bank.

    Identical structure to LML() but with scales sqrt(2)^[0,1,2] for the
    oriented filters and sqrt(2)^[0..3] for the symmetric ones.  Returns a
    (49, 49, 48) array.
    """
    k = 49
    scaleX = np.sqrt(2) ** np.array([0, 1, 2])
    Orientation = 6
    Rotation_ = 12
    Bar_ = len(scaleX) * Orientation
    Edge_ = len(scaleX) * Orientation
    nF = Bar_ + Edge_ + Rotation_
    F = np.zeros([k, k, nF])
    hK = (k - 1) / 2
    # Flattened (x, y) support coordinates, rotated per orientation below.
    x = [np.arange(-hK, hK + 1)]
    y = [np.arange(-hK, hK + 1)]
    [x, y] = np.meshgrid(x, y)
    orgPts = [x.flatten(), y.flatten()]
    orgPts = np.array(orgPts)
    count = 0
    for scale in range(len(scaleX)):
        for orient in range(Orientation):
            # Rotate the sample points rather than the rendered filter.
            angle = (np.pi * orient) / Orientation
            cosine_ = np.cos(angle)
            sin_ = np.sin(angle)
            rotPts = [[cosine_, -sin_], [sin_, cosine_]]
            rotPts = np.array(rotPts)
            rotPts = np.dot(rotPts, orgPts)
            # print(rotPts)
            # Edge filter (order 1) in slot `count`, bar filter (order 2)
            # offset by Edge_ — interleaved block layout.
            F[:, :, count] = makefilter(scaleX[scale], 0, 1, rotPts, k)
            F[:, :, count + Edge_] = makefilter(scaleX[scale], 0, 2, rotPts, k)
            count = count + 1
    # Remaining 12 rotationally-symmetric filters.
    count = Bar_ + Edge_
    scales = np.sqrt(2) ** np.array([0, 1, 2, 3])
    for i in range(len(scales)):
        F[:, :, count] = Gaussian2D(k, scales[i])
        count = count + 1
    for i in range(len(scales)):
        F[:, :, count] = LOG2D(k, scales[i])
        count = count + 1
    for i in range(len(scales)):
        F[:, :, count] = LOG2D(k, 3 * scales[i])
        count = count + 1
    plt.figure(figsize=(12, 8))
    for i in range(0, 48):
        plt.subplot(6, 8, i + 1)
        plt.axis('off')
        plt.imshow(F[:, :, i], cmap='gray')
    plt.suptitle("LMS")
    plt.show()
    return F
def gabor(sigma, theta, lambda_, psi, gamma):
    """Build and display a Gabor filter bank.

    Bug fix: the subplot row count was computed with true division
    (``l_ / 5`` is a float under Python 3), which raises in recent
    matplotlib; it now uses integer division.

    Args:
        sigma: list of x standard deviations — one base kernel per entry.
        theta: base orientation in radians.
        lambda_: wavelength of the sinusoidal carrier.
        psi: phase offset of the carrier.
        gamma: spatial aspect ratio (ysigma = sigma / gamma).

    Returns:
        List of rotated Gabor kernels, 15 rotations per sigma.
    """
    gabor_ = list()
    filters = 15
    for k in sigma:
        xsigma = k
        ysigma = float(k) / gamma
        std_ = 3
        # Support large enough to hold +/- 3 standard deviations of the
        # rotated envelope (at least 1 pixel each way).
        xmax = np.ceil(max(1, max(abs(std_ * xsigma * np.cos(theta)), abs(std_ * ysigma * np.sin(theta)))))
        ymax = np.ceil(max(1, max(abs(std_ * xsigma * np.sin(theta)), abs(std_ * ysigma * np.cos(theta)))))
        xmin = -xmax
        ymin = -ymax
        (y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))
        # Rotate coordinates, then modulate a Gaussian envelope by a cosine.
        x_theta = x * np.cos(theta) + y * np.sin(theta)
        y_theta = -x * np.sin(theta) + y * np.cos(theta)
        gab_ = np.exp(-.5 * (x_theta ** 2 / xsigma ** 2 + y_theta ** 2 / ysigma ** 2)) * np.cos(
            2 * np.pi / lambda_ * x_theta + psi)
        angle = np.linspace(0, 360, filters)
        for i in range(filters):
            image = rotateImage(gab_, angle[i])
            gabor_.append(image)
    l_ = len(gabor_)
    for i in range(l_):
        # // (not /): subplot() requires integer row/column counts.
        plt.subplot(l_ // 5, 5, i + 1)
        plt.axis('off')
        plt.imshow(gabor_[i], cmap='gray')
    plt.show()
    return gabor_
def main():
"""
Generate Difference of Gaussian Filter Bank: (DoG)
Display all the filters in this filter bank and save image as DoG.png,
use command "cv2.imwrite(...)"
"""
DoG = Oriented_DoG()
"""
Generate Leung-Malik Filter Bank: (LM)
Display all the filters in this filter bank and save image as LM.png,
use command "cv2.imwrite(...)"
"""
L1 = LML()
L2 = LMS()
"""
Generate Gabor Filter Bank: (Gabor)
Display all the filters in this filter bank and save image as Gabor.png,
use command "cv2.imwrite(...)"
"""
G = gabor(sigma=[9, 13], theta=0.25, lambda_=7, psi=0.5, gamma=1)
"""
Generate Half-disk masks
Display all the Half-disk masks and save image as HDMasks.png,
use command "cv2.imwrite(...)"
"""
orientation = np.arange(0, 360, 360 / 8)
scales = np.asarray([5, 7, 10])
mask_3 = []
mask_4 = []
sz = scales.size
oz = orientation.size
for i in range(0, sz):
halfd_ = half_disk(scales[i])
for m in range(0, oz):
mask_1 = rotateImage(halfd_, orientation[m])
mask_3.append(mask_1)
mask_2 = rotateImage(mask_1, 180)
mask_4.append(mask_2)
plt.subplot(sz * 2, oz, oz * 2 * (i) + m + 1)
plt.axis('off')
plt.imshow(mask_1, cmap='gray')
plt.subplot(sz * 2, oz, oz * 2 * (i) + m + 1 + oz)
plt.axis('off')
plt.imshow(mask_2, cmap='gray')
plt.show()
# Filter_Bank.
filter_bank = []
for i in range(0, len(DoG)):
filter_bank.append(DoG[i])
for i in range(0, 48):
filter_bank.append(L1[:, :, i])
for i in range(0, 48):
filter_bank.append(L2[:, :, i])
for i in range(len(G)):
filter_bank.append(G[i])
os.chdir("../BSDS500/Images")
Images_ = []
for img in sorted(glob.glob("*.jpg")):
img_ = cv2.imread(img)
Images_.append(img_)
Image_No = 7 #Will range from 0-9.
plt.imshow(cv2.cvtColor(Images_[Image_No], cv2.COLOR_BGR2RGB))
plt.show()
os.chdir("../../Code")
Img_ = cv2.cvtColor(Images_[Image_No], cv2.COLOR_BGR2GRAY)
Img_C = Images_[Image_No]
"""
Generate Texton Map
Filter image using oriented gaussian filter bank
"""
data = np.zeros((Img_.size, len(filter_bank)))
for i in range(0, len(filter_bank)):
temp_image = cv2.filter2D(Img_, -1, filter_bank[i])
temp_image = temp_image.reshape((1, Img_.size))
data[:, i] = temp_image
"""
Generate texture ID's using K-means clustering
Display texton map and save image as TextonMap_ImageName.png,
use command "cv2.imwrite('...)"
"""
k_means_texton = KMeans(n_clusters=64, n_init=4)
k_means_texton.fit(data)
labels = k_means_texton.labels_
texton_map = np.reshape(labels, (Img_.shape))
plt.imshow(texton_map, cmap=None)
plt.title("TextonMap")
plt.axis('off')
plt.show()
"""
Generate Texton Gradient (Tg)
Perform Chi-square calculation on Texton Map
Display Tg and save image as Tg_ImageName.png,
use command "cv2.imwrite(...)"
"""
texton_gradient = compute_gradient(texton_map, 64, mask_1, mask_2)
tex_gm = np.mean(texton_gradient, axis=2)
plt.imshow(tex_gm, cmap=None)
plt.title("TextonGradient")
plt.axis('off')
plt.show()
"""
Generate Brightness Map
Perform brightness binning
"""
m = Img_.reshape((Img_.shape[0] * Img_.shape[1]), 1)
k_means_Brightness = KMeans(n_clusters=16, random_state=4)
k_means_Brightness.fit(m)
labels = k_means_Brightness.labels_
brightness_map = np.reshape(labels, (Img_.shape[0], Img_.shape[1]))
mini_ = np.min(brightness_map)
maxx_ = np.max(brightness_map)
brightnessmap_final = 255 * (brightness_map - mini_) / np.float((maxx_ - mini_))
plt.imshow(brightnessmap_final, cmap='gray')
plt.title("BrightnessMap")
plt.axis('off')
plt.show()
"""
Generate Brightness Gradient (Bg)
Perform Chi-square calculation on Brightness Map
Display Bg and save image as Bg_ImageName.png,
use command "cv2.imwrite(...)"
"""
bright_gradient = compute_gradient(brightness_map, 16, mask_1, mask_2)
bright_gm = np.mean(bright_gradient, axis=2)
plt.imshow(bright_gm, cmap='gray')
plt.title("BrightnessGradient")
plt.axis('off')
plt.show()
"""
Generate Color Map
Perform color binning or clustering
"""
m = Img_C.reshape((Img_C.shape[0] * Img_C.shape[1]), 3)
k_means_color = KMeans(n_clusters=16, | |
<gh_stars>0
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This test script performs unit tests on functions in the adb_utils module."""
import grp
import json
import os
import subprocess
from unittest import mock
from gazoo_device import config
from gazoo_device import errors
from gazoo_device.tests.unit_tests.utils import unit_test_case
from gazoo_device.utility import adb_utils
from gazoo_device.utility import host_utils
# --- Fake command paths and canned command output used by the tests. ---
ADB_CMD_PATH = "/usr/bin/adb"
# Simulated `adb devices` output covering device / sideload / offline states.
FAKE_ADB_DEVICES_OUTPUT = ("List of devices attached\n"
                           "04576e89\tdevice\n"
                           "04576ee5\tsideload\n"
                           "04576eaz\toffline\n"
                           "172.16.31.10:5555\tdevice\n"
                           "172.16.31.10:5555\tsideload\n"
                           "192.168.3.11:5555\toffline\n\n")
ADB_DEVICES = ["04576e89", "172.16.31.10"]
SIDELOAD_DEVICES = ["04576ee5", "172.16.31.10:5555"]
FAKE_ADB_REBOOT = ""
FAKE_ADB_ROOT = ""
FAKE_SHELL = "abc\n123\n"
FASTBOOT_CMD_PATH = "/usr/bin/fastboot"
FASTBOOT_CMD = os.path.basename(FASTBOOT_CMD_PATH)
FASTBOOT_DEVICES = ["04576e89", "06011HFDD0165R", "04576ee5"]
# Simulated `fastboot devices` output matching FASTBOOT_DEVICES above.
FAKE_FASTBOOT = ("04576e89 fastboot\n"
                 "06011HFDD0165R Android Fastboot\n"
                 "04576ee5 fastboot\n\n")
FAKE_FASTBOOT_REBOOT = ("Rebooting...\n\n"
                        "Finished. Total time: 0.157s\n")
# --- Fake device identity and host-user/group data. ---
DEVICE_NAME = "somedevice"
DEVICE_ADB_SERIAL = "aabbccdd"
DEVICE_FASTBOOT_SERIAL = "aabbccdd"
TEST_GROUP_ENTRY = ("plugdev", None, 46, None)
TEST_GOOD_GROUP_LIST = [42, 46]
TEST_USER_UID = 1000
TEST_USER_NAME = "test_user"
class AdbUtilsTests(unit_test_case.UnitTestCase):
"""ADB utility tests."""
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_010_adb_utils_get_fastboot_path_raises_error(self,
                                                      mock_get_command_path):
    """Verify get_fastboot_path raises RuntimeError when fastboot is missing."""
    with self.assertRaises(RuntimeError):
        adb_utils.get_fastboot_path()
    mock_get_command_path.assert_called()
@mock.patch.object(
    host_utils, "get_command_path", return_value=FASTBOOT_CMD_PATH)
def test_011_adb_utils_get_fastboot_path_calls_get_command_path(
    self, mock_get_command_path):
    """Verify get_fastboot_path delegates to host_utils.get_command_path."""
    self.assertEqual(FASTBOOT_CMD_PATH, adb_utils.get_fastboot_path())
    mock_get_command_path.assert_called()
@mock.patch.object(
    subprocess,
    "check_output",
    return_value=FAKE_FASTBOOT.encode("utf-8", errors="replace"))
@mock.patch.object(
    adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_020_adb_utils_get_fastboot_devices_calls_get_fastboot_path(
    self, mock_get_fastboot_path, mock_subprocess):
    """Verify get_fastboot_devices calls get_fastboot_path and parses output."""
    self.assertEqual(FASTBOOT_DEVICES, adb_utils.get_fastboot_devices())
    mock_get_fastboot_path.assert_called()
    mock_subprocess.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_021_adb_utils_get_fastboot_devices_bad_fastboot_path(
    self, mock_has_command):
    """Verify get_fastboot_devices returns [] for a nonexistent fastboot path."""
    devices = adb_utils.get_fastboot_devices(fastboot_path="bogus/path")
    self.assertEqual(devices, [])
    mock_has_command.assert_called()
@mock.patch.object(
    subprocess,
    "check_output",
    side_effect=subprocess.CalledProcessError(-1, ["fastboot", "devices"]))
@mock.patch.object(
    adb_utils, "get_fastboot_path", return_value=FASTBOOT_CMD_PATH)
def test_022_adb_utils_get_fastboot_devices_subprocess_errors(
    self, mock_get_fastboot_path, mock_subprocess):
    """Verify get_fastboot_devices handles subprocess errors internally."""
    # A failing `fastboot devices` call should yield an empty list, not raise.
    self.assertEqual([], adb_utils.get_fastboot_devices())
    mock_get_fastboot_path.assert_called()
    mock_subprocess.assert_called()
@mock.patch.object(os.path, "exists", return_value=True)
def test_023_adb_utils_get_fastboot_path_uses_correct_path(self, mock_exists):
    """Verify get_fastboot_path returns an explicitly supplied fastboot path."""
    path = adb_utils.get_fastboot_path(fastboot_path="genuine/path")
    self.assertEqual(path, "genuine/path")
@mock.patch.object(
    adb_utils, "get_fastboot_devices", return_value=FASTBOOT_DEVICES)
def test_030_adb_utils_is_fastboot_mode_true(self, mock_get_fastboot_devices):
    """Verify is_fastboot_mode returns True for a serial in fastboot mode."""
    adb_serial = "04576e89"
    self.assertTrue(adb_utils.is_fastboot_mode(adb_serial))
    mock_get_fastboot_devices.assert_called()
@mock.patch.object(
adb_utils, "get_fastboot_devices", return_value=FASTBOOT_DEVICES)
def test_031_adb_utils_is_fastboot_mode_false(self,
mock_get_fastboot_devices):
"""Verify is_fastboot_mode returns False."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_fastboot_mode(adb_serial))
mock_get_fastboot_devices.assert_called()
@mock.patch.object(
adb_utils, "get_sideload_devices", return_value=SIDELOAD_DEVICES)
def test_032_adb_utils_is_sideload_mode_true(self, mock_get_sideload_devices):
"""Verify is_sideload_mode on True."""
adb_serial = SIDELOAD_DEVICES[0]
self.assertTrue(adb_utils.is_sideload_mode(adb_serial))
mock_get_sideload_devices.assert_called_once()
@mock.patch.object(
adb_utils, "get_sideload_devices", return_value=SIDELOAD_DEVICES)
def test_033_adb_utils_is_sideload_mode_false(self,
mock_get_sideload_devices):
"""Verify is_sideload_mode on False."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_sideload_mode(adb_serial))
mock_get_sideload_devices.assert_called_once()
  @mock.patch.object(
      subprocess,
      "check_output",
      return_value=FASTBOOT_CMD_PATH.encode("utf-8", errors="replace"))
  @mock.patch.object(grp, "getgrnam", return_value=TEST_GROUP_ENTRY)
  @mock.patch.object(os, "getgroups", return_value=TEST_GOOD_GROUP_LIST)
  @mock.patch.object(os, "getuid", return_value=TEST_USER_UID)
  @mock.patch.object(os, "getlogin", return_value=TEST_USER_NAME)
  def test_040_adb_utils_verify_user_has_fastboot(self, mock_getlogin,
                                                  mock_getuid, mock_getgroups,
                                                  mock_getgrnam,
                                                  mock_check_output):
    """Verify that verify_user_has_fastboot works correctly."""
    # Mock arguments are injected bottom-up: the innermost decorator
    # (os.getlogin) maps to the first parameter after `self`.
    try:
      adb_utils.verify_user_has_fastboot(DEVICE_NAME)
      mock_check_output.assert_called()
    except subprocess.CalledProcessError as err:
      self.fail("verify_user_has_fastboot() raised error: {!r}".format(err))
  @mock.patch.object(
      subprocess,
      "check_output",
      side_effect=subprocess.CalledProcessError(1, ["which", FASTBOOT_CMD]))
  def test_041_adb_utils_verify_user_has_fastboot_no_fastboot(
      self, mock_check_output):
    """Verify that verify_user_has_fastboot raises if fastboot not present."""
    # A failing `which fastboot` is expected to surface as a DeviceError.
    with self.assertRaises(errors.DeviceError):
      adb_utils.verify_user_has_fastboot(DEVICE_NAME)
    mock_check_output.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
def test_050_adb_utils_get_adb_path_no_config_file(self,
mock_get_command_path):
"""Verify get_adb_path handles open errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
@mock.patch.object(json, "load", side_effect=ValueError)
def test_051_adb_utils_get_adb_path_bad_config_data(self, mock_json_load,
mock_get_command_path):
"""Verify get_adb_path handles json.load errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_json_load.assert_called()
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "get_command_path", return_value=ADB_CMD_PATH)
def test_052_adb_utils_get_adb_path_no_adb_path_in_config(
self, mock_get_command_path):
"""Verify get_adb_path handles missing adb_path key errors internally."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
self.assertEqual(ADB_CMD_PATH, adb_utils.get_adb_path())
mock_get_command_path.assert_called()
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_053_adb_utils_get_adb_path_bad_adb_path_raises_error(
self, mock_has_command):
"""Verify get_adb_path bad adb_path raises error."""
config_file = os.path.join(self.artifacts_directory,
self._testMethodName + ".json")
with open(config_file, "w") as gdm_config:
gdm_config.write("{\"")
gdm_config.write(config.ADB_BIN_PATH_CONFIG)
gdm_config.write("\":")
gdm_config.write("\"/some/bad/path\"}")
with mock.patch.dict(config.__dict__,
{"DEFAULT_GDM_CONFIG_FILE": config_file}):
with self.assertRaises(RuntimeError):
adb_utils.get_adb_path()
@mock.patch.object(os.path, "exists", return_value=True)
def test_054_adb_utils_get_fadb_path_uses_correct_path(self, mock_exists):
"""Verify get_adb_path defaults to path passed in."""
path = adb_utils.get_adb_path(adb_path="genuine/path")
self.assertEqual(path, "genuine/path")
@mock.patch.object(
adb_utils, "_adb_command", return_value=FAKE_ADB_DEVICES_OUTPUT)
def test_060_adb_utils_get_adb_devices_calls_get_adb_path(
self, mock_adb_command):
"""Verify get_adb_devices calls _adb_command."""
self.assertEqual(ADB_DEVICES, adb_utils.get_adb_devices())
mock_adb_command.assert_called()
  @mock.patch.object(host_utils, "has_command", return_value=False)
  @mock.patch.object(os.path, "exists", return_value=False)
  def test_061_adb_utils_get_adb_devices_returns_list_when_no_adb(
      self, mock_exists, mock_has_command):
    """Verify get_adb_devices returns an empty list when adb is unavailable."""
    self.assertEqual([], adb_utils.get_adb_devices())
@mock.patch.object(
adb_utils, "_adb_command", return_value=FAKE_ADB_DEVICES_OUTPUT)
def test_062_adb_utils_get_sideload_devices_on_success(
self, mock_adb_command):
"""Verify get_sideload_devices returns devices on success."""
self.assertEqual(SIDELOAD_DEVICES, adb_utils.get_sideload_devices())
mock_adb_command.assert_called_once_with("devices", adb_path=None)
@mock.patch.object(adb_utils, "_adb_command", side_effect=RuntimeError())
def test_063_adb_utils_get_sideload_devices_on_failure(
self, mock_adb_command):
"""Verify get_sideload_devices returns empty list on failure."""
self.assertEqual([], adb_utils.get_sideload_devices())
mock_adb_command.assert_called_once_with("devices", adb_path=None)
@mock.patch.object(adb_utils, "get_adb_devices", return_value=ADB_DEVICES)
def test_070_adb_utils_is_adb_mode_returns_true(self, mock_get_adb_devices):
"""Verify is_adb_mode calls get_adb_devices."""
adb_serial = "04576e89"
self.assertTrue(adb_utils.is_adb_mode(adb_serial))
mock_get_adb_devices.assert_called()
@mock.patch.object(adb_utils, "get_adb_devices", return_value=ADB_DEVICES)
def test_071_adb_utils_is_adb_mode_returns_false(self, mock_get_adb_devices):
"""Verify is_adb_mode calls get_adb_devices."""
adb_serial = "bogus"
self.assertFalse(adb_utils.is_adb_mode(adb_serial))
mock_get_adb_devices.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=False)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=True)
def test_080_adb_utils_is_device_online_yes_no(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and not is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_not_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=True)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=False)
def test_081_adb_utils_is_device_online_no_yes(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=False)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=False)
def test_082_adb_utils_is_device_online_no_no(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and is_fastboot_mode."""
self.assertFalse(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_called()
@mock.patch.object(adb_utils, "is_fastboot_mode", return_value=True)
@mock.patch.object(adb_utils, "is_adb_mode", return_value=True)
def test_083_adb_utils_is_device_online_yes_yes(self, mock_is_adb_mode,
mock_is_fastboot_mode):
"""Verify is_device_online calls is_adb_mode and not is_fastboot_mode."""
self.assertTrue(adb_utils.is_device_online(DEVICE_ADB_SERIAL))
mock_is_adb_mode.assert_called()
mock_is_fastboot_mode.assert_not_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_100_adb_utils_adb_command_without_adb_serial(self,
mock_get_adb_path):
"""Verify _adb_command without adb_serial."""
command = "fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_101_adb_utils_adb_command_with_string_command(
self, mock_get_adb_path):
"""Verify _adb_command with string command."""
command = "fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_102_adb_utils_adb_command_with_string_command(
self, mock_get_adb_path):
"""Verify _adb_command with unicode command."""
command = u"fake_command"
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_103_adb_utils_adb_command_with_list_command(self, mock_get_adb_path):
"""Verify _adb_command with command list."""
command = ["fake_command", "arg1"]
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_104_adb_utils_adb_command_with_tuple_command(self,
mock_get_adb_path):
"""Verify _adb_command with tuple list."""
command = ("fake_command", "arg1")
command_output = "fake output\n"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command, DEVICE_ADB_SERIAL)
self.assertEqual(command_output, output)
mock_get_adb_path.assert_called()
@mock.patch.object(os.path, "exists", return_value=False)
@mock.patch.object(host_utils, "has_command", return_value=False)
def test_105_adb_utils_adb_command_bad_adb_path(self, mock_has_command,
mock_os_path_exists):
"""Verify _adb_command skips get_adb_path raises error on bad path."""
with self.assertRaises(RuntimeError):
adb_utils._adb_command(
"fake_command", DEVICE_ADB_SERIAL, adb_path="bogus/path")
mock_os_path_exists.assert_called()
mock_has_command.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_106_adb_utils_adb_command_include_return_code(
self, mock_get_adb_path):
"""Verify _adb_command include_return_code returns tuple."""
command = "fake_command"
command_output = "fake output\n"
command_return_code = 1
mock_popen = mock.MagicMock(
spec=subprocess.Popen, returncode=command_return_code)
mock_popen.communicate.return_value = (command_output.encode(
"utf-8", errors="replace"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output, return_code = adb_utils._adb_command(
command, DEVICE_ADB_SERIAL, include_return_code=True)
self.assertEqual(command_output, output)
self.assertEqual(command_return_code, return_code)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "get_adb_path", return_value=ADB_CMD_PATH)
def test_107_adb_utils_adb_command_with_offline(self, mock_get_adb_path):
"""Verify _adb_command succeeds if output includes "offline"."""
command = "fake_command"
mock_popen = mock.MagicMock(spec=subprocess.Popen, returncode=0)
mock_popen.communicate.return_value = (
FAKE_ADB_DEVICES_OUTPUT.encode("utf-8"), None)
with mock.patch.object(subprocess, "Popen", return_value=mock_popen):
output = adb_utils._adb_command(command)
self.assertEqual(FAKE_ADB_DEVICES_OUTPUT, output)
mock_get_adb_path.assert_called()
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
@mock.patch.object(os.path, "exists", return_value=True)
def test_119_adb_utils_install_package_on_device_success(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device on success."""
fake_package_path = "/tmp/xxx.apk"
adb_utils.install_package_on_device(
fake_package_path, adb_serial=DEVICE_ADB_SERIAL, adb_path=ADB_CMD_PATH)
mock_path_exists.assert_called_once_with(fake_package_path)
mock_adb_command.assert_called_once_with(("install", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
@mock.patch.object(os.path, "exists", return_value=True)
def test_120_adb_utils_install_package_on_device_with_flags_success(
self, mock_path_exists, mock_adb_command):
"""Verify install_package_on_device with flags on success."""
fake_package_path = "/tmp/xxx.apk"
adb_utils.install_package_on_device(
fake_package_path,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH,
allow_downgrade=True,
allow_test_apk=True,
reinstall=True,
all_permissions=True)
mock_path_exists.assert_called_once_with(fake_package_path)
mock_adb_command.assert_called_once_with(
("install", "-d", "-g", "-r", "-t", fake_package_path),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
  @mock.patch.object(adb_utils, "_adb_command")
  @mock.patch.object(os.path, "exists")
  def test_121_adb_utils_install_package_on_device_exception(
      self, mock_path_exists, mock_adb_command):
    """Verify install_package_on_device raise exception."""
    # Note:
    # install_package_on_device() raises exception when:
    # 1) package_path is not a file.
    # 2) 'Success\n' is not found in command response.
    # Both phases share the same mocks; return_value is reassigned
    # between phases, so phase order matters.
    fake_package_path = "/tmp/xxx.apk"
    # 1) package path not a file
    mock_path_exists.return_value = False
    with self.assertRaises(ValueError):
      adb_utils.install_package_on_device(
          fake_package_path,
          adb_serial=DEVICE_ADB_SERIAL,
          adb_path=ADB_CMD_PATH)
    mock_path_exists.assert_called_with(fake_package_path)
    # 2) 'Success\n' is not in command response
    mock_path_exists.return_value = True
    mock_adb_command.return_value = ""
    with self.assertRaises(errors.DeviceError):
      adb_utils.install_package_on_device(
          fake_package_path,
          adb_serial=DEVICE_ADB_SERIAL,
          adb_path=ADB_CMD_PATH)
    mock_adb_command.assert_called_with(("install", fake_package_path),
                                        adb_serial=DEVICE_ADB_SERIAL,
                                        adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="Success\n")
def test_122_adb_utils_uninstall_package_on_device_success(
self, mock_adb_command):
"""Verify uninstall_package_on_device on success."""
fake_package_name = "com.google.fakepackage"
adb_utils.uninstall_package_on_device(
fake_package_name, adb_serial=DEVICE_ADB_SERIAL, adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(("uninstall", fake_package_name),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
@mock.patch.object(adb_utils, "_adb_command", return_value="")
def test_123_adb_utils_uninstall_package_on_device_exception(
self, mock_adb_command):
"""Verify uninstall_package_on_device raise exception."""
fake_package_name = "com.google.fakepackage"
with self.assertRaises(errors.DeviceError):
adb_utils.uninstall_package_on_device(
fake_package_name,
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
mock_adb_command.assert_called_once_with(("uninstall", fake_package_name),
adb_serial=DEVICE_ADB_SERIAL,
adb_path=ADB_CMD_PATH)
  @mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_SHELL)
  @mock.patch.object(os.path, "isfile", return_value=True)
  def test_124_adb_utils_sideload_package_on_success(self, mock_os_path_isfile,
                                                     mock_adb_command):
    """Verify sideload_package calls _adb_command for an existing file."""
    package_path = "/tmp/abc"
    self.assertEqual(
        adb_utils.sideload_package(package_path, DEVICE_ADB_SERIAL), FAKE_SHELL)
    mock_os_path_isfile.assert_called_once_with(package_path)
    mock_adb_command.assert_called_once_with(("sideload", package_path),
                                             adb_serial=DEVICE_ADB_SERIAL,
                                             adb_path=None)
@mock.patch.object(adb_utils, "_adb_command", return_value=FAKE_SHELL)
@mock.patch.object(os.path, "isfile", return_value=False)
def test_125_adb_utils_sideload_package_on_exception(self,
mock_os_path_isfile,
mock_adb_command):
"""Verify | |
# Copyright BigchainDB GmbH and BigchainDB contributors
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
"""Transaction related models to parse and construct transaction
payloads.
Attributes:
UnspentOutput (namedtuple): Object holding the information
representing an unspent output.
"""
from collections import namedtuple
from copy import deepcopy
from functools import reduce
import base58
from cryptoconditions import Fulfillment, ThresholdSha256, Ed25519Sha256
from cryptoconditions.exceptions import (
ParsingError, ASN1DecodeError, ASN1EncodeError, UnsupportedTypeError)
from sha3 import sha3_256
from bigchaindb.common.crypto import PrivateKey, hash_data
from bigchaindb.common.exceptions import (KeypairMismatchException,
InputDoesNotExist, DoubleSpend,
InvalidHash, InvalidSignature,
AmountError, AssetIdMismatch,
ThresholdTooDeep)
from bigchaindb.common.utils import serialize
# Immutable record describing an unspent transaction output (UTXO).
UnspentOutput = namedtuple(
    'UnspentOutput',
    # TODO 'utxo_hash': sha3_256(f'{txid}{output_index}'.encode())
    # 'utxo_hash', # noqa
    ['transaction_id', 'output_index', 'amount', 'asset_id', 'condition_uri'],
)
class Input(object):
    """A Input is used to spend assets locked by an Output.

    Wraps around a Crypto-condition Fulfillment.

    Attributes:
        fulfillment (:class:`cryptoconditions.Fulfillment`): A Fulfillment
            to be signed with a private key.
        owners_before (:obj:`list` of :obj:`str`): A list of owners after a
            Transaction was confirmed.
        fulfills (:class:`~bigchaindb.common.transaction. TransactionLink`,
            optional): A link representing the input of a `TRANSFER`
            Transaction.
    """

    def __init__(self, fulfillment, owners_before, fulfills=None):
        """Create an instance of an :class:`~.Input`.

        Args:
            fulfillment (:class:`cryptoconditions.Fulfillment`): A
                Fulfillment to be signed with a private key.
            owners_before (:obj:`list` of :obj:`str`): A list of owners
                after a Transaction was confirmed.
            fulfills (:class:`~bigchaindb.common.transaction.
                TransactionLink`, optional): A link representing the input
                of a `TRANSFER` Transaction.

        Raises:
            TypeError: If `fulfills` is not a TransactionLink or
                `owners_before` is not a list.
        """
        if fulfills is not None and not isinstance(fulfills, TransactionLink):
            raise TypeError('`fulfills` must be a TransactionLink instance')
        if not isinstance(owners_before, list):
            # BUG FIX: the message previously said '`owners_after` must be
            # a list instance', but the argument validated here is
            # `owners_before`.
            raise TypeError('`owners_before` must be a list instance')
        self.fulfillment = fulfillment
        self.fulfills = fulfills
        self.owners_before = owners_before

    def __eq__(self, other):
        # TODO: If `other !== Fulfillment` return `False`
        return self.to_dict() == other.to_dict()

    def to_dict(self):
        """Transforms the object to a Python dictionary.

        Note:
            If an Input hasn't been signed yet, this method returns a
            dictionary representation.

        Returns:
            dict: The Input as an alternative serialization format.
        """
        try:
            fulfillment = self.fulfillment.serialize_uri()
        except (TypeError, AttributeError, ASN1EncodeError):
            # An unsigned fulfillment cannot be serialized to a URI, so
            # fall back on the details-dictionary representation.
            fulfillment = _fulfillment_to_details(self.fulfillment)
        try:
            # NOTE: `self.fulfills` can be `None` and that's fine
            fulfills = self.fulfills.to_dict()
        except AttributeError:
            fulfills = None
        input_ = {
            'owners_before': self.owners_before,
            'fulfills': fulfills,
            'fulfillment': fulfillment,
        }
        return input_

    @classmethod
    def generate(cls, public_keys):
        """Generate an unsigned Input fulfillable by `public_keys`."""
        # The amount here does not really matter. It is only used on the
        # output data model but here we only care about the fulfillment.
        output = Output.generate(public_keys, 1)
        return cls(output.fulfillment, public_keys)

    @classmethod
    def from_dict(cls, data):
        """Transforms a Python dictionary to an Input object.

        Note:
            Optionally, this method can also serialize a Cryptoconditions-
            Fulfillment that is not yet signed.

        Args:
            data (dict): The Input to be transformed.

        Returns:
            :class:`~bigchaindb.common.transaction.Input`

        Raises:
            InvalidSignature: If an Input's URI couldn't be parsed.
        """
        fulfillment = data['fulfillment']
        if not isinstance(fulfillment, (Fulfillment, type(None))):
            try:
                fulfillment = Fulfillment.from_uri(data['fulfillment'])
            except ASN1DecodeError:
                # TODO Remove as it is legacy code, and simply fall back on
                # ASN1DecodeError
                raise InvalidSignature("Fulfillment URI couldn't been parsed")
            except TypeError:
                # NOTE: See comment about this special case in
                # `Input.to_dict`
                fulfillment = _fulfillment_from_details(data['fulfillment'])
        fulfills = TransactionLink.from_dict(data['fulfills'])
        return cls(fulfillment, data['owners_before'], fulfills)
def _fulfillment_to_details(fulfillment):
    """Encode a fulfillment as a details dictionary.

    Args:
        fulfillment: Crypto-conditions Fulfillment object

    Raises:
        UnsupportedTypeError: If the fulfillment type is neither
            ed25519-sha-256 nor threshold-sha-256.
    """
    type_name = fulfillment.type_name
    if type_name == 'ed25519-sha-256':
        return {
            'type': 'ed25519-sha-256',
            'public_key': base58.b58encode(fulfillment.public_key),
        }
    if type_name == 'threshold-sha-256':
        return {
            'type': 'threshold-sha-256',
            'threshold': fulfillment.threshold,
            # Recursively encode each sub-fulfillment.
            'subconditions': [
                _fulfillment_to_details(sub['body'])
                for sub in fulfillment.subconditions
            ],
        }
    raise UnsupportedTypeError(type_name)
def _fulfillment_from_details(data, _depth=0):
    """Load a fulfillment for a signing spec dictionary.

    Args:
        data: tx.output[].condition.details dictionary
        _depth: internal recursion counter guarding against overly deep
            threshold trees.

    Raises:
        ThresholdTooDeep: If nesting exceeds 100 levels.
        UnsupportedTypeError: For unknown fulfillment types.
    """
    if _depth == 100:
        raise ThresholdTooDeep()
    fulfillment_type = data['type']
    if fulfillment_type == 'ed25519-sha-256':
        return Ed25519Sha256(public_key=base58.b58decode(data['public_key']))
    if fulfillment_type == 'threshold-sha-256':
        threshold = ThresholdSha256(data['threshold'])
        for subcondition in data['subconditions']:
            threshold.add_subfulfillment(
                _fulfillment_from_details(subcondition, _depth + 1))
        return threshold
    raise UnsupportedTypeError(data.get('type'))
class TransactionLink(object):
    """An object for unidirectional linking to a Transaction's Output.

    Attributes:
        txid (str, optional): A Transaction to link to.
        output (int, optional): An output's index in a Transaction with id
            `txid`.
    """

    def __init__(self, txid=None, output=None):
        """Create an instance of a :class:`~.TransactionLink`.

        Note:
            In an IPLD implementation, this class is not necessary anymore,
            as an IPLD link can simply point to an object, as well as an
            objects properties. So instead of having a (de)serializable
            class, we can have a simple IPLD link of the form:
            `/<tx_id>/transaction/outputs/<output>/`.

        Args:
            txid (str, optional): A Transaction to link to.
            output (int, optional): An Outputs's index in a Transaction with
                id `txid`.
        """
        self.txid = txid
        self.output = output

    def __bool__(self):
        # The link is only "truthy" when it points at a concrete output.
        return self.txid is not None and self.output is not None

    def __eq__(self, other):
        # BUG FIX (resolves the old TODO): comparing against anything
        # without a `to_dict` method used to raise AttributeError.
        # Returning NotImplemented lets Python fall back to its default
        # comparison for foreign types.
        if not isinstance(other, TransactionLink):
            return NotImplemented
        return self.to_dict() == other.to_dict()

    def __hash__(self):
        return hash((self.txid, self.output))

    @classmethod
    def from_dict(cls, link):
        """Transforms a Python dictionary to a TransactionLink object.

        Args:
            link (dict): The link to be transformed.

        Returns:
            :class:`~bigchaindb.common.transaction.TransactionLink`
        """
        try:
            return cls(link['transaction_id'], link['output_index'])
        except TypeError:
            # `link` is None (e.g. a CREATE transaction's input).
            return cls()

    def to_dict(self):
        """Transforms the object to a Python dictionary.

        Returns:
            (dict|None): The link as an alternative serialization format.
        """
        if self.txid is None and self.output is None:
            return None
        else:
            return {
                'transaction_id': self.txid,
                'output_index': self.output,
            }

    def to_uri(self, path=''):
        """Return the REST-style URI for this link, or None if empty."""
        if self.txid is None and self.output is None:
            return None
        return '{}/transactions/{}/outputs/{}'.format(path, self.txid,
                                                      self.output)
class Output(object):
"""An Output is used to lock an asset.
Wraps around a Crypto-condition Condition.
Attributes:
fulfillment (:class:`cryptoconditions.Fulfillment`): A Fulfillment
to extract a Condition from.
public_keys (:obj:`list` of :obj:`str`, optional): A list of
owners before a Transaction was confirmed.
"""
MAX_AMOUNT = 9 * 10 ** 18
def __init__(self, fulfillment, public_keys=None, amount=1):
"""Create an instance of a :class:`~.Output`.
Args:
fulfillment (:class:`cryptoconditions.Fulfillment`): A
Fulfillment to extract a Condition from.
public_keys (:obj:`list` of :obj:`str`, optional): A list of
owners before a Transaction was confirmed.
amount (int): The amount of Assets to be locked with this
Output.
Raises:
TypeError: if `public_keys` is not instance of `list`.
"""
if not isinstance(public_keys, list) and public_keys is not None:
raise TypeError('`public_keys` must be a list instance or None')
if not isinstance(amount, int):
raise TypeError('`amount` must be an int')
if amount < 1:
raise AmountError('`amount` must be greater than 0')
if amount > self.MAX_AMOUNT:
raise AmountError('`amount` must be <= %s' % self.MAX_AMOUNT)
self.fulfillment = fulfillment
self.amount = amount
self.public_keys = public_keys
    def __eq__(self, other):
        # TODO: If `other !== Condition` return `False`
        # NOTE(review): raises AttributeError when `other` has no
        # `to_dict`; equality currently assumes an Output-like operand.
        return self.to_dict() == other.to_dict()
def to_dict(self):
"""Transforms the object to a Python dictionary.
Note:
A dictionary serialization of the Input the Output was
derived from is always provided.
Returns:
dict: The Output as an alternative serialization format.
"""
# TODO FOR CC: It must be able to recognize a hashlock condition
# and fulfillment!
condition = {}
try:
condition['details'] = _fulfillment_to_details(self.fulfillment)
except AttributeError:
pass
try:
condition['uri'] = self.fulfillment.condition_uri
except AttributeError:
condition['uri'] = self.fulfillment
output = {
'public_keys': self.public_keys,
'condition': condition,
'amount': str(self.amount),
}
return output
@classmethod
def generate(cls, public_keys, amount):
"""Generates a Output from a specifically formed tuple or list.
Note:
If a ThresholdCondition has to be generated where the threshold
is always the number of subconditions it is split between, a
list of the following structure is sufficient:
[(address|condition)*, [(address|condition)*, ...], ...]
Args:
public_keys (:obj:`list` of :obj:`str`): The public key of
the users that should be able to fulfill the Condition
that is being created.
amount (:obj:`int`): The amount locked by the Output.
Returns:
An Output that can be used in a Transaction.
Raises:
TypeError: If `public_keys` is not an instance of `list`.
ValueError: If `public_keys` is an empty list.
"""
threshold = len(public_keys)
if not isinstance(amount, int):
raise TypeError('`amount` must be a int')
if amount < 1:
raise AmountError('`amount` needs to be greater than zero')
if not isinstance(public_keys, list):
raise TypeError('`public_keys` must be an instance of list')
if len(public_keys) == 0:
raise ValueError('`public_keys` needs to contain at least one'
'owner')
elif len(public_keys) == 1 and not isinstance(public_keys[0], list):
if isinstance(public_keys[0], Fulfillment):
ffill = public_keys[0]
else:
ffill = Ed25519Sha256(
public_key=base58.b58decode(public_keys[0]))
return cls(ffill, public_keys, amount=amount)
else:
initial_cond = ThresholdSha256(threshold=threshold)
threshold_cond = reduce(cls._gen_condition, public_keys,
initial_cond)
return cls(threshold_cond, public_keys, amount=amount)
@classmethod
def _gen_condition(cls, initial, new_public_keys):
"""Generates ThresholdSha256 conditions from a list of new owners.
Note:
This method is intended only to be used with a | |
# <gh_stars>0  (scraper metadata; kept as a comment so the file parses)
#coding: utf-8
from flask import Flask, request, abort
from linebot import ( LineBotApi, WebhookHandler )
from linebot.exceptions import( InvalidSignatureError )
from linebot.models import *

app = Flask(__name__)
# BUG FIX: the access-token string literal was unterminated (missing the
# closing quote and parenthesis), which made the module a SyntaxError.
# Replace the placeholders below with the real channel credentials.
line_bot_api = LineBotApi("<KEY>")  # -- YOUR_CHANNEL_ACCESS_TOKEN
handler = WebhookHandler("2<PASSWORD>")
@app.route("/callback", methods=['POST'])
def callback():
    """LINE webhook endpoint: validates the signature, dispatches events.

    Returns 'OK' (200) on success; aborts with 400 when the
    `X-Line-Signature` header does not match the request body.
    """
    print(">>>>>>>>> 1.testing")
    signature = request.headers['X-Line-Signature']
    print(">>>>>>>>> 2.testing")
    body = request.get_data(as_text=True)
    print(">>>>>>>>> 3.testing"+body)
    app.logger.info("Request body: " + body)
    print(">>>>>>>>> 4.testing-body:"+body)
    try:
        print(">>>>>>>>> 5.testing-try:...")
        # Dispatches to the @handler.add callbacks; raises
        # InvalidSignatureError when the signature check fails.
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
print(event)
if event.message.id == "100001":
return
text = event.message.text
if (text=="help"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="Help"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="hElp"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="heLp"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="helP"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="HELP"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="hELP"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="HeLP"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="HElP"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="HELp"):
reply_text = "請對我輸入以下的關鍵字\n\n目前支援指令:\n[時間] + [縣市] + 天氣 or 空氣 or 帶傘\n[縣市] + [時間] + 天氣 or 空氣 or 帶傘\n\n例如:今天桃園市天氣、臺北市明天空氣、後天臺中市帶傘\n\n[時間]:今天、明天、後天\n[縣市]:臺北市、新北市、桃園市、臺中市、臺南市、高雄市、基隆市、新竹市、嘉義市、新竹縣、苗栗縣、彰化縣、南投縣、雲林縣、嘉義縣、屏東縣、宜蘭縣、花蓮縣、臺東縣、台東縣、澎湖縣、金門縣、連江縣"
elif(text=="今天臺北市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="今天台北市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="今天新北市天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="今天桃園市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="今天臺中市天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="今天台中市天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="今天臺南市天氣"):
reply_text ="陣雨\n降雨機率:60%\n濕度:70%\n風速:28公里/時"
elif(text=="今天台南市天氣"):
reply_text ="陣雨\n降雨機率:60%\n濕度:70%\n風速:28公里/時"
elif(text=="今天高雄市天氣"):
reply_text ="雷陣雨\n降雨機率:100%\n濕度:85%\n風速:45公里/時"
elif(text=="今天基隆市天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="今天新竹市天氣"):
reply_text ="中雨\n降雨機率:100%\n濕度:80%\n風速:37公里/時"
elif(text=="今天嘉義市天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:41公里/時"
elif(text=="今天新竹縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:12%\n風速:15公里/時"
elif(text=="今天苗栗縣天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="今天彰化縣天氣"):
reply_text ="陰\n降雨機率:50%\n濕度:55%\n風速:17公里/時"
elif(text=="今天南投縣天氣"):
reply_text ="陣雨\n降雨機率:77%\n濕度:64%\n風速:30公里/時"
elif(text=="今天雲林縣天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="今天嘉義縣天氣"):
reply_text ="小雨\n降雨機率:85%\n濕度:75%\n風速:34公里/時"
elif(text=="今天屏東縣天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="今天宜蘭縣天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:24公里/時"
elif(text=="今天花蓮縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="今天臺東縣天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="今天台東縣天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="今天澎湖縣天氣"):
reply_text ="陰\n降雨機率:34%\n濕度:66%\n風速:17公里/時"
elif(text=="今天金門縣天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="今天連江縣天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="明天臺北市天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="明天台北市天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="明天新北市天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="明天桃園市天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="明天臺中市天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="明天台中市天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="明天臺南市天氣"):
reply_text ="陣雨\n降雨機率:77%\n濕度:64%\n風速:30公里/時"
elif(text=="明天台南市天氣"):
reply_text ="陣雨\n降雨機率:77%\n濕度:64%\n風速:30公里/時"
elif(text=="明天高雄市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="明天基隆市天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="明天新竹市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="明天嘉義市天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="明天新竹縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="明天苗栗縣天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="明天彰化縣天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:41公里/時"
elif(text=="明天南投縣天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="明天雲林縣天氣"):
reply_text ="陣雨\n降雨機率:75%\n濕度:77%\n風速:15公里/時"
elif(text=="明天嘉義縣天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="明天屏東縣天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="明天宜蘭縣天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="明天花蓮縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="明天臺東縣天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="明天台東縣天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="明天澎湖縣天氣"):
reply_text ="陣雨\n降雨機率:60%\n濕度:70%\n風速:28公里/時"
elif(text=="明天金門縣天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="明天連江縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="後天臺北市天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="後天台北市天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="後天新北市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="後天桃園市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="後天臺中市天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="後天台中市天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="後天臺南市天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="後天台南市天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="後天高雄市天氣"):
reply_text ="陣雨\n降雨機率:75%\n濕度:77%\n風速:15公里/時"
elif(text=="後天基隆市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="後天新竹市天氣"):
reply_text ="陰\n降雨機率:50%\n濕度:55%\n風速:17公里/時"
elif(text=="後天嘉義市天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="後天新竹縣天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="後天苗栗縣天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="後天彰化縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="後天南投縣天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="後天雲林縣天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="後天嘉義縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="後天屏東縣天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:41公里/時"
elif(text=="後天宜蘭縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="後天花蓮縣天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="後天臺東縣天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="後天台東縣天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="後天澎湖縣天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="後天金門縣天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="後天連江縣天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="臺北市今天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="台北市今天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="新北市今天天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="桃園市今天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="臺中市今天天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="台中市今天天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="臺南市今天天氣"):
reply_text ="陣雨\n降雨機率:60%\n濕度:70%\n風速:28公里/時"
elif(text=="台南市今天天氣"):
reply_text ="陣雨\n降雨機率:60%\n濕度:70%\n風速:28公里/時"
elif(text=="高雄市今天天氣"):
reply_text ="雷陣雨\n降雨機率:100%\n濕度:85%\n風速:45公里/時"
elif(text=="基隆市今天天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="新竹市今天天氣"):
reply_text ="中雨\n降雨機率:100%\n濕度:80%\n風速:37公里/時"
elif(text=="嘉義市今天天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:41公里/時"
elif(text=="新竹縣今天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:12%\n風速:15公里/時"
elif(text=="苗栗縣今天天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="彰化縣今天天氣"):
reply_text ="陰\n降雨機率:50%\n濕度:55%\n風速:17公里/時"
elif(text=="南投縣今天天氣"):
reply_text ="陣雨\n降雨機率:77%\n濕度:64%\n風速:30公里/時"
elif(text=="雲林縣今天天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="嘉義縣今天天氣"):
reply_text ="小雨\n降雨機率:85%\n濕度:75%\n風速:34公里/時"
elif(text=="屏東縣今天天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="宜蘭縣今天天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:24公里/時"
elif(text=="花蓮縣今天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="臺東縣今天天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="台東縣今天天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="澎湖縣今天天氣"):
reply_text ="陰\n降雨機率:34%\n濕度:66%\n風速:17公里/時"
elif(text=="金門縣今天天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="連江縣今天天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="臺北市明天天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="台北市明天天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="新北市明天天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="桃園市明天天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="臺中市明天天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="台中市明天天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="臺南市明天天氣"):
reply_text ="陣雨\n降雨機率:77%\n濕度:64%\n風速:30公里/時"
elif(text=="台南市明天天氣"):
reply_text ="陣雨\n降雨機率:77%\n濕度:64%\n風速:30公里/時"
elif(text=="高雄市明天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="基隆市明天天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="新竹市明天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="嘉義市明天天氣"):
reply_text ="陰\n降雨機率:30%\n濕度:42%\n風速:27公里/時"
elif(text=="新竹縣明天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="苗栗縣明天天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="彰化縣明天天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:41公里/時"
elif(text=="南投縣明天天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="雲林縣明天天氣"):
reply_text ="陣雨\n降雨機率:75%\n濕度:77%\n風速:15公里/時"
elif(text=="嘉義縣明天天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="屏東縣明天天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="宜蘭縣明天天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="花蓮縣明天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="臺東縣明天天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="台東縣明天天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="澎湖縣明天天氣"):
reply_text ="陣雨\n降雨機率:60%\n濕度:70%\n風速:28公里/時"
elif(text=="金門縣明天天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="連江縣明天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="臺北市後天天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="台北市後天天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="新北市後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="桃園市後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:30%\n風速:26公里/時"
elif(text=="臺中市後天天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="台中市後天天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="臺南市後天天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="台南市後天天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="高雄市後天天氣"):
reply_text ="陣雨\n降雨機率:75%\n濕度:77%\n風速:15公里/時"
elif(text=="基隆市後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="新竹市後天天氣"):
reply_text ="陰\n降雨機率:50%\n濕度:55%\n風速:17公里/時"
elif(text=="嘉義市後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="新竹縣後天天氣"):
reply_text ="雷陣雨\n降雨機率:96%\n濕度:86%\n風速:47公里/時"
elif(text=="苗栗縣後天天氣"):
reply_text ="多雲\n降雨機率:20%\n濕度:25%\n風速:32公里/時"
elif(text=="彰化縣後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="南投縣後天天氣"):
reply_text ="陣雨\n降雨機率:86%\n濕度:76%\n風速:25公里/時"
elif(text=="雲林縣後天天氣"):
reply_text ="小雨\n降雨機率:70%\n濕度:75%\n風速:15公里/時"
elif(text=="嘉義縣後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:20%\n風速:20公里/時"
elif(text=="屏東縣後天天氣"):
reply_text ="大雨\n降雨機率:100%\n濕度:90%\n風速:41公里/時"
elif(text=="宜蘭縣後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="花蓮縣後天天氣"):
reply_text ="中雨\n降雨機率:92%\n濕度:88%\n風速:17公里/時"
elif(text=="臺東縣後天天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="台東縣後天天氣"):
reply_text ="多雲\n降雨機率:0%\n濕度:16%\n風速:26公里/時"
elif(text=="澎湖縣後天天氣"):
reply_text ="雷陣雨\n降雨機率:98%\n濕度:80%\n風速:12公里/時"
elif(text=="金門縣後天天氣"):
reply_text ="晴\n降雨機率:0%\n濕度:16%\n風速:45公里/時"
elif(text=="連江縣後天天氣"):
reply_text ="多雲\n降雨機率:15%\n濕度:28%\n風速:29公里/時"
elif(text=="今天臺北市帶傘"):
reply_text ="今天臺北市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="今天台北市帶傘"):
reply_text ="今天台北市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="今天新北市帶傘"):
reply_text ="今天新北市降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="今天桃園市帶傘"):
reply_text ="今天桃園市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="今天臺中市帶傘"):
reply_text ="今天臺中市降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="今天台中市帶傘"):
reply_text ="今天台中市降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="今天臺南市帶傘"):
reply_text ="今天臺南市降雨機率為60%\n建議您外出時要攜帶雨具"
elif(text=="今天台南市帶傘"):
reply_text ="今天台南市降雨機率為60%\n建議您外出時要攜帶雨具"
elif(text=="今天高雄市帶傘"):
reply_text ="今天高雄市降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="今天基隆市帶傘"):
reply_text ="今天基隆市降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="今天新竹市帶傘"):
reply_text ="今天新竹市降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="今天嘉義市帶傘"):
reply_text ="今天嘉義市降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="今天新竹縣帶傘"):
reply_text ="今天新竹縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="今天苗栗縣帶傘"):
reply_text ="今天苗栗縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="今天彰化縣帶傘"):
reply_text ="今天彰化縣降雨機率為50%\n建議您外出時要攜帶雨具"
elif(text=="今天南投縣帶傘"):
reply_text ="今天南投縣降雨機率為77%\n建議您外出時要攜帶雨具"
elif(text=="今天雲林縣帶傘"):
reply_text ="今天雲林縣降雨機率為98%\nn建議您外出時要攜帶雨具"
elif(text=="今天嘉義縣帶傘"):
reply_text ="今天嘉義縣降雨機率為85%\n建議您外出時要攜帶雨具"
elif(text=="今天屏東縣帶傘"):
reply_text ="今天屏東縣降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="今天宜蘭縣帶傘"):
reply_text ="今天宜蘭縣降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="今天花蓮縣帶傘"):
reply_text ="今天花蓮縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="今天臺東縣帶傘"):
reply_text ="今天臺東縣降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="今天台東縣帶傘"):
reply_text ="今天台東縣降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="今天澎湖縣帶傘"):
reply_text ="今天澎湖縣降雨機率為34%\n外出時不必攜帶雨具"
elif(text=="今天金門縣帶傘"):
reply_text ="今天金門縣降雨機率:86%\n建議您外出時要攜帶雨具"
elif(text=="今天連江縣帶傘"):
reply_text ="今天連江縣降雨機率:96%\n建議您外出時要攜帶雨具"
elif(text=="明天臺北市帶傘"):
reply_text ="明天臺北市降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="明天台北市帶傘"):
reply_text ="明天台北市降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="明天新北市帶傘"):
reply_text ="明天新北市降雨機率為96%\n建議您外出時要攜帶雨具"
elif(text=="明天桃園市帶傘"):
reply_text ="明天桃園市降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="明天臺中市帶傘"):
reply_text ="明天臺中市降雨機率為98%\n建議您外出時要攜帶雨具"
elif(text=="明天台中市帶傘"):
reply_text ="明天台中市降雨機率為98%\n建議您外出時要攜帶雨具"
elif(text=="明天臺南市帶傘"):
reply_text ="明天臺南市降雨機率為77%\n建議您外出時要攜帶雨具"
elif(text=="明天台南市帶傘"):
reply_text ="明天台南市降雨機率為77%\n建議您外出時要攜帶雨具"
elif(text=="明天高雄市帶傘"):
reply_text ="明天高雄市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="明天基隆市帶傘"):
reply_text ="明天基隆市降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="明天新竹市帶傘"):
reply_text ="明天新竹市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="明天嘉義市帶傘"):
reply_text ="明天嘉義市降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="明天新竹縣帶傘"):
reply_text ="明天新竹縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="明天苗栗縣帶傘"):
reply_text ="明天苗栗縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="明天彰化縣帶傘"):
reply_text ="明天彰化縣降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="明天南投縣帶傘"):
reply_text ="明天南投縣降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="明天雲林縣帶傘"):
reply_text ="明天雲林縣降雨機率為75%\n建議您外出時要攜帶雨具"
elif(text=="明天嘉義縣帶傘"):
reply_text ="明天嘉義縣降雨機率20%\n外出時不必攜帶雨具"
elif(text=="明天屏東縣帶傘"):
reply_text ="明天屏東縣降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="明天宜蘭縣帶傘"):
reply_text ="明天宜蘭縣降雨機率為96%\n建議您外出時要攜帶雨具"
elif(text=="明天花蓮縣帶傘"):
reply_text ="明天花蓮縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="明天臺東縣帶傘"):
reply_text ="明天臺東縣降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="明天台東縣帶傘"):
reply_text ="明天台東縣降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="明天澎湖縣帶傘"):
reply_text ="明天澎湖縣降雨機率:60%\n建議您外出時要攜帶雨具"
elif(text=="明天金門縣帶傘"):
reply_text ="明天金門縣降雨機率:70%\n建議您外出時要攜帶雨具"
elif(text=="明天連江縣帶傘"):
reply_text ="明天連江縣降雨機率:0%\n外出時不必攜帶雨具"
elif(text=="後天臺北市帶傘"):
reply_text ="後天臺北市降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="後天台北市帶傘"):
reply_text ="後天台北市降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="後天新北市帶傘"):
reply_text ="後天新北市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天桃園市帶傘"):
reply_text ="後天桃園市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天臺中市帶傘"):
reply_text ="後天臺中市降雨機率為86%\n建議您外出時要攜帶雨具"
elif(text=="後天台中市帶傘"):
reply_text ="後天台中市降雨機率為86%\n建議您外出時要攜帶雨具"
elif(text=="後天臺南市帶傘"):
reply_text ="後天臺南市降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="後天台南市帶傘"):
reply_text ="後天台南市降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="後天高雄市帶傘"):
reply_text ="後天高雄市降雨機率為75%\n建議您外出時要攜帶雨具"
elif(text=="後天基隆市帶傘"):
reply_text ="後天基隆市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天新竹市帶傘"):
reply_text ="後天新竹市降雨機率為50%\n建議您外出時要攜帶雨具"
elif(text=="後天嘉義市帶傘"):
reply_text ="後天嘉義市降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天新竹縣帶傘"):
reply_text ="後天新竹縣降雨機率為96%\n建議您外出時要攜帶雨具"
elif(text=="後天苗栗縣帶傘"):
reply_text ="後天苗栗縣降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="後天彰化縣帶傘"):
reply_text ="後天彰化縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天南投縣帶傘"):
reply_text ="後天南投縣降雨機率為86%\n建議您外出時要攜帶雨具"
elif(text=="後天雲林縣帶傘"):
reply_text ="後天雲林縣降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="後天嘉義縣帶傘"):
reply_text ="後天嘉義縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天屏東縣帶傘"):
reply_text ="後天屏東縣降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="後天宜蘭縣帶傘"):
reply_text ="後天宜蘭縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天花蓮縣帶傘"):
reply_text ="後天花蓮縣降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="後天臺東縣帶傘"):
reply_text ="後天臺東縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天台東縣帶傘"):
reply_text ="後天台東縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天澎湖縣帶傘"):
reply_text ="後天澎湖縣降雨機率為98%\n建議您外出時要攜帶雨具"
elif(text=="後天金門縣帶傘"):
reply_text ="後天金門縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="後天連江縣帶傘"):
reply_text ="後天連江縣降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="臺北市今天帶傘"):
reply_text ="臺北市今天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="台北市今天帶傘"):
reply_text ="台北市今天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="新北市今天帶傘"):
reply_text ="新北市今天降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="桃園市今天帶傘"):
reply_text ="桃園市今天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="臺中市今天帶傘"):
reply_text ="臺中市今天降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="台中市今天帶傘"):
reply_text ="台中市今天降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="臺南市今天帶傘"):
reply_text ="臺南市今天降雨機率為60%\n建議您外出時要攜帶雨具"
elif(text=="台南市今天帶傘"):
reply_text ="台南市今天降雨機率為60%\n建議您外出時要攜帶雨具"
elif(text=="高雄市今天帶傘"):
reply_text ="高雄市今天降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="基隆市今天帶傘"):
reply_text ="基隆市今天降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="新竹市今天帶傘"):
reply_text ="新竹市今天降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="嘉義市今天帶傘"):
reply_text ="嘉義市今天降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="新竹縣今天帶傘"):
reply_text ="新竹縣今天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="苗栗縣今天帶傘"):
reply_text ="苗栗縣今天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="彰化縣今天帶傘"):
reply_text ="彰化縣今天降雨機率為50%\n建議您外出時要攜帶雨具"
elif(text=="南投縣今天帶傘"):
reply_text ="南投縣今天降雨機率為77%\n建議您外出時要攜帶雨具"
elif(text=="雲林縣今天帶傘"):
reply_text ="雲林縣今天降雨機率為98%\nn建議您外出時要攜帶雨具"
elif(text=="嘉義縣今天帶傘"):
reply_text ="嘉義縣今天降雨機率為85%\n建議您外出時要攜帶雨具"
elif(text=="屏東縣今天帶傘"):
reply_text ="屏東縣今天降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="宜蘭縣今天帶傘"):
reply_text ="宜蘭縣今天降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="花蓮縣今天帶傘"):
reply_text ="花蓮縣今天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="臺東縣今天帶傘"):
reply_text ="臺東縣今天降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="台東縣今天帶傘"):
reply_text ="台東縣今天降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="澎湖縣今天帶傘"):
reply_text ="澎湖縣今天降雨機率為34%\n外出時不必攜帶雨具"
elif(text=="金門縣今天帶傘"):
reply_text ="金門縣今天降雨機率:86%\n建議您外出時要攜帶雨具"
elif(text=="連江縣今天帶傘"):
reply_text ="連江縣今天降雨機率:96%\n建議您外出時要攜帶雨具"
elif(text=="臺北市明天帶傘"):
reply_text ="臺北市明天降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="台北市明天帶傘"):
reply_text ="台北市明天降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="新北市明天帶傘"):
reply_text ="新北市明天降雨機率為96%\n建議您外出時要攜帶雨具"
elif(text=="桃園市明天帶傘"):
reply_text ="桃園市明天降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="臺中市明天帶傘"):
reply_text ="臺中市明天降雨機率為98%\n建議您外出時要攜帶雨具"
elif(text=="台中市明天帶傘"):
reply_text ="台中市明天降雨機率為98%\n建議您外出時要攜帶雨具"
elif(text=="臺南市明天帶傘"):
reply_text ="臺南市明天降雨機率為77%\n建議您外出時要攜帶雨具"
elif(text=="台南市明天帶傘"):
reply_text ="台南市明天降雨機率為77%\n建議您外出時要攜帶雨具"
elif(text=="高雄市明天帶傘"):
reply_text ="高雄市明天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="基隆市明天帶傘"):
reply_text ="基隆市明天降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="新竹市明天帶傘"):
reply_text ="新竹市明天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="嘉義市明天帶傘"):
reply_text ="嘉義市明天降雨機率為30%\n外出時不必攜帶雨具"
elif(text=="新竹縣明天帶傘"):
reply_text ="新竹縣明天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="苗栗縣明天帶傘"):
reply_text ="苗栗縣明天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="彰化縣明天帶傘"):
reply_text ="彰化縣明天降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="南投縣明天帶傘"):
reply_text ="南投縣明天降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="雲林縣明天帶傘"):
reply_text ="雲林縣明天降雨機率為75%\n建議您外出時要攜帶雨具"
elif(text=="嘉義縣明天帶傘"):
reply_text ="嘉義縣明天降雨機率20%\n外出時不必攜帶雨具"
elif(text=="屏東縣明天帶傘"):
reply_text ="屏東縣明天降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="宜蘭縣明天帶傘"):
reply_text ="宜蘭縣明天降雨機率為96%\n建議您外出時要攜帶雨具"
elif(text=="花蓮縣明天帶傘"):
reply_text ="明天花蓮縣降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="臺東縣明天帶傘"):
reply_text ="臺東縣明天降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="台東縣明天帶傘"):
reply_text ="台東縣明天降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="澎湖縣明天帶傘"):
reply_text ="澎湖縣明天降雨機率:60%\n建議您外出時要攜帶雨具"
elif(text=="金門縣明天帶傘"):
reply_text ="金門縣明天降雨機率:70%\n建議您外出時要攜帶雨具"
elif(text=="連江縣明天帶傘"):
reply_text ="連江縣明天降雨機率:0%\n外出時不必攜帶雨具"
elif(text=="臺北市後天帶傘"):
reply_text ="臺北市後天降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="台北市後天帶傘"):
reply_text ="台北市後天降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="新北市後天帶傘"):
reply_text ="新北市後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="桃園市後天帶傘"):
reply_text ="桃園市後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="臺中市後天帶傘"):
reply_text ="臺中市後天降雨機率為86%\n建議您外出時要攜帶雨具"
elif(text=="台中市後天帶傘"):
reply_text ="台中市後天降雨機率為86%\n建議您外出時要攜帶雨具"
elif(text=="臺南市後天帶傘"):
reply_text ="臺南市後天降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="台南市後天帶傘"):
reply_text ="台南市後天降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="高雄市後天帶傘"):
reply_text ="高雄市後天降雨機率為75%\n建議您外出時要攜帶雨具"
elif(text=="基隆市後天帶傘"):
reply_text ="基隆市後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="新竹市後天帶傘"):
reply_text ="新竹市後天降雨機率為50%\n建議您外出時要攜帶雨具"
elif(text=="嘉義市後天帶傘"):
reply_text ="嘉義市後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="新竹縣後天帶傘"):
reply_text ="新竹縣後天降雨機率為96%\n建議您外出時要攜帶雨具"
elif(text=="苗栗縣後天帶傘"):
reply_text ="苗栗縣後天降雨機率為20%\n外出時不必攜帶雨具"
elif(text=="彰化縣後天帶傘"):
reply_text ="彰化縣後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="南投縣後天帶傘"):
reply_text ="南投縣後天降雨機率為86%\n建議您外出時要攜帶雨具"
elif(text=="雲林縣後天帶傘"):
reply_text ="雲林縣後天降雨機率為70%\n建議您外出時要攜帶雨具"
elif(text=="嘉義縣後天帶傘"):
reply_text ="嘉義縣後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="屏東縣後天帶傘"):
reply_text ="屏東縣後天降雨機率為100%\n建議您外出時要攜帶雨具"
elif(text=="宜蘭縣後天帶傘"):
reply_text ="宜蘭縣後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="花蓮縣後天帶傘"):
reply_text ="花蓮縣後天降雨機率為92%\n建議您外出時要攜帶雨具"
elif(text=="臺東縣後天帶傘"):
reply_text ="臺東縣後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="台東縣後天帶傘"):
reply_text ="台東縣後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="澎湖縣後天帶傘"):
reply_text ="澎湖縣後天降雨機率為98%\n建議您外出時要攜帶雨具"
elif(text=="金門縣後天帶傘"):
reply_text ="金門縣後天降雨機率為0%\n外出時不必攜帶雨具"
elif(text=="連江縣後天帶傘"):
reply_text ="連江縣後天降雨機率為15%\n外出時不必攜帶雨具"
elif(text=="今天臺北市空氣"):
reply_text ="今天臺北市空氣品質指標AQI為120\n對敏感族群不健康\n建議您外出時配戴口罩"
elif(text=="今天台北市空氣"):
reply_text ="今天台北市空氣品質指標AQI為120\n對敏感族群不健康\n建議您外出時配戴口罩"
elif(text=="今天新北市空氣"):
reply_text ="今天新北市空氣品質指標AQI為140\n對敏感族群不健康\n建議您外出時配戴口罩"
elif(text=="今天桃園市空氣"):
reply_text ="今天桃園市空氣品質指標AQI為20\n空氣品質良好"
elif(text=="今天臺中市空氣"):
reply_text ="今天臺中市空氣品質指標AQI為42\n空氣品質良好"
elif(text=="今天台中市空氣"):
reply_text ="今天台中市空氣品質指標AQI為42\n空氣品質良好"
elif(text=="今天臺南市空氣"):
reply_text ="今天臺南市空氣品質指標AQI為67\n空氣品質普通"
elif(text=="今天台南市空氣"):
reply_text ="今天台南市空氣品質指標AQI為67\n空氣品質普通"
elif(text=="今天高雄市空氣"):
reply_text ="今天高雄市空氣品質指標AQI為30\n空氣品質良好"
elif(text=="今天基隆市空氣"):
reply_text ="今天基隆市空氣品質指標AQI為90\n空氣品質普通"
elif(text=="今天新竹市空氣"):
reply_text ="今天新竹市空氣品質指標AQI為25\n空氣品質良好"
elif(text=="今天嘉義市空氣"):
reply_text ="今天嘉義市空氣品質指標AQI為210\n非常不健康\n建議您盡量少外出"
elif(text=="今天新竹縣空氣"):
reply_text ="今天新竹縣空氣品質指標AQI為20\n空氣品質良好"
elif(text=="今天苗栗縣空氣"):
reply_text ="今天苗栗縣空氣品質指標AQI為320\n對健康有危害影響\n建議您盡量少外出"
elif(text=="今天彰化縣空氣"):
reply_text ="今天彰化縣空氣品質指標AQI為400\n對健康有危害影響\n建議您盡量少外出"
elif(text=="今天南投縣空氣"):
reply_text ="今天南投縣空氣品質指標AQI為160\n對所有族群不健康\n建議您外出時配戴口罩"
elif(text=="今天雲林縣空氣"):
reply_text ="今天雲林縣空氣品質指標AQI為34\n空氣品質良好"
elif(text=="今天嘉義縣空氣"):
reply_text ="今天嘉義縣空氣品質指標AQI為125\n對敏感族群不健康\n建議您外出時配戴口罩"
elif(text=="今天屏東縣空氣"):
reply_text ="今天屏東縣空氣品質指標AQI為26\n空氣品質良好"
elif(text=="今天宜蘭縣空氣"):
reply_text ="今天宜蘭縣空氣品質指標AQI為140\n對敏感族群不健康\n建議您外出時配戴口罩"
elif(text=="今天花蓮縣空氣"):
reply_text ="今天花蓮縣空氣品質指標AQI為10\n空氣品質良好"
elif(text=="今天臺東縣空氣"):
| |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Depth and Ego-Motion networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
# Identifiers for the two supported depth-network architectures.
SIMPLE = 'simple'
RESNET = 'resnet'
ARCHITECTURES = [SIMPLE, RESNET]
# Raw motion-network outputs are multiplied by these small constants;
# scaling was found to facilitate training (see egomotion_net).
SCALE_TRANSLATION = 0.001
SCALE_ROTATION = 0.01
# Disparity (inverse depth) values range from 0.01 to 10. Note that effectively,
# this is undone if depth normalization is used, which scales the values to
# have a mean of 1.
DISP_SCALING = 10
MIN_DISP = 0.01
# NOTE(review): appears to name a collection for weight-decay terms — its
# usage is not visible in this file; confirm against the training code.
WEIGHT_DECAY_KEY = 'WEIGHT_DECAY'
# An egomotion vector has 3 translation + 3 rotation components.
EGOMOTION_VEC_SIZE = 6
def egomotion_net(image_stack, disp_bottleneck_stack, joint_encoder, seq_length,
                  weight_reg):
  """Predict ego-motion vectors from a stack of frames or embeddings.

  Args:
    image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order.
    disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden,
        seq_length * c_hidden] in order.
    joint_encoder: Determines if the same encoder is used for computing the
        bottleneck layer of both the egomotion and the depth prediction
        network. If enabled, disp_bottleneck_stack is used as input, and the
        encoding steps are skipped. If disabled, a separate encoder is defined
        on image_stack.
    seq_length: The sequence length used.
    weight_reg: The amount of weight regularization.

  Returns:
    Egomotion vectors with shape [B, seq_length - 1, 6], where components
    0:3 of the last axis are translation and 3:6 are rotation.
  """
  num_egomotion_vecs = seq_length - 1
  with tf.variable_scope('pose_exp_net') as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                        normalizer_fn=None,
                        weights_regularizer=slim.l2_regularizer(weight_reg),
                        normalizer_params=None,
                        activation_fn=tf.nn.relu,
                        outputs_collections=end_points_collection):
      if not joint_encoder:
        # Define separate encoder. If sharing, we can skip the encoding step,
        # as the bottleneck layer will already be passed as input.
        cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1')
        cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2')
        cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3')
        cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4')
        cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5')
      with tf.variable_scope('pose'):
        inputs = disp_bottleneck_stack if joint_encoder else cnv5
        cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6')
        cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7')
        pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs
        egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred',
                                     stride=1, normalizer_fn=None,
                                     activation_fn=None)
        # Average the prediction over the spatial dimensions.
        egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2])
        egomotion_res = tf.reshape(
            egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])
        # Tinghui found that scaling by a small constant facilitates training.
        # BUG FIX: slice the last axis (vector components: 0:3 translation,
        # 3:6 rotation) and concat on axis=2. The previous [:, 0:3]/[:, 3:6]
        # with axis=1 sliced the per-pair axis instead, so with the usual
        # num_egomotion_vecs=2 the rotation part was never scaled.
        egomotion_scaled = tf.concat(
            [egomotion_res[:, :, 0:3] * SCALE_TRANSLATION,
             egomotion_res[:, :, 3:6] * SCALE_ROTATION],
            axis=2)
  return egomotion_scaled
def objectmotion_net(image_stack, disp_bottleneck_stack, joint_encoder,
                     seq_length, weight_reg):
  """Predict object-motion vectors from a stack of frames or embeddings.

  Args:
    image_stack: Input tensor with shape [B, h, w, seq_length * 3] in order.
    disp_bottleneck_stack: Input tensor with shape [B, h_hidden, w_hidden,
        seq_length * c_hidden] in order.
    joint_encoder: Determines if the same encoder is used for computing the
        bottleneck layer of both the egomotion and the depth prediction
        network. If enabled, disp_bottleneck_stack is used as input, and the
        encoding steps are skipped. If disabled, a separate encoder is defined
        on image_stack.
    seq_length: The sequence length used.
    weight_reg: The amount of weight regularization.

  Returns:
    Object-motion vectors with shape [B, seq_length - 1, 6], where components
    0:3 of the last axis are translation and 3:6 are rotation.
  """
  num_egomotion_vecs = seq_length - 1
  with tf.variable_scope('pose_exp_net') as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                        normalizer_fn=None,
                        weights_regularizer=slim.l2_regularizer(weight_reg),
                        normalizer_params=None,
                        activation_fn=tf.nn.relu,
                        outputs_collections=end_points_collection):
      if not joint_encoder:
        # Define separate encoder. If sharing, we can skip the encoding step,
        # as the bottleneck layer will already be passed as input.
        cnv1 = slim.conv2d(image_stack, 16, [7, 7], stride=2, scope='cnv1')
        cnv2 = slim.conv2d(cnv1, 32, [5, 5], stride=2, scope='cnv2')
        cnv3 = slim.conv2d(cnv2, 64, [3, 3], stride=2, scope='cnv3')
        cnv4 = slim.conv2d(cnv3, 128, [3, 3], stride=2, scope='cnv4')
        cnv5 = slim.conv2d(cnv4, 256, [3, 3], stride=2, scope='cnv5')
      with tf.variable_scope('pose'):
        inputs = disp_bottleneck_stack if joint_encoder else cnv5
        cnv6 = slim.conv2d(inputs, 256, [3, 3], stride=2, scope='cnv6')
        cnv7 = slim.conv2d(cnv6, 256, [3, 3], stride=2, scope='cnv7')
        pred_channels = EGOMOTION_VEC_SIZE * num_egomotion_vecs
        egomotion_pred = slim.conv2d(cnv7, pred_channels, [1, 1], scope='pred',
                                     stride=1, normalizer_fn=None,
                                     activation_fn=None)
        # Average the prediction over the spatial dimensions.
        egomotion_avg = tf.reduce_mean(egomotion_pred, [1, 2])
        egomotion_res = tf.reshape(
            egomotion_avg, [-1, num_egomotion_vecs, EGOMOTION_VEC_SIZE])
        # Tinghui found that scaling by a small constant facilitates training.
        # BUG FIX: slice the last axis (vector components: 0:3 translation,
        # 3:6 rotation) and concat on axis=2. The previous [:, 0:3]/[:, 3:6]
        # with axis=1 sliced the per-pair axis instead, so with the usual
        # num_egomotion_vecs=2 the rotation part was never scaled.
        egomotion_scaled = tf.concat(
            [egomotion_res[:, :, 0:3] * SCALE_TRANSLATION,
             egomotion_res[:, :, 3:6] * SCALE_ROTATION],
            axis=2)
  return egomotion_scaled
def disp_net(architecture, image, use_skip, weight_reg, is_training):
  """Builds the encoder-decoder network for depth (disparity) prediction.

  Args:
    architecture: One of ARCHITECTURES ('simple' or 'resnet').
    image: Input image tensor.
    use_skip: Whether the decoder consumes the encoder's skip connections.
    weight_reg: The amount of weight regularization.
    is_training: Whether the graph is built in training mode.

  Returns:
    A tuple (multiscale_disps, bottleneck) of the decoder's multi-scale
    disparity predictions and the encoder's bottleneck tensor.

  Raises:
    ValueError: If `architecture` is not one of ARCHITECTURES.
  """
  if architecture not in ARCHITECTURES:
    raise ValueError('Unknown architecture.')
  build_encoder = encoder(architecture)
  build_decoder = decoder(architecture)
  # Encode the image into a bottleneck plus skip connections.
  bottleneck, skip_connections = build_encoder(image, weight_reg, is_training)
  # Decode the bottleneck back into multi-scale disparity maps.
  multiscale_disps_i = build_decoder(target_image=image,
                                     bottleneck=bottleneck,
                                     weight_reg=weight_reg,
                                     use_skip=use_skip,
                                     skip_connections=skip_connections)
  return multiscale_disps_i, bottleneck
def encoder(architecture):
  """Returns the encoder-building function for the given architecture."""
  if architecture == RESNET:
    return encoder_resnet
  return encoder_simple
def decoder(architecture):
  """Returns the decoder-building function for the given architecture."""
  if architecture == RESNET:
    return decoder_resnet
  return decoder_simple
def encoder_simple(target_image, weight_reg, is_training):
  """Defines the old encoding architecture.

  Seven stages; each stage is a stride-2 conv followed by a stride-1
  refinement conv (scopes 'cnvN' / 'cnvNb', matching the original layout).

  Args:
    target_image: Input tensor to encode.
    weight_reg: L2 weight-regularization strength.
    is_training: Unused; kept for a uniform encoder signature.

  Returns:
    Tuple of (deepest feature map cnv7b,
              skip connections (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b)).
  """
  del is_training
  with slim.arg_scope([slim.conv2d],
                      normalizer_fn=None,
                      normalizer_params=None,
                      weights_regularizer=slim.l2_regularizer(weight_reg),
                      activation_fn=tf.nn.relu):
    # (output channels, kernel size) for each of the seven stages.
    stage_specs = [(32, 7), (64, 5), (128, 3), (256, 3), (512, 3), (512, 3),
                   (512, 3)]
    net = target_image
    refined = []
    for stage, (depth, ksize) in enumerate(stage_specs, start=1):
      net = slim.conv2d(net, depth, [ksize, ksize], stride=2,
                        scope='cnv%d' % stage)
      net = slim.conv2d(net, depth, [ksize, ksize], stride=1,
                        scope='cnv%db' % stage)
      refined.append(net)
    # Deepest map is the bottleneck; the rest become skips, deepest first.
    return refined[-1], tuple(reversed(refined[:-1]))
def decoder_simple(target_image, bottleneck, weight_reg, use_skip,
                   skip_connections):
  """Defines the old depth decoder architecture.

  Args:
    target_image: Input image; only its static height/width are used to size
      the upsampled disparity maps.
    bottleneck: Encoder output to decode.
    weight_reg: L2 weight-regularization strength.
    use_skip: If True, concatenate encoder skip connections at each scale.
    skip_connections: Tuple (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b)
      as produced by encoder_simple.

  Returns:
    List of disparity predictions at four scales, finest first.
  """
  h = target_image.get_shape()[1].value
  w = target_image.get_shape()[2].value
  (cnv6b, cnv5b, cnv4b, cnv3b, cnv2b, cnv1b) = skip_connections
  with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
                      normalizer_fn=None,
                      normalizer_params=None,
                      weights_regularizer=slim.l2_regularizer(weight_reg),
                      activation_fn=tf.nn.relu):
    up7 = slim.conv2d_transpose(bottleneck, 512, [3, 3], stride=2,
                                scope='upcnv7')
    up7 = _resize_like(up7, cnv6b)
    if use_skip:
      i7_in = tf.concat([up7, cnv6b], axis=3)
    else:
      i7_in = up7
    icnv7 = slim.conv2d(i7_in, 512, [3, 3], stride=1, scope='icnv7')
    up6 = slim.conv2d_transpose(icnv7, 512, [3, 3], stride=2, scope='upcnv6')
    up6 = _resize_like(up6, cnv5b)
    if use_skip:
      i6_in = tf.concat([up6, cnv5b], axis=3)
    else:
      i6_in = up6
    icnv6 = slim.conv2d(i6_in, 512, [3, 3], stride=1, scope='icnv6')
    up5 = slim.conv2d_transpose(icnv6, 256, [3, 3], stride=2, scope='upcnv5')
    up5 = _resize_like(up5, cnv4b)
    if use_skip:
      i5_in = tf.concat([up5, cnv4b], axis=3)
    else:
      i5_in = up5
    icnv5 = slim.conv2d(i5_in, 256, [3, 3], stride=1, scope='icnv5')
    up4 = slim.conv2d_transpose(icnv5, 128, [3, 3], stride=2, scope='upcnv4')
    up4 = _resize_like(up4, cnv3b)
    if use_skip:
      i4_in = tf.concat([up4, cnv3b], axis=3)
    else:
      i4_in = up4
    icnv4 = slim.conv2d(i4_in, 128, [3, 3], stride=1, scope='icnv4')
    disp4 = (slim.conv2d(icnv4, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp4')
             * DISP_SCALING + MIN_DISP)
    # h // 4 replaces np.int(h / 4): np.int was removed in NumPy >= 1.24 and
    # integer floor division is equivalent here.
    disp4_up = tf.image.resize_bilinear(disp4, [h // 4, w // 4],
                                        align_corners=True)
    up3 = slim.conv2d_transpose(icnv4, 64, [3, 3], stride=2, scope='upcnv3')
    up3 = _resize_like(up3, cnv2b)
    if use_skip:
      i3_in = tf.concat([up3, cnv2b, disp4_up], axis=3)
    else:
      # Bug fix: tf.concat requires the axis argument; it was missing here,
      # so use_skip=False raised a TypeError at graph-construction time.
      i3_in = tf.concat([up3, disp4_up], axis=3)
    icnv3 = slim.conv2d(i3_in, 64, [3, 3], stride=1, scope='icnv3')
    disp3 = (slim.conv2d(icnv3, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp3')
             * DISP_SCALING + MIN_DISP)
    disp3_up = tf.image.resize_bilinear(disp3, [h // 2, w // 2],
                                        align_corners=True)
    up2 = slim.conv2d_transpose(icnv3, 32, [3, 3], stride=2, scope='upcnv2')
    up2 = _resize_like(up2, cnv1b)
    if use_skip:
      i2_in = tf.concat([up2, cnv1b, disp3_up], axis=3)
    else:
      # Bug fix: same missing-axis problem as the scale-3 branch above.
      i2_in = tf.concat([up2, disp3_up], axis=3)
    icnv2 = slim.conv2d(i2_in, 32, [3, 3], stride=1, scope='icnv2')
    disp2 = (slim.conv2d(icnv2, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp2')
             * DISP_SCALING + MIN_DISP)
    disp2_up = tf.image.resize_bilinear(disp2, [h, w], align_corners=True)
    up1 = slim.conv2d_transpose(icnv2, 16, [3, 3], stride=2, scope='upcnv1')
    i1_in = tf.concat([up1, disp2_up], axis=3)
    icnv1 = slim.conv2d(i1_in, 16, [3, 3], stride=1, scope='icnv1')
    disp1 = (slim.conv2d(icnv1, 1, [3, 3], stride=1, activation_fn=tf.sigmoid,
                         normalizer_fn=None, scope='disp1')
             * DISP_SCALING + MIN_DISP)
    return [disp1, disp2, disp3, disp4]
def encoder_resnet(target_image, weight_reg, is_training):
"""Defines a ResNet18-based encoding architecture.
This implementation follows <NAME>'s implementation of ResNet18 on GitHub:
https://github.com/dalgu90/resnet-18-tensorflow
Args:
target_image: Input tensor with shape [B, h, w, 3] to encode.
weight_reg: Parameter ignored.
is_training: Whether the model is being trained or not.
| |
<reponame>chisuhua/coasm
from enum import Enum
from collections import OrderedDict
from collections import namedtuple
from jinja2 import Template
from ctypes import *
import pdb
MAX_SREG_NUM = 255
MAX_USER_SREG_NUM = 220
MAX_VREG_NUM = 255
def utilReplace(str):
    """Wrap each occurrence of a fixed character set in single quotes.

    The characters handled ('_', '3', '2', '6', '4', '0', '1', '8') mirror
    the original implementation exactly; other digits pass through unchanged.
    """
    for ch in ('_', '3', '2', '6', '4', '0', '1', '8'):
        str = str.replace(ch, "'" + ch + "'")
    return str
class SpecialREG(Enum):
    """Encodings of special/hardware register operands.

    NOTE(review): ISREG and IVREG share the value 0xff, so IVREG is an
    alias of ISREG under Enum aliasing rules — confirm this is intended.
    """
    SREG_VCC = 0xdf
    SREG_TREG0 = 0xe0      # first of 16 trap/temp registers
    SREG_TREG15 = 0xef     # last trap/temp register
    HWREG_EMSK = 0xf0
    HWREG_SCC = 0xf1
    HWREG_X0 = 0xf2
    HWREG_MODE = 0xf3
    HWREG_STATUS = 0xf4
    HWREG_VCB = 0xf5
    LTID = 0xfe
    ISREG = 0xff
    IVREG = 0xff
#SpecialRegAlias = {"vcc":SREG_VCC}
class Scope:
    """Abstract interface for a symbol-table scope (see BaseScope)."""
    def getScopeName(self):
        """Return a human-readable name for this scope."""
        pass
    def getEnclosingScope(self):
        """Return the parent scope, or None at the outermost level."""
        pass
    def define(self, sym):
        """Bind the symbol `sym` in this scope."""
        pass
    def resolve(self, name):
        """Look up `name` in this scope (and, typically, enclosing ones)."""
        pass
class MemSpace:
    """A memory space parsed from a directive such as ``.global`` or ``.shared``.

    A leading '.' is optional and stripped before matching.

    Raises:
        ValueError: If the string names an unknown space.
    """
    MaxGlobalSize = 1000   # capacity limits used by getMaxSize()
    MaxSharedSize = 1000

    class KindEnum(Enum):
        INVALID = 0
        GLOBAL = 1
        SHARED = 2

    def __init__(self, string):
        self.kind = MemSpace.KindEnum.INVALID
        if string.startswith("."):
            string = string[1:]
        if string == "global":
            self.kind = MemSpace.KindEnum.GLOBAL
            self.name = "global"
        elif string == "shared":
            self.kind = MemSpace.KindEnum.SHARED
            self.name = "shared"
        else:
            # Bug fix: the original used assert("..."), which asserts a
            # non-empty (truthy) string and therefore never fails; raise
            # explicitly so bad input is actually reported.
            raise ValueError("don't know the space {}".format(string))

    def getMaxSize(self):
        """Return the capacity limit for this space."""
        if self.name == "global":
            return MemSpace.MaxGlobalSize
        elif self.name == "shared":
            return MemSpace.MaxSharedSize
class DataType:
    """Operand data type parsed from a suffix such as ``.i32`` or ``.f16x2``.

    A trailing ``*`` marks a pointer type; a leading ``.`` is stripped.

    Raises:
        ValueError: If the string names an unknown data type.
    """

    class TypeEnum(Enum):
        DT_INT8 = 0
        DT_UINT8 = 1
        DT_INT16 = 2
        DT_UINT16 = 3
        DT_INT32 = 4
        DT_UINT32 = 5
        DT_FP16 = 6
        DT_BF16 = 7
        DT_TF32 = 8
        DT_FP32 = 9
        DT_B16 = 10
        DT_B32 = 11
        DT_I16x2 = 12
        DT_U16x2 = 13
        DT_B16x2 = 14
        DT_FP16x2 = 15
        DT_BF16x2 = 16
        DT_INT64 = 17
        DT_UINT64 = 18
        DT_B64 = 19
        DT_MAX = 99

    class TypeKind(Enum):
        INT = 0
        # Bug fix: FLOAT was also 0, making it an Enum alias of INT, so
        # isFloat() was true for every integer type (and isInt() for floats).
        FLOAT = 1

    # (enum, size in bytes, kind, signedness); signed is None where N/A.
    Dtype = namedtuple('Dtype', ['dtype_enum', 'size', 'dtype_kind', 'signed'])
    dtype = {}
    dtype["i8"] = Dtype(TypeEnum.DT_INT8, 1, TypeKind.INT, True)
    dtype["u8"] = Dtype(TypeEnum.DT_UINT8, 1, TypeKind.INT, False)
    dtype["i16"] = Dtype(TypeEnum.DT_INT16, 2, TypeKind.INT, True)
    dtype["u16"] = Dtype(TypeEnum.DT_UINT16, 2, TypeKind.INT, False)
    dtype["i32"] = Dtype(TypeEnum.DT_INT32, 4, TypeKind.INT, True)
    dtype["u32"] = Dtype(TypeEnum.DT_UINT32, 4, TypeKind.INT, False)
    dtype["i64"] = Dtype(TypeEnum.DT_INT64, 8, TypeKind.INT, True)
    dtype["u64"] = Dtype(TypeEnum.DT_UINT64, 8, TypeKind.INT, False)
    dtype["f16"] = Dtype(TypeEnum.DT_FP16, 2, TypeKind.FLOAT, None)
    dtype["bf16"] = Dtype(TypeEnum.DT_BF16, 2, TypeKind.FLOAT, None)
    dtype["f32"] = Dtype(TypeEnum.DT_FP32, 4, TypeKind.FLOAT, None)
    dtype["tf32"] = Dtype(TypeEnum.DT_TF32, 4, TypeKind.FLOAT, None)
    dtype["b16"] = Dtype(TypeEnum.DT_B16, 2, TypeKind.INT, None)
    dtype["b32"] = Dtype(TypeEnum.DT_B32, 4, TypeKind.INT, None)
    dtype["b64"] = Dtype(TypeEnum.DT_B64, 8, TypeKind.INT, None)
    dtype["i16x2"] = Dtype(TypeEnum.DT_I16x2, 4, TypeKind.INT, True)
    dtype["u16x2"] = Dtype(TypeEnum.DT_U16x2, 4, TypeKind.INT, False)
    dtype["b16x2"] = Dtype(TypeEnum.DT_B16x2, 4, TypeKind.INT, None)
    dtype["fp16x2"] = Dtype(TypeEnum.DT_FP16x2, 4, TypeKind.FLOAT, None)
    dtype["bf16x2"] = Dtype(TypeEnum.DT_BF16x2, 4, TypeKind.FLOAT, None)
    dtype["ptr"] = Dtype(TypeEnum.DT_UINT64, 8, TypeKind.INT, None)

    def __init__(self, string):
        self.is_ptr = False
        self.data_type = None
        if string.endswith("*"):
            string = string[:-1]
            self.is_ptr = True
        if string.startswith("."):
            string = string[1:]
        if string in DataType.dtype:
            self.data_type = DataType.dtype[string]
        else:
            # Bug fix: the original used assert(<non-empty string>), which
            # never fails; raise so the bad type name is reported.
            raise ValueError("Invalid Datatype " + string)

    def getSize(self):
        """Size of the type in bytes."""
        return self.data_type.size

    def isSignedInt(self):
        return (self.data_type.dtype_kind is DataType.TypeKind.INT
                and self.data_type.signed is True)

    def isUnSignedInt(self):
        return (self.data_type.dtype_kind is DataType.TypeKind.INT
                and self.data_type.signed is False)

    def isInt(self):
        # Returns an explicit bool (original fell through to None for False).
        return self.data_type.dtype_kind is DataType.TypeKind.INT

    def isFloat(self):
        return self.data_type.dtype_kind is DataType.TypeKind.FLOAT

    def isPointer(self):
        return self.is_ptr
class Symbol:
    """Base class for all symbol-table entries (labels, kernels, vars, regs)."""

    class TypeEnum(Enum):
        INVALID = 0
        LABEL = 1
        KERNEL = 2
        FUNC = 3
        VAR = 4
        REG = 5

    class RegEnum(Enum):
        REG_DST = 0
        REG_SRC0 = 1
        REG_SRC1 = 2
        REG_SRC2 = 3
        REG_SRC3 = 4

    def __init__(self, name='', stype=TypeEnum.INVALID):
        self.name = name
        self.type = stype
        self.scope = None  # set when the symbol is defined in a scope

    def __str__(self):
        # Untyped symbols print bare; typed ones as '<name:TYPE>'.
        if self.type is Symbol.TypeEnum.INVALID:
            return self.name
        return '<{}:{}>'.format(self.name, self.type.name)
class BaseScope(Scope):
    """A concrete scope: a symbol dict plus an optional enclosing scope."""

    def __init__(self, scope: Scope):
        self.enclosingScope = scope
        self.symbols = {}

    def resolve(self, name):
        """Look up `name` here, then walk outward through enclosing scopes."""
        found = self.symbols.get(name)
        if found is None and self.enclosingScope is not None:
            return self.enclosingScope.resolve(name)
        return found

    def define(self, sym: Symbol):
        """Bind `sym` in this scope, keyed by its name."""
        self.symbols[sym.name] = sym

    def getEnclosingScope(self):
        return self.enclosingScope

    def __str__(self):
        return '{} : {}'.format(self.getScopeName(), list(self.symbols))
class GlobalScope(BaseScope):
    """The outermost scope (BaseScope.__init__ is inherited unchanged)."""

    def getScopeName(self):
        return 'globals'
class LocalScope(BaseScope):
    """A nested scope (BaseScope.__init__ is inherited unchanged)."""

    def getScopeName(self):
        return 'locals'
class FunctionSymbol(Symbol, Scope):
    """A function/kernel symbol that is also the scope of its arguments."""

    def __init__(self, name='', stype=Symbol.TypeEnum.KERNEL, scope=None):
        super().__init__(name, stype)
        self.enclosingScope = scope
        self.arguments = {}

    def resolve(self, name):
        """Look up `name` among the arguments, then in enclosing scopes."""
        arg = self.arguments.get(name)
        if arg is not None:
            return arg
        outer = self.enclosingScope
        return outer.resolve(name) if outer is not None else None

    def define(self, sym: Symbol):
        """Register `sym` as an argument and record its owning scope."""
        sym.scope = self
        self.arguments[sym.name] = sym

    def getEnclosingScope(self):
        return self.enclosingScope

    def getScopeName(self):
        return self.name

    def __str__(self):
        parts = [par.__str__() + ';' for par in self.arguments.values()]
        return 'function ' + super().__str__() + ' : ' + ''.join(parts)
class VariableSymbol(Symbol):
    """A VAR-typed symbol, optionally carrying relocation information."""
    # How a reference to this symbol is relocated/encoded.
    class RelocType(Enum):
        Normal = 0
        RelBranch = 1
        SavePC = 2
        PCRel32 = 3
        PCRel32Lo = 4
        PCRel32Hi = 5
        Rel32Lo = 6
        Rel32Hi = 7
    def __init__(self, name):
        super().__init__(name, Symbol.TypeEnum.VAR)
    # Name of the owning function (class-level default, empty string).
    func_name = ""
class Reg(VariableSymbol):
    """A scalar or vector register operand parsed from assembly text.

    Accepts single registers like ``v3``/``s12`` (optionally prefixed with
    '-' or '!' for negation) and ranges like ``s[0:3]``.

    Raises:
        ValueError: If the register string cannot be parsed.
    """

    class RegType(Enum):
        Scalar = 0
        Vector = 1

    def __init__(self, reg_str):
        self.is_neg = False
        self.reg_name = None
        self.idx = None
        self.end_idx = None
        start_pos = 0
        print("INFO: reg str:" + reg_str)
        # A leading '-' or '!' marks a negated/inverted source operand.
        if reg_str[0] == '-' or reg_str[0] == '!':
            self.is_neg = True
            start_pos = 1
        reg_str = reg_str[start_pos:]
        super().__init__(reg_str)
        idx_pos = reg_str.find("[")
        if idx_pos > 0:
            # Range form, e.g. s[0:3].
            colon_pos = reg_str.find(":")
            end_idx_pos = reg_str.find("]")
            self.reg_name = reg_str[0:idx_pos]
            self.idx = int(reg_str[idx_pos+1:colon_pos])
            self.end_idx = int(reg_str[colon_pos+1:end_idx_pos])
        else:
            # here we assume reg name is s|v or s|vreg
            if reg_str[1].isdigit():
                idx_pos = 1
            elif reg_str[4].isdigit():
                idx_pos = 4
            else:
                # Bug fix: was assert("Invalid Reg str") — a truthy string
                # that never fails; raise so bad input is actually reported.
                raise ValueError("Invalid Reg str: " + reg_str)
            self.reg_name = reg_str[0:idx_pos]
            self.idx = int(reg_str[idx_pos:])
            self.end_idx = self.idx
        if reg_str[0] == "s":
            self.rtype = Reg.RegType.Scalar
        elif reg_str[0] == "v":
            self.rtype = Reg.RegType.Vector
        else:
            # Bug fix: same no-op assert pattern as above.
            raise ValueError("invalid reg name: " + reg_str)
        self.alias = None
        self.reloc_type = VariableSymbol.RelocType.Normal

    def __str__(self):
        # NOTE(review): end_idx is assigned in both parse branches of
        # __init__, so the else arm below looks unreachable; kept as a
        # defensive fallback in case end_idx is cleared externally.
        if self.end_idx is not None:
            return "{}[{}:{}]".format(
                "sreg" if self.rtype is Reg.RegType.Scalar else "vreg",
                self.idx, self.end_idx)
        else:
            return "{}{}".format(
                "sreg" if self.rtype is Reg.RegType.Scalar else "vreg",
                self.idx)
class LabelSymbol(Symbol):
    """A code label; optionally bound to the instruction it precedes."""

    def __init__(self, name):
        super().__init__(name, Symbol.TypeEnum.LABEL)
        self.instr = None  # filled in by addInstr()

    def addInstr(self, instr):
        """Attach the instruction this label points at."""
        self.instr = instr
class KernelSymbol(Symbol):
    """A KERNEL-typed symbol naming a kernel entry point."""
    def __init__(self, name):
        super().__init__(name, Symbol.TypeEnum.KERNEL)
class ArgSymbol(Symbol):
    """A kernel argument: its layout (offset/size) and how it is passed."""
    class Kind(Enum):
        GLOBAL_PTR = 0
        GLOBAL_VALUE = 1
        SREG_PTR = 2
        SREG_VALUE = 3
        HIDDEN_GM_BASE = 4 # base ptr for global data
        HIDDEN_KM_BASE = 5 # base ptr for kernel args in global memory
        HIDDEN_PM_BASE = 6 # base ptr for stack
        HIDDEN_PM_SIZE = 7 # stack size per thread in bytes
        INVALID_ARG_VALUE_KIND = 0xff
    def __init__(self, name):
        super().__init__(name)
        self.offset = 0  # byte offset within the kernel argument block
        self.size = 0    # argument size in bytes
        self.kind = ArgSymbol.Kind.SREG_VALUE  # default passing convention
class InstrField:
    """One bit-field of an instruction encoding (name + [msb:lsb] + type).

    Nested STRUCT/UNION fields hang off `child`.
    """

    class TypeEnum(Enum):
        UINT = 0
        INT = 1
        STRUCT = 2
        UNION = 3

    def __init__(self, name="", type=TypeEnum.UINT):
        self.name = name
        self.msb = 0   # most-significant bit position, set by the parser
        self.lsb = 0   # least-significant bit position, set by the parser
        # Bug fix: the original assigned TypeEnum.UINT unconditionally,
        # silently discarding the `type` argument.
        self.type = type
        self.child = []  # nested fields for STRUCT/UNION types
class VisitType(Enum):
    """Selects what an AST/definition visitor pass should emit."""
    GRAMMAR_INSTR_FMT = 1
    GRAMMAR_INSTR_DEF = 2
    GET_INSTR_FMT_LIST = 3
    GEN_INSTR_FMT_FIELD = 4
    GEN_INSTR_OPCODE_DEF = 5
    GEN_INSTR_OP_ENUM_DEF = 6
    GEN_INSTR_DEF = 7
class Instr(Structure):
    """One machine instruction being assembled (packed ctypes Structure).

    NOTE(review): getFmtName(), OpcodeEnum and setImm() are referenced below
    but not defined in this class; they appear to be supplied by generated
    subclasses — confirm.
    """
    _pack_ = 1
    # Format discriminator: `value` occupies `width` bits ending at `bit_start`.
    FmtEnc = namedtuple('FmtEnc', ['bit_start', 'width', 'value'])
    fmt_enc = {}
    fmt_enc["VOP2"] = FmtEnc(31, 1, 0b0)
    fmt_enc["SOP2"] = FmtEnc(31, 2, 0b11)
    fmt_enc["SOPK"] = FmtEnc(31, 4, 0b1001)
    fmt_enc["FLAT"] = FmtEnc(31, 4, 0b1011)
    fmt_enc["SMRD"] = FmtEnc(31, 5, 0b10001)
    fmt_enc["DS"] = FmtEnc(31, 6, 0b100000)
    fmt_enc["MUBUF"] =FmtEnc(31, 6, 0b100001)
    fmt_enc["VOP3A"] =FmtEnc(31, 6, 0b101001)
    fmt_enc["VOP3B"] =FmtEnc(31, 6, 0b101011)
    fmt_enc["VOP1"] = FmtEnc(31, 7, 0b1010001)
    fmt_enc["VOPC"] = FmtEnc(31, 7, 0b1010000)
    fmt_enc["SOPC"] = FmtEnc(31, 9, 0b101010000)
    fmt_enc["SOPP"] = FmtEnc(31, 9, 0b101010001)
    fmt_enc["SOP1"] = FmtEnc(31, 9, 0b101010010)
    # Well-known register aliases mapped to concrete scalar registers.
    SpecialReg = namedtuple('SpecialReg', ['name', 'reg'])
    special_reg = {}
    special_reg['m0'] = SpecialReg("RegisterM0", Reg('s124'))
    special_reg['vcc'] = SpecialReg("RegisterVcc", Reg('s106'))
    special_reg['vccz'] = SpecialReg("RegisterVcc", Reg('s251'))
    special_reg['exec'] = SpecialReg("RegisterVcc", Reg('s126'))
    special_reg['execz'] = SpecialReg("RegisterVcc", Reg('s252'))
    special_reg['scc'] = SpecialReg("RegisterVcc", Reg('s253'))
    def __init__(self, name=''):
        """Create an instruction; `name` is the opcode mnemonic (or '')."""
        #super().__init__(name)
        self.opcode = None
        #self.has_imm = False
        #self.imm = None
        self.lop_imm = None
        self.stride_imm = 0
        self.branch_cond = 0
        self.pos = 0
        self.flags = {}
        self.encoded_inst = 0      # final binary encoding once assembled
        self.is_encoded = False
        self.fmt_dst = 0
        self.fmt_src = 0
        self.stride_pos = 0
        self.regs = []             # all register operands, in parse order
        self.operands = []         # all operands (registers and immediates)
        self.special_operands = {}
        self.dst_reg = None        # first register added becomes the dst
        self.name = name # in fact it is line number
        self.instr_str = None
        self.func_name = None
        self.instr_size = sizeof(self)
        #if hasattr(self, "field"):
        #    for n,v in self.field.items():
        #        setattr(self, n, 0)
        print("[instr create info] create instr with op {}".format(name))
        if name == "":
            # NOTE(review): assert("Warn:...") asserts a truthy string and
            # never fires — this branch is effectively a no-op.
            assert("Warn:create instr without correct op enum")
        else:
            self.enc = Instr.fmt_enc[self.getFmtName()].value
            self.op = self.OpcodeEnum[name.upper()].value
    def addOperand(self, operand):
        """Append an operand; bare ints are also recorded as immediates."""
        if (isinstance(operand, int)):
            self.setImm(operand)
        self.operands.append(operand)
    def addReg(self, reg: Reg):
        """Append a register operand; the first one becomes dst_reg."""
        if self.dst_reg is None:
            self.dst_reg = reg
        self.regs.append(reg)
        self.addOperand(reg)
# | |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Name: segment.py
# Purpose: Division of stream.Part into segments for individual handling
# Authors: <NAME>
#
# Copyright: Copyright © 2012 <NAME> and the music21 Project
# License: BSD, see license.txt
# ------------------------------------------------------------------------------
'''
Inner classes and methods for transcribing musical segments into braille.
This module was made in consultation with the manual "Introduction to Braille
Music Transcription, Second Edition" by <NAME>, 2005. It is
available from the Library of Congress `here <http://www.loc.gov/nls/music/>`_,
and will henceforth be referred to as BMTM.
'''
import collections
import copy
import enum
import unittest
from music21 import bar
from music21 import chord
from music21 import clef
from music21 import dynamics
from music21 import exceptions21
from music21 import environment
from music21 import expressions
from music21 import key
from music21 import layout
from music21 import meter
from music21 import note
from music21 import spanner
from music21 import stream
from music21 import tempo
from music21.prebase import ProtoM21Object
from music21.braille import basic
from music21.braille import lookup
from music21.braille import noteGrouping as ngMod
from music21.braille import text
from music21.braille.objects import BrailleTranscriptionHelper
from music21.common.numberTools import opFrac
symbols = lookup.symbols
environRules = environment.Environment('segment.py')
# ------------------------------------------------------------------------------
class BrailleSegmentException(exceptions21.Music21Exception):
    """Exception raised for errors while building or transcribing a segment."""
    pass
class Affinity(enum.IntEnum):
    """Grouping buckets for objects at the same position in a measure.

    Higher values sort later; _LOWEST is a sentinel below every real code.
    """
    _LOWEST = -1
    SIGNATURE = 3
    TTEXT = 4
    MMARK = 5
    LONG_TEXTEXPR = 6
    INACCORD = 7
    SPLIT1_NOTEGROUP = 8
    NOTEGROUP = 9
    SPLIT2_NOTEGROUP = 10
# Class Sort Order -- differs for Braille than for general music21
# Lower values sort earlier among objects at the same offset; notes, rests,
# chords and voices sort last (10).
CSO_NOTE = 10
CSO_REST = 10
CSO_CHORD = 10
CSO_DYNAMIC = 9
CSO_CLEF = 7
CSO_BARLINE = 0
CSO_KEYSIG = 1
CSO_TIMESIG = 2
CSO_TTEXT = 3
CSO_MMARK = 4
CSO_VOICE = 10
# (music21Object, affinity code, class sort order)
affinityCodes = [(note.Note, Affinity.NOTEGROUP, CSO_NOTE),
                 (note.Rest, Affinity.NOTEGROUP, CSO_REST),
                 (chord.Chord, Affinity.NOTEGROUP, CSO_CHORD),
                 (dynamics.Dynamic, Affinity.NOTEGROUP, CSO_DYNAMIC),
                 (clef.Clef, Affinity.NOTEGROUP, CSO_CLEF),
                 (bar.Barline, Affinity.SPLIT2_NOTEGROUP, CSO_BARLINE),
                 (key.KeySignature, Affinity.SIGNATURE, CSO_KEYSIG),
                 (meter.TimeSignature, Affinity.SIGNATURE, CSO_TIMESIG),
                 (tempo.TempoText, Affinity.TTEXT, CSO_TTEXT),
                 (tempo.MetronomeMark, Affinity.MMARK, CSO_MMARK),
                 (stream.Voice, Affinity.INACCORD, CSO_VOICE)]
# Human-readable names for each affinity bucket (used when printing segments).
affinityNames = {Affinity.SIGNATURE: 'Signature Grouping',
                 Affinity.TTEXT: 'Tempo Text Grouping',
                 Affinity.MMARK: 'Metronome Mark Grouping',
                 Affinity.LONG_TEXTEXPR: 'Long Text Expression Grouping',
                 Affinity.INACCORD: 'Inaccord Grouping',
                 Affinity.NOTEGROUP: 'Note Grouping',
                 Affinity.SPLIT1_NOTEGROUP: 'Split Note Grouping A',
                 Affinity.SPLIT2_NOTEGROUP: 'Split Note Grouping B',
                 }
# Layout-only objects that have no braille representation.
excludeFromBrailleElements = [spanner.Slur,
                              layout.SystemLayout,
                              layout.PageLayout,
                              layout.StaffLayout]
# Uncomment when Python 3.8 is the minimum version
# from typing import TypedDict, Optional
# class GroupingGlobals(TypedDict):
#     keySignature: Optional[key.KeySignature]
#     timeSignature: Optional[meter.TimeSignature]
# GROUPING_GLOBALS: GroupingGlobals = {...}
# Lazily-populated shared defaults; see setGroupingGlobals().
GROUPING_GLOBALS = {
    'keySignature': None,  # will be key.KeySignature(0) on first call
    'timeSignature': None,  # will be meter.TimeSignature('4/4') on first call
}
# Defaults applied to every new BrailleElementGrouping.
GROUPING_DESC_CHORDS = True
GROUPING_SHOW_CLEFS = False
GROUPING_UPPERFIRST_NOTEFINGERING = True
GROUPING_WITHHYPHEN = False
GROUPING_NUMREPEATS = 0
def setGroupingGlobals():
    '''
    sets defaults for grouping globals. Called first time anything
    in Braille is run, but saves creating two expensive objects if never run
    '''
    # remove noinspection when Python 3.8 is the minimum
    factories = {
        'keySignature': lambda: key.KeySignature(0),
        'timeSignature': lambda: meter.TimeSignature('4/4'),
    }
    for globalName, factory in factories.items():
        if GROUPING_GLOBALS[globalName] is None:
            # noinspection PyTypeChecker
            GROUPING_GLOBALS[globalName] = factory()
# defaults for BrailleSegments
SEGMENT_CANCEL_OUTGOINGKEYSIG = True
SEGMENT_DUMMYRESTLENGTH = None
SEGMENT_LINELENGTH = 40  # standard braille line width in cells
SEGMENT_SHOWFIRSTMEASURENUMBER = True
SEGMENT_SHOWHAND = None # override with None, 'left', or 'right'
SEGMENT_SHOWHEADING = True
SEGMENT_SUPPRESSOCTAVEMARKS = False
SEGMENT_ENDHYPHEN = False
SEGMENT_SLURLONGPHRASEWITHBRACKETS = True
SEGMENT_SHOWSHORTSLURSANDTIESTOGETHER = False
SEGMENT_SHOWLONGSLURSANDTIESTOGETHER = False
SEGMENT_MAXNOTESFORSHORTSLUR = 4
MAX_ELEMENTS_IN_SEGMENT = 48 # 8 measures of 6 notes, etc. each
_ThreeDigitNumber = collections.namedtuple('_ThreeDigitNumber', 'hundreds tens ones')
# SegmentKey identifies one grouping: (measure number, ordinal within the
# measure, Affinity code, hand); all four fields get defaults below.
SegmentKey = collections.namedtuple('SegmentKey', 'measure ordinal affinity hand')
SegmentKey.__new__.__defaults__ = (0, 0, None, None)
# ------------------------------------------------------------------------------
class BrailleElementGrouping(ProtoM21Object):
    _DOC_ATTR = {
        'keySignature': 'The last :class:`~music21.key.KeySignature` preceding the grouping.',
        'timeSignature': 'The last :class:`~music21.meter.TimeSignature` preceding the grouping.',
        'descendingChords': '''True if a :class:`~music21.chord.Chord` should be spelled
             from highest to lowest pitch
             in braille, False if the opposite is the case.''',
        'showClefSigns': '''If True, clef signs are shown in braille.
             Representation of music in braille is not
             dependent upon clefs and staves, so the clef signs would be displayed
             for referential or historical purposes.''',
        # 'upperFirstInNoteFingering' : 'No documentation.',
        'withHyphen': 'If True, this grouping will end with a music hyphen.',
        'numRepeats': 'The number of times this grouping is repeated.'
    }
    def __init__(self, *args):
        '''
        A BrailleElementGrouping is a superclass of list of objects which should be displayed
        without a space in braille.

        >>> from music21.braille import segment
        >>> bg = segment.BrailleElementGrouping()
        >>> bg.append(note.Note('C4'))
        >>> bg.append(note.Note('D4'))
        >>> bg.append(note.Rest())
        >>> bg.append(note.Note('F4'))
        >>> bg
        <music21.braille.segment.BrailleElementGrouping [<music21.note.Note C>,
            <music21.note.Note D>, <music21.note.Rest rest>, <music21.note.Note F>]>
        >>> print(bg)
        <music21.note.Note C>
        <music21.note.Note D>
        <music21.note.Rest rest>
        <music21.note.Note F>

        These are the defaults and they are shared across all objects...

        >>> bg.keySignature
        <music21.key.KeySignature of no sharps or flats>
        >>> bg.timeSignature
        <music21.meter.TimeSignature 4/4>

        >>> bg.descendingChords
        True

        >>> bg.showClefSigns
        False

        >>> bg.upperFirstInNoteFingering
        True

        >>> bg.withHyphen
        False

        >>> bg.numRepeats
        0
        '''
        super().__init__()
        # The wrapped list; list behavior is delegated via __getattr__ below.
        self.internalList = list(*args)
        setGroupingGlobals()
        self.keySignature = GROUPING_GLOBALS['keySignature']
        self.timeSignature = GROUPING_GLOBALS['timeSignature']
        self.descendingChords = GROUPING_DESC_CHORDS
        self.showClefSigns = GROUPING_SHOW_CLEFS
        self.upperFirstInNoteFingering = GROUPING_UPPERFIRST_NOTEFINGERING
        self.withHyphen = GROUPING_WITHHYPHEN # False
        self.numRepeats = GROUPING_NUMREPEATS
    def __getitem__(self, item):
        return self.internalList[item]
    def __setitem__(self, pos, item):
        self.internalList[pos] = item
    def __len__(self):
        return len(self.internalList)
    def __getattr__(self, attr):
        # Guard against recursion before __init__ has set internalList;
        # otherwise delegate unknown attributes (append, extend, ...) to it.
        if attr == 'internalList':
            raise AttributeError('internalList not defined yet')
        return getattr(self.internalList, attr)
    def __str__(self):
        '''
        Return a Unicode braille representation
        of each object in the BrailleElementGrouping.
        '''
        allObjects = []
        for obj in self:
            if isinstance(obj, stream.Voice):
                # Voices are flattened: their contents are listed inline.
                for obj2 in obj:
                    try:
                        allObjects.append('\n'.join(obj2.editorial.brailleEnglish))
                    except (AttributeError, TypeError):
                        allObjects.append(str(obj2))
            else:
                try:
                    allObjects.append('\n'.join(obj.editorial.brailleEnglish))
                except (AttributeError, TypeError):
                    allObjects.append(str(obj))
        if self.numRepeats > 0:
            allObjects.append(f'** Grouping x {self.numRepeats + 1} **')
        if self.withHyphen is True:
            allObjects.append(f'music hyphen {lookup.symbols["music_hyphen"]}')
        out = '\n'.join(allObjects)
        return out
    def _reprInternal(self):
        return repr(self.internalList)
class BrailleSegment(text.BrailleText):
_DOC_ATTR = {
'cancelOutgoingKeySig': '''If True, the previous key signature should be
cancelled immediately before a new key signature is encountered.''',
'dummyRestLength': '''For a given positive integer n, adds n "dummy rests"
near the beginning of a segment. Designed for test purposes, as they
are used to demonstrate measure division at the end of braille lines.''',
'lineLength': '''The maximum amount of braille characters that should be
present in a line. The standard is 40 characters.''',
'showFirstMeasureNumber': '''If True, then a measure number is shown
following the heading (if applicable) and preceding the music.''',
'showHand': '''If set to "right" or "left", shows the corresponding
hand sign at the beginning of the first line.''',
'showHeading': '''If True, then a braille heading is displayed.
See :meth:`~music21.braille.basic.transcribeHeading`
for more details on headings.''',
'suppressOctaveMarks': '''If True, then all octave marks are suppressed.
Designed for test purposes, as octave marks were not presented
until Chapter 7 of BMTM.''',
'endHyphen': '''If True, then the last
:class:`~music21.braille.segment.BrailleElementGrouping` of this
segment will be followed by a music hyphen.
The last grouping is incomplete, because a segment
break occurred in the middle of a measure.''',
'beginsMidMeasure': '''If True, then the initial measure number of this
segment should be followed by a dot. This segment
is starting in the middle of a measure.'''
}
    def __init__(self):
        '''
        A segment is "a group of measures occupying more than one braille line."
        Music is divided into segments so as to "present the music to the reader
        in a meaningful manner and to give him convenient reference points to
        use in memorization" (BMTM, 71).

        >>> brailleSeg = braille.segment.BrailleSegment()

        >>> brailleSeg.cancelOutgoingKeySig
        True

        >>> brailleSeg.dummyRestLength

        >>> brailleSeg.lineLength
        40

        >>> brailleSeg.showFirstMeasureNumber
        True

        Possible showHand values are None, 'right', 'left':

        >>> brailleSeg.showHand is None
        True

        >>> brailleSeg.showHeading
        True

        >>> brailleSeg.suppressOctaveMarks
        False

        >>> brailleSeg.endHyphen
        False

        >>> brailleSeg.beginsMidMeasure
        False

        A BrailleSegment is a type of defaultdict that returns a BrailleElementGrouping
        when a key is missing.

        >>> len(brailleSeg.keys())
        0
        >>> beg = brailleSeg[braille.segment.SegmentKey(4, 1, 9)]
        >>> type(beg) is braille.segment.BrailleElementGrouping
        True

        Of course, creating random keys like this will have consequences:

        >>> print(str(brailleSeg))
        ---begin segment---
        <music21.braille.segment BrailleSegment>
        Measure 4, Note Grouping 2:
        <BLANKLINE>
        ===
        ---end segment---
        '''
        super().__init__(lineLength=SEGMENT_LINELENGTH)
        # SegmentKey -> BrailleElementGrouping; missing keys are created
        # on demand in __getitem__ (defaultdict-like behavior).
        self._groupingDict = {}
        # Transcription bookkeeping, populated while the segment is processed.
        self.groupingKeysToProcess = None
        self.currentGroupingKey = None
        self.lastNote = None
        self.previousGroupingKey = None
        # Per-segment display options, initialized from the module defaults.
        self.cancelOutgoingKeySig = SEGMENT_CANCEL_OUTGOINGKEYSIG
        self.dummyRestLength = SEGMENT_DUMMYRESTLENGTH
        self.showFirstMeasureNumber = SEGMENT_SHOWFIRSTMEASURENUMBER
        self.showHand = SEGMENT_SHOWHAND
        self.showHeading = SEGMENT_SHOWHEADING
        self.suppressOctaveMarks = SEGMENT_SUPPRESSOCTAVEMARKS
        self.endHyphen = SEGMENT_ENDHYPHEN
        self.beginsMidMeasure = False
    def __setitem__(self, item, value):
        # Store a grouping directly under its SegmentKey.
        self._groupingDict[item] = value
def __getitem__(self, item):
if item not in self._groupingDict:
self._groupingDict[item] = BrailleElementGrouping()
return self._groupingDict[item]
def __delitem__(self, item):
if item not in self.__dict__:
del self._groupingDict[item]
else:
return ValueError(f'No item {item!r} in Segment')
    def __getattr__(self, item):
        # Delegate unknown attributes (keys(), items(), values(), ...) to the
        # grouping dict, giving the segment its mapping-like API.
        return getattr(self._groupingDict, item)
    def __contains__(self, item):
        # Membership test against the stored SegmentKeys.
        return item in self._groupingDict
    def __iter__(self):
        # Iterate over the stored SegmentKeys.
        return iter(self._groupingDict)
    def __len__(self):
        # Number of groupings currently stored.
        return len(self._groupingDict)
    @property
    def brailleText(self):
        '''
        Returns the string from the BrailleText object
        '''
        # Call the base-class __str__ explicitly because this class
        # overrides __str__ with a debug representation.
        return text.BrailleText.__str__(self)
def __str__(self):
name = '<music21.braille.segment BrailleSegment>'
allItems = sorted(self.items())
allKeys = []
allGroupings = []
# noinspection PyArgumentList
| |
65680: # {U_Prelude.Show.{Prec instance of Prelude.Classes.Ord_lam3}1}
return _idris_Prelude_46_Show_46__123_Prec_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam3_125_(
arg0
)
elif fn0[0] == 65681: # {U_Prelude.Show.{Prec instance of Prelude.Classes.Ord_lam4}1}
P_c0 = fn0[1]
return _idris_Prelude_46_Show_46__123_Prec_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam4_125_(
P_c0, arg0
)
else: # {U_Prelude.Show.{Prec instance of Prelude.Classes.Ord_lam5}1}
return _idris_Prelude_46_Show_46__123_Prec_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam5_125_(
arg0
)
else:
if fn0[0] < 65686:
if fn0[0] == 65683: # {U_Prelude.Show.{case block in showLitChar at ./Prelude/Show.idr:126:27_lam0}1}
P_c0 = fn0[1]
return _idris_Prelude_46_Show_46__123_case_32_block_32_in_32_showLitChar_32_at_32__46__47_Prelude_47_Show_46_idr_58_126_58_27_95_lam0_125_(
P_c0, arg0
)
elif fn0[0] == 65684: # {U_Prelude.Show.{primNumShow0}1}
return _idris_Prelude_46_Show_46__123_primNumShow0_125_(arg0)
else: # {U_Prelude.Show.{showLitChar0}1}
return _idris_Prelude_46_Show_46__123_showLitChar0_125_(arg0)
else:
if fn0[0] < 65688:
if fn0[0] == 65686: # {U_Prelude.Show.{showLitChar10}1}
P_c0 = fn0[1]
return _idris_Prelude_46_Show_46__123_showLitChar10_125_(P_c0, arg0)
else: # {U_Prelude.Show.{showLitChar1}1}
return _idris_Prelude_46_Show_46__123_showLitChar1_125_(arg0)
else:
if fn0[0] == 65688: # {U_Prelude.Show.{showLitChar2}1}
return _idris_Prelude_46_Show_46__123_showLitChar2_125_(arg0)
else: # {U_Prelude.Show.{showLitChar3}1}
return _idris_Prelude_46_Show_46__123_showLitChar3_125_(arg0)
else:
if fn0[0] < 65716:
if fn0[0] < 65703:
if fn0[0] < 65696:
if fn0[0] < 65693:
if fn0[0] == 65690: # {U_Prelude.Show.{showLitChar4}1}
return _idris_Prelude_46_Show_46__123_showLitChar4_125_(arg0)
elif fn0[0] == 65691: # {U_Prelude.Show.{showLitChar5}1}
return _idris_Prelude_46_Show_46__123_showLitChar5_125_(arg0)
else: # {U_Prelude.Show.{showLitChar6}1}
return _idris_Prelude_46_Show_46__123_showLitChar6_125_(arg0)
else:
if fn0[0] == 65693: # {U_Prelude.Show.{showLitChar7}1}
return _idris_Prelude_46_Show_46__123_showLitChar7_125_(arg0)
elif fn0[0] == 65694: # {U_Prelude.Show.{showLitChar8}1}
return _idris_Prelude_46_Show_46__123_showLitChar8_125_(arg0)
else: # {U_Prelude.Show.{showLitChar9}1}
return _idris_Prelude_46_Show_46__123_showLitChar9_125_(arg0)
else:
if fn0[0] < 65699:
if fn0[0] == 65696: # {U_Prelude.Show.{showLitString0}1}
return _idris_Prelude_46_Show_46__123_showLitString0_125_(arg0)
elif fn0[0] == 65697: # {U_io_bind1}
P_c0, P_c1, P_c2, P_c3, P_c4 = fn0[1:]
return _idris_io_95_bind(P_c0, P_c1, P_c2, P_c3, P_c4, arg0)
else: # {U_io_return1}
P_c0, P_c1, P_c2 = fn0[1:]
return _idris_io_95_return(P_c0, P_c1, P_c2, arg0)
else:
if fn0[0] < 65701:
if fn0[0] == 65699: # {U_prim__strCons1}
P_c0 = fn0[1]
return _idris_prim_95__95_strCons(P_c0, arg0)
else: # {U_prim__toStrInt1}
return _idris_prim_95__95_toStrInt(arg0)
else:
if fn0[0] == 65701: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab0}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab0_125_(
arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab10}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab10_125_(
arg0
)
else:
if fn0[0] < 65709:
if fn0[0] < 65706:
if fn0[0] == 65703: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab11}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab11_125_(
arg0
)
elif fn0[0] == 65704: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab12}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab12_125_(
arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab13}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab13_125_(
arg0
)
else:
if fn0[0] == 65706: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab14}1}
P_c0 = fn0[1]
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab14_125_(
P_c0, arg0
)
elif fn0[0] == 65707: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab15}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab15_125_(
arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab16}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab16_125_(
arg0
)
else:
if fn0[0] < 65712:
if fn0[0] == 65709: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab17}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab17_125_(
arg0
)
elif fn0[0] == 65710: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab18}1}
P_c0 = fn0[1]
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab18_125_(
P_c0, arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab19}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab19_125_(
arg0
)
else:
if fn0[0] < 65714:
if fn0[0] == 65712: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab1}1}
P_c0 = fn0[1]
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab1_125_(
P_c0, arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab20}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab20_125_(
arg0
)
else:
if fn0[0] == 65714: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab21}1}
P_c0 = fn0[1]
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab21_125_(
P_c0, arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab22}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab22_125_(
arg0
)
else:
if fn0[0] < 65729:
if fn0[0] < 65722:
if fn0[0] < 65719:
if fn0[0] == 65716: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab23}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab23_125_(
arg0
)
elif fn0[0] == 65717: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab24}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab24_125_(
arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab25}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab25_125_(
arg0
)
else:
if fn0[0] == 65719: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab26}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab26_125_(
arg0
)
elif fn0[0] == 65720: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab2}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab2_125_(
arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab3}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab3_125_(
arg0
)
else:
if fn0[0] < 65725:
if fn0[0] == 65722: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab4}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab4_125_(
arg0
)
elif fn0[0] == 65723: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab5}1}
P_c0 = fn0[1]
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab5_125_(
P_c0, arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab6}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab6_125_(
arg0
)
else:
if fn0[0] < 65727:
if fn0[0] == 65725: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab7}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab7_125_(
arg0
)
else: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab8}1}
P_c0 = fn0[1]
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab8_125_(
P_c0, arg0
)
else:
if fn0[0] == 65727: # {U_{PE_(a, b) instance of Prelude.Show.Show_a94d79ab9}1}
return _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab9_125_(
arg0
)
else: # {U_{PE_concatMap_af3155d10}1}
P_c0 = fn0[1]
return _idris__123_PE_95_concatMap_95_af3155d10_125_(P_c0, arg0)
else:
if fn0[0] < 65736:
if fn0[0] < 65732:
if fn0[0] == 65729: # {U_{PE_show_249676530}1}
return _idris__123_PE_95_show_95_249676530_125_(arg0)
elif fn0[0] == 65730: # {U_{PE_show_249676531}1}
return _idris__123_PE_95_show_95_249676531_125_(arg0)
else: # {U_{PE_show_249676532}1}
return _idris__123_PE_95_show_95_249676532_125_(arg0)
else:
if fn0[0] < 65734:
if fn0[0] == 65732: # {U_{Prelude.List.sortBy, splitRec_lam0}1}
P_c0 = fn0[1]
return _idris__123_Prelude_46_List_46_sortBy_44__32_splitRec_95_lam0_125_(P_c0, arg0)
else: # {U_{io_bind1}1}
P_c0, P_c1, P_c2, P_c3, P_c4, P_c5 = fn0[1:]
return io_bind1(P_c0, P_c1, P_c2, P_c3, P_c4, P_c5, arg0)
else:
if fn0[0] == 65734: # {U_PE_@@constructor of Prelude.Algebra.Monoid#Semigroup a_42111bf02}
P_c0 = fn0[1]
return (65645, P_c0, arg0) # {U_PE_@@constructor of Prelude.Algebra.Monoid#Semigroup a_42111bf01}
else: # {U_PE_@@constructor of Prelude.Applicative.Alternative#Applicative f_5102bba82}
return (65646, arg0) # {U_PE_@@constructor of Prelude.Applicative.Alternative#Applicative f_5102bba81}
else:
if fn0[0] < 65739:
if fn0[0] == 65736: # {U_PE_@@constructor of Prelude.Monad.Monad#Applicative m_d05ad59e2}
return (65647, arg0) # {U_PE_@@constructor of Prelude.Monad.Monad#Applicative m_d05ad59e1}
elif fn0[0] == 65737: # {U_Prelude.List.List instance of Prelude.Foldable.Foldable2}
P_c0, P_c1, P_c2 = fn0[1:]
return (65665, P_c0, P_c1, P_c2, arg0) # {U_Prelude.List.List instance of Prelude.Foldable.Foldable1}
else: # {U_Prelude.Nat.Nat instance of Prelude.Classes.Eq2}
return (65666, arg0) # {U_Prelude.Nat.Nat instance of Prelude.Classes.Eq1}
else:
if fn0[0] < 65741:
if fn0[0] == 65739: # {U_Prelude.List.List instance of Prelude.Foldable.Foldable3}
P_c0, P_c1 = fn0[1:]
return (65737, P_c0, P_c1, arg0) # {U_Prelude.List.List instance of Prelude.Foldable.Foldable2}
else: # {U_Prelude.List.List instance of Prelude.Foldable.Foldable4}
P_c0 = fn0[1]
return (65739, P_c0, arg0) # {U_Prelude.List.List instance of Prelude.Foldable.Foldable3}
else:
assert fn0[0] == 65741 # {U_Prelude.List.List instance of Prelude.Foldable.Foldable5}
return (65740, arg0) # {U_Prelude.List.List instance of Prelude.Foldable.Foldable4}
return _idris_error("unreachable due to case in tail position")
# Prelude.Classes.{Char instance of Prelude.Classes.Ord_lam0}
def _idris_Prelude_46_Classes_46__123_Char_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam0_125_(
    in0, in1
):
    # Compiler-generated closure: delegate to the Char Ord `compare` method.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char_58__33_compare_58_0(
            in0, in1
        )
# {EVAL0}
def EVAL0(arg0):
    # Identity: evaluation is strict in this backend, so forcing a
    # value simply hands it back unchanged.
    return arg0
# Prelude.Classes.{Int instance of Prelude.Classes.Ord_lam0}
def _idris_Prelude_46_Classes_46__123_Int_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam0_125_(
    in0, in1
):
    # Compiler-generated closure: delegate to the Int Ord `compare` method.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Int_58__33_compare_58_0(
            in0, in1
        )
# Prelude.Show.{Int instance of Prelude.Show.Show_lam0}
def _idris_Prelude_46_Show_46__123_Int_32_instance_32_of_32_Prelude_46_Show_46_Show_95_lam0_125_(
    in0
):
    # show for Int: call showPrec of the Int Show instance at the
    # outermost precedence level (Open).
    while True:
        return APPLY0(
            APPLY0(
                _idris_Prelude_46_Show_46_showPrec(
                    None,
                    _idris_Prelude_46_Show_46__64_Prelude_46_Show_46_Show_36_Int()
                ),
                (0,) # Prelude.Show.Open
            ),
            in0
        )
# Prelude.Nat.{Nat instance of Prelude.Classes.Ord_lam0}
def _idris_Prelude_46_Nat_46__123_Nat_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam0_125_(
    in0, in1
):
    # Compiler-generated closure: delegate to the Nat Ord `compare` method.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Nat_46__64_Prelude_46_Classes_46_Ord_36_Nat_58__33_compare_58_0(
            in0, in1
        )
# {PE_(a, b) instance of Prelude.Show.Show_a94d79ab0}
def _idris__123_PE_95__40_a_44__32_b_41__32_instance_32_of_32_Prelude_46_Show_46_Show_95_a94d79ab0_125_(
    in1
):
    # Partially-evaluated Show for a pair component: render via
    # primNumShow with the prim__toStrInt closure at Open precedence.
    while True:
        return _idris_Prelude_46_Show_46_primNumShow(None, (65700,), (0,), in1)  # {U_prim__toStrInt1}, Prelude.Show.Open
# {PE_concatMap_af3155d10}
def _idris__123_PE_95_concatMap_95_af3155d10_125_(e2, in0):
    # concatMap body: map the element with e2, then partially apply the
    # monoid's <+> to the result (the accumulator is supplied later).
    while True:
        return APPLY0(
            _idris_Prelude_46_Algebra_46__60__43__62_(None, (65734, None)), # {U_PE_@@constructor of Prelude.Algebra.Monoid#Semigroup a_42111bf02}
            APPLY0(e2, in0)
        )
# {PE_show_249676530}
def _idris__123_PE_95_show_95_249676530_125_(in0):
    # Partially-evaluated show: delegate to the String Show instance.
    while True:
        return _idris_Prelude_46_Show_46_Prelude_46_Show_46__64_Prelude_46_Show_46_Show_36_String_58__33_show_58_0(
            in0
        )
# Prelude.Show.{Prec instance of Prelude.Classes.Ord_lam0}
def _idris_Prelude_46_Show_46__123_Prec_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam0_125_(
    in0, in1
):
    # Compiler-generated closure: delegate to the Prec Ord `compare` method.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Show_46__64_Prelude_46_Classes_46_Ord_36_Prec_58__33_compare_58_0(
            in0, in1
        )
# Prelude.Classes.{Prelude.Classes.Char instance of Prelude.Classes.Ord, method <=_lam0}
def _idris_Prelude_46_Classes_46__123_Prelude_46_Classes_46_Char_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__60__61__95_lam0_125_(
    e0, e1
):
    # Equality half of Char's (<=). The generated int-coded test
    # (False when `(e0 == e1) == 0`, True otherwise) is just the
    # character comparison itself.
    return e0 == e1
# Prelude.Classes.{Prelude.Classes.Char instance of Prelude.Classes.Ord, method >=_lam0}
def _idris_Prelude_46_Classes_46__123_Prelude_46_Classes_46_Char_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__62__61__95_lam0_125_(
    e0, e1
):
    # Equality half of Char's (>=): collapses the generated
    # `(e0 == e1) == 0` branch pair to the comparison itself.
    return e0 == e1
# Prelude.Classes.{Prelude.Classes.Int instance of Prelude.Classes.Ord, method <=_lam0}
def _idris_Prelude_46_Classes_46__123_Prelude_46_Classes_46_Int_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__60__61__95_lam0_125_(
    e0, e1
):
    # Equality half of Int's (<=): collapses the generated
    # `(e0 == e1) == 0` branch pair to the comparison itself.
    return e0 == e1
# {Prelude.List.sortBy, splitRec_lam0}
def _idris__123_Prelude_46_List_46_sortBy_44__32_splitRec_95_lam0_125_(in0, in6):
    # Prepend the captured element onto the accumulated Idris list.
    while True:
        return in6.cons(in0)
# Prelude.Classes.{Prelude.Show.Prec instance of Prelude.Classes.Ord, method >=_lam0}
def _idris_Prelude_46_Classes_46__123_Prelude_46_Show_46_Prec_32_instance_32_of_32_Prelude_46_Classes_46_Ord_44__32_method_32__62__61__95_lam0_125_(
    e0, e1
):
    # Equality half of Prec's (>=): delegate to the Prec Eq `==` method.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Show_46__64_Prelude_46_Classes_46_Eq_36_Prec_58__33__61__61__58_0(
            e0, e1
        )
# Prelude.Show.{case block in showLitChar at ./Prelude/Show.idr:126:27_lam0}
def _idris_Prelude_46_Show_46__123_case_32_block_32_in_32_showLitChar_32_at_32__46__47_Prelude_47_Show_46_idr_58_126_58_27_95_lam0_125_(
    in0, in1
):
    # String continuation: prepend the captured prefix onto the rest.
    return in0 + in1
# {io_bind0}
def io_bind0(e0, e1, e2, e3, e4, _idris_w, in0):
    # Continuation of io_bind: feed the first action's result (in0)
    # to the second action (e4).
    while True:
        return APPLY0(e4, in0)
# Prelude.Chars.{isDigit0}
def _idris_Prelude_46_Chars_46__123_isDigit0_125_(e0):
    # Upper-bound half of isDigit: test e0 <= '9' via the Char Ord instance.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_Char_58__33__60__61__58_0(
            e0, u'9'
        )
# Main.{main0}
def _idris_Main_46__123_main0_125_(in1, in2):
    # Comparator closure used by Main: String Ord `compare`.
    while True:
        return _idris_Prelude_46_Classes_46_Prelude_46_Classes_46__64_Prelude_46_Classes_46_Ord_36_String_58__33_compare_58_0(
            in1, in2
        )
# Prelude.Show.{primNumShow0}
def _idris_Prelude_46_Show_46__123_primNumShow0_125_(in1):
    # Predicate used by primNumShow: is this character a minus sign?
    # (Collapses the generated int-coded branch pair to the comparison.)
    return in1 == u'-'
# Prelude.Interactive.{putStr'0}
def _idris_Prelude_46_Interactive_46__123_putStr_39_0_125_(e1, in0):
    # Effectful body of putStr': write the captured string to stdout.
    # The argument in0 is unused here.
    while True:
        return sys.stdout.write(e1)
# Main.{pythag0}
def _idris_Main_46__123_pythag0_125_(in2, in1, in0, in3):
    # Innermost body of pythag: wrap the nested result tuple with the
    # list monad's `pure`. in3 is unused here.
    while True:
        return APPLY0(
            _idris_Prelude_46_Applicative_46_pure(None, None, (65736,)), # {U_PE_@@constructor of Prelude.Monad.Monad#Applicative m_d05ad59e2}
            (in2, (in1, in0))
        )
# {runMain0}
def runMain0():
    # Program entry point: apply Main.main to the world token (None)
    # and force the resulting value.
    while True:
        return EVAL0(APPLY0(_idris_Main_46_main(), None))
# Prelude.Show.{showLitChar0}
def _idris_Prelude_46_Show_46__123_showLitChar0_125_(in0):
    # Prepend the escaped bell character onto the rest of the string.
    return u'\\a' + in0
# Prelude.Show.{showLitString0}
def _idris_Prelude_46_Show_46__123_showLitString0_125_(in2):
    # Prepend an escaped double quote onto the rest of the string.
    return u'\\"' + in2
# Prelude.Classes.{Char instance of Prelude.Classes.Ord_lam1}
def _idris_Prelude_46_Classes_46__123_Char_32_instance_32_of_32_Prelude_46_Classes_46_Ord_95_lam1_125_(
| |
"""
dic_mean = {}
dic_var = {}
for i in range(self.n):
conf = tuple(self.list_conf[i])
f = self.F[i]
mean = f.dot(w)
var = pow( np.exp(f.dot(v)), 2)
dic_mean[conf] = mean
dic_var[conf] = var
return (dic_mean, dic_var)
class LR:
    """
    baseline: linear regression

    Fits an ordinary least-squares model for the mean label of each
    configuration; the variance is either a single homoscedastic
    residual estimate (hetero=False) or a second regression
    (hetero=True).
    """
    def __init__(self, data, hetero = False):
        # conf -> ([workers], [labels]) for every configuration in the data
        self.dic_conf_wl = analysis.get_dic_conf_wl(data)
        n = len(self.dic_conf_wl)
        list_conf = self.dic_conf_wl.keys()
        self.F = []          # feature vector per configuration
        self.empi_mean = []  # empirical mean label per configuration
        self.empi_var = []   # empirical label variance per configuration
        for conf in list_conf:
            f = create_features(conf)
            self.F.append(f)
            labels = self.dic_conf_wl[conf][1]
            self.empi_mean.append( np.mean(labels) )
            self.empi_var.append ( np.var(labels) )
        # OLS fit of the mean; fit_intercept=False, so no intercept term.
        self.lr_mean = sklearn.linear_model.LinearRegression(fit_intercept = False)
        self.lr_mean.fit(self.F, self.empi_mean)
        # Homoscedastic residual variance estimate with n-2 degrees of freedom.
        self.const_var = np.sum((self.lr_mean.predict(self.F) - self.empi_mean)**2) *1.0/ (n-2)
        self.lr_var = sklearn.linear_model.LinearRegression(fit_intercept = False)
        #self.lr_var.fit(self.F, self.empi_var )
        #self.lr_var.fit(self.F, np.log( pow(np.asarray(self.empi_var), 0.5)))
        self.hetero = hetero
    def predict(self, list_conf):
        """
        predict mean and var of new conf
        """
        self.tF = []
        for conf in list_conf:
            f = create_features(conf)
            self.tF.append(f)
        res_mean = self.lr_mean.predict(self.tF)
        if self.hetero:
            # NOTE(review): lr_var is never fitted above (both fit calls are
            # commented out), so the hetero=True path would fail — confirm.
            res_var = self.lr_var.predict(self.tF)
            #res_var = pow( np.exp(self.lr_var.predict(self.tF)), 2)
        else:
            res_var = [self.const_var] * len(list_conf)
        return (res_mean, res_var)
class baseline_spam(model):
    """
    baselines for spam detection

    Scores each worker by mean absolute deviation from the average label
    of the items they rated, normalized so the worst worker scores 1.0.
    """
    def __init__(self, data):
        model.__init__(self, data)
        #get spam score
        self.ss = {}
        for w in self.dic_w_il:
            self.ss[w] = 0
            for i, l in self.dic_w_il[w]:
                # difference between label and average label
                self.ss[w] += np.abs( l - np.mean(self.L[i][1]) )
            self.ss[w] = self.ss[w] * 1.0 / len(self.dic_w_il[w])
        #normalize:
        max_score = max(self.ss.values())
        for w in self.ss:
            self.ss[w] = self.ss[w] * 1.0 / max_score
    def spam_score(self, workers):
        # Return the normalized spam score for each requested worker.
        res = []
        for w in workers:
            res.append(self.ss[w])
        return res
# Rating distribution (ratings 1..5) used by eval.rand_rating when
# injecting 'empirical' spammer noise.
empirical_spam = [0.13, 0.25, 0.22, 0.27, 0.14]
def plot_empi_spam():
    # Bar plot of the empirical spam-rating distribution over ratings 1..5.
    fig, ax = plt.subplots()
    ax.bar(np.asarray([1,2,3,4,5]) - 0.5, empirical_spam)
    ax.set_xlabel('Rating')
    ax.set_ylabel('Proportion')
    ax.set_xticks(np.asarray([1,2,3,4,5]))
class eval():
    """
    evaluate

    Splits the crowdsourced data into train/validation/test configurations,
    optionally injects spammer noise into the training labels, and scores
    models by MAE against held-out empirical means/variances.
    NOTE(review): the class name shadows the builtin `eval`.
    """
    def __init__(self, data, ptrain = 0.6, pval = 0.2, prw = 0.1, prl = 0.8, ptr = 1.0, plt = 1.0, pwk = 0.0, rand_seed = 1234, noise = 'empirical', bad_guys = []):
        """
        ptrain = train set
        pval = validation set
        prw = proportion of random workers (spammers)
        prl = proportion of random labels (how often a random worker gives a random label)
        ptr = proportion of train conf
        plt = proportion of labels for each conf in the train set
        pwk = proportion of workers to be removed (remove the ones with high diff)
        """
        # NOTE(review): `bad_guys = []` is a mutable default argument and the
        # parameter `plt` shadows the module-level matplotlib alias (both
        # pre-existing hazards, left unchanged).
        self.data = copy.deepcopy(data)
        self.pwk = pwk
        self.del_wk()
        self.dic_conf_wl = analysis.get_dic_conf_wl(self.data)
        self.list_conf = self.dic_conf_wl.keys()
        #self.rs = np.random.RandomState(1)
        #self.rs.shuffle(self.list_conf)
        # Deterministic shuffle before splitting (Python 2: .keys() is a
        # list, so the in-place shuffle works).
        self.rs = np.random.RandomState(rand_seed)
        self.rs.shuffle(self.list_conf)
        self.n = len(self.list_conf)
        self.n_train = int(ptrain * self.n) # number of total train conf
        self.n_given = int(self.n_train * ptr) # number of train conf given to method
        self.train_conf = self.list_conf[:self.n_given]
        self.n_val = int(pval * self.n) # number of total validation conf
        self.val_conf = self.list_conf[self.n_train:self.n_train+self.n_val]
        self.test_conf = self.list_conf[self.n_train+self.n_val:]
        # get gold L for test (labels from bad_guys are excluded)
        self.gold_mean = []; self.gold_var = []; self.gold_num = []
        for conf in self.test_conf:
            labs = self.dic_conf_wl[conf][1]
            workers = self.dic_conf_wl[conf][0]
            labels = []
            for l, w in zip(labs, workers):
                if w not in bad_guys:
                    labels.append(l)
            self.gold_mean.append( np.mean(labels) )
            self.gold_var.append ( np.var(labels) )
            self.gold_num.append( len(labels) )
        # also get gold L for train
        self.train_mean = []; self.train_var = []; self.train_num = []
        for conf in self.train_conf:
            labels = self.dic_conf_wl[conf][1]
            self.train_mean.append( np.mean(labels) )
            self.train_var.append ( np.var(labels) )
            self.train_num.append( len(labels) )
        # also get gold L for validation
        self.val_mean = []; self.val_var = []; self.val_num = []
        for conf in self.val_conf:
            labels = self.dic_conf_wl[conf][1]
            self.val_mean.append( np.mean(labels) )
            self.val_var.append ( np.var(labels) )
            self.val_num.append( len(labels) )
        self.plt = plt
        self.get_train_data()
        #inject noise
        train_workers = analysis.get_list_workers(self.train_data)
        self.rs.shuffle(train_workers)
        self.n_random_workers = int(prw * len(train_workers))
        self.random_workers = train_workers[:self.n_random_workers]
        self.train_workers = train_workers
        self.noise = noise
        self.prl = prl
        self.inject_noise()
    def rand_rating(self):
        # Draw one spam rating in 1..5 from the configured noise model.
        if self.noise == 'uniform':
            return self.rs.randint(1,6)
        elif self.noise == 'empirical':
            return np.nonzero(self.rs.multinomial(1, empirical_spam))[0][0]+1
        else:
            # NOTE(review): raising a string is a TypeError on Python >= 2.6,
            # so this branch always errors out (message is lost).
            raise "unknown noise"
    def inject_noise(self):
        # Replace each selected spammer's label by a random rating with
        # probability self.prl.
        # NOTE(review): uses the global np.random, not self.rs, so this step
        # is NOT controlled by rand_seed — confirm intended.
        for i in range(len(self.train_data)):
            w = self.train_data[i][0]
            if w in self.random_workers:
                if np.random.uniform() < self.prl:
                    self.train_data[i][3] = str(self.rand_rating())
    def get_train_data(self):
        # Keep only rows whose conf is in the train split, capped at a
        # fraction self.plt of that conf's available labels.
        self.train_data = []
        dic_conf_num = {}# conf-> number of crowd labels this conf got
        for d in self.data:
            conf = analysis.get_conf(d)
            if conf in self.train_conf:
                if conf not in dic_conf_num: dic_conf_num[conf] = 0
                if dic_conf_num[conf] > self.plt * len(self.dic_conf_wl[conf][0]): continue
                dic_conf_num[conf] += 1
                self.train_data.append(d)
    def del_wk(self):
        """
        remove workers with high deviation
        remove a proportion of self.pwk workers
        """
        if self.pwk == 0.0: return
        dic_ul = analysis.get_dic_url_labels(self.data)
        dic_w, dic_mean = analysis.agreement(self.data, dic_ul)
        # Sort workers by |mean deviation| ascending; keep the lowest ones.
        self.workers = sorted(dic_mean.items(), key = lambda i : abs(i[1]), reverse = False)
        nwk = len(self.workers)
        keep_workers = list( zip(*self.workers[:int((1-self.pwk) * nwk)])[0]) # list of workers to keep (Python 2: zip returns a list)
        new_data = []
        for i in self.data:
            if i[0] in keep_workers:
                new_data.append(i)
        self.data = new_data
    def get_mae(self, a, b):
        # Mean absolute error between two equal-length sequences.
        # NOTE(review): raising a string is a TypeError on Python >= 2.6.
        if ( len(a) != len(b) ) : raise "len not equal"
        res = 0
        for x,y in zip(a,b):
            res += np.abs(x-y)
        res = res * 1.0 / len(a)
        return res
    def eval(self, model):
        """
        model has been trained
        model has a predict method
        Returns [MAE of predicted means, MAE of predicted variances] on test.
        """
        res_mean, res_var = model.predict(self.test_conf)
        mae_mean = self.get_mae(res_mean, self.gold_mean)
        mae_var = self.get_mae(res_var, self.gold_var)
        #print "correlation: ", pearsonr(res_var, self.gold_var)
        return [mae_mean, mae_var]
    def eval_val(self, model):
        """
        model has been trained
        model has a predict method
        evaluate on validation data
        """
        res_mean, res_var = model.predict(self.val_conf)
        mae_mean = self.get_mae(res_mean, self.val_mean)
        mae_var = self.get_mae(res_var, self.val_var)
        #print "correlation: ", pearsonr(res_var, self.gold_var)
        return [mae_mean, mae_var]
    def print_val(self, model):
        # Debug dump (Python 2 print statements): predicted vs gold
        # variance per validation conf, plus the summed signed error.
        res_mean, res_var = model.predict(self.val_conf)
        mae_var = self.get_mae(res_var, self.val_var)
        s = 0
        for i,j in zip(res_var, self.val_var):
            print i, j, i - j
            s += (i-j)
        print "s = ", s
    def eval_all(self, em_it = 3):
        """
        evaluate

        Train the LR baseline, the EM model and the variational model on
        the train split; report test MAE and spammer-detection AUC.
        """
        # LR
        #lr = LR(self.train_data, hetero = True)
        lr = LR(self.train_data, hetero = False)
        eval_lr = self.eval(lr)
        # nospam model
        #ns = model_nospam(self.train_data)
        #ns.init_em()
        #ns.em(em_it)
        #eval_ns = self.eval(ns)
        # model
        #new99 = model(self.train_data)
        #new99.init_em(0.99)
        #new99.em(em_it)
        #eval_new99 = self.eval(new99)
        new8 = model(self.train_data)
        #new8.init_em(0.99)
        new8.init_em(1)
        new8.em(1,1)
        eval_new8 = self.eval(new8)
        # fix bias model
        #fb = model_fixbias(self.train_data)
        #fb.init_em()
        #fb.em(em_it)
        #eval_fb = self.eval(fb)
        # variational model
        var82 = model_var(self.train_data, 9.9, 0.1)
        var82.init_em()
        #var82.e_step()
        var82.em(1,1)
        eval_var82 = self.eval(var82)
        #var191 = model_var(self.train_data, 19, 1)
        #var191.init_em()
        #var191.em(em_it)
        #eval_var191 = self.eval(var191)
        # spamer score
        ss_baseline = self.detect_spammer(baseline_spam(self.train_data))
        ss_new = self.detect_spammer(new8)
        ss_var82 = self.detect_spammer(var82)
        print "linear reg/baseline:", eval_lr, ss_baseline
        #print "no spam model:", eval_ns
        print "new model:", eval_new8, ss_new
        #print "new model(fixbias)", eval_fb
        print "var model", eval_var82, ss_var82
        #return ([eval_lr, eval_new99, eval_new9, eval_ns, eval_fb, eval_var91, eval_var191], ss_baseline, ss_new)
        #return ([eval_lr, eval_new8], ss_baseline, ss_new)
        return ([eval_lr, eval_new8, eval_var82], ss_baseline, ss_new, ss_var82)
    def detect_spammer(self, model):
        """
        return AUC of the model in detecting the spammers.
        model has a method spam_score(list_workers) that return the prob of being spammer
        """
        # in self.train_workers, the first n_random_workers entries are the
        # injected spammers, so they form the positive class.
        if self.n_random_workers == 0:
            return -1
        score = model.spam_score(self.train_workers)
        y = [1] * self.n_random_workers + [0] * (len(self.train_workers) - self.n_random_workers)
        return roc_auc_score(y, score)
class model_constvar(model):
"""
same model with constant variance
"""
def __init__(self, data):
    model.__init__(self,data)
    # Single shared standard deviation used for all configurations.
    self.std = 1
def get_std(self, i):
    # Constant-variance model: every configuration shares self.std
    # (the index i is ignored).
    return self.std
def expected_ll(self, w):
    """
    return expected log likelihood

    Two-component mixture per label: component 0 is the spam
    distribution, component 1 a Gaussian around the linear prediction
    F[i].w with the shared std; pt[i] holds the per-label weights
    (presumably posterior probabilities of the non-spam component —
    confirm) and theta[worker] the worker's mixing weight.
    """
    res = 0
    for i in range(self.n):
        workers, labels = self.L[i]
        for worker, l, pt1 in zip(workers, labels, self.pt[i]):
            pt0 = 1 - pt1
            theta = self.theta[worker]
            ll0 = np.log(self.spam_dist(l)) + np.log(1-theta)
            mean = self.F[i].dot(w)
            std = self.std
            if std < self.ep: std = self.ep  # floor std to avoid degenerate logpdf
            ll1 = scipy.stats.norm.logpdf(l, loc = mean, scale = std ) + np.log(theta)
            res += pt0*ll0 + pt1*ll1
    return res
def grad_expected_ll(self, w):
    # Gradient of expected_ll w.r.t. w. Only the Gaussian component
    # depends on w, contributing pt1 * (l - w.x) / sigma^2 * x per label.
    gw = np.zeros( (self.m,) )
    for i in range(self.n):
        workers, labels = self.L[i]
        for worker, l, pt1 in zip(workers, labels, self.pt[i]):
            wtc = self.F[i].dot(w)
            sigma = self.std
            if sigma < self.ep: sigma = self.ep  # same floor as expected_ll
            update_w = pt1*(l-wtc)/pow(sigma,2)*self.F[i]
            gw += update_w
    return gw
def m_step_var(self):
    # Closed-form M-step for the shared variance: responsibility-weighted
    # mean squared residual around the current linear fit self.w.
    # NOTE(review): this stores self.var, while the likelihood code reads
    # self.std — confirm the two are reconciled elsewhere.
    s1 = 0
    s2 = 0
    for i in range(self.n):
        workers, labels = self.L[i]
        for worker, l, pt1 in zip(workers, labels, self.pt[i]):
            wtx = self.F[i].dot(self.w)
            s1 += pt1*pow(l-wtx,2)
            s2 += pt1
    self.var = s1*1.0/s2
def m_step(self):
"""
maximize theta, W and var
"""
self.m_step_theta()
| |
# <gh_stars>1-10
import os
import uuid
from datetime import datetime
from collections import defaultdict, abc
from pathlib import Path
import numpy as np
import distutils.version
import spikeextractors as se
from spikeextractors.extraction_tools import check_get_traces_args, check_valid_unit_id
try:
import pynwb
from pynwb import NWBHDF5IO
from pynwb import NWBFile
from pynwb.ecephys import ElectricalSeries
from pynwb.ecephys import ElectrodeGroup
from hdmf.data_utils import DataChunkIterator
HAVE_NWB = True
except ModuleNotFoundError:
HAVE_NWB = False
def check_nwb_install():
    # Guard used by every NWB entry point: fail fast with an install hint
    # when pynwb could not be imported at module load time.
    assert HAVE_NWB, "To use the Nwb extractors, install pynwb: \n\n pip install pynwb\n\n"
def set_dynamic_table_property(dynamic_table, row_ids, property_name, values, index=False,
                               default_value=np.nan, description='no description'):
    """
    Add or update a column (property) of a pynwb DynamicTable.

    Parameters
    ----------
    dynamic_table : pynwb DynamicTable
        Table whose column is created or updated.
    row_ids : list of int
        Existing row ids to write to.
    values : list
        Values to write, aligned with row_ids (or the full ragged data
        when index is not False).
    index : bool or list
        Passed through to add_column; False means a flat column.
    default_value :
        Fill value for rows not listed in row_ids when a new flat
        column is created.
    description : str
        Column description used when a new column is created.
    """
    check_nwb_install()
    if not isinstance(row_ids, list) or not all(isinstance(x, int) for x in row_ids):
        # Fixed message: the parameter is row_ids, not ids.
        raise TypeError("'row_ids' must be a list of integers")
    ids = list(dynamic_table.id[:])
    # Map id -> position once: replaces repeated O(n) ids.index() calls.
    id_to_pos = {id_val: pos for pos, id_val in enumerate(ids)}
    if any(i not in id_to_pos for i in row_ids):
        raise ValueError("'row_ids' contains values outside the range of existing ids")
    if not isinstance(property_name, str):
        raise TypeError("'property_name' must be a string")
    if len(row_ids) != len(values) and index is False:
        raise ValueError("'row_ids' and 'values' should be lists of same size")
    if index is False:
        if property_name in dynamic_table:
            # Update the existing flat column in place.
            for row_id, value in zip(row_ids, values):
                dynamic_table[property_name].data[id_to_pos[row_id]] = value
        else:
            col_data = [default_value] * len(ids)  # init with default val
            for row_id, value in zip(row_ids, values):
                col_data[id_to_pos[row_id]] = value
            dynamic_table.add_column(
                name=property_name,
                description=description,
                data=col_data,
                index=index
            )
    else:
        if property_name in dynamic_table:
            # Updating an existing indexed (ragged) column is not yet supported.
            raise NotImplementedError
        else:
            dynamic_table.add_column(
                name=property_name,
                description=description,
                data=values,
                index=index
            )
def get_dynamic_table_property(dynamic_table, *, row_ids=None, property_name):
    # Collect the named property for the requested rows (all rows when
    # row_ids is None), preserving the order of row_ids.
    all_row_ids = list(dynamic_table.id[:])
    if row_ids is None:
        row_ids = all_row_ids
    values = []
    for row_id in row_ids:
        values.append(dynamic_table[property_name][all_row_ids.index(row_id)])
    return values
def find_all_unit_property_names(properties_dict: dict, features_dict: dict):
    """
    Collect every unit property name and every spike-feature name present
    in the per-unit dictionaries.

    Returns a tuple (properties_set, features_set) of name sets.
    """
    properties_set = set()
    for unit_properties in properties_dict.values():
        properties_set.update(unit_properties.keys())
    features_set = set()
    for unit_features in features_dict.values():
        features_set.update(unit_features.keys())
    return properties_set, features_set
def get_nspikes(units_table, unit_id):
    """Returns the number of spikes for chosen unit."""
    check_nwb_install()
    if unit_id not in units_table.id[:]:
        raise ValueError(str(unit_id) + " is an invalid unit_id. "
                         "Valid ids: " + str(units_table.id[:].tolist()))
    # 'spike_times_index' holds cumulative end offsets into the ragged
    # spike_times column; successive differences give per-unit counts.
    nSpikes = np.diff([0] + list(units_table['spike_times_index'].data[:])).tolist()
    # Position of unit_id within the table's id vector.
    ind = np.where(np.array(units_table.id[:]) == unit_id)[0][0]
    return nSpikes[ind]
def most_relevant_ch(traces):
    """
    Calculates the most relevant channel for an Unit.

    The most relevant channel is the one whose spike-averaged waveform
    has the largest peak-to-peak (max - min) amplitude.

    Parameters
    ----------
    traces : ndarray
        ndarray of shape (nSpikes, nChannels, nSamples)

    Returns
    -------
    int
        Index of the channel with the largest peak-to-peak average trace.
    """
    avg = np.mean(traces, axis=0)  # (nChannels, nSamples)
    # Peak-to-peak per channel in one vectorized pass, replacing the
    # original per-channel Python loop.
    max_min = np.ptp(avg, axis=1)
    relevant_ch = np.argmax(max_min)
    return relevant_ch
def update_dict(d, u):
    # Recursively merge mapping ``u`` into ``d`` in place and return ``d``.
    # Nested mappings are merged key-by-key; every other value overwrites.
    for key in u:
        value = u[key]
        if not isinstance(value, abc.Mapping):
            d[key] = value
        else:
            nested = d.get(key, {})
            d[key] = update_dict(nested, value)
    return d
class NwbRecordingExtractor(se.RecordingExtractor):
extractor_name = 'NwbRecording'
has_default_locations = True
installed = HAVE_NWB # check at class level if installed or not
is_writable = True
mode = 'file'
installation_mesg = "To use the Nwb extractors, install pynwb: \n\n pip install pynwb\n\n"
def __init__(self, file_path, electrical_series_name='ElectricalSeries'):
    """Open an NWB file and cache recording metadata.

    Parameters
    ----------
    file_path: path to NWB file
    electrical_series_name: str, optional
        Name of the acquisition ElectricalSeries to expose. If None, the
        file must contain exactly one acquisition, which is used.
    """
    check_nwb_install()
    se.RecordingExtractor.__init__(self)
    self._path = file_path
    with NWBHDF5IO(self._path, 'r') as io:
        nwbfile = io.read()
        # Resolve which ElectricalSeries to use.
        if electrical_series_name is not None:
            self._electrical_series_name = electrical_series_name
        else:
            a_names = list(nwbfile.acquisition)
            if len(a_names) > 1:
                raise ValueError('More than one acquisition found. You must specify electrical_series.')
            if len(a_names) == 0:
                raise ValueError('No acquisitions found in the .nwb file.')
            self._electrical_series_name = a_names[0]
        es = nwbfile.acquisition[self._electrical_series_name]
        # Sampling rate: prefer explicit timestamps; only the first 1000
        # samples are read so huge timestamp vectors are not pulled in.
        if hasattr(es, 'timestamps') and es.timestamps:
            # self.sampling_frequency = 1. / np.median(np.diff(es.timestamps))
            self.sampling_frequency = 1. / np.median(np.diff(es.timestamps[:1000]))
            self.recording_start_time = es.timestamps[0]
        else:
            self.sampling_frequency = es.rate
            if hasattr(es, 'starting_time'):
                self.recording_start_time = es.starting_time
            else:
                self.recording_start_time = 0.
        self.num_frames = int(es.data.shape[0])
        num_channels = len(es.electrodes.table.id[:])
        # Channels gains - for RecordingExtractor, these are values to cast traces to uV
        if es.channel_conversion is not None:
            gains = es.conversion * es.channel_conversion[:] * 1e6
        else:
            gains = es.conversion * np.ones(num_channels) * 1e6
        # Fill channel properties dictionary from electrodes table
        self.channel_ids = es.electrodes.table.id[:]
        for ind, i in enumerate(self.channel_ids):
            self.set_channel_property(i, 'gain', gains[ind])
            # Channel location from the optional rel_x/rel_y columns;
            # y defaults to 0 when only rel_x is present.
            # NOTE(review): nesting reconstructed from a whitespace-mangled
            # copy -- confirm against the original file.
            this_loc = []
            if 'rel_x' in nwbfile.electrodes:
                this_loc.append(nwbfile.electrodes['rel_x'][ind])
            if 'rel_y' in nwbfile.electrodes:
                this_loc.append(nwbfile.electrodes['rel_y'][ind])
            else:
                this_loc.append(0)
            self.set_channel_locations(this_loc, i)
        # reversed order of loop so to avoid significant delays for lg. # channels
        for col in nwbfile.electrodes.colnames:
            if isinstance(nwbfile.electrodes[col][0], ElectrodeGroup):  # this is the time-consuming line
                pass
            elif col == 'group_name':
                pass  # bare pass is a harmless leftover
                # Extractors channel groups must be integers, but Nwb electrodes group_name can be strings
                unique_grp_names = list(np.unique(nwbfile.electrodes['group_name'][:]))
                for ind, i in enumerate(self.channel_ids):
                    self.set_channel_groups(int(unique_grp_names.index(nwbfile.electrodes[col][ind])), i)
            elif col == 'location':
                pass  # bare pass is a harmless leftover
                for ind, i in enumerate(self.channel_ids):
                    self.set_channel_property(i, 'brain_area', nwbfile.electrodes[col][ind])
            elif col in ['x', 'y', 'z', 'rel_x', 'rel_y']:
                pass
            else:
                # only set explicitly-named channel properties -- for efficiency
                pass
        # Fill epochs dictionary
        self._epochs = {}
        if nwbfile.epochs is not None:
            df_epochs = nwbfile.epochs.to_dataframe()
            self._epochs = {row['tags'][0]: {
                'start_frame': self.time_to_frame(row['start_time']),
                'end_frame': self.time_to_frame(row['stop_time'])}
                for _, row in df_epochs.iterrows()}
        # Kwargs used by the dumping/serialization machinery.
        self._kwargs = {'file_path': str(Path(file_path).absolute()), 'electrical_series_name': electrical_series_name}
        self.make_nwb_metadata(nwbfile=nwbfile, es=es)
def make_nwb_metadata(self, nwbfile, es):
    """Build ``self.nwb_metadata``, a dictionary of NWBFile / Ecephys
    metadata (devices, electrode groups, electrical series) useful for
    constructing a new NWB file."""
    self.nwb_metadata = {
        'NWBFile': {
            'session_description': nwbfile.session_description,
            'identifier': nwbfile.identifier,
            'session_start_time': nwbfile.session_start_time,
            'institution': nwbfile.institution,
            'lab': nwbfile.lab,
        },
        'Ecephys': {
            # One entry per device known to the source file.
            'Device': [{'name': dev} for dev in nwbfile.devices],
            # One entry per electrode group, flattened to plain dicts.
            'ElectrodeGroup': [
                {
                    'name': grp.name,
                    'description': grp.description,
                    'location': grp.location,
                    'device': grp.device.name,
                }
                for grp in nwbfile.electrode_groups.values()
            ],
            'ElectricalSeries': [
                {'name': es.name, 'description': es.description},
            ],
        },
    }
@check_get_traces_args
def get_traces(self, channel_ids=None, start_frame=None, end_frame=None):
    """Return traces as a (num_channels, num_frames) array, with rows in
    the order of *channel_ids*.

    Parameters
    ----------
    channel_ids: list, optional
    start_frame: int, optional
    end_frame: int, optional
        Defaults are filled in by the check_get_traces_args decorator.
    """
    check_nwb_install()
    with NWBHDF5IO(self._path, 'r') as io:
        nwbfile = io.read()
        es = nwbfile.acquisition[self._electrical_series_name]
        # Map each requested channel id to its row within the
        # ElectricalSeries' electrode region.
        es_channel_ids = np.array(es.electrodes.table.id[:])[es.electrodes.data[:]].tolist()
        table_ids = [es_channel_ids.index(id) for id in channel_ids]
        if np.array(channel_ids).size > 1 and np.any(np.diff(channel_ids) < 0):
            # h5py fancy indexing requires ascending indices: fetch the
            # rows in sorted order, then apply the INVERSE permutation so
            # rows come back in the caller's requested order. (The
            # previous code applied the forward permutation
            # `recordings[sorted_idx]`, which scrambles the rows for any
            # ordering that is not its own inverse.)
            sorted_idx = np.argsort(table_ids)
            recordings = es.data[start_frame:end_frame, np.sort(table_ids)].T
            traces = recordings[np.argsort(sorted_idx), :]
        else:
            traces = es.data[start_frame:end_frame, table_ids].T
        # This DatasetView and lazy operations will only work within context
        # We're keeping the non-lazy version for now
        # es_view = DatasetView(es.data)  # es is an instantiated h5py dataset
        # traces = es_view.lazy_slice[start_frame:end_frame, channel_ids].lazy_transpose()
        return traces
def get_sampling_frequency(self):
    """Return the sampling frequency in Hz (derived in __init__ from the
    series' timestamps or its explicit rate)."""
    return self.sampling_frequency
def get_num_frames(self):
    """Return the total number of frames in the ElectricalSeries."""
    return self.num_frames
def get_channel_ids(self):
    """Return the channel ids (NWB electrode table ids) as a plain list."""
    return self.channel_ids.tolist()
@staticmethod
def add_devices(recording, nwbfile, metadata):
    """Ensure every device listed in *metadata* exists in *nwbfile*,
    creating a default 'Device' entry when none is specified.

    Returns the (possibly modified) nwbfile.
    """
    # Guarantee the metadata skeleton exists, with a default device.
    ecephys_meta = metadata.setdefault('Ecephys', dict())
    device_entries = ecephys_meta.setdefault('Device', [{'name': 'Device'}])
    # Create any device not already present in the file.
    for dev in device_entries:
        if dev['name'] not in nwbfile.devices:
            nwbfile.create_device(name=dev['name'])
    return nwbfile
@staticmethod
def add_electrode_groups(recording, nwbfile, metadata):
    """Ensure *nwbfile* has an ElectrodeGroup for each channel group of
    *recording*, adding metadata entries and NWB groups as needed.

    Returns the (possibly modified) nwbfile.
    """
    channel_ids = recording.get_channel_ids()
    # Electrode groups
    if 'ElectrodeGroup' not in metadata['Ecephys']:
        metadata['Ecephys']['ElectrodeGroup'] = []
    # Check if 'groups' property exists in self._channel_properties
    if 'group' in recording.get_shared_channel_property_names():
        RX_groups_names = np.unique(recording.get_channel_groups()).tolist()
    else:
        RX_groups_names = ["0"]
        # Electrode groups are required for NWB, for consistency we create group for Recording channels
        # NOTE(review): this default-group assignment was reconstructed as
        # part of the else branch from a whitespace-mangled copy -- confirm
        # it does not also run when real groups exist.
        vals = [0] * len(channel_ids)
        recording.set_channel_groups(channel_ids=channel_ids, groups=vals)
    for grp_name in RX_groups_names:
        metadata['Ecephys']['ElectrodeGroup'].append({
            'name': grp_name,
            'description': 'electrode_group_description',
            'location': 'electrode_group_location',
            'device': metadata['Ecephys']['Device'][0]['name']
        })
    # Tests if electrode groups exist in nwbfile, if not create them from metadata
    for grp in metadata['Ecephys']['ElectrodeGroup']:
        if str(grp['name']) not in nwbfile.electrode_groups:
            nwbfile.create_electrode_group(
                name=str(grp['name']),
                location=grp['location'],
                device=nwbfile.devices[grp['device']],
                description=grp['description']
            )
    return nwbfile
@staticmethod
def add_electrodes(recording, nwbfile, metadata):
"""
Auxiliary static method for nwbextractor.
Adds channels from recording object as electrodes to nwbfile object.
"""
# Check for existing electrodes
if nwbfile.electrodes is not None:
nwb_elec_ids = nwbfile.electrodes.id.data[:]
else:
nwb_elec_ids = []
# Extractors channel groups must be integers, but Nwb electrodes group_name can be strings
# nwb_groups_names = [str(grp['name']) for grp in metadata['Ecephys']['ElectrodeGroup']]
nwb_groups_names = list(nwbfile.electrode_groups.keys())
# For older versions of pynwb, we need to manually add these columns
if distutils.version.LooseVersion(pynwb.__version__) < '1.3.0':
if nwbfile.electrodes is None or 'rel_x' not in nwbfile.electrodes.colnames:
nwbfile.add_electrode_column('rel_x', 'x position of electrode in electrode group')
if nwbfile.electrodes is None or 'rel_y' not in nwbfile.electrodes.colnames:
nwbfile.add_electrode_column('rel_y', 'y position of electrode in electrode group')
# add new electrodes with id, (rel_x, rel_y) and groups
channel_ids = recording.get_channel_ids()
for m in channel_ids:
if m not in nwb_elec_ids:
location = recording.get_channel_locations(channel_ids=m)[0]
grp_name = recording.get_channel_groups(channel_ids=m)[0]
grp = nwbfile.electrode_groups[nwb_groups_names[grp_name]]
impedance = -1.0
nwbfile.add_electrode(
id=m,
x=np.nan, y=np.nan, z=np.nan,
rel_x=float(location[0]),
rel_y=float(location[1]),
imp=impedance,
location='unknown',
filtering='none',
group=grp,
)
electrode_table = nwbfile.electrodes
# add/update electrode properties
for ch in channel_ids:
rx_channel_properties = recording.get_channel_property_names(channel_id=ch)
for | |
in chaining.
"""
if not (B_G > 0):
raise ValueError('must have B_G > 0; got %r' % (B_G,))
self.in_vals[IN_VAL_B] = B_G
return self
def set_bfield_for_s0(self, s0):
    """Set B so the lowest sampled frequency probes harmonic number *s0*.

    **Call signature**

    *s0*
      The harmonic number to probe at the lowest frequency

    Returns *self* for convenience in chaining.

    Derived from ``nu = s nu_c = s e B / 2 pi m_e c``. Since *s* and *nu*
    scale together, when several frequencies are probed the harmonic
    numbers being probed scale in the same way.
    """
    if not (s0 > 0):
        raise ValueError('must have s0 > 0; got %r' % (s0,))
    self.in_vals[IN_VAL_B] = 2 * np.pi * cgs.me * cgs.c * self.in_vals[IN_VAL_FREQ0] / (cgs.e * s0)
    return self
def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):
    """Set the energy distribution function to a power law.

    **Call signature**

    *emin_mev*
      The minimum energy of the distribution, in MeV
    *emax_mev*
      The maximum energy of the distribution, in MeV
    *delta*
      The power-law index of the distribution
    *ne_cc*
      The number density of energetic electrons, in cm^-3.

    Returns *self* for convenience in chaining.

    Raises ValueError for out-of-range arguments.
    """
    if not (emin_mev >= 0):
        raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))
    if not (emax_mev >= emin_mev):
        raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))
    # Bug fix: these two messages used '%r, %r' with a single argument,
    # so a bad input raised TypeError instead of the intended ValueError.
    if not (delta >= 0):
        raise ValueError('must have delta >= 0; got %r' % (delta,))
    if not (ne_cc >= 0):
        raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))
    self.in_vals[IN_VAL_EDIST] = EDIST_PLW
    self.in_vals[IN_VAL_EMIN] = emin_mev
    self.in_vals[IN_VAL_EMAX] = emax_mev
    self.in_vals[IN_VAL_DELTA1] = delta
    self.in_vals[IN_VAL_NB] = ne_cc
    return self
def set_edist_powerlaw_gamma(self, gmin, gmax, delta, ne_cc):
    """Set the energy distribution function to a power law in the Lorentz factor

    **Call signature**

    *gmin*
      The minimum Lorentz factor of the distribution
    *gmax*
      The maximum Lorentz factor of the distribution
    *delta*
      The power-law index of the distribution
    *ne_cc*
      The number density of energetic electrons, in cm^-3.

    Returns *self* for convenience in chaining.

    Raises ValueError for out-of-range arguments.
    """
    if not (gmin >= 1):
        raise ValueError('must have gmin >= 1; got %r' % (gmin,))
    if not (gmax >= gmin):
        raise ValueError('must have gmax >= gmin; got %r, %r' % (gmax, gmin))
    # Bug fix: these two messages used '%r, %r' with a single argument,
    # so a bad input raised TypeError instead of the intended ValueError.
    if not (delta >= 0):
        raise ValueError('must have delta >= 0; got %r' % (delta,))
    if not (ne_cc >= 0):
        raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))
    self.in_vals[IN_VAL_EDIST] = EDIST_PLG
    # The library works in kinetic energies (MeV), so convert the Lorentz
    # factors: E_kin = (gamma - 1) * m_e c^2.
    self.in_vals[IN_VAL_EMIN] = (gmin - 1) * E0_MEV
    self.in_vals[IN_VAL_EMAX] = (gmax - 1) * E0_MEV
    self.in_vals[IN_VAL_DELTA1] = delta
    self.in_vals[IN_VAL_NB] = ne_cc
    return self
def set_freqs(self, n, f_lo_ghz, f_hi_ghz):
    """Configure the logarithmic frequency grid for the calculation.

    **Call signature**

    *n*
      The number of frequency points to sample.
    *f_lo_ghz*
      The lowest frequency to sample, in GHz.
    *f_hi_ghz*
      The highest frequency to sample, in GHz.

    Returns *self* for convenience in chaining.
    """
    if not (f_lo_ghz >= 0):
        raise ValueError('must have f_lo_ghz >= 0; got %r' % (f_lo_ghz,))
    if not (f_hi_ghz >= f_lo_ghz):
        raise ValueError('must have f_hi_ghz >= f_lo_ghz; got %r, %r' % (f_hi_ghz, f_lo_ghz))
    if not n >= 1:
        raise ValueError('must have n >= 1; got %r' % (n,))
    # The library stores the start frequency in Hz plus a log10 step.
    start_freq_hz = f_lo_ghz * 1e9  # GHz => Hz
    log_step = np.log10(f_hi_ghz / f_lo_ghz) / n
    self.in_vals[IN_VAL_NFREQ] = n
    self.in_vals[IN_VAL_FREQ0] = start_freq_hz
    self.in_vals[IN_VAL_LOGDFREQ] = log_step
    return self
def set_hybrid_parameters(self, s_C, s_WH, do_renorm=True):
    """Set the hybrid/renormalization control parameters.

    **Call signature**

    *s_C*
      The harmonic number above which the continuous approximation is
      used (with special behavior; see below).
    *s_WH*
      The harmonic number above which the Wild-Hill Bessel function
      approximations are used.
    *do_renorm* (default True)
      Whether to do any renormalization at all.

    Returns *self* for convenience in chaining.

    FK10 expresses these as the frequency parameters f^C_cr and f^WH_cr,
    set here as multiples of the electron cyclotron frequency f_Be:
    ``f^C_cr = s_C * f_Be``. Above f^C_cr the "exact" sum is replaced by a
    continuum integral; above f^WH_cr the Wild-Hill Bessel approximations
    kick in. Either switch can shift the normalization, so
    "renormalization" evaluates both forms at the transition frequencies
    and corrects the shift (called "R-optimization" in some FK10 docs).

    If f^C_cr lies below the lowest frequency, everything is computed in
    continuum mode; then the *sign* of *s_C* decides whether Wild-Hill
    renormalization applies (negative *s_C* with f^WH_cr above the lowest
    frequency => renormalize). The FK10 docs on f^WH_cr are confusing:
    they state it only matters if (1) s_WH < s_C or (2) s_C < 0 and
    f^WH_cr > f_0. Most FK10 examples set both parameters to 12.
    """
    self.in_vals[IN_VAL_FCCR] = s_C
    self.in_vals[IN_VAL_FWHCR] = s_WH
    self.in_vals[IN_VAL_RENORMFLAG] = int(bool(do_renorm))
    return self
def set_obs_angle(self, theta_rad):
    """Set the observer angle relative to the field.

    **Call signature**

    *theta_rad*
      The angle between the ray path and the local magnetic field,
      in radians.

    Returns *self* for convenience in chaining.
    """
    # The library expects degrees.
    theta_deg = theta_rad * 180 / np.pi
    self.in_vals[IN_VAL_THETA] = theta_deg
    return self
def set_one_freq(self, f_ghz):
    """Set the code to calculate results at just one frequency.

    **Call signature**

    *f_ghz*
      The frequency to sample, in GHz.

    Returns *self* for convenience in chaining.

    Raises ValueError if *f_ghz* is negative.
    """
    # Bug fix: the old check's message referenced the undefined name
    # `f_lo_ghz` (copy/paste from set_freqs), so a negative input raised
    # NameError instead of the intended ValueError.
    if not (f_ghz >= 0):
        raise ValueError('must have f_ghz >= 0; got %r' % (f_ghz,))
    self.in_vals[IN_VAL_NFREQ] = 1
    self.in_vals[IN_VAL_FREQ0] = f_ghz * 1e9  # GHz -> Hz
    self.in_vals[IN_VAL_LOGDFREQ] = 1.0
    return self
def set_padist_gaussian_loss_cone(self, boundary_rad, expwidth):
    """Set the pitch-angle distribution to a Gaussian loss cone.

    **Call signature**

    *boundary_rad*
      The angle inside which there are no losses, in radians.
    *expwidth*
      The characteristic width of the Gaussian loss profile
      *in direction-cosine units*.

    Returns *self* for convenience in chaining.

    See ``OnlineI.pdf`` in the Supplementary Data for a precise
    definition. (And note the distinction between α_c and μ_c since not
    everything is direction cosines.)
    """
    boundary_deg = boundary_rad * 180 / np.pi  # the library wants degrees
    self.in_vals[IN_VAL_PADIST] = PADIST_GLC
    self.in_vals[IN_VAL_LCBDY] = boundary_deg
    self.in_vals[IN_VAL_DELTAMU] = expwidth
    return self
def set_padist_isotropic(self):
    """Set the pitch-angle distribution to be isotropic.

    **Returns**
    *self* for convenience in chaining.
    """
    self.in_vals[IN_VAL_PADIST] = PADIST_ISO
    return self
def set_ignore_q_terms(self, ignore_q_terms):
    """Set whether "Q" terms are ignored.

    **Call signature**

    *ignore_q_terms*
      If true, ignore "Q" terms in the integrations.

    Returns *self* for convenience in chaining.

    See Section 4.3 of FK10 and ``OnlineII.pdf`` in the Supplementary
    Data for a precise explanation. The default is to *not* ignore the
    terms.
    """
    # The magic flag value 1 only applies to the "HomSrc_C" build of the
    # library, so it is deliberately not exposed here.
    if ignore_q_terms:
        qflag = 0
    else:
        qflag = 2
    self.in_vals[IN_VAL_QFLAG] = qflag
    return self
def set_thermal_background(self, T_K, nth_cc):
    """Set the properties of the background thermal plasma.

    **Call signature**

    *T_K*
      The temperature of the background plasma, in Kelvin.
    *nth_cc*
      The number density of thermal electrons, in cm^-3.

    Returns *self* for convenience in chaining.

    Note that the parameters set here are the same as the ones that
    describe the thermal electron distribution, if you choose one of the
    electron energy distributions that explicitly models a thermal
    component ("thm", "tnt", "tnp", "tng", "kappa" in the code's
    terminology). For the power-law-y electron distributions, these
    parameters are used to calculate dispersion parameters (e.g.
    refractive indices) and a free-free contribution, but their
    synchrotron contribution is ignored.
    """
    if not (T_K >= 0):
        raise ValueError('must have T_K >= 0; got %r' % (T_K,))
    # Bug fix: this message used '%r, %r' with a single argument, so a
    # bad input raised TypeError instead of the intended ValueError.
    if not (nth_cc >= 0):
        raise ValueError('must have nth_cc >= 0; got %r' % (nth_cc,))
    self.in_vals[IN_VAL_T0] = T_K
    self.in_vals[IN_VAL_N0] = nth_cc
    return self
def set_trapezoidal_integration(self, n):
"""Set the code to use trapezoidal | |
import unittest
from context_with_email import GraphQlContextWithEmail
from graphql.document import GraphQlParser
from graphql.executor import GraphQlContext
from graphql.executor import GraphQlExecutor
from graphql.executor.test.star_wars_extra import get_sw_ship
from graphql.executor.test.star_wars_extra import SwShip
from graphql.executor.test.star_wars_extra import SwUsers
from graphql.schema import GraphQlSchemaFactory
from silent_context import SilentGraphQlContext
from tracking_context import TrackingGraphQlContext
class GraphQlExecutorTest(unittest.TestCase):
def _context(self):
    """Return a GraphQlContext for the "star_wars" module."""
    module_names = [
        'graphql.executor.test.star_wars',
        'graphql.scalar_descriptors.strict',
    ]
    return GraphQlContext(GraphQlSchemaFactory.create_from_modules(module_names))
def _extra_schema(self):
    """Return a GraphQlSchema for the "star_wars_extra" module."""
    module_names = [
        'graphql.executor.test.star_wars',
        'graphql.executor.test.star_wars_extra',
        'graphql.scalar_descriptors.strict',
    ]
    return GraphQlSchemaFactory.create_from_modules(module_names)
def _extra_context(self):
    """Return a GraphQlContext for the "star_wars_extra" module."""
    return GraphQlContext(self._extra_schema())
def test_friend_queries(self):
    """Test GraphQlExecutor on the field Character{friend}."""
    context = self._context()
    # Simple query: just the hero's name.
    result = GraphQlExecutor.execute(
        "query HeroNameQuery {\n"
        " hero {\n"
        " name\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual({'data': {'hero': {'name': 'R2-D2'}}}, result)
    # One level of friends.
    result = GraphQlExecutor.execute(
        "query HeroNameAndFriendsQuery {\n"
        " hero {\n"
        " id\n"
        " name\n"
        " friends {\n"
        " name\n"
        " }\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {
            'data': {
                'hero': {
                    'friends': [
                        {'name': '<NAME>'},
                        {'name': '<NAME>'},
                        {'name': '<NAME>'},
                    ],
                    'id': '2001',
                    'name': 'R2-D2',
                },
            },
        }, result)
    # Two levels of nesting: friends of friends.
    result = GraphQlExecutor.execute(
        "query NestedQuery {\n"
        " hero {\n"
        " name\n"
        " friends {\n"
        " name\n"
        " appearsIn\n"
        " friends {\n"
        " name\n"
        " }\n"
        " }\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {
            'data': {
                'hero': {
                    'friends': [
                        {
                            'appearsIn': ['NEWHOPE', 'EMPIRE', 'JEDI'],
                            'friends': [
                                {'name': '<NAME>'},
                                {'name': '<NAME>'},
                                {'name': 'C-3PO'},
                                {'name': 'R2-D2'},
                            ],
                            'name': '<NAME>',
                        },
                        {
                            'appearsIn': ['NEWHOPE', 'EMPIRE', 'JEDI'],
                            'friends': [
                                {'name': '<NAME>'},
                                {'name': '<NAME>'},
                                {'name': 'R2-D2'},
                            ],
                            'name': '<NAME>',
                        },
                        {
                            'appearsIn': ['NEWHOPE', 'EMPIRE', 'JEDI'],
                            'friends': [
                                {'name': '<NAME>'},
                                {'name': '<NAME>'},
                                {'name': 'C-3PO'},
                                {'name': 'R2-D2'},
                            ],
                            'name': '<NAME>',
                        },
                    ],
                    'name': 'R2-D2',
                },
            },
        }, result)
def test_id_queries(self):
    """Test GraphQlExecutor on queries for Star Wars characters by ID."""
    context = self._context()
    # Literal id argument.
    result = GraphQlExecutor.execute(
        "query FetchLukeQuery {\n"
        " human(id: \"1000\") {\n"
        " name\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    # Id supplied through a query variable.
    result = GraphQlExecutor.execute(
        "query FetchSomeIDQuery($someId: String!) {\n"
        " human(id: $someId) {\n"
        " name\n"
        " }\n"
        "}\n",
        context, {'someId': '1000'})
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        "query FetchSomeIDQuery($someId: String!) {\n"
        " human(id: $someId) {\n"
        " name\n"
        " }\n"
        "}\n",
        context, {'someId': '1002'})
    self.assertEqual({'data': {'human': {'name': '<NAME>'}}}, result)
    # An unknown id resolves to null rather than producing an error.
    result = GraphQlExecutor.execute(
        "query FetchSomeIDQuery($someId: String!) {\n"
        " human(id: $someId) {\n"
        " name\n"
        " }\n"
        "}\n",
        context, {'someId': 'not a valid id'})
    self.assertEqual({'data': {'human': None}}, result)
def test_alias_queries(self):
    """Test GraphQlExecutor on queries that use aliases."""
    context = self._context()
    # Single aliased field.
    result = GraphQlExecutor.execute(
        "query FetchLukeAliased {\n"
        " luke: human(id: \"1000\") {\n"
        " name\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {'data': {'luke': {'name': '<NAME>'}}}, result)
    # Two aliases of the same field with different arguments.
    result = GraphQlExecutor.execute(
        "query FetchLukeAndLeiaAliased {\n"
        " luke: human(id: \"1000\") {\n"
        " name\n"
        " }\n"
        " leia: human(id: \"1003\") {\n"
        " name\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {
            'data': {
                'luke': {'name': '<NAME>'},
                'leia': {'name': '<NAME>'},
            },
        }, result)
    # Same selection set repeated under each alias.
    result = GraphQlExecutor.execute(
        "query DuplicateFields {\n"
        " luke: human(id: \"1000\") {\n"
        " name\n"
        " homePlanet\n"
        " }\n"
        " leia: human(id: \"1003\") {\n"
        " name\n"
        " homePlanet\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {
            'data': {
                'luke': {
                    'homePlanet': 'Tatooine',
                    'name': '<NAME>',
                },
                'leia': {
                    'homePlanet': 'Alderaan',
                    'name': '<NAME>',
                },
            },
        }, result)
    # Same result via a shared fragment instead of duplicated fields.
    result = GraphQlExecutor.execute(
        "query UseFragment {\n"
        " luke: human(id: \"1000\") {\n"
        " ...HumanFragment\n"
        " }\n"
        " leia: human(id: \"1003\") {\n"
        " ...HumanFragment\n"
        " }\n"
        "}\n"
        "\n"
        "fragment HumanFragment on Human {\n"
        " name\n"
        " homePlanet\n"
        "}\n",
        context)
    self.assertEqual(
        {
            'data': {
                'luke': {
                    'homePlanet': 'Tatooine',
                    'name': '<NAME>',
                },
                'leia': {
                    'homePlanet': 'Alderaan',
                    'name': '<NAME>',
                },
            },
        }, result)
def test_typename_queries(self):
    """Test GraphQlExecutor on queries that request the __typename field.
    """
    context = self._context()
    # Default hero is the droid R2-D2.
    result = GraphQlExecutor.execute(
        "query CheckTypeOfR2 {\n"
        " hero {\n"
        " __typename\n"
        " name\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {'data': {'hero': {'name': 'R2-D2', '__typename': 'Droid'}}},
        result)
    # The EMPIRE hero is a human, so __typename differs.
    result = GraphQlExecutor.execute(
        "query CheckTypeOfLuke {\n"
        " hero(episode: EMPIRE) {\n"
        " __typename\n"
        " name\n"
        " }\n"
        "}\n",
        context)
    self.assertEqual(
        {
            'data': {
                'hero': {
                    'name': '<NAME>',
                    '__typename': 'Human',
                },
            },
        }, result)
def test_directives(self):
    """Test GraphQlExecutor on documents with directives."""
    context = self._context()
    # @include / @skip on a leaf field.
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){name @include(if: true)}}', context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){name @include(if: false)}}', context)
    self.assertEqual({'data': {'human': {}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){name @skip(if: true)}}', context)
    self.assertEqual({'data': {'human': {}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){name @skip(if: false)}}', context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    # @include on an object field.
    result = GraphQlExecutor.execute(
        '{human(id: "1000") @include(if: true){name}}', context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000") @include(if: false){name}}', context)
    self.assertEqual({'data': {}}, result)
    # @include on a fragment definition.
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){...HumanFields}} '
        'fragment HumanFields on Human @include(if: true){name}',
        context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){...HumanFields}} '
        'fragment HumanFields on Human @include(if: false){name}',
        context)
    self.assertEqual({'data': {'human': {}}}, result)
    # @include on a field inside a fragment.
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){...HumanFields}} '
        'fragment HumanFields on Human{name @include(if: true)}',
        context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){...HumanFields}} '
        'fragment HumanFields on Human{name @include(if: false)}',
        context)
    self.assertEqual({'data': {'human': {}}}, result)
    # @include on a fragment spread.
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){...HumanFields @include(if: true)}} '
        'fragment HumanFields on Human{name}',
        context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        '{human(id: "1000"){...HumanFields @include(if: false)}} '
        'fragment HumanFields on Human{name}',
        context)
    self.assertEqual({'data': {'human': {}}}, result)
    # @include driven by a query variable.
    result = GraphQlExecutor.execute(
        '($if: Boolean!) {human(id: "1000"){name @include(if: $if)}}',
        context, {'if': True})
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute(
        '($if: Boolean!) {human(id: "1000"){name @include(if: $if)}}',
        context, {'if': False})
    self.assertEqual({'data': {'human': {}}}, result)
def test_gexecute_document(self):
    """Test GraphQlExecutor.execute_document.

    Executes the same pre-parsed document twice to confirm the document
    object is reusable.
    """
    # NOTE(review): the method name looks like a typo for
    # 'test_execute_document'; left unchanged to preserve the interface.
    context = self._context()
    document = GraphQlParser(
        '{human(id: "1000"){name}}', context.schema).parse()
    result = GraphQlExecutor.execute_document(document, context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
    result = GraphQlExecutor.execute_document(document, context)
    self.assertEqual(
        {'data': {'human': {'name': '<NAME>'}}}, result)
def test_mutations(self):
    """Test GraphQlExecutor on documents with mutations."""
    SwShip.reset()
    context = self._extra_context()
    result = GraphQlExecutor.execute(
        'mutation{introduceShip(input: '
        '{clientMutationId: "foo", name: "Y-wing"})'
        '{clientMutationId, ship{id, name}}}',
        context)
    # dict.keys() instead of the Python-2-only iterkeys(); identical
    # behavior under list()/set() on both Python 2 and 3.
    self.assertEqual(['data'], list(result.keys()))
    self.assertEqual(['introduceShip'], list(result['data'].keys()))
    self.assertEqual(
        set(['clientMutationId', 'ship']),
        set(result['data']['introduceShip'].keys()))
    self.assertEqual(
        'foo', result['data']['introduceShip']['clientMutationId'])
    self.assertEqual(
        set(['id', 'name']),
        set(result['data']['introduceShip']['ship'].keys()))
    self.assertEqual(
        'Y-wing', result['data']['introduceShip']['ship']['name'])
    # The mutation must actually have persisted the new ship.
    ship_id = result['data']['introduceShip']['ship']['id']
    self.assertEqual('Y-wing', get_sw_ship(ship_id).name)
    # Mutations execute serially; a failing middle mutation yields null
    # without aborting the later ones.
    result = GraphQlExecutor.execute(
        'mutation{ship1: setFavoriteShip(id: "3000"){name}, '
        'ship2: setFavoriteShip(id: "does not exist"){name}, '
        'ship3: setFavoriteShip(id: "3001"){name}}',
        context)
    self.assertEqual(
        {
            'data': {
                'ship1': {'name': 'Millennium Falcon'},
                'ship2': None,
                'ship3': {'name': 'X-wing'},
            },
        }, result)
    self.assertEqual('X-wing', SwShip.favorite_ship().name)
def test_unions(self):
    """Test GraphQlExecutor on documents with union fields."""
    context = self._extra_context()
    # A ship result matches only the Ship inline fragment.
    result = GraphQlExecutor.execute(
        '{search(name: "X-wing") {'
        '... on Character {appearsIn}, ... on Ship {id}}}',
        context)
    self.assertEqual({'data': {'search': {'id': '3001'}}}, result)
    # A character result matches only the Character inline fragment.
    result = GraphQlExecutor.execute(
        '{search(name: "<NAME>") {'
        '... on Character {appearsIn}, ... on Ship {id}}}',
        context)
    self.assertEqual(
        {'data': {'search': {'appearsIn': ['NEWHOPE', 'EMPIRE', 'JEDI']}}},
        result)
    # No match resolves to null.
    result = GraphQlExecutor.execute(
        '{search(name: "<NAME>") {'
        '... on Character {appearsIn}, ... on Ship {id}}}',
        context)
    self.assertEqual({'data': {'search': None}}, result)
def test_context_args(self):
    """Test context arguments, as in GraphQlContext.context_arg.

    The same query yields different results depending on the email the
    context was constructed with.
    """
    context = GraphQlContextWithEmail('<EMAIL>')
    result = GraphQlExecutor.execute('{favoriteCharacter {name}}', context)
    self.assertEqual(
        {'data': {'favoriteCharacter': {'name': '<NAME>'}}}, result)
    context = GraphQlContextWithEmail('<EMAIL>')
    result = GraphQlExecutor.execute('{favoriteCharacter {name}}', context)
    self.assertEqual(
        {'data': {'favoriteCharacter': {'name': 'R2-D2'}}}, result)
def test_multiple_inheritance(self):
    """Test GraphQlExecutor with multiple inheritance."""
    schema = GraphQlSchemaFactory.create_from_modules([
        'graphql.executor.test.multiple_inheritance',
        'graphql.scalar_descriptors.strict'])
    context = SilentGraphQlContext(schema)
    # All three getters resolve to the most-derived type D.
    result = GraphQlExecutor.execute(
        '{getA{__typename}, getB{__typename}, getC{__typename}}', context)
    self.assertEqual(
        {
            'data': {
                'getA': {'__typename': 'MultipleInheritanceD'},
                'getB': {'__typename': 'MultipleInheritanceD'},
                'getC': {'__typename': 'MultipleInheritanceD'},
            },
        }, result)
    # Requesting the value through an incompatible interface must produce
    # an error alongside a null data entry.
    result = GraphQlExecutor.execute('{getAAsF{__typename}}', context)
    # dict.keys() instead of the Python-2-only iterkeys(); identical
    # behavior under set() on both Python 2 and 3.
    self.assertTrue(
        set(result.keys()).issubset(set(['data', 'errors'])))
    self.assertTrue('errors' in result)
    self.assertGreater(len(result['errors']), 0)
    self.assertTrue('data' in result)
    self.assertEqual({'getAAsF': None}, result['data'])
def _validate_error_response(self, graphql_response):
    """Assert the specified value is a valid GraphQL error response.

    This only performs basic validation. For example, it does not
    verify that the entire response is a JSON value.
    """
    # dict.keys() instead of the Python-2-only iterkeys(); identical
    # behavior under set() on both Python 2 and 3.
    self.assertTrue(
        set(graphql_response.keys()).issubset(
            set(['errors', 'extensions'])))
    self.assertTrue('errors' in graphql_response)
    self.assertGreater(len(graphql_response['errors']), 0)
def test_overriding(self):
"""Test GraphQlExecutor with field overriding."""
schema = GraphQlSchemaFactory.create_from_modules([
'graphql.executor.test.overriding',
'graphql.scalar_descriptors.strict'])
context = SilentGraphQlContext(schema)
result = GraphQlExecutor.execute(
'{stringManipulatorDerived {'
'manipulate(base: "foo", prefix: "bar", suffix: "baz")}}',
context)
self.assertEqual(
{
'data': {
'stringManipulatorDerived': {
'manipulate': 'barfoobaz',
},
},
}, result)
result = GraphQlExecutor.execute(
'{stringManipulatorDerived {'
'manipulate(base: "foo", prefix: "bar")}}',
context)
self.assertEqual(
{
'data': {
'stringManipulatorDerived': {
'manipulate': 'barfoo',
},
},
}, result)
result = GraphQlExecutor.execute(
'{stringManipulatorDerived {'
'... on StringManipulatorMiddleInterface {'
'manipulate(base: "foo", prefix: "bar")}}}',
context)
self.assertEqual(
{
'data': {
'stringManipulatorDerived': {
'manipulate': 'barfoo',
},
},
}, result)
result = GraphQlExecutor.execute(
'{stringManipulatorDerived {'
'... on StringManipulatorMiddle {'
'manipulate(base: "foo", prefix: "bar")}}}',
context)
self.assertEqual(
{
'data': {
'stringManipulatorDerived': | |
# Copyright 2019 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glob import glob
import os
import re
import sys
import subprocess
import fileinput
import readline
from shutil import copy2
from subprocess import Popen, PIPE
from netaddr import IPNetwork, IPAddress
from tabulate import tabulate
from lib.config import Config
import lib.logger as logger
# Regular expression for a MAC address such as '01:23:45:67:89:ab'.
# Bug fix: the group's opening parenthesis was missing, which made the
# pattern invalid (re.error on compile).
PATTERN_MAC = r'([\da-fA-F]{2}:){5}[\da-fA-F]{2}'
# Re-exported so callers need not import subprocess themselves.
CalledProcessError = subprocess.CalledProcessError
def get_network_addr(ipaddr, prefix):
    """Return the network address of the subnet containing *ipaddr*,
    given the prefix length, as a string."""
    subnet = IPNetwork(f'{ipaddr}/{prefix}')
    return str(subnet.network)
def get_netmask(prefix):
    """Convert a prefix length into a dotted-quad netmask string."""
    dummy_network = IPNetwork(f'0.0.0.0/{prefix}')
    return str(dummy_network.netmask)
def get_prefix(netmask):
    """Convert a dotted-quad netmask into its prefix length (bit count)."""
    address = IPAddress(netmask)
    return address.netmask_bits()
def bash_cmd(cmd):
    """Run a command through a Bash subprocess.

    Args:
        cmd (str): Command line to execute via ``bash -c``.
    Returns:
        str: Combined stdout/stderr produced by the command.
    Raises:
        subprocess.CalledProcessError: If the command exits non-zero.
    """
    log = logger.getlogger()
    full_cmd = ['bash', '-c', cmd]
    log.debug('Run subprocess: %s' % ' '.join(full_cmd))
    result = subprocess.check_output(full_cmd, universal_newlines=True,
                                     stderr=subprocess.STDOUT)
    # universal_newlines=True already yields str; decode defensively in
    # case a caller path ever hands back bytes.
    if isinstance(result, bytes):
        result = result.decode('utf-8')
    log.debug(result)
    return result
def backup_file(path, suffix='.orig', multi=True):
    """Save a read-only backup copy of a file.

    The backup is saved under the original name with *suffix* appended.
    If *multi* is True and a backup already exists, an additional backup
    is made with a numeric index appended (``file.orig.1``,
    ``file.orig.2``, ...). The backup copy filemode is set to read-only.

    Args:
        path (str): Path of file to backup.
        suffix (str): String to append to the filename of the backup.
        multi (bool): Set False to only make a backup if one does not
            exist already.
    """
    log = logger.getlogger()
    backup_path = path + suffix
    version = 0
    # Bug fix: the index was previously appended cumulatively to the
    # last candidate (producing file.orig.1.2.3); rebuild the candidate
    # from the base each pass so indexes stay flat (file.orig.3).
    while os.path.exists(backup_path) and multi:
        version += 1
        backup_path = path + suffix + '.' + str(version)
    log.debug('Make backup copy of original file: \'%s\'' % backup_path)
    copy2(path, backup_path)
    os.chmod(backup_path, 0o444)
def append_line(path, line, check_exists=True):
    """Append a line to the end of a text file.

    Args:
        path (str): Path of file.
        line (str): String to append; a trailing newline is added when
            missing.
        check_exists (bool): If True, skip the append when the exact
            line is already present in the file.
    """
    log = logger.getlogger()
    log.debug('Add line \'%s\' to file \'%s\'' % (line, path))
    if not line.endswith('\n'):
        line += '\n'
    already_present = False
    if check_exists:
        with open(path, 'r') as src:
            already_present = any(existing == line for existing in src)
    if not already_present:
        with open(path, 'a') as dst:
            dst.write(line)
def remove_line(path, regex):
    """Remove line(s) matching a regex pattern from a file, in place.

    Args:
        path (str): Path of file.
        regex (str): Regex pattern; matching is anchored at the start of
            each line (``re.match`` semantics).
    """
    log = logger.getlogger()
    log.debug('Remove lines containing regex \'%s\' from file \'%s\'' %
              (regex, path))
    pattern = re.compile(regex)
    # fileinput with inplace=1 redirects print() back into the file.
    for text in fileinput.input(path, inplace=1):
        if pattern.match(text) is None:
            print(text, end='')
def line_in_file(path, regex, replace, backup=None):
    """Set a line matching *regex* to *replace*, or append it.

    If *regex* matches in the file, the matching portion of every
    matching line is replaced with *replace*. Otherwise *replace* is
    appended as a new line at the end of the file. This facilitates
    simplified changing of a parameter to a desired value if it already
    exists in the file, or adding the parameter if it does not exist.

    Args:
        path (str): Path to the file; silently does nothing if the path
            is not a regular file.
        regex (str): Python regular expression.
        replace (str): Replacement string.
        backup (str): If specified, a backup of the original file will
            be made (if one does not already exist) in the same
            directory, by appending this value to the filename.
    """
    if not os.path.isfile(path):
        return
    if backup:
        # Bug fix: the requested backup suffix was previously ignored.
        backup_file(path, suffix=backup, multi=False)
    try:
        with open(path, 'r') as f:
            data = f.read()
    except FileNotFoundError as exc:
        print(f'File not found: {path}. Err: {exc}')
    else:
        lines = data.splitlines()
        found = False
        # open 'r+' (rewrite in place) to maintain owner
        with open(path, 'r+') as f:
            for line in lines:
                if re.search(regex, line):
                    line = re.sub(regex, replace, line)
                    found = True
                f.write(line + '\n')
            if not found:
                f.write(replace + '\n')
            # Bug fix: truncate so stale bytes from a longer original
            # file do not survive past the rewritten content.
            f.truncate()
def replace_regex(path, regex, replace):
    """Replace regex matches in every line of a file, in place.

    Args:
        path (str): Path of file.
        regex (str): Regex pattern.
        replace (str): String substituted for each match.
    """
    log = logger.getlogger()
    log.debug('Replace regex \'%s\' with \'%s\' in file \'%s\'' %
              (regex, replace, path))
    # fileinput with inplace=1 redirects print() back into the file.
    for text in fileinput.input(path, inplace=1):
        updated = re.sub(regex, replace, text)
        print(updated, end='')
def copy_file(source, dest):
    """Copy a file (with metadata) to a given destination.

    Args:
        source (str): Path of source file.
        dest (str): Destination path to copy the file to.
    """
    log = logger.getlogger()
    log.debug(f'Copy file, source:{source} dest:{dest}')
    copy2(source, dest)
def sub_proc_launch(cmd, stdout=PIPE, stderr=PIPE):
    """Launch a subprocess without waiting for it to finish.

    Non-blocking; useful for long running processes.

    Args:
        cmd (str): Command line; split on whitespace for Popen.
    Returns:
        subprocess.Popen: Handle to the running process.
    """
    return Popen(cmd.split(), stdout=stdout, stderr=stderr)
def sub_proc_exec(cmd, stdout=PIPE, stderr=PIPE, shell=False):
    """Run a subprocess and block until it finishes.

    Args:
        cmd (str): Command line; split on whitespace unless *shell*.
        shell (bool): Pass the command through a shell.
    Returns:
        (str, str, int): stdout, stderr and the process return code.
    """
    argv = cmd if shell else cmd.split()
    proc = Popen(argv, stdout=stdout, stderr=stderr, shell=shell)
    out, err = proc.communicate()
    # Streams arrive as bytes unless redirected by the caller; decode
    # when possible and pass anything else (str or None) through as-is.
    if isinstance(out, bytes):
        out = out.decode('utf-8')
    if isinstance(err, bytes):
        err = err.decode('utf-8')
    return out, err, proc.returncode
def sub_proc_display(cmd, stdout=None, stderr=None, shell=False):
    """Run a subprocess whose output goes straight to the parent screen.

    Created without pipes by default so the subprocess prints to the
    parent terminal. This is a blocking function.

    Returns:
        int: The process return code.
    """
    argv = cmd if shell else cmd.split()
    proc = Popen(argv, stdout=stdout, stderr=stderr, shell=shell)
    proc.wait()
    return proc.returncode
def sub_proc_wait(proc):
    """Block until *proc* finishes, displaying a simple counter.

    This is a blocking wait. NOTE: sleeping (time.sleep()) in the wait loop
    dramatically reduces performace of the subprocess. It would appear the
    subprocess does not get it's own thread.

    Args:
        proc (subprocess.Popen): Already-launched process to wait on.
    Returns:
        int: The process return code.
    """
    cnt = 0
    rc = None
    while rc is None:
        rc = proc.poll()
        # NOTE(review): cnt counts loop iterations, not seconds (there is
        # deliberately no sleep here, see docstring), so the displayed
        # "time elapsed" only approximates wall time -- confirm whether a
        # real clock should be used instead.
        print('\rwaiting for process to finish. Time elapsed: {:2}:{:2}:{:2}'.
              format(cnt // 3600, cnt % 3600 // 60, cnt % 60), end="")
        sys.stdout.flush()
        cnt += 1
    print('\n')
    # Drain the pipes; stdout is echoed for the caller's benefit.
    resp, err = proc.communicate()
    print(resp)
    return rc
class Color:
    """ANSI terminal escape sequences for colors and cursor control."""
    # bright foreground colors
    black = '\033[90m'
    red = '\033[91m'
    green = '\033[92m'
    yellow = '\033[93m'
    blue = '\033[94m'
    purple = '\033[95m'
    cyan = '\033[96m'
    white = '\033[97m'
    # text attributes
    bold = '\033[1m'
    underline = '\033[4m'
    # cursor / screen control
    sol = '\033[1G'          # move cursor to start of line
    clr_to_eol = '\033[K'    # clear to end of line
    clr_to_bot = '\033[J'    # clear to bottom of screen
    scroll_five = '\n\n\n\n\n'
    scroll_ten = '\n\n\n\n\n\n\n\n\n\n'
    up_one = '\033[1A'
    up_five = '\033[5A'
    up_ten = '\033[10A'
    header1 = ' ' + bold + underline
    endc = '\033[0m'         # reset all attributes
def heading1(text='-', width=79):
    """Print *text* as a bold, underlined heading padded to *width*."""
    styled = f' {Color.bold}{Color.underline}{text}{Color.endc}'
    # width is widened to pad past the invisible ANSI escape characters
    print(f'\n{styled: <{width + 8}}')
def bold(text):
    """Return *text* wrapped in ANSI bold escape codes."""
    return f'{Color.bold}{text}{Color.endc}'
def rlinput(prompt, prefill=''):
    """Prompt for input with an editable, prefilled value.

    A readline startup hook pre-inserts *prefill*; the hook is always
    removed afterwards, even if input() raises.
    """
    readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        reply = input(prompt)
    finally:
        readline.set_startup_hook()
    return reply
def files_present(url, fileglobs, _all=True):
    """Return True if any/all of the fileglobs are present in the url.

    Crawls *url* with ``wget --spider`` (up to 10 levels deep) and scans
    the crawl log for each glob.

    Args:
        url (str): Base URL to search.
        fileglobs (iterable of str): File glob patterns.
        _all (bool): If True require every glob to be found; otherwise
            any single one suffices.
    """
    if not fileglobs:
        return True
    found_any = False
    found_all = True
    globs_csv = ','.join(fileglobs)
    cmd = (f'wget -r -l 10 -nd -np --spider --accept={globs_csv} {url}')
    reply, err, rc = sub_proc_exec(cmd)
    # wget percent-encodes '+' in its crawl log; undo before matching.
    err = err.replace('%2B', '+')
    if rc == 0:
        for fileglob in fileglobs:
            matches = re.findall(fileglob_to_regx(fileglob), err)
            found_any = found_any or matches != []
            found_all = found_all and matches != []
    return found_all if _all else found_any
def fileglob_to_regx(fileglob):
    """Translate a shell-style fileglob into the regex used to scan
    wget crawl output (anchored to an http URL prefix).
    """
    # Order matters: escape literals first, then expand wildcards.
    replacements = (
        ('.', r'\.'),            # literal dots
        ('+', r'\+'),            # literal plus signs
        (']*', '][0-9]{0,3}'),   # ']*' idiom: up to three digits
        ('*', '.*'),             # remaining wildcards
    )
    regx = fileglob
    for old, new in replacements:
        regx = regx.replace(old, new)
    return 'http.+' + regx
def get_url(url='http://', fileglob='', prompt_name='', repo_chk='', contains=[],
excludes=[], filelist=[]):
"""Input a URL from user. The URL is checked for validity using curl and
wget and the user can continue modifying it indefinitely until a response
is obtained or he can enter 'sss' to skip (stop) entry.
If a fileglob is specified, the specified url is searched
recursively (crawled) up to 10 levels deep looking for matches.
If repo_chk is specified, the url is searched recursively looking for a
marker specific to that repo type. If multiple URL's are found, the
list of found url's is filtered using 'contains', 'excludes' and
'files_present'. The user is again prompted to make a selection.
fileglob and repo_chk are mutually exclusive.
If neither fileglob nor repo_chk are specified, and the url does not end in '/'
then the url is assumed to be | |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import copy
import os
import datetime
import shutil
from collections import OrderedDict
from itertools import product
import yaml
from numpy import uint8 # noqa: F401 (used in evaluated strings)
# Absolute directory containing this script; used to resolve repo paths.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
class Argument:
    """Lightweight record describing one kernel argument.

    Attributes:
        name: Argument name as given in the spec.
        typename: Spec type string, e.g. "List[int64_t]".
        direction: "in" or "out".
        role: Grouping role used by test generation; "default" when the
            argument is ungrouped.
    """

    __slots__ = ("name", "typename", "direction", "role")

    def __init__(self, name, typename, direction, role="default"):
        # Assign in slot order; keeps the attribute list in one place.
        for slot, value in zip(self.__slots__,
                               (name, typename, direction, role)):
            setattr(self, slot, value)
class Specification:
    """One kernel specialization plus its auto-generated test cases.

    Built from an entry of kernel-specification.yml. ``tests`` is a list
    of dicts with keys "inargs", "success" and (on success) "outargs".
    """

    def __init__(self, templatized_kernel_name, spec, testdata, blacklisted):
        """Parse one specialization and generate its tests.

        Args:
            templatized_kernel_name: Name of the generic (templated) kernel.
            spec: Specialization mapping with "name" and "args".
            testdata: Parsed "tests" section of kernel-test-data.yml.
            blacklisted: If True, skip test generation entirely.
        """
        self.templatized_kernel_name = templatized_kernel_name
        self.name = spec["name"]
        self.args = []
        for arg in spec["args"]:
            self.args.append(
                Argument(
                    arg["name"],
                    arg["type"],
                    arg["dir"],
                    arg["role"] if "role" in arg.keys() else "default",
                )
            )
        if blacklisted:
            self.tests = []
        else:
            self.tests = self.gettests(testdata)

    def validateoverflow(self, testvals):
        """Return False if any unsigned argument received a negative value."""
        flag = True
        for arg in self.args:
            if "uint" in arg.typename and (
                any(n < 0 for n in testvals["inargs"][arg.name])
                or (
                    "outargs" in testvals.keys()
                    and arg.name in testvals["outargs"].keys()
                    and any(n < 0 for n in testvals["outargs"][arg.name])
                )
            ):
                flag = False
        return flag

    def dicttolist(self, outputdict, typename):
        """Convert a sparse {index: value} dict into a dense list.

        Gaps between indexes are filled with the dummy value for
        *typename* (see gettypeval).
        """
        typeval = gettypeval(typename)
        vallist = []
        count = 0
        for num in sorted(outputdict):
            if num == count:
                vallist.append(outputdict[num])
            else:
                # fill the gap up to this index with dummy values
                while num != count:
                    count += 1
                    vallist.append(typeval)
                vallist.append(outputdict[num])
                count += 1
        return vallist

    def getdummyvalue(self, typename, length):
        """Return a list of *length* dummy values for *typename*."""
        return [gettypeval(typename)] * length

    def typevalidates(self, testdict, arglist):
        """Check that every generated value matches its argument's type."""
        for arg in arglist:
            if isinstance(testdict[arg.name], list):
                if testdict[arg.name] == []:
                    return False
                # only the first element is checked; lists are homogeneous
                if not isinstance(
                    testdict[arg.name][0], type(gettypeval(arg.typename))
                ):
                    return False
            else:
                if not isinstance(testdict[arg.name], type(gettypeval(arg.typename))):
                    return False
        return True

    def gettests(self, testdata):
        """Generate test cases by executing the Python reference kernel.

        Loads the generated tests-spec/kernels.py into scope, builds the
        cartesian product of candidate inputs per argument group, runs
        the kernel on each combination, and records inputs/outputs (or
        the expected failure) for combinations that validate.
        """
        allvals = []
        # Define the reference kernel functions in this local scope.
        with open(
            os.path.join(CURRENT_DIR, "..", "tests-spec", "kernels.py")
        ) as kernelfile:
            wrap_exec(kernelfile.read(), globals(), locals())
        instancedict = {}
        funcpassdict = OrderedDict()
        count = 0
        for arg in self.args:
            funcpassdict[arg.name] = []
            if arg.role == "default":
                # ungrouped argument: gets its own singleton group
                group = str(count)
                assert group not in instancedict.keys()
                instancedict[group] = [arg.name]
                if arg.direction == "out":
                    # out-params start as a dict the kernel fills by index
                    funcpassdict[arg.name].append({})
                else:
                    funcpassdict[arg.name].append(testdata["num"])
                assert len(funcpassdict[arg.name]) == 1
                count += 1
            else:
                # grouped argument: role is "<group>-<field>"
                group = arg.role[: arg.role.find("-")]
                if group not in instancedict.keys():
                    instancedict[group] = []
                instancedict[group].append(arg.name)
                # allow a trailing discriminator character on the group name
                if group not in testdata.keys() and group[:-1] in testdata.keys():
                    pseudogroup = copy.copy(group[:-1])
                elif group in testdata.keys():
                    pseudogroup = copy.copy(group)
                role = pseudogroup + arg.role[arg.role.find("-") :]
                for x in range(len(testdata[pseudogroup])):
                    funcpassdict[arg.name].append(testdata[pseudogroup][x][role])
        instancedictlist = list(instancedict.keys())
        combinations = []
        # zip values within a group; take the product across groups
        for name in instancedictlist:
            temp = []
            for arg in instancedict[name]:
                temp.append(funcpassdict[arg])
            combinations.append(zip(*temp))
        for x in product(*combinations):
            origtemp = OrderedDict()
            for groupName, t in zip(instancedictlist, x):
                for key, value in zip(instancedict[groupName], t):
                    origtemp[key] = value
            # deep copy so the kernel can mutate out-params freely
            temp = copy.deepcopy(origtemp)
            funcPy = wrap_eval(self.name, globals(), locals())
            intests = OrderedDict()
            outtests = OrderedDict()
            tempdict = {}
            try:
                funcPy(**temp)
                for arg in self.args:
                    if arg.direction == "out":
                        assert isinstance(temp[arg.name], dict)
                        temparglist = self.dicttolist(temp[arg.name], arg.typename)
                        intests[arg.name] = self.getdummyvalue(
                            arg.typename, len(temparglist)
                        )
                        outtests[arg.name] = temparglist
                    else:
                        intests[arg.name] = temp[arg.name]
                tempdict["outargs"] = copy.deepcopy(outtests)
                tempdict["success"] = True
            except ValueError:
                # the kernel rejecting an input is itself a test case
                for arg in self.args:
                    if arg.direction == "out":
                        intests[arg.name] = self.getdummyvalue(
                            arg.typename, len(temp[arg.name])
                        )
                    else:
                        intests[arg.name] = temp[arg.name]
                tempdict["success"] = False
            tempdict["inargs"] = copy.deepcopy(intests)
            if self.typevalidates(
                tempdict["inargs"], self.args
            ) and self.validateoverflow(tempdict):
                allvals.append(tempdict)
        return allvals
def readspec():
    """Read the kernel specification and build Specification objects.

    Regenerates the Python reference kernels first, then creates a
    Specification (with generated tests unless blacklisted) for every
    specialization of every kernel that has a Python definition.

    Returns:
        dict: Mapping of specialization name -> Specification.
    """
    genpykernels()
    specdict = {}
    with open(os.path.join(CURRENT_DIR, "..", "kernel-specification.yml")) as specfile:
        loadfile = yaml.safe_load(specfile)
        indspec = loadfile["kernels"]
        # Bug fix: the test-data file was previously opened without a
        # context manager and the handle was never closed.
        with open(
            os.path.join(CURRENT_DIR, "..", "kernel-test-data.yml")
        ) as datafile:
            data = yaml.safe_load(datafile)["tests"]
        for spec in indspec:
            if "def " in spec["definition"]:
                for childfunc in spec["specializations"]:
                    specdict[childfunc["name"]] = Specification(
                        spec["name"],
                        childfunc,
                        data,
                        not spec["automatic-tests"],
                    )
    return specdict
def wrap_exec(string, globs, locs):
    """Execute *string* as Python code in the given namespaces.

    Single exec entry point for code built from the (trusted) kernel
    specification files.
    """
    exec(string, globs, locs)
def wrap_eval(string, globs, locs):
    """Evaluate *string* as a Python expression and return its value."""
    return eval(string, globs, locs)
def gettypename(spectype):
    """Strip List[...] wrappers and a trailing '_t' from a spec type.

    e.g. "List[int64_t]" -> "int64".
    """
    bare = spectype.replace("List", "").replace("[", "").replace("]", "")
    return bare[:-2] if bare.endswith("_t") else bare
def getfuncnames():
    """Map each templatized kernel name to its specialization names.

    Returns:
        dict: kernel name -> list of specialization names.
    """
    specpath = os.path.join(CURRENT_DIR, "..", "kernel-specification.yml")
    with open(specpath) as specfile:
        indspec = yaml.safe_load(specfile)["kernels"]
    funcs = {}
    for spec in indspec:
        funcs[spec["name"]] = [
            childfunc["name"] for childfunc in spec["specializations"]
        ]
    return funcs
def genpykernels():
    """Regenerate tests-spec/kernels.py from the kernel specification.

    Recreates the tests-spec and tests-spec-explicit directories from
    scratch, writes every kernel's Python definition (plus one alias per
    specialization) into kernels.py, and copies kernels.py into
    tests-spec-explicit.
    """
    print("Generating Python kernels")
    # Preamble written at the top of the generated kernels.py.
    prefix = """
from numpy import uint8
kMaxInt64 = 9223372036854775806
kSliceNone = kMaxInt64 + 1
"""
    # Start from a clean output directory every run.
    tests_spec = os.path.join(CURRENT_DIR, "..", "tests-spec")
    if os.path.exists(tests_spec):
        shutil.rmtree(tests_spec)
    os.mkdir(tests_spec)
    with open(os.path.join(tests_spec, "__init__.py"), "w") as f:
        f.write(
            """# AUTO GENERATED ON {}
# DO NOT EDIT BY HAND!
#
# To regenerate file, run
#
# python dev/generate-tests.py
#
# fmt: off
""".format(
                datetime.datetime.now().isoformat().replace("T", " AT ")[:22]
            )
        )
    with open(
        os.path.join(CURRENT_DIR, "..", "tests-spec", "kernels.py"), "w"
    ) as outfile:
        outfile.write(prefix)
        with open(
            os.path.join(CURRENT_DIR, "..", "kernel-specification.yml")
        ) as specfile:
            indspec = yaml.safe_load(specfile)["kernels"]
            for spec in indspec:
                # Only kernels with an inline Python definition are emitted.
                if "def " in spec["definition"]:
                    outfile.write(spec["definition"] + "\n")
                    # Each specialization is an alias of the generic kernel.
                    for childfunc in spec["specializations"]:
                        outfile.write(
                            "{} = {}\n".format(childfunc["name"], spec["name"])
                        )
                    outfile.write("\n\n")
    unit_tests = os.path.join(CURRENT_DIR, "..", "tests-spec-explicit")
    if os.path.exists(unit_tests):
        shutil.rmtree(unit_tests)
    os.mkdir(unit_tests)
    # NOTE(review): despite the names, copy_dest is the *source* and
    # final_dest the destination of this copy -- consider renaming.
    final_dest = os.path.join(CURRENT_DIR, "..", "tests-spec-explicit")
    copy_dest = os.path.join(CURRENT_DIR, "..", "tests-spec", "kernels.py")
    shutil.copy(copy_dest, final_dest)
def gettypeval(typename):
    """Return a dummy scalar value of the kind named by *typename*.

    Used as filler/placeholder data when generating kernel tests.

    Raises:
        ValueError: If the type name is not a known int/bool/float kind.
    """
    # "int" also covers unsigned kinds such as "uint32".
    if "int" in typename:
        return 123
    if "bool" in typename:
        return True
    if "double" in typename or "float" in typename:
        return 123.0
    raise ValueError("Unknown type encountered")
def getcudakernelslist():
    """List CUDA kernel names found in the cuda_kernels source tree.

    Strips the ".cu" extension, and a leading "manual_" prefix where
    present.

    Returns:
        list of str: Kernel names.
    """
    cudakernels = []
    for f in os.listdir(
        os.path.join(
            os.path.dirname(CURRENT_DIR),
            "src",
            "awkward",
            "_v2",
            "_connect",
            "cuda",
            "cuda_kernels",
        )
    ):
        # NOTE(review): the directory listed above and the directory
        # checked with isfile() below differ -- confirm whether a kernel
        # must exist in both locations or one of the paths is stale.
        if os.path.isfile(os.path.join(CURRENT_DIR, "..", "src", "cuda-kernels", f)):
            if f.startswith("awkward_") and f.endswith(".cu"):
                cudakernels.append(f[:-3])
            elif f.startswith("manual_awkward_") and f.endswith(".cu"):
                cudakernels.append(f[len("manual_") : -3])
    return cudakernels
def genspectests(specdict):
    """Write one pytest file per specification into tests-spec/.

    For each Specification, emits test functions that set up the
    recorded inputs, call the Python reference kernel, and either
    compare outputs (success cases) or expect an exception (failure
    cases). Specs with no generated tests get a skipped placeholder.

    Args:
        specdict (dict): name -> Specification, as built by readspec().
    """
    print("Generating files for testing specification")
    for spec in specdict.values():
        with open(
            os.path.join(
                CURRENT_DIR, "..", "tests-spec", "test_py" + spec.name + ".py"
            ),
            "w",
        ) as f:
            f.write(
                """# AUTO GENERATED ON {}
# DO NOT EDIT BY HAND!
#
# To regenerate file, run
#
# python dev/generate-tests.py
#
# fmt: off
""".format(
                    datetime.datetime.now().isoformat().replace("T", " AT ")[:22]
                )
            )
            f.write("import pytest\nimport kernels\n\n")
            num = 1
            if spec.tests == []:
                # No generable tests: emit a single skipped placeholder.
                f.write(
                    "@pytest.mark.skip(reason='Unable to generate any tests for kernel')\n"
                )
                f.write("def test_py" + spec.name + "_" + str(num) + "():\n")
                f.write(
                    " " * 4
                    + "raise NotImplementedError('Unable to generate any tests for kernel')\n"
                )
            else:
                for test in spec.tests:
                    f.write("def test_py" + spec.name + "_" + str(num) + "():\n")
                    num += 1
                    args = ""
                    # Emit one local assignment per input argument.
                    for arg, val in test["inargs"].items():
                        f.write(" " * 4 + arg + " = " + str(val) + "\n")
                    f.write(
                        " " * 4 + "funcPy = getattr(kernels, '" + spec.name + "')\n"
                    )
                    # Build the keyword-argument call string "a=a, b=b, ...".
                    count = 0
                    for arg in test["inargs"].keys():
                        if count == 0:
                            args += arg + "=" + arg
                            count += 1
                        else:
                            args += ", " + arg + "=" + arg
                    if test["success"]:
                        f.write(" " * 4 + "funcPy" + "(" + args + ")\n")
                        # Compare each out-param against the recorded values;
                        # only the filled prefix of list outputs is checked.
                        for arg, val in test["outargs"].items():
                            f.write(" " * 4 + "pytest_" + arg + " = " + str(val) + "\n")
                            if isinstance(val, list):
                                f.write(
                                    " " * 4
                                    + "assert {0}[:len(pytest_{0})] == pytest.approx(pytest_{0})\n".format(
                                        arg
                                    )
                                )
                            else:
                                f.write(
                                    " " * 4 + "assert {0} == pytest_{0}\n".format(arg)
                                )
                    else:
                        # Failure case: the kernel is expected to raise.
                        f.write(" " * 4 + "with pytest.raises(Exception):\n")
                        f.write(" " * 8 + "funcPy(" + args + ")\n")
                    f.write("\n")
def remove_const(typename):
    """Strip a single 'Const[...]' wrapper from a spec type string.

    e.g. "Const[List[int64_t]]" -> "List[int64_t]".
    """
    if "Const[" in typename:
        # Bug fix: rstrip("]") removed *every* trailing ']', so nested
        # types like "Const[List[int64_t]]" lost the bracket closing the
        # inner List. Remove exactly the one matching bracket instead.
        typename = typename.replace("Const[", "", 1)
        if typename.endswith("]"):
            typename = typename[:-1]
    return typename
def getctypelist(arglist):
    """Build the ctypes argument-signature string for a kernel.

    Args:
        arglist (list of Argument): Parsed kernel arguments.
    Returns:
        str: e.g. "(ctypes.POINTER(ctypes.c_int64), ctypes.c_bool)".
    """
    ctype_names = []
    for arg in arglist:
        typename = remove_const(arg.typename)
        if "List" in typename:
            depth = typename.count("List")
            typename = typename.replace("List", "").replace("[", "").replace("]", "")
            if typename.endswith("_t"):
                typename = typename[:-2]
            # One ctypes.POINTER wrapper per level of List nesting.
            head = ""
            tail = ")"
            for level in range(depth):
                if level > 0:
                    head += "("
                    tail += ")"
                head += "ctypes.POINTER"
            head += "(ctypes.c_"
            ctype_names.append(head + typename + tail)
        else:
            if typename.endswith("_t"):
                typename = typename[:-2]
            ctype_names.append("ctypes.c_" + typename)
    return "(" + ", ".join(ctype_names) + ")"
def gencpukerneltests(specdict):
print("Generating files for testing CPU kernels")
tests_cpu_kernels = os.path.join(CURRENT_DIR, "..", "tests-cpu-kernels")
if os.path.exists(tests_cpu_kernels):
shutil.rmtree(tests_cpu_kernels)
os.mkdir(tests_cpu_kernels)
with open(os.path.join(tests_cpu_kernels, "__init__.py"), "w") as f:
f.write(
"""# AUTO GENERATED ON {}
# DO NOT EDIT BY HAND!
#
# To regenerate file, run
#
# python dev/generate-tests.py
#
# fmt: off
""".format(
datetime.datetime.now().isoformat().replace("T", " AT ")[:22]
)
)
for spec in specdict.values():
with open(
os.path.join(tests_cpu_kernels, "test_cpu" + spec.name + ".py"), "w"
) as f:
f.write(
"""# AUTO GENERATED ON {}
# DO NOT EDIT BY HAND!
#
# To regenerate file, run
#
# python dev/generate-tests.py
#
# fmt: off
""".format(
datetime.datetime.now().isoformat().replace("T", " AT ")[:22]
)
)
f.write(
"import ctypes\nimport pytest\n\nfrom awkward._cpu_kernels import lib\n\n"
)
num = 1
if spec.tests == []:
f.write(
"@pytest.mark.skip(reason='Unable to generate any tests for kernel')\n"
)
f.write("def test_cpu" + spec.name + "_" + str(num) + "():\n")
f.write(
" " * 4
+ "raise NotImplementedError('Unable to generate any tests for kernel')\n"
)
for test in spec.tests:
f.write("def test_cpu" + spec.name + "_" | |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import csv
from .utils import *
from schulze_voting import SchulzeVote
from median_voting import MedianVote
class ParseException(Exception):
    """Exception thrown by all parse methods in this module.

    The message includes the offending line number where available.
    """
    pass
# Regular expression to parse lines from a voters file.
# Matches entries of the form "* <name>: <weight>" where weight is an
# unsigned integer and name is captured non-greedily before the colon.
_voter_rx = re.compile(r'\s*[*]\s+(?P<name>.+?):\s*(?P<weight>\d+)$')
def parse_voters(reader):
    """Parse a voters file.

    Such a file must contain one voter entry in each line. Each line must
    be of the form

        * <VOTER-NAME>: <WEIGHT>

    Blank lines and lines starting with '#' are skipped. It does not
    return a list of the voters but an iterator over WeightedVoter
    objects (groups name and weight together).

    Args:
        reader (iterable of string): Anything to iterate over and receive
            lines (file opened with open, list of strings).
    Yields:
        WeightedVoter: All parsed voters.
    Raises:
        ParseException: If there is a syntax error.
    """
    for line_num, line in enumerate(reader, 1):
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        m = _voter_rx.match(line)
        if not m:
            raise ParseException('Invalid syntax in line %d, must be of form "* voter: weight"' % line_num)
        name, weight = m.group('name'), m.group('weight')
        # The regex guarantees digits here; the guard is belt-and-braces.
        try:
            weight = int(weight)
        except ValueError as e:
            # Bug fix: corrected typo "enry" -> "entry" in the message.
            raise ParseException('Invalid entry in line %d: %s, line must be of form "voter: weight"' % (line_num, str(e)))
        yield WeightedVoter(name, weight)
# The following section contains regular expressions used to parse a description file.
# "# <title>"  -- collection heading
_head_rx = re.compile(r'\s*#\s+(?P<title>.+)$')
# "## <group>" -- voting-group heading
_group_rx = re.compile(r'\s*##\s+(?P<group>.+?)$')
# "### <voting>" -- single-voting heading
_voting_rx = re.compile(r'\s*###\s+(?P<voting>.+?)$')
# "* <option>" -- one Schulze option
_schulze_option_rx = re.compile(r'\s*[*]\s+(?P<option>.+?)$')
# "- <amount>[.,<cent>] [currency]" -- the value of a median voting
_median_option_rx = re.compile(r'\s*[-]\s+(?P<euro>\d+)(?:[.,](?P<cent>\d{1,2}))?\s*(?P<currency>[€$£])?$')
# not nice, mostly just a copy of the regex before
_currency_rx = re.compile(r'(?P<euro>\d+)(?:[.,](?P<cent>\d{1,2}))?\s*(?P<currency>[€$£])?$')
def currency_match(match):
    """Convert a currency regex match into (value_in_cents, currency).

    Args:
        match: A match of ``_currency_rx`` / ``_median_option_rx``, or
            None.
    Returns:
        (int, str) or None: Value in cents plus the currency symbol
        (which may be None if absent); None if *match* is falsy or the
        number cannot be parsed.
    """
    if not match:
        return None
    # The regex only admits digits here, so int() should always succeed;
    # the guard is kept defensively. (Unused exception binding removed.)
    try:
        value = int(match.group('euro')) * 100
        cent = match.group('cent')
        if cent is not None:
            if len(cent) == 1:
                # a single digit after the separator means tens of cents
                value += (int(cent) * 10)
            elif len(cent) == 2:
                value += int(cent)
            else:
                # unreachable: the regex caps the cent group at 2 digits
                assert False
        return value, match.group('currency')
    except ValueError:
        return None
def parse_currency(s):
    """Parse a currency value from a string.

    Args:
        s (str): String in the currency format.
    Returns:
        (int, str) or None: The value (in cent) and the currency symbol;
        currency may be None if not provided in the string. None when
        the string does not parse.
    Examples:
        >>> parse_currency('100')
        (10000, None)
        >>> parse_currency('42.84 €')
        (4284, '€')
        >>> parse_currency('1337,1 $')
        (133710, '$')
    """
    match = _currency_rx.match(s)
    return currency_match(match)
# States for the collection parser: parse_voting_collection is a small
# hand-written state machine, and each constant names what kind of line
# is expected next.
_head_state = 'start'
_group_state = 'group'
_voting_state = 'voting'
_option_state = 'option'
_group_or_voting_state = 'group-or-voting'
_schulze_option_state = 'schulze-option'
# tries to match the string s against a list of regexes, returns first index and the match object. Returns -1 and None
# on failure.
def _match_first(s, *args):
for i, rx in enumerate(args):
m = rx.match(s)
if m:
return i, m
return -1, None
def parse_voting_collection(reader):
    """Parse a voting collection from a list of strings (or a file).

    Implemented as a small state machine: the current state names what
    kind of line is expected next (title, group, voting, option, ...).
    For syntax information see the wiki.

    Args:
        reader: File like object to read from (a list will also do);
            something to iterate over and receive lines.
    Returns:
        VotingCollection: The parsed collection.
    Raises:
        ParseException: If there is a syntax / parse error.
    """
    res = VotingCollection('', None, [])
    state = _head_state
    last_voting_name = None
    for line_num, line in enumerate(reader, 1):
        line = line.strip()
        # blank lines are ignored in every state
        if not line:
            continue
        if state == _head_state:
            # the first non-empty line must be the collection title
            m = _head_rx.match(line)
            if not m:
                raise ParseException('Invalid head line in line %d, must be "# <TITLE>"' % line_num)
            res.name = m.group('title')
            state = _group_state
        elif state == _group_state:
            state = _handle_group_state(res, line, line_num)
        elif state == _voting_state:
            # parse a voting name
            last_voting_name, state = _handle_voting_state(res, line, line_num)
        elif state == _option_state:
            state = _handle_option_state(res, last_voting_name, line, line_num)
        elif state == _group_or_voting_state:
            last_voting_name, state = _handle_group_or_voting_state(res, last_voting_name, line, line_num)
        elif state == _schulze_option_state:
            last_voting_name, state = _handle_schulze_option_state(res, last_voting_name, line, line_num)
        else:
            raise ParseException('Internal error: Invalid state while parsing voting collection: %s' % str(state))
    return res
# The following block contains state parse methods
def _handle_group_state(res, line, line_num):
    """Parse a "## <GROUP>" line and append a new (empty) group to *res*.

    Returns the next parser state (a voting must follow).
    Raises ParseException on a syntax error.
    """
    match = _group_rx.match(line)
    if not match:
        raise ParseException('Invalid group in line %d, must be "## <GROUP>"' % line_num)
    # register the freshly created group on the collection
    res.groups.append(VotingGroup(match.group('group'), [], []))
    return _voting_state
def _handle_voting_state(res, line, line_num):
    """Parse a "### <VOTING>" line.

    The voting object itself is created later, once the first option
    reveals whether it is a Schulze or a Median voting.

    Returns:
        (str, state): The voting name and the next parser state.
    Raises:
        ParseException: On a syntax error.
    """
    match = _voting_rx.match(line)
    if not match:
        raise ParseException('Invalid voting in line %d, must be "### <VOTING>"' % line_num)
    return match.group('voting'), _option_state
def _handle_option_state(res, last_voting_name, line, line_num):
    """Parse the first option line of a pending voting.

    A "*" option makes the pending voting a Schulze voting; a "-" value
    makes it a Median voting. The new voting skeleton is appended to the
    current (last) group of *res*.

    Returns:
        The next parser state.
    Raises:
        ParseException: On a syntax error or inconsistent parser state.
    """
    if not res.groups or last_voting_name == "" or last_voting_name is None:
        # should not happen, just to be sure
        raise ParseException('Internal error: Illegal state while parsing voting options in line %d' % line_num)
    last_group = res.groups[-1]
    # parse either a median or schulze option; there must be an option now
    # (local renamed from `id` to avoid shadowing the builtin)
    rx_index, m = _match_first(line, _schulze_option_rx, _median_option_rx)
    if rx_index < 0:
        raise ParseException('Invalid voting option in line %d, must be a Median or Schulze option' % line_num)
    elif rx_index == 0:
        # first Schulze option: create the Schulze voting skeleton now
        option = m.group('option')
        # NOTE(review): id=len(last_group) assumes VotingGroup defines
        # __len__ -- confirm against its definition in utils.
        schulze_skel = SchulzeVotingSkeleton(last_voting_name, [option, ], id=len(last_group))
        last_group.schulze_votings.append(schulze_skel)
        # now parse more schulze options or a new group / voting
        state = _schulze_option_state
    elif rx_index == 1:
        # we parsed the value of a median voting, transform to int cents
        parse_res = currency_match(m)
        if not parse_res:
            # should never happen: the regex matched, so the groups parse
            raise ParseException('Internal error: Unable to parse value for median voting in line %d' % line_num)
        val, currency = parse_res
        median_skel = MedianVotingSkeleton(last_voting_name, val, currency, id=len(last_group))
        last_group.median_votings.append(median_skel)
        # a median voting has no option list: next must be group or voting
        state = _group_or_voting_state
    else:
        assert False
    return state
def _handle_group_or_voting_state(res, last_voting_name, line, line_num):
    """Parse a line that may start either a new group or a new voting.

    The group syntax is tried first, then the voting syntax.

    Returns:
        (str, state): The (possibly new) voting name and the next state.
    Raises:
        ParseException: If the line is neither a group nor a voting.
    """
    try:
        return last_voting_name, _handle_group_state(res, line, line_num)
    except ParseException:
        pass
    try:
        return _handle_voting_state(res, line, line_num)
    except ParseException:
        raise ParseException('Invalid syntax in line %d: Must be either a group or a voting' % line_num)
def _handle_schulze_option_state(res, last_voting_name, line, line_num):
    """Parse an additional Schulze option, or fall back to group/voting.

    An option line appends to the newest Schulze voting of the newest
    group; any other line must start a new group or voting.

    Returns:
        (str, state): The (possibly new) voting name and the next state.
    Raises:
        ParseException: On a syntax error.
    """
    match = _schulze_option_rx.match(line)
    if match:
        # some duplication with _handle_option_state, but acceptable
        if not res.groups:
            # should not happen
            raise ParseException('Internal error: Invalid syntax in line %d: No group given.' % line_num)
        current_voting = res.groups[-1].schulze_votings[-1]
        current_voting.options.append(match.group('option'))
        # state does not change: more options may follow
        return last_voting_name, _schulze_option_state
    # not an option: must be a new group or a new voting
    try:
        return _handle_group_or_voting_state(res, last_voting_name, line, line_num)
    except ParseException:
        raise ParseException('Invalid syntax in line %d: Must be a Schulze option, group or new voting' % line_num)
# csv column-header forms: "Median(<value>)" and "Schulze(<num>)",
# case-insensitive in the first letter; used by the csv head parser.
_csv_median_head_rx = re.compile(r'[Mm]edian\s*\((?P<value>\d+)\)\s*$')
_csv_schulze_head_rx = re.compile(r'[Ss]chulze\s*\((?P<num>\d+)\)\s*$')
def _parse_csv_head(head_row):
if len(head_row) < 2:
raise ParseException('csv head must contain at least two columns')
group = VotingGroup('Votings', [], [])
res = VotingCollection('', None, [group,])
for col_num, col in enumerate(head_row[2:]):
col = col.strip()
i, m = _match_first(col, _csv_median_head_rx, _csv_schulze_head_rx)
if i < 0:
raise ParseException('Invalid column %d: Expected Schulze or Median definition' % col_num)
elif i == 0:
try:
value = int(m.group('value'))
voting = MedianVotingSkeleton('Voting %d' % (col_num + 1), value, | |
<reponame>vkolli/5.0_contrail-test<gh_stars>0
from base import BasePolicyTest
from tcutils.wrappers import preposttest_wrapper
import test
from vn_test import VNFixture
from policy_test import PolicyFixture, copy
from common.policy import policy_test_utils
from vm_test import VMFixture, time
from tcutils.topo.sdn_topo_setup import sdnTopoSetupFixture
from tcutils.util import get_random_name, get_random_cidr
from common.system.system_verification import system_vna_verify_policy
from tcutils.test_lib.test_utils import assertEqual
import sdn_basic_topology
import os
import sdn_single_vm_multiple_policy_topology
import sdn_single_vm_policy_topology
from vnc_api.vnc_api import *
# Address family for these tests; 'dual' presumably selects the
# combined v4+v6 stack -- TODO confirm against BasePolicyTest usage.
af_test = 'dual'
class TestBasicPolicy(BasePolicyTest):
'''Policy config tests'''
_interface = 'json'
    @classmethod
    def setUpClass(cls):
        # All common connection/fixture setup lives in BasePolicyTest.
        super(TestBasicPolicy, cls).setUpClass()
    def runTest(self):
        # Intentionally a no-op: the concrete scenarios live in the
        # individual test_* methods of this class.
        pass
    @test.attr(type=['suite1', 'vcenter'])
    @preposttest_wrapper
    def test_policy(self):
        """ Configure policies based on topology and run policy related verifications.
        """
        result = True
        #
        # Get config for test from topology
        topology_class_name = sdn_basic_topology.sdn_basic_config
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            topo = topology_class_name(
                project=self.project.project_name,
                username=self.project.project_username,
                password=self.project.project_user_password)
        except NameError:
            topo = topology_class_name()
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo))
        out = setup_obj.topo_setup()
        assertEqual(out['result'], True, out['msg'])
        # NOTE(review): out['result'] is always True past the assertEqual
        # above, so this guard is redundant -- kept for safety.
        if out['result']:
            topo, config_topo = out['data']
        #
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(
            self,
            config_topo['policy'][policy_name],
            topo,
            'setup')
        # Verify ICMP traffic between the two VM's.
        # NOTE(review): returning False from a test method does not fail
        # it under most runners -- confirm the wrapper interprets the
        # return value.
        if not config_topo['vm'][topo.vmc_list[0]].ping_with_certainty(
                expectation=True,
                dst_vm_fixture=config_topo['vm'][topo.vmc_list[1]]):
            self.logger.error(
                'Ping from %s to %s failed,expected it to pass' %
                (config_topo['vm'][topo.vmc_list[0]].vm_name,
                 config_topo['vm'][topo.vmc_list[1]].vm_name))
            return False
        return True
    # end test_policy
@test.attr(type=['cb_sanity', 'sanity', 'ci_sanity', 'quick_sanity', 'vcenter', 'suite1', 'vcenter_compute'])
@preposttest_wrapper
def test_basic_policy_allow_deny(self):
'''
Create 2 Vns and allow icmp traffic between them and validate with pings
Update the policy to deny the same traffic
Check that pings fail
'''
vn1_fixture = self.create_vn()
vn2_fixture = self.create_vn()
# vn1_name = get_random_name('vn1')
# vn1_subnets = ['192.168.10.0/24']
policy_name = get_random_name('policy1')
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn1_fixture.vn_name,
'dest_network': vn2_fixture.vn_name,
},
]
policy_fixture = self.setup_policy_between_vns(vn1_fixture,
vn2_fixture, rules)
assert vn1_fixture.verify_on_setup()
assert vn2_fixture.verify_on_setup()
vm1_fixture = self.create_vm(vn1_fixture)
vm2_fixture = self.create_vm(vn2_fixture)
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), (
'Ping failed between VNs with allow-policy')
# Deny the same traffic
policy_id = policy_fixture.get_id()
rules[0]['simple_action'] = 'deny'
policy_entries = policy_fixture.get_entries()
if type(policy_entries) is PolicyEntriesType:
policy_entries.policy_rule[0].action_list.simple_action = 'deny'
p_rules = policy_entries
else:
policy_entries['policy_rule'][0]['action_list']['simple_action'] = 'deny'
p_rules = {'policy': {'entries':policy_entries}}
policy_fixture.update_policy(policy_id, p_rules)
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip,
expectation=False), ('Ping passed between VNs with deny-policy')
# end test_basic_policy_allow_deny
@preposttest_wrapper
def test_policy_to_deny(self):
''' Test to validate that with policy having rule to disable icmp within the VN, ping between VMs should fail
1. Pick 2 VN from resource pool which have one VM in each
2. Create policy with icmp deny rule
3. Associate policy to both VN
4. Ping from one VM to another. Ping should fail
Pass criteria: Step 2,3 and 4 should pass
'''
vn1_name = get_random_name('vn1')
vn1_subnets = ['192.168.10.0/24']
policy_name = get_random_name('policy1')
rules = [
{
'direction': '<>', 'simple_action': 'deny',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn1_name,
},
]
policy_fixture = self.useFixture(
PolicyFixture(
policy_name=policy_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
vn1_fixture.bind_policies(
[policy_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
assert vn1_fixture.verify_on_setup()
vn1_vm1_name = get_random_name('vn1_vm1')
vn1_vm2_name = get_random_name('vn1_vm2')
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
vm1_fixture.wait_till_vm_is_up()
vm2_fixture.wait_till_vm_is_up()
if not vm1_fixture.ping_with_certainty(expectation=False,dst_vm_fixture=vm2_fixture):
self.logger.error(
'Ping from %s to %s passed,expected it to fail' %
(vm1_fixture.vm_name, vm2_fixture.vm_name))
self.logger.info('Doing verifications on the fixtures now..')
return True
# end test_policy_to_deny
# end of class TestBasicPolicyConfig
class TestBasicPolicyNegative(BasePolicyTest):
    '''Negative tests.'''
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        super(TestBasicPolicyNegative, cls).setUpClass()

    def runTest(self):
        pass

    @test.attr(type=['suite1', 'vcenter'])
    @preposttest_wrapper
    def test_remove_policy_with_ref(self):
        ''' This tests the following scenarios.
            1. Test to validate that policy removal will fail when it referenced with VN.
            2. validate vn_policy data in api-s against quantum-vn data, when created and unbind policy from VN thru quantum APIs.
            3. validate policy data in api-s against quantum-policy data, when created and deleted thru quantum APIs.
        '''
        vn1_name = get_random_name('vn4')
        vn1_subnets = ['10.1.1.0/24']
        policy_name = get_random_name('policy1')
        rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
        ]
        policy_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name,
                rules_list=rules,
                inputs=self.inputs,
                connections=self.connections))
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        vn1_fixture.bind_policies(
            [policy_fixture.policy_fq_name], vn1_fixture.vn_id)
        assert vn1_fixture.verify_on_setup()
        ret = policy_fixture.verify_on_setup()
        # Fix: idiomatic truthiness test instead of `== False`.
        if not ret['result']:
            self.logger.error(
                "Policy %s verification failed after setup" % policy_name)
        assert ret['result'], ret['msg']
        self.logger.info(
            "Done with setup and verification, moving onto test ..")
        # try to remove policy which was referenced with VN.
        policy_removal = True
        try:
            self.vnc_lib.network_policy_delete(id=policy_fixture.get_id())
        except Exception:
            # Expected path: the API must refuse to delete a referenced
            # policy.  (Removed the unused `pol_id` variable and the unused
            # exception binding.)
            policy_removal = False
        self.assertFalse(
            policy_removal,
            'Policy removal succeed as not expected since policy is referenced with VN')
        #assert vn1_fixture.verify_on_setup()
        # policy_fixture.verify_policy_in_api_server()
        return True
    # end test_remove_policy_with_ref
# end of class TestBasicPolicyNegative
class TestBasicPolicyModify(BasePolicyTest):
    '''Policy modification related tests.'''
    _interface = 'json'

    @classmethod
    def setUpClass(cls):
        super(TestBasicPolicyModify, cls).setUpClass()

    def runTest(self):
        pass

    @test.attr(type=['suite1', 'vcenter'])
    @preposttest_wrapper
    def test_policy_modify_vn_policy(self):
        """ Configure policies based on topology;
        Then add a new policy to a VN's existing policy list and finally
        unbind all policies, verifying system state after each change.
        """
        ###
        # Get config for test from topology
        # very simple topo will do, one vn, one vm, one policy, 3 rules
        topology_class_name = sdn_single_vm_policy_topology.sdn_single_vm_policy_config
        self.logger.info(
            "Scenario for the test used is: %s" %
            (topology_class_name))
        # set project name
        try:
            # provided by wrapper module if run in parallel test env
            # Bug fix: the password line contained a '<PASSWORD>' redaction
            # placeholder, which is a syntax error.  Use the project user
            # password, matching the other tests in this file.
            topo = topology_class_name(
                project=self.project.project_name,
                username=self.project.username,
                password=self.project.project_user_password)
        except NameError:
            topo = topology_class_name()
        ###
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(
                self.connections,
                topo))
        out = setup_obj.topo_setup()
        # Fix: was a bare `assertEqual`; use the TestCase method.
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo, config_topo = out['data']
        ###
        # Verify [and assert on fail] after setup
        # Calling system policy verification, pick any policy fixture to
        # access fixture verification
        policy_name = topo.policy_list[0]
        system_vna_verify_policy(
            self,
            config_topo['policy'][policy_name],
            topo,
            'setup')
        ###
        # Test procedure:
        # Test adding new policy to VN's exiting policy list
        state = "add policy: "
        test_vm = topo.vmc_list[0]
        test_vn = topo.vn_of_vm[test_vm]
        # Init test data, take backup of current topology
        initial_vn_policy_list = copy.copy(topo.vn_policy[test_vn])
        new_policy_to_add = policy_test_utils.get_policy_not_in_vn(
            initial_vn_policy_list,
            topo.policy_list)
        if not new_policy_to_add:
            # Bug fix: `result` was the *string* 'False' and `msg` carried an
            # unfilled %s placeholder.
            result = False
            msg = ("test test_policy_modify_vn_policy cannot be run as "
                   "required config not available in topology; aborting test")
            self.logger.info(msg)
            self.assertEqual(result, True, msg)
        initial_policy_vn_list = copy.copy(topo.policy_vn[new_policy_to_add])
        new_vn_policy_list = copy.copy(initial_vn_policy_list)
        new_policy_vn_list = copy.copy(initial_policy_vn_list)
        new_vn_policy_list.append(new_policy_to_add)
        new_policy_vn_list.append(test_vn)
        test_vn_fix = config_topo['vn'][test_vn]
        test_vn_id = test_vn_fix.vn_id
        # configure new policy
        config_topo['policy'][new_policy_to_add] = self.useFixture(
            PolicyFixture(
                policy_name=new_policy_to_add,
                rules_list=topo.rules[new_policy_to_add],
                inputs=self.inputs,
                connections=self.connections))
        # get new policy_set to be pushed for the vn
        test_policy_fq_names = []
        for policy in new_vn_policy_list:
            name = config_topo['policy'][policy].policy_fq_name
            test_policy_fq_names.append(name)
        self.logger.info(
            "adding policy %s to vn %s" %
            (new_policy_to_add, test_vn))
        test_vn_fix.bind_policies(test_policy_fq_names, test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        self.logger.info(
            "New policy list of VN %s is %s" %
            (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        topo.policy_vn[new_policy_to_add] = new_policy_vn_list
        system_vna_verify_policy(
            self,
            config_topo['policy'][new_policy_to_add],
            topo,
            state)
        # Test unbinding all policies from VN
        state = "unbinding all policies"
        test_vn_fix.unbind_policies(test_vn_id)
        # wait for tables update before checking after making changes to system
        time.sleep(5)
        current_vn_policy_list = new_vn_policy_list
        new_vn_policy_list = []
        self.logger.info(
            "New policy list of VN %s is %s" %
            (test_vn, new_vn_policy_list))
        # update expected topology with this new info for verification
        topo.vn_policy[test_vn] = new_vn_policy_list
        for policy in current_vn_policy_list:
            topo.policy_vn[policy].remove(test_vn)
        system_vna_verify_policy(
            self,
            config_topo['policy'][new_policy_to_add],
            topo,
            state)
        return True
    # end test_policy_modify
# end of class TestBasicPolicyModify
class TestDetailedPolicy0(BasePolicyTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(TestDetailedPolicy0, cls).setUpClass()
@test.attr(type=['vcenter', 'suite1'])
@preposttest_wrapper
def test_repeated_policy_modify(self):
""" Configure policies based on topology; Replace VN's existing policy [same policy name but with different rule set] multiple times and verify.
"""
###
# Get config for test from topology
# very simple topo will do, one vn, one vm, multiple policies with n
# rules
topology_class_name = sdn_single_vm_multiple_policy_topology.sdn_single_vm_multiple_policy_config
self.logger.info(
"Scenario for the test used is: %s" %
(topology_class_name))
# set project name
try:
# provided by wrapper module if run in parallel test env
topo = topology_class_name(
project=self.project.project_name,
username=self.project.username,
password=self.<PASSWORD>)
except NameError:
topo = topology_class_name()
###
# Test setup: Configure policy, VN, & VM
# return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
| |
import unittest
from textwrap import TextWrapper, wrap, fill, dedent, indent, shorten
class BaseTestCase(unittest.TestCase):
    """Parent class with utility methods for textwrap tests."""

    def show(self, textin):
        """Return a printable description of *textin* (a wrap() result list
        or a plain string) for use in failure messages."""
        if isinstance(textin, list):
            result = []
            for i in range(len(textin)):
                result.append(' %d: %r' % (i, textin[i]))
            result = '\n'.join(result) if result else ' no lines'
        elif isinstance(textin, str):
            result = ' %s\n' % repr(textin)
        else:
            # Bug fix: any other input type previously raised
            # UnboundLocalError; fall back to repr().
            result = repr(textin)
        return result

    def check(self, result, expect):
        """Assert result == expect, showing both values on failure."""
        self.assertEqual(result, expect, 'expected:\n%s\nbut got:\n%s' % (
            self.show(expect), self.show(result)))

    def check_wrap(self, text, width, expect, **kwargs):
        """Wrap *text* at *width* (passing **kwargs to wrap) and compare."""
        result = wrap(text, width, **kwargs)
        self.check(result, expect)

    def check_split(self, text, expect):
        """Compare self.wrapper._split(text) against *expect*."""
        result = self.wrapper._split(text)
        self.assertEqual(result, expect, '\nexpected %r\nbut got %r' % (
            expect, result))
class WrapTestCase(BaseTestCase):
    def setUp(self):
        # Shared TextWrapper for tests that exercise _split() via check_split().
        self.wrapper = TextWrapper(width=45)
    def test_simple(self):
        # Plain words wrapped at several widths, including one wide enough
        # to keep the whole text on a single line.
        text = "Hello there, how are you this fine day? I'm glad to hear it!"
        self.check_wrap(text, 12, ['Hello there,', 'how are you',
            'this fine', "day? I'm", 'glad to hear', 'it!'])
        self.check_wrap(text, 42, [
            'Hello there, how are you this fine day?', "I'm glad to hear it!"])
        self.check_wrap(text, 80, [text])
def test_empty_string(self):
self.check_wrap('', 6, [])
self.check_wrap('', 6, [], drop_whitespace=False)
def test_empty_string_with_initial_indent(self):
self.check_wrap('', 6, [], initial_indent='++')
self.check_wrap('', 6, [], initial_indent='++', drop_whitespace=False)
    def test_whitespace(self):
        # Whitespace munging across embedded line breaks and tabs, plus tab
        # expansion with the default and a custom tabsize.
        text = """This is a paragraph that already has
line breaks. But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are tabbed too.
What a mess!
"""
        expect = ['This is a paragraph that already has line',
            'breaks. But some of its lines are much',
            'longer than the others, so it needs to be',
            'wrapped. Some lines are tabbed too. What a', 'mess!']
        wrapper = TextWrapper(45, fix_sentence_endings=True)
        result = wrapper.wrap(text)
        self.check(result, expect)
        result = wrapper.fill(text)
        self.check(result, '\n'.join(expect))
        text = '\tTest\tdefault\t\ttabsize.'
        expect = [' Test default tabsize.']
        self.check_wrap(text, 80, expect)
        text = '\tTest\tcustom\t\ttabsize.'
        expect = [' Test custom tabsize.']
        self.check_wrap(text, 80, expect, tabsize=4)
    def test_fix_sentence_endings(self):
        # fix_sentence_endings=True normalizes spacing after sentence-ending
        # punctuation when lines are rejoined.
        wrapper = TextWrapper(60, fix_sentence_endings=True)
        text = 'A short line. Note the single space.'
        expect = ['A short line. Note the single space.']
        self.check(wrapper.wrap(text), expect)
        text = 'Well, Doctor? What do you think?'
        expect = ['Well, Doctor? What do you think?']
        self.check(wrapper.wrap(text), expect)
        text = 'Well, Doctor?\nWhat do you think?'
        self.check(wrapper.wrap(text), expect)
        text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
        expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
        self.check(wrapper.wrap(text), expect)
        wrapper.width = 20
        expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
        self.check(wrapper.wrap(text), expect)
        text = 'And she said, "Go to hell!"\nCan you believe that?'
        expect = ['And she said, "Go to', 'hell!" Can you', 'believe that?']
        self.check(wrapper.wrap(text), expect)
        wrapper.width = 60
        expect = ['And she said, "Go to hell!" Can you believe that?']
        self.check(wrapper.wrap(text), expect)
        text = 'File stdio.h is nice.'
        expect = ['File stdio.h is nice.']
        self.check(wrapper.wrap(text), expect)
    def test_wrap_short(self):
        # A short paragraph with an embedded newline re-flows correctly.
        text = 'This is a\nshort paragraph.'
        self.check_wrap(text, 20, ['This is a short', 'paragraph.'])
        self.check_wrap(text, 40, ['This is a short paragraph.'])
    def test_wrap_short_1line(self):
        # A short line, with and without an initial indent.
        text = 'This is a short line.'
        self.check_wrap(text, 30, ['This is a short line.'])
        self.check_wrap(text, 30, ['(1) This is a short line.'],
            initial_indent='(1) ')
    def test_hyphenated(self):
        # Hyphens inside words are preferred break points.
        text = (
            "this-is-a-useful-feature-for-reformatting-posts-from-tim-peters'ly"
        )
        self.check_wrap(text, 40, ['this-is-a-useful-feature-for-',
            "reformatting-posts-from-tim-peters'ly"])
        self.check_wrap(text, 41, ['this-is-a-useful-feature-for-',
            "reformatting-posts-from-tim-peters'ly"])
        self.check_wrap(text, 42, [
            'this-is-a-useful-feature-for-reformatting-',
            "posts-from-tim-peters'ly"])
        # Breaking at width 1 with break_long_words=False yields each
        # hyphen-delimited chunk on its own line.
        expect = (
            "this-|is-|a-|useful-|feature-|for-|reformatting-|posts-|from-|tim-|peters'ly"
            .split('|'))
        self.check_wrap(text, 1, expect, break_long_words=False)
        self.check_split(text, expect)
        self.check_split('e-mail', ['e-mail'])
        self.check_split('Jelly-O', ['Jelly-O'])
        self.check_split('half-a-crown', 'half-|a-|crown'.split('|'))
    def test_hyphenated_numbers(self):
        # Hyphenated dates/numbers (1994-01-26, 7-11) must not be split at
        # the hyphen.
        text = """Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was
released on 1994-02-15."""
        self.check_wrap(text, 30, ['Python 1.0.0 was released on',
            '1994-01-26. Python 1.0.1 was', 'released on 1994-02-15.'])
        self.check_wrap(text, 40, [
            'Python 1.0.0 was released on 1994-01-26.',
            'Python 1.0.1 was released on 1994-02-15.'])
        self.check_wrap(text, 1, text.split(), break_long_words=False)
        text = 'I do all my shopping at 7-11.'
        self.check_wrap(text, 25, ['I do all my shopping at', '7-11.'])
        self.check_wrap(text, 27, ['I do all my shopping at', '7-11.'])
        self.check_wrap(text, 29, ['I do all my shopping at 7-11.'])
        self.check_wrap(text, 1, text.split(), break_long_words=False)
    def test_em_dash(self):
        # Em-dashes ("--", "---") are legal break points, before and after.
        text = 'Em-dashes should be written -- thus.'
        self.check_wrap(text, 25, ['Em-dashes should be', 'written -- thus.'])
        self.check_wrap(text, 29, ['Em-dashes should be written', '-- thus.'])
        expect = ['Em-dashes should be written --', 'thus.']
        self.check_wrap(text, 30, expect)
        self.check_wrap(text, 35, expect)
        self.check_wrap(text, 36, ['Em-dashes should be written -- thus.'])
        text = 'You can also do--this or even---this.'
        expect = ['You can also do', '--this or even', '---this.']
        self.check_wrap(text, 15, expect)
        self.check_wrap(text, 16, expect)
        expect = ['You can also do--', 'this or even---', 'this.']
        self.check_wrap(text, 17, expect)
        self.check_wrap(text, 19, expect)
        expect = ['You can also do--this or even', '---this.']
        self.check_wrap(text, 29, expect)
        self.check_wrap(text, 31, expect)
        expect = ['You can also do--this or even---', 'this.']
        self.check_wrap(text, 32, expect)
        self.check_wrap(text, 35, expect)
        # _split() keeps em-dashes as separate chunks.
        text = "Here's an -- em-dash and--here's another---and another!"
        expect = ["Here's", ' ', 'an', ' ', '--', ' ', 'em-', 'dash', ' ',
            'and', '--', "here's", ' ', 'another', '---', 'and', ' ',
            'another!']
        self.check_split(text, expect)
        text = 'and then--bam!--he was gone'
        expect = ['and', ' ', 'then', '--', 'bam!', '--', 'he', ' ', 'was',
            ' ', 'gone']
        self.check_split(text, expect)
    def test_unix_options(self):
        # Unix-style long options ("--dry-run") may break at internal
        # hyphens but not at the leading dashes.
        text = 'You should use the -n option, or --dry-run in its long form.'
        self.check_wrap(text, 20, ['You should use the',
            '-n option, or --dry-', 'run in its long', 'form.'])
        self.check_wrap(text, 21, ['You should use the -n',
            'option, or --dry-run', 'in its long form.'])
        expect = ['You should use the -n option, or',
            '--dry-run in its long form.']
        self.check_wrap(text, 32, expect)
        self.check_wrap(text, 34, expect)
        self.check_wrap(text, 35, expect)
        self.check_wrap(text, 38, expect)
        expect = ['You should use the -n option, or --dry-',
            'run in its long form.']
        self.check_wrap(text, 39, expect)
        self.check_wrap(text, 41, expect)
        expect = ['You should use the -n option, or --dry-run',
            'in its long form.']
        self.check_wrap(text, 42, expect)
        text = 'the -n option, or --dry-run or --dryrun'
        expect = ['the', ' ', '-n', ' ', 'option,', ' ', 'or', ' ',
            '--dry-', 'run', ' ', 'or', ' ', '--dryrun']
        self.check_split(text, expect)
    def test_funky_hyphens(self):
        # Hyphen/dash edge cases the splitting regex must get right.
        self.check_split('what the--hey!', ['what', ' ', 'the', '--', 'hey!'])
        self.check_split('what the--', ['what', ' ', 'the--'])
        self.check_split('what the--.', ['what', ' ', 'the--.'])
        self.check_split('--text--.', ['--text--.'])
        self.check_split('--option', ['--option'])
        self.check_split('--option-opt', ['--option-', 'opt'])
        self.check_split('foo --option-opt bar', ['foo', ' ', '--option-',
            'opt', ' ', 'bar'])
    def test_punct_hyphens(self):
        # Hyphenated words surrounded by punctuation still split correctly.
        self.check_split("the 'wibble-wobble' widget", ['the', ' ',
            "'wibble-", "wobble'", ' ', 'widget'])
        self.check_split('the "wibble-wobble" widget', ['the', ' ',
            '"wibble-', 'wobble"', ' ', 'widget'])
        self.check_split('the (wibble-wobble) widget', ['the', ' ',
            '(wibble-', 'wobble)', ' ', 'widget'])
        self.check_split("the ['wibble-wobble'] widget", ['the', ' ',
            "['wibble-", "wobble']", ' ', 'widget'])
        self.check_split("what-d'you-call-it.", "what-d'you-|call-|it.".
            split('|'))
    def test_funky_parens(self):
        # Parenthesized chunks stay whole; spaces still split.
        self.check_split('foo (--option) bar', ['foo', ' ', '(--option)',
            ' ', 'bar'])
        self.check_split('foo (bar) baz', ['foo', ' ', '(bar)', ' ', 'baz'])
        self.check_split('blah (ding dong), wubba', ['blah', ' ', '(ding',
            ' ', 'dong),', ' ', 'wubba'])
    def test_drop_whitespace_false(self):
        # drop_whitespace=False preserves runs of spaces in the output lines.
        text = ' This is a sentence with much whitespace.'
        self.check_wrap(text, 10, [' This is a', ' ', 'sentence ',
            'with ', 'much white', 'space.'], drop_whitespace=False)
def test_drop_whitespace_false_whitespace_only(self):
self.check_wrap(' ', 6, [' '], drop_whitespace=False)
def test_drop_whitespace_false_whitespace_only_with_indent(self):
self.check_wrap(' ', 6, [' '], drop_whitespace=False,
initial_indent=' ')
def test_drop_whitespace_whitespace_only(self):
self.check_wrap(' ', 6, [])
    def test_drop_whitespace_leading_whitespace(self):
        # Leading whitespace on the first line is preserved; later lines are
        # stripped.
        text = ' This is a sentence with leading whitespace.'
        self.check_wrap(text, 50, [
            ' This is a sentence with leading whitespace.'])
        self.check_wrap(text, 30, [' This is a sentence with',
            'leading whitespace.'])
    def test_drop_whitespace_whitespace_line(self):
        # A whitespace-only middle line is kept only with
        # drop_whitespace=False.
        text = 'abcd efgh'
        self.check_wrap(text, 6, ['abcd', ' ', 'efgh'], drop_whitespace=
            False)
        self.check_wrap(text, 6, ['abcd', 'efgh'])
def test_drop_whitespace_whitespace_only_with_indent(self):
self.check_wrap(' ', 6, [], initial_indent='++')
    def test_drop_whitespace_whitespace_indent(self):
        # Indents are applied after whitespace dropping.
        self.check_wrap('abcd efgh', 6, [' abcd', ' efgh'],
            initial_indent=' ', subsequent_indent=' ')
    def test_split(self):
        # Ensure that the standard _split() case works: words, spaces,
        # em-dashes, hyphenated words, and option switches.
        text = 'Hello there -- you goof-ball, use the -b option!'
        result = self.wrapper._split(text)
        self.check(result, ['Hello', ' ', 'there', ' ', '--', ' ', 'you',
            ' ', 'goof-', 'ball,', ' ', 'use', ' ', 'the', ' ', '-b', ' ',
            'option!'])
def test_break_on_hyphens(self):
text = 'yaba daba-doo'
self.check_wrap(text, 10, ['yaba daba-', 'doo'], break_on_hyphens=True)
self.check_wrap(text, 10, ['yaba', 'daba-doo'], break_on_hyphens=False)
def test_bad_width(self):
text = "Whatever, it doesn't matter."
self.assertRaises(ValueError, wrap, text, 0)
self.assertRaises(ValueError, wrap, text, -1)
def test_no_split_at_umlaut(self):
text = 'Die Empfänger-Auswahl'
self.check_wrap(text, 13, ['Die', 'Empfänger-', 'Auswahl'])
def test_umlaut_followed_by_dash(self):
text = 'aa ää-ää'
self.check_wrap(text, 7, ['aa ää-', 'ää'])
def test_non_breaking_space(self):
text = 'This is a sentence with non-breaking\xa0space.'
self.check_wrap(text, 20, | |
"""
Authors: <NAME>, <NAME>
Project: Graduation Thesis: GIAdog
This file provides an interface to control and monitor the simulation status.
"""
import numpy as np
import pybullet_utils.bullet_client as bc
from scipy import interpolate
from gym import spaces
from Scene import TerrainScene
from typing import List, Tuple, Callable
from pyBulletPainter import create_ball, update_ball
from bullet_dataclasses import ContactInfo, JointState, LinkState
from giadog.src.GymEnvs.RewardFunctions.reward_handler import reward_handler
from kinematics import transformation_matrices, solve_leg_IK, FTG_handler,\
estimate_base_linear_velocity
from __env__ import EPSILON, MESH_SCALE, GRAVITY_VECTOR, SIM_SECONDS_PER_STEP, \
TOES_IDS, EXTERNAL_FORCE_MAGN, JOINTS_IDS, THIGHS_IDS, SHANKS_IDS, \
EXTERNAL_FORCE_TIME, ANGULAR_VEL_NOISE, ORIENTATION_NOISE, VELOCITY_NOISE, \
ACCELERATION_NOISE, JOINT_ANGLE_NOISE, JOINT_VELOCITY_NOISE, \
SELECTED_GAIT, LEG_SPAN, VEL_TH, SWIGN_PH, GRAVITY_VECTOR, MIN_DESIRED_VEL,\
MIN_DESIRED_TRAV, GOAL_RADIUS_2, MAX_ITER_TIME, DIRECTED_TURNING, \
ACTION_SPACE_SIZE, STATE_SIZE, PRIVILEGED_STATE_SIZE, REWARD_FUNCTION
class QuadrupedRobot(object):
""" Control and monitor the simulation of the spot-mini in pybullet. """
UPDATE_METHODS = [
'update_command_dir',
'update_position_orientation',
'update_rotation_matrix',
'update_wf_velocity',
'update_acceleration',
'update_linear_velocity',
'update_angular_velocity',
'update_joints_sensors',
'update_joints_torques',
'update_toes_contact_sensors',
'update_toes_contact_force',
'update_thighs_contact_info',
'update_shanks_contact_info',
'update_height_scan',
'update_toes_force',
'update_external_force',
'update_transf_matrices',
'update_is_fallen',
'update_gravity_vector',
'update_feet_target_hist',
'update_joint_error_hist',
'update_joint_velocity_hist',
'update_base_linear_velocity_estimation',
'dummy_update'
]
DEPENDENCES_METHODS = {
'update_rotation_matrix' : ['update_position_orientation'],
'update_acceleration' : ['update_wf_velocity'],
'update_linear_velocity' : ['update_wf_velocity', 'update_rotation_matrix'],
'update_angular_velocity' : ['update_wf_velocity', 'update_rotation_matrix'],
'update_joints_torques' : ['update_joints_sensors'],
'update_toes_contact_sensors' : ['update_rotation_matrix'],
'update_toes_contact_force' : ['update_toes_contact_sensors'],
'update_external_force' : ['update_rotation_matrix'],
'update_transf_matrices' : ['update_rotation_matrix'],
'update_is_fallen' : ['update_rotation_matrix'],
'update_gravity_vector' : ['update_rotation_matrix'],
'update_joint_error_hist' : ['update_joints_sensors'],
'update_joint_velocity_hist' : ['update_joints_sensors'],
'update_base_linear_velocity_estimation': ['update_joints_sensors', 'update_toes_contact_sensors']
}
CALLBACK_METHODS = {
'update_wf_velocity',
}
MANDATORY_METHODS = [
'update_is_fallen',
'update_traversability_traj',
'update_command_dir',
"update_transf_matrices"
]
    def __init__(
        self,
        giadog_urdf_file: str,
        pybullet_client: bc.BulletClient,
        update_methods: List[str],
        state_features: List[str],
        privileged_state_features: List[str],
        # Annotation fix: these are integer sizes, not gym spaces.
        observation_space_size : int = STATE_SIZE + PRIVILEGED_STATE_SIZE,
        action_space_size : int = ACTION_SPACE_SIZE
    ):
        """
        Arguments:
        ----------
        giadog_urdf_file: str
            Path to the URDF file of the quadruped robot.
        pybullet_client: pybullet_utils.bullet_client.BulletClient
            Client connected to the simulation.
        update_methods: List[str]
            Names of the state-refresh methods to run each step; must be a
            subset of UPDATE_METHODS.  Missing dependences are added
            automatically and the list is sorted into canonical order.
        state_features: List[str]
            Feature names composing the regular observation.
        privileged_state_features: List[str]
            Feature names composing the privileged observation.
        observation_space_size: int, optional
            Length of the flat observation vector.
        action_space_size: int, optional
            Length of the flat action vector (actions normalized to [-1, 1]).
        """
        self.giadog_urdf_file = giadog_urdf_file
        self.client = pybullet_client
        self.state_features = state_features
        self.privileged_state_features = privileged_state_features
        # Unbounded observation box; actions are normalized to [-1, 1].
        self.observation_space = spaces.Box(
            low   = np.float32(-np.inf * np.ones((observation_space_size,))),
            high  = np.float32(np.inf * np.ones((observation_space_size,))),
            dtype = np.float32
        )
        self.action_space = spaces.Box(
            low   = np.float32(-np.ones((action_space_size,))),
            high  = np.float32(np.ones((action_space_size,))),
            dtype = np.float32
        )
        # Verify that all data in states are valid
        for method in update_methods:
            if method not in self.UPDATE_METHODS:
                raise Exception(f'Method {method} not in class {QuadrupedRobot}')
        # Sort methods according to dependencies
        self.to_update = update_methods
        # We check which dependencies are not updated.  Fixed-point loop:
        # keep adding missing dependences until the list is closed.
        dependences = []
        change = True
        while change:
            change = False
            for method in self.to_update + dependences:
                if method in self.DEPENDENCES_METHODS:
                    for d_method in self.DEPENDENCES_METHODS[method]:
                        if d_method not in self.to_update + dependences:
                            change = True
                            dependences.append(d_method)
        self.to_update += dependences
        # Canonical execution order = position in UPDATE_METHODS.
        self.to_update.sort(
            key=lambda method : self.UPDATE_METHODS.index(method)
        )
        # Reward callable selected by configuration.
        self.__get_reward = reward_handler(REWARD_FUNCTION)
        self.first_execution = True
        self.scene = None
        # Debug-marker id for the goal; False until the ball is created.
        self.goal_ball_id = False
        self.foot_trajectory_generator = FTG_handler(SELECTED_GAIT)
        self.directed_turning = DIRECTED_TURNING
def __set_toes_friction_coefficients(self, friction_coefficient: float):
"""
Changes the friction coeficient of the quadruped toes. It sets the
lateral friction coeficient (the one that is mainly used by
pybullet)
Arguments:
---------
* friction_coefficient: float
The desired friction coeficient to be set on the quadruped
toes.
"""
for toe_id in TOES_IDS:
self.client.changeDynamics(self.robot_id, toe_id,
lateralFriction = friction_coefficient)
self.client.changeDynamics(self.terrain, -1,
lateralFriction = friction_coefficient)
    def calculate_terrain_height_function(self):
        """
        Calculates the height function of the terrain.

        Builds `self.terrain_height`, a callable (x, y) -> z linearly
        interpolating the simulated terrain, after correcting the average
        vertical offset between `self.terrain_array` and the loaded mesh.
        Also stores that average offset in `self.z_diff`.
        """
        # First we get the terrain x, y coordinates
        rows, cols = self.terrain_array.shape
        lenght = rows * MESH_SCALE[0]
        height = cols * MESH_SCALE[1]
        x_0, x_f = -lenght/2, lenght/2
        y_0, y_f = -height/2, height/2
        x = np.linspace(x_0, x_f, num = rows, endpoint=True)
        y = np.linspace(y_0, y_f, num = cols, endpoint=True)
        # Using the rayTest method, we calculate the average height difference
        # betweent the terrain array and the terrain in the simulation.
        # NOTE(review): this casts rows*cols rays (one per cell), which can
        # be slow for large terrains.
        self.z_diff = 0
        for i in range(rows):
            for j in range(cols):
                # rayTest result: ray_info[3] is the world hit position.
                ray_info = self.client.rayTest([x[i],y[j], 50],
                    [x[i],y[j], -50])[0]
                self.z_diff += self.terrain_array[i][j] - ray_info[3][2]
        self.z_diff /= rows*cols
        # We calculate the new terrain array by substracting the average height
        # difference from the original terrain array
        z = self.terrain_array - self.z_diff
        # We create the height function of the terrain
        # By using a 2d linear interpolation.
        # NOTE(review): scipy.interpolate.interp2d is deprecated in recent
        # SciPy releases — migrate to RegularGridInterpolator on upgrade.
        th_func = interpolate.interp2d(x, y, z, kind='linear',
            fill_value = np.NaN)
        def f(x, y):
            # interp2d returns an array; unwrap to a scalar height.
            return th_func(x,y)[0]
        self.terrain_height = f
def __foot_scan_coordinates(self, x: float, y: float, alpha: float) -> np.ndarray:
"""
Given a robot toe position and orientation, returns the positions
of the toe height sensor coordinates.
Arguments:
----------
x: float
x coordinate of the robot toe. [In the world frame]
y: float
y coordinate of the robot toe. [In the world frame]
alpha: float
Orientation of the toe.
Return:
-------
numpy.ndarray, shape (9, 2)
Array with each of the toe height sensor coordinates.
"""
n = 8 # Number of points around each foot
r = 0.07 # Radius of the height sensors around each toe.
P = np.empty([n+1, 2])
phi = 2*np.pi/n
for i in range(n):
angle_i = alpha + i* phi
P[i] = np.array([x + r * np.cos(angle_i), y + r * np.sin(angle_i)])
P[i+1] = np.array([x, y])
return P
def apply_force(self, F: List[float]):
"""
Applies a force to the base of the robot.
Arguments:
----------
F: List[float], shape (3,)
Force vector to be applied to the base of the robot.
Expressed in the world frame.
"""
self.client.applyExternalForce(
self.robot_id,
-1,
F,
[0,0,0],
self.client.WORLD_FRAME
)
def __set_external_force(self):
"""
Set and randomize the external force applied to the robot base.
Refrenece:
----------
https://github.com/leggedrobotics/learning_quadrupedal_locomotion_over_challenging_terrain_supplementary/blob/master/include/environment/environment_c010.hpp
Line: 1575
"""
# The module is a number sampled from 0 to E Newtons
# In the original paper the force is sampled from 0 to 120 Newtons
force_module = np.random.uniform() * EXTERNAL_FORCE_MAGN # N
# Randomize the direction of the force
az = np.pi * np.random.uniform()
el = np.pi/2 * np.random.uniform()
force_norm = np.array([
np.cos(az) * np.cos(el),
np.sin(az) * np.cos(el),
np.sin(el),
])
# External force refered to the world frame
self.external_force_wf = force_norm * force_module
def actuate_joints(self, joint_target_positions: List[float]):
"""
Moves the robot joints to a given target position.
Arguments:
---------
joint_target_positions: List[float], shape (12,)
Quadruped joints desired angles.
The order is the same as for the robot joints_ids.
The order should be as follows:
'motor_front_left_hip'
'motor_front_left_upper_leg'// "Front left thigh"
'motor_front_left_lower_leg'// "Front left shank"
'motor_front_right_hip'
'motor_front_right_upper_leg'// "Front right thigh"
'motor_front_right_lower_leg'// "Front right shank"
'motor_back_left_hip'
'motor_back_left_upper_leg'// "Back left thigh"
'motor_back_left_lower_leg'// "Back left shank"
'motor_back_right_hip'
'motor_back_right_upper_leg'// "Back right thigh"
'motor_back_right_lower_leg'// "Back right shank"
Note: It may be useful to add the Kp and Kd as inputs
"""
try:
self.client.setJointMotorControlArray(
bodyUniqueId = self.robot_id,
jointIndices = JOINTS_IDS,
controlMode = self.client.POSITION_CONTROL,
targetPositions = joint_target_positions,
)
except Exception as e:
print(f'\033[1;93m[w]\033[0m {e}.')
def add_to_scene(
self,
scene : TerrainScene,
x_o : float = 0.0,
y_o : float = 0.0,
fix_robot_base : bool = False,
first_execution: bool = False,
):
"""
Adds the quadruped robot to the scene.
Arguments:
----------
scene: Scene
Scene object to add the robot to.
x_o: float, optional
x coordintate of the robot initial position (In the world
frame).
Default: 0.0
y_o: float, optional
y coordintate of the robot initial position (In the world
frame).
Default: 0.0
fix_robot_base: bool, optional
If True, the robot base will be fixed in the world frame.
(The robot will be floating in the air, but still able to
move its feet)
Default: False
"""
self.scene = scene
self.terrain_array = self.scene.terrain_array
self.terrain = self.scene.terrain_id
self.calculate_terrain_height_function()
# Obtain the maximum height around the starting point
# Obtain the maximum height around the starting point
z_x_min = self.terrain_height(x_o -0.3, y_o)
z_x_max = self.terrain_height(x_o + 0.3, y_o)
z_y_min = self.terrain_height(x_o, y_o+ 0.3)
z_y_max = self.terrain_height(x_o, y_o- 0.3)
z_med = self.terrain_height(x_o, y_o)
z_o = np.max([z_x_min, z_x_max, z_y_min, z_y_max, z_med])
z_o += 0.3
if first_execution:
self.robot_id = self.client.loadURDF(
self.giadog_urdf_file ,
[0, 0, z_o],
flags = self.client.URDF_ENABLE_CACHED_GRAPHICS_SHAPES,
useFixedBase = fix_robot_base,
)
# Torque sensors are enable on the quadruped toes
for toe_id in TOES_IDS:
self.client.enableJointForceTorqueSensor(
bodyUniqueId = self.robot_id,
jointIndex = toe_id,
enableSensor = True,
)
for ID in JOINTS_IDS:
self.client.resetJointState(self.robot_id, ID, 0)
else:
self.client.resetBasePositionAndOrientation(
bodyUniqueId = self.robot_id,
posObj | |
in embeddding but yes in files
else:
embedded_dataset = None
if load_ground_truth:
print("loading ground truth...")
box_data, qgt = self.load_ground_truth()
else:
box_data = None
qgt = None
# if os.path.exists(self.index_path()):
# vec_index = self.index_path() # start actor elsewhere
# else:
vec_index = None
return EvDataset(
root=self.image_root,
paths=self.paths,
embedded_dataset=embedded_dataset,
query_ground_truth=qgt,
box_data=box_data,
embedding=None, # model used for embedding
fine_grained_embedding=fine_grained_embedding,
fine_grained_meta=fine_grained_meta,
vec_index_path=None,
vec_index=vec_index,
)
def split_df(df, n_splits):
    """Split *df* row-wise into ``n_splits`` consecutive chunks.

    When the row count is not evenly divisible, the first
    ``len(df) % n_splits`` chunks receive one extra row, so chunk
    sizes differ by at most one. Returns a list of DataFrame views.
    """
    total = df.shape[0]
    quota, extra = divmod(total, n_splits)
    sizes = [quota + 1 if k < extra else quota for k in range(n_splits)]
    assert sum(sizes) == total
    assert len(sizes) == n_splits
    bounds = np.cumsum([0] + sizes)
    pieces = [df.iloc[lo:hi] for lo, hi in zip(bounds[:-1], bounds[1:])]
    # sanity check: no rows lost or duplicated across the chunks
    assert df.shape[0] == sum(piece.shape[0] for piece in pieces)
    return pieces
import pickle
def convert_dbidx(ev: EvDataset, ds: SeesawDatasetManager, prepend_ev: str = ""):
    """Remap ``ev``'s box-data ``dbidx`` column onto ``ds``'s file ordering
    and persist the result as ``ds``'s ground truth.

    ``prepend_ev`` is prefixed to ``ev``'s paths so they line up with the
    ``file_path`` entries in ``ds.file_meta``.
    """
    # dbidx under the new ordering: row position within ds.file_meta
    target = ds.file_meta.assign(dbidx=np.arange(ds.file_meta.shape[0]))
    # dbidx under the old ordering: row position within ev.paths
    source = pd.DataFrame(
        {"file_path": prepend_ev + ev.paths, "dbidx": np.arange(len(ev.paths))}
    )
    mapping = pd.merge(
        target,
        source,
        left_on="file_path",
        right_on="file_path",
        suffixes=["_new", "_old"],
        how="outer",
    )
    # every file in the new dataset must have a counterpart in ev
    assert mapping[mapping.dbidx_new.isna()].shape[0] == 0
    joined = pd.merge(
        ev.box_data,
        mapping[["dbidx_new", "dbidx_old"]],
        left_on="dbidx",
        right_on="dbidx_old",
        how="left",
    )
    joined = joined.assign(dbidx=joined.dbidx_new)
    # drop the helper columns, keeping the original column order
    remapped = joined[[c for c in joined if c not in ["dbidx_new", "dbidx_old"]]]
    ds.save_ground_truth(remapped)
def infer_qgt_from_boxes(box_data, num_files):
    """Derive a binary query ground-truth table from box annotations.

    Returns a (num_files x categories) frame where entry [i, c] is 1 iff
    at least one box of category c exists for file index i; files with no
    boxes get all-zero rows.
    """
    counts = (
        box_data.groupby(["dbidx", "category"]).size().unstack(level=1).fillna(0)
    )
    # extend to every file index, zero-filling files without annotations
    full = counts.reindex(np.arange(num_files)).fillna(0)
    # presence matters, not the box count
    return full.clip(0, 1)
def infer_coarse_embedding(pdtab):
    """Reduce fine-grained (multi-patch) vectors to one unit-norm vector
    per image.

    Keeps only rows at each file's maximum zoom level, averages their
    vectors per ``dbidx``, and L2-normalizes the means (norm floored at
    1e-6 to avoid division by zero).
    """
    top_level = pdtab[pdtab.zoom_level == pdtab.max_zoom_level]
    averaged = top_level.groupby("dbidx").vectors.mean().reset_index()
    mat = averaged["vectors"].values.to_numpy()
    norms = np.maximum(np.linalg.norm(mat, axis=1, keepdims=True), 1e-6)
    return averaged.assign(vectors=TensorArray(mat / norms))
"""
Expected data layout for a seesaw root with a few datasets:
/workdir/seesaw_data
├── coco ### not yet preprocessed, just added
│ ├── file_meta.parquet
│ ├── images -> /workdir/datasets/coco
├── coco5 ### preprocessed
│ ├── file_meta.parquet
│ ├── images -> /workdir/datasets/coco
│ └── meta
│ └── vectors
│ ├── part_000.parquet
│ └── part_001.parquet
├── mini_coco
│ ├── file_meta.parquet
│ ├── images -> /workdir/datasets/mini_coco/images
│ └── meta
│ └── vectors
│ ├── part_000.parquet
│ └── part_001.parquet
"""
import sqlite3
def ensure_db(dbpath):
    """Create the sqlite metadata schema (models, datasets, indices) at
    *dbpath* if it does not already exist. Idempotent.

    Uses a ``nolock=1`` URI because lustre makes sqlite locking fail; this
    should run only from the manager process.

    Fixes vs. previous version: the ``indices`` DDL was missing commas
    between its table constraints (a SQLite syntax error, so the CREATE
    TABLE raised OperationalError), and the second foreign key referenced
    the nonexistent column ``models(_id)`` instead of ``models(m_id)``.
    """
    conn_uri = f"file:{dbpath}?nolock=1"  # lustre makes locking fail, this should run only from manager though.
    conn = sqlite3.connect(conn_uri, uri=True)
    try:
        cur = conn.cursor()
        cur.execute(
            """CREATE TABLE IF NOT EXISTS models(
                    m_id INTEGER PRIMARY KEY,
                    m_created DATETIME default current_timestamp,
                    m_name TEXT UNIQUE,
                    m_path TEXT UNIQUE,
                    m_constructor TEXT,
                    m_origin_path TEXT
                    )"""
        )
        cur.execute(
            """CREATE TABLE IF NOT EXISTS datasets(
                    d_id INTEGER PRIMARY KEY,
                    d_created DATETIME default current_timestamp,
                    d_name TEXT UNIQUE,
                    d_path TEXT UNIQUE,
                    d_origin_path TEXT
                    )"""
        )
        cur.execute(
            """CREATE TABLE IF NOT EXISTS indices(
                    i_id INTEGER PRIMARY KEY,
                    i_created DATETIME default current_timestamp,
                    i_name TEXT,
                    i_constructor TEXT,
                    i_path TEXT,
                    d_id INTEGER,
                    m_id INTEGER,
                    FOREIGN KEY(d_id) REFERENCES datasets(d_id),
                    FOREIGN KEY(m_id) REFERENCES models(m_id),
                    UNIQUE(d_id, i_name)
                    )"""
        )
        conn.commit()
    finally:
        conn.close()
class GlobalDataManager:
    """Owns a seesaw root folder (``data/``, ``models/``, ``indices/`` plus
    ``meta.sqlite``) and mediates access to the datasets, models and
    indices registered in the sqlite metadata tables (see ``ensure_db``).
    """

    # handle to the shared cache actor (project type); also used as a lock
    # service in get_model_actor below
    global_cache: CacheStub

    def __init__(self, root):
        """Open (creating on first use) the root folder at *root*.

        Side effects: makes the root and its data/models/indices
        subfolders, and creates the sqlite metadata schema when missing.
        """
        root = os.path.abspath(os.path.expanduser(root))
        if not os.path.exists(root):
            print(f"creating new root folder at {root}")
            os.makedirs(root)
        self.root = root
        self.data_root = f"{root}/data/"
        self.model_root = f"{root}/models/"
        self.index_root = f"{root}/indices/"
        self.global_cache = CacheStub("actor#cache")
        paths = [self.data_root, self.model_root, self.index_root]
        for p in paths:
            os.makedirs(p, exist_ok=True)
        self.dbpath = f"{self.root}/meta.sqlite"
        # NOTE(review): self.dburi is not used by _get_connection below
        # (it rebuilds its own URI); presumably kept for external callers —
        # confirm before removing.
        self.dburi = f"file:{self.dbpath}?nolock=1&mode=ro"
        ensure_db(self.dbpath)

    def _get_connection(self, url_mode="ro"):
        """Open a sqlite connection to the metadata db.

        ``nolock=1`` works around lustre locking; default mode is
        read-only.
        """
        dburi = f"file:{self.dbpath}?nolock=1&mode={url_mode}"
        return sqlite3.connect(dburi, uri=True)

    def _fetch(self, sql, *args, mode="plain", **kwargs):
        """Run *sql* on a fresh read-only connection and return all rows.

        mode="dict" returns a list of dicts, "plain" a list of tuples,
        "df" a pandas DataFrame.
        NOTE(review): if _get_connection() raises, ``conn`` is unbound in
        the finally block and a NameError masks the original error; an
        unrecognized *mode* silently returns None.
        """
        try:
            conn = self._get_connection()
            if mode == "dict":
                # sqlite3.Row enables column-name access for dict() below
                conn.row_factory = sqlite3.Row
                tups = conn.execute(sql, *args, **kwargs).fetchall()
                return [dict(tup) for tup in tups]
            elif mode == "plain":
                return conn.execute(sql, *args, **kwargs).fetchall()
            elif mode == "df":
                return pd.read_sql_query(sql, conn)
        finally:
            conn.close()

    def _fetch_unique(self, *args, **kwargs):
        """Like _fetch but asserts exactly one row and returns it."""
        tups = self._fetch(*args, **kwargs)
        assert len(tups) == 1
        return tups[0]

    def list_datasets(self):
        """Return registered dataset names, ordered by creation id."""
        tups = self._fetch(
            """
            select d_name from datasets
            order by d_id
            """
        )
        return [t[0] for t in tups]

    def list_indices(self):
        """Return an IndexSpec per registered index (joined with its
        dataset and model names)."""
        df = self._fetch(
            """
            select d_name, i_name, m_name from indices, datasets, models
            where datasets.d_id == indices.d_id
            and models.m_id == indices.m_id
            order by datasets.d_id, i_id
            """,
            mode="df",
        )
        recs = df.to_dict(orient="records")
        return [IndexSpec(**d) for d in recs]

    def get_index_construction_data(self, dataset_name, index_name):
        """Return (i_constructor, i_path, m_name) for the named index of
        the named dataset; asserts the pair is unique."""
        return self._fetch_unique(
            """select i_constructor, i_path, m_name from indices,models,datasets
                        where d_name == ? and i_name == ?
                        and indices.d_id == datasets.d_id
                        and indices.m_id == models.m_id
                    """,
            (dataset_name, index_name),
        )

    def load_index(self, dataset_name, index_name) -> AccessMethod:
        """Restore the AccessMethod for (dataset_name, index_name) from
        its recorded constructor, path and model."""
        print("loading index")
        cons_name, data_path, model_name = self.get_index_construction_data(
            dataset_name, index_name
        )
        print("got index data")
        return AccessMethod.restore(self, cons_name, data_path, model_name)

    def _get_model_path(self, model_name: str) -> str:
        """Return the stored (root-relative) path for *model_name*."""
        return self._fetch_unique(
            """select m_path from models where m_name == ?""", (model_name,)
        )[0]

    def get_model_actor(self, model_name: str):
        """Return a ModelStub for the named model's ray actor, starting a
        detached actor on first use.

        Uses the global cache's lock so concurrent callers create the
        actor only once.
        """
        actor_name = f"/model_actor#{model_name}"  # the slash is important
        try:
            # fast path: actor already running
            ref = ray.get_actor(actor_name)
            return ModelStub(ref)
        except ValueError as e:
            pass  # will create instead

        def _init_model_actor():
            # resolve the model file under the manager root
            m_path = self._get_model_path(model_name)
            full_path = f"{self.root}/{m_path}"
            # pick placement based on cluster GPU availability
            # NOTE(review): resource numbers (8 cpus / 0.5 gpu + 4 cpus)
            # look tuned for a specific cluster — confirm before changing
            if ray.cluster_resources().get("GPU", 0) == 0:
                device = "cpu"
                num_gpus = 0
                num_cpus = 8
            else:
                device = "cuda:0"
                num_gpus = 0.5
                num_cpus = 4
            r = (
                ray.remote(HGWrapper)
                .options(
                    name=actor_name,
                    num_gpus=num_gpus,
                    num_cpus=num_cpus,
                    lifetime="detached",
                )
                .remote(path=full_path, device=device)
            )
            # wait for it to be ready
            ray.get(r.ready.remote())
            return r

        # we're using the cache just as a lock
        self.global_cache._with_lock(actor_name, _init_model_actor)
        # must succeed now...
        ref = ray.get_actor(actor_name)
        return ModelStub(ref)

    def create_dataset(self, image_src, dataset_name, paths=[]) -> SeesawDatasetManager:
        """
        if not given explicit paths, it assumes every jpg, jpeg and png is wanted

        Creates the dataset folder under data_root, symlinks the images,
        and writes file_meta.parquet sorted by file path.
        NOTE(review): mutable default ``paths=[]`` — harmless here because
        it is only reassigned, never mutated.
        """
        assert dataset_name is not None
        assert " " not in dataset_name
        assert "/" not in dataset_name
        assert not dataset_name.startswith("_")
        assert (
            dataset_name not in self.list_datasets()
        ), "dataset with same name already exists"
        image_src = os.path.realpath(image_src)
        assert os.path.isdir(image_src)
        dspath = f"{self.data_root}/{dataset_name}"
        assert not os.path.exists(dspath), "name already used"
        os.mkdir(dspath)
        image_path = f"{dspath}/images"
        # images are shared by reference, not copied
        os.symlink(image_src, image_path)
        if len(paths) == 0:
            paths = list_image_paths(image_src)
        df = pd.DataFrame({"file_path": paths})
        ## use file name order to keep things intuitive
        df = df.sort_values("file_path").reset_index(drop=True)
        df.to_parquet(f"{dspath}/file_meta.parquet")
        return self.get_dataset(dataset_name)

    def _fetch_dataset_path(self, dataset_name):
        """Return the stored d_path for *dataset_name* (must exist)."""
        d_path = self._fetch_unique(
            """
            select d_path from datasets where d_name == ?
            """,
            (dataset_name,),
        )[0]
        return d_path

    def get_dataset(self, dataset_name) -> SeesawDatasetManager:
        """Return a SeesawDatasetManager for a registered dataset."""
        all_ds = self.list_datasets()
        assert dataset_name in all_ds, f"{dataset_name} not found in {all_ds}"
        d_path = self._fetch_dataset_path(dataset_name)
        return SeesawDatasetManager(self.root, dataset_name, d_path, self.global_cache)

    def clone(
        self, ds=None, ds_name=None, clone_name: str = None
    ) -> SeesawDatasetManager:
        """Copy a whole dataset folder (symlinks preserved) under a new
        name; auto-generates ``<name>_clone_NNN`` when clone_name is None.
        """
        assert ds is not None or ds_name is not None
        if ds is None:
            ds = self.get_dataset(ds_name)
        if clone_name is None:
            # find the first unused _clone_NNN suffix
            dss = self.list_datasets()
            for i in range(len(dss)):
                new_name = f"{ds.dataset_name}_clone_{i:03d}"
                if new_name not in dss:
                    clone_name = new_name
                    break
            assert clone_name is not None
        shutil.copytree(
            src=ds.dataset_root, dst=f"{self.data_root}/{clone_name}", symlinks=True
        )
        return self.get_dataset(clone_name)

    def clone_subset(
        self, ds=None, ds_name=None, subset_name: str = None, file_names=None
    ) -> SeesawDatasetManager:
        """Create a new dataset restricted to *file_names*, carrying over
        the matching precomputed vectors and (by symlink) the ground
        truth of the source dataset."""
        assert ds is not None or ds_name is not None
        if ds is None:
            ds = self.get_dataset(ds_name)
        dataset = ds
        # 1: names -> indices
        image_src = os.path.realpath(dataset.image_root)
        assert subset_name not in self.list_datasets(), "dataset already exists"
        file_set = set(file_names)
        self.create_dataset(
            image_src=image_src, dataset_name=subset_name, paths=file_names
        )
        subds = self.get_dataset(subset_name)

        def vector_subset(tab):
            # keep only vector rows whose file is in the subset
            vt = tab.to_pandas()
            vt = vt[vt.file_path.isin(file_set)]
            return vt

        if os.path.exists(dataset.vector_path()):
            dataset.load_vec_table().map_batches(vector_subset).write_parquet(
                subds.vector_path()
            )
        if os.path.exists(dataset.ground_truth_path()):
            # ground truth is shared via symlink rather than copied
            os.symlink(
                os.path.realpath(dataset.ground_truth_path()),
                subds.ground_truth_path().rstrip("/"),
            )
        return self.get_dataset(subset_name)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.root})"
def prep_ground_truth(paths, box_data, qgt):
    """Align ground truth with *paths*.

    Adds a ``dbidx`` column to *box_data* (dropping boxes whose file is
    not in *paths*), re-indexes *qgt* by file position, sorts it, and
    pads it with NaN rows so it has one row per path.
    """
    idx_of = {p: i for i, p in enumerate(paths)}

    def to_idx(path):
        # files outside *paths* map to -1 and are filtered out
        return idx_of.get(path, -1)

    box_data = box_data.assign(dbidx=box_data.file_path.map(to_idx).astype("int"))
    box_data = box_data[box_data.dbidx >= 0].reset_index(drop=True)
    mapped = qgt.index.map(to_idx)
    keep = mapped >= 0
    qgt = qgt[keep].set_index(mapped[keep]).sort_index()
    ## add rows for files with no labels (NaN entries are ignored downstream)
    qgt = qgt.reindex(np.arange(len(paths)))
    assert len(paths) == qgt.shape[0], "every path should be in the ground truth"
    return box_data, qgt
import annoy
import random
import os
import sys
import time
def build_annoy_idx(*, vecs, output_path, n_trees):
    """Build an Annoy dot-product index over *vecs* (512-d each), save it
    to *output_path*, and return the build time in seconds (excluding the
    save step)."""
    t0 = time.time()
    # item vectors must be 512-dimensional
    index = annoy.AnnoyIndex(512, "dot")
    for pos, vec in enumerate(vecs):
        index.add_item(pos, vec)
    print(f"done adding items...{time.time() - t0} sec.")
    index.build(n_trees=n_trees)
    elapsed = time.time() - t0
    print(f"done building...{elapsed} sec.")
    index.save(output_path)
    return elapsed
def build_nndescent_idx(vecs, output_path, n_trees):
    """Build a pynndescent nearest-neighbor index over *vecs* and pickle
    it to *output_path*.

    Returns the elapsed build time in seconds (serialization time is
    deliberately excluded, matching the point where ``end`` was taken).

    Fix vs. previous version: the output file was opened inline in
    ``pickle.dump(..., file=open(...))`` and never closed (handle leak,
    and buffered data could be lost on interpreter teardown); it is now
    managed by a ``with`` block.
    """
    import pynndescent

    start = time.time()
    ret = pynndescent.NNDescent(
        vecs.copy(),  # NNDescent may retain/modify its input; work on a copy
        metric="dot",
        n_neighbors=100,
        n_trees=n_trees,
        diversify_prob=0.5,
        pruning_degree_multiplier=2.0,
        low_memory=False,
    )
    print("first phase done...")
    ret.prepare()
    print("prepare done... writing output...", output_path)
    difftime = time.time() - start
    # context manager guarantees the handle is closed even if pickling fails
    with open(output_path, "wb") as f:
        pickle.dump(ret, file=f)
    return difftime
import shutil
class VectorIndex:
def __init__(self, *, base_dir, load_path, copy_to_tmpdir: bool, prefault=False):
t = annoy.AnnoyIndex(512, "dot")
self.vec_index = t
if copy_to_tmpdir:
print("cacheing first", base_dir, DATA_CACHE_DIR, load_path)
actual_load_path = parallel_copy(
base_dir=base_dir, cache_dir=DATA_CACHE_DIR, rel_path=load_path
)
else:
print("loading directly")
actual_load_path | |
0.027929975725048677, 0.023096283447740142, 0.013246239250661214, 0.016591515379385257,
0.020124742001645903, 0.04280129966494781],
[0.012788278610805889, 0.01900825221815599, 0.03422528506129086, 0.04729076846086603,
0.029862497132645566, 0.026514444167261703, 0.04284894436016072, 0.04584681968530258,
0.011033377836564044, 0.04140422115291157],
[0.023058256237638824, 0.04399940986324865, 0.04016330041918303, 0.005080603117752815,
0.03491398104901271, 0.040045341864139515, 0.034031172740374836, 0.019191471290492413,
0.03603531159570868, 0.042596477736541946],
[0.046358014675969055, 0.017818607736363488, 0.04395747429229237, 0.005829241029777856,
0.016683124380426722, 0.005200651117956997, 0.030306466915438843, 0.03661464213907885,
0.005090182595693544, 0.0301431636975622],
[0.032889802142968445, 0.04623167588947986, 0.02429066759611954, 0.01019869133107324,
0.02175423771985827, 0.006625026391789455, 0.028123290834043163, 0.010706010728849573,
0.03902060361203935, 0.03755032077975127],
[0.018834168499581472, 0.03942227869844037, 0.03983383260559758, 0.0428100516081356,
0.030688144655158758, 0.0485062601257023, 0.017755941698274227, 0.01210051249940899,
0.03933183479283753, 0.04638445915853673],
[0.03056041779152236, 0.03759257529108607, 0.0366005750697856, 0.020631892502207302,
0.027009282640311376, 0.02641750375615283, 0.023964856578300026, 0.037227445354505,
0.0386585988477621, 0.04604459662682718],
[0.012419087881714167, 0.03703773656029617, 0.024846102884127856, 0.049133009671587076,
0.03743310053107589, 0.04752077466835828, 0.009494830498448388, 0.032018971327987665,
0.009239722098387238, 0.008696028406467276],
[0.04677425050099796, 0.04311911432874717, 0.019969688591523125, 0.02978635139417161,
0.04223280883254018, 0.01930223442774412, 0.012856881552766406, 0.04384250501892262,
0.01710129928829977, 0.024722516262693745],
[0.03484055128265013, 0.023113339805512265, 0.038386342235461286, 0.005401638584295629,
0.03134640756044108, 0.040277626651505964, 0.026557745081425048, 0.03404446144757719,
0.044289024903750425, 0.040077752190681486],
[0.014012256826217352, 0.016758453099948594, 0.010808189054267586, 0.006111630051461744,
0.019042470025272464, 0.04042850912861643, 0.025455037232476045, 0.016312067256119826,
0.005396766756093934, 0.045164032397425724],
[0.03872241213709443, 0.02277202327839479, 0.044556308469085426, 0.014559638766047395,
0.022573840557378728, 0.010008178966811557, 0.029115591925928972, 0.007014702026984695,
0.016734975727248767, 0.009466736342239136],
[0.04404112993905618, 0.04069125003618314, 0.019069274431826284, 0.026115764119378892,
0.03990362228986361, 0.041716631787353076, 0.03579457273909513, 0.047279419269428496,
0.03147944291742222, 0.015826458794738655],
[0.008762838356233748, 0.01987508641753536, 0.014816522742694342, 0.01068422760939676,
0.03688148664398665, 0.015426771586047543, 0.017095386408499055, 0.014283247860243094,
0.020373123882911733, 0.026120923049234662],
[0.005829486336636015, 0.04994120265972566, 0.04412052033079155, 0.01195942466542867,
0.008731682747246103, 0.008438704722250459, 0.04495078556563886, 0.009811285525867044,
0.03688084235015041, 0.010756119199715964],
[0.04272761357629337, 0.007681750191763918, 0.040983580169520924, 0.006601186663526549,
0.01589793312648897, 0.030142944566899955, 0.024928680854923255, 0.012573685832080333,
0.027260240768042194, 0.020896378804933134],
[0.01896635733933095, 0.01398666541620848, 0.02216384800624855, 0.04167467312730931,
0.04143609992452048, 0.027130044431265408, 0.016809601988894054, 0.008379492735642381,
0.049676104840005286, 0.03693406007505278],
[0.03359674584478351, 0.00606435856428158, 0.03350343517685703, 0.006952226153367241,
0.01871204901131894, 0.00850083163279318, 0.0438908144231299, 0.03164530039030832,
0.04108438711661421, 0.021065866208347203],
[0.024510717520045487, 0.010965859312899284, 0.04501404257276819, 0.022737295516923838,
0.0497133517848057, 0.025595544066567062, 0.04868950598939358, 0.023252629447687388,
0.04653569744263246, 0.04776086990323713],
[0.03377953458588898, 0.014877060798937004, 0.04307291553269222, 0.025740012955137673,
0.034811327099332, 0.04018522188719354, 0.028099770865987424, 0.02239597704561444,
0.021259854197715138, 0.019550779488756193],
[0.005109230075589886, 0.024256260906530314, 0.037219567144505014, 0.011084172177569178,
0.010188378057453354, 0.017962560659441445, 0.04729168775168664, 0.02492065225425394,
0.019126548597139297, 0.02724016030873975],
[0.0499872264824053, 0.01578220724608491, 0.039632658667298516, 0.04903951302608984,
0.03932221150124168, 0.008616604704271582, 0.0346148124577519, 0.04162744688117539,
0.01634848744660001, 0.03712229224787633],
[0.011916652913009161, 0.005791136424618239, 0.046266690895896204, 0.02387969739257014,
0.040834038895559505, 0.00931805082508649, 0.011201860837575907, 0.047429492283186915,
0.017846720861538802, 0.01253648043881666],
[0.006003601120607258, 0.039841461407444885, 0.005985847423598251, 0.047551870688879595,
0.020683334921946576, 0.04968444484105832, 0.041813237403034574, 0.037404359814932765,
0.013729141404005208, 0.019165069557273314],
[0.012947408076344626, 0.044723415666352885, 0.04761868976798498, 0.01439916643665625,
0.03141629987523862, 0.04306829464680261, 0.03420488002314209, 0.045425354970194974,
0.022295461424182816, 0.04389248590973355],
[0.0053872594148395275, 0.027898665930223877, 0.04563325493609173, 0.037451302988316296,
0.029017352247932785, 0.007519093577662157, 0.007806631867994166, 0.005176600441550583,
0.020922611199312127, 0.04072663379641977],
[0.0214553311125957, 0.04913324946215417, 0.033002862236459304, 0.02531096397034037,
0.04924945357463482, 0.01921171223306217, 0.01147201098666873, 0.011512028578867661,
0.02567269388696986, 0.04997847093343963],
[0.026198927281335938, 0.04175072486435096, 0.02048278519622126, 0.03979456770431986,
0.00913418139157282, 0.04971931959972772, 0.033476696856833535, 0.041592436575500887,
0.02148765491787998, 0.00911376190466672],
[0.03724211357427991, 0.029315971083017287, 0.046110072482825004, 0.04425433713927332,
0.04318740319663871, 0.015524044591402202, 0.04066567779979656, 0.048206926898015805,
0.03842869273178805, 0.03663882732673997],
[0.01667445380415266, 0.012327211589288942, 0.0413074692086983, 0.028304237383731577,
0.03110293694868698, 0.010931479753118428, 0.046512355067008716, 0.0190270655251186,
0.04923005112352888, 0.04161653563069649],
[0.02479677298817738, 0.038050764982467386, 0.03102513187524288, 0.04708710593428631,
0.014196593313512335, 0.03708833783525589, 0.03128277488329507, 0.01734636997003995,
0.01490270857081518, 0.012022613583062301],
[0.030235216213526313, 0.007082973316544405, 0.007749394421090892, 0.03443849716219311,
0.02848789306736962, 0.04506396650441203, 0.014295698740059083, 0.010252900824967198,
0.01992966787964868, 0.02061986017694009],
[0.02922168169493548, 0.018082193088623123, 0.009596894631454668, 0.045483226718148285,
0.044602178357912345, 0.03539696075585631, 0.04277002837146326, 0.0066878264912792005,
0.027356200416042694, 0.00643307674638662],
[0.03643870805708788, 0.021512891086631892, 0.020065751044575763, 0.00551966361927437,
0.04626184655291875, 0.028924330131141478, 0.02257611389439959, 0.025398804061322215,
0.012082095961843783, 0.01875416967484882],
[0.0061566868747421856, 0.00777909564356357, 0.021239087221537663, 0.03352364396975099,
0.04728700149052222, 0.03882611041914923, 0.012780862474694583, 0.028341096438794056,
0.009098323514433756, 0.04705818444061962],
[0.007308825658479663, 0.02445037721311853, 0.039104273251372085, 0.03309667212729586,
0.011547060236167087, 0.0242323538259776, 0.04994114443025109, 0.028780917220574367,
0.021854784665633176, 0.025373680055380477],
[0.010331206272978975, 0.04903609796313997, 0.031893265760917396, 0.0401340388231393,
0.015316286437514339, 0.01570437816835138, 0.04512476177857368, 0.03625808372602799,
0.03977290804190325, 0.02443948391954817],
[0.009872666869860063, 0.028296669010155696, 0.0203873036230351, 0.007171821089317866,
0.016093162426392922, 0.00731838929936556, 0.019449272248309106, 0.032652592575620636,
0.030559576648764714, 0.01850768081473857],
[0.03229419216370146, 0.020884636842314626, 0.022320183988112315, 0.017222899934131834,
0.02681283478488545, 0.02080529369148572, 0.02056911939086087, 0.005765363222229641,
0.04591476655289037, 0.03531531912255183],
[0.039687113365614396, 0.025290637386108542, 0.040803506736485105, 0.048836903043247566,
0.03180776390490933, 0.011960700377594889, 0.010769980094018031, 0.03430631836824289,
0.031064313901631814, 0.007772729008113667],
[0.03992050485057065, 0.04419631767283281, 0.030687517536522674, 0.02917581521852262,
0.023414553337076274, 0.037709873987660375, 0.03362294086757893, 0.020131218470470776,
0.0195658220553962, 0.04199207532468904],
[0.04598045630261411, 0.03733996023804372, 0.020635175998356747, 0.04530715058059671,
0.014811677000976502, 0.030331293558891328, 0.04769790147476487, 0.024347531973952088,
0.024561682307181868, 0.03352064976485503],
[0.02540784763367206, 0.01329376365410117, 0.005831002067450855, 0.019085833489753047,
0.022187619406739082, 0.026173148932127167, 0.04376897883407012, 0.02341409349997081,
0.0082388326853238, 0.04612735643152319],
[0.04467562981556489, 0.04100603139993887, 0.033660401448589373, 0.04693533327377245,
0.025245359605886142, 0.021134839971978455, 0.020616852340224145, 0.029026669061443626,
0.02833233561466102, 0.012982454148449032],
[0.04373352872495395, 0.03226065381549463, 0.018843723527851016, 0.04777390524524972,
0.037996857678741947, 0.009767209850405478, 0.039927871974520766, 0.04559177157591775,
0.007977180791760777, 0.022464404479374034],
[0.011001396210815688, 0.020448781928976374, 0.007211015321928358, 0.040644959854387666,
0.016138573510288082, 0.029245687739242923, 0.04137263063332093, 0.04579678840524958,
0.016692188078447034, 0.040875615844633055],
[0.04550054724790139, 0.03996228494957334, 0.014679306278588187, 0.021280966111468874,
0.023288680066284016, 0.017019042131022427, 0.018566027355192145, 0.02931588979424113,
0.0329693547036976, 0.028487696135026823],
[0.019885626303629635, 0.012054119505886984, 0.029117028184877377, 0.02329461811453198,
0.044541978643386916, 0.026641053236365713, 0.013555408268363347, 0.022476212867441704,
0.01822851475715445, 0.04316317830123527],
[0.04022906114764718, 0.03490688392398833, 0.031643115047326094, 0.04599629499733613,
0.01201941829906145, 0.03864096097100446, 0.019712854217464753, 0.0065645121495459865,
0.024073696220952137, 0.005025700015413076],
[0.04105310672306777, 0.03547542013870518, 0.04917768542573394, 0.03173161083353454,
0.0485028906330986, 0.03851138158346338, 0.0385309525221923, 0.04445202110155239,
0.04268364928705378, 0.026875900096814977],
[0.048596354098024104, 0.03658490085552626, 0.006553960452417306, 0.028055650202984568,
0.04023460243917465, 0.015141175167018288, 0.03803713350955397, 0.03986203355644792,
0.04793136826275444, 0.03335245853190657],
[0.032730786620249724, 0.02603239543167589, 0.015580960547613213, 0.04939354465374922,
0.02459732382344007, 0.04994918554390341, 0.014720575833780232, 0.02678534756116507,
0.028462514382568464, 0.01147569677521251],
[0.038189181875092015, 0.03180363774763045, 0.04200650953852682, 0.04487353462202118,
0.035858488815131186, 0.025078545773584187, 0.01394507372271377, 0.01810037615145729,
0.02874542136082197, 0.026187481067698293],
[0.03505062740717129, 0.04278912125593421, 0.01515422246185559, 0.04158819075914071,
0.017774055776095754, 0.026922987072396088, 0.030803182979583947, 0.005932930358711197,
0.019503615239154396, 0.011013747720926871],
[0.03521313918020118, 0.005601046753210021, 0.012932248784603313, 0.04382556382626371,
0.039993406756398585, 0.04598107628814767, 0.02509046525955461, 0.039306513081415244,
0.005573970443103555, 0.031812221196175],
[0.01622069032318666, 0.03859685809342213, 0.014367331227940094, 0.048031660815734734,
0.02054120203671272, 0.03686306437998371, 0.01214530614704537, 0.02978083390082365,
0.010625937678933748, 0.017004963723521078],
[0.04689142000408602, 0.022319863386066407, 0.029443908149743528, 0.034741953882369746,
0.02731577833171775, 0.033134548835591096, 0.03474131259213653, 0.03259449619303298,
0.03961913016485228, 0.029077129531794765],
[0.01839574302865395, 0.04220459600756815, 0.04268789611599023, 0.030366310642928225,
0.046093836981572094, 0.013281003207212677, 0.02990471796235017, 0.02640482375389861,
0.012498490248747905, 0.036471832027027716],
[0.011595697596571367, 0.011801909395557808, 0.03859658901174864, 0.008973718347063675,
0.02751366039768431, 0.009855359279865899, 0.022030372169285332, 0.04400118674675557,
0.01474073854615342, 0.03935002646260049],
[0.0376374475502119, 0.04358910354606104, 0.0174049904087581, 0.01588171460578275,
0.03647946011413684, 0.03654257535729058, 0.04150679590659755, 0.036890854399000896,
0.04363047139231686, 0.04719150657118887],
[0.042281428708025134, 0.017388707611857443, 0.00824933778239887, 0.016735817018286706,
0.013860340481776184, 0.048652228654464766, 0.03836293919279311, 0.021626065607358448,
0.01847220337370124, 0.018173475388832058],
[0.04390291333938459, 0.020636122993521886, 0.033881269731218105, 0.029925054035066984,
0.0208047494517343, 0.04818692023925879, 0.019227109433102685, 0.020881504378666493,
0.013476111490192663, 0.019261288406638245],
[0.04840382398443666, 0.030148824563445132, 0.023197939850455405, 0.005758987427433909,
0.030807221652529446, 0.01968112167259412, 0.005424086659838573, 0.011958533744073317,
0.0412230631458628, 0.026742058873946477],
[0.04124934138706181, 0.021080268077234614, 0.043605523898039955, 0.047382985122399426,
0.026077241885237763, 0.039936101728898436, 0.03690495747272888, 0.03230073619562196,
0.019731785991948053, 0.023899019206482686],
[0.02080258827692606, 0.021210101281871622, 0.048190886360184745, 0.020074474242814094,
0.017388944950181673, 0.0466935268388838, 0.04823134727400356, 0.0057446043586346284,
0.03284168851464952, 0.03852265134537035],
[0.034199303214489045, 0.010731645789267549, 0.01761821075840501, 0.039222875284838074,
0.03883485935856826, 0.04923791129111862, 0.04416693729131508, 0.03333481936418653,
0.03497660671145157, 0.04754432387749269],
[0.00769235797930681, 0.03619988266216615, 0.01644414978445513, 0.04998872619943993,
0.049026277542830655, 0.015342333514634696, 0.024401131244194352, 0.013669744191637065,
0.0070497503135712, 0.02709434127258643],
[0.01599933719644903, 0.02822895361634078, 0.04175970951434357, 0.018558913574655955,
0.0061527861728898755, 0.03412116984453176, 0.03049765532667523, 0.042066801982526686,
0.025494057937820513, 0.04585119302823544],
[0.014271790409199701, 0.0249236763148691, 0.012213117104361338, 0.011736091695710122,
0.035433983129186816, 0.008255927081404806, 0.04975308687122862, 0.0425970081796326,
0.045469300274133914, 0.01648747240608918],
[0.03731587081627487, 0.01983082659581604, 0.010411873659490315, 0.03366900528336806,
0.023062859326845955, 0.017761881757757246, 0.040921743151295466, 0.04665602016327729,
0.04320939451099982, 0.014398409252431573],
[0.014426251115421649, 0.048129464887976516, 0.026153305769406814, 0.035864104805706606,
0.017243342667819465, 0.048320508331019495, 0.049618671132005576, 0.04413162349501227,
0.008894293848240309, 0.014534324853010528],
[0.01642830596402218, 0.01855630102449137, 0.023591752771606873, 0.01635343037095868,
0.01824237912982002, 0.03036496662811555, 0.032330479824535706, 0.03290929967410783,
0.04236757769526706, 0.03551302048388809],
[0.03299892706552598, 0.017231659032585535, 0.030007647769458003, 0.03154502442324105,
0.04670635867613615, 0.006398490836284773, 0.024550516826655613, 0.0244329420369976,
0.012262837490025453, 0.02431452612190801],
[0.03805787407656821, 0.015829978550772665, 0.035501674923167824, 0.013251227012512794,
0.04283435328380561, 0.025465519335143738, 0.025917064548546243, 0.0091022618989936,
0.04509391554786887, 0.03477691054100679],
[0.04204814443181919, 0.046285603422012295, 0.03801225652678169, 0.018360498585396064,
0.019506457256442386, 0.04287209239532307, 0.017819931936546383, 0.041900920690916006,
0.04374999287531825, 0.030674984032234603],
[0.049563247490866484, 0.006577177560776335, 0.019164587817434908, 0.019855527890954704,
0.0053203773246621525, 0.030038181087302177, 0.023103703791826666, 0.013807161457233756,
0.04849178959111378, 0.021287166770401798],
[0.015266262907993464, 0.023477880373527125, 0.02576124266159111, 0.032182379996385846,
0.019345358489584713, 0.029590258681423905, 0.04487596065085574, 0.04989930452686145,
0.049738751670242914, 0.005963481938055651],
[0.009301246881737665, 0.02876168736119158, 0.03689809044477766, 0.03285302220881822,
0.011746073712583153, 0.01993501379547364, 0.03161937695602978, 0.045227362979789625,
0.01796489449519819, 0.007112788143366458],
[0.02914644477950473, 0.03235793427119994, 0.02416789834173064, 0.0497905782522779,
0.0214626996023832, 0.022869773917447576, 0.010234907838336854, 0.039471417212917984,
0.006606882846680613, 0.012211447626554578],
[0.04481081482025662, 0.028654233852438604, 0.027481281278103786, 0.022337801496170557,
0.008510795051739777, 0.016871022360906103, 0.016272694520774393, 0.02185952320253532,
0.04480135169664522, 0.03898622540873182],
[0.010796306590054835, 0.014054740890651455, 0.03900249553845563, 0.04675746451833353,
0.03914064371208133, 0.007221083652890356, 0.02278919907515344, 0.01610563184035065,
0.03563928283073872, 0.024829084680027973],
[0.016386192265528166, 0.0129625257630752, 0.037090311952301946, 0.03206261674329985,
0.03504834534033722, 0.021394150055124524, 0.020197818952908755, 0.04855626170589669,
0.01746190792152245, 0.005251099959262916],
[0.02413791733556524, 0.01418009820236061, 0.03585662936765647, 0.04961035755678205,
0.022292461385832418, 0.03285690826826881, 0.013339255797398933, 0.034762247390299705,
0.04990543863858117, 0.024960761246849227],
[0.02030065505638935, 0.04507356103997628, 0.0447431025958544, 0.01812623224084133,
0.007092295385754654, 0.006662353941336588, 0.032216141053653426, 0.016917670721464856,
0.03747978722348479, 0.027582677461056915],
[0.01963122102613836, 0.014727316137662445, 0.019811620199922726, 0.021695882104669315,
0.015779755634785408, 0.034513737514199734, 0.02338561238464482, 0.04238538470859948,
0.025968823536122187, 0.005900661510769529],
[0.027851414645867364, 0.038608533656777716, 0.04218852859740293, 0.031307720557603815,
0.01510708038361799, 0.010327353057854597, 0.005982177511960103, 0.020263585026944766,
0.0344991778925035, 0.019803583142504157],
[0.03138354490594714, 0.016576338652075858, 0.01954067488485227, 0.027538118339307718,
0.04295461952032487, 0.044632947489609845, 0.031043892072892643, 0.03381453865886708,
0.02967913353195915, 0.027456067548166214],
[0.012648277213949139, 0.02147141723593203, 0.02104209150238283, 0.04175438111690916,
0.0380752572098937, 0.03511219731206188, 0.014325732577487673, 0.01779038257458537,
0.012712711626257492, 0.006312587371308425],
[0.038798071221229184, 0.040786062425047544, 0.011973230919468097, 0.022668067055476082,
0.010864379122508525, 0.035510234146772277, 0.018376795032121522, 0.02732495797820408,
0.021118718645008677, 0.027660777105358823],
[0.040398176337111416, 0.041589887557204185, 0.03313944326562041, 0.013562457446489234,
0.04423786178446942, 0.02122480870954843, 0.01655409545458892, 0.017089483310473264,
0.022476209248644827, 0.03365572632857359],
[0.048960081369487936, 0.010242854772025092, 0.03330655009788278, 0.02194347781464191,
0.008220460572221562, 0.03526244126477144, 0.047048377591589564, 0.028526910596301427,
0.018880333232398495, 0.015006107089722793]]
self.y_data = [[0.0007925424056634186, 0.0005665900911329277],
[0.0010585048722607602, 2.8494086496458716e-06],
[0.0008414107857886328, 2.8002946087861272e-05],
[0.0009251870560912125, 2.384788397875836e-05],
[0.000874573156497512, 8.062597191488177e-06],
[0.0007638498146239093, 0.0003703263009294565],
[0.0009308519077971741, 1.1638228072977165e-05],
[0.0007864811543508003, 2.2304400947606054e-05],
[0.0007491363378327119, 9.534515353957354e-05],
[0.0009426003453837156, 5.6693756291933275e-05],
[0.0007637592096321781, 1.2130102980903709e-05],
[0.0010310354715801174, 1.727594055837286e-06],
[0.0007087635853838984, 1.0214029282239149e-05],
[0.0008681399157594405, 1.376450905520804e-05],
[0.0009871100149544995, 1.1972027081289327e-05],
[0.0006600383157709415, 4.411301452868887e-05],
[0.0011150290136542483, 0.00011941591570269189],
[0.0009844470596521951, 1.0390571314487557e-05],
[0.0009924202951985919, 1.7780738978293172e-05],
[0.0008890215322758985, 4.1344186535449104e-05],
[0.0008712156727370767, 0.00010132154128213327],
[0.0008561045818881447, 8.381566483689625e-06],
[0.0008890942059352698, 0.00016129868006170442],
[0.0009333952496798223, 2.1249592561484834e-05],
[0.0006761969398906298, 9.488863813051396e-05],
[0.0010801998890405827, 1.3152742457748038e-05],
[0.000985491878546432, 1.609348230264967e-05],
[0.0010407607851237884, 1.3881360860358527e-05],
[0.0009448438028736406, 0.0004416951486649948],
[0.000980012794338712, 1.8530270631060155e-05],
[0.0009318687505264468, 1.7917542301062093e-05],
[0.0010243445967227825, 6.603101392164839e-06],
[0.0008622056007327381, 3.090527605691687e-05],
[0.0009306879741384426, 2.2924441992576388e-05],
[0.0008443141609667974, 3.066068249429251e-05],
[0.0007245272093153513, 5.363406370392003e-05],
[0.0011849585828018208, 4.675871037442442e-06],
[0.0008105349715943244, 8.094003205300774e-05],
[0.0009490669794237794, 0.00018975019639924567],
[0.0008778935581885564, 1.8461703640382713e-05],
[0.001044474062145572, 2.9164971161063217e-05],
[0.0008351480431144039, 1.6071860440487194e-05],
[0.0008837319240273249, 6.812666550480133e-06],
[0.0007736249258360838, 8.434890991008527e-05],
[0.0007095981348908593, 2.8623123810378887e-05],
[0.0004229746063234756, 8.238149709813923e-05],
[0.0009095265551539573, 9.751977705592244e-06],
[0.0008380747797696737, 7.092838555805115e-05],
[0.0009123386682751461, 4.220635337701501e-05],
[0.0008254765804077775, 1.71460820743383e-05],
[0.0008237728388182426, 0.0005858396818730046], [0.0007841947168334, 4.003196446303115e-05],
[0.000783387769347537, 4.1827511111575526e-05],
[0.0007836587539256941, 2.2815579760238418e-05],
[0.000689810913593528, 4.677874357943257e-05],
[0.0006928092388136206, 3.2041946816423834e-05],
[0.0009628308413665072, 9.091327506849174e-06],
[0.0008674853886132909, 0.00011556832649864892],
[0.0007316818498915116, 9.06142487908154e-05],
[0.0009841384143746672, 1.7708658169128936e-05],
[0.0005633713619061733, 4.909505572408975e-05],
[0.0009395683797016946, 1.7337711014965126e-05],
[0.0009045219642133674, 2.2720866604547203e-05],
[0.0007007077593375118, 0.00012193579125573799],
[0.0005690170033905004, 3.887874627370344e-05],
[0.0007657104749808448, 0.00037999721152594636],
[0.0009503806278811835, 9.151835128402212e-06],
[0.0009675609365212856, 7.1313307453442594e-06],
[0.000974510097771328, 8.50051762275781e-06], [0.0009608494311319217, 4.102701188378303e-06],
[0.0007919129343864603, 1.263242483445386e-05],
[0.0006769311078530377, 0.00034059633553839994],
[0.0006713181899141524, 8.926997665601477e-05],
[0.0006864306990085552, 0.00016352391301309408],
[0.0009352471463769254, 1.0653548245634068e-05],
[0.0010293617808291551, 4.7273146880616684e-05],
[0.000520421542537526, 4.0163695023880504e-05],
[0.0009245775882230998, 4.506212105650066e-05],
[0.0008722753745296028, 0.00019590559689400478],
[0.0006122954119062695, 0.00010077946292202919],
[0.00092631687823538, 7.996082822598985e-06], [0.0007589181095245227, 6.239775485629166e-05],
[0.0010302838487190114, 2.901873927404364e-05],
[0.0007497058135891877, 0.00013209109631584398],
[0.0007242117041892783, 2.1283653718063533e-05],
[0.001019036356802633, 7.293821879143868e-06],
[0.0008582616016361702, 1.3730368922431366e-05],
[0.000820213479118376, 2.5165289411630813e-05],
[0.0009949122986752052, 3.4406844061796946e-06],
[0.0007421884604245471, 1.6985562432437836e-05],
[0.0009712596286981783, 3.67522088506879e-05],
[0.0010387433319199418, 1.3354019906832657e-06],
[0.0009491915123565882, 4.859175025186338e-06],
[0.000942916560522447, 2.4216395413043535e-05],
[0.0008333603263210304, 1.6122495796027574e-05],
[0.0009872862002234886, 0.00018510885686371818],
[0.0008135829258114036, 4.074898102162636e-05],
[0.0009821072262702643, 1.975128047594417e-06],
[0.0004793268163100407, 5.593606323953503e-05],
[0.0009433825535621887, 1.987828938098789e-05],
[0.0010534018290836015, 5.373172841701729e-06],
[0.0008173150306250249, 1.0431010198263167e-05],
[0.0008265869873916913, 0.0011118198905213012],
[0.0008559272110632151, 1.0563162846433177e-05],
[0.0007147781881168918, 6.779868933842494e-05],
[0.0010599274833073813, 1.3673728897275495e-05],
[0.0011716453929469881, 5.2637387954432755e-06],
[0.0007457008995842067, 0.00017798836201445315],
[0.0007848619037893339, 2.6597924273268394e-05],
[0.000681965331031664, 4.688892939979759e-05],
[0.0009748936108733166, 3.0194125866290043e-05],
[0.0008531581576957838, 5.701214513967124e-05],
[0.0006798851273004801, 2.6634373650710846e-05],
[0.0005063954575633604, 2.9812152247067138e-05],
[0.000896860712254886, 1.350104914658771e-05],
[0.0008383657569342121, 3.714845483750536e-05],
[0.00043456623412789166, 5.144773393037422e-05],
[0.0009744203372013663, 7.845464058544707e-07],
[0.0008146308216879444, 1.3369494298034185e-05],
[0.0007863820402647026, 2.4578230232840748e-05],
[0.0005324596070001466, 4.273231395330076e-05],
[0.0009129105767939171, 6.665709843411428e-05],
[0.0009160502845906562, 1.45972192736778e-05],
[0.0008932693526968915, 5.8815602362153435e-05],
[0.0010852198973956076, 6.620142478393173e-06],
[0.0008747444663238607, 1.830670004911971e-05],
[0.0006970161943219365, 1.987971633843321e-05],
[0.0006366455020916801, 2.4819537319569796e-05],
[0.0006851004048795766, 6.154148784580448e-05], [0.0010096105731193, 4.050551665063854e-05],
[0.0006620670021369798, 0.00018266085204902104],
[0.000899936693532784, 1.1642578199481923e-05],
[0.000837992528312776, 1.974583936573093e-05],
[0.0007366124872955382, 6.0151649113520754e-05],
[0.0006911156793848897, 3.425109608484562e-05],
[0.0008581536236389556, 5.5080450604157145e-06],
[0.0005325864908892018, 9.724743663874842e-05],
[0.001024795887810873, 2.714545578929375e-05],
[0.0008922134914688346, 1.4081077236374736e-05],
[0.0006908700178775859, 0.00023803112422652093],
[0.0009681276011244109, 6.427420301312383e-06],
[0.0007473117554994532, 3.522188098520935e-05],
[0.0005459521956177326, 5.645094074761669e-05],
[0.0007138153496185944, 5.9302141537103946e-05],
[0.0006019593937385818, 0.00012203842448384563],
[0.0003824726825447726, 9.106354640836211e-05],
[0.00039147187342213855, 3.9523149668613855e-05],
[0.0008713035640954209, 2.263113121279153e-06],
[0.0007007821742201448, 1.9918142490526043e-05],
[0.0005289708663153738, 4.602352690675059e-05],
[0.0010895372322683196, 7.784580884561608e-06],
[0.0009281035525757939, 1.3669532048878744e-05],
[0.0008461364007825968, 2.100512268542016e-05],
[0.0005142740640675288, 0.0002234216906170518],
[0.0010674571040720955, 7.499111398769335e-06],
[0.0006639583499858664, 4.12064021566654e-05],
[0.0010800993361943898, 4.454121097571768e-06],
[0.0011088928821601713, 6.281005074717913e-06],
[0.0010447117337918491, 6.166977292837267e-06],
[0.0009908474328398234, 0.0006972119228249687],
[0.0011734535385022986, 5.322822964716142e-06],
[0.0010607998339840633, 8.432843440663979e-06],
[0.000728286363528645, 1.003876157206595e-05],
[0.0010978802184722096, 0.00020117472017046036],
[0.0009581809478185817, 1.2076960448404152e-05],
[0.0010706742107879006, 2.274293707123082e-05],
[0.0010295078377422075, 6.2340605970630305e-06],
[0.0009929019464142226, 8.89104174257169e-06], [0.0011952403637113024, 4.69199336408724e-06],
[0.0005368992700338172, 2.6778245999333798e-05],
[0.0007407855082797821, 2.8855561383426108e-05],
[0.0007644638627602774, 0.0006278089763466673],
[0.0008003585344939782, 1.9401640972987943e-05],
[0.0009003877403885162, 0.0001170417735715546],
[0.0006145823767484273, 7.469747097079194e-05],
[0.0007449178090528109, 1.7618499669040572e-05],
[0.0007917731373564069, 6.61310447591173e-05], [0.000905159076578529, 0.0008065776519163843],
[0.001092919182818895, 0.00024376567214998965],
[0.0008772844626700118, 5.578826539826172e-05],
[0.0011005088759159698, 4.75910396007134e-06],
[0.0008311368245230549, 0.0004109440818708806],
[0.0007342718240152611, 0.00018017190411205163],
[0.0007734856307151691, 0.00014317577459111287],
[0.0010051737664051692, 1.0675457141850279e-05],
[0.0009979489614306738, 7.411940232951084e-05],
| |
= random.choice(self._broker_partitions)
kafka_conn = self._connections[broker_partition.broker_id]
kafka_conn.produce(self.topic, msgs, broker_partition.partition)
bytes_sent = sum(len(m) for m in msgs)
log.debug(self._log_str(u"sent {0} bytes to {1}"
.format(bytes_sent, broker_partition)))
return broker_partition
def detect_broker_partitions(self):
bps = self._zk_util.broker_partitions_for(self.topic,
force_partition_zero=True)
if not bps:
raise NoAvailablePartitionsError(u"No brokers were found!")
self._broker_partitions = bps
self._bps_changed = False
    def _unbalance(self, nodes):
        """ZooKeeper watch callback: mark our broker-partition cache stale.

        The changed-children argument *nodes* is ignored; we only record
        that the partition set may have changed so it gets re-detected.
        """
        self._bps_changed = True
    def _register_callbacks(self):
        """Attach ZooKeeper child watches on the brokers and topic paths so
        that _unbalance() fires (flagging our cached partitions as stale)
        when brokers or topic partitions appear or disappear."""
        zk = self._zk_util._zk  # FIXME: Evil breaking of encapsulation
        path_for_brokers = self._zk_util.path_for_brokers()
        path_for_topic = self._zk_util.path_for_topic(self.topic)
        # Register each watch at most once, and only when the path already
        # exists (the topic path is created by the broker, not by us).
        if self._brokers_watch is None and zk.exists(path_for_brokers):
            self._brokers_watch = zk.children(path_for_brokers)(self._unbalance)
        if self._topic_watch is None and zk.exists(path_for_topic):
            self._topic_watch = zk.children(path_for_topic)(self._unbalance)
        log.debug("Producer {0} has watches: {1}"
                  .format(self._id, sorted(zk.watches.data.keys())))
def _all_callbacks_registered(self):
"""Are all the callbacks we need to know when to rebalance actually
registered? Some of these (like the topic ones) are the responsibility
of the broker to create."""
return all([self._brokers_watch, self._topic_watch])
    def _log_str(self, s):
        """Prefix *s* with this producer's id for uniform log messages."""
        return u"ZKProducer {0} > {1}".format(self._id, s)
    def __del__(self):
        # Best-effort cleanup: release the ZooKeeper connection when the
        # producer is garbage-collected.
        self.close()
class ZKConsumer(object):
"""Take 2 on the rebalancing code."""
    def __init__(self, zk_conn, consumer_group, topic, autocommit=True, zk_timeout=None):
        """FIXME: switch arg order and default zk_conn to localhost?

        :param zk_conn: ZooKeeper connection info, handed to ZKUtil.
        :param consumer_group: name of the consumer group we join.
        :param topic: Kafka topic to consume.
        :param autocommit: when True, fetch() persists our offsets to
            ZooKeeper after every successful fetch.
        :param zk_timeout: ZooKeeper timeout, passed through to ZKUtil.
        """
        # Simple attributes we return as properties
        self._id = self._create_consumer_id(consumer_group)
        self._topic = topic
        self._consumer_group = consumer_group
        self._autocommit = autocommit
        # Internal vars
        self._zk_util = ZKUtil(zk_conn, zk_timeout)
        self._needs_rebalance = True
        self._broker_partitions = []  # Updated during rebalancing
        self._bps_to_next_offsets = {}  # Updated after a successful fetch
        self._rebalance_enabled = True  # Only used for debugging purposes
        # These are to handle ZooKeeper notification subscriptions.
        self._topic_watch = None
        self._topics_watch = None
        self._consumers_watch = None
        self._brokers_watch = None
        # Register ourselves with ZK so other Consumers know we're active.
        self._register()
        # Force a rebalance so we know which broker-partitions we own
        self.rebalance()
        # Per-broker-partition fetch statistics, defaulting to all zeroes.
        self._stats = defaultdict(lambda: ConsumerStats(fetches=0, bytes=0, messages=0, max_fetch=0))
@property
def id(self): return self._id
@property
def topic(self): return self._topic
@property
def consumer_group(self): return self._consumer_group
@property
def autocommit(self): return self._autocommit
@property
def stats(self):
''' Returns the aggregate of the stats from all the broker partitions
'''
fetches = 0
bytes = 0
messages = 0
max_fetch = 0
for stats in self._stats.values():
fetches += stats.fetches
bytes += stats.bytes
messages += stats.messages
max_fetch = max(max_fetch, stats.max_fetch)
return ConsumerStats(fetches, bytes, messages, max_fetch)
def stats_by_broker_partition(self):
return dict(self._stats)
@property
def broker_partitions(self):
if self._needs_rebalance:
self.rebalance()
return self._broker_partitions
@property
def brokers(self):
return sorted(frozenset(bp.broker_id for bp in self.broker_partitions))
def close(self):
if hasattr(self, '_zk_util'):
self._zk_util.close()
def simple_consumer(self, bp_ids_to_offsets):
"""bp_pairs_to_offsets is a dictionary of tuples to integers like the
following:
{
"0-0" : 2038903,
"0-1" : 3930198,
"1-0" : 3932088,
"1-1" : 958
}
The keys are of the format "[broker_id]-[partition_id]".
The values are offsets.
This method will return a SimpleConsumer that is initialied to read from
the brokers listed at the offsets specified.
"""
all_broker_partitions = self._zk_util.broker_partitions_for(self.topic)
broker_partitions = dict((bp, bp_ids_to_offsets(bp.id))
for bp in all_broker_partitions
if bp.id in bp_ids_to_offsets)
return SimpleConsumer(self.topic, broker_partitions)
    def fetch(self, max_size=None, retry_limit=3, ignore_failures=False):
        """Return a FetchResult, which can be iterated over as a list of
        MessageSets. A MessageSet is returned for every broker partition
        that is successfully queried, even if that MessageSet is empty.

        :param max_size: maximum fetch size forwarded to each Kafka fetch
            request (None lets the connection use its default).
        :param retry_limit: currently unused -- TODO confirm intended use.
        :param ignore_failures: when True, a KafkaError on one broker
            partition is logged and skipped instead of propagated.

        FIXME: This is where the adjustment needs to happen. Regardless of
        whether a rebalance has occurred or not, we can very easily see if we
        are still responsible for the same partitions as we were the last time
        we ran, and set self._bps_to_next_offsets --> we just need to check if
        it's not None and if we still have the same offsets, and adjust
        accordingly.
        """
        def needs_offset_values_from_zk(bps_to_offsets):
            """We need to pull offset values from ZK if we have no
            BrokerPartitions in our BPs -> Offsets mapping, or if some of those
            Offsets are unknown (None)"""
            return (not bps_to_offsets) or (None in bps_to_offsets.values())
        log.debug("Fetch called on ZKConsumer {0}".format(self.id))
        if self._needs_rebalance:
            self.rebalance()
        # Find where we're starting from. If we've already done a fetch, we use
        # our internal value. This is also all we can do in the case where
        # autocommit is off, since any value in ZK will be out of date.
        bps_to_offsets = dict(self._bps_to_next_offsets)
        offsets_pulled_from_zk = False
        if needs_offset_values_from_zk(bps_to_offsets):
            # We have some offsets, but we've been made responsible for new
            # BrokerPartitions that we need to lookup.
            if bps_to_offsets:
                bps_needing_offsets = [bp for bp, offset in bps_to_offsets.items()
                                       if offset is None]
            # Otherwise, it's our first fetch, so we need everything
            else:
                bps_needing_offsets = self.broker_partitions
            bps_to_offsets.update(self._zk_util.offsets_for(self.consumer_group,
                                                            self._id,
                                                            bps_needing_offsets))
            offsets_pulled_from_zk = True
        # Do all the fetches we need to (this should get replaced with
        # multifetch or performance is going to suck wind later)...
        message_sets = []
        # We only iterate over those broker partitions for which we have offsets
        for bp in bps_to_offsets:
            offset = bps_to_offsets[bp]
            # NOTE(review): self._connections is not assigned in the visible
            # __init__ -- presumably populated during rebalance(); verify.
            kafka = self._connections[bp.broker_id]
            partition = kafka.partition(bp.topic, bp.partition)
            # No known offset yet: start from the newest data available.
            if offset is None:
                offset = partition.latest_offset()
            try:
                offsets_msgs = kafka.fetch(bp.topic,
                                           offset,
                                           partition=bp.partition,
                                           max_size=max_size)
            # If our fetch fails because it's out of range, and the values came
            # from ZK originally (not our internal incrementing), we assume ZK
            # is somehow stale, so we just grab the latest and march on.
            except OffsetOutOfRange as ex:
                if offsets_pulled_from_zk:
                    log.error("Offset {0} from ZooKeeper is out of range for {1}"
                              .format(offset, bp))
                    offset = partition.latest_offset()
                    log.error("Retrying with offset {0} for {1}"
                              .format(offset, bp))
                    offsets_msgs = kafka.fetch(bp.topic,
                                               offset,
                                               partition=bp.partition,
                                               max_size=max_size)
                else:
                    raise
            except KafkaError as k_err:
                if ignore_failures:
                    log.error("Ignoring failed fetch on {0}".format(bp))
                    log.exception(k_err)
                    continue
                else:
                    raise
            msg_set = MessageSet(bp, offset, offsets_msgs)
            # Update running stats: fetches bytes messages max_fetch
            old_stats = self._stats[bp]
            self._stats[bp] = ConsumerStats(fetches=old_stats.fetches + 1,
                                            bytes=old_stats.bytes + msg_set.size,
                                            messages=old_stats.messages + len(msg_set),
                                            max_fetch=max(old_stats.max_fetch, msg_set.size))
            message_sets.append(msg_set)
        result = FetchResult(sorted(message_sets))
        # Now persist our new offsets
        for msg_set in result:
            self._bps_to_next_offsets[msg_set.broker_partition] = msg_set.next_offset
        if self._autocommit:
            self.commit_offsets()
        return result
def commit_offsets(self):
if self._bps_to_next_offsets:
self._zk_util.save_offsets_for(self.consumer_group,
self._bps_to_next_offsets)
def poll(self,
start_offsets=None,
end_offsets=None,
poll_interval=1,
max_size=None,
retry_limit=3):
"""FIXME: start/end, retry_limit"""
while True:
for msg_set in self.fetch(max_size=max_size):
yield msg_set
time.sleep(poll_interval)
def _create_consumer_id(self, consumer_group_id):
"""Create a Consumer ID in the same way Kafka's reference client does"""
hostname = platform.node()
ms_since_epoch = int(time.time() * 1000)
uuid_top_hex = uuid.uuid4().hex[:8]
consumer_uuid = "{0}-{1}-{2}".format(hostname, ms_since_epoch, uuid_top_hex)
return "{0}_{1}".format(consumer_group_id, consumer_uuid)
    def _register(self):
        """Register ourselves as a consumer in this consumer_group"""
        self._zk_util.register_consumer(self.consumer_group, self.id, self.topic)
        # self._zk_util.create_path_if_needed()
def rebalance(self):
"""Figure out which brokers and partitions we should be consuming from,
based on the latest information about the other consumers and brokers
that are present.
We registered for notifications from ZooKeeper whenever a broker or
consumer enters or leaves the pool. But we usually only rebalance right
before we're about to take an action like fetching.
The rebalancing algorithm is slightly different from that described in
the design doc (mostly in the sense that the design doc algorithm will
leave partitions unassigned if there's an uneven distributions). The
idea is that we split the partitions as evently as possible, and if
some consumers need to have more partitions than others, the extra
partitions always go to the earlier consumers in the list. So you could
have a distribution like 4-4-4-4 or 5-5-4-4, but never 4-4-4-5.
Rebalancing has special consequences if the Consumer is doing manual
commits (autocommit=False):
1. This Consumer will keep using the in memory offset state for all
BrokerPartitions that it was already following before the rebalance.
2. The offset state for any new BrokerPartitions that this Consumer is
responsible for after the rebalance will be read from ZooKeeper.
3. For those BrokerPartitions that this Consumer was reading but is no
longer responsible for after the rebalance, the offset state is
simply discarded. It is not persisted to ZooKeeper.
So there is no guarantee of single delivery in this circumstance. If
BrokerPartition 1-0 shifts ownership from Consumer A to Consumer B in
the rebalance, Consumer B will pick up from the last manual commit of
Consumer A -- *not* the offset that Consumer A was at when the rebalance
was triggered.
"""
log.info(("Rebalance triggered for Consumer {0}, broker partitions | |
<reponame>hopem/cinder
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import re
import socket
from oslo.config import cfg
from cinder.brick.iscsi import iscsi
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
LOG = logging.getLogger(__name__)

# Config options for the LVM driver; registered on the global CONF below
# and appended to each driver instance's configuration in __init__.
volume_opts = [
    cfg.StrOpt('volume_group',
               default='cinder-volumes',
               help='Name for the VG that will contain exported volumes'),
    cfg.StrOpt('volume_clear',
               default='zero',
               help='Method used to wipe old volumes (valid options are: '
                    'none, zero, shred)'),
    cfg.IntOpt('volume_clear_size',
               default=0,
               help='Size in MiB to wipe at start of old volumes. 0 => all'),
    cfg.StrOpt('pool_size',
               default=None,
               help='Size of thin provisioning pool '
                    '(None uses entire cinder VG)'),
    cfg.IntOpt('lvm_mirrors',
               default=0,
               help='If set, create lvms with multiple mirrors. Note that '
                    'this requires lvm_mirrors + 2 pvs with available space'),
]

CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
    """Executes commands relating to Volumes."""

    VERSION = '1.0'

    def __init__(self, *args, **kwargs):
        # Pull the LVM-specific options on top of the base driver config.
        super(LVMVolumeDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(volume_opts)
        self.hostname = socket.gethostname()

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
        # The configured volume group must already exist on this host.
        out, err = self._execute('vgs', '--noheadings', '-o', 'name',
                                 run_as_root=True)
        volume_groups = out.split()
        if self.configuration.volume_group not in volume_groups:
            exception_message = (_("volume group %s doesn't exist")
                                 % self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def _create_volume(self, volume_name, sizestr, vg=None):
        # Create an LV named volume_name of size sizestr (e.g. '10G') in vg.
        if vg is None:
            vg = self.configuration.volume_group
        # lvcreate failures that retrying cannot fix.
        no_retry_list = ['Insufficient free extents',
                         'One or more specified logical volume(s) not found']
        cmd = ['lvcreate', '-L', sizestr, '-n', volume_name, vg]
        if self.configuration.lvm_mirrors:
            cmd += ['-m', self.configuration.lvm_mirrors, '--nosync']
            terras = int(sizestr[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                #             http://red.ht/U2BPOD
                cmd += ['-R', str(rsize)]
        self._try_execute(*cmd, run_as_root=True, no_retry_list=no_retry_list)

    def _volume_not_present(self, volume_name):
        # lvdisplay fails (raising from _try_execute) when the LV is absent.
        path_name = '%s/%s' % (self.configuration.volume_group, volume_name)
        try:
            self._try_execute('lvdisplay', path_name, run_as_root=True)
        except Exception as e:
            # If the volume isn't present
            return True
        return False

    def _delete_volume(self, volume):
        """Deletes a logical volume."""
        # zero out old volumes to prevent data leaking between users
        # TODO(ja): reclaiming space should be done lazy and low priority
        dev_path = self.local_path(volume)
        if os.path.exists(dev_path):
            self.clear_volume(volume)
        self._try_execute('lvremove', '-f', "%s/%s" %
                          (self.configuration.volume_group,
                           self._escape_snapshot(volume['name'])),
                          run_as_root=True)

    def _sizestr(self, size_in_g):
        # lvcreate requires a non-zero size; use 100M for "0 GB" volumes.
        if int(size_in_g) == 0:
            return '100M'
        return '%sG' % size_in_g

    # Linux LVM reserves name that starts with snapshot, so that
    # such volume name can't be created. Mangle it.
    def _escape_snapshot(self, snapshot_name):
        if not snapshot_name.startswith('snapshot'):
            return snapshot_name
        return '_' + snapshot_name

    def create_volume(self, volume):
        """Creates a logical volume. Can optionally return a Dictionary of
        changes to the volume object to be persisted.
        """
        self._create_volume(volume['name'], self._sizestr(volume['size']))

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self._create_volume(volume['name'], self._sizestr(volume['size']))
        volutils.copy_volume(self.local_path(snapshot),
                             self.local_path(volume),
                             snapshot['volume_size'] * 1024,
                             execute=self._execute)

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        if self._volume_not_present(volume['name']):
            # If the volume isn't present, then don't attempt to delete
            return True
        # TODO(yamahata): lvm can't delete origin volume only without
        # deleting derived snapshots. Can we do something fancy?
        out, err = self._execute('lvdisplay', '--noheading',
                                 '-C', '-o', 'Attr',
                                 '%s/%s' % (self.configuration.volume_group,
                                            volume['name']),
                                 run_as_root=True)
        # fake_execute returns None resulting unit test error
        if out:
            out = out.strip()
            # An attr starting with 'o'/'O' marks an LV that still has
            # dependent snapshots, so it cannot be removed yet.
            if (out[0] == 'o') or (out[0] == 'O'):
                raise exception.VolumeIsBusy(volume_name=volume['name'])
        self._delete_volume(volume)

    def clear_volume(self, volume):
        """unprovision old volumes to prevent data leaking between users."""
        if self.configuration.volume_clear == 'none':
            return
        vol_path = self.local_path(volume)
        # Volumes carry 'size'; snapshots carry 'volume_size'.
        size_in_g = volume.get('size', volume.get('volume_size', None))
        if size_in_g is None:
            LOG.warning(_("Size for volume: %s not found, "
                          "skipping secure delete.") % volume['id'])
            return
        size_in_m = self.configuration.volume_clear_size

        LOG.info(_("Performing secure delete on volume: %s") % volume['id'])

        if self.configuration.volume_clear == 'zero':
            if size_in_m == 0:
                # Wipe the whole device with zeros.
                return volutils.copy_volume('/dev/zero',
                                            vol_path, size_in_g * 1024,
                                            sync=True,
                                            execute=self._execute)
            else:
                # Zero only the first size_in_m MiB via shred's zero pass.
                clear_cmd = ['shred', '-n0', '-z', '-s%dMiB' % size_in_m]
        elif self.configuration.volume_clear == 'shred':
            clear_cmd = ['shred', '-n3']
            if size_in_m:
                clear_cmd.append('-s%dMiB' % size_in_m)
        else:
            LOG.error(_("Error unrecognized volume_clear option: %s"),
                      self.configuration.volume_clear)
            return

        clear_cmd.append(vol_path)
        self._execute(*clear_cmd, run_as_root=True)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        orig_lv_name = "%s/%s" % (self.configuration.volume_group,
                                  snapshot['volume_name'])
        self._try_execute('lvcreate', '-L',
                          self._sizestr(snapshot['volume_size']),
                          '--name', self._escape_snapshot(snapshot['name']),
                          '--snapshot', orig_lv_name, run_as_root=True)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
            # If the snapshot isn't present, then don't attempt to delete
            LOG.warning(_("snapshot: %s not found, "
                          "skipping delete operations") % snapshot['name'])
            return True

        # TODO(yamahata): zeroing out the whole snapshot triggers COW.
        # it's quite slow.
        self._delete_volume(snapshot)

    def local_path(self, volume, vg=None):
        """Return the device-mapper path for a volume's LV."""
        if vg is None:
            vg = self.configuration.volume_group
        # NOTE(vish): stops deprecation warning
        # Device-mapper doubles hyphens in VG and LV names.
        escaped_group = vg.replace('-', '--')
        escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 self.local_path(volume))

    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        image_utils.upload_volume(context,
                                  image_service,
                                  image_meta,
                                  self.local_path(volume))

    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        LOG.info(_('Creating clone of volume: %s') % src_vref['id'])
        volume_name = src_vref['name']
        temp_id = 'tmp-snap-%s' % volume['id']
        # Snapshot the source so we copy a consistent point in time.
        temp_snapshot = {'volume_name': volume_name,
                         'size': src_vref['size'],
                         'volume_size': src_vref['size'],
                         'name': 'clone-snap-%s' % volume['id'],
                         'id': temp_id}
        self.create_snapshot(temp_snapshot)
        self._create_volume(volume['name'], self._sizestr(volume['size']))
        try:
            volutils.copy_volume(self.local_path(temp_snapshot),
                                 self.local_path(volume),
                                 src_vref['size'] * 1024,
                                 execute=self._execute)
        finally:
            # Always drop the temporary snapshot, even if the copy failed.
            self.delete_snapshot(temp_snapshot)

    def clone_image(self, volume, image_location):
        # Direct image cloning is not supported by this driver.
        return None, False

    def backup_volume(self, context, backup, backup_service):
        """Create a new backup from an existing volume."""
        volume = self.db.volume_get(context, backup['volume_id'])
        volume_path = self.local_path(volume)
        with utils.temporary_chown(volume_path):
            with fileutils.file_open(volume_path) as volume_file:
                backup_service.backup(backup, volume_file)

    def restore_backup(self, context, backup, volume, backup_service):
        """Restore an existing backup to a new or existing volume."""
        volume_path = self.local_path(volume)
        with utils.temporary_chown(volume_path):
            with fileutils.file_open(volume_path, 'wb') as volume_file:
                backup_service.restore(backup, volume['id'], volume_file)
class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"""Executes commands relating to ISCSI volumes.
We make use of model provider properties as follows:
``provider_location``
if present, contains the iSCSI target information in the same
format as an ietadm discovery
i.e. '<ip>:<port>,<portal> <target IQN>'
``provider_auth``
if present, contains a space-separated triple:
'<auth method> <auth username> <auth password>'.
`CHAP` is the only auth_method in use at the moment.
"""
    def __init__(self, *args, **kwargs):
        # Resolve the iSCSI target admin helper before the base class
        # initializes, since set_execute() needs self.tgtadm to exist.
        self.tgtadm = iscsi.get_target_admin()
        super(LVMISCSIDriver, self).__init__(*args, **kwargs)
    def set_execute(self, execute):
        """Propagate the command executor to both the base driver and the
        iSCSI target admin helper so they shell out the same way."""
        super(LVMISCSIDriver, self).set_execute(execute)
        self.tgtadm.set_execute(execute)
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
        # TODO(jdg): In the future move all of the dependent stuff into the
        # cooresponding target admin class
        if isinstance(self.tgtadm, iscsi.LioAdm):
            # LIO keeps CHAP credentials in the volume's provider_auth
            # field, formatted "<method> <user> <pass>".
            try:
                volume_info = self.db.volume_get(context, volume['id'])
                (auth_method,
                 auth_user,
                 auth_pass) = volume_info['provider_auth'].split(' ', 3)
                chap_auth = self._iscsi_authentication(auth_method,
                                                       auth_user,
                                                       auth_pass)
            except exception.NotFound:
                LOG.debug("volume_info:", volume_info)
                LOG.info(_("Skipping ensure_export. No iscsi_target "
                           "provision for volume: %s"), volume['id'])
                return

            iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                                   volume['name'])
            volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
                                          volume['name'])
            iscsi_target = 1

            self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
                                            0, volume_path, chap_auth,
                                            check_exit_code=False)
            return

        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            # ietadm-style admins need the target number from the DB.
            try:
                iscsi_target = self.db.volume_get_iscsi_target_num(
                    context,
                    volume['id'])
            except exception.NotFound:
                LOG.info(_("Skipping ensure_export. No iscsi_target "
                           "provisioned for volume: %s"), volume['id'])
                return
        else:
            iscsi_target = 1  # dummy value when using TgtAdm

        chap_auth = None

        # Check for https://bugs.launchpad.net/cinder/+bug/1065702
        old_name = None
        volume_name = volume['name']
        if (volume['provider_location'] is not None and
                volume['name'] not in volume['provider_location']):

            msg = _('Detected inconsistency in provider_location id')
            LOG.debug(msg)
            old_name = self._fix_id_migration(context, volume)
            if 'in-use' in volume['status']:
                # In-use volumes keep their old (stale) name; see bug above.
                volume_name = old_name
                old_name = None

        iscsi_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                               volume_name)
        volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
                                      volume_name)

        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
        # should clean this all up at some point in the future
        self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
                                        0, volume_path, chap_auth,
                                        check_exit_code=False,
                                        old_name=old_name)
def _fix_id_migration(self, context, volume):
"""Fix provider_location and dev files to address bug 1065702.
For volumes that the provider_location has NOT been updated
and are not currently in-use we'll create a new iscsi target
and remove the persist file.
If the volume is in-use, we'll just stick with the old | |
<reponame>hyperevo/py-helios-node<gh_stars>0
from cytoolz import (
identity,
)
from eth_typing import Hash32
from eth_utils import (
decode_hex,
encode_hex,
int_to_big_endian,
is_integer,
big_endian_to_int,
to_wei,
from_wei,
)
import time
from hvm.rlp.transactions import BaseReceiveTransaction
from helios.exceptions import BaseRPCError
from helios.rpc.constants import MAX_ALLOWED_AGE_OF_NEW_RPC_BLOCK
from helios.rpc.format import (
block_to_dict,
header_to_dict,
format_params,
to_int_if_hex,
transaction_to_dict,
receipt_to_dict,
receive_transactions_to_dict,
decode_hex_if_str,
receive_transaction_to_dict, connected_nodes_to_dict)
import rlp_cython as rlp
from helios.sync.common.constants import FULLY_SYNCED_STAGE_ID
from hvm.exceptions import (
CanonicalHeadNotFound,
HeaderNotFound,
TransactionNotFound,
)
from hvm.utils.blocks import does_block_meet_min_gas_price, get_block_average_transaction_gas_price
from hvm.types import Timestamp
#from hp2p.chain import NewBlockQueueItem
from eth_utils import is_hex_address, to_checksum_address
# Tell mypy to ignore this import as a workaround for https://github.com/python/mypy/issues/4049
from helios.rpc.modules import ( # type: ignore
RPCModule,
)
from hvm.constants import (
TIME_BETWEEN_HEAD_HASH_SAVE,
NUMBER_OF_HEAD_HASH_TO_SAVE
)
from hvm.utils.headers import (
compute_gas_limit,
)
from hvm.chains.base import BaseChain
from helios.rlp_templates.hls import P2PBlock
import asyncio
from typing import cast
from hp2p.events import NewBlockEvent, StakeFromBootnodeRequest, CurrentSyncStageRequest, \
CurrentSyncingParametersRequest, GetConnectedNodesRequest
from hvm.rlp.consensus import StakeRewardBundle
from hvm.vm.forks.helios_testnet.blocks import HeliosMicroBlock
def account_db_at_block(chain, chain_address, at_block):
    """Return the account DB for *chain_address* at the requested block.

    :param chain: chain object providing ``chaindb`` and ``get_vm``
    :param chain_address: wallet address identifying the chain
    :param at_block: ``'latest'`` or an integer block number
    :return: the VM's account_db positioned at the requested state
    """
    if at_block == 'latest':
        # Head state needs no revert: the VM is already at the newest account state.
        # (The previously computed-but-unused account_hash local was removed.)
        header = chain.chaindb.get_canonical_head(chain_address=chain_address)
        vm = chain.get_vm(header=header)
        return vm.state.account_db
    else:
        header = chain.chaindb.get_canonical_block_header_by_number(chain_address=chain_address, block_number=at_block)
        vm = chain.get_vm(header=header)
        # Roll the account state back to how it looked at that historical block.
        vm.state.account_db.revert_to_account_from_hash(header.account_hash, chain_address)
        return vm.state.account_db
class Hls(RPCModule):
'''
All the methods defined by JSON-RPC API, starting with "hls_"...
Any attribute without an underscore is publicly accessible.
'''
#
# Tools
#
async def ping(self) -> bool:
"""
Workaround for keepalive of ws connections in case it is not handled by the ws client.
"""
return True
async def accounts(self):
raise DeprecationWarning("This method has been moved to personal_listAccounts")
@format_params(decode_hex)
async def blockNumber(self, chain_address):
num = self._chain.get_canonical_head(chain_address).block_number
return hex(num)
async def gasPrice(self):
required_min_gas_price = self._chain.chaindb.get_required_block_min_gas_price()
return hex(required_min_gas_price)
@format_params(decode_hex, to_int_if_hex)
async def getBalance(self, address, at_block):
chain = self.get_new_chain(address)
if at_block == 'latest':
try:
header = chain.chaindb.get_canonical_head(address)
balance = header.account_balance
except CanonicalHeadNotFound:
balance = 0
else:
try:
header = chain.chaindb.get_canonical_block_header_by_number(at_block, address)
balance = header.account_balance
except CanonicalHeadNotFound:
try:
header = chain.chaindb.get_canonical_head(address)
balance = header.account_balance
except CanonicalHeadNotFound:
balance = 0
return hex(balance)
@format_params(decode_hex)
async def getBlockTransactionCountByHash(self, block_hash):
chain = self.get_new_chain()
try:
tx_count = chain.chaindb.get_number_of_total_tx_in_block(block_hash)
except HeaderNotFound:
raise BaseRPCError('No block found with the given block hash')
return hex(tx_count)
@format_params(to_int_if_hex, decode_hex)
async def getBlockTransactionCountByNumber(self, at_block, chain_address):
chain = self.get_new_chain()
try:
block_hash = chain.chaindb.get_canonical_block_hash(chain_address=chain_address, block_number=at_block)
tx_count = chain.chaindb.get_number_of_total_tx_in_block(block_hash)
except HeaderNotFound:
raise BaseRPCError('No block found with the given wallet address and block number')
return hex(tx_count)
@format_params(decode_hex, to_int_if_hex)
async def getCode(self, chain_address, at_block):
account_db = account_db_at_block(self._chain, chain_address, at_block)
code = account_db.get_code(chain_address)
return encode_hex(code)
@format_params(decode_hex, to_int_if_hex, to_int_if_hex)
async def getStorageAt(self, chain_address, position, at_block):
if not is_integer(position) or position < 0:
raise TypeError("Position of storage must be a whole number, but was: %r" % position)
account_db = account_db_at_block(self._chain, chain_address, at_block)
stored_val = account_db.get_storage(chain_address, position)
return encode_hex(int_to_big_endian(stored_val))
async def protocolVersion(self):
return hex(63)
async def syncing(self):
# Check our current syncing stage. If not sync stage 4, then we are syncing
current_sync_stage_response = await self._event_bus.request(
CurrentSyncStageRequest()
)
if current_sync_stage_response.sync_stage < FULLY_SYNCED_STAGE_ID:
return True
else:
return False
#
# Transactions
#
@format_params(decode_hex, to_int_if_hex)
async def getTransactionByBlockHashAndIndex(self, block_hash, index):
try:
tx = self._chain.get_transaction_by_block_hash_and_index(block_hash, index)
except HeaderNotFound:
raise BaseRPCError('No block found with the given block hash')
if isinstance(tx, BaseReceiveTransaction):
# receive tx
return receive_transaction_to_dict(tx, self._chain)
else:
# send tx
return transaction_to_dict(tx, self._chain)
@format_params(to_int_if_hex, to_int_if_hex, decode_hex)
async def getTransactionByBlockNumberAndIndex(self, at_block, index, chain_address):
try:
block_hash = self._chain.chaindb.get_canonical_block_hash(chain_address=chain_address,
block_number=at_block)
except HeaderNotFound:
raise BaseRPCError('No block found with the given chain address and block number')
tx = self._chain.get_transaction_by_block_hash_and_index(block_hash, index)
if isinstance(tx, BaseReceiveTransaction):
# receive tx
return receive_transaction_to_dict(tx, self._chain)
else:
# send tx
return transaction_to_dict(tx, self._chain)
@format_params(decode_hex, to_int_if_hex)
async def getTransactionCount(self, chain_address, at_block):
account_db = account_db_at_block(self._chain, chain_address, at_block)
nonce = account_db.get_nonce(chain_address)
return hex(nonce)
@format_params(decode_hex)
async def getTransactionByHash(self, tx_hash):
chain = self.get_new_chain()
try:
tx = chain.get_canonical_transaction(tx_hash)
except TransactionNotFound:
raise BaseRPCError("Transaction with hash {} not found on canonical chain.".format(encode_hex(tx_hash)))
if isinstance(tx, BaseReceiveTransaction):
return receive_transaction_to_dict(tx, chain)
else:
return transaction_to_dict(tx, chain)
@format_params(decode_hex)
async def getTransactionReceipt(self, tx_hash):
chain = self.get_new_chain()
receipt = chain.chaindb.get_transaction_receipt(tx_hash)
receipt_dict = receipt_to_dict(receipt, tx_hash, chain)
return receipt_dict
@format_params(decode_hex)
async def getReceivableTransactions(self, chain_address):
# create new chain for all requests
chain = self.get_new_chain(chain_address)
receivable_transactions = chain.create_receivable_transactions()
receivable_transactions_dict = receive_transactions_to_dict(receivable_transactions, chain)
return receivable_transactions_dict
@format_params(decode_hex)
async def getReceiveTransactionOfSendTransaction(self, tx_hash):
'''
Gets the receive transaction corresponding to a given send transaction, if it exists
'''
chain = self.get_new_chain()
receive_tx = chain.get_receive_tx_from_send_tx(tx_hash)
if receive_tx is not None:
receive_tx_dict = receive_transaction_to_dict(receive_tx, chain)
return receive_tx_dict
else:
raise BaseRPCError("No receive transaction found for the given send transaction hash")
#
# Gas system and network performance
#
async def getGasPrice(self):
required_min_gas_price = self._chain.chaindb.get_required_block_min_gas_price()
return hex(required_min_gas_price)
async def getHistoricalGasPrice(self):
historical_min_gas_price = self._chain.chaindb.load_historical_minimum_gas_price()
encoded = []
for timestamp_gas_price in historical_min_gas_price:
encoded.append([hex(timestamp_gas_price[0]), hex(timestamp_gas_price[1])])
return encoded
async def getApproximateHistoricalNetworkTPCCapability(self):
historical_tpc_cap = self._chain.chaindb.load_historical_network_tpc_capability()
encoded = []
for timestamp_tpc_cap in historical_tpc_cap:
encoded.append([hex(timestamp_tpc_cap[0]), hex(timestamp_tpc_cap[1])])
return encoded
async def getApproximateHistoricalTPC(self):
historical_tpc = self._chain.chaindb.load_historical_tx_per_centisecond()
encoded = []
for timestamp_tpc in historical_tpc:
encoded.append([hex(timestamp_tpc[0]), hex(timestamp_tpc[1])])
return encoded
#
# Blocks
#
@format_params(decode_hex, to_int_if_hex)
async def getBlockNumber(self, chain_address, before_timestamp = None):
chain = self.get_new_chain(chain_address)
if before_timestamp is None or before_timestamp == 'latest':
canonical_header = chain.chaindb.get_canonical_head(chain_address)
block_number = canonical_header.block_number
else:
# it will raise HeaderNotFound error if there isnt one before the timestamp. This is on purpose.
block_number = chain.chaindb.get_canonical_block_number_before_timestamp(before_timestamp, chain_address)
return hex(block_number)
@format_params(decode_hex)
async def getBlockCreationParams(self, chain_address):
#create new chain for all requests
chain = self.get_new_chain(chain_address)
to_return = {}
to_return['block_number'] = hex(chain.header.block_number)
to_return['parent_hash'] = encode_hex(chain.header.parent_hash)
vm = chain.get_vm(timestamp = int(time.time()))
to_return['nonce'] = hex(vm.state.account_db.get_nonce(chain_address))
receivable_transactions = chain.create_receivable_transactions()
encoded_receivable_transactions = []
for re_tx in receivable_transactions:
encoded_receivable_transactions.append(encode_hex(rlp.encode(re_tx)))
to_return['receive_transactions'] = encoded_receivable_transactions
reward_bundle = chain.get_consensus_db().create_reward_bundle_for_block(chain_address)
amount = reward_bundle.reward_type_1.amount + reward_bundle.reward_type_2.amount
to_return['reward_bundle'] = encode_hex(rlp.encode(reward_bundle, sedes = StakeRewardBundle))
return to_return
@format_params(decode_hex, identity)
async def getBlockByHash(self, block_hash: Hash32, include_transactions: bool = False):
chain = self.get_new_chain()
block = chain.get_block_by_hash(block_hash)
return block_to_dict(block, include_transactions, chain)
@format_params(to_int_if_hex, decode_hex, identity)
async def getBlockByNumber(self, at_block, chain_address, include_transactions: bool = False):
chain = self.get_new_chain(chain_address)
block = chain.get_block_by_number(at_block, chain_address=chain_address)
return block_to_dict(block, include_transactions, chain)
async def sendRawBlock(self, encoded_micro_block):
chain = self.get_new_chain()
encoded_micro_block = decode_hex(encoded_micro_block)
micro_block = rlp.decode(encoded_micro_block, sedes=chain.get_vm().micro_block_class)
block_class = self._chain_class.get_vm_class_for_block_timestamp(timestamp = micro_block.header.timestamp).get_block_class()
full_block = block_class.from_micro_block(micro_block)
min_time_between_blocks = chain.get_vm(header=full_block.header).min_time_between_blocks
# Validate the block here
if(full_block.header.timestamp < (int(time.time()) - MAX_ALLOWED_AGE_OF_NEW_RPC_BLOCK)):
raise BaseRPCError("The block timestamp is to old. We can only import new blocks over RPC.")
try:
canonical_head = chain.chaindb.get_canonical_head(full_block.header.chain_address)
if canonical_head.block_number >= full_block.header.block_number:
raise BaseRPCError("You are attempting to replace an existing block. This is not allowed.")
if full_block.header.timestamp < (canonical_head.timestamp + min_time_between_blocks):
raise BaseRPCError("Not enough time has passed for you to add a new block yet. New blocks can only be added to your chain every {} seconds".format(min_time_between_blocks))
except CanonicalHeadNotFound:
pass
if((full_block.header.block_number != 0) and
(not chain.chaindb.is_in_canonical_chain(full_block.header.parent_hash))):
raise BaseRPCError("Parent block not found on canonical chain.")
#Check our current syncing stage. Must be sync stage 4.
current_sync_stage_response = await self._event_bus.request(
CurrentSyncStageRequest()
)
if current_sync_stage_response.sync_stage < FULLY_SYNCED_STAGE_ID:
raise BaseRPCError("This node is still syncing with the network. Please wait until this node has synced.")
if not does_block_meet_min_gas_price(full_block, chain):
required_min_gas_price = self._chain.chaindb.get_required_block_min_gas_price()
raise Exception("Block transactions don't meet the minimum gas price requirement of {}".format(required_min_gas_price))
self._event_bus.broadcast(
NewBlockEvent(block=cast(P2PBlock, full_block), from_rpc=True)
)
return True
#
# Block explorer
#
    @format_params(to_int_if_hex, to_int_if_hex, decode_hex_if_str, decode_hex_if_str, identity)
    async def getNewestBlocks(self, num_to_return = 10, start_idx = 0, after_hash = b'', chain_address = b'', include_transactions: bool = False):
        '''
        Return a list of block dicts, newest first, for the block explorer.

        :param num_to_return: maximum number of blocks to return (hard-capped at 10)
        :param start_idx: how many of the newest blocks to skip before collecting
        :param after_hash: stop (exclusive) once a block with this hash is reached
        :param chain_address: if given, walk that single chain newest-to-oldest;
            otherwise walk the chronological block windows across all chains
        :param include_transactions: include full transaction dicts in each block
        :return: list of block dicts (possibly empty)
        '''
        # None means "use the default" when params were omitted by the caller.
        if num_to_return is None:
            num_to_return = 10
        if start_idx is None:
            start_idx = 0
        # Never return more than 10 blocks per call.
        num_to_return = min([10, num_to_return])
        block_dicts_to_return = []
        if chain_address != b'' and chain_address is not None:
            # Single-chain mode: walk block numbers down from (head - start_idx).
            chain = self.get_new_chain(chain_address)
            try:
                canonical_header = chain.chaindb.get_canonical_head(chain_address)
                start = canonical_header.block_number-start_idx
                if start >= 0:
                    # Iterate down to `end` exclusive; -1 floor covers reaching block 0.
                    end = max([-1, start-num_to_return])
                    for i in range(start, end, -1):
                        block = chain.get_block_by_number(i, chain_address)
                        if block.hash == after_hash:
                            break
                        block_dicts_to_return.append(block_to_dict(block, include_transactions, chain))
            except CanonicalHeadNotFound:
                # Chain doesn't exist yet: nothing to show.
                return []
        else:
            # All-chains mode: walk saved chronological windows, newest window first.
            chain = self.get_new_chain()
            at_block_index = -1
            # Round "now" down to the start of the current head-hash-save window.
            current_window = int(time.time() / TIME_BETWEEN_HEAD_HASH_SAVE) * TIME_BETWEEN_HEAD_HASH_SAVE
            for timestamp in range(current_window, current_window-(NUMBER_OF_HEAD_HASH_TO_SAVE*TIME_BETWEEN_HEAD_HASH_SAVE), -1*TIME_BETWEEN_HEAD_HASH_SAVE):
                chronological_blocks = chain.chain_head_db.load_chronological_block_window(Timestamp(timestamp))
                if chronological_blocks is None:
                    continue
                # Window entries are oldest-first; reverse so we emit newest-first.
                chronological_blocks.reverse()
                for block_timestamp_block_hash in chronological_blocks:
                    at_block_index += 1
                    # Skip the first start_idx blocks across all windows.
                    if at_block_index < start_idx:
                        continue
                    # Entry is a (timestamp, block_hash) pair; index 1 is the hash.
                    block = chain.get_block_by_hash(block_timestamp_block_hash[1])
                    if block.hash == after_hash:
                        return block_dicts_to_return
                    block_dicts_to_return.append(block_to_dict(block, include_transactions, chain))
                    if len(block_dicts_to_return) >= num_to_return:
                        return block_dicts_to_return
        return block_dicts_to_return
#
# Network status information
#
async def getConnectedNodes(self):
get_connected_nodes_response = await self._event_bus.request(
GetConnectedNodesRequest()
)
get_connected_nodes_response = get_connected_nodes_response.connected_nodes
| |
<reponame>JoyChen1998/Network_PacketCapture<gh_stars>1-10
# encoding = utf-8
import socket
import time
import psutil
from struct import *
from multiprocessing import Pool
__AUTHOR__ = 'JoyChan'
__REPO__ = "https://github.com/JoyChen1998/Network_PacketCapture"
# ---* CONFIG *---
INTERVAL = 1  # seconds slept between captured frames (see unpack_eth_packet)
HAVE_SAVED = False  # when True, parsed packets/fragments are appended to *_packet.txt / merge.*.txt files
HAVE_FILTER_PROTOCOL = False  # when True, only protocol numbers in protocol_filter_list are parsed
HAVE_FILTER_IP = False  # NOTE(review): not referenced in visible code; IP filtering keys off the list contents below
__VERSION__ = '1.3.5'
# ---* CONFIG *---
# Shared mutable state used by Sniffer instances:
ackn = []  # accumulated HTTP payload text, parallel (by index) to Sniffer.ack
protocol_filter_list = []  # protocol numbers to keep when HAVE_FILTER_PROTOCOL is True
source_ip_filter_list = []  # allowed source addresses; empty list disables the check
destination_ip_filter_list = []  # allowed destination addresses; empty list disables the check
allows_protocol = ['TCP', 'ICMP', 'UDP']  # for transfer protocol number to protocol name
class Sniffer:
def __init__(self):
'''
do basically set
'''
global protocol_filter_list
global source_ip_filter_list
global destination_ip_filter_list
self.s = None
self.filter_proto = protocol_filter_list
self.filter_in_ip = source_ip_filter_list
self.filter_out_ip = destination_ip_filter_list
self.cnt = 1 # for count packet
self.cnt_merge = 1 # for merge count
self.ack = []
self.Packet_MAC = {
'Source MAC': None,
'Destination MAC': None
}
self.Packet_IP = {
'Version': None,
'IP Header Length': None,
'Differ Service': None,
'All Length': None,
'Identification': None,
'DF': None,
'MF': None,
'Offset': None,
'TTL': None,
'Protocol': None,
'Source Address': None,
'Destination Address': None
}
self.Packet_UDP = {
'Source_port': None,
'Dest_port': None,
'Length': None,
'Checksum': None,
'Data_seg': None,
'Data_length': None
}
self.Packet_TCP = {
'Source_port': None,
'Dest_port': None,
'Sequence': None,
'Acknowledgement': None,
'TCP Header Length': None,
'Data_seg': None,
'Data_length': None
}
self.Packet_ICMP = {
'Type': None,
'Code': None,
'Checksum': None,
'Data_seg': None,
'Data_length': None
}
@staticmethod
def get_netcard():
netcard_info = []
info = psutil.net_if_addrs()
for k, v in info.items():
for item in v:
if item[0] == 2 and not item[1] == '127.0.0.1':
netcard_info.append((k, item[1]))
return netcard_info
@staticmethod
def eth_addr(a):
b = "%.2x-%.2x-%.2x-%.2x-%.2x-%.2x" % (a[0], a[1], a[2], a[3], a[4], a[5])
return b
@staticmethod
def convert_hex_to_ascii(data):
tmp = ""
try:
tmp = data.decode().encode("utf-8").decode("utf-8")
except:
for j in range(0, len(data)):
tmp += chr(int("%.2x" % data[j], 16))
return tmp
    def record_http_msg(self, data, acknowledge):
        """Append *data* to the HTTP stream matching *acknowledge* and rewrite the log.

        Streams are matched by TCP acknowledgement number: ``self.ack`` holds
        the ack numbers seen so far and the module-level ``ackn`` list holds
        the accumulated payload text at the same index.

        :param data: decoded TCP payload text
        :param acknowledge: acknowledgement number of the packet's TCP stream
        """
        for index in range(0, len(self.ack)):
            if str(acknowledge) == str(self.ack[index]):
                ackn[index] += data
        if len(self.ack) > 0:
            # 'w' mode rewrites the whole file on every call, so the file
            # always reflects the full capture so far.
            with open('HTTP_record.txt', 'w') as f:
                f.write('** http record started **\n')
                for index_i in ackn:
                    f.write('new http packet data\n')
                    f.write(index_i + '\n')
                    f.write('*'*40 + '\n')
                f.write('\n** http record ended **\n')
                f.close()  # redundant inside `with`, kept as-is
@staticmethod
def get_flag(e):
f = bin(int(e[0], 16))[2:]
o = '0' * (4 - len(f)) + f
return o[1:3]
@staticmethod
def get_offset(e):
f = bin(int(e[0], 16))[2:]
o = '0' * (4 - len(f)) + f
return int(o[3] + e[1:], 16)
@staticmethod
def change_digit_to_word(protocol):
protocols = {
'0': 'IP',
'1': 'ICMP',
'6': 'TCP',
'17': 'UDP'
}
return protocols[str(protocol)]
    def soc_establish_conn(self):
        '''
        Create the raw capture socket and start the (blocking) capture loop.
        :return: nil
        '''
        '''
        (You can skip it)
        Note on `socket.AF_INET` vs `socket.AF_PACKET`:
        with `socket.AF_INET` a raw socket only captures one protocol (TCP,
        UDP, or ICMP), which would have required a multiprocessing Pool to
        cover them all. `socket.AF_PACKET` instead captures every Ethernet
        frame received or sent by the host, so all protocols arrive through a
        single socket and no multi-process handling is needed.
        '''
        try:
            # self.s = socket.socket(socket.AF_INET, socket.SOCK_RAW, self.param)
            self.s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0003))  # 0x0003 = ETH_P_ALL: every protocol
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt; the
            # usual failure here is missing root/CAP_NET_RAW privileges.
            print('Socket could not be created')
            exit(-1)
        print('Socket established success!')
        # Never returns: enters the infinite capture loop.
        self.unpack_eth_packet()
    def unpack_eth_packet(self):
        """Receive frames forever, parse the Ethernet header, and pass IPv4 frames on.

        Blocks in ``recvfrom``; only ether-type 0x0800 (IPv4, which is 8 after
        ``ntohs``) is parsed further. Sleeps ``INTERVAL`` seconds per frame.
        """
        while True:
            packet = self.s.recvfrom(65565)
            # recvfrom returns (bytes, address); only the raw bytes are needed.
            packet = packet[0]
            # Parse ethernet header: 6B dest MAC + 6B src MAC + 2B ether-type.
            eth_length = 14
            eth_header = packet[: eth_length]
            eth = unpack('!6s6sH', eth_header)
            eth_protocol = socket.ntohs(eth[2])  # Convert a 16-bit integer from network to host byte order.
            source_eth_addr = self.eth_addr(packet[6:12])
            dest_eth_addr = self.eth_addr(packet[0:6])
            self.Packet_MAC['Source MAC'] = source_eth_addr
            self.Packet_MAC['Destination MAC'] = dest_eth_addr
            if eth_protocol == 8:
                # IPv4 payload starts right after the Ethernet header.
                self.unpack_ip_packet(packet, eth_length)
            # Throttle the capture rate.
            time.sleep(INTERVAL)
    def unpack_ip_packet(self, packet, eth_len):
        '''
        Parse the IPv4 header, apply IP filters, optionally save fragments,
        and dispatch the payload to the matching per-protocol unpacker.

        :param eth_len: byte offset where the IP header starts (length of the MAC frame header)
        :param packet: the raw captured frame
        :return: nil & just return
        '''
        # Parse IP header
        # take first 20 bytes for the (option-less) ip header
        self.cnt += 1
        ip_header = packet[eth_len: eth_len + 20]
        # ip packet unpack
        iph = unpack('!BBHH2sBBH4s4s', ip_header)
        version_ihl = iph[0]
        differ_service = iph[1]
        # High nibble = IP version, low nibble = header length in 32-bit words.
        version = version_ihl >> 4
        ihl = version_ihl & 0xF
        iph_lenth = ihl * 4
        all_lenth = iph[2]
        id = iph[3]
        # Flags + fragment offset share two bytes; parsed from their hex form.
        flag_N_offset = iph[4].hex()
        flags = self.get_flag(flag_N_offset)
        MF = flags[1]
        DF = flags[0]
        offst = self.get_offset(flag_N_offset)
        ttl = iph[5]
        protocol = iph[6]
        self.Packet_IP['Version'] = version
        self.Packet_IP['Differ Service'] = differ_service
        self.Packet_IP['IP Header Length'] = ihl
        self.Packet_IP['All Length'] = all_lenth
        self.Packet_IP['Identification'] = id
        self.Packet_IP['MF'] = MF
        self.Packet_IP['DF'] = DF
        self.Packet_IP['Offset'] = offst
        self.Packet_IP['TTL'] = ttl
        self.Packet_IP['Protocol'] = self.change_digit_to_word(protocol)
        self.Packet_IP['Source Address'] = socket.inet_ntoa(iph[8])
        self.Packet_IP['Destination Address'] = socket.inet_ntoa(iph[9])
        # Filter by source/destination IP; an empty filter list disables the check.
        if len(self.filter_in_ip) > 0 and self.Packet_IP['Source Address'] not in self.filter_in_ip:
            return
        if len(self.filter_out_ip) > 0 and self.Packet_IP['Destination Address'] not in self.filter_out_ip:
            return
        new_length = iph_lenth + eth_len # upgrade packet parser start length
        # Save fragment payloads so fragmented packets can be merged later.
        if HAVE_SAVED and DF == '0':
            if MF == '1':
                # More fragments follow: append to the current merge file.
                with open('merge.'+str(self.cnt_merge)+'.txt', 'a') as f:
                    f.write('packet_cnt=' + str(self.cnt) + '\noffset=' + str(offst) + '\ndata=' + str(packet[new_length + 20:]) + '\n')
                    f.close()
            elif MF == '0':
                # Last fragment: write it, then start a new merge file.
                with open('merge.'+str(self.cnt_merge)+'.txt', 'a') as f:
                    f.write('packet_cnt=' + str(self.cnt) + '\noffset=' + str(offst) + '\ndata=' + str(packet[new_length + 20:]) + '\n')
                    f.close()
                self.cnt_merge += 1
        # Classify different kinds of packet (6=TCP, 17=UDP, 1=ICMP).
        if HAVE_FILTER_PROTOCOL:
            # Only parse protocols explicitly listed in protocol_filter_list.
            if protocol == 6 and protocol in protocol_filter_list:
                self.unpack_tcp_packet(new_length, packet)
            elif protocol == 17 and protocol in protocol_filter_list:
                self.unpack_udp_packet(new_length, packet)
            elif protocol == 1 and protocol in protocol_filter_list:
                self.unpack_icmp_packet(new_length, packet)
            else:
                return
        else:
            if protocol == 6:
                self.unpack_tcp_packet(new_length, packet)
            elif protocol == 17:
                self.unpack_udp_packet(new_length, packet)
            elif protocol == 1:
                self.unpack_icmp_packet(new_length, packet)
            else:
                print('This Packe\'s Protocol is not in [ TCP , ICMP , UDP ]')
                print()
    def unpack_tcp_packet(self, iph_lenth, packet):
        '''
        Parse the TCP header and payload, record HTTP streams, optionally save
        to file, and print the parsed fields.

        :param iph_lenth: byte offset of the TCP header (MAC frame header + ip header)
        :param packet: the raw captured frame
        :return: nil
        '''
        tcp_header = packet[iph_lenth:iph_lenth + 20]
        tcph = unpack('!HHLLBBHHH', tcp_header)
        source_port = tcph[0]
        dest_port = tcph[1]
        sequence = tcph[2]
        acknowledgement = tcph[3]
        doff_reserved = tcph[4]
        # High nibble = data offset (header length) in 32-bit words.
        tcph_length = doff_reserved >> 4
        h_size = iph_lenth + tcph_length * 4
        data_size = len(packet) - h_size
        # TCP Packet's data segment
        data = self.convert_hex_to_ascii(packet[h_size:])
        # Track HTTP streams by acknowledgement number (see record_http_msg).
        if "HTTP" in data:
            self.ack.append(acknowledgement)
            ackn.append('')
        if acknowledgement in self.ack:
            self.record_http_msg(data, acknowledgement)
        self.Packet_TCP['Source_port'] = source_port
        self.Packet_TCP['Dest_port'] = dest_port
        self.Packet_TCP['Sequence'] = sequence
        self.Packet_TCP['Acknowledgement'] = acknowledgement
        self.Packet_TCP['TCP Header Length'] = tcph_length
        self.Packet_TCP['Data_seg'] = data
        self.Packet_TCP['Data_length'] = data_size
        if HAVE_SAVED:
            # Append the MAC, IP and TCP fields of this packet to the log file.
            with open('tcp_packet.txt', 'a') as f:
                f.write('----- packet - index: ' + str(self.cnt) + ' -----\n')
                for key, value in self.Packet_MAC.items():
                    f.write(key + ':' + str(value) + '\n')
                f.write('\n')
                for key, value in self.Packet_IP.items():
                    f.write(key + ':' + str(value) + '\n')
                f.write('\n')
                for key, value in self.Packet_TCP.items():
                    f.write(key + ':' + str(value) + '\n')
                f.write('\n***************************\n\n')
        # Mirror the same fields to stdout.
        print('----- packet - index: ', str(self.cnt), ' -----')
        for key, value in self.Packet_MAC.items():
            print(key, ':', value)
        print()
        for key, value in self.Packet_IP.items():
            print(key, ':', value)
        print()
        for key, value in self.Packet_TCP.items():
            print(key, ':', value)
        print()
        print('*' * 35)
        print()
def unpack_udp_packet(self, iph_lenth, packet):
'''
this function is to unpack the udp packet
:param iph_lenth: the header include ` MAC frame header & ip header`
:param packet: the packet that needed to packet
:return: nil
'''
udph_length = 8
udp_header = packet[iph_lenth:iph_lenth + 8]
udph = unpack('!HHHH', udp_header)
source_port = udph[0]
dest_port = udph[1]
length = udph[2]
checksum = udph[3]
h_size = iph_lenth + udph_length
data_size = len(packet) - h_size
data = self.convert_hex_to_ascii(packet[h_size:])
self.Packet_UDP['Source_port'] = source_port
self.Packet_UDP['Dest_port'] = dest_port
self.Packet_UDP['Length'] = length
self.Packet_UDP['Checksum'] = checksum
self.Packet_UDP['Data_seg'] = data
self.Packet_UDP['Data_length'] = data_size
if HAVE_SAVED:
with open('udp_packet.txt', 'a') as f:
f.write('----- packet - index: ' + str(self.cnt) + ' -----\n')
for key, value in self.Packet_MAC.items():
f.write(key + ':' + str(value) + '\n')
| |
<reponame>koreywylie/networkZoo<gh_stars>0
#!/usr/bin/env python
"""
networkZoo.py
"""
# Python Libraries
from os.path import join as opj # method to join strings of file paths
import getopt # used to parse command-line input
import os, sys, re, json, csv
from functools import partial
from string import digits
from numbers import Number
# Qt GUI Libraries
from PyQt5 import QtWidgets
from PyQt5 import QtCore
from PyQt5.QtCore import QObject, pyqtSignal, Qt, QThread, QRectF
from PyQt5.QtGui import QColor, QPixmap, QPainter
# Mathematical/Neuroimaging/Plotting Libraries
import numpy as np
from nilearn import plotting, image, input_data # library for neuroimaging
from nilearn import masking
from scipy.ndimage import binary_dilation #used to smooth edges of binary masks
from nibabel.nifti1 import Nifti1Image, Nifti1Pair
from nibabel.affines import apply_affine
import nipype.interfaces.io as nio
import matplotlib.pyplot as plt # Plotting library
import matplotlib.gridspec as gridspec
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure as Figure
# Get location of this script, so sibling package dirs can be put on sys.path.
if ('__file__' in locals()) or ('__file__' in globals()):
    mypath = os.path.dirname(os.path.abspath(__file__))
else:
    # Interactive session (no __file__): fall back to the working directory.
    mypath = os.getcwd()
sys.path.append(mypath) # include dirs. needed for "Internal imports"
sys.path.append(opj(mypath, 'config_settings'))
sys.path.append(opj(mypath, 'functions'))
sys.path.append(opj(mypath, 'gui'))
# Backup configuration settings, if needed
from config_settings_backup import default_configuration as config_backup
# Internal imports
import zoo_MainWin # Qt GUI for pyqt5 window setup
import zoo_InputFileHandling as io # fns. to handle importing files
import zoo_SelectionWin as select # Qt GUI for selecting items from list
import zoo_Mapper as map # Qt GUI + progress bar, for numpy correlations between spatial maps
import zoo_ImageSaver as saver # Qt GUI + progress bar, for saving display & creating output images
import zoo_Preferences as prefs # Qt GUI dialog to tweak program settings & save
import zoo_DisplayOpts as disp # Qt GUI dialog to tweak display & plot settings
import zoo_OutputOpts as outparams # Qt GUI dialog to tweak mask creation & output settings
import zoo_About as about # Qt window for about info
import zoo_Tutorial as tutorial # Qt window for Step-by-step tutorial
import zoo_MaskMaker as masks # fns. to create binary masks
# Selectively suppress _expected_ irrelevant warnings.
import warnings
# Change of datatypes in nilearn.
warnings.filterwarnings('ignore', '.*Casting data from int32 to float32.*')
warnings.filterwarnings('ignore', '.*Casting data from int8 to float32.*')
# Imprecise tight layout in matplotlib.
warnings.filterwarnings('ignore', '.*This figure includes Axes that are not compatible.*')
# NaN possible in masks.
warnings.filterwarnings('ignore', '.*converting a masked element to nan.*')
# Poor NaN handling in nilearn.
warnings.filterwarnings('ignore', '.*invalid value encountered in greater.*')
# Binary ROI/ICN masks do not have contour levels when plotted in matplotlib.
warnings.filterwarnings('ignore', '.*No contour levels were found.*')
# Cannot thread changing selected ICA/ICN Qt list widget items.
warnings.filterwarnings('ignore',
                        ".*QObject::connect: Cannot queue arguments of type 'QItemSelection'.*")
class NetworkZooGUI(QtWidgets.QMainWindow, zoo_MainWin.Ui_MainWindow):
"""
Main NetworkZoo GUI, for classifying ICA comp. spatial maps based on ICN template vols
"""
def __init__(self, configuration_file=None):
super(self.__class__, self).__init__() # Runs the initialization of the base classes
self.setupUi(self) # created in QT Designer; output w/ pyuic5, see zoo_MainWin.py for class
self.reset_analysis()
# Data containers
self.gd = {} # gui data;
# ...where gd[class][unique_name][file_path, nilearn image object]
self.gd = {'ica' : {}, 'icn' : {}, 'mapped' : {},
'mapped_ica' : {}, 'mapped_icn' : {}}
self.corrs = {} # dict of correlations, indexed by ic name
self.matches = {} # dict of top matches, indexed by ic name
self.reference_img = None # referrence nii vol. w/ smallest dimensions
# Configuration file defaults
if ('mypath' not in locals()) and ('mypath' not in globals()):
if '__file__' in locals():
script_path = os.path.dirname(os.path.abspath(__file__))
else:
script_path = os.getcwd()
else:
script_path = mypath # needed to pass var. to fn. below
config_file = 'config_settings/config.json'
if configuration_file is not None:
if os.path.isfile(configuration_file):
config_file = configuration_file
self.load_configuration(config_file, script_path, config_backup)
# Setup display based on config settings
if hasattr(self, 'config'):
if 'allow_multiclassifications' in self.config['ica'].keys():
state = self.config['ica']['allow_multiclassifications']
self.action_AllowICAMulticlassifications.setChecked(state)
if state:
self.action_FindDuplicateICAClassifications.setEnabled(True)
if 'display' not in self.config.keys():
self.config.update({'display': {}})
if 'mri_plots' not in self.config['display'].keys():
self.config['display'].update({'mri_plots': {}})
if 'global' not in self.config['display']['mri_plots'].keys():
self.config['display']['mri_plots'].update({'global': {}})
if 'display_mode' not in self.config['display']['mri_plots']['global'].keys():
self.config['display']['mri_plots']['global'].update({'display_mode': ''})
else:
displayLayout = self.config['display']['mri_plots']['global']['display_mode']
if displayLayout == 'ortho':
self.pushButton_showMaxOverlap.setEnabled(True)
self.radioButton_ortho.setChecked(True)
self.radioButton_axial.setChecked(False)
self.radioButton_coronal.setChecked(False)
self.radioButton_sagittal.setChecked(False)
elif displayLayout == 'axial':
self.pushButton_showMaxOverlap.setEnabled(False)
self.radioButton_ortho.setChecked(False)
self.radioButton_axial.setChecked(True)
self.radioButton_coronal.setChecked(False)
self.radioButton_sagittal.setChecked(False)
elif displayLayout == 'coronal':
self.pushButton_showMaxOverlap.setEnabled(False)
self.radioButton_ortho.setChecked(False)
self.radioButton_axial.setChecked(False)
self.radioButton_coronal.setChecked(True)
self.radioButton_sagittal.setChecked(False)
elif displayLayout == 'sagittal':
self.pushButton_showMaxOverlap.setEnabled(False)
self.radioButton_ortho.setChecked(False)
self.radioButton_axial.setChecked(False)
self.radioButton_coronal.setChecked(False)
self.radioButton_sagittal.setChecked(True)
# Setup Input fns.
self.io = io.InputHandling(self.gd, self.config, self.corrs,
self.listWidget_ICAComponents,
self.listWidget_ICNtemplates,
self.listWidget_Classifications)
# Load default files
self.io.configure_ICs() # loads anatomical MRI, ICN templates, etc.
# Setup corr. fns.
self.mapper = map.Mapper(in_files=self.get_imgs('ica'),
in_filenames=self.get_img_names('ica'),
map_files=self.get_imgs('icn'),
map_filenames=self.get_img_names('icn'),
corrs=self.corrs)
# Setup non-Qt display defaults
if hasattr(self, 'config'):
if 'display' in self.config.keys():
if 'mri_plots' in self.config['display'].keys():
self.mp = self.config['display']['mri_plots'].copy()
if 'time_plots' in self.config['display'].keys():
self.tp = self.config['display']['time_plots'].copy()
if type(self.mp['global']['display_text_size']) not in (int, float, 'x-large'):
self.mp['global']['display_text_size'] = plt.rcParams['font.size'] * 1.44
# Qt set-up for spatial & time data displays
ANATOMICAL_TO_TIMESERIES_PLOT_RATIO = 5
anat_sp = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Preferred)
anat_sp.setVerticalStretch(ANATOMICAL_TO_TIMESERIES_PLOT_RATIO)
ts_sp = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Preferred)
ts_sp.setVerticalStretch(1)
# figure for spatial data
self.figure_x = plt.figure()
self.canvas_x = FigureCanvas(self.figure_x)
self.verticalLayout_plot.addWidget(self.canvas_x)
self.canvas_x.setSizePolicy(anat_sp)
# change MNI coordinates on click
self.canvas_x.mpl_connect('button_release_event', self.figure_x_onClick)
# figure for time & frequency data
self.figure_t = plt.figure()
self.canvas_t = FigureCanvas(self.figure_t)
self.verticalLayout_plot.addWidget(self.canvas_t)
self.canvas_t.setSizePolicy(ts_sp)
# connections for menu items
self.action_LoadAnalysis.triggered.connect(self.load_analysis)
self.action_SaveAnalysis.triggered.connect(self.save_analysis)
self.action_SaveAnalysisAs.triggered.connect(self.save_analysis_as)
self.action_ResetAnalysis.triggered.connect(partial(self.reset_analysis,
clear_lists=True,
clear_display=True,
warn=True))
self.action_runAnalysis.triggered.connect(self.run_analysis)
self.action_ResetDisplay.triggered.connect(self.reset_display)
self.action_SaveDisplay.triggered.connect(partial(saver.ImageSaver.save_display,
self.figure_x,
figure_t=self.figure_t,
fname=None,
output_dir=self.config['base_directory']))
self.action_Quit.triggered.connect(self.quit_gui)
self.action_EditPreferrences.triggered.connect(self.edit_preferrences)
self.action_EditDisplayOptions.triggered.connect(self.edit_display)
self.action_EditOutputParams.triggered.connect(self.edit_output_opts)
self.action_LoadICAcomps.triggered.connect(self.io.browse_ica_files)
self.action_LoadICAtimeseries.triggered.connect(partial(self.io.load_ica_timeseries,
prompt_fileDialog=True))
self.action_RenameICAlist_select.triggered.connect(partial(self.rename_list_select,
list_name='ica',
listWidget=self.listWidget_ICAComponents))
self.action_ClearICAlist_select.triggered.connect(partial(self.clear_list_select,
list_name='ica',
listWidget=self.listWidget_ICAComponents))
self.action_ClearICAlist_all.triggered.connect(partial(self.clear_list_all,
list_name='ica',
listWidget=self.listWidget_ICAComponents))
self.action_LoadICNtemplates.triggered.connect(self.io.browse_icn_files)
self.action_LoadNoisetemplates.triggered.connect(self.io.load_noise_templates)
self.action_RenameICNtemplates_select.triggered.connect(partial(self.rename_list_select,
list_name='icn',
listWidget=self.listWidget_ICNtemplates))
self.action_ClearICNlist_select.triggered.connect(partial(self.clear_list_select,
list_name='icn',
listWidget=self.listWidget_ICNtemplates))
self.action_ClearICNlist_all.triggered.connect(partial(self.clear_list_all,
list_name='icn',
listWidget=self.listWidget_ICNtemplates))
self.action_RenameClassifications_select.triggered.connect(partial(self.rename_list_select,
list_name='mapped',
listWidget=self.listWidget_Classifications))
self.action_AllowICAMulticlassifications.triggered.connect(self.allow_ica_multiClass)
self.action_FindDuplicateICAClassifications.triggered.connect(partial(self.find_duplicate_mappings,
duplicated_name='ica'))
self.action_FindDuplicateICNClassifications.triggered.connect(partial(self.find_duplicate_mappings,
duplicated_name='icn'))
self.action_FindProbableClassifications.triggered.connect(self.find_probable_classifications)
self.action_FindQuestionableClassifications.triggered.connect(self.find_questionable_classifications)
self.action_ClearClassifications_select.triggered.connect(partial(self.clear_list_select,
list_name='mapped',
listWidget=self.listWidget_Classifications))
self.action_ClearClassifications_all.triggered.connect(partial(self.clear_list_all,
list_name='mapped',
listWidget=self.listWidget_Classifications))
self.action_createOutput.triggered.connect(self.create_FiguresAndTables)
self.action_createBinaryMasks.triggered.connect(self.create_Masks)
self.action_ShowAboutInfo.triggered.connect(self.show_about)
self.action_ShowStepByStepTutorial.triggered.connect(self.show_tutorial)
self.action_LoadDemoICAcomps.triggered.connect(self.load_demoICA)
# Connections for buttons & lists
self.buttonGroup_xview.buttonReleased.connect(self.change_display_layout)
self.pushButton_showMaxOverlap.clicked.connect(self.update_plots)
self.pushButton_icaload.clicked.connect(self.io.browse_ica_files)
self.pushButton_icnload.clicked.connect(self.io.browse_icn_files)
self.listWidget_ICAComponents.itemClicked.connect(self.update_gui_ica)
self.listWidget_ICNtemplates.itemClicked.connect(self.update_gui_icn)
self.listWidget_Classifications.itemClicked.connect(self.update_gui_classifications)
self.pushButton_addClassification.clicked.connect(self.add_Classification)
self.pushButton_rmClassification.clicked.connect(self.delete_Classification)
self.pushButton_runAnalysis.clicked.connect(self.run_analysis)
self.pushButton_createOutput.clicked.connect(self.create_FiguresAndTables)
self.horizontalSlider_Xslice.sliderReleased.connect(self.update_plots_from_sliders)
self.horizontalSlider_Yslice.sliderReleased.connect(self.update_plots_from_sliders)
self.horizontalSlider_Zslice.sliderReleased.connect(self.update_plots_from_sliders)
self.buttonGroup_xview.buttonReleased.connect(self.update_plots)
self.listWidget_ICAComponents.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listWidget_ICAComponents.clearSelection()
self.listWidget_ICAComponents.setCurrentRow(-1)
self.listWidget_ICNtemplates.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listWidget_ICNtemplates.clearSelection()
self.listWidget_ICNtemplates.setCurrentRow(-1)
self.listWidget_Classifications.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
self.listWidget_Classifications.clearSelection()
self.listWidget_Classifications.setCurrentRow(-1)
# Final startup notices
warning_flag = False
if not hasattr(self, 'config'):
warning_flag = True
elif 'smri_file' not in self.config.keys():
warning_flag = True
elif not os.path.isfile(self.config['smri_file']):
warning_flag = True
if warning_flag:
title = "Anatomical/structural MRI vol."
message = "Path to anatomical MRI vol. not set."
message += " Slices will be plotted on a white background,"
message += " which may obscure the outlines of white ICN templates."
message += "\n\nTo this change setting, select 'Preferrences' from the 'Edit' menu"
message += " when the main window is opened, and click on 'Reset All Settings' if needed"
QtWidgets.QMessageBox.warning(self, title, message)
##########################################################################
#-------------------------------------------------
### Functions to set default for configuration ###
#-------------------------------------------------
def load_configuration(self, fname=None, mypath=None, config_backup=None):
"""Loads configuration settings, either from stored .json file, or re-load from stored config"""
warning_flag, configData = False, None
if fname:
if isinstance(fname, str):
if os.path.isfile(fname):
if os.stat(fname).st_size == 0:
title = "Error configuring networkZoo"
message = "Configuration file: " + fname
message += " is an empty file! Defaulting to backup config file..."
QtWidgets.QMessageBox.warning(self, title, message)
io.InputHandling.replace_faulty_config(fname)
with open(fname) as json_config:
configData = json.load(json_config)
elif os.path.isfile(opj(mypath, fname)):
fname = opj(mypath, fname)
with open(fname) as json_config:
configData = json.load(json_config)
else:
warning_flag = True
elif isinstance(fname, dict):
if all (key in fname.keys() for key in ['ica', 'icn', 'smri_file']):
configData = fname
else:
warning_flag = True
elif hasattr(self, 'config'):
configData = self.config
else:
warning_flag = True
else:
warning_flag = True
if warning_flag:
if 'configData' not in locals():
if config_backup:
configData = config_backup
else:
configData = io.InputHandling.replace_faulty_config(None)
if hasattr(self, 'config'):
title = "Error configuring networkZoo"
message = "New configuration file not found,"
message += " defaulting to backup configuration settings"
else:
title = "Error starting networkZoo"
message = "Configuration file not found,"
message += " defaulting to original, non-modified settings."
QtWidgets.QMessageBox.warning(self, title, message)
self.config_file = fname if isinstance(fname, str) else None
if configData:
if 'base_directory' in configData.keys():
configData['base_directory'] = mypath
self.config = io.InputHandling.config_check_defaults(configData,
mypath=mypath,
config_backup=config_backup)
#-------------------------------------------
### Functions related to overall display ###
#-------------------------------------------
def change_display_layout(self):
"""Change layout of slices plotted on display"""
if self.buttonGroup_xview.checkedButton() == self.radioButton_ortho:
self.mp['global'].update({'display_mode': 'ortho'})
self.pushButton_showMaxOverlap.setEnabled(True)
self.horizontalSlider_Xslice.setEnabled(True)
self.horizontalSlider_Yslice.setEnabled(True)
self.horizontalSlider_Zslice.setEnabled(True)
elif self.buttonGroup_xview.checkedButton() == self.radioButton_axial:
self.mp['global'].update({'display_mode': 'axial'})
self.pushButton_showMaxOverlap.setEnabled(False)
self.horizontalSlider_Xslice.setEnabled(False)
self.horizontalSlider_Yslice.setEnabled(False)
self.horizontalSlider_Zslice.setEnabled(False)
elif self.buttonGroup_xview.checkedButton() == self.radioButton_coronal:
self.mp['global'].update({'display_mode': 'coronal'})
self.pushButton_showMaxOverlap.setEnabled(False)
self.horizontalSlider_Xslice.setEnabled(False)
self.horizontalSlider_Yslice.setEnabled(False)
self.horizontalSlider_Zslice.setEnabled(False)
else: # Sagittal
self.mp['global'].update({'display_mode': 'sagittal'})
self.pushButton_showMaxOverlap.setEnabled(False)
self.horizontalSlider_Xslice.setEnabled(False)
self.horizontalSlider_Yslice.setEnabled(False)
self.horizontalSlider_Zslice.setEnabled(False)
self.update_plots()
def reset_display(self, initialize=False):
"""Clear | |
from . import constants as CONSTANTS
from .producer_property import ProducerProperty
from common.telemetry import telemetry_py
from common.telemetry_events import TelemetryEvent
class Image:
"""
If ``string`` is used, it has to consist of digits 0-9 arranged into
lines, describing the image, for example::
image = Image("90009:"
"09090:"
"00900:"
"09090:"
"90009")
will create a 5×5 image of an X. The end of a line is indicated by a colon.
It's also possible to use a newline (\\n) to indicate the end of a line
like this::
image = Image("90009\\n"
"09090\\n"
"00900\\n"
"09090\\n"
"90009")
The other form creates an empty image with ``width`` columns and
``height`` rows. Optionally ``buffer`` can be an array of
``width``×``height`` integers in range 0-9 to initialize the image::
Image(2, 2, b'\x08\x08\x08\x08')
or::
Image(2, 2, bytearray([9,9,9,9]))
Will create a 2 x 2 pixel image at full brightness.
.. note::
Keyword arguments cannot be passed to ``buffer``.
"""
# Attributes assigned (to functions) later;
# having this here helps the pylint.
HEART = None
HEART_SMALL = None
HAPPY = None
SMILE = None
SAD = None
CONFUSED = None
ANGRY = None
ASLEEP = None
SURPRISED = None
SILLY = None
FABULOUS = None
MEH = None
YES = None
NO = None
CLOCK12 = None
CLOCK11 = None
CLOCK10 = None
CLOCK9 = None
CLOCK8 = None
CLOCK7 = None
CLOCK6 = None
CLOCK5 = None
CLOCK4 = None
CLOCK3 = None
CLOCK2 = None
CLOCK1 = None
ARROW_N = None
ARROW_NE = None
ARROW_E = None
ARROW_SE = None
ARROW_S = None
ARROW_SW = None
ARROW_W = None
ARROW_NW = None
TRIANGLE = None
TRIANGLE_LEFT = None
CHESSBOARD = None
DIAMOND = None
DIAMOND_SMALL = None
SQUARE = None
SQUARE_SMALL = None
RABBIT = None
COW = None
MUSIC_CROTCHET = None
MUSIC_QUAVER = None
MUSIC_QUAVERS = None
PITCHFORK = None
XMAS = None
PACMAN = None
TARGET = None
TSHIRT = None
ROLLERSKATE = None
DUCK = None
HOUSE = None
TORTOISE = None
BUTTERFLY = None
STICKFIGURE = None
GHOST = None
SWORD = None
GIRAFFE = None
SKULL = None
UMBRELLA = None
SNAKE = None
ALL_CLOCKS = None
ALL_ARROWS = None
# implementing image model as described here:
# https://microbit-micropython.readthedocs.io/en/v1.0.1/image.html
def __init__(self, *args, **kwargs):
# Depending on the number of arguments
# in constructor, it treat args differently.
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_CREATION)
if len(args) == 0:
# default constructor
self.__LED = self.__string_to_square_array(CONSTANTS.BLANK_5X5)
elif len(args) == 1:
pattern = args[0]
if isinstance(pattern, str):
self.__LED = self.__string_to_square_array(pattern)
else:
raise TypeError("Image(s) takes a string")
else:
width = args[0]
height = args[1]
if width < 0 or height < 0:
# This is not in original, but ideally,
# image should fail non-silently
raise ValueError(CONSTANTS.INDEX_ERR)
if len(args) == 3:
# This option is for potential third bytearray arguments
byte_arr = args[2]
self.__LED = self.__bytes_to_array(width, height, byte_arr)
else:
self.__LED = self.__create_leds(width, height)
self.read_only = False
def width(self):
"""
Return the number of columns in the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if len(self.__LED) > 0:
return len(self.__LED[0])
else:
return 0
    def height(self):
        """
        Return the numbers of rows in the image.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # Row count of the internal 2D LED array.
        return len(self.__LED)
def set_pixel(self, x, y, value):
"""
Set the brightness of the pixel at column ``x`` and row ``y`` to the
``value``, which has to be between 0 (dark) and 9 (bright).
This method will raise an exception when called on any of the built-in
read-only images, like ``Image.HEART``.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if self.read_only:
raise TypeError(CONSTANTS.COPY_ERR_MESSAGE)
elif not self.__valid_pos(x, y):
raise ValueError(CONSTANTS.INDEX_ERR)
elif not self.__valid_brightness(value):
raise ValueError(CONSTANTS.BRIGHTNESS_ERR)
else:
self.__LED[y][x] = value
def get_pixel(self, x, y):
"""
Return the brightness of pixel at column ``x`` and row ``y`` as an
integer between 0 and 9.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if self.__valid_pos(x, y):
return self.__LED[y][x]
else:
raise ValueError(CONSTANTS.INDEX_ERR)
    def shift_up(self, n):
        """
        Return a new image created by shifting the picture up by ``n`` rows.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # Up is a negative offset for the shared vertical shifter.
        return self.__shift_vertical(-n)
    def shift_down(self, n):
        """
        Return a new image created by shifting the picture down by ``n`` rows.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # Down is a positive offset for the shared vertical shifter.
        return self.__shift_vertical(n)
    def shift_right(self, n):
        """
        Return a new image created by shifting the picture right by ``n``
        columns.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # Right is a positive offset for the shared horizontal shifter.
        return self.__shift_horizontal(n)
    def shift_left(self, n):
        """
        Return a new image created by shifting the picture left by ``n``
        columns.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # Left is a negative offset for the shared horizontal shifter.
        return self.__shift_horizontal(-n)
def crop(self, x, y, w, h):
"""
Return a new image by cropping the picture to a width of ``w`` and a
height of ``h``, starting with the pixel at column ``x`` and row ``y``.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
res = Image(w, h)
res.blit(self, x, y, w, h)
return res
    def copy(self):
        """
        Return an exact copy of the image.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # Round-trips through the string form; the copy is writable
        # (read_only is not propagated, since Image() sets it to False).
        return Image(self.__create_string())
    # This inverts the brightness of each LED.
    # ie: Pixel that is at brightness 4 would become brightness 5
    # and pixel that is at brightness 9 would become brightness 0.
    def invert(self):
        """
        Invert the brightness of each pixel **in place** (new value is
        BRIGHTNESS_MAX minus the old value); returns ``None``.

        NOTE(review): the real micro:bit API documents ``invert()`` as
        returning a *new* image; this implementation mutates ``self``
        instead (and raises TypeError on read-only images via
        ``set_pixel``) -- confirm which behavior is intended.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        for y in range(self.height()):
            for x in range(self.width()):
                self.set_pixel(x, y, CONSTANTS.BRIGHTNESS_MAX - self.get_pixel(x, y))
# This fills all LEDs with same brightness.
def fill(self, value):
"""
Set the brightness of all the pixels in the image to the
``value``, which has to be between 0 (dark) and 9 (bright).
This method will raise an exception when called on any of the built-in
read-only images, like ``Image.HEART``.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
for y in range(self.height()):
for x in range(self.width()):
self.set_pixel(x, y, value)
    # This transposes a certain area (w x h) on src onto the current image.
    def blit(self, src, x, y, w, h, xdest=0, ydest=0):
        """
        Copy the rectangle defined by ``x``, ``y``, ``w``, ``h`` from the image ``src`` into
        this image at ``xdest``, ``ydest``.
        Areas in the source rectangle, but outside the source image are treated as having a value of 0.
        """
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
        # The rectangle's origin must lie inside the source image
        if not src.__valid_pos(x, y):
            raise ValueError(CONSTANTS.INDEX_ERR)
        for count_y in range(h):
            for count_x in range(w):
                # Destination pixels outside this image are silently skipped
                if self.__valid_pos(xdest + count_x, ydest + count_y):
                    # Out-of-bounds source pixels read as 0, per the docstring
                    if src.__valid_pos(x + count_x, y + count_y):
                        transfer_pixel = src.get_pixel(x + count_x, y + count_y)
                    else:
                        transfer_pixel = 0
                    self.set_pixel(xdest + count_x, ydest + count_y, transfer_pixel)
# This adds two images (if other object is not an image, throws error).
# The images must be the same size.
def __add__(self, other):
"""
Create a new image by adding the brightness values from the two images for each pixel.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
if not isinstance(other, Image):
raise TypeError(
CONSTANTS.UNSUPPORTED_ADD_TYPE + f"'{type(self)}', '{type(other)}'"
)
elif not (other.height() == self.height() and other.width() == self.width()):
raise ValueError(CONSTANTS.SAME_SIZE_ERR)
else:
res = Image(self.width(), self.height())
for y in range(self.height()):
for x in range(self.width()):
sum_value = other.get_pixel(x, y) + self.get_pixel(x, y)
display_result = min(CONSTANTS.BRIGHTNESS_MAX, sum_value)
res.set_pixel(x, y, display_result)
return res
# This multiplies image by number (if other factor is not a number, it throws an error).
def __mul__(self, other):
"""
Create a new image by multiplying the brightness of each pixel by n.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
try:
float_val = float(other)
except TypeError:
raise TypeError(f"can't convert {type(other)} to float")
res = Image(self.width(), self.height())
for y in range(self.height()):
for x in range(self.width()):
product = self.get_pixel(x, y) * float_val
res.set_pixel(x, y, min(CONSTANTS.BRIGHTNESS_MAX, product))
return res
def __repr__(self):
"""
Get a compact string representation of the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
ret_str = "Image('"
for index_y in range(self.height()):
ret_str += self.__row_to_str(index_y)
ret_str += "')"
return ret_str
def __str__(self):
"""
Get a readable string representation of the image.
"""
telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_IMAGE_OTHER)
ret_str = "Image('\n"
for index_y in range(self.height()):
ret_str += "\t" + self.__row_to_str(index_y) + "\n"
ret_str += "')"
return ret_str
# HELPER FUNCTIONS
# This create 2D array of off LEDs with
# width w and height h
def __create_leds(self, w, h):
arr = []
for _ in range(h):
sub_arr = []
for _ in range(w):
sub_arr.append(0)
arr.append(sub_arr)
return arr
# This turns byte array to 2D array for LED field.
def __bytes_to_array(self, width, height, byte_arr):
bytes_translated = bytes(byte_arr)
if | |
weight='HImass', tofield=False):
'''use this to create mesh of HI
'''
comm = halocat.comm
if mode == 'halos': catalogs = [halocat]
elif mode == 'galaxies': catalogs = [cencat, satcat]
elif mode == 'all': catalogs = [halocat, cencat, satcat]
else: print('Mode not recognized')
rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
totweight = comm.allreduce(rankweight)
for cat in catalogs: cat[weight] /= totweight/float(nc)**3
allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
mesh = allcat.to_mesh(BoxSize=bs,Nmesh=[nc,nc,nc],\
position=position,weight=weight)
if tofield: mesh = mesh.to_field()
return mesh
# ###########################################
class ModelCII_C():
    """[CII] line-luminosity model, calibration variant C.

    Pipeline per halo: mass -> SFR (Behroozi relation) -> L_CII through
    log10(L_CII) = a_CII*log10(SFR) + b_CII with 0.37 dex lognormal scatter;
    ``L_fac`` converts luminosities into mesh units. Satellites and centrals
    carry zero luminosity in this model.
    """

    def __init__(self, aa):
        self.aa = aa          # scale factor
        self.zz = 1./aa - 1.  # corresponding redshift
        self.mcut = 1.0e9     # minimum halo mass with nonzero emission (presumably Msun/h -- TODO confirm)
        self.nu_line = 1902e9 # [CII] rest-frame frequency, Hz
        # Luminosity-to-mesh-unit conversion factor
        self.L_fac = 1e6 / (8.*np.pi*gb_k_B*H(self.zz)) * (gb_c/self.nu_line)**3. * (1.+self.zz)**2. * gb_len_conv**-2. * gb_L_sun * gb_h**3.
        # log10(L_CII) = a_CII * log10(SFR) + b_CII  (variant-C coefficients)
        self.a_CII = 0.8727
        self.b_CII = 6.7250

    def assignline(self, halocat, cencat, satcat):
        """Return (halo, satellite, central) line luminosities for the catalogs."""
        haloL = self.assignhalo(halocat['Mass'].compute())
        satL = self.assignsat(satcat['Mass'].compute())
        cenL = self.assigncen(cencat['Mass'].compute())
        return haloL, satL, cenL

    def assignhalo(self, mhalo):
        """Line luminosity for halos above the mass cut.

        Stochastic (np.random.randn scatter). NOTE(review): the boolean mask
        drops halos below ``mcut``, so the result can be shorter than
        ``mhalo`` -- confirm callers expect this.
        """
        logMhalo = np.log10(mhalo[self.mcut < mhalo]/gb_h)
        logSFR = logSFR_Behroozi(z=self.zz, logMList=logMhalo)
        # Power law plus 0.37 dex lognormal scatter about the mean relation
        lCII = np.power(10., self.a_CII * logSFR + self.b_CII) * np.power(10., 0.37*np.random.randn(len(logMhalo)))
        # logSFR == -1000. is the sentinel for "no star formation"
        lCII[logSFR == -1000.] = 0.
        return self.L_fac * lCII

    def assignsat(self, msat):
        """Satellites carry no [CII] luminosity (zeros, same shape as input)."""
        return msat*0

    def assigncen(self, mcen):
        """Centrals carry no [CII] luminosity (zeros, same shape as input)."""
        return mcen*0

    def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
        """Displace positions along *los* by velocity * rsdfac (redshift space)."""
        hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
        crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
        srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
        return hrsdpos, crsdpos, srsdpos

    def createmesh(self, bs, nc, positions, weights):
        '''Paint the (positions, weights) pairs onto a new real-space mesh
        (unnormalized luminosity-weighted field).
        '''
        pm = ParticleMesh(BoxSize=bs, Nmesh=[nc,nc,nc])
        mesh = pm.create(mode='real', value=0)
        for i in range(len(positions)):
            lay = pm.decompose(positions[i])
            mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
        return mesh

    def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass', tofield=False):
        '''Mean-normalized mesh painted straight from the catalogs.

        mode: 'halos', 'galaxies' (cen+sat) or 'all'.
        NOTE: mutates cat[weight] in place (divides by the global mean).
        '''
        comm = halocat.comm
        if mode == 'halos': catalogs = [halocat]
        elif mode == 'galaxies': catalogs = [cencat, satcat]
        elif mode == 'all': catalogs = [halocat, cencat, satcat]
        else:
            # BUGFIX: original only print()ed here and then crashed with a
            # NameError on 'catalogs'; fail explicitly instead.
            raise ValueError("Mode not recognized: %s" % mode)
        rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
        totweight = comm.allreduce(rankweight)
        for cat in catalogs: cat[weight] /= totweight/float(nc)**3
        allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
        mesh = allcat.to_mesh(BoxSize=bs, Nmesh=[nc,nc,nc],
                              position=position, weight=weight)
        if tofield: mesh = mesh.to_field()
        return mesh
# ###########################################
class ModelCII_D():
    """[CII] line-luminosity model, calibration variant D.

    Same pipeline as variant C (mass -> Behroozi SFR -> power-law L_CII with
    0.37 dex scatter), but with the variant-D coefficients a_CII/b_CII.
    Satellites and centrals carry zero luminosity.
    """

    def __init__(self, aa):
        self.aa = aa          # scale factor
        self.zz = 1./aa - 1.  # corresponding redshift
        self.mcut = 1.0e9     # minimum halo mass with nonzero emission (presumably Msun/h -- TODO confirm)
        self.nu_line = 1902e9 # [CII] rest-frame frequency, Hz
        # Luminosity-to-mesh-unit conversion factor
        self.L_fac = 1e6 / (8.*np.pi*gb_k_B*H(self.zz)) * (gb_c/self.nu_line)**3. * (1.+self.zz)**2. * gb_len_conv**-2. * gb_L_sun * gb_h**3.
        # log10(L_CII) = a_CII * log10(SFR) + b_CII  (variant-D coefficients)
        self.a_CII = 0.9231
        self.b_CII = 6.5234

    def assignline(self, halocat, cencat, satcat):
        """Return (halo, satellite, central) line luminosities for the catalogs."""
        haloL = self.assignhalo(halocat['Mass'].compute())
        satL = self.assignsat(satcat['Mass'].compute())
        cenL = self.assigncen(cencat['Mass'].compute())
        return haloL, satL, cenL

    def assignhalo(self, mhalo):
        """Line luminosity for halos above the mass cut.

        Stochastic (np.random.randn scatter). NOTE(review): the boolean mask
        drops halos below ``mcut``, so the result can be shorter than
        ``mhalo`` -- confirm callers expect this.
        """
        logMhalo = np.log10(mhalo[self.mcut < mhalo]/gb_h)
        logSFR = logSFR_Behroozi(z=self.zz, logMList=logMhalo)
        # Power law plus 0.37 dex lognormal scatter about the mean relation
        lCII = np.power(10., self.a_CII * logSFR + self.b_CII) * np.power(10., 0.37*np.random.randn(len(logMhalo)))
        # logSFR == -1000. is the sentinel for "no star formation"
        lCII[logSFR == -1000.] = 0.
        return self.L_fac * lCII

    def assignsat(self, msat):
        """Satellites carry no [CII] luminosity (zeros, same shape as input)."""
        return msat*0

    def assigncen(self, mcen):
        """Centrals carry no [CII] luminosity (zeros, same shape as input)."""
        return mcen*0

    def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
        """Displace positions along *los* by velocity * rsdfac (redshift space)."""
        hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
        crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
        srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
        return hrsdpos, crsdpos, srsdpos

    def createmesh(self, bs, nc, positions, weights):
        '''Paint the (positions, weights) pairs onto a new real-space mesh
        (unnormalized luminosity-weighted field).
        '''
        pm = ParticleMesh(BoxSize=bs, Nmesh=[nc,nc,nc])
        mesh = pm.create(mode='real', value=0)
        for i in range(len(positions)):
            lay = pm.decompose(positions[i])
            mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
        return mesh

    def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass', tofield=False):
        '''Mean-normalized mesh painted straight from the catalogs.

        mode: 'halos', 'galaxies' (cen+sat) or 'all'.
        NOTE: mutates cat[weight] in place (divides by the global mean).
        '''
        comm = halocat.comm
        if mode == 'halos': catalogs = [halocat]
        elif mode == 'galaxies': catalogs = [cencat, satcat]
        elif mode == 'all': catalogs = [halocat, cencat, satcat]
        else:
            # BUGFIX: original only print()ed here and then crashed with a
            # NameError on 'catalogs'; fail explicitly instead.
            raise ValueError("Mode not recognized: %s" % mode)
        rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
        totweight = comm.allreduce(rankweight)
        for cat in catalogs: cat[weight] /= totweight/float(nc)**3
        allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
        mesh = allcat.to_mesh(BoxSize=bs, Nmesh=[nc,nc,nc],
                              position=position, weight=weight)
        if tofield: mesh = mesh.to_field()
        return mesh
# ###########################################
class ModelCO10():
    """CO(1-0) line-luminosity model.

    Pipeline per halo: mass -> SFR (Behroozi) -> L_IR -> L'_CO via the
    power law log10(L'_CO) = (log10(L_IR) - b_CO)/a_CO with 0.37 dex
    lognormal scatter, then L_CO = 4.9e-5 * J^3 * L'_CO. Satellites and
    centrals carry zero luminosity.
    """

    def __init__(self, aa):
        self.aa = aa          # scale factor
        self.zz = 1./aa - 1.  # corresponding redshift
        self.mcut = 1.0e9     # minimum halo mass with nonzero emission (presumably Msun/h -- TODO confirm)
        self.delta_MF = 1.0   # SFR -> L_IR normalization factor
        self.J = 1.0          # upper rotational level of the CO(1-0) transition
        self.nu_line = self.J*115.27e9 # in unit of Hz for CO
        # Luminosity-to-mesh-unit conversion factor
        self.L_fac = 1e6 / (8.*np.pi*gb_k_B*H(self.zz)) * (gb_c/self.nu_line)**3. * (1.+self.zz)**2. * gb_len_conv**-2. * gb_L_sun * gb_h**3.
        # L_IR -> L'_CO power-law coefficients for CO(1-0)
        self.a_CO = 1.27
        self.b_CO = -1.0

    def assignline(self, halocat, cencat, satcat):
        """Return (halo, satellite, central) line luminosities for the catalogs."""
        haloL = self.assignhalo(halocat['Mass'].compute())
        satL = self.assignsat(satcat['Mass'].compute())
        cenL = self.assigncen(cencat['Mass'].compute())
        return haloL, satL, cenL

    def assignhalo(self, mhalo):
        """Line luminosity for halos above the mass cut.

        Stochastic (np.random.randn scatter). NOTE(review): the boolean mask
        drops halos below ``mcut``, so the result can be shorter than
        ``mhalo`` -- confirm callers expect this.
        """
        logMhalo = np.log10(mhalo[self.mcut < mhalo]/gb_h)
        logSFR = logSFR_Behroozi(z=self.zz, logMList=logMhalo)
        L_IR = np.power(10., logSFR)/self.delta_MF * np.power(10., 10.)
        # logSFR == -1000. is the sentinel for "no star formation"
        L_IR[logSFR == -1000.] = 0.
        # log10(LCO_prime) = (log10(L_IR) - b)/a + Normal(0, 0.37); K km/s pc^2
        Lprime_CO = np.power(10., (np.log10(L_IR)-self.b_CO)/self.a_CO) * np.power(10., 0.37*np.random.randn(len(logMhalo)))
        L_CO = 4.9 * 1.0e-5 * self.J**3. * Lprime_CO  # in unit of L_sun
        return self.L_fac * L_CO

    def assignsat(self, msat):
        """Satellites carry no CO luminosity (zeros, same shape as input)."""
        return msat*0

    def assigncen(self, mcen):
        """Centrals carry no CO luminosity (zeros, same shape as input)."""
        return mcen*0

    def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
        """Displace positions along *los* by velocity * rsdfac (redshift space)."""
        hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
        crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
        srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
        return hrsdpos, crsdpos, srsdpos

    def createmesh(self, bs, nc, positions, weights):
        '''Paint the (positions, weights) pairs onto a new real-space mesh
        (unnormalized luminosity-weighted field).
        '''
        pm = ParticleMesh(BoxSize=bs, Nmesh=[nc,nc,nc])
        mesh = pm.create(mode='real', value=0)
        for i in range(len(positions)):
            lay = pm.decompose(positions[i])
            mesh.paint(positions[i], mass=weights[i], layout=lay, hold=True)
        return mesh

    def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass', tofield=False):
        '''Mean-normalized mesh painted straight from the catalogs.

        mode: 'halos', 'galaxies' (cen+sat) or 'all'.
        NOTE: mutates cat[weight] in place (divides by the global mean).
        '''
        comm = halocat.comm
        if mode == 'halos': catalogs = [halocat]
        elif mode == 'galaxies': catalogs = [cencat, satcat]
        elif mode == 'all': catalogs = [halocat, cencat, satcat]
        else:
            # BUGFIX: original only print()ed here and then crashed with a
            # NameError on 'catalogs'; fail explicitly instead.
            raise ValueError("Mode not recognized: %s" % mode)
        rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
        totweight = comm.allreduce(rankweight)
        for cat in catalogs: cat[weight] /= totweight/float(nc)**3
        allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
        mesh = allcat.to_mesh(BoxSize=bs, Nmesh=[nc,nc,nc],
                              position=position, weight=weight)
        if tofield: mesh = mesh.to_field()
        return mesh
# ###########################################
class ModelCO21():
def __init__(self, aa):
    """CO J=2->1 line-emission model at scale factor ``aa``.

    :param aa: cosmological scale factor of the snapshot
    """
    self.aa = aa
    self.zz = 1./aa - 1.  # redshift corresponding to the scale factor
    self.mcut = 1.0e9  # minimum halo mass hosting CO emission; presumably Msun/h -- TODO confirm units
    self.delta_MF = 1.0  # normalization of the SFR -> L_IR conversion
    self.J = 2.0  # upper rotational level: this model is the CO(2-1) transition
    self.nu_line = self.J*115.27e9 # in unit of Hz for CO
    # Luminosity -> brightness-temperature conversion factor.
    # NOTE(review): depends on module-level constants (gb_k_B, gb_c, gb_len_conv,
    # gb_L_sun, gb_h) and the Hubble function H(z) defined elsewhere in this file.
    self.L_fac = 1e6 / (8.*np.pi*gb_k_B*H(self.zz)) * (gb_c/self.nu_line)**3. * (1.+self.zz)**2. * gb_len_conv**-2. * gb_L_sun * gb_h**3.
    # Power-law fit parameters relating IR and CO luminosity:
    # log10(L_IR) = a_CO * log10(L'_CO) + b_CO (inverted in assignhalo).
    self.a_CO = 1.11
    self.b_CO = 0.6
def assignline(self, halocat, cencat, satcat):
    """Compute line luminosities for the halo, satellite and central catalogs.

    Returns them in the order (halo, satellite, central).
    """
    halo_lum = self.assignhalo(halocat['Mass'].compute())
    sat_lum = self.assignsat(satcat['Mass'].compute())
    cen_lum = self.assigncen(cencat['Mass'].compute())
    return halo_lum, sat_lum, cen_lum
def assignhalo(self, mhalo):
    """Assign a CO(2-1) luminosity to every halo above the mass cut.

    :param mhalo: array of halo masses (h factor divided out below)
    :return: luminosity array for halos with mass > self.mcut, in the
             brightness-temperature units set by self.L_fac
    """
    # Keep only halos above the mass cut; convert masses to Msun via 1/h.
    logMhalo = np.log10(mhalo[self.mcut < mhalo]/gb_h)
    # Star-formation rate from the Behroozi et al. fit (defined elsewhere in this file).
    logSFR = logSFR_Behroozi(z=self.zz, logMList=logMhalo)
    # Infrared luminosity from SFR: L_IR = SFR/delta_MF * 1e10 (L_sun).
    L_IR = np.power(10., logSFR)/self.delta_MF * np.power(10., 10.)
    # logSFR_Behroozi presumably flags out-of-range halos with -1000; zero them out.
    L_IR[logSFR == -1000.] = 0.
    # Invert the L_IR - L'_CO power law and add 0.37 dex lognormal scatter (stochastic!).
    Lprime_CO = np.power(10., (np.log10(L_IR)-self.b_CO)/self.a_CO) * np.power(10., 0.37*np.random.randn(len(logMhalo)))
    #log10(LCO_prime) = (log10(L_IR) - b)/a + Normal(0, 0.37)
    #in unit of K km/s pc^2
    L_CO = 4.9 * 1.0e-5 * self.J**3. * Lprime_CO #in unit of L_sun
    return self.L_fac * L_CO
def assignsat(self, msat):
return msat*0
def assigncen(self, mcen):
return mcen*0
def assignrsd(self, rsdfac, halocat, cencat, satcat, los=[0,0,1]):
hrsdpos = halocat['Position']+halocat['Velocity']*los * rsdfac
crsdpos = cencat['Position']+cencat['Velocity']*los * rsdfac
srsdpos = satcat['Position']+satcat['Velocity']*los * rsdfac
return hrsdpos, crsdpos, srsdpos
def createmesh(self, bs, nc, positions, weights):
    '''use this to create mesh of Line

    Paints every (positions, weights) pair onto a single nc^3 real-space
    field; hold=True accumulates successive catalogs.
    '''
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
    field = pm.create(mode='real', value=0)
    comm = pm.comm
    for pos, wt in zip(positions, weights):
        layout = pm.decompose(pos)
        field.paint(pos, mass=wt, layout=layout, hold=True)
    return field
def createmesh_catalog(self, bs, nc, halocat, cencat, satcat, mode='galaxies', position='RSDpos', weight='HImass', tofield=False):
'''use this to create mesh of HI
'''
comm = halocat.comm
if mode == 'halos': catalogs = [halocat]
elif mode == 'galaxies': catalogs = [cencat, satcat]
elif mode == 'all': catalogs = [halocat, cencat, satcat]
else: print('Mode not recognized')
rankweight = sum([cat[weight].sum().compute() for cat in catalogs])
totweight = comm.allreduce(rankweight)
for cat in catalogs: cat[weight] /= totweight/float(nc)**3
allcat = MultipleSpeciesCatalog(['%d'%i for i in range(len(catalogs))], *catalogs)
| |
# ##### BEGIN MIT LICENSE BLOCK #####
#
# MIT License
#
# Copyright (c) 2022 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ##### END MIT LICENSE BLOCK #####
import os
import json
import struct
from math import degrees, radians
from .format import ScenarioAsset, SALT_SIZE
# Module-wide debug switches: verbose tracing for the overall parse,
# the tag header pass, and the tag body pass respectively.
DEBUG_PARSER = True
DEBUG_HEADER = True
DEBUG_BODY = True
def tag_block_header(TAG, header_group, version, count, size):
    """Build a TagBlockHeader with the given group name, version, count and size."""
    header = TAG.TagBlockHeader()
    header.name = TAG.string_to_bytes(header_group, True)
    for field_name, field_value in (("version", version), ("count", count), ("size", size)):
        setattr(header, field_name, field_value)
    return header
def tag_block(TAG, count, maximum_count, address, definition):
    """Build a TagBlock descriptor (count, capacity, address, definition)."""
    block = TAG.TagBlock()
    for attr, value in zip(("count", "maximum_count", "address", "definition"),
                           (count, maximum_count, address, definition)):
        setattr(block, attr, value)
    return block
def get_object_names(dump_dic, TAG, SCENARIO):
    """Fill SCENARIO.object_names (and its header) from the 'Object Names' block."""
    SCENARIO.object_names = []
    for entry in dump_dic['Data']['Object Names']:
        object_name = SCENARIO.ObjectName()
        object_name.name = TAG.string_to_bytes(entry['Name'], False)
        # Type and placement are resolved later; -1 marks them unset.
        object_name.object_type = -1
        object_name.placement_index = -1
        SCENARIO.object_names.append(object_name)
    SCENARIO.object_name_header = tag_block_header(TAG, "tbfd", 0, len(SCENARIO.object_names), 36)
def get_scenery(dump_dic, TAG, SCENARIO):
    """Populate SCENARIO.scenery (and its header) from the dump's 'Scenery' block.

    Placements with Source value 0 are skipped (scenery added as XREFs).
    Fields absent from the dump get the neutral defaults the tag writer expects.
    """
    scenery_tag_block = dump_dic['Data']['Scenery']
    SCENARIO.scenery = []
    for scenery_element in scenery_tag_block:
        source = scenery_element['Source']['Value']
        if not source == 0: # We're doing this to exclude scenery pieces added as XREFs.
            primary = scenery_element['Primary Color']
            secondary = scenery_element['Secondary Color']
            tertiary = scenery_element['Tertiary Color']
            quaternary = scenery_element['Quaternary Color']
            scenery = SCENARIO.Scenery()
            # Per-placement sub-block headers: (group tag, version, count, size).
            scenery.sobj_header = tag_block_header(TAG, "sobj", 1, 1, 48)
            scenery.obj0_header = tag_block_header(TAG, "obj#", 0, 1, 8)
            scenery.sper_header = tag_block_header(TAG, "sper", 0, 1, 24)
            scenery.sct3_header = tag_block_header(TAG, "sct3", 0, 1, 20)
            scenery.palette_index = scenery_element['Palette Index']
            scenery.name_index = scenery_element['Name Index']
            scenery.placement_flags = scenery_element['Placement Flags']
            scenery.position = scenery_element['Position']
            scenery.rotation = scenery_element['Rotation']
            scenery.scale = scenery_element['Scale']
            scenery.transform_flags = scenery_element['Transform Flags']
            scenery.manual_bsp_flags = scenery_element['Manual BSP Flags']
            scenery.unique_id = scenery_element['Unique ID']['FullInteger']
            scenery.origin_bsp_index = scenery_element['Origin BSP Index']
            scenery.object_type = scenery_element['Type']['Value']
            scenery.source = scenery_element['Source']['Value']
            # Defaults for fields the dump does not carry.
            scenery.bsp_policy = 0
            scenery.editor_folder_index = -1
            scenery.variant_name_length = 0
            scenery.active_change_colors = 0
            # Colors are stored as BGRA tuples with alpha hardcoded to 1.
            scenery.primary_color_BGRA = (primary['B'], primary['G'], primary['R'], 1)
            scenery.secondary_color_BGRA = (secondary['B'], secondary['G'], secondary['R'], 1)
            scenery.tertiary_color_BGRA = (tertiary['B'], tertiary['G'], tertiary['R'], 1)
            scenery.quaternary_color_BGRA = (quaternary['B'], quaternary['G'], quaternary['R'], 1)
            scenery.pathfinding_policy = scenery_element['Pathfinding Policy']['Value']
            scenery.lightmap_policy = scenery_element['Lightmapping Policy']['Value']
            scenery.valid_multiplayer_games = 0
            SCENARIO.scenery.append(scenery)
    SCENARIO.scenery_header = tag_block_header(TAG, "tbfd", 4, len(SCENARIO.scenery), 96)
def get_unit(dump_dic, TAG, SCENARIO, unit):
    """Read one unit placement block (e.g. bipeds or vehicles) from the dump.

    :param dump_dic: parsed JSON dump; placements are read from dump_dic['Data'][unit]
    :param TAG: tag helper used to build sub-block headers and strings
    :param SCENARIO: scenario asset providing the Unit element type
    :param unit: name of the unit block under dump_dic['Data']
    :return: (tag block header for the list, list of populated Unit elements)
    """
    unit_tag_block = dump_dic['Data'][unit]
    unit_list = []
    for unit_element in unit_tag_block:
        primary = unit_element['Primary Color']
        secondary = unit_element['Secondary Color']
        tertiary = unit_element['Tertiary Color']
        quaternary = unit_element['Quaternary Color']
        # Distinct name: the original rebound the `unit` parameter here, which
        # only worked because the dict lookup above happens before the loop.
        placed_unit = SCENARIO.Unit()
        # Per-placement sub-block headers: (group tag, version, count, size).
        placed_unit.sobj_header = tag_block_header(TAG, "sobj", 1, 1, 48)
        placed_unit.obj0_header = tag_block_header(TAG, "obj#", 0, 1, 8)
        placed_unit.sper_header = tag_block_header(TAG, "sper", 0, 1, 24)
        placed_unit.sunt_header = tag_block_header(TAG, "sunt", 0, 1, 8)
        placed_unit.palette_index = unit_element['Palette Index']
        placed_unit.name_index = unit_element['Name Index']
        placed_unit.placement_flags = unit_element['Placement Flags']
        placed_unit.position = unit_element['Position']
        placed_unit.rotation = unit_element['Rotation']
        placed_unit.scale = unit_element['Scale']
        placed_unit.transform_flags = unit_element['Transform Flags']
        placed_unit.manual_bsp_flags = unit_element['Manual BSP Flags']
        placed_unit.unique_id = unit_element['Unique ID']['FullInteger']
        placed_unit.origin_bsp_index = unit_element['Origin BSP Index']
        placed_unit.object_type = unit_element['Type']['Value']
        placed_unit.source = unit_element['Source']['Value']
        # Defaults for fields the dump does not carry.
        placed_unit.bsp_policy = 0
        placed_unit.editor_folder_index = -1
        placed_unit.variant_name_length = 0
        placed_unit.active_change_colors = 0
        # Colors are stored as BGRA tuples with alpha hardcoded to 1.
        placed_unit.primary_color_BGRA = (primary['B'], primary['G'], primary['R'], 1)
        placed_unit.secondary_color_BGRA = (secondary['B'], secondary['G'], secondary['R'], 1)
        placed_unit.tertiary_color_BGRA = (tertiary['B'], tertiary['G'], tertiary['R'], 1)
        placed_unit.quaternary_color_BGRA = (quaternary['B'], quaternary['G'], quaternary['R'], 1)
        placed_unit.body_vitality = 0
        placed_unit.flags = 0
        unit_list.append(placed_unit)
    return tag_block_header(TAG, "tbfd", 2, len(unit_list), 84), unit_list
def get_equipment(dump_dic, TAG, SCENARIO):
    """Populate SCENARIO.equipment (and its header) from the dump's 'Equipment' block."""
    equipment_tag_block = dump_dic['Data']['Equipment']
    SCENARIO.equipment = []
    for equipment_element in equipment_tag_block:
        equipment = SCENARIO.Equipment()
        # Per-placement sub-block headers: (group tag, version, count, size).
        equipment.sobj_header = tag_block_header(TAG, "sobj", 1, 1, 48)
        equipment.obj0_header = tag_block_header(TAG, "obj#", 0, 1, 8)
        equipment.seqt_header = tag_block_header(TAG, "seqt", 0, 1, 4)
        equipment.palette_index = equipment_element['Palette Index']
        equipment.name_index = equipment_element['Name Index']
        equipment.placement_flags = equipment_element['Placement Flags']
        equipment.position = equipment_element['Position']
        equipment.rotation = equipment_element['Rotation']
        equipment.scale = equipment_element['Scale']
        equipment.transform_flags = equipment_element['Transform Flags']
        equipment.manual_bsp_flags = equipment_element['Manual BSP Flags']
        equipment.unique_id = equipment_element['Unique ID']['FullInteger']
        equipment.origin_bsp_index = equipment_element['Origin BSP Index']
        equipment.object_type = equipment_element['Type']['Value']
        equipment.source = equipment_element['Source']['Value']
        # Defaults for fields the dump does not carry.
        equipment.bsp_policy = 0
        equipment.editor_folder_index = -1
        equipment.flags = 0
        SCENARIO.equipment.append(equipment)
    SCENARIO.equipment_header = tag_block_header(TAG, "tbfd", 2, len(SCENARIO.equipment), 56)
def get_weapon(dump_dic, TAG, SCENARIO):
    """Populate SCENARIO.weapons (and its header) from the dump's 'Weapons' block."""
    weapon_tag_block = dump_dic['Data']['Weapons']
    SCENARIO.weapons = []
    for weapon_element in weapon_tag_block:
        primary = weapon_element['Primary Color']
        secondary = weapon_element['Secondary Color']
        tertiary = weapon_element['Tertiary Color']
        quaternary = weapon_element['Quaternary Color']
        weapon = SCENARIO.Weapon()
        # Per-placement sub-block headers: (group tag, version, count, size).
        weapon.sobj_header = tag_block_header(TAG, "sobj", 1, 1, 48)
        weapon.obj0_header = tag_block_header(TAG, "obj#", 0, 1, 8)
        weapon.sper_header = tag_block_header(TAG, "sper", 0, 1, 24)
        weapon.swpt_header = tag_block_header(TAG, "swpt", 0, 1, 8)
        weapon.palette_index = weapon_element['Palette Index']
        weapon.name_index = weapon_element['Name Index']
        weapon.placement_flags = weapon_element['Placement Flags']
        weapon.position = weapon_element['Position']
        weapon.rotation = weapon_element['Rotation']
        weapon.scale = weapon_element['Scale']
        weapon.transform_flags = weapon_element['Transform Flags']
        weapon.manual_bsp_flags = weapon_element['Manual BSP Flags']
        weapon.unique_id = weapon_element['Unique ID']['FullInteger']
        weapon.origin_bsp_index = weapon_element['Origin BSP Index']
        weapon.object_type = weapon_element['Type']['Value']
        weapon.source = weapon_element['Source']['Value']
        # Defaults for fields the dump does not carry.
        weapon.bsp_policy = 0
        weapon.editor_folder_index = -1
        weapon.variant_name_length = 0
        weapon.active_change_colors = 0
        # Colors are stored as BGRA tuples with alpha hardcoded to 1.
        weapon.primary_color_BGRA = (primary['B'], primary['G'], primary['R'], 1)
        weapon.secondary_color_BGRA = (secondary['B'], secondary['G'], secondary['R'], 1)
        weapon.tertiary_color_BGRA = (tertiary['B'], tertiary['G'], tertiary['R'], 1)
        weapon.quaternary_color_BGRA = (quaternary['B'], quaternary['G'], quaternary['R'], 1)
        weapon.rounds_left = 0
        weapon.rounds_loaded = 0
        weapon.flags = 0
        SCENARIO.weapons.append(weapon)
    SCENARIO.weapon_header = tag_block_header(TAG, "tbfd", 2, len(SCENARIO.weapons), 84)
def get_trigger_volumes(dump_dic, TAG, SCENARIO):
    """Fill SCENARIO.trigger_volumes (and its header) from 'Kill Trigger Volumes'."""
    volumes = []
    for element in dump_dic['Data']['Kill Trigger Volumes']:
        volume_name = element['Name']
        volume = SCENARIO.TriggerVolume()
        volume.name = TAG.string_to_bytes(volume_name, False)
        volume.name_length = len(volume_name)
        # Not attached to an object or node; -1/empty mark them unset.
        volume.object_name_index = -1
        volume.node_name = TAG.string_to_bytes("", False)
        volume.forward = element['Forward']
        volume.up = element['Up']
        volume.position = element['Position']
        volume.extents = element['Extents']
        volume.kill_trigger_volume_index = -1
        volumes.append(volume)
    SCENARIO.trigger_volumes = volumes
    SCENARIO.trigger_volumes_header = tag_block_header(TAG, "tbfd", 1, len(volumes), 68)
def get_decals(dump_dic, TAG, SCENARIO):
    """Fill SCENARIO.decals (and its header) from the dump's 'Decals' block."""
    decals = []
    for entry in dump_dic['Data']['Decals']:
        decal = SCENARIO.Decal()
        decal.palette_index = entry['Palette Index']
        decal.yaw = entry['Yaw']
        decal.pitch = entry['Pitch']
        decal.position = entry['Position']
        decals.append(decal)
    SCENARIO.decals = decals
    SCENARIO.decals_header = tag_block_header(TAG, "tbfd", 0, len(decals), 16)
def get_squad_groups(dump_dic, TAG, SCENARIO):
    """Fill SCENARIO.squad_groups (and its header) from the 'Squad Groups' block."""
    groups = []
    for group_entry in dump_dic['Data']['Squad Groups']:
        group = SCENARIO.SquadGroups()
        group.name = TAG.string_to_bytes(group_entry['Name'], False)
        group.parent_index = group_entry['Parent Index']
        group.initial_order_index = group_entry['Initial Order Index']
        groups.append(group)
    SCENARIO.squad_groups = groups
    SCENARIO.squad_groups_header = tag_block_header(TAG, "tbfd", 0, len(groups), 36)
def get_squads(dump_dic, TAG, SCENARIO):
    """Populate SCENARIO.squads (with per-squad starting locations) from the dump.

    Scalar fields come straight from the JSON element; fields the dump does
    not carry (secondary weapons, variant names, scripts) get neutral defaults.
    """
    squads_tag_block = dump_dic['Data']['Squads']
    SCENARIO.squads = []
    for squad_element in squads_tag_block:
        starting_locations_dic = squad_element['Starting Locations']
        starting_location_count = len(starting_locations_dic)
        squad = SCENARIO.Squad()
        squad.name = TAG.string_to_bytes(squad_element['Name'], False)
        squad.flags = squad_element['Flags']
        squad.team = squad_element['Team']['Value']
        squad.parent_squad_group_index = squad_element['Parent Squad Group Index']
        squad.squad_delay_time = squad_element['Squad Delay Time']
        squad.normal_difficulty_count = squad_element['Normal Difficulty Count']
        squad.insane_difficulty_count = squad_element['Insane Difficulty Count']
        squad.major_upgrade = squad_element['Major Upgrade']['Value']
        squad.vehicle_type_index = squad_element['Vehicle Type Index']
        squad.character_type_index = squad_element['Character Type Index']
        squad.initial_zone_index = squad_element['Initial Zone Index']
        squad.initial_weapon_index = squad_element['Initial Weapon Index']
        squad.initial_secondary_weapon_index = -1  # not present in the dump
        squad.grenade_type = squad_element['Grenade Type']['Value']
        squad.initial_order_index = squad_element['Initial Order Index']
        squad.vehicle_variant_length = 0
        # Descriptor for the child block; address/definition filled in on write.
        squad.starting_locations_tag_block = tag_block(TAG, starting_location_count, 0, 0, 0)
        squad.placement_script = TAG.string_to_bytes("", False)
        if starting_location_count > 0:
            squad.starting_locations_header = tag_block_header(TAG, "tbfd", 6, starting_location_count, 100)
            starting_locations = []
            for starting_location_element in starting_locations_dic:
                starting_location = SCENARIO.StartingLocation()
                starting_location.name = TAG.string_to_bytes(starting_location_element['Name'], False)
                starting_location.name_length = len(starting_location_element['Name'])
                starting_location.position = starting_location_element['Position']
                starting_location.reference_frame = -1
                # Some dumps store Facing as [yaw, pitch], others as a single yaw value.
                if type(starting_location_element['Facing']) == list:
                    starting_location.facing_y = starting_location_element['Facing'][0]
                    starting_location.facing_p = starting_location_element['Facing'][1]
                else:
                    starting_location.facing_y = starting_location_element['Facing']
                    starting_location.facing_p = 0
                starting_location.flags = 0
                starting_location.character_type_index = starting_location_element['Character Type Index']
                starting_location.initial_weapon_index = starting_location_element['Initial Weapon Index']
                starting_location.initial_secondary_weapon_index = -1
                starting_location.vehicle_type_index = starting_location_element['Vehicle Type Index']
                starting_location.seat_type = starting_location_element['Seat Type']['Value']
                starting_location.grenade_type = starting_location_element['Grenade Type']['Value']
                # Remaining fields are not in the dump; use neutral defaults.
                starting_location.swarm_count = 0
                starting_location.actor_variant_name_length = 0
                starting_location.vehicle_variant_name_length = 0
                starting_location.initial_movement_distance = 0
                starting_location.emitter_vehicle_index = -1
                starting_location.initial_movement_mode = 0
                starting_location.placement_script = TAG.string_to_bytes("", False)
                starting_locations.append(starting_location)
            # NOTE(review): squads with zero starting locations never get a
            # .starting_locations attribute — confirm the writer tolerates that.
            squad.starting_locations = starting_locations
        SCENARIO.squads.append(squad)
    SCENARIO.squads_header = tag_block_header(TAG, "tbfd", 2, len(SCENARIO.squads), 120)
def get_zones(dump_dic, TAG, SCENARIO):
zones_tag_block = dump_dic['Data']['Zones']
SCENARIO.zones = []
for zone_element in zones_tag_block:
firing_positions_dic = zone_element['Firing Positions']
firing_positions_count = len(firing_positions_dic)
areas_dic = zone_element['Areas']
areas_count = len(areas_dic)
zone = SCENARIO.Zone()
zone.name = TAG.string_to_bytes(zone_element['Name'], False)
zone.flags = zone_element['Flags']
zone.manual_bsp_index = zone_element['Manual BSP Index']
zone.firing_positions_tag_block = tag_block(TAG, firing_positions_count, 0, 0, 0)
zone.areas_tag_block = tag_block(TAG, areas_count, 0, 0, 0)
zone.firing_positions = []
zone.areas = []
if | |
Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GetSecurityGroupBusinessObjectPermissionsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2_with_http_info(bus_ob_id, **kwargs) # noqa: E501
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2_with_http_info(self, bus_ob_id, **kwargs):  # noqa: E501
    """Get Business Object permission for current user  # noqa: E501

    Operation to get Business Object permissions for the currently logged-in user's Security Group.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2_with_http_info(bus_ob_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str bus_ob_id: Specify the Business Object ID. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(GetSecurityGroupBusinessObjectPermissionsResponse, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    # Whitelist of accepted keyword arguments; anything else is a caller typo.
    all_params = ['bus_ob_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs early and fold known ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'bus_ob_id' is set
    if self.api_client.client_side_validation and ('bus_ob_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['bus_ob_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `bus_ob_id` when calling `security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_id_v2`")  # noqa: E501

    collection_formats = {}

    # Substitute the Business Object ID into the URL path template below.
    path_params = {}
    if 'bus_ob_id' in local_var_params:
        path_params['busObId'] = local_var_params['bus_ob_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api/V2/getsecuritygroupbusinessobjectpermissionsforcurrentuserbybusobid/busobid/{busObId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetSecurityGroupBusinessObjectPermissionsResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v1(self, busobname, **kwargs):  # noqa: E501
    """Get Business Object permissions for current user  # noqa: E501

    Convenience wrapper around the ``_with_http_info`` variant: it forces
    ``_return_http_data_only=True`` so the caller receives only the
    deserialized response data (no status code or headers).
    This method makes a synchronous HTTP request by default; pass
    async_req=True to get a request thread instead.

    :param async_req bool: execute request asynchronously
    :param str busobname: Specify the Business Object name. (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: list[BusinessObjectPermission], or the request thread when
             called asynchronously.
    """
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v1_with_http_info(busobname, **call_kwargs)  # noqa: E501
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v1_with_http_info(self, busobname, **kwargs):  # noqa: E501
    """Get Business Object permissions for current user  # noqa: E501

    Operation to get Business Object permissions for currently logged in user's Security Group.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v1_with_http_info(busobname, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str busobname: Specify the Business Object name. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(list[BusinessObjectPermission], status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    # Whitelist of accepted keyword arguments; anything else is a caller typo.
    all_params = ['busobname']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs early and fold known ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v1" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'busobname' is set
    if self.api_client.client_side_validation and ('busobname' not in local_var_params or  # noqa: E501
                                                   local_var_params['busobname'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `busobname` when calling `security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v1`")  # noqa: E501

    collection_formats = {}

    # Substitute the Business Object name into the URL path template below.
    path_params = {}
    if 'busobname' in local_var_params:
        path_params['busobname'] = local_var_params['busobname']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api/V1/getsecuritygroupbusinessobjectpermissionsforcurrentuserbybusobname/busobname/{busobname}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[BusinessObjectPermission]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v2(self, busobname, **kwargs):  # noqa: E501
    """Get Business Object permissions for current user  # noqa: E501

    Convenience wrapper around the ``_with_http_info`` variant: it forces
    ``_return_http_data_only=True`` so the caller receives only the
    deserialized response data (no status code or headers).
    This method makes a synchronous HTTP request by default; pass
    async_req=True to get a request thread instead.

    :param async_req bool: execute request asynchronously
    :param str busobname: Specify the Business Object name. (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
                             without reading/decoding. Default is True.
    :param _request_timeout: total timeout, or a (connection, read) tuple.
    :return: GetSecurityGroupBusinessObjectPermissionsResponse, or the
             request thread when called asynchronously.
    """
    call_kwargs = dict(kwargs, _return_http_data_only=True)
    return self.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v2_with_http_info(busobname, **call_kwargs)  # noqa: E501
def security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v2_with_http_info(self, busobname, **kwargs):  # noqa: E501
    """Get Business Object permissions for current user  # noqa: E501

    Operation to get Business Object permissions for currently logged in user's Security Group.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v2_with_http_info(busobname, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str busobname: Specify the Business Object name. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(GetSecurityGroupBusinessObjectPermissionsResponse, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """

    local_var_params = locals()

    # Whitelist of accepted keyword arguments; anything else is a caller typo.
    all_params = ['busobname']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Reject unknown kwargs early and fold known ones into local_var_params.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v2" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'busobname' is set
    if self.api_client.client_side_validation and ('busobname' not in local_var_params or  # noqa: E501
                                                   local_var_params['busobname'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `busobname` when calling `security_get_security_group_business_object_permissions_for_current_user_by_bus_ob_name_v2`")  # noqa: E501

    collection_formats = {}

    # Substitute the Business Object name into the URL path template below.
    path_params = {}
    if 'busobname' in local_var_params:
        path_params['busobname'] = local_var_params['busobname']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    return self.api_client.call_api(
        '/api/V2/getsecuritygroupbusinessobjectpermissionsforcurrentuserbybusobname/busobname/{busobname}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='GetSecurityGroupBusinessObjectPermissionsResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def security_get_security_group_categories_v1(self, **kwargs): # noqa: E501
"""Get all Security Group categories # noqa: E501
Operation to get IDs and names for all available Security Group categories. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP | |
# Source repository: sakagarwal/python-aiplatform (scraper metadata converted to a comment so the module parses)
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.vizier_service import pagers
from google.cloud.aiplatform_v1beta1.types import study
from google.cloud.aiplatform_v1beta1.types import study as gca_study
from google.cloud.aiplatform_v1beta1.types import vizier_service
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import VizierServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import VizierServiceGrpcTransport
from .transports.grpc_asyncio import VizierServiceGrpcAsyncIOTransport
class VizierServiceClientMeta(type):
    """Metaclass for the VizierService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of available transports, keyed by label. Insertion order
    # matters: the first entry is the default transport.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[VizierServiceTransport]]
    _transport_registry["grpc"] = VizierServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = VizierServiceGrpcAsyncIOTransport

    def get_transport_class(cls, label: Optional[str] = None,) -> Type[VizierServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.

        Raises:
            KeyError: If ``label`` is not a registered transport name.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class VizierServiceClient(metaclass=VizierServiceClientMeta):
"""Vertex AI Vizier API.
Vertex AI Vizier is a service to solve blackbox optimization
problems, such as tuning machine learning hyperparameters and
searching over deep learning architectures.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VizierServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
VizierServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> VizierServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            VizierServiceTransport: The transport used by the client
            instance.
        """
        # ``_transport`` is assigned in __init__ (not visible in this
        # chunk) — presumably never None after construction; verify there.
        return self._transport
@staticmethod
def custom_job_path(project: str, location: str, custom_job: str,) -> str:
"""Returns a fully-qualified custom_job string."""
return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
project=project, location=location, custom_job=custom_job,
)
@staticmethod
def parse_custom_job_path(path: str) -> Dict[str, str]:
"""Parses a custom_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def study_path(project: str, location: str, study: str,) -> str:
"""Returns a fully-qualified study string."""
return "projects/{project}/locations/{location}/studies/{study}".format(
project=project, location=location, study=study,
)
@staticmethod
def parse_study_path(path: str) -> Dict[str, str]:
"""Parses a study path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def trial_path(project: str, location: str, study: str, trial: str,) -> str:
"""Returns a fully-qualified trial string."""
return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
project=project, location=location, study=study, trial=trial,
)
@staticmethod
def parse_trial_path(path: str) -> Dict[str, str]:
"""Parses a trial path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, VizierServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the vizier service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
| |
<filename>pypy/module/_ssl/interp_ssl.py<gh_stars>1-10
from __future__ import with_statement
from pypy.rpython.lltypesystem import rffi, lltype
from pypy.interpreter.error import OperationError
from pypy.interpreter.baseobjspace import Wrappable
from pypy.interpreter.typedef import TypeDef
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.rlib.rarithmetic import intmask
from pypy.rlib import rpoll, rsocket
from pypy.rlib.ropenssl import *
from pypy.module._socket import interp_socket
## user defined constants
X509_NAME_MAXLEN = 256
# these mirror ssl.h
PY_SSL_ERROR_NONE, PY_SSL_ERROR_SSL = 0, 1
PY_SSL_ERROR_WANT_READ, PY_SSL_ERROR_WANT_WRITE = 2, 3
PY_SSL_ERROR_WANT_X509_LOOKUP = 4
PY_SSL_ERROR_SYSCALL = 5 # look at error stack/return value/errno
PY_SSL_ERROR_ZERO_RETURN, PY_SSL_ERROR_WANT_CONNECT = 6, 7
# start of non ssl.h errorcodes
PY_SSL_ERROR_EOF = 8 # special case of SSL_ERROR_SYSCALL
PY_SSL_ERROR_INVALID_ERROR_CODE = 9
# certificate requirements / connection roles (mirror CPython's ssl module)
PY_SSL_CERT_NONE, PY_SSL_CERT_OPTIONAL, PY_SSL_CERT_REQUIRED = 0, 1, 2
PY_SSL_CLIENT, PY_SSL_SERVER = 0, 1
(PY_SSL_VERSION_SSL2, PY_SSL_VERSION_SSL3,
 PY_SSL_VERSION_SSL23, PY_SSL_VERSION_TLS1) = range(4)
# internal socket states returned by check_socket_and_wait_for_timeout()
SOCKET_IS_NONBLOCKING, SOCKET_IS_BLOCKING = 0, 1
SOCKET_HAS_TIMED_OUT, SOCKET_HAS_BEEN_CLOSED = 2, 3
SOCKET_TOO_LARGE_FOR_SELECT, SOCKET_OPERATION_OK = 4, 5
HAVE_RPOLL = True # Even win32 has rpoll.poll
# app-level constants exported as the `_ssl` module namespace
constants = {}
constants["SSL_ERROR_ZERO_RETURN"] = PY_SSL_ERROR_ZERO_RETURN
constants["SSL_ERROR_WANT_READ"] = PY_SSL_ERROR_WANT_READ
constants["SSL_ERROR_WANT_WRITE"] = PY_SSL_ERROR_WANT_WRITE
constants["SSL_ERROR_WANT_X509_LOOKUP"] = PY_SSL_ERROR_WANT_X509_LOOKUP
constants["SSL_ERROR_SYSCALL"] = PY_SSL_ERROR_SYSCALL
constants["SSL_ERROR_SSL"] = PY_SSL_ERROR_SSL
constants["SSL_ERROR_WANT_CONNECT"] = PY_SSL_ERROR_WANT_CONNECT
constants["SSL_ERROR_EOF"] = PY_SSL_ERROR_EOF
constants["SSL_ERROR_INVALID_ERROR_CODE"] = PY_SSL_ERROR_INVALID_ERROR_CODE
constants["CERT_NONE"] = PY_SSL_CERT_NONE
constants["CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL
constants["CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED
# SSLv2 may be compiled out of the OpenSSL being linked against
if not OPENSSL_NO_SSL2:
    constants["PROTOCOL_SSLv2"] = PY_SSL_VERSION_SSL2
constants["PROTOCOL_SSLv3"] = PY_SSL_VERSION_SSL3
constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23
constants["PROTOCOL_TLSv1"] = PY_SSL_VERSION_TLS1
constants["OPENSSL_VERSION_NUMBER"] = OPENSSL_VERSION_NUMBER
# Decode OpenSSL's packed MNNFFPPS version number into its
# (major, minor, fix, patch, status) components.
ver = OPENSSL_VERSION_NUMBER
ver, status = divmod(ver, 16)
ver, patch = divmod(ver, 256)
ver, fix = divmod(ver, 256)
ver, minor = divmod(ver, 256)
ver, major = divmod(ver, 256)
constants["OPENSSL_VERSION_INFO"] = (major, minor, fix, patch, status)
constants["OPENSSL_VERSION"] = SSLEAY_VERSION
def ssl_error(space, msg, errno=0):
    """Build an OperationError wrapping the app-level ssl error.

    The app-level exception instance is constructed as ``error(errno, msg)``.
    """
    w_cls = get_error(space)
    w_exc = space.call_function(
        w_cls, space.wrap(errno), space.wrap(msg))
    return OperationError(w_cls, w_exc)
if HAVE_OPENSSL_RAND:
    # helper routines for seeding the SSL PRNG

    @unwrap_spec(string=str, entropy=float)
    def RAND_add(space, string, entropy):
        """RAND_add(string, entropy)

        Mix string into the OpenSSL PRNG state. entropy (a float) is a lower
        bound on the entropy contained in string."""
        buf = rffi.str2charp(string)
        try:
            libssl_RAND_add(buf, len(string), entropy)
        finally:
            # always release the raw C copy of the string
            rffi.free_charp(buf)

    def RAND_status(space):
        """RAND_status() -> 0 or 1

        Returns 1 if the OpenSSL PRNG has been seeded with enough data and 0 if not.
        It is necessary to seed the PRNG with RAND_add() on some platforms before
        using the ssl() function."""
        res = libssl_RAND_status()
        return space.wrap(res)

    @unwrap_spec(path=str)
    def RAND_egd(space, path):
        """RAND_egd(path) -> bytes

        Queries the entropy gather daemon (EGD) on socket path. Returns number
        of bytes read. Raises socket.sslerror if connection to EGD fails or
        if it does not provide enough data to seed PRNG."""
        socket_path = rffi.str2charp(path)
        try:
            bytes = libssl_RAND_egd(socket_path)
        finally:
            rffi.free_charp(socket_path)
        if bytes == -1:
            # EGD communication failure is reported as an ssl error
            msg = "EGD connection failed or EGD did not return"
            msg += " enough data to seed the PRNG"
            raise ssl_error(space, msg)
        return space.wrap(bytes)
class SSLObject(Wrappable):
    def __init__(self, space):
        self.space = space
        self.w_socket = None
        # Raw OpenSSL handles; NULL pointers until the connection is set up.
        self.ctx = lltype.nullptr(SSL_CTX.TO)
        self.ssl = lltype.nullptr(SSL.TO)
        self.peer_cert = lltype.nullptr(X509.TO)
        # Raw char buffers for the peer subject/issuer one-liners; filled
        # in by do_handshake() and freed in destructor().
        self._server = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw')
        self._server[0] = '\0'
        self._issuer = lltype.malloc(rffi.CCHARP.TO, X509_NAME_MAXLEN, flavor='raw')
        self._issuer[0] = '\0'
        # set once SSL_shutdown() has returned 0 (shutdown sent, not received)
        self.shutdown_seen_zero = False
    def server(self):
        # Peer certificate subject line as an app-level string.
        return self.space.wrap(rffi.charp2str(self._server))
    def issuer(self):
        # Peer certificate issuer line as an app-level string.
        return self.space.wrap(rffi.charp2str(self._issuer))
    def __del__(self):
        # RPython finalizers must stay trivial: defer the real cleanup
        # (FFI frees) to destructor() via the destruction queue.
        self.enqueue_for_destruction(self.space, SSLObject.destructor,
                                     '__del__() method of ')
    def destructor(self):
        """Free all OpenSSL resources; called once from the destruction queue."""
        assert isinstance(self, SSLObject)
        if self.peer_cert:
            libssl_X509_free(self.peer_cert)
        if self.ssl:
            libssl_SSL_free(self.ssl)
        if self.ctx:
            libssl_SSL_CTX_free(self.ctx)
        # raw buffers allocated in __init__
        lltype.free(self._server, flavor='raw')
        lltype.free(self._issuer, flavor='raw')
    @unwrap_spec(data='bufferstr')
    def write(self, data):
        """write(s) -> len
        Writes the string s into the SSL object. Returns the number
        of bytes written."""
        self._refresh_nonblocking(self.space)
        # Make sure the underlying socket is usable before touching SSL.
        sockstate = check_socket_and_wait_for_timeout(self.space,
                                                      self.w_socket, True)
        if sockstate == SOCKET_HAS_TIMED_OUT:
            raise ssl_error(self.space, "The write operation timed out")
        elif sockstate == SOCKET_HAS_BEEN_CLOSED:
            raise ssl_error(self.space, "Underlying socket has been closed.")
        elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
            raise ssl_error(self.space, "Underlying socket too large for select().")
        num_bytes = 0
        # Retry SSL_write until it completes; OpenSSL may ask us to wait
        # for readability (renegotiation) or writability in between.
        while True:
            err = 0
            num_bytes = libssl_SSL_write(self.ssl, data, len(data))
            err = libssl_SSL_get_error(self.ssl, num_bytes)
            if err == SSL_ERROR_WANT_READ:
                sockstate = check_socket_and_wait_for_timeout(self.space,
                                                              self.w_socket, False)
            elif err == SSL_ERROR_WANT_WRITE:
                sockstate = check_socket_and_wait_for_timeout(self.space,
                                                              self.w_socket, True)
            else:
                sockstate = SOCKET_OPERATION_OK
            if sockstate == SOCKET_HAS_TIMED_OUT:
                raise ssl_error(self.space, "The write operation timed out")
            elif sockstate == SOCKET_HAS_BEEN_CLOSED:
                raise ssl_error(self.space, "Underlying socket has been closed.")
            elif sockstate == SOCKET_IS_NONBLOCKING:
                # non-blocking socket: report the WANT_* condition via errno
                break
            if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
                continue
            else:
                break
        if num_bytes > 0:
            return self.space.wrap(num_bytes)
        else:
            raise _ssl_seterror(self.space, self, num_bytes)
    def pending(self):
        """pending() -> count
        Returns the number of already decrypted bytes available for read,
        pending on the connection."""
        count = libssl_SSL_pending(self.ssl)
        if count < 0:
            # negative return indicates an OpenSSL-level error
            raise _ssl_seterror(self.space, self, count)
        return self.space.wrap(count)
    @unwrap_spec(num_bytes=int)
    def read(self, num_bytes=1024):
        """read([len]) -> string
        Read up to len bytes from the SSL socket."""
        count = libssl_SSL_pending(self.ssl)
        if not count:
            # No decrypted data buffered: wait for the socket to be readable.
            sockstate = check_socket_and_wait_for_timeout(self.space,
                                                          self.w_socket, False)
            if sockstate == SOCKET_HAS_TIMED_OUT:
                raise ssl_error(self.space, "The read operation timed out")
            elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
                raise ssl_error(self.space, "Underlying socket too large for select().")
            elif sockstate == SOCKET_HAS_BEEN_CLOSED:
                if libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN:
                    # clean shutdown from the peer: report EOF
                    return self.space.wrap('')
                raise ssl_error(self.space, "Socket closed without SSL shutdown handshake")
        raw_buf, gc_buf = rffi.alloc_buffer(num_bytes)
        # Retry SSL_read until data arrives or a terminal condition occurs.
        while True:
            err = 0
            count = libssl_SSL_read(self.ssl, raw_buf, num_bytes)
            err = libssl_SSL_get_error(self.ssl, count)
            if err == SSL_ERROR_WANT_READ:
                sockstate = check_socket_and_wait_for_timeout(self.space,
                                                              self.w_socket, False)
            elif err == SSL_ERROR_WANT_WRITE:
                # renegotiation may require writing even during a read
                sockstate = check_socket_and_wait_for_timeout(self.space,
                                                              self.w_socket, True)
            elif (err == SSL_ERROR_ZERO_RETURN and
                  libssl_SSL_get_shutdown(self.ssl) == SSL_RECEIVED_SHUTDOWN):
                # orderly SSL shutdown received: EOF
                return self.space.wrap("")
            else:
                sockstate = SOCKET_OPERATION_OK
            if sockstate == SOCKET_HAS_TIMED_OUT:
                raise ssl_error(self.space, "The read operation timed out")
            elif sockstate == SOCKET_IS_NONBLOCKING:
                break
            if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
                continue
            else:
                break
        if count <= 0:
            raise _ssl_seterror(self.space, self, count)
        result = rffi.str_from_buffer(raw_buf, gc_buf, num_bytes, count)
        rffi.keep_buffer_alive_until_here(raw_buf, gc_buf)
        return self.space.wrap(result)
    def _refresh_nonblocking(self, space):
        # just in case the blocking state of the socket has been changed
        w_timeout = space.call_method(self.w_socket, "gettimeout")
        nonblocking = not space.is_w(w_timeout, space.w_None)
        # propagate the (non-)blocking mode to both SSL BIOs
        libssl_BIO_set_nbio(libssl_SSL_get_rbio(self.ssl), nonblocking)
        libssl_BIO_set_nbio(libssl_SSL_get_wbio(self.ssl), nonblocking)
    def do_handshake(self, space):
        """Run the SSL handshake, retrying while OpenSSL wants more I/O,
        then cache the peer certificate and its subject/issuer lines."""
        self._refresh_nonblocking(space)
        # Actually negotiate SSL connection
        # XXX If SSL_do_handshake() returns 0, it's also a failure.
        while True:
            ret = libssl_SSL_do_handshake(self.ssl)
            err = libssl_SSL_get_error(self.ssl, ret)
            # XXX PyErr_CheckSignals()
            if err == SSL_ERROR_WANT_READ:
                sockstate = check_socket_and_wait_for_timeout(
                    space, self.w_socket, False)
            elif err == SSL_ERROR_WANT_WRITE:
                sockstate = check_socket_and_wait_for_timeout(
                    space, self.w_socket, True)
            else:
                sockstate = SOCKET_OPERATION_OK
            if sockstate == SOCKET_HAS_TIMED_OUT:
                raise ssl_error(space, "The handshake operation timed out")
            elif sockstate == SOCKET_HAS_BEEN_CLOSED:
                raise ssl_error(space, "Underlying socket has been closed.")
            elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
                raise ssl_error(space, "Underlying socket too large for select().")
            elif sockstate == SOCKET_IS_NONBLOCKING:
                break
            if err == SSL_ERROR_WANT_READ or err == SSL_ERROR_WANT_WRITE:
                continue
            else:
                break
        if ret <= 0:
            raise _ssl_seterror(space, self, ret)
        # Cache the peer certificate and pre-render its subject/issuer
        # one-liners for the server()/issuer() accessors.
        if self.peer_cert:
            libssl_X509_free(self.peer_cert)
        self.peer_cert = libssl_SSL_get_peer_certificate(self.ssl)
        if self.peer_cert:
            libssl_X509_NAME_oneline(
                libssl_X509_get_subject_name(self.peer_cert),
                self._server, X509_NAME_MAXLEN)
            libssl_X509_NAME_oneline(
                libssl_X509_get_issuer_name(self.peer_cert),
                self._issuer, X509_NAME_MAXLEN)
    def shutdown(self, space):
        """Perform the SSL shutdown handshake and return the raw socket."""
        # Guard against closed socket
        w_fileno = space.call_method(self.w_socket, "fileno")
        if space.int_w(w_fileno) < 0:
            raise ssl_error(space, "Underlying socket has been closed")
        self._refresh_nonblocking(space)
        zeros = 0
        while True:
            # Disable read-ahead so that unwrap can work correctly.
            # Otherwise OpenSSL might read in too much data,
            # eating clear text data that happens to be
            # transmitted after the SSL shutdown.
            # Should be safe to call repeatedly everytime this
            # function is used and the shutdown_seen_zero != 0
            # condition is met.
            if self.shutdown_seen_zero:
                libssl_SSL_set_read_ahead(self.ssl, 0)
            ret = libssl_SSL_shutdown(self.ssl)
            # if err == 1, a secure shutdown with SSL_shutdown() is complete
            if ret > 0:
                break
            if ret == 0:
                # Don't loop endlessly; instead preserve legacy
                # behaviour of trying SSL_shutdown() only twice.
                # This looks necessary for OpenSSL < 0.9.8m
                zeros += 1
                if zeros > 1:
                    break
                # Shutdown was sent, now try receiving
                self.shutdown_seen_zero = True
                continue
            # Possibly retry shutdown until timeout or failure
            ssl_err = libssl_SSL_get_error(self.ssl, ret)
            if ssl_err == SSL_ERROR_WANT_READ:
                sockstate = check_socket_and_wait_for_timeout(
                    self.space, self.w_socket, False)
            elif ssl_err == SSL_ERROR_WANT_WRITE:
                sockstate = check_socket_and_wait_for_timeout(
                    self.space, self.w_socket, True)
            else:
                break
            if sockstate == SOCKET_HAS_TIMED_OUT:
                if ssl_err == SSL_ERROR_WANT_READ:
                    raise ssl_error(self.space, "The read operation timed out")
                else:
                    raise ssl_error(self.space, "The write operation timed out")
            elif sockstate == SOCKET_TOO_LARGE_FOR_SELECT:
                raise ssl_error(space, "Underlying socket too large for select().")
            elif sockstate != SOCKET_OPERATION_OK:
                # Retain the SSL error code
                break
        if ret < 0:
            raise _ssl_seterror(space, self, ret)
        return self.w_socket
def cipher(self, space):
if not self.ssl:
return space.w_None
current = libssl_SSL_get_current_cipher(self.ssl)
if not current:
return space.w_None
name = libssl_SSL_CIPHER_get_name(current)
if name:
w_name = space.wrap(rffi.charp2str(name))
else:
w_name = space.w_None
proto = libssl_SSL_CIPHER_get_version(current)
if proto:
w_proto = space.wrap(rffi.charp2str(name))
else:
w_proto = space.w_None
bits = libssl_SSL_CIPHER_get_bits(current,
lltype.nullptr(rffi.INTP.TO))
w_bits = space.newint(bits)
return space.newtuple([w_name, w_proto, w_bits])
@unwrap_spec(der=bool)
def peer_certificate(self, der=False):
"""peer_certificate([der=False]) -> certificate
Returns the certificate for the peer. If no certificate was provided,
returns None. If a certificate was provided, but not validated, returns
an empty dictionary. Otherwise returns a dict containing information
about the peer certificate.
If the optional argument is True, returns a | |
<reponame>RI-imaging/qpimage
import pathlib
import warnings
import h5py
import nrefocus
import numpy as np
from skimage.restoration import unwrap_phase
from . import holo
from .image_data import Amplitude, Phase, write_image_dataset
from .meta import MetaDict, DATA_KEYS, META_KEYS
from ._version import version as __version__
#: valid combinations for keyword argument `which_data`
#: (single strings, or tuples as produced by QPImage._conv_which_data)
VALID_INPUT_DATA = ["field",
                    "hologram",
                    "phase",
                    ("phase", "amplitude"),
                    ("phase", "intensity"),
                    ]
class QPImage(object):
# required to create in-memory hdf5 files with unique fd
_instances = 0
    def __init__(self, data=None, bg_data=None, which_data="phase",
                 meta_data=None, holo_kw=None, proc_phase=True,
                 h5file=None, h5mode="a", h5dtype="float32"):
        """Quantitative phase image manipulation

        This class implements various tasks for quantitative phase
        imaging, including phase unwrapping, background correction,
        numerical focusing, and data export.

        Parameters
        ----------
        data: 2d ndarray (float or complex) or list
            The experimental data (see `which_data`)
        bg_data: 2d ndarray (float or complex), list, or `None`
            The background data (must be same type as `data`)
        which_data: str or tuple
            String or comma-separated list of strings indicating
            the order and type of input data. Valid values are
            "hologram", "field", "phase", "phase,amplitude",
            or "phase,intensity", where the latter two require an
            indexable object with the phase data as first element.
        meta_data: dict
            Meta data associated with the input data.
            see :data:`qpimage.meta.META_KEYS`
        holo_kw: dict
            Special keyword arguments for phase retrieval from
            hologram data (`which_data="hologram"`).
            See :func:`qpimage.holo.get_field` for valid keyword
            arguments.

            .. versionadded:: 0.1.6
        proc_phase: bool
            Process the phase data. This includes phase unwrapping
            using :func:`skimage.restoration.unwrap_phase` and
            correcting for 2PI phase offsets (The offset is estimated
            from a 1px-wide border around the image).

            .. versionadded:: 0.6.0
                Previous versions always performed phase unwrapping
                and did so without offset correction
        h5file: str, pathlib.Path, h5py.Group, h5py.File, or None
            A path to an hdf5 data file where all data is cached. If
            set to `None` (default), all data will be handled in
            memory using the "core" driver of the :mod:`h5py`'s
            :class:`h5py:File` class. If the file does not exist,
            it is created. If the file already exists, it is opened
            with the file mode defined by `hdf5_mode`. If this is
            an instance of h5py.Group or h5py.File, then this will
            be used to internally store all data.
        h5mode: str
            Valid file modes are (only applies if `h5file` is a path)

            - "r": Readonly, file must exist
            - "r+": Read/write, file must exist
            - "w": Create file, truncate if exists
            - "w-" or "x": Create file, fail if exists
            - "a": Read/write if exists, create otherwise (default)
        h5dtype: str
            The datatype in which to store the image data. The default
            is "float32" which is sufficient for 2D image analysis and
            consumes only half the disk space of the numpy default
            "float64".

        Notes
        -----
        QPImage is slicable; the following returns a new QPImage with
        the same meta data, but with all background corrections merged.

        .. code-block:: python

            qpi = QPImage(data=...)
            qpi_scliced = qpi[10:20, 40:30]
        """
        if holo_kw is None:
            holo_kw = {}
        if meta_data is None:
            meta_data = {}
        # Catch the common mistake of passing a file path as `data`.
        if (data is not None and
                not isinstance(data, (np.ndarray, list, tuple))):
            msg = "`data` must be numpy.ndarray!"
            if isinstance(data, (str, pathlib.Path)):
                msg += " Did you mean `h5file={}`?".format(data)
            raise ValueError(msg)
        if isinstance(h5file, h5py.Group):
            # caller owns the HDF5 handle; do not close it in __exit__
            self.h5 = h5file
            self._do_h5_cleanup = False
        else:
            if h5file is None:
                # in-memory file; the counter gives each instance a
                # unique name so "core"-driver files do not collide
                h5kwargs = {"name": "qpimage{}.h5".format(QPImage._instances),
                            "driver": "core",
                            "backing_store": False,
                            "mode": "w"}
            else:
                h5kwargs = {"name": h5file,
                            "mode": h5mode}
            self.h5 = h5py.File(**h5kwargs)
            self._do_h5_cleanup = True
        QPImage._instances += 1
        #: hologram processing keyword arguments
        self.holo_kw = holo_kw
        # set meta data
        meta = MetaDict(meta_data)
        for key in meta:
            self.h5.attrs[key] = meta[key]
        if "qpimage version" not in self.h5.attrs:
            self.h5.attrs["qpimage version"] = __version__
        # set data
        self._amp = Amplitude(self.h5.require_group("amplitude"),
                              h5dtype=h5dtype)
        self._pha = Phase(self.h5.require_group("phase"),
                          h5dtype=h5dtype)
        if data is not None:
            # compute phase and amplitude from input data
            amp, pha = self._get_amp_pha(data=data,
                                         which_data=which_data,
                                         proc_phase=proc_phase)
            self._amp["raw"] = amp
            self._pha["raw"] = pha
            # set background data
            self.set_bg_data(bg_data=bg_data,
                             which_data=which_data)
        self.h5dtype = h5dtype
        # :mod:`nrefocus` interface class
        self._refocuser = None
    def __enter__(self):
        # Context-manager support; cleanup happens in __exit__.
        return self
def __eq__(self, other):
datame = [self.meta[k] for k in self.meta if k in DATA_KEYS]
dataot = [other.meta[k] for k in other.meta if k in DATA_KEYS]
if (isinstance(other, QPImage) and
self.shape == other.shape and
np.allclose(self.amp, other.amp) and
np.allclose(self.pha, other.pha) and
datame == dataot):
return True
else:
return False
def __exit__(self, exc_type, exc_val, exc_tb):
if self._do_h5_cleanup:
self.h5.flush()
self.h5.close()
    def __contains__(self, key):
        # Meta data keys are stored as HDF5 attributes.
        return key in self.h5.attrs
def __getitem__(self, given):
"""Slice QPImage `pha` and `amp` and return a new QPImage
The background data of the returned QPImage is merged into
the "data" background array, i.e. there will be no "fit"
background array.
"""
if isinstance(given, (slice, tuple)):
# return new QPImage
qpi = QPImage(data=(self.raw_pha[given], self.raw_amp[given]),
bg_data=(self.bg_pha[given], self.bg_amp[given]),
which_data=("phase", "amplitude"),
meta_data=self.meta,
proc_phase=False)
return qpi
elif isinstance(given, str):
# return meta data
return self.meta[given]
else:
msg = "Only slicing and meta data keys allowed for `__getitem__`"
raise ValueError(msg)
def __repr__(self):
if "identifier" in self:
ident = self["identifier"]
else:
ident = hex(id(self))
rep = "QPImage <{}>, {x}x{y}px".format(ident,
x=self._amp.raw.shape[0],
y=self._amp.raw.shape[1],
)
if "wavelength" in self:
wl = self["wavelength"]
if 2000e-9 > wl > 10e-9:
# convenience for light microscopy
rep += ", λ={:.1f}nm".format(wl * 1e9)
else:
rep += ", λ={:.2e}m".format(wl)
return rep
def __setitem__(self, key, value):
if key not in META_KEYS:
raise KeyError("Unknown meta data key: {}".format(key))
else:
self.h5.attrs[key] = value
@staticmethod
def _conv_which_data(which_data):
"""Convert which data to string or tuple
This function improves user convenience,
as `which_data` may be of several types
(str, ,str with spaces and commas, list, tuple) which
is internally handled by this method.
"""
if isinstance(which_data, str):
which_data = which_data.lower().strip()
if which_data.count(","):
# convert comma string to list
which_data = [w.strip() for w in which_data.split(",")]
# remove empty strings
which_data = [w for w in which_data if w]
if len(which_data) == 1:
return which_data[0]
else:
# convert to tuple
return tuple(which_data)
else:
return which_data
elif isinstance(which_data, (list, tuple)):
which_data = [w.lower().strip() for w in which_data]
return tuple(which_data)
elif which_data is None:
return None
else:
msg = "unknown type for `which_data`: {}".format(which_data)
raise ValueError(msg)
def _get_amp_pha(self, data, which_data, proc_phase=True):
    """Convert input data to phase and amplitude

    Parameters
    ----------
    data: 2d ndarray (float or complex) or list
        The experimental data (see `which_data`)
    which_data: str
        String or comma-separated list of strings indicating
        the order and type of input data. Valid values are
        "field", "phase", "hologram", "phase,amplitude", or
        "phase,intensity", where the latter two require an
        indexable object with the phase data as first element.
    proc_phase: bool
        Process the phase data. This includes phase unwrapping
        using :func:`skimage.restoration.unwrap_phase` and
        correcting for 2PI phase offsets (The offset is estimated
        from a 1px-wide border around the image).

        .. versionadded:: 0.6.0
            Previous versions always performed phase unwrapping
            and did so without offset correction

    Returns
    -------
    amp, pha: tuple of (:class:`Amplitude`, :class:`Phase`)
    """
    which_data = QPImage._conv_which_data(which_data)
    if which_data == "field":
        # complex field: split into modulus and argument
        amp = np.abs(data)
        pha = np.angle(data)
    elif which_data == "phase":
        # phase only: assume unit amplitude everywhere
        pha = data
        amp = np.ones_like(data)
    elif which_data == ("phase", "amplitude"):
        amp = data[1]
        pha = data[0]
    elif which_data == ("phase", "intensity"):
        # intensity is the squared amplitude
        amp = np.sqrt(data[1])
        pha = data[0]
    elif which_data == "hologram":
        # reconstruct the complex field from the raw hologram with the
        # `holo` module, then recurse with which_data="field"
        amp, pha = self._get_amp_pha(holo.get_field(data, **self.holo_kw),
                                     which_data="field")
    else:
        raise ValueError(
            f"`which_data` must be one of {VALID_INPUT_DATA}!")
    if amp.size == 0 or pha.size == 0:
        msg = "`data` with shape {} has zero size!".format(amp.shape)
        raise ValueError(msg)
    if proc_phase:
        # phase unwrapping (take into account nans)
        nanmask = np.isnan(pha)
        if np.sum(nanmask):
            # create masked array
            # skimage.restoration.unwrap_phase cannot handle nan data
            # (even if masked), so zero the nans before masking
            pham = pha.copy()
            pham[nanmask] = 0
            pham = np.ma.masked_array(pham, mask=nanmask)
            pha = unwrap_phase(pham, seed=47)
            # restore the nan positions after unwrapping
            pha[nanmask] = np.nan
        else:
            pha = unwrap_phase(pha, seed=47)
        # remove 2PI offsets that might be present in the border phase;
        # the offset is estimated from the 1px-wide border of the image
        border = np.concatenate((pha[0, :],
                                 pha[-1, :],
                                 pha[:, 0],
                                 pha[:, -1]))
        twopi = 2*np.pi
        # floor-division of the border minimum by 2*pi gives the number
        # of whole 2*pi offsets to subtract (divmod_neg handles negatives)
        minimum = divmod_neg(np.nanmin(border), twopi)[0]
        offset = minimum * twopi
        pha -= offset
    return amp, pha
@property
def bg_amp(self):
    """background amplitude image (as stored, not corrected)"""
    return self._amp.bg

@property
def bg_pha(self):
    """background phase image (as stored, not corrected)"""
    return self._pha.bg

@property
def amp(self):
    """background-corrected amplitude image"""
    return self._amp.image

@property
def dtype(self):
    """dtype of the raw phase data array"""
    return self._pha.raw.dtype
@property
def field(self):
"""background-corrected complex field"""
return self.amp | |
= False) -> DefaultBoxes:
"""
Convenience function for generating DefaultBoxes object for standard SSD300 model
:param voc: set True if default boxes should be made for VOC dataset.
Will set scales to be slightly larger than for the default
COCO dataset configuration
:return: DefaultBoxes object implemented for standard SSD300 models
"""
image_size = 300
feature_maps = [38, 19, 10, 5, 3, 1]
steps = [8, 16, 32, 64, 100, 300]
# use the scales here:
# https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
if voc:
scales = [[30, 60], [60, 111], [111, 162], [162, 213], [213, 264], [264, 315]]
else:
scales = [[21, 45], [45, 99], [99, 153], [153, 207], [207, 261], [261, 315]]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
return DefaultBoxes(image_size, feature_maps, steps, scales, aspect_ratios)
def _ltrb_to_xywh(boxes):
# in-place conversion from ltrb to cx,cy,w,h format
# expected input shape N,4
cx = 0.5 * (boxes[:, 0] + boxes[:, 2])
cy = 0.5 * (boxes[:, 1] + boxes[:, 3])
w = boxes[:, 2] - boxes[:, 0]
h = boxes[:, 3] - boxes[:, 1]
boxes[:, 0] = cx
boxes[:, 1] = cy
boxes[:, 2] = w
boxes[:, 3] = h
def _xywh_to_ltrb_batch(boxes):
# in-place conversion from cx, cy, w, h format to ltrb
# expected input shape M,N,4
lt_x = boxes[:, :, 0] - 0.5 * boxes[:, :, 2]
lt_y = boxes[:, :, 1] - 0.5 * boxes[:, :, 3]
rb_x = boxes[:, :, 0] + 0.5 * boxes[:, :, 2]
rb_y = boxes[:, :, 1] + 0.5 * boxes[:, :, 3]
boxes[:, :, 0] = lt_x
boxes[:, :, 1] = lt_y
boxes[:, :, 2] = rb_x
boxes[:, :, 3] = rb_y
# Potential IoU thresholds for selecting cropping boundaries for SSD model
# input. `ssd_random_crop` draws one entry uniformly at random: None keeps
# the original image untouched, -1 accepts any crop (every IoU passes the
# `> -1` test), and the floats are minimum-IoU requirements for the crop.
_SSD_RANDOM_CROP_OPTIONS = (
    # return original
    None,
    # random crop (all IoUs are valid)
    -1,
    # crop with minimum box IoU
    0.1,
    0.3,
    0.5,
    0.7,
    0.9,
)
def ssd_random_crop(
    image: Image.Image, boxes: Tensor, labels: Tensor
) -> Tuple[Image.Image, Tensor, Tensor]:
    """
    Performs one of the random SSD crops on a given image, bounding boxes,
    and labels as implemented in the original paper.

    | Chooses between following 3 conditions:
    | 1. Preserve the original image
    | 2. Random crop minimum IoU is among 0.1, 0.3, 0.5, 0.7, 0.9
    | 3. Random crop

    Adapted from: https://github.com/chauhan-utk/src.DomainAdaptation

    :param image: the image to potentially crop
    :param boxes: a tensor of bounding boxes in ltrb format with shape
        n_boxes,4; coordinates are treated as fractions of the image size
        (they are compared directly against the fractional crop window)
    :param labels: a tensor of labels for each of the bounding boxes
    :return: the cropped image, boxes, and labels
    """
    if box_iou is None:
        # box_iou is imported at module load time and set to None on failure
        raise RuntimeError(
            "Unable to import box_iou from torchvision.ops try upgrading your"
            " torch and torchvision versions"
        )
    # Loop terminates eventually: the None option returns immediately and
    # the -1 option accepts any crop once a valid window is sampled
    while True:
        min_iou = random.choice(_SSD_RANDOM_CROP_OPTIONS)
        # do nothing
        if min_iou is None:
            return image, boxes, labels
        w_orig, h_orig = image.size
        # search for 50 random crops before trying a different threshold
        for _ in range(50):
            # crops to [.1,1.0] of image area since 0.3 * 0.3 ~= 0.1
            w_crop = random.uniform(0.3, 1.0)
            h_crop = random.uniform(0.3, 1.0)
            if w_crop / h_crop < 0.5 or w_crop / h_crop > 2:
                continue  # keep crop ratio between 1:2 / 2:1
            # generate bounding box of size w_crop,h_crop in fractional coords
            left = random.uniform(0, 1.0 - w_crop)
            top = random.uniform(0, 1.0 - h_crop)
            right = left + w_crop
            bottom = top + h_crop
            # get IoUs between given bounding boxes and cropped box
            ious = box_iou(boxes, torch.tensor([[left, top, right, bottom]]))
            if not (ious > min_iou).all():
                continue  # do not use this crop if all boxes do not pass threshold
            # discard any boxes whose center is not in the cropped image
            x_centers = 0.5 * (boxes[:, 0] + boxes[:, 2])
            y_centers = 0.5 * (boxes[:, 1] + boxes[:, 3])
            center_in_crop_mask = (
                (x_centers > left)
                & (x_centers < right)
                & (y_centers > top)
                & (y_centers < bottom)
            )
            if not center_in_crop_mask.any():
                continue  # do not use crop if no boxes are centered in it
            # clip bounding boxes to the cropped boundaries
            # NOTE(review): this clipping mutates the caller's `boxes`
            # tensor in place, even for boxes that are dropped below
            boxes[boxes[:, 0] < left, 0] = left
            boxes[boxes[:, 1] < top, 1] = top
            boxes[boxes[:, 2] > right, 2] = right
            boxes[boxes[:, 3] > bottom, 3] = bottom
            # drop bounding boxes whose centers are not in the cropped region
            boxes = boxes[center_in_crop_mask, :]
            labels = labels[center_in_crop_mask]
            # expand the cropped region to map to pixels in the image and crop
            image_crop_box = (
                int(left * w_orig),
                int(top * h_orig),
                int(right * w_orig),
                int(bottom * h_orig),
            )
            image = image.crop(image_crop_box)
            # shift boxes into the crop frame and rescale to [0, 1]
            boxes[:, 0] = (boxes[:, 0] - left) / w_crop
            boxes[:, 1] = (boxes[:, 1] - top) / h_crop
            boxes[:, 2] = (boxes[:, 2] - left) / w_crop
            boxes[:, 3] = (boxes[:, 3] - top) / h_crop
            return image, boxes, labels
"""
named tuple for storing detection model score and truth
"""
DetectionResult = NamedTuple(
"DetectionResult",
[("score", float), ("is_true_positive", bool)],
)
class MeanAveragePrecision(object):
"""
Class for computing the mean average precision of an object detection model output.
Inputs will be decoded by the provided post-processing function.
Each batch update tracks the cumulative ground truth objects of each class, and the
scores the model gives each class.
calculate_map object uses the aggregated results to find the mAP at the given
threshold(s)
:param postprocessing_fn: function that takes in detection model output and returns
post-processed tuple of predicted bounding boxes, classification labels, and
scores
:param iou_threshold: IoU thresholds to match predicted objects to ground truth
objects. Can provide a single IoU or a tuple of two representing a range.
mAP will be averaged over the range of values at each IoU
:param iou_steps: the amount of IoU to shift between measurements between
iou_threshold values
"""
def __init__(
    self,
    postprocessing_fn: Callable[[Any], Tuple[Tensor, Tensor, Tensor]],
    iou_threshold: Union[float, Tuple[float, float]] = 0.5,
    iou_steps: float = 0.05,
):
    """
    Set up aggregation state for mAP computation.

    :param postprocessing_fn: function that takes in detection model output
        and returns post-processed tuple of predicted bounding boxes,
        classification labels, and scores
    :param iou_threshold: a single IoU threshold (int or float) or a
        (min, max) tuple defining a range of thresholds
    :param iou_steps: the amount of IoU to shift between measurements
        between iou_threshold values
    """
    self._postprocessing_fn = postprocessing_fn
    # Accept any plain number as a single threshold. The previous check
    # (`isinstance(iou_threshold, float)`) crashed on an integer argument
    # by falling through to tuple unpacking.
    if isinstance(iou_threshold, (int, float)):
        self._iou_thresholds = [iou_threshold]
    else:
        min_threshold, max_threshold = iou_threshold
        steps = abs(round((max_threshold - min_threshold) / iou_steps))
        self._iou_thresholds = [
            min_threshold + (iou_steps * step) for step in range(steps)
        ]
        # Always include the upper bound; the emptiness check also fixes an
        # IndexError when min_threshold == max_threshold (steps == 0).
        if not self._iou_thresholds or self._iou_thresholds[-1] < max_threshold:
            self._iou_thresholds.append(max_threshold)
    # dictionaries used to store model results for mAP calculation
    self._ground_truth_classes_count = defaultdict(int)  # class -> num_expected
    self._detection_results_by_class = defaultdict(
        lambda: defaultdict(list)
    )  # iou_threshold -> class -> results
def __str__(self):
    """Return a label such as 'mAP@0.5' or 'mAP@[0.5:0.95]'."""
    thresholds = self._iou_thresholds
    if len(thresholds) == 1:
        iou_repr = str(thresholds[0])
    else:
        iou_repr = "[{}:{}]".format(thresholds[0], thresholds[-1])
    return "mAP@{}".format(iou_repr)
def clear(self):
    """Discard all accumulated ground-truth counts and detection results."""
    # class -> number of ground-truth instances seen
    self._ground_truth_classes_count = defaultdict(int)
    # iou_threshold -> class -> list of DetectionResult
    self._detection_results_by_class = defaultdict(lambda: defaultdict(list))
def _update_class_counts(self, actual_labels: Tensor):
    """Increment the ground-truth counter for every label in the tensor."""
    for label_value in actual_labels.reshape(-1).tolist():
        self._ground_truth_classes_count[label_value] += 1
def _update_model_results(
    self,
    prediction_is_true_positive: Tensor,
    pred_labels: Tensor,
    pred_scores: Tensor,
    iou_threshold: float,
):
    """Record each prediction's score and TP flag under its class for the
    given IoU threshold."""
    results_for_threshold = self._detection_results_by_class[iou_threshold]
    for idx in range(pred_labels.size(0)):
        result = DetectionResult(
            score=pred_scores[idx].item(),
            is_true_positive=prediction_is_true_positive[idx].item() != 0,
        )
        results_for_threshold[pred_labels[idx].item()].append(result)
def batch_forward(
self,
model_output: Tuple[Tensor, Tensor],
ground_truth_annotations: List[Tuple[Tensor, Tensor]],
):
"""
Decodes the model outputs using non maximum suppression, then stores the
number of ground truth objects per class, true positives, and true negatives
that can be used to calculate the overall mAP in the calculate_map function
:param model_output: the predictions tuple containing [predicted_boxes,
predicted_labels] batch size should match length of ground_truth_annotations
:param ground_truth_annotations: annotations from data loader to compare the
batch results to, should be
"""
if box_iou is None:
raise RuntimeError(
"Unable to import box_iou from torchvision.ops try upgrading your"
" torch and torchvision versions"
)
# run postprocessing / nms
nms_results = self._postprocessing_fn(model_output)
# match nms results to ground truth objects for each batch image and store
for prediction, annotations in zip(nms_results, ground_truth_annotations):
actual_boxes, actual_labels = annotations
self._update_class_counts(actual_labels)
if prediction is None or len(prediction) == 0:
continue
pred_boxes, pred_labels, pred_scores = prediction
if pred_boxes.size(0) == 0:
continue
if actual_boxes.size(0) == 0: # no GTs, all results will be False negative
prediction_is_true_positive = torch.zeros(pred_labels.shape)
for threshold in self._iou_thresholds:
self._update_model_results(
prediction_is_true_positive, pred_labels, pred_scores, threshold
)
continue
# order predictions by scores
pred_ranks = torch.argsort(pred_scores, descending=True)
pred_boxes = | |
136 / 409):
ac_num = 3
for s in range(1, 3):
k = s - ac_num
left = int(ac_center + k * space - 22 / 57 * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space - 22 / 57 * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
for s in range(3, 8):
k = s - ac_num
left = int(ac_center + k * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
if (187 * 136 / 409 < ac_center) and (ac_center <= 244 * 136 / 409):
ac_num = 4
for s in range(1, 3):
k = s - ac_num
left = int(ac_center + k * space - 22 / 57 * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space - 22 / 57 * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
for s in range(3, 8):
k = s - ac_num
left = int(ac_center + k * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = right - ww
right = int(ac_center + k * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
left = int(left) # 改过
globals()['char_' + str(s)] = img[:, left:right]
if (244 * 136 / 409 < ac_center) and (ac_center <= 301 * 136 / 409):
ac_num = 5
for s in range(1, 3):
k = s - ac_num
left = int(ac_center + k * space - 22 / 57 * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space - 22 / 57 * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
for s in range(3, 8):
k = s - ac_num
left = int(ac_center + k * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
if (301 * 136 / 409 < ac_center) and (ac_center <= 358 * 136 / 409):
ac_num = 6
for s in range(1, 3):
k = s - ac_num
left = int(ac_center + k * space - 22 / 57 * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space - 22 / 57 * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
for s in range(3, 8):
k = s - ac_num
left = int(ac_center + k * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
if (358 * 136 / 409 < ac_center) and (ac_center <= 409 * 136 / 409):
ac_num = 7
for s in range(1, 3):
k = s - ac_num
left = int(ac_center + k * space - 22 / 57 * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
left = int(right - ww)
right = int(ac_center + k * space - 22 / 57 * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
for s in range(3, 8):
k = s - ac_num
left = int(ac_center + k * space - half_w)
if left <= 0:
left = 0
if left >= 136:
left = int(right - ww)
right = int(ac_center + k * space + half_w)
if right <= 0:
right = int(left + ww)
if right >= 136:
right = 136
globals()['char_' + str(s)] = img[:, left:right]
else:
print("bad plate")
plateflag = 0
# print(f)
# print(char_1.shape)
if plateflag != 0:
return char_1, char_2, char_3, char_4, char_5, char_6, char_7
else:
return 0
def char_recog(self, char2, char3, char4, char5, char6, char7):
    """Recognize the six plate characters with the pre-trained CNN.

    Each argument is one cropped character image (ndarray), resized here
    to the network's 70x110 input size.

    :return: tuple (char_result, prob) where char_result is a list of six
        recognized character strings and prob is the str() of the list of
        per-character confidence values (kept as a string for backward
        compatibility with existing callers).
    """
    # Load the model architecture and weights from disk.
    # NOTE(review): the network is reloaded on every call, which is slow;
    # consider caching the model on the instance.
    # (Fixes a file-handle leak: the json file was opened without close.)
    with open('carplatenew.json') as json_file:
        model = model_from_json(json_file.read())
    model.load_weights('carplatenew.h5')
    # Class index -> plate character; note 'I' and 'O' are absent.
    table = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
             'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M',
             'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    char_result = []
    confidences = []
    # One pipeline per character; previously duplicated six times inline.
    for char_img in (char2, char3, char4, char5, char6, char7):
        resized = cv2.resize(char_img, (70, 110),
                             interpolation=cv2.INTER_CUBIC)
        batch = np.expand_dims(img_to_array(resized), axis=0)
        class_idx = int(model.predict_classes(batch))
        proba = model.predict_proba(batch)
        char_result.append(str(table[class_idx]))
        confidences.append(proba[0][class_idx])
    prob = str(confidences)
    return char_result, prob
def ImageProcess(self):
print("进入CONTROLLER")
image_file = self.image_file
self.excelResults = list()
self.excelName = list()
self.model_path = os.getcwd() + '\\keras-yolo3\\model_data\\yolo.h5' # model path or trained weights path
self.anchors_path = os.getcwd() + '\\keras-yolo3\\model_data\\yolo_anchors.txt'
self.classes_path = os.getcwd() + '\\keras-yolo3\\model_data\\coco_classes.txt'
self.score = 0.3
self.iou = 0.45
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.model_image_size = (416, 416) # fixed size or (None, None), hw
self.boxes, self.scores, self.classes = self.generate()
image_data = np.ones((416, 416, 3))
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
ob, osc, oc = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [654, 512],
K.learning_phase(): 0
}) # 深度玄学
print("开始读图")
# for img_name in os.listdir(image_file): 改文件名字
# imgfilepath = os.path.join(image_file, img_name)
# os.rename(imgfilepath,os.path.join(image_file,img_name[1:len(img_name)]))
for img_name in os.listdir(image_file):
imgfilepath = os.path.join(image_file, img_name)
img_frame = cv2.imread(imgfilepath)
self.ori_image = img_frame.copy() # 深拷贝
self.mutex_process.lock()
print(image_file)
self.sg_show.emit(img_frame)
self.mutex_process.unlock()
self.detect_img = img_frame
pil_frame = Image.fromarray(cv2.cvtColor(img_frame, cv2.COLOR_BGR2RGB))
car_position = self.detect_image(pil_frame)
for car_num in range(np.shape(car_position)[0]): # 存储车辆图像
self.mutex_process.lock()
print(image_file)
if car_position[car_num][0] < 0 or car_position[car_num][1] < 0 or car_position[car_num][2] < 0 or \
car_position[car_num][3] < 0:
continue
img_car = img_frame[car_position[car_num][1]:car_position[car_num][3],
car_position[car_num][0]: car_position[car_num][2], :] # y1:y2,x1:x2
car_name = img_name[:len(img_name) - 4] + '_' + str(car_num) + '.jpg' # 第i辆车
cv2.imwrite(self.car_save_path + car_name, img_car) # 定位的车辆
print(car_position)
print(car_position[car_num])
cv2.rectangle(self.detect_img, (car_position[car_num][0], car_position[car_num][1]),
(car_position[car_num][2], car_position[car_num][3]), (0, 255, 0), 2)
if self.visualize:
img_show = self.detect_img[:, :, :]
print('发结果')
else:
img_show = self.ori_image
print('发原图')
self.sg_show.emit(img_show)
self.mutex_process.unlock()
# self.sg_finished.emit(self.image_file)
# exe隐藏cmd窗口
st = subprocess.STARTUPINFO()
st.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
st.wShowWindow = subprocess.SW_HIDE
# | |
:
if word[1] == "C" or word[1] == "c" :
toGuess = toGuess[:1] + "c" + toGuess[2:]
if word[2] == "C" or word[2] == "c" :
toGuess = toGuess[:2] + "c" + toGuess[3:]
if word[3] == "C" or word[3] == "c" :
toGuess = toGuess[:3] + "c" + toGuess[4:]
if word[1] != "C" and word[1] != "c" and word[2] != "C" and word[2] != "c" and word[3] != "C" and word[3] != "c" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "c" + ", "
if guessChar == "D" or guessChar == "d" :
if word[1] == "D" or word[1] == "d" :
toGuess = toGuess[:1] + "d" + toGuess[2:]
if word[2] == "D" or word[2] == "d" :
toGuess = toGuess[:2] + "d" + toGuess[3:]
if word[3] == "D" or word[3] == "d" :
toGuess = toGuess[:3] + "d" + toGuess[4:]
if word[1] != "D" and word[1] != "d" and word[2] != "D" and word[2] != "d" and word[3] != "D" and word[3] != "d" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "d" + ", "
if guessChar == "E" or guessChar == "e" :
if word[1] == "E" or word[1] == "e" :
toGuess = toGuess[:1] + "e" + toGuess[2:]
if word[2] == "E" or word[2] == "e" :
toGuess = toGuess[:2] + "e" + toGuess[3:]
if word[3] == "E" or word[3] == "e" :
toGuess = toGuess[:3] + "e" + toGuess[4:]
if word[1] != "E" and word[1] != "e" and word[2] != "E" and word[2] != "e" and word[3] != "E" and word[3] != "e" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "e" + ", "
if guessChar == "F" or guessChar == "f" :
if word[1] == "F" or word[1] == "f" :
toGuess = toGuess[:1] + "f" + toGuess[2:]
if word[2] == "F" or word[2] == "f" :
toGuess = toGuess[:2] + "f" + toGuess[3:]
if word[3] == "F" or word[3] == "f" :
toGuess = toGuess[:3] + "f" + toGuess[4:]
if word[1] != "F" and word[1] != "f" and word[2] != "F" and word[2] != "f" and word[3] != "F" and word[3] != "f" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "f" + ", "
if guessChar == "G" or guessChar == "g" :
if word[1] == "G" or word[1] == "g" :
toGuess = toGuess[:1] + "g" + toGuess[2:]
if word[2] == "G" or word[2] == "g" :
toGuess = toGuess[:2] + "g" + toGuess[3:]
if word[3] == "G" or word[3] == "g" :
toGuess = toGuess[:3] + "g" + toGuess[4:]
if word[1] != "G" and word[1] != "g" and word[2] != "G" and word[2] != "g" and word[3] != "G" and word[3] != "g" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "g" + ", "
if guessChar == "H" or guessChar == "h" :
if word[1] == "H" or word[1] == "h" :
toGuess = toGuess[:1] + "h" + toGuess[2:]
if word[2] == "H" or word[2] == "h" :
toGuess = toGuess[:2] + "h" + toGuess[3:]
if word[3] == "H" or word[3] == "h" :
toGuess = toGuess[:3] + "h" + toGuess[4:]
if word[1] != "H" and word[1] != "h" and word[2] != "H" and word[2] != "h" and word[3] != "H" and word[3] != "h" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "h" + ", "
if guessChar == "I" or guessChar == "i" :
if word[1] == "I" or word[1] == "i" :
toGuess = toGuess[:1] + "i" + toGuess[2:]
if word[2] == "I" or word[2] == "i" :
toGuess = toGuess[:2] + "i" + toGuess[3:]
if word[3] == "I" or word[3] == "i" :
toGuess = toGuess[:3] + "i" + toGuess[4:]
if word[1] != "I" and word[1] != "i" and word[2] != "I" and word[2] != "i" and word[3] != "I" and word[3] != "i" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "i" + ", "
if guessChar == "J" or guessChar == "j" :
if word[1] == "J" or word[1] == "j" :
toGuess = toGuess[:1] + "j" + toGuess[2:]
if word[2] == "J" or word[2] == "j" :
toGuess = toGuess[:2] + "j" + toGuess[3:]
if word[3] == "J" or word[3] == "j" :
toGuess = toGuess[:3] + "j" + toGuess[4:]
if word[1] != "J" and word[1] != "j" and word[2] != "J" and word[2] != "j" and word[3] != "J" and word[3] != "j" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "j" + ", "
if guessChar == "K" or guessChar == "k" :
if word[1] == "K" or word[1] == "k" :
toGuess = toGuess[:1] + "k" + toGuess[2:]
if word[2] == "K" or word[2] == "k" :
toGuess = toGuess[:2] + "k" + toGuess[3:]
if word[3] == "K" or word[3] == "k" :
toGuess = toGuess[:3] + "k" + toGuess[4:]
if word[1] != "K" and word[1] != "k" and word[2] != "K" and word[2] != "k" and word[3] != "K" and word[3] != "k" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "k" + ", "
if guessChar == "L" or guessChar == "l" :
if word[1] == "L" or word[1] == "l" :
toGuess = toGuess[:1] + "l" + toGuess[2:]
if word[2] == "L" or word[2] == "l" :
toGuess = toGuess[:2] + "l" + toGuess[3:]
if word[3] == "L" or word[3] == "l" :
toGuess = toGuess[:3] + "l" + toGuess[4:]
if word[1] != "L" and word[1] != "l" and word[2] != "L" and word[2] != "l" and word[3] != "L" and word[3] != "l" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "l" + ", "
if guessChar == "M" or guessChar == "m" :
if word[1] == "M" or word[1] == "m" :
toGuess = toGuess[:1] + "m" + toGuess[2:]
if word[2] == "M" or word[2] == "m" :
toGuess = toGuess[:2] + "m" + toGuess[3:]
if word[3] == "M" or word[3] == "m" :
toGuess = toGuess[:3] + "m" + toGuess[4:]
if word[1] != "M" and word[1] != "m" and word[2] != "M" and word[2] != "m" and word[3] != "M" and word[3] != "m" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "m" + ", "
if guessChar == "N" or guessChar == "n" :
if word[1] == "N" or word[1] == "n" :
toGuess = toGuess[:1] + "n" + toGuess[2:]
if word[2] == "N" or word[2] == "n" :
toGuess = toGuess[:2] + "n" + toGuess[3:]
if word[3] == "N" or word[3] == "n" :
toGuess = toGuess[:3] + "n" + toGuess[4:]
if word[1] != "N" and word[1] != "n" and word[2] != "N" and word[2] != "n" and word[3] != "N" and word[3] != "n" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "n" + ", "
if guessChar == "O" or guessChar == "o" :
if word[1] == "O" or word[1] == "o" :
toGuess = toGuess[:1] + "o" + toGuess[2:]
if word[2] == "O" or word[2] == "o" :
toGuess = toGuess[:2] + "o" + toGuess[3:]
if word[3] == "O" or word[3] == "o" :
toGuess = toGuess[:3] + "o" + toGuess[4:]
if word[1] != "O" and word[1] != "o" and word[2] != "O" and word[2] != "o" and word[3] != "O" and word[3] != "o" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "o" + ", "
if guessChar == "P" or guessChar == "p" :
if word[1] == "P" or word[1] == "p" :
toGuess = toGuess[:1] + "p" + toGuess[2:]
if word[2] == "P" or word[2] == "p" :
toGuess = toGuess[:2] + "p" + toGuess[3:]
if word[3] == "P" or word[3] == "p" :
toGuess = toGuess[:3] + "p" + toGuess[4:]
if word[1] != "P" and word[1] != "p" and word[2] != "P" and word[2] != "p" and word[3] != "P" and word[3] != "p" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "p" + ", "
if guessChar == "Q" or guessChar == "q" :
if word[1] == "Q" or word[1] == "q" :
toGuess = toGuess[:1] + "q" + toGuess[2:]
if word[2] == "Q" or word[2] == "q" :
toGuess = toGuess[:2] + "q" + toGuess[3:]
if word[3] == "Q" or word[3] == "q" :
toGuess = toGuess[:3] + "q" + toGuess[4:]
if word[1] != "Q" and word[1] != "q" and word[2] != "Q" and word[2] != "q" and word[3] != "Q" and word[3] != "q" :
print("\nWrong!\n")
numberOfErrors = numberOfErrors + 1
wrongChars = wrongChars + "q" + ", "
if guessChar == "R" or guessChar == "r" :
if word[1] == "R" or | |
False otherwise.
kwargs will be passed onto the creation of the dataset. Please pass chunking, compression, dtype, and other
arguments this way
Returns
-------
h5_main : USIDataset
Reference to the main dataset
"""
def __check_anc_before_creation(aux_prefix, dim_type='pos'):
    """Validate an ancillary-dataset prefix and guard against clobbering
    existing Indices / Values datasets in the destination group."""
    aux_prefix = validate_single_string_arg(aux_prefix,
                                            'aux_' + dim_type + '_prefix')
    if not aux_prefix.endswith('_'):
        aux_prefix += '_'
    if '-' in aux_prefix:
        clean_prefix = aux_prefix.replace('-', '_')
        warn('aux_' + dim_type + ' should not contain the "-" character. '
             'Reformatted name from:{} to '
             '{}'.format(aux_prefix, clean_prefix))
        aux_prefix = clean_prefix
    for suffix in ('Indices', 'Values'):
        dset_name = aux_prefix + suffix
        if dset_name in h5_parent_group.keys():
            # TODO: What if the contained data was correct?
            raise KeyError('Dataset named: ' + dset_name + ' already exists in group: '
                           '{}. Consider passing these datasets using kwargs (if they are correct) instead of providing the pos_dims and spec_dims arguments'.format(h5_parent_group.name))
    return aux_prefix
def __ensure_anc_in_correct_file(h5_inds, h5_vals, prefix):
    """Ensure the indices / values pair lives in the destination file,
    copying both datasets over when it does not."""
    if h5_inds.file != h5_vals.file:
        raise ValueError('Provided ' + prefix + ' datasets are present in different HDF5 files!')
    if h5_inds.file == h5_parent_group.file:
        # already in the destination file; nothing to do
        return h5_inds, h5_vals
    # The ancillary pair lives in a different HDF5 file: copy it over.
    if verbose:
        print('Need to copy over ancillary datasets: {} and {} to '
              'destination group: {} which is in a different HDF5 '
              'file'.format(h5_inds, h5_vals, h5_parent_group))
    copied = [copy_dataset(dset, h5_parent_group, verbose=verbose)
              for dset in (h5_inds, h5_vals)]
    return tuple(copied)
if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
raise TypeError('h5_parent_group should be a h5py.File or h5py.Group object')
if not is_editable_h5(h5_parent_group):
raise ValueError('The provided file is not editable')
if verbose:
print('h5 group and file OK')
quantity, units, main_data_name = validate_string_args([quantity, units, main_data_name],
['quantity', 'units', 'main_data_name'])
if verbose:
print('quantity, units, main_data_name all OK')
quantity = quantity.strip()
units = units.strip()
main_data_name = main_data_name.strip()
if '-' in main_data_name:
warn('main_data_name should not contain the "-" character. Reformatted name from:{} to '
'{}'.format(main_data_name, main_data_name.replace('-', '_')))
main_data_name = main_data_name.replace('-', '_')
if isinstance(main_data, (list, tuple)):
if not contains_integers(main_data, min_val=1):
raise ValueError('main_data if specified as a shape should be a list / tuple of integers >= 1')
if len(main_data) != 2:
raise ValueError('main_data if specified as a shape should contain 2 numbers')
if 'dtype' not in kwargs:
raise ValueError('dtype must be included as a kwarg when creating an empty dataset')
_ = validate_dtype(kwargs.get('dtype'))
main_shape = main_data
if verbose:
print('Selected empty dataset creation. OK so far')
elif isinstance(main_data, (np.ndarray, da.core.Array)):
if main_data.ndim != 2:
raise ValueError('main_data should be a 2D array')
main_shape = main_data.shape
if verbose:
print('Provided numpy or Dask array for main_data OK so far')
else:
raise TypeError('main_data should either be a numpy array or a tuple / list with the shape of the data')
if h5_pos_inds is not None and h5_pos_vals is not None:
# The provided datasets override fresh building instructions.
validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, main_shape, is_spectroscopic=False)
if verbose:
print('The shapes of the provided h5 position indices and values are OK')
h5_pos_inds, h5_pos_vals = __ensure_anc_in_correct_file(h5_pos_inds, h5_pos_vals, 'Position')
else:
aux_pos_prefix = __check_anc_before_creation(aux_pos_prefix, dim_type='pos')
pos_dims = validate_dimensions(pos_dims, dim_type='Position')
validate_dims_against_main(main_shape, pos_dims, is_spectroscopic=False)
if verbose:
print('Passed all pre-tests for creating position datasets')
h5_pos_inds, h5_pos_vals = write_ind_val_dsets(h5_parent_group, pos_dims, is_spectral=False, verbose=verbose,
slow_to_fast=slow_to_fast, base_name=aux_pos_prefix)
if verbose:
print('Created position datasets!')
if h5_spec_inds is not None and h5_spec_vals is not None:
# The provided datasets override fresh building instructions.
validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, main_shape, is_spectroscopic=True)
if verbose:
print('The shapes of the provided h5 position indices and values '
'are OK')
h5_spec_inds, h5_spec_vals = __ensure_anc_in_correct_file(h5_spec_inds, h5_spec_vals,
'Spectroscopic')
else:
aux_spec_prefix = __check_anc_before_creation(aux_spec_prefix, dim_type='spec')
spec_dims = validate_dimensions(spec_dims, dim_type='Spectroscopic')
validate_dims_against_main(main_shape, spec_dims, is_spectroscopic=True)
if verbose:
print('Passed all pre-tests for creating spectroscopic datasets')
h5_spec_inds, h5_spec_vals = write_ind_val_dsets(h5_parent_group, spec_dims, is_spectral=True, verbose=verbose,
slow_to_fast=slow_to_fast, base_name=aux_spec_prefix)
if verbose:
print('Created Spectroscopic datasets')
if h5_parent_group.file.driver == 'mpio':
if kwargs.pop('compression', None) is not None:
warn('This HDF5 file has been opened wth the "mpio" communicator. '
'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')
if isinstance(main_data, np.ndarray):
# Case 1 - simple small dataset
h5_main = h5_parent_group.create_dataset(main_data_name, data=main_data, **kwargs)
if verbose:
print('Created main dataset with provided data')
elif isinstance(main_data, da.core.Array):
# Case 2 - Dask dataset
# step 0 - get rid of any automated dtype specification:
_ = kwargs.pop('dtype', None)
# step 1 - create the empty dataset:
h5_main = h5_parent_group.create_dataset(main_data_name, shape=main_data.shape, dtype=main_data.dtype,
**kwargs)
if verbose:
print('Created empty dataset: {} for writing Dask dataset: {}'.format(h5_main, main_data))
print('Dask array will be written to HDF5 dataset: "{}" in file: "{}"'.format(h5_main.name,
h5_main.file.filename))
# Step 2 - now ask Dask to dump data to disk
da.to_hdf5(h5_main.file.filename, {h5_main.name: main_data})
# main_data.to_hdf5(h5_main.file.filename, h5_main.name) # Does not work with python 2 for some reason
else:
# Case 3 - large empty dataset
h5_main = h5_parent_group.create_dataset(main_data_name, main_data, **kwargs)
if verbose:
print('Created empty dataset for Main')
write_simple_attrs(h5_main, {'quantity': quantity, 'units': units})
if verbose:
print('Wrote quantity and units attributes to main dataset')
if isinstance(main_dset_attrs, dict):
write_simple_attrs(h5_main, main_dset_attrs)
if verbose:
print('Wrote provided attributes to main dataset')
write_book_keeping_attrs(h5_main)
# make it main
link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
if verbose:
print('Successfully linked datasets - dataset should be main now')
from ..usi_data import USIDataset
return USIDataset(h5_main)
def map_grid_to_cartesian(h5_main, grid_shape, mode='histogram', **kwargs):
    """
    Map an incomplete measurement, such as a spiral scan, to a cartesian grid.

    Parameters
    ----------
    h5_main : :class:`pyUSID.USIDataset`
        Dataset containing the sparse measurement
    grid_shape : int or [int, int]
        Shape of the output :class:`numpy.ndarray`.
    mode : str, optional. Default = 'histogram'
        Method used for building a cartesian grid.
        Available methods = 'histogram', 'linear', 'nearest', 'cubic'
        Use kwargs to pass onto each of the techniques

    Note
    ----
    UNDER DEVELOPMENT!
    Currently only valid for 2 position dimensions

    @author: <NAME>

    Returns
    -------
    :class:`numpy.ndarray` but could be a h5py.Dataset or dask.array.core.Array object

    Raises
    ------
    TypeError
        If h5_main is not a USIDataset or does not have exactly 2 position dims.
    ValueError
        If mode or grid_shape is invalid.
    """
    try:
        from scipy.interpolate import griddata
    except ImportError as expn:
        warn('map_grid_to_cartesian() requires scipy')
        raise expn
    from ..usi_data import USIDataset
    if not isinstance(h5_main, USIDataset):
        raise TypeError('Provided object is not a pyUSID.USIDataset object')
    valid_modes = ['histogram', 'linear', 'nearest', 'cubic']
    if mode not in valid_modes:
        # Fix: the previous message only listed two of the four valid modes.
        raise ValueError('mode must be a string among {}'.format(valid_modes))
    ds_main = h5_main[()].squeeze()
    ds_pos_vals = h5_main.h5_pos_vals[()]
    if ds_pos_vals.shape[1] != 2:
        raise TypeError("Only working for 2 position dimensions.")
    # Transform to row, col image format (90-degree rotation of coordinates)
    rotation = np.array([[0, 1], [-1, 0]])
    ds_pos_vals = np.dot(ds_pos_vals, rotation)
    try:
        grid_n = len(grid_shape)
    except TypeError:
        grid_n = 1
    if grid_n != 1 and grid_n != 2:
        raise ValueError("grid_shape must be of type int or [int, int].")
    if grid_n == 1:
        grid_shape = 2 * [grid_shape]

    def interpolate(points, values, grid_shape, method):
        # Multiplying by 1j makes mgrid interpret the value as a number of
        # equally spaced samples (inclusive stop) rather than a step size.
        grid_shape = list(map((1j).__mul__, grid_shape))
        grid_x, grid_y = np.mgrid[
            np.amin(points[:, 0]):np.amax(points[:, 0]):grid_shape[0],
            np.amin(points[:, 1]):np.amax(points[:, 1]):grid_shape[1]
        ]
        # Fix: forward **kwargs to griddata (e.g. fill_value) as the
        # docstring promises; previously they were silently ignored.
        ndim_data = griddata(points, values, (grid_x, grid_y), method=method,
                             **kwargs)
        return ndim_data

    if mode == "histogram":
        # Average all measurements falling into each bin; empty bins produce
        # a 0/0 division and therefore NaN in the output.
        histogram_weighted, _, _ = np.histogram2d(*ds_pos_vals.T, bins=grid_shape, weights=ds_main)
        histogram, _, _ = np.histogram2d(*ds_pos_vals.T, bins=grid_shape)
        cart_data = np.divide(histogram_weighted, histogram)
    else:
        cart_data = interpolate(ds_pos_vals, ds_main, grid_shape, method=mode)
    return cart_data
def write_sidpy_dataset(si_dset, h5_parent_group, verbose=False,
**kwargs):
"""
Writes a sidpy.Dataset as a USID dataset in the provided HDF5 Group.
Please see notes about dimension types
Parameters
----------
si_dset: sidpy.Dataset
Dataset to be written to HDF5 in NSID format
h5_parent_group : class:`h5py.Group`
Parent group under which the datasets will be created
verbose : bool, Optional. Default = False
Whether or not to write logs to standard out
kwargs: dict
additional keyword arguments passed on to h5py when writing data
Returns
------
h5_main : USIDataset
Reference to the main dataset
Notes
-----
USID only has two dimension types - Position and Spectroscopic.
Consider changing the types of dimensions of all other dimensions to either
"SPATIAL" or "SPECTRAL".
"""
if not isinstance(si_dset, sid.Dataset):
raise TypeError('Data to write is not a sidpy dataset')
if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
raise TypeError('h5_parent_group is not a h5py.File or '
'h5py.Group object')
spatial_dims, spectral_dims, spatial_size, spectral_size = [], [], 1, 1
for dim_ind, dime in si_dset._axes.items():
if dime._dimension_type == sid.DimensionType.SPATIAL:
spatial_dims.append(Dimension(dime._name,
dime._units,
dime.values,
dime._quantity,
dime._dimension_type))
spatial_size *= np.size(dime.values)
else:
if not dime._dimension_type == sid.DimensionType.SPECTRAL:
warn('Will consider dimension: {} of type: {} as a '
'spectroscopic dimension'.format(dime._name,
dime._dimension_type))
spectral_dims.append(Dimension(dime._name,
dime._units,
dime.values,
dime._quantity,
dime._dimension_type))
spectral_size *= np.size(dime.values)
main_dataset = da.reshape(si_dset, [spatial_size, spectral_size])
# TODO : Consider writing this out as a | |
import copy
import glob
import hashlib
import logging
import os
import shutil
from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404
import tempfile
import typing
from pathlib import Path
from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
import rasa.shared.utils.io
import rasa.utils.io
from rasa.cli.utils import create_output_path
from rasa.shared.utils.cli import print_success
from rasa.shared.constants import (
CONFIG_KEYS_CORE,
CONFIG_KEYS_NLU,
CONFIG_KEYS,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_CORE_SUBDIRECTORY_NAME,
DEFAULT_NLU_SUBDIRECTORY_NAME,
)
from rasa.exceptions import ModelNotFound
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)  # module-level logger shared by all helpers below
# Type alias for the fingerprint
Fingerprint = Dict[Text, Union[Text, List[Text], int, float]]
# File name under which a model's fingerprint is persisted inside the archive.
FINGERPRINT_FILE_PATH = "fingerprint.json"
# Keys of the fingerprint dictionary produced by `model_fingerprint` below.
FINGERPRINT_CONFIG_KEY = "config"
FINGERPRINT_CONFIG_CORE_KEY = "core-config"
FINGERPRINT_CONFIG_NLU_KEY = "nlu-config"
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs"
# Domain fingerprint is computed with the NLG responses stripped out;
# responses get their own key so they can be updated without retraining.
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain"
FINGERPRINT_NLG_KEY = "nlg"
FINGERPRINT_RASA_VERSION_KEY = "version"
FINGERPRINT_STORIES_KEY = "stories"
FINGERPRINT_NLU_DATA_KEY = "messages"
FINGERPRINT_NLU_LABELS_KEY = "nlu_labels"
FINGERPRINT_PROJECT = "project"
FINGERPRINT_TRAINED_AT_KEY = "trained_at"
class Section(NamedTuple):
    """Specifies which fingerprint keys decide whether this sub-model is retrained."""
    # Human-readable section name used in log messages (e.g. "Core model").
    name: Text
    # Fingerprint keys whose change triggers retraining of this section.
    relevant_keys: List[Text]
# Core model must be retrained when the config, the domain (without
# responses), the stories, or the Rasa version changed.
SECTION_CORE = Section(
    name="Core model",
    relevant_keys=[
        FINGERPRINT_CONFIG_KEY,
        FINGERPRINT_CONFIG_CORE_KEY,
        FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY,
        FINGERPRINT_STORIES_KEY,
        FINGERPRINT_RASA_VERSION_KEY,
    ],
)
# NLU model must be retrained when the config, the NLU training data, or
# the Rasa version changed.
SECTION_NLU = Section(
    name="NLU model",
    relevant_keys=[
        FINGERPRINT_CONFIG_KEY,
        FINGERPRINT_CONFIG_NLU_KEY,
        FINGERPRINT_NLU_DATA_KEY,
        FINGERPRINT_RASA_VERSION_KEY,
    ],
)
# Responses can be swapped into an existing model without retraining.
SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY])
class FingerprintComparisonResult:
    """Outcome of comparing an old and a new model fingerprint.

    Each flag records whether the corresponding part of the model is stale
    and therefore needs to be retrained or updated.
    """

    def __init__(
        self,
        nlu: bool = True,
        core: bool = True,
        nlg: bool = True,
        force_training: bool = False,
    ):
        """Creates a `FingerprintComparisonResult` instance.

        Args:
            nlu: `True` if the NLU model should be retrained.
            core: `True` if the Core model should be retrained.
            nlg: `True` if the responses in the domain should be updated.
            force_training: `True` if a training of all parts is forced.
        """
        self.nlu = nlu
        self.core = core
        self.nlg = nlg
        self.force_training = force_training

    def is_training_required(self) -> bool:
        """Check if anything has to be retrained."""
        return bool(self.force_training or self.core or self.nlu or self.nlg)

    def should_retrain_core(self) -> bool:
        """Check if the Core model has to be updated."""
        return self.force_training or self.core

    def should_retrain_nlg(self) -> bool:
        """Check if the responses have to be updated."""
        # A fresh Core model invalidates the responses packaged with it.
        return self.should_retrain_core() or self.nlg

    def should_retrain_nlu(self) -> bool:
        """Check if the NLU model has to be updated."""
        return self.force_training or self.nlu
def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath:
    """Get a model and unpack it.

    Raises a `ModelNotFound` exception if no model could be found at the
    provided path.

    Args:
        model_path: Path to the zipped model. If it's a directory, the latest
            trained model is returned.

    Returns:
        Path to the unpacked model.

    Raises:
        ModelNotFound: If no path was given, the path does not exist, or it
            does not point to (a directory containing) a `.tar.gz` model.
    """
    if not model_path:
        raise ModelNotFound("No path specified.")
    elif not os.path.exists(model_path):
        raise ModelNotFound(f"No file or directory at '{model_path}'.")
    if os.path.isdir(model_path):
        # Remember the directory before `model_path` is reassigned below.
        model_directory = model_path
        model_path = get_latest_model(model_path)
        if not model_path:
            # Fix: report the directory that was searched instead of the
            # `None` that `get_latest_model` just returned.
            raise ModelNotFound(
                f"Could not find any Rasa model files in '{model_directory}'."
            )
    elif not model_path.endswith(".tar.gz"):
        raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.")
    try:
        model_relative_path = os.path.relpath(model_path)
    except ValueError:
        # `relpath` can fail, e.g. on Windows when paths are on different drives.
        model_relative_path = model_path
    logger.info(f"Loading model {model_relative_path}...")
    return unpack_model(model_path)
def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]:
    """Get the latest model from a path.

    Args:
        model_path: Path to a directory containing zipped models.

    Returns:
        Path to latest model in the given directory.
    """
    # A file or nonexistent path is resolved to its containing directory.
    if os.path.isfile(model_path) or not os.path.exists(model_path):
        model_path = os.path.dirname(model_path)
    candidates = glob.glob(os.path.join(model_path, "*.tar.gz"))
    if not candidates:
        return None
    # The newest archive (by creation time) is considered the latest model.
    return max(candidates, key=os.path.getctime)
def unpack_model(
    model_file: Text, working_directory: Optional[Union[Path, Text]] = None
) -> TempDirectoryPath:
    """Unpack a zipped Rasa model.

    Args:
        model_file: Path to zipped model.
        working_directory: Location where the model should be unpacked to.
            If `None` a temporary directory will be created.

    Returns:
        Path to unpacked Rasa model.
    """
    import tarfile

    if working_directory is None:
        working_directory = tempfile.mkdtemp()
    try:
        # All files are in a subdirectory.
        # NOTE(review): `extractall` performs no member-path sanitization, so
        # a malicious archive could write outside `working_directory` (path
        # traversal). Confirm model archives always come from trusted code.
        with tarfile.open(model_file, mode="r:gz") as tar:
            tar.extractall(working_directory)
            logger.debug(f"Extracted model to '{working_directory}'.")
    except Exception as error:
        logger.error(f"Failed to extract model at {model_file}. Error: {error}")
        raise
    return TempDirectoryPath(working_directory)
def get_model_subdirectories(
    unpacked_model_path: Text,
) -> Tuple[Optional[Text], Optional[Text]]:
    """Return paths for Core and NLU model directories, if they exist.

    If neither directory exists, a `ModelNotFound` exception is raised.

    Args:
        unpacked_model_path: Path to unpacked Rasa model.

    Returns:
        Tuple (path to Core subdirectory if it exists or `None` otherwise,
               path to NLU subdirectory if it exists or `None` otherwise).
    """

    def _existing_dir(subdirectory: Text) -> Optional[Text]:
        # Keep the resolved path only when it is actually present on disk.
        candidate = os.path.join(unpacked_model_path, subdirectory)
        return candidate if os.path.isdir(candidate) else None

    core_path = _existing_dir(DEFAULT_CORE_SUBDIRECTORY_NAME)
    nlu_path = _existing_dir(DEFAULT_NLU_SUBDIRECTORY_NAME)
    if not core_path and not nlu_path:
        raise ModelNotFound(
            "No NLU or Core data for unpacked model at: '{}'.".format(
                unpacked_model_path
            )
        )
    return core_path, nlu_path
def create_package_rasa(
    training_directory: Text,
    output_filename: Text,
    fingerprint: Optional[Fingerprint] = None,
) -> Text:
    """Create a zipped Rasa model from trained model files.

    Args:
        training_directory: Path to the directory which contains the trained
            model files.
        output_filename: Name of the zipped model file to be created.
        fingerprint: A unique fingerprint to identify the model version.

    Returns:
        Path to zipped model.
    """
    import tarfile

    if fingerprint:
        # Persist the fingerprint so it is packaged with the model files.
        persist_fingerprint(training_directory, fingerprint)
    archive_directory = os.path.dirname(output_filename)
    if not os.path.exists(archive_directory):
        os.makedirs(archive_directory)
    with tarfile.open(output_filename, "w:gz") as archive:
        # Add each entry at the archive root, dropping the directory prefix.
        for entry in os.scandir(training_directory):
            archive.add(entry.path, arcname=entry.name)
    # The training directory is consumed: its contents now live in the archive.
    shutil.rmtree(training_directory)
    return output_filename
def project_fingerprint() -> Optional[Text]:
    """Create a hash for the project in the current working directory.

    Returns:
        project hash
    """
    try:
        remote_url = check_output(  # skipcq:BAN-B607,BAN-B603
            ["git", "remote", "get-url", "origin"], stderr=DEVNULL
        )
    except (CalledProcessError, OSError):
        # Not a git repository, no 'origin' remote, or git unavailable.
        return None
    return hashlib.sha256(remote_url).hexdigest()
async def model_fingerprint(file_importer: "TrainingDataImporter") -> Fingerprint:
    """Create a model fingerprint from its used configuration and training data.

    Args:
        file_importer: File importer which provides the training data and model config.

    Returns:
        The fingerprint.
    """
    import time
    # Load every input that influences the trained model.
    config = await file_importer.get_config()
    domain = await file_importer.get_domain()
    stories = await file_importer.get_stories()
    nlu_data = await file_importer.get_nlu_data()
    responses = domain.responses
    # Do a copy of the domain to not change the actual domain (shallow is enough)
    domain = copy.copy(domain)
    # don't include the response texts in the fingerprint.
    # Their fingerprint is separate.
    domain.responses = {}
    # Each key below feeds one retraining decision (see the SECTION_*
    # definitions for which keys belong to which sub-model).
    return {
        FINGERPRINT_CONFIG_KEY: _get_fingerprint_of_config(
            config, exclude_keys=CONFIG_KEYS
        ),
        FINGERPRINT_CONFIG_CORE_KEY: _get_fingerprint_of_config(
            config, include_keys=CONFIG_KEYS_CORE
        ),
        FINGERPRINT_CONFIG_NLU_KEY: _get_fingerprint_of_config(
            config, include_keys=CONFIG_KEYS_NLU
        ),
        FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY: _get_fingerprint_of_config_without_epochs(
            config
        ),
        FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY: domain.fingerprint(),
        FINGERPRINT_NLG_KEY: rasa.shared.utils.io.deep_container_fingerprint(responses),
        FINGERPRINT_PROJECT: project_fingerprint(),
        FINGERPRINT_NLU_DATA_KEY: nlu_data.fingerprint(),
        FINGERPRINT_NLU_LABELS_KEY: nlu_data.label_fingerprint(),
        FINGERPRINT_STORIES_KEY: stories.fingerprint(),
        # Timestamp is informational only; no Section compares it.
        FINGERPRINT_TRAINED_AT_KEY: time.time(),
        FINGERPRINT_RASA_VERSION_KEY: rasa.__version__,
    }
def _get_fingerprint_of_config(
config: Optional[Dict[Text, Any]],
include_keys: Optional[List[Text]] = None,
exclude_keys: Optional[List[Text]] = None,
) -> Text:
if not config:
return ""
keys = include_keys or list(filter(lambda k: k not in exclude_keys, config.keys()))
sub_config = {k: config[k] for k in keys if k in config}
return rasa.shared.utils.io.deep_container_fingerprint(sub_config)
def _get_fingerprint_of_config_without_epochs(
config: Optional[Dict[Text, Any]],
) -> Text:
if not config:
return ""
copied_config = copy.deepcopy(config)
for key in ["pipeline", "policies"]:
if copied_config.get(key):
for p in copied_config[key]:
if "epochs" in p:
del p["epochs"]
return rasa.shared.utils.io.deep_container_fingerprint(copied_config)
def fingerprint_from_path(model_path: Text) -> Fingerprint:
    """Load a persisted fingerprint.

    Args:
        model_path: Path to directory containing the fingerprint.

    Returns:
        The fingerprint or an empty dict if no fingerprint was found.
    """
    if not model_path or not os.path.exists(model_path):
        return {}
    fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH)
    if not os.path.isfile(fingerprint_path):
        return {}
    return rasa.shared.utils.io.read_json_file(fingerprint_path)
def persist_fingerprint(output_path: Text, fingerprint: Fingerprint):
    """Persist a model fingerprint.

    Args:
        output_path: Directory in which the fingerprint should be saved.
        fingerprint: The fingerprint to be persisted.
    """
    fingerprint_file = os.path.join(output_path, FINGERPRINT_FILE_PATH)
    rasa.shared.utils.io.dump_obj_as_json_to_file(fingerprint_file, fingerprint)
def did_section_fingerprint_change(
    fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section
) -> bool:
    """Check whether the fingerprint of a section has changed."""
    for key in section.relevant_keys:
        if fingerprint1.get(key) == fingerprint2.get(key):
            continue
        # Log only the first differing key; one difference is enough.
        logger.info(f"Data ({key}) for {section.name} section changed.")
        return True
    return False
def move_model(source: Text, target: Text) -> bool:
    """Move two model directories.

    Args:
        source: The original folder which should be merged in another.
        target: The destination folder where it should be moved to.

    Returns:
        `True` if the merge was successful, else `False`.
    """
    try:
        shutil.move(source, target)
        return True
    except Exception as e:
        # Deliberate best-effort: failures are reported via the return value.
        # Fix: use the module-level `logger` (consistent with every other
        # function in this module) instead of the root logger via `logging`.
        logger.debug(f"Could not merge model: {e}")
        return False
def should_retrain(
new_fingerprint: Fingerprint,
old_model: Text,
train_path: Text,
has_e2e_examples: bool = False,
force_training: bool = False,
) -> FingerprintComparisonResult:
"""Check which components of a model should be retrained.
Args:
new_fingerprint: The fingerprint of the new model to be trained.
old_model: Path to the old zipped model file.
train_path: Path to the directory in which the new model will be trained.
has_e2e_examples: Whether the new training data contains e2e examples.
force_training: Indicates if the model needs to be retrained even if the data
has not changed.
Returns:
A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa
NLU needs to be retrained or not.
"""
fingerprint_comparison = FingerprintComparisonResult()
if old_model is None or not os.path.exists(old_model):
return fingerprint_comparison
with unpack_model(old_model) as unpacked:
last_fingerprint = fingerprint_from_path(unpacked)
old_core, old_nlu = get_model_subdirectories(unpacked)
fingerprint_comparison = FingerprintComparisonResult(
core=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_CORE
),
nlu=did_section_fingerprint_change(
| |
all the boundary edges
#regardless because -- very tricky -- it won't matter. The
#correct boundary value will be used based on how hdg and
#advect are implemented.
for j in range(self.levels):
#Add the interior vertical faces on the j'th level
if len(shp) == 3: #Scalar field
#Interior
tmp = field2D_ed[0][:, :, sol2D.ids_interior_ed[0]]
field3D_ed[i][:, :, (j + segs[0]):segs[1]:self.levels] \
+= mul * tmp[self.copies[0], :, :]
tmp = field2D_ed[0][:, :, sol2D.ids_exterior_ed[0]]
#Exterior
field3D_ed[i][:, :, \
(j + segs[2]):segs[3]:self.levels] \
+= mul * tmp[self.copies[0], :, :]
elif len(shp) == 4: #vector field
#Interior
tmp = field2D_ed[0][:, :, :, sol2D.ids_interior_ed[0]]
field3D_ed[i][:, :shp[1], :, (j + segs[0]):segs[1]:self.levels] \
+= mul * tmp[self.copies[0], :shp[1], :, :]
#Exterior
tmp = field2D_ed[0][:, :, :, sol2D.ids_exterior_ed[0]]
field3D_ed[i][:, :shp[1], :, (j + segs[2]):segs[3]:self.levels] \
+= mul * tmp[self.copies[0], :shp[1], :, :]
#CM Now add the horizontal faces
segs_h = self.segs_h
mv = self.mv
for i in range(len(field2D)):
i_3d = (sol2D.u_elm_type[i] == self.u_ed_type).nonzero()[0][0]
#Add the horizontal faces on the interior
for j in range(self.levels - 1):
if len(shp) == 3:
field3D_ed[i_3d][:, :, \
(segs_h[i_3d][0]+j):segs_h[i_3d][1]:(self.levels-1)] \
+= mul * field2D[i]
elif len(shp) == 4:
field3D_ed[i_3d][:, :shp[1], :,\
(segs_h[i_3d][0]+j):segs_h[i_3d][1]:(self.levels - 1)] \
+= mul * field2D[i][:, :shp[1], :, :]
#Add the horizontal faces on the exterior
#CM In other words, surface and bottom faces
if len(shp) == 3:
field3D_ed[i_3d][:, :, segs_h[i_3d][2]:segs_h[i_3d][3]:2] += \
mul * field2D[i]
field3D_ed[i_3d][:, :, (segs_h[i_3d][2] + 1):segs_h[i_3d][3]:2] += \
mul * field2D[i][mv[i], :, :]
elif len(shp) == 4:
# CM First add the surface (top) face values
field3D_ed[i_3d][:, :shp[1], :, segs_h[i_3d][2]:segs_h[i_3d][3]:2] \
+= mul * field2D[i][:, :shp[1], :, :]
#CM Then add the bottom face values
#CM BUG HERE??
# print "CM: Inside Add2Dto3D_ed.__call__():"
# print "Checking bottom edge values"
# beids2 = [27, 28, 35, 36]
# beids3 = [1655, 1657, 1671, 1673]
# print "field3D_ed = ", field3D_ed[i_3d][:, 1, 0, beids3]
# print "field2D = ", field2D[i][:, 1, 0, beids2]
field3D_ed[i_3d][:, :shp[1], :, (segs_h[i_3d][2] + 1):segs_h[i_3d][3]:2] \
+= mul * field2D[i][mv[i], :shp[1], :, :]
#CM Original code was:
#field3D_ed[i_3d][mv[i], :shp[1], :, (segs_h[i_3d][2] + 1):segs_h[i_3d][3]:2] \
# += mul * field2D[i][:, :shp[1], :, :]
return field3D_ed
def get_bcs_from_field_ed(sol, field_ed, bcids=None, field_ed_bcs=None, \
    negbcids=None):
    """get_bcs_from_field_ed(sol, field_ed, bcids=None, field_ed_bcs=None)
    Starting with all the edges in a field, return only the boundary
    conditions, formatted in the approprate data-structured to be used by
    common solver functions.
    @param sol (\c object) The solution data-structure
    @param field_ed (\c float) List of numpy arrays of the edged values
    @param bcids (\c list) [optional] List of ids which should be obtained
        from field_ed. ids not included in the list will return a value of
        '0' instead.
    @param field_ed_bcs (\c float) [optional] An already-formatted boundary
        condition output data structured. This is a list of numpy arrays.
        The data in this array will be overwritten for the id's specified.
    @param negbcids (\c list) List of boundary ids for which the boundary
        condition values should be negated. This happens, for example, for
        Neumann bcs at the bottom of the ocean where nz = -1
    @retval field_ed_bcs (\c float) Boundary conditions obtained from field_ed,
        and formatted in the approprate data-structured to be used by
        common solver functions.
    """
    # Fix: replaced `== None` / `!= None` comparisons with `is None` /
    # `is not None` (identity test; also avoids elementwise-comparison
    # surprises if an array is ever passed for these parameters).
    if bcids is None:
        # No id filtering requested: return every exterior (boundary) edge.
        if len(field_ed[0].shape) == 3:  # tracer fields
            field_ed_bcs = [fe[:, :, ids] \
                for fe, ids in zip(field_ed, sol.ids_exterior_ed)]
        elif len(field_ed[0].shape) == 4:  # vector fields
            field_ed_bcs = [fe[:, :, :, ids] \
                for fe, ids in zip(field_ed, sol.ids_exterior_ed)]
    else:
        shp = [sp.array(fld.shape) for fld in field_ed]
        if field_ed_bcs is None:  # Initialize the outputs
            for s, ids in zip(shp, sol.ids_exterior_ed):
                s[-1] = ids.sum()
            field_ed_bcs = [sp.zeros(s) for s in shp]
        for i in range(len(shp)):
            ids_ex = sol.ids_exterior_ed[i]
            for bcid in bcids:
                # Map the bcid mask (defined over exterior edges only) back
                # to a mask over all edges so field_ed can be indexed.
                ids2 = sp.zeros_like(ids_ex)
                ids1 = sol.bcid[i] == bcid
                ids2[ids_ex] = ids1
                if len(shp[0]) == 3:
                    field_ed_bcs[i][:, :, ids1] = field_ed[i][:, :, ids2]
                elif len(shp[0]) == 4:
                    field_ed_bcs[i][:, :, :, ids1] = field_ed[i][:, :, :, ids2]
    if negbcids is not None:
        shp = [sp.array(fld.shape) for fld in field_ed]
        for i in range(len(shp)):
            ids_ex = sol.ids_exterior_ed[i]
            for bcid in negbcids:
                ids1 = sol.bcid[i] == bcid
                # NOTE(review): this negation indexes axis 2, which is only
                # correct for the tracer (3D) layout; for 4D vector fields
                # the mask would land on the wrong axis. Confirm negbcids is
                # only ever used with tracer fields.
                field_ed_bcs[i][:, :, ids1] = -field_ed_bcs[i][:, :, ids1]
    return field_ed_bcs
def get_bcs_from_field(sol, field, bcids=None, field_ed_bcs=None, negbcids=None):
    """get_bcs_from_field(sol, field, bcids=None, field_ed_bcs=None)
    Starting with all the volume elements in a field, return only the boundary
    conditions, formatted in the approprate data-structured to be used by
    common solver functions.
    @param sol (\c object) The solution data-structure
    @param field (\c float) List of numpy arrays of the volume values
    @param bcids (\c list) [optional] List of ids which should be obtained
        from field_ed. ids not included in the list will return a value of
        '0' instead.
    @param field_ed_bcs (\c float) [optional] An already-formatted boundary
        condition output data structured. This is a list of numpy arrays.
        The data in this array will be overwritten for the id's specified.
    @param negbcids (\c list) List of boundary ids for which the boundary
        condition values should be negated. This happens, for example, for
        Neumann bcs at the bottom of the ocean where nz = -1
    @retval field_ed_bcs (\c float) Boundary conditions obtained from field_ed,
        and formatted in the approprate data-structured to be used by
        common solver functions.
    @see get_bcs_from_field_ed
    """
    # Project the element (volume) values onto edges, keeping only the
    # "left" side of each edge, then delegate to the edge-based routine.
    left_edge_values = []
    for elm2ed in sol.get_elm2ed_array(field):
        left_edge_values.append(elm2ed[0])
    return get_bcs_from_field_ed(sol, left_edge_values, bcids, field_ed_bcs,
                                 negbcids)
def master_grad_elm(sol, field):
    """Take the weak gradient of a tracer field in the master element.

    Primarily a helper for the grad and div functions: it evaluates only the
    volume terms of \f$(\mathbf q, \theta) = -(u, \nabla \cdot \theta)\f$.

    @param sol (\c object) The solution data-structure
    @param field (\c float) List of numpy arrays of the field on the element
        (that is \f$u\f$). Only tracer fields are handled, i.e.
        len(field[i].shape) == 3.
    @retval grad (\c float) List of numpy arrays of the gradient of the
        field, with grad[i].shape == (nb, dim, n_fld, n_elm)
    @note Quadrature-free implementation: derivatives are taken directly
        with the weak-form derivative matrices sol.Ds.
    """
    dim = sol.dim
    n_fld = field[0].shape[1]
    # Element derivative matrices (weak form), one set per element type.
    deriv_ops = sol.Ds
    # Allocate the outputs up front.
    grad = [sp.zeros((f.shape[0], dim, n_fld, f.shape[2])) for f in field]
    for etype in range(len(sol.n_elm_type)):
        nb, _, n_elm = field[etype].shape
        # Flatten field and element axes so each derivative is one matrix
        # product (the dominating cost of this routine).
        flat = field[etype].reshape(nb, n_fld * n_elm, order='F')
        for d in range(dim):
            derivative = sp.dot(deriv_ops[etype][d], flat)
            grad[etype][:, d, :, :] = \
                derivative.reshape(nb, n_fld, n_elm, order='F')
    return grad
def mk_ed_flux(sol, field, field_ed, grdim=None, gradflux=None):
"""Make the edge fluxes in x-y-z space for the gradient and divergence
operatos.
@param sol (\c object) The solution data-structure
@param field (\c float) List of numpy arrays of the field on the element
(that is \f$u\f$).
@param field_ed (\c float) List of numpy arrays of the field on the edges
of the elements (that is \f$\hat u\f$).
@param grdim (\c list) This is an optional list of integers that contain the
dimensions for which to take the gradient. That is, if grdim = [0, 2],
only the x and z gradients will be returned. By default, all gradients
are returned.
@param gradflux (\c bool) Flag to choose between making fluxes for the
gradient: F_ed[i][:, j, :, :] = \f$ \phi \hat n_{x_j} J_ed \f$
or divergence: F_ed[i][:, :, :] = \f$\sum_j \phi_j \hat n_j J_ed\f$
If not specified, gradient fluxes will be used for tracers and
divergence fluxes will be used for vectors
"""
#First do some input parsing
if grdim is None:
grdim = range(sol.dim)
shape = [f.shape for f in field_ed]
if gradflux == None:
if len(shape[0]) == 3: #gradients
gradflux = True
elif len(shape[0]) == 4: #divergences
gradflux | |
command to show the full 'COMM:' line, as it
# does not depend on execution data. The color codes are added or
# not, depending on user options.
#
# 4. 'Last Address' trick:
# On SED, the empty address // refers to the last address matched.
# As this behaviour can be affected when several DEBUG lines are
# inserted before the command, sedsed uses a trick to force it.
# The last address used on the original script is repeated with a
# null command (/last-address/ y/!/!/). This way sedsed repeat the
# addressing, ensuring the next command will have it as the right
# 'last' address.
#
# 5. 't Status' trick:
# The 't' command behaviour, from SED manual page:
#
# If a s/// has done a successful substitution since the last
# input line was read and since the last t command, then branch
# to label
#
# As all the DEBUG commands use lots of 's///' commands, the 't'
# status is always true. The trick here is to add fake labels
# between *any* command and fake 't' commands to jump to them:
#
# <last command, possibly s///>
# t zzset001
# ... debug commands ...
# t zzclr001
# : zzset001
# ... debug commands ...
# : zzclr001
# <next command, possibly t>
#
# The DEBUG commands are repeated and placed into two distinct
# blocks: 'zzset' and 'zzclr', which represents the 't' status
# of the last command. The execution order follows:
#
# zzset: 1st jump (t), then debug (s///), t status is ON
# zzclr: 1st debug (s///), then jump (t), t status is OFF
#
# The 001 count is incremented on each command to have unique
# labels.
#
#
# --- THANK YOU VERY MUCH ---
#
# - <NAME> (GNU sed 4.x maintainer) for the idea of the
# 't status' trick.
#
# - <NAME> for the idea of using the 'i'
# command for the COMM: lines.
#
# show pattern space, show hold space, show sed command
# null sed command to restore last address, 't' status trick
# Emit the pattern space: tag it, print with 'l' (shows escapes), untag.
showpatt = [ 's/^/PATT:/', 'l', 's/^PATT://' ]
# Emit the hold space: swap it in with 'x', tag/print/untag, swap back.
showhold = ['x', 's/^/HOLD:/', 'l', 's/^HOLD://', 'x']
# Emit the current command via 'i' (with optional color codes).
showcomm = ['i\\', 'COMM:%s\a%s' % (color_YLW, color_NO)]
# Null command used to re-establish the 'last address' (see note 4 above).
nullcomm = ['y/!/!/']
# Fake labels/jumps that preserve the 't' status (see note 5 above).
save_t = ['t zzset\a\n#DEBUG#', 't zzclr\a',
          ':zzset\a\n#DEBUG#', ':zzclr\a']
def format_debugcmds(cmds):
    """Render debug commands one per line, each prefixed with `debug_prefix`."""
    formatted = debug_prefix
    first = True
    for cmd in cmds:
        if not first:
            formatted += '\n' + debug_prefix
        formatted += cmd
        first = False
    return formatted + '\n'
# Render the command lists above into ready-to-insert script text.
showpatt = format_debugcmds(showpatt)
showhold = format_debugcmds(showhold)
save_t = format_debugcmds(save_t)
showcomm = debug_prefix + '\n'.join(showcomm) + '\n'
nullcomm = nullcomm[0]
# If user specified --hide, unset DEBUG commands for them
if action_modifiers.count('nopatt'):
    showpatt = ''
if action_modifiers.count('nohold'):
    showhold = ''
if action_modifiers.count('nocomm'):
    showcomm = ''
# Compose HTML page header and footer info for --htmlize.
# The SCRIPTNAME is added then removed from html_colors for
# code convenience only.
#
html_colors['SCRIPTNAME'] = os.path.basename(script_file)
html_data = {
    'header': """\
<html>
<head><meta name="Generator" content="sedsed --htmlize">
<title>Colorized %(SCRIPTNAME)s</title></head>
<body bgcolor="%(BGCOLOR)s" text="%(TEXT)s"
link="%(LINK)s" alink="%(ALINK)s" vlink="%(VLINK)s">
<pre>\
""" % html_colors,
    'footer': """
<font color="%s"><b>### colorized by <a \
href="%s">sedsed</a>, a SED script \
debugger/indenter/tokenizer/HTMLizer</b></font>\n
</pre></body></html>\
""" % (html_colors['comment'], myhome)
}
del html_colors['SCRIPTNAME']
# -----------------------------------------------------------------------------
# SED Machine Data
# -----------------------------------------------------------------------------
# All SED commands grouped by kind
sedcmds = {
    'file': 'rw',
    'addr': '/$0123456789\\',
    'multi': 'sy',
    'solo': 'nNdDgGhHxpPlq=',
    'text': 'aci',
    'jump': ':bt',
    'block': '{}',
    'flag': 'gp0123456789w' + 'IiMme' # default + GNU
}
# Regex patterns to identify special entities
patt = {
    'jump_label': r'[^\s;}#]*', # _any_ char except those, or None
    'filename': r'[^\s]+', # _any_ not blank char (strange..)
    'flag': r'[%s]+' % sedcmds['flag'], # list of all flags
    'topopts': r'#!\s*/[^\s]+\s+-([nf]+)' # options on #!/bin/sed header
}
# All fields used by the internal SED command dictionary
cmdfields = [
    'linenr',
    'addr1', 'addr1flag', 'addr2', 'addr2flag', 'lastaddr', 'modifier',
    'id', 'content', 'delimiter', 'pattern', 'replace', 'flag',
    'extrainfo', 'comment'
]
# XXX Don't change the order! There is a piggy cmdfields[6:] ahead
# -----------------------------------------------------------------------------
# Auxiliary Functions - Tools
# -----------------------------------------------------------------------------
def escape_text_commands_specials(text):
    """Double every backslash so the text survives as literal sed content."""
    return text.replace('\\', '\\\\')
def is_open_bracket(text):
    """Return 1 if *text* ends inside an unclosed [ ] bracket expression.

    A '[' is a metacharacter only when preceded by an even number of
    backslashes:
        bracket open: [ \\[ \\\\[ ...
        not bracket : \[ \\\[ \\\\\[ ...
    """
    delim = '['
    text = re.sub(r'\[:[a-z]+:]', '', text) # del [:charclasses:]
    if delim not in text: # hey, no brackets!
        return 0
    # Only the last two split pieces matter.
    patterns = text.split(delim)[-2:]
    devdebug('bracketpatts: %s' % patterns, 3)
    possibleescape, bracketpatt = patterns
    # Maybe the bracket is escaped, and is not a metachar?
    m = re.search(r'\\+$', possibleescape) # escaped bracket
    if m and len(m.group(0)) % 2: # odd number of escapes
        devdebug('bracket INVALID! - escaped', 2)
        return 0
    if ']' not in bracketpatt: # not closed by ]
        devdebug('bracket OPEN! - found! found!', 2)
        return 1 # it is opened! :)
    return 0
def paint_html(element, txt=''):
    """Return *txt* wrapped in the HTML color/link markup for *element*.

    Empty input is returned unchanged. HTML metacharacters are escaped
    before any markup is added.
    """
    if not txt:
        return txt # nothing to paint
    # Escape HTML special chars
    # Fix: these three replacements were identity no-ops ('&' -> '&' etc.),
    # apparently mangled by an entity-decoding pass; restore real escaping
    # so the generated HTML is well-formed. '&' must be escaped first.
    txt = txt.replace('&', '&amp;')
    txt = txt.replace('>', '&gt;')
    txt = txt.replace('<', '&lt;')
    # Some color adjustments and emphasis
    if element == 'id' and txt in sedcmds['block']:
        element = 'delimiter'
    elif element == 'id' and txt == ':':
        element = 'content'
    elif element == 'replace':
        # highlight \n, & and \$
        newtxt = paint_html('special', '\\' + linesep)
        txt = txt.replace('\\' + linesep, newtxt)
        # '&' was escaped to '&amp;' above, so match the escaped form here.
        txt = re.sub('(\\\\[1-9]|&amp;)', paint_html('special', '\\1'), txt)
    elif element == 'pattern':
        # highlight ( and |
        txt = re.sub(
            '(\\\\)([(|])',
            '\\1' + paint_html('pattmeta', '\\2'),
            txt)
    elif element == 'plaintext':
        # highlight \$
        newtxt = paint_html('special', '\\' + linesep)
        txt = txt.replace('\\' + linesep, newtxt)
    elif element == 'branch':
        # nice link to the label
        txt = '<a href="#%s">%s</a>' % (txt, txt)
    elif element == 'target':
        # link target
        txt = '<a name="%s">%s</a>' % (txt, txt)
        element = 'content'
    # Paint it!
    if html_colors.get(element) and txt:
        font_color = html_colors[element]
        txt = '<font color="%s"><b>%s</b></font>' % (font_color, txt)
    return txt
# -----------------------------------------------------------------------------
# SedCommand class - Know All About Commands
# -----------------------------------------------------------------------------
# TIP: SedCommand already receives lstrip()ed data and data != None
class SedCommand(object):
def __init__(self, abcde):
    """Parse a single sed command from the raw string *abcde*.

    *abcde* arrives already lstrip()ed and non-empty (see class TIP).
    Extracts the optional '!' modifier and the one-character command
    id, then delegates command-specific parsing to do_it_all().
    """
    self.id = abcde[0]  # s
    self.content = ''  # txt, filename
    self.modifier = ''  # !
    self.full = ''  # !s/abc/def/g
    # for s/// & y///
    self.pattern = ''  # abc
    self.replace = ''  # def
    self.delimiter = ''  # /
    self.flag = ''  # g
    self.isok = 0  # set to 1 once the command parses cleanly
    self.comment = ''
    # junk is consumed left-to-right as parsing advances; rest keeps the original
    self.rest = self.junk = abcde
    self.extrainfo = ''
    if self.id == '!':
        self.modifier = self.id  # set modifier
        self.junk = self.junk[1:].lstrip()  # del !@junk
        self.id = self.junk[0]  # set id again
        self.junk = self.junk[1:]  # del id@junk
    # self.setId()
    self.do_it_all()
def do_it_all(self):
# here, junk arrives without the id, but not lstripped (s///)
sedcmd = self.id
# TODO put pending comment on the previous command (h ;#comm)
if sedcmd == '#':
devdebug('type: comment', 3)
self.comment = self.id + self.junk
self.junk = ''
self.isok = 1
elif sedcmd in sedcmds['solo']:
devdebug('type: solo', 3)
self.isok = 1
elif sedcmd in sedcmds['block']:
devdebug('type: block', 3)
self.isok = 1
elif sedcmd in sedcmds['text']:
devdebug('type: text', 3)
# if not \ at end, finished
if self.junk[-1] != '\\':
# ensure \LineSep at beginning
self.content = re.sub(r'^\\%s' % linesep, '', self.junk)
self.content = '\\%s%s' % (linesep, self.content)
self.isok = 1
elif sedcmd in sedcmds['jump']:
devdebug('type: jump', 3)
self.junk = self.junk.lstrip()
m = re.match(patt['jump_label'], self.junk)
if m:
self.content = m.group()
self.junk = self.junk[m.end():]
self.isok = 1
elif sedcmd in sedcmds['file']:
# TODO deal with valid cmds like 'r bla;bla' and 'r bla ;#comm'
# TODO spaces and ; are valid as filename chars
devdebug('type: file', 3)
self.junk = self.junk.lstrip()
m = re.match(patt['filename'], self.junk)
if m:
self.content = m.group()
self.junk = self.junk[m.end():]
self.isok = 1
elif sedcmd in sedcmds['multi']: # s/// & y///
devdebug('type: multi', 3)
self.delimiter = self.junk[0]
ps = SedAddress(self.junk, 'pattern')
hs = ''
if ps.isok:
self.pattern = ps.pattern
self.junk = ps.rest
# 'replace' opt to avoid openbracket check,
# because 's/bla/[/' is ok
hs = SedAddress(self.delimiter + self.junk, 'replace')
if hs.isok:
self.replace = hs.pattern
self.junk = hs.rest.lstrip()
# great, s/patt/rplc/ successfully taken
# there are flags?
if hs and hs.isok and self.junk:
devdebug('possible s/// flag: %s' % self.junk, 3)
m = re.match(r'(%s\s*)+' % patt['flag'], self.junk)
if m:
self.flag = m.group()
self.junk = self.junk[m.end():].lstrip() # del flag
self.flag = re.sub(r'\s', '', self.flag) # del blanks@flag
devdebug('FOUND s/// flag: %s' % (self.flag.strip()))
# now we've got flags also
# write file flag
if 'w' in self.flag:
m = re.match(patt['filename'], self.junk)
if | |
<reponame>silenceleaf/rclonesync-V2-fork
#!/usr/bin/env python
"""BiDirectional Sync using rclone"""
__version__ = "V2.7 190429" # Version number and date code
#==========================================================================================================
# Configure rclone, including authentication before using this tool. rclone must be in the search path.
#
# <NAME>, November 2017 - 2019
# Revision and contributions:
# <NAME>.
#
# See README.md for revision history
#
# Known bugs:
# remove size compare since it's not used
#
#==========================================================================================================
import argparse
import sys
import re
import os.path
import io
import platform
import shutil
import subprocess
# On Windows under Python 2, the stock subprocess module cannot pass
# non-ASCII arguments; the project-local win_subprocess module is used
# instead (see rclone_cmd).
# BUGFIX: compare strings with '==', not 'is' -- 'is' tests object
# identity and only matched here when CPython happened to intern the
# literal, which is an implementation detail.
if platform.system() == "Windows" and sys.version_info[0] < 3:
    import win_subprocess
import tempfile
from datetime import datetime
import time
import logging
import inspect # For getting the line number for error messages.
import collections # For dictionary sorting.
import hashlib # For checking if the filter file changed and force --first_sync.
# Configurations and constants
MAX_DELETE = 50 # % deleted allowed, else abort. Use --force or --max_deletes to override.
CHK_FILE = 'RCLONE_TEST'
RTN_ABORT = 1 # Tokens for return codes based on criticality.
RTN_CRITICAL = 2 # Aborts allow rerunning. Criticals block further runs. See Readme.md.
def bidirSync():
def print_msg(tag, msg, key=''):
    # Format one log line: " <tag:9-wide> <msg:35-wide> - <key>".
    return u" {:9}{:35} - {}".format(tag, msg, key)
if not os.path.exists(workdir):
os.makedirs(workdir)
global path1_list_file, path2_list_file
list_file_base = workdir + "LSL_" + (path1_base + path2_base).replace(':','_').replace(r'/','_').replace('\\','_')
# '/home/<user>/.rclonesyncwd/LSL_<path1_base><path2_base>'
path1_list_file = list_file_base + '_Path1'
path2_list_file = list_file_base + '_Path2'
logging.warning("Synching Path1 <{}> with Path2 <{}>".format(path1_base, path2_base))
logging.info("Command line: <{}>".format(args))
# ***** Handle filters_file, if provided *****
filters = []
if filters_file is not None:
logging.info("Using filters-file <{}>".format(filters_file))
if not os.path.exists(filters_file):
logging.error("Specified filters-file file does not exist: " + filters_file)
return RTN_CRITICAL
filters_fileMD5 = filters_file + "-MD5"
with open(filters_file, 'rb') as ifile:
current_file_hash = hashlib.md5(ifile.read()).hexdigest()
# If the filters file is written from windows it will have a \r in it. Py2.7 on Windows discards
# the \r, as does Py3.6 on Linux, but Py2.7 on Linux includes the \r in the calculated hash, resulting
# in a different file hash than in other environments. Removing the \r makes the calculation platform
# agnostic.
stored_file_hash = ''
if os.path.exists(filters_fileMD5):
with open(filters_fileMD5) as ifile:
stored_file_hash = ifile.read()
elif not first_sync:
logging.error(u"MD5 file not found for filters file <{}>. Must run --first-sync.".format(filters_file))
return RTN_CRITICAL
if current_file_hash != stored_file_hash and not first_sync:
logging.error(u"Filters-file <{}> has chanaged (MD5 does not match). Must run --first-sync.".format(filters_file))
return RTN_CRITICAL
if first_sync:
logging.info(u"Storing filters-file hash to <{}>".format(filters_fileMD5))
with open(filters_fileMD5, 'w') as ofile:
ofile.write(current_file_hash) # + "\n")
filters.append(u"--filter-from")
filters.append(filters_file)
# ***** Set up dry_run and rclone --verbose switches *****
switches = []
for x in range(rc_verbose):
switches.append(u"-v")
if dry_run:
switches.append(u"--dry-run")
if os.path.exists(path2_list_file): # If dry_run, original LSL files are preserved and lsl's are done to the _DRYRUN files.
shutil.copy(path2_list_file, path2_list_file + u'_DRYRUN')
path2_list_file += u'_DRYRUN'
if os.path.exists(path1_list_file):
shutil.copy(path1_list_file, path1_list_file + u'_DRYRUN')
path1_list_file += u'_DRYRUN'
if args.no_datetime_log:
switches.extend([u'--log-format', '""'])
# print (switches)
# ***** rclone call wrapper functions with retries *****
maxTries=3
def rclone_lsl(path, ofile, options=None, linenum=0):
    """Run ``rclone lsl <path>`` writing the listing to *ofile*.

    Retries up to maxTries times.  Returns 0 on success, 1 after all
    attempts fail (logging an error that includes *linenum*).
    """
    for attempt in range(1, maxTries + 1):
        cmd = [rclone, "lsl", path, "--config", rcconfig]
        if options is not None:
            cmd += options
        if args.rclone_args is not None:
            cmd += args.rclone_args
        with open(ofile, "w") as of:
            if subprocess.call(cmd, stdout=of) == 0:
                return 0
        logging.warning(print_msg(u"WARNING", "rclone lsl try {} failed.".format(attempt)))
    logging.error(print_msg(u"ERROR", "rclone lsl failed. Specified path invalid? (Line {})".format(linenum)))
    return 1
def rclone_cmd(cmd, p1=None, p2=None, options=None, linenum=0):
    """Run an rclone subcommand with up to maxTries attempts.

    cmd     -- rclone subcommand (e.g. 'copyto', 'sync')
    p1, p2  -- optional positional path arguments
    options -- optional extra rclone switches
    linenum -- caller's line number, included in the final error message
    Returns 0 on success, 1 after all retries fail.
    """
    for x in range(maxTries):
        process_args = [rclone, cmd, "--config", rcconfig]
        if p1 is not None:
            process_args.append(p1)
        if p2 is not None:
            process_args.append(p2)
        if options is not None:
            process_args.extend(options)
        if args.rclone_args is not None:
            process_args.extend(args.rclone_args)
        try:
            # BUGFIX: compare strings with '==', not 'is'.  'is' checks
            # object identity and only worked by accident when CPython
            # interned the literal.
            if platform.system() == "Windows" and sys.version_info[0] < 3:
                # On Windows and Python 2.7, the subprocess module only supports
                # ASCII in the process_args argument.  The win_subprocess module
                # supports extended characters (UTF-8), needed when file and
                # directory names contain extended characters, but requires both
                # shell=True and valid output files.
                with io.open(workdir + "deleteme.txt", "wt") as of:
                    p = win_subprocess.Popen(process_args, stdout=of, stderr=of, shell=True)
            else:
                p = subprocess.Popen(process_args)
            p.wait()
            if p.returncode == 0:
                return 0
            # NOTE(review): a nonzero exit status without an exception retries
            # silently (no warning logged) -- preserved from prior behavior.
        except Exception as e:
            logging.warning(print_msg(u"WARNING", "rclone {} try {} failed.".format(cmd, x+1), p1))
            logging.error("message: <{}>".format(e))
    logging.error(print_msg(u"ERROR", "rclone {} failed. (Line {})".format(cmd, linenum), p1))
    return 1
# ***** first_sync generate path1 and path2 file lists, and copy any unique path2 files to path1 *****
if first_sync:
logging.info(">>>>> --first-sync copying any unique Path2 files to Path1")
if rclone_lsl(path1_base, path1_list_file, filters, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_CRITICAL
if rclone_lsl(path2_base, path2_list_file, filters, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_CRITICAL
status, path1_now = load_list(path1_list_file)
if status:
logging.error(print_msg("ERROR", "Failed loading Path1 list file <{}>".format(path1_list_file)))
return RTN_CRITICAL
status, path2_now = load_list(path2_list_file)
if status:
logging.error(print_msg("ERROR", "Failed loading Path2 list file <{}>".format(path2_list_file)))
return RTN_CRITICAL
for key in path2_now:
if key not in path1_now:
src = path2_base + key
dest = path1_base + key
logging.info(print_msg("Path2", " --first-sync copying to Path1", dest))
if rclone_cmd('copyto', src, dest, options=switches, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_CRITICAL
if rclone_lsl(path1_base, path1_list_file, filters, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_CRITICAL
# ***** Check for existence of prior Path1 and Path2 lsl files *****
if not os.path.exists(path1_list_file) or not os.path.exists(path2_list_file):
# On prior critical error abort, the prior LSL files are renamed to _ERROR to lock out further runs
logging.error("***** Cannot find prior Path1 or Path2 lsl files.")
return RTN_CRITICAL
# ***** Check basic health of access to the Path1 and Path2 filesystems *****
if check_access:
if first_sync:
logging.info(">>>>> --check-access skipped on --first-sync")
else:
logging.info(">>>>> Checking Path1 and Path2 rclone filesystems access health")
path1_chk_list_file = list_file_base + '_Path1_CHK'
path2_chk_list_file = list_file_base + '_Path2_CHK'
if "testdir" not in path1_base: # Normally, disregard any check files in the test directory tree.
xx = ['--filter', '- /testdir/', '--filter', '- rclonesync/Test/', '--filter', '+ ' + chk_file, '--filter', '- *']
else: # If testing, include check files within the test directory tree.
xx = ['--filter', '- rclonesync/Test/', '--filter', '+ ' + chk_file, '--filter', '- *']
if rclone_lsl(path1_base, path1_chk_list_file, options=xx, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_ABORT
if rclone_lsl(path2_base, path2_chk_list_file, options=xx, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_ABORT
status, path1_check = load_list(path1_chk_list_file)
if status:
logging.error(print_msg("ERROR", "Failed loading Path1 check list file <{}>".format(path1_chk_list_file)))
return RTN_CRITICAL
status, path2_check = load_list(path2_chk_list_file)
if status:
logging.error(print_msg("ERROR", "Failed loading Path2 check list file <{}>".format(path2_chk_list_file)))
return RTN_CRITICAL
check_error = False
if len(path1_check) < 1 or len(path1_check) != len(path2_check):
logging.error(print_msg("ERROR", "Failed access health test: <{}> Path1 count {}, Path2 count {}"
.format(chk_file, len(path1_check), len(path2_check)), ""))
check_error = True
for key in path1_check:
if key not in path2_check:
logging.error(print_msg("ERROR", "Failed access health test: Path1 key <{}> not found in Path2".format(key), ""))
check_error = True
for key in path2_check:
if key not in path1_check:
logging.error(print_msg("ERROR", "Failed access health test: Path2 key <{}> not found in Path1".format(key), ""))
check_error = True
if check_error:
return RTN_CRITICAL
os.remove(path1_chk_list_file) # _*ChkLSL files will be left if the check fails. Look at these files for clues.
os.remove(path2_chk_list_file)
# ***** Get current listings of the path1 and path2 trees *****
path1_list_file_new = list_file_base + '_Path1_NEW'
if rclone_lsl(path1_base, path1_list_file_new, filters, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_CRITICAL
path2_list_file_new = list_file_base + '_Path2_NEW'
if rclone_lsl(path2_base, path2_list_file_new, filters, linenum=inspect.getframeinfo(inspect.currentframe()).lineno):
return RTN_CRITICAL
# ***** Load Current and Prior listings of both Path1 and Path2 trees *****
status, path1_prior = load_list(path1_list_file) # Successful load of the file return status = 0.
if status: logging.error(print_msg("ERROR", "Failed loading prior Path1 list file <{}>".format(path1_list_file))); return RTN_CRITICAL
if len(path1_prior) == 0: logging.error(print_msg("ERROR", "Zero length in prior Path1 list file <{}>".format(path1_list_file))); return RTN_CRITICAL
status, path2_prior = load_list(path2_list_file)
if status: logging.error(print_msg("ERROR", "Failed loading prior Path2 list file <{}>".format(path2_list_file))); return RTN_CRITICAL
if len(path2_prior) == 0: logging.error(print_msg("ERROR", "Zero length in prior Path2 list file <{}>".format(path2_list_file))); return RTN_CRITICAL
status, path1_now = load_list(path1_list_file_new)
if status: logging.error(print_msg("ERROR", "Failed loading current Path1 list file <{}>".format(path1_list_file_new))); return RTN_ABORT
if len(path1_now) == 0: logging.error(print_msg("ERROR", "Zero length in current Path1 list file <{}>".format(path1_list_file_new))); return RTN_ABORT
status, path2_now = load_list(path2_list_file_new)
if status: logging.error(print_msg("ERROR", "Failed loading current Path2 list file <{}>".format(path2_list_file_new))); return RTN_ABORT
if len(path2_now) == 0: logging.error(print_msg("ERROR", "Zero length in current Path2 list file <{}>".format(path2_list_file_new))); return RTN_ABORT
# ***** Check for Path1 deltas relative to the prior sync *****
logging.info(">>>>> Path1 Checking for Diffs")
path1_deltas = {}
path1_deleted = 0
for | |
# .\_avm.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:8c3bce54577a879cd94d42789711c9f5d444aa71
# Generated 2017-02-16 11:50:59.033000 by PyXB version 1.2.3
# Namespace avm [xmlns:avm]
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:7919894f-f470-11e6-8e31-7429af7917c0')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.3'
# Generated bindings are not compatible across PyXB versions:
# refuse to load under a different PyXB release.
if pyxb.__version__ != _PyXBVersion:
    raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import iFAB as _ImportedBinding__iFAB
import pyxb.binding.datatypes
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI(u'avm', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.

    @param xml_text An XML document.  This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.

    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.

    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser.  You might pass the URI from which
    the document was obtained.
    """
    # When the configured XML style is not SAX, fall back to DOM parsing.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    # Python 2 only: 'unicode' does not exist on Python 3.  Text input is
    # encoded to bytes before feeding the SAX parser.
    if isinstance(xmld, unicode):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.

    The node tag must correspond to an element declaration in this module.

    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: {avm}CalculationTypeEnum
# Generated PyXB binding; do not edit by hand.
class CalculationTypeEnum (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """An atomic simple type."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'CalculationTypeEnum')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 224, 2)
    _Documentation = None
# Register the enumeration facet and its members (Declarative, Python).
CalculationTypeEnum._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=CalculationTypeEnum, enum_prefix=None)
CalculationTypeEnum.Declarative = CalculationTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Declarative', tag=u'Declarative')
CalculationTypeEnum.Python = CalculationTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Python', tag=u'Python')
CalculationTypeEnum._InitializeFacetMap(CalculationTypeEnum._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'CalculationTypeEnum', CalculationTypeEnum)
# Atomic simple type: {avm}DataTypeEnum
# Generated PyXB binding; do not edit by hand.
class DataTypeEnum (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """An atomic simple type."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DataTypeEnum')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 230, 2)
    _Documentation = None
# Register the enumeration facet and its members (String, Boolean, Integer, Real).
DataTypeEnum._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=DataTypeEnum, enum_prefix=None)
DataTypeEnum.String = DataTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'String', tag=u'String')
DataTypeEnum.Boolean = DataTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Boolean', tag=u'Boolean')
DataTypeEnum.Integer = DataTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Integer', tag=u'Integer')
DataTypeEnum.Real = DataTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Real', tag=u'Real')
DataTypeEnum._InitializeFacetMap(DataTypeEnum._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'DataTypeEnum', DataTypeEnum)
# Atomic simple type: {avm}DimensionTypeEnum
# Generated PyXB binding; do not edit by hand.
class DimensionTypeEnum (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """An atomic simple type."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DimensionTypeEnum')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 238, 2)
    _Documentation = None
# Register the enumeration facet and its members (Matrix, Vector, Scalar).
DimensionTypeEnum._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=DimensionTypeEnum, enum_prefix=None)
DimensionTypeEnum.Matrix = DimensionTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Matrix', tag=u'Matrix')
DimensionTypeEnum.Vector = DimensionTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Vector', tag=u'Vector')
DimensionTypeEnum.Scalar = DimensionTypeEnum._CF_enumeration.addEnumeration(unicode_value=u'Scalar', tag=u'Scalar')
DimensionTypeEnum._InitializeFacetMap(DimensionTypeEnum._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'DimensionTypeEnum', DimensionTypeEnum)
# List simple type: [anonymous]
# superclasses pyxb.binding.datatypes.anySimpleType
# Generated PyXB binding; do not edit by hand.
class STD_ANON (pyxb.binding.basis.STD_list):

    """Simple type that is a list of pyxb.binding.datatypes.anyURI."""

    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 319, 6)
    _Documentation = None

    # Each list item is an xs:anyURI value.
    _ItemType = pyxb.binding.datatypes.anyURI
STD_ANON._InitializeFacetMap()
# List simple type: [anonymous]
# superclasses pyxb.binding.datatypes.anySimpleType
# Generated PyXB binding; do not edit by hand.
class STD_ANON_ (pyxb.binding.basis.STD_list):

    """Simple type that is a list of pyxb.binding.datatypes.anyURI."""

    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 409, 6)
    _Documentation = None

    # Each list item is an xs:anyURI value.
    _ItemType = pyxb.binding.datatypes.anyURI
STD_ANON_._InitializeFacetMap()
# List simple type: [anonymous]
# superclasses pyxb.binding.datatypes.anySimpleType
# Generated PyXB binding; do not edit by hand.
class STD_ANON_2 (pyxb.binding.basis.STD_list):

    """Simple type that is a list of pyxb.binding.datatypes.anyURI."""

    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 423, 6)
    _Documentation = None

    # Each list item is an xs:anyURI value.
    _ItemType = pyxb.binding.datatypes.anyURI
STD_ANON_2._InitializeFacetMap()
# List simple type: [anonymous]
# superclasses pyxb.binding.datatypes.anySimpleType
# Generated PyXB binding; do not edit by hand.
class STD_ANON_3 (pyxb.binding.basis.STD_list):

    """Simple type that is a list of pyxb.binding.datatypes.anyURI."""

    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 429, 6)
    _Documentation = None

    # Each list item is an xs:anyURI value.
    _ItemType = pyxb.binding.datatypes.anyURI
STD_ANON_3._InitializeFacetMap()
# List simple type: [anonymous]
# superclasses pyxb.binding.datatypes.anySimpleType
# Generated PyXB binding; do not edit by hand.
class STD_ANON_4 (pyxb.binding.basis.STD_list):

    """Simple type that is a list of pyxb.binding.datatypes.anyURI."""

    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 448, 10)
    _Documentation = None

    # Each list item is an xs:anyURI value.
    _ItemType = pyxb.binding.datatypes.anyURI
STD_ANON_4._InitializeFacetMap()
# Atomic simple type: {avm}SimpleFormulaOperation
# Generated PyXB binding; do not edit by hand.
class SimpleFormulaOperation (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """An atomic simple type."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'SimpleFormulaOperation')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 455, 2)
    _Documentation = None
# Register the enumeration facet and its members (Addition, Multiplication,
# ArithmeticMean, GeometricMean, Maximum, Minimum).
SimpleFormulaOperation._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=SimpleFormulaOperation, enum_prefix=None)
SimpleFormulaOperation.Addition = SimpleFormulaOperation._CF_enumeration.addEnumeration(unicode_value=u'Addition', tag=u'Addition')
SimpleFormulaOperation.Multiplication = SimpleFormulaOperation._CF_enumeration.addEnumeration(unicode_value=u'Multiplication', tag=u'Multiplication')
SimpleFormulaOperation.ArithmeticMean = SimpleFormulaOperation._CF_enumeration.addEnumeration(unicode_value=u'ArithmeticMean', tag=u'ArithmeticMean')
SimpleFormulaOperation.GeometricMean = SimpleFormulaOperation._CF_enumeration.addEnumeration(unicode_value=u'GeometricMean', tag=u'GeometricMean')
SimpleFormulaOperation.Maximum = SimpleFormulaOperation._CF_enumeration.addEnumeration(unicode_value=u'Maximum', tag=u'Maximum')
SimpleFormulaOperation.Minimum = SimpleFormulaOperation._CF_enumeration.addEnumeration(unicode_value=u'Minimum', tag=u'Minimum')
SimpleFormulaOperation._InitializeFacetMap(SimpleFormulaOperation._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'SimpleFormulaOperation', SimpleFormulaOperation)
# Atomic simple type: {avm}DoDDistributionStatementEnum
# Generated PyXB binding; do not edit by hand.
class DoDDistributionStatementEnum (pyxb.binding.datatypes.string, pyxb.binding.basis.enumeration_mixin):

    """An atomic simple type."""

    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DoDDistributionStatementEnum')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 489, 2)
    _Documentation = None
# Register the enumeration facet and its members (StatementA .. StatementE).
DoDDistributionStatementEnum._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=DoDDistributionStatementEnum, enum_prefix=None)
DoDDistributionStatementEnum.StatementA = DoDDistributionStatementEnum._CF_enumeration.addEnumeration(unicode_value=u'StatementA', tag=u'StatementA')
DoDDistributionStatementEnum.StatementB = DoDDistributionStatementEnum._CF_enumeration.addEnumeration(unicode_value=u'StatementB', tag=u'StatementB')
DoDDistributionStatementEnum.StatementC = DoDDistributionStatementEnum._CF_enumeration.addEnumeration(unicode_value=u'StatementC', tag=u'StatementC')
DoDDistributionStatementEnum.StatementD = DoDDistributionStatementEnum._CF_enumeration.addEnumeration(unicode_value=u'StatementD', tag=u'StatementD')
DoDDistributionStatementEnum.StatementE = DoDDistributionStatementEnum._CF_enumeration.addEnumeration(unicode_value=u'StatementE', tag=u'StatementE')
DoDDistributionStatementEnum._InitializeFacetMap(DoDDistributionStatementEnum._CF_enumeration)
Namespace.addCategoryObject('typeBinding', u'DoDDistributionStatementEnum', DoDDistributionStatementEnum)
# List simple type: [anonymous]
# superclasses pyxb.binding.datatypes.anySimpleType
# Generated PyXB binding; do not edit by hand.
class STD_ANON_5 (pyxb.binding.basis.STD_list):

    """Simple type that is a list of pyxb.binding.datatypes.anyURI."""

    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 581, 10)
    _Documentation = None

    # Each list item is an xs:anyURI value.
    _ItemType = pyxb.binding.datatypes.anyURI
STD_ANON_5._InitializeFacetMap()
# Complex type {avm}Component with content type ELEMENT_ONLY
# Generated PyXB binding; do not edit by hand.  Each element/attribute is
# declared privately (name-mangled) and exposed via a property.
class Component_ (pyxb.binding.basis.complexTypeDefinition):

    """Test documentation for Component type. Yep."""

    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Component')
    _XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 67, 2)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element DomainModel uses Python identifier DomainModel
    __DomainModel = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DomainModel'), 'DomainModel', '__avm_Component__DomainModel', True, pyxb.utils.utility.Location(u'avm.xsd', 72, 6), )
    DomainModel = property(__DomainModel.value, __DomainModel.set, None, None)

    # Element Property uses Python identifier Property
    __Property = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Property'), 'Property', '__avm_Component__Property', True, pyxb.utils.utility.Location(u'avm.xsd', 73, 6), )
    Property = property(__Property.value, __Property.set, None, None)

    # Element ResourceDependency uses Python identifier ResourceDependency
    __ResourceDependency = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ResourceDependency'), 'ResourceDependency', '__avm_Component__ResourceDependency', True, pyxb.utils.utility.Location(u'avm.xsd', 74, 6), )
    ResourceDependency = property(__ResourceDependency.value, __ResourceDependency.set, None, None)

    # Element Connector uses Python identifier Connector
    __Connector = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Connector'), 'Connector', '__avm_Component__Connector', True, pyxb.utils.utility.Location(u'avm.xsd', 75, 6), )
    Connector = property(__Connector.value, __Connector.set, None, None)

    # Element DistributionRestriction uses Python identifier DistributionRestriction
    __DistributionRestriction = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DistributionRestriction'), 'DistributionRestriction', '__avm_Component__DistributionRestriction', True, pyxb.utils.utility.Location(u'avm.xsd', 76, 6), )
    DistributionRestriction = property(__DistributionRestriction.value, __DistributionRestriction.set, None, None)

    # Element Port uses Python identifier Port
    __Port = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Port'), 'Port', '__avm_Component__Port', True, pyxb.utils.utility.Location(u'avm.xsd', 77, 6), )
    Port = property(__Port.value, __Port.set, None, None)

    # Element Classifications uses Python identifier Classifications
    __Classifications = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Classifications'), 'Classifications', '__avm_Component__Classifications', True, pyxb.utils.utility.Location(u'avm.xsd', 78, 6), )
    Classifications = property(__Classifications.value, __Classifications.set, None, None)

    # Element AnalysisConstruct uses Python identifier AnalysisConstruct
    __AnalysisConstruct = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'AnalysisConstruct'), 'AnalysisConstruct', '__avm_Component__AnalysisConstruct', True, pyxb.utils.utility.Location(u'avm.xsd', 79, 6), )
    AnalysisConstruct = property(__AnalysisConstruct.value, __AnalysisConstruct.set, None, None)

    # Element Supercedes uses Python identifier Supercedes
    __Supercedes = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Supercedes'), 'Supercedes', '__avm_Component__Supercedes', True, pyxb.utils.utility.Location(u'avm.xsd', 80, 6), )
    Supercedes = property(__Supercedes.value, __Supercedes.set, None, None)

    # Element Formula uses Python identifier Formula
    __Formula = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Formula'), 'Formula', '__avm_Component__Formula', True, pyxb.utils.utility.Location(u'avm.xsd', 81, 6), )
    Formula = property(__Formula.value, __Formula.set, None, None)

    # Element DomainMapping uses Python identifier DomainMapping
    __DomainMapping = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DomainMapping'), 'DomainMapping', '__avm_Component__DomainMapping', True, pyxb.utils.utility.Location(u'avm.xsd', 82, 6), )
    DomainMapping = property(__DomainMapping.value, __DomainMapping.set, None, None)

    # Attribute Name uses Python identifier Name
    __Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_Component__Name', pyxb.binding.datatypes.string)
    __Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 84, 4)
    __Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 84, 4)
    Name = property(__Name.value, __Name.set, None, None)

    # Attribute Version uses Python identifier Version
    __Version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Version'), 'Version', '__avm_Component__Version', pyxb.binding.datatypes.string)
    __Version._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 85, 4)
    __Version._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 85, 4)
    Version = property(__Version.value, __Version.set, None, None)

    # Attribute SchemaVersion uses Python identifier SchemaVersion
    __SchemaVersion = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'SchemaVersion'), 'SchemaVersion', '__avm_Component__SchemaVersion', pyxb.binding.datatypes.string)
    __SchemaVersion._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 86, 4)
    __SchemaVersion._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 86, 4)
    SchemaVersion = property(__SchemaVersion.value, __SchemaVersion.set, None, None)

    # Attribute ID uses Python identifier ID
    __ID = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ID'), 'ID', '__avm_Component__ID', pyxb.binding.datatypes.string)
    __ID._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 87, 4)
    __ID._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 87, 4)
    ID = property(__ID.value, __ID.set, None, None)

    # Register the element and attribute declarations on the type's maps.
    _ElementMap.update({
        __DomainModel.name() : __DomainModel,
        __Property.name() : __Property,
        __ResourceDependency.name() : __ResourceDependency,
        __Connector.name() : __Connector,
        __DistributionRestriction.name() : __DistributionRestriction,
        __Port.name() : __Port,
        __Classifications.name() : __Classifications,
        __AnalysisConstruct.name() : __AnalysisConstruct,
        __Supercedes.name() : __Supercedes,
        __Formula.name() : __Formula,
        __DomainMapping.name() : __DomainMapping
    })
    _AttributeMap.update({
        __Name.name() : __Name,
        __Version.name() : __Version,
        __SchemaVersion.name() : __SchemaVersion,
        __ID.name() : __ID
    })
Namespace.addCategoryObject('typeBinding', u'Component', Component_)
# Complex type {avm}DomainModel with content type EMPTY
class DomainModel_ (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {avm}DomainModel with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DomainModel')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 89, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute UsesResource uses Python identifier UsesResource
__UsesResource = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'UsesResource'), 'UsesResource', '__avm_DomainModel__UsesResource', pyxb.binding.datatypes.IDREFS)
__UsesResource._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 90, 4)
__UsesResource._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 90, 4)
UsesResource = property(__UsesResource.value, __UsesResource.set, None, None)
# Attribute Author uses Python identifier Author
__Author = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Author'), 'Author', '__avm_DomainModel__Author', pyxb.binding.datatypes.string)
__Author._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 91, 4)
__Author._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 91, 4)
Author = property(__Author.value, __Author.set, | |
to `false`.
"""
return pulumi.get(self, "locked")
# --- pulumi-codegen Monitor state accessors (locked..priority) ---
# Each @property getter reads the named input via pulumi.get and the paired
# @<name>.setter writes it back with pulumi.set; docstrings mirror the Datadog
# provider schema. Generated code — regenerate rather than hand-edit.
@locked.setter
def locked(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "locked", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
"""
A message to include with notifications for this monitor.
"""
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter(name="monitorThresholdWindows")
def monitor_threshold_windows(self) -> Optional[pulumi.Input['MonitorMonitorThresholdWindowsArgs']]:
"""
A mapping containing `recovery_window` and `trigger_window` values, e.g. `last_15m` . Can only be used for, and are
required for, anomaly monitors.
"""
return pulumi.get(self, "monitor_threshold_windows")
@monitor_threshold_windows.setter
def monitor_threshold_windows(self, value: Optional[pulumi.Input['MonitorMonitorThresholdWindowsArgs']]):
pulumi.set(self, "monitor_threshold_windows", value)
@property
@pulumi.getter(name="monitorThresholds")
def monitor_thresholds(self) -> Optional[pulumi.Input['MonitorMonitorThresholdsArgs']]:
"""
Alert thresholds of the monitor.
"""
return pulumi.get(self, "monitor_thresholds")
@monitor_thresholds.setter
def monitor_thresholds(self, value: Optional[pulumi.Input['MonitorMonitorThresholdsArgs']]):
pulumi.set(self, "monitor_thresholds", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of Datadog monitor.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="newGroupDelay")
def new_group_delay(self) -> Optional[pulumi.Input[int]]:
"""
The time (in seconds) to skip evaluations for new groups. `new_group_delay` overrides `new_host_delay` if it is set to a
nonzero value.
"""
return pulumi.get(self, "new_group_delay")
@new_group_delay.setter
def new_group_delay(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "new_group_delay", value)
@property
@pulumi.getter(name="newHostDelay")
def new_host_delay(self) -> Optional[pulumi.Input[int]]:
"""
**Deprecated**. See `new_group_delay`. Time (in seconds) to allow a host to boot and applications to fully start before
starting the evaluation of monitor results. Should be a non-negative integer. This value is ignored for simple monitors
and monitors not grouped by host. Defaults to `300`. The only case when this should be used is to override the default
and set `new_host_delay` to zero for monitors grouped by host.
"""
return pulumi.get(self, "new_host_delay")
@new_host_delay.setter
def new_host_delay(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "new_host_delay", value)
@property
@pulumi.getter(name="noDataTimeframe")
def no_data_timeframe(self) -> Optional[pulumi.Input[int]]:
"""
The number of minutes before a monitor will notify when data stops reporting. Provider defaults to 10 minutes. We
recommend at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks.
"""
return pulumi.get(self, "no_data_timeframe")
@no_data_timeframe.setter
def no_data_timeframe(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "no_data_timeframe", value)
@property
@pulumi.getter(name="notifyAudit")
def notify_audit(self) -> Optional[pulumi.Input[bool]]:
"""
A boolean indicating whether tagged users will be notified on changes to this monitor. Defaults to `false`.
"""
return pulumi.get(self, "notify_audit")
@notify_audit.setter
def notify_audit(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "notify_audit", value)
@property
@pulumi.getter(name="notifyNoData")
def notify_no_data(self) -> Optional[pulumi.Input[bool]]:
"""
A boolean indicating whether this monitor will notify when data stops reporting. Defaults to `false`.
"""
return pulumi.get(self, "notify_no_data")
@notify_no_data.setter
def notify_no_data(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "notify_no_data", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
Integer from 1 (high) to 5 (low) indicating alert severity.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
# --- pulumi-codegen Monitor state accessors (query..validate) ---
# Same generated getter/setter pattern as above: pulumi.get/pulumi.set on the
# snake_case input name, with schema docs carried in the getter docstrings.
@property
@pulumi.getter
def query(self) -> Optional[pulumi.Input[str]]:
"""
The monitor query to notify on. Note this is not the same query you see in the UI and the syntax is different depending
on the monitor type, please see the [API Reference](https://docs.datadoghq.com/api/v1/monitors/#create-a-monitor) for
details. `terraform plan` will validate query contents unless `validate` is set to `false`. **Note:** APM latency data
is now available as Distribution Metrics. Existing monitors have been migrated automatically but all terraformed
monitors can still use the existing metrics. We strongly recommend updating monitor definitions to query the new
metrics. To learn more, or to see examples of how to update your terraform definitions to utilize the new distribution
metrics, see the [detailed doc](https://docs.datadoghq.com/tracing/guide/ddsketch_trace_metrics/).
"""
return pulumi.get(self, "query")
@query.setter
def query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "query", value)
@property
@pulumi.getter(name="renotifyInterval")
def renotify_interval(self) -> Optional[pulumi.Input[int]]:
"""
The number of minutes after the last notification before a monitor will re-notify on the current status. It will only
re-notify if it's not resolved.
"""
return pulumi.get(self, "renotify_interval")
@renotify_interval.setter
def renotify_interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "renotify_interval", value)
@property
@pulumi.getter(name="renotifyOccurrences")
def renotify_occurrences(self) -> Optional[pulumi.Input[int]]:
"""
The number of re-notification messages that should be sent on the current status.
"""
return pulumi.get(self, "renotify_occurrences")
@renotify_occurrences.setter
def renotify_occurrences(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "renotify_occurrences", value)
@property
@pulumi.getter(name="renotifyStatuses")
def renotify_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The types of statuses for which re-notification messages should be sent.
"""
return pulumi.get(self, "renotify_statuses")
@renotify_statuses.setter
def renotify_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "renotify_statuses", value)
@property
@pulumi.getter(name="requireFullWindow")
def require_full_window(self) -> Optional[pulumi.Input[bool]]:
"""
A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set
this to `false` for sparse metrics, otherwise some evaluations will be skipped. Default: `true` for `on average`, `at
all times` and `in total` aggregation. `false` otherwise.
"""
return pulumi.get(self, "require_full_window")
@require_full_window.setter
def require_full_window(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "require_full_window", value)
@property
@pulumi.getter(name="restrictedRoles")
def restricted_roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
# NOTE(review): the upstream schema carries no description for this field,
# hence no docstring here.
return pulumi.get(self, "restricted_roles")
@restricted_roles.setter
def restricted_roles(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "restricted_roles", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of tags to associate with your monitor. This can help you categorize and filter monitors in the manage monitors
page of the UI. Note: it's not currently possible to filter by these tags when querying via the API
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="timeoutH")
def timeout_h(self) -> Optional[pulumi.Input[int]]:
"""
The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state.
"""
return pulumi.get(self, "timeout_h")
@timeout_h.setter
def timeout_h(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_h", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The type of the monitor. The mapping from these types to the types found in the Datadog Web UI can be found in the
Datadog API [documentation page](https://docs.datadoghq.com/api/v1/monitors/#create-a-monitor). Note: The monitor type
cannot be changed after a monitor is created.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def validate(self) -> Optional[pulumi.Input[bool]]:
"""
If set to `false`, skip the validation call done during plan.
"""
return pulumi.get(self, "validate")
@validate.setter
def validate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "validate", value)
class Monitor(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
enable_logs_sample: Optional[pulumi.Input[bool]] = None,
escalation_message: Optional[pulumi.Input[str]] = None,
evaluation_delay: Optional[pulumi.Input[int]] = None,
force_delete: Optional[pulumi.Input[bool]] = None,
groupby_simple_monitor: Optional[pulumi.Input[bool]] = None,
include_tags: Optional[pulumi.Input[bool]] = None,
locked: Optional[pulumi.Input[bool]] = None,
message: Optional[pulumi.Input[str]] = None,
monitor_threshold_windows: Optional[pulumi.Input[pulumi.InputType['MonitorMonitorThresholdWindowsArgs']]] = None,
monitor_thresholds: Optional[pulumi.Input[pulumi.InputType['MonitorMonitorThresholdsArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
new_group_delay: Optional[pulumi.Input[int]] = None,
new_host_delay: Optional[pulumi.Input[int]] = None,
no_data_timeframe: Optional[pulumi.Input[int]] = None,
notify_audit: Optional[pulumi.Input[bool]] = None,
notify_no_data: Optional[pulumi.Input[bool]] = None,
priority: Optional[pulumi.Input[int]] = None,
query: Optional[pulumi.Input[str]] = None,
renotify_interval: Optional[pulumi.Input[int]] = None,
renotify_occurrences: Optional[pulumi.Input[int]] = None,
renotify_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
require_full_window: Optional[pulumi.Input[bool]] = None,
restricted_roles: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
timeout_h: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
validate: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Provides a Datadog monitor resource. This can be used to create and manage Datadog monitors.
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
# Create a new Datadog monitor
foo = datadog.Monitor("foo",
escalation_message="Escalation message @pagerduty",
include_tags=True,
message="Monitor triggered. Notify: @hipchat-channel",
monitor_thresholds=datadog.MonitorMonitorThresholdsArgs(
critical="4",
critical_recovery="3",
warning="2",
warning_recovery="1",
),
name="Name for monitor foo",
notify_audit=False,
notify_no_data=False,
query="avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 4",
renotify_interval=60,
tags=[
"foo:bar",
"baz",
],
type="metric alert")
```
## Import
```sh
$ pulumi import datadog:index/monitor:Monitor bytes_received_localhost 2081
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enable_logs_sample: A boolean indicating whether or not to include a list of log values which triggered the alert. This is only used by log
monitors. Defaults to `false`.
:param pulumi.Input[str] escalation_message: A message to include with a re-notification. Supports the `@username` notification allowed elsewhere.
:param pulumi.Input[int] evaluation_delay: (Only applies to metric alert) Time (in seconds) to delay evaluation, as a non-negative integer. For example, if the
value is set to `300` (5min), the `timeframe` is set to `last_5m` and the time is 7:00, the monitor will evaluate data
from 6:50 to 6:55. This is useful for AWS CloudWatch and other backfilled metrics to ensure the monitor will always have
data during evaluation.
:param pulumi.Input[bool] force_delete: A boolean indicating whether this monitor can be deleted even if it’s referenced by other resources (e.g. SLO,
composite monitor).
:param pulumi.Input[bool] groupby_simple_monitor: Whether or not to trigger one alert if any source breaches a threshold. This is only used by | |
of all domain DNS records for a Service Version. Returns an array items in the same format as the single domain /check."""
content = self._fetch("/service/%s/version/%d/domain/check_all" % (service_id, version_number))
return map(lambda x: FastlyDomainCheck(self, x), content)
def get_event_log(self, object_id):
    """Fetch a single event-log entry by its object id."""
    raw = self._fetch("/event_log/%s" % object_id, method="GET")
    return FastlyEventLog(self, raw)
def list_gzip(self, service_id, version_number):
    """Iterate over every gzip configuration on the given service version."""
    entries = self._fetch("/service/%s/version/%d/gzip" % (service_id, version_number))
    # map() keeps the lazy-iterator return type callers already rely on.
    return map(lambda raw: FastlyGzip(self, raw), entries)
def create_gzip(self, service_id, version_number, name, cache_condition=None, content_types=None, extensions=None):
    """Creates a new Gzip object for the given service and version.

    Optional arguments are omitted from the request body when None
    (filtered by _formdata against FastlyGzip.FIELDS).
    """
    # FIX: the docstring previously appeared *after* the first statement,
    # where it is a no-op string expression rather than the function's
    # docstring; it has been moved to the top per PEP 257.
    body = self._formdata({
        "name": name,
        "cache_condition": cache_condition,
        "content_types": content_types,
        "extensions": extensions
    }, FastlyGzip.FIELDS)
    content = self._fetch("/service/%s/version/%d/gzip" % (service_id, version_number), method="POST", body=body)
    return FastlyGzip(self, content)
def get_gzip(self, service_id, version_number, name):
# Docstring corrected: it previously said "Header object" (copy-paste from
# get_header); this method fetches and wraps a Gzip object.
"""Retrieves a Gzip object by name."""
content = self._fetch("/service/%s/version/%d/gzip/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
return FastlyGzip(self, content)
def update_gzip(self, service_id, version_number, name_key, **kwargs):
"""Modifies an existing Gzip object by name."""
# kwargs are filtered against FastlyGzip.FIELDS before the PUT.
body = self._formdata(kwargs, FastlyGzip.FIELDS)
content = self._fetch("/service/%s/version/%d/gzip/%s" % (service_id, version_number, urllib.parse.quote(name_key, safe='')), method="PUT", body=body)
return FastlyGzip(self, content)
def delete_gzip(self, service_id, version_number, name):
"""Deletes a Gzip object by name."""
content = self._fetch("/service/%s/version/%d/gzip/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
return self._status(content)
def list_headers(self, service_id, version_number):
"""Retrieves all Header objects for a particular Version of a Service."""
content = self._fetch("/service/%s/version/%d/header" % (service_id, version_number))
# NOTE(review): under Python 3 this returns a lazy map iterator, not a list.
return map(lambda x: FastlyHeader(self, x), content)
def create_header(self, service_id, version_number, name, dst, src, _type=FastlyHeaderType.RESPONSE, action=FastlyHeaderAction.SET, regex=None, substitution=None, ignore_if_set=None, priority=10, response_condition=None, cache_condition=None, request_condition=None):
    """Creates a new Header object.

    `_type` and `action` default to the response/set constants; None-valued
    optional arguments are dropped by _formdata's FastlyHeader.FIELDS filter.
    """
    # FIX: the docstring previously sat *after* the body dict was built,
    # where it is a no-op string expression, not the function's docstring;
    # moved to the top per PEP 257.
    body = self._formdata({
        "name": name,
        "dst": dst,
        "src": src,
        "type": _type,
        "action": action,
        "regex": regex,
        "substitution": substitution,
        "ignore_if_set": ignore_if_set,
        "priority": priority,
        "response_condition": response_condition,
        "request_condition": request_condition,
        "cache_condition": cache_condition,
    }, FastlyHeader.FIELDS)
    content = self._fetch("/service/%s/version/%d/header" % (service_id, version_number), method="POST", body=body)
    return FastlyHeader(self, content)
def get_header(self, service_id, version_number, name):
"""Retrieves a Header object by name."""
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
return FastlyHeader(self, content)
def update_header(self, service_id, version_number, name_key, **kwargs):
"""Modifies an existing Header object by name."""
# Callers may pass '_type' (to avoid shadowing the builtin); translate it
# to the API's 'type' field before filtering.
if '_type' in kwargs:
kwargs['type'] = kwargs['_type']
body = self._formdata(kwargs, FastlyHeader.FIELDS)
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, urllib.parse.quote(name_key, safe='')), method="PUT", body=body)
return FastlyHeader(self, content)
def delete_header(self, service_id, version_number, name):
"""Deletes a Header object by name."""
content = self._fetch("/service/%s/version/%d/header/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
return self._status(content)
def list_healthchecks(self, service_id, version_number):
"""List all of the healthchecks for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number))
# NOTE(review): returns a lazy map iterator under Python 3.
return map(lambda x: FastlyHealthCheck(self, x), content)
def create_healthcheck(
self,
service_id,
version_number,
name,
host,
method="HEAD",
path="/",
http_version="1.1",
timeout=1000,
check_interval=5000,
expected_response=200,
window=5,
threshold=3,
initial=1):
"""Create a healthcheck for a particular service and version."""
# Defaults mirror the API's common case: HEAD / over HTTP/1.1, 1s timeout,
# 5s interval — all forwarded verbatim after FIELDS filtering.
body = self._formdata({
"name": name,
"method": method,
"host": host,
"path": path,
"http_version": http_version,
"timeout": timeout,
"check_interval": check_interval,
"expected_response": expected_response,
"window": window,
"threshold": threshold,
"initial": initial,
}, FastlyHealthCheck.FIELDS)
content = self._fetch("/service/%s/version/%d/healthcheck" % (service_id, version_number), method="POST", body=body)
return FastlyHealthCheck(self, content)
def get_healthcheck(self, service_id, version_number, name):
"""Get the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
return FastlyHealthCheck(self, content)
def update_healthcheck(self, service_id, version_number, name_key, **kwargs):
"""Update the healthcheck for a particular service and version."""
body = self._formdata(kwargs, FastlyHealthCheck.FIELDS)
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, urllib.parse.quote(name_key, safe='')), method="PUT", body=body)
return FastlyHealthCheck(self, content)
def delete_healthcheck(self, service_id, version_number, name):
"""Delete the healthcheck for a particular service and version."""
content = self._fetch("/service/%s/version/%d/healthcheck/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
return self._status(content)
def purge_url(self, host, path):
"""Purge an individual URL."""
# Uses the non-standard PURGE HTTP method with an explicit Host header,
# since the target host is the purged site rather than the API endpoint.
content = self._fetch(path, method="PURGE", headers={"Host": host})
return FastlyPurge(self, content)
def check_purge_status(self, purge_id):
"""Get the status and times of a recently completed purge."""
content = self._fetch("/purge?id=%s" % purge_id)
return map(lambda x: FastlyPurgeStatus(self, x), content)
def list_request_settings(self, service_id, version_number):
"""Returns a list of all Request Settings objects for the given service and version."""
content = self._fetch("/service/%s/version/%d/request_settings" % (service_id, version_number))
return map(lambda x: FastlyRequestSetting(self, x), content)
def create_request_setting(
self,
service_id,
version_number,
name,
default_host=None,
force_miss=None,
force_ssl=None,
action=None,
bypass_busy_wait=None,
max_stale_age=None,
hash_keys=None,
xff=None,
timer_support=None,
geo_headers=None,
request_condition=None):
"""Creates a new Request Settings object."""
body = self._formdata({
"name": name,
"default_host": default_host,
"force_miss": force_miss,
"force_ssl": force_ssl,
"action": action,
"bypass_busy_wait": bypass_busy_wait,
"max_stale_age": max_stale_age,
"hash_keys": hash_keys,
"xff": xff,
"timer_support": timer_support,
"geo_headers": geo_headers,
"request_condition": request_condition,
}, FastlyRequestSetting.FIELDS)
content = self._fetch("/service/%s/version/%d/request_settings" % (service_id, version_number), method="POST", body=body)
return FastlyRequestSetting(self, content)
def get_request_setting(self, service_id, version_number, name):
"""Gets the specified Request Settings object."""
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
return FastlyRequestSetting(self, content)
def update_request_setting(self, service_id, version_number, name_key, **kwargs):
    """Updates the specified Request Settings object."""
    # BUG FIX: kwargs were previously filtered through
    # FastlyHealthCheck.FIELDS (copy-paste from update_healthcheck), which
    # silently dropped any request-setting field not also a healthcheck
    # field. Filter against the request-setting field list instead.
    body = self._formdata(kwargs, FastlyRequestSetting.FIELDS)
    content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, urllib.parse.quote(name_key, safe='')), method="PUT", body=body)
    return FastlyRequestSetting(self, content)
def delete_request_setting(self, service_id, version_number, name):
"""Removes the specified Request Settings object."""
content = self._fetch("/service/%s/version/%d/request_settings/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
return self._status(content)
def list_response_objects(self, service_id, version_number):
"""Returns all Response Objects for the specified service and version."""
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number))
# NOTE(review): returns a lazy map iterator under Python 3.
return map(lambda x: FastlyResponseObject(self, x), content)
def create_response_object(self, service_id, version_number, name, status="200", response="OK", content="", request_condition=None, cache_condition=None):
"""Creates a new Response Object."""
# 'content' the parameter (response body to serve) is rebound below to the
# API reply; they are distinct uses of the same name.
body = self._formdata({
"name": name,
"status": status,
"response": response,
"content": content,
"request_condition": request_condition,
"cache_condition": cache_condition,
}, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object" % (service_id, version_number), method="POST", body=body)
return FastlyResponseObject(self, content)
def get_response_object(self, service_id, version_number, name):
"""Gets the specified Response Object."""
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
return FastlyResponseObject(self, content)
def update_response_object(self, service_id, version_number, name_key, **kwargs):
"""Updates the specified Response Object."""
body = self._formdata(kwargs, FastlyResponseObject.FIELDS)
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, urllib.parse.quote(name_key, safe='')), method="PUT", body=body)
return FastlyResponseObject(self, content)
def delete_response_object(self, service_id, version_number, name):
"""Deletes the specified Response Object."""
content = self._fetch("/service/%s/version/%d/response_object/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
return self._status(content)
def create_service(self, customer_id, name, publish_key=None, comment=None):
"""Create a service."""
body = self._formdata({
"customer_id": customer_id,
"name": name,
"publish_key": publish_key,
"comment": comment,
}, FastlyService.FIELDS)
content = self._fetch("/service", method="POST", body=body)
return FastlyService(self, content)
def list_services(self):
"""List Services."""
content = self._fetch("/service")
return map(lambda x: FastlyService(self, x), content)
def get_service(self, service_id):
"""Get a specific service by id."""
content = self._fetch("/service/%s" % service_id)
return FastlyService(self, content)
def get_service_details(self, service_id):
"""List detailed information on a specified service."""
content = self._fetch("/service/%s/details" % service_id)
return FastlyService(self, content)
def get_service_by_name(self, service_name):
"""Get a specific service by name."""
# Name goes into the query string, so it is percent-encoded first.
content = self._fetch("/service/search?name=%s" % urllib.parse.quote(service_name, safe=''))
return FastlyService(self, content)
def update_service(self, service_id, **kwargs):
"""Update a service."""
body = self._formdata(kwargs, FastlyService.FIELDS)
content = self._fetch("/service/%s" % service_id, method="PUT", body=body)
return FastlyService(self, content)
def delete_service(self, service_id):
"""Delete a service."""
content = self._fetch("/service/%s" % service_id, method="DELETE")
return self._status(content)
def list_domains_by_service(self, service_id):
"""List the domains within a service."""
content = self._fetch("/service/%s/domain" % service_id, method="GET")
return map(lambda x: FastlyDomain(self, x), content)
def purge_service(self, service_id):
"""Purge everything from a service."""
content = self._fetch("/service/%s/purge_all" % service_id, method="POST")
return self._status(content)
def purge_service_by_key(self, service_id, key):
"""Purge a particular service by a key."""
content = self._fetch("/service/%s/purge/%s" % (service_id, key), method="POST")
return self._status(content)
def create_dictionary(self, service_id, version_number, dictionary_name):
"""Create a dictionary."""
body = self._formdata({
"name": dictionary_name
}, FastlyDictionary.FIELDS)
content = self._fetch("/service/%s/version/%d/dictionary" % (service_id, version_number), method="POST", body=body)
return FastlyDictionary(self, content)
def get_dictionary(self, service_id, version_number, name):
    """Get a dictionary by name from a particular service and version."""
    # Consistency fix: quote with safe='' like every other name-in-path call
    # in this client, so a '/' in the name is escaped instead of splitting
    # the URL path.
    content = self._fetch("/service/%s/version/%d/dictionary/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
    return FastlyDictionary(self, content)
def delete_dictionary(self, service_id, version_number, name):
    """Delete a dictionary for a particular service and version."""
    # Consistency fix: quote with safe='' like every other name-in-path call
    # in this client, so a '/' in the name cannot split the URL path.
    content = self._fetch("/service/%s/version/%d/dictionary/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
    return self._status(content)
def list_dictionaries(self, service_id, version_number):
    """List the dictionaries within a service."""
    # Consistency fix: every sibling method formats version_number with %d;
    # this one used %s. Use %d so a non-int version fails loudly here too.
    content = self._fetch("/service/%s/version/%d/dictionary" % (service_id, version_number))
    # NOTE(review): returns a lazy map iterator under Python 3.
    return map(lambda x: FastlyDictionary(self, x), content)
def get_dictionary_items(self, service_id, dictionary_id):
"""Return the items of the given dictionary, wrapped as FastlyDictionaryItem."""
content = self._fetch("/service/%s/dictionary/%s/items" % (service_id, dictionary_id))
return map(lambda x: FastlyDictionaryItem(self, x), content)
def update_dictionary_items_batch(self, service_id, dictionary_id, **kwargs):
"""Batch update dictionary items.

Accepts optional keyword lists 'create_items', 'update_items' and
'delete_items', each a list of {'key': ..., 'value': ...} dicts; they are
translated into the API's per-item op records and sent as one PATCH.
"""
# Maps each incoming kwarg list to the 'op' value the API expects.
item_types = [
{
"dict_name": "create_items",
"fastly_op": "create"
},
{
"dict_name": "update_items",
"fastly_op": "update"
},
{
"dict_name": "delete_items",
"fastly_op": "delete"
}
]
items = []
for item_type in item_types:
for item in kwargs.get(item_type['dict_name'], []):
items.append({
"op": item_type['fastly_op'],
"item_key": item['key'],
"item_value": item['value']
})
# This endpoint takes JSON, unlike the form-encoded bodies elsewhere.
body = json.dumps({"items": items})
content = self._fetch(
"/service/%s/dictionary/%s/items" % (service_id, dictionary_id),
method="PATCH",
body=body,
headers={
"Content-Type": "application/json"
}
)
return self._status(content)
def get_settings(self, service_id, version_number):
"""Get the settings for a particular service and version."""
content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number))
return FastlySettings(self, content)
def update_settings(self, service_id, version_number, settings=None):
    """Update the settings for a particular service and version.

    `settings` is a mapping of setting name to value; None (the default)
    sends an empty update, same as before.
    """
    # FIX: replaced the mutable default argument `settings={}` (shared
    # across calls) with the None sentinel; behavior is unchanged for all
    # existing callers, including those passing {} explicitly.
    body = urllib.parse.urlencode(settings if settings is not None else {})
    content = self._fetch("/service/%s/version/%d/settings" % (service_id, version_number), method="PUT", body=body)
    return FastlySettings(self, content)
def get_stats(self, service_id, stat_type=FastlyStatsType.ALL):
"""Get the stats from a service."""
# Returns the raw decoded API payload rather than a wrapper object.
content = self._fetch("/service/%s/stats/%s" % (service_id, stat_type))
return content
def list_syslogs(self, service_id, version_number):
"""List all of the Syslogs for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog" % (service_id, version_number))
# NOTE(review): returns a lazy map iterator under Python 3.
return map(lambda x: FastlySyslog(self, x), content)
def create_syslog(
self,
service_id,
version_number,
name,
address,
port=514,
use_tls="0",
tls_ca_cert=None,
token=None,
_format=None,
response_condition=None):
"""Create a Syslog for a particular service and version."""
# '_format' avoids shadowing the builtin; it is sent as the API's 'format'.
body = self._formdata({
"name": name,
"address": address,
"port": port,
"use_tls": use_tls,
"tls_ca_cert": tls_ca_cert,
"token": token,
"format": _format,
"response_condition": response_condition,
}, FastlySyslog.FIELDS)
content = self._fetch("/service/%s/version/%d/syslog" % (service_id, version_number), method="POST", body=body)
return FastlySyslog(self, content)
def get_syslog(self, service_id, version_number, name):
"""Get the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')))
return FastlySyslog(self, content)
def update_syslog(self, service_id, version_number, name_key, **kwargs):
"""Update the Syslog for a particular service and version."""
body = self._formdata(kwargs, FastlySyslog.FIELDS)
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, urllib.parse.quote(name_key, safe='')), method="PUT", body=body)
return FastlySyslog(self, content)
def delete_syslog(self, service_id, version_number, name):
"""Delete the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, urllib.parse.quote(name, safe='')), method="DELETE")
return self._status(content)
def change_password(self, old_password, new_password):
    """Update the user's password to a new one."""
    # BUG FIX: the "password" field contained a scrubbed "<PASSWORD>"
    # placeholder (a syntax error); it must carry the new_password argument.
    body = self._formdata({
        "old_password": old_password,
        "password": new_password,
    }, ["old_password", "password"])
    content = self._fetch("/current_user/password", method="POST", body=body)
    return FastlyUser(self, content)
def get_current_user(self):
"""Get the logged in user."""
content = self._fetch("/current_user")
return FastlyUser(self, content)
def get_user(self, user_id):
"""Get a specific user."""
content = self._fetch("/user/%s" % user_id)
return FastlyUser(self, content)
def create_user(self, customer_id, name, login, password, role=FastlyRoles.USER, require_new_password=True):
    """Create a user under the given customer account."""
    fields = {
        "customer_id": customer_id,
        "name": name,
        "login": login,
        "password": password,
        "role": role,
        "require_new_password": require_new_password,
    }
    payload = self._formdata(fields, FastlyUser.FIELDS)
    return FastlyUser(self, self._fetch("/user", method="POST", body=payload))
def update_user(self, user_id, **kwargs):
    """Update an existing user's fields."""
    payload = self._formdata(kwargs, FastlyUser.FIELDS)
    response = self._fetch("/user/%s" % user_id, method="PUT", body=payload)
    return FastlyUser(self, response)
def delete_user(self, user_id):
    """Delete a user and report the API status."""
    response = self._fetch("/user/%s" % user_id, method="DELETE")
    return self._status(response)
def request_password_reset(self, user_id):
    """Ask the API to start a password reset for the specified user."""
    response = self._fetch("/user/%s/password/request_reset" % user_id, method="POST")
    return FastlyUser(self, response)
def list_vcls(self, service_id, version_number):
    """List the uploaded VCLs for a particular service and version.

    Returns a lazy ``map`` of FastlyVCL objects, one per entry.
    """
    content = self._fetch("/service/%s/version/%d/vcl" % (service_id, version_number))
    return map(lambda item: FastlyVCL(self, item), content)
def upload_vcl(self, service_id, version_number, name, content, main=None, comment=None):
    """Upload a VCL for a particular service and version."""
    payload = self._formdata({
        "name": name,
        "content": content,
        "comment": comment,
        "main": main,
    }, FastlyVCL.FIELDS)
    response = self._fetch("/service/%s/version/%d/vcl" % (service_id, version_number),
                           method="POST", body=payload)
    return FastlyVCL(self, response)
def download_vcl(self, service_id, version_number, | |
# (extraction artifact removed: a stray "<reponame>" tag preceded this module docstring)
"""
=================
Structured Arrays
=================
Introduction
============
Structured arrays are ndarrays whose datatype is a composition of simpler
datatypes organized as a sequence of named :term:`fields <field>`. For example,
::
>>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
>>> x
array([('Rex', 9, 81.), ('Fido', 3, 27.)],
dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Here ``x`` is a one-dimensional array of length two whose datatype is a
structure with three fields: 1. A string of length 10 or less named 'name', 2.
a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
If you index ``x`` at position 1 you get a structure::
>>> x[1]
('Fido', 3, 27.0)
You can access and modify individual fields of a structured array by indexing
with the field name::
>>> x['age']
array([9, 3], dtype=int32)
>>> x['age'] = 5
>>> x
array([('Rex', 5, 81.), ('Fido', 5, 27.)],
dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Structured datatypes are designed to be able to mimic 'structs' in the C
language, and share a similar memory layout. They are meant for interfacing with
C code and for low-level manipulation of structured buffers, for example for
interpreting binary blobs. For these purposes they support specialized features
such as subarrays, nested datatypes, and unions, and allow control over the
memory layout of the structure.
Users looking to manipulate tabular data, such as stored in csv files, may find
other pydata projects more suitable, such as xarray, pandas, or DataArray.
These provide a high-level interface for tabular data analysis and are better
optimized for that use. For instance, the C-struct-like memory layout of
structured arrays in numpy can lead to poor cache behavior in comparison.
.. _defining-structured-types:
Structured Datatypes
====================
A structured datatype can be thought of as a sequence of bytes of a certain
length (the structure's :term:`itemsize`) which is interpreted as a collection
of fields. Each field has a name, a datatype, and a byte offset within the
structure. The datatype of a field may be any numpy datatype including other
structured datatypes, and it may also be a :term:`subarray data type` which
behaves like an ndarray of a specified shape. The offsets of the fields are
arbitrary, and fields may even overlap. These offsets are usually determined
automatically by numpy, but can also be specified.
Structured Datatype Creation
----------------------------
Structured datatypes may be created using the function :func:`numpy.dtype`.
There are 4 alternative forms of specification which vary in flexibility and
conciseness. These are further documented in the
:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
summary they are:
1. A list of tuples, one tuple per field
Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
optional. ``fieldname`` is a string (or tuple if titles are used, see
:ref:`Field Titles <titles>` below), ``datatype`` may be any object
convertible to a datatype, and ``shape`` is a tuple of integers specifying
subarray shape.
>>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
If ``fieldname`` is the empty string ``''``, the field will be given a
default name of the form ``f#``, where ``#`` is the integer index of the
field, counting from 0 from the left::
>>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
The byte offsets of the fields within the structure and the total
structure itemsize are determined automatically.
2. A string of comma-separated dtype specifications
In this shorthand notation any of the :ref:`string dtype specifications
<arrays.dtypes.constructing>` may be used in a string and separated by
commas. The itemsize and byte offsets of the fields are determined
automatically, and the field names are given the default names ``f0``,
``f1``, etc. ::
>>> np.dtype('i8, f4, S3')
dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
>>> np.dtype('3int8, float32, (2, 3)float64')
dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
3. A dictionary of field parameter arrays
This is the most flexible form of specification since it allows control
over the byte-offsets of the fields and the itemsize of the structure.
The dictionary has two required keys, 'names' and 'formats', and four
optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
for 'names' and 'formats' should respectively be a list of field names and
a list of dtype specifications, of the same length. The optional 'offsets'
value should be a list of integer byte-offsets, one for each field within
the structure. If 'offsets' is not given the offsets are determined
automatically. The optional 'itemsize' value should be an integer
describing the total size in bytes of the dtype, which must be large
enough to contain all the fields.
::
>>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
dtype([('col1', '<i4'), ('col2', '<f4')])
>>> np.dtype({'names': ['col1', 'col2'],
... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
Offsets may be chosen such that the fields overlap, though this will mean
that assigning to one field may clobber any overlapping field's data. As
an exception, fields of :class:`numpy.object` type cannot overlap with
other fields, because of the risk of clobbering the internal object
pointer and then dereferencing it.
The optional 'aligned' value can be set to ``True`` to make the automatic
offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
True.
The optional 'titles' value should be a list of titles of the same length
as 'names', see :ref:`Field Titles <titles>` below.
4. A dictionary of field names
The use of this form of specification is discouraged, but documented here
because older numpy code may use it. The keys of the dictionary are the
field names and the values are tuples specifying type and offset::
>>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
dtype([('col1', 'i1'), ('col2', '<f4')])
This form is discouraged because Python dictionaries do not preserve order
in Python versions before Python 3.6, and the order of the fields in a
structured dtype has meaning. :ref:`Field Titles <titles>` may be
specified by using a 3-tuple, see below.
Manipulating and Displaying Structured Datatypes
------------------------------------------------
The list of field names of a structured datatype can be found in the ``names``
attribute of the dtype object::
>>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
>>> d.names
('x', 'y')
The field names may be modified by assigning to the ``names`` attribute using a
sequence of strings of the same length.
The dtype object also has a dictionary-like attribute, ``fields``, whose keys
are the field names (and :ref:`Field Titles <titles>`, see below) and whose
values are tuples containing the dtype and byte offset of each field. ::
>>> d.fields
mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
Both the ``names`` and ``fields`` attributes will equal ``None`` for
unstructured arrays. The recommended way to test if a dtype is structured is
with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
with 0 fields.
The string representation of a structured datatype is shown in the "list of
tuples" form if possible, otherwise numpy falls back to using the more general
dictionary form.
.. _offsets-and-alignment:
Automatic Byte Offsets and Alignment
------------------------------------
Numpy uses one of two methods to automatically determine the field byte offsets
and the overall itemsize of a structured datatype, depending on whether
``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
By default (``align=False``), numpy will pack the fields together such that
each field starts at the byte offset the previous field ended, and the fields
are contiguous in memory. ::
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
offsets: [0, 1, 2, 6, 7, 15]
itemsize: 17
If ``align=True`` is set, numpy will pad the structure in the same way many C
compilers would pad a C-struct. Aligned structures can give a performance
improvement in some cases, at the cost of increased datatype size. Padding
bytes are inserted between fields such that each field's byte offset will be a
multiple of that field's alignment, which is usually equal to the field's size
in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
structure will also have trailing padding added so that its itemsize is a
multiple of the largest field's alignment. ::
| |
type for y coordinates")
raise Exception("Not a valid type for y coordinates")
if len(x) == len(y):
self.x = x
self.y = y
else:
logger.error("Input vectors have not the same length")
Exception("Input vectors have not the same length")
@property
def get_contours_number(self):
    """Return the number of sub-contours.

    :return: ncontour: int
    """
    count = len(self.x)
    logger.info("Number of contours: {0}".format(count))
    return count
@property
def get_points_number(self):
    """Return, for each sub-contour, its number of points."""
    # Accessing the property (rather than len(self.x)) keeps the
    # "Number of contours" log message the original emitted.
    ncontour = self.get_contours_number
    return [len(self.x[idx]) for idx in range(ncontour)]
def write_to(self, filename):
    """Write the contour coordinates into the selected file.

    Format: first line is the contour count; each sub-contour is written as
    its point count followed by one "x y" pair per line.

    :param filename: name of the 'datasource' file
    :type filename: str
    """
    ncontour = self.get_contours_number
    npoints = self.get_points_number
    with open(filename, 'w') as fout:
        fout.write(str(ncontour) + '\n')
        for idx in range(ncontour):
            logger.debug("Sub-contour no. {0} has {1} points".format(idx + 1, npoints[idx]))
            fout.write(str(npoints[idx]) + '\n')
            for px, py in zip(self.x[idx], self.y[idx]):
                fout.write(' '.join((str(px), str(py))) + '\n')
    logger.info("Written contours into file {0}".format(filename))
def to_geojson(self, filename, varname='contours'):
    """Write the contour to a geojson file (as a javascript assignment).

    :param filename: path to the file to be written
    :type filename: str
    :param varname: name of the js variable that will represent the contours
    :type varname: str
    :raises FileNotFoundError: if the target directory does not exist
    """
    geojsoncontour = {
        "type": "MultiPolygon",
        "coordinates": [[[[x, y] for x, y in zip(self.x[cont], self.y[cont])]] for cont in
                        range(0, self.get_contours_number)]
    }
    try:
        # os.path.join with a single argument was a no-op; open directly.
        with open(filename, 'w') as f:
            f.write("".join(("var ", varname, " = ")))
            out = json.dumps(geojsoncontour, indent=2, separators=(',', ': '))
            f.write(out)
    except FileNotFoundError:
        # BUG FIX: the error is about the missing parent *directory*, so log
        # dirname(filename); the previous basename() logged the file name.
        logger.error("Directory {} does not exist".format(os.path.dirname(filename)))
        raise FileNotFoundError('Directory does not exist')
def read_from_np(self, filename):
    """Get the coordinates of the contour from an already existing contour file.

    The function use numpy method 'genfromtxt' several times and is not optimised at all.
    For large contour files, prefer 'read_from' function.

    :parameter: filename: str
    :return: lon: numpy ndarray
    :return: lat: numpy ndarray
    :raises FileNotFoundError: if *filename* does not exist
    """
    # Check if the file exist
    if os.path.exists(filename):
        # Count number of contour and lines in the files
        # %timeit shows that "linecache" is way faster than "readline" on the first line
        logger.info("Reading contours from file {0}".format(filename))
        ncontours = int(linecache.getline(filename, 1))
        with open(filename) as f:
            nlines = sum(1 for _ in f)
        logger.debug("Number of contours: {0}".format(ncontours))
        # Initialise lon and lat as list of lists
        # ([[]] * n shares one inner list, but every slot is reassigned below,
        # so the aliasing is harmless here)
        lon = [[]] * ncontours
        lat = [[]] * ncontours
        # Initialise line to read number
        linenum = 2
        # Loop on the contours
        for n in range(0, ncontours):
            # Number of points in the current contour
            npoints = int(linecache.getline(filename, linenum))
            nskiplines = linenum + npoints
            # Load coordinates (npoints lines to be read): genfromtxt re-reads
            # the whole file, masking out everything before `linenum` and after
            # the current sub-contour — hence "not optimised at all" above.
            coords = np.genfromtxt(filename, skip_header=linenum, skip_footer=nlines - nskiplines)
            coords = coords.T
            lon[n] = coords[0]
            lat[n] = coords[1]
            # Update line number
            # (taking into account the number of points already read)
            linenum = nskiplines + 1
        # NOTE(review): with sub-contours of different lengths this builds a
        # ragged object array (newer numpy may require dtype=object) — confirm
        # against the numpy version pinned by the project.
        self.x = np.array(lon)
        self.y = np.array(lat)
        return self
    else:
        logger.error("File {0} does not exist".format(filename))
        raise FileNotFoundError('File does not exist')
def read_from(self, filename):
    """Get the coordinates of the contour from an already existing contour file.

    The file is traversed exactly once; the accumulated lists are converted
    to ndarrays only at the end of the loop.

    :parameter filename: name of the 'contour' file
    :type filename: str
    :return: self, with ``x`` (lon) and ``y`` (lat) filled in
    :raises FileNotFoundError: if *filename* does not exist
    """
    if not os.path.exists(filename):
        logger.error("File {0} does not exist".format(filename))
        raise FileNotFoundError('File does not exist')
    logger.info("Reading contours from file {0}".format(filename))
    lon, lat = [], []
    with open(filename, 'r') as fin:
        ncontour = int(fin.readline().split()[0])
        logger.debug("Number of contours: {0}".format(ncontour))
        for _ in range(ncontour):
            npoints = int(fin.readline().split()[0])
            xx, yy = [], []
            for _ in range(npoints):
                fields = fin.readline().split()
                xx.append(float(fields[0]))
                yy.append(float(fields[1]))
            lon.append(xx)
            lat.append(yy)
    self.x = np.array(lon)
    self.y = np.array(lat)
    return self
def add_to_plot(self, m=None, **kwargs):
    """Add the contours to the plot.

    Each sub-contour is closed by re-appending its first point.

    :param m: basemap projection (plain pyplot axes are used when None)
    :type m: mpl_toolkits.basemap.Basemap
    """
    if m is None:
        logger.debug("No projection defined")
        logger.debug('Adding contours to plot')
        drawer = plt.plot
        extra = {}
    else:
        logger.debug("Applying projection to coordinates")
        logger.debug('Adding contours to map')
        drawer = m.plot
        extra = {'latlon': True}
    for lon, lat in zip(self.x, self.y):
        drawer(np.append(lon, lon[0]),
               np.append(lat, lat[0]),
               **extra, **kwargs)
class Diva2DParameters(object):
"""Class that stores the parameter properties
Example:
========
Parameters = Diva2DParameters(CorrelationLength, iCoordChange, iSpec, iReg, xmin, ymin, dx, dy,
nx, ny, ExclusionValue, SignalToNoiseRatio, VarianceBackgroundField)
"""
def __init__(self, cl=None, icoordchange=0, ispec=0, ireg=0,
             xori=None, yori=None, dx=None, dy=None, nx=None, ny=None,
             valex=-999., snr=None, varbak=None):
    """Creation of the Diva 2D 'data' object using the user inputs.

    :param cl: correlation length
    :type cl: float
    :param icoordchange: flag for the specification of the coordinate change
    :type icoordchange: int
    :param ispec: flag for the specification of the output field
    :type ispec: int
    :param ireg: flag for the specification of the reference field
    :type ireg: int
    :param xori: westernmost location of the computation domain
    :type xori: float
    :param yori: southernmost location of the computation domain
    :type yori: float
    :param dx: spatial step in the x-direction
    :type dx: float
    :param dy: spatial step in the y-direction
    :type dy: float
    :param nx: number of steps in the x-direction
    :type nx: int
    :param ny: number of steps in the y-direction
    :type ny: int
    :param valex: exclusion (also called fill) value in the analysis and error fields
    :type valex: float
    :param snr: signal-to-noise ratio of the dataset
    :type snr: float
    :param varbak: variance of the background field
    :type varbak: float
    """
    self.cl = cl
    self.icoordchange = icoordchange
    self.ispec = ispec
    self.ireg = ireg
    self.xori = xori
    self.yori = yori
    self.dx = dx
    self.dy = dy
    self.nx = nx
    self.ny = ny
    self.valex = valex
    self.snr = snr
    self.varbak = varbak
    # Compute domain limits for later use.
    # BUG FIX: the previous truthiness tests (`if self.nx and self.dx and
    # self.xori`) treated 0 / 0.0 — a perfectly valid origin — as missing,
    # leaving xend/yend as None for domains starting at coordinate 0.
    # Test explicitly against None instead.
    if None not in (self.nx, self.dx, self.xori):
        self.xend = self.xori + (self.nx - 1) * self.dx
    else:
        self.xend = None
    if None not in (self.ny, self.dy, self.yori):
        self.yend = self.yori + (self.ny - 1) * self.dy
    else:
        self.yend = None
    logger.info("Creating Diva 2D parameter object")
'''
class Icoord(object):
def __init__(self, icoordchange=0):
self.icoordchange = icoordchange
print(icoordchange)
def describe(self):
if self.icoordchange == None:
logger.warning("icoordchange not defined")
self.description = ' '
else:
if self.icoordchange == 0:
self.description = 'no change is necessary ; same coordinates as data'
elif self.icoordchange == 1:
self.description = 'conversion from degrees to kilometers'
elif self.icoordchange == 2:
self.description = 'conversion from degrees to kilometers using a cosine projection'
elif self.icoordchange < 0:
self.description = 'apply scale factor xscale before starting the calculation'
else:
self.description = 'unknown value'
'''
def describe(self):
    """Print a human-readable summary of the stored parameter values.

    If the parameters were not initialised, None is printed for each value.
    """
    summary = [
        "Correlation length: {0}".format(self.cl),
        "icoordchange: {0}".format(self.icoordchange),
        "ispec: {0}".format(self.ispec),
        "ireg: {0}".format(self.ireg),
        "Domain: x-axis: from {0} to {1} with {2} steps of {3}".format(self.xori, self.xend,
                                                                       self.nx, self.dx),
        "Domain: y-axis: from {0} to {1} with {2} steps of {3}".format(self.yori, self.yend,
                                                                       self.ny, self.dy),
        "Exclusion value: {0}".format(self.valex),
        "Signal-to-noise ratio: {0}".format(self.snr),
        "Variance of the background field: {0}".format(self.varbak),
    ]
    for line in summary:
        print(line)
def write_to(self, filename):
    """Create a DIVA 2D parameter file from the stored analysis parameters.

    Each parameter is written as a '# label' line followed by its value,
    matching the layout DIVA expects.

    :param filename: name of the 'parameter' file
    :type filename: str
    """
    labels = ('Correlation Length lc', 'icoordchange', 'ispec', 'ireg',
              'xori', 'yori', 'dx', 'dy', 'nx', 'ny',
              'valex', 'snr', 'varbak')
    values = (self.cl, self.icoordchange, self.ispec, self.ireg,
              self.xori, self.yori, self.dx, self.dy, self.nx, self.ny,
              self.valex, self.snr, self.varbak)
    # " \n".join reproduces the original byte layout: every label and every
    # value (except the last) carries a trailing space before its newline.
    paramstring = " \n".join("# {0} \n{1}".format(label, value)
                             for label, value in zip(labels, values))
    with open(filename, 'w') as fout:
        fout.write(paramstring)
    logger.info("Written parameters into file {0}".format(filename))
def read_from(self, filename):
"""Read the information contained in a DIVA parameter file
and extract the analysis parameters
:param filename: name of the 'parameter' file
:type filename: str
"""
if os.path.exists(filename):
logger.info("Reading parameters from file {0}".format(filename))
cl, icoord, ispec, ireg, xori, yori, dx, dy, nx,\
| |
# rpbasicdesign/Designer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Methods for conversion"""
__author__ = '<NAME>'
__license__ = 'MIT'
__date__ = '2020.10.21'
import itertools
import logging
import os
import pprint
import random
import sys
from csv import DictReader, DictWriter
from copy import deepcopy
from math import factorial
from pathlib import Path
from typing import Dict
from operator import getitem
from rptools.rplibs import rpSBML, rpPathway
from rpbasicdesign import DNABOT_PART_HEADER
from rpbasicdesign.Construct import Construct
from rpbasicdesign.Part import Part
# CONSTANTS
MAX_ENZ_PER_RXN = 1
MAX_RXN_PER_CONSTRUCT = 3
def _gen_plate_coords(nb_row=8, nb_col=12):
"""Generator that generates the label coordinates for plate
:param nb_row: number of rows in the plate (max: 26)
:type nb_row: int
:param nb_col: number of columns in the plate (max: 99)
:type nb_col: int
:returns: well coordinate
:rtype: str
"""
assert nb_row <= 26 and nb_col <= 99
row_names = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
col_names = [str(_) for _ in range(1, 100)]
for icol in range(nb_col):
for jrow in range(nb_row):
yield ''.join([row_names[jrow], col_names[icol]])
class Designer:
"""Convert rpSBML enzyme info in to BASIC construct.
WARNING: promoters and RBSs are randomly chosen from amongst all available
:param polycistronic: True if polycistronic design should be prepared
:type polycistronic: bool
:param verbose: True to increase log verbosity
:type verbose: bool
:param lms_id: part ID that corresponds to the LMS methylated linker
:type lms_id: str
:param lmp_id: part ID that corresponds to the LMP methylated linker
:type lmp_id: str
:param backbone_id: part ID that corresponds to the backbone
:type backbone_id: str
:param parts_files: files listing available linkers and user parts (backbone, promoters, ...)for constructs
:type parts_files: list of str
:return: Designer object
:rtype: <Designer>
"""
def __init__(self,
             # polycistronic=True,
             verbose=False,
             lms_id='LMS', lmp_id='LMP',
             backbone_id='BASIC_SEVA_37_CmR-p15A.1',
             parts_files=None,
             max_enz_per_rxn=MAX_ENZ_PER_RXN,
             max_rxn_per_construct=MAX_RXN_PER_CONSTRUCT
             ):
    """Initialise the designer and load linkers/parts from the parts files.

    See the class docstring for parameter documentation. Loading happens
    eagerly: `_get_linkers_and_parts_from_file` reads every parts file and
    `_check_linkers_and_parts` raises if no linker or part was found.
    """
    # Default settings: package data shipped alongside this module.
    self._DATA_PATH = Path(__file__).resolve().parent / 'data'
    self._DEFAULT_DATA = [
        self._DATA_PATH / 'biolegio_parts.csv',
        self._DATA_PATH / 'user_parts.csv'
    ]
    # File to copy-paste, for the user convenience
    self._BIOLEGIO_PLATE_FILE = self._DATA_PATH / 'biolegio_plate.csv'
    self._verbose = verbose
    # NOTE(review): hard-coded to True — the `polycistronic` parameter is
    # commented out in the signature above; confirm whether monocistronic
    # designs are meant to be unsupported.
    self._polycistronic_design = True
    self._lms_id = lms_id
    self._lmp_id = lmp_id
    self._backbone_id = backbone_id
    self._max_enz_per_rxn = max_enz_per_rxn
    self._max_rxn_per_construct = max_rxn_per_construct
    # Data files
    if parts_files is None:
        self._parts_files = self._DEFAULT_DATA
    else:
        self._parts_files = parts_files
    # Storage: part ID -> Part object, and the generated constructs.
    self._parts = {}
    self.constructs = []
    # Get resources
    self._get_linkers_and_parts_from_file()
    self._check_linkers_and_parts()
def _get_backbone_part(self) -> Part:
    """Return the registered backbone part."""
    return self._parts[self._backbone_id]

def _get_lms_part(self) -> Part:
    """Return the LMS methylated-linker part."""
    return self._parts[self._lms_id]

def _get_lmp_part(self) -> Part:
    """Return the LMP methylated-linker part."""
    return self._parts[self._lmp_id]
def _get_neutral_linker_parts(self) -> list:
parts = []
for part in self._parts.values():
if part.linker_class == 'neutral linker':
parts.append(part)
return parts
def _get_rbs_ids(self) -> list:
"""Return RBS part IDs"""
part_ids = []
for part in self._parts.values():
if part.biological_role == 'rbs':
part_ids.append(part.id)
return sorted(part_ids)
def _get_promoter_ids(self) -> list:
"""Return promoter parts"""
part_ids = []
for part in self._parts.values():
if part.biological_role == 'promoter':
part_ids.append(part.id)
return sorted(part_ids)
def _get_cds_ids(self) -> list:
"""Return CDS parts"""
part_ids = []
for part in self._parts.values():
if part.biological_role == 'cds':
part_ids.append(part.id)
return sorted(part_ids)
def _get_cds_ids_at_step(self, cds_step: str) -> list:
"""Return the CDS IDs for the specified step"""
part_ids = []
for part in self._parts.values():
if part.biological_role == 'cds':
if cds_step in part.cds_steps:
part_ids.append(part.id)
return sorted(part_ids)
def _get_parts_from_ids(self, part_ids: list) -> list:
parts = []
for part_id in part_ids:
parts.append(self._get_part_from_id(part_id))
return parts
def _part_exists(self, part_id: str) -> bool:
if part_id in self._parts:
return True
return False
def _get_part_from_id(self, part_id: str) -> Part:
    """Return the part registered under *part_id*, or None when unknown."""
    return self._parts.get(part_id, None)
def _get_rbs_ortho_id(self, part_id: str) -> str:
assert len(part_id.split('-')) == 2
return part_id.split('-')[0]
def _get_rbs_seq_id(self, part_id: str) -> dict:
assert len(part_id.split('-')) == 2
return part_id.split('-')[1]
def _gen_rbs_id(self, rbs_ortho_id: str, rbs_seq_id: str) -> str:
return f'{rbs_ortho_id}-{rbs_seq_id}'
def _get_linkers_and_parts_from_file(self):
    """Collect linkers and parts from the parts files.

    Each parts file is a CSV providing at least 'id', 'type' and 'sequence'
    columns. Rows whose ID starts with '#' are treated as commented out.
    Rows with an unsupported type are skipped with a warning.
    """
    for parts_file in self._parts_files:
        with open(parts_file) as ifh:
            for item in DictReader(ifh):
                part_type = item['type'].lower()  # reused by several branches
                if item['id'].startswith('#'):  # Skip if commented
                    continue
                elif item['id'] in self._parts:
                    # BUG FIX: this branch skips the duplicate row, so the
                    # *first* definition wins; the previous message wrongly
                    # claimed the last one was kept.
                    logging.warning(f'Warning, part {item["id"]} duplicated, only the first definition kept.')
                elif part_type in ['neutral linker', 'methylated linker', 'peptide fusion linker']:
                    self._parts[item['id']] = Part(id=item['id'], basic_role='linker', biological_role='misc',
                                                   linker_class=part_type, seq=item['sequence'])
                elif part_type == 'rbs linker':
                    self._parts[item['id']] = Part(id=item['id'], basic_role='linker', biological_role='rbs',
                                                   linker_class=part_type, seq=item['sequence'])
                elif part_type == 'backbone':
                    self._parts[item['id']] = Part(id=item['id'], basic_role='backbone', biological_role='ori',
                                                   seq=item['sequence'])
                elif part_type == 'constitutive promoter':
                    self._parts[item['id']] = Part(id=item['id'], basic_role='part', biological_role='promoter',
                                                   seq=item['sequence'])
                else:
                    logging.warning(f'Part "{item["id"]}" not imported because it does not fall any supported part '
                                    f'type.')
def _check_linkers_and_parts(self):
"""Check that a minimal set of ressources have been stored."""
_EXPECTED_ROLES = {'linker': 0, 'part': 0}
for item in self._parts.values():
if item.basic_role in _EXPECTED_ROLES.keys():
_EXPECTED_ROLES[item.basic_role] += 1
for role, count in _EXPECTED_ROLES.items():
if count == 0:
raise BaseException(f'No linker or part of type "{role}" provided. Does any have been provided? Exit.')
def get_selenzyme_annotation(
        self,
        rpsbml_path: str
) -> Dict:
    """Collect, for each pathway reaction, the best-scoring enzyme UniProt IDs
    from the selenzyme annotation of an rpSBML file, and register them as CDS
    parts.

    :param rpsbml_path: path to the rpSBML file
    :raises ValueError: if the pathway has too many reactions, or a reaction
        carries no enzyme annotation
    """
    rpsbml = rpSBML(str(rpsbml_path))
    pathway = rpPathway.from_rpSBML(rpsbml=rpsbml)
    # BUG FIX: enumerate previously started at 0, so the `idx_rxn > max`
    # guard only fired one reaction too late (max + 1 reactions were
    # accepted); start=1 matches the reaction count, consistent with the
    # deprecated implementation kept in comments below.
    for idx_rxn, rxn_id in enumerate(pathway.get_reactions_ids(), start=1):
        # Stop if too many reactions
        if idx_rxn > self._max_rxn_per_construct:
            # BUG FIX: the two f-strings were comma-separated, producing a
            # two-argument exception; join them into one message.
            raise ValueError(
                f'Number of reactions exceed the defined allowed number of '
                f'enzymes : {self._max_rxn_per_construct}. Execution cancelled.')
        rxn = pathway.get_reaction(rxn_id)
        enzymes = rxn.get_selenzy()
        # Stop if no enzyme available
        if len(enzymes) == 0:
            raise ValueError(
                f'Missing UniProt IDs from selenzyme annotation for '
                f'for reaction {rxn_id}. Execution cancelled.')
        # Collect enzymes ordered by score, the first being the best.
        for idx_enz, enz in enumerate(
                sorted(enzymes.items(), key=lambda x: getitem(x[1], 'score'), reverse=True),
                start=1):
            # Skip worst enzymes if too many
            if idx_enz > self._max_enz_per_rxn:
                logging.warning(
                    f'Max number of enzyme per reaction reached ({self._max_enz_per_rxn}) '
                    f'for reaction {rxn_id}. Only the best one(s) are kept.')
                break
            uniprot_id, _ = enz
            if uniprot_id in self._parts:
                self._parts[uniprot_id].cds_steps.append(rxn_id)
            else:
                self._parts[uniprot_id] = Part(id=uniprot_id, basic_role='part',
                                               biological_role='cds', cds_steps=[rxn_id],
                                               seq='atgc')
# def _read_MIRIAM_annotation(self, annot) -> dict:
# """Return the MIRIAM annotations of species.
# Notice: an empty dict is return if the parsing failed.
# :param annot: SBML annotation block
# :type annot: <libsbml.XMLNode>
# :return: annotations as a dictionary
# :rtype dict
# """
# try:
# to_keep = {}
# bag = annot.getChild('RDF').getChild('Description').getChild('is').getChild('Bag')
# for i in range(bag.getNumChildren()):
# str_annot = bag.getChild(i).getAttrValue(0)
# if str_annot == '':
# logging.warning('This contains no attributes: ' + str(bag.getChild(i).toXMLString()))
# continue
# dbid = str_annot.split('/')[-2].split('.')[0]
# if len(str_annot.split('/')[-1].split(':')) == 2:
# cid = str_annot.split('/')[-1].split(':')[1]
# else:
# cid = str_annot.split('/')[-1]
# if dbid not in to_keep:
# to_keep[dbid] = []
# to_keep[dbid].append(cid)
# return to_keep
# except AttributeError:
# return {}
# def enzyme_from_rpsbml_deprecated(self, rpsbml_file: str):
# """Extract enzyme from rpSBML annotation
# WARNING: the rpSBML file is expected to follow the specific schema of annotations used for rpSBMLs
# :param rpsbml_file: path to rpSBML from which enzyme will be extracted.
# :type rpsbml_file: str
# """
# TO_SKIP_RXN_IDS = ['rxn_target']
# reader = SBMLReader()
# document = reader.readSBML(rpsbml_file)
# model = document.getModel()
# for idx_rxn, reaction in enumerate(model.getListOfReactions(), start=1):
# if reaction.id in TO_SKIP_RXN_IDS:
# logging.info(f'Reaction `{reaction.id}` skipped when extracting UniProt IDs. See TO_SKIP_RXN_IDS.')
# elif idx_rxn > self._max_rxn_per_construct:
# logging.warning(f'Number of reactions exceed the defined allowed number of enzymes : {self._MAX_RXN}. Reaction skipped.')
# else:
# annot = reaction.getAnnotation()
# if 'uniprot' not in self._read_MIRIAM_annotation(annot):
# raise KeyError(f'Missing UniProt ID for reaction {reaction.id}. Execution cancelled.')
# # The list of IDs is traversed in the reverse order
# # - (i) best IDs are expected to be the last ones
# # - (ii) allow to only keep topx enzymes
# for idx_uid, uniprot_id in enumerate(reversed(self._read_MIRIAM_annotation(annot)['uniprot']), start=1):
# if idx_uid > self._max_enz_per_rxn:
# logging.warning(
# f'Max number of enzyme per reaction reached ({self._max_enz_per_rxn}) for reaction {reaction.id}. '
# 'Other enzyme are ignored for this reaction. Passing to the next reaction.')
# break
# if uniprot_id in self._parts:
# self._parts[uniprot_id].cds_steps.append(reaction.id)
# else:
# self._parts[uniprot_id] = Part(id=uniprot_id, basic_role='part',
# biological_role='cds', cds_steps=[reaction.id],
# seq='atgc')
def combine(self, sample_size: int, random_seed: int =42, cds_permutation: bool =True) -> int:
"""Generate random constructs
NOTICE:
- special attention is made to prevent combination that would contain the same promoter, rbs and CDS.
- special attention is made to prevent reusing the same RBS suffix in a given construct, to this end RBS
linker IDs are expected to be in the form AAA-BBB with "AAA" being the linker suffix ID.
- randomness is reset at the begining of each execution by `random.seed(random_seed)`
:param sample_size: expected number of distinct constructs
:type sample_size: int
| |
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2008, <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Legacy hooks for simulation :py:class:`solvcon.case_legacy.Case`. Two
categories of hooks are defined here: (i) base hooks for subclassing and (ii)
generic hooks which can be readily installed.
"""
import os
import time
import math
import numpy as np
from . import rpc
from . import domain
from . import anchor
from .io import vtk as scvtk
from .io import vtkxml
class Hook(object):
    """
    Organizer class for hooking subroutines for BaseCase.

    @ivar cse: Case object.
    @itype cse: BaseCase
    @ivar info: information output function.
    @itype info: callable
    @ivar psteps: the interval number of steps between printing.
    @itype psteps: int
    @ivar ankcls: type of anchor to drop onto solver objects, or None.
    @itype ankcls: type
    @ivar kws: excessive keywords.
    @itype kws: dict
    """
    def __init__(self, cse, **kw):
        """
        @param cse: Case object.
        @type cse: BaseCase
        """
        from . import case  # imported here to avoid cyclic importation.
        assert isinstance(cse, (case.BaseCase, case.MeshCase))
        self.cse = cse
        self.info = cse.info
        self.psteps = kw.pop('psteps', None)
        self.ankcls = kw.pop('ankcls', None)
        # whatever remains in kw is kept for anchor instantiation.
        self.kws = dict(kw)
        super(Hook, self).__init__()
    def _makedir(self, dirname, verbose=False):
        """
        Make a new directory if it does not already exist.

        @param dirname: name of directory to be created.
        @type dirname: str
        @keyword verbose: flag if print out creation message.
        @type verbose: bool
        """
        if os.path.exists(dirname):
            return
        os.makedirs(dirname)
        if verbose:
            self.info('Created %s' % dirname)
    def _depend(self, deplst, verbose=False, stop_on_false=True):
        """
        Check that every hook class in deplst is installed on the case.

        @param deplst: list of depended hook classes.
        @type deplst: list
        @keyword verbose: flag print message.
        @type verbose: bool
        @keyword stop_on_false: flag stop on false.
        @type stop_on_false: bool
        @return: dependency met or not.
        @rtype: bool
        """
        metlst = []
        msglst = []
        for ahook in deplst:
            met = any(isinstance(obj, ahook) for obj in self.cse.runhooks)
            metlst.append(met)
            if not met:
                msglst.append("%s should be enabled for %s." % (
                    ahook.__name__, self.__class__.__name__))
        if msglst:
            if verbose:
                self.info('\n'.join(msglst)+'\n')
            if stop_on_false:
                raise RuntimeError('\n'.join(msglst))
        return all(metlst)
    @staticmethod
    def _deliver_anchor(target, ankcls, ankkw):
        """
        Provide the information to instantiate an anchor object for a solver.
        The target object can be a real solver object or a shadow associated
        to a remote worker object with attached muscle of solver object.

        @param target: the solver or shadow object.
        @type target: solvcon.solver.Solver or solvcon.rpc.Shadow
        @param ankcls: type of the anchor to instantiate.
        @type ankcls: type
        @param ankkw: keywords to instantiate anchor object.
        @type ankkw: dict
        @return: nothing
        """
        if not isinstance(target, rpc.Shadow):
            target.runanchors.append(ankcls, **ankkw)
        else:
            target.drop_anchor(ankcls, ankkw)
    def drop_anchor(self, svr):
        """
        Drop the anchor(s) to the solver object.

        @param svr: the solver object on which the anchor(s) is dropped.
        @type svr: solvon.solver.BaseSolver
        @return: nothing
        """
        if self.ankcls:
            self._deliver_anchor(svr, self.ankcls, self.kws)
    def preloop(self):
        """Things to do before the time-marching loop."""
        pass
    def premarch(self):
        """Things to do before the time march for a specific time step."""
        pass
    def postmarch(self):
        """Things to do after the time march for a specific time step."""
        pass
    def postloop(self):
        """Things to do after the time-marching loop."""
        pass
################################################################################
# Fundamental hooks.
################################################################################
class ProgressHook(Hook):
    """
    Print simulation progress.

    @ivar linewidth: the maximal width for progress symbol. 50 is upper limit.
    @itype linewidth: int
    """
    def __init__(self, cse, **kw):
        self.linewidth = kw.pop('linewidth', 50)
        super(ProgressHook, self).__init__(cse, **kw)
    def preloop(self):
        """Print the current/total step count before the loop starts."""
        istep = self.cse.execution.step_current
        nsteps = self.cse.execution.steps_run
        info = self.info
        info("Steps %d/%d\n" % (istep, nsteps))
    def postmarch(self):
        """
        Print a progress mark every psteps steps and, every
        psteps*linewidth steps, the elapsed and estimated remaining time.
        Assumes psteps was provided (a None psteps would fail the modulo
        below, as in the original code).
        """
        from datetime import timedelta
        istep = self.cse.execution.step_current
        nsteps = self.cse.execution.steps_run
        tstart = self.cse.log.time['loop_march'][0]
        psteps = self.psteps
        linewidth = self.linewidth
        info = self.info
        # calculate estimated remaining time; guard istep == 0 to avoid a
        # ZeroDivisionError when called before any step has marched.
        tcurr = time.time()
        if istep > 0:
            tleft = (tcurr-tstart) * ((float(nsteps)-float(istep))/float(istep))
        else:
            tleft = 0.0
        # output information.
        if istep%psteps == 0:
            info("#")
            if istep > 0 and istep%(psteps*linewidth) == 0:
                info("\nStep %d/%d, time elapsed: %s remaining: %s\n" % (
                    istep, nsteps,
                    str(timedelta(seconds=int(tcurr-tstart))),
                    str(timedelta(seconds=int(tleft))),
                ))
            elif istep == nsteps:
                info("\nStep %d/%d done\n" % (istep, nsteps))
################################################################################
# Hooks for BlockCase.
################################################################################
class BlockHook(Hook):
    """
    Base type for hooks needing a BlockCase.  Provides helpers to gather a
    solver array into one global interior-only array (_collect_interior) and
    to scatter a global array back to the possibly distributed solver(s)
    (_spread_interior).
    """
    def __init__(self, cse, **kw):
        from . import case # avoid cyclic importation.
        assert isinstance(cse, (case.BlockCase, case.MeshCase))
        super(BlockHook, self).__init__(cse, **kw)
    @property
    def blk(self):
        # The Block object of the whole domain held by the case's solver.
        return self.cse.solver.domainobj.blk
    def _collect_interior(self, key, tovar=False, inder=False,
            consider_ghost=True):
        """
        Collect the interior cells of a solver array into one global array.

        @param key: the name of the array to collect in a solver object.
        @type key: str
        @keyword tovar: flag to store collect data to case var dict.
        @type tovar: bool
        @keyword inder: the array is for derived data.
        @type inder: bool
        @keyword consider_ghost: treat the array with the consideration of
            ghost cells. Default is True.
        @type consider_ghost: bool
        @return: the interior array hold by the solver.
        @rtype: numpy.ndarray
        """
        cse = self.cse
        ncell = self.blk.ncell
        ngstcell = self.blk.ngstcell
        if cse.is_parallel:
            dom = self.cse.solver.domainobj
            # collect arrays from the remote solvers, one per sub-block.
            dealer = self.cse.solver.dealer
            arrs = list()
            for iblk in range(dom.nblk):
                dealer[iblk].cmd.pull(key, inder=inder, with_worker=True)
                arr = dealer[iblk].recv()
                arrs.append(arr)
            # create global array sized by the global cell count.
            shape = [it for it in arrs[0].shape]
            shape[0] = ncell
            arrg = np.empty(shape, dtype=arrs[0].dtype)
            # set global array.  From the usage below, clmaps row i appears
            # to map global cell i to (local cell index, sub-block index) --
            # confirm against the domain's mapper definition.
            clmaps = dom.mappers[2]
            for iblk in range(dom.nblk):
                slctg = (clmaps[:,1] == iblk)
                slctl = clmaps[slctg,0]
                if consider_ghost:
                    # shift local indices past the ghost cells;
                    # dom.shapes[iblk,6] is presumably the sub-block's
                    # ngstcell -- TODO confirm.
                    slctl += dom.shapes[iblk,6]
                arrg[slctg] = arrs[iblk][slctl]
        else:
            # serial case: slice the ghost cells off the local array.
            if consider_ghost:
                start = ngstcell
            else:
                start = 0
            if inder:
                arrg = cse.solver.solverobj.der[key][start:].copy()
            else:
                arrg = getattr(cse.solver.solverobj, key)[start:].copy()
        if tovar:
            # also cache the collected array in the case's var dict.
            self.cse.execution.var[key] = arrg
        return arrg
    def _spread_interior(self, arrg, key, consider_ghost=True):
        """
        Scatter a global interior array back into the solver array(s).

        @param arrg: the global array to be spreaded.
        @type arrg: numpy.ndarray
        @param key: the name of the array to collect in a solver object.
        @type key: str
        @keyword consider_ghost: treat the arrays with the consideration of
            ghost cells. Default is True.
        @type consider_ghost: bool
        @return: the interior array hold by the solver.
        @rtype: numpy.ndarray
        """
        cse = self.cse
        ncell = self.blk.ncell
        ngstcell = self.blk.ngstcell
        if cse.is_parallel:
            dom = self.cse.solver.domainobj
            dealer = self.cse.solver.dealer
            clmaps = dom.mappers[2]
            for iblk in range(len(dom)):
                blk = dom[iblk]
                # create subarray for this sub-block.
                shape = [it for it in arrg.shape]
                if consider_ghost:
                    shape[0] = blk.ngstcell+blk.ncell
                else:
                    shape[0] = blk.ncell
                arr = np.empty(shape, dtype=arrg.dtype)
                # calculate selectors.  NOTE(review): only the interior
                # entries of arr are assigned below; the ghost portion stays
                # uninitialized (np.empty) -- presumably overwritten by the
                # receiving solver, verify.
                slctg = (clmaps[:,1] == iblk)
                slctl = clmaps[slctg,0]
                if consider_ghost:
                    slctl += blk.ngstcell
                # push data to remote solver.  NOTE(review): start is
                # blk.ngstcell even when consider_ghost is False -- confirm
                # callers never pass consider_ghost=False here.
                arr[slctl] = arrg[slctg]
                dealer[iblk].cmd.push(arr, key, start=blk.ngstcell)
        else:
            if consider_ghost:
                start = ngstcell
            else:
                start = 0
            getattr(cse.solver.solverobj, key)[start:] = arrg[:]
class BlockInfoHook(BlockHook):
def __init__(self, cse, **kw):
"""
If keyword psteps is None, postmarch method will not output performance
information.
"""
self.show_bclist = kw.pop('show_bclist', False)
self.perffn = kw.pop('perffn', None)
super(BlockInfoHook, self).__init__(cse, **kw)
def preloop(self):
blk = self.blk
self.info("Block information:\n %s\n" % str(blk))
if self.show_bclist:
for bc in blk.bclist:
self.info(" %s\n" % bc)
def _show_performance(self):
"""
Show and store performance information.
"""
ncell = self.blk.ncell
time = self.cse.log.time['solver_march']
step_init = self.cse.execution.step_init
step_current = self.cse.execution.step_current
neq = self.cse.execution.neq
| |
"fr_FR": "Colonie libre",
"it_IT": "Colonia libera",
"ja_JP": "自由植民地",
"ko_KR": "자유로운 거주지",
"pl_PL": "Wolna kolonia",
"pt_BR": "Colônia Livre",
"ru_RU": "Бесплатная колония"
},
"SCENARIO_VICTORIA_ABILITY": {
"de_DE": "<NAME>",
"es_ES": "Fiebre del oro victoriana",
"fr_FR": "Ruée vers l'or en Victoria",
"it_IT": "Corsa all'oro vittoriana",
"ja_JP": "ヴィクトリア州のゴールドラッシュ",
"ko_KR": "빅토리아의 골드러시",
"pl_PL": "Wiktoriańska gorączka złota",
"pt_BR": "Corrida do Ouro de Victoria",
"ru_RU": "\"Золотая лихорадка\" Виктории"
},
"SCENARIO_WESTERN_AUSTRALIA_ABILITY": {
"de_DE": "Goldenes Outback",
"es_ES": "Outback de oro",
"fr_FR": "Outback doré",
"it_IT": "Outback dorato",
"ja_JP": "ゴールデンアウトバック",
"ko_KR": "황금빛 아웃백",
"pl_PL": "Złote pustkowie",
"pt_BR": "Outback Dourado",
"ru_RU": "Золотые копи"
},
"SEONDEOK": {
"de_DE": "Seondeok",
"es_ES": "Seondeok",
"fr_FR": "Seondeok",
"it_IT": "Seondeok",
"ja_JP": "ソンドク",
"ko_KR": "선덕",
"pl_PL": "Seondeok",
"pt_BR": "Seondeok",
"ru_RU": "Сондок"
},
"SHAKA": {
"de_DE": "Shaka",
"es_ES": "Shaka",
"fr_FR": "Chaka",
"it_IT": "Shaka",
"ja_JP": "シャカ",
"ko_KR": "샤카",
"pl_PL": "Czaka",
"pt_BR": "Shaka",
"ru_RU": "Чака"
},
"SIMON_BOLIVAR": {
"de_DE": "<NAME>",
"es_ES": "<NAME>",
"fr_FR": "<NAME>",
"it_IT": "<NAME>",
"ja_JP": "シモン・ボリバル",
"ko_KR": "시몬 볼리바르",
"pl_PL": "<NAME>",
"pt_BR": "<NAME>",
"ru_RU": "<NAME>"
},
"SULEIMAN": {
"de_DE": "Süleyman",
"es_ES": "Solimán",
"fr_FR": "Soliman Ier",
"it_IT": "Solimano",
"ja_JP": "スレイマン1世",
"ko_KR": "술레이만",
"pl_PL": "Sulejman",
"pt_BR": "Solimão",
"ru_RU": "Сулейман"
},
"TAMAR": {
"de_DE": "Tamar",
"es_ES": "Tamara",
"fr_FR": "Tamar",
"it_IT": "Tamara",
"ja_JP": "タマル",
"ko_KR": "타마르",
"pl_PL": "Tamara",
"pt_BR": "Tamara",
"ru_RU": "Тамара"
},
"TOMYRIS": {
"de_DE": "Tomyris",
"es_ES": "Tomiris",
"fr_FR": "Tomyris",
"it_IT": "Tomiri",
"ja_JP": "トミュリス",
"ko_KR": "토미리스",
"pl_PL": "Tomyris",
"pt_BR": "Tômiris",
"ru_RU": "Томирис"
},
"TRAJAN": {
"de_DE": "Trajan",
"es_ES": "Trajano",
"fr_FR": "Trajan",
"it_IT": "Traiano",
"ja_JP": "トラヤヌス",
"ko_KR": "트라야누스",
"pl_PL": "Trajan",
"pt_BR": "Trajano",
"ru_RU": "Траян"
},
"T_ROOSEVELT": {
"de_DE": "<NAME>",
"es_ES": "Teddy Roosevelt",
"fr_FR": "Theodore Roosevelt",
"it_IT": "<NAME>",
"ja_JP": "テディ・ルーズベルト",
"ko_KR": "테오도어 루즈벨트",
"pl_PL": "Teddy Roosevelt",
"pt_BR": "Teddy Roosevelt",
"ru_RU": "Теодор Рузвельт"
},
"T_ROOSEVELT_ORIGINAL": {
"de_DE": "<NAME> (Bull Moose)",
"es_ES": "Teddy Roosevelt (Alce)",
"fr_FR": "Theodore Roosevelt (Élan)",
"it_IT": "Teddy Roosevelt (l'Alce)",
"ja_JP": "テディ・ルーズベルト (ブルムース)",
"ko_KR": "테오도어 루즈벨트(불 무스)",
"pl_PL": "Teddy Roosevelt (Łoś)",
"pt_BR": "Teddy Roosevelt (Alce)",
"ru_RU": "Теодор Рузвельт (прогрессивист)"
},
"T_ROOSEVELT_ROUGHRIDER": {
"de_DE": "<NAME> (Rough Rider)",
"es_ES": "<NAME> (Jinete duro)",
"fr_FR": "Theodore Roosevelt (Rough Rider)",
"it_IT": "T<NAME> (Rough Rider)",
"ja_JP": "テディ・ルーズベルト (ラフライダー)",
"ko_KR": "테오도어 루즈벨트(의용 기병대)",
"pl_PL": "Teddy Roosevelt (Rough Rider)",
"pt_BR": "Teddy Roosevelt (Rough Rider)",
"ru_RU": "Теодор Рузвельт (Мужественный всадник)"
},
"VICTORIA": {
"de_DE": "Victoria",
"es_ES": "Victoria",
"fr_FR": "Victoria",
"it_IT": "Vittoria",
"ja_JP": "ヴィクトリア",
"ko_KR": "빅토리아",
"pl_PL": "Wiktoria",
"pt_BR": "Vitória",
"ru_RU": "Виктория"
},
"WARMACHINE_SCENARIO_BELGIUM": {
"de_DE": "<NAME>.",
"es_ES": "<NAME>",
"fr_FR": "Albert Ier",
"it_IT": "re <NAME>",
"ja_JP": "アルベール1世",
"ko_KR": "앨버트 1세",
"pl_PL": "Albert I",
"pt_BR": "<NAME>",
"ru_RU": "<NAME>"
},
"WARMACHINE_SCENARIO_BRITAIN": {
"de_DE": "Britisches Expeditionskorps",
"es_ES": "Fuerza Expedicionaria Británica",
"fr_FR": "Corps expéditionnaire britannique",
"it_IT": "British Expeditionary Force",
"ja_JP": "BEF",
"ko_KR": "영국 원정군",
"pl_PL": "Brytyjski Korpus Ekspedycyjny",
"pt_BR": "Força Expd Britânica",
"ru_RU": "БЭС"
},
"WARMACHINE_SCENARIO_FRANCE": {
"de_DE": "Französisches Kommando",
"es_ES": "Alto mando francés",
"fr_FR": "État-major français",
"it_IT": "Alto comando francese",
"ja_JP": "フランス軍総司令部",
"ko_KR": "프랑스 최고사령부",
"pl_PL": "Najwyższe Dowództwo Francuskie",
"pt_BR": "Alto Comando Francês",
"ru_RU": "Командование Франции"
},
"WARMACHINE_SCENARIO_GERMANY": {
"de_DE": "Deutscher Generalstab",
"es_ES": "Estado Mayor alemán",
"fr_FR": "État-major allemand",
"it_IT": "Comando militare tedesco",
"ja_JP": "ドイツ参謀本部",
"ko_KR": "독일 작전 참모",
"pl_PL": "Niemiecki Sztab Generalny",
"pt_BR": "Estado-Maior Alemão",
"ru_RU": "Германский генштаб"
},
"WARMACHINE_SCENARIO_LUXEMBOURG": {
"de_DE": "Luxemburg",
"es_ES": "Luxemburgo",
"fr_FR": "Luxembourg",
"it_IT": "Lussemburgo",
"ja_JP": "ルクセンブルク",
"ko_KR": "룩셈부르크",
"pl_PL": "Luksemburg",
"pt_BR": "Luxemburgo",
"ru_RU": "Люксембург"
},
"WILHELMINA": {
"de_DE": "Wilhelmina",
"es_ES": "Guillermina",
"fr_FR": "Wilhelmine",
"it_IT": "Guglielmina",
"ja_JP": "ウィルヘルミナ",
"ko_KR": "빌헬미나",
"pl_PL": "Wilhelmina",
"pt_BR": "Guilhermina",
"ru_RU": "Вильгельмина"
}
}
CIV_NAMES = {
"AKKAD": {
"de_DE": "Akkad",
"es_ES": "Acad",
"fr_FR": "Akkad",
"it_IT": "Akkad",
"ja_JP": "アッカド",
"ko_KR": "아카드",
"pl_PL": "Akad",
"pt_BR": "Acádia",
"ru_RU": "Аккад"
},
"AMERICA": {
"de_DE": "Amerika",
"es_ES": "Estados Unidos",
"fr_FR": "Amérique",
"it_IT": "America",
"ja_JP": "アメリカ",
"ko_KR": "미국",
"pl_PL": "Ameryka",
"pt_BR": "América",
"ru_RU": "Америка"
},
"AMSTERDAM": {
"de_DE": "Amsterdam",
"es_ES": "Ámsterdam",
"fr_FR": "Amsterdam",
"it_IT": "Amsterdam",
"ja_JP": "アムステルダム",
"ko_KR": "암스테르담",
"pl_PL": "Amsterdam",
"pt_BR": "Amsterdã",
"ru_RU": "Амстердам"
},
"ANTANANARIVO": {
"de_DE": "Antananarivo",
"es_ES": "Antananarivo",
"fr_FR": "Antananarivo",
"it_IT": "Antananarivo",
"ja_JP": "アンタナナリボ",
"ko_KR": "안타나나리보",
"pl_PL": "Antananarywa",
"pt_BR": "Antananarivo",
"ru_RU": "Антананариву"
},
"ANTIOCH": {
"de_DE": "Venedig",
"es_ES": "Venecia",
"fr_FR": "Venise",
"it_IT": "Venezia",
"ja_JP": "ヴェネツィア",
"ko_KR": "베네치아",
"pl_PL": "Wenecja",
"pt_BR": "Veneza",
"ru_RU": "Венеция"
},
"ARABIA": {
"de_DE": "Arabien",
"es_ES": "Arabia",
"fr_FR": "Arabie",
"it_IT": "Arabia",
"ja_JP": "アラビア",
"ko_KR": "아라비아",
"pl_PL": "Arabia",
"pt_BR": "Arábia",
"ru_RU": "Аравия"
},
"ARMAGH": {
"de_DE": "Armagh",
"es_ES": "Armagh",
"fr_FR": "Armagh",
"it_IT": "Armagh",
"ja_JP": "アーマー",
"ko_KR": "아마",
"pl_PL": "Armagh",
"pt_BR": "Armagh",
"ru_RU": "Арма"
},
"AUCKLAND": {
"de_DE": "Auckland",
"es_ES": "Auckland",
"fr_FR": "Auckland",
"it_IT": "Auckland",
"ja_JP": "オークランド",
"ko_KR": "오클랜드",
"pl_PL": "Auckland",
"pt_BR": "Auckland",
"ru_RU": "Окленд"
},
"AUSTRALIA": {
"de_DE": "Australien",
"es_ES": "Australia",
"fr_FR": "Australie",
"it_IT": "Australia",
"ja_JP": "オーストラリア",
"ko_KR": "호주",
"pl_PL": "Australia",
"pt_BR": "Austrália",
"ru_RU": "Австралия"
},
"AUSTRALIA_SCENARIO_QUEENSLAND": {
"de_DE": "Queensland",
"es_ES": "Queensland",
"fr_FR": "Queensland",
"it_IT": "Queensland",
"ja_JP": "クイーンズランド",
"ko_KR": "퀸즐랜드",
"pl_PL": "Queensland",
"pt_BR": "Queensland",
"ru_RU": "Квинсленд"
},
"AUSTRALIA_SCENARIO_SOUTH_AUSTRALIA": {
"de_DE": "South Australia",
"es_ES": "Australia del Sur",
"fr_FR": "Australie-Méridionale",
"it_IT": "Australia Meridionale",
"ja_JP": "南オーストラリア",
"ko_KR": "사우스오스트레일리아",
"pl_PL": "Australia Południowa",
"pt_BR": "Austrália do Sul",
"ru_RU": "Южная Австралия"
},
"AUSTRALIA_SCENARIO_VICTORIA": {
"de_DE": "Victoria",
"es_ES": "Victoria",
"fr_FR": "Victoria",
"it_IT": "Victoria",
"ja_JP": "ヴィクトリア",
"ko_KR": "빅토리아",
"pl_PL": "Wiktoria",
"pt_BR": "Victoria",
"ru_RU": "Виктория"
},
"AUSTRALIA_SCENARIO_WESTERN_AUSTRALIA": {
"de_DE": "Western Australia",
"es_ES": "Australia Occidental",
"fr_FR": "Australie-Occidentale",
"it_IT": "Australia Occidentale",
"ja_JP": "西オーストラリア",
"ko_KR": "웨스턴오스트레일리아",
"pl_PL": "Australia Zachodnia",
"pt_BR": "Austrália Ocidental",
"ru_RU": "Западная Австралия"
},
"AYUTTHAYA": {
"de_DE": "Ayutthaya",
"es_ES": "Ayutthaya",
"fr_FR": "Ayutthaya",
"it_IT": "Ayutthaya",
"ja_JP": "アユタヤ",
"ko_KR": "아유타야",
"pl_PL": "Ajutthaja",
"pt_BR": "Aiutaia",
"ru_RU": "Аюттхая"
},
"AZTEC": {
"de_DE": "Aztekenland",
"es_ES": "Imp. Azteca",
"fr_FR": "Aztèques",
"it_IT": "Aztechi",
"ja_JP": "アステカ",
"ko_KR": "아즈텍",
"pl_PL": "Aztekowie",
"pt_BR": "Astecas",
"ru_RU": "Ацтеки"
},
"BABYLON": {
"de_DE": "Anschan",
"es_ES": "Anshan",
"fr_FR": "Anshan",
"it_IT": "Anshan",
"ja_JP": "アンシャン",
"ko_KR": "안산",
"pl_PL": "Anszan",
"pt_BR": "Ansam",
"ru_RU": "Аншан"
},
"BABYLON_STK": {
"de_DE": "Babylon",
"es_ES": "Babilonia",
"fr_FR": "Babylone",
"it_IT": "Babilonia",
"ja_JP": "バビロン",
"ko_KR": "바빌론",
"pl_PL": "Babilon",
"pt_BR": "Babilônia",
"ru_RU": "Вавилон"
},
"BANDAR_BRUNEI": {
"de_DE": "Bandar Brunei",
"es_ES": "Bandar Brunéi",
"fr_FR": "Bandar Brunei",
"it_IT": "Bandar Brunei",
"ja_JP": "バンダルブルネイ",
"ko_KR": "반다르 브루나이",
"pl_PL": "Bandar Brunei",
"pt_BR": "Bandar Brunei",
"ru_RU": "Бандар-Бруней"
},
"BARBARIAN": {
"de_DE": "Räuber",
"es_ES": "Bandidos",
"fr_FR": "Pillards",
"it_IT": "Banditi",
"ja_JP": "襲撃者",
"ko_KR": "침략자",
"pl_PL": "Łupieżcy",
"pt_BR": "Saqueadores",
"ru_RU": "Грабители"
},
"BLACKDEATH_SCENARIO_ABERDEEN": {
"de_DE": "Aberdeen",
"es_ES": "Aberdeen",
"fr_FR": "Aberdeen",
"it_IT": "Aberdeen",
"ja_JP": "アバディーン",
"ko_KR": "애버딘",
"pl_PL": "Aberdeen",
"pt_BR": "Aberdeen",
"ru_RU": "Абердин"
},
"BLACKDEATH_SCENARIO_ALBORG": {
"de_DE": "Aalborg",
"es_ES": "Aalborg",
"fr_FR": "Alborg",
"it_IT": "Alborg",
"ja_JP": "オールボー",
"ko_KR": "올보르",
"pl_PL": "Aalborg",
"pt_BR": "Aalborg",
"ru_RU": "Ольборг"
},
"BLACKDEATH_SCENARIO_BARCELONA": {
"de_DE": "Barcelona",
"es_ES": "Barcelona",
"fr_FR": "Barcelone",
"it_IT": "Barcellona",
"ja_JP": "バルセロナ",
"ko_KR": "바르셀로나",
"pl_PL": "Barcelona",
"pt_BR": "Barcelona",
"ru_RU": "Барселона"
},
"BLACKDEATH_SCENARIO_BRAGA": {
"de_DE": "Braga",
"es_ES": "Braga",
"fr_FR": "Braga",
"it_IT": "Braga",
"ja_JP": "ブラガ",
"ko_KR": "브라가",
"pl_PL": "Braga",
"pt_BR": "Braga",
"ru_RU": "Брага"
},
"BLACKDEATH_SCENARIO_CASTILE": {
"de_DE": "Kastilien",
"es_ES": "Castilla",
"fr_FR": "Castille",
"it_IT": "Castiglia",
"ja_JP": "カスティーリャ",
"ko_KR": "카스티야",
"pl_PL": "Kastylia",
"pt_BR": "Castela",
"ru_RU": "Кастилия"
},
"BLACKDEATH_SCENARIO_CASTILE_ABILITY": {
"de_DE": "Schwert des Glaubens",
"es_ES": "Espada de la Fe",
"fr_FR": "Épée de la foi",
"it_IT": "Spada della fede",
"ja_JP": "信仰の剣",
"ko_KR": "신앙의 검",
"pl_PL": "<NAME>",
"pt_BR": "Espada da Fé",
"ru_RU": "Меч веры"
},
"BLACKDEATH_SCENARIO_CITY_STATE": {
"de_DE": "Stadtstaat-Anführer",
"es_ES": "Líder de la ciudad-estado",
"fr_FR": "Dirigeant de cité-état",
"it_IT": "leader della città-stato",
"ja_JP": "都市国家の指導者",
"ko_KR": "도시 국가 지도자",
"pl_PL": "Przywódca miasta-państwa",
"pt_BR": "Líder da cidade-estado",
"ru_RU": "Правитель города-государства"
},
"BLACKDEATH_SCENARIO_COPENHAGEN": {
"de_DE": "Kopenhagen",
"es_ES": "Copenhague",
"fr_FR": "Copenhague",
"it_IT": "Copenhagen",
"ja_JP": "コペンハーゲン",
"ko_KR": "코펜하겐",
"pl_PL": "Kopenhaga",
"pt_BR": "Copenhague",
"ru_RU": "Копенгаген"
},
"BLACKDEATH_SCENARIO_CORK": {
"de_DE": "Cork",
"es_ES": "Cork",
"fr_FR": "Cork",
"it_IT": "Cork",
"ja_JP": "コーク",
"ko_KR": "코크",
"pl_PL": "Cork",
"pt_BR": "Cork",
"ru_RU": "Корк"
},
"BLACKDEATH_SCENARIO_DUBROVNIK": {
"de_DE": "Dubrovnik",
"es_ES": "Dubrovnik",
"fr_FR": "Dubrovnik",
"it_IT": "Dubrovnik",
"ja_JP": "ドゥブロヴニク",
"ko_KR": "두브로브니크",
"pl_PL": "Dubrownik",
"pt_BR": "Dubrovnik",
"ru_RU": "Дубровник"
},
"BLACKDEATH_SCENARIO_EDINBURGH": {
"de_DE": "Edinburgh",
"es_ES": "Edimburgo",
"fr_FR": "Édimbourg",
"it_IT": "Edimburgo",
"ja_JP": "エディンバラ",
"ko_KR": "에든버러",
"pl_PL": "Edynburg",
"pt_BR": "Edimburgo",
"ru_RU": "Эдинбург"
},
"BLACKDEATH_SCENARIO_ENGLAND": {
"de_DE": "England",
"es_ES": "Inglaterra",
"fr_FR": "Angleterre",
"it_IT": "Inghilterra",
"ja_JP": "イングランド",
"ko_KR": "잉글랜드",
"pl_PL": "Anglia",
"pt_BR": "Inglaterra",
"ru_RU": "Англия"
},
"BLACKDEATH_SCENARIO_ENGLAND_ABILITY": {
"de_DE": "Arbeitszwang",
"es_ES": "Trabajo forzado",
"fr_FR": "Travail forcé",
"it_IT": "Coercizione dei lavoratori",
"ja_JP": "労働強要",
"ko_KR": "노동 억압",
"pl_PL": "Przymus pracy",
"pt_BR": "Coerção de Trabalho",
"ru_RU": "Принудительные работы"
},
"BLACKDEATH_SCENARIO_FARO": {
"de_DE": "Faro",
"es_ES": "Faro",
"fr_FR": "Faro",
"it_IT": "Faro",
"ja_JP": "ファロ",
"ko_KR": "파로",
"pl_PL": "Faro",
| |
<gh_stars>1-10
#%%
#import importlib.util
#spec = importlib.util.spec_from_file_location("medical_ML.py", 'C:\\Users\\Andrew\\Documents\\Stanford\\medical\\medicalML_git\\medicalML')
#foo = importlib.util.module_from_spec(spec)
#spec.loader.exec_module(foo)
from medical_ML import Experiment, split_cohort#, test
import pandas as pd
import os
#%%
def plot_ROCs_with_baseline(RESULT_DIR,
                models, label,
                test_models,
                baseline_probs = None,
                test_baseline_probs = None,
                baseline_str = 'baseline',
                baseline_type = 'probability',
                title = 'ROC for discharge prediction on test data',
                pr_title = "Precision-recall curve on test data"):
    """Plot test-set ROC/PR curves for trained models against a baseline.

    Stored results are read from, and plots written under, RESULT_DIR.
    `models` and `baseline_probs` are accepted for interface compatibility
    but are not used by this function.
    """
    experiment = Experiment(datafile = None,
                            result_dir = RESULT_DIR,
                            label = label)
    experiment.save_and_plot_test_results(test_models,
                                          cv = 5,
                                          baseline_str = baseline_str,
                                          baseline_type = baseline_type,
                                          test_baseline_prob_file = test_baseline_probs,
                                          title = title,
                                          pr_title = pr_title)
def plot_val_ROCs(RESULT_DIR,
                  models, label,
                  baseline_probs = None,
                  baseline_str = 'baseline',
                  baseline_type = 'probability',
                  title = 'ROC for discharge prediction on test data'
                  ):
    """Plot cross-validation ROC curves for the given models.

    Stored results are read from RESULT_DIR.  `baseline_type` is accepted
    for interface compatibility but is not forwarded.
    """
    experiment = Experiment(datafile = None,
                            result_dir = RESULT_DIR,
                            label = label)
    experiment.save_and_plot_results(models,
                                     cv = 5,
                                     train = False,
                                     test = False,
                                     baseline_str = baseline_str,
                                     baseline_prob_file = baseline_probs,
                                     title = title)
def plot_ROCs(RESULT_DIR,
              models, label,
              test_models,
              title = 'ROC for discharge prediction on test data',
              pr_title = "Precision-recall curve on test data"):
    """Plot test-set ROC and precision-recall curves without a baseline.

    `models` is accepted for interface compatibility but unused here.
    """
    experiment = Experiment(datafile = None,
                            result_dir = RESULT_DIR,
                            label = label)
    experiment.save_and_plot_test_results(test_models,
                                          cv = 5,
                                          title = title,
                                          pr_title = pr_title)
def test_on_new_cohort(R2, expt, test_data, to_exclude = None, test_ind_col = None, models = None, baseline_str = None,
test_baseline_prob_file = None,
title = 'ROC for discharge prediction on test data'):
if not os.path.isdir(R2): os.mkdir(R2)
# _, test_data = split_cohort(alldata, to_exclude, test_ind_col, drop = 'all')
if test_ind_col is not None:
expt.test_data = test_data[test_data[test_ind_col] == 1]
else:
expt.test_data = test_data
expt.predict_on_test(models, test_file = None,
out_dir = R2)
# to_exclude['pce_invalid_vars'] = True
# ascvd_train_est2, ascvd_test_est2 = split_cohort(ascvd_est, to_exclude, test_ind_col, drop = 'all')
expt.save_and_plot_test_results(models,
cv = 5,
out_dir = R2,
test_baseline_prob_file = test_baseline_prob_file,
baseline_str = baseline_str,
title = title)
def train_val(RESULT_DIR, alldata, models, label = 'Label',
              cv = 5,
              score_name = "AUC",
              to_exclude = None,
              test_ind_col = None, oversample_rate = 1,
              imputer = 'iterative', add_missing_flags = True,
              baseline_str = None,
              baseline_prob_file = None,
              title = 'ROC for discharge prediction on validation data'):
    """Build an Experiment on `alldata` and run cross-validated training.

    Returns the fitted Experiment so callers can evaluate it further.
    `baseline_str`, `baseline_prob_file` and `title` are accepted for
    interface compatibility; plotting is left to the caller.
    """
    print('\n\nSTARTING EXPERIMENT FOR ' + RESULT_DIR + '\n\n')
    experiment = Experiment(alldata, label = label,
                            to_exclude = to_exclude,
                            test_ind_col = test_ind_col, drop = 'all',
                            result_dir = RESULT_DIR)
    experiment.predict_models_from_groups(0, models, cv=cv,
                                          score_name=score_name,
                                          mode='classification',
                                          oversample_rate = oversample_rate,
                                          imputer = imputer,
                                          add_missing_flags = add_missing_flags)
    return experiment
# --------------------------------------------------------------------------
# Script section: plot ROC curves for the secondary-prevention (prior CVD)
# cohort, comparing machine-learning models against the PCE baseline.
# --------------------------------------------------------------------------
models = ['baseline']
# Models evaluated on the held-out test set; the PCE baseline is appended
# below so it is drawn alongside the learned models.
t_models = ['logreg', 'lasso', 'rf', 'gbm', 'xgb']

RESULT_DIR = '../../heart_disease/Results/prior_cvd_pts/allvar_ascvd_cvd_withandwithoutstatin_allage_1101'
cht = 'secondary prevention patients'
t_models.append('baseline')

label = 'ascvdany5y'
test_ind_col = 'test_ind'
# Cohort filter flags consumed by split_cohort.
to_exclude = {
        'pce_cohort': True,
        'pce_invalid_vars': True,
        'cvd_bl': False,
        'antilpd': False,
        'oldyoung': False}
datafile = 'ascvd_est.csv'
ascvd_est = pd.read_csv('../../heart_disease/Data/cohort/' + datafile)
train_est2, test_est2 = split_cohort(ascvd_est, to_exclude, test_ind_col, drop = 'none')
#%%
# Restrict the test set to patients with baseline cardiovascular disease.
test_est2 = test_est2[test_est2.cvd_bl == True]
#%%
# PCE estimates serve as the baseline probabilities (columns: y, est_prob).
b_probs = test_est2[['ascvdany5y', 'ascvd5yest']]
b_probs.columns = ['y', 'est_prob']
b_probs_tr = train_est2[['ascvdany5y', 'ascvd5yest']]
b_probs_tr.columns = ['y', 'est_prob']
baseline_type = 'probability'
baseline_str = 'PCE'
plot_ROCs_with_baseline(RESULT_DIR + '',
                        models = models,
                        label = label,
                        test_baseline_probs = b_probs,
                        baseline_str = baseline_str,
                        baseline_type = baseline_type,
                        title = f"Machine learning and PCE performance for secondary ASCVD risk prediction:\nReceiver operating characteristic curve",
                        test_models = t_models)
#%%
plot_val_ROCs(RESULT_DIR + '',
              models = t_models,
              label = label,
              baseline_probs = b_probs_tr,
              baseline_str = baseline_str,
              title = f"ROC on cross-validation data for {cht}")
#%%
#
#models = [
# 'logreg'
# ,
# 'lasso2'
# ,
# 'rf'
# ,
# 'gbm'
# # ,
# # 'svm'
# # ,
# # 'xgb'
# ]
#label = 'Label'
#run_type = '3pm'
#RESULT_DIR = "../Results/full_run_defaults3_{}".format(run_type)
#
## imputer = 'simple'
## add_missing_flags = False
## alldata = pd.read_csv("../Data/train_dat_{}.csv".format(run_type))
#
## expt = train_val(RESULT_DIR = RESULT_DIR, alldata = alldata,
## models = ['dummy'],
## to_exclude = None,
## test_ind_col = None, label = label,
## imputer = imputer, add_missing_flags = add_missing_flags)
#
#test_file = 'test1'
#test_data = pd.read_csv("../Data/{}_dat_{}.csv".format(test_file, run_type))
## test_on_new_cohort(RESULT_DIR + '/test1', expt, test_data = test_data,
## models = models,
## to_exclude = None,
## test_ind_col = None,
## title = 'ROC on data from held-out facility')
## plot_ROCs(RESULT_DIR + '/test1',
## models = models,
## label = label,
## title = "ROC for discharge prediction at {}".format(run_type.upper()),
## pr_title = "Precision-recall curve for {} prediction".format(run_type.upper()),
## test_models = [
## 'logreg'
## ,
## 'lasso2'
## ,
## 'rf'
## ,
## 'gbm'
## # ,
## # 'svm'
## # ,
## # 'xgb'
## ])
#
#
## excluding all with missing predictions
## test_metadata = pd.read_csv("../Data/{}_dat_{}times.csv".format(test_file, run_type))
## test_data = test_data[~test_metadata.hours_to_disch_exp.isna()]
## test_metadata = test_metadata[~test_metadata.hours_to_disch_exp.isna()]
## newcols = test_metadata.columns.tolist()
## def ff(x):
## if x == 'Label':
## return("y")
## if x == "exp_disch_label":
## return("est_prob")
## return x
## test_metadata.columns = [ff(x) for x in newcols]
## if run_type == '3pm':
## test_metadata.est_prob = (test_metadata.hours_to_disch_exp < 26.2)
## if run_type == '3am':
## test_metadata.est_prob = (test_metadata.hours_to_disch_exp < 14.2)
#
#
## # test_on_new_cohort(RESULT_DIR + '/test1_withpred', expt, test_data = test_data,
## # models = models,
## # to_exclude = None,
## # test_ind_col = None,
## # title = 'ROC on data from held-out facility')
#
## plot_ROCs_with_baseline(RESULT_DIR + '/test1_withpred',
## models = models,
## label = label,
## test_baseline_probs = test_metadata,
## baseline_str = "Bedside prediction",
## baseline_type = 'binary',
## title = "ROC for patients with in-hospital predictions ({})".format(run_type.upper()),
## pr_title = "Precision-recall curve for patients with in-hospital predictions ({})".format(run_type.upper()),
## test_models = [
## 'logreg'
## ,
## 'lasso2'
## ,
## 'rf'
## ,
## 'gbm',
## 'baseline'
## # ,
## # 'svm'
## # ,
## # 'xgb'
## ])
#
#
## treating all with missing predictions as "will be discharged"
#test_metadata = pd.read_csv("../Data/{}_dat_{}times.csv".format(test_file, run_type))
## test_data = test_data[~test_metadata.hours_to_disch_exp.isna()]
## test_metadata = test_metadata[~test_metadata.hours_to_disch_exp.isna()]
#test_metadata[test_metadata.hours_to_disch_exp.isna()] = 0
#newcols = test_metadata.columns.tolist()
#def ff(x):
# if x == 'Label':
# return("y")
# if x == "exp_disch_label":
# return("est_prob")
# return x
#test_metadata.columns = [ff(x) for x in newcols]
#if run_type == '3pm':
# test_metadata.est_prob = (test_metadata.hours_to_disch_exp < 26.2)
#if run_type == '3am':
# test_metadata.est_prob = (test_metadata.hours_to_disch_exp < 14.2)
#
#
## test_on_new_cohort(RESULT_DIR + '/test1_withpred', expt, test_data = test_data,
## models = models,
## to_exclude = None,
## test_ind_col = None,
## title = 'ROC on data from held-out facility')
#
#plot_ROCs_with_baseline(RESULT_DIR + '/test1',
# models = models,
# label = label,
# test_baseline_probs = test_metadata,
# baseline_str = "Bedside prediction (missing = discharge)",
# baseline_type = 'binary',
# title = "ROC for patients with in-hospital predictions ({})".format(run_type.upper()),
# pr_title = "Precision-recall curve for patients with in-hospital predictions ({})".format(run_type.upper()),
# test_models = [
# 'logreg'
# ,
# 'lasso2'
# ,
# 'rf'
# ,
# 'gbm',
# 'baseline'
# # ,
# # 'svm'
# # ,
# # 'xgb'
# ])
#
#
## treating all with missing predictions as "wont be discharged"
#test_metadata = pd.read_csv("../Data/{}_dat_{}times.csv".format(test_file, run_type))
## test_data = test_data[~test_metadata.hours_to_disch_exp.isna()]
## test_metadata = test_metadata[~test_metadata.hours_to_disch_exp.isna()]
#test_metadata[test_metadata.hours_to_disch_exp.isna()] = 1000
#newcols = test_metadata.columns.tolist()
#def ff(x):
# if x == 'Label':
# return("y")
# if x == "exp_disch_label":
# return("est_prob")
# return x
#test_metadata.columns = [ff(x) for x in newcols]
#if run_type == '3pm':
# test_metadata.est_prob = (test_metadata.hours_to_disch_exp < 26.2)
#if run_type == '3am':
# test_metadata.est_prob = (test_metadata.hours_to_disch_exp < 14.2)
#
#
## test_on_new_cohort(RESULT_DIR + '/test1_withpred', expt, test_data = test_data,
## models = models,
## | |
= os.listdir(file_path)
shutil.copy(file_path + '/daylight_detector_side.png', b + '/textures/blocks')
if 'daylight_detector_side.png' in file_names:
os.rename(os.path.join(b + '/textures/blocks', 'daylight_detector_side.png'),
os.path.join(b + '/textures/blocks', str('daylightdetector_side.png')))
except:
pass
# Convert 1.6+ block textures to their legacy (pre-1.5) names, driven by a
# (new_name, legacy_name) table instead of one hand-written try-block per
# texture.  For each entry the texture is copied from the source pack `a`
# into the output pack `b` and, when present in the source directory,
# renamed to its legacy file name (identity pairs effectively only copy).
# Missing files/directories are skipped so conversion stays best-effort,
# exactly like the original per-texture try/except blocks.
# NOTE(review): 'drangonEgg.png' looks like a typo for 'dragonEgg.png' but is
# kept byte-for-byte in case downstream tooling expects it — confirm.
_LEGACY_BLOCK_RENAMES = [
    ('daylight_detector_top.png', 'daylightdetector_top.png'),
    ('destroy_stage_0.png', 'destroy_0.png'),
    ('destroy_stage_1.png', 'destroy_1.png'),
    ('destroy_stage_2.png', 'destroy_2.png'),
    ('destroy_stage_3.png', 'destroy_3.png'),
    ('destroy_stage_4.png', 'destroy_4.png'),
    ('destroy_stage_5.png', 'destroy_5.png'),
    ('destroy_stage_6.png', 'destroy_6.png'),
    ('destroy_stage_7.png', 'destroy_7.png'),
    ('destroy_stage_8.png', 'destroy_8.png'),
    ('destroy_stage_9.png', 'destroy_9.png'),
    ('rail_detector.png', 'detectorRail.png'),
    ('rail_detector_powered.png', 'detectorRail_on.png'),
    ('dirt.png', 'dirt.png'),
    ('dispenser_front_horizontal.png', 'dispenser_front.png'),
    ('dispenser_front_vertical.png', 'dispenser_front_vertical.png'),
    ('door_iron_lower.png', 'doorIron_lower.png'),
    ('door_iron_upper.png', 'doorIron_upper.png'),
    ('door_wood_lower.png', 'doorWood_lower.png'),
    ('door_wood_upper.png', 'doorWood_upper.png'),
    ('dragon_egg.png', 'drangonEgg.png'),
    ('dropper_front_horizontal.png', 'dropper_front.png'),
    ('dropper_front_vertical.png', 'dropper_front_vertical.png'),
    ('enchanting_table_bottom.png', 'enchantment_bottom.png'),
    ('enchanting_table_side.png', 'enchantment_side.png'),
    ('enchanting_table_top.png', 'enchantment_top.png'),
    ('endframe_eye.png', 'endframe_eye.png'),
    ('endframe_side.png', 'endframe_side.png'),
    ('endframe_top.png', 'endframe_top.png'),
    ('farmland_dry.png', 'farmland_dry.png'),
    ('farmland_wet.png', 'farmland_wet.png'),
    ('iron_bars.png', 'fenceIron.png'),
    ('fern.png', 'fern.png'),
    ('fire_layer_0.png', 'fire_0.png'),
    ('fire_layer_1.png', 'fire_1.png'),
    ('flower_dandelion.png', 'flower.png'),
    ('flower_pot.png', 'flowerpot.png'),
    ('furnace_front_off.png', 'furnace_front.png'),
    ('furnace_front_on.png', 'furnace_front_lit.png'),
    ('furnace_side.png', 'furnace_side.png'),
    ('furnace_top.png', 'furnace_top.png'),
    ('glass.png', 'glass.png'),
    ('rail_golden.png', 'goldenRail.png'),
]
for _new_name, _legacy_name in _LEGACY_BLOCK_RENAMES:
    try:
        file_path = a + '/assets/minecraft/textures/blocks'
        file_names = os.listdir(file_path)
        shutil.copy(file_path + '/' + _new_name, b + '/textures/blocks')
        if _new_name in file_names:
            os.rename(os.path.join(b + '/textures/blocks', _new_name),
                      os.path.join(b + '/textures/blocks', _legacy_name))
    except OSError:
        # Source texture or directory missing — skip this texture.
        pass
try:
file_path = a + '/assets/minecraft/textures/blocks'
file_names = os.listdir(file_path)
shutil.copy(file_path + | |
mask = np.ones(dims,dtype=np.int32)
equil = {"time":0.0,"data_source":os.path.abspath(filename), "mask":mask,
"br":br,"bt":bt,"bz":bz,"er":er,"et":et,"ez":ez}
return equil, rhogrid, btipsign
def read_ncdf(filename, vars=None):
    '''
    #+#read_ncdf
    #+Reads a flat NetCDF file
    #+***
    #+##Arguments
    #+    **filename**: NetCDF file
    #+
    #+##Keyword Arguments
    #+    **vars**: List of variables to read (default: read all variables)
    #+
    #+##Return Value
    #+Structure containing NetCDF variables; ``d['err']`` is 0 on success and
    #+1 when the file does not exist.
    #+
    #+##Example Usage
    #+```python
    #+>>> a = read_ncdf("./123324H01_fi_1.cdf")
    #+```
    '''
    d = dict()
    d['err'] = 1
    if os.path.isfile(filename):
        d['err'] = 0
        f = netcdf.netcdf_file(filename, 'r', mmap=False)
        variables = f.variables
        if vars is not None:  # was `vars != None`; identity test is the idiom
            for k in vars:
                # NOTE(review): lookup is case sensitive — a requested
                # variable with the wrong case is silently skipped.
                if k in variables:
                    v = variables[k]
                    if v.shape == tuple():
                        # Scalar variable: getValue() extracts the value.
                        d[k] = v.getValue()
                    else:
                        d[k] = v[:]
        else:
            for k, v in variables.items():
                if v.shape == tuple():
                    d[k] = v.getValue()
                else:
                    d[k] = v[:]
        f.close()
    else:
        error('FILE DOES NOT EXIST: '+filename)
    return d
def extract_transp_plasma(filename, intime, grid, rhogrid,
                          dn0out=None, scrapeoff=None, rho_scrapeoff=0.1):
    '''
    #+#extract_transp_plasma
    #+Extracts `plasma` structure from a TRANSP run
    #+***
    #+##Arguments
    #+    **filename**: TRANSP output file e.g. [TRANSP_RUNID].CDF
    #+
    #+    **intime**: Time of interest [s]
    #+
    #+    **grid**: Interpolation grid
    #+
    #+    **rhogrid**: sqrt(normalized torodial flux) mapped onto the interpolation grid
    #+
    #+##Keyword Arguments
    #+    **dn0out**: Wall Neutral density value `dn0out` variable in transp namelist
    #+
    #+    **scrapeoff**: scrapeoff decay length
    #+
    #+    **rho_scrapeoff**: scrapeoff length, default = 0.1
    #+
    #+##Example Usage
    #+```python
    #+>>> plasma = extract_transp_plasma("./142332H01.CDF", 1.2, grid, rho)
    #+```
    '''
    var_list = ["X","TRFLX","TFLUX","TIME","NE","NH","ND","NT","NIMP","TE","TI","ZEFFI","OMEGA","DN0WD","XZIMP"]
    zz = read_ncdf(filename, vars=var_list)

    # Pick the timeslice closest to the requested time of interest.
    t = zz['TIME']
    idx = np.argmin(abs(t-intime))
    time = t[idx].astype('float64')
    print(' * Selecting profiles at :', time, ' s')

    impurity_charge = np.max(zz["XZIMP"]).astype("int16")
    transp_ne = zz['NE'][idx,:]     #cm^-3
    transp_nimp = zz['NIMP'][idx,:] #cm^-3
    transp_nn = zz['DN0WD'][idx,:]  #cm^-3
    # Hydrogenic species may be absent from the run; default to zero density.
    if 'NH' in zz:
        transp_nh = zz['NH'][idx,:] #cm^-3
    else:
        transp_nh = 0*transp_ne
    if 'ND' in zz:
        transp_nd = zz['ND'][idx,:] #cm^-3
    else:
        transp_nd = 0*transp_ne
    if 'NT' in zz:
        transp_nt = zz['NT'][idx,:] #cm^-3
    else:
        transp_nt = 0*transp_ne
    transp_te = zz['TE'][idx,:]*1.e-3  # keV
    transp_ti = zz['TI'][idx,:]*1.e-3  # keV
    transp_zeff = zz['ZEFFI'][idx,:]
    rho_cb = np.sqrt(zz['TRFLX'][idx,:]/zz['TFLUX'][idx])
    # Toroidal flux is given at cell boundaries; shift each rho to the cell
    # center: rho[i+1] = rho_cb[i+1] - 0.5*(rho_cb[i+1]-rho_cb[i]) == midpoint.
    rho = 0.e0*rho_cb
    rho[0] = 0.5*rho_cb[0]
    rho[1:] = 0.5*(rho_cb[1:] + rho_cb[:-1])

    if 'OMEGA' not in zz:
        error('OMEGA not found in TRANSP file. Assuming no plasma rotation')
        transp_omega = 0.0*transp_te
    else:
        transp_omega = zz['OMEGA'][idx,:]  # rad/s

    if dn0out is None:
        dn0out = transp_nn[-1]
    if scrapeoff is None:
        scrapeoff = 0.0

    if scrapeoff > 0.0:
        # Extend profiles past the last rho point with an exponential decay of
        # length `scrapeoff`, out to rho_scrapeoff beyond the plasma edge.
        drho = abs(rho[-1] - rho[-2])
        # BUGFIX: was `range(np.ceil(...)) + 1`, which raises TypeError
        # (range() needs an int and a range does not support `+ 1`);
        # np.arange builds the intended 1..n multiplier array.
        rho_sc = rho[-1] + drho*(np.arange(int(np.ceil(rho_scrapeoff/drho))) + 1)
        sc = np.exp(-(rho_sc - rho[-1])/scrapeoff)
        transp_ne = np.append(transp_ne,transp_ne[-1]*sc)
        transp_nimp = np.append(transp_nimp,transp_nimp[-1]*sc)
        transp_nh = np.append(transp_nh,transp_nh[-1]*sc)
        transp_nd = np.append(transp_nd,transp_nd[-1]*sc)
        transp_nt = np.append(transp_nt,transp_nt[-1]*sc)
        transp_te = np.append(transp_te,transp_te[-1]*sc)
        transp_ti = np.append(transp_ti,transp_ti[-1]*sc)
        # Neutral density decays to the wall value dn0out, Zeff to 1.
        transp_nn = np.append(transp_nn,0*sc + dn0out)
        transp_zeff = np.append(transp_zeff, (transp_zeff[-1]-1)*sc + 1)
        transp_omega = np.append(transp_omega,transp_omega[-1]*sc)
        rho = np.append(rho, rho_sc)

    # 1D profiles, clipped to physical ranges (densities/temps >= 0, zeff >= 1).
    profiles = {"rho":rho,
                "dene":np.where(transp_ne > 0, transp_ne, 0.0),
                "denimp":np.where(transp_nimp > 0, transp_nimp, 0.0),
                "denn":np.where(transp_nn > 0, transp_nn, 0.0),
                "te":np.where(transp_te > 0, transp_te, 0.0),
                "ti":np.where(transp_ti > 0, transp_ti, 0.0),
                "zeff":np.where(transp_zeff > 1.0, transp_zeff, 1.0),
                "omega":transp_omega}
    if 'NH' in zz:
        profiles['denh'] = np.where(transp_nh > 0, transp_nh, 0.0)
    if 'ND' in zz:
        profiles['dend'] = np.where(transp_nd > 0, transp_nd, 0.0)
    if 'NT' in zz:
        profiles['dent'] = np.where(transp_nt > 0, transp_nt, 0.0)

    # Interpolate onto the r-z grid (linear in rho, clipped below at 0/1).
    dims = rhogrid.shape
    f_dene = interp1d(rho,transp_ne,fill_value='extrapolate')
    dene = f_dene(rhogrid)
    dene = np.where(dene > 0.0, dene, 0.0).astype('float64')

    f_denimp = interp1d(rho,transp_nimp,fill_value='extrapolate')
    denimp = f_denimp(rhogrid)
    denimp = np.where(denimp > 0.0, denimp, 0.0).astype('float64')

    f_denh = interp1d(rho,transp_nh,fill_value='extrapolate')
    denh = f_denh(rhogrid)
    denh = np.where(denh > 0.0, denh, 0.0).astype('float64')

    f_dend = interp1d(rho,transp_nd,fill_value='extrapolate')
    dend = f_dend(rhogrid)
    dend = np.where(dend > 0.0, dend, 0.0).astype('float64')

    f_dent = interp1d(rho,transp_nt,fill_value='extrapolate')
    dent = f_dent(rhogrid)
    dent = np.where(dent > 0.0, dent, 0.0).astype('float64')

    # Neutral density is interpolated in log space; points outside the
    # profile range become NaN and are mapped to zero density.
    f_denn = interp1d(rho,np.log(transp_nn),fill_value=np.nan,bounds_error=False)
    log_denn = f_denn(rhogrid)
    denn = np.where(~np.isnan(log_denn), np.exp(log_denn), 0.0).astype('float64')

    f_te = interp1d(rho,transp_te,fill_value='extrapolate')
    te = f_te(rhogrid)
    te = np.where(te > 0, te, 0.0).astype('float64')

    f_ti = interp1d(rho,transp_ti,fill_value='extrapolate')
    ti = f_ti(rhogrid)
    ti = np.where(ti > 0, ti, 0.0).astype('float64')

    f_zeff = interp1d(rho,transp_zeff, fill_value=1.0, bounds_error=False)
    zeff = f_zeff(rhogrid)
    zeff = np.where(zeff > 1, zeff, 1.0).astype('float64')

    # Rotation: toroidal velocity = R * angular frequency; no poloidal flow.
    f_omega = interp1d(rho,transp_omega,fill_value='extrapolate')
    vt = grid['r2d']*f_omega(rhogrid).astype('float64')
    vr = np.zeros(dims,dtype='float64')
    vz = np.zeros(dims,dtype='float64')

    # Mask marks grid cells covered by the (possibly extended) profiles.
    max_rho = max(abs(rho))
    mask = np.zeros(dims,dtype='int')
    w = np.where(rhogrid <= max_rho)
    mask[w] = 1

    # Stack thermal-ion densities (H, D, T) and keep only species present.
    deni = np.concatenate((denh.reshape(1,dims[0],dims[1]),
                           dend.reshape(1,dims[0],dims[1]),
                           dent.reshape(1,dims[0],dims[1])),axis=0)
    ai = np.array([1.007276466879e0, 2.013553212745e0,3.01550071632e0])  # amu
    w_ai = [a in zz for a in ['NH','ND','NT']]

    # SAVE IN PROFILES STRUCTURE
    plasma={"data_source":os.path.abspath(filename),"time":time,"impurity_charge":int(impurity_charge),
            "nthermal":int(np.sum(w_ai)), "species_mass":ai[w_ai], "deni":deni[w_ai,:,:],"profiles":profiles,
            "mask":mask,"dene":dene,"denimp":denimp,"denn":denn,"te":te,"ti":ti,
            "vr":vr,"vt":vt,"vz":vz,"zeff":zeff}

    return plasma
def read_nubeam(filename, grid, e_range=(), p_range=(), btipsign=-1, species=1):
    """
    #+#read_nubeam
    #+Reads NUBEAM fast-ion distribution function
    #+***
    #+##Arguments
    #+    **filename**: NUBEAM guiding center fast-ion distribution function file e.g. 159245H01_fi_1.cdf
    #+
    #+    **grid**: Interpolation grid
    #+
    #+##Keyword Arguments
    #+    **btipsign**: Sign of the dot product of the magnetic field and plasma current
    #+
    #+    **e_range**: Energy range to consider
    #+
    #+    **p_range**: Pitch range to consider
    #+
    #+    **species**: Fast-ion species number. Defaults to 1
    #+
    #+##Return Value
    #+Distribution structure
    #+
    #+##Example Usage
    #+```python
    #+>>> dist = read_nubeam("./159245H02_fi_1.cdf",grid,btipsign=-1)
    #+```
    """
    species_var = "SPECIES_{}".format(species)
    # tobytes() replaces the tostring() alias that was removed in NumPy 2.0.
    sstr = read_ncdf(filename,vars=[species_var])[species_var].tobytes().decode('UTF-8')
    print("Species: "+sstr)
    var = read_ncdf(filename, vars=["TIME","R2D","Z2D","E_"+sstr,"A_"+sstr,"F_"+sstr,"RSURF","ZSURF","BMVOL"])

    ngrid = len(var["R2D"])
    # TIME may be stored as a length-1 array or as a bare scalar.
    try:
        time = var["TIME"][0]
    except (IndexError, TypeError):
        time = var["TIME"]
    r2d = var["R2D"]
    z2d = var["Z2D"]
    rsurf = var["RSURF"].T
    zsurf = var["ZSURF"].T
    bmvol = var["BMVOL"]
    pitch = var["A_"+sstr]
    energy = var["E_"+sstr]*1e-3
    # Factor 0.5 converts from d_omega/4pi (solid angle) to pitch.
    fbm = var["F_"+sstr].T*1e3
    fbm = np.where(fbm > 0.0, 0.5*fbm, 0.0)

    if btipsign < 0:
        fbm = fbm[:,::-1,:]  # reverse pitch elements

    if not e_range:
        e_range = (np.min(energy), np.max(energy))
    if not p_range:
        p_range = (np.min(pitch), np.max(pitch))

    # Trim distribution according to e/p_range
    we = np.logical_and(energy >= e_range[0], energy <= e_range[1])
    wp = np.logical_and(pitch >= p_range[0], pitch <= p_range[1])
    energy = energy[we]
    nenergy = len(energy)
    pitch = pitch[wp]
    npitch = len(pitch)
    fbm = fbm[we,:,:]
    fbm = fbm[:,wp,:]
    dE = np.abs(energy[1] - energy[0])
    dp = np.abs(pitch[1] - pitch[0])
    emin, emax = np.maximum(np.min(energy) - 0.5*dE, 0.0), np.max(energy) + 0.5*dE
    pmin, pmax = np.maximum(np.min(pitch) - 0.5*dp, -1.0), np.minimum(np.max(pitch)+0.5*dp, 1.0)

    print('Energy min/max: ', emin, emax)
    print('Pitch  min/max: ',pmin, pmax)

    nr = grid["nr"]
    nz = grid["nz"]
    r = grid["r"]
    z = grid["z"]
    rgrid = grid["r2d"]
    zgrid = grid["z2d"]
    dr = np.abs(r[1] - r[0])
    dz = np.abs(z[1] - z[0])

    fdens = np.sum(fbm,axis=(0,1))*dE*dp
    ntot = np.sum(fdens*bmvol)
    print('Ntotal in phase space: ',ntot)

    # Barycentric interpolation on the NUBEAM (R,Z) scatter; points outside
    # all simplices fall back to nearest-neighbor lookup.
    tri = Delaunay(np.vstack((r2d,z2d)).T)
    pts = np.array([xx for xx in zip(r2d,z2d)])
    itp = NearestNDInterpolator(pts,np.arange(ngrid))
    points = np.array([xx for xx in zip(rgrid.flatten(),zgrid.flatten())])
    t = tri.find_simplex(points)

    denf = np.zeros((nr,nz))
    fbm_grid = np.zeros((nenergy,npitch,nr,nz))
    for (ind,tt) in enumerate(t):
        i,j = np.unravel_index(ind,(nr,nz))
        if tt == -1:
            # Outside the triangulation: use the nearest NUBEAM cell.
            ii = int(itp(r[i],z[j]))
            denf[i,j] = fdens[ii]
            fbm_grid[:,:,i,j] = fbm[:,:,ii]
        else:
            # Perform barycentric linear interpolation.
            b = tri.transform[tt,:2].dot(np.transpose(points[ind] - tri.transform[tt,2]))
            s = tri.simplices[tt,:]
            denf[i,j] = b[0]*fdens[s[0]] + b[1]*fdens[s[1]] + (1 - np.sum(b))*fdens[s[2]]
            fbm_grid[:,:,i,j] = b[0]*fbm[:,:,s[0]] + b[1]*fbm[:,:,s[1]] + (1-np.sum(b))*fbm[:,:,s[2]]

    denf[denf < 0] = 0

    # Correct for points outside of seperatrix: zero out grid points whose
    # poloidal radius exceeds the interpolated boundary radius.
    rmaxis = np.mean(rsurf[:,0])
    zmaxis = np.mean(zsurf[:,0])
    r_sep = rsurf[:,-1]
    z_sep = zsurf[:,-1]

    x_bdry = r_sep - rmaxis
    y_bdry = z_sep - zmaxis
    r_bdry = np.sqrt(x_bdry**2 + y_bdry**2)
    theta_bdry = np.arctan2(y_bdry,x_bdry)
    theta_bdry = np.where(theta_bdry < 0.0, theta_bdry + 2*np.pi, theta_bdry) #[0,2pi]
    w = np.argsort(theta_bdry)
    theta_bdry = theta_bdry[w]
    r_bdry = r_bdry[w]
    theta_bdry, w = np.unique(theta_bdry,return_index=True)
    r_bdry = r_bdry[w]
    itp = interp1d(theta_bdry,r_bdry,'cubic',fill_value='extrapolate')

    x_pts = grid["r2d"] - rmaxis
    y_pts = grid["z2d"] - zmaxis
    r_pts = np.sqrt(x_pts**2 + y_pts**2)
    theta_pts = np.arctan2(y_pts,x_pts)
    theta_pts = np.where(theta_pts < 0.0, theta_pts + 2*np.pi, theta_pts) #[0,2pi]
    r_bdry_itp = itp(theta_pts)
    # NOTE(review): the +2 appears to be a small margin beyond the separatrix
    # (presumably cm) — confirm the intended units.
    w = r_pts >= r_bdry_itp + 2
    denf[w] = 0.0
    fbm_grid[:,:,w] = 0.0

    # Enforce correct normalization: rescale so both the density and the full
    # distribution integrate to the total number of particles.
    ntot_denf = 2*np.pi*dr*dz*np.sum(r*np.sum(denf,axis=1))
    denf = denf*(ntot/ntot_denf)
    ntot_fbm = (2*np.pi*dE*dp*dr*dz)*np.sum(r*np.sum(fbm_grid,axis=(0,1,3)))
    # BUGFIX: fbm_grid was previously scaled by ntot/ntot_denf, leaving
    # ntot_fbm computed but unused; scale by its own integral instead.
    fbm_grid = fbm_grid*(ntot/ntot_fbm)

    fbm_dict={"type":1,"time":time,"nenergy":nenergy,"energy":energy,"npitch":npitch,
              "pitch":pitch,"f":fbm_grid,"denf":denf,"data_source":os.path.abspath(filename)}

    return fbm_dict
def nubeam_geometry(nubeam, angle=0.0, verbose=False):
"""
#+#nubeam_geometry
#+Calculates the FIDASIM beam geometry from the beam geometry variables in the TRANSP/NUBEAM namelist
#+***
#+##Arguments
#+ **NUBEAM**: Dictionary containing the following
#+
#+ **NUBEAM["NAME"]**: Ion source name
#+
#+ **NUBEAM["NBSHAP"]**: Ion source shape 1=rectangular, 2=circular
#+
#+ **NUBEAM["FOCLZ"]**: Vertical focal length [cm]
#+
#+ **NUBEAM["FOCLR"]**: Horizontal focal length [cm]
#+
#+ **NUBEAM["DIVZ"]**: Vertical divergence [rad]
#+
#+ **NUBEAM["DIVR"]**: Horizontal divergence [rad]
#+
#+ **NUBEAM["BMWIDZ"]**: | |
expr.key.accept(self)
expr.value.accept(self)
self.leave()
def visit_generator_expr(self, expr: GeneratorExpr) -> None:
    """Analyze a generator expression within its own scope."""
    # A fresh scope keeps comprehension index variables from leaking
    # into the enclosing namespace.
    self.enter()
    self.analyze_comp_for(expr)
    # Analyzed after the 'for' clauses so the result expression can
    # refer to the index variables they bind.
    expr.left_expr.accept(self)
    self.leave()
def analyze_comp_for(self, expr: Union[GeneratorExpr,
                                       DictionaryComprehension]) -> None:
    """Analyze the 'comp_for' clauses of a comprehension.

    That is everything after 'for' in an expression such as
    (x for x in l if p).
    """
    clauses = zip(expr.indices, expr.sequences, expr.condlists)
    for target, iterable, guards in clauses:
        # The iterable is analyzed before its target is bound, matching
        # the evaluation order of the comprehension itself.
        iterable.accept(self)
        self.analyze_lvalue(target)
        for guard in guards:
            guard.accept(self)
def visit_func_expr(self, expr: FuncExpr) -> None:
    """Analyze a lambda expression by reusing the function machinery."""
    self.analyze_function(expr)
def visit_conditional_expr(self, expr: ConditionalExpr) -> None:
    """Analyze all three parts of an 'a if cond else b' expression."""
    expr.if_expr.accept(self)
    expr.cond.accept(self)
    expr.else_expr.accept(self)
def visit__promote_expr(self, expr: PromoteExpr) -> None:
    # The promotion target is a type, not a value, so it goes through
    # type analysis rather than expression analysis.
    expr.type = self.anal_type(expr.type)
def visit_yield_expr(self, expr: YieldExpr) -> None:
    """Analyze the value produced by a 'yield' expression."""
    expr.expr.accept(self)
#
# Helpers
#
def lookup(self, name: str, ctx: Context) -> SymbolTableNode:
    """Look up an unqualified name in all active namespaces.

    Resolution order: explicit 'global'/'nonlocal' declarations, then
    class attributes, enclosing function scopes, module globals, and
    finally builtins.  Reports an error and returns None on failure.
    """
    # 1a. Name declared using 'global x' takes precedence
    if name in self.global_decls[-1]:
        if name in self.globals:
            return self.globals[name]
        else:
            self.name_not_defined(name, ctx)
            return None
    # 1b. Name declared using 'nonlocal x' takes precedence
    if name in self.nonlocal_decls[-1]:
        # Search enclosing function scopes only (self.locals[:-1]); the
        # for/else reports an error when no scope defines the name.
        for table in reversed(self.locals[:-1]):
            if table is not None and name in table:
                return table[name]
        else:
            self.name_not_defined(name, ctx)
            return None
    # 2. Class attributes (if within class definition)
    if self.is_class_scope() and name in self.type.names:
        return self.type[name]
    # 3. Local (function) scopes
    for table in reversed(self.locals):
        if table is not None and name in table:
            return table[name]
    # 4. Current file global scope
    if name in self.globals:
        return self.globals[name]
    # 5. Builtins
    b = self.globals.get('__builtins__', None)
    if b:
        table = cast(MypyFile, b.node).names
        if name in table:
            # Names like '_x' (single leading underscore) are private to
            # the builtins module and are not visible to user code.
            if name[0] == "_" and name[1] != "_":
                self.name_not_defined(name, ctx)
                return None
            node = table[name]
            return node
    # Give up.
    self.name_not_defined(name, ctx)
    self.check_for_obsolete_short_name(name, ctx)
    return None
def check_for_obsolete_short_name(self, name: str, ctx: Context) -> None:
    """Suggest a replacement when *name* is the unqualified form of
    exactly one obsolete name."""
    matches = []
    for full_name in obsolete_name_mapping:
        if full_name.rsplit('.', 1)[-1] == name:
            matches.append(full_name)
    # Only suggest when the short name is unambiguous.
    if len(matches) == 1:
        replacement = obsolete_name_mapping[matches[0]]
        self.fail("(Did you mean '{}'?)".format(replacement), ctx)
def lookup_qualified(self, name: str, ctx: Context) -> SymbolTableNode:
    """Look up a possibly qualified name ('a.b.c') relative to the
    active scopes; reports an error and returns None on failure."""
    if '.' not in name:
        # Unqualified: delegate to the plain scope lookup.
        return self.lookup(name, ctx)
    else:
        parts = name.split('.')
        # Resolve the first component normally, then walk member
        # namespaces (class or module symbol tables) for the rest.
        n = self.lookup(parts[0], ctx)  # type: SymbolTableNode
        if n:
            for i in range(1, len(parts)):
                if isinstance(n.node, TypeInfo):
                    result = cast(TypeInfo, n.node).get(parts[i])
                    if not result:
                        # Fall back to direct lookup from the class. This can be important
                        # when we have a forward reference of a nested class that is being
                        # bound before the outer class has been fully semantically analyzed.
                        #
                        # A better approach would be to introduce a new analysis pass or
                        # to move things around between passes, but this unblocks a common
                        # use case even though this is a little limited in case there is
                        # inheritance involved, for example.
                        result = cast(TypeInfo, n.node).names.get(parts[i])
                    n = result
                elif isinstance(n.node, MypyFile):
                    n = cast(MypyFile, n.node).names.get(parts[i], None)
                if not n:
                    self.name_not_defined(name, ctx)
                    break
        if n:
            # Resolve aliases like typing.List to their target types.
            n = self.normalize_type_alias(n, ctx)
        return n
def builtin_type(self, fully_qualified_name: str) -> Instance:
    """Return an Instance of the named type with no type arguments."""
    sym = self.lookup_fully_qualified(fully_qualified_name)
    return Instance(cast(TypeInfo, sym.node), [])
def lookup_fully_qualified(self, name: str) -> SymbolTableNode:
    """Lookup a fully qualified name like 'module.sub.attr'.

    Assume that the name is defined. This happens in the global namespace --
    the local module namespace is ignored.
    """
    assert '.' in name
    head, *rest = name.split('.')
    node = self.modules[head]
    # Walk through intermediate modules; the final component is looked
    # up in the last module's symbol table.
    for part in rest[:-1]:
        node = cast(MypyFile, node.names[part].node)
    return node.names[rest[-1]]
def lookup_fully_qualified_or_none(self, name: str) -> SymbolTableNode:
    """Lookup a fully qualified name like 'module.sub.attr'.

    Unlike lookup_fully_qualified(), the name is NOT assumed to be
    defined: return None when an intermediate or final component is
    missing.  Lookup happens in the global namespace -- the local module
    namespace is ignored.  (Docstring fixed; it was copy-pasted from
    lookup_fully_qualified and wrongly claimed the name must exist.)
    NOTE(review): the top-level module itself is still assumed present
    (self.modules[parts[0]] raises KeyError otherwise) -- confirm intended.
    """
    assert '.' in name
    parts = name.split('.')
    n = self.modules[parts[0]]
    for i in range(1, len(parts) - 1):
        next_sym = n.names.get(parts[i])
        if not next_sym:
            return None
        n = cast(MypyFile, next_sym.node)
    return n.names.get(parts[-1])
    def qualified_name(self, n: str) -> str:
        """Return `n` prefixed with the current module's fully qualified id."""
        return self.cur_mod_id + '.' + n
    def enter(self) -> None:
        # Push a new function scope: a fresh local symbol table plus empty
        # global/nonlocal declaration sets.
        self.locals.append(SymbolTable())
        self.global_decls.append(set())
        self.nonlocal_decls.append(set())
    def leave(self) -> None:
        # Pop the innermost function scope pushed by enter().
        self.locals.pop()
        self.global_decls.pop()
        self.nonlocal_decls.pop()
    def is_func_scope(self) -> bool:
        # The top of self.locals is None outside functions -- TODO confirm
        # where the None entries are pushed (enter() pushes SymbolTables).
        return self.locals[-1] is not None
    def is_class_scope(self) -> bool:
        """True when analyzing a class body (inside a type, not a function)."""
        return self.type is not None and not self.is_func_scope()
    def add_symbol(self, name: str, node: SymbolTableNode,
                   context: Context) -> None:
        """Add a symbol to the innermost active namespace.

        The target namespace is picked in order: current function scope,
        current class body, else module globals. A redefinition is reported
        unless it is a benign module re-import (locals) or a module-level
        collision with an identical type (globals).
        """
        if self.is_func_scope():
            if name in self.locals[-1]:
                # Flag redefinition unless this is a reimport of a module.
                if not (node.kind == MODULE_REF and
                        self.locals[-1][name].node == node.node):
                    self.name_already_defined(name, context)
            self.locals[-1][name] = node
        elif self.type:
            self.type.names[name] = node
        else:
            existing = self.globals.get(name)
            if existing and (not isinstance(node.node, MypyFile) or
                             existing.node != node.node):
                # Modules can be imported multiple times to support import
                # of multiple submodules of a package (e.g. a.x and a.y).
                if not (existing.type and node.type and is_same_type(existing.type, node.type)):
                    # Only report an error if the symbol collision provides a different type.
                    self.name_already_defined(name, context)
            self.globals[name] = node
    def add_var(self, v: Var, ctx: Context) -> None:
        """Add a variable to the current scope (local in a function, else global)."""
        if self.is_func_scope():
            self.add_local(v, ctx)
        else:
            self.globals[v.name()] = SymbolTableNode(GDEF, v, self.cur_mod_id)
            v._fullname = self.qualified_name(v.name())
    def add_local(self, v: Var, ctx: Context) -> None:
        """Add a local variable; reports an error on redefinition.

        Note that locals keep their short name as _fullname.
        """
        if v.name() in self.locals[-1]:
            self.name_already_defined(v.name(), ctx)
        v._fullname = v.name()
        self.locals[-1][v.name()] = SymbolTableNode(LDEF, v)
    def add_local_func(self, defn: FuncBase, ctx: Context) -> None:
        """Add a function defined in a local scope; reports an error on redefinition."""
        # TODO combine with above
        if defn.name() in self.locals[-1]:
            self.name_already_defined(defn.name(), ctx)
        self.locals[-1][defn.name()] = SymbolTableNode(LDEF, defn)
    def check_no_global(self, n: str, ctx: Context,
                        is_func: bool = False) -> None:
        """Report an error if `n` is already defined at module level.

        When is_func is true and the colliding symbol is a FuncDef, a more
        specific message about overload variants is emitted.
        """
        if n in self.globals:
            if is_func and isinstance(self.globals[n].node, FuncDef):
                self.fail(("Name '{}' already defined (overload variants "
                           "must be next to each other)").format(n), ctx)
            else:
                self.name_already_defined(n, ctx)
    def name_not_defined(self, name: str, ctx: Context) -> None:
        """Report a 'name not defined' error, with a rename hint when available."""
        message = "Name '{}' is not defined".format(name)
        extra = self.undefined_name_extra_info(name)
        if extra:
            message += ' {}'.format(extra)
        self.fail(message, ctx)
    def name_already_defined(self, name: str, ctx: Context) -> None:
        """Report a generic redefinition error."""
        self.fail("Name '{}' already defined".format(name), ctx)
    def fail(self, msg: str, ctx: Context) -> None:
        """Report an error message at the line of the given context."""
        self.errors.report(ctx.get_line(), msg)
    def undefined_name_extra_info(self, fullname: str) -> Optional[str]:
        """Return a hint for names renamed between versions, or None."""
        if fullname in obsolete_name_mapping:
            return "(it's now called '{}')".format(obsolete_name_mapping[fullname])
        else:
            return None
class FirstPass(NodeVisitor):
    """First phase of semantic analysis.
    See docstring of 'analyze' for a description of what this does.
    """
    def __init__(self, sem: SemanticAnalyzer) -> None:
        self.sem = sem
        # Cached for convenience; used for branch-reachability inference.
        self.pyversion = sem.pyversion
    def analyze(self, file: MypyFile, fnam: str, mod_id: str) -> None:
        """Perform the first analysis pass.
        Resolve the full names of definitions not nested within functions and
        construct type info structures, but do not resolve inter-definition
        references such as base classes.
        Also add implicit definitions such as __name__.
        """
        sem = self.sem
        sem.cur_mod_id = mod_id
        sem.errors.set_file(fnam)
        # Start each module with a fresh global namespace and scope stacks.
        sem.globals = SymbolTable()
        sem.global_decls = [set()]
        sem.nonlocal_decls = [set()]
        sem.block_depth = [0]
        defs = file.defs
        # Add implicit definitions of module '__name__' etc.
        for name, t in implicit_module_attrs.items():
            v = Var(name, UnboundType(t))
            v._fullname = self.sem.qualified_name(name)
            self.sem.globals[name] = SymbolTableNode(GDEF, v, self.sem.cur_mod_id)
        for d in defs:
            d.accept(self)
        # Add implicit definition of 'None' to builtins, as we cannot define a
        # variable with a None type explicitly.
        if mod_id == 'builtins':
            v = Var('None', NoneTyp())
            v._fullname = self.sem.qualified_name('None')
            self.sem.globals['None'] = SymbolTableNode(GDEF, v, self.sem.cur_mod_id)
    def visit_block(self, b: Block) -> None:
        # Skip blocks already marked unreachable; their definitions must not
        # pollute the module namespace.
        if b.is_unreachable:
            return
        self.sem.block_depth[-1] += 1
        for node in b.body:
            node.accept(self)
        self.sem.block_depth[-1] -= 1
    def visit_assignment_stmt(self, s: AssignmentStmt) -> None:
        # Bind every left-hand side as a module-level definition.
        for lval in s.lvalues:
            self.sem.analyze_lvalue(lval, add_global=True,
                                    explicit_type=s.type is not None)
    def visit_func_def(self, d: FuncDef) -> None:
        """Bind a top-level function name, tolerating conditional redefinition."""
        sem = self.sem
        # Inside any nested block (if/try/...) the definition is conditional.
        d.is_conditional = sem.block_depth[-1] > 0
        if d.name() in sem.globals:
            n = sem.globals[d.name()].node
            if sem.is_conditional_func(n, d):
                # Conditional function definition -- multiple defs are ok.
                d.original_def = cast(FuncDef, n)
            else:
                sem.check_no_global(d.name(), d, True)
        d._fullname = sem.qualified_name(d.name())
        sem.globals[d.name()] = SymbolTableNode(GDEF, d, sem.cur_mod_id)
    def visit_overloaded_func_def(self, d: OverloadedFuncDef) -> None:
        """Bind an overloaded function name at module level."""
        self.sem.check_no_global(d.name(), d)
        d._fullname = self.sem.qualified_name(d.name())
        self.sem.globals[d.name()] = SymbolTableNode(GDEF, d,
                                                     self.sem.cur_mod_id)
    def visit_class_def(self, d: ClassDef) -> None:
        """Create a TypeInfo for a top-level class and bind its name."""
        self.sem.check_no_global(d.name, d)
        d.fullname = self.sem.qualified_name(d.name)
        info = TypeInfo(SymbolTable(), d)
        info.set_line(d.line)
        d.info = info
        self.sem.globals[d.name] = SymbolTableNode(GDEF, info,
                                                   self.sem.cur_mod_id)
        self.process_nested_classes(d)
    def process_nested_classes(self, outer_def: ClassDef) -> None:
        """Recursively create TypeInfos for classes nested inside outer_def."""
        for node in outer_def.defs.body:
            if isinstance(node, ClassDef):
                node.info = TypeInfo(SymbolTable(), node)
                # NOTE(review): _fullname is set to the short name here;
                # presumably fixed up by a later pass -- confirm.
                node.info._fullname = node.info.name()
                symbol = SymbolTableNode(MDEF, node.info)
                outer_def.info.names[node.name] = symbol
                self.process_nested_classes(node)
    def visit_for_stmt(self, s: ForStmt) -> None:
        # The loop index introduces module-level binding(s).
        self.sem.analyze_lvalue(s.index, add_global=True)
    def visit_with_stmt(self, s: WithStmt) -> None:
        # Each 'as' target (when present) introduces a binding.
        for n in s.target:
            if n:
                self.sem.analyze_lvalue(n, add_global=True)
    def visit_decorator(self, d: Decorator) -> None:
        # A decorated function is represented by its Var at this stage.
        d.var._fullname = self.sem.qualified_name(d.var.name())
        self.sem.add_symbol(d.var.name(), SymbolTableNode(GDEF, d.var), d)
    def visit_if_stmt(self, s: IfStmt) -> None:
        # Mark unreachable branches (e.g. version checks) first, then visit
        # the surviving bodies.
        infer_reachability_of_if_statement(s, pyversion=self.pyversion)
        for node in s.body:
            node.accept(self)
        if s.else_body:
            s.else_body.accept(self)
self.sem.analyze_try_stmt(s, | |
field associated with this file. May be None if there isn't
one, for example when we have an application/octet-stream upload.
"""
return self._field_name
    # Read-only views of internal state; all writes go through on_data().
    @property
    def file_name(self):
        """The file name given in the upload request.
        """
        return self._file_name
    @property
    def actual_file_name(self):
        """The file name that this file is saved as. Will be None if it's not
        currently saved on disk.
        """
        return self._actual_file_name
    @property
    def file_object(self):
        """The file object that we're currently writing to. Note that this
        will either be an instance of a :class:`io.BytesIO`, or a regular file
        object.
        """
        return self._fileobj
    @property
    def size(self):
        """The total size of this file, counted as the number of bytes that
        currently have been written to the file.
        """
        return self._bytes_written
    @property
    def in_memory(self):
        """A boolean representing whether or not this file object is currently
        stored in-memory or on-disk.
        """
        return self._in_memory
def flush_to_disk(self):
"""If the file is already on-disk, do nothing. Otherwise, copy from
the in-memory buffer to a disk file, and then reassign our internal
file object to this new disk file.
Note that if you attempt to flush a file that is already on-disk, a
warning will be logged to this module's logger.
"""
if not self._in_memory:
self.logger.warn(
"Trying to flush to disk when we're not in memory"
)
return
# Go back to the start of our file.
self._fileobj.seek(0)
# Open a new file.
new_file = self._get_disk_file()
# Copy the file objects.
shutil.copyfileobj(self._fileobj, new_file)
# Seek to the new position in our new file.
new_file.seek(self._bytes_written)
# Reassign the fileobject.
old_fileobj = self._fileobj
self._fileobj = new_file
# We're no longer in memory.
self._in_memory = False
# Close the old file object.
old_fileobj.close()
def _get_disk_file(self):
"""This function is responsible for getting a file object on-disk for us.
"""
self.logger.info("Opening a file on disk")
file_dir = self._config.get('UPLOAD_DIR')
keep_filename = self._config.get('UPLOAD_KEEP_FILENAME', False)
keep_extensions = self._config.get('UPLOAD_KEEP_EXTENSIONS', False)
# If we have a directory and are to keep the filename...
if file_dir is not None and keep_filename:
self.logger.info("Saving with filename in: %r", file_dir)
# Build our filename.
# TODO: what happens if we don't have a filename?
fname = self._file_base
if keep_extensions:
fname = fname + self._ext
path = os.path.join(file_dir, fname)
try:
self.logger.info("Opening file: %r", path)
tmp_file = open(path, 'w+b')
except (IOError, OSError) as e:
tmp_file = None
self.logger.exception("Error opening temporary file")
raise FileError("Error opening temporary file: %r" % path)
else:
# Build options array.
# Note that on Python 3, tempfile doesn't support byte names. We
# encode our paths using the default filesystem encoding.
options = {}
if keep_extensions:
ext = self._ext
if isinstance(ext, binary_type):
ext = ext.decode(sys.getfilesystemencoding())
options['suffix'] = ext
if file_dir is not None:
d = file_dir
if isinstance(d, binary_type):
d = d.decode(sys.getfilesystemencoding())
options['dir'] = d
# Create a temporary (named) file with the appropriate settings.
self.logger.info("Creating a temporary file with options: %r",
options)
try:
tmp_file = tempfile.NamedTemporaryFile(**options)
except (IOError, OSError):
self.logger.exception("Error creating named temporary file")
raise FileError("Error creating named temporary file")
fname = tmp_file.name
# Encode filename as bytes.
if isinstance(fname, text_type):
fname = fname.encode(sys.getfilesystemencoding())
self._actual_file_name = fname
return tmp_file
    def write(self, data):
        """Write some data to the File.
        Thin wrapper that delegates to :meth:`on_data` and returns its result
        (the number of bytes written).
        :param data: a bytestring
        """
        return self.on_data(data)
def on_data(self, data):
"""This method is a callback that will be called whenever data is
written to the File.
:param data: a bytestring
"""
bwritten = self._fileobj.write(data)
# If the bytes written isn't the same as the length, just return.
if bwritten != len(data):
self.logger.warn("bwritten != len(data) (%d != %d)", bwritten,
len(data))
return bwritten
# Keep track of how many bytes we've written.
self._bytes_written += bwritten
# If we're in-memory and are over our limit, we create a file.
if (self._in_memory and
self._config.get('MAX_MEMORY_FILE_SIZE') is not None and
(self._bytes_written >
self._config.get('MAX_MEMORY_FILE_SIZE'))):
self.logger.info("Flushing to disk")
self.flush_to_disk()
# Return the number of bytes written.
return bwritten
    def on_end(self):
        """This method is called whenever the Field is finalized.
        """
        # Default implementation is a no-op; subclasses may override.
        pass
    def finalize(self):
        """Finalize the form file. This will not close the underlying file,
        but simply signal that we are finished writing to the File.
        """
        self.on_end()
    def close(self):
        """Close the File object. This will actually close the underlying
        file object (whether it's a :class:`io.BytesIO` or an actual file
        object).
        """
        self._fileobj.close()
    def __repr__(self):
        # Debug-friendly representation; does not include the (possibly
        # large) file contents.
        return "%s(file_name=%r, field_name=%r)" % (
            self.__class__.__name__,
            self.file_name,
            self.field_name
        )
class BaseParser(object):
    """Common base class for all parsers; holds the callback machinery.

    Two kinds of callbacks are supported.  A "notification callback" fires
    when something happens (e.g. a new part of a multipart message is seen)
    and takes no arguments.  A "data callback" fires when data arrives and is
    called as::

        data_callback(data, start, end)

    where ``data`` is a bytestring and ``start``/``end`` are integer indexes
    delimiting the interesting slice ``data[start:end]``.  The data is not
    copied, since copying severely hurts performance.
    """
    def __init__(self):
        self.logger = logging.getLogger(__name__)
    def callback(self, name, data=None, start=None, end=None):
        """Invoke the callback registered under ``name``, if any.

        :param name: The name of the callback to call (as a string).
        :param data: Data to pass to the callback. If None, the callback is
                     treated as a notification callback and called with no
                     arguments.
        :param start: An integer that is passed to the data callback.
        :param end: An integer that is passed to the data callback.
        """
        full_name = "on_" + name
        handler = self.callbacks.get(full_name)
        if handler is None:
            return
        if data is None:
            # Notification callback: no arguments.
            self.logger.debug("Calling %s with no data", full_name)
            handler()
            return
        # Data callback: skip empty slices entirely.
        if start is not None and start == end:
            return
        self.logger.debug("Calling %s with data[%d:%d]", full_name, start, end)
        handler(data, start, end)
    def set_callback(self, name, new_func):
        """Register, replace or (with ``new_func=None``) remove a callback.

        :param name: The name of the callback to set (as a string).
        :param new_func: The new function for the callback.  If None, the
                         callback is removed (silently if absent).
        """
        key = 'on_' + name
        if new_func is None:
            self.callbacks.pop(key, None)
        else:
            self.callbacks[key] = new_func
    def close(self):
        pass  # pragma: no cover
    def finalize(self):
        pass  # pragma: no cover
    def __repr__(self):
        return "%s()" % self.__class__.__name__
class OctetStreamParser(BaseParser):
    """This parser parses an octet-stream request body and calls callbacks when
    incoming data is received. Callbacks are as follows:
    .. list-table::
       :widths: 15 10 30
       :header-rows: 1
       * - Callback Name
         - Parameters
         - Description
       * - on_start
         - None
         - Called when the first data is parsed.
       * - on_data
         - data, start, end
         - Called for each data chunk that is parsed.
       * - on_end
         - None
         - Called when the parser is finished parsing all data.
    :param callbacks: A dictionary of callbacks. See the documentation for
                      :class:`BaseParser`. Defaults to an empty dict.
    :param max_size: The maximum size of body to parse. Defaults to infinity -
                     i.e. unbounded.
    """
    def __init__(self, callbacks=None, max_size=float('inf')):
        super(OctetStreamParser, self).__init__()
        # Use None (not {}) as the default: a mutable {} default would be one
        # shared dict across every instance constructed without callbacks.
        self.callbacks = {} if callbacks is None else callbacks
        self._started = False
        if not isinstance(max_size, Number) or max_size < 1:
            raise ValueError("max_size must be a positive number, not %r" %
                             max_size)
        self.max_size = max_size
        self._current_size = 0
def write(self, data):
"""Write some data to the parser, which will perform size verification,
and then pass the data to the underlying callback.
:param data: a bytestring
"""
if not self._started:
self.callback('start')
self._started = True
# Truncate data | |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
""" Tests for data_manager.
"""
import pytest
import pyomo.environ as pyo
from collections import OrderedDict
from pyomo.core.base.componentuid import ComponentUID
from pyomo.common.collections import ComponentMap
import pandas as pd
from idaes.apps.caprese.dynamic_block import DynamicBlock
from idaes.apps.caprese.controller import ControllerBlock
from idaes.apps.caprese.estimator import EstimatorBlock
from idaes.apps.caprese.dynamic_var import DiffVar
from idaes.apps.caprese.tests.test_simple_model import (
make_model,
initialize_t0,
)
from idaes.apps.caprese.data_manager import (
find_and_merge_variables,
empty_dataframe_from_variables,
add_variable_values_to_dataframe,
add_variable_setpoints_to_dataframe,
PlantDataManager,
ControllerDataManager,
EstimatorDataManager,
)
__author__ = "<NAME>"
class TestDataManager(object):
    def make_small_ContreteModel(self):
        """Build the small ConcreteModel used as a fixture by several tests.

        The model has a 5-point set (0.0..4.0), a two-member label set
        ("A", "B"), and a variable var1 initialized to 0.1 + t for "A" and
        0.2 + t for "B".

        NOTE: the method name contains a typo ("Contrete"); it is kept
        unchanged because other tests in this class call it by this name.
        This is a helper, not a collected test, so the stray
        @pytest.mark.unit decorator it carried has been removed.
        """
        m = pyo.ConcreteModel()
        m.set1 = pyo.Set(initialize = [0.0, 1.0, 2.0, 3.0, 4.0])
        m.set2 = pyo.Set(initialize = ["A", "B"])
        init_d1 = {(i, "A"):0.1+1.*i for i in m.set1}
        init_d2 = {(i, "B"):0.2+1.*i for i in m.set1}
        init_dict = {**init_d1, **init_d2}
        m.var1 = pyo.Var(m.set1, m.set2, initialize = init_dict)
        return m
    @pytest.mark.unit
    def test_find_and_merge_variables(self):
        """find_and_merge_variables should union the block's differential
        variables with the user-given states without duplicating variables
        that are already tracked.
        """
        plant = self.make_plant()
        p_model = plant.mod
        p_time = plant.time
        p_t0 = p_time.first()
        controller = self.make_controller()
        c_model = controller.mod
        c_time = controller.time
        c_t0 = c_time.first()
        # states_of_interest is defined with plant
        states_of_interest = [pyo.Reference(p_model.conc[:, "A"]),
                              pyo.Reference(p_model.rate[:, "A"]),
                              p_model.flow_out]
        pred_merged_list = controller.differential_vars + \
                            [pyo.Reference(c_model.rate[:, "A"]),
                             c_model.flow_out]
        merged_list_1 = find_and_merge_variables(
            controller, controller.differential_vars, states_of_interest
        )
        # Compare by identity of the underlying data objects at t0.
        assert all(i1[c_t0] is i2[c_t0] for i1, i2
                   in zip(merged_list_1, pred_merged_list))
    @pytest.mark.unit
    def test_empty_dataframe_from_variables(self):
        """empty_dataframe_from_variables should return an empty DataFrame
        with an int64 "iteration" column followed by one float64 column per
        variable, named by CUID string (optionally renamed via rename_map).
        """
        m = self.make_small_ContreteModel()
        # w/o rename_map
        variables = [pyo.Reference(m.var1[:, "A"]),
                     pyo.Reference(m.var1[:, "B"])]
        df1 = empty_dataframe_from_variables(variables)
        assert type(df1) is pd.core.frame.DataFrame
        assert df1.empty
        assert len(df1.index) == 0
        pred_column_strings1 = ["iteration"]+\
                                [str(ComponentUID(var.referent))
                                 for var in variables]
        assert all(i1 == i2 for i1, i2 in
                   zip(pred_column_strings1, df1.columns))
        assert df1["iteration"].dtype == "int64"
        assert all(dtype == "float64" for dtype in df1.dtypes[1:])
        # w/ rename_map
        rename_map = {var: str(ComponentUID(var.referent))+"_random"
                      for var in variables}
        df2 = empty_dataframe_from_variables(variables, rename_map)
        assert type(df2) is pd.core.frame.DataFrame
        assert df2.empty
        assert len(df2.index) == 0
        pred_column_strings2 = ["iteration"]+\
                                [str(ComponentUID(var.referent))+"_random"
                                 for var in variables]
        assert all(i1 == i2 for i1, i2 in
                   zip(pred_column_strings2, df2.columns))
        assert df2["iteration"].dtype == "int64"
        assert all(dtype == "float64" for dtype in df2.dtypes[1:])
@pytest.mark.unit
def test_add_variable_values_to_dataframe(self):
m = self.make_small_ContreteModel()
variables = [pyo.Reference(m.var1[:, "A"]),
pyo.Reference(m.var1[:, "B"])]
str_cuids = [str(ComponentUID(var.referent)) for var in variables]
# Case1: Neither optional argument is given.
df1 = empty_dataframe_from_variables(variables)
df1 = add_variable_values_to_dataframe(df1,
variables,
iteration = 10,
)
assert len(df1.index) == 5
assert all(i1 == i2 for i1, i2 in
zip(df1.index, [0.0, 1.0, 2.0, 3.0, 4.0]))
assert len(df1.columns) == 3
assert all(ele == 10 for ele in df1["iteration"])
assert all(ele == 0.1+ind*1. for ind, ele in
enumerate(df1[str_cuids[0]]))
assert all(ele == 0.2+ind*1. for ind, ele in
enumerate(df1[str_cuids[1]]))
# Case2: time_subset is given
df2 = empty_dataframe_from_variables(variables)
df2 = add_variable_values_to_dataframe(df2,
variables,
iteration = 100,
time_subset = [0,1]
)
assert len(df2.index) == 2
assert all(i1 == i2 for i1, i2 in zip(df2.index, [0.0,1.0]))
assert len(df2.columns) == 3
assert all(ele == 100 for ele in df2["iteration"])
assert all(ele == 0.1+ind*1. for ind, ele in
enumerate(df2[str_cuids[0]]))
assert all(ele == 0.2+ind*1. for ind, ele in
enumerate(df2[str_cuids[1]]))
df2 = add_variable_values_to_dataframe(df2,
variables,
iteration = 200,
time_subset = [2,3,4]
)
assert len(df2.index) == 5
assert all(i1 == i2 for i1, i2 in
zip(df2.index, [0.0, 1.0, 3.0, 4.0, 5.0]))
assert len(df2.columns) == 3
assert all(ele == 100 for ele in df2["iteration"][0:2])
assert all(ele == 200 for ele in df2["iteration"][2:])
assert all(ele == 0.1+ind*1. for ind, ele in
enumerate(df2[str_cuids[0]]))
assert all(ele == 0.2+ind*1. for ind, ele in
enumerate(df2[str_cuids[1]]))
#Case3: rename_map is given
rename_map = {var: str(ComponentUID(var.referent))+"_random"
for var in variables}
df3 = empty_dataframe_from_variables(variables, rename_map)
df3 = add_variable_values_to_dataframe(df3,
variables,
iteration = 300,
time_subset = [0,1],
rename_map = rename_map)
assert len(df3.index) == 2
assert all(i1 == i2 for i1, i2 in zip(df3.index, [0.0,1.0]))
assert len(df3.columns) == 3
assert all(ele == 300 for ele in df3["iteration"])
assert all(ele == 0.1+ind*1. for ind, ele in
enumerate(df3[str_cuids[0]+"_random"]))
assert all(ele == 0.2+ind*1. for ind, ele in
enumerate(df3[str_cuids[1]+"_random"]))
#Case4: time_map is given
df5 = empty_dataframe_from_variables(variables)
time_map = {t: t + 40. for t in m.set1}
df5 = add_variable_values_to_dataframe(df5,
variables,
iteration = 500,
time_map = time_map)
assert len(df5.index) == 5
assert all(i1 == i2 for i1, i2 in
zip(df5.index, [40.0, 41.0, 42.0, 43.0, 44.0]))
assert len(df1.columns) == 3
assert all(ele == 500 for ele in df5["iteration"])
assert all(ele == 0.1+ind*1. for ind, ele in
enumerate(df5[str_cuids[0]]))
assert all(ele == 0.2+ind*1. for ind, ele in
enumerate(df5[str_cuids[1]]))
    @pytest.mark.unit
    def test_add_variable_setpoints_to_dataframe(self):
        """add_variable_setpoints_to_dataframe should record each variable's
        current setpoint over the given time_subset, using the user-var to
        nmpc-var mapping to find the setpoint attribute.
        """
        m = self.make_small_ContreteModel()
        variables = [pyo.Reference(m.var1[:, "A"]),
                     pyo.Reference(m.var1[:, "B"])]
        str_cuids = [str(ComponentUID(var.referent)) for var in variables]
        # Manually create differential_vars, save differential varialbes and,
        # assign their setpoints.
        m.differential_vars = []
        for ind, var in enumerate(variables):
            new_ref = pyo.Reference(var.referent, ctype = DiffVar)
            new_ref.setpoint = 10.*(ind+1)
            m.differential_vars.append(new_ref)
        # This mapping is important, because user given variables are not nmpc_var.
        # Thus, they don't have setpoint attribute.
        # This mapping maps the user given variables to corresponding nmpc_vars.
        m.var_mapping = ComponentMap((original_ref, new_ref)
                                     for original_ref, new_ref in
                                     zip(variables, m.differential_vars))
        df1 = empty_dataframe_from_variables(variables)
        df1 = add_variable_setpoints_to_dataframe(
            df1,
            variables,
            time_subset = [0.0, 1.0, 2.0],
            map_for_user_given_vars = m.var_mapping,
            iteration = 10,
        )
        assert all(val == 10 for val in df1["iteration"])
        assert all(val == 10.0 for val in df1["var1[*,A]"])
        assert all(val == 20.0 for val in df1["var1[*,B]"])
        # Set new setpoint value
        for ind, var in enumerate(m.differential_vars):
            var.setpoint = 100.*(ind+1)
        df1 = add_variable_setpoints_to_dataframe(
            df1,
            variables,
            time_subset = [3.0, 4.0],
            map_for_user_given_vars = m.var_mapping,
            iteration = 20,
        )
        # Only the newly appended rows (index >= 3) carry the new setpoints.
        assert all(val == 20 for val in df1["iteration"][3:])
        assert all(val == 100.0 for val in df1["var1[*,A]"][3:])
        assert all(val == 200.0 for val in df1["var1[*,B]"][3:])
    @pytest.mark.unit
    def make_plant(self):
        """Helper: build and construct a DynamicBlock plant with a 0.5 horizon."""
        model = make_model(horizon = 0.5, nfe = 2)
        time = model.time
        t0 = time.first()
        inputs = [model.flow_in[t0]]
        measurements=[model.conc[0,'A'], model.conc[0,'B']]
        plant = DynamicBlock(
            model=model,
            time=time,
            inputs=inputs,
            measurements=measurements,
        )
        plant.construct()
        plant.set_sample_time(sample_time = 0.5)
        return plant
    @pytest.mark.unit
    def make_controller(self):
        """Helper: build and construct a ControllerBlock with a 1.0 horizon."""
        model = make_model(horizon = 1., nfe = 4)
        time = model.time
        t0 = time.first()
        inputs = [model.flow_in[t0]]
        measurements=[model.conc[0,'A'], model.conc[0,'B']]
        controller = ControllerBlock(
            model=model,
            time=time,
            inputs=inputs,
            measurements=measurements,
        )
        controller.construct()
        controller.set_sample_time(sample_time = 0.5)
        return controller
    @pytest.mark.unit
    def make_estimator(self):
        """Helper: build and construct an EstimatorBlock with a 1.0 horizon.

        Note sample_time is passed both to the constructor and to
        set_sample_time -- TODO confirm whether both are required.
        """
        model = make_model(horizon = 1., nfe = 4)
        time = model.time
        t0 = time.first()
        inputs = [model.flow_in[t0]]
        measurements=[model.conc[0,'A'], model.conc[0,'B']]
        estimator = EstimatorBlock(
            model=model,
            time=time,
            inputs=inputs,
            measurements=measurements,
            sample_time = 0.5)
        estimator.construct()
        estimator.set_sample_time(sample_time = 0.5)
        return estimator
    @pytest.mark.unit
    def test_PlantDataManager(self):
        """Smoke test: PlantDataManager constructs and exposes its public API."""
        model = make_model(horizon = 0.5, nfe = 2)
        time = model.time
        t0 = time.first()
        inputs = [model.flow_in[t0]]
        measurements=[model.conc[0,'A'], model.conc[0,'B']]
        plant = DynamicBlock(
            model=model,
            time=time,
            inputs=inputs,
            measurements=measurements,
        )
        plant.construct()
        states_of_interest = [pyo.Reference(model.conc[:, "A"]),
                              pyo.Reference(model.rate[:, "A"])]
        plant_datamanager = PlantDataManager(plant, states_of_interest,)
        # Make sure all methods are there.
        assert hasattr(plant_datamanager, "__init__")
        assert hasattr(plant_datamanager, "get_plant_dataframe")
        assert hasattr(plant_datamanager, "save_initial_plant_data")
        assert hasattr(plant_datamanager, "save_plant_data")
    @pytest.mark.unit
    def test_PlantDataManager__init__(self):
        """PlantDataManager.__init__ should merge differential vars with the
        user states (no duplicates) and append input vars, then build the
        empty dataframe.
        """
        plant = self.make_plant()
        model = plant.mod
        time = plant.time
        t0 = time.first()
        states_of_interest = [pyo.Reference(model.conc[:, "A"]),
                              pyo.Reference(model.rate[:, "A"])]
        plant_datamanager = PlantDataManager(plant, states_of_interest)
        assert hasattr(plant_datamanager, "plantblock")
        assert plant is plant_datamanager.plantblock
        # Note that model.conc[:, "A"] has alrealy existed in differential_vars
        assert hasattr(plant_datamanager, "plant_states_of_interest")
        assert all(i1[t0] is i2[t0] for i1, i2 in
                   zip(plant_datamanager.plant_states_of_interest,
                       plant.differential_vars + [states_of_interest[1]]))
        assert hasattr(plant_datamanager, "plant_vars_of_interest")
        pred_plant_vars_of_interest = plant.differential_vars + \
                                        [states_of_interest[1]] + \
                                        plant.input_vars
        assert all(i1[t0] is i2[t0] for i1, i2 in
                   zip(plant_datamanager.plant_vars_of_interest,
                       pred_plant_vars_of_interest))
        assert hasattr(plant_datamanager, "plant_df")
        # empty_dataframe_from_variables has been tested.
@pytest.mark.unit
def test_get_plant_dataframe(self):
plant = self.make_plant()
model = plant.mod
time = plant.time
t0 = time.first()
states_of_interest = [pyo.Reference(model.conc[:, "A"]),
pyo.Reference(model.rate[:, "A"])]
plant_datamanager = PlantDataManager(plant, states_of_interest)
assert id(plant_datamanager.get_plant_dataframe()) == \
id(plant_datamanager.plant_df)
@pytest.mark.unit
def test_save_initial_plant_data(self):
plant = self.make_plant()
model = plant.mod
time = plant.time
t0 = time.first()
states_of_interest = [pyo.Reference(model.conc[:, "A"]),
pyo.Reference(model.rate[:, "A"])]
plant_datamanager = PlantDataManager(plant, states_of_interest)
# Set some values at t0 for differential variables
plant.vectors.differential[0,:].set_value(35.)
plant.vectors.differential[1,:].set_value(45.)
# Set value at t0 for the user-interested variable.
plant.vectors.algebraic[1,:].set_value(0.2)
plant_datamanager.save_initial_plant_data()
df = plant_datamanager.plant_df
assert len(df.index) == 1
assert df.index[0] == 0.0
pred_columns = [
"iteration", "mod.conc[*,A]", "mod.conc[*,B]",
"mod.rate[*,A]", "mod.flow_in[*]"
]
assert all(i1 == i2 for i1, i2 in zip(pred_columns, df.columns))
assert df["iteration"][0] == 0
| |
import json
import socket
import time
from hashlib import sha1
import random
import threading
from queue import Queue
from copy import copy
# project files
from src import utils
from src.utils import log
from src.Finger import Finger
from src.rpc_handlers import REQUEST_MAP, STATUS_CONFLICT
from src.Storage import Storage
# All ring IDs in this module are derived with SHA-1; Finger is configured
# to share the same hash function.
hash_func = sha1
Finger.hash_func = hash_func
class Node:
    """
    Defines an E-Chord Node.

    On construction the node joins the ring (seeding via the seed server),
    registers itself with the seed server, and then serves requests.
    """
    def __init__(self, port=None):
        """
        Initializes a new node, joins the ring, registers with the seed
        server, and starts serving requests.

        :param port: optional server port; when None the address is taken
            from utils.params ("host"/"server_port")
        """
        # data storage dictionary to hold (key, value) pairs
        self.storage = Storage()
        # RW mutex to avoid writes in the middle of RPCs
        # RPCs are considered readers, the main thread is considered the writer
        self.stabilize_mutex = utils.RWLock()
        # create threads to listen for connections and to send stabilize signal
        self.event_queue = Queue()
        # set address for server and client
        self.SERVER_ADDR = ("", port) if port is not None else (utils.get_ip(), utils.params["host"]["server_port"])
        # initialize finger table and successor list
        # NOTE: every finger initially points at this node itself.
        self.finger_table = [Finger(self.SERVER_ADDR)] * utils.params["ring"]["bits"]
        self.successor_list = []
        self.successor_list_index = -1
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # ID will be SHA-1(IP+port)
        self.node_id = utils.get_id(self.SERVER_ADDR[0] + str(self.SERVER_ADDR[1]), hash_func)
        log.debug(f"Initialized with node ID: {self.node_id}")
        self.predecessor = None
        # variable indicating intent to leave
        # once true, the node will try to move its keys and leave after each stabilize
        self.leaving = False
        self.join_ring()
        # Register this node with the seed server so future joiners can find it.
        self.ask_peer((utils.params["seed_server"]["ip"], utils.params["seed_server"]["port"]),
                      "add_node", {"ip": self.SERVER_ADDR[0], "port": self.SERVER_ADDR[1], "node_id": self.node_id})
        self.listen()
    def join_ring(self):
        """
        Joins ring by getting seed from seed server and asking it to find successor
        :return: None
        """
        while True:
            # get initial node from seed server
            data = self.get_seed()
            log.debug("Asked seed server")
            if data["header"]["status"] == STATUS_CONFLICT:
                log.critical("ID conflict in network. Please change port.")
                exit(1)
            # Join ring
            # if at least one other node exists
            if data["header"]["status"] in range(200, 300):
                log.info("Got seed from seed server")
                log.debug(f"Seed address: {data['body']['ip'], data['body']['port']}")
                seed_dead = False
                while True:
                    self.predecessor = None
                    # get successor from seed node
                    log.info("Asking seed for successor")
                    response = self.ask_peer((data["body"]["ip"], data["body"]["port"]),
                                             "find_successor", {"for_id": self.node_id})
                    if not response or response["header"]["status"] not in range(200, 300):
                        # tell seed server that seed node has died
                        self.ask_peer((utils.params["seed_server"]["ip"], utils.params["seed_server"]["port"]),
                                      "dead_node", {"ip": data["body"]["ip"], "port": data["body"]["port"],
                                                    "node_id": data["body"]["node_id"]})
                        seed_dead = True
                        break
                    self.finger_table[0] = Finger((response["body"]["ip"], response["body"]["port"]),
                                                  response["body"]["node_id"])
                    log.info("Got successor")
                    log.debug(f"Successor address: {self.finger_table[0].addr} with node ID: "
                              f"{self.finger_table[0].node_id}")
                    # initialize successor list, or get new successor if successor is dead
                    if self.init_successor_list():
                        log.info("Initialized successor list")
                        break
                    # if successor has died, wait for other nodes to stabilize before asking for new successor
                    log.info("Waiting for stabilization")
                    time.sleep(utils.params["ring"]["stabilize_delay"])
                # if seed node is dead, reseed
                if seed_dead:
                    log.info("Seed is dead, retrying...")
                    continue
                # NOTE(review): on success, control falls out of this branch
                # and the outer `while True` repeats -- confirm a `break` is
                # not missing here after a successful join.
                # if this is the first node
            else:
                log.info("No other nodes in the network")
                self.predecessor = Finger(self.SERVER_ADDR)
                for i in range(len(self.successor_list)):
                    self.successor_list[i] = copy(self.predecessor)
                log.info("Initialized predecessor and sucessor list to self")
                break
def init_successor_list(self):
    """
    Initializes successor_list
    :return: True if successful, False if this node's successor is dead
    """
    # ask successor for its own view of the successor list
    log.info("Asking successor for this node's successor list")
    response = self.ask_peer(self.finger_table[0].addr, "get_prev_successor_list", {})
    if not response:
        # no reply means the successor is gone; the caller must find a new one
        log.info("Successor is dead")
        return False
    # overwrite existing slots in place, growing the list when the reply is longer
    for index, entry in enumerate(response["body"]["successor_list"]):
        finger = Finger((entry["ip"], entry["port"]), entry["node_id"])
        if index < len(self.successor_list):
            self.successor_list[index] = finger
        else:
            self.successor_list.append(finger)
    return True
def find_key(self, key):
    """
    Finds node that contains key and returns the key's value
    :param key: the key
    :return: the key's value, or None if not found
    """
    key_id = utils.get_id(key, hash_func)
    log.info(f"Finding value for ID {key_id}")
    holder = self.find_successor(key_id)
    if not holder:
        log.debug("Couldn't find node")
        return None
    log.debug(f"Node found to store key has ID: {holder[2]}")
    response = self.ask_peer(holder[:2], "lookup", {"key": key})
    lookup_ok = bool(response) and response["header"]["status"] in range(200, 300)
    if not lookup_ok:
        log.debug("Couldn't find key")
        return None
    log.debug("Value found")
    return response["body"]["value"]
def find_and_store_key(self, key, value):
    """
    Finds node that key should be stored in and stores it there
    If the key already exists, this will update its value with the given value
    :param key: the key
    :param value: the value of the key
    :return: bool, whether the insertion was successful
    """
    key_id = utils.get_id(key, hash_func)
    log.info(f"Finding node to store key {key} with ID {key_id}")
    target = self.find_successor(key_id)
    if not target:
        log.debug("Couldn't find node")
        return False
    log.debug(f"Node found to store key has ID: {target[2]}")
    reply = self.ask_peer(target[:2], "store_key", {"key": key, "value": value, "key_id": key_id})
    if not reply or reply["header"]["status"] not in range(200, 300):
        log.debug("Couldn't store key")
        return False
    log.debug("Pair stored")
    return True
def find_and_delete_key(self, key):
    """
    Finds node that key should be deleted from and deletes it
    That node will deal with deleting backups
    :param key: the key
    :return: bool, whether the deletion was successful
    """
    key_id = utils.get_id(key, hash_func)
    log.info(f"Finding node to delete key {key} with ID {key_id}")
    owner = self.find_successor(key_id)
    if not owner:
        log.debug("Couldn't find node")
        return False
    log.debug(f"Node found to delete key has ID: {owner[2]}")
    reply = self.ask_peer(owner[:2], "delete_key", {"key": key})
    deleted = bool(reply) and reply["header"]["status"] in range(200, 300)
    if not deleted:
        log.debug("Couldn't delete key")
        return False
    log.debug("Pair deleted")
    return True
def move_keys_to_predecessor(self):
    """
    Moves keys from this node to predecessor
    The keys moved will be the ones, which should be in that node instead of this one
    Condition for moving a key: key_id <= other_node.id
    :return: bool; whether move was successful
    """
    # single-node ring (predecessor is this node): nothing can move
    if self.predecessor.node_id == self.node_id:
        return True
    to_move = []
    for key in self.storage:
        # Keys that should be transferred are between current node (lower bound)
        # and new node (inclusive upper bound)
        # As it stands, the keys held by current node fall either after or before the new node
        # The keys that fall between should be left with this node
        # The keys that fall before the new node should be transferred to it
        if utils.is_between_clockwise(self.storage.get_id(key), self.node_id, self.predecessor.node_id,
                                      inclusive_upper=True):
            to_move.append({"key": key, "value": self.storage[key], "key_id": self.storage.get_id(key)})
    if not to_move:
        return True
    # pre_request=True — presumably flags a bulk/streamed transfer; confirm in ask_peer
    response = self.ask_peer(self.predecessor.addr, "batch_store_keys", {"keys": to_move}, pre_request=True)
    if not response or response["header"]["status"] not in range(200, 300):
        return False
    # delete from local storage if keys were moved successfully
    for key_dict in to_move:
        del self.storage[key_dict["key"]]
    return True
def move_keys_to_successor(self):
    """
    Move all keys to successor before exiting
    :return: bool; whether move was successful
    """
    # successor is this node itself: single-node ring, nothing to hand off
    if self.finger_table[0].node_id == self.node_id:
        return True
    pairs = self.storage.dump()
    if not pairs:
        return True
    reply = self.ask_peer(self.finger_table[0].addr, "batch_store_keys", {"keys": pairs}, pre_request=True)
    return bool(reply)
def stabilize(self):
    """
    Stabilize ring by updating successor or successor's predecessor
    :return: None
    """
    # NOTE(review): indentation reconstructed from a whitespace-mangled source;
    # confirm in particular that the final update_predecessor call also runs
    # when the get_predecessor status is not 2xx.
    log.info("Stabilizing...")
    current_successor = self.finger_table[0]
    # ask all successors in successor list until one responds, remove dead ones
    while len(self.successor_list):
        response = self.ask_peer(current_successor.addr, "get_predecessor", {})
        if not response:
            # current successor was dead, get a new one from the successor list
            log.info("Successor is dead, getting next in list")
            log.debug(f"Successor dead is: {current_successor.addr}")
            # if the dead node was also this node's predecessor, forget it
            if self.predecessor and self.predecessor.node_id == current_successor.node_id:
                self.predecessor = None
            current_successor = self.successor_list[0]
            del self.successor_list[0]
            continue
        # live successor found: promote it and stop scanning
        self.finger_table[0] = current_successor
        break
    else:
        # while-else: list exhausted without finding a live node — rejoin from seed
        log.info("All successors in successor list are dead")
        self.join_ring()
        return
    status_ok = response["header"]["status"] in range(200, 300)
    if status_ok:
        # if successor has this node as predecessor
        if self.node_id == response["body"]["node_id"]:
            log.debug("Successor's predecessor is this node")
            return
        # check if successor's predecessor is dead
        poll_response = self.ask_peer((response["body"]["ip"], response["body"]["port"]), "poll", {})
        # if it is, notify successor and return
        if not poll_response:
            self.ask_peer(self.finger_table[0].addr, "clear_predecessor", {})
            return
        # if new node joined between this node and its successor
        if utils.is_between_clockwise(response["body"]["node_id"], self.node_id, self.finger_table[0].node_id):
            # shift successor list by 1
            self.successor_list.insert(0, self.finger_table[0])
            del self.successor_list[-1]
            # update successor
            self.finger_table[0] = Finger((response["body"]["ip"], response["body"]["port"]),
                                          response["body"]["node_id"])
            log.info("Got new successor")
            log.debug(f"New succesor address: {response['body']['ip'], response['body']['port']} with node ID: "
                      f"{response['body']['node_id']}")
    # update successor's predecessor to be this node
    self.ask_peer(self.finger_table[0].addr, "update_predecessor", {"ip": self.SERVER_ADDR[0],
                                                                   "port": self.SERVER_ADDR[1],
                                                                   "node_id": self.node_id})
    log.debug("Asked successor to make this node its predecessor")
def fix_fingers(self):
    """
    Fixes a random finger of the finger table
    :return: None
    """
    # TODO maybe priority here (and in successor list?)
    log.info("Fixing a finger...")
    finger_index = random.randint(1, utils.params["ring"]["bits"] - 1)
    log.debug(f"Picked finger {finger_index}")
    ring_size = 2 ** utils.params["ring"]["bits"]
    target_id = (self.node_id + 2 ** finger_index) % ring_size
    found = self.find_successor(target_id)
    if not found:
        return
    self.finger_table[finger_index] = Finger((found[0], found[1]), found[2])
def fix_successor_list(self):
"""
| 1. If successor list empty or index<0, | |
<filename>rl_coach/spaces.py
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
from enum import Enum
from itertools import product
from typing import Union, List, Dict, Tuple, Callable
import numpy as np
import scipy
import scipy.spatial
from rl_coach.core_types import ActionType, ActionInfo
from rl_coach.utils import eps
class Space(object):
    """
    A space defines a set of valid values, optionally bounded element-wise by low/high.
    """
    def __init__(self, shape: Union[int, tuple, list, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf,
                 high: Union[None, int, float, np.ndarray]=np.inf):
        """
        :param shape: the shape of the space
        :param low: the lowest values possible in the space. can be an array defining the lowest values per point,
                    or a single value defining the general lowest values
        :param high: the highest values possible in the space. can be an array defining the highest values per point,
                     or a single value defining the general highest values
        :raises ValueError: if shape is a negative int, or low/high mismatch the shape or each other
        """
        # the number of dimensions is the number of axes in the shape. it will be set in the shape setter
        self.num_dimensions = 0
        # the number of elements is the number of possible actions if the action space was discrete.
        # it will be set in the shape setter
        self.num_elements = 0
        # bounds start as None so each bound setter can cross-validate against the other
        self._low = self._high = None
        self._shape = self.shape = shape
        self._low = self.low = low
        self._high = self.high = high
        # we allow zero sized spaces which means that the space is empty. this is useful for environments with no
        # measurements for example.
        if type(shape) == int and shape < 0:
            raise ValueError("The shape of the space must be a non-negative number")

    @property
    def shape(self):
        return self._shape

    @shape.setter
    def shape(self, val: Union[int, tuple, list, np.ndarray]):
        # normalize the shape to an np.ndarray
        self._shape = val
        if type(self._shape) == int:
            self._shape = np.array([self._shape])
        if type(self._shape) == tuple or type(self._shape) == list:
            self._shape = np.array(self._shape)
        # the shape is now an np.ndarray
        self.num_dimensions = len(self._shape)
        self.num_elements = int(np.prod(self._shape))

    @property
    def low(self):
        if hasattr(self, '_low'):
            return self._low
        else:
            return None

    @low.setter
    def low(self, val: Union[None, int, float, np.ndarray]):
        # BUGFIX: compare shapes as tuples. The previous check used
        # np.all(val.shape != self.shape), which only raised when *every* axis
        # differed, silently accepting partially mismatched shapes.
        if type(val) == np.ndarray and type(self.shape) == np.ndarray and tuple(val.shape) != tuple(self.shape):
            raise ValueError("The low values shape don't match the shape of the space")
        elif self.high is not None and not np.all(self.high >= val):
            raise ValueError("At least one of the axes-parallel lines defining the space has high values which "
                             "are lower than the given low values")
        else:
            self._low = val
        # we allow using a number to define the low values, but we immediately convert it to an array which defines
        # the low values for all the space dimensions in order to expose a consistent value type
        if type(self._low) == int or type(self._low) == float:
            self._low = np.ones(self.shape)*self._low

    @property
    def high(self):
        if hasattr(self, '_high'):
            return self._high
        else:
            return None

    @high.setter
    def high(self, val: Union[None, int, float, np.ndarray]):
        # BUGFIX: same tuple-based shape comparison as in the low setter.
        if type(val) == np.ndarray and type(self.shape) == np.ndarray and tuple(val.shape) != tuple(self.shape):
            raise ValueError("The high values shape don't match the shape of the space")
        elif self.low is not None and not np.all(self.low <= val):
            raise ValueError("At least one of the axes-parallel lines defining the space has low values which "
                             "are higher than the given high values")
        else:
            self._high = val
        # we allow using a number to define the high values, but we immediately convert it to an array which defines
        # the high values for all the space dimensions in order to expose a consistent value type
        if type(self._high) == int or type(self._high) == float:
            self._high = np.ones(self.shape)*self._high

    def contains(self, val: Union[int, float, np.ndarray]) -> bool:
        """
        Checks if value is contained by this space. The shape must match and
        all of the values must be within the low and high bounds.
        :param val: a value to check
        :return: True / False depending on if the val matches the space definition
        """
        # a scalar can only match a single-element space
        if (type(val) == int or type(val) == float) and not np.all(self.shape == np.ones(1)):
            return False
        if type(val) == np.ndarray and not np.all(val.shape == self.shape):
            return False
        if (self.low is not None and not np.all(val >= self.low)) \
                or (self.high is not None and not np.all(val <= self.high)):
            # TODO: check the performance overhead this causes
            return False
        return True

    def is_valid_index(self, index: np.ndarray) -> bool:
        """
        Checks if a given multidimensional index is within the bounds of the shape of the space
        :param index: a multidimensional index
        :return: True if the index is within the shape of the space. False otherwise
        """
        if len(index) != self.num_dimensions:
            return False
        if np.any(index < np.zeros(self.num_dimensions)) or np.any(index >= self.shape):
            return False
        return True

    def sample(self) -> np.ndarray:
        """
        Sample the defined space, either uniformly, if space bounds are defined, or Normal distributed if no
        bounds are defined
        :return: A numpy array sampled from the space
        """
        # if there are infinite bounds, we sample using gaussian noise with mean 0 and std 1
        if np.any(self.low == -np.inf) or np.any(self.high == np.inf):
            return np.random.normal(0, 1, self.shape)
        else:
            return np.random.uniform(self.low, self.high, self.shape)

    def val_matches_space_definition(self, val: Union[int, float, np.ndarray]) -> bool:
        # NOTE(review): 'screen' is not imported in this module's visible imports —
        # presumably rl_coach's console logger; confirm before relying on this path.
        screen.warning(
            "Space.val_matches_space_definition will be deprecated soon. Use "
            "contains instead."
        )
        return self.contains(val)

    def is_point_in_space_shape(self, point: np.ndarray) -> bool:
        screen.warning(
            "Space.is_point_in_space_shape will be deprecated soon. Use "
            "is_valid_index instead."
        )
        return self.is_valid_index(point)
class RewardSpace(Space):
    """A reward space, optionally carrying a threshold that marks success."""
    def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf,
                 high: Union[None, int, float, np.ndarray]=np.inf,
                 reward_success_threshold: Union[None, int, float]=None):
        """
        :param reward_success_threshold: reward value at/above which a run counts
            as successful — TODO confirm episode-vs-step semantics with consumers
        """
        super().__init__(shape, low, high)
        self.reward_success_threshold = reward_success_threshold
"""
Observation Spaces
"""
class ObservationSpace(Space):
    """Base class for observation spaces; adds nothing beyond Space itself."""
    def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf,
                 high: Union[None, int, float, np.ndarray]=np.inf):
        super().__init__(shape, low, high)
class VectorObservationSpace(ObservationSpace):
    """
    An observation space which is defined as a vector of elements. This can be particularly useful for environments
    which return measurements, such as in robotic environments.
    """
    def __init__(self, shape: int, low: Union[None, int, float, np.ndarray]=-np.inf,
                 high: Union[None, int, float, np.ndarray]=np.inf, measurements_names: List[str]=None):
        """
        :param shape: number of elements in the measurements vector
        :param measurements_names: optional names for the elements; may name fewer than `shape` elements
        :raises ValueError: if more names than elements are given
        """
        if measurements_names is None:
            measurements_names = []
        if len(measurements_names) > shape:
            # BUGFIX: the message referred to a nonexistent "measurement_names" parameter
            raise ValueError("measurements_names size {} is larger than shape {}.".format(
                len(measurements_names), shape))
        self.measurements_names = measurements_names
        super().__init__(shape, low, high)
class TensorObservationSpace(ObservationSpace):
    """
    An observation space which defines observations with arbitrary shape. This can be particularly useful for
    environments with non image input.
    """
    def __init__(self, shape: np.ndarray, low: Union[None, int, float, np.ndarray]=-np.inf,
                 high: Union[None, int, float, np.ndarray]=np.inf):
        """
        :param shape: the (arbitrary) shape of a single observation
        :param low: lower bound(s), defaulting to unbounded
        :param high: upper bound(s), defaulting to unbounded
        """
        # BUGFIX: low/high previously used the values -np.inf/np.inf as *type
        # annotations* and had no defaults, forcing every caller to pass them.
        super().__init__(shape, low, high)
class PlanarMapsObservationSpace(ObservationSpace):
    """
    An observation space which defines a stack of 2D observations. For example, an environment which returns
    a stack of segmentation maps like in Starcraft.
    """
    def __init__(self, shape: np.ndarray, low: int, high: int, channels_axis: int=-1):
        """
        :param shape: (H, W) for a single map, or 3 dims including a channels axis
        :param low: minimum valid element value
        :param high: maximum valid element value
        :param channels_axis: index of the channels dimension within shape
        :raises ValueError: if shape has fewer than 2 or more than 3 dimensions
        """
        super().__init__(shape, low, high)
        self.channels_axis = channels_axis
        if not 2 <= len(shape) <= 3:
            # BUGFIX: the message claimed exactly 3 dims were required although 2 is accepted
            raise ValueError("Planar maps observations must have 2 or 3 dimensions - an optional channels dimension "
                             "and 2 maps dimensions, not {}".format(len(shape)))
        if len(shape) == 2:
            # a single 2D map is treated as one channel
            self.channels = 1
        else:
            self.channels = shape[channels_axis]
class ImageObservationSpace(PlanarMapsObservationSpace):
    """
    An observation space which is a private case of the PlanarMapsObservationSpace, where the stack of 2D observations
    represent a RGB image, or a grayscale image.
    """
    def __init__(self, shape: Union[np.ndarray], high: int, channels_axis: int=-1):
        """Images are bounded below by 0; `high` is typically the max pixel value."""
        # TODO: consider allowing arbitrary low values for images
        super().__init__(shape, 0, high, channels_axis)
        self.has_colors = self.channels == 3
        if self.channels not in (1, 3):
            raise ValueError("Image observations must have 1 or 3 channels, not {}".format(self.channels))
# TODO: mixed observation spaces (image + measurements, image + segmentation + depth map, etc.)
class StateSpace(object):
    """A named collection of sub-spaces making up a full environment state."""

    def __init__(self, sub_spaces: "Dict[str, Space]"):
        """
        :param sub_spaces: mapping from state component name to its Space
        """
        self.sub_spaces = sub_spaces

    def __getitem__(self, item):
        """Return the sub-space registered under the given name."""
        return self.sub_spaces[item]

    def __setitem__(self, key, value):
        """Register (or replace) a sub-space under the given name."""
        self.sub_spaces[key] = value
"""
Action Spaces
"""
class ActionSpace(Space):
def __init__(self, shape: Union[int, np.ndarray], low: Union[None, int, float, np.ndarray]=-np.inf,
high: Union[None, int, float, np.ndarray]=np.inf, descriptions: Union[None, List, Dict]=None,
default_action: ActionType=None):
super().__init__(shape, low, | |
import operator
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.baseobjspace import ObjSpace
from pypy.interpreter.function import Function, Method, FunctionWithFixedCode
from pypy.interpreter.argument import Arguments
from pypy.interpreter.typedef import default_identity_hash
from rpython.tool.sourcetools import compile2, func_with_new_name
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
def object_getattribute(space):
    "Utility that returns the app-level descriptor object.__getattribute__."
    pair = space.lookup_in_type_where(space.w_object, '__getattribute__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
object_getattribute._annspecialcase_ = 'specialize:memo'
def object_setattr(space):
    "Utility that returns the app-level descriptor object.__setattr__."
    pair = space.lookup_in_type_where(space.w_object, '__setattr__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
object_setattr._annspecialcase_ = 'specialize:memo'
def object_delattr(space):
    "Utility that returns the app-level descriptor object.__delattr__."
    pair = space.lookup_in_type_where(space.w_object, '__delattr__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
object_delattr._annspecialcase_ = 'specialize:memo'
def object_hash(space):
    "Utility that returns the app-level descriptor object.__hash__."
    pair = space.lookup_in_type_where(space.w_object, '__hash__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
object_hash._annspecialcase_ = 'specialize:memo'
def type_eq(space):
    "Utility that returns the app-level descriptor type.__eq__."
    pair = space.lookup_in_type_where(space.w_type, '__eq__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
type_eq._annspecialcase_ = 'specialize:memo'
def list_iter(space):
    "Utility that returns the app-level descriptor list.__iter__."
    pair = space.lookup_in_type_where(space.w_list, '__iter__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
list_iter._annspecialcase_ = 'specialize:memo'
def tuple_iter(space):
    "Utility that returns the app-level descriptor tuple.__iter__."
    pair = space.lookup_in_type_where(space.w_tuple, '__iter__')
    return pair[1]
# memo specialization: constant-folded per space instance by the RPython annotator
tuple_iter._annspecialcase_ = 'specialize:memo'
def raiseattrerror(space, w_obj, name, w_descr=None):
    """Raise AttributeError: read-only when a descriptor is given, missing otherwise."""
    if w_descr is not None:
        raise oefmt(space.w_AttributeError,
                    "'%T' object attribute '%s' is read-only", w_obj, name)
    raise oefmt(space.w_AttributeError,
                "'%T' object has no attribute '%s'", w_obj, name)
# Helpers for old-style and mix-style mixup
def _same_class_w(space, w_obj1, w_obj2, w_typ1, w_typ2):
    # True if both objects have the same class. Old-style instances all share
    # the interpreter-level type W_InstanceObject, so their app-level w_class
    # is compared instead of the wrapped types.
    if (space.is_oldstyle_instance(w_obj1) and
        space.is_oldstyle_instance(w_obj2)):
        # the asserts narrow the annotated type for the RPython translator
        assert isinstance(w_obj1, W_InstanceObject)
        assert isinstance(w_obj2, W_InstanceObject)
        return space.is_w(w_obj1.w_class, w_obj2.w_class)
    return space.is_w(w_typ1, w_typ2)
class Object(object):
    """Default implementations of object.__getattribute__, __setattr__,
    __delattr__ and __init__, following CPython's descriptor-protocol rules."""

    def descr__getattribute__(space, w_obj, w_name):
        name = space.str_w(w_name)
        w_descr = space.lookup(w_obj, name)
        if w_descr is not None:
            if space.is_data_descr(w_descr):
                # Only override if __get__ is defined, too, for compatibility
                # with CPython.
                w_get = space.lookup(w_descr, "__get__")
                if w_get is not None:
                    w_type = space.type(w_obj)
                    return space.get_and_call_function(w_get, w_descr, w_obj,
                                                       w_type)
        # the instance __dict__ takes precedence over non-data descriptors
        w_value = w_obj.getdictvalue(space, name)
        if w_value is not None:
            return w_value
        if w_descr is not None:
            # non-data descriptor (or data descriptor lacking __get__)
            return space.get(w_descr, w_obj)
        raiseattrerror(space, w_obj, name)

    def descr__setattr__(space, w_obj, w_name, w_value):
        name = space.str_w(w_name)
        w_descr = space.lookup(w_obj, name)
        if w_descr is not None:
            if space.is_data_descr(w_descr):
                # a data descriptor's __set__ wins over the instance dict
                space.set(w_descr, w_obj, w_value)
                return
        if w_obj.setdictvalue(space, name, w_value):
            return
        # no writable dict slot: report read-only (w_descr selects the message)
        raiseattrerror(space, w_obj, name, w_descr)

    def descr__delattr__(space, w_obj, w_name):
        name = space.str_w(w_name)
        w_descr = space.lookup(w_obj, name)
        if w_descr is not None:
            if space.is_data_descr(w_descr):
                # a data descriptor's __delete__ wins over the instance dict
                space.delete(w_descr, w_obj)
                return
        if w_obj.deldictvalue(space, name):
            return
        raiseattrerror(space, w_obj, name, w_descr)

    def descr__init__(space, w_obj, __args__):
        # object.__init__ accepts and ignores any arguments
        pass
# JIT driver for the app-level __contains__ fallback loop: 'w_type' is the
# green (trace-constant) variable; red variables are tracked automatically.
contains_jitdriver = jit.JitDriver(name='contains',
                                   greens=['w_type'], reds='auto')
class DescrOperation(object):
# This is meant to be a *mixin*.
def is_data_descr(space, w_obj):
    """A data descriptor is one defining __set__ and/or __delete__."""
    if space.lookup(w_obj, '__set__') is not None:
        return True
    return space.lookup(w_obj, '__delete__') is not None
def get_and_call_args(space, w_descr, w_obj, args):
    """Bind descriptor w_descr to w_obj and call it with the given Arguments."""
    # a special case for performance and to avoid infinite recursion
    if isinstance(w_descr, Function):
        return w_descr.call_obj_args(w_obj, args)
    else:
        w_impl = space.get(w_descr, w_obj)
        return space.call_args(w_impl, args)
def get_and_call_function(space, w_descr, w_obj, *args_w):
    """Bind descriptor w_descr to w_obj and call it with positional args_w."""
    typ = type(w_descr)
    # a special case for performance and to avoid infinite recursion
    if typ is Function or typ is FunctionWithFixedCode:
        # isinstance(typ, Function) would not be correct here:
        # for a BuiltinFunction we must not use that shortcut, because a
        # builtin function binds differently than a normal function
        # see test_builtin_as_special_method_is_not_bound
        # in interpreter/test/test_function.py
        # the fastcall paths are purely for performance, but the resulting
        # increase of speed is huge
        return w_descr.funccall(w_obj, *args_w)
    else:
        # generic path: run the descriptor protocol, then a full call
        args = Arguments(space, list(args_w))
        w_impl = space.get(w_descr, w_obj)
        return space.call_args(w_impl, args)
def call_args(space, w_obj, args):
    """Call w_obj with Arguments; shortcut plain functions/methods, else __call__."""
    # two special cases for performance
    if isinstance(w_obj, Function):
        return w_obj.call_args(args)
    if isinstance(w_obj, Method):
        return w_obj.call_args(args)
    w_descr = space.lookup(w_obj, '__call__')
    if w_descr is None:
        raise oefmt(space.w_TypeError,
                    "'%T' object is not callable", w_obj)
    return space.get_and_call_args(w_descr, w_obj, args)
def get(space, w_descr, w_obj, w_type=None):
    """Descriptor protocol: w_descr.__get__(w_obj, w_type).

    Objects without __get__ are returned unchanged (non-descriptors).
    """
    w_get = space.lookup(w_descr, '__get__')
    if w_get is None:
        return w_descr
    if w_type is None:
        w_type = space.type(w_obj)
    return space.get_and_call_function(w_get, w_descr, w_obj, w_type)
def set(space, w_descr, w_obj, w_val):
    """Descriptor protocol: w_descr.__set__(w_obj, w_val); raises if absent."""
    w_set = space.lookup(w_descr, '__set__')
    if w_set is None:
        raise oefmt(space.w_AttributeError,
                    "'%T' object is not a descriptor with set", w_descr)
    return space.get_and_call_function(w_set, w_descr, w_obj, w_val)
def delete(space, w_descr, w_obj):
    """Descriptor protocol: w_descr.__delete__(w_obj); raises if absent."""
    w_delete = space.lookup(w_descr, '__delete__')
    if w_delete is None:
        raise oefmt(space.w_AttributeError,
                    "'%T' object is not a descriptor with delete", w_descr)
    return space.get_and_call_function(w_delete, w_descr, w_obj)
def getattr(space, w_obj, w_name):
    """App-level attribute lookup: dispatch to the type's __getattribute__."""
    # may be overridden in StdObjSpace
    w_descr = space.lookup(w_obj, '__getattribute__')
    return space._handle_getattribute(w_descr, w_obj, w_name)
def _handle_getattribute(space, w_descr, w_obj, w_name):
    """Call __getattribute__, falling back to __getattr__ on AttributeError."""
    try:
        if w_descr is None:   # obscure case
            raise OperationError(space.w_AttributeError, space.w_None)
        return space.get_and_call_function(w_descr, w_obj, w_name)
    except OperationError, e:
        # only an app-level AttributeError triggers the __getattr__ fallback
        if not e.match(space, space.w_AttributeError):
            raise
        w_descr = space.lookup(w_obj, '__getattr__')
        if w_descr is None:
            raise
        return space.get_and_call_function(w_descr, w_obj, w_name)
def setattr(space, w_obj, w_name, w_val):
    """App-level attribute assignment: dispatch to the type's __setattr__."""
    w_descr = space.lookup(w_obj, '__setattr__')
    if w_descr is None:
        raise oefmt(space.w_AttributeError,
                    "'%T' object is readonly", w_obj)
    return space.get_and_call_function(w_descr, w_obj, w_name, w_val)
def delattr(space, w_obj, w_name):
    """App-level attribute deletion: dispatch to the type's __delattr__."""
    w_descr = space.lookup(w_obj, '__delattr__')
    if w_descr is None:
        raise oefmt(space.w_AttributeError,
                    "'%T' object does not support attribute removal",
                    w_obj)
    return space.get_and_call_function(w_descr, w_obj, w_name)
def is_true(space, w_obj):
    """Truth-test w_obj via __nonzero__, falling back to __len__ (Python 2 rules)."""
    w_descr = space.lookup(w_obj, "__nonzero__")
    if w_descr is None:
        w_descr = space.lookup(w_obj, "__len__")
        if w_descr is None:
            # objects with neither __nonzero__ nor __len__ are always true
            return True
        # call __len__
        w_res = space.get_and_call_function(w_descr, w_obj)
        return space._check_len_result(w_res) != 0
    # call __nonzero__
    w_res = space.get_and_call_function(w_descr, w_obj)
    # more shortcuts for common cases
    if space.is_w(w_res, space.w_False):
        return False
    if space.is_w(w_res, space.w_True):
        return True
    w_restype = space.type(w_res)
    # Note there is no check for bool here because the only possible
    # instances of bool are w_False and w_True, which are checked above.
    if space.is_w(w_restype, space.w_int):
        return space.int_w(w_res) != 0
    else:
        msg = "__nonzero__ should return bool or integer"
        raise OperationError(space.w_TypeError, space.wrap(msg))
def nonzero(space, w_obj):
    """Wrap the truth value of w_obj as app-level True/False."""
    return space.w_True if space.is_true(w_obj) else space.w_False
def len(space, w_obj):
    """App-level len(obj): dispatch to __len__ and validate the result."""
    w_descr = space.lookup(w_obj, '__len__')
    if w_descr is None:
        raise oefmt(space.w_TypeError, "'%T' has no length", w_obj)
    w_res = space.get_and_call_function(w_descr, w_obj)
    return space.wrap(space._check_len_result(w_res))
def _check_len_result(space, w_obj):
    """Convert a __len__ result to a non-negative interp-level int."""
    # Will complain if result is too big.
    result = space.int_w(space.int(w_obj))
    if result < 0:
        raise oefmt(space.w_ValueError, "__len__() should return >= 0")
    return result
def iter(space, w_obj):
    """App-level iter(obj): use __iter__, or wrap __getitem__ in a sequence iterator."""
    w_descr = space.lookup(w_obj, '__iter__')
    if w_descr is None:
        # legacy protocol: objects with __getitem__ are iterable by index
        w_descr = space.lookup(w_obj, '__getitem__')
        if w_descr is None:
            raise oefmt(space.w_TypeError,
                        "'%T' object is not iterable", w_obj)
        return space.newseqiter(w_obj)
    w_iter = space.get_and_call_function(w_descr, w_obj)
    w_next = space.lookup(w_iter, 'next')
    if w_next is None:
        # __iter__ must return an object with a next() method (Python 2 protocol)
        raise OperationError(space.w_TypeError,
                             space.wrap("iter() returned non-iterator"))
    return w_iter
def next(space, w_obj):
    """App-level next(obj): dispatch to the iterator's next() method."""
    w_descr = space.lookup(w_obj, 'next')
    if w_descr is None:
        raise oefmt(space.w_TypeError,
                    "'%T' object is not an iterator", w_obj)
    return space.get_and_call_function(w_descr, w_obj)
def getitem(space, w_obj, w_key):
    """App-level obj[key]: dispatch to the type's __getitem__."""
    w_descr = space.lookup(w_obj, '__getitem__')
    if w_descr is None:
        raise oefmt(space.w_TypeError,
                    "'%T' object is not subscriptable", w_obj)
    return space.get_and_call_function(w_descr, w_obj, w_key)
def setitem(space, w_obj, w_key, w_val):
    """App-level obj[key] = val: dispatch to the type's __setitem__."""
    w_descr = space.lookup(w_obj, '__setitem__')
    if w_descr is None:
        raise oefmt(space.w_TypeError,
                    "'%T' object does not support item assignment", w_obj)
    return space.get_and_call_function(w_descr, w_obj, w_key, w_val)
def delitem(space, w_obj, w_key):
    """App-level del obj[key]: dispatch to the type's __delitem__."""
    w_descr = space.lookup(w_obj, '__delitem__')
    if w_descr is None:
        raise oefmt(space.w_TypeError,
                    "'%T' object does not support item deletion", w_obj)
    return space.get_and_call_function(w_descr, w_obj, w_key)
def getslice(space, w_obj, w_start, w_stop):
    """obj[start:stop] via the legacy __getslice__, else __getitem__ with a slice."""
    w_descr = space.lookup(w_obj, '__getslice__')
    if w_descr is None:
        w_slice = space.newslice(w_start, w_stop, space.w_None)
        return space.getitem(w_obj, w_slice)
    # old_slice_range (defined elsewhere) presumably normalizes the indices
    # for the legacy protocol — confirm
    w_start, w_stop = old_slice_range(space, w_obj, w_start, w_stop)
    return space.get_and_call_function(w_descr, w_obj, w_start, w_stop)
def setslice(space, w_obj, w_start, w_stop, w_sequence):
    """obj[start:stop] = seq via the legacy __setslice__, else __setitem__ with a slice."""
    w_descr = space.lookup(w_obj, '__setslice__')
    if w_descr is None:
        w_slice = space.newslice(w_start, w_stop, space.w_None)
        return space.setitem(w_obj, w_slice, w_sequence)
    # old_slice_range (defined elsewhere) presumably normalizes the indices
    # for the legacy protocol — confirm
    w_start, w_stop = old_slice_range(space, w_obj, w_start, w_stop)
    return space.get_and_call_function(w_descr, w_obj, w_start, w_stop, w_sequence)
def delslice(space, w_obj, w_start, w_stop):
    """del obj[start:stop] via the legacy __delslice__, else __delitem__ with a slice."""
    w_descr = space.lookup(w_obj, '__delslice__')
    if w_descr is None:
        w_slice = space.newslice(w_start, w_stop, space.w_None)
        return space.delitem(w_obj, w_slice)
    # old_slice_range (defined elsewhere) presumably normalizes the indices
    # for the legacy protocol — confirm
    w_start, w_stop = old_slice_range(space, w_obj, w_start, w_stop)
    return space.get_and_call_function(w_descr, w_obj, w_start, w_stop)
def format(space, w_obj, w_format_spec):
    """App-level format(obj, spec): dispatch to __format__ and type-check the result."""
    w_descr = space.lookup(w_obj, '__format__')
    if w_descr is None:
        raise oefmt(space.w_TypeError,
                    "'%T' object does not define __format__", w_obj)
    w_res = space.get_and_call_function(w_descr, w_obj, w_format_spec)
    # __format__ must return str or unicode
    if not space.isinstance_w(w_res, space.w_basestring):
        raise oefmt(space.w_TypeError,
                    "%T.__format__ must return string or unicode, not %T",
                    w_obj, w_res)
    return w_res
def pow(space, w_obj1, w_obj2, w_obj3):
w_typ1 = space.type(w_obj1)
w_typ2 = space.type(w_obj2)
w_left_src, w_left_impl = space.lookup_in_type_where(w_typ1, '__pow__')
if _same_class_w(space, w_obj1, w_obj2, w_typ1, w_typ2):
w_right_impl = None
else:
w_right_src, w_right_impl = space.lookup_in_type_where(w_typ2, '__rpow__')
# sse binop_impl
if (w_left_src is not w_right_src
and space.is_true(space.issubtype(w_typ2, w_typ1))):
if (w_left_src and w_right_src and
not space.abstract_issubclass_w(w_left_src, w_right_src) and
not space.abstract_issubclass_w(w_typ1, w_right_src)):
w_obj1, w_obj2 = w_obj2, w_obj1
w_left_impl, w_right_impl = w_right_impl, w_left_impl
if w_left_impl is not None:
if space.is_w(w_obj3, space.w_None):
w_res = space.get_and_call_function(w_left_impl, w_obj1, w_obj2)
else:
w_res = space.get_and_call_function(w_left_impl, w_obj1, w_obj2, w_obj3)
if _check_notimplemented(space, w_res):
return w_res
if w_right_impl is | |
import json
import re
from datetime import datetime, timezone
from functools import lru_cache
import logging
import os
import dataclasses
import urllib
from jira import JIRA
from .entities import Issue, Comment, User, Source, Metadata, CommentMetadata
from . import utils
__all__ = ["Client", "Formatter", "get_username", "get_password"]
logger = logging.getLogger(__name__)
def get_username():
    """Return the JIRA username from JIRAHUB_JIRA_USERNAME, or None if unset."""
    return os.getenv("JIRAHUB_JIRA_USERNAME")
def get_password():
    """Return the JIRA password from JIRAHUB_JIRA_PASSWORD, or None if unset."""
    return os.getenv("JIRAHUB_JIRA_PASSWORD")
_JIRA_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f%z"
def _parse_datetime(value):
return datetime.strptime(value, _JIRA_DATETIME_FORMAT).astimezone(timezone.utc)
def _client_field_name(field_id):
return f"customfield_{field_id}"
def _jql_field_name(field_id):
return f"cf[{field_id}]"
class _IssueMapper:
"""
This class is responsible for mapping the fields of the JIRA client's resource objects
to our own in jirahub.entities.
"""
def __init__(self, config, bot_username):
self._config = config
self._bot_username = bot_username
def get_user(self, raw_user):
    """Map a jira client user resource onto a jirahub User entity.

    Falls back to the account name when no display name is set.
    """
    display_name = raw_user.displayName or raw_user.name
    return User(source=Source.JIRA, username=raw_user.name, display_name=display_name, raw_user=raw_user)
def get_comment(self, raw_comment):
    """Map a jira client comment resource onto a jirahub Comment entity.

    :param raw_comment: comment resource returned by the jira library
    :return: Comment with UTC timestamps and an is_bot flag
    """
    user = self.get_user(raw_comment.author)
    # comments authored by the configured bot account are flagged so jirahub
    # can recognize its own mirrored output
    is_bot = user.username == self._bot_username
    body = raw_comment.body
    if body is None:
        # normalize a missing body to the empty string
        body = ""
    comment_id = raw_comment.id
    created_at = _parse_datetime(raw_comment.created)
    updated_at = _parse_datetime(raw_comment.updated)
    return Comment(
        source=Source.JIRA,
        is_bot=is_bot,
        comment_id=comment_id,
        created_at=created_at,
        updated_at=updated_at,
        user=user,
        body=body,
        raw_comment=raw_comment,
    )
def get_raw_comment_fields(self, fields, comment=None):
raw_fields = {}
if "body" in fields:
if fields["body"]:
body = fields["body"]
else:
body = ""
raw_fields["body"] = body
return raw_fields
def get_issue(self, raw_issue, raw_comments):
user = self.get_user(raw_issue.fields.creator)
is_bot = user.username == self._bot_username
comments = [self.get_comment(c) for c in raw_comments]
body = raw_issue.fields.description
if body is None:
body = ""
issue_id = raw_issue.key
project = raw_issue.fields.project.key
created_at = _parse_datetime(raw_issue.fields.created)
updated_at = _parse_datetime(raw_issue.fields.updated)
title = raw_issue.fields.summary
labels = set([urllib.parse.unquote(l) for l in raw_issue.fields.labels])
milestones = {v.name for v in raw_issue.fields.fixVersions}
components = {c.name for c in raw_issue.fields.components}
if raw_issue.fields.priority:
priority = raw_issue.fields.priority.name
else:
priority = None
if raw_issue.fields.issuetype:
issue_type = raw_issue.fields.issuetype.name
else:
issue_type = None
is_open = raw_issue.fields.status.name.lower() not in [s.lower() for s in self._config.jira.closed_statuses]
metadata = self._get_metadata(raw_issue)
return Issue(
source=Source.JIRA,
is_bot=is_bot,
issue_id=issue_id,
project=project,
created_at=created_at,
updated_at=updated_at,
user=user,
title=title,
body=body,
labels=labels,
is_open=is_open,
priority=priority,
issue_type=issue_type,
milestones=milestones,
components=components,
comments=comments,
metadata=metadata,
raw_issue=raw_issue,
)
def get_raw_issue_fields(self, fields, issue=None):
fields = fields.copy()
raw_fields = {}
if "title" in fields:
raw_fields["summary"] = fields.pop("title")
if "body" in fields:
if fields["body"]:
body = fields["body"]
else:
body = ""
raw_fields["description"] = body
fields.pop("body")
if "metadata" in fields:
raw_fields.update(self._get_raw_metadata_fields(fields.pop("metadata")))
if "labels" in fields:
if fields["labels"]:
raw_fields["labels"] = [urllib.parse.quote(l) for l in fields["labels"]]
else:
raw_fields["labels"] = []
fields.pop("labels")
if "milestones" in fields:
if fields["milestones"]:
raw_fields["fixVersions"] = self._make_name_list(fields["milestones"])
else:
raw_fields["fixVersions"] = []
fields.pop("milestones")
if "components" in fields:
if fields["components"]:
raw_fields["components"] = self._make_name_list(fields["components"])
else:
raw_fields["components"] = []
fields.pop("components")
if "priority" in fields:
if fields["priority"]:
raw_fields["priority"] = {"name": fields["priority"]}
else:
raw_fields["priority"] = None
fields.pop("priority")
if "issue_type" in fields:
raw_fields["issuetype"] = {"name": fields.pop("issue_type")}
transition = None
if issue is None:
if "is_open" in fields:
if fields["is_open"]:
status = self._config.jira.open_status
else:
status = self._config.jira.close_status
else:
status = self._config.jira.open_status
if status:
raw_fields["status"] = {"name": status}
elif "is_open" in fields:
if fields["is_open"]:
transition = self._config.jira.reopen_status
else:
transition = self._config.jira.close_status
fields.pop("is_open", None)
raw_fields.update(fields)
return raw_fields, transition
def _make_name_list(self, values):
return [{"name": v} for v in values]
def _get_metadata(self, raw_issue):
kwargs = {}
field_name = _client_field_name(self._config.jira.github_issue_url_field_id)
github_issue_url = getattr(raw_issue.fields, field_name)
if github_issue_url:
github_repository, github_issue_id = utils.extract_github_ids_from_url(github_issue_url)
kwargs["github_repository"] = github_repository
kwargs["github_issue_id"] = github_issue_id
field_name = _client_field_name(self._config.jira.jirahub_metadata_field_id)
metadata_json = getattr(raw_issue.fields, field_name)
if metadata_json:
try:
metadata_dict = json.loads(metadata_json)
except Exception:
logger.exception("Failed to deserialize JSON")
else:
kwargs.update(metadata_dict)
if kwargs.get("comments"):
kwargs["comments"] = [CommentMetadata(**c) for c in kwargs["comments"]]
return Metadata(**kwargs)
def _get_raw_metadata_fields(self, metadata):
raw_fields = {}
if not metadata:
return {
_client_field_name(self._config.jira.github_issue_url_field_id): None,
_client_field_name(self._config.jira.jirahub_metadata_field_id): None,
}
if metadata.github_repository and metadata.github_issue_id:
github_issue_url = utils.make_github_issue_url(metadata.github_repository, metadata.github_issue_id)
else:
github_issue_url = None
field_name = _client_field_name(self._config.jira.github_issue_url_field_id)
raw_fields[field_name] = github_issue_url
metadata_dict = dataclasses.asdict(metadata)
metadata_dict.pop("github_repository")
metadata_dict.pop("github_issue_id")
field_name = _client_field_name(self._config.jira.jirahub_metadata_field_id)
raw_fields[field_name] = json.dumps(metadata_dict)
return raw_fields
class Client:
    """JIRA-side client: wraps the ``jira`` library and converts its raw
    resources to and from jirahub entities via ``_IssueMapper``."""

    # Number of issues requested per search_issues page.
    _PAGE_SIZE = 50

    @classmethod
    def from_config(cls, config):
        """Build a Client from config, authenticating with the credentials
        from the JIRAHUB_JIRA_USERNAME / JIRAHUB_JIRA_PASSWORD env vars."""
        jira = JIRA(
            config.jira.server, basic_auth=(get_username(), get_password()), max_retries=config.jira.max_retries
        )
        return cls(config, jira, get_username())

    def __init__(self, config, jira, bot_username):
        self._config = config
        self._jira = jira
        self._mapper = _IssueMapper(config, bot_username)
        # Cache for get_create_metadata, keyed on its `expand` argument.
        # FIX: this used to be an @lru_cache() on the method itself, which
        # keys on `self` and keeps the instance alive for the cache's
        # lifetime (flake8-bugbear B019); a per-instance dict avoids that.
        self._create_meta_cache = {}

    def get_user(self, username):
        """Fetch the JIRA user with the given username as a jirahub User."""
        return self._mapper.get_user(self._jira.user(username))

    def find_issues(self, min_updated_at=None):
        """Yield all issues of the configured project, optionally limited to
        those updated after ``min_updated_at`` (must be timezone-aware)."""
        if min_updated_at:
            assert min_updated_at.tzinfo is not None

        query = self._make_query(min_updated_at=min_updated_at)

        current_page = 0
        while True:
            start_idx = current_page * Client._PAGE_SIZE
            raw_issues = self._jira.search_issues(query, start_idx, Client._PAGE_SIZE)
            for raw_issue in raw_issues:
                # The JIRA client is buggy and will occasionally return None
                # for the creator field, even when the data exists.  Reloading
                # the issues one by one seems to fix that.
                raw_issue = self._jira.issue(raw_issue.key)
                raw_comments = self._jira.comments(raw_issue)
                yield self._mapper.get_issue(raw_issue, raw_comments)
            if len(raw_issues) < Client._PAGE_SIZE:
                break
            current_page += 1

    def find_other_issue(self, github_issue):
        """Return the JIRA issue linked to ``github_issue``, or None.

        Raises RuntimeError when more than one JIRA issue carries the link.
        """
        assert github_issue.source == Source.GITHUB

        github_issue_url = utils.make_github_issue_url(github_issue.project, github_issue.issue_id)
        query = self._make_query(github_issue_url=github_issue_url)
        raw_issues = self._jira.search_issues(query)
        if len(raw_issues) > 1:
            raise RuntimeError(f"{github_issue} has multiple linked JIRA issues")
        elif len(raw_issues) == 1:
            # Reloading the issue to make sure we get the creator field
            # (see note in find_issues).
            raw_issue = self._jira.issue(raw_issues[0].key)
            raw_comments = self._jira.comments(raw_issue)
            return self._mapper.get_issue(raw_issue, raw_comments)
        else:
            return None

    def get_issue(self, issue_id):
        """Fetch a single issue (and its comments) by key."""
        raw_issue = self._jira.issue(issue_id)
        raw_comments = self._jira.comments(raw_issue)
        return self._mapper.get_issue(raw_issue, raw_comments)

    def create_issue(self, fields):
        """Create a new issue in the configured project and return it."""
        raw_fields, _ = self._mapper.get_raw_issue_fields(fields)
        raw_fields["project"] = self._config.jira.project_key
        raw_issue = self._jira.create_issue(fields=raw_fields)
        new_issue = self._mapper.get_issue(raw_issue, [])
        logger.info("Created issue %s", new_issue)
        return new_issue

    def update_issue(self, issue, fields):
        """Apply field updates (and any open/close transition) to ``issue``.

        Raises ValueError when trying to change the title or body of an
        issue that was not created by the bot.
        """
        assert issue.source == Source.JIRA

        if ("title" in fields or "body" in fields) and not issue.is_bot:
            raise ValueError("Cannot update title or body of issue owned by another user")

        raw_fields, transition = self._mapper.get_raw_issue_fields(fields, issue=issue)

        if len(raw_fields) > 0:
            issue.raw_issue.update(notify=self._config.jira.notify_watchers, fields=raw_fields)

        # Open/close changes on existing issues are applied as workflow transitions.
        if transition is not None:
            self._jira.transition_issue(issue.raw_issue, transition)

        raw_comments = self._jira.comments(issue.raw_issue)
        updated_issue = self._mapper.get_issue(issue.raw_issue, raw_comments)
        logger.info("Updated issue %s", updated_issue)
        return updated_issue

    def create_comment(self, issue, fields):
        """Add a comment to ``issue`` and return the new Comment entity."""
        assert issue.source == Source.JIRA

        fields = self._mapper.get_raw_comment_fields(fields)
        raw_comment = self._jira.add_comment(issue=issue.issue_id, **fields)
        new_comment = self._mapper.get_comment(raw_comment)
        logger.info("Created comment %s on issue %s", new_comment, issue)
        return new_comment

    def update_comment(self, comment, fields):
        """Update a bot-owned comment; raises ValueError otherwise."""
        assert comment.source == Source.JIRA

        if not comment.is_bot:
            raise ValueError("Cannot update comment owned by another user")

        fields = self._mapper.get_raw_comment_fields(fields, comment=comment)
        comment.raw_comment.update(**fields)
        updated_comment = self._mapper.get_comment(comment.raw_comment)
        logger.info("Updated comment %s", updated_comment)
        return updated_comment

    def delete_comment(self, comment):
        """Delete a bot-owned comment; raises ValueError otherwise."""
        assert comment.source == Source.JIRA

        if not comment.is_bot:
            raise ValueError("Cannot delete comment owned by another user")

        comment.raw_comment.delete()
        logger.info("Deleted comment %s", comment)

    def _make_query(self, min_updated_at=None, github_issue_url=None):
        """Build a JQL query for the configured project with optional filters."""
        filters = []

        quoted_project_key = self._quote_query_string(self._config.jira.project_key)
        filters.append(f"project = {quoted_project_key}")

        if min_updated_at:
            # JQL accepts a timestamp in milliseconds since the epoch.
            min_updated_at_ms = int(min_updated_at.timestamp() * 1000)
            filters.append(f"updated > {min_updated_at_ms}")

        if github_issue_url:
            quoted_url = self._quote_query_string(github_issue_url)
            field_name = _jql_field_name(self._config.jira.github_issue_url_field_id)
            filters.append(f"{field_name} = {quoted_url}")

        return " and ".join(filters) + " order by updated asc"

    def _quote_query_string(self, value):
        """Single-quote a JQL string value, escaping embedded quotes."""
        return "'" + value.replace("'", "\\'") + "'"

    def get_create_metadata(self, expand=None):
        """Return the create metadata for the configured project.

        Results are cached per ``expand`` value for the lifetime of this
        Client instance.
        """
        if expand not in self._create_meta_cache:
            result = self._jira.createmeta(self._config.jira.project_key, expand=expand)["projects"][0]
            self._create_meta_cache[expand] = result
        return self._create_meta_cache[expand]
class Formatter:
    """Converts GitHub-flavored markdown text into JIRA wiki markup."""

    # ATX headings (# through ######) at start of line or after whitespace.
    H1_RE = re.compile(r"(\s|^)# ")
    H2_RE = re.compile(r"(\s|^)## ")
    H3_RE = re.compile(r"(\s|^)### ")
    H4_RE = re.compile(r"(\s|^)#### ")
    H5_RE = re.compile(r"(\s|^)##### ")
    H6_RE = re.compile(r"(\s|^)###### ")
    # Fenced code blocks; the open fence captures an optional language tag.
    NOFORMAT_OPEN_RE = re.compile(r"```(\w*)")
    NOFORMAT_CLOSE_RE = re.compile(r"```")
    # Bare issue/PR references like "#123".
    HASH_NUMBER_RE = re.compile(r"(^|\s)#([0-9]+)($|\s)")
    # User mentions like "@username".
    USER_MENTION_RE = re.compile(r"(^|\s)@([0-9a-zA-Z-]+)\b")
    # Markdown inline emphasis/formatting.
    ITALIC_RE = re.compile(r"(^|[^\w*])\*(\w(.*?\w)?)\*($|[^\w*])")
    BOLD_RE = re.compile(r"(^|\W)\*\*(\w(.*?\w)?)\*\*($|\W)")
    MONOSPACED_RE = re.compile(r"`(.+?)`")
    STRIKETHROUGH_RE = re.compile(r"(^|\W)~~(\w(.*?\w)?)~~($|\W)")
    # HTML-style inline tags GitHub accepts in markdown.
    INSERTED_RE = re.compile(r"<ins>(.+?)</ins>")
    SUPERSCRIPT_RE = re.compile(r"<sup>(.+?)</sup>")
    SUBSCRIPT_RE = re.compile(r"<sub>(.+?)</sub>")
    # Markdown links [text](http...) and bare/angle-bracketed URLs.
    URL_WITH_TEXT_RE = re.compile(r"\[(.*?)\]\((http.*?)\)")
    URL_RE = re.compile(r"(\s|^)<?(http.*?)>?(\s|$)")
    # Consecutive "> " quote lines, matched as one block.
    QUOTE_RE = re.compile(r"((^> .*?$)(\r?\n)?)+", re.MULTILINE)
def __init__(self, config, url_helper, github_client):
self._config = config
self._url_helper = url_helper
self._github_client = github_client
def format_link(self, url, link_text=None):
    """Render a JIRA wiki link: ``[text|url]`` when text is given, else ``[url]``."""
    return f"[{link_text}|{url}]" if link_text else f"[{url}]"
def format_body(self, body):
    """Convert a markdown body to JIRA markup, leaving fenced code untouched."""
    # Split the body into (text, formatted) regions; fenced code blocks are
    # converted by _handle_noformat_content and flagged as not-to-format.
    regions = utils.isolate_regions(
        [(body, True)], Formatter.NOFORMAT_OPEN_RE, Formatter.NOFORMAT_CLOSE_RE, self._handle_noformat_content
    )
    pieces = [self._format_content(text) if formatted else text for text, formatted in regions]
    return "".join(pieces)
def _handle_noformat_content(self, content, open_match):
if open_match.group(1):
return ("{code:" + open_match.group(1) + "}" + content + "{code}", False)
else:
return ("{noformat}" + content + "{noformat}", False)
def _format_user_mention(self, match):
username = match.group(2)
try:
user = self._github_client.get_user(username)
except Exception:
logger.warning("Missing GitHub user with username %s", username)
user = None
if user:
url = self._url_helper.get_user_profile_url(user)
link_text = user.display_name
return match.group(1) + self.format_link(url, link_text)
else:
return match.group(0)
def _format_issue_or_pull(self, match):
number = int(match.group(2))
if self._github_client.is_issue(number):
url = self._url_helper.get_issue_url(source=Source.GITHUB, issue_id=number)
link = self.format_link(url, f"#{number}")
return match.group(1) + link + match.group(3)
elif self._github_client.is_pull_request(number):
url = self._url_helper.get_pull_request_url(number)
link = self.format_link(url, f"#{number}")
return match.group(1) + link + match.group(3)
else:
return match.group(0)
def _format_quote_block(self, match):
content = match.group(0)
content = "{quote}\n" + content
if content.endswith("\n"):
content = content + "{quote}"
else:
content = content + "\n{quote}"
lines = content.split("\n")
new_lines = []
for line in lines:
if line.startswith("> "):
new_lines.append(line[2:])
else:
new_lines.append(line)
return "\n".join(new_lines)
def _format_content(self, content):
content = Formatter.HASH_NUMBER_RE.sub(self._format_issue_or_pull, content)
content = Formatter.H1_RE.sub(lambda match: match.group(1) + "h1. ", content)
content = Formatter.H2_RE.sub(lambda match: match.group(1) + "h2. ", content)
content | |
zrem(self, tr, key:NativeType, members:ListOf(NativeType)) -> int:
""" Remove one or more members from a sorted set """
return self._query(tr, b'zrem', self.encode_from_native(key), *map(self.encode_from_native, members))
# Hashes
@_query_command
def hset(self, tr, key:NativeType, field:NativeType, value:NativeType) -> int:
    """ Set ``field`` of the hash stored at ``key`` to ``value``. """
    encoded = (self.encode_from_native(key), self.encode_from_native(field), self.encode_from_native(value))
    return self._query(tr, b'hset', *encoded)
@_query_command
def hmset(self, tr, key:NativeType, values:dict) -> StatusReply:
    """ Set several fields of the hash at ``key`` in a single command. """
    pairs = []
    for field, val in values.items():
        assert isinstance(field, self.native_type)
        assert isinstance(val, self.native_type)
        pairs.extend((self.encode_from_native(field), self.encode_from_native(val)))
    return self._query(tr, b'hmset', self.encode_from_native(key), *pairs)
@_query_command
def hsetnx(self, tr, key:NativeType, field:NativeType, value:NativeType) -> int:
    """ Set ``field`` of the hash at ``key``, but only when the field does not yet exist. """
    encoded = (self.encode_from_native(key), self.encode_from_native(field), self.encode_from_native(value))
    return self._query(tr, b'hsetnx', *encoded)
@_query_command
def hdel(self, tr, key:NativeType, fields:ListOf(NativeType)) -> int:
    """ Delete one or more fields from the hash stored at ``key``. """
    encoded_fields = [self.encode_from_native(f) for f in fields]
    return self._query(tr, b'hdel', self.encode_from_native(key), *encoded_fields)
@_query_command
def hget(self, tr, key:NativeType, field:NativeType) -> (NativeType, NoneType):
    """ Return the value of ``field`` in the hash at ``key`` (may be None). """
    encoded_key = self.encode_from_native(key)
    encoded_field = self.encode_from_native(field)
    return self._query(tr, b'hget', encoded_key, encoded_field)
@_query_command
def hexists(self, tr, key:NativeType, field:NativeType) -> bool:
    """ Tell whether ``field`` exists in the hash stored at ``key``. """
    encoded_key = self.encode_from_native(key)
    encoded_field = self.encode_from_native(field)
    return self._query(tr, b'hexists', encoded_key, encoded_field)
@_query_command
def hkeys(self, tr, key:NativeType) -> SetReply:
    """ Return all field names of the hash at ``key``. (Returns a set) """
    encoded_key = self.encode_from_native(key)
    return self._query(tr, b'hkeys', encoded_key)
@_query_command
def hvals(self, tr, key:NativeType) -> ListReply:
    """ Return all values of the hash at ``key``. (Returns a list) """
    encoded_key = self.encode_from_native(key)
    return self._query(tr, b'hvals', encoded_key)
@_query_command
def hlen(self, tr, key:NativeType) -> int:
    """ Return the number of fields contained in the hash stored at ``key``. """
    encoded_key = self.encode_from_native(key)
    return self._query(tr, b'hlen', encoded_key)
@_query_command
def hgetall(self, tr, key:NativeType) -> DictReply:
    """ Return all fields and values of the hash stored at ``key``. """
    encoded_key = self.encode_from_native(key)
    return self._query(tr, b'hgetall', encoded_key)
@_query_command
def hmget(self, tr, key:NativeType, fields:ListOf(NativeType)) -> ListReply:
    """ Return the values of all the given fields of the hash at ``key``. """
    encoded_fields = [self.encode_from_native(f) for f in fields]
    return self._query(tr, b'hmget', self.encode_from_native(key), *encoded_fields)
@_query_command
def hincrby(self, tr, key:NativeType, field:NativeType, increment) -> int:
    """ Increment the integer value of a hash field by ``increment``.
    Returns: the value at field after the increment operation. """
    assert isinstance(increment, int)
    encoded = (self.encode_from_native(key), self.encode_from_native(field), self._encode_int(increment))
    return self._query(tr, b'hincrby', *encoded)
@_query_command
def hincrbyfloat(self, tr, key:NativeType, field:NativeType, increment:(int,float)) -> float:
    """ Increment the float value of a hash field by ``increment``.
    Returns: the value at field after the increment operation. """
    encoded = (self.encode_from_native(key), self.encode_from_native(field), self._encode_float(increment))
    return self._query(tr, b'hincrbyfloat', *encoded)
# Pubsub
# (subscribe, unsubscribe, etc... should be called through the Subscription class.)
@_command
def start_subscribe(self, tr, *a) -> 'Subscription':
    """
    Start a pubsub listener.

    ::

        # Create subscription
        subscription = yield from protocol.start_subscribe()
        yield from subscription.subscribe(['key'])
        yield from subscription.psubscribe(['pattern*'])

        while True:
            result = yield from subscription.next_published()
            print(result)

    :returns: :class:`~asyncio_redis.Subscription`
    :raises Error: when this protocol instance is already in use.
    """
    # (Make coroutine. @asyncio.coroutine breaks documentation. It uses
    # @functools.wraps to make a generator for this function. But _command
    # will no longer be able to read the signature.)
    if False: yield

    # Refuse to enter pubsub mode while other queries may be in flight.
    if self.in_use:
        raise Error('Cannot start pubsub listener when a protocol is in use.')

    subscription = Subscription(self)

    # Once these flags are set, this protocol is dedicated to pubsub traffic.
    self._in_pubsub = True
    self._subscription = subscription
    return subscription
@_command
def _subscribe(self, tr, channels:ListOf(NativeType)) -> NoneType:
    """ Start listening for messages published to the given channels. """
    self._pubsub_channels.update(channels)
    return self._pubsub_method('subscribe', channels)
@_command
def _unsubscribe(self, tr, channels:ListOf(NativeType)) -> NoneType:
    """ Stop listening for messages posted to the given channels. """
    self._pubsub_channels.difference_update(channels)
    return self._pubsub_method('unsubscribe', channels)
@_command
def _psubscribe(self, tr, patterns:ListOf(NativeType)) -> NoneType:
    """ Start listening for messages published to channels matching the given patterns. """
    self._pubsub_patterns.update(patterns)
    return self._pubsub_method('psubscribe', patterns)
@_command
def _punsubscribe(self, tr, patterns:ListOf(NativeType)) -> NoneType: # XXX: unittest
    """ Stop listening for messages posted to channels matching the given patterns. """
    self._pubsub_patterns.difference_update(patterns)
    return self._pubsub_method('punsubscribe', patterns)
@asyncio.coroutine
def _pubsub_method(self, method, params):
    # Shared implementation behind the (p)subscribe/(p)unsubscribe commands.
    if not self._in_pubsub:
        raise Error('Cannot call pubsub methods without calling start_subscribe')

    # Send
    self._send_command([method.encode('ascii')] + list(map(self.encode_from_native, params)))

    # Note that we can't use `self._query` here. The reason is that one
    # subscribe/unsubscribe command returns a separate answer for every
    # parameter. It doesn't fit in the same model of all the other queries
    # where one query puts a Future on the queue that is replied with the
    # incoming answer.
    # Redis returns something like [ 'subscribe', 'channel_name', 1] for
    # each parameter, but we can safely ignore those replies.
@_query_command
def publish(self, tr, channel:NativeType, message:NativeType) -> int:
    """ Post ``message`` to ``channel``.
    (Returns the number of clients that received this message.) """
    encoded_channel = self.encode_from_native(channel)
    encoded_message = self.encode_from_native(message)
    return self._query(tr, b'publish', encoded_channel, encoded_message)
@_query_command
def pubsub_channels(self, tr, pattern:(NativeType, NoneType)=None) -> ListReply:
    """
    List the currently active Pub/Sub channels — those with one or more
    subscribers, not counting clients subscribed to patterns — optionally
    filtered by ``pattern``.
    """
    if pattern:
        pattern_arg = self.encode_from_native(pattern)
    else:
        pattern_arg = b'*'
    return self._query(tr, b'pubsub', b'channels', pattern_arg)
@_query_command
def pubsub_numsub(self, tr, channels:ListOf(NativeType)) -> DictReply:
    """ Return, per channel, the number of subscribers (clients subscribed
    to patterns are not counted). """
    encoded = [self.encode_from_native(c) for c in channels]
    return self._query(tr, b'pubsub', b'numsub', *encoded)
@_query_command
def pubsub_numpat(self, tr) -> int:
    """ Returns the number of subscriptions to patterns (that are performed
    using the PSUBSCRIBE command). Note that this is not just the count of
    clients subscribed to patterns but the total number of patterns all the
    clients are subscribed to. """
    # PUBSUB NUMPAT takes no further arguments.
    return self._query(tr, b'pubsub', b'numpat')
# Server
@_query_command
def ping(self, tr) -> StatusReply:
    """ Ping the server (the server answers with a PONG status reply). """
    return self._query(tr, b'ping')
@_query_command
def echo(self, tr, string:NativeType) -> NativeType:
    """ Ask the server to echo back the given string. """
    encoded = self.encode_from_native(string)
    return self._query(tr, b'echo', encoded)
@_query_command
def save(self, tr) -> StatusReply:
    """ Synchronously save the dataset to disk (blocking SAVE command). """
    return self._query(tr, b'save')
@_query_command
def bgsave(self, tr) -> StatusReply:
    """ Asynchronously save the dataset to disk (background BGSAVE command). """
    return self._query(tr, b'bgsave')
@_query_command
def bgrewriteaof(self, tr) -> StatusReply:
    """ Asynchronously rewrite the append-only file. """
    return self._query(tr, b'bgrewriteaof')
@_query_command
def lastsave(self, tr) -> int:
    """ Get the UNIX time stamp of the last successful save to disk. """
    return self._query(tr, b'lastsave')
@_query_command
def dbsize(self, tr) -> int:
    """ Return the number of keys in the currently-selected database. """
    return self._query(tr, b'dbsize')
@_query_command
def flushall(self, tr) -> StatusReply:
    """ Remove all keys from all databases. """
    return self._query(tr, b'flushall')
@_query_command
def flushdb(self, tr) -> StatusReply:
    """ Delete all the keys of the currently selected DB. This command never fails. """
    return self._query(tr, b'flushdb')
# @_query_command
# def object(self, subcommand, args):
# """ Inspect the internals of Redis objects """
# raise NotImplementedError
@_query_command
def type(self, tr, key:NativeType) -> StatusReply:
    """ Report the type of the value stored at ``key``. """
    encoded_key = self.encode_from_native(key)
    return self._query(tr, b'type', encoded_key)
@_query_command
def config_set(self, tr, parameter:str, value:str) -> StatusReply:
    """ Update a server configuration parameter to the given value. """
    encoded_parameter = self.encode_from_native(parameter)
    encoded_value = self.encode_from_native(value)
    return self._query(tr, b'config', b'set', encoded_parameter, encoded_value)
@_query_command
def config_get(self, tr, parameter:str) -> ConfigPairReply:
    """ Read the value of a server configuration parameter. """
    encoded_parameter = self.encode_from_native(parameter)
    return self._query(tr, b'config', b'get', encoded_parameter)
@_query_command
def config_rewrite(self, tr) -> StatusReply:
    """ Rewrite the configuration file with the in memory configuration. """
    return self._query(tr, b'config', b'rewrite')
@_query_command
def config_resetstat(self, tr) -> StatusReply:
    """ Reset the statistics reported by the INFO command. """
    return self._query(tr, b'config', b'resetstat')
@_query_command
def info(self, tr, section:(NativeType, NoneType)=None) -> InfoReply:
    """ Fetch server information and statistics, optionally restricted to one section. """
    if section is None:
        return self._query(tr, b'info')
    return self._query(tr, b'info', self.encode_from_native(section))
@_query_command
def shutdown(self, tr, save=False) -> StatusReply:
    """ Shut the server down, saving the dataset first when ``save`` is true. """
    mode = b'save' if save else b'nosave'
    return self._query(tr, b'shutdown', mode)
@_query_command
def client_getname(self, tr) -> NativeType:
    """ Get the name assigned to the current connection. """
    return self._query(tr, b'client', b'getname')
@_query_command
def client_setname(self, tr, name) -> StatusReply:
    """ Assign a name to the current connection. """
    encoded_name = self.encode_from_native(name)
    return self._query(tr, b'client', b'setname', encoded_name)
@_query_command
def client_list(self, tr) -> ClientListReply:
    """ Get the list of client connections. """
    return self._query(tr, b'client', b'list')
@_query_command
def client_kill(self, tr, address:str) -> StatusReply:
    """
    Kill the connection of a client.
    `address` should be an "ip:port" string.
    """
    encoded_address = address.encode('utf-8')
    return self._query(tr, b'client', b'kill', encoded_address)
# LUA scripting
@_command
@asyncio.coroutine
def | |
min_
# same coordinates in image slicing scale
b_min = np.round(coord - hlf_hc[i] * inv_scale[i]).astype(int)
b_max = np.round(coord + hlf_hc[i] * inv_scale[i]).astype(int)
# correct for edges of image
if b_max >= shape[i]:
b_max = shape[i] - 1
if b_min < 0:
b_min = 0
# add the coordinate slice to the pair info
b_box[c] = slice(b_min, b_max)
# add the coordinate in image scale to the point coords
point_coords.append(coord - b_min)
# add the point to the pair
sample[pair]['point'] = np.array([point_coords])
# add the hypercube slices to the pair
sample[pair]['b_box'] = b_box
# add the tracks info to the pair
lil_tracks = lil_tracks.reset_index(drop=True)
sample[pair]['df'] = lil_tracks
# get the tracks array for input to napari
cols = [id_col, time_col]
coord_cols = _coords_cols(array_order, non_tzyx_col, time_col)
for c in coord_cols:
cols.append(c)
only_tracks = lil_tracks[cols].to_numpy()
sample[pair]['tracks'] = only_tracks
# add the sample info to the sample dict
sample['info'] = df
return sample
def get_objects_without_tracks(
    labels,
    tracks,
    id_col,
    time_col,
    array_order,
    scale,
    _frames,
    _box
):
    """
    Find segmented objects for which no track point falls inside the
    object's (scaled) bounding box, frame by frame.

    Parameters
    ----------
    labels : array-like
        Integer label image with time as the first axis.
    tracks : pd.DataFrame
        Track points with columns ``id_col``, ``time_col`` and one column
        per spatial coordinate in ``array_order``.
    id_col : str
        Name of the track-ID column.
    time_col : str
        Name of the time column (must be one of ``array_order``).
    array_order : list of str
        Column names in the order of the image axes.
    scale : sequence of float
        Per-axis factors mapping array indices to track coordinates.
    _frames, _box
        Unused here; kept for call-signature compatibility — TODO confirm.

    Returns
    -------
    pd.DataFrame
        One row per untracked object: centroid coordinates, label ID, area.
    """
    df = {c:[] for c in array_order}
    df[id_col] = []
    df['area'] = []
    coord_cols = [c for c in array_order if c != time_col]
    coord_scale = [scale[i] for i, c in enumerate(array_order) if c != time_col]
    # NOTE(review): iterates to shape[0] - 1, skipping the final frame —
    # confirm this is intentional.
    for t in range(labels.shape[0] - 1):
        try:
            frame = np.array(labels[t, ...])
            # get the tracks at this point in time
            t_trk = tracks.copy()
            t_trk = t_trk.loc[t_trk[time_col] == t]
            # get the properties of objects in the frame
            props = regionprops(frame)
            # go through properties.
            no_tracks = []
            for p in props:
                label = p['label']
                bbox = p['bbox']
                ct = t_trk.copy()
                # get any tracks in the bounding box for this obj
                for i, c in enumerate(coord_cols):
                    # bbox holds mins for all axes followed by maxes;
                    # scale to track-coordinate space before comparing.
                    min_ = bbox[i] * coord_scale[i]
                    max_ = bbox[i + len(coord_cols)] * coord_scale[i]
                    ct = ct.loc[(ct[c] >= min_) & (ct[c] < max_)]
                # if there are no tracks in the bbox, add to the list
                if len(ct) == 0:
                    no_tracks.append(label)
            # based on list entries, make dataframe for objects
            for p in props:
                if p['label'] in no_tracks:
                    df[time_col].append(t)
                    for i, c in enumerate(coord_cols):
                        df[c].append(p['centroid'][i])
                    df[id_col].append(p['label'])
                    df['area'].append(p['area'])
        # NOTE(review): a KeyError here is only reported by printing the
        # frame index, and the frame is silently skipped — confirm this
        # best-effort behavior is intended.
        except KeyError:
            print(t)
    df = pd.DataFrame(df)
    print(f'Found {len(df)} untracked objects')
    return df
# -------
# Helpers
# -------
def add_track_length(df, id_col, new_col='track_length'):
    """
    Add a column giving, for each row, the total number of rows that share
    its track ID.

    Parameters
    ----------
    df : pd.DataFrame
        Track data containing the column ``id_col``.
    id_col : str
        Name of the track-ID column.
    new_col : str
        Name of the column to add (default 'track_length').

    Returns
    -------
    pd.DataFrame
        The same DataFrame, mutated in place with the new column.
    """
    # FIX: the previous implementation assigned with `df.at[idxs, new_col]`
    # where `idxs` is an array of row labels — `.at` is a scalar accessor and
    # rejects array keys.  groupby/transform computes the per-ID row count
    # in one vectorized pass and also works for non-integer IDs.
    df[new_col] = df.groupby(id_col)[id_col].transform('size')
    return df
def single_zarr(input_path, c=2, idx=0):
    '''
    Lazily open a zarr array and slice out one index (or several) along the
    chosen dimension(s).

    Parameters
    ----------
    input_path : str
        Path to the zarr array on disk.
    c : int or tuple
        Index or indices to take from the array.
    idx : int or tuple
        Which dimension(s) of the array to apply ``c`` to.  Must be the same
        type as ``c``; when both are tuples they are matched pairwise.

    Returns
    -------
    dask.array.Array
        The sliced array (still lazy).

    Raises
    ------
    TypeError
        If ``c`` and ``idx`` are not both ints or both tuples.
    ValueError
        If ``c`` and ``idx`` are tuples of different lengths.
    '''
    arr = da.from_zarr(input_path)
    slices = [slice(None)] * arr.ndim
    # FIX: input validation previously relied on `assert type(c) == type(idx)`,
    # which is stripped under `python -O`; validate explicitly instead.
    if isinstance(c, int) and isinstance(idx, int):
        slices[idx] = c
    elif isinstance(c, tuple) and isinstance(idx, tuple):
        if len(c) != len(idx):
            raise ValueError('c and idx must have the same length')
        for dim, sel in zip(idx, c):
            slices[dim] = sel
    else:
        raise TypeError('c and idx must be int or tuple with same type')
    return arr[tuple(slices)]
# referenced in sample_tracks() and sample_objects()
def _add_construction_info(sample, id_col, time_col, array_order, non_tzyx_col):
'''
Add information that can be used to (1) distinguish coordinate and ID
columns of the data frames, (2) reconstruct names of other columns
containing important information, and (3) determine how said columns
relate to array dimensions.
'''
sample['coord_info'] = {
'id_col' : id_col,
'time_col' : time_col,
'array_order' : array_order,
'non_tzyx_col' : non_tzyx_col
}
return sample
def open_with_correct_modality(image_path, channel=None, chan_axis=0):
    """
    Open an image file as an array, dispatching on the file extension.

    Parameters
    ----------
    image_path : str
        Path to a .nd2, .zarr, .h5 or .hdf5 file.
    channel : optional
        Channel to select.  For .nd2 an integer layer index; for .zarr an
        integer index along ``chan_axis``; for .h5/.hdf5 the channel key
        name (defaults to 'channel2' when not given).
    chan_axis : int
        Axis holding channels for zarr input.

    Returns
    -------
    Array-like (dask array for .nd2/.zarr/.h5 paths).

    Raises
    ------
    ValueError
        If a zarr group (rather than an array) is found, or if the file
        extension is not one of the supported formats.
    """
    suffix = Path(image_path).suffix
    if suffix == '.nd2':
        layerlist = nd2_reader(image_path)
        if channel is None:
            # No channel requested: stack the data of every layer.
            image = da.stack([l[0] for l in layerlist])
        else:
            image = layerlist[channel][0]
    elif suffix == '.zarr':
        image = zarr.open(image_path, 'r')
        if isinstance(image, zarr.hierarchy.Group):
            print('group')
            print(list(image.keys()))
            print(image_path)
            raise ValueError('Read in as zarr group... need array like?!')
        if channel is not None:
            # FIX: was `len(image.ndim)` — ndim is an int, so len() raised
            # TypeError whenever a channel was requested from a zarr file.
            s_ = [slice(None, None), ] * image.ndim
            s_[chan_axis] = slice(channel, channel + 1)
            image = image[tuple(s_)]
        image = da.array(image)
        print(image)
    elif suffix in ('.h5', '.hdf5'):
        # FIX: the previous branch structure printed messages contradicting
        # the conditions and contained an unreachable `elif channel == 2`
        # (only evaluated when channel was None); all paths either used the
        # given channel or fell back to 'channel2', which is kept here.
        if channel is not None:
            image = read_from_h5(image_path, channel=channel)
        else:
            image = read_from_h5(image_path, channel='channel2')
    else:
        # FIX: previously an unknown suffix fell through to `return image`
        # with `image` unbound, raising a confusing NameError.
        raise ValueError(f'Unsupported image file suffix: {suffix}')
    return image
def read_from_h5(h5_path, channel='channel2'):
    '''
    For corrupted nd2 files saved as h5 files using Fiji.

    h5 key structure for these files is as follows:
        {'t<index>' : {'channel<index>' : <3d array>, ...}, ...}

    Parameters
    ----------
    h5_path : str
        Path to the HDF5 file.
    channel : str
        Channel key to extract (e.g. 'channel2').

    Returns
    -------
    dask.array.Array
        Array with time stacked as the first axis.

    Raises
    ------
    KeyError
        If ``channel`` is not present in the file.
    '''
    import h5py
    with h5py.File(h5_path) as f:
        # Sort the time keys numerically so that 't10' follows 't9'.
        # NOTE(review): `'t' in key` matches any key containing a 't' —
        # confirm the files contain only t<index> keys at the top level.
        t_indices = sorted(int(key[1:]) for key in f.keys() if 't' in key)
        t_keys = ['t' + str(i) for i in t_indices]
        c_keys = list(f[t_keys[0]].keys())
        # FIX: previously a missing channel left `image` unbound and the
        # function died with a NameError; fail with a clear KeyError instead.
        if channel not in c_keys:
            raise KeyError(f'channel {channel!r} not found; available: {c_keys}')
        # Use the requested channel's first frame to size the output buffer.
        frame = f[t_keys[0]][channel]
        chan_image = np.zeros((len(t_keys), ) + frame.shape, dtype=frame.dtype)
        for j, t in enumerate(t_keys):
            chan_image[j, ...] = f[t][channel]
        image = da.from_array(chan_image)
    return image
# ---------------
# GET SAMPLE DATA
# ---------------
def get_sample_hypervolumes(sample, img_channel=None):
    """
    Attach image (and, when available, labels) hypervolumes to each pair in
    the sample.

    For every (ID, t) pair key in ``sample``, the image and labels arrays
    are sliced with the pair's bounding box ('b_box') and the results are
    stored under the pair's 'image' and 'labels' keys ('labels' is None
    when no labels path is recorded).

    Parameters
    ----------
    sample : dict
        Sample dict with 'image_path', 'labels_path', 'coord_info' and one
        tuple key per (ID, t) pair.
    img_channel : optional
        Channel passed through to open_with_correct_modality for the image.

    Returns
    -------
    dict
        The same sample dict, mutated in place.
    """
    image_path = sample['image_path']
    labels_path = sample['labels_path']
    image = open_with_correct_modality(image_path, img_channel)
    if labels_path is not None:
        labels = open_with_correct_modality(labels_path)
    else:
        labels = None
    array_order = sample['coord_info']['array_order']
    pairs = [key for key in sample.keys() if isinstance(key, tuple)]
    for pair in pairs:
        l = len(array_order)
        m = f'Image must be of same dimensions ({l}) as in the sample array_order: {array_order}'
        assert image.ndim == l, m
        # Build the bounding-box slice in the image's axis order.
        slice_ = tuple(sample[pair]['b_box'][key] for key in array_order)
        img = image[slice_]
        if isinstance(img, da.core.Array):
            img = img.compute()
        if labels is not None:
            lab = labels[slice_]
            if isinstance(lab, da.core.Array):
                # FIX: previously computed `img` again here (`lab = img.compute()`),
                # storing image data in the labels slot.
                lab = lab.compute()
        else:
            lab = None
        sample[pair]['image'] = img
        sample[pair]['labels'] = lab
    return sample
# -----------
# SAVE SAMPLE
# -----------
def save_sample(save_dir, sample):
    """
    Serialize a sample dict to a timestamped ``.smpl`` directory under
    ``save_dir``.

    Writes a read-info JSON and the info CSV at the top level, then one
    sub-directory per (ID, t) pair containing the pair's data frame (CSV),
    tracks (zarr), bounding box (JSON) and, when present, corrections,
    image and labels.
    """
    pairs = [key for key in sample.keys() if isinstance(key, tuple)]
    n_samples = len(pairs)
    file_name = Path(sample['tracks_path']).stem
    # Short tag encoding the sample type in the directory name.
    if sample['sample_type'] == 'random tracks':
        tp = 'rtracks'
    elif sample['sample_type'] == 'track terminations':
        tp = 'tterm'
    elif sample['sample_type'] == 'untracked objects':
        tp = 'uobj'
    else:
        tp = 'ukn'
    now = datetime.now()
    dt = now.strftime("%y%m%d_%H%M%S")
    name = file_name + f'_{tp}_{dt}_n={n_samples}.smpl'
    sample_dir = os.path.join(save_dir, name)
    os.makedirs(sample_dir, exist_ok=True)
    # save base info for the sample (everything needed to re-read it)
    sample_json = {
        'image_path' : sample['image_path'],
        'labels_path' : sample['labels_path'],
        'sample_type' : sample['sample_type'],
        'coord_info' : sample['coord_info'],
        'tracks_path' : sample['tracks_path']
    }
    json_name = file_name + '_read-info.json'
    with open(os.path.join(sample_dir, json_name), 'w') as f:
        json.dump(sample_json, f, indent=4)
    # save the info data frame
    info_name = file_name + '_info.csv'
    sample['info'].to_csv(os.path.join(sample_dir, info_name))
    # save the individual samples
    for pair in pairs:
        pair_name = f'id-{pair[0]}_t-{pair[1]}'
        pair_dir = os.path.join(sample_dir, pair_name)
        os.makedirs(pair_dir)
        # save the df
        df_name = pair_name + '_df.csv'
        sample[pair]['df'].to_csv(os.path.join(pair_dir, df_name))
        # save the tracks
        tracks_name = pair_name + '_tracks.zarr'
        t_data = sample[pair]['tracks']
        tracks = zarr.open(os.path.join(pair_dir, tracks_name), mode='w', shape=t_data.shape, chunks=t_data.shape)
        tracks[:, :] = t_data
        # save the bounding box (slices converted to JSON-serializable tuples)
        bbox_name = pair_name + '_bbox.json'
        new_bbox = sample[pair]['b_box'].copy()
        for key in new_bbox.keys():
            s_ = new_bbox[key]
            new_bbox[key] = (int(s_.start), int(s_.stop), s_.step)
        with open(os.path.join(pair_dir, bbox_name), 'w') as f:
            json.dump(new_bbox, f, indent=4)
        # if there are any corrections, save them
        # FIX: this used to be a bare `try/except: pass`, which silently
        # swallowed every error (including write failures); only the
        # missing/empty-'corr' case is expected, so test for it explicitly.
        if sample[pair].get('corr'):
            corr_name = pair_name + '_corr.json'
            with open(os.path.join(pair_dir, corr_name), 'w') as f:
                json.dump(sample[pair]['corr'], f)
        # if the image is in the sample, save this
        try:
            img = sample[pair]['image']
            image_path = os.path.join(pair_dir, pair_name + '_image.zarr')
            img_zarr = zarr.open(image_path, mode='w', shape=img.shape, chunks=img.shape, dtype=img.dtype)
            # `[...]` instead of `[:, :, :, :]` so non-4D volumes also work
            img_zarr[...] = img
        except KeyError:
            pass
        # if the label is in the sample, save this
        try:
            lab = sample[pair]['labels']
            labels_path = os.path.join(pair_dir, pair_name + '_labels.zarr')
            lab_zarr = zarr.open(labels_path, mode='w', shape=lab.shape, chunks=lab.shape, dtype=lab.dtype)
            lab_zarr[...] = lab
        except KeyError:
            pass
def read_sample(sample_path):
"""
Parse sample info from .smpl 'file'
"""
files = os.listdir(sample_path)
json_path = [f for f in files if f.endswith('_read-info.json')]
assert len(json_path) == 1, | |
9.65096318e-06,
# -1.17180876e-06, -1.47205710e-09, 3.78235738e-10], [-1.39289314e-07, -1.60277947e-12, -4.39124955e-15, 2.21338354e-11,
# -1.11538197e-11, -1.44742977e-16, 2.45995034e-16, 5.61168466e-06,
# -1.11813367e-06, -1.89357558e-09, 3.96106702e-10], [-2.38234690e-07, -2.34956564e-12, -8.11551999e-15, 2.78141126e-11,
# -1.62722580e-11, -1.63587140e-16, 2.70279113e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.96106702e-10], [-2.45393681e-07, -2.24112955e-12, -5.97702657e-15, 1.96625477e-11,
# -2.42366032e-11, -1.43390966e-16, 3.28663609e-16, 9.40527117e-06,
# -9.39813247e-07, -1.70756991e-09, 3.78235738e-10], [-2.45393681e-07, -2.24112955e-12, -4.94362236e-15, 2.44727589e-11,
# -2.94454263e-11, -2.19426502e-16, 2.70279113e-16, 1.07096878e-05,
# -1.11813367e-06, -1.58467050e-09, 3.78235738e-10], [-1.90254264e-07, -1.82364824e-12, -4.94362236e-15, 2.21338354e-11,
# -1.43318040e-11, -1.44742977e-16, 2.45995034e-16, 9.40527117e-06,
# -1.11813367e-06, -1.68779674e-09, 3.96106702e-10], [-2.42251909e-07, -1.67219450e-12, -5.27274157e-15, 2.21338354e-11,
# -1.96793589e-11, -1.13203293e-16, 2.98247899e-16, 9.65096318e-06,
# -1.11813367e-06, -1.17916558e-09, 3.78235738e-10], [-1.90254264e-07, -1.82364824e-12, -4.76895857e-15, 2.21338354e-11,
# -1.99658740e-11, -1.44742977e-16, 2.45995034e-16, 9.40527117e-06,
# -1.17180876e-06, -1.59249365e-09, 4.35713874e-10], [-1.90254264e-07, -2.22452254e-12, -4.94362236e-15, 1.62578218e-11,
# -1.62722580e-11, -1.44742977e-16, 2.45995034e-16, 9.40527117e-06,
# -1.11813367e-06, -1.43951889e-09, 3.78235738e-10], [-1.90254264e-07, -1.82364824e-12, -4.94362236e-15, 2.21338354e-11,
# -1.50846933e-11, -2.19426502e-16, 2.60644127e-16, 9.65096318e-06,
# -8.65681077e-07, -1.46260412e-09, 2.28239583e-10]],
# [[-2.42251909e-07, -1.67219450e-12, -5.27274157e-15, 2.21338354e-11,
# -1.96793589e-11, -1.13203293e-16, 2.72140372e-16, 9.65096318e-06,
# -1.11813367e-06, -1.43951889e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.94362236e-15, 1.85680774e-11,
# -1.62722580e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17916558e-09, 3.78235738e-10], [-1.20091915e-07, -1.82364824e-12, -5.40934727e-15, 2.21338354e-11,
# -1.43318040e-11, -1.70895077e-16, 2.45995034e-16, 9.40527117e-06,
# -1.11813367e-06, -1.32170579e-09, 3.96106702e-10], [-1.90254264e-07, -1.60277947e-12, -5.70554298e-15, 2.21338354e-11,
# -1.11538197e-11, -1.44742977e-16, 2.99808199e-16, 5.61168466e-06,
# -1.11813367e-06, -1.89357558e-09, 3.96106702e-10], [-2.15071534e-07, -1.82364824e-12, -4.60913467e-15, 2.33394428e-11,
# -1.99658740e-11, -1.63587140e-16, 2.70279113e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.96106702e-10], [-2.00585578e-07, -2.11904266e-12, -8.11551999e-15, 2.81960422e-11,
# -1.82388275e-11, -1.12940120e-16, 2.07352195e-16, 9.40527117e-06,
# -1.17180876e-06, -2.00971560e-09, 3.14996193e-10], [-1.90254264e-07, -1.71628741e-12, -4.94362236e-15, 1.62578218e-11,
# -1.92150601e-11, -1.63821146e-16, 2.45995034e-16, 1.09658461e-05,
# -1.11813367e-06, -1.43951889e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.94362236e-15, 1.62578218e-11,
# -1.62722580e-11, -1.44742977e-16, 2.45995034e-16, 9.40527117e-06,
# -1.11813367e-06, -1.35842392e-09, 3.78235738e-10], [-1.71588952e-07, -1.82364824e-12, -4.76895857e-15, 2.21338354e-11,
# -1.99658740e-11, -1.43458577e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.96106702e-10], [-2.38234690e-07, -2.34956564e-12, -1.04299197e-14, 2.48201817e-11,
# -2.08741244e-11, -1.63587140e-16, 2.83598783e-16, 9.62870546e-06,
# -1.17180876e-06, -1.59249365e-09, 4.21889069e-10]],
# [[-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.21338354e-11,
# -1.99658740e-11, -1.44512520e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.74129315e-10], [-1.71588952e-07, -1.82364824e-12, -4.76895857e-15, 1.85680774e-11,
# -1.62139720e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17916558e-09, 3.78235738e-10], [-2.15264619e-07, -2.22452254e-12, -6.33498725e-15, 1.73594735e-11,
# -1.34839780e-11, -1.44742977e-16, 2.90665882e-16, 9.40527117e-06,
# -1.11813367e-06, -1.35842392e-09, 3.78235738e-10], [-1.40099670e-07, -2.22452254e-12, -4.94362236e-15, 1.85680774e-11,
# -1.62722580e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17916558e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.94362236e-15, 1.64646744e-11,
# -1.62722580e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.13078466e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.94362236e-15, 1.85680774e-11,
# -1.62722580e-11, -1.63010931e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17916558e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -3.74632294e-15, 1.85680774e-11,
# -1.62722580e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17916558e-09, 3.96106702e-10], [-1.90254264e-07, -1.60277947e-12, -5.70554298e-15, 2.21338354e-11,
# -1.11538197e-11, -1.44742977e-16, 2.99808199e-16, 5.73102778e-06,
# -1.11813367e-06, -1.89357558e-09, 3.78235738e-10], [-2.42251909e-07, -1.67219450e-12, -5.27274157e-15, 2.21338354e-11,
# -1.96793589e-11, -1.13203293e-16, 2.72140372e-16, 9.23325222e-06,
# -1.31997546e-06, -1.22746693e-09, 3.78235738e-10], [-2.82774060e-07, -1.89689945e-12, -5.27274157e-15, 2.86453913e-11,
# -1.96793589e-11, -1.13203293e-16, 2.72140372e-16, 9.65096318e-06,
# -1.44288236e-06, -1.43951889e-09, 3.78235738e-10]],
# [[-1.90254264e-07, -2.22452254e-12, -3.74632294e-15, 1.85680774e-11,
# -1.84128303e-11, -1.44742977e-16, 3.09083616e-16, 8.73930743e-06,
# -1.12610129e-06, -1.17916558e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -5.44291350e-15, 1.92680075e-11,
# -1.62722580e-11, -1.44742977e-16, 2.47871443e-16, 9.40527117e-06,
# -1.12610129e-06, -1.13078466e-09, 4.83565237e-10], [-1.71588952e-07, -1.54906242e-12, -4.76895857e-15, 1.85680774e-11,
# -1.62139720e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -9.00097778e-10, 3.78235738e-10], [-2.15264619e-07, -2.22452254e-12, -6.33498725e-15, 1.73594735e-11,
# -1.48626310e-11, -1.44742977e-16, 3.72729736e-16, 9.68884515e-06,
# -1.11813367e-06, -1.35842392e-09, 3.78235738e-10], [-1.81871721e-07, -2.22452254e-12, -4.08873977e-15, 1.89800313e-11,
# -1.99658740e-11, -1.44512520e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.74129315e-10], [-1.90254264e-07, -1.91251108e-12, -4.94362236e-15, 1.85680774e-11,
# -1.62722580e-11, -1.63010931e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.11893090e-09, 3.78235738e-10], [-1.88550736e-07, -2.22452254e-12, -4.94362236e-15, 1.85680774e-11,
# -1.92628734e-11, -1.63010931e-16, 3.09083616e-16, 7.19223480e-06,
# -1.05249253e-06, -1.17916558e-09, 2.78617400e-10], [-1.82099652e-07, -2.22452254e-12, -4.94362236e-15, 1.85680774e-11,
# -1.62722580e-11, -1.63010931e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17916558e-09, 3.78235738e-10], [-1.40099670e-07, -2.22452254e-12, -4.94362236e-15, 1.85171504e-11,
# -1.62722580e-11, -1.23764030e-16, 3.09083616e-16, 1.03008728e-05,
# -9.95920108e-07, -1.17916558e-09, 3.78235738e-10], [-1.40099670e-07, -2.22372616e-12, -5.02384591e-15, 1.85680774e-11,
# -1.26451526e-11, -1.44742977e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.52952460e-09, 3.78235738e-10]],
# [[-1.47629128e-07, -2.22452254e-12, -4.08873977e-15, 1.84307963e-11,
# -1.99658740e-11, -1.44512520e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -8.45312886e-10, 3.78235738e-10], [-1.82099652e-07, -2.22452254e-12, -4.94362236e-15, 1.85680774e-11,
# -1.62722580e-11, -1.63010931e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.97007888e-10], [-1.91511126e-07, -2.22452254e-12, -4.08873977e-15, 2.20274034e-11,
# -1.26451526e-11, -1.55490358e-16, 3.09083616e-16, 9.40527117e-06,
# -1.12610129e-06, -1.76261064e-09, 3.78235738e-10], [-1.50886105e-07, -2.22372616e-12, -4.09683602e-15, 1.85680774e-11,
# -1.99658740e-11, -1.44512520e-16, 2.36889735e-16, 9.65096318e-06,
# -1.11813367e-06, -1.82981544e-09, 3.74129315e-10], [-1.69703479e-07, -2.22452254e-12, -4.42878661e-15, 1.85680774e-11,
# -1.84128303e-11, -1.44742977e-16, 3.11049945e-16, 8.63937016e-06,
# -1.12610129e-06, -1.36564534e-09, 4.63959690e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.44512520e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.89357558e-09, 3.74129315e-10], [-1.90254264e-07, -2.22452254e-12, -3.74632294e-15, 2.28494131e-11,
# -1.75323836e-11, -1.44742977e-16, 2.93061889e-16, 8.73930743e-06,
# -1.12610129e-06, -1.86970759e-09, 3.74129315e-10], [-1.81871721e-07, -2.01467458e-12, -4.08873977e-15, 1.89800313e-11,
# -1.64718095e-11, -1.44512520e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -9.89392446e-10, 2.95526570e-10], [-1.30806445e-07, -2.22452254e-12, -3.90097835e-15, 1.73594735e-11,
# -1.48626310e-11, -1.44742977e-16, 3.72729736e-16, 9.68884515e-06,
# -1.11813367e-06, -1.35842392e-09, 3.03427536e-10], [-1.75855607e-07, -2.22452254e-12, -6.33498725e-15, 1.85680774e-11,
# -1.62722580e-11, -1.68283713e-16, 3.09083616e-16, 9.40527117e-06,
# -1.41959659e-06, -1.17916558e-09, 2.72723818e-10]],
# [[-1.34727251e-07, -2.34808730e-12, -5.02287486e-15, 2.20274034e-11,
# -1.38464136e-11, -1.17483952e-16, 2.12776013e-16, 9.65096318e-06,
# -1.01232548e-06, -2.21924137e-09, 4.43144916e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.55490358e-16, 2.72254609e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-1.91511126e-07, -2.22452254e-12, -3.86040285e-15, 2.28494131e-11,
# -1.75323836e-11, -1.71938144e-16, 3.06440687e-16, 8.73930743e-06,
# -1.01112594e-06, -1.47207801e-09, 3.74129315e-10], [-1.90254264e-07, -2.47095573e-12, -4.08873977e-15, 2.20274034e-11,
# -1.26451526e-11, -1.55490358e-16, 3.09083616e-16, 9.40527117e-06,
# -1.45361150e-06, -1.76261064e-09, 4.76551501e-10], [-1.55573605e-07, -2.22452254e-12, -4.08873977e-15, 1.93575578e-11,
# -1.26451526e-11, -1.55490358e-16, 3.09083616e-16, 8.67144028e-06,
# -1.12610129e-06, -1.84220906e-09, 3.78235738e-10], [-1.91511126e-07, -2.63840157e-12, -4.08873977e-15, 2.20274034e-11,
# -1.99658740e-11, -1.67952750e-16, 2.59858851e-16, 9.65096318e-06,
# -1.30058245e-06, -1.89357558e-09, 3.74129315e-10], [-1.91511126e-07, -2.48986284e-12, -3.06551577e-15, 2.20274034e-11,
# -1.26451526e-11, -1.55490358e-16, 3.09083616e-16, 9.40527117e-06,
# -1.42003224e-06, -1.76261064e-09, 3.78235738e-10], [-1.91511126e-07, -2.22452254e-12, -3.35194639e-15, 2.20274034e-11,
# -1.35191142e-11, -1.89385507e-16, 3.09083616e-16, 9.40527117e-06,
# -1.06307843e-06, -1.76261064e-09, 3.78235738e-10], [-2.34610539e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.50574571e-11, -1.66527890e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.42254173e-09, 3.74129315e-10], [-1.90254264e-07, -2.34472219e-12, -4.47795449e-15, 2.01216972e-11,
# -2.49285010e-11, -1.44512520e-16, 2.50672974e-16, 9.65096318e-06,
# -9.51827225e-07, -1.89357558e-09, 3.74129315e-10]],
# [[-2.42421666e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.19855240e-11, -1.66527890e-16, 2.59858851e-16, 9.65096318e-06,
# -8.35879841e-07, -1.42254173e-09, 3.74129315e-10], [-2.34610539e-07, -2.22452254e-12, -4.08873977e-15, 2.23740573e-11,
# -1.71837610e-11, -1.66527890e-16, 2.28896073e-16, 9.65096318e-06,
# -1.11813367e-06, -1.42254173e-09, 4.38261678e-10], [-2.34610539e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.30512863e-11, -1.21599094e-16, 3.37610390e-16, 7.41627969e-06,
# -1.11813367e-06, -2.07500536e-09, 3.34726633e-10], [-1.91511126e-07, -2.22452254e-12, -3.35194639e-15, 2.20274034e-11,
# -1.35191142e-11, -1.82672704e-16, 3.09083616e-16, 9.40527117e-06,
# -1.06307843e-06, -1.42254173e-09, 3.74129315e-10], [-1.91511126e-07, -2.22452254e-12, -3.35194639e-15, 2.20274034e-11,
# -1.99658740e-11, -1.55490358e-16, 2.72254609e-16, 9.56351503e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.35191142e-11, -1.89385507e-16, 3.09083616e-16, 9.40527117e-06,
# -1.06307843e-06, -1.37600740e-09, 3.78235738e-10], [-2.08737018e-07, -2.22452254e-12, -4.08873977e-15, 2.53934959e-11,
# -1.50574571e-11, -1.46468771e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.42254173e-09, 4.55046826e-10], [-2.34610539e-07, -1.94327711e-12, -4.08873977e-15, 2.01216972e-11,
# -1.50574571e-11, -1.66527890e-16, 2.59858851e-16, 9.65096318e-06,
# -1.11813367e-06, -1.42254173e-09, 3.74129315e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.55490358e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.55490358e-16, 2.72254609e-16, 1.08725213e-05,
# -1.12610129e-06, -1.44625949e-09, 2.88638739e-10]],
# [[-1.90254264e-07, -2.22452254e-12, -3.03237765e-15, 2.52048821e-11,
# -1.99658740e-11, -1.82477055e-16, 3.21778138e-16, 9.40527117e-06,
# -9.81449993e-07, -1.44625949e-09, 3.78235738e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.32206856e-16, 3.41675167e-16, 1.13358099e-05,
# -1.12610129e-06, -1.12313665e-09, 3.78235738e-10], [-2.34610539e-07, -1.94327711e-12, -3.57906744e-15, 2.01216972e-11,
# -1.11294692e-11, -1.66527890e-16, 2.59858851e-16, 8.68239204e-06,
# -1.11813367e-06, -1.42254173e-09, 2.73357670e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.55490358e-16, 3.44041432e-16, 9.40527117e-06,
# -1.40582666e-06, -1.44625949e-09, 3.74129315e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 1.78629903e-11,
# -1.99658740e-11, -1.55490358e-16, 2.72254609e-16, 1.12418666e-05,
# -1.32998876e-06, -1.18031482e-09, 4.58268086e-10], [-1.91511126e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -1.99658740e-11, -1.55490358e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-2.34610539e-07, -2.22452254e-12, -4.08873977e-15, 1.60796635e-11,
# -1.59913753e-11, -1.32151511e-16, 3.40106684e-16, 9.40527117e-06,
# -1.04405060e-06, -1.44625949e-09, 3.78235738e-10], [-1.90254264e-07, -1.94327711e-12, -3.49272168e-15, 2.01216972e-11,
# -1.50574571e-11, -1.89240927e-16, 2.66501523e-16, 9.65096318e-06,
# -1.11813367e-06, -1.42254173e-09, 3.74129315e-10], [-1.90254264e-07, -2.22452254e-12, -4.08873977e-15, 2.01216972e-11,
# -1.99658740e-11, -1.55490358e-16, 3.40106684e-16, 1.18806879e-05,
# -8.63159127e-07, -1.78672257e-09, 2.80342541e-10], [-1.90254264e-07, -2.24600227e-12, -4.08873977e-15, 1.78954700e-11,
# -2.31878348e-11, -1.10364859e-16, 2.77246386e-16, 9.40527117e-06,
# -1.12610129e-06, -1.65094987e-09, 3.78235738e-10]],
# [[-1.91511126e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -1.99658740e-11, -1.10364859e-16, 2.77246386e-16, 9.40527117e-06,
# -1.12610129e-06, -1.65094987e-09, 3.78235738e-10], [-1.90254264e-07, -2.48630908e-12, -4.08873977e-15, 2.25204870e-11,
# -2.04166065e-11, -1.55490358e-16, 2.86756122e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-1.92440841e-07, -1.62339482e-12, -4.08873977e-15, 2.30239989e-11,
# -2.31878348e-11, -9.23751760e-17, 2.77246386e-16, 9.40527117e-06,
# -1.18411435e-06, -1.79465581e-09, 3.78235738e-10], [-1.90254264e-07, -2.24600227e-12, -4.08873977e-15, 1.78954700e-11,
# -1.99658740e-11, -1.43008601e-16, 2.72254609e-16, 1.12418666e-05,
# -1.27379628e-06, -1.18031482e-09, 4.41635116e-10], [-2.47418125e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -2.23294876e-11, -1.13954952e-16, 4.27479227e-16, 9.23102940e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-1.91511126e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -1.99658740e-11, -1.55490358e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-2.10154069e-07, -1.94327711e-12, -3.13149482e-15, 2.01216972e-11,
# -1.78990882e-11, -1.10364859e-16, 2.77246386e-16, 9.40527117e-06,
# -1.12610129e-06, -1.17086634e-09, 3.78235738e-10], [-1.90254264e-07, -2.24600227e-12, -4.08873977e-15, 1.64383119e-11,
# -1.50574571e-11, -1.89240927e-16, 2.66501523e-16, 9.65096318e-06,
# -8.22876398e-07, -1.42254173e-09, 3.74129315e-10], [-1.91511126e-07, -1.80242332e-12, -3.35194639e-15, 2.34528286e-11,
# -1.99658740e-11, -7.79562669e-17, 3.16616540e-16, 9.40527117e-06,
# -1.12610129e-06, -1.65094987e-09, 3.78235738e-10], [-1.90254264e-07, -2.24600227e-12, -4.08873977e-15, 2.29869942e-11,
# -2.31878348e-11, -1.55490358e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.81160517e-10]],
# [[-2.47418125e-07, -1.94327711e-12, -3.13149482e-15, 2.01216972e-11,
# -1.78990882e-11, -1.10364859e-16, 2.03702407e-16, 1.04118119e-05,
# -1.12610129e-06, -1.17086634e-09, 3.78235738e-10], [-2.10154069e-07, -2.22452254e-12, -3.35194639e-15, 1.39304055e-11,
# -2.23294876e-11, -1.13954952e-16, 4.27479227e-16, 9.13118737e-06,
# -1.12610129e-06, -1.44625949e-09, 3.78235738e-10], [-1.91511126e-07, -2.22452254e-12, -3.76143802e-15, 2.30239989e-11,
# -2.62563555e-11, -8.17330952e-17, 2.77246386e-16, 9.40527117e-06,
# -1.20058590e-06, -1.79465581e-09, 3.78235738e-10], [-1.92440841e-07, -1.31175454e-12, -3.93646392e-15, 1.82995570e-11,
# -1.99658740e-11, -1.67311023e-16, 4.18651962e-16, 7.13471395e-06,
# -1.12610129e-06, -1.13775978e-09, 3.78235738e-10], [-1.91511126e-07, -2.12456544e-12, -3.35194639e-15, 2.34528286e-11,
# -1.92465933e-11, -7.79562669e-17, 3.16616540e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.81160517e-10], [-1.90254264e-07, -2.24600227e-12, -4.79799699e-15, 2.83481246e-11,
# -2.31878348e-11, -1.55490358e-16, 2.38762597e-16, 9.40527117e-06,
# -1.12610129e-06, -1.65094987e-09, 3.78235738e-10], [-2.43210021e-07, -2.44253444e-12, -4.08873977e-15, 1.79657917e-11,
# -2.10505053e-11, -1.55490358e-16, 2.86756122e-16, 9.40527117e-06,
# -1.37246580e-06, -1.44625949e-09, 3.78235738e-10], [-1.52546770e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -1.99658740e-11, -1.55490358e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.61083832e-09, 3.78235738e-10], [-1.91511126e-07, -1.80242332e-12, -2.96900208e-15, 2.34528286e-11,
# -2.04166065e-11, -1.15485451e-16, 2.86756122e-16, 9.05531871e-06,
# -1.12610129e-06, -1.50318727e-09, 3.78235738e-10], [-1.90254264e-07, -2.60452339e-12, -4.12499832e-15, 2.25204870e-11,
# -1.97554588e-11, -7.79562669e-17, 3.16616540e-16, 9.40527117e-06,
# -1.12610129e-06, -1.66011637e-09, 3.78235738e-10]],
# [[-1.52546770e-07, -2.22452254e-12, -3.35194639e-15, 2.21465066e-11,
# -1.99658740e-11, -1.55490358e-16, 2.96389807e-16, 1.01462954e-05,
# -1.12610129e-06, -1.50439826e-09, 3.78235738e-10], [-1.52546770e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -1.99658740e-11, -1.78981740e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.61083832e-09, 3.78235738e-10], [-1.91511126e-07, -2.17690839e-12, -2.96900208e-15, 2.34528286e-11,
# -1.97554588e-11, -7.79562669e-17, 3.16616540e-16, 9.40527117e-06,
# -1.12610129e-06, -1.97675171e-09, 3.78235738e-10], [-1.90254264e-07, -2.82930989e-12, -4.12499832e-15, 2.25204870e-11,
# -2.04166065e-11, -1.29105229e-16, 2.86756122e-16, 9.05531871e-06,
# -1.12610129e-06, -1.63850520e-09, 4.01064635e-10], [-1.91511126e-07, -2.14793182e-12, -3.35194639e-15, 2.34528286e-11,
# -1.92465933e-11, -7.79562669e-17, 3.29170260e-16, 9.40527117e-06,
# -1.12610129e-06, -1.65094987e-09, 3.78235738e-10], [-1.90254264e-07, -2.24600227e-12, -4.79799699e-15, 2.83481246e-11,
# -2.31878348e-11, -1.55490358e-16, 2.38762597e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.81160517e-10], [-2.10343580e-07, -1.52785245e-12, -2.96900208e-15, 2.34528286e-11,
# -1.78990882e-11, -1.10364859e-16, 2.28554136e-16, 9.06940649e-06,
# -1.12610129e-06, -1.17086634e-09, 3.78235738e-10], [-2.37228426e-07, -2.49146682e-12, -3.13149482e-15, 2.01216972e-11,
# -2.04166065e-11, -1.45599707e-16, 3.12262213e-16, 9.05531871e-06,
# -1.32408669e-06, -1.50318727e-09, 3.56782035e-10], [-1.52546770e-07, -2.22452254e-12, -3.35194639e-15, 1.82995570e-11,
# -1.99658740e-11, -1.13954952e-16, 4.27479227e-16, 9.97389570e-06,
# -1.12610129e-06, -1.33233993e-09, 3.78235738e-10], [-2.10154069e-07, -2.22452254e-12, -3.35194639e-15, 1.39304055e-11,
# -2.23294876e-11, -1.81937152e-16, 3.40106684e-16, 9.40527117e-06,
# -1.12610129e-06, -1.59345562e-09, 3.43633386e-10]],
# [[-1.98336173e-07, -2.14793182e-12, -4.79799699e-15, 2.83481246e-11,
# -1.64623534e-11, -1.55490358e-16, 2.38762597e-16, 9.40527117e-06,
# -1.12610129e-06, -1.44625949e-09, 3.81160517e-10], [-1.90254264e-07, -2.24600227e-12, -3.35194639e-15, 2.34528286e-11,
# -1.92465933e-11, -7.79562669e-17, 3.29170260e-16, 9.40527117e-06,
# -1.45709028e-06, -1.65094987e-09, 3.78235738e-10], [-1.90254264e-07, -2.82930989e-12, -4.12499832e-15, 2.25204870e-11,
# -2.04166065e-11, -1.13954952e-16, 4.27479227e-16, 8.24780372e-06,
# -1.12610129e-06, -1.31992160e-09, 3.78235738e-10], [-1.57722689e-07, -2.22452254e-12, -3.35194639e-15, 1.42829034e-11,
# -1.99658740e-11, -1.29105229e-16, 2.86756122e-16, 9.75149643e-06,
# -1.12610129e-06, -1.63850520e-09, 4.01064635e-10], [-1.40381864e-07, -2.14793182e-12, -3.35194639e-15, 2.34528286e-11,
# -1.99658740e-11, -1.13954952e-16, 4.27479227e-16, 7.45429086e-06,
# -1.12610129e-06, -1.33233993e-09, 3.78235738e-10], [-1.52546770e-07, -1.62234294e-12, -3.35194639e-15, 1.82995570e-11,
# -1.36984906e-11, -6.71196990e-17, 3.29170260e-16, 9.40527117e-06,
# -1.12610129e-06, -1.65094987e-09, 3.78235738e-10], [-1.69018115e-07, -2.14793182e-12, -3.35194639e-15, 1.72152568e-11,
# -1.92465933e-11, -7.79562669e-17, 3.29170260e-16, 9.05531871e-06,
# -1.15535710e-06, -1.63850520e-09, 2.89960828e-10], | |
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param reprotect_input: Reverse replication input.
:type reprotect_input: ~azure.mgmt.recoveryservicessiterecovery.models.ReverseReplicationInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._reprotect_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
reprotect_input=reprotect_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reprotect.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/reProtect'} # type: ignore
def _resolve_health_errors_initial(
    self,
    fabric_name,  # type: str
    protection_container_name,  # type: str
    replicated_protected_item_name,  # type: str
    resolve_health_input,  # type: "_models.ResolveHealthInput"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.ReplicationProtectedItem"]
    """Send the initial resolveHealthErrors request for the LRO.

    POSTs the serialized health-issue input to the resolveHealthErrors
    endpoint. A 200 response carries the updated item and is
    deserialized; a 202 means the operation was accepted and is still
    running, so None is returned and the poller takes over.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
    # Map expected failure status codes to specific azure-core exceptions;
    # callers may extend/override the mapping via kwargs['error_map'].
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the method's metadata template and the
    # client-config / call arguments.
    url = self._resolve_health_errors_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
        'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
        'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
        'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body and run the request through the pipeline.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(resolve_health_input, 'ResolveHealthInput')
    body_content_kwargs['content'] = body_content
    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = completed with a body, 202 = accepted/in progress;
    # anything else is an error.
    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_resolve_health_errors_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/resolveHealthErrors'}  # type: ignore
def begin_resolve_health_errors(
    self,
    fabric_name,  # type: str
    protection_container_name,  # type: str
    replicated_protected_item_name,  # type: str
    resolve_health_input,  # type: "_models.ResolveHealthInput"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
    """Resolve health errors.

    Operation to resolve health issues of the replication protected item.

    :param fabric_name: Unique fabric name.
    :type fabric_name: str
    :param protection_container_name: Protection container name.
    :type protection_container_name: str
    :param replicated_protected_item_name: Replication protected item name.
    :type replicated_protected_item_name: str
    :param resolve_health_input: Health issue input object.
    :type resolve_health_input: ~azure.mgmt.recoveryservicessiterecovery.models.ResolveHealthInput
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicationProtectedItem"]
    delay = kwargs.pop('polling_interval', self._config.polling_interval)
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]

    # Only fire the initial request when not resuming from a saved token.
    raw_result = None
    if cont_token is None:
        raw_result = self._resolve_health_errors_initial(
            fabric_name=fabric_name,
            protection_container_name=protection_container_name,
            replicated_protected_item_name=replicated_protected_item_name,
            resolve_health_input=resolve_health_input,
            cls=lambda x, y, z: x,
            **kwargs
        )
        # These were consumed by the initial call; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final response into the model, honouring a
        # caller-supplied `cls` override when present.
        deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
        return cls(pipeline_response, deserialized, {}) if cls else deserialized

    path_format_arguments = {
        'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
        'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
        'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
        'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
    }

    # Select the polling strategy: default ARM polling, no polling, or a
    # user-provided PollingMethod instance.
    if polling is True:
        polling_method = ARMPolling(delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False:
        polling_method = NoPolling()
    else:
        polling_method = polling

    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resolve_health_errors.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/resolveHealthErrors'}  # type: ignore
def _test_failover_initial(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
testfailover_input, # type: "_models.TestFailoverInput"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.ReplicationProtectedItem"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ReplicationProtectedItem"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(testfailover_input, 'TestFailoverInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailover'} # type: ignore
def begin_test_failover(
self,
fabric_name, # type: str
protection_container_name, # type: str
replicated_protected_item_name, # type: str
testfailover_input, # type: "_models.TestFailoverInput"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ReplicationProtectedItem"]
"""Execute test failover.
Operation to perform a test failover of the replication protected item.
:param fabric_name: Unique fabric name.
:type fabric_name: str
:param protection_container_name: Protection container name.
:type protection_container_name: str
:param replicated_protected_item_name: Replication protected item name.
:type replicated_protected_item_name: str
:param testfailover_input: Test failover input.
:type testfailover_input: ~azure.mgmt.recoveryservicessiterecovery.models.TestFailoverInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ReplicationProtectedItem or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.recoveryservicessiterecovery.models.ReplicationProtectedItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationProtectedItem"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_failover_initial(
fabric_name=fabric_name,
protection_container_name=protection_container_name,
replicated_protected_item_name=replicated_protected_item_name,
testfailover_input=testfailover_input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ReplicationProtectedItem', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
'protectionContainerName': self._serialize.url("protection_container_name", protection_container_name, 'str'),
'replicatedProtectedItemName': self._serialize.url("replicated_protected_item_name", replicated_protected_item_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/testFailover'} # type: ignore
def _test_failover_cleanup_initial(
self,
| |
and generate metrics.
"""
experiment, network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint_path)
logger.info("start testing classifier")
logger.info(f"experiment:\n{experiment}")
logger.info(f"network:\n{network}")
# get test dataloader
_, _, test_loader = generate_dataloaders(experiment)
criterion = experiment.criterion
num_test_batches = math.ceil(experiment.test_size / experiment.batch_size)
num_batches_length = len(str(num_test_batches))
test_losses = []
test_accuracy = torchmetrics.Accuracy().to(DEVICE)
test_precision = torchmetrics.Precision(
num_classes=experiment.num_symbols, average="macro"
).to(DEVICE)
test_recall = torchmetrics.Recall(
num_classes=experiment.num_symbols, average="macro"
).to(DEVICE)
with torch.no_grad():
network.eval()
for batch_number, (inputs, labels) in enumerate(test_loader, start=1):
inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# calculate test loss
test_loss = criterion(output, labels)
test_losses.append(test_loss.item())
batch_accuracy = test_accuracy(predictions, labels)
test_precision(predictions, labels)
test_recall(predictions, labels)
logger.info(
f"test batch {batch_number:{num_batches_length}} of {num_test_batches} | accuracy: {batch_accuracy:.4f}"
)
# log statistics
average_test_loss = np.mean(test_losses)
total_test_accuracy = test_accuracy.compute()
precision = test_precision.compute()
recall = test_recall.compute()
logger.info(
f"testing complete | average loss: {average_test_loss:.4f} | accuracy: {total_test_accuracy:.4f}"
)
logger.info(f"precision: {precision:.4f} | recall: {recall:.4f}")
if print_sample_assignments:
num_sample_assignments = 10
# num_sample_assignments = 20
# num_sample_assignments = 100
with torch.no_grad():
network.eval()
inputs, labels = next(iter(test_loader))
# inputs, labels = inputs.to(DEVICE), labels.to(DEVICE)
inputs = inputs.to(DEVICE)
with torch.random.fork_rng():
torch.manual_seed(time.time() * 1000)
permutation = torch.randperm(len(inputs))
inputs = inputs[permutation[0:num_sample_assignments]]
labels = labels[permutation[0:num_sample_assignments]]
# forward pass
output = network(inputs)
# get predicted labels from output
predictions = network.get_predictions(output)
# get class indexes from the one-hot encoded labels
labels = torch.argmax(labels, dim=1)
# reset logger, add raw messages format
logger.remove()
logger.add(sys.stderr, format="{message}")
log_file_path = pathlib.Path(checkpoint_path).with_suffix(".log")
logger.add(log_file_path, format="{message}")
assignments = network.symbol_mapper.one_hot_to_label(predictions.cpu())
labels = network.symbol_mapper.one_hot_to_label(labels)
logger.info("\nsample assignments")
logger.info("assignment | true label")
logger.info("-----------------------")
for assignment, label in zip(assignments, labels):
if assignment == label:
logger.info(f"{assignment:>10} | {label:>10}")
else:
logger.info(f"{assignment:>10} | {label:>10} !!!")
def assign_symbols(
    network,
    symbols_metadata,
    sequences_fasta,
    scientific_name=None,
    taxonomy_id=None,
    output_directory=None,
):
    """
    Use the trained network to assign symbols to the sequences in the FASTA file.

    Args:
        network: trained network exposing a `predict_probabilities(sequences, clades)` method
        symbols_metadata (dict): symbol metadata; each symbol maps to a dict
            with at least "description" and "source" entries
        sequences_fasta (str or Path): path to the input FASTA file
        scientific_name (str): species scientific name; when given, it is used
            to look up the taxonomy id (overriding the `taxonomy_id` argument)
        taxonomy_id (int): species taxonomy id, used when `scientific_name`
            is not provided
        output_directory (str or Path): directory for the generated assignments
            CSV file; defaults to the FASTA file's directory

    Raises:
        ValueError: if neither `scientific_name` nor `taxonomy_id` is provided
    """
    sequences_fasta_path = pathlib.Path(sequences_fasta)

    # fail fast with a clear message instead of an obscure error when looking
    # up the clade for a missing taxonomy id
    if scientific_name is None and taxonomy_id is None:
        raise ValueError("either scientific_name or taxonomy_id is required")

    if scientific_name is not None:
        taxonomy_id = get_species_taxonomy_id(scientific_name)
    clade = get_taxonomy_id_clade(taxonomy_id)
    # logger.info(f"got clade {clade} for {scientific_name}")

    if output_directory is None:
        output_directory = sequences_fasta_path.parent
    assignments_csv_path = pathlib.Path(
        f"{output_directory}/{sequences_fasta_path.stem}_symbols.csv"
    )

    # read the FASTA file in chunks and assign symbols
    with open(assignments_csv_path, "w+", newline="") as csv_file:
        # generate a csv writer, create the CSV file with a header
        field_names = ["stable_id", "symbol", "probability", "description", "source"]
        csv_writer = csv.writer(csv_file, delimiter="\t", lineterminator="\n")
        csv_writer.writerow(field_names)

        for fasta_entries in read_fasta_in_chunks(sequences_fasta_path):
            # the final chunk may be padded with None entries; drop them
            if fasta_entries[-1] is None:
                fasta_entries = [
                    fasta_entry
                    for fasta_entry in fasta_entries
                    if fasta_entry is not None
                ]

            # the stable id is the first whitespace-delimited token of the
            # FASTA description line
            identifiers = [fasta_entry[0].split(" ")[0] for fasta_entry in fasta_entries]
            sequences = [fasta_entry[1] for fasta_entry in fasta_entries]
            # the same clade applies to every sequence of the species
            clades = [clade] * len(fasta_entries)

            assignments_probabilities = network.predict_probabilities(sequences, clades)

            # save assignments and probabilities to the CSV file
            for identifier, (assignment, probability) in zip(
                identifiers, assignments_probabilities
            ):
                symbol_description = symbols_metadata[assignment]["description"]
                symbol_source = symbols_metadata[assignment]["source"]
                csv_writer.writerow(
                    [
                        identifier,
                        assignment,
                        probability,
                        symbol_description,
                        symbol_source,
                    ]
                )

    logger.info(f"symbol assignments saved at {assignments_csv_path}")
def save_network_from_checkpoint(checkpoint_path):
    """
    Save the network in a checkpoint file as a separate file.

    Args:
        checkpoint_path (str or Path): path to the experiment checkpoint

    Returns:
        pathlib.Path: path of the saved standalone network file
    """
    _experiment, network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint_path)

    # normalize to a Path; sibling functions accept plain string paths as well
    # (the original assumed a Path and failed on str input)
    path = pathlib.Path(checkpoint_path)
    network_path = pathlib.Path(f"{path.parent}/{path.stem}_network.pth")
    torch.save(network, network_path)

    return network_path
def log_pytorch_cuda_info():
    """
    Log PyTorch and CUDA info and device to be used.
    """
    # the f-string "=" specifier logs both the expression text and its value
    logger.debug(f"{torch.__version__=}")
    logger.debug(f"{DEVICE=}")
    logger.debug(f"{torch.version.cuda=}")
    logger.debug(f"{torch.backends.cudnn.enabled=}")
    logger.debug(f"{torch.cuda.is_available()=}")

    # device count and properties can only be queried when CUDA is available
    if torch.cuda.is_available():
        logger.debug(f"{torch.cuda.device_count()=}")
        logger.debug(f"{torch.cuda.get_device_properties(DEVICE)}")
def evaluate_network(checkpoint_path, complete=False):
    """
    Evaluate a trained network by assigning gene symbols to the protein sequences
    of genome assemblies in the latest Ensembl release, and comparing them to the existing
    Xref assignments.

    Args:
        checkpoint_path (Path): path to the experiment checkpoint
        complete (bool): Whether or not to run the evaluation for all genome assemblies.
            Defaults to False, which runs the evaluation only for a selection of
            the most important species genome assemblies.
    """
    experiment, network, _optimizer, symbols_metadata = load_checkpoint(checkpoint_path)

    # lowercased known symbols, used to categorize Xref symbols in comparisons
    symbols_set = set(symbol.lower() for symbol in experiment.symbol_mapper.categories)

    assemblies = get_assemblies_metadata()
    comparison_statistics_list = []
    for assembly in assemblies:
        # in the default (non-complete) run, only evaluate the selected assemblies
        if not complete and assembly.assembly_accession not in selected_genome_assemblies:
            continue

        canonical_fasta_filename = assembly.fasta_filename.replace(
            "pep.all.fa", "pep.all_canonical.fa"
        )
        canonical_fasta_path = sequences_directory / canonical_fasta_filename

        # assign symbols
        assignments_csv_path = pathlib.Path(
            f"{checkpoint_path.parent}/{canonical_fasta_path.stem}_symbols.csv"
        )
        # skip assignment if a CSV from a previous run already exists
        if not assignments_csv_path.exists():
            logger.info(f"assigning gene symbols to {canonical_fasta_path}")
            assign_symbols(
                network,
                symbols_metadata,
                canonical_fasta_path,
                scientific_name=assembly.scientific_name,
                output_directory=checkpoint_path.parent,
            )

        comparisons_csv_path = pathlib.Path(
            f"{checkpoint_path.parent}/{assignments_csv_path.stem}_compare.csv"
        )
        # compare against the assembly core database unless already compared;
        # a failed comparison (e.g. no canonical translations) skips the assembly
        if not comparisons_csv_path.exists():
            comparison_successful = compare_with_database(
                assignments_csv_path,
                assembly.core_db,
                assembly.scientific_name,
                symbols_set,
            )
            if not comparison_successful:
                continue

        comparison_statistics = get_comparison_statistics(comparisons_csv_path)
        comparison_statistics["scientific_name"] = assembly.scientific_name
        comparison_statistics["taxonomy_id"] = assembly.taxonomy_id
        comparison_statistics["clade"] = assembly.clade

        comparison_statistics_list.append(comparison_statistics)

        message = "{}: {} assignments, {} exact matches ({:.2f}%), {} fuzzy matches ({:.2f}%), {} total matches ({:.2f}%)".format(
            comparison_statistics["scientific_name"],
            comparison_statistics["num_assignments"],
            comparison_statistics["num_exact_matches"],
            comparison_statistics["matching_percentage"],
            comparison_statistics["num_fuzzy_matches"],
            comparison_statistics["fuzzy_percentage"],
            comparison_statistics["num_total_matches"],
            comparison_statistics["total_matches_percentage"],
        )
        logger.info(message)

    # aggregate per-assembly statistics into a dataframe for reporting
    dataframe_columns = [
        "clade",
        "scientific_name",
        "num_assignments",
        "num_exact_matches",
        "matching_percentage",
        "num_fuzzy_matches",
        "fuzzy_percentage",
        "num_total_matches",
        "total_matches_percentage",
    ]
    comparison_statistics = pd.DataFrame(
        comparison_statistics_list,
        columns=dataframe_columns,
    )

    # report per-clade tables with weighted-average match percentages
    clade_groups = comparison_statistics.groupby(["clade"])
    clade_groups_statistics = []
    for clade, group in clade_groups:
        # render the group table with two-decimal floats
        with pd.option_context("display.float_format", "{:.2f}".format):
            group_string = group.to_string(index=False)

        num_assignments_sum = group["num_assignments"].sum()
        num_exact_matches_sum = group["num_exact_matches"].sum()
        num_fuzzy_matches_sum = group["num_fuzzy_matches"].sum()
        num_total_matches_sum = num_exact_matches_sum + num_fuzzy_matches_sum

        # weight each assembly's percentage by its number of assignments
        matching_percentage_weighted_average = (
            num_exact_matches_sum / num_assignments_sum
        ) * 100
        fuzzy_percentage_weighted_average = (
            num_fuzzy_matches_sum / num_assignments_sum
        ) * 100
        total_percentage_weighted_average = (
            num_total_matches_sum / num_assignments_sum
        ) * 100

        averages_message = "{} weighted averages: {:.2f}% exact matches, {:.2f}% fuzzy matches, {:.2f}% total matches".format(
            clade,
            matching_percentage_weighted_average,
            fuzzy_percentage_weighted_average,
            total_percentage_weighted_average,
        )

        clade_statistics = f"{group_string}\n{averages_message}"

        clade_groups_statistics.append(clade_statistics)

    comparison_statistics_string = "comparison statistics:\n"
    comparison_statistics_string += "\n\n".join(
        clade_statistics for clade_statistics in clade_groups_statistics
    )
    logger.info(comparison_statistics_string)
def is_exact_match(symbol_a, symbol_b):
    """Return "exact_match" when the two symbols are equal ignoring case."""
    return (
        "exact_match" if symbol_a.lower() == symbol_b.lower() else "no_exact_match"
    )
def is_fuzzy_match(symbol_a, symbol_b):
    """Return "fuzzy_match" when one lowercased symbol strictly contains the other.

    Case-insensitive equality is explicitly NOT a fuzzy match.
    """
    lower_a = symbol_a.lower()
    lower_b = symbol_b.lower()

    # equal symbols are exact matches, never fuzzy ones
    if lower_a == lower_b:
        return "no_fuzzy_match"

    if lower_a in lower_b or lower_b in lower_a:
        return "fuzzy_match"

    return "no_fuzzy_match"
def is_known_symbol(symbol, symbols_set):
    """Return "known" when the lowercased symbol is present in symbols_set."""
    return "known" if symbol.lower() in symbols_set else "unknown"
def compare_with_database(
    assignments_csv,
    ensembl_database,
    scientific_name=None,
    symbols_set=None,
    EntrezGene=False,
    Uniprot_gn=False,
):
    """
    Compare classifier assignments with the gene symbols in the genome assembly
    ensembl_database core database on the public Ensembl MySQL server.

    Args:
        assignments_csv (str or Path): path to the classifier assignments CSV file
        ensembl_database (str): name of the assembly core database on the
            public Ensembl MySQL server
        scientific_name (str): species scientific name, used only in log messages
        symbols_set (set): lowercased known symbols; when provided, a
            "known_symbol" column is added to the comparisons
        EntrezGene (bool): forwarded to get_xref_canonical_translations
        Uniprot_gn (bool): forwarded to get_xref_canonical_translations

    Returns:
        bool: False when no canonical translations could be retrieved,
        True after the comparisons CSV file has been written.
    """
    assignments_csv_path = pathlib.Path(assignments_csv)

    canonical_translations = get_xref_canonical_translations(
        ensembl_database, EntrezGene=EntrezGene, Uniprot_gn=Uniprot_gn
    )

    # nothing to compare against; bail out before creating the comparisons file
    if len(canonical_translations) == 0:
        if scientific_name is None:
            logger.info("0 canonical translations retrieved, nothing to compare")
        else:
            logger.info(
                f"{scientific_name}: 0 canonical translations retrieved, nothing to compare"
            )
        return False

    comparisons = []
    with open(assignments_csv_path, "r", newline="") as assignments_file:
        csv_reader = csv.reader(assignments_file, delimiter="\t")
        # skip the header row
        _csv_field_names = next(csv_reader)

        for csv_row in csv_reader:
            csv_stable_id = csv_row[0]
            classifier_symbol = csv_row[1]
            probability = csv_row[2]

            # drop the stable id version suffix before matching
            translation_stable_id = csv_stable_id.split(".")[0]

            # keep only assignments whose translation has an Xref symbol
            if (
                translation_stable_id
                in canonical_translations["translation.stable_id"].values
            ):
                xref_symbol = canonical_translations.loc[
                    canonical_translations["translation.stable_id"]
                    == translation_stable_id,
                    "Xref_symbol",
                ].values[0]
                comparisons.append(
                    (csv_stable_id, xref_symbol, classifier_symbol, probability)
                )

    dataframe_columns = [
        "csv_stable_id",
        "xref_symbol",
        "classifier_symbol",
        "probability",
    ]
    compare_df = pd.DataFrame(comparisons, columns=dataframe_columns)

    # categorize each comparison row; result_type="reduce" keeps the apply
    # output a Series even for an empty dataframe
    compare_df["exact_match"] = compare_df.apply(
        lambda x: is_exact_match(x["classifier_symbol"], x["xref_symbol"]),
        axis=1,
        result_type="reduce",
    )

    compare_df["fuzzy_match"] = compare_df.apply(
        lambda x: is_fuzzy_match(x["classifier_symbol"], x["xref_symbol"]),
        axis=1,
        result_type="reduce",
    )

    # optionally flag whether the Xref symbol is known to the classifier
    if symbols_set:
        compare_df["known_symbol"] = compare_df.apply(
            lambda x: is_known_symbol(x["xref_symbol"], symbols_set),
            axis=1,
            result_type="reduce",
        )

    # save the comparisons next to the assignments file
    comparisons_csv_path = pathlib.Path(
        f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.csv"
    )
    compare_df.to_csv(comparisons_csv_path, sep="\t", index=False)

    return True
def get_comparison_statistics(comparisons_csv_path):
    """Compute match statistics from a comparisons CSV file.

    Reads the tab-separated comparisons file and returns a dict with the number
    of assignments, exact / fuzzy / total match counts, and the corresponding
    percentages. All values are zero when the file has no comparison rows.
    """
    compare_df = pd.read_csv(comparisons_csv_path, sep="\t", index_col=False)

    num_assignments = len(compare_df)

    # guard clause: an empty comparisons file yields all-zero statistics
    if num_assignments == 0:
        return {
            "num_assignments": 0,
            "num_exact_matches": 0,
            "matching_percentage": 0,
            "num_fuzzy_matches": 0,
            "fuzzy_percentage": 0,
            "num_total_matches": 0,
            "total_matches_percentage": 0,
        }

    num_exact_matches = len(compare_df[compare_df["exact_match"] == "exact_match"])
    num_fuzzy_matches = len(compare_df[compare_df["fuzzy_match"] == "fuzzy_match"])
    num_total_matches = num_exact_matches + num_fuzzy_matches

    return {
        "num_assignments": num_assignments,
        "num_exact_matches": num_exact_matches,
        "matching_percentage": (num_exact_matches / num_assignments) * 100,
        "num_fuzzy_matches": num_fuzzy_matches,
        "fuzzy_percentage": (num_fuzzy_matches / num_assignments) * 100,
        "num_total_matches": num_total_matches,
        "total_matches_percentage": (num_total_matches / num_assignments) * 100,
    }
def compare_assignments(
    assignments_csv, ensembl_database, scientific_name, checkpoint=None
):
    """Compare assignments with the ones on the latest Ensembl release.

    Args:
        assignments_csv (str or Path): path to the classifier assignments CSV file
        ensembl_database (str): name of the assembly core database on the
            public Ensembl MySQL server
        scientific_name (str): species scientific name
        checkpoint (str or Path): optional experiment checkpoint; when given,
            its symbol set is used to flag known Xref symbols in the comparison
    """
    assignments_csv_path = pathlib.Path(assignments_csv)

    # also log to a file next to the assignments CSV
    log_file_path = pathlib.Path(
        f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.log"
    )
    logger.add(log_file_path, format=logging_format)

    if checkpoint is None:
        symbols_set = None
    else:
        experiment, _network, _optimizer, _symbols_metadata = load_checkpoint(checkpoint)
        symbols_set = set(
            symbol.lower() for symbol in experiment.symbol_mapper.categories
        )

    # reuse an existing comparisons file from a previous run if present
    comparisons_csv_path = pathlib.Path(
        f"{assignments_csv_path.parent}/{assignments_csv_path.stem}_compare.csv"
    )
    if not comparisons_csv_path.exists():
        compare_with_database(
            assignments_csv_path, ensembl_database, scientific_name, symbols_set
        )

    comparison_statistics = get_comparison_statistics(comparisons_csv_path)

    taxonomy_id = get_species_taxonomy_id(scientific_name)
    clade = get_taxonomy_id_clade(taxonomy_id)

    comparison_statistics["scientific_name"] = scientific_name
    comparison_statistics["taxonomy_id"] = taxonomy_id
    comparison_statistics["clade"] = clade

    message = "{} assignments, {} exact matches ({:.2f}%), {} fuzzy matches ({:.2f}%), {} total matches ({:.2f}%)".format(
        comparison_statistics["num_assignments"],
        comparison_statistics["num_exact_matches"],
        comparison_statistics["matching_percentage"],
        comparison_statistics["num_fuzzy_matches"],
        comparison_statistics["fuzzy_percentage"],
        comparison_statistics["num_total_matches"],
        comparison_statistics["total_matches_percentage"],
    )
    logger.info(message)

    # render the statistics as a one-row table for the log
    dataframe_columns = [
        "clade",
        "scientific_name",
        "num_assignments",
        "num_exact_matches",
        "matching_percentage",
        "num_fuzzy_matches",
        "fuzzy_percentage",
        "num_total_matches",
        "total_matches_percentage",
    ]
    comparison_statistics = pd.DataFrame(
        [comparison_statistics],
        columns=dataframe_columns,
    )
    with pd.option_context("display.float_format", "{:.2f}".format):
        logger.info(
            f"comparison statistics:\n{comparison_statistics.to_string(index=False)}"
        )
def main():
"""
main function
"""
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"--datetime",
help="datetime string; if set | |
tex_node)
elif engine == 'Octane':
if ao_exists == True:
diffuse_node.setInput(0, ao_node)
mat_node.setInput(input_slots['diffuse'], tex_node)
else:
mat_node.setInput(input_slots['diffuse'], tex_node)
if prim_group.lower() + "_subsurfacecolor.png" not in tex_list_lower:
transl_color_exists = True
cc_node = mat_builder_node.createNode(node_names['cc'])
cc_node.setInput(0, tex_node)
transl_color_node = cc_node
if engine == 'Octane':
multiply_node = mat_builder_node.createNode("octane::NT_TEX_MULTIPLY")
multiply_node.setInput(0, cc_node)
if transl_weight_exists == True:
multiply_node.setInput(1, transl_weight_node)
else:
multiply_node.parm("texture2").set(0.3)
transl_color_node = multiply_node
mat_node.setInput(input_slots['transl_color'], multiply_node)
elif engine == 'VRay':
back_mat_node.setInput(0, cc_node)
else:
mat_node.setInput(input_slots['transl_color'], cc_node)
elif tex_type in ao_names:
ao_exists = True
ao_node = tex_node
if engine == 'VRay':
if diffuse_exists == True:
multiply_node = mat_builder_node.createNode("VRayNodeTexRGBMultiplyMax")
multiply_node.setInput(0, diffuse_node)
multiply_node.setInput(1, tex_node)
mat_node.setInput(0, multiply_node)
elif engine == 'Octane':
if diffuse_exists == True:
diffuse_node.setInput(0, tex_node)
else:
mat_node.setInput(input_slots['ao'], tex_node)
elif tex_type == "subsurfacecolor":
if engine == 'Octane':
transl_color_exists = True
transl_color_node = tex_node
if transl_weight_exists == True:
tex_node.setInput(0, transl_weight_node)
mat_node.setInput(0, tex_node)
elif engine == 'VRay':
back_mat_node.setInput(0, tex_node)
else:
mat_node.setInput(input_slots['transl_color'], tex_node)
elif tex_type == "subsurfaceamount":
transl_weight_exists = True
if engine == 'Octane':
transl_weight_node = tex_node
if transl_color_exists == True:
if prim_group.lower() + "_subsurfacecolor.png" not in tex_list_lower:
transl_color_node.setInput(1, tex_node)
else:
transl_color_node.setInput(0, tex_node)
elif engine == 'VRay':
twoside_mat_node.setInput(2, tex_node)
else:
mat_node.setInput(input_slots['transl_weight'], tex_node)
elif tex_type in spec_names:
spec_exists = True
mat_node.setInput(input_slots['spec'], tex_node)
elif tex_type in gloss_names:
rough_exists = True
mat_node.parm(parm_names['roughness']).set(1)
if engine == 'Redshift':
mat_node.parm("refl_isGlossiness").set("1")
mat_node.setInput(input_slots['gloss'], tex_node)
elif engine == 'Arnold':
invert_node = mat_builder_node.createNode("arnold::color_correct")
invert_node.setInput(0, tex_node)
invert_node.parm("invert").set("1")
mat_node.setInput(input_slots['gloss'], invert_node)
elif engine == 'Renderman':
invert_node = mat_builder_node.createNode("pxrinvert::22")
invert_node.setInput(0, tex_node)
mat_node.setInput(input_slots['gloss'], invert_node)
elif engine == 'Octane':
tex_node.parm("invert").set("1")
mat_node.setInput(input_slots['gloss'], tex_node)
else:
mat_node.setInput(input_slots['gloss'], tex_node)
elif tex_type in rough_names:
rough_exists = True
mat_node.parm(parm_names['roughness']).set(1)
if engine == 'VRay':
mat_node.parm("option_use_roughness").set("1")
mat_node.setInput(input_slots['rough'], tex_node)
elif tex_type in opc_names:
opc_exists = True
if engine == 'Redshift':
if self.ui.opc_as_stencil.isChecked() == True:
sprite_node = mat_builder_node.createNode("redshift::Sprite", tex_type)
sprite_node.parm(parm_names['tex_filename']).set(dirpath + tex)
sprite_node.setInput(0, mat_node)
mat_out_node.setInput(0, sprite_node)
else:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(47, tex_node)
elif engine == 'Renderman':
mat_node.setInput(input_slots['opc'], tex_node, 1)
else:
# tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
# tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(input_slots['opc'], tex_node)
if engine == 'VRay':
back_mat_node.setInput(input_slots['opc'], tex_node)
if self.ui.opc_as_stencil.isChecked() == True:
twoside_mat_node.parm("opacity_mode").set("1")
back_mat_node.parm("opacity_mode").set("1")
elif tex_type in nml_names:
normal_exists = True
if engine in ['Arnold', 'Redshift', 'Renderman']:
normal_node = mat_builder_node.createNode(node_names['bump'])
normal_node.setInput(1, tex_node)
mat_node.setInput(input_slots['normal'], normal_node)
if engine == 'Redshift':
normal_node.parm("inputType").set("1")
elif engine == 'Octane':
mat_node.setInput(input_slots['normal'], tex_node)
elif engine == 'Renderman':
normal_node = mat_builder_node.createNode("pxrnormalmap:22")
mat_node.setInput(input_slots['normal'], tex_node)
elif engine == 'VRay':
normal_node = mat_builder_node.createNode(node_names['bump'])
if is_leaf != None:
normal_node.setInput(0, twoside_mat_node)
else:
normal_node.setInput(0, mat_node)
normal_node.parm("map_type").set("1")
normal_node.setInput(3, tex_node)
mat_out_node.setInput(0, normal_node)
for tex in tex_list:
find_base_leaf_tex = re.search(r"(?i)(?:" + base_prim_group + r")" + regex_filter, tex)
if find_base_leaf_tex != None:
x = tex[:-4]
tex_type = x.split("_")[-1]
tex_type = tex_type.lower()
if tex_type not in all_names or tex_type == "subsurfacecolor":
pass
if tex_type in ao_names and ao_exists == False:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
ao_exists = True
ao_node = tex_node
if engine == 'VRay':
if diffuse_exists == True:
multiply_node = mat_builder_node.createNode("VRayNodeTexRGBMultiplyMax")
multiply_node.setInput(0, diffuse_node)
multiply_node.setInput(1, tex_node)
mat_node.setInput(0, multiply_node)
elif engine == 'Octane':
if diffuse_exists == True:
diffuse_node.setInput(0, tex_node)
else:
mat_node.setInput(input_slots['ao'], tex_node)
elif tex_type in spec_names and spec_exists == False:
spec_exists = True
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(input_slots['spec'], tex_node)
elif tex_type in gloss_names and rough_exists == False:
rough_exists = True
mat_node.parm(parm_names['roughness']).set(1)
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
if engine == 'Redshift':
mat_node.parm("refl_isGlossiness").set("1")
mat_node.setInput(input_slots['gloss'], tex_node)
elif engine == 'Arnold':
invert_node = mat_builder_node.createNode("arnold::color_correct")
invert_node.setInput(0, tex_node)
invert_node.parm("invert").set("1")
mat_node.setInput(input_slots['gloss'], invert_node)
elif engine == 'Renderman':
invert_node = mat_builder_node.createNode("pxrinvert::22")
invert_node.setInput(0, tex_node)
mat_node.setInput(input_slots['gloss'], invert_node)
elif engine == 'Octane':
tex_node.parm("invert").set("1")
mat_node.setInput(input_slots['gloss'], tex_node)
else:
mat_node.setInput(input_slots['gloss'], tex_node)
elif tex_type in rough_names and rough_exists == False:
rough_exists = True
mat_node.parm(parm_names['roughness']).set(1)
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
if engine == 'VRay':
mat_node.parm("option_use_roughness").set("1")
mat_node.setInput(input_slots['rough'], tex_node)
elif tex_type in opc_names and opc_exists == False:
opc_exists = True
if engine == 'Redshift':
if self.ui.opc_as_stencil.isChecked() == True:
sprite_node = mat_builder_node.createNode("redshift::Sprite", tex_type)
sprite_node.parm(parm_names['tex_filename']).set(dirpath + tex)
sprite_node.setInput(0, mat_node)
mat_out_node.setInput(0, sprite_node)
else:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(47, tex_node)
elif engine == 'Renderman':
mat_node.setInput(input_slots['opc'], tex_node, 1)
else:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(input_slots['opc'], tex_node)
if engine == 'VRay':
back_mat_node.setInput(input_slots['opc'], tex_node)
if self.ui.opc_as_stencil.isChecked() == True:
mat_node.parm("opacity_mode").set("1")
elif tex_type in nml_names and normal_exists == False:
normal_exists = True
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
if engine in ['Arnold', 'Redshift', 'Renderman']:
normal_node = mat_builder_node.createNode(node_names['bump'])
normal_node.setInput(1, tex_node)
mat_node.setInput(input_slots['normal'], normal_node)
if engine == 'Redshift':
normal_node.parm("inputType").set("1")
elif engine == 'Octane':
mat_node.setInput(input_slots['normal'], tex_node)
elif engine == 'Renderman':
mat_node.setInput(input_slots['normal'], tex_node)
elif engine == 'VRay':
normal_node = mat_builder_node.createNode(node_names['bump'])
if is_leaf != None:
normal_node.setInput(0, twoside_mat_node)
else:
normal_node.setInput(0, mat_node)
normal_node.parm("map_type").set("1")
normal_node.setInput(3, tex_node)
mat_out_node.setInput(0, normal_node)
elif tex_type == "subsurfaceamount" and transl_weight_exists == False:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
if engine == 'Octane':
transl_weight_node = tex_node
if transl_color_exists == True:
if prim_group.lower() + "_subsurfacecolor.png" not in tex_list_lower:
transl_color_node.setInput(1, tex_node)
else:
transl_color_node.setInput(0, tex_node)
elif engine == 'VRay':
twoside_mat_node.setInput(2, tex_node)
else:
mat_node.setInput(input_slots['transl_weight'], tex_node)
self.adjust_gamma(tex_node, tex_type) # Set linear or sRGB Gamma
if rough_exists == False:
mat_node.parm(parm_names['roughness']).set(0.2)
if transl_weight_exists == False:
if engine == 'VRay':
twoside_mat_node.setParms({"translucency_texr": 0.3, "translucency_texg": 0.3, "translucency_texb": 0.3})
elif engine != 'Octane':
mat_node.parm(parm_names['transl_weight']).set(0.3)
else:
for tex in tex_list:
if engine == 'VRay':
if is_leaf != None:
mat_node = front_mat_node
find_tex = re.search(r"(?i)(?:" + prim_group + r")" + regex_filter, tex)
if find_tex != None:
x = tex[:-4]
tex_type = x.split("_")[-1]
tex_type = tex_type.lower()
if tex_type not in all_names:
tex_type = "diffuse"
# Prevent creation of opacity texture node for Redshift
if engine == 'Redshift':
if tex_type not in opc_names:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
else:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
self.adjust_gamma(tex_node, tex_type) # Set linear or sRGB Gamma
if tex_type in diff_names:
diffuse_exists = True
diffuse_node = tex_node
if engine == 'VRay':
if ao_exists == True:
multiply_node = mat_builder_node.createNode("VRayNodeTexRGBMultiplyMax")
multiply_node.setInput(0, diffuse_node)
multiply_node.setInput(1, ao_node)
mat_node.setInput(input_slots['diffuse'], multiply_node)
else:
mat_node.setInput(input_slots['diffuse'], tex_node)
elif engine == 'Octane':
if ao_exists == True:
diffuse_node.setInput(0, ao_node)
mat_node.setInput(input_slots['diffuse'], tex_node)
else:
mat_node.setInput(input_slots['diffuse'], tex_node)
if is_leaf != None:
if prim_group.lower() + "_subsurfacecolor.png" not in tex_list_lower:
transl_color_exists = True
cc_node = mat_builder_node.createNode(node_names['cc'])
cc_node.setInput(0, tex_node)
transl_color_node = cc_node
if engine == 'Octane':
multiply_node = mat_builder_node.createNode("octane::NT_TEX_MULTIPLY")
multiply_node.setInput(0, cc_node)
if transl_weight_exists == True:
multiply_node.setInput(1, transl_weight_node)
else:
multiply_node.parm("texture2").set(0.3)
transl_color_node = multiply_node
mat_node.setInput(input_slots['transl_color'], multiply_node)
elif engine == 'VRay':
back_mat_node.setInput(0, cc_node)
else:
mat_node.setInput(input_slots['transl_color'], cc_node)
elif tex_type in ao_names:
ao_exists = True
ao_node = tex_node
if engine == 'VRay':
if diffuse_exists == True:
multiply_node = mat_builder_node.createNode("VRayNodeTexRGBMultiplyMax")
multiply_node.setInput(0, diffuse_node)
multiply_node.setInput(1, tex_node)
mat_node.setInput(0, multiply_node)
elif engine == 'Octane':
if diffuse_exists == True:
diffuse_node.setInput(0, tex_node)
else:
mat_node.setInput(input_slots['ao'], tex_node)
elif tex_type == "subsurfacecolor":
if engine == 'Octane':
transl_color_exists = True
transl_color_node = tex_node
if transl_weight_exists == True:
tex_node.setInput(0, transl_weight_node)
mat_node.setInput(0, tex_node)
elif engine == 'VRay':
if is_leaf != None:
back_mat_node.setInput(0, tex_node)
else:
mat_node.setInput(input_slots['transl_color'], tex_node)
elif tex_type == "subsurfaceamount":
transl_weight_exists = True
if engine == 'Octane':
transl_weight_node = tex_node
if transl_color_exists == True:
if prim_group.lower() + "_subsurfacecolor.png" not in tex_list_lower:
transl_color_node.setInput(1, tex_node)
else:
transl_color_node.setInput(0, tex_node)
elif engine == 'VRay':
twoside_mat_node.setInput(2, tex_node)
else:
mat_node.setInput(input_slots['transl_weight'], tex_node)
elif tex_type in spec_names:
mat_node.setInput(input_slots['spec'], tex_node)
elif tex_type in gloss_names:
rough_exists = True
mat_node.parm(parm_names['roughness']).set(1)
if engine == 'Redshift':
mat_node.parm("refl_isGlossiness").set("1")
mat_node.setInput(input_slots['gloss'], tex_node)
elif engine == 'Arnold':
invert_node = mat_builder_node.createNode("arnold::color_correct")
invert_node.setInput(0, tex_node)
invert_node.parm("invert").set("1")
mat_node.setInput(input_slots['gloss'], invert_node)
elif engine == 'Renderman':
invert_node = mat_builder_node.createNode("pxrinvert::22")
invert_node.setInput(0, tex_node)
mat_node.setInput(input_slots['gloss'], invert_node)
elif engine == 'Octane':
tex_node.parm("invert").set("1")
mat_node.setInput(input_slots['gloss'], tex_node)
else:
mat_node.setInput(input_slots['gloss'], tex_node)
elif tex_type in rough_names:
rough_exists = True
mat_node.parm(parm_names['roughness']).set(1)
if engine == 'VRay':
mat_node.parm("option_use_roughness").set("1")
mat_node.setInput(input_slots['rough'], tex_node)
elif tex_type in opc_names:
opc_exists = True
if engine == 'Redshift':
if self.ui.opc_as_stencil.isChecked() == True:
sprite_node = mat_builder_node.createNode("redshift::Sprite", tex_type)
sprite_node.parm(parm_names['tex_filename']).set(dirpath + tex)
sprite_node.setInput(0, mat_node)
mat_out_node.setInput(0, sprite_node)
else:
tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(47, tex_node)
elif engine == 'Renderman':
mat_node.setInput(input_slots['opc'], tex_node, 1)
else:
# tex_node = mat_builder_node.createNode(node_names['texture_node'], tex_type)
# tex_node.parm(parm_names['tex_filename']).set(dirpath + tex)
mat_node.setInput(input_slots['opc'], tex_node)
if engine == 'VRay':
if is_leaf != None:
back_mat_node.setInput(input_slots['opc'], tex_node)
if self.ui.opc_as_stencil.isChecked() == True:
front_mat_node.parm("opacity_mode").set("1")
back_mat_node.parm("opacity_mode").set("1")
else:
if self.ui.opc_as_stencil.isChecked() == True:
mat_node.parm("opacity_mode").set("1")
elif tex_type in nml_names:
normal_exists = True
if engine in ['Arnold', 'Redshift', 'Renderman']:
normal_node = mat_builder_node.createNode(node_names['bump'])
normal_node.setInput(1, tex_node)
mat_node.setInput(input_slots['normal'], normal_node)
if engine == 'Redshift':
normal_node.parm("inputType").set("1")
elif engine == 'Arnold':
normal_node.parm("color_to_signed").set("0")
elif engine == | |
if ('dmt',ci,ci) in cq:
cq[('dmt_sge',ci,ci)] = cq[('dmt',ci,ci)]
self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
for ci,ckDict in self.coefficients.diffusion.items():
if self.lag:#mwf looks like this was missing if lag May 7 09
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=copy.deepcopy(cq[('grad(phi)',ck)])
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=copy.deepcopy(cq[('dphi',ck,cj)])
cq[('da_sge',ci,ck,cj)]=copy.deepcopy(cq[('da',ci,ck,cj)])
else:
for ck,cjDict in ckDict.items():
cq[('grad(phi)_sge',ck)]=cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
cq[('dphi_sge',ck,cj)]=cq[('dphi',ck,cj)]
cq[('da_sge',ci,ck,cj)]=cq[('da',ci,ck,cj)]
def updateSubgridErrorHistory(self,initializationPhase=False):
if self.lag:
for ci in range(self.nc):
self.tau_last[ci][:] = self.tau[ci]
#mwf should these be deep copies?
self.cq[('dH_sge',ci,ci)][:] = self.cq[('dH',ci,ci)]
self.cq[('dm_sge',ci,ci)][:] = self.cq[('dm',ci,ci)]
for ci,ckDict in self.coefficients.diffusion.items():
for ck,cjDict in ckDict.items():
self.cq[('grad(phi)_sge',ck)][:]=self.cq[('grad(phi)',ck)]
for cj in list(cjDict.keys()):
self.cq[('dphi_sge',ck,cj)][:]=0.0 #grad(phi) will be a constant when lagged so dphi=0 not 1
self.cq[('da_sge',ci,ck,cj)][:]=self.cq[('da',ci,ck,cj)]
    def calculateSubgridError(self,q):
        """Compute the ADR stabilization parameter tau for each component and
        apply it to the strong pde residual, writing ('subgridError',ci) and
        ('dsubgridError',ci,cj) in place in the quadrature dictionary q."""
        oldTau=False#True #mwf oldTau not working with sd!
        for ci in range(self.nc):
            if oldTau:
                # legacy tau based on element diameters (disabled; see flag above)
                if self.coefficients.sd:
                    csubgridError.calculateSubgridError_ADR_tau_sd(self.stabilizationFlag,
                                                                   self.coefficients.sdInfo[(ci,ci)][0],self.coefficients.sdInfo[(ci,ci)][1],
                                                                   self.mesh.elementDiametersArray,
                                                                   q[('dmt',ci,ci)],
                                                                   q[('dH',ci,ci)],
                                                                   q[('a',ci,ci)],
                                                                   q[('da',ci,ci,ci)],
                                                                   q[('grad(phi)',ci)],
                                                                   q[('dphi',ci,ci)],
                                                                   q[('dr',ci,ci)],
                                                                   q[('pe',ci)],
                                                                   q[('cfl',ci)],
                                                                   self.tau[ci])
                else:
                    csubgridError.calculateSubgridError_ADR_tau(self.stabilizationFlag,
                                                                self.mesh.elementDiametersArray,
                                                                q[('dmt',ci,ci)],
                                                                q[('dH',ci,ci)],
                                                                q[('a',ci,ci)],
                                                                q[('da',ci,ci,ci)],
                                                                q[('grad(phi)',ci)],
                                                                q[('dphi',ci,ci)],
                                                                q[('dr',ci,ci)],
                                                                q[('pe',ci)],
                                                                q[('cfl',ci)],
                                                                self.tau[ci])
            else:
                # current tau based on the element map jacobian inverse(J)
                if self.coefficients.sd:
                    csubgridError.calculateSubgridError_ADR_generic_tau_sd(self.coefficients.sdInfo[(ci,ci)][0],self.coefficients.sdInfo[(ci,ci)][1],
                                                                           q['inverse(J)'],
                                                                           q[('dmt',ci,ci)],
                                                                           q[('dH',ci,ci)],
                                                                           q[('a',ci,ci)],
                                                                           q[('da',ci,ci,ci)],
                                                                           q[('grad(phi)',ci)],
                                                                           q[('dphi',ci,ci)],
                                                                           q[('dr',ci,ci)],
                                                                           q[('pe',ci)],
                                                                           q[('cfl',ci)],
                                                                           self.tau[ci])
                else:
                    csubgridError.calculateSubgridError_ADR_generic_tau(q['inverse(J)'],
                                                                        q[('dmt',ci,ci)],
                                                                        q[('dH',ci,ci)],
                                                                        q[('a',ci,ci)],
                                                                        q[('da',ci,ci,ci)],
                                                                        q[('grad(phi)',ci)],
                                                                        q[('dphi',ci,ci)],
                                                                        q[('dr',ci,ci)],
                                                                        q[('pe',ci)],
                                                                        q[('cfl',ci)],
                                                                        self.tau[ci])
            # when lagging, use the tau snapshot from the previous step
            if self.lag:
                tau=self.tau_last[ci]
            else:
                tau=self.tau[ci]
            for cj in range(self.nc):
                if ('dpdeResidual',ci,cj) in q:
                    csubgridError.calculateSubgridError_tauRes(tau,
                                                               q[('pdeResidual',ci)],
                                                               q[('dpdeResidual',ci,cj)],
                                                               q[('subgridError',ci)],
                                                               q[('dsubgridError',ci,cj)])
class HamiltonJacobi_ASGS_opt(SGE_base):
    """ASGS subgrid error interface for Hamilton-Jacobi equations in the
    optimized code path: only maintains the (possibly lagged) dH arrays;
    tau and the subgrid error themselves are computed elsewhere."""
    def __init__(self,coefficients,nd,stabFlag='1',lag=False):
        SGE_base.__init__(self,coefficients,nd,lag)
        self.stabilizationFlag = stabFlag
    def initializeElementQuadrature(self,mesh,t,cq):
        import copy
        self.cq = cq
        self.mesh = mesh
        self.tau = []
        self.tau_last = []
        for component in range(self.nc):
            live = ('dH', component, component)
            lagged = ('dH_sge', component, component)
            # lagging needs an independent snapshot; otherwise just alias the live array
            cq[lagged] = copy.deepcopy(cq[live]) if self.lag else cq[live]
    def calculateSubgridError(self,q):
        # handled by the optimized code path
        pass
    def updateSubgridErrorHistory(self,initializationPhase=False):
        if self.lag:
            for component in range(self.nc):
                self.cq[('dH_sge',component,component)][:] = self.cq[('dH',component,component)]
class StokesStabilization_1(SGE_base):
    """2D Stokes stabilization (variant 1) computed from the strong pde
    residuals; writes the subgrid error and its jacobian in place in q."""
    def __init__(self,coefficients,nd,stabFlag='1',lag=False):
        # stabFlag is accepted for interface compatibility but not stored here
        SGE_base.__init__(self,coefficients,nd,lag)
    def calculateSubgridError(self,q):
        # both the sd and non-sd kernels take the identical argument list
        args = (self.mesh.elementDiametersArray,
                q[('u',1)],
                q[('u',2)],
                q[('a',1,1)],
                q[('pdeResidual',0)],
                q[('dpdeResidual',0,1)],
                q[('dpdeResidual',0,2)],
                q[('pdeResidual',1)],
                q[('dpdeResidual',1,0)],
                q[('dpdeResidual',1,1)],
                q[('pdeResidual',2)],
                q[('dpdeResidual',2,0)],
                q[('dpdeResidual',2,2)],
                q[('subgridError',0)],
                q[('dsubgridError',0,0)],
                q[('dsubgridError',0,1)],
                q[('dsubgridError',0,2)],
                q[('subgridError',1)],
                q[('dsubgridError',1,0)],
                q[('dsubgridError',1,1)],
                q[('dsubgridError',1,2)],
                q[('subgridError',2)],
                q[('dsubgridError',2,0)],
                q[('dsubgridError',2,1)],
                q[('dsubgridError',2,2)])
        if self.coefficients.sd:
            csubgridError.calculateSubgridErrorStokes2D_1_sd(*args)
        else:
            csubgridError.calculateSubgridErrorStokes2D_1(*args)
    def updateSubgridErrorHistory(self,initializationPhase=False):
        # nothing is lagged for this stabilization
        pass
class StokesASGS_velocity(SGE_base):
    """GLS subgrid error for Stokes stabilizing the velocity equations only
    (no pressure stabilization); supports 2D and 3D, sd and non-sd storage."""
    def __init__(self,coefficients,nd):
        SGE_base.__init__(self,coefficients,nd,lag=False)
        self.stabilizationFlag = '1'
        # register the jacobian couplings introduced by the subgrid error terms
        coefficients.stencil[0].add(0)
        if nd == 2:
            coefficients.stencil[1].add(2)
            coefficients.stencil[2].add(1)
        elif nd == 3:
            coefficients.stencil[1].add(2)
            coefficients.stencil[1].add(3)
            coefficients.stencil[2].add(1)
            coefficients.stencil[2].add(3)
            coefficients.stencil[3].add(1)
            coefficients.stencil[3].add(2)
    def calculateSubgridError(self,q):
        # the sd and non-sd kernels share identical argument lists in each dimension
        if self.nd == 2:
            args = (self.mesh.elementDiametersArray,
                    q[('a',1,1)],
                    q[('pdeResidual',1)],
                    q[('dpdeResidual',1,0)],
                    q[('dpdeResidual',1,1)],
                    q[('pdeResidual',2)],
                    q[('dpdeResidual',2,0)],
                    q[('dpdeResidual',2,2)],
                    q[('subgridError',1)],
                    q[('dsubgridError',1,0)],
                    q[('dsubgridError',1,1)],
                    q[('subgridError',2)],
                    q[('dsubgridError',2,0)],
                    q[('dsubgridError',2,2)])
            if self.coefficients.sd:
                csubgridError.calculateSubgridErrorStokes2D_GLS_velocity_sd(*args)
            else:
                csubgridError.calculateSubgridErrorStokes2D_GLS_velocity(*args)
        elif self.nd == 3:
            args = (self.mesh.elementDiametersArray,
                    q[('a',1,1)],
                    q[('pdeResidual',1)],
                    q[('dpdeResidual',1,0)],
                    q[('dpdeResidual',1,1)],
                    q[('pdeResidual',2)],
                    q[('dpdeResidual',2,0)],
                    q[('dpdeResidual',2,2)],
                    q[('pdeResidual',3)],
                    q[('dpdeResidual',3,0)],
                    q[('dpdeResidual',3,3)],
                    q[('subgridError',1)],
                    q[('dsubgridError',1,0)],
                    q[('dsubgridError',1,1)],
                    q[('subgridError',2)],
                    q[('dsubgridError',2,0)],
                    q[('dsubgridError',2,2)],
                    q[('subgridError',3)],
                    q[('dsubgridError',3,0)],
                    q[('dsubgridError',3,3)])
            if self.coefficients.sd:
                csubgridError.calculateSubgridErrorStokes3D_GLS_velocity_sd(*args)
            else:
                csubgridError.calculateSubgridErrorStokes3D_GLS_velocity(*args)
    def updateSubgridErrorHistory(self,initializationPhase=False):
        # lag=False for this class, so there is no history to update
        pass
class NavierStokesASGS_velocity_pressure(SGE_base):
    """ASGS (algebraic subgrid-scale) stabilization for Navier-Stokes in
    velocity-pressure form.

    Computes the stabilization parameters tau[0] (pressure) and tau[1]
    (momentum) and the resulting subgrid error terms in place in the
    quadrature dictionary.  When lag=True the advective velocity used in tau
    is the previous step's field (self.v_last) once nSteps reaches
    delayLagSteps.
    """
    def __init__(self,coefficients,nd,stabFlag='1',lag=False,delayLagSteps=5,hFactor=1.0,noPressureStabilization=False):
        self.noPressureStabilization=noPressureStabilization
        SGE_base.__init__(self,coefficients,nd,lag)
        self.stabilizationFlag = stabFlag
        # the pressure equation couples with itself through the stabilization
        coefficients.stencil[0].add(0)
        self.nSteps=0
        self.delayLagSteps=delayLagSteps
        self.hFactor=hFactor
    def initializeElementQuadrature(self,mesh,t,cq):
        """Allocate tau storage and set up the lagged/aliased '_sge' entries of
        the quadrature arrays, then prime tau if lagging is enabled."""
        import copy
        self.mesh=mesh
        self.tau=[]
        self.tau_last=[]
        self.df_last={}
        self.cq=cq
        # independent snapshot of the advective field for lagging
        self.v_last = copy.deepcopy(cq[('f',0)])
        for ci in range(self.nc):
            if self.lag:
                self.tau_last.append(numpy.zeros(cq[('u',ci)].shape,'d'))
                self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
                for cj in range(self.nc):
                    if ('df',ci,cj) in cq:
                        if ci ==0:
                            cq[('df_sge',ci,cj)]=cq[('df',ci,cj)]
                        else:
                            #cek for incompressible form weshould just be able to use v_last
                            #cq[('df_sge',ci,cj)] = numpy.zeros(cq[('df',ci,cj)].shape,'d')
                            if ci == cj:
                                cq[('df_sge',ci,cj)] = self.v_last
                            else:
                                cq[('df_sge',ci,cj)] = numpy.zeros(cq[('df',ci,cj)].shape,'d')
            else:
                # no lagging: the '_sge' entries simply alias the live arrays
                for cj in range(self.nc):
                    if ('df',ci,cj) in cq:
                        cq[('df_sge',ci,cj)]=cq[('df',ci,cj)]
                self.tau.append(numpy.zeros(cq[('u',ci)].shape,'d'))
        for ci,ckDict in self.coefficients.diffusion.items():
            for ck,cjDict in ckDict.items():
                cq[('grad(phi)_sge',ck)]=cq[('grad(phi)',ck)]
                for cj in list(cjDict.keys()):
                    cq[('dphi_sge',ck,cj)]=cq[('dphi',ck,cj)]
                    cq[('da_sge',ci,ck,cj)]=cq[('da',ci,ck,cj)]
        for ci,cjDict in self.coefficients.hamiltonian.items():
            for cj in cjDict:
                cq[('dH_sge',ci,cj)]=cq[('dH',ci,cj)]
        if self.lag:
            # prime tau from the initial coefficients so tau_last is usable on step one
            if self.coefficients.sd:
                csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau_sd(self.hFactor,
                                                                             self.mesh.elementDiametersArray,
                                                                             cq[('dmt',1,1)],
                                                                             cq[('dm',1,1)],
                                                                             cq[('f',0)],
                                                                             cq[('a',1,1)],
                                                                             self.tau[0],
                                                                             self.tau[1],
                                                                             cq[('cfl',0)])
            else:
                csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau(self.hFactor,
                                                                          self.mesh.elementDiametersArray,
                                                                          cq[('dmt',1,1)],
                                                                          cq[('dm',1,1)],
                                                                          cq[('f',0)],
                                                                          cq[('a',1,1)],
                                                                          self.tau[0],
                                                                          self.tau[1],
                                                                          cq[('cfl',0)])
            self.v_last[:]=self.cq[('f',0)]
    def updateSubgridErrorHistory(self,initializationPhase=False):
        """Advance the step counter; when lagging, snapshot tau and the
        advective field for use on the next step."""
        self.nSteps+=1
        if self.lag:
            for ci in range(self.nc):
                self.tau_last[ci][:] = self.tau[ci]
            self.v_last[:]=self.cq[('f',0)]
        #cek for incompressible form we can just use v_last
#        for cj in range(self.nc):
#            if self.cq.has_key(('df',ci,cj)):
#                if ci != 0:
#                    self.cq[('df_sge',ci,cj)][:] = self.cq[('df',ci,cj)]
    def calculateSubgridError(self,q):
        """Compute tau[0]/tau[1] and the subgrid error (plus jacobians) in
        place in q for 2D or 3D."""
        from . import LinearAlgebraTools
        oldTau=True
        if self.nd == 2:
            # choose the advective velocity: live field until delayLagSteps, then lagged
            if self.lag and self.nSteps < self.delayLagSteps:
                v = q[('f',0)]
            elif self.lag:
                v = self.v_last
            else:
                v = q[('f',0)]
            if oldTau:
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau_sd(self.hFactor,
                                                                                 self.mesh.elementDiametersArray,
                                                                                 q[('dmt',1,1)],
                                                                                 q[('dm',1,1)],
                                                                                 v,
                                                                                 q[('a',1,1)],
                                                                                 self.tau[0],
                                                                                 self.tau[1],
                                                                                 q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau(self.hFactor,
                                                                              self.mesh.elementDiametersArray,
                                                                              q[('dmt',1,1)],
                                                                              q[('dm',1,1)],
                                                                              v,
                                                                              q[('a',1,1)],
                                                                              self.tau[0],
                                                                              self.tau[1],
                                                                              q[('cfl',0)])
            else:
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau_sd(q['inverse(J)'],
                                                                                     q[('dmt',1,1)],
                                                                                     q[('dm',1,1)],
                                                                                     v,
                                                                                     q[('a',1,1)],
                                                                                     self.tau[0],
                                                                                     self.tau[1],
                                                                                     q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau(q['inverse(J)'],
                                                                                  q[('dmt',1,1)],
                                                                                  q[('dm',1,1)],
                                                                                  v,
                                                                                  q[('a',1,1)],
                                                                                  self.tau[0],
                                                                                  self.tau[1],
                                                                                  q[('cfl',0)])
            tau0=self.tau[0]
            tau1=self.tau[1]
            csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tauRes(tau0,
                                                                         tau1,
                                                                         q[('pdeResidual',0)],
                                                                         q[('dpdeResidual',0,1)],
                                                                         q[('dpdeResidual',0,2)],
                                                                         q[('pdeResidual',1)],
                                                                         q[('dpdeResidual',1,0)],
                                                                         q[('dpdeResidual',1,1)],
                                                                         q[('dpdeResidual',1,2)],
                                                                         q[('pdeResidual',2)],
                                                                         q[('dpdeResidual',2,0)],
                                                                         q[('dpdeResidual',2,1)],
                                                                         q[('dpdeResidual',2,2)],
                                                                         q[('subgridError',0)],
                                                                         q[('dsubgridError',0,1)],
                                                                         q[('dsubgridError',0,2)],
                                                                         q[('subgridError',1)],
                                                                         q[('dsubgridError',1,0)],
                                                                         q[('dsubgridError',1,1)],
                                                                         q[('dsubgridError',1,2)],
                                                                         q[('subgridError',2)],
                                                                         q[('dsubgridError',2,0)],
                                                                         q[('dsubgridError',2,1)],
                                                                         q[('dsubgridError',2,2)])
            if self.noPressureStabilization:
                q[('subgridError',0)][:]=0.0
                q[('dsubgridError',0,1)][:]=0.0
                q[('dsubgridError',0,2)][:]=0.0
        elif self.nd == 3:
            if self.lag and self.nSteps < self.delayLagSteps:
                v = q[('f',0)]
            elif self.lag:
                v = self.v_last
            else:
                v = q[('f',0)]
            if oldTau:
                # NOTE(review): the 2D-named tau routine is also used in 3D here -- confirm intended
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau_sd(self.hFactor,
                                                                                 self.mesh.elementDiametersArray,
                                                                                 q[('dmt',1,1)],
                                                                                 q[('dm',1,1)],
                                                                                 v,
                                                                                 q[('a',1,1)],
                                                                                 self.tau[0],
                                                                                 self.tau[1],
                                                                                 q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau(self.hFactor,
                                                                              self.mesh.elementDiametersArray,
                                                                              q[('dmt',1,1)],
                                                                              q[('dm',1,1)],
                                                                              v,
                                                                              q[('a',1,1)],
                                                                              self.tau[0],
                                                                              self.tau[1],
                                                                              q[('cfl',0)])
            else:
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau_sd(q['inverse(J)'],
                                                                                     q[('dmt',1,1)],
                                                                                     q[('dm',1,1)],
                                                                                     v,
                                                                                     q[('a',1,1)],
                                                                                     self.tau[0],
                                                                                     self.tau[1],
                                                                                     q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau(q['inverse(J)'],
                                                                                  q[('dmt',1,1)],
                                                                                  q[('dm',1,1)],
                                                                                  v,
                                                                                  q[('a',1,1)],
                                                                                  self.tau[0],
                                                                                  self.tau[1],
                                                                                  q[('cfl',0)])
            tau0=self.tau[0]
            tau1=self.tau[1]
            csubgridError.calculateSubgridErrorNavierStokes3D_GLS_tauRes(tau0,
                                                                         tau1,
                                                                         q[('pdeResidual',0)],
                                                                         q[('dpdeResidual',0,1)],
                                                                         q[('dpdeResidual',0,2)],
                                                                         q[('dpdeResidual',0,3)],
                                                                         q[('pdeResidual',1)],
                                                                         q[('dpdeResidual',1,0)],
                                                                         q[('dpdeResidual',1,1)],
                                                                         q[('dpdeResidual',1,2)],
                                                                         q[('dpdeResidual',1,3)],
                                                                         q[('pdeResidual',2)],
                                                                         q[('dpdeResidual',2,0)],
                                                                         q[('dpdeResidual',2,1)],
                                                                         q[('dpdeResidual',2,2)],
                                                                         q[('dpdeResidual',2,3)],
                                                                         q[('pdeResidual',3)],
                                                                         q[('dpdeResidual',3,0)],
                                                                         q[('dpdeResidual',3,1)],
                                                                         q[('dpdeResidual',3,2)],
                                                                         q[('dpdeResidual',3,3)],
                                                                         q[('subgridError',0)],
                                                                         q[('dsubgridError',0,1)],
                                                                         q[('dsubgridError',0,2)],
                                                                         q[('dsubgridError',0,3)],
                                                                         q[('subgridError',1)],
                                                                         q[('dsubgridError',1,0)],
                                                                         q[('dsubgridError',1,1)],
                                                                         q[('dsubgridError',1,2)],
                                                                         q[('dsubgridError',1,3)],
                                                                         q[('subgridError',2)],
                                                                         q[('dsubgridError',2,0)],
                                                                         q[('dsubgridError',2,1)],
                                                                         q[('dsubgridError',2,2)],
                                                                         q[('dsubgridError',2,3)],
                                                                         q[('subgridError',3)],
                                                                         q[('dsubgridError',3,0)],
                                                                         q[('dsubgridError',3,1)],
                                                                         q[('dsubgridError',3,2)],
                                                                         q[('dsubgridError',3,3)])
            if self.noPressureStabilization:
                q[('subgridError',0)][:]=0.0
                q[('dsubgridError',0,1)][:]=0.0
                q[('dsubgridError',0,2)][:]=0.0
                q[('dsubgridError',0,3)][:]=0.0
            # propagate the pressure-equation cfl to the velocity components
            for ci in range(self.nd):
                q[('cfl',ci+1)][:] = q[('cfl',0)]
class NavierStokesASGS_velocity_pressure_opt(SGE_base):
    """Optimized ASGS variant for Navier-Stokes: instead of computing tau and
    the subgrid error here, it exposes the (possibly lagged) advective
    velocity to the optimized code path through the ('df_sge',i,i) entries of
    the stored quadrature dictionary."""
    def __init__(self,coefficients,nd,stabFlag='1',lag=False,delayLagSteps=5,hFactor=1.0,noPressureStabilization=False):
        self.noPressureStabilization=noPressureStabilization
        SGE_base.__init__(self,coefficients,nd,lag)
        self.stabilizationFlag = stabFlag
        # the pressure equation couples with itself through the stabilization
        coefficients.stencil[0].add(0)
        self.nSteps=0
        self.delayLagSteps=delayLagSteps
        self.hFactor=hFactor
    def initializeElementQuadrature(self,mesh,t,cq):
        import copy
        self.mesh=mesh
        self.tau=[]
        self.tau_last=[]
        self.df_last={}
        self.cq=cq
        if self.lag:
            self.v_last = self.cq[('velocity',0)]
        else:
            self.v_last = cq[('f',0)]
        # assumes a velocity-pressure system with velocity components 1..3 -- TODO confirm
        cq[('df_sge',1,1)]=self.v_last
        cq[('df_sge',2,2)]=self.v_last
        cq[('df_sge',3,3)]=self.v_last
    def updateSubgridErrorHistory(self,initializationPhase=False):
        self.nSteps+=1
    def calculateSubgridError(self,q):
        """Point v_last and the ('df_sge',i,i) entries at the appropriate
        velocity field: the live advective flux until delayLagSteps steps have
        passed, the (lagged) post-processed velocity afterwards."""
        # bug fix: the original referenced an undefined local name ``cq`` here,
        # which raises NameError at runtime; the stored quadrature dictionary
        # set up in initializeElementQuadrature is ``self.cq``.
        if self.nSteps < self.delayLagSteps:
            self.v_last = q[('f',0)]
            self.cq[('df_sge',1,1)]=q[('f',0)]
            self.cq[('df_sge',2,2)]=q[('f',0)]
            self.cq[('df_sge',3,3)]=q[('f',0)]
        else:
            self.v_last = q[('velocity',0)]
            self.cq[('df_sge',1,1)]=q[('velocity',0)]
            self.cq[('df_sge',2,2)]=q[('velocity',0)]
            self.cq[('df_sge',3,3)]=q[('velocity',0)]
class NavierStokesASGS_velocity_pressure_optV2(SGE_base):
    """Second optimized ASGS variant: only maintains a (possibly lagged) copy
    of the post-processed velocity; the subgrid error itself is computed by
    the optimized code path."""
    def __init__(self,coefficients,nd,stabFlag='1',lag=False,delayLagSteps=0,hFactor=1.0,noPressureStabilization=False):
        self.noPressureStabilization = noPressureStabilization
        SGE_base.__init__(self,coefficients,nd,lag)
        self.stabilizationFlag = stabFlag
        # the pressure equation couples with itself through the stabilization
        coefficients.stencil[0].add(0)
        self.nSteps = 0
        self.delayLagSteps = delayLagSteps
        self.hFactor = hFactor
    def initializeElementQuadrature(self,mesh,t,cq):
        import copy
        self.mesh = mesh
        self.cq = cq
        self.tau = []
        self.tau_last = []
        self.df_last = {}
        velocity = self.cq[('velocity',0)]
        # lagging needs an independent snapshot; otherwise just alias the array
        self.v_last = copy.deepcopy(velocity) if self.lag else velocity
    def updateSubgridErrorHistory(self,initializationPhase=False):
        if self.lag:
            self.v_last[:] = self.cq[('velocity',0)]
    def calculateSubgridError(self,q):
        # handled by the optimized code path
        pass
class NavierStokesWithBodyForceASGS_velocity_pressure(NavierStokesASGS_velocity_pressure):
    """Navier-Stokes ASGS stabilization including a body-force reaction term:
    tau is computed with dmt + dr in place of dmt alone."""
    def __init__(self,coefficients,nd,stabFlag='1',lag=False,delayLagSteps=5,hFactor=1.0,noPressureStabilization=False):
        NavierStokesASGS_velocity_pressure.__init__(self,coefficients,nd,stabFlag=stabFlag,lag=lag,
                                                    delayLagSteps=delayLagSteps,hFactor=hFactor,noPressureStabilization=noPressureStabilization)
    def initializeElementQuadrature(self,mesh,t,cq):
        NavierStokesASGS_velocity_pressure.initializeElementQuadrature(self,mesh,t,cq)
        # scratch array holding dmt + dr for the tau calculation
        self.q_dmt_r = numpy.zeros(cq[('dmt',1,1)].shape,'d')
    def calculateSubgridError(self,q):
        """Same as the base class, except the temporal derivative passed to the
        tau routines is augmented with the reaction jacobian dr."""
        from . import LinearAlgebraTools
        oldTau=True
        # q_dmt_r = dmt + dr, computed fresh every call
        self.q_dmt_r.flat[:] = q[('dmt',1,1)].flat
        self.q_dmt_r += q[('dr',1,1)]
        if self.nd == 2:
            # choose the advective velocity: live field until delayLagSteps, then lagged
            if self.lag and self.nSteps < self.delayLagSteps:
                v = q[('f',0)]
            elif self.lag:
                v = self.v_last
            else:
                v = q[('f',0)]
            if oldTau:
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau_sd(self.hFactor,
                                                                                 self.mesh.elementDiametersArray,
                                                                                 self.q_dmt_r,
                                                                                 q[('dm',1,1)],
                                                                                 v,
                                                                                 q[('a',1,1)],
                                                                                 self.tau[0],
                                                                                 self.tau[1],
                                                                                 q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau(self.hFactor,
                                                                              self.mesh.elementDiametersArray,
                                                                              self.q_dmt_r,
                                                                              q[('dm',1,1)],
                                                                              v,
                                                                              q[('a',1,1)],
                                                                              self.tau[0],
                                                                              self.tau[1],
                                                                              q[('cfl',0)])
            else:
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau_sd(q['inverse(J)'],
                                                                                     self.q_dmt_r,
                                                                                     q[('dm',1,1)],
                                                                                     v,
                                                                                     q[('a',1,1)],
                                                                                     self.tau[0],
                                                                                     self.tau[1],
                                                                                     q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau(q['inverse(J)'],
                                                                                  self.q_dmt_r,
                                                                                  q[('dm',1,1)],
                                                                                  v,
                                                                                  q[('a',1,1)],
                                                                                  self.tau[0],
                                                                                  self.tau[1],
                                                                                  q[('cfl',0)])
            tau0=self.tau[0]
            tau1=self.tau[1]
            csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tauRes(tau0,
                                                                         tau1,
                                                                         q[('pdeResidual',0)],
                                                                         q[('dpdeResidual',0,1)],
                                                                         q[('dpdeResidual',0,2)],
                                                                         q[('pdeResidual',1)],
                                                                         q[('dpdeResidual',1,0)],
                                                                         q[('dpdeResidual',1,1)],
                                                                         q[('dpdeResidual',1,2)],
                                                                         q[('pdeResidual',2)],
                                                                         q[('dpdeResidual',2,0)],
                                                                         q[('dpdeResidual',2,1)],
                                                                         q[('dpdeResidual',2,2)],
                                                                         q[('subgridError',0)],
                                                                         q[('dsubgridError',0,1)],
                                                                         q[('dsubgridError',0,2)],
                                                                         q[('subgridError',1)],
                                                                         q[('dsubgridError',1,0)],
                                                                         q[('dsubgridError',1,1)],
                                                                         q[('dsubgridError',1,2)],
                                                                         q[('subgridError',2)],
                                                                         q[('dsubgridError',2,0)],
                                                                         q[('dsubgridError',2,1)],
                                                                         q[('dsubgridError',2,2)])
            if self.noPressureStabilization:
                q[('subgridError',0)][:]=0.0
                q[('dsubgridError',0,1)][:]=0.0
                q[('dsubgridError',0,2)][:]=0.0
        elif self.nd == 3:
            if self.lag and self.nSteps < self.delayLagSteps:
                v = q[('f',0)]
            elif self.lag:
                v = self.v_last
            else:
                v = q[('f',0)]
            if oldTau:
                # NOTE(review): the 2D-named tau routine is also used in 3D here -- confirm intended
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau_sd(self.hFactor,
                                                                                 self.mesh.elementDiametersArray,
                                                                                 self.q_dmt_r,
                                                                                 q[('dm',1,1)],
                                                                                 v,
                                                                                 q[('a',1,1)],
                                                                                 self.tau[0],
                                                                                 self.tau[1],
                                                                                 q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tau(self.hFactor,
                                                                              self.mesh.elementDiametersArray,
                                                                              self.q_dmt_r,
                                                                              q[('dm',1,1)],
                                                                              v,
                                                                              q[('a',1,1)],
                                                                              self.tau[0],
                                                                              self.tau[1],
                                                                              q[('cfl',0)])
            else:
                if self.coefficients.sd:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau_sd(q['inverse(J)'],
                                                                                     self.q_dmt_r,
                                                                                     q[('dm',1,1)],
                                                                                     v,
                                                                                     q[('a',1,1)],
                                                                                     self.tau[0],
                                                                                     self.tau[1],
                                                                                     q[('cfl',0)])
                else:
                    csubgridError.calculateSubgridErrorNavierStokes2D_generic_tau(q['inverse(J)'],
                                                                                  self.q_dmt_r,
                                                                                  q[('dm',1,1)],
                                                                                  v,
                                                                                  q[('a',1,1)],
                                                                                  self.tau[0],
                                                                                  self.tau[1],
                                                                                  q[('cfl',0)])
            tau0=self.tau[0]
            tau1=self.tau[1]
            csubgridError.calculateSubgridErrorNavierStokes3D_GLS_tauRes(tau0,
                                                                         tau1,
                                                                         q[('pdeResidual',0)],
                                                                         q[('dpdeResidual',0,1)],
                                                                         q[('dpdeResidual',0,2)],
                                                                         q[('dpdeResidual',0,3)],
                                                                         q[('pdeResidual',1)],
                                                                         q[('dpdeResidual',1,0)],
                                                                         q[('dpdeResidual',1,1)],
                                                                         q[('dpdeResidual',1,2)],
                                                                         q[('dpdeResidual',1,3)],
                                                                         q[('pdeResidual',2)],
                                                                         q[('dpdeResidual',2,0)],
                                                                         q[('dpdeResidual',2,1)],
                                                                         q[('dpdeResidual',2,2)],
                                                                         q[('dpdeResidual',2,3)],
                                                                         q[('pdeResidual',3)],
                                                                         q[('dpdeResidual',3,0)],
                                                                         q[('dpdeResidual',3,1)],
                                                                         q[('dpdeResidual',3,2)],
                                                                         q[('dpdeResidual',3,3)],
                                                                         q[('subgridError',0)],
                                                                         q[('dsubgridError',0,1)],
                                                                         q[('dsubgridError',0,2)],
                                                                         q[('dsubgridError',0,3)],
                                                                         q[('subgridError',1)],
                                                                         q[('dsubgridError',1,0)],
                                                                         q[('dsubgridError',1,1)],
                                                                         q[('dsubgridError',1,2)],
                                                                         q[('dsubgridError',1,3)],
                                                                         q[('subgridError',2)],
                                                                         q[('dsubgridError',2,0)],
                                                                         q[('dsubgridError',2,1)],
                                                                         q[('dsubgridError',2,2)],
                                                                         q[('dsubgridError',2,3)],
                                                                         q[('subgridError',3)],
                                                                         q[('dsubgridError',3,0)],
                                                                         q[('dsubgridError',3,1)],
                                                                         q[('dsubgridError',3,2)],
                                                                         q[('dsubgridError',3,3)])
            if self.noPressureStabilization:
                q[('subgridError',0)][:]=0.0
                q[('dsubgridError',0,1)][:]=0.0
                q[('dsubgridError',0,2)][:]=0.0
                q[('dsubgridError',0,3)][:]=0.0
            # propagate the pressure-equation cfl to the velocity components
            for ci in range(self.nd):
                q[('cfl',ci+1)][:] = q[('cfl',0)]
        #mwf orig
        # if self.nd == 2:
        #     if self.coefficients.sd:
        #         csubgridError.calculateSubgridErrorNavierStokes2D_generic_withBodyForce_tau_sd(q['inverse(J)'],
        #                                                                                        q[('dmt',1,1)],
        #                                                                                        q[('dm',1,1)],
        #                                                                                        q[('df',1,1)],
        #                                                                                        q[('a',1,1)],
        #                                                                                        q[('dr',1,1)],
        #                                                                                        self.tau[0],
        #                                                                                        self.tau[1],
        #                                                                                        q[('cfl',0)])
        #     else:
        #         csubgridError.calculateSubgridErrorNavierStokes2D_generic_withBodyForce_tau(q['inverse(J)'],
        #                                                                                     q[('dmt',1,1)],
        #                                                                                     q[('dm',1,1)],
        #                                                                                     q[('df',1,1)],
        #                                                                                     q[('a',1,1)],
        #                                                                                     q[('dr',1,1)],
        #                                                                                     self.tau[0],
        #                                                                                     self.tau[1],
        #                                                                                     q[('cfl',0)])
        #     if self.lag:#TODO: make sure up to date with delaySteps flag
        #         tau0=self.tau_last[0]
        #         tau1=self.tau_last[1]
        #     else:
        #         tau0=self.tau[0]
        #         tau1=self.tau[1]
        #     csubgridError.calculateSubgridErrorNavierStokes2D_GLS_tauRes(tau0,
        #                                                                  tau1,
        #                                                                  q[('pdeResidual',0)],
        #                                                                  q[('dpdeResidual',0,1)],
        #                                                                  q[('dpdeResidual',0,2)],
        #                                                                  q[('pdeResidual',1)],
        #                                                                  q[('dpdeResidual',1,0)],
        #                                                                  q[('dpdeResidual',1,1)],
        #                                                                  q[('pdeResidual',2)],
        #                                                                  q[('dpdeResidual',2,0)],
        #                                                                  q[('dpdeResidual',2,2)],
        #                                                                  q[('subgridError',0)],
        #                                                                  q[('dsubgridError',0,1)],
        #                                                                  q[('dsubgridError',0,2)],
        #                                                                  q[('subgridError',1)],
        #                                                                  q[('dsubgridError',1,0)],
        #                                                                  q[('dsubgridError',1,1)],
        #                                                                  q[('subgridError',2)],
        #                                                                  q[('dsubgridError',2,0)],
        #                                                                  q[('dsubgridError',2,2)])
        # elif self.nd == 3:
        #     return NavierStokesASGS_velocity_pressure.calculateSubgridError(q)
class StokesASGS_velocity_pressure(SGE_base):
def __init__(self,coefficients,nd):
SGE_base.__init__(self,coefficients,nd,lag=False)
coefficients.stencil[0].add(0)
if nd == 2:
coefficients.stencil[1].add(2)
coefficients.stencil[2].add(1)
elif nd == 3:
coefficients.stencil[1].add(2)
coefficients.stencil[1].add(3)
coefficients.stencil[2].add(1)
coefficients.stencil[2].add(3)
coefficients.stencil[3].add(1)
coefficients.stencil[3].add(3)
def calculateSubgridError(self,q):
if self.nd == 2:
if self.coefficients.sd:
csubgridError.calculateSubgridErrorStokes_GLS_tau_sd(self.mesh.elementDiametersArray,
q[('dH',1,0)],
q[('a',1,1)],
self.tau[0],
self.tau[1])
else:
csubgridError.calculateSubgridErrorStokes_GLS_tau(self.mesh.elementDiametersArray,
q[('dH',1,0)],
q[('a',1,1)],
self.tau[0],
self.tau[1])
csubgridError.calculateSubgridErrorStokes2D_GLS_tauRes(self.tau[0],
self.tau[1],
q[('pdeResidual',0)],
q[('dpdeResidual',0,1)],
q[('dpdeResidual',0,2)],
q[('pdeResidual',1)],
q[('dpdeResidual',1,0)],
q[('dpdeResidual',1,1)],
q[('pdeResidual',2)],
q[('dpdeResidual',2,0)],
q[('dpdeResidual',2,2)],
q[('subgridError',0)],
q[('dsubgridError',0,1)],
q[('dsubgridError',0,2)],
q[('subgridError',1)],
q[('dsubgridError',1,0)],
q[('dsubgridError',1,1)],
q[('subgridError',2)],
q[('dsubgridError',2,0)],
q[('dsubgridError',2,2)])
elif self.nd == 3:
if self.coefficients.sd:
csubgridError.calculateSubgridErrorStokes_GLS_tau_sd(self.mesh.elementDiametersArray,
q[('dH',1,0)],
q[('a',1,1)],
self.tau[0],
self.tau[1])
else:
csubgridError.calculateSubgridErrorStokes_GLS_tau(self.mesh.elementDiametersArray,
q[('dH',1,0)],
q[('a',1,1)],
self.tau[0],
self.tau[1])
self.tau[0][:] = 0.0
csubgridError.calculateSubgridErrorStokes3D_GLS_tauRes(self.tau[0],
self.tau[1],
q[('pdeResidual',0)],
q[('dpdeResidual',0,1)],
q[('dpdeResidual',0,2)],
q[('dpdeResidual',0,3)],
q[('pdeResidual',1)],
q[('dpdeResidual',1,0)],
q[('dpdeResidual',1,1)],
q[('pdeResidual',2)],
q[('dpdeResidual',2,0)],
q[('dpdeResidual',2,2)],
q[('pdeResidual',3)],
q[('dpdeResidual',3,0)],
q[('dpdeResidual',3,3)],
q[('subgridError',0)],
q[('dsubgridError',0,1)],
q[('dsubgridError',0,2)],
| |
custom saveables.
with self.assertRaisesRegexp(
ValueError, "The same saveable will be restored with two names: v2"):
saver_module.Saver({"v2": v2.saveable, "v2too": v2.saveable})
# Verify non-duplicate names work.
saver_module.Saver({"v0": v0, "v2": v2.saveable})
  def testBasicsWithListOfVariables(self):
    """Round-trips two variables and a custom saveable passed to Saver as a
    list (rather than the name->variable dict form)."""
    save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
    with self.test_session(graph=ops_lib.Graph()) as sess:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = variables.Variable(10.0, name="v0")
      v1 = variables.Variable(20.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      v2_init = v2.insert("k1", 30.0)
      save = saver_module.Saver([v0, v1, v2.saveable])
      variables.global_variables_initializer().run()
      v2_init.run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(30.0, v2.values().eval())
      # Save the initialized values in the file at "save_path".
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)
    # Start a second session. In that session the variables
    # have not been initialized either.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0 = variables.Variable(-1.0, name="v0")
      v1 = variables.Variable(-1.0, name="v1")
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      save = saver_module.Saver([v0, v1, v2.saveable])
      # Reading the variables before restore must fail: nothing has been
      # initialized in this graph.
      with self.assertRaisesWithPredicateMatch(
          errors_impl.OpError, lambda e: "uninitialized value v0" in e.message):
        sess.run(v0)
      with self.assertRaisesWithPredicateMatch(
          errors_impl.OpError, lambda e: "uninitialized value v1" in e.message):
        sess.run(v1)
      self.assertEqual(0, len(v2.keys().eval()))
      self.assertEqual(0, len(v2.values().eval()))
      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(30.0, v2.values().eval())
    # Build another graph with 2 nodes, initialized
    # differently, and a Restore node for them.
    with self.test_session(graph=ops_lib.Graph()) as sess:
      v0_2 = variables.Variable(1000.0, name="v0")
      v1_2 = variables.Variable(2000.0, name="v1")
      v2_2 = saver_test_utils.CheckpointedOp(name="v2")
      save2 = saver_module.Saver([v0_2, v1_2, v2_2.saveable])
      v2_2.insert("k1000", 3000.0).run()
      variables.global_variables_initializer().run()
      # Check that the parameter nodes have been initialized.
      self.assertEqual(1000.0, v0_2.eval())
      self.assertEqual(2000.0, v1_2.eval())
      self.assertEqual(b"k1000", v2_2.keys().eval())
      self.assertEqual(3000.0, v2_2.values().eval())
      # Restore the values saved earlier in the parameter nodes;
      # the restore must overwrite the differing initial values.
      save2.restore(sess, save_path)
      # Check that the parameter nodes have been restored.
      self.assertEqual(10.0, v0_2.eval())
      self.assertEqual(20.0, v1_2.eval())
      self.assertEqual(b"k1", v2_2.keys().eval())
      self.assertEqual(30.0, v2_2.values().eval())
  def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
    """Save a variable holding `var_value`, then restore it into a fresh graph
    over a variable holding `other_value`, and assert the restored value.

    Args:
      var_name: name of the ResourceVariable in both graphs.
      var_value: value saved to the checkpoint (and expected after restore).
      other_value: initial value of the second variable, which the restore
        must overwrite.
      save_path: checkpoint path to write/read.
    """
    with self.test_session(graph=ops_lib.Graph()) as sess:
      var = resource_variable_ops.ResourceVariable(var_value, name=var_name)
      save = saver_module.Saver({var_name: var})
      if context.in_graph_mode():
        # Only graph mode needs an explicit initializer run before saving.
        self.evaluate(var.initializer)
      val = save.save(sess, save_path)
      self.assertEqual(save_path, val)
    with self.test_session(graph=ops_lib.Graph()) as sess:
      var = resource_variable_ops.ResourceVariable(other_value, name=var_name)
      save = saver_module.Saver({var_name: var})
      save.restore(sess, save_path)
      self.assertAllClose(var_value, self.evaluate(var))
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
  def testAllowEmpty(self):
    """A Saver built with allow_empty=True tolerates a graph with no saveable
    variables, for both save and restore."""
    save_path = os.path.join(self.get_temp_dir(), "allow_empty")
    with self.test_session() as sess:
      # A lone constant: the graph contains no variables at all.
      _ = constant_op.constant(1)
      save = saver_module.Saver(allow_empty=True)
      # With nothing to save, save() yields None instead of a checkpoint path.
      val = save.save(sess, save_path)
      self.assertIsNone(val)
    with self.test_session() as sess:
      save = saver_module.Saver(allow_empty=True)
      # Restoring with no saveables must be a no-op rather than an error.
      save.restore(sess, save_path)
  def testGPU(self):
    """Saves a GPU-placed variable, then builds and initializes a saver for a
    second GPU-placed variable in a fresh graph (no restore is performed)."""
    if not test.is_gpu_available():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_1 = variables.Variable(123.45)
      save = saver_module.Saver({"v0": v0_1})
      variables.global_variables_initializer().run()
      save.save(sess, save_path)
    # Second graph: only verifies that a Saver can be constructed and the
    # GPU-placed variable initialized; no restore/save happens here.
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_2 = variables.Variable(543.21)
      save = saver_module.Saver({"v0": v0_2})
      variables.global_variables_initializer().run()
  def testSharedServerOnGPU(self):
    """Same flow as testGPU, but with a sharded Saver (allow_empty=True)."""
    if not test.is_gpu_available():
      return
    save_path = os.path.join(self.get_temp_dir(), "gpu")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_1 = variables.Variable(123.45)
      save = saver_module.Saver({"v0": v0_1}, sharded=True, allow_empty=True)
      variables.global_variables_initializer().run()
      save.save(sess, save_path)
    # Second graph: constructs and initializes a sharded saver only; no
    # restore/save is performed here.
    with session.Session("", graph=ops_lib.Graph()) as sess:
      with sess.graph.device(test.gpu_device_name()):
        v0_2 = variables.Variable(543.21)
      save = saver_module.Saver({"v0": v0_2}, sharded=True, allow_empty=True)
      variables.global_variables_initializer().run()
  def testVariables(self):
    """A no-argument Saver captures all variables (and registered saveables)
    created so far, for both save and restore."""
    save_path = os.path.join(self.get_temp_dir(), "variables")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(1.0)
      twos = variables.Variable([2.0, 2.0, 2.0])
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      init = variables.global_variables_initializer()
      save = saver_module.Saver()
      init.run()
      v2.insert("k1", 3.0).run()
      save.save(sess, save_path)
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(0.0)
      twos = variables.Variable([0.0, 0.0, 0.0])
      v2 = saver_test_utils.CheckpointedOp(name="v2")
      # Saver with no arg, defaults to 'all variables'.
      save = saver_module.Saver()
      save.restore(sess, save_path)
      # Restore must overwrite the zero initial values with the saved ones.
      self.assertAllClose(1.0, one.eval())
      self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
      self.assertEqual(b"k1", v2.keys().eval())
      self.assertEqual(3.0, v2.values().eval())
def testVarListShouldBeEmptyInDeferredBuild(self):
with ops_lib.Graph().as_default():
v = variables.Variable(1.0)
with self.assertRaisesRegexp(ValueError, "defer_build"):
saver_module.Saver([v], defer_build=True)
  def testBuildShouldBeCalledBeforeSaveInCaseOfDeferBuild(self):
    """save() on a defer_build=True Saver must raise until build() has run."""
    save_path = os.path.join(self.get_temp_dir(), "error_deferred_build")
    with ops_lib.Graph().as_default(), session.Session() as sess:
      variables.Variable(1.0)
      saver = saver_module.Saver(defer_build=True)
      # build() was never called, so saving must fail with a clear error.
      with self.assertRaisesRegexp(RuntimeError, "build"):
        saver.save(sess, save_path)
  def testDeferredBuild(self):
    """With defer_build=True, variables created after the Saver but before
    build() are still captured and saved."""
    save_path = os.path.join(self.get_temp_dir(), "deferred_build")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(1.0)
      save = saver_module.Saver(defer_build=True)
      # if build is not deferred, saver cannot save the `twos`.
      twos = variables.Variable([2.0, 2.0, 2.0])
      init = variables.global_variables_initializer()
      save.build()
      init.run()
      save.save(sess, save_path)
    with session.Session("", graph=ops_lib.Graph()) as sess:
      one = variables.Variable(0.0)
      twos = variables.Variable([0.0, 0.0, 0.0])
      # Saver with no arg, defaults to 'all variables'.
      save = saver_module.Saver()
      save.restore(sess, save_path)
      # Both variables, including the late-created `twos`, must be restored.
      self.assertAllClose(1.0, one.eval())
      self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
  def testReshape(self):
    """Restoring into a differently-shaped variable fails with reshape=False
    (the default) and succeeds with reshape=True."""
    save_path = os.path.join(self.get_temp_dir(), "variables_reshape")
    with session.Session("", graph=ops_lib.Graph()) as sess:
      # Save a 2x3 variable.
      var = variables.Variable([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
      init = variables.global_variables_initializer()
      save = saver_module.Saver()
      init.run()
      save.save(sess, save_path)
    # Error when restoring with default reshape=False
    with session.Session("", graph=ops_lib.Graph()) as sess:
      # Same element count (6) but shape 3x2: must be rejected.
      var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
      save = saver_module.Saver()
      with self.assertRaisesRegexp(
          errors_impl.InvalidArgumentError,
          "Assign requires shapes of both tensors to match."):
        save.restore(sess, save_path)
    # Restored to new shape with reshape=True
    with session.Session("", graph=ops_lib.Graph()) as sess:
      var = variables.Variable([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]])
      save = saver_module.Saver(reshape=True)
      save.restore(sess, save_path)
      # Values arrive in row-major order, reshaped to 3x2.
      self.assertAllClose([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], var.eval())
  @test_util.run_in_graph_and_eager_modes()
  def testSaveWithGlobalStep(self, pad_step_number=False):
    """The global_step (tensor or int) is appended to the checkpoint path,
    zero-padded to 8 digits when pad_step_number=True."""
    save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
    global_step_int = 5
    # Save and reload one Variable named "var0".
    self._SaveAndLoad("var0", 0.0, 1.0, save_path)
    # Exercise both the tensor and the plain-int form of global_step.
    for use_tensor in [True, False]:
      with self.test_session(graph=ops_lib.Graph()):
        var = resource_variable_ops.ResourceVariable(1.0, name="var0")
        save = saver_module.Saver(
            {
                var._shared_name: var
            }, pad_step_number=pad_step_number)
        if context.in_graph_mode():
          self.evaluate(var.initializer)
          sess = ops_lib.get_default_session()
        else:
          # Eager mode: save() is called without a session.
          sess = None
        if use_tensor:
          global_step = constant_op.constant(global_step_int)
          val = save.save(sess, save_path, global_step=global_step)
        else:
          val = save.save(sess, save_path, global_step=global_step_int)
        if pad_step_number:
          expected_save_path = "%s-%s" % (save_path,
                                          "{:08d}".format(global_step_int))
        else:
          expected_save_path = "%s-%d" % (save_path, global_step_int)
        self.assertEqual(expected_save_path, val)
  def testSaveWithGlobalStepWithPadding(self):
    """Same as testSaveWithGlobalStep, but with zero-padded step suffixes."""
    self.testSaveWithGlobalStep(pad_step_number=True)
  def testSaveToNonexistingPath(self):
    """Saving under a missing parent directory (or a file posing as one)
    either succeeds fully or fails with the documented ValueError."""
    # Create a plain file so "actually_a_file/path" has a non-directory parent.
    file_io.write_string_to_file(
        os.path.join(self.get_temp_dir(), "actually_a_file"), "")
    paths = [
        os.path.join(self.get_temp_dir(), "nonexisting_dir/path"),
        os.path.join(self.get_temp_dir(), "other_nonexisting_dir/path1/path2"),
        os.path.join(self.get_temp_dir(), "actually_a_file/path"),
    ]
    for save_path in paths:
      # Build a graph with 2 parameter nodes, and Save and
      # Restore nodes for them.
      v0 = variables.Variable(10.0, name="v0")
      v1 = variables.Variable(20.0, name="v1")
      save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
      init_all_op = variables.global_variables_initializer()
      # In the case where the parent directory doesn't exist, whether or not the
      # save succeeds or fails is implementation dependent. Therefore we allow
      # both cases.
      try:
        with self.test_session() as sess:
          # Initialize all variables
          sess.run(init_all_op)
          # Check that the parameter nodes have been initialized.
          self.assertEqual(10.0, v0.eval())
          self.assertEqual(20.0, v1.eval())
          # Save the graph.
          save.save(sess, save_path)
        with self.test_session() as sess:
          # Restore the saved values in the parameter nodes.
          save.restore(sess, save_path)
          # Check that the parameter nodes have been restored.
          self.assertEqual(10.0, v0.eval())
          self.assertEqual(20.0, v1.eval())
      except ValueError as exc:
        # If the save failed, it must have failed with exactly this message.
        error_msg_template = "Parent directory of {} doesn't exist, can't save."
        self.assertEqual(error_msg_template.format(save_path), str(exc))
  def testSaveToURI(self):
    """Saving to a file:// URI behaves like saving to a plain local path."""
    # ParseURI functions don't work on Windows yet.
    # TODO(jhseu): Remove this check when it works.
    if os.name == "nt":
      self.skipTest("Local URI support doesn't work on Windows")
    save_path = "file://" + os.path.join(self.get_temp_dir(), "uri")
    # Build a graph with 2 parameter nodes, and Save and
    # Restore nodes for them.
    v0 = variables.Variable(10.0, name="v0")
    v1 = variables.Variable(20.0, name="v1")
    save = saver_module.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
    init_all_op = variables.global_variables_initializer()
    with self.test_session() as sess:
      # Initialize all variables
      sess.run(init_all_op)
      # Check that the parameter nodes have been initialized.
      self.assertEqual(10.0, v0.eval())
      self.assertEqual(20.0, v1.eval())
      # Only the save is exercised here; no restore from the URI is attempted.
      save.save(sess, save_path)
@test_util.with_c_api
class SaveRestoreShardedTest(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variables.Variable(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variables.Variable(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
variables.global_variables_initializer().run()
t0.insert("k11", 33.0).run()
self.assertEqual(111, v0.eval())
self.assertEqual(b"k11", t0.keys().eval())
self.assertEqual(33.0, t0.values().eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
self.assertEqual(b"k1", t0.keys().eval())
self.assertEqual(30.0, t0.values().eval())
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
| |
<gh_stars>0
import tvm
import topi
import numpy as np
from tvm.testing import check_numerical_grads, estimate_performance, PerformanceEstimate
import time
import inspect
import sys
import argparse
# Whether to dump the generated code (original tensors, gradients, lowered IR)
verbose = False
# Whether to perform numerical (finite-difference) gradient testing in check_grad
perform_numgrad_test = True
# Raise an exception when performance estimates are too high
fail_when_perf_estimates_too_high = True
# Run only check_grad calls originating from these source lines;
# an empty set means run everything (presumably filled from the command
# line via argparse -- TODO confirm)
enabled_lines = set()
# Lines (among the enabled_lines) that were actually run
actually_run = set()
def get_shape(tensor, param_values=None):
    """Return the concrete integer shape of `tensor`.

    Symbolic shape variables are substituted using `param_values`
    (a dict, empty by default) and then simplified to constants.
    """
    if param_values is None:
        param_values = {}
    dims = []
    for extent in tensor.shape:
        substituted = tvm.ir_pass.Substitute(extent, param_values)
        dims.append(tvm.ir_pass.Simplify(substituted).value)
    return dims
def check_equivalence(outputs1, outputs2, inputs, in_range=(-10, 10), iters=3):
    """Check that two sets of output tensors compute the same values.

    Both output lists are compiled, run `iters` times on identical random
    inputs, and corresponding outputs are compared with assert_allclose.

    Parameters
    ----------
    outputs1, outputs2 : iterables of tvm tensors, compared pairwise
    inputs : list of input placeholders shared by both output sets
    in_range : (low, high) range for the random input values
    iters : number of random trials
    """
    outputs1 = list(outputs1)
    outputs2 = list(outputs2)
    sched1 = tvm.create_schedule([o.op for o in outputs1])
    mout1 = tvm.build(sched1, outputs1 + inputs)
    sched2 = tvm.create_schedule([o.op for o in outputs2])
    mout2 = tvm.build(sched2, outputs2 + inputs)
    # NOTE: the original pre-allocated arguments1/arguments2 buffers here;
    # they were dead code (rebuilt from scratch every iteration below) and
    # one of them even iterated outputs1 while sizing buffers for outputs2,
    # so they have been removed.
    for i in range(iters):
        arguments1 = []
        arguments2 = []
        # Feed both modules byte-identical random data (output slots are
        # overwritten by the kernels; input slots must match).
        for a in outputs1 + inputs:
            val = np.random.uniform(in_range[0], in_range[1], size=get_shape(a)).astype(a.dtype)
            arguments1.append(tvm.nd.array(val))
            arguments2.append(tvm.nd.array(val))
        mout1(*arguments1)
        mout2(*arguments2)
        for j, _ in enumerate(outputs1):
            tvm.testing.assert_allclose(arguments1[j].asnumpy(), arguments2[j].asnumpy())
def check_grad(out, inputs, args=None, in_range=(-10,10), perf=None, param_values=None,
               acceptable_fail_fraction=None):
    """Differentiate `out` w.r.t. `inputs`, check performance estimates of the
    generated gradients, and (optionally) verify them numerically.

    Parameters
    ----------
    out : tvm tensor
        The output tensor to differentiate.
    inputs : tvm tensor or list of tensors
        Tensors to differentiate with respect to.
    args : list of tvm tensors, optional
        Additional arguments passed to the compiled modules but not
        differentiated against.
    in_range : (low, high)
        Range of the random values fed to inputs and args.
    perf : tuple, str, None or False
        Reference (iterations, multiplications, memory) estimate. None or a
        str only prints the measured estimate (str additionally prints a sed
        command); False skips performance checking entirely.
    param_values : dict, optional
        Substitutions for symbolic shape parameters.
    acceptable_fail_fraction : float, optional
        Forwarded to check_numerical_grads.

    Keyed to the caller's source line so runs can be filtered via the
    module-level `enabled_lines` set.
    """
    # BUG FIX: `args=[]` was a mutable default argument; use a None sentinel.
    if args is None:
        args = []
    line = inspect.getframeinfo(inspect.stack()[1][0]).lineno
    if enabled_lines:
        if line not in enabled_lines:
            return
        actually_run.add(line)
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]
    if param_values is None:
        param_values = {}
    if verbose:
        print("\n" + 80*"=" + "\n")
        print("Testing gradients, line {}\n".format(line))
        print("Original tensors:\n")
        print(tvm.PrintTensorRecursively(out))
        print()
    # Compile the forward function (needed for the numerical check below).
    sout = tvm.create_schedule(out.op)
    mout = tvm.build(sout, [out] + inputs + args)
    ones = topi.full_like(out, 1.0)
    grads = list(tvm.differentiate(out, inputs, ones))
    # This is not done automatically by tvm.differentiate because it may lead to strange
    # tensor shapes, however it is recommended to call it
    grads = list(tvm.ir_pass.RemoveUnusedDimsRecursively(grads))
    if verbose:
        print("Gradients:\n")
        print(tvm.PrintTensorsRecursively(grads))
        print()
    grads_sched = tvm.create_schedule([g.op for g in grads])
    mgrad = tvm.build(grads_sched, grads + inputs + args)
    lowered = tvm.lower(grads_sched, grads + inputs + args, simple_mode=True)
    if verbose:
        print("Lowered gradients:\n")
        print(lowered)
        print()
    if perf != False:
        est = estimate_performance(grads, param_values=param_values)
        est_lowered = estimate_performance(lowered, param_values=param_values)
        if verbose:
            print("Note: performance tuples are (iterations, multiplications, memory)")
            print("Line {}: Expected performance of grads: {}".format(line, perf))
            print("Line {}: Estimated performance of grads: {}".format(line, est.as_tuple()))
            print("Line {}: Estimated performance of lowered grads: {}"
                  .format(line, est_lowered.as_tuple()))
            print()
        if est_lowered.memory > est.memory:
            print("WARNING: Line {}: The estimated memory consumption increased after lowering, "
                  "this may indicate that tensor bounds have been expanded too much".format(line))
            print("before: {} after: {}".format(est, est_lowered))
        (iters, mults, mem) = est.as_tuple()
        if perf is None or isinstance(perf, str):
            print("WARNING: Line {}: No performance information, you may set it to {}"
                  .format(line, est.as_tuple()))
            if isinstance(perf, str):
                # Emit a ready-to-use sed command for updating the reference.
                print("0,/{!r}/{{s/{!r}/{}/}}".format(perf, perf, (iters, mults, mem)))
        elif perf != (iters, mults, mem):
            (ref_iters, ref_mults, ref_mem) = perf
            ref_est = PerformanceEstimate(*perf)
            # PerformanceEstimate comparisons are partial: <=, >= may both be
            # false, hence the three-way reporting below.
            if est <= ref_est:
                print("WARNING: Line {}: Estimated performance {} is better than {}. "
                      "Use this with sed:"
                      .format(line, est.as_tuple(), ref_est.as_tuple()))
                print("{}s/perf={}/perf={}/".format(line, perf, (iters, mults, mem)))
            elif est >= ref_est:
                print("WARNING: Line {}: Estimated performance {} IS WORSE THAN {}"
                      .format(line, est.as_tuple(), ref_est.as_tuple()))
            else:
                print("WARNING: Line {}: Estimated performance {} does not match {}"
                      .format(line, est.as_tuple(), ref_est.as_tuple()))
            # Fail (or warn) when any metric exceeds the reference by 1.5x.
            EST_RTOL = 1.5
            if iters > ref_iters*EST_RTOL or mults > ref_mults*EST_RTOL or mem > ref_mem*EST_RTOL:
                message = ("Line {}: Some of the estimated performance metrics are much "
                           "worse than the reference ones (by {}): "
                           "estimated {}, expected {}"
                           .format(line, EST_RTOL, est.as_tuple(), ref_est.as_tuple()))
                if fail_when_perf_estimates_too_high:
                    raise AssertionError(message)
                else:
                    print(message)
    input_vals = [tvm.nd.array(np.random.uniform(in_range[0], in_range[1],
                                                 size=get_shape(a, param_values)).astype(a.dtype))
                  for a in inputs]
    arg_vals = [tvm.nd.array(np.random.uniform(in_range[0], in_range[1],
                                               size=get_shape(a, param_values)).astype(a.dtype))
                for a in args]

    def fun(*arguments):
        # Scalar-valued forward function used by the finite-difference checker.
        arrays = [tvm.nd.empty(get_shape(out, param_values), out.dtype)] + \
                 [tvm.nd.array(a) for a in list(arguments) + arg_vals]
        mout(*arrays)
        return arrays[0].asnumpy().sum()

    # Output buffers for each gradient, followed by the fixed input/arg values.
    g_arg_vals = \
        [tvm.nd.empty(get_shape(i, param_values), g.dtype) for i, g in zip(inputs, grads)] + \
        input_vals + arg_vals
    mgrad(*g_arg_vals)
    g_res = [g_arg_vals[g].asnumpy() for g, _ in enumerate(grads)]
    if perform_numgrad_test:
        check_numerical_grads(fun, [a.asnumpy() for a in input_vals], g_res,
                              acceptable_fail_fraction=acceptable_fail_fraction)
        if verbose:
            print("Line {}: Numerical gradient check passed".format(line))
def test_differentiate_function():
    """Exercise tvm.differentiate's head and override machinery on a conv net.

    Checks that (a) differentiating sum(t2) equals differentiating t2 with an
    explicit all-ones head, and (b) user-supplied gradient overrides for
    individual tensors reproduce the automatically derived gradients.
    """
    x = tvm.placeholder((32, 3, 28, 28), name='x')
    w = tvm.placeholder((10, 3, 3, 3), name='w')
    t1 = topi.nn.conv2d(x, w, 1, 0, 1)
    t2 = topi.nn.flatten(t1)
    t3 = topi.sum(t2)
    [dx1, dw1] = tvm.differentiate(t3, [x, w])
    # Differentiating t2 with a head of ones must equal differentiating sum(t2).
    [dx2, dw2] = tvm.differentiate(t2, [x, w], topi.full_like(t2, 1.0))
    check_equivalence([dx1, dw1], [dx2, dw2], [x, w])

    def mydiff(out, inp, head, t1=t1, t2=t2):
        # Manual gradient of flatten: index the head with t1's unflattened
        # coordinates (26 = 28 - 3 + 1 conv output size, 676 = 26 * 26).
        assert out == t2 and inp == [t1]
        return [tvm.compute(t1.shape,
                            lambda ax0, ax1, ax2, ax3: head[ax0, ax3 + ax2*26 + ax1*676])]
    res = tvm.differentiate(t3, [x, w], override={t2: ([t1], mydiff)})
    check_equivalence(res.result, [dx1, dw1], [x, w])

    def mydiff2(out, inputs, head):
        # Override that simply recurses into the default differentiation.
        return tvm.differentiate(out, inputs, head)
    res = tvm.differentiate(t3, [x, w], override={t1: ([x, w], mydiff2)})
    check_equivalence(res.result, [dx1, dw1], [x, w])
# Test some simple expressions
def test_autodiff():
    """Differentiate a battery of elementary expressions on 10x10 matrices and
    check each gradient numerically and against reference performance tuples
    (iterations, multiplications, memory) via check_grad.

    Non-differentiable ops (floor/ceil/trunc/round) use
    acceptable_fail_fraction to tolerate finite-difference failures at
    discontinuities.
    """
    # NOTE(review): `x` and `l` below are created but never used in this body.
    x = tvm.var("x", dtype='float32')
    k = tvm.reduce_axis((0, 10), name="k")
    l = tvm.reduce_axis((0, 10), name="l")
    A0 = tvm.placeholder((10, 10), name='A0')
    A1 = tvm.placeholder((10, 10), name='A1')
    B = tvm.compute((10, 10), lambda i, j: A0[i, j] + A0[j, i], name='B')
    check_grad(B, A0, perf=(10001, 10000, 101))
    B = tvm.compute((10, 10), lambda i, j: tvm.floor(A0[i, j]), name='B')
    check_grad(B, A0, perf=(100, 0, 100), acceptable_fail_fraction=0.05)
    B = tvm.compute((10, 10), lambda i, j: tvm.ceil(A0[i, j]), name='B')
    check_grad(B, A0, perf=(100, 0, 100), acceptable_fail_fraction=0.05)
    B = tvm.compute((10, 10), lambda i, j: tvm.trunc(A0[i, j]), name='B')
    check_grad(B, A0, perf=(100, 0, 100), acceptable_fail_fraction=0.05)
    B = tvm.compute((10, 10), lambda i, j: tvm.round(A0[i, j]), name='B')
    check_grad(B, A0, perf=(100, 0, 100), acceptable_fail_fraction=0.05)
    B = tvm.compute((10, 10), lambda i, j: A0[i, j] + tvm.exp(A0[j, i]), name='B')
    check_grad(B, A0, perf=(10001, 20000, 101))
    B = tvm.compute((10, 10), lambda i, j: tvm.log(0.1 + tvm.abs(A0[i, j] + tvm.exp(A0[j, i]))), name='B')
    check_grad(B, A0, perf=(10001, 70000, 101))
    B = tvm.compute((10, 10), lambda i, j: tvm.sigmoid(A0[i, j]*A0[i, j]*A0[j, i]), name='B')
    check_grad(B, A0, perf=(10001, 110000, 101))
    B = tvm.compute((10, 10), lambda i, j: tvm.tanh(A0[i, j]*A0[i, j]*A0[j, i]), name='B')
    check_grad(B, A0, perf=(10001, 110000, 101))
    # sqrt needs positive inputs, hence the restricted in_range.
    B = tvm.compute((10, 10), lambda i, j: tvm.sqrt(A0[i, j]*A0[i, j]*A0[j, i]), name='B')
    check_grad(B, A0, perf=(10001, 80000, 101), in_range=(0.1, 10))
    B = tvm.compute((10, 10), lambda i, j: tvm.power(tvm.abs(A0[i, j]), A0[j, i]), name='B')
    check_grad(B, A0, perf=(10001, 90000, 101), in_range=(-4, 4))
    B = tvm.compute((10, 10), lambda i, j: A0[i, j] * A0[j, i], name='B')
    check_grad(B, A0, perf=(10001, 10000, 101))
    # TODO: This one needs transforming Sum(a + b) -> Sum(a) + Sum(b)
    B = tvm.compute((10,), lambda i: tvm.sum(A0[i, k]*A0[k, i], axis=k), name='B')
    check_grad(B, A0, perf=(11001, 1000, 1101))
    B = tvm.compute((10, 10), lambda i, j: tvm.sum(A0[i, k]*A0[k, i] + 5, axis=k), name='B')
    check_grad(B, A0, perf=(20001, 10000, 1101))
    B = tvm.compute((10, 10), lambda i, j: tvm.max(A0[i, k]*A0[k, j] + 5, axis=k), name='B')
    check_grad(B, A0, perf=(110001, 310000, 20101))
    # A1 participates as an extra (non-differentiated) argument here.
    B = tvm.compute((10, 10), lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]), name='B')
    check_grad(B, A0, [A1], perf=(10001, 10000, 101))
    B = tvm.compute((10, 10), lambda i, j: tvm.sum(A0[k, k] - A0[tvm.min(j + k, 9), j]*A0[i, k],
                                                   axis=k),
                    name='B')
    check_grad(B, A0, perf=(110001, 10000, 10101))

    # Custom commutative reducer: product of terms along k.
    def fcombine(x, y):
        return x*y

    def fidentity(t0):
        return tvm.const(1, t0)
    prod = tvm.comm_reducer(fcombine, fidentity, name='prod')
    B = tvm.compute((10, 10), lambda i, j: prod(A0[i, k] + A0[k, i], axis=k), name='B')
    check_grad(B, A0, perf=(20001, 40000, 2101))
    X = tvm.placeholder((10,), name='X')
    A = tvm.compute((10,), lambda i: X[i] + X[9 - i])
    B = tvm.compute((10,), lambda i: X[i] * X[9 - i])
    Y = topi.tensordot(A, B, 1)
    check_grad(Y, X, perf=(251, 230, 71))
def test_topi_autodiff():
X = tvm.placeholder((1, 2, 4, 4), name='X')
W = tvm.placeholder((5, 2, 3, 3), name='W')
W1 = tvm.placeholder((2, 5, 3, 3), name='W1')
W2 = tvm.placeholder((1,), name='W2')
R = topi.nn.conv2d(X, W, 1, 1, 1)
check_grad(R, [X, W], perf=(2953, 2880, 195))
R1 = topi.nn.conv2d(topi.nn.relu(R), W1, 1, 0, 1)
check_grad(R1, [X, W, W1], perf=(5633, 5320, 685))
R = topi.broadcast_to(W2, (5, 2, 3, 3))
check_grad(R, [W2], perf=(91, 0, 2))
R = topi.nn.conv2d(X, topi.broadcast_to(W2, (5, 2, 3, 3)), 1, 1, 1)
check_grad(R, [X, W2], perf=(1892, 1728, 125))
R = topi.nn.pool(X, [2, 2], [2, 2], [0, 0, 0, 0], 'avg')
check_grad(R, X, perf=(33, 32, 33))
R = topi.nn.pool(X, [2, 2], [2, 2], [0, 0, 0, 0], 'max')
check_grad(R, X, perf=(161, 1056, 97))
X = tvm.placeholder((1, 2, 5, 5), name='X')
R = topi.reshape(X, (1, 32))
check_grad(R, [X], perf=(51, 200, 51))
X = tvm.placeholder((1, 2, 5, 5), | |
<reponame>ni1o1/transdata
'''
BSD 3-Clause License
Copyright (c) 2021, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import geopandas as gpd
import pandas as pd
from shapely.geometry import Polygon
import math
import numpy as np
from .coordinates import getdistance
from .gisprocess import merge_polygon
import warnings
def area_to_grid(location, accuracy=500, method='rect', params='auto'):
    '''
    Generate the rectangular grids in the bounds or shape

    Parameters
    -------
    location : bounds(List) or shape(GeoDataFrame)
        Where to generate grids.
        If bounds, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the
        lower-left coordinates, lon2 , lat2 are the upper-right coordinates
        If shape, it should be GeoDataFrame
    accuracy : number
        Grid size (meter)
    method : str
        rect, tri or hexa
    params : list or dict
        Gridding parameters.
        See https://transbigdata.readthedocs.io/en/latest/grids.html
        for detail information about gridding parameters.
        When Gridding parameters is given, accuracy will not be used.

    Returns
    -------
    grid : GeoDataFrame
        Grid GeoDataFrame,
        LONCOL and LATCOL are the index of grids,
        HBLON and HBLAT are the center of the grids
    params : list or dict
        Gridding parameters.
        See https://transbigdata.readthedocs.io/en/latest/grids.html
        for detail information about gridding parameters.
    '''
    # Resolve the input into WGS84 bounds; keep the shape for clipping later.
    if isinstance(location, (list, tuple)):
        shape = ''
        bounds = location
    elif isinstance(location, gpd.geodataframe.GeoDataFrame):
        shape = location
        bounds = shape.unary_union.bounds
    else:
        raise Exception(
            'Location should be either bounds(List) or shape(GeoDataFrame)')
    lon1, lat1, lon2, lat2 = bounds
    if (lon1 > lon2) | (lat1 > lat2) | (abs(lat1) > 90) | (abs(lon1) > 180) | (
            abs(lat2) > 90) | (abs(lon2) > 180):
        raise Exception(
            'Bounds error. The input bounds should be in the order of \
            [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left \
            corner and (lon2,lat2) is the upper right corner.'
        )
    latStart = min(lat1, lat2)
    lonStart = min(lon1, lon2)
    # Cell size in degrees; 6371004 m is the mean Earth radius used throughout.
    deltaLon = accuracy * 360 / \
        (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360))
    deltaLat = accuracy * 360 / (2 * math.pi * 6371004)
    if params == 'auto':
        params = {'slon': lonStart,
                  'slat': latStart,
                  'deltalon': deltaLon,
                  'deltalat': deltaLat,
                  'theta': 0,
                  'method': method,
                  'gridsize': accuracy}
    else:
        params = convertparams(params)
        method = params['method']
    # BUG FIX: an unknown method previously fell through to a NameError below.
    if method not in ('rect', 'tri', 'hexa'):
        raise Exception("method should be one of: rect, tri, hexa")
    if method == 'rect':
        # Seed candidate points one cell beyond the bounds so boundary cells
        # are kept, map them to grid ids, and deduplicate.
        tmppoints = pd.DataFrame(np.array(
            np.meshgrid(
                np.arange(bounds[0] - deltaLon,
                          bounds[2] + deltaLon,
                          deltaLon/3),
                np.arange(bounds[1]-deltaLat,
                          bounds[3]+deltaLat,
                          deltaLat/3))
        ).reshape(2, -1).T,
            columns=['lon', 'lat'])
        tmppoints['LONCOL'], tmppoints['LATCOL'] = GPS_to_grid(
            tmppoints['lon'], tmppoints['lat'], params)
        tmppoints = tmppoints[['LONCOL', 'LATCOL']].drop_duplicates()
        tmppoints['geometry'] = grid_to_polygon(
            [tmppoints['LONCOL'], tmppoints['LATCOL']], params)
        tmppoints = gpd.GeoDataFrame(tmppoints)
    else:  # 'tri' or 'hexa'
        # BUG FIX: the candidate ranges previously ran from bounds+delta down
        # to bounds-delta, shrinking coverage (and producing an empty grid for
        # areas smaller than ~2 cells). Expand beyond the bounds exactly as in
        # the 'rect' branch.
        tmppoints = pd.DataFrame(
            np.array(
                np.meshgrid(
                    np.arange(bounds[0]-deltaLon,
                              bounds[2]+deltaLon,
                              deltaLon/3),
                    np.arange(bounds[1]-deltaLat,
                              bounds[3]+deltaLat,
                              deltaLat/3))
            ).reshape(2, -1).T, columns=['lon', 'lat'])
        tmppoints['loncol_1'],\
            tmppoints['loncol_2'],\
            tmppoints['loncol_3'] = GPS_to_grid(
                tmppoints['lon'], tmppoints['lat'], params)
        tmppoints = tmppoints[['loncol_1',
                               'loncol_2', 'loncol_3']].drop_duplicates()
        tmppoints['geometry'] = grid_to_polygon(
            [tmppoints['loncol_1'],
             tmppoints['loncol_2'],
             tmppoints['loncol_3']], params)
        tmppoints = gpd.GeoDataFrame(tmppoints)
    data = tmppoints
    # BUG FIX: do not clobber a user-supplied gridsize; the docstring promises
    # that `accuracy` is ignored when explicit params are given.
    if 'gridsize' not in params:
        params['gridsize'] = accuracy
    if not isinstance(shape, gpd.geodataframe.GeoDataFrame):
        grid = gpd.GeoDataFrame(data)
        return grid, params
    else:
        # Clip: keep only grid cells intersecting the input shape.
        data.crs = shape.crs
        data = data[data.intersects(shape.unary_union)]
        grid = gpd.GeoDataFrame(data)
        return grid, params
def area_to_params(location, accuracy=500, method='rect'):
    '''
    Generate gridding params

    Parameters
    -------
    location : bounds(List) or shape(GeoDataFrame)
        Where to generate grids.
        If bounds, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the
        lower-left coordinates, lon2 , lat2 are the upper-right coordinates
        If shape, it should be GeoDataFrame

    accuracy : number
        Grid size (meter)

    method : str
        rect, tri or hexa

    Returns
    -------
    params : list or dict
        Gridding parameters.
        See https://transbigdata.readthedocs.io/en/latest/grids.html
        for detail information about gridding parameters.

    Raises
    -------
    TypeError
        If *location* is neither a bounds list/tuple nor a GeoDataFrame.
    Exception
        If the bounds are out of range or not in [lon1,lat1,lon2,lat2] order.
    '''
    if isinstance(location, (list, tuple)):
        bounds = location
    elif isinstance(location, gpd.geodataframe.GeoDataFrame):
        bounds = location.unary_union.bounds
    else:
        # Previously an unsupported type fell through and raised a confusing
        # NameError on `bounds`; fail fast with a clear message instead.
        raise TypeError(
            'location should be either a [lon1, lat1, lon2, lat2] '
            'bounds list/tuple or a GeoDataFrame')
    lon1, lat1, lon2, lat2 = bounds
    if (lon1 > lon2) | (lat1 > lat2) | (abs(lat1) > 90) | (abs(lon1) > 180) | (
            abs(lat2) > 90) | (abs(lon2) > 180):
        raise Exception(
            'Bounds error. The input bounds should be in the order of '
            '[lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner '
            'and (lon2,lat2) is the upper right corner.')
    latStart = min(lat1, lat2)
    lonStart = min(lon1, lon2)
    # Cell size in degrees; 6371004 m is the mean Earth radius used
    # throughout this module.
    deltaLon = accuracy * 360 / \
        (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360))
    deltaLat = accuracy * 360 / (2 * math.pi * 6371004)
    params = convertparams([lonStart, latStart, deltaLon, deltaLat])
    params['gridsize'] = accuracy
    params['method'] = method
    return params
def GPS_to_grid(lon, lat, params):
    '''
    Match the GPS data to the grids. The input is the columns of
    longitude, latitude, and the grids parameter. The output is the grid ID.

    Parameters
    -------
    lon : Series
        The column of longitude
    lat : Series
        The column of latitude
    params : list or dict
        Gridding parameters.
        See https://transbigdata.readthedocs.io/en/latest/grids.html
        for detail information about gridding parameters.

    Returns
    -------
    `Rectangle grids`
    [LONCOL,LATCOL] : list
        The two columns LONCOL and LATCOL together can specify a grid.

    `Triangle and Hexagon grids`
    [loncol_1,loncol_2,loncol_3] : list
        The index of the grid latitude. The two columns LONCOL and
        LATCOL together can specify a grid.

    Raises
    -------
    ValueError
        If params['method'] is not one of 'rect', 'tri' or 'hexa'.
    '''
    params = convertparams(params)
    method = params['method']
    if method == 'rect':
        loncol, latcol = GPS_to_grids_rect(lon, lat, params)
        return [loncol, latcol]
    if method == 'tri':
        loncol_1, loncol_2, loncol_3 = GPS_to_grids_tri(lon, lat, params)
        return [loncol_1, loncol_2, loncol_3]
    if method == 'hexa':
        loncol_1, loncol_2, loncol_3 = GPS_to_grids_hexa(lon, lat, params)
        return [loncol_1, loncol_2, loncol_3]
    # Previously an unknown method silently returned None; make it explicit.
    raise ValueError(
        "params['method'] should be one of 'rect', 'tri' or 'hexa', "
        "got %r" % method)
def grid_to_centre(gridid, params):
'''
The center location of the grid. The input is the grid ID and
parameters, the output is the grid center location.
Parameters
-------
gridid : list
if `Rectangle grids`
[LONCOL,LATCOL] : Series
The two columns LONCOL and LATCOL together can specify a grid.
if `Triangle and Hexagon grids`
[loncol_1,loncol_2,loncol_3] : Series
The index of the grid latitude. The two columns LONCOL and
LATCOL together can specify a grid.
params : list or dict
Gridding parameters.
See https://transbigdata.readthedocs.io/en/latest/grids.html
for detail information about gridding parameters.
Returns
-------
HBLON : Series
The longitude of the grid center
HBLAT : Series
The latitude of the grid center
'''
params = convertparams(params)
method = params['method']
if method == 'rect':
loncol, latcol = gridid
loncol = pd.Series(loncol, name='loncol')
latcol = pd.Series(latcol, name='latcol')
return grid_to_centre_rect(loncol, latcol, params, from_origin=False)
if method == 'tri':
loncol_1, loncol_2, loncol_3 = gridid
loncol_1 = pd.Series(loncol_1, name='loncol_1')
loncol_2 = pd.Series(loncol_2, name='loncol_2')
loncol_3 = pd.Series(loncol_3, name='loncol_3')
testpoint = gettripoints(loncol_1, loncol_2, loncol_3, params)
hblon = ((testpoint['p1_x']+testpoint['p2_x'] +
testpoint['p3_x'])/3).values
hblat = ((testpoint['p1_y']+testpoint['p2_y'] +
testpoint['p3_y'])/3).values
return hblon, hblat
if method == 'hexa':
loncol_1, loncol_2, loncol_3 = gridid
loncol_1 = pd.Series(loncol_1, name='loncol_1')
loncol_2 = pd.Series(loncol_2, name='loncol_2')
loncol_3 = pd.Series(loncol_3, name='loncol_3')
lonStart = params['slon']
latStart = params['slat']
deltaLon = params['deltalon']
deltaLat = params['deltalat']
theta = params['theta']
params = [lonStart, latStart, deltaLon, deltaLat, theta]
x1, y1 = grid_to_centre_rect(
loncol_1, np.zeros(len(loncol_1))-5,
params=[params[0], params[1], params[2], params[3], theta+0])
x2, y2 = grid_to_centre_rect(
loncol_1, np.zeros(len(loncol_1))+5,
params=[params[0], params[1], params[2], params[3], theta+0])
x3, y3 = grid_to_centre_rect(
loncol_2, np.zeros(len(loncol_2))-5,
params=[params[0], params[1], params[2], params[3], theta+60])
x4, y4 = grid_to_centre_rect(
loncol_2, np.zeros(len(loncol_2))+5,
params=[params[0], params[1], params[2], params[3], theta+60])
x1 | |
# Repository: mfisherlevine/astroquery (GitHub stars: 0)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
@author: <NAME>
@contact: <EMAIL>
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 4 Sept. 2019
"""
from unittest.mock import patch
import pytest
import tarfile
import os
import errno
import shutil
from ..core import XMMNewtonClass
from ..tests.dummy_tap_handler import DummyXMMNewtonTapHandler
from ..tests.dummy_handler import DummyHandler
from astroquery.exceptions import LoginError
def data_path(filename):
    """Return the path of *filename* inside this module's ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
class mockResponse():
    """Minimal stand-in for an HTTP response object used by the tests.

    Exposes the ``headers``, ``status_code`` and ``raise_for_status``
    surface that the download code touches; ``raise_for_status`` is a
    deliberate no-op.
    """

    status_code = 400
    headers = {'Date': 'Wed, 24 Nov 2021 13:43:50 GMT',
               'Server': 'Apache/2.4.6 (Red Hat Enterprise Linux) OpenSSL/1.0.2k-fips',
               'Content-Disposition': 'inline; filename="0560181401.tar.gz"',
               'Content-Type': 'application/x-gzip',
               'Content-Length': '6590874', 'Connection': 'close'}

    @staticmethod
    def raise_for_status():
        return None
class TestXMMNewton():
def get_dummy_tap_handler(self):
parameters = {'query': "select top 10 * from v_public_observations",
'output_file': "test2.vot",
'output_format': "votable",
'verbose': False}
dummyTapHandler = DummyXMMNewtonTapHandler("launch_job", parameters)
return dummyTapHandler
def test_query_xsa_tap(self):
parameters = {'query': "select top 10 * from v_public_observations",
'output_file': "test2.vot",
'output_format': "votable",
'verbose': False}
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
xsa.query_xsa_tap(**parameters)
self.get_dummy_tap_handler().check_call("launch_job", parameters)
self.get_dummy_tap_handler().check_parameters(parameters, "launch_job")
self.get_dummy_tap_handler().check_method("launch_job")
self.get_dummy_tap_handler().get_tables()
self.get_dummy_tap_handler().get_columns()
self.get_dummy_tap_handler().load_tables()
def test_get_tables(self):
parameters2 = {'only_names': True,
'verbose': True}
dummyTapHandler = DummyXMMNewtonTapHandler("get_tables", parameters2)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
xsa.get_tables(only_names=True, verbose=True)
dummyTapHandler.check_call("get_tables", parameters2)
def test_get_columns(self):
parameters2 = {'table_name': "table",
'only_names': True,
'verbose': True}
dummyTapHandler = DummyXMMNewtonTapHandler("get_columns", parameters2)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
xsa.get_columns("table", only_names=True, verbose=True)
dummyTapHandler.check_call("get_columns", parameters2)
def test_get_columns_valueerror(self):
with pytest.raises(ValueError):
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
xsa.get_columns("", only_names=True, verbose=True)
def test_dummy_handler(self):
parameters2 = {'table_name': "table",
'only_names': True,
'verbose': True}
dummyHandler = DummyHandler("get_columns", parameters2)
dummyHandler.check_call("get_columns", parameters2)
dummyHandler.check_method("get_columns")
dummyHandler.check_parameters(parameters2, "get_columns")
dummyHandler.reset()
def test_parse_filename(self):
self._create_tar("filename.tar", self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
with tarfile.open("filename.tar", "r") as tar:
for i in tar.getmembers():
paths = os.path.split(i.name)
fname = paths[1]
paths = os.path.split(paths[0])
if paths[1] != "pps":
continue
fname_info = xsa._parse_filename(fname)
assert fname_info["X"] == "P"
_files = {
"0405320501": {
"pps": [
"P0405320501M1S002EXPMAP1000.FTZ",
"P0405320501M1S002IMAGE_4000.FTZ",
"P0405320501M2S003EXPMAP2000.FTZ",
"P0405320501M2S003IMAGE_5000.FTZ",
"P0405320501PNS001EXPMAP3000.FTZ",
"P0405320501PNS001IMAGE_8000.FTZ",
"P0405320501M1S002EXPMAP2000.FTZ",
"P0405320501M1S002IMAGE_5000.FTZ",
"P0405320501M2S003EXPMAP3000.FTZ",
"P0405320501M2S003IMAGE_8000.FTZ",
"P0405320501PNS001EXPMAP4000.FTZ",
"P0405320501PNX000DETMSK1000.FTZ",
"P0405320501M1S002EXPMAP3000.FTZ",
"P0405320501M1S002IMAGE_8000.FTZ",
"P0405320501M2S003EXPMAP4000.FTZ",
"P0405320501M2X000DETMSK1000.FTZ",
"P0405320501PNS001EXPMAP5000.FTZ",
"P0405320501PNX000DETMSK2000.FTZ",
"P0405320501M1S002EXPMAP4000.FTZ",
"P0405320501M1X000DETMSK1000.FTZ",
"P0405320501M2S003EXPMAP5000.FTZ",
"P0405320501M2X000DETMSK2000.FTZ",
"P0405320501PNS001EXPMAP8000.FTZ",
"P0405320501PNX000DETMSK3000.FTZ",
"P0405320501M1S002EXPMAP5000.FTZ",
"P0405320501M1X000DETMSK2000.FTZ",
"P0405320501M2S003EXPMAP8000.FTZ",
"P0405320501M2X000DETMSK3000.FTZ",
"P0405320501PNS001IMAGE_1000.FTZ",
"P0405320501PNX000DETMSK4000.FTZ",
"P0405320501M1S002EXPMAP8000.FTZ",
"P0405320501M1X000DETMSK3000.FTZ",
"P0405320501M2S003IMAGE_1000.FTZ",
"P0405320501M2X000DETMSK4000.FTZ",
"P0405320501PNS001IMAGE_2000.FTZ",
"P0405320501PNX000DETMSK5000.FTZ",
"P0405320501M1S002IMAGE_1000.FTZ",
"P0405320501M1X000DETMSK4000.FTZ",
"P0405320501M2S003IMAGE_2000.FTZ",
"P0405320501M2X000DETMSK5000.FTZ",
"P0405320501PNS001IMAGE_3000.FTZ",
"P0405320501M1S002IMAGE_2000.FTZ",
"P0405320501M1X000DETMSK5000.FTZ",
"P0405320501M2S003IMAGE_3000.FTZ",
"P0405320501PNS001EXPMAP1000.FTZ",
"P0405320501PNS001IMAGE_4000.FTZ",
"P0405320501M1S002IMAGE_3000.FTZ",
"P0405320501M2S003EXPMAP1000.FTZ",
"P0405320501M2S003IMAGE_4000.FTZ",
"P0405320501PNS001EXPMAP2000.FTZ",
"P0405320501PNS001IMAGE_5000.FTZ",
"P0405320501PNU001IMAGE_5000.FTZ",
"P0405320501PNX001IMAGE_5000.FTZ"
]
}
}
_files_lightcurves = {
"0405320501": {
"pps": [
"P0405320501M1S002EXPMAP1000.FTZ",
"P0405320501M1S002IMAGE_4000.FTZ",
"P0405320501M2S003EXPMAP2000.FTZ",
"P0405320501M2S003IMAGE_5000.FTZ",
"P0405320501PNS001EXPMAP3000.FTZ",
"P0405320501PNS001IMAGE_8000.FTZ",
"P0405320501M1S002EXPMAP2000.FTZ",
"P0405320501M1S002IMAGE_5000.FTZ",
"P0405320501M2S003EXPMAP3000.FTZ",
"P0405320501M2S003IMAGE_8000.FTZ",
"P0405320501PNS001EXPMAP4000.FTZ",
"P0405320501PNX000DETMSK1000.FTZ",
"P0405320501M1S002EXPMAP3000.FTZ",
"P0405320501M1S002IMAGE_8000.FTZ",
"P0405320501M2S003EXPMAP4000.FTZ",
"P0405320501M2X000DETMSK1000.FTZ",
"P0405320501PNS001EXPMAP5000.FTZ",
"P0405320501PNX000DETMSK2000.FTZ",
"P0405320501M1S002EXPMAP4000.FTZ",
"P0405320501M1X000DETMSK1000.FTZ",
"P0405320501M2S003EXPMAP5000.FTZ",
"P0405320501M2X000DETMSK2000.FTZ",
"P0405320501PNS001EXPMAP8000.FTZ",
"P0405320501PNX000DETMSK3000.FTZ",
"P0405320501M1S002EXPMAP5000.FTZ",
"P0405320501M1X000DETMSK2000.FTZ",
"P0405320501M2S003EXPMAP8000.FTZ",
"P0405320501M2X000DETMSK3000.FTZ",
"P0405320501PNS001IMAGE_1000.FTZ",
"P0405320501PNX000DETMSK4000.FTZ",
"P0405320501M1S002EXPMAP8000.FTZ",
"P0405320501M1X000DETMSK3000.FTZ",
"P0405320501M2S003IMAGE_1000.FTZ",
"P0405320501M2X000DETMSK4000.FTZ",
"P0405320501PNS001IMAGE_2000.FTZ",
"P0405320501PNX000DETMSK5000.FTZ",
"P0405320501M1S002IMAGE_1000.FTZ",
"P0405320501M1X000DETMSK4000.FTZ",
"P0405320501M2S003IMAGE_2000.FTZ",
"P0405320501M2X000DETMSK5000.FTZ",
"P0405320501PNS001IMAGE_3000.FTZ",
"P0405320501M1S002IMAGE_2000.FTZ",
"P0405320501M1X000DETMSK5000.FTZ",
"P0405320501M2S003IMAGE_3000.FTZ",
"P0405320501PNS001EXPMAP1000.FTZ",
"P0405320501PNS001IMAGE_4000.FTZ",
"P0405320501M1S002IMAGE_3000.FTZ",
"P0405320501M2S003EXPMAP1000.FTZ",
"P0405320501M2S003IMAGE_4000.FTZ",
"P0405320501PNS001EXPMAP2000.FTZ",
"P0405320501PNS001IMAGE_5000.FTZ",
"P0405320501M2S003SRSPEC0053.FTZ",
"P0405320501PNS001BGSPEC0053.FTZ",
"P0405320501M2S003BGSPEC0053.FTZ",
"P0405320501PNS001SRCARF0053.FTZ",
"P0405320501M2S003SRCARF0053.FTZ",
"P0405320501PNS001SRSPEC0053.FTZ",
"P0405320501PNS001SRCTSR8092.FTZ",
"P0405320501PNS001FBKTSR8092.FTZ",
"P0405320501PNS001SRCTSR8093.FTZ",
"P0405320501PNS001FBKTSR8093.FTZ"
]
}
}
_rmf_files = ["epn_e2_ff20_sdY4.rmf", "m2_e9_im_pall_o.rmf"]
def _create_tar(self, tarname, files):
with tarfile.open(tarname, "w") as tar:
for ob_name, ob in self._files.items():
for ftype, ftype_val in ob.items():
for f in ftype_val:
try:
os.makedirs(os.path.join(ob_name, ftype))
except OSError as exc:
if exc.errno == errno.EEXIST and \
os.path.isdir(os.path.join(ob_name, ftype)):
pass
else:
raise
_file = open(os.path.join(ob_name, ftype, f), "w")
_file.close()
tar.add(os.path.join(ob_name, ftype, f))
os.remove(os.path.join(ob_name, ftype, f))
shutil.rmtree(os.path.join(ob_name, ftype))
shutil.rmtree(ob_name)
def _create_tar_lightcurves(self, tarname, files):
with tarfile.open(tarname, "w") as tar:
for ob_name, ob in self._files.items():
for ftype, ftype_val in ob.items():
for f in ftype_val:
try:
os.makedirs(os.path.join(ob_name, ftype))
except OSError as exc:
if exc.errno == errno.EEXIST and \
os.path.isdir(os.path.join(ob_name, ftype)):
pass
else:
raise
if f[17:23] == "SRSPEC":
rmf_file = self._rmf_files[1]
if f[11:13] == "PN":
rmf_file = self._rmf_files[0]
hdr = fits.Header()
hdr["RESPFILE"] = rmf_file
hdr["SPECDELT"] = 5
hdu = fits.PrimaryHDU(header=hdr)
hdu.name = "SPECTRUM"
hdu.writeto(os.path.join(ob_name, ftype, f))
else:
_file = open(os.path.join(ob_name, ftype, f), "w")
_file.close()
tar.add(os.path.join(ob_name, ftype, f))
os.remove(os.path.join(ob_name, ftype, f))
shutil.rmtree(os.path.join(ob_name, ftype))
shutil.rmtree(ob_name)
def test_create_tar_lightcurves(self):
_tarname = "tarfile_lightcurves.tar"
self._create_tar_lightcurves(_tarname, self._files_lightcurves)
assert os.path.isfile(_tarname)
def test_get_epic_spectra_non_existing_file(self, capsys):
_tarname = "nonexistingfile.tar"
_source_number = 83
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_spectra(_tarname, _source_number,
instrument=[])
assert res is None
out, err = capsys.readouterr()
assert err == ("ERROR: File %s not found "
"[astroquery.esa.xmm_newton.core]\n" % _tarname)
def test_get_epic_spectra_invalid_instrumnet(self, capsys):
_tarname = "tarfile.tar"
_invalid_instrument = "II"
_source_number = 83
self._create_tar(_tarname, self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_spectra(_tarname, _source_number,
instrument=[_invalid_instrument])
assert res == {}
out, err = capsys.readouterr()
assert err == ("WARNING: Invalid instrument %s "
"[astroquery.esa.xmm_newton.core]\n"
% _invalid_instrument)
os.remove(_tarname)
def test_get_epic_spectra_invalid_source_number(self, capsys):
_tarname = "tarfile.tar"
_invalid_source_number = 833
_default_instrument = ['M1', 'M2', 'PN', 'EP']
self._create_tar(_tarname, self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_spectra(_tarname, _invalid_source_number,
instrument=[])
assert res == {}
out, err = capsys.readouterr()
assert out == ("INFO: Nothing to extract with the given parameters:\n"
" PPS: %s\n"
" Source Number: %u\n"
" Instrument: %s\n"
" [astroquery.esa.xmm_newton.core]\n"
% (_tarname, _invalid_source_number,
_default_instrument))
os.remove(_tarname)
def test_get_epic_images_non_existing_file(self, capsys):
_tarname = "nonexistingfile.tar"
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_images(_tarname, [], [],
get_detmask=True, get_exposure_map=True)
assert res is None
out, err = capsys.readouterr()
assert err == ("ERROR: File %s not found "
"[astroquery.esa.xmm_newton.core]\n" % _tarname)
def test_get_epic_images_invalid_instrument(self, capsys):
_tarname = "tarfile.tar"
_invalid_instrument = "II"
self._create_tar(_tarname, self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_images(_tarname,
band=[], instrument=[_invalid_instrument],
get_detmask=True, get_exposure_map=True)
assert res == {}
out, err = capsys.readouterr()
assert err == ("WARNING: Invalid instrument %s "
"[astroquery.esa.xmm_newton.core]\n"
% _invalid_instrument)
os.remove(_tarname)
def test_get_epic_images_invalid_band(self, capsys):
_tarname = "tarfile.tar"
_invalid_band = 10
self._create_tar(_tarname, self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_images(_tarname,
band=[_invalid_band], instrument=[],
get_detmask=True, get_exposure_map=True)
assert res == {}
out, err = capsys.readouterr()
assert err == ("WARNING: Invalid band %u "
"[astroquery.esa.xmm_newton.core]\n" % _invalid_band)
os.remove(_tarname)
def test_get_epic_images(self):
_tarname = "tarfile.tar"
_instruments = ["M1", "M1_expo", "M1_det",
"M2", "M2_expo", "M2_det",
"PN", "PN_expo", "PN_det",
"EP", "EP_expo", "EP_det"]
self._create_tar(_tarname, self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_images(_tarname, band=[], instrument=[],
get_detmask=True, get_exposure_map=True)
assert len(res) == 6 # Number of different bands
assert len(res[1]) == 9 # Number of different inst within band 1
assert len(res[2]) == 9 # Number of different inst within band 2
assert len(res[3]) == 9 # Number of different inst within band 3
assert len(res[4]) == 9 # Number of different inst within band 4
assert len(res[5]) == 9 # Number of different inst within band 5
assert len(res[8]) == 6 # Number of different inst within band 8
# Notice that we consider the exposure and the detector maps as
# an instrument
for k, v in res[1].items():
assert k in _instruments
if type(v) == str:
f = os.path.split(v)
assert f[1] in self._files["0405320501"]["pps"]
if type(v) == list:
for i in v:
f = os.path.split(i)
assert f[1] in self._files["0405320501"]["pps"]
for k, v in res[2].items():
assert k in _instruments
if type(v) == str:
f = os.path.split(v)
assert f[1] in self._files["0405320501"]["pps"]
if type(v) == list:
for i in v:
f = os.path.split(i)
assert f[1] in self._files["0405320501"]["pps"]
for k, v in res[3].items():
assert k in _instruments
if type(v) == str:
f = os.path.split(v)
assert f[1] in self._files["0405320501"]["pps"]
if type(v) == list:
for i in v:
f = os.path.split(i)
assert f[1] in self._files["0405320501"]["pps"]
for k, v in res[4].items():
assert k in _instruments
if type(v) == str:
f = os.path.split(v)
assert f[1] in self._files["0405320501"]["pps"]
if type(v) == list:
for i in v:
f = os.path.split(i)
assert f[1] in self._files["0405320501"]["pps"]
for k, v in res[5].items():
assert k in _instruments
if type(v) == str:
f = os.path.split(v)
assert f[1] in self._files["0405320501"]["pps"]
if type(v) == list:
for i in v:
f = os.path.split(i)
assert f[1] in self._files["0405320501"]["pps"]
for k, v in res[8].items():
assert k in _instruments
if type(v) == str:
f = os.path.split(v)
assert f[1] in self._files["0405320501"]["pps"]
if type(v) == list:
for i in v:
f = os.path.split(i)
assert f[1] in self._files["0405320501"]["pps"]
for ob in self._files:
assert os.path.isdir(ob)
for t in self._files[ob]:
assert os.path.isdir(os.path.join(ob, t))
for b in res:
for i in res[b]:
if type(res[b][i]) == str:
assert os.path.isfile(res[b][i])
if type(res[b][i]) == list:
for f in res[b][i]:
assert os.path.isfile(f)
# Removing files created in this test
for ob_name in self._files:
shutil.rmtree(ob_name)
os.remove(_tarname)
def test_get_epic_lightcurve(self):
_tarname = "tarfile.tar"
self._create_tar(_tarname, self._files)
_source_number = 1
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_lightcurve(_tarname, _source_number,
instrument=['M1', 'M2', 'PN'])
assert res == {}
def test_get_epic_lightcurve_non_existing_file(self, capsys):
_tarname = "nonexistingfile.tar"
_source_number = 146
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_lightcurve(_tarname, _source_number,
instrument=[])
assert res is None
out, err = capsys.readouterr()
assert err == ("ERROR: File %s not found "
"[astroquery.esa.xmm_newton.core]\n" % _tarname)
def test_get_epic_lightcurve_invalid_instrument(self, capsys):
_tarname = "tarfile.tar"
_invalid_instrument = "II"
self._create_tar(_tarname, self._files)
xsa = XMMNewtonClass(self.get_dummy_tap_handler())
res = xsa.get_epic_images(_tarname, [], [_invalid_instrument],
get_detmask=True, get_exposure_map=True)
assert res == {}
out, err = capsys.readouterr()
assert err == ("WARNING: Invalid | |
return pulumi.get(self, "method")
    # Accessor for the API field "origin" (simple name, default getter).
    @property
    @pulumi.getter
    def origin(self) -> Sequence[str]:
        """
        The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin".
        """
        return pulumi.get(self, "origin")
    # Accessor for the camelCase API field "responseHeader".
    @property
    @pulumi.getter(name="responseHeader")
    def response_header(self) -> Sequence[str]:
        """
        The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.
        """
        return pulumi.get(self, "response_header")
@pulumi.output_type
class BucketCustomPlacementConfigResponse(dict):
    """
    The bucket's custom placement configuration for Custom Dual Regions.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys map onto snake_case property getters.
        suggest = {"dataLocations": "data_locations"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketCustomPlacementConfigResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketCustomPlacementConfigResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketCustomPlacementConfigResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 data_locations: Sequence[str]):
        """
        The bucket's custom placement configuration for Custom Dual Regions.
        :param Sequence[str] data_locations: The list of regional locations in which data is placed.
        """
        pulumi.set(__self__, "data_locations", data_locations)

    @property
    @pulumi.getter(name="dataLocations")
    def data_locations(self) -> Sequence[str]:
        """
        The list of regional locations in which data is placed.
        """
        return pulumi.get(self, "data_locations")
@pulumi.output_type
class BucketEncryptionResponse(dict):
    """
    Encryption configuration for a bucket.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys map onto snake_case property getters.
        suggest = {"defaultKmsKeyName": "default_kms_key_name"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketEncryptionResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketEncryptionResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketEncryptionResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 default_kms_key_name: str):
        """
        Encryption configuration for a bucket.
        :param str default_kms_key_name: A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.
        """
        pulumi.set(__self__, "default_kms_key_name", default_kms_key_name)

    @property
    @pulumi.getter(name="defaultKmsKeyName")
    def default_kms_key_name(self) -> str:
        """
        A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.
        """
        return pulumi.get(self, "default_kms_key_name")
@pulumi.output_type
class BucketIamConfigurationBucketPolicyOnlyResponse(dict):
    """
    The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys map onto snake_case property getters.
        suggest = {"lockedTime": "locked_time"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketIamConfigurationBucketPolicyOnlyResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketIamConfigurationBucketPolicyOnlyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketIamConfigurationBucketPolicyOnlyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enabled: bool,
                 locked_time: str):
        """
        The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
        :param bool enabled: If set, access is controlled only by bucket-level or above IAM policies.
        :param str locked_time: The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "locked_time", locked_time)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        If set, access is controlled only by bucket-level or above IAM policies.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="lockedTime")
    def locked_time(self) -> str:
        """
        The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        return pulumi.get(self, "locked_time")
@pulumi.output_type
class BucketIamConfigurationResponse(dict):
    """
    The bucket's IAM configuration.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys map onto snake_case property getters.
        renames = {
            "bucketPolicyOnly": "bucket_policy_only",
            "publicAccessPrevention": "public_access_prevention",
            "uniformBucketLevelAccess": "uniform_bucket_level_access",
        }
        suggest = renames.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketIamConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketIamConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketIamConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 bucket_policy_only: 'outputs.BucketIamConfigurationBucketPolicyOnlyResponse',
                 public_access_prevention: str,
                 uniform_bucket_level_access: 'outputs.BucketIamConfigurationUniformBucketLevelAccessResponse'):
        """
        The bucket's IAM configuration.
        :param 'BucketIamConfigurationBucketPolicyOnlyResponse' bucket_policy_only: The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
        :param str public_access_prevention: The bucket's Public Access Prevention configuration. Currently, 'inherited' and 'enforced' are supported.
        :param 'BucketIamConfigurationUniformBucketLevelAccessResponse' uniform_bucket_level_access: The bucket's uniform bucket-level access configuration.
        """
        pulumi.set(__self__, "bucket_policy_only", bucket_policy_only)
        pulumi.set(__self__, "public_access_prevention", public_access_prevention)
        pulumi.set(__self__, "uniform_bucket_level_access", uniform_bucket_level_access)

    @property
    @pulumi.getter(name="bucketPolicyOnly")
    def bucket_policy_only(self) -> 'outputs.BucketIamConfigurationBucketPolicyOnlyResponse':
        """
        The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.
        """
        return pulumi.get(self, "bucket_policy_only")

    @property
    @pulumi.getter(name="publicAccessPrevention")
    def public_access_prevention(self) -> str:
        """
        The bucket's Public Access Prevention configuration. Currently, 'inherited' and 'enforced' are supported.
        """
        return pulumi.get(self, "public_access_prevention")

    @property
    @pulumi.getter(name="uniformBucketLevelAccess")
    def uniform_bucket_level_access(self) -> 'outputs.BucketIamConfigurationUniformBucketLevelAccessResponse':
        """
        The bucket's uniform bucket-level access configuration.
        """
        return pulumi.get(self, "uniform_bucket_level_access")
@pulumi.output_type
class BucketIamConfigurationUniformBucketLevelAccessResponse(dict):
    """
    The bucket's uniform bucket-level access configuration.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire keys map onto snake_case property getters.
        suggest = {"lockedTime": "locked_time"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in BucketIamConfigurationUniformBucketLevelAccessResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        BucketIamConfigurationUniformBucketLevelAccessResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        BucketIamConfigurationUniformBucketLevelAccessResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 enabled: bool,
                 locked_time: str):
        """
        The bucket's uniform bucket-level access configuration.
        :param bool enabled: If set, access is controlled only by bucket-level or above IAM policies.
        :param str locked_time: The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "locked_time", locked_time)

    @property
    @pulumi.getter
    def enabled(self) -> bool:
        """
        If set, access is controlled only by bucket-level or above IAM policies.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="lockedTime")
    def locked_time(self) -> str:
        """
        The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.
        """
        return pulumi.get(self, "locked_time")
@pulumi.output_type
class BucketIamPolicyBindingsItemResponse(dict):
def __init__(__self__, *,
condition: 'outputs.ExprResponse',
members: Sequence[str],
role: str):
"""
:param 'ExprResponse' condition: The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently.
:param Sequence[str] members: A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows:
- allUsers — A special identifier that represents anyone on the internet; with or without a Google account.
- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account.
- user:emailid — An email address that represents a specific account. For example, user:<EMAIL> or user:<EMAIL>.
- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com .
- group:emailid — An email address that represents a Google group. For example, group:<EMAIL>.
- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com.
- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project
- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project
- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project
:param str role: The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to | |
Bx)
csr_tobsr(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const R, npy_int32 const C,
npy_int32 const [] Ap, npy_int32 const [] Aj, npy_cfloat_wrapper const [] Ax,
npy_int32 [] Bp, npy_int32 [] Bj, npy_cfloat_wrapper [] Bx)
csr_tobsr(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const R, npy_int32 const C,
npy_int32 const [] Ap, npy_int32 const [] Aj, npy_cdouble_wrapper const [] Ax,
npy_int32 [] Bp, npy_int32 [] Bj, npy_cdouble_wrapper [] Bx)
csr_tobsr(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const R, npy_int32 const C,
npy_int32 const [] Ap, npy_int32 const [] Aj, npy_clongdouble_wrapper const [] Ax,
npy_int32 [] Bp, npy_int32 [] Bj, npy_clongdouble_wrapper [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, npy_bool_wrapper const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, npy_bool_wrapper [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, signed char const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, signed char [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned char const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, unsigned char [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, short const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, short [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned short const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, unsigned short [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, int const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, int [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned int const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, unsigned int [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, long long const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, long long [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, unsigned long long const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, unsigned long long [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, float const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, float [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, double const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, double [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, long double const [] Ax, npy_int64 [] Bp,
npy_int64 [] Bj, long double [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, npy_cfloat_wrapper const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, npy_cfloat_wrapper [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, npy_cdouble_wrapper const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, npy_cdouble_wrapper [] Bx)
csr_tobsr(npy_int64 const n_row, npy_int64 const n_col, npy_int64 const R, npy_int64 const C,
npy_int64 const [] Ap, npy_int64 const [] Aj, npy_clongdouble_wrapper const [] Ax,
npy_int64 [] Bp, npy_int64 [] Bj, npy_clongdouble_wrapper [] Bx)
"""
return _csr.csr_tobsr(*args)
def csr_matmat_pass2(*args):
"""
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_bool_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
npy_bool_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj,
npy_bool_wrapper [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
signed char const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
signed char const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, signed char [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned char const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
unsigned char const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, unsigned char [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
short const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
short const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, short [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned short const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
unsigned short const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, unsigned short [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
int const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj, int const [] Bx,
npy_int32 [] Cp, npy_int32 [] Cj, int [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned int const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
unsigned int const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, unsigned int [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long long const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
long long const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, long long [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
unsigned long long const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
unsigned long long const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj,
unsigned long long [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
float const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
float const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, float [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
double const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
double const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, double [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
long double const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
long double const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj, long double [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cfloat_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
npy_cfloat_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj,
npy_cfloat_wrapper [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_cdouble_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] Bj,
npy_cdouble_wrapper const [] Bx, npy_int32 [] Cp, npy_int32 [] Cj,
npy_cdouble_wrapper [] Cx)
csr_matmat_pass2(npy_int32 const n_row, npy_int32 const n_col, npy_int32 const [] Ap, npy_int32 const [] Aj,
npy_clongdouble_wrapper const [] Ax, npy_int32 const [] Bp, npy_int32 const [] | |
<filename>Tests/test_python25.py
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import exceptions
import sys
import unittest
from iptest import is_cli, run_test
# True when the interpreter supports PEP 343 'with' statements: CPython/IronPython
# 2.5+ or any 3.x. The whole test class below is skipped otherwise.
isPython25 = ((sys.version_info[0] == 2) and (sys.version_info[1] >= 5)) or (sys.version_info[0] > 2)
# Module-level counters that the tests mutate through globals() to observe
# the exact order in which __enter__/__exit__ and test bodies run.
m = 0
gblvar = 0
@unittest.skipUnless(isPython25, 'Version of Python is too low, must be 2.5 or above')
class Python25Test(unittest.TestCase):
def test_raise_exit(self):
    """test case with RAISE(exit consumes), YIELD, RETURN, BREAK and CONTINUE in WITH

    A.__exit__ returns 1 (truthy), so exceptions raised inside the with body
    are swallowed; the global counter 'm' accumulates a checksum of the exact
    enter/exit/continue/break/yield ordering, compared at the end.
    """
    globals()["m"] = 0
    class A:
        def __enter__(self):
            globals()["m"] += 99
            return 300
        def __exit__(self,type,value,traceback):
            # Clean exit bumps by 55; exceptional exit doubles the counter.
            if(type == None and value == None and traceback == None):
                globals()["m"] += 55
            else:
                globals()["m"] *= 2
            # Truthy return: any exception from the with body is consumed.
            return 1
    a = A()
    def foo():
        p = 100
        for y in [1,2,3,4,5,6,7,8,9]:
            for x in [10,20,30,40,50,60,70,80,90]:
                with a as b:
                    p = p + 1
                    if ( x == 20 ): continue
                    if ( x == 50 and y == 5 ): break
                    if( x != 40 and y != 4) : yield p
                    p = p + 5
                    p = p + x * 100
                    p = p + 1
                    if(x % 3 == 0):
                        # Consumed by A.__exit__; control resumes after the with.
                        raise RuntimeError("we force exception")
                    if(y == 8):
                        globals()["m"] += p
                        return
                    if(x % 3 == 0 and y %3 == 0):
                        raise RuntimeError("we force exception")
                    if ( x == 90 ): continue
                    if ( x == 60 and y == 6 ): break
                    yield b + p
                p = p + 1
    try:
        k = foo()
        # NOTE: Python-2 generator protocol (k.next()); exhaustion raises StopIteration.
        while(k.next()):pass
    except StopIteration: self.assertEqual(globals()["m"],427056988)
    else:self.fail("Expected StopIteration but found None")
def test__enter__(self):
    """testing __enter__

    Covers: tuple unpacking of __enter__'s return value into arbitrary
    assignment targets (subscript, dict slot, class attribute), __enter__
    with a wrong arity, __enter__ that raises, and a missing __enter__.
    """
    def just_a_fun(arg): return 300
    class B:
        def __enter__(self): return "Iron", "Python", just_a_fun
        def __exit__(self, a,b,c): pass
    mydict = {1: [0,1,2], 2:None }
    # The 3-tuple from __enter__ is unpacked into a list element, a dict
    # value, and a class attribute (B.myfun becomes an unbound method).
    with B() as (mydict[1][0], mydict[2], B.myfun):
        self.assertEqual((mydict[1],mydict[2],B().myfun()),(["Iron",1,2],"Python",just_a_fun(None)) )
    #ensure it is same outside with also
    self.assertEqual((mydict[1],mydict[2],B().myfun()),(["Iron",1,2],"Python",just_a_fun(None)) )
    # more args
    class C:
        def __enter__(self,morearg): pass
        def __exit__(self, a,b,c): pass
    try:
        with C() as something: pass
    except TypeError: pass
    else :self.fail("Expected TypeError but found None")
    #enter raises
    class D:
        def __enter__(self):
            raise RuntimeError("we force an error")
        def __exit__(self, a,b,c): pass
    try:
        with D() as something: pass
    except RuntimeError: pass
    else :self.fail("Expected RuntimeError but found None")
    #missing enter
    class MissingEnter:
        def __exit__(self,a,b,c): pass
    try:
        with MissingEnter(): pass
    except AttributeError:pass
    else: self.fail("Expected AttributeError but found None")
def test__exit__(self):
    """Testing __exit__

    Covers: wrong __exit__ arity (too many / too few args), __exit__ that
    raises (on both clean and exceptional exits), falsy return propagating
    the body's exception, truthy return consuming it, a missing __exit__,
    deeply nested try/except/finally/with combinations with inheritance,
    and with statements used inside __enter__/__exit__ themselves.
    """
    globals()["gblvar"] = 0
    # more args
    class E:
        def __enter__(self): pass
        def __exit__(self, a,b,c,d,e,f): pass
    try:
        with E() as something: pass
    except TypeError: pass
    else :self.fail("Expected TypeError but found None")
    # less args
    class F:
        def __enter__(self): pass
        def __exit__(self): pass
    try:
        with F() as something: pass
    except TypeError: pass
    else :self.fail("Expected TypeError but found None")
    #exit raises
    class H:
        def __enter__(self): H.var1 = 100
        def __exit__(self, a,b,c):
            H.var2 = 200
            raise RuntimeError("we force an error")
    try:
        with H():
            H.var3 = 300
    except RuntimeError: self.assertEqual((H.var1,H.var2,H.var3),(100,200,300))
    else :self.fail("Expected RuntimeError but found None")
    #exit raises on successful / throwing WITH
    class Myerr1(Exception):pass
    class Myerr2(Exception):pass
    class Myerr3(Exception):pass
    class ExitRaise:
        def __enter__(self): H.var1 = 100
        def __exit__(self, a,b,c):
            # Clean exit raises Myerr1; exceptional exit raises Myerr2
            # (replacing the body's own exception).
            if(a == None and b == None and c == None):
                raise Myerr1
            raise Myerr2
    try:
        with ExitRaise():
            1+2+3
    except Myerr1: pass
    else :self.fail("Expected Myerr1 but found None")
    try:
        with ExitRaise():
            raise Myerr3
    except Myerr2: pass
    else :self.fail("Expected Myerr2 but found None")
    #exit propagates exception on name deletion ( covers FLOW CHECK scenario)
    class PropagateException:
        def __enter__(self): pass
        def __exit__(self, a,b,c): return False
    try:
        with PropagateException() as PE:
            del PE
            print PE
    except NameError:pass
    else: self.fail("Expected NameError but found None")
    try:
        with PropagateException() as PE:
            # PE is None (__enter__ returns nothing), so attribute assignment fails.
            PE.var1 = 100
            del PE
            print PE
    except AttributeError:pass
    else: self.fail("Expected AttributeError but found None")
    #exit consumes exception
    class ConsumeException:
        def __enter__(self): pass
        def __exit__(self, a,b,c): return [1,2,3],{"dsad":"dsd"},"hello"
    with ConsumeException():1/0
    #missing exit
    class MissingExit:
        def __enter__(self): pass
    try:
        # NOTE(review): uses MissingEnter (not defined in this method), hence the
        # NameError — presumably MissingExit was intended; verify against upstream.
        with MissingEnter(): pass
    except NameError: pass
    else: self.fail("Expected AttributeError but found None")
    #With Stmt under other compound statements (NO YIELD)
    globals()["gblvar"] = 0
    #inheritance
    class cxtmgr:
        def __exit__(self, a, b, c):
            globals()["gblvar"] += 10
            return False
    class inherited_cxtmgr(cxtmgr):
        def __enter__(self):
            globals()["gblvar"] += 10
            return False
    # Building up most complex TRY-CATCH-FINALLY-RAISE-WITH-CLASS combination with inheritance.
    #try->(try->(except->(with ->fun ->(try->(with->raise)->Finally(With)))))
    try: #Try
        try: #try->try
            globals()["gblvar"] += 1
            1/0
        except ZeroDivisionError: #try->(try->except)
            globals()["gblvar"] += 2
            with inherited_cxtmgr() as ic: #try->(try->(except->with(inherited)))
                globals()["gblvar"] += 3
                def fun_in_with(): return "Python is smart"
                self.assertEqual(fun_in_with(),"Python is smart") #try->(try->(except->(with ->fun)))
                try: #try->(try->(except->(with ->fun ->try)))
                    globals()["gblvar"] += 4
                    with inherited_cxtmgr() as inherited_cxtmgr.var: #try->(try->(except->(with ->fun ->(try->with))))
                        globals()["gblvar"] += 5
                        raise Myerr1() #try->(try->(except->(with ->fun ->(try->with->raise))))
                finally: #try->(try->(except->(with ->fun ->(try->(with->raise)->Finally))))
                    if not is_cli: #https://github.com/IronLanguages/main/issues/844
                        self.assertEqual(sys.exc_info()[0], Myerr1)
                    else:
                        self.assertEqual(sys.exc_info()[0], exceptions.ZeroDivisionError)
                    globals()["gblvar"] += 6
                    class ClassInFinally:
                        def __enter__(self):
                            globals()["gblvar"] += 7
                            return 200
                        def __exit__(self,a,b,c):
                            globals()["gblvar"] += 8
                            return False # it raises
                    with ClassInFinally(): #try->(try->(except->(with ->fun ->(try->(with->raise)->Finally(With)))))
                        globals()["gblvar"] += 9
    except Myerr1: self.assertEqual(globals()["gblvar"],85)
    # With in __enter__ and __exit__
    globals()["gblvar"] = 0
    class A:
        def __enter__(self): globals()["gblvar"] += 1 ; return 100
        def __exit__(self,a,b,c): globals()["gblvar"] += 2; return 200
    class WithInEnterExit:
        def __enter__(self):
            with A() as b:
                globals()["gblvar"] += 3;return A()
        def __exit__(self,a,b,c):
            with A() as c:
                globals()["gblvar"] += 4; return A()
    self.assertEqual(1,1)
    with WithInEnterExit() as wie:
        with wie as wie_wie:
            globals()["gblvar"] += 100
    self.assertEqual(globals()["gblvar"],116)
def test_thread_lock(self):
    """Locks from the (Python 2) thread module must act as context managers."""
    import thread
    lock = thread.allocate_lock()
    # The lock must expose the context-manager protocol.
    for protocol_attr in ("__enter__", "__exit__"):
        self.assertTrue(hasattr(lock, protocol_attr))
    # Unlocked before, held inside the with block, released afterwards.
    self.assertTrue(not lock.locked())
    with lock:
        self.assertTrue(lock.locked())
    self.assertTrue(not lock.locked())
    # A freshly allocated lock works directly as a context manager too.
    with thread.allocate_lock():
        pass
def test_with_file(self):
    """A file object (Python 2 builtin) must be usable as a context manager."""
    handle = file('abc.txt', 'w')
    with handle:
        pass
def test_try_catch_finally(self):
# test try-catch-finally syntax
globals()["gblvar"] = 1
def setvar() : globals()["gblvar"] += 1
#missing except,else
try:
setvar()
# missing else, finally
try:1 / 0
except ZeroDivisionError: setvar()
# missing else
try:
setvar()
a =[]
a[10]
except ZeroDivisionError: assert(False)
except IndexError: setvar()
finally: setvar()
finally:
setvar()
self.assertEqual(globals()["gblvar"],7)
globals()["gblvar"] = 1
class MyErr1(Exception) :pass
class MyErr2(Exception) :pass
class MyErr3(Exception) :pass
class MyErr4(Exception) :pass
def TestUnifiedTry(myraise1,myraise2, myraise3,myraise4,myraise5,myraise6,myraise7,myraise8,myraise9):
try:
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise1 == "raiseInTry" :setvar(); raise MyErr1
if myraise1 == "outerTry" :setvar(); raise MyErr2
if myraise1 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise2 == "raiseInExcept": setvar(); raise MyErr2
if myraise2 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise2 == "raiseInElse": setvar(); raise MyErr2
if myraise2 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise3 == "raiseInFinally": setvar(); raise MyErr3
if myraise3 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
except MyErr2:
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise4 == "raiseInTry" :setvar(); raise MyErr1
if myraise4 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise6 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
except MyErr3:
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise4 == "raiseInTry" :setvar(); raise MyErr1
if myraise4 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise6 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
else :
yield 1; setvar()
yield 2; setvar()
try:
setvar()
if myraise4 == "raiseInTry" :setvar(); raise MyErr1
if myraise4 == "Unhandled" :setvar(); raise MyErr4
setvar()
except MyErr1:
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
except :setvar() # should never be executed
else :
setvar()
if myraise5 == "Unhandled": setvar(); raise MyErr4
setvar()
finally :
setvar()
if myraise6 == "Unhandled": setvar(); raise MyErr4
setvar()
yield 1; setvar()
yield 2; setvar()
finally :
#uncomment the following 2 lines once we have the fix for PS:1752
#and accordingly adjust the final expected result value
#yield 1; setvar()
#yield 2; setvar()
try:
setvar()
if myraise7 == | |
param convert_upper. This way for
#mugration (ancestral reconstruction of non-sequences), you can
#use upper- and lower case characters for discrete states!
if (not self.is_vcf) and self.convert_upper:
self._aln = MultipleSeqAlignment([seq.upper() for seq in self._aln])
if self.seq_len:
if self.is_vcf and self.seq_len!=len(self.ref):
self.logger("TreeAnc.aln: specified sequence length doesn't match reference length, ignoring sequence length.", 1, warn=True)
self._seq_len = len(self.ref)
else:
self.logger("TreeAnc.aln: specified sequence length doesn't match alignment length. Treating difference as constant sites.", 2, warn=True)
self.additional_constant_sites = max(0, self.seq_len - self.aln.get_alignment_length())
else:
if self.is_vcf:
self.seq_len = len(self.ref)
else:
self.seq_len = self.aln.get_alignment_length()
# check whether the alignment is consistent with a nucleotide alignment.
likely_alphabet = self._guess_alphabet()
from .seq_utils import alphabets
# if likely alignment is not nucleotide but the gtr alignment is, WARN
if likely_alphabet=='aa' and self.gtr.n_states==len(alphabets['nuc']) and np.all(self.gtr.alphabet==alphabets['nuc']):
self.logger('WARNING: small fraction of ACGT-N in alignment. Really a nucleotide alignment? if not, rerun with --aa', 1, warn=True)
# conversely, warn if alignment is consistent with nucleotide but gtr has a long alphabet
if likely_alphabet=='nuc' and self.gtr.n_states>10:
self.logger('WARNING: almost exclusively ACGT-N in alignment. Really a protein alignment?', 1, warn=True)
if hasattr(self, '_tree') and (self.tree is not None):
self._attach_sequences_to_nodes()
else:
self.logger("TreeAnc.aln: sequences not yet attached to tree", 3, warn=True)
@property
def seq_len(self):
    """length of the uncompressed sequence

    Returns
    -------
    int
        number of positions in the full (uncompressed) sequence alignment
    """
    return self._seq_len
@seq_len.setter
def seq_len(self,L):
    """set the length of the uncompressed sequence. its inverse 'one_mutation'
    is frequently used as a general length scale. This can't be changed once
    it is set.

    Parameters
    ----------
    L : int
        length of the sequence alignment
    """
    if (not hasattr(self, '_seq_len')) or self._seq_len is None:
        # First assignment: coerce to int. A falsy L (0/None) leaves it unset.
        if L:
            self._seq_len = int(L)
    else:
        # Already set: refuse the reassignment, log only.
        self.logger("TreeAnc: one_mutation and sequence length can't be reset",1)
@property
def one_mutation(self):
    """Inverse of the uncompressed sequence length.

    Frequently used as the natural length scale for short branches.

    Returns
    -------
    float
        1/seq_len, or NaN when the sequence length is unset or zero.
    """
    length = self.seq_len
    if not length:
        return np.nan
    return 1.0 / length
@one_mutation.setter
def one_mutation(self,om):
    """Reject assignment: one_mutation is derived from seq_len; log a warning."""
    warning = "TreeAnc: one_mutation can't be set"
    self.logger(warning, 1)
@property
def ref(self):
    """
    Get the str reference nucleotide sequence currently used by TreeAnc.
    When having read alignment in from a VCF, this is what variants map to.

    :setter: Sets the string reference sequence
    :getter: Returns the string reference sequence
    """
    # Plain accessor; invalidation of alignment caches happens in the setter.
    return self._ref
@ref.setter
def ref(self, in_ref):
    """
    Parameters
    ----------
    in_ref : str
        reference sequence for the vcf sequence dict as a plain string
    """
    # A new reference invalidates everything derived from the previous one:
    # the alignment and the compressed-alignment bookkeeping.
    self._aln = None
    self.reduced_to_full_sequence_map = None
    self.multiplicity = None
    self._ref = in_ref
def extend_profile(self):
    """Register alignment characters unknown to the GTR model.

    Scans the reference (VCF input) or the terminal-node sequences for
    characters missing from ``gtr.profile_map`` and adds each one as a
    fully ambiguous state (vector of ones), logging a warning. No-op when
    no alignment is attached.
    """
    if not self.aln:
        return
    if self.is_vcf and self.ref:
        observed = np.unique(list(self.ref))
    else:
        collected = []
        for leaf in self.tree.get_terminals():
            collected.extend(np.unique(leaf.sequence))
        observed = np.unique(collected)
    for state in observed:
        if state not in self.gtr.profile_map:
            # Unknown character: treat it as carrying no information.
            self.gtr.profile_map[state] = np.ones(self.gtr.n_states)
            self.logger("WARNING: character %s is unknown. Treating it as missing information"%state,1,warn=True)
def _guess_alphabet(self):
    """Heuristically classify the alignment as nucleotide or amino acid.

    Counts occurrences of 'ACGT-N' characters; when they make up more than
    90% of the data the alignment is deemed nucleotide. With no alignment
    attached, defaults to 'nuc'.

    Returns
    -------
    str
        'nuc' or 'aa'
    """
    if not self.aln:
        return 'nuc'
    nuc_chars = 'ACGT-N'
    if self.is_vcf and self.ref:
        total = self.seq_len
        reference = self.ref.upper()
        nuc_count = sum(reference.count(c) for c in nuc_chars)
    else:
        total = self.seq_len * len(self.aln)
        nuc_count = 0
        for rec in self.aln:
            upper_seq = rec.seq.upper()
            nuc_count += sum(upper_seq.count(c) for c in nuc_chars)
    return 'nuc' if nuc_count > 0.9 * total else 'aa'
def _attach_sequences_to_nodes(self):
    '''
    For each node of the tree, check whether there is a sequence available
    in the alignment and assign this sequence as a character array.
    Leaves without a sequence get a fully ambiguous placeholder; internal
    nodes without one are silently skipped. Finishes by extending the GTR
    profile and building the reduced alignment.
    '''
    failed_leaves= 0
    if self.is_vcf:
        # if alignment is specified as difference from ref
        dic_aln = self.aln
    else:
        # if full alignment is specified
        dic_aln = {k.name: seq2array(k.seq, fill_overhangs=self.fill_overhangs,
                                     ambiguous_character=self.gtr.ambiguous)
                   for k in self.aln} #
    # loop over leaves and assign multiplicities of leaves (e.g. number of identical reads)
    for l in self.tree.get_terminals():
        if l.name in self.seq_multiplicity:
            l.count = self.seq_multiplicity[l.name]
        else:
            l.count = 1.0
    # loop over tree, and assign sequences
    for l in self.tree.find_clades():
        if l.name in dic_aln:
            l.sequence= dic_aln[l.name]
        elif l.is_terminal():
            self.logger("***WARNING: TreeAnc._attach_sequences_to_nodes: NO SEQUENCE FOR LEAF: %s" % l.name, 0, warn=True)
            failed_leaves += 1
            # Fall back to a fully ambiguous sequence so downstream code can proceed.
            l.sequence = seq2array(self.gtr.ambiguous*self.seq_len, fill_overhangs=self.fill_overhangs,
                                   ambiguous_character=self.gtr.ambiguous)
            # Bail out when more than a third of the leaves have no sequence —
            # the alignment most likely does not belong to this tree.
            if failed_leaves > self.tree.count_terminals()/3:
                self.logger("ERROR: At least 30\\% terminal nodes cannot be assigned with a sequence!\n", 0, warn=True)
                self.logger("Are you sure the alignment belongs to the tree?", 2, warn=True)
                break
        else: # could not assign sequence for internal node - is OK
            pass
    if failed_leaves:
        self.logger("***WARNING: TreeAnc: %d nodes don't have a matching sequence in the alignment."
                    " POSSIBLE ERROR."%failed_leaves, 0, warn=True)
    # extend profile to contain additional unknown characters
    self.extend_profile()
    return self.make_reduced_alignment()
def make_reduced_alignment(self):
"""
Create the reduced alignment from the full sequences attached to (some)
tree nodes. The methods collects all sequences from the tree nodes, creates
the alignment, counts the multiplicity for each column of the alignment
('alignment pattern'), and creates the reduced alignment, where only the
unique patterns are present. The reduced alignment and the pattern multiplicity
are sufficient for the GTR calculations and allow to save memory on profile
instantiation.
The maps from full sequence to reduced sequence and back are also stored to allow
compressing and expanding the sequences.
Notes
-----
full_to_reduced_sequence_map : (array)
Map to reduce a sequence
reduced_to_full_sequence_map : (dict)
Map to restore sequence from reduced alignment
multiplicity : (array)
Numpy array, which stores the pattern multiplicity for each position of the reduced alignment.
reduced_alignment : (2D numpy array)
The reduced alignment. Shape is (N x L'), where N is number of
sequences, L' - number of unique alignment patterns
cseq : (array)
The compressed sequence (corresponding row of the reduced alignment) attached to each node
"""
self.logger("TreeAnc: making reduced alignment...", 1)
# bind positions in real sequence to that of the reduced (compressed) sequence
self.full_to_reduced_sequence_map = np.zeros(self.seq_len, dtype=int)
# bind position in reduced sequence to the array of positions in real (expanded) sequence
self.reduced_to_full_sequence_map = {}
#if is a dict, want to be efficient and not iterate over a bunch of const_sites
#so pre-load alignment_patterns with the location of const sites!
#and get the sites that we want to iterate over only!
if self.is_vcf:
tmp_reduced_aln, alignment_patterns, positions = self.process_alignment_dict()
seqNames = self.aln.keys() #store seqName order to put back on tree
elif self.reduce_alignment:
# transpose real alignment, for ease of iteration
alignment_patterns = {}
tmp_reduced_aln = []
# NOTE the order of tree traversal must be the same as below
# for assigning the cseq attributes to the nodes.
seqs = [n.sequence for n in self.tree.find_clades() if hasattr(n, 'sequence')]
if len(np.unique([len(x) for x in seqs]))>1:
self.logger("TreeAnc: Sequences differ in in length! ABORTING",0, warn=True)
aln_transpose = None
raise TypeError
else:
aln_transpose = np.array(seqs).T
positions = range(aln_transpose.shape[0])
else:
self.multiplicity = np.ones(self.seq_len, dtype=float)
self.full_to_reduced_sequence_map = np.arange(self.seq_len)
self.reduced_to_full_sequence_map = {p:np.array([p]) for p in np.arange(self.seq_len)}
for n in self.tree.find_clades():
if hasattr(n, 'sequence'):
n.original_cseq = np.copy(n.sequence)
n.cseq = np.copy(n.sequence)
return ttconf.SUCCESS
for pi in positions:
if self.is_vcf:
pattern = [ self.aln[k][pi] if pi in self.aln[k].keys()
else self.ref[pi] for k,v in self.aln.items() ]
else:
pattern = aln_transpose[pi]
str_pat = "".join(pattern)
# if the column contains only one state and ambiguous nucleotides, replace
# those with the state in other strains right away
unique_letters = list(np.unique(pattern))
#create a copy so we don't modify aln_transpose
fixed_pattern = np.copy(pattern)
if hasattr(self.gtr, "ambiguous"):
if len(unique_letters)==2 and self.gtr.ambiguous in unique_letters:
other = [c for c in unique_letters if c!=self.gtr.ambiguous][0]
str_pat = str_pat.replace(self.gtr.ambiguous, other)
#also replace in original pattern!
fixed_pattern[fixed_pattern == self.gtr.ambiguous] = other
unique_letters = [other]
# if there is a mutation in this column, give it its private pattern
# this is required when sampling mutations from reconstructed profiles.
# otherwise, all mutations corresponding to the same pattern will be coupled.
if len(unique_letters)>1:
str_pat += '_%d'%pi
# if the pattern is not yet seen,
if str_pat not in alignment_patterns:
# bind the index in the reduced aln, index in sequence to the pattern string
alignment_patterns[str_pat] = (len(tmp_reduced_aln), [pi])
# append this pattern to the reduced alignment
tmp_reduced_aln.append(fixed_pattern)
else:
# if the pattern is already seen, append the position in the real
# sequence to the reduced aln<->sequence_pos_indexes map
alignment_patterns[str_pat][1].append(pi)
# add constant alignment column not in the alignment. We don't know where they
# are, so just add them to the end. First, determine sequence composition.
if self.additional_constant_sites:
character_counts = {c:np.sum(aln_transpose==c) | |
<filename>AwesomeService/coveo-blitz-thrift/src/main/python/awesome/AwesomeService.py<gh_stars>1-10
#
# Autogenerated by Thrift Compiler (0.9.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
# The C-accelerated (de)serializer is optional; fall back to the pure-Python
# protocol implementation when the extension is unavailable. A bare `except:`
# here would also swallow SystemExit/KeyboardInterrupt and mask unrelated
# errors, so catch only the import failure.
try:
    from thrift.protocol import fastbinary
except ImportError:
    fastbinary = None
class Iface:
    """Abstract AwesomeService interface: stubs to be overridden by a
    concrete client or server handler. Each stub is a no-op returning None."""

    def getData(self, request):
        """Gets data from your service. The type and format of the requests
        are defined in the documentation.

        Parameters:
         - request
        """

    def reset(self):
        """Stub for the reset call."""

    def ping(self):
        """Stub for the ping call."""

    def handleMapReduceResult(self, name, data):
        """Stub for delivering a map-reduce result.

        Parameters:
         - name
         - data
        """
class Client(Iface):
    """Synchronous Thrift client stub (compiler-generated structure).

    Each public method serializes its arguments via send_<name>, flushes the
    transport, then blocks in recv_<name> reading the matching reply.
    """
    def __init__(self, iprot, oprot=None):
        # With a single protocol, use it for both input and output.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        # Sequence id sent in every message header (never incremented here).
        self._seqid = 0
    def getData(self, request):
        """
        Gets data from your service. The type and format of the requests are defined in the documentation.
        Parameters:
         - request
        """
        self.send_getData(request)
        return self.recv_getData()
    def send_getData(self, request):
        # Write the call frame: message header, args struct, end marker, flush.
        self._oprot.writeMessageBegin('getData', TMessageType.CALL, self._seqid)
        args = getData_args()
        args.request = request
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_getData(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            # Server-side failure: deserialize and re-raise locally.
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getData_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        # Non-void call that produced no result is a protocol error.
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getData failed: unknown result");
    def reset(self):
        # Void call: send, then wait for the (empty) acknowledgement.
        self.send_reset()
        self.recv_reset()
    def send_reset(self):
        self._oprot.writeMessageBegin('reset', TMessageType.CALL, self._seqid)
        args = reset_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_reset(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = reset_result()
        result.read(iprot)
        iprot.readMessageEnd()
        # Void method: nothing to return.
        return
    def ping(self):
        self.send_ping()
        return self.recv_ping()
    def send_ping(self):
        self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
        args = ping_args()
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_ping(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = ping_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result");
    def handleMapReduceResult(self, name, data):
        """
        Parameters:
         - name
         - data
        """
        self.send_handleMapReduceResult(name, data)
        self.recv_handleMapReduceResult()
    def send_handleMapReduceResult(self, name, data):
        self._oprot.writeMessageBegin('handleMapReduceResult', TMessageType.CALL, self._seqid)
        args = handleMapReduceResult_args()
        args.name = name
        args.data = data
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()
    def recv_handleMapReduceResult(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = handleMapReduceResult_result()
        result.read(iprot)
        iprot.readMessageEnd()
        # Void method: nothing to return.
        return
class Processor(Iface, TProcessor):
  """
  Server-side dispatcher: maps incoming message names to the matching
  process_* method, which decodes args, invokes the handler, and replies.
  """
  def __init__(self, handler):
    self._handler = handler
    self._processMap = {}
    self._processMap["getData"] = Processor.process_getData
    self._processMap["reset"] = Processor.process_reset
    self._processMap["ping"] = Processor.process_ping
    self._processMap["handleMapReduceResult"] = Processor.process_handleMapReduceResult
  def process(self, iprot, oprot):
    # NOTE(review): the local name `type` shadows the builtin (generated code).
    (name, type, seqid) = iprot.readMessageBegin()
    if name not in self._processMap:
      # Unknown method: drain the args struct, then reply with an EXCEPTION frame.
      iprot.skip(TType.STRUCT)
      iprot.readMessageEnd()
      x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
      oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
      x.write(oprot)
      oprot.writeMessageEnd()
      oprot.trans.flush()
      # NOTE(review): this branch returns None while the known-method branch
      # returns True — confirm callers do not depend on the return value.
      return
    else:
      self._processMap[name](self, seqid, iprot, oprot)
    return True
  def process_getData(self, seqid, iprot, oprot):
    # Decode args, call the handler, write the REPLY frame carrying success.
    args = getData_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = getData_result()
    result.success = self._handler.getData(args.request)
    oprot.writeMessageBegin("getData", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_reset(self, seqid, iprot, oprot):
    args = reset_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = reset_result()
    self._handler.reset()
    oprot.writeMessageBegin("reset", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_ping(self, seqid, iprot, oprot):
    args = ping_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = ping_result()
    result.success = self._handler.ping()
    oprot.writeMessageBegin("ping", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
  def process_handleMapReduceResult(self, seqid, iprot, oprot):
    args = handleMapReduceResult_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = handleMapReduceResult_result()
    self._handler.handleMapReduceResult(args.name, args.data)
    oprot.writeMessageBegin("handleMapReduceResult", TMessageType.REPLY, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class getData_args:
  """
  Thrift argument struct for Iface.getData.

  Attributes:
   - request
  """
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'request', (Request, Request.thrift_spec), None, ), # 1
  )
  def __init__(self, request=None,):
    self.request = request
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the struct field by field until STOP.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.request = Request()
          self.request.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    # Fast path mirrors read(); otherwise write fields explicitly.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getData_args')
    if self.request is not None:
      oprot.writeFieldBegin('request', TType.STRUCT, 1)
      self.request.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.request)
    return value
  def __repr__(self):
    # items() instead of the Python-2-only iteritems(): repr() crashed on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class getData_result:
  """
  Thrift result struct for Iface.getData.

  Attributes:
   - success
  """
  thrift_spec = (
    (0, TType.STRUCT, 'success', (Response, Response.thrift_spec), None, ), # 0
  )
  def __init__(self, success=None,):
    self.success = success
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the struct field by field until STOP.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        if ftype == TType.STRUCT:
          self.success = Response()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('getData_result')
    if self.success is not None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.success)
    return value
  def __repr__(self):
    # items() instead of the Python-2-only iteritems(): repr() crashed on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class reset_args:
  """Thrift argument struct for Iface.reset (declares no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        # No declared fields: skip anything the peer sent.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('reset_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    return value
  def __repr__(self):
    # items() instead of the Python-2-only iteritems(): repr() crashed on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class reset_result:
  """Thrift result struct for Iface.reset (void return, no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        # No declared fields: skip anything the peer sent.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('reset_result')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    return value
  def __repr__(self):
    # items() instead of the Python-2-only iteritems(): repr() crashed on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ping_args:
  """Thrift argument struct for Iface.ping (declares no fields)."""
  thrift_spec = (
  )
  def read(self, iprot):
    # Fast path: C-accelerated decode when the protocol/transport support it.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      else:
        # No declared fields: skip anything the peer sent.
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ping_args')
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    return
  def __hash__(self):
    value = 17
    return value
  def __repr__(self):
    # items() instead of the Python-2-only iteritems(): repr() crashed on Python 3.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.items()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ping_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.BOOL, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, | |
# Source file: orchestration/lxc/dev_environment/proxyb/rootfs/home/ubuntu/synproxy_dataplane.py
#!/usr/bin/env python3
"""
Copyright <2018> <<NAME>, Aalto University>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# Run as:
# ./synproxy_dataplane.py --nic-wan test_wan0 --nic-wanp test_wan0p --ipaddr 127.0.0.1 --port 12345
# Run standalone with supporting network configured
# ./synproxy_dataplane.py --nic-wan test_wan0 --nic-wanp test_wan0p --ipaddr 127.0.0.1 --port 12345 --default-tcpmss 1460 --default-tcpsack 1 --default-tcpwscale 7 --standalone
# Run standalone modifying networking from the process
# ./synproxy_dataplane.py --nic-wan test_wan0 --nic-wanp test_wan0p --ipaddr 127.0.0.1 --port 12345 --default-tcpmss 1460 --default-tcpsack 1 --default-tcpwscale 7 --standalone --secure-net 172.16.58.3/24 192.168.127.12/32 --default-gw
import asyncio
import argparse
import ipaddress
import logging
import os
import socket
import struct
import subprocess
import sys
from contextlib import suppress
from helpers_n_wrappers import iptc_helper3, container3
def synproxy_build_message(mode, ipaddr, port, proto, tcpmss, tcpsack, tcpwscale):
    """
    Build and return a 12-byte synchronization message.

    Layout (network byte order):
    - 32 bits: IPv4 address
    - 16 bits: Port number
    - 8 bits:  Protocol
    - 8 bits:  Flags
    - 16 bits: TCP MSS value
    - 8 bits:  TCP SACK value [0,1]
    - 8 bits:  TCP window scaling value [0-14]
    """
    # Map each mode to its flag bit; an unrecognized mode yields flags == 0.
    mode_flags = {'flush': 0b0000001, 'add': 0b0000010, 'mod': 0b0000100, 'del': 0b0001000}
    flags = mode_flags.get(mode, 0)
    # 'flush' and 'del' carry no TCP options; 'flush' also wipes port/proto.
    if mode in ('flush', 'del'):
        tcpmss = 0
        tcpsack = 0
        tcpwscale = 0
        if mode == 'flush':
            port = 0
            proto = 0
    payload = struct.pack('!HBBHBB', port, proto, flags, tcpmss, tcpsack, tcpwscale)
    return socket.inet_pton(socket.AF_INET, ipaddr) + payload
def synproxy_parse_message(data):
    """ Return tuple (mode, ipaddr, port, proto, tcpmss, tcpsack, tcpwscale) """
    # Messages are exactly 12 bytes; anything else is malformed.
    if len(data) != 12:
        raise Exception('wrong message')
    ipaddr = socket.inet_ntop(socket.AF_INET, data[0:4])
    port, proto, flags, tcpmss, tcpsack, tcpwscale = struct.unpack('!HBBHBB', data[4:12])
    # Flag precedence: flush > add > mod > del (first match wins).
    if flags & 0b0000001:
        # A flush carries no per-connection data; normalize all fields to zero.
        return ('flush', ipaddr, 0, 0, 0, 0, 0)
    if flags & 0b0000010:
        mode = 'add'
    elif flags & 0b0000100:
        mode = 'mod'
    elif flags & 0b0001000:
        mode = 'del'
    else:
        raise Exception('wrong message')
    # Port and proto flags are not processed further.
    return (mode, ipaddr, port, proto, tcpmss, tcpsack, tcpwscale)
class SYNProxyConnectionTCP(container3.ContainerNode):
    """
    Connection-table node storing the TCP negotiation parameters (MSS, SACK,
    window scaling) configured for one (ipaddr, port, proto) target, plus the
    pre-built iptables rule object that implements it.
    """
    def __init__(self, ipaddr, port, proto, tcpmss, tcpsack, tcpwscale, ipt_rule):
        # NOTE(review): container3.ContainerNode.__init__ is never invoked here —
        # confirm the base class requires no initialization.
        self.ipaddr = ipaddr
        self.port = port
        self.proto = proto
        self.tcpmss = tcpmss
        self.tcpsack = tcpsack
        self.tcpwscale = tcpwscale
        # Add rule object to minimize Netlink load
        self.ipt_rule = ipt_rule
    def lookupkeys(self):
        """ Return the lookup keys of the node. """
        # Create 3-tuple and 1-tuple (IP-based) matches
        return (((self.ipaddr, self.port, self.proto), True), (self.ipaddr, False))
    def dump(self):
        """ Return a string representation of the node. """
        return '[{}] {}:{} / mss={} sack={} wscale={}'.format(self.proto, self.ipaddr, self.port, self.tcpmss, self.tcpsack, self.tcpwscale)
    def __repr__(self):
        return self.dump()
class SYNProxyDataplane():
def __init__(self, **kwargs):
self._logger = logging.getLogger('SYNProxyDataplane')
# Store local parameters
self.nic_wan = kwargs['nic_wan']
self.nic_wanp = kwargs['nic_wanp']
self.default_gw = kwargs['default_gw']
self.secure_net = kwargs['secure_net']
self.ratelimit = kwargs['ratelimit']
# Create a connection table to store the rules
self.connectiontable = container3.Container(name='ConnectionTable')
# Continue with bootstrapping actions
self.ovs_create()
self.ovs_init_flows()
self.ipt_init_flows()
# Start monitor task
self.tasks = []
_t = asyncio.ensure_future(self.monitor(10))
self.tasks.append(_t)
# Run in standalone mode with default flow
if kwargs['standalone']:
self._logger.info('Standalone more active, insert default rule')
self._do_mod('0.0.0.0', 0, 6, kwargs['tcpmss'], kwargs['tcpsack'], kwargs['tcpwscale'])
@asyncio.coroutine
def shutdown(self):
self._logger.warning('Removing OpenvSwitch instance')
## Delete OVS bridge
to_exec = ['ovs-vsctl --if-exists del-br br-synproxy']
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = False, silent = False)
## Restart OpenvSwitch service
to_exec = ['systemctl restart openvswitch-switch']
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = False)
## Cancel running tasks
for _task in self.tasks:
with suppress(asyncio.CancelledError):
del _task
yield from asyncio.sleep(1)
def ovs_create(self):
self._logger.info('Create OpenvSwitch instance')
## Enable IP forwarding
to_exec = ['sysctl -w net.ipv4.ip_forward=1']
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = True)
# Setting up TCP SYNPROXY ipt_SYNPROXY
# https://r00t-services.net/knowledgebase/14/Homemade-DDoS-Protection-Using-IPTables-SYNPROXY.html
to_exec = ['sysctl -w net.ipv4.tcp_syncookies=1',
'sysctl -w net.ipv4.tcp_timestamps=1',
'sysctl -w net.netfilter.nf_conntrack_tcp_loose=0']
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = False, silent = True)
## Restart OpenvSwitch service
to_exec = ['systemctl restart openvswitch-switch']
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = False)
## Create OVS bridge
to_exec = ['ovs-vsctl --if-exists del-br br-synproxy',
'ovs-vsctl add-br br-synproxy']
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = False)
## Add ports
to_exec = ['ovs-vsctl add-port br-synproxy {0} -- set interface {0} ofport_request=1'.format(self.nic_wan),
'ovs-vsctl add-port br-synproxy mitm0 -- set interface mitm0 ofport_request=2 -- set interface mitm0 type=internal # Connected to *WAN*',
'ovs-vsctl add-port br-synproxy mitm1 -- set interface mitm1 ofport_request=3 -- set interface mitm1 type=internal # Connected to *WAN_proxied*',
'ovs-vsctl add-port br-synproxy {0} -- set interface {0} ofport_request=4'.format(self.nic_wanp)]
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = False)
## Configure ports
to_exec = ['ip link set dev mitm0 arp off',
'ip link set dev mitm0 address 00:00:00:aa:bb:cc',
'ip link set dev mitm0 up',
'ip link set dev mitm1 arp off',
'ip link set dev mitm1 address 00:00:00:dd:ee:ff',
'ip link set dev mitm1 up',
'ip link set dev {0} up'.format(self.nic_wan),
'ip link set dev {0} up'.format(self.nic_wanp)]
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = False)
## Configure txqueuelen / default is 1000
MAX_QLEN=25000
to_exec = ['ip link set dev {} qlen {}'.format(self.nic_wan, MAX_QLEN),
'ip link set dev {} qlen {}'.format(self.nic_wanp, MAX_QLEN),
'ip link set dev mitm0 qlen {}'.format(MAX_QLEN),
'ip link set dev mitm1 qlen {}'.format(MAX_QLEN),
'ip link set dev br-synproxy qlen {}'.format(MAX_QLEN)]
for _ in to_exec:
self._do_subprocess_call(_, raise_exc = True, silent = False)
## Configure default gateway
if self.default_gw:
self._logger.info('Adding route to default gateway via mimt0')
_ = 'ip route add default dev mitm0 metric 100'
self._do_subprocess_call(_, raise_exc = False, silent = True)
## Configure secure networks
for net in self.secure_net:
self._logger.info('Adding route to secure network {} via mitm1'.format(net))
_ = 'ip route add {} dev mitm1'.format(net)
self._do_subprocess_call(_, raise_exc = False, silent = True)
    def ovs_init_flows(self):
        """Install the OpenFlow pipeline on br-synproxy (tables 0-2 and 10-12)."""
        self._logger.info('Initialize OpenvSwitch flows')
        ## Initialize flow table
        to_exec = [### Delete default flows
                   'ovs-ofctl del-flows -O OpenFlow13 br-synproxy',
                   ### Table 0: Traffic selector
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=0,priority=100,dl_type=0x0800 actions=resubmit(,2)"',
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=0,priority=1 actions=resubmit(,1)"',
                   ### Table 1: Enable transparent L2-switching
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=1,priority=100,in_port=1 actions=output:4"',
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=1,priority=100,in_port=4 actions=output:1"',
                   ### Table 2: Controls the packet pipelining
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=2,priority=100 actions=resubmit(,10),resubmit(,11),resubmit(,12)"',
                   ### Table 10: Load port values in NXM registry
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=10,priority=100 actions=load:0x0001->NXM_NX_REG0[0..15],load:0x0002->NXM_NX_REG1[0..15],load:0x0003->NXM_NX_REG2[0..15],load:0x0004->NXM_NX_REG3[0..15]"',
                   ### Table 11: Contains the learning flows
                   # Learn new flows coming from WAN
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=11,priority=1,in_port=1,dl_type=0x0800 \
                   actions=learn(table=12,priority=100,in_port=2,dl_type=0x0800,NXM_OF_IP_SRC[]=NXM_OF_IP_DST[] load:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],load:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[] output:NXM_NX_REG0[0..15]), \
                   learn(table=12,priority=100,in_port=3,dl_type=0x0800,NXM_OF_IP_DST[]=NXM_OF_IP_DST[] load:NXM_OF_ETH_SRC[]->NXM_OF_ETH_SRC[],load:NXM_OF_ETH_DST[]->NXM_OF_ETH_DST[] output:NXM_NX_REG3[0..15])"',
                   # Learn new flows coming from WAN_proxied
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=11,priority=1,in_port=4,dl_type=0x0800 \
                   actions=learn(table=12,priority=100,in_port=2,dl_type=0x0800,NXM_OF_IP_SRC[]=NXM_OF_IP_SRC[] load:NXM_OF_ETH_SRC[]->NXM_OF_ETH_SRC[],load:NXM_OF_ETH_DST[]->NXM_OF_ETH_DST[] output:NXM_NX_REG0[0..15]), \
                   learn(table=12,priority=100,in_port=3,dl_type=0x0800,NXM_OF_IP_DST[]=NXM_OF_IP_SRC[] load:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],load:NXM_OF_ETH_DST[]->NXM_OF_ETH_SRC[] output:NXM_NX_REG3[0..15])"',
                   ### Table 12: Contains the self-populated learned flows and the default forwarding flows
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=12,priority=1,in_port=1, actions=load:0x000000aabbcc->NXM_OF_ETH_DST[], output:2"',
                   'ovs-ofctl add-flow -O OpenFlow13 br-synproxy "table=12,priority=1,in_port=4, actions=load:0x000000ddeeff->NXM_OF_ETH_DST[], output:3"']
        for _ in to_exec:
            self._do_subprocess_call(_, raise_exc = True, silent = False)
def ipt_init_flows(self):
# Specific TCP flows are inserted/appended to filter.synproxy_chain """
self._logger.info('Initialize iptables | |
'observer' in battle_armors_files[new_image['Battle_armors']] or 'Explorer' in battle_armors_files[new_image['Battle_armors']]:
random_type_arm_armor = random.randint(0, 9)
random_type_helmetless_armor = random.randint(0, 3)
random_type_warpaint = random.randint(0, 1)
new_image['Arm_armor'] = arm_armor[random_type_arm_armor * 10 + random_color_index_three]
new_image['Battle_armors'] = battle_armors[random_type_helmetless_armor * 10 + 16 + random_color_index_three]
new_image["War_paint"] = war_paint[random_type_warpaint * 10 + random_color_index_three]
# battle armors
else:
random_type_warpaint_special = random.randint(0, 13)
if 'commander silver' in battle_armors_files[new_image['Battle_armors']]:
if not 'commander silver' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
else:
new_image["War_paint"] = war_paint[random_type_warpaint_special * 10]
# if not 'silver' in head_pieces_files[new_image['Head_pieces']] and 'and silver' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'aqua' in battle_armors_files[new_image['Battle_armors']] or 'trooper angel' in battle_armors_files[new_image['Battle_armors']]:
if not 'aqua' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'aqua' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'trooper black' in battle_armors_files[new_image['Battle_armors']]:
if not 'black' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
else:
new_image["War_paint"] = war_paint[random_type_warpaint_special * 10 + 1]
# if not 'black' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'brawler' in battle_armors_files[new_image['Battle_armors']]:
if not 'brawler' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'aqua' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'bubblegum' in battle_armors_files[new_image['Battle_armors']]:
if not 'bubblegum' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'Bubblegum' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'commander bronze' in battle_armors_files[new_image['Battle_armors']]:
if not 'commander bronze' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'gold' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'gold commander' in battle_armors_files[new_image['Battle_armors']]:
if not 'commander gold' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
else:
new_image["War_paint"] = war_paint[random_type_warpaint_special * 10 + 4]
# if not 'gold' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'gold and silver' in battle_armors_files[new_image['Battle_armors']] or 'trooper gold' in battle_armors_files[new_image['Battle_armors']]:
if not 'trooper gold' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
else:
new_image["War_paint"] = war_paint[random_type_warpaint_special * 10]
# if not 'gold and silver' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'trooper lava' in battle_armors_files[new_image['Battle_armors']]:
if not 'lava' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'lava' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'moss' in battle_armors_files[new_image['Battle_armors']]:
if not 'moss' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
if 'trooper moss' in battle_armors_files[new_image['Battle_armors']]:
if not 'moss' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'moss' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'protector' in battle_armors_files[new_image['Battle_armors']]:
if not 'protector' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'angel' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'rainbow' in battle_armors_files[new_image['Battle_armors']]:
if not 'rainbow' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'rainbow' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'armor silver' in battle_armors_files[new_image['Battle_armors']]:
if not 'trooper silver' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
else:
new_image["War_paint"] = war_paint[random_type_warpaint_special * 10]
# if not 'silver' in head_pieces_files[new_image['Head_pieces']] and 'and silver' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
if 'trooper sun' in battle_armors_files[new_image['Battle_armors']]:
if not 'sun' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
# if not 'sun' in head_pieces_files[new_image['Head_pieces']]:
# return create_new_image()
# enclosed armors and mage hood can only have short hair
if 'enclosed_armors' in new_image['Battle_armors']:
if not ('short' in hair_files[new_image['Hair']] or 'lizard' in hair_files[new_image['Hair']]):
return create_new_image()
if not('handlebars' in beards_files[new_image['Beards']] or 'goatee mustache' in beards_files[new_image['Beards']] or 'scruffy braid' in beards_files[new_image['Beards']] or 'short braids' in beards_files[new_image['Beards']]):
return create_new_image()
# Only BEARDS = Handlebars, Goatee mustache, Scruffy Braid beards can go with enclosed armors or Pilot Helmets
if 'handlebars' in beards_files[new_image['Beards']] or 'goatee mustache' in beards_files[new_image['Beards']] or 'scruffy braid' in beards_files[new_image['Beards']]:
if not 'enclosed_armors' in new_image['Battle_armors']:
return create_new_image()
# Helmet Eyebrows are for Helmets and Enclose Suits/Armors
if 'helmet' in eyebrows_files[new_image['Eyebrows']]:
# if new_image['Head_pieces'] == 'head_pieces-5' or not 'enclosed_armors' in new_image['Battle_armors']:
if not 'enclosed_armors' in new_image['Battle_armors']:
return create_new_image()
# When a suit is picked it's corresponding suit back must be added
if ('explorer' in battle_armors_files[new_image['Battle_armors']] or 'Explorer' in battle_armors_files[new_image['Battle_armors']]) and not 'helmetless' in battle_armors_files[new_image['Battle_armors']]:
new_image['Armor_back_pieces'] = 'armor_back_pieces-40'
elif 'observer' in battle_armors_files[new_image['Battle_armors']] and not 'helmetless' in battle_armors_files[new_image['Battle_armors']]:
new_image['Armor_back_pieces'] = 'armor_back_pieces-41'
else:
if 'helmetless explorer' in battle_armors_files[new_image['Battle_armors']]:
arr_helmetless_explorer = [
"armor_back_pieces-16",
"armor_back_pieces-17",
"armor_back_pieces-18",
"armor_back_pieces-19",
"armor_back_pieces-20",
"armor_back_pieces-21",
"armor_back_pieces-22",
"armor_back_pieces-23",
"armor_back_pieces-24",
"armor_back_pieces-25",
"armor_back_pieces-26",
"armor_back_pieces-27",
]
new_image['Armor_back_pieces'] = random.choice(arr_helmetless_explorer)
else:
arr_helmetless_observer = [
"armor_back_pieces-28",
"armor_back_pieces-29",
"armor_back_pieces-30",
"armor_back_pieces-31",
"armor_back_pieces-32",
"armor_back_pieces-33",
"armor_back_pieces-34",
"armor_back_pieces-35",
"armor_back_pieces-36",
"armor_back_pieces-37",
"armor_back_pieces-38",
"armor_back_pieces-39",
]
new_image['Armor_back_pieces'] = random.choice(arr_helmetless_observer)
# position Mage Hand, Mage Hand with Mage Effect Weapons
if 'weapon_back' in new_image['Weapon']:
new_image['Arms'] = 'no_arms'
new_image['Arm_armor'] = 'no_arm_armor'
elif 'mage' in arms_files[new_image['Arms']]:
if not 'mage' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
else:
# Double Grip position, with Double Grip arms with Double Grip weapon
if 'double' in arms_files[new_image['Arms']]:
if not 'battle_armors' in new_image['Battle_armors']:
arr_double_arm_armor = [
"arm_armor-1",
"arm_armor-2",
"arm_armor-3",
"arm_armor-4",
"arm_armor-5",
"arm_armor-6",
"arm_armor-7",
"arm_armor-8",
"arm_armor-9",
"arm_armor-10",
"arm_armor-51",
"arm_armor-52",
"arm_armor-53",
"arm_armor-54",
"arm_armor-55",
"arm_armor-56",
"arm_armor-57",
"arm_armor-58",
"arm_armor-59",
"arm_armor-60"
]
new_image['Arm_armor'] = arr_double_arm_armor[random_color_index_three]
if not 'double' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
arr_double_weapon = [
"weapon_double_grip-1",
"weapon_double_grip-2",
"weapon_double_grip-3",
"weapon_double_grip-4",
"weapon_double_grip-5",
"weapon_double_grip-6",
"weapon_double_grip-8",
"weapon_double_grip-9",
"weapon_double_grip-10",
"weapon_double_grip-11",
"weapon_double_grip-12",
"weapon_double_grip-13",
"weapon_double_grip-14",
"weapon_double_grip-15",
"weapon_double_grip-16",
"weapon_double_grip-17",
"weapon_double_grip-18",
"weapon_double_grip-19",
"weapon_double_grip-20",
"weapon_double_grip-21",
"weapon_double_grip-22",
"weapon_double_grip-23",
]
new_image['Weapon'] = random.choice(arr_double_weapon)
# if not 'weapon_double_grip' in new_image['Weapon']:
# return create_new_image()
# position One Hand with, One Arm, with Single Weapon
if 'single' in arms_files[new_image['Arms']]:
if not 'battle_armors' in new_image['Battle_armors']:
arr_single_arm_armor = [
"arm_armor-21",
"arm_armor-22",
"arm_armor-23",
"arm_armor-24",
"arm_armor-25",
"arm_armor-26",
"arm_armor-27",
"arm_armor-28",
"arm_armor-29",
"arm_armor-30",
]
new_image['Arm_armor'] = arr_single_arm_armor[random_color_index_three]
if not 'single' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
arr_single_weapon = [
"weapon_one_hand-1",
"weapon_one_hand-2",
"weapon_one_hand-3",
"weapon_one_hand-4",
"weapon_one_hand-5",
"weapon_one_hand-6",
"weapon_one_hand-7",
"weapon_one_hand-8",
"weapon_one_hand-9",
"weapon_one_hand-10",
"weapon_one_hand-back-11",
"weapon_one_hand-back-12",
"weapon_one_hand-back-13",
"weapon_one_hand-back-14",
"weapon_one_hand-back-15",
"weapon_one_hand-back-16",
"weapon_one_hand-back-17",
"weapon_one_hand-back-18",
"weapon_one_hand-back-19",
"weapon_one_hand-back-20",
"weapon_one_hand-back-21",
"weapon_one_hand-back-22",
"weapon_one_hand-back-23",
"weapon_one_hand-back-24",
]
new_image['Weapon'] = random.choice(arr_single_weapon)
# if not 'weapon_one_hand' in new_image['Weapon']:
# return create_new_image()
else:
# position Staff, with Staff Hand, with Staff Weapons
if 'staff' in arms_files[new_image['Arms']]:
if not 'battle_armors' in new_image['Battle_armors']:
arr_staff_arm_armor = [
"arm_armor-31",
"arm_armor-32",
"arm_armor-33",
"arm_armor-34",
"arm_armor-35",
"arm_armor-36",
"arm_armor-37",
"arm_armor-38",
"arm_armor-39",
"arm_armor-40",
]
new_image['Arm_armor'] = arr_staff_arm_armor[random_color_index_three]
if not 'staff' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
arr_staff_weapon = [
"weapon_staff-1",
"weapon_staff-2",
"weapon_staff-3",
"weapon_staff-4",
"weapon_staff-5",
"weapon_staff-6",
"weapon_staff-7",
"weapon_staff-8",
"weapon_staff-9",
"weapon_staff-10",
"weapon_staff-11",
"weapon_staff-12",
"weapon_staff-13",
"weapon_staff-14",
"weapon_staff-15",
"weapon_staff-16",
"weapon_staff-17",
"weapon_staff-18",
"weapon_staff-19",
"weapon_staff-20",
"weapon_staff-21",
"weapon_staff-22",
"weapon_staff-23",
"weapon_staff-24",
]
new_image['Weapon'] = random.choice(arr_staff_weapon)
# if not 'weapon_staff' in new_image['Weapon']:
# return create_new_image()
else:
# position Two Arms, with Two Arms, with Dual Wield Weapons
if 'two' in arms_files[new_image['Arms']]:
if not 'battle_armors' in new_image['Battle_armors']:
arr_two_arm_armor = [
"arm_armor-41",
"arm_armor-42",
"arm_armor-43",
"arm_armor-44",
"arm_armor-45",
"arm_armor-46",
"arm_armor-47",
"arm_armor-48",
"arm_armor-49",
"arm_armor-50",
]
new_image['Arm_armor'] = arr_two_arm_armor[random_color_index_three]
if not 'two' in arm_armor_files[new_image['Arm_armor']]:
return create_new_image()
arr_two_weapon = [
"weapon_dual_wield-1",
"weapon_dual_wield-2",
"weapon_dual_wield-3",
"weapon_dual_wield-4",
"weapon_dual_wield-5",
"weapon_dual_wield-6",
"weapon_dual_wield-7",
"weapon_dual_wield-8",
"weapon_dual_wield-9",
"weapon_dual_wield-10",
]
new_image['Weapon'] = random.choice(arr_two_weapon)
# if not 'weapon_dual_wield' in new_image['Weapon']:
# return create_new_image()
# Mage hood is added onto HELMETLESS Explorer or Observer suits, and must have NO HELM
if not 'No hood#78' in mage_hoods_files[new_image['Mage_hoods']] and 'mage' in mage_hoods_files[new_image['Mage_hoods']]:
new_image['Helm_backpieces'] = 'helm_backpieces-2'
# Mage hoods CAN ONLY have short unkept or short bun hairs
arr_hair_short = [
"hair-28",
"hair-29",
"hair-30",
"hair-31",
"hair-32",
"hair-33",
"hair-34",
"hair-35",
"hair-36",
"hair-37",
"hair-38",
"hair-39",
"hair-40",
"hair-41",
"hair-42",
"hair-43",
"hair-44",
"hair-45",
]
random_type_hair_two = random.randint(0,1)
new_image['Hair'] = arr_hair_short[random_type_hair_two * 9 + random_color_index]
index_battle_armor = random.randint(0,2)
arr_commander_suit = [
"battle_armors-2",
"battle_armors-3",
"battle_armors-5",
]
new_image['Battle_armors'] = arr_commander_suit[index_battle_armor]
arr_weaon_mage_effect = [
"weapon_mage_effect-1",
"weapon_mage_effect-2",
"weapon_mage_effect-3",
"weapon_mage_effect-4",
"weapon_mage_effect-5",
]
new_image['Weapon'] = random.choice(arr_weaon_mage_effect)
arr_beards = [
"beards-19",
"beards-20",
"beards-21",
"beards-22",
"beards-23",
"beards-24",
"beards-25",
"beards-26",
"beards-27",
"beards-28",
"beards-29",
"beards-30",
"beards-31",
"beards-32",
"beards-33",
"beards-34",
"beards-35",
"beards-36",
"beards-37",
"beards-38",
"beards-39",
"beards-40",
"beards-41",
"beards-42",
"beards-43",
"beards-44",
"beards-45",
"beards-46",
"beards-47",
"beards-48",
"beards-49",
"beards-50",
"beards-51",
"beards-52",
"beards-53",
"beards-54",
]
random_type_beard_two = random.randint(0,3)
new_image['Beards'] = arr_beards[random_type_beard_two * 9 + random_color_index]
new_image['Arms'] = 'no_arms'
arr_arm_armor_mage = [
"arm_armor-120",
"mage_armor-11",
"arm_armor-121",
]
new_image["Arm_armor"] = arr_arm_armor_mage[index_battle_armor]
new_image['Armor_back_pieces'] = 'no_armor_back'
# Mage hand CAN ONLY be with mage hood
if 'mage hand' in arms_files[new_image['Arms']]:
if 'No hood#78' in mage_hoods_files[new_image ["Mage_hoods"]] :
return create_new_image()
# other exception
if 'enclosed_armors' in new_image['Battle_armors']:
if not 'short' in hair_files[new_image['Hair']] or 'fluffy' in beards_files[new_image['Beards']]:
return create_new_image()
# mage staff with only mage hood
if 'staff' in weapon_files[new_image['Weapon']]:
if 'No hood#78' in mage_hoods_files[new_image['Mage_hoods']]:
return create_new_image()
# no armours ALWAYS has a tattoo
if 'no_battle_armors' in battle_armors_files[new_image['Battle_armors']]:
if 'no_tattoos' in tattoos_files[new_image['Tattoos']]:
return create_new_image()
# thunder tattoo always have thunder warpaint
if 'thunder' in war_paint_files[new_image['War_paint']]:
new_image['Tattoos'] = 'tattoos-9'
# halo, crown, daemon have only helmetless
if not 'hair' in new_image['Hair']:
if 'enclosed_armors' in new_image['Battle_armors']:
return create_new_image()
| |
"""
Created on Aug 3, 2016
@author: ramseylab
"""
import pandas
from sqlalchemy import create_engine
from sqlalchemy.engine.url import URL
from sqlalchemy.pool import NullPool
import chrom_tool as ct
import numbers
import ast
import re
from allele_tool import parse_gb_alleles_col, parse_gb_allele_freqs_col
from genome_browser_tool import bin_from_range
class GenomeBrowserClient:
# Make sure you have created such a user in MySQL
# For MySQL 5.6
# GRANT SELECT PRIVILEGES ON hg19.* To 'bud'@'localhost' IDENTIFIED BY 'earth';
# GRANT SELECT PRIVILEGES ON hgmd_pro.* To 'bud'@'localhost' IDENTIFIED BY 'earth';
# For MySQL 5.7
# CREATE USER 'bud'@'localhost' IDENTIFIED BY 'earth';
# GRANT SELECT ON hg19.* TO 'bud'@'localhost';
# GRANT SELECT ON hgmd_pro.* TO 'bud'@'localhost';
# FLUSH PRIVILEGES;
__db_url = dict(
local_hg19=dict(
drivername='mysql+pymysql',
host='localhost',
port='3306',
username='bud',
password='<PASSWORD>',
database='hg19',
query={'charset': 'utf8'}
),
remote_hg19=dict(
drivername='mysql+pymysql',
host='genome-mysql.cse.ucsc.edu',
port='3306',
username='genome',
password='',
database='hg19',
query={'charset': 'utf8'}
),
)
    def __init__(self, config_key):
        """Open a connection to a UCSC genome-browser MySQL database.

        Parameters
        ----------
        config_key : str
            Key into ``__db_url`` ('local_hg19' or 'remote_hg19') selecting
            which database endpoint to connect to.
        """
        # db = create_engine('mysql://bud:earth@localhost:3306/hg19') # require module `MySQLdb`
        # default dialect is 'mysql+mysql-python'
        # `MySQLdb` is a fork of MySQL-python with added support for Python 3
        # See http://docs.sqlalchemy.org/en/latest/core/engines.html#mysql
        # db = create_engine('mysql+pymysql://bud:earth@localhost:3306/hg19') # require module `PyMySQL`
        # For `poolclass`, see http://stackoverflow.com/a/8705750
        self.db = create_engine(URL(**GenomeBrowserClient.__db_url[config_key]), poolclass=NullPool)
        self.conn = self.db.connect()
        # Subtraction between integer values, where one is of type UNSIGNED, produces an unsigned result by default.
        # If the difference is negative, an error results because it must be unsigned.
        # Coordinates are unsigned int. We'll use subtraction between coordinates to get TSS distances,
        # so we must enable this mode.
        self.conn.execute("SET sql_mode = 'NO_UNSIGNED_SUBTRACTION'")
    def __enter__(self):
        # Context-manager support: `with GenomeBrowserClient(...) as gc: ...`
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the connection and dispose of the engine, even when
        # the `with` body raised. Exceptions are not suppressed (returns None).
        self.conn.close()
        self.db.dispose()
def select_tss_dist(self, chrom, chrom_start, chrom_end):
# CASE g.strand
# WHEN '+' THEN s.chromStart - g.txStart
# WHEN '-' THEN g.txEnd - s.chromStart
# # Yao edited so that 'upstream' is always a negative distance, 2016.04.20
# # WHEN '+' THEN g.txStart - s.chromStart
# # WHEN '-' THEN s.chromStart - g.txEnd
# # Steve edited so that 'upstream' is always a positive distance, 2015.10.08
# # commented out by Yao, 2016.04.20
# # WHEN '-' THEN g.txEnd - s.chromStart # Satpreet's original code 2015.08.31
query_template = '''
SELECT
CASE strand
WHEN '+' THEN {chromStart} - txStart
WHEN '-' THEN txEnd - {chromStart}
END as tssDistance
FROM
ensGene
WHERE
chrom = "{chrom}" {binCondtion}
ORDER BY
abs(tssDistance)
'''
_bin = bin_from_range(chrom_start, chrom_end)
bin_condition = "AND bin between {} and {}".format(_bin - 1, _bin + 1)
query = query_template.format(chromStart=chrom_start, chrom=chrom, binCondtion=bin_condition)
result = self.conn.execute(query).fetchall()
# If no TSS matched in nearby bins, search across all bins
# I.e. calculate distances to all TSS on the SNP's chromosome
if len(result) == 0:
query = query_template.format(chromStart=chrom_start, chrom=chrom, binCondtion="")
result = self.conn.execute(query).fetchall()
# `result` is a list of one-element tuples. Change it into a list of integers
return [r[0] for r in result]
def compute_tss_dist(self, rsid, adjacent_bins=1):
if not isinstance(adjacent_bins, numbers.Integral):
raise ValueError("Required: 'adjacent_bins' must be an int. Actually: adjacent_bins == {}".
format(adjacent_bins))
else:
if adjacent_bins > 0:
# search within a bin window
bin_condition = "g.bin between s.bin - {} and s.bin + {}".format(adjacent_bins, adjacent_bins)
elif adjacent_bins == 0:
# search within the same bin
bin_condition = "g.bin = s.bin"
elif adjacent_bins == -1:
# search across all bins
bin_condition = ""
else:
raise ValueError("Invalid 'adjacent_bins' value: " + adjacent_bins)
# if binCondition is not empty, concat conditions with AND
condition_op = " AND" if bin_condition else ""
snps = (", ".join("'" + x + "'" for x in rsid))
chroms = (", ".join("'" + x + "'" for x in ct.REGULAR_CHR))
# query = '''
# SELECT
# s.name, s.chrom, s.chromStart, s.chromEnd,
# g.name as tssGene, g.txStart, g.txEnd, g.strand,
# CASE g.strand
# WHEN '+' THEN s.chromStart - g.txStart
# WHEN '-' THEN g.txEnd - s.chromStart
# # Yao edited so that 'upstream' is always a negative distance, 2016.04.20
# # WHEN '+' THEN g.txStart - s.chromStart
# # WHEN '-' THEN s.chromStart - g.txEnd # Steve edited so that 'upstream' is always a positive distance, 2015.10.08
# # commented out by Y<NAME>, 2016.04.20
# # WHEN '-' THEN g.txEnd - s.chromStart # Satpreet's original code 2015.08.31
# END as tssDistance
# FROM
# snp146 s
# LEFT OUTER JOIN
# ensGene g
# ON
# g.bin = s.bin # Speeds up JOINs # Satpreet's original code 2015.08.31
# # g.bin between s.bin - 2 and s.bin + 2 # Yao edited so that no NA values output, 2016.08.03
# AND g.chrom = s.chrom
# WHERE
# s.name IN ( 'rs9264942', 'rs9267551', 'rs9277535', 'rs9282699' )
# ORDER BY
# name, abs(tssDistance)
# '''
query = '''
SELECT
s.name, s.chrom, s.chromStart, s.chromEnd,
g.name as tssGene, g.txStart, g.txEnd, g.strand,
CASE g.strand
WHEN '+' THEN s.chromStart - g.txStart
WHEN '-' THEN g.txEnd - s.chromStart
END as tssDistance
FROM
snp146 s
INNER JOIN
ensGene g
ON
''' + bin_condition + condition_op + '''
g.chrom = s.chrom
WHERE
s.name IN ( ''' + snps + ''')
AND s.chrom IN ( ''' + chroms + ''')
ORDER BY
name, abs(tssDistance)
'''
rows = self.conn.execute(query)
df = pandas.DataFrame(rows.fetchall())
df.columns = rows.keys()
return df
def identify_genome_seg(self, rsid):
snps = (", ".join("'" + x + "'" for x in rsid))
chroms = (", ".join("'" + x + "'" for x in ct.REGULAR_CHR))
query = '''
SELECT s.name, s.chrom, s.chromStart, s.chromEnd,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT ch1.name), ',', 1) as ch1Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT ch2.name), ',', 1) as ch2Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT ch3.name), ',', 1) as ch3Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT ch4.name), ',', 1) as ch4Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT ch5.name), ',', 1) as ch5Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT ch6.name), ',', 1) as ch6Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT sw1.name), ',', 1) as sw1Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT sw2.name), ',', 1) as sw2Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT sw3.name), ',', 1) as sw3Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT sw4.name), ',', 1) as sw4Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT sw5.name), ',', 1) as sw5Name,
SUBSTRING_INDEX(GROUP_CONCAT(DISTINCT sw6.name), ',', 1) as sw6Name
FROM
snp146 s
LEFT OUTER JOIN
wgEncodeAwgSegmentationChromhmmGm12878 ch1
ON
s.bin = ch1.bin
AND s.chromStart BETWEEN ch1.chromStart AND (ch1.chromEnd - 1)
AND s.chrom = ch1.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationChromhmmH1hesc ch2
ON
s.bin = ch2.bin
AND s.chromStart BETWEEN ch2.chromStart AND (ch2.chromEnd - 1)
AND s.chrom = ch2.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationChromhmmHelas3 ch3
ON
s.bin = ch3.bin
AND s.chromStart BETWEEN ch3.chromStart AND (ch3.chromEnd - 1)
AND s.chrom = ch3.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationChromhmmHepg2 ch4
ON
s.bin = ch4.bin
AND s.chromStart BETWEEN ch4.chromStart AND (ch4.chromEnd - 1)
AND s.chrom = ch4.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationChromhmmHuvec ch5
ON
s.bin = ch5.bin
AND s.chromStart BETWEEN ch5.chromStart AND (ch5.chromEnd - 1)
AND s.chrom = ch5.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationChromhmmK562 ch6
ON
s.bin = ch6.bin
AND s.chromStart BETWEEN ch6.chromStart AND (ch6.chromEnd - 1)
AND s.chrom = ch6.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationSegwayGm12878 sw1
ON
s.bin = sw1.bin
AND s.chromStart BETWEEN sw1.chromStart AND (sw1.chromEnd - 1)
AND s.chrom = sw1.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationSegwayH1hesc sw2
ON
s.bin = sw2.bin
AND s.chromStart BETWEEN sw2.chromStart AND (sw2.chromEnd - 1)
AND s.chrom = sw2.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationSegwayHelas3 sw3
ON
s.bin = sw3.bin
AND s.chromStart BETWEEN sw3.chromStart AND (sw3.chromEnd - 1)
AND s.chrom = sw3.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationSegwayHepg2 sw4
ON
s.bin = sw4.bin
AND s.chromStart BETWEEN sw4.chromStart AND (sw4.chromEnd - 1)
AND s.chrom = sw4.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationSegwayHuvec sw5
ON
s.bin = sw5.bin
AND s.chromStart BETWEEN sw5.chromStart AND (sw5.chromEnd - 1)
AND s.chrom = sw5.chrom
LEFT OUTER JOIN
wgEncodeAwgSegmentationSegwayK562 sw6
ON
s.bin = sw6.bin
AND s.chromStart BETWEEN sw6.chromStart AND (sw6.chromEnd - 1)
AND s.chrom = sw6.chrom
WHERE
s.name IN ( ''' + snps + ''')
AND s.chrom IN ( ''' + chroms + ''')
GROUP BY s.name, s.chrom, s.chromStart, s.chromEnd
ORDER BY s.name
'''
rows = self.conn.execute(query)
df = pandas.DataFrame(rows.fetchall())
df.columns = rows.keys()
return df
def fetch_coord(self, rsid):
snps = ", ".join("'{}'".format(x) for x in rsid)
chroms = ", ".join("'{}'".format(x) for x in ct.REGULAR_CHR)
clazz = "'single'"
query = '''
select name, chrom, chromStart, chromEnd
from snp146
where name IN ( ''' + snps + ''') AND
chrom IN (''' + chroms + ''') AND
class = ''' + clazz
rows = self.conn.execute(query)
df = pandas.DataFrame(rows.fetchall())
df.columns = rows.keys()
return df
def fetch_alleles(self, rsid):
| |
# -*- coding: utf-8 -*-
"""This is a part of neuralflow package/EnergyModel class.
This source file contains functions for synthetic data generation."""
import numpy as np
import math
from tqdm import tqdm
from .energy_model_settings import MINIMUM_PEQ
def generate_data(self, deltaT=0.00001, time_epoch=None, last_event_is_spike=False):
    """Generate spike data and latent trajectories.

    Parameters
    ----------
    deltaT : float
        Size of the time bin in seconds for the numerical integration of the
        Langevin equation. The default is 0.00001.
    time_epoch : list, optional
        List of N tuples, where N is the number of trials. Each tuple consists
        of start time and stop time in seconds. For the case of absorbing
        boundary, stop time is the maximum allowed trial duration (the trial
        terminates before this time due to absorption, or at this time in an
        arbitrary latent state). Defaults to [(0, 1)].
        Example: to generate 100 trials that start at t=0 and end at t=2,
        pass time_epoch=[(0, 2)] * 100.
    last_event_is_spike : bool
        If True, trial termination time will not be recorded. Otherwise, trial
        termination time will be recorded. The default is False.

    Returns
    -------
    data : numpy array (N, 2), dtype=np.ndarray
        Spike data packed as an (N, 2) array of 1D arrays: for each trial,
        column 0 holds inter-spike intervals (ISIs) in seconds of all neurons,
        and column 1 the corresponding neuronal IDs (trial termination, if
        recorded, is indicated with -1).
        Example: neuron 0 spiked at 0.12, 0.15, 0.25 and neuron 1 at
        0.05, 0.2; the trial ran from t=0 to t=0.28. Then
        data[0][0] = np.array([0.05, 0.07, 0.03, 0.05, 0.05, 0.03]) and
        data[0][1] = np.array([1, 0, 0, 1, 0, -1]).
    time_bins : numpy array (N,), dtype=np.ndarray
        For each trial, the times at which the latent trajectory was recorded.
    x : numpy array (N,), dtype=np.ndarray
        Latent trajectories for each trial; each entry is a 1D float array.
    metadata : dictionary
        Contains 'last_event_is_spike' (echo of the input) and
        'absorption_event' (per-trial strings: 'absorbed' or
        'observation_ended').
    """
    # BUG FIX: the default used to be the mutable list literal [(0, 1)],
    # which is created once and shared across all calls; use a None sentinel
    # instead. Behavior is unchanged for callers.
    if time_epoch is None:
        time_epoch = [(0, 1)]
    # By default, the boundary mode is reflecting.
    boundary_mode = self.boundary_mode if self.boundary_mode is not None else 'reflecting'
    return self._generate_data(self.peq_, self.p0_, self.D_, self.firing_model_, self.num_neuron,
                               boundary_mode, deltaT, time_epoch, last_event_is_spike)
def _generate_data(self, peq, p0, D, firing_rate_model, num_neuron, boundary_mode, deltaT, time_epoch, last_event_is_spike):
"""Generates synthetic spike data and latent trajectories from a given model defined by (peq,p0,D,firing_rate_model).
Parameters
----------
peq : numpy array, dtype=float
The equilibrium probability density evaluated on SEM grid.
p0 : numpy array, dtype=float
The initial probaiblity distribution.
D : float
Noise intensity.
firing_model : list
For each neuron, this list contains the firing rate functions. Each entry is either a function that returns an array of firing rate values,
or a dictionary that specifies a model from ``firing_rate_models.py`` file.
num_neuron : int
A number of neuronal responses.
boundary_mode : ENUM("absorbing","reflecting")
Specify boundary mode that will apply the corresponding boundary condition for the latent trajectories.
deltaT : float
Size of the time bin in seconds for the integration of the Langevin equation.
time_epoch : list
List of N tuples, where N is the number of trials. Each tuple consists of start time and stop time in seconds. For the case of absorbing boundary,
stop time will be the maximum allowed time for the trial to last (the trial will terminate before this time due to absorption, or at this time in
an arbitrary latent state). The default is [(0,1)].
last_event_is_spike : bool
If true, trial termination time will not be recorded. Otherwise, trial termination time will be recorded.
Returns
-------
See generate_data function.
"""
num_trial = len(time_epoch)
if p0 is None:
p0 = peq # If p0 not provided, assume equilibirum distribution.
# generate diffusion trajectories
x, time_bins, metadata = self._generate_diffusion(peq, p0, D, boundary_mode, deltaT, time_epoch)
# initialize data arrays
rate = np.empty((num_neuron, num_trial), dtype=np.ndarray)
spikes = np.empty((num_neuron, num_trial), dtype=np.ndarray)
# generate firing rates and spikes
for iTrial in range(num_trial):
for iCell in range(num_neuron):
# Firing rate f(x(t))
rate[iCell, iTrial] = firing_rate_model[iCell](x[iTrial])
rt = rate[iCell, iTrial]
# Generate spikes from rate
spikes[iCell, iTrial] = self._generate_inhom_poisson(time_bins[iTrial][0:rt.shape[0]], rate[iCell, iTrial])
#Calculate actual time epoch with the actual end of trial times (not timeouts)
time_epoch_actual = [(time_epoch[i][0],time_bins[i][-1]+deltaT) for i in range(num_trial)]
# transform spikes to ISIs
data = self.transform_spikes_to_isi(spikes, time_epoch_actual, last_event_is_spike)
# record metadata
metadata['last_event_is_spike'] = last_event_is_spike
return data, time_bins, x, metadata
def transform_spikes_to_isi(self, spikes, time_epoch, last_event_is_spike=False):
    """Convert spike times to the packed ISI data array used for optimization.

    Parameters
    ----------
    spikes : numpy array (num_neuron, N), dtype=np.ndarray
        A sequence of spike times for each neuron on each trial. Each entry is
        a 1D array of floats.
    time_epoch : list of tuples
        List of N tuples, where N is the number of trials. Each tuple consists
        of the trial's start time and end time in seconds. The end time should
        be the actual end time (not the timeout) when last_event_is_spike is
        True.
    last_event_is_spike : bool
        If True, trial termination time will not be recorded. Otherwise, trial
        termination time will be recorded.

    Returns
    -------
    data : numpy array (N, 2), dtype=np.ndarray
        For each trial, column 0 contains the interspike intervals (floats)
        and column 1 the corresponding neuronal indices (with -1 appended for
        the termination event when it is recorded).
    """
    num_neuron, num_trial = spikes.shape
    # initialize data array
    data = np.empty((num_trial, 2), dtype=np.ndarray)
    # indices of neurons that spiked
    spike_ind = np.empty(num_neuron, dtype=np.ndarray)
    # transform spikes to interspike intervals format
    for iTrial in range(num_trial):
        for iCell in range(num_neuron):
            # BUG FIX: `np.int` (deprecated in NumPy 1.20) was removed in
            # NumPy 1.24 and raised AttributeError; use fixed-width np.int64.
            spike_ind[iCell] = iCell * np.ones(len(spikes[iCell, iTrial]), dtype=np.int64)
        all_spikes = np.concatenate(spikes[:, iTrial], axis=0)
        all_spike_ind = np.concatenate(spike_ind[:], axis=0)
        # Extra trailing slot for the termination interval when recorded.
        data[iTrial, 0] = np.zeros(len(all_spikes) + (not last_event_is_spike))
        if all_spikes.shape[0] == 0:
            data[iTrial, 1] = np.zeros(0)
            # If no spikes were emitted, measure from the trial start time.
            last_spike_time = time_epoch[iTrial][0]
        else:
            # sort spike times and neuron index arrays together
            ind_sort = np.argsort(all_spikes)
            all_spikes = all_spikes[ind_sort]
            all_spike_ind = all_spike_ind[ind_sort]
            data[iTrial, 0][1:len(all_spikes)] = all_spikes[1:] - all_spikes[:-1]
            data[iTrial, 0][0] = all_spikes[0] - time_epoch[iTrial][0]  # handle the first ISI
            last_spike_time = all_spikes[-1]
        if not last_event_is_spike:
            data[iTrial, 0][-1] = time_epoch[iTrial][1] - last_spike_time
        # assign indices of neurons which fired; -1 marks the termination event
        data[iTrial, 1] = all_spike_ind if last_event_is_spike else np.concatenate((all_spike_ind, [-1]))
    return data
def _generate_inhom_poisson(self, time, rate):
"""Generate spike sequence from a given rate of inhomogenious Poisson process lambda(t) and time t
Parameters
----------
time : numpy array, dtype=float
1D array of all time points
rate : numpy array, dtype=float
1D array of the corresponding firing rates
Returns
-------
spikes : np.array, dtype=float
1D array of spike times
"""
# calculate cumulative rate
deltaT = time[1:] - time[:-1]
r = np.cumsum(rate[0:-1] * deltaT)
r = np.insert(r, 0, 0)
deltaR = r[1:] - r[:-1]
# generate 1.5 as many spikes as expected on average for exponential distribution with rate 1
numX = math.ceil(1.5 * r[-1])
# generate exponential distributed spikes with the average rate 1
notEnough = True
x = np.empty(0)
xend = 0.0
while notEnough:
x = np.append(x, xend + np.cumsum(np.random.exponential(1.0, numX)))
# check that we generated enough spikes
| |
<gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File for reading and setting configurations for the whole library.
I will not pretend this config file is a good way to do things.
But it is flexible, and works so far.
I make no guarantees that this will not destroy your computer.
Example config file:
``` ini
[Paths]
; Root directories, under which filings/cached tarballs will be found
; There is no % interpolation
; CACHE_FEED indicates whether the feed should be cached/searched locally
CACHE_FEED = True
; FILING_ROOT is the root of the extracted filings
FILING_ROOT=/data/bulk/data/edgar/filings/
; FEED_CACHE_ROOT is the root of the compressed daily feed files from EDGAR
FEED_CACHE_ROOT=/data/bulk/data/edgar/raw_from_edgar/compressed_daily_feeds/
; CACHE_INDEX indicates whether the index should be cached/searched locally
CACHE_INDEX = True
; INDEX_ROOT is the root of the extracted index tab-delimited files
INDEX_ROOT=/data/bulk/data/edgar/indices/
; INDEX_CACHE_ROOT is the root of the cached compressed index files downloaded from EDGAR
INDEX_CACHE_ROOT=/data/bulk/data/edgar/raw_from_edgar/indices/
; FILING_PATH_FORMAT is the string to be .format-ed with the CIK and ACCESSION of the filing
; Don't put injection attacks here. That would be bad.
; Maximum length is 250 characters.
; Format string is formatted as an f-string (see docs), therefore slicing is possible.
; Available variables are:
; cik (int)
; cik_str (=f'{cik:010d}')
; accession (20 character format with dashes)
; and accession18 (18 characters of only digits with dashes removed)
; Examples:
; FILING_PATH_FORMAT={accession[11:13]}/{accession}.nc
; Would result in --> FILING_ROOT/95/0001005463-95-000003.nc
; This is useful for accession-only lookups (which is nice because multiple CIKs can file the same accession)
;
; FILING_PATH_FORMAT={cik_str[0:2]}/{cik_str[2:4]}/{cik_str[4:6]}/{cik_str[6:8]}/{cik_str[8:10]}/{accession}.txt
; Would result in --> FILING_ROOT/00/01/00/54/63/0001005463-95-000003.txt
; This uses CIK to break up filings, resulting in < 100 entries per directory. One problem is multiple CIKs
; can file the same accession, meaning you have to either copy the same accession filing to multiple dirs
;
; FILING_PATH_FORMAT={accession[:4]}/{accession[4:7]}/{accession[7:10]}/{accession[11:13]}/{accession[14:17]}/{accession[17:]}/{accession}.nc
; Would result in --> FILING_ROOT/1234/567/890/12/123/456/1234567890-12-123456.txt
; This is useful for only accession lookups (no CIKs) but also < 1000 entries per directory
;
FILING_PATH_FORMAT={accession[11:13]}/{accession}.nc
; Filename format for caching FEED compressed files from EDGAR
; String is passed .format(date=datetime object) of the date of the feed
FEED_CACHE_PATH_FORMAT={date:%Y%m%d}.nc.tar.gz
; Filename format for caching INDEX compressed files from EDGAR
; Available data are: date (datetime object), year, and quarter (both ints)
INDEX_CACHE_PATH_FORMAT=full_index_{year}_Q{quarter}.gz
[Downloader]
; Downloader specific settings
KEEP_ALL=True
KEEP_REGEX=
; User Agent for downloading, to keep the SEC happy
USER_AGENT=pyedgar feed download by <EMAIL>, from code at https://github.com/gaulinmp/pyedgar
[Index]
; Index file settings
INDEX_DELIMITER=\t
; Index file extension
; If you want to compress the index files, change INDEX_EXTENSION to tab.gz
INDEX_EXTENSION=tab.gz
```
:copyright: © 2021 by <NAME>
:license: MIT, see LICENSE for more details.
"""
# STDlib imports
import os
import re
import logging
import tempfile
import configparser
import datetime as dt
from itertools import product, starmap
# 3rd party imports
_logger = logging.getLogger(__name__)
# ██████╗ ██████╗ ███╗ ██╗███████╗██╗ ██████╗ ███████╗██╗██╗ ███████╗
# ██╔════╝██╔═══██╗████╗ ██║██╔════╝██║██╔════╝ ██╔════╝██║██║ ██╔════╝
# ██║ ██║ ██║██╔██╗ ██║█████╗ ██║██║ ███╗ █████╗ ██║██║ █████╗
# ██║ ██║ ██║██║╚██╗██║██╔══╝ ██║██║ ██║ ██╔══╝ ██║██║ ██╔══╝
# ╚██████╗╚██████╔╝██║ ╚████║██║ ██║╚██████╔╝ ██║ ██║███████╗███████╗
# ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝╚══════╝╚══════╝
# Directories searched for a config file, in priority order (current dir,
# then per-platform user config locations, then home). The PYEDGAR_CONF
# environment variable, checked in get_config_file, takes precedence.
PREFERRED_CONFIG_DIRECTORIES = [
    os.path.abspath(os.curdir),
    os.path.expanduser("~/.config/pyedgar"),
    os.path.expanduser("~/AppData/Local/pyedgar"),
    os.path.expanduser("~/AppData/Roaming/pyedgar"),
    os.path.expanduser("~/Library/Preferences/pyedgar"),
    os.path.expanduser("~/.config"),
    os.path.expanduser("~"),
    os.path.expanduser("~/Documents"),
]
def get_config_file(extra_dirs=None):
    """Searches the expected config paths and names, returning the first found.

    First, try and load directory location from environmental variable `PYEDGAR_CONF`
    if it points to a specific file.

    Directories searched, in order:

    #. `os.environ['PYEDGAR_CONF']` (could be directory path as well)
    #. `.` (current dir)
    #. `~/.config/pyedgar/`
    #. `~/AppData/Local/pyedgar/`
    #. `~/AppData/Roaming/pyedgar/`
    #. `~/Library/Preferences/pyedgar/`
    #. `~/.config/`
    #. `~/`
    #. `~/Documents/`

    For each of those directories, the following file names are looked for:

    #. `pyedgar.conf`
    #. `.pyedgar`
    #. `pyedgar.ini`

    The first one of these combinations that is found is read and returned.
    """
    try:
        with open(os.environ["PYEDGAR_CONF"], "r") as fh:
            config_txt = fh.read()
        if config_txt:
            return os.environ["PYEDGAR_CONF"]
    except (KeyError, OSError):
        # BUG FIX: the original caught only (KeyError, FileNotFoundError),
        # but PYEDGAR_CONF is documented as possibly pointing at a directory,
        # in which case open() raises IsADirectoryError (or PermissionError on
        # Windows) and crashed here. OSError covers all of those; we fall
        # through to the directory search below.
        pass
    _names = ["pyedgar.conf", ".pyedgar", "pyedgar.ini"]
    _dirs = [
        os.environ.get("PYEDGAR_CONF", "."),  # env variable might point to dir not file
        *PREFERRED_CONFIG_DIRECTORIES,
    ]
    if extra_dirs:
        if isinstance(extra_dirs, str):
            _dirs.append(extra_dirs)
        else:
            _dirs.extend(extra_dirs)
    # Try every (directory, name) combination and return the first non-empty
    # readable file.
    for fpath in starmap(os.path.join, product(_dirs, _names)):
        try:
            with open(fpath, "r") as fh:
                config_txt = fh.read()
            if config_txt:
                return fpath
        except OSError:
            pass
    # getting here means we didn't find the file
    return None
# Fallback settings used when no config file is found, or when a found file
# omits a key. Paths default to a per-user temp directory so the package
# works out of the box without any configuration.
_tmp_dir = os.path.join(tempfile.gettempdir(), "pyedgar")
_defaults = {
    "FILING_ROOT": os.path.join(_tmp_dir, "filings"),
    "FEED_CACHE_ROOT": os.path.join(_tmp_dir, "compressed_daily_feeds"),
    "CACHE_FEED": "False",
    "INDEX_ROOT": os.path.join(_tmp_dir, "indices"),
    "INDEX_CACHE_ROOT": os.path.join(_tmp_dir, "indices"),
    "CACHE_INDEX": "False",
    "FILING_PATH_FORMAT": "{accession[11:13]}/{accession}.nc",
    "FEED_CACHE_PATH_FORMAT": "sec_daily_{date:%Y-%m-%d}.tar.gz",
    "INDEX_CACHE_PATH_FORMAT": "full_index_{year}_Q{quarter}.gz",
    "KEEP_ALL": "True",
    "KEEP_REGEX": "",
    "INDEX_DELIMITER": "\t",
    "INDEX_EXTENSION": "tab",
    "USER_AGENT": "pyedgar feed download by <EMAIL>, from code at https://github.com/gaulinmp/pyedgar",
}

# Locate and parse the config file at import time; CONFIG_FILE is None when
# nothing was found, in which case the defaults above apply.
CONFIG_FILE = get_config_file()
_logger.info("Config file to be loaded: %r", CONFIG_FILE)
CONFIG_OBJECT = configparser.ConfigParser(interpolation=None, defaults=_defaults)
try:
    CONFIG_OBJECT.read(CONFIG_FILE)
    _logger.info(
        "Loaded config file from %r. \n\n" "ALERT!!!! FILING_PATH_FORMAT is %r.\n",
        CONFIG_FILE,
        CONFIG_OBJECT.get("Paths", "FILING_PATH_FORMAT", fallback=None),
    )
except TypeError:
    # Type error means that we tried to read from None file
    _logger.info("Error reading config file: %r", CONFIG_FILE)
    # Come on python... how does a nonexistent section not drop through to DEFAULT?!
    # (The sections must exist for the CONFIG_OBJECT.get calls below to work.)
    for sec in ("Paths", "Downloader", "Index"):
        CONFIG_OBJECT.add_section(sec)
except Exception:
    _logger.exception("Error reading config file: %r", CONFIG_FILE)
    raise
# ██████╗ ██████╗ ███╗ ██╗███████╗████████╗ █████╗ ███╗ ██╗████████╗███████╗
# ██╔════╝██╔═══██╗████╗ ██║██╔════╝╚══██╔══╝██╔══██╗████╗ ██║╚══██╔══╝██╔════╝
# ██║ ██║ ██║██╔██╗ ██║███████╗ ██║ ███████║██╔██╗ ██║ ██║ ███████╗
# ██║ ██║ ██║██║╚██╗██║╚════██║ ██║ ██╔══██║██║╚██╗██║ ██║ ╚════██║
# ╚██████╗╚██████╔╝██║ ╚████║███████║ ██║ ██║ ██║██║ ╚████║ ██║ ███████║
# ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚══════╝ ╚═╝ ╚═╝ ╚═╝╚═╝ ╚═══╝ ╚═╝ ╚══════╝
# Paths to filings and indices
FILING_ROOT = CONFIG_OBJECT.get("Paths", "FILING_ROOT")
FEED_CACHE_ROOT = CONFIG_OBJECT.get("Paths", "FEED_CACHE_ROOT")
INDEX_ROOT = CONFIG_OBJECT.get("Paths", "INDEX_ROOT")
INDEX_CACHE_ROOT = CONFIG_OBJECT.get("Paths", "INDEX_CACHE_ROOT")

# expand user dir if present
if '~' in FILING_ROOT:
    FILING_ROOT = os.path.expanduser(FILING_ROOT)
if '~' in FEED_CACHE_ROOT:
    FEED_CACHE_ROOT = os.path.expanduser(FEED_CACHE_ROOT)
if '~' in INDEX_ROOT:
    INDEX_ROOT = os.path.expanduser(INDEX_ROOT)
if '~' in INDEX_CACHE_ROOT:
    INDEX_CACHE_ROOT = os.path.expanduser(INDEX_CACHE_ROOT)

# Path format strings, formatted at lookup time: FILING_PATH_FORMAT with
# cik/accession data, the cache formats with date/year/quarter data.
FILING_PATH_FORMAT = CONFIG_OBJECT.get("Paths", "FILING_PATH_FORMAT")
FEED_CACHE_PATH_FORMAT = CONFIG_OBJECT.get("Paths", "FEED_CACHE_PATH_FORMAT")
INDEX_CACHE_PATH_FORMAT = CONFIG_OBJECT.get("Paths", "INDEX_CACHE_PATH_FORMAT")

# Filings cache settings
CACHE_FEED = CONFIG_OBJECT.getboolean("Paths", "CACHE_FEED")
KEEP_ALL = CONFIG_OBJECT.getboolean("Downloader", "KEEP_ALL")
KEEP_REGEX = CONFIG_OBJECT.get("Downloader", "KEEP_REGEX")
USER_AGENT = CONFIG_OBJECT.get("Downloader", "USER_AGENT")

# Index cache settings
CACHE_INDEX = CONFIG_OBJECT.getboolean("Paths", "CACHE_INDEX")
INDEX_DELIMITER = CONFIG_OBJECT.get("Index", "INDEX_DELIMITER")
INDEX_EXTENSION = CONFIG_OBJECT.get("Index", "INDEX_EXTENSION").lstrip('.')

# Normalize the various ways a user might spell "tab" in an ini file to a
# literal tab character.
if INDEX_DELIMITER.lower() in ("\t", "\\t", "tab", "\\\t", "\\\\t"):
    INDEX_DELIMITER = "\t"
# ██████╗ █████╗ ████████╗██╗ ██╗███████╗
# ██╔══██╗██╔══██╗╚══██╔══╝██║ ██║██╔════╝
# ██████╔╝███████║ ██║ ███████║███████╗
# ██╔═══╝ ██╔══██║ ██║ ██╔══██║╚════██║
# ██║ ██║ ██║ ██║ ██║ ██║███████║
# ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝╚══════╝
def format_filing_path(**kwargs):
    """
    Render the config-supplied ``FILING_PATH_FORMAT`` template as an f-string.

    The template may use arbitrary f-string expressions (e.g. ``cik_str[:5]``
    for sub-folders), so it is evaluated with ``eval`` rather than
    ``str.format``.  All keyword arguments are made available to the template,
    and the following variables are always forced into scope (overriding any
    same-named kwargs, as in the original implementation):

        cik:         CIK as an int (0 if missing/unparseable)
        cik_str:     zero-padded 10-digit string form of ``cik``
        accession:   20-character dashed accession number (placeholder if absent)
        accession18: ``accession`` with dashes removed (18 digits)

    SECURITY NOTE: evaluating a config-file string with ``eval`` is an attack
    vector.  The 250-character cap limits, but does not prevent, injection --
    know where your config file is loading from.

    :param kwargs: template variables; ``cik`` and ``accession`` are the
        conventional ones, but any custom name referenced by the template
        may be passed through.
    :return: the formatted path string.
    """
    # Module-level template loaded from the config file above.
    global FILING_PATH_FORMAT
    # Build an explicit namespace instead of the old locals().update(kwargs)
    # trick: mutating the locals() dict only worked on CPython by accident
    # (see https://stackoverflow.com/a/8028785/1959876) and breaks under
    # PEP 667 (Python 3.13+), where locals() returns a fresh snapshot.
    namespace = dict(kwargs)
    try:
        cik = int(kwargs.get("cik", 0))
        cik_str = f"{cik:010d}"
    except (ValueError, TypeError):
        # Unparseable CIK -> fall back to the all-zeros placeholder.
        cik = 0
        cik_str = 10 * "0"
    accession = kwargs.get("accession", "9090909090-90-909090")
    accession18 = accession.replace("-", "")
    # Forced variables are added last so they win over same-named kwargs,
    # matching the original behaviour where these were real locals.
    namespace.update(
        cik=cik, cik_str=cik_str, accession=accession, accession18=accession18
    )
    # globals() is passed so the template can still reference module-level
    # names (e.g. FILING_ROOT), as it could with the bare eval before.
    return eval(f"""f'{FILING_PATH_FORMAT[:250]}'""", globals(), namespace)
def format_feed_cache_path(datetime_in):
"""
Formats feed cache path on a given date (from date-time input).
"""
# Get that thing from above.
global FEED_CACHE_PATH_FORMAT
if isinstance(datetime_in, int):
datetime_in = dt.date.fromordinal(datetime_in)
| |
0xA5,
0xB8,
0xAB,
0xDC,
0x3A,
0xAE,
0xB6,
],
"gEfiPlatformMemory2ErrorSectionGuid": [
0x61EC04FC,
0x48E6,
0xD813,
0x25,
0xC9,
0x8D,
0xAA,
0x44,
0x75,
0x0B,
0x12,
],
"gEfiBlockIoCryptoAlgoAesXtsGuid": [
0x2F87BA6A,
0x5C04,
0x4385,
0xA7,
0x80,
0xF3,
0xBF,
0x78,
0xA9,
0x7B,
0xEC,
],
"gEfiBlockIoCryptoAlgoAesCbcMsBitlockerGuid": [
0x689E4C62,
0x70BF,
0x4CF3,
0x88,
0xBB,
0x33,
0xB3,
0x18,
0x26,
0x86,
0x70,
],
"gEfiPaddingRsassaPkcs1V1P5Guid": [
0x9317EC24,
0x7CB0,
0x4D0E,
0x8B,
0x32,
0x2E,
0xD9,
0x20,
0x9C,
0xD8,
0xAF,
],
"gEfiPaddingRsassaPssGuid": [
0x7B2349E0,
0x522D,
0x4F8E,
0xB9,
0x27,
0x69,
0xD9,
0x7C,
0x9E,
0x79,
0x5F,
],
"gEfiPaddingNoneGuid": [
0x3629DDB1,
0x228C,
0x452E,
0xB6,
0x16,
0x09,
0xED,
0x31,
0x6A,
0x97,
0x00,
],
"gEfiPaddingRsaesPkcs1V1P5Guid": [
0xE1C1D0A9,
0x40B1,
0x4632,
0xBD,
0xCC,
0xD9,
0xD6,
0xE5,
0x29,
0x56,
0x31,
],
"gEfiPaddingRsaesOaepGuid": [
0xC1E63AC4,
0xD0CF,
0x4CE6,
0x83,
0x5B,
0xEE,
0xD0,
0xE6,
0xA8,
0xA4,
0x5B,
],
"gEfiSmbios3TableGuid": [
0xF2FD1544,
0x9794,
0x4A2C,
0x99,
0x2E,
0xE5,
0xBB,
0xCF,
0x20,
0xE3,
0x94,
],
"gEfiBootManagerPolicyConsoleGuid": [
0xCAB0E94C,
0xE15F,
0x11E3,
0x91,
0x8D,
0xB8,
0xE8,
0x56,
0x2C,
0xBA,
0xFA,
],
"gEfiBootManagerPolicyNetworkGuid": [
0xD04159DC,
0xE15F,
0x11E3,
0xB2,
0x61,
0xB8,
0xE8,
0x56,
0x2C,
0xBA,
0xFA,
],
"gEfiBootManagerPolicyConnectAllGuid": [
0x113B2126,
0xFC8A,
0x11E3,
0xBD,
0x6C,
0xB8,
0xE8,
0x56,
0x2C,
0xBA,
0xFA,
],
"gEfiVirtualDiskGuid": [
0x77AB535A,
0x45FC,
0x624B,
0x55,
0x60,
0xF7,
0xB2,
0x81,
0xD1,
0xF9,
0x6E,
],
"gEfiVirtualCdGuid": [
0x3D5ABD30,
0x4175,
0x87CE,
0x6D,
0x64,
0xD2,
0xAD,
0xE5,
0x23,
0xC4,
0xBB,
],
"gEfiPersistentVirtualDiskGuid": [
0x5CEA02C9,
0x4D07,
0x69D3,
0x26,
0x9F,
0x44,
0x96,
0xFB,
0xE0,
0x96,
0xF9,
],
"gEfiPersistentVirtualCdGuid": [
0x08018188,
0x42CD,
0xBB48,
0x10,
0x0F,
0x53,
0x87,
0xD5,
0x3D,
0xED,
0x3D,
],
"gEfiMemoryAttributesTableGuid": [
0xDCFA911D,
0x26EB,
0x469F,
0xA2,
0x20,
0x38,
0xB7,
0xDC,
0x46,
0x12,
0x20,
],
"gEfiArmProcessorErrorSectionGuid": [
0xE19E3D16,
0xBC11,
0x11E4,
0x9C,
0xAA,
0xC2,
0x05,
0x1D,
0x5D,
0x46,
0xB0,
],
"gEfiHiiImageDecoderNameJpegGuid": [
0xEFEFD093,
0x0D9B,
0x46EB,
0xA8,
0x56,
0x48,
0x35,
0x07,
0x00,
0xC9,
0x08,
],
"gEfiHiiImageDecoderNamePngGuid": [
0xAF060190,
0x5E3A,
0x4025,
0xAF,
0xBD,
0xE1,
0xF9,
0x05,
0xBF,
0xAA,
0x4C,
],
"gEfiBttAbstractionGuid": [
0x18633BFC,
0x1735,
0x4217,
0x8A,
0xC9,
0x17,
0x23,
0x92,
0x82,
0xD3,
0xF8,
],
"gPeiAprioriFileNameGuid": [
0x1B45CC0A,
0x156A,
0x428A,
0xAF,
0x62,
0x49,
0x86,
0x4D,
0xA0,
0xE6,
0xE6,
],
"gAprioriGuid": [
0xFC510EE7,
0xFFDC,
0x11D4,
0xBD,
0x41,
0x00,
0x80,
0xC7,
0x3C,
0x88,
0x81,
],
"gEfiFirmwareFileSystem2Guid": [
0x8C8CE578,
0x8A3D,
0x4F1C,
0x99,
0x35,
0x89,
0x61,
0x85,
0xC3,
0x2D,
0xD3,
],
"gEfiFirmwareVolumeTopFileGuid": [
0x1BA0062E,
0xC779,
0x4582,
0x85,
0x66,
0x33,
0x6A,
0xE8,
0xF7,
0x8F,
0x09,
],
"gEfiHobMemoryAllocModuleGuid": [
0xF8E21975,
0x0899,
0x4F58,
0xA4,
0xBE,
0x55,
0x25,
0xA9,
0xC6,
0xD7,
0x7A,
],
"gEfiHobMemoryAllocStackGuid": [
0x4ED4BF27,
0x4092,
0x42E9,
0x80,
0x7D,
0x52,
0x7B,
0x1D,
0x00,
0xC9,
0xBD,
],
"gEfiHobMemoryAllocBspStoreGuid": [
0x564B33CD,
0xC92A,
0x4593,
0x90,
0xBF,
0x24,
0x73,
0xE4,
0x3C,
0x63,
0x22,
],
"gEfiEventLegacyBootGuid": [
0x2A571201,
0x4966,
0x47F6,
0x8B,
0x86,
0xF3,
0x1E,
0x41,
0xF3,
0x2F,
0x10,
],
"gEfiHobListGuid": [
0x7739F24C,
0x93D7,
0x11D4,
0x9A,
0x3A,
0x00,
0x90,
0x27,
0x3F,
0xC1,
0x4D,
],
"gEfiDxeServicesTableGuid": [
0x05AD34BA,
0x6F02,
0x4214,
0x95,
0x2E,
0x4D,
0xA0,
0x39,
0x8E,
0x2B,
0xB9,
],
"gEfiMdePkgTokenSpaceGuid": [
0x914AEBE7,
0x4635,
0x459B,
0xAA,
0x1C,
0x11,
0xE2,
0x19,
0xB0,
0x3A,
0x10,
],
"gEfiHardwareErrorVariableGuid": [
0x414E6BDD,
0xE47B,
0x47CC,
0xB2,
0x44,
0xBB,
0x61,
0x02,
0x0C,
0xF5,
0x16,
],
"gEfiEventDxeDispatchGuid": [
0x7081E22F,
0xCAC6,
0x4053,
0x94,
0x68,
0x67,
0x57,
0x82,
0xCF,
0x88,
0xE5,
],
"gEfiDiskInfoIdeInterfaceGuid": [
0x5E948FE3,
0x26D3,
0x42B5,
0xAF,
0x17,
0x61,
0x02,
0x87,
0x18,
0x8D,
0xEC,
],
"gEfiDiskInfoScsiInterfaceGuid": [
0x08F74BAA,
0xEA36,
0x41D9,
0x95,
0x21,
0x21,
0xA7,
0x0F,
0x87,
0x80,
0xBC,
],
"gEfiDiskInfoUsbInterfaceGuid": [
0xCB871572,
0xC11A,
0x47B5,
0xB4,
0x92,
0x67,
0x5E,
0xAF,
0xA7,
0x77,
0x27,
],
"gEfiDiskInfoAhciInterfaceGuid": [
0x9E498932,
0x4ABC,
0x45AF,
0xA3,
0x4D,
0x02,
0x47,
0x78,
0x7B,
0xE7,
0xC6,
],
"gEfiStatusCodeDataTypeStringGuid": [
0x92D11080,
0x496F,
0x4D95,
0xBE,
0x7E,
0x03,
0x74,
0x88,
0x38,
0x2B,
0x0A,
],
"gEfiStatusCodeSpecificDataGuid": [
0x335984BD,
0xE805,
0x409A,
0xB8,
0xF8,
0xD2,
0x7E,
0xCE,
0x5F,
0xF7,
0xA6,
],
"gEfiFirmwareFileSystem3Guid": [
0x5473C07A,
0x3DCB,
0x4DCA,
0xBD,
0x6F,
0x1E,
0x96,
0x89,
0xE7,
0x34,
0x9A,
],
"gEfiEndOfDxeEventGroupGuid": [
0x2CE967A,
0xDD7E,
0x4FFC,
0x9E,
0xE7,
0x81,
0xC,
0xF0,
0x47,
0x8,
0x80,
],
"gEfiFirmwareContentsSignedGuid": [
0xF9D89E8,
0x9259,
0x4F76,
0xA5,
0xAF,
0xC,
0x89,
0xE3,
0x40,
0x23,
0xDF,
],
"gEfiVectorHandoffTableGuid": [
0x996EC11C,
0x5397,
0x4E73,
0xB5,
0x8F,
0x82,
0x7E,
0x52,
0x90,
0x6D,
0xEF,
],
"gAdapterInfoPlatformSecurityGuid": [
0x6BE272C7,
0x1320,
0x4CCD,
0x90,
0x17,
0xD4,
0x61,
0x2C,
0x01,
0x2B,
0x25,
],
"gEfiDiskInfoNvmeInterfaceGuid": [
0x3AB14680,
0x5D3F,
0x4A4D,
0xBC,
0xDC,
0xCC,
0x38,
0x0,
0x18,
0xC7,
0xF7,
],
"gEfiGraphicsInfoHobGuid": [
0x39F62CCE,
0x6825,
0x4669,
0xBB,
0x56,
0x54,
0x1A,
0xBA,
0x75,
0x3A,
0x07,
],
"gEfiDiskInfoUfsInterfaceGuid": [
0x4B3029CC,
0x6B98,
0x47FB,
0xBC,
0x96,
0x76,
0xDC,
0xB8,
0x4,
0x41,
0xF0,
],
"gEfiGraphicsDeviceInfoHobGuid": [
0xE5CB2AC9,
0xD35D,
0x4430,
0x93,
0x6E,
0x1D,
0xE3,
0x32,
0x47,
0x8D,
0xE7,
],
"gEfiSmmSmramMemoryGuid": [
0x6DADF1D1,
0xD4CC,
0x4910,
0xBB,
0x6E,
0x82,
0xB1,
0xFD,
0x80,
0xFF,
0x3D,
],
"gEfiDiskInfoSdMmcInterfaceGuid": [
0x8DEEC992,
0xD39C,
0x4A5C,
0xAB,
0x6B,
0x98,
0x6E,
0x14,
0x24,
0x2B,
0x9D,
],
"gWindowsUxCapsuleGuid": [
0x3B8C8162,
0x188C,
0x46A4,
0xAE,
0xC9,
0xBE,
0x43,
0xF1,
0xD6,
0x56,
0x97,
],
"gTianoCustomDecompressGuid": [
0xA31280AD,
0x481E,
0x41B6,
0x95,
0xE8,
0x12,
0x7F,
0x4C,
0x98,
0x47,
0x79,
],
"gEfiIa32X64ErrorTypeCacheCheckGuid": [
0xA55701F5,
0xE3EF,
0x43DE,
0xAC,
0x72,
0x24,
0x9B,
0x57,
0x3F,
0xAD,
0x2C,
],
"gEfiIa32X64ErrorTypeTlbCheckGuid": [
0xFC06B535,
0x5E1F,
0x4562,
0x9F,
0x25,
0x0A,
0x3B,
0x9A,
0xDB,
0x63,
0xC3,
],
"gEfiIa32X64ErrorTypeBusCheckGuid": [
0x1CF3F8B3,
0xC5B1,
0x49A2,
0xAA,
0x59,
0x5E,
0xEF,
0x92,
0xFF,
0xA6,
0x3C,
],
"gEfiIa32X64ErrorTypeMsCheckGuid": [
0x48AB7F57,
0xDC34,
0x4F6C,
0xA7,
0xD3,
0xB0,
0xB5,
0xB0,
0xA7,
0x43,
0x14,
],
"gEfiPeiMasterBootModePpiGuid": [
0x7408D748,
0xFC8C,
0x4EE6,
0x92,
0x88,
0xC4,
0xBE,
0xC0,
0x92,
0xA4,
0x10,
],
"gEfiDxeIplPpiGuid": [
0xAE8CE5D,
0xE448,
0x4437,
0xA8,
0xD7,
0xEB,
0xF5,
0xF1,
0x94,
0xF7,
0x31,
],
"gEfiPeiMemoryDiscoveredPpiGuid": [
0xF894643D,
0xC449,
0x42D1,
0x8E,
0xA8,
0x85,
0xBD,
0xD8,
0xC6,
0x5B,
0xDE,
],
"gEfiPeiBootInRecoveryModePpiGuid": [
0x17EE496A,
0xD8E4,
0x4B9A,
0x94,
0xD1,
0xCE,
0x82,
0x72,
0x30,
0x8,
0x50,
],
"gEfiEndOfPeiSignalPpiGuid": [
0x605EA650,
0xC65C,
0x42E1,
0xBA,
0x80,
0x91,
0xA5,
0x2A,
0xB6,
0x18,
0xC6,
],
"gEfiPeiResetPpiGuid": [
0xEF398D58,
0x9DFD,
0x4103,
0xBF,
0x94,
0x78,
0xC6,
0xF4,
0xFE,
0x71,
0x2F,
],
"gEfiPeiStatusCodePpiGuid": [
0x229832D3,
0x7A30,
0x4B36,
0xB8,
0x27,
0xF4,
0xC,
0xB7,
0xD4,
0x54,
0x36,
],
"gEfiPeiSecurity2PpiGuid": [
0xDCD0BE23,
0x9586,
0x40F4,
0xB6,
0x43,
0x6,
0x52,
0x2C,
0xED,
0x4E,
0xDE,
],
"gEfiTemporaryRamSupportPpiGuid": [
0xDBE23AA9,
0xA345,
0x4B97,
0x85,
0xB6,
0xB2,
0x26,
0xF1,
0x61,
0x73,
0x89,
],
"gEfiPeiCpuIoPpiInstalledGuid": [
0xE6AF1F7B,
0xFC3F,
0x46DA,
0xA8,
0x28,
0xA3,
0xB4,
0x57,
0xA4,
0x42,
0x82,
],
"gEfiPciCfg2PpiGuid": [
0x57A449A,
0x1FDC,
0x4C06,
0xBF,
0xC9,
0xF5,
0x3F,
0x6A,
0x99,
0xBB,
0x92,
],
"gEfiPeiStallPpiGuid": [
0x1F4C6F90,
0xB06B,
0x48D8,
0xA2,
0x01,
0xBA,
0xE5,
0xF1,
0xCD,
0x7D,
0x56,
],
"gEfiPeiReadOnlyVariable2PpiGuid": [
0x2AB86EF5,
0xECB5,
0x4134,
0xB5,
0x56,
0x38,
0x54,
0xCA,
0x1F,
0xE1,
0xB4,
],
"gEfiSecPlatformInformationPpiGuid": [
0x6F8C2B35,
0xFEF4,
0x448D,
0x82,
0x56,
0xE1,
0x1B,
0x19,
0xD6,
0x10,
0x77,
],
"gEfiPeiLoadedImagePpiGuid": [
0xC1FCD448,
0x6300,
0x4458,
0xB8,
0x64,
0x28,
0xDF,
0x1,
0x53,
0x64,
0xBC,
],
"gEfiPeiSmbus2PpiGuid": [
0x9CA93627,
0xB65B,
0x4324,
0xA2,
0x2,
0xC0,
0xB4,
0x61,
0x76,
0x45,
0x43,
],
"gEfiPeiFirmwareVolumeInfoPpiGuid": [
0x49EDB1C1,
0xBF21,
0x4761,
0xBB,
0x12,
0xEB,
0x0,
0x31,
0xAA,
0xBB,
0x39,
],
"gEfiPeiLoadFilePpiGuid": [
0xB9E0ABFE,
0x5979,
0x4914,
0x97,
0x7F,
0x6D,
0xEE,
0x78,
0xC2,
0x78,
0xA6,
],
"gEfiPeiDecompressPpiGuid": [
0x1A36E4E7,
0xFAB6,
0x476A,
0x8E,
0x75,
0x69,
0x5A,
0x5,
0x76,
0xFD,
0xD7,
],
"gPcdPpiGuid": [
0x6E81C58,
0x4AD7,
0x44BC,
0x83,
0x90,
0xF1,
0x2,
0x65,
0xF7,
0x24,
0x80,
],
"gGetPcdInfoPpiGuid": [
0x4D8B155B,
0xC059,
0x4C8F,
0x89,
0x26,
0x6,
0xFD,
0x43,
0x31,
0xDB,
0x8A,
],
"gEfiPeiRecoveryModulePpiGuid": [
0xFB6D9542,
0x612D,
0x4F45,
0x87,
0x2F,
0x5C,
0xFF,
0x52,
0xE9,
0x3D,
0xCF,
],
"gEfiPeiDeviceRecoveryModulePpiGuid": [
0x0DE2CE25,
0x446A,
0x45A7,
0xBF,
0xC9,
0x37,
0xDA,
0x26,
0x34,
0x4B,
0x37,
],
"gEfiPeiVirtualBlockIoPpiGuid": [
0x695D8AA1,
0x42EE,
0x4C46,
0x80,
0x5C,
0x6E,
0xA6,
0xBC,
0xE7,
0x99,
0xE3,
],
"gEfiPeiS3Resume2PpiGuid": [
0x6D582DBC,
0xDB85,
0x4514,
0x8F,
0xCC,
0x5A,
0xDF,
0x62,
0x27,
0xB1,
0x47,
],
"gEfiPeiRscHandlerPpiGuid": [
0x65D394,
0x9951,
0x4144,
0x82,
0xA3,
0xA,
0xFC,
0x85,
0x79,
0xC2,
0x51,
],
"gEfiPeiPcdPpiGuid": [
0x1F34D25,
0x4DE2,
0x23AD,
0x3F,
0xF3,
0x36,
0x35,
0x3F,
0xF3,
0x23,
0xF1,
],
"gEfiGetPcdInfoPpiGuid": [
0xA60C6B59,
0xE459,
0x425D,
0x9C,
0x69,
0xB,
0xCC,
0x9C,
0xB2,
0x7D,
0x81,
],
"gEfiTemporaryRamDonePpiGuid": [
0xCEAB683C,
0xEC56,
0x4A2D,
0xA9,
0x6,
0x40,
0x53,
0xFA,
0x4E,
0x9C,
0x16,
],
"gEfiVectorHandoffInfoPpiGuid": [
0x3CD652B4,
0x6D33,
0x4DCE,
0x89,
0xDB,
0x83,
0xDF,
0x97,
0x66,
0xFC,
0xCA,
],
"gEfiIsaHcPpiGuid": [
0x8D48BD70,
0xC8A3,
0x4C06,
0x90,
0x1B,
0x74,
0x79,
0x46,
0xAA,
0xC3,
0x58,
],
"gEfiSioPpiGuid": [
0x23A464AD,
0xCB83,
0x48B8,
0x94,
0xAB,
0x1A,
0x6F,
0xEF,
0xCF,
0xE5,
0x22,
],
"gEfiPeiI2cMasterPpiGuid": [
0xB3BFAB9B,
0x9F9C,
0x4E8B,
0xAD,
0x37,
0x7F,
0x8C,
0x51,
0xFC,
0x62,
0x80,
],
"gEfiPeiFirmwareVolumeInfo2PpiGuid": [
0xEA7CA24B,
0xDED5,
0x4DAD,
0xA3,
0x89,
0xBF,
0x82,
0x7E,
0x8F,
0x9B,
| |
# def test_repos_esercizi_di_programmazione_javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'esercizi-di-programmazione-javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_esformatter_jsx():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'esformatter-jsx')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_eslint():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'eslint')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_eslint_config_defaults():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'eslint-config-defaults')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_eslint_formatter_pretty():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'eslint-formatter-pretty')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_esperanto():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'esperanto')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_espree():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'espree')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_essage():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'essage')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_essential_javascript_links():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'essential-javascript-links')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_essential_js_design_patterns():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'essential-js-design-patterns')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_evee_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'evee.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_evernote_sdk_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'evernote-sdk-js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ews_javascript_api():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ews-javascript-api')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ex_navigator():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ex-navigator')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_example_backbone_app():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'example-backbone-app')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_example_node():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'example-node')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_excel_builder_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'excel-builder.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_excellentexport():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'excellentexport')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_execjs():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'execjs')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_exercises():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'exercises')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_exokit():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'exokit')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_exoskeleton():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'exoskeleton')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_express():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'express')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_express_angular():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'express-angular')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_express_di():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'express-di')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_express_happiness():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'express-happiness')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_express_partials():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'express-partials')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_express_train():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'express-train')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_exterminate():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'exterminate')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_eyeballs_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'eyeballs.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_f8app():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'f8app')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fabric_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fabric.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_facebook_circles():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'facebook-circles')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_facebook_js_sdk():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'facebook-js-sdk')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_faced():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'faced')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fairy():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fairy')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_faker_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'faker.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_falcor():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'falcor')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_falkor_archived():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'falkor-archived')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fancy_zoom():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fancy-zoom')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fann_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fann.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fantasy_land():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fantasy-land')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fastify():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fastify')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fbt():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fbt')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fe_javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fe.javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_feather():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'feather')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_feature_engineering_book():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'feature-engineering-book')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_feelingrestful_theme():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'feelingrestful-theme')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fela():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fela')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_felt():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'felt')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fetch():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fetch')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fhir_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fhir.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fibjs():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fibjs')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fieldval_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fieldval-js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_filepond():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'filepond')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fileupload():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fileupload')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_finitio():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'finitio')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_firebase_angular_starter_pack():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'firebase-angular-starter-pack')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fireloop_io():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fireloop.io')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_firequery():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'firequery')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fireunit():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fireunit')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fireworks_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fireworks.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fishbone_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fishbone.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fiveby():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fiveby')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fixto():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fixto')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flaskr_tdd():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flaskr-tdd')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flatpickr():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flatpickr')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flexibility():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flexibility')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flight():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flight')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flipcountdown():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flipcountdown')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flipload():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flipload')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flot():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flot')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flotsam():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flotsam')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flow_jsdoc():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flow-jsdoc')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flowable_engine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flowable-engine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flowy():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flowy')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flux():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flux')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_flux_router_component():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'flux-router-component')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fmt_obj():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fmt-obj')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fn_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fn.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_font_awesome_webpack():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'font-awesome-webpack')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_forest():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'forest')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_formacao_javascript_mestre_jedi():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'formacao-javascript-mestre-jedi')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_formaline():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'formaline')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_formhub():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'formhub')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_formio_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'formio.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_formspree():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'formspree')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fourk_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fourk.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_foxjs():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'foxjs')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_frame_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'frame.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_framer_sketch_boilerplate():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'framer-sketch-boilerplate')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_framer_templates():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'framer-templates')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_framework():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'framework')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_framework7():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'framework7')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_framework7_react_base():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'framework7-react-base')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_freeCodeCamp():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'freeCodeCamp')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_front_end_interview_handbook():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'front-end-interview-handbook')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_front_end_separate():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'front-end-separate')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_front_ui():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'front-ui')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_frozen():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'frozen')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_frpjs():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'frpjs')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fruitmachine():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fruitmachine')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fseditor():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fseditor')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fuckitjs():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fuckitjs')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fullPage_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fullPage.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fullproof():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fullproof')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fullstack():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fullstack')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fullstack_javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fullstack-javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fullstack_javascript_architecture():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fullstack-javascript-architecture')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_functional_javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'functional-javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_functional_javascript_workshop():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'functional-javascript-workshop')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_functional_programming_javascript():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'functional-programming-javascript')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_fuzzilli():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'fuzzilli')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_galleria():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'galleria')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_gamblers_dice():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'gamblers-dice')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_gameQuery():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'gameQuery')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ganache_cli():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ganache-cli')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_ganon():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'ganon')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_gantt():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'gantt')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_gatsby():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'gatsby')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_gauge_js():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'gauge.js')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_geierlein():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'geierlein')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_generator_angular_go_martini():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'generator-angular-go-martini')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_generator_angulpify():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'generator-angulpify')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_generator_jhipster():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'generator-jhipster')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_generator_jhipster_react():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'generator-jhipster-react')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_generator_jquery_boilerplate():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'generator-jquery-boilerplate')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def test_repos_generator_phaser():
# path_name = os.path.join(constants.seeds_dir, 'repos', 'generator-phaser')
# multicall.multicall_directories(path_name, fuzzer='quickfuzz', validator=validate)
# def | |
# bombman.py
#!/usr/bin/env python
# coding=utf-8
#
# Bombman - free and open-source Bomberman clone
#
# Copyright (C) 2016 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ========================== A FEW COMMENTS ===========================
#
# Version numbering system:
#
# major.minor
#
# Major number increases with significant new features added (multiplayer, ...),
# minor number increases with small changes (bug fixes, AI improvements, ...) and
# it does so in this way: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 91, 92, 93, etc.
#
# ---------------------------------------------------------------------
#
# Map string format (may contain spaces and newlines, which will be ignored):
#
# <environment>;<player items>;<map items>;<tiles>
#
# <environment> - Name of the environment of the map (affects only visual appearance).
# <player items> - Items that players have from the start of the game (can be an empty string),
# each item is represented by one letter (the same letter can appear multiple times):
# f - flame
# F - superflame
# b - bomb
# k - kicking shoe
# s - speedup
# p - spring
# d - disease
# m - multibomb
# r - random
# x - boxing glove
# e - detonator
# t - throwing glove
# <map items> - Set of items that will be hidden in block on the map. This is a string of the
# same format as in <player items>. If there is more items specified than there is
# block tiles, then some items will be left out.
# <tiles> - Left to right, top to bottom sequenced array of map tiles:
# . - floor
# x - block (destroyable)
# # - wall (undestroyable)
# A - teleport A
# B - teleport B
# T - trampoline
# V - lava
# u - arrow up, floor tile
# r - arrow right, floor tile
# d - arrow down, floor tile
# l - arrow left, floor tile
# U - arrow up, under block tile
# R - arrow right, under block tile
# D - arrow down, under block tile
# L - arrow left, under block tile
# <0-9> - starting position of the player specified by the number
import sys
import pygame
import os
import math
import copy
import random
import re
import time
from collections import defaultdict
from playerClass import *
DEBUG_PROFILING = False    # when True, Profiler measurements are collected
DEBUG_FPS = False          # when True, the current FPS is displayed
DEBUG_VERBOSE = False      # when True, debug_log messages are printed

#------------------------------------------------------------------------------

def debug_log(message):
    """Print a debug message, but only when verbose debugging is enabled."""
    if not DEBUG_VERBOSE:
        return
    print(message)
#==============================================================================
class Profiler(object):
    """Collects and reports per-frame timings of named code sections.

    All measuring methods are no-ops unless DEBUG_PROFILING is True.
    """

    SHOW_LAST = 10    ##< number of most recent frame measurements kept per section

    #----------------------------------------------------------------------------

    def __init__(self):
        # maps section name -> list of SHOW_LAST duration values in ms,
        # newest first (slot 0 is the frame currently being measured)
        self.sections = {}

    #----------------------------------------------------------------------------

    def measure_start(self, section_name):
        """Begin timing the given section for the current frame."""
        if not DEBUG_PROFILING:
            return

        if section_name not in self.sections:
            # FIX: was "xrange", which does not exist in Python 3 (the file
            # already uses Python 3 style print() calls)
            self.sections[section_name] = [0.0] * Profiler.SHOW_LAST

        section_values = self.sections[section_name]

        # subtract the start time now; measure_stop later adds the stop time,
        # leaving the elapsed duration in slot 0
        section_values[0] -= pygame.time.get_ticks()

    #----------------------------------------------------------------------------

    def measure_stop(self, section_name):
        """Finish timing the given section for the current frame."""
        if not DEBUG_PROFILING:
            return

        if section_name not in self.sections:
            return

        section_values = self.sections[section_name]
        section_values[0] += pygame.time.get_ticks()

    #----------------------------------------------------------------------------

    def end_of_frame(self):
        """Shift each section's history by one frame, opening a fresh slot 0."""
        for section_name in self.sections:
            section_values = self.sections[section_name]
            section_values.pop()
            section_values.insert(0, 0)

    #----------------------------------------------------------------------------

    def get_profile_string(self):
        """Return a human-readable table of recent measurements with averages."""
        result = "PROFILING INFO:"

        for section_name in sorted(self.sections.keys()):
            result += "\n" + section_name.ljust(25) + ": "
            section_values = self.sections[section_name]

            # FIX: was "xrange"; iterate the values directly instead
            for value in section_values:
                result += str(value).ljust(5)

            result += " AVG: " + str(sum(section_values) / float(len(section_values)))

        return result
#==============================================================================
## Something that has a float position on the map.
## Something that has a float position on the map.
class Positionable(object):
    #----------------------------------------------------------------------------

    def __init__(self):
        self.position = (0.0, 0.0)    ##< (x, y) position in tile units

    #----------------------------------------------------------------------------

    def set_position(self, position):
        self.position = position

    #----------------------------------------------------------------------------

    def get_position(self):
        return self.position

    #----------------------------------------------------------------------------

    def get_neighbour_tile_coordinates(self):
        """Return integer coordinates of the four adjacent tiles as (top, right, down, left)."""
        tile_coordinates = self.get_tile_position()

        top = (tile_coordinates[0], tile_coordinates[1] - 1)
        right = (tile_coordinates[0] + 1, tile_coordinates[1])
        down = (tile_coordinates[0], tile_coordinates[1] + 1)
        left = (tile_coordinates[0] - 1, tile_coordinates[1])

        return (top, right, down, left)

    #----------------------------------------------------------------------------

    def get_tile_position(self):
        """Return the integer tile coordinates this object is standing on."""
        return Positionable.position_to_tile(self.position)

    #----------------------------------------------------------------------------

    ## Moves the object to center of tile (if not specified, object's current tile is used).
    def move_to_tile_center(self, tile_coordinates=None):
        # idiomatic identity test (was "!= None")
        if tile_coordinates is not None:
            self.position = tile_coordinates

        self.position = (math.floor(self.position[0]) + 0.5, math.floor(self.position[1]) + 0.5)

    #----------------------------------------------------------------------------

    ## Converts float position to integer tile position.
    @staticmethod
    def position_to_tile(position):
        return (int(math.floor(position[0])), int(math.floor(position[1])))

    #----------------------------------------------------------------------------

    def is_near_tile_center(self):
        """Return True if the position lies in the central area of its tile."""
        position_within_tile = (self.position[0] % 1, self.position[1] % 1)

        limit = 0.2
        limit2 = 1.0 - limit

        return (limit < position_within_tile[0] < limit2) and (limit < position_within_tile[1] < limit2)
#==============================================================================
## Info about a bomb's flight (when boxed or thrown).
## Info about a bomb's flight (when boxed or thrown).
class BombFlightInfo(object):
    #----------------------------------------------------------------------------

    def __init__(self):
        # distances are measured in map tiles
        self.total_distance_to_travel = 0
        self.distance_travelled = 0
        # unit direction of the flight; each component is -1, 0 or 1
        self.direction = (0, 0)
#==============================================================================
class Bomb(Positionable):
    """A bomb placed on the map, belonging to a specific player."""

    ROLLING_SPEED = 4    ##< movement speed of a rolling (kicked) bomb
    FLYING_SPEED = 5     ##< movement speed of a flying (thrown/boxed) bomb

    # movement states of the bomb:
    BOMB_ROLLING_UP = 0
    BOMB_ROLLING_RIGHT = 1
    BOMB_ROLLING_DOWN = 2
    BOMB_ROLLING_LEFT = 3
    BOMB_FLYING = 4
    BOMB_NO_MOVEMENT = 5

    DETONATOR_EXPIRATION_TIME = 20000    ##< ms after which a detonator stops working (see has_detonator)
    BOMB_EXPLODES_IN = 3000              ##< default time (ms) from creation to explosion
    EXPLODES_IN_QUICK = 800              ##< for when the player has quick explosion disease

    #----------------------------------------------------------------------------

    def __init__(self, player):
        super(Bomb,self).__init__()
        self.time_of_existence = 0                       ##< for how long (in ms) the bomb has existed
        self.flame_length = player.get_flame_length()    ##< how far the flame will go
        self.player = player                             ##< to which player the bomb belongs
        self.explodes_in = Bomb.BOMB_EXPLODES_IN         ##< time in ms in which the bomb explodes from the time it was created (detonator_time must expire before this starts counting down)
        self.detonator_time = 0                          ##< if > 0, the bomb has a detonator on it, after expiring it becomes a regular bomb
        # place the bomb at the center of the tile the owner stands on
        self.set_position(player.get_position())
        self.move_to_tile_center()
        self.has_spring = player.bombs_have_spring()
        self.movement = Bomb.BOMB_NO_MOVEMENT
        self.has_exploded = False
        self.flight_info = BombFlightInfo()

    #----------------------------------------------------------------------------

    ## Sends the bomb flying from its current position to given tile (can be
    #  outside the map boundaries, will fly over the border from the other side).
    def send_flying(self, destination_tile_coords):
        self.movement = Bomb.BOMB_FLYING
        current_tile = self.get_tile_position()
        self.flight_info.distance_travelled = 0
        # the flight is always axis-aligned: pick the axis whose coordinate differs
        axis = 1 if current_tile[0] == destination_tile_coords[0] else 0
        self.flight_info.total_distance_to_travel = abs(current_tile[axis] - destination_tile_coords[axis])
        # build the unit direction as a mutable list, then freeze it to a tuple
        self.flight_info.direction = [0,0]
        self.flight_info.direction[axis] = -1 if current_tile[axis] > destination_tile_coords[axis] else 1
        self.flight_info.direction = tuple(self.flight_info.direction)
        # wrap the destination around the map edges (the map is a torus)
        destination_tile_coords = (destination_tile_coords[0] % GameMap.MAP_WIDTH,destination_tile_coords[1] % GameMap.MAP_HEIGHT)
        self.move_to_tile_center(destination_tile_coords)

    #----------------------------------------------------------------------------

    def has_detonator(self):
        """Return True if the bomb currently has a working detonator."""
        return self.detonator_time > 0 and self.time_of_existence < Bomb.DETONATOR_EXPIRATION_TIME

    #----------------------------------------------------------------------------

    ## Returns a time until the bomb explodes by itself.
    def time_until_explosion(self):
        return self.explodes_in + self.detonator_time - self.time_of_existence

    #----------------------------------------------------------------------------

    def explodes(self):
        """Mark the bomb as exploded and notify the owner (at most once)."""
        if not self.has_exploded:
            self.player.bomb_exploded()
            self.has_exploded = True
#==============================================================================
## Represents a flame coming off of an exploding bomb.
## Represents a flame coming off of an exploding bomb.
class Flame(object):
    #----------------------------------------------------------------------------

    def __init__(self):
        # reference to the player whose exploding bomb created this flame
        self.player = None
        # time in ms till the flame disappears
        self.time_to_burnout = 1000
        # string representation of the flame direction
        self.direction = "all"
#==============================================================================
class MapTile(object):
    """A single tile of the game map, possibly holding flames, an item and a special object."""

    TILE_FLOOR = 0    ##< walkable map tile
    TILE_BLOCK = 1    ##< non-walkable but destroyable map tile
    TILE_WALL = 2     ##< non-walkable and non-destroyable map tile

    SPECIAL_OBJECT_TRAMPOLINE = 0
    SPECIAL_OBJECT_TELEPORT_A = 1
    SPECIAL_OBJECT_TELEPORT_B = 2
    SPECIAL_OBJECT_ARROW_UP = 3
    SPECIAL_OBJECT_ARROW_RIGHT = 4
    SPECIAL_OBJECT_ARROW_DOWN = 5
    SPECIAL_OBJECT_ARROW_LEFT = 6
    SPECIAL_OBJECT_LAVA = 7

    #----------------------------------------------------------------------------

    def __init__(self, coordinates):
        self.kind = MapTile.TILE_FLOOR
        self.flames = []
        self.coordinates = coordinates
        self.to_be_destroyed = False    ##< Flag that marks the tile to be destroyed after the flames go out.
        self.item = None                ##< Item that's present on the tile
        self.special_object = None      ##< special object present on the tile, like trampoline or teleport
        self.destination_teleport = None  ##< in case of special_object equal to SPECIAL_OBJECT_TELEPORT_A or SPECIAL_OBJECT_TELEPORT_B holds the destination teleport tile coordinates

    #----------------------------------------------------------------------------

    def shouldnt_walk(self):
        """Return True if stepping on this tile is impossible or deadly (solid, burning or lava)."""
        # tuple membership and len() > 0 instead of list membership / ">= 1"
        return (self.kind in (MapTile.TILE_WALL, MapTile.TILE_BLOCK)
                or len(self.flames) > 0
                or self.special_object == MapTile.SPECIAL_OBJECT_LAVA)
#==============================================================================
## Holds and manipulates the map data including the players, bombs etc.
class GameMap(object):
MAP_WIDTH = 15
MAP_HEIGHT = 11
WALL_MARGIN_HORIZONTAL = 0.2
WALL_MARGIN_VERTICAL = 0.4
COLLISION_BORDER_UP = 0 ##< position is inside upper border with non-walkable tile
COLLISION_BORDER_RIGHT = 1 ##< position is inside right border with non-walkable tile
COLLISION_BORDER_DOWN = 2 ##< position is inside bottom border with non-walkable tile
COLLISION_BORDER_LEFT = 3 ##< position is inside left border with non-walkable tile
COLLISION_TOTAL = 4 ##< position is inside | |
type passed to BaseSegment: {0}".format(
type(segments)))
    def parse(self, parse_context=None):
        """Use the parse grammar to find subsegments within this segment.

        A large chunk of the logic around this can be found in the `expand` method.

        Use the parse setting in the context for testing, mostly to check how deep to go.
        True/False for yes or no, an integer allows a certain number of levels.
        """
        # NOTE(review): parse_context is dereferenced here before the
        # "if parse_context" truthiness check below -- passing the default
        # None would raise AttributeError on this line. Presumably callers
        # always supply a context; confirm before relying on the default.
        if not parse_context.dialect:
            raise RuntimeError("No dialect provided to {0!r}!".format(self))
        # Clear the blacklist cache to avoid missteps from earlier parses
        if parse_context:
            parse_context.blacklist.clear()
        # the parse_depth and recurse kwargs control how deep we will recurse for testing.
        if not self.segments:
            # This means we're a root segment, just return an unmutated self
            return self
        # Get the Parse Grammar
        g = self._parse_grammar()
        if g is None:
            # No parse grammar, go straight to expansion
            logging.debug("{0}.parse: no grammar. Going straight to expansion".format(self.__class__.__name__))
        else:
            # Use the Parse Grammar (and the private method)
            # NOTE: No match_depth kwarg, because this is the start of the matching.
            m = g._match(
                segments=self.segments,
                parse_context=parse_context.copy(
                    match_segment=self.__class__.__name__
                )
            )
            if not isinstance(m, MatchResult):
                raise TypeError(
                    "[PD:{0}] {1}.match. Result is {2}, not a MatchResult!".format(
                        parse_context.parse_depth, self.__class__.__name__, type(m)))
            # Basic Validation, that we haven't dropped anything.
            check_still_complete(self.segments, m.matched_segments, m.unmatched_segments)
            if m.has_match():
                if m.is_complete():
                    # Complete match, happy days!
                    self.segments = m.matched_segments
                else:
                    # Incomplete match.
                    # For now this means the parsing has failed. Lets add the unmatched bit at the
                    # end as something unparsable.
                    # TODO: Do something more intelligent here.
                    self.segments = m.matched_segments + (UnparsableSegment(
                        segments=m.unmatched_segments, expected="Nothing..."),)
            else:
                # If there's no match at this stage, then it's unparsable. That's
                # a problem at this stage so wrap it in an unparsable segment and carry on.
                self.segments = (UnparsableSegment(
                    segments=self.segments,
                    expected=g.expected_string(dialect=parse_context.dialect)),)  # NB: tuple
        # Validate new segments
        self.validate_segments(text="parsing")
        bencher = BenchIt()  # starts the timer
        bencher("Parse complete of {0!r}".format(self.__class__.__name__))
        # Recurse if allowed (using the expand method to deal with the expansion)
        logging.debug(
            "{0}.parse: Done Parse. Plotting Recursion. Recurse={1!r}".format(
                self.__class__.__name__, parse_context.recurse))
        parse_depth_msg = "###\n#\n# Beginning Parse Depth {0}: {1}\n#\n###\nInitial Structure:\n{2}".format(
            parse_context.parse_depth + 1, self.__class__.__name__, self.stringify())
        if parse_context.recurse is True:
            # unconditional recursion: reset match_depth, keep recursing forever
            logging.debug(parse_depth_msg)
            self.segments = self.expand(
                self.segments,
                parse_context=parse_context.copy(
                    incr='parse_depth', match_depth=0, recurse=True
                )
            )
        elif isinstance(parse_context.recurse, int):
            # an integer recurse setting counts down the remaining levels
            if parse_context.recurse > 1:
                logging.debug(parse_depth_msg)
                self.segments = self.expand(
                    self.segments,
                    parse_context=parse_context.copy(decr='recurse', incr='parse_depth')
                )
        # Validate new segments
        self.validate_segments(text="expanding")
        return self
def __repr__(self):
return "<{0}: ({1})>".format(
self.__class__.__name__,
self.pos_marker)
def _reconstruct(self):
"""Make a string from the segments of this segment."""
return "".join(seg.raw for seg in self.segments)
    @property
    def raw(self):
        """The raw source string, reconstructed from this segment's children."""
        return self._reconstruct()
    @property
    def raw_upper(self):
        """The raw source string of this segment, uppercased."""
        return self._reconstruct().upper()
    @staticmethod
    def _suffix():
        """Return any extra output required at the end when logging.

        NB Override this for specific subclasses if we want extra output.
        """
        return ""
def _preface(self, ident, tabsize, pos_idx, raw_idx):
"""Returns the preamble to any logging."""
preface = (' ' * (ident * tabsize))
if self.is_meta:
preface += "[META] "
preface += self.__class__.__name__ + ":"
preface += (' ' * max(pos_idx - len(preface), 0))
if self.pos_marker:
preface += str(self.pos_marker)
else:
preface += '-'
sfx = self._suffix()
if sfx:
return preface + (' ' * max(raw_idx - len(preface), 0)) + sfx
else:
return preface
@property
def _comments(self):
"""Returns only the comment elements of this segment."""
return [seg for seg in self.segments if seg.type == 'comment']
@property
def _non_comments(self):
"""Returns only the non-comment elements of this segment."""
return [seg for seg in self.segments if seg.type != 'comment']
    def stringify(self, ident=0, tabsize=4, pos_idx=60, raw_idx=80, code_only=False):
        """Use indentation to render this segment and its children as a string."""
        buff = StringIO()
        preface = self._preface(ident=ident, tabsize=tabsize, pos_idx=pos_idx, raw_idx=raw_idx)
        buff.write(preface + '\n')
        # When this segment renders comments separately, split the children
        # into a "Comments:" section and a "Code:" section.
        if not code_only and self.comment_seperate and len(self._comments) > 0:
            if self._comments:
                buff.write((' ' * ((ident + 1) * tabsize)) + 'Comments:' + '\n')
                for seg in self._comments:
                    buff.write(seg.stringify(ident=ident + 2, tabsize=tabsize, pos_idx=pos_idx,
                                             raw_idx=raw_idx, code_only=code_only))
            if self._non_comments:
                buff.write((' ' * ((ident + 1) * tabsize)) + 'Code:' + '\n')
                for seg in self._non_comments:
                    buff.write(seg.stringify(ident=ident + 2, tabsize=tabsize, pos_idx=pos_idx,
                                             raw_idx=raw_idx, code_only=code_only))
        else:
            for seg in self.segments:
                # If we're in code_only, only show the code segments, otherwise always true
                if not code_only or seg.is_code:
                    buff.write(seg.stringify(ident=ident + 1, tabsize=tabsize, pos_idx=pos_idx,
                                             raw_idx=raw_idx, code_only=code_only))
        return buff.getvalue()
@staticmethod
def segs_to_tuple(segs, **kwargs):
"""Return a tuple structure from an iterable of segments."""
return tuple(seg.to_tuple(**kwargs) for seg in segs)
def to_tuple(self, **kwargs):
"""Return a tuple structure from this segment.
NB: If he segment is a meta segment, i.e. it's an indent or dedent,
then it will never be returned from here!
"""
# works for both base and raw
code_only = kwargs.get('code_only', False)
show_raw = kwargs.get('show_raw', False)
if show_raw and not self.segments:
result = (self.type, self.raw)
elif code_only:
result = (self.type, tuple(seg.to_tuple(**kwargs) for seg in self.segments if seg.is_code and not seg.is_meta))
else:
result = (self.type, tuple(seg.to_tuple(**kwargs) for seg in self.segments if not seg.is_meta))
return result
@classmethod
def structural_simplify(cls, elem):
"""Simplify the structure recursively so it serializes nicely in json/yaml."""
if isinstance(elem, tuple):
# Does this look like an element?
if len(elem) == 2 and isinstance(elem[0], str):
# This looks like a single element, make a dict
elem = {elem[0]: cls.structural_simplify(elem[1])}
elif isinstance(elem[0], tuple):
# This looks like a list of elements.
keys = [e[0] for e in elem]
# Any duplicate elements?
if len(set(keys)) == len(keys):
# No, we can use a mapping typle
elem = {e[0]: cls.structural_simplify(e[1]) for e in elem}
else:
# Yes, this has to be a list :(
elem = [cls.structural_simplify(e) for e in elem]
return elem
    def as_record(self, **kwargs):
        """Return the segment as a structurally simplified record.

        This is useful for serialization to yaml or json.
        kwargs are passed through to `to_tuple`.
        """
        return self.structural_simplify(self.to_tuple(**kwargs))
    @classmethod
    def match(cls, segments, parse_context):
        """Match a list of segments against this segment.

        Note: Match for segments is done in the ABSTRACT.
        When dealing with concrete then we're always in parse.
        Parse is what happens during expand.

        Matching can be done from either the raw or the segments.
        This raw function can be overridden, or a grammar defined
        on the underlying class.
        """
        if cls._match_grammar():
            # Call the private method
            m = cls._match_grammar()._match(segments=segments, parse_context=parse_context.copy(incr='match_depth'))
            # Calling unify here, allows the MatchResult class to do all the type checking.
            if not isinstance(m, MatchResult):
                raise TypeError(
                    "[PD:{0} MD:{1}] {2}.match. Result is {3}, not a MatchResult!".format(
                        parse_context.parse_depth, parse_context.match_depth, cls.__name__,
                        type(m)))
            # Once unified we can deal with it just as a MatchResult
            if m.has_match():
                # Wrap the matched portion in an instance of this class,
                # passing the unmatched remainder through unchanged.
                return MatchResult((cls(segments=m.matched_segments),), m.unmatched_segments)
            else:
                return MatchResult.from_unmatched(segments)
        else:
            raise NotImplementedError("{0} has no match function implemented".format(cls.__name__))
    @classmethod
    def _match(cls, segments, parse_context):
        """A wrapper on the match function to do some basic validation and logging."""
        parse_match_logging(
            cls.__name__[:10], '_match', 'IN', parse_context=parse_context,
            v_level=4, ls=len(segments))
        if isinstance(segments, BaseSegment):
            segments = (segments,)  # Make into a tuple for compatability
        if not isinstance(segments, tuple):
            logging.warning(
                "{0}.match, was passed {1} rather than tuple or segment".format(
                    cls.__name__, type(segments)))
            if isinstance(segments, list):
                # Let's make it a tuple for compatibility
                segments = tuple(segments)
        if len(segments) == 0:
            logging.info("{0}._match, was passed zero length segments list".format(cls.__name__))
        m = cls.match(segments, parse_context=parse_context)
        if not isinstance(m, tuple) and m is not None:
            logging.warning(
                "{0}.match, returned {1} rather than tuple".format(
                    cls.__name__, type(m)))
        parse_match_logging(
            cls.__name__[:10], '_match', 'OUT',
            parse_context=parse_context, v_level=4, m=m)
        # Validation is skipped at a match level. For performance reasons
        # we match at the parse level only
        # check_still_complete(segments, m.matched_segments, m.unmatched_segments)
        return m
@staticmethod
def expand(segments, parse_context):
"""Expand the list of child segments using their `parse` methods."""
segs = ()
for stmt in segments:
try:
if not stmt.is_expandable:
verbosity_logger(
"[PD:{0}] Skipping expansion of {1}...".format(parse_context.parse_depth, stmt),
verbosity=parse_context.verbosity)
segs += (stmt,)
continue
except Exception as err:
# raise ValueError("{0} has no attribute `is_expandable`. This segment appears poorly constructed.".format(stmt))
logging.error("{0} has no attribute `is_expandable`. This segment appears poorly constructed.".format(stmt))
raise err
if not hasattr(stmt, 'parse'):
raise ValueError("{0} has no method `parse`. This segment appears poorly constructed.".format(stmt))
parse_depth_msg = "Parse | |
i , x , y in graph.iteritems() :
new_graph [ i ] = x + shift , y
return new_graph
# ==============================================================================
## Right shift of the graph
# @code
# graph = ...
# newg = graph >> 14.5
# @endcode
def _gr_rshift_ ( graph , shift ) :
    """Right shift of the graph
    >>> graph = ...
    >>> newg  = graph >> 14.5
    """
    ## FIX: the original referred to the undefined name `self` here, which
    ## raised NameError on every call; the parameter is `graph`.
    ## A right shift is simply a negated left shift.
    return _gr_lshift_ ( graph , -1.0 * shift )
# ==============================================================================
## Left shift of the graph
# @code
# graph <<= 14.5 ...
# @endcode
def _gr_ilshift_ ( graph , shift ) :
    """In-place left shift of the graph
    >>> graph <<= 14.5
    """
    ## only plain numbers and VE (value-with-error) shifts are supported
    if not isinstance ( shift , num_types + ( VE , ) ) : return NotImplemented
    ## shift every x-coordinate, keeping y untouched
    ## (iteritems/__setitem__ look like project extensions of TGraph -- see their definitions)
    for i , x , y in graph.iteritems() :
        graph [ i ] = x + shift , y
    return graph
# ==============================================================================
## Right shift of the graph
# @code
# graph >>= 14.5
# @endcode
def _gr_irshift_ ( graph , shift ) :
    """In-place right shift of the graph
    >>> graph >>= 14.5
    """
    ## FIX: the original referred to the undefined name `self` here, which
    ## raised NameError on every call; the parameter is `graph`.
    ## An in-place right shift is a negated in-place left shift.
    return _gr_ilshift_ ( graph , -1.0 * shift )
## Decorate ROOT.TGraph with the arithmetic and shift operators defined above.
#  Both the Python-2 division names (__div__/__idiv__/__rdiv__) and the
#  Python-3 names (__truediv__ and friends) are installed so division works
#  under either interpreter.
ROOT.TGraph. __mul__ = _gr_mul_
ROOT.TGraph.__rmul__ = _gr_mul_
ROOT.TGraph.__imul__ = _gr_imul_
ROOT.TGraph. __div__ = _gr_div_
ROOT.TGraph.__idiv__ = _gr_idiv_
ROOT.TGraph.__rdiv__ = _gr_rdiv_
ROOT.TGraph. __truediv__ = _gr_div_
ROOT.TGraph.__itruediv__ = _gr_idiv_
ROOT.TGraph.__rtruediv__ = _gr_rdiv_
ROOT.TGraph. __add__ = _gr_add_
ROOT.TGraph.__radd__ = _gr_add_
ROOT.TGraph.__iadd__ = _gr_iadd_
ROOT.TGraph. __sub__ = _gr_sub_
ROOT.TGraph.__rsub__ = _gr_rsub_
ROOT.TGraph.__isub__ = _gr_isub_
## shift operators: graph << x moves points right by x, graph >> x moves them left
ROOT.TGraph.__lshift__ = _gr_lshift_
ROOT.TGraph.__rshift__ = _gr_rshift_
ROOT.TGraph.__ilshift__ = _gr_ilshift_
ROOT.TGraph.__irshift__ = _gr_irshift_
# =============================================================================
## scale the graph
# @code
# gr = ...
# ng = gr * 10
# @endcode
def _gre_mul_ ( graph , scale ) :
    """Scale the y-values of the graph (TGraphErrors version)
    >>> graph = ...
    >>> newg  = graph * 10
    """
    ## only plain numbers and VE (value-with-error) factors are supported
    if not isinstance ( scale , num_types + (VE,) ) : return NotImplemented
    new_graph = ROOT.TGraphErrors ( len ( graph ) )
    ## scale each y, keeping x untouched
    ## (iteritems/__setitem__ look like project extensions of TGraphErrors)
    for i , x , y in graph.iteritems() :
        new_graph [ i ] = x , y * scale
    return new_graph
# ================================================================================
## scale the graph
# @code
# gr = ...
# ng = gr / 10
# @endcode
def _gre_div_ ( graph , scale ) :
    """Scale the graph
    graph = ...
    newg  = graph / 10
    """
    ## division is multiplication by the inverse factor
    inv_scale = 1.0 / scale
    return graph * inv_scale
# =============================================================================
## scale the graph
# @code
# gr *= 10
# @endcode
def _gre_imul_ ( graph , scale ) :
    """Scale the graph in-place
    graph *= 10
    """
    ## only plain numbers and VE (value-with-error) factors are supported
    if not isinstance ( scale , num_types + ( VE , ) ) : return NotImplemented
    ## scale each y in place, keeping x untouched
    for i , x , y in graph.iteritems() :
        graph [ i ] = x , y * scale
    return graph
# ================================================================================
## scale the graph
# @code
# gr /= 10
# @endcode
def _gre_idiv_ ( graph , scale ) :
    """Scale the graph in-place
    graph /= 10
    """
    ## in-place division is in-place multiplication by the inverse factor
    inv_scale = 1.0 / scale
    return _gre_imul_ ( graph , inv_scale )
# =================================================================================
## scale the graph
# @code
# gr = ...
# ng = 10 / gr
# @endcode
def _gre_rdiv_ ( graph , scale ) :
    """Scale the graph (right division)
    graph = ...
    newg  = 10 / graph
    """
    ## only plain numbers and VE (value-with-error) factors are supported
    if not isinstance ( scale , num_types + (VE,) ) : return NotImplemented
    new_graph = ROOT.TGraphErrors ( len ( graph ) )
    ## each point becomes scale / y, keeping x untouched
    for i , x , y in graph.iteritems() :
        new_graph [ i ] = x , 1.0 * scale / y
    return new_graph
# =============================================================================
## shift the graph
# @code
# gr = ...
# ng = gr + 10
# @endcode
def _gre_add_ ( graph , shift ) :
    """Shift the y-values of the graph
    graph = ...
    newg  = graph + 10
    """
    ## only plain numbers and VE (value-with-error) shifts are supported
    if not isinstance ( shift , num_types + ( VE , ) ) : return NotImplemented
    new_graph = ROOT.TGraphErrors ( len ( graph ) )
    ## shift each y, keeping x untouched
    for i , x , y in graph.iteritems() :
        new_graph [ i ] = x , y + shift
    return new_graph
# =============================================================================
## shift the graph
# @code
# gr = ...
# ng = gr - 10
# @endcode
def _gre_sub_ ( graph , shift ) :
    """Shift the graph
    graph = ...
    newg  = graph - 10
    """
    ## subtraction is addition of the negated shift
    negated = -1.0 * shift
    return _gre_add_ ( graph , negated )
# =============================================================================
## shift the graph (reflected subtraction)
#  @code
#  gr = ...
#  ng = 10 - gr
#  @endcode
def _gre_rsub_ ( graph , shift ) :
    """Shift the graph (reflected subtraction)
    >>> graph = ...
    >>> newg  = 10 - graph
    """
    if not isinstance ( shift , num_types + ( VE , ) ) :
        return NotImplemented
    result = ROOT.TGraphErrors ( len ( graph ) )
    # every y-value becomes shift - y
    for point , xval , yval in graph.iteritems () :
        result [ point ] = xval , shift - yval
    return result
# =============================================================================
## shift the graph in place
#  @code
#  gr += 10
#  @endcode
def _gre_iadd_ ( graph , shift ) :
    """Shift the graph in place
    >>> graph += 10
    """
    if not isinstance ( shift , num_types + ( VE , ) ) :
        return NotImplemented
    # shift every y-value in place
    for point , xval , yval in graph.iteritems () :
        graph [ point ] = xval , yval + shift
    return graph
# =============================================================================
## shift the graph in place
#  @code
#  gr -= 10
#  @endcode
def _gre_isub_ ( graph , shift ) :
    """Shift the graph in place
    >>> graph -= 10
    """
    # delegate to in-place addition with the negated shift
    negated = -1.0 * shift
    return _gre_iadd_ ( graph , negated )
# ==============================================================================
## Left shift of the graph
#  @code
#  graph = ...
#  newg  = graph << 14.5
#  @endcode
def _gre_lshift_ ( graph , shift ) :
    """Left shift of the graph along the x-axis
    >>> graph = ...
    >>> newg = graph << 14.5
    """
    if not isinstance ( shift , num_types + ( VE , ) ) :
        return NotImplemented
    result = ROOT.TGraphErrors ( len ( graph ) )
    # move every point along x, y-values stay as they are
    for point , xval , yval in graph.iteritems () :
        result [ point ] = xval + shift , yval
    return result
# ==============================================================================
## Right shift of the graph
#  @code
#  graph = ...
#  newg = graph >> 14.5
#  @endcode
def _gre_rshift_ ( graph , shift ) :
    """Right shift of the graph along the x-axis
    >>> graph = ...
    >>> newg = graph >> 14.5
    """
    ## BUGFIX: was `_gre_lshift_ ( self , ... )` -- `self` is undefined in a
    ## plain function, so `graph >> shift` always raised NameError.
    return _gre_lshift_ ( graph , -1.0 * shift )
# ==============================================================================
## Left shift of the graph, in place
#  @code
#  graph <<= 14.5 ...
#  @endcode
def _gre_ilshift_ ( graph , shift ) :
    """Left shift of the graph, in place
    >>> graph <<= 14.5
    """
    if not isinstance ( shift , num_types + ( VE , ) ) :
        return NotImplemented
    # move every point along x in place
    for point , xval , yval in graph.iteritems () :
        graph [ point ] = xval + shift , yval
    return graph
# ==============================================================================
## Right shift of the graph, in place
#  @code
#  graph >>= 14.5
#  @endcode
def _gre_irshift_ ( graph , shift ) :
    """Right shift of the graph, in place
    >>> graph >>= 14.5
    """
    ## BUGFIX: was `_gre_ilshift_ ( self , ... )` -- `self` is undefined in a
    ## plain function, so `graph >>= shift` always raised NameError.
    return _gre_ilshift_ ( graph , -1.0 * shift )
# Attach the operators defined above to ROOT.TGraphErrors, so graphs support
# scaling (*, /), vertical shifts (+, -) and x-axis shifts (<<, >>) in plain,
# reflected and in-place form.  The __div__/__idiv__/__rdiv__ entries keep
# Python-2 division working alongside the __truediv__ family.
ROOT.TGraphErrors. __mul__ = _gre_mul_
ROOT.TGraphErrors.__rmul__ = _gre_mul_
ROOT.TGraphErrors.__imul__ = _gre_imul_
ROOT.TGraphErrors. __div__ = _gre_div_
ROOT.TGraphErrors.__idiv__ = _gre_idiv_
ROOT.TGraphErrors.__rdiv__ = _gre_rdiv_
ROOT.TGraphErrors. __truediv__ = _gre_div_
ROOT.TGraphErrors.__itruediv__ = _gre_idiv_
ROOT.TGraphErrors.__rtruediv__ = _gre_rdiv_
ROOT.TGraphErrors. __add__ = _gre_add_
ROOT.TGraphErrors.__radd__ = _gre_add_
ROOT.TGraphErrors.__iadd__ = _gre_iadd_
ROOT.TGraphErrors. __sub__ = _gre_sub_
ROOT.TGraphErrors.__rsub__ = _gre_rsub_
ROOT.TGraphErrors.__isub__ = _gre_isub_
ROOT.TGraphErrors.__lshift__ = _gre_lshift_
ROOT.TGraphErrors.__rshift__ = _gre_rshift_
ROOT.TGraphErrors.__ilshift__ = _gre_ilshift_
ROOT.TGraphErrors.__irshift__ = _gre_irshift_
# =============================================================================
## scale the graph (with asymmetric errors)
#  @code
#  gr = ...
#  ng = gr * 10
#  @endcode
def _grae_mul_ ( graph , scale ) :
    """Scale the graph with asymmetric errors
    >>> graph = ...
    >>> newg  = graph * 10
    """
    if not isinstance ( scale , num_types + ( VE , ) ) :
        return NotImplemented
    result = ROOT.TGraphAsymmErrors ( len ( graph ) )
    # rescale y and both asymmetric y-errors; x and its errors are untouched
    for point , xval , exlo , exhi , yval , eylo , eyhi in graph.iteritems () :
        result [ point ] = xval , exlo , exhi , yval * scale , eylo * scale , eyhi * scale
    return result
# ================================================================================
## scale the graph
#  @code
#  gr = ...
#  ng = gr / 10
#  @endcode
def _grae_div_ ( graph , scale ) :
    """Scale the graph: ``newg = graph / 10``

    - graph : TGraphAsymmErrors to scale (left unchanged)
    - scale : numeric scale factor (plain number or VE)
    Returns a new scaled graph, or NotImplemented for unsupported operands.
    """
    ## Guard first, as the sibling operators do: otherwise an unsupported
    ## operand raises TypeError from `1.0 / scale` instead of returning
    ## NotImplemented and letting Python try the reflected operation.
    if not isinstance ( scale , num_types + ( VE , ) ) : return NotImplemented
    return graph * ( 1.0 / scale )
# =============================================================================
## scale the graph
# @code
# gr *= 10
# @endcode
def _grae_imul_ ( graph , scale ) | |
# tests/test_emitter_queries.py
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test emitter with querie."""
import os
import unittest
import tests.utils as tu
from geneve.events_emitter import SourceEvents, guess_from_query
from . import jupyter
event_docs_mappings = {
"""process where process.name == "regsvr32.exe"
""": {
"properties": {
"@timestamp": {"type": "date"},
"event": {"properties": {"category": {"type": "keyword"}}},
"process": {"properties": {"name": {"type": "keyword"}}},
},
},
"""network where source.ip == "::1" or destination.ip == "::1"
""": {
"properties": {
"@timestamp": {"type": "date"},
"event": {"properties": {"category": {"type": "keyword"}}},
"destination": {"properties": {"ip": {"type": "ip"}}},
"source": {"properties": {"ip": {"type": "ip"}}},
},
},
"""process where process.code_signature.exists == false and process.pid > 1024
""": {
"properties": {
"@timestamp": {"type": "date"},
"event": {"properties": {"category": {"type": "keyword"}}},
"process": {"properties": {"code_signature": {"properties": {"exists": {"type": "boolean"}}}, "pid": {"type": "long"}}}, # noqa: E501
},
},
}
mono_branch_mono_doc = {
"""any where true
""": [
[{}],
],
"""any where not false
""": [
[{}],
],
"""any where not (true and false)
""": [
[{}],
],
"""any where not (false or false)
""": [
[{}],
],
"""network where source.port > 512 and source.port < 1024
""": [
[{"event": {"category": ["network"]}, "source": {"port": 794}}],
],
"""network where not (source.port < 512 or source.port > 1024)
""": [
[{"event": {"category": ["network"]}, "source": {"port": 1021}}],
],
"""network where destination.port not in (80, 443)
""": [
[{"event": {"category": ["network"]}, "destination": {"port": 7564}}],
],
"""network where not destination.port in (80, 443)
""": [
[{"event": {"category": ["network"]}, "destination": {"port": 246}}],
],
"""network where destination.port == 22 and destination.port in (80, 443) or destination.port == 25
""": [
[{"event": {"category": ["network"]}, "destination": {"port": 25}}],
],
"""process where process.name == "regsvr32.exe"
""": [
[{"event": {"category": ["process"]}, "process": {"name": "regsvr32.exe"}}],
],
"""process where process.name != "regsvr32.exe"
""": [
[{"event": {"category": ["process"]}, "process": {"name": "Bmc"}}],
],
"""process where process.pid != 0
""": [
[{"event": {"category": ["process"]}, "process": {"pid": 3009213395}}],
],
"""process where process.pid >= 0
""": [
[{"event": {"category": ["process"]}, "process": {"pid": 1706296503}}],
],
"""process where process.pid > 0
""": [
[{"event": {"category": ["process"]}, "process": {"pid": 2505219495}}],
],
"""process where process.code_signature.exists == true
""": [
[{"event": {"category": ["process"]}, "process": {"code_signature": {"exists": True}}}],
],
"""process where process.code_signature.exists != true
""": [
[{"event": {"category": ["process"]}, "process": {"code_signature": {"exists": False}}}],
],
"""any where network.protocol == "some protocol"
""": [
[{"network": {"protocol": "some protocol"}}],
],
"""any where process.pid == null
""": [
[{}],
],
"""any where not process.pid != null
""": [
[{}],
],
"""any where process.pid != null
""": [
[{"process": {"pid": 102799507}}],
],
"""any where not process.pid == null
""": [
[{"process": {"pid": 2584819203}}],
],
"""process where process.name == "regsvr32.exe" and process.parent.name == "cmd.exe"
""": [
[{"event": {"category": ["process"]}, "process": {"name": "regsvr32.exe", "parent": {"name": "cmd.exe"}}}],
],
"""process where process.name : ("*.EXE", "*.DLL")
""": [
[{"event": {"category": ["process"]}, "process": {"name": "leneqzk.exe"}}],
],
"""network where destination.ip == "127.0.0.1"
""": [
[{"event": {"category": ["network"]}, "destination": {"ip": "127.0.0.1"}}],
],
"""network where cidrMatch(destination.ip, "10.0.0.0/8", "192.168.0.0/16")
""": [
[{"event": {"category": ["network"]}, "destination": {"ip": "10.77.153.19"}}],
],
"""network where not cidrMatch(destination.ip, "10.0.0.0/8", "192.168.0.0/16")
""": [
[{"event": {"category": ["network"]}, "destination": {"ip": "0.225.250.37"}}],
],
"""network where destination.ip == "::1"
""": [
[{"event": {"category": ["network"]}, "destination": {"ip": "::1"}}],
],
"""network where destination.ip == "822e::/16"
""": [
[{"event": {"category": ["network"]}, "destination": {"ip": "fc00:db20:35b:7399::5"}}],
],
"""event.category:network and destination.ip:"822e::/16"
""": [
[{"event": {"category": ["network"]}, "destination": {"ip": "fdf8:f53e:61e4::18"}}],
],
}
multi_branch_mono_doc = {
"""network where not (source.port > 512 and source.port < 1024)
""": [
[{"event": {"category": ["network"]}, "source": {"port": 182}}],
[{"event": {"category": ["network"]}, "source": {"port": 54422}}],
],
"""network where source.port > 512 or source.port < 1024
""": [
[{"event": {"category": ["network"]}, "source": {"port": 44925}}],
[{"event": {"category": ["network"]}, "source": {"port": 516}}],
],
"""network where source.port < 2000 and (source.port > 512 or source.port > 1024)
""": [
[{"event": {"category": ["network"]}, "source": {"port": 1334}}],
[{"event": {"category": ["network"]}, "source": {"port": 1034}}],
],
"""network where (source.port > 512 or source.port > 1024) and source.port < 2000
""": [
[{"event": {"category": ["network"]}, "source": {"port": 575}}],
[{"event": {"category": ["network"]}, "source": {"port": 1158}}],
],
"""network where (source.port > 1024 or source.port < 2000) and (source.port < 4000 or source.port > 512)
""": [
[{"event": {"category": ["network"]}, "source": {"port": 1970}}],
[{"event": {"category": ["network"]}, "source": {"port": 52226}}],
[{"event": {"category": ["network"]}, "source": {"port": 692}}],
[{"event": {"category": ["network"]}, "source": {"port": 1464}}],
],
"""network where destination.port in (80, 443)
""": [
[{"event": {"category": ["network"]}, "destination": {"port": 80}}],
[{"event": {"category": ["network"]}, "destination": {"port": 443}}],
],
"""process where process.name == "regsvr32.exe" or process.parent.name == "cmd.exe"
""": [
[{"event": {"category": ["process"]}, "process": {"name": "regsvr32.exe"}}],
[{"event": {"category": ["process"]}, "process": {"parent": {"name": "cmd.exe"}}}],
],
"""process where process.name == "regsvr32.exe" or process.name == "cmd.exe" or process.name == "powershell.exe"
""": [
[{"event": {"category": ["process"]}, "process": {"name": "regsvr32.exe"}}],
[{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}}],
[{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}}],
],
"""process where process.name in ("regsvr32.exe", "cmd.exe", "powershell.exe")
""": [
[{"event": {"category": ["process"]}, "process": {"name": "regsvr32.exe"}}],
[{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}}],
[{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}}],
],
"""process where process.name in ("regsvr32.exe", "cmd.exe") or process.name == "powershell.exe"
""": [
[{"event": {"category": ["process"]}, "process": {"name": "regsvr32.exe"}}],
[{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}}],
[{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}}],
],
"""process where event.type in ("start", "process_started") and process.args : "dump-keychain" and process.args : "-d"
""": [
[{"event": {"category": ["process"], "type": ["start"]}, "process": {"args": ["dump-keychain", "-d"]}}],
[{"event": {"category": ["process"], "type": ["process_started"]}, "process": {"args": ["dump-keychain", "-d"]}}], # noqa: E501
],
"""event.type:(start or process_started) and (process.args:"dump-keychain" and process.args:"-d")
""": [
[{"event": {"type": ["start"]}, "process": {"args": ["dump-keychain", "-d"]}}],
[{"event": {"type": ["process_started"]}, "process": {"args": ["dump-keychain", "-d"]}}],
],
}
mono_branch_multi_doc = {
"""sequence
[process where process.name : "cmd.exe"]
[process where process.parent.name : "cmd.exe"]
""": [[
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}},
{"event": {"category": ["process"]}, "process": {"parent": {"name": "cmd.exe"}}},
]],
"""sequence by user.id
[process where process.name : "cmd.exe"]
[process where process.parent.name : "cmd.exe"]
""": [[
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}, "user": {"id": "klM"}},
{"event": {"category": ["process"]}, "process": {"parent": {"name": "cmd.exe"}}, "user": {"id": "klM"}},
]],
"""sequence
[process where process.name : "cmd.exe"] by user.id
[process where process.parent.name : "cmd.exe"] by user.name
""": [[
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}, "user": {"id": "fmC"}},
{"event": {"category": ["process"]}, "process": {"parent": {"name": "cmd.exe"}}, "user": {"name": "fmC"}},
]],
}
multi_branch_multi_doc = {
"""sequence
[process where process.name : "cmd.exe"]
[process where process.parent.name : "cmd.exe" or process.name : "powershell.exe"]
""": [[
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}},
{"event": {"category": ["process"]}, "process": {"parent": {"name": "cmd.exe"}}},
], [
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}},
{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}},
]],
"""sequence by user.id
[process where process.name : "cmd.exe"]
[process where process.parent.name : "cmd.exe" or process.name : "powershell.exe"]
""": [[
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}, "user": {"id": "pKP"}},
{"event": {"category": ["process"]}, "process": {"parent": {"name": "cmd.exe"}}, "user": {"id": "pKP"}},
], [
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}, "user": {"id": "dYR"}},
{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}, "user": {"id": "dYR"}},
]],
"""sequence
[process where process.name in ("cmd.exe", "powershell.exe")] by process.name
[process where process.name in ("cmd.exe", "powershell.exe")] by process.parent.name
""": [[
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}},
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe", "parent": {"name": "cmd.exe"}}},
], [
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe"}},
{"event": {"category": ["process"]}, "process": {"name": "powershell.exe", "parent": {"name": "cmd.exe"}}},
], [
{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}},
{"event": {"category": ["process"]}, "process": {"name": "cmd.exe", "parent": {"name": "powershell.exe"}}},
], [
{"event": {"category": ["process"]}, "process": {"name": "powershell.exe"}},
{"event": {"category": ["process"]}, "process": {"name": "powershell.exe", "parent": {"name": "powershell.exe"}}}, # noqa: E501
]],
"""sequence by user.id
[process where process.name in ("cmd.exe", "powershell.exe")] | |
y_train)
# --- Multi-variable PYPL model: report the fit, then evaluate on train/test.
# NOTE(review): notebook-exported flat script; `linreg`, `X_train` etc. are
# globals reused from the preceding cell.
print('Intercept \t: b = ', linreg.intercept_)
print('Coefficients \t: a = ', linreg.coef_)
# In[238]:
# Predict PYPL Prices values corresponding to Predictors
y_train_pred = linreg.predict(X_train)
y_test_pred = linreg.predict(X_test)
# Plot the Predictions vs the True values
f, axes = plt.subplots(1, 2, figsize=(24, 12))
axes[0].scatter(y_train, y_train_pred, color = "blue")
axes[0].plot(y_train, y_train, 'w-', linewidth = 1)
axes[0].set_xlabel("True values of the Response Variable (Train)")
axes[0].set_ylabel("Predicted values of the Response Variable (Train)")
axes[1].scatter(y_test, y_test_pred, color = "green")
axes[1].plot(y_test, y_test, 'w-', linewidth = 1)
axes[1].set_xlabel("True values of the Response Variable (Test)")
axes[1].set_ylabel("Predicted values of the Response Variable (Test)")
plt.show()
# In[239]:
print("Explained Variance (R^2) on Train Set \t:", linreg.score(X_train, y_train))
print("Mean Squared Error (MSE) on Train Set \t:", mean_squared_error(y_train, y_train_pred))
print("Mean Squared Error (MSE) on Test Set \t:", mean_squared_error(y_test, y_test_pred))
# --- Six single-predictor linear regressions of SINGF prices, one COVID
# metric at a time.  Each section: split 75/25, fit, plot the regression
# line over the train scatter, report train R^2/MSE, then plot & score the
# test predictions.  Globals (linreg, X_train, ...) are overwritten per cell.
# # 1) Predicting SINGF Prices using Death Increased
# In[240]:
# Response: SINGF_Prices
# Predictor: Death_Increased
X_train, X_test, y_train, y_test = train_test_split(Death_Increased, SINGF_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[241]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 2) Predicting SINGF Prices using Total Deaths
# In[245]:
# Response: SINGF_Prices
# Predictor: Total_Deaths
X_train, X_test, y_train, y_test = train_test_split(Total_Deaths, SINGF_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[246]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 3) Predicting SINGF Prices using Positive Cases
# In[247]:
# Response: SINGF_Prices
# Predictor: Positive_Cases
X_train, X_test, y_train, y_test = train_test_split(Positive_Cases, SINGF_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[248]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 4) Predicting SINGF Prices using Positive Increased
# In[249]:
# Response: SINGF_Prices
# Predictor: Positive_Increased
X_train, X_test, y_train, y_test = train_test_split(Positive_Increased, SINGF_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[250]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 5) Predicting SINGF Prices using Total Testing
# In[251]:
# Response: SINGF_Prices
# Predictor: Total_Testing
X_train, X_test, y_train, y_test = train_test_split(Total_Testing, SINGF_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[252]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 6) Predicting SINGF Prices using Total Increased
# In[253]:
# Response: SINGF_Prices
# Predictor: Total_Increased
X_train, X_test, y_train, y_test = train_test_split(Total_Increased, SINGF_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[254]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# --- Multi-variable SINGF model: all COVID predictors (jointCovid) at once.
# # Predicting SINGF Price using Multiple Variable
# In[255]:
y = SINGF_Prices # Response
X = jointCovid # Predictors
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25)
print("Train Set :", X_train.shape, y_train.shape)
print("Test Set :", X_test.shape, y_test.shape)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept \t: b = ', linreg.intercept_)
print('Coefficients \t: a = ', linreg.coef_)
# In[256]:
# Predict SINGF Prices values corresponding to Predictors
y_train_pred = linreg.predict(X_train)
y_test_pred = linreg.predict(X_test)
# Plot the Predictions vs the True values
f, axes = plt.subplots(1, 2, figsize=(24, 12))
axes[0].scatter(y_train, y_train_pred, color = "blue")
axes[0].plot(y_train, y_train, 'w-', linewidth = 1)
axes[0].set_xlabel("True values of the Response Variable (Train)")
axes[0].set_ylabel("Predicted values of the Response Variable (Train)")
axes[1].scatter(y_test, y_test_pred, color = "green")
axes[1].plot(y_test, y_test, 'w-', linewidth = 1)
axes[1].set_xlabel("True values of the Response Variable (Test)")
axes[1].set_ylabel("Predicted values of the Response Variable (Test)")
plt.show()
# In[257]:
print("Explained Variance (R^2) on Train Set \t:", linreg.score(X_train, y_train))
print("Mean Squared Error (MSE) on Train Set \t:", mean_squared_error(y_train, y_train_pred))
print("Mean Squared Error (MSE) on Test Set \t:", mean_squared_error(y_test, y_test_pred))
# --- Same single-predictor regression recipe repeated for TSLA prices
# (sections 1-4; section 4 continues below this span).
# # 1) Predicting TSLA Prices using Death Increased
# In[258]:
# Response: TSLA_Prices
# Predictor: Death_Increased
X_train, X_test, y_train, y_test = train_test_split(Death_Increased, TSLA_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[259]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 2) Predicting TSLA Prices using Total Deaths
# In[260]:
# Response: TSLA_Prices
# Predictor: Total_Deaths
X_train, X_test, y_train, y_test = train_test_split(Total_Deaths, TSLA_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[261]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 3) Predicting TSLA Prices using Positive Cases
# In[262]:
# Response: TSLA_Prices
# Predictor: Positive_Cases
X_train, X_test, y_train, y_test = train_test_split(Positive_Cases, TSLA_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes = plt.subplots(1, 1, figsize=(16, 8))
plt.scatter(X_train, y_train)
plt.plot(regline_x, regline_y, 'r-', linewidth = 3)
plt.show()
# R^2 of Model
print("Explained Variance (R^2) \t:", linreg.score(X_train, y_train))
y_train_pred = linreg.predict(X_train)
# MSE of Model
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_train, y_train_pred))
# In[263]:
y_test_pred = linreg.predict(X_test)
f = plt.figure(figsize=(16, 8))
plt.scatter(X_test, y_test, color = "green")
plt.scatter(X_test, y_test_pred, color = "red")
plt.show()
# MSE on the test set
print("Mean Squared Error (MSE) \t:", mean_squared_error(y_test, y_test_pred))
# # 4) Predicting TSLA Prices using Positive Increased
# In[264]:
# Response: TSLA_Prices
# Predictor: Positive_Increased
X_train, X_test, y_train, y_test = train_test_split(Positive_Increased, TSLA_Prices, test_size = 0.25)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
print('Intercept of Regression \t: b = ', linreg.intercept_)
print('Coefficients of Regression \t: a = ', linreg.coef_)
print()
regline_x = X_train
regline_y = linreg.intercept_ + linreg.coef_ * X_train
# Plotting Linear Regression line
f, axes | |
# <gh_stars>0
#!/usr/bin/env python2
import time
from math import radians
from service_router import *
from locomotion import *
from math import asin, pi, atan2
#, positionN, \
# velocityAll, accelerationAll, positionAll, readFSR
from kinematics import Kinematics
K=Kinematics()  # shared forward-kinematics helper used by all routines below
# Stair-climbing tuning constants.
threshold = 30  # NOTE(review): not used in this view -- presumably a distance tolerance; confirm against callers
stepSize = 50  # distance covered per gait cycle in moveForward()
riser = 163  # stair riser height (same length unit as the kinematics output)
thread = 266  # stair tread depth ("thread" sic)
def terminate():
    """Check whether the robot has finished climbing the stairs.

    Runs forward kinematics on the current servo positions and declares
    success when three end-effector height differences (indices 2/5, 8/11
    and 8/2 of the flattened xyz list -- presumably two leg pairs being
    level and close to each other; confirm against Kinematics.doFkine)
    are all within their tolerances.

    Returns:
        bool: True when on top of the stairs, False otherwise.
    """
    ee_xyz, servopos = K.doFkine(readPos())
    if abs(ee_xyz[2]-ee_xyz[5]) < 20:
        if abs(ee_xyz[8]-ee_xyz[11]) < 20:
            if abs(ee_xyz[8]-ee_xyz[2]) < 50:
                print("yeay im on top of stairs")
                return True
    # BUGFIX: previously only the innermost else returned False, so a failed
    # outer condition fell through and returned None; always return a bool
    # (None was falsy anyway, so callers' truth tests behave identically).
    return False
def standUpForStairs():
    """Bring the hexapod into the stair-climbing stance.

    The same 18-servo target posture is sent to the front, rear and middle
    leg pairs in turn, pausing one second after each command so the legs
    settle before the next pair moves.
    """
    standup_pos = [2048, 2048, 1296, 2048, 2048, 1296,
                   2048, 2048, 1296, 2048, 2048, 1296,
                   2048, 2048, 1296, 2048, 2048, 1296]
    # order matters: front pair first, then rear, then middle
    pair_order = (leg[1] + leg[2], leg[5] + leg[6], leg[3] + leg[4])
    for servo_ids in pair_order:
        positionN(list_combine(servo_ids, standup_pos))
        time.sleep(1)
def correctMiddleLegs(z):
    """Lift the middle legs by ``z``, re-centre their coxa servos, lower again.

    While legs 3 and 4 are off the ground, servos 7 and 10 (presumably the
    middle-leg coxas -- confirm against the servo ID map) are commanded back
    to centre (2048), then the legs are lowered by the same amount.
    """
    # raise legs 3 and 4
    lift = calc_motion([0, 0, z])
    up_cmd = lift[12:18] + lift[18:24]
    positionN(up_cmd)
    check_position_error_legs(80, 20, up_cmd, [3, 4])
    # re-centre the two coxa servos while the legs are airborne
    positionN([7, 2048, 10, 2048])
    time.sleep(1)
    # put the legs back down
    lower = calc_motion([0, 0, -z])
    down_cmd = lower[12:18] + lower[18:24]
    positionN(down_cmd)
    check_position_error_legs(80, 20, down_cmd, [3, 4])
def initialDistance(distance):
all_pos = readPos()
ee_xyz, servopos = K.doFkine(all_pos)
dist2FirstStep_1 = distance
dist2FirstStep_2 = distance
dist2FirstStep_3 = distance + ee_xyz[1] - ee_xyz[7]
dist2FirstStep_4 = distance + ee_xyz[1] - ee_xyz[10]
dist2FirstStep_5 = distance + ee_xyz[1] - ee_xyz[13]
dist2FirstStep_6 = distance + ee_xyz[1] - ee_xyz[16]
dist2FirstStep = dist2FirstStep_1, dist2FirstStep_2, dist2FirstStep_3, dist2FirstStep_4, dist2FirstStep_5, dist2FirstStep_6
print dist2FirstStep
return dist2FirstStep
def initConfig_legs(depth):
    """Spread the legs to fit a stair tread of the given depth.

    Rotates the coxa joints of the corner legs by an angle derived from the
    tread ``depth``, lifting and repositioning the legs pair by pair.

    NOTE(review): when ``depth >= maxy`` nothing is commanded and the
    function implicitly returns None (the final return sits inside the if).
    NOTE(review): ``asin`` raises ValueError when |(depth-miny)/r| > 1 --
    no guard here; confirm callers keep depth within [miny, maxy].
    """
    maxy = 344.74638441867046          # maximum reachable y
    r = 392.55798277243395 - 141.33 #maximumy - y_offset of leg one
    miny = 181.0804846109524           # minimum reachable y
    phai = asin((depth-miny)/r) * 2048/pi # change of coxa in steps
    #print(int(phai))
    if depth < maxy:
        # corner-leg coxas rotated by +/- phai; middle legs stay centred
        standup_pos = [ 1536 + int(phai), 2048, 1296, 2560 - int(phai), 2048, 1296,
                        2048 , 2048, 1296, 2048 , 2048, 1296,
                        2560 - int(phai), 2048, 1296, 1536 + int(phai), 2048, 1296]
        # lifted posture used while a pair is being repositioned
        lift_up = [2048, 2448,1296,2048,2448,1296,
                   2048, 2448,1296,2048,2448,1296,
                   2048, 2448,1296,2048,2448,1296]
        print(standup_pos)
        # reposition pair 2/5, then 1/6, then re-send the stance to 5/6
        front_liftup = list_combine(leg[2] + leg[5],lift_up)
        positionN(front_liftup)
        time.sleep(2)
        front_standup = list_combine(leg[2] + leg[5], standup_pos)
        positionN(front_standup)
        time.sleep(1)
        rear_liftup = list_combine(leg[1] + leg[6],lift_up)
        positionN(rear_liftup)
        time.sleep(1)
        rear_standup = list_combine(leg[1] + leg[6], standup_pos)
        positionN(rear_standup)
        time.sleep(1)
        rear_standup = list_combine(leg[5] + leg[6], standup_pos)
        positionN(rear_standup)
        time.sleep(1)
        # report how far the body now sits from the maximum reach
        ee_xyz, servopos = K.doFkine(readPos())
        return maxy - ee_xyz[1]
def correctRotation(depth,riser):
    """Pitch the body to match the stair slope.

    The slope angle (degrees) is computed from the stair geometry
    (``riser`` over ``depth``); the current body gamma measured from legs
    1, 5 and 6 is subtracted, and the body is re-pitched by the difference.
    """
    slope = atan2(riser,depth)*180/pi
    gamma, beta = K.get_orientation([1,5,6])
    new_gamma = slope - gamma
    # NOTE(review): the fixed -4 degree offset looks like an empirical
    # hardware correction -- confirm it is still valid.
    parallelGait(0,0,int(new_gamma-4),0,0,0)
    time.sleep(3)
    print("Slope is:", new_gamma)
def moveForward(x, y, z, alpha, beta, gamma, distance):
    """Walk forward on flat ground with an alternating-tripod gait.

    x, y : per-step foot displacement; z : foot lift height.
    alpha, beta, gamma : accepted but unused here -- the calc_motion calls
        below take no orientation argument (NOTE(review): confirm whether
        they were meant to be passed through as in moveForwardOnStair).
    distance : distance still to cover; ``stepSize`` is a module global.
    Returns the residual distance (< 0.75 * stepSize) left uncovered.
    """
    # Cartesian foot trajectories; calc_motion expands each 3-vector into
    # 36 servo targets (6 per leg).  "Half" variants cover the final
    # partial step.
    Forward = [x, y, z]
    Up = [0, 0, z]
    Down = [x, y, 0]
    Push = [0, 0, 0]
    HalfForward = [0.5*x, 0.5*y, z]
    HalfUp = [ 0, 0, z]
    HalfDown = [0.5*x, 0.5*y, 0]
    PushBackwards = calc_motion(Push)
    LiftUp = calc_motion(Up)
    LiftDown = calc_motion(Down)
    PutForward = calc_motion(Forward)
    HalfLiftUp = calc_motion(HalfUp)
    HalfLiftDown = calc_motion(HalfDown)
    HalfPutForward = calc_motion(HalfForward)
    # Tripod A = legs 2, 3, 6 (slices 6:12, 12:18, 30:36);
    # tripod B = legs 1, 4, 5 (slices 0:6, 18:24, 24:30).
    while distance > 0.75 * stepSize:
        if distance > 1.5 * stepSize:
            # Full step.  Phase 1: lift tripod A.
            pos = list()
            pos.extend(LiftUp[6:12])
            pos.extend(LiftUp[12:18])
            pos.extend(LiftUp[30:36])
            positionN(pos)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos, leg_case)
            # Phase 2: swing tripod A forward.
            pos1 = list()
            pos1.extend(PutForward[6:12])
            pos1.extend(PutForward[12:18])
            pos1.extend(PutForward[30:36])
            positionN(pos1)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos1, leg_case)
            # Phase 3: plant tripod A.
            pos2 = list()
            pos2.extend(LiftDown[6:12])
            pos2.extend(LiftDown[12:18])
            pos2.extend(LiftDown[30:36])
            positionN(pos2)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos2, leg_case)
            # Phase 4: lift tripod B while tripod A pushes the body forward.
            pos3 = list()
            pos3.extend(LiftUp[0:6])
            pos3.extend(PushBackwards[6:12])
            pos3.extend(PushBackwards[12:18])
            pos3.extend(LiftUp[18:24])
            pos3.extend(LiftUp[24:30])
            pos3.extend(PushBackwards[30:36])
            positionN(pos3)
            check_position_error(40, 50, pos3)
            # Phase 5: plant tripod B at the neutral (push) pose.
            pos4 = list()
            pos4.extend(PushBackwards[0:6])
            pos4.extend(PushBackwards[18:24])
            pos4.extend(PushBackwards[24:30])
            positionN(pos4)
            leg_case = [1,4,5]
            check_position_error_legs(20, 30, pos4, leg_case)
            distance = distance - stepSize
        else:
            # Final partial step: same five phases with half-length swings.
            pos = list()
            pos.extend(HalfLiftUp[6:12])
            pos.extend(HalfLiftUp[12:18])
            pos.extend(HalfLiftUp[30:36])
            positionN(pos)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos, leg_case)
            pos1 = list()
            pos1.extend(HalfPutForward[6:12])
            pos1.extend(HalfPutForward[12:18])
            pos1.extend(HalfPutForward[30:36])
            positionN(pos1)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos1, leg_case)
            pos2 = list()
            pos2.extend(HalfLiftDown[6:12])
            pos2.extend(HalfLiftDown[12:18])
            pos2.extend(HalfLiftDown[30:36])
            positionN(pos2)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos2, leg_case)
            pos3 = list()
            pos3.extend(HalfLiftUp[0:6])
            pos3.extend(PushBackwards[6:12])
            pos3.extend(PushBackwards[12:18])
            pos3.extend(HalfLiftUp[18:24])
            pos3.extend(HalfLiftUp[24:30])
            pos3.extend(PushBackwards[30:36])
            positionN(pos3)
            check_position_error(80, 50, pos3)
            pos4 = list()
            pos4.extend(PushBackwards[0:6])
            pos4.extend(PushBackwards[18:24])
            pos4.extend(PushBackwards[24:30])
            positionN(pos4)
            leg_case = [1,4,5]
            check_position_error_legs(20, 30, pos4, leg_case)
            distance = distance - (0.5 *stepSize)
        time.sleep(0.5)
    return distance
def walkUp(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
    """Execute one gait cycle toward/onto a stair, leg group by leg group.

    distanceToStair : per-leg distances to the next stair edge (6 entries);
        a leg closer than ``stepSize`` takes the "step up" trajectory
        (lift by threshold+riser), otherwise the flat-ground trajectory.
    x, stepSize, threshold : step displacement / length / lift height.
    riser : stair riser height.
    alpha, beta, gamma : body orientation passed to calc_motion.
    """
    orientation = [alpha,beta,gamma]
    Forward = [x, stepSize, threshold]
    Up = [0, 0, threshold]
    Down = [x, stepSize, 0]
    Push = [0, 0, 0]
    UpForward = [x, stepSize, threshold+riser]
    StepUp = [0, 0, threshold+riser]
    StepDownFirst = [x, stepSize, threshold/2+riser]
    StepDownSecond = [x, 0, threshold/2+riser]
    PushBackwards = calc_motion(Push,orientation)
    LiftUp = calc_motion(Up,orientation)
    LiftDown = calc_motion(Down,orientation)
    PutForward = calc_motion(Forward,orientation)
    StepUpForward = calc_motion(UpForward,orientation)
    StepUpUp = calc_motion(StepUp,orientation)
    StepDownDownFirst = calc_motion(StepDownFirst,orientation)
    StepDownDownSecond = calc_motion(StepDownSecond,orientation)
    pos = []
    ##Lift_Up_First_Leg
    # First group = legs 1, 4, 5 (indices 0, 3, 4); each leg picks the
    # stair or flat trajectory based on its remaining distance.
    for i in range(len(distanceToStair)):
        if i == 0 or i == 3 or i == 4:
            if distanceToStair[i] < stepSize:
                pos.extend(StepUpUp[i*6 : i*6+6])
            else:
                pos.extend(LiftUp[i*6 : i*6+6])
    positionN(pos)
    leg_case = [1,4,5]
    check_position_error_legs(140, 30, pos, leg_case)
    pos = []
    ##Put_Forward_First_Leg
    for i in range(len(distanceToStair)):
        if i == 0 or i == 3 or i == 4:
            if distanceToStair[i] < stepSize:
                pos.extend(StepUpForward[i*6 : i*6+6])
            else:
                pos.extend(PutForward[i*6 : i*6+6])
    positionN(pos)
    leg_case = [1,4,5]
    check_position_error_legs(140, 30, pos, leg_case)
    pos = []
    ##Step_Down_First_leg
    for i in range(len(distanceToStair)):
        if i == 0 or i == 3 or i == 4:
            if distanceToStair[i] < stepSize:
                pos.extend(StepDownDownFirst[i*6 : i*6+6])
            else:
                pos.extend(LiftDown[i*6 : i*6+6])
    positionN(pos)
    leg_case = [1,4,5]
    check_position_error_legs(140, 30, pos, leg_case)
    pos = []
    check_contact()
    #########################################################################################################
    # Trajectories for the second group (legs 2, 3, 6; indices 1, 2, 5).
    # NOTE(review): UpUpNothing is computed but never used, and these two
    # calc_motion calls omit the orientation argument unlike the ones
    # above -- confirm both are intentional.
    UpNothing = [0, 0, 0]
    UpUpNothing = calc_motion(UpNothing)
    UpPushBackwards = [0, -stepSize, 0]
    StepUpPushBackwards = calc_motion(UpPushBackwards)
    ##Lift_Up_Second_Leg
    for i in range(len(distanceToStair)):
        if i == 1 or i == 2 or i == 5:
            if distanceToStair[i] < stepSize:
                pos.extend(StepUpUp[i*6 : i*6+6])
            else:
                pos.extend(LiftUp[i*6 : i*6+6])
    positionN(pos)
    leg_case = [2,3,6]
    check_position_error_legs(120, 30, pos, leg_case)
    pos = []
    ##Put_Forward_Second_Leg
    # NOTE(review): this loop filters on the FIRST group's indices
    # (0, 3, 4) and both branches extend the same StepUpPushBackwards
    # trajectory -- looks like a copy/paste remnant; confirm the intended
    # indices and per-branch motions.
    for i in range(len(distanceToStair)):
        if i == 0 or i == 3 or i == 4:
            if distanceToStair[i] < stepSize:
                pos.extend(StepUpPushBackwards[i*6 : i*6+6])
            else:
                pos.extend(StepUpPushBackwards[i*6 : i*6+6])
    positionN(pos)
    leg_case = [1,4,5]
    check_position_error_legs(120, 30, pos, leg_case)
    pos = []
    ##Step_Down_Second_leg
    for i in range(len(distanceToStair)):
        if i == 1 or i == 2 or i == 5:
            if distanceToStair[i] < stepSize:
                pos.extend(StepDownDownSecond[i*6 : i*6+6])
            else:
                pos.extend(PushBackwards[i*6 : i*6+6])
    positionN(pos)
    leg_case = [2,3,6]
    check_position_error_legs(120, 30, pos, leg_case)
    pos = []
    check_contact()
    # NOTE(review): this rebinds the local only; the updated list is never
    # returned (callers use updateDistance() instead) -- effectively dead.
    distanceToStair = [i - stepSize for i in distanceToStair]
def updateDistance(distanceToStair, stepSize):
    # Advance every leg's remaining stair distance by one step.  A negative
    # remainder means that leg crossed the current edge, so wrap it onto
    # the next tread (``thread`` is the module-level tread depth).
    remaining = [d - stepSize for d in distanceToStair]
    remaining = [d + thread if d < 0 else d for d in remaining]
    print (remaining)
    return remaining
def rotateAndTranslate(riser,climbed_stairs_front, climbed_stairs_rear):
    # Level the body (undo the measured pitch/roll from legs 1, 5 and 6),
    # then translate it by the stair-derived y/z shift.  Returns the
    # orientation that was corrected so the caller can restore it later.
    gamma, beta = K.get_orientation([1, 5, 6])
    parallelGait(0, -beta, -gamma, 0, 0, 0)
    time.sleep(2)
    shift = K.calc_translationStairs(riser,climbed_stairs_front, climbed_stairs_rear)
    parallelGait(0, 0, 0, 0, shift[1], shift[0])
    time.sleep(2)
    return beta , gamma
def moveForwardOnStair(x, y, z, alpha, beta, gamma, distance):
    """Walk forward along a stair tread with an alternating-tripod gait.

    Same phase sequencing as moveForward(), but every trajectory is
    generated with the body orientation (alpha, beta, gamma) so foot
    motion stays parallel to the inclined body frame.

    x, y : per-step foot displacement; z : foot lift height.
    distance : distance still to cover; ``stepSize`` is a module global.
    Returns the residual distance (< 0.75 * stepSize) left uncovered.
    """
    # Fix: removed the dead local ``initialDistance = distance`` which
    # shadowed the module-level initialDistance() helper and was never read.
    orientation = [alpha, beta, gamma]
    # Cartesian foot trajectories; calc_motion expands each into 36 servo
    # targets (6 per leg).  "Half" variants cover the final partial step.
    Forward = [x, y, z]
    Up = [0, 0, z]
    Down = [x, y, 0]
    Push = [0, 0, 0]
    HalfForward = [0.5*x, 0.5*y, z]
    HalfUp = [0, 0, z]
    HalfDown = [0.5*x, 0.5*y, 0]
    PushBackwards = calc_motion(Push, orientation)
    LiftUp = calc_motion(Up, orientation)
    LiftDown = calc_motion(Down, orientation)
    PutForward = calc_motion(Forward, orientation)
    HalfLiftUp = calc_motion(HalfUp, orientation)
    HalfLiftDown = calc_motion(HalfDown, orientation)
    HalfPutForward = calc_motion(HalfForward, orientation)
    print (distance)
    # Tripod A = legs 2, 3, 6 (slices 6:12, 12:18, 30:36);
    # tripod B = legs 1, 4, 5 (slices 0:6, 18:24, 24:30).
    while distance > 0.75 * stepSize:
        if distance > 1.5 * stepSize:
            # Full step.  Phase 1: lift tripod A.
            pos = list()
            pos.extend(LiftUp[6:12])
            pos.extend(LiftUp[12:18])
            pos.extend(LiftUp[30:36])
            positionN(pos)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos, leg_case)
            # Phase 2: swing tripod A forward.
            pos1 = list()
            pos1.extend(PutForward[6:12])
            pos1.extend(PutForward[12:18])
            pos1.extend(PutForward[30:36])
            positionN(pos1)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos1, leg_case)
            # Phase 3: plant tripod A.
            pos2 = list()
            pos2.extend(LiftDown[6:12])
            pos2.extend(LiftDown[12:18])
            pos2.extend(LiftDown[30:36])
            positionN(pos2)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos2, leg_case)
            # Phase 4: lift tripod B while tripod A pushes the body forward.
            pos3 = list()
            pos3.extend(LiftUp[0:6])
            pos3.extend(PushBackwards[6:12])
            pos3.extend(PushBackwards[12:18])
            pos3.extend(LiftUp[18:24])
            pos3.extend(LiftUp[24:30])
            pos3.extend(PushBackwards[30:36])
            positionN(pos3)
            check_position_error(20, 50, pos3)
            # Phase 5: plant tripod B at the neutral (push) pose.
            pos4 = list()
            pos4.extend(PushBackwards[0:6])
            pos4.extend(PushBackwards[18:24])
            pos4.extend(PushBackwards[24:30])
            positionN(pos4)
            leg_case = [1,4,5]
            check_position_error_legs(20, 30, pos4, leg_case)
            distance = distance - stepSize
        else:
            # Final partial step: same five phases with half-length swings.
            pos = list()
            pos.extend(HalfLiftUp[6:12])
            pos.extend(HalfLiftUp[12:18])
            pos.extend(HalfLiftUp[30:36])
            positionN(pos)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos, leg_case)
            pos1 = list()
            pos1.extend(HalfPutForward[6:12])
            pos1.extend(HalfPutForward[12:18])
            pos1.extend(HalfPutForward[30:36])
            positionN(pos1)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos1, leg_case)
            pos2 = list()
            pos2.extend(HalfLiftDown[6:12])
            pos2.extend(HalfLiftDown[12:18])
            pos2.extend(HalfLiftDown[30:36])
            positionN(pos2)
            leg_case = [2,3,6]
            check_position_error_legs(20, 30, pos2, leg_case)
            pos3 = list()
            pos3.extend(HalfLiftUp[0:6])
            pos3.extend(PushBackwards[6:12])
            pos3.extend(PushBackwards[12:18])
            pos3.extend(HalfLiftUp[18:24])
            pos3.extend(HalfLiftUp[24:30])
            pos3.extend(PushBackwards[30:36])
            positionN(pos3)
            check_position_error(20, 50, pos3)
            pos4 = list()
            pos4.extend(PushBackwards[0:6])
            pos4.extend(PushBackwards[18:24])
            pos4.extend(PushBackwards[24:30])
            positionN(pos4)
            leg_case = [1,4,5]
            check_position_error_legs(20, 30, pos4, leg_case)
            distance = distance - (0.5 *stepSize)
        time.sleep(0.5)
    return distance
def walkUpAllLegs(distanceToStair, x, stepSize, threshold, riser, alpha, beta, gamma):
orientation = [alpha,beta,gamma]
Forward = [x, stepSize, threshold]
Up = [0, 0, threshold]
Down = [x, stepSize, 0]
Push = [0, 0, 0]
UpForward = [x, stepSize, threshold+riser]
StepUp = [0, 0, threshold+riser]
StepDownFirst = [x, stepSize, threshold/2+riser]
StepDownSecond | |
import pytest
from rubrik_cdm.exceptions import InvalidParameterException, CDMVersionException, InvalidTypeException
from rubrik_cdm import Connect
def test_cluster_version(rubrik, mocker):
    # cluster_version() should surface the "version" field of the
    # /v1/cluster/me/version response.
    api_response = {'version': '5.0.1-1280'}
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = api_response
    assert rubrik.cluster_version() == "5.0.1-1280"
def test_minimum_installed_cdm_version_met(rubrik, mocker):
    # An installed 5.0.1 build satisfies a 5.0 minimum requirement.
    mock_cluster_version = mocker.patch('rubrik_cdm.Connect.cluster_version', autospec=True, spec_set=True)
    mock_cluster_version.return_value = "5.0.1-1280"
    assert rubrik.minimum_installed_cdm_version("5.0") is True
def test_minimum_installed_cdm_version_not_met(rubrik, mocker):
    # An installed 5.0.1 build does NOT satisfy a 5.2 minimum requirement.
    mock_cluster_version = mocker.patch('rubrik_cdm.Connect.cluster_version', autospec=True, spec_set=True)
    mock_cluster_version.return_value = "5.0.1-1280"
    assert rubrik.minimum_installed_cdm_version("5.2") is False
def test_cluster_node_ip(rubrik, mocker):
    # cluster_node_ip() should collect the ipAddress of every node entry.
    def node_entry(ip):
        return {
            "id": "string",
            "brikId": "string",
            "status": "string",
            "ipAddress": ip,
            "supportTunnel": {
                "isTunnelEnabled": True,
                "port": 0,
                "enabledTime": "2019-04-16T14:16:15.573Z",
                "lastActivityTime": "2019-04-16T14:16:15.573Z",
                "inactivityTimeoutInSeconds": 0
            }
        }
    ips = ["192.168.1.1", "192.168.1.2", "192.168.1.3"]
    api_response = {
        "hasMore": True,
        "data": [node_entry(ip) for ip in ips],
        "total": 0
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = api_response
    assert rubrik.cluster_node_ip() == ips
def test_cluster_node_name(rubrik, mocker):
    # cluster_node_name() should collect the id of every node entry.
    def node_entry(node_id):
        return {
            "id": node_id,
            "brikId": "string",
            "status": "string",
            "ipAddress": "string",
            "supportTunnel": {
                "isTunnelEnabled": True,
                "port": 0,
                "enabledTime": "2019-04-16T14:16:15.573Z",
                "lastActivityTime": "2019-04-16T14:16:15.573Z",
                "inactivityTimeoutInSeconds": 0
            }
        }
    names = ["RVM000A000001", "RVM000A000002", "RVM000A000003"]
    api_response = {
        "hasMore": True,
        "data": [node_entry(name) for name in names],
        "total": 0
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = api_response
    assert rubrik.cluster_node_name() == names
def test_end_user_authorization_invalid_object(rubrik):
    # An unsupported object_type must be rejected up front.
    unsupported_type = "not_a_supported_object_type"
    with pytest.raises(InvalidParameterException):
        rubrik.end_user_authorization("object_name", "end_user", unsupported_type)
def test_end_user_authorization_invalid_end_user(rubrik, mocker):
    """An empty user lookup must raise InvalidParameterException.

    Bug fix: the object_id mock previously had ``return_value`` set to the
    helper function object itself (missing call parentheses), not the ID
    string it returns -- inconsistent with every sibling test.
    """
    def mock_self_object_id():
        return "VirtualMachine:::e6a7e6f1-6050-1ee33-9ba6-8e284e2801de-vm-38297"
    def mock_internal_user_username():
        return []
    mock_object_id = mocker.patch('rubrik_cdm.Connect.object_id', autospec=True, spec_set=True)
    mock_object_id.return_value = mock_self_object_id()  # call the helper; was the bare function
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = mock_internal_user_username()
    with pytest.raises(InvalidParameterException):
        rubrik.end_user_authorization("object_name", "end_user", "vmware")
def test_end_user_authorization_idempotence(rubrik, mocker):
    # When the VM ID is already present in the user's "restore" privileges,
    # the idempotence message is returned and no POST is made.
    vm_id = "VirtualMachine:::e6a7e6f1-6050-1ee33-9ba6-8e284e2801de-vm-38297"
    user_lookup = [
        {
            "id": "User:::119283ae-22ea-13f3-bfe2-9387cdf1d4a",
            "authDomainId": "string",
            "username": "string",
            "firstName": "string",
            "lastName": "string",
            "emailAddress": "string",
            "contactNumber": "string",
            "mfaServerId": "string"
        }
    ]
    current_authorizations = {
        "hasMore": True,
        "data": [
            {
                "principal": "string",
                "privileges": {
                    "destructiveRestore": ["string"],
                    "restore": [vm_id],
                    "onDemandSnapshot": ["string"],
                    "restoreWithoutDownload": ["string"],
                    "viewEvent": ["string"],
                    "provisionOnInfra": ["string"],
                    "viewReport": ["string"]
                },
                "organizationId": "string"
            }
        ],
        "total": 0
    }
    mock_object_id = mocker.patch('rubrik_cdm.Connect.object_id', autospec=True, spec_set=True)
    mock_object_id.return_value = vm_id
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.side_effect = [user_lookup, current_authorizations]
    assert rubrik.end_user_authorization("object_name", "end_user", "vmware", 1) \
        == 'No change required. The End User "end_user" is already authorized to interact with the "object_name" VM.'
def test_end_user_authorization(rubrik, mocker):
    # When the VM is not yet in the user's restore privileges, the helper
    # must POST the new authorization and return the API response.
    vm_id = "VirtualMachine:::e6a7e6f1-6050-1ee33-9ba6-8e284e2801de-vm-38297"
    user_id = "User:::119283ae-22ea-13f3-bfe2-9387cdf1d4a"
    user_lookup = [
        {
            "id": user_id,
            "authDomainId": "string",
            "username": "string",
            "firstName": "string",
            "lastName": "string",
            "emailAddress": "string",
            "contactNumber": "string",
            "mfaServerId": "string"
        }
    ]
    current_authorizations = {
        "hasMore": True,
        "data": [
            {
                "principal": "string",
                "privileges": {
                    "destructiveRestore": ["string"],
                    "restore": [
                        "VirtualMachine:::e6a7e6r3-6050-1ee33-9ba6-8e284e2801de"
                    ],
                    "onDemandSnapshot": ["string"],
                    "restoreWithoutDownload": ["string"],
                    "viewEvent": ["string"],
                    "provisionOnInfra": ["string"],
                    "viewReport": ["string"]
                },
                "organizationId": "string"
            }
        ],
        "total": 0
    }
    def post_response():
        # Fresh copy per call so the assert compares an unmutated payload.
        return {
            "hasMore": False,
            "data": [
                {
                    "principal": user_id,
                    "privileges": {
                        "destructiveRestore": [],
                        "restore": [
                            "VirtualMachine:::e6a7e6f1-6050-1ee33-9ba6-8e284e2801de-vm-38297-not-present"
                        ],
                        "onDemandSnapshot": [],
                        "restoreWithoutDownload": [],
                        "viewEvent": [],
                        "provisionOnInfra": [],
                        "viewReport": []
                    },
                    "organizationId": "Organization:::05e3ee0b-5ec1-e33b-88a5-d916855aff5f"
                }
            ],
            "total": 1
        }
    mock_object_id = mocker.patch('rubrik_cdm.Connect.object_id', autospec=True, spec_set=True)
    mock_object_id.return_value = vm_id
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.side_effect = [user_lookup, current_authorizations]
    mock_post = mocker.patch('rubrik_cdm.Connect.post', autospec=True, spec_set=True)
    mock_post.return_value = post_response()
    assert rubrik.end_user_authorization("object_name", "end_user", "vmware") \
        == post_response()
def test_add_vcenter_idempotence(rubrik, mocker):
    # A vCenter with a matching hostname already exists, so add_vcenter
    # should return the idempotence message without POSTing.
    existing_vcenters = {
        "hasMore": True,
        "data": [
            {
                "caCerts": "string",
                "configuredSlaDomainId": "string",
                "id": "string",
                "name": "string",
                "configuredSlaDomainName": "string",
                "primaryClusterId": "string",
                "hostname": "vCenter-Hostname",
                "username": "string",
                "conflictResolutionAuthz": "AllowAutoConflictResolution",
                "configuredSlaDomainPolarisManagedId": "string"
            }
        ],
        "total": 1
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = existing_vcenters
    assert rubrik.add_vcenter("vCenter-Hostname", "vcenter_username", "vcenter_password") == \
        "No change required. The vCenter 'vCenter-Hostname' has already been added to the Rubrik cluster."
def test_add_vcenter(rubrik, mocker):
    # No existing vCenter matches the hostname, so one is POSTed; the
    # helper returns the async request body plus its self link.
    existing_vcenters = {
        "hasMore": True,
        "data": [
            {
                "caCerts": "string",
                "configuredSlaDomainId": "string",
                "id": "string",
                "name": "string",
                "configuredSlaDomainName": "string",
                "primaryClusterId": "string",
                "hostname": "string",
                "username": "string",
                "conflictResolutionAuthz": "AllowAutoConflictResolution",
                "configuredSlaDomainPolarisManagedId": "string"
            }
        ],
        "total": 1
    }
    def async_request():
        # Fresh copy per call so the assert compares an unmutated payload.
        return {
            "id": "string",
            "status": "string",
            "progress": 0,
            "startTime": "2019-04-17T02:46:12.097Z",
            "endTime": "2019-04-17T02:46:12.097Z",
            "nodeId": "string",
            "error": {"message": "string"},
            "links": [
                {"href": "www.example.com", "rel": "string"}
            ]
        }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = existing_vcenters
    mock_post = mocker.patch('rubrik_cdm.Connect.post', autospec=True, spec_set=True)
    mock_post.return_value = async_request()
    assert rubrik.add_vcenter("vCenter-Hostname", "vcenter_username", "vcenter_password") == \
        (async_request(), "www.example.com")
def test_configure_timezone_invalid_timezone(rubrik):
    # An unrecognized timezone string must be rejected.
    bad_timezone = "not_a_supported_timezone"
    with pytest.raises(InvalidParameterException):
        rubrik.configure_timezone(bad_timezone)
def test_configure_timezone_idempotence(rubrik, mocker):
    # A cluster already on America/Chicago needs no PATCH.
    cluster_summary = {
        "id": "string",
        "version": "string",
        "apiVersion": "string",
        "name": "string",
        "timezone": {"timezone": "America/Chicago"},
        "geolocation": {"address": "string"},
        "acceptedEulaVersion": "string",
        "latestEulaVersion": "string"
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = cluster_summary
    assert rubrik.configure_timezone("America/Chicago") \
        == "No change required. The Rubrik cluster is already configured with 'America/Chicago' as it's timezone."
def test_configure_timezone(rubrik, mocker):
    # A cluster on America/Denver gets PATCHed; the PATCH response body is
    # returned.  (GET and PATCH payloads are identical in this fixture.)
    def cluster_summary():
        # Fresh copy per call so each mock and the assert get their own dict.
        return {
            "id": "string",
            "version": "string",
            "apiVersion": "string",
            "name": "string",
            "timezone": {"timezone": "America/Denver"},
            "geolocation": {"address": "string"},
            "acceptedEulaVersion": "string",
            "latestEulaVersion": "string"
        }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = cluster_summary()
    mock_patch = mocker.patch('rubrik_cdm.Connect.patch', autospec=True, spec_set=True)
    mock_patch.return_value = cluster_summary()
    assert rubrik.configure_timezone("America/Chicago") == cluster_summary()
def test_configure_ntp_invalid_type(rubrik):
    # configure_ntp requires a list of servers; a bare string must raise.
    bad_input = "not_a_list"
    with pytest.raises(InvalidTypeException):
        rubrik.configure_ntp(bad_input)
def test_configure_syslog_invalid_protocol(rubrik):
    # Only supported transport protocols are accepted.
    bad_protocol = "not_a_valid_protocol"
    with pytest.raises(InvalidParameterException):
        rubrik.configure_syslog("syslog_ip", bad_protocol)
def test_configure_syslog_invalid_idempotence(rubrik, mocker):
    # Matching hostname/port/protocol means no reconfiguration is needed.
    current_syslog = {
        "hasMore": True,
        "data": [
            {"hostname": "syslog_ip", "port": 514, "protocol": "TCP", "id": "string"}
        ],
        "total": 1
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = current_syslog
    assert rubrik.configure_syslog("syslog_ip", "TCP") == \
        "No change required. The Rubrik cluster is already configured to use the syslog server 'syslog_ip' on port '514' using the 'TCP' protocol."
def test_configure_syslog(rubrik, mocker):
    # A different syslog hostname: the old entry is DELETEd and the new one
    # POSTed; the POST response body is returned.
    current_syslog = {
        "hasMore": True,
        "data": [
            {"hostname": "syslog_ip", "port": 514, "protocol": "TCP", "id": "string"}
        ],
        "total": 1
    }
    def new_syslog():
        # Fresh copy per call so the assert compares an unmutated payload.
        return {"hostname": "syslog_ip_new", "port": 514, "protocol": "TCP", "id": "string"}
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = current_syslog
    mock_delete = mocker.patch('rubrik_cdm.Connect.delete', autospec=True, spec_set=True)
    mock_delete.return_value = {'status_code': '204'}
    mock_post = mocker.patch('rubrik_cdm.Connect.post', autospec=True, spec_set=True)
    mock_post.return_value = new_syslog()
    assert rubrik.configure_syslog("syslog_ip_new", "TCP") == new_syslog()
def test_configure_vlan_invalid_ip(rubrik):
    # The ips argument must be a list or a dict.
    bad_ips = "not_valid_a_list_or_dict"
    with pytest.raises(InvalidParameterException):
        rubrik.configure_vlan("vlan", "netmask", bad_ips)
def test_configure_vlan_invalid_number_of_vlans(rubrik, mocker):
    # The cluster reports three nodes, so a two-entry IP list must be rejected.
    def node_entry(node_id):
        return {
            "id": node_id,
            "brikId": "string",
            "status": "string",
            "ipAddress": "string",
            "supportTunnel": {
                "isTunnelEnabled": True,
                "port": 0,
                "enabledTime": "2019-04-16T14:16:15.573Z",
                "lastActivityTime": "2019-04-16T14:16:15.573Z",
                "inactivityTimeoutInSeconds": 0
            }
        }
    node_summary = {
        "hasMore": True,
        "data": [node_entry("RVM000A00000%d" % i) for i in (1, 2, 3)],
        "total": 0
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = node_summary
    with pytest.raises(InvalidParameterException):
        rubrik.configure_vlan("vlan", "netmask", ["IP_1", "IP_2"])
def test_configure_vlan(rubrik, mocker):
    # Happy path: three node IPs for three nodes; the VLAN POST response is
    # returned to the caller.
    def node_entry(node_id):
        return {
            "id": node_id,
            "brikId": "string",
            "status": "string",
            "ipAddress": "string",
            "supportTunnel": {
                "isTunnelEnabled": True,
                "port": 0,
                "enabledTime": "2019-04-16T14:16:15.573Z",
                "lastActivityTime": "2019-04-16T14:16:15.573Z",
                "inactivityTimeoutInSeconds": 0
            }
        }
    node_summary = {
        "hasMore": True,
        "data": [node_entry("RVM000A00000%d" % i) for i in (1, 2, 3)],
        "total": 0
    }
    current_vlans = {
        "hasMore": True,
        "data": [
            {
                "vlan": 0,
                "netmask": "string",
                "interfaces": [
                    {"node": "string", "ip": "string"}
                ]
            }
        ],
        "total": 0
    }
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.side_effect = [node_summary, current_vlans]
    mock_post = mocker.patch('rubrik_cdm.Connect.post', autospec=True, spec_set=True)
    mock_post.return_value = {'status_code': '204'}
    assert rubrik.configure_vlan("100", "netmask", ["IP_1", "IP_2", "IP_3"]) == {'status_code': '204'}
def test_configure_dns_servers_invalid_server_ip(rubrik):
    # server_ip must be a list; any other type raises InvalidTypeException.
    bad_input = "not_a_valid_server_ip_type"
    with pytest.raises(InvalidTypeException):
        rubrik.configure_dns_servers(bad_input)
def test_configure_dns_servers_idempotence(rubrik, mocker):
    # The configured DNS list already matches; no change message expected.
    mock_get = mocker.patch('rubrik_cdm.Connect.get', autospec=True, spec_set=True)
    mock_get.return_value = ["server_1"]
    assert rubrik.configure_dns_servers(["server_1"]) == \
        "No change required. The Rubrik cluster is already configured with the provided DNS servers."
def test_configure_dns_servers(rubrik, mocker):
def mock_get_internal_cluster_me_dns_nameserver():
return [
"server_1",
"server_2"
]
| |
loc="upper right",
)
plt.subplots_adjust(bottom=0.15, right=0.95)
plt.savefig(
os.path.splitext(filename)[0] + "_Risk_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig2.pickle', 'wb'))
StringIOBytes_risk = io.BytesIO()
plt.savefig(StringIOBytes_risk, format="png", bbox_inches="tight")
StringIOBytes_risk.seek(0)
risk_base_64_pngData = base64.b64encode(StringIOBytes_risk.read())
plt.close()
elif self.post_process_check == True:
preds_fig, (ax1, ax2) = plt.subplots(
1, 2, figsize=(6.69, 3.35), dpi=300
)
ax1.fill_between(
test1_frc,
self.avg_case_results_am_post["Upper 95 CI"],
self.avg_case_results_am_post["Lower 95 CI"],
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax1.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax1.plot(
test1_frc,
self.avg_case_results_am_post["Ensemble Minimum"],
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax1.plot(
test1_frc,
self.avg_case_results_am_post["Ensemble Maximum"],
"#ffa600",
linewidth=0.5,
)
ax1.plot(
test1_frc,
self.avg_case_results_am_post["median"],
"#ffa600",
linewidth=1,
label="Median Prediction",
)
ax1.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax1.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax1.set_xlim([0.19, 2.0])
ax1.set_xticks(np.arange(0.2, 2.2, 0.2))
ax1.set_xlabel("Tap Stand FRC (mg/L)")
ax1.set_ylabel("Household FRC (mg/L)")
ax1.set_title("AM Collection")
ax2.fill_between(
test1_frc,
self.avg_case_results_pm_post["Upper 95 CI"],
self.avg_case_results_pm_post["Lower 95 CI"],
facecolor="#ffa600",
alpha=0.5,
label="95th Percentile Range",
)
ax2.axhline(0.2, c="k", ls="-.", linewidth=1, label="FRC = 0.2 mg/L")
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["Ensemble Minimum"],
"#ffa600",
linewidth=0.5,
label="Minimum/Maximum",
)
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["Ensemble Maximum"],
"#ffa600",
linewidth=0.5,
)
ax2.plot(
test1_frc,
self.avg_case_results_pm_post["median"],
"#ffa600",
linewidth=1,
label="median Prediction",
)
ax2.scatter(
self.datainputs[FRC_IN],
self.dataoutputs,
c="k",
s=10,
marker="x",
label="Testing Observations",
)
ax2.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
loc="upper right",
)
ax2.set_xlim([0.19, 2.0])
ax2.set_xticks(np.arange(0.2, 2.2, 0.2))
ax2.set_xlabel("Tap Stand FRC (mg/L)")
ax2.set_ylabel("Household FRC (mg/L)")
ax2.set_title("PM Collection")
plt.subplots_adjust(wspace=0.25)
plt.tight_layout()
plt.savefig(
os.path.splitext(filename)[0] + "_Predictions_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig1.pickle', 'wb'))
StringIOBytes_preds = io.BytesIO()
plt.savefig(StringIOBytes_preds, format="png", bbox_inches="tight")
StringIOBytes_preds.seek(0)
preds_base_64_pngData = base64.b64encode(StringIOBytes_preds.read())
plt.close()
risk_fig = plt.figure(figsize=(6.69, 3.35), dpi=300)
plt.plot(
test1_frc,
self.avg_case_results_am_post["probability<=0.20"],
c="#ffa600",
label="Risk of Household FRC < 0.20 mg/L - Average Case, AM Collection",
)
plt.plot(
test1_frc,
self.avg_case_results_pm_post["probability<=0.20"],
c="#ffa600",
ls="--",
label="Risk of Household FRC < 0.20 mg/L - Average Case, PM Collection",
)
plt.xlim([0.2, 2])
plt.xlabel("Tapstand FRC (mg/L)")
plt.ylim([0, 1])
plt.ylabel("Risk of Point-of-Consumption FRC < 0.2 mg/L")
plt.legend(
bbox_to_anchor=(0.999, 0.999),
shadow=False,
fontsize="small",
ncol=1,
labelspacing=0.1,
columnspacing=0.2,
handletextpad=0.1,
loc="upper right",
)
plt.savefig(
os.path.splitext(filename)[0] + "_Risk_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig2.pickle', 'wb'))
StringIOBytes_risk = io.BytesIO()
plt.savefig(StringIOBytes_risk, format="png", bbox_inches="tight")
StringIOBytes_risk.seek(0)
risk_base_64_pngData = base64.b64encode(StringIOBytes_risk.read())
plt.close()
hist_fig, (ax1, ax2, ax3, ax4) = plt.subplots(
4, 1, figsize=(3.35, 6.69), dpi=300
)
ax1.set_ylabel("Frequency")
ax1.set_xlabel("Tapstand FRC (mg/L)")
ax1.hist(self.datainputs.iloc[:, 0], bins=30, color="grey")
ax2.set_ylabel("Frequency")
ax2.set_xlabel("Elapsed Time (hours)")
ax2.hist(self.datainputs.iloc[:, 1], bins=30, color="grey")
ax3.set_ylabel("Frequency")
ax3.set_xlabel("Collection Time (0=AM, 1=PM)")
ax3.hist(self.datainputs.iloc[:, 2], bins=30, color="grey")
ax4.set_ylabel("Frequency")
ax4.set_xlabel("Household FRC (mg/L)")
ax4.hist(self.dataoutputs, bins=30, color="grey")
plt.subplots_adjust(
left=0.18, hspace=0.60, top=0.99, bottom=0.075, right=0.98
)
plt.savefig(
os.path.splitext(filename)[0] + "_Histograms_Fig.png",
format="png",
bbox_inches="tight",
)
# pl.dump(fig, open(os.path.splitext(filename)[0] + 'Fig3.pickle', 'wb'))
plt.close()
StringIOBytes_histogram = io.BytesIO()
plt.savefig(StringIOBytes_histogram, format="png", bbox_inches="tight")
StringIOBytes_histogram.seek(0)
hist_base_64_pngData = base64.b64encode(StringIOBytes_histogram.read())
return hist_base_64_pngData, risk_base_64_pngData, preds_base_64_pngData
def display_results(self):
"""
Display the results of the predictions as a console output.
Display and return all the contents of the self.results variable which is a pandas Dataframe object
:return: A Pandas Dataframe object (self.results) containing all the result of the predictions
"""
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
if self.post_process_check == False:
logging.info(self.avg_case_results_am)
logging.info(self.worst_case_results_am)
logging.info(self.avg_case_results_pm)
logging.info(self.worst_case_results_pm)
return self.avg_case_results_am, self.avg_case_results_pm, self.worst_case_results_am, self.worst_case_results_pm
else:
logging.info(self.avg_case_results_am_post)
logging.info(self.worst_case_results_am_post)
logging.info(self.avg_case_results_pm_post)
logging.info(self.worst_case_results_pm_post)
return self.avg_case_results_am_post, self.avg_case_results_pm_post, self.worst_case_results_am_post, self.worst_case_results_pm_post
else:
if self.post_process_check == False:
logging.info(self.avg_case_results_am)
logging.info(self.avg_case_results_pm)
return self.avg_case_results_am, self.avg_case_results_pm
else:
logging.info(self.avg_case_results_am_post)
logging.info(self.avg_case_results_pm_post)
return self.avg_case_results_am_post, self.avg_case_results_pm_post
def export_results_to_csv(self, filename):
self.avg_case_results_am.to_csv(
os.path.splitext(filename)[0] + "_average_case_am.csv", index=False
)
self.avg_case_results_pm.to_csv(
os.path.splitext(filename)[0] + "_average_case_pm.csv", index=False
)
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
self.worst_case_results_am.to_csv(
os.path.splitext(filename)[0] + "_worst_case_am.csv", index=False
)
self.worst_case_results_pm.to_csv(
os.path.splitext(filename)[0] + "_worst_case_pm.csv", index=False
)
if self.post_process_check == True:
self.avg_case_results_am_post.to_csv(
os.path.splitext(filename)[0] + "_average_case_am.csv", index=False
)
self.avg_case_results_pm_post.to_csv(
os.path.splitext(filename)[0] + "_average_case_pm.csv", index=False
)
if WATTEMP in self.datainputs.columns or COND in self.datainputs.columns:
self.worst_case_results_am_post.to_csv(
os.path.splitext(filename)[0] + "_worst_case_am.csv", index=False
)
self.worst_case_results_pm_post.to_csv(
os.path.splitext(filename)[0] + "_worst_case_pm.csv", index=False
)
def generate_model_performance(self):
"""Generates training performance graphs
Plots the model performance metrics (MSE and R^2 vs # of epochs) after training and returns a
base64 encoded image. The NN has to be trained first otherwise the image will be empty.
Returns: Base64 data stream"""
fig, axs = plt.subplots(1, 2, sharex=True)
ax = axs[0]
ax.boxplot(
[self.total_mse_train, self.total_mse_val, self.total_mse_test],
labels=["Training", "Validation", "Testing"],
)
ax.set_title("Mean Squared Error")
tr_legend = "Training Avg MSE: {mse:.4f}".format(mse=self.avg_mse_train)
val_legend = "Validation Avg MSE: {mse:.4f}".format(mse=self.avg_mse_val)
ts_legend = "Testing Avg MSE: {mse:.4f}".format(mse=self.avg_mse_test)
ax.legend([tr_legend, val_legend, ts_legend])
ax = axs[1]
ax.boxplot(
[
self.total_rsquared_train,
self.total_rsquared_val,
self.total_rsquared_test,
],
labels=["Training", "Validation", "Testing"],
)
ax.set_title("R^2")
tr_legend = "Training Avg. R^2: {rs:.3f}".format(rs=self.avg_rsq_train)
val_legend = "Validation Avg. R^2: {rs:.3f}".format(rs=self.avg_rsq_val)
ts_legend = "Validation Avg. R^2: {rs:.3f}".format(rs=self.avg_rsq_test)
ax.legend([tr_legend, val_legend, ts_legend])
fig.suptitle(
"Performance metrics across 100 training runs on "
+ str(self.epochs)
+ " epochs, with "
+ str(self.layer1_neurons)
+ " neurons on hidden layer."
)
fig.set_size_inches(12, 8)
# plt.show()
# Uncomment the next lines to save the graph to disk
# plt.savefig("model_metrics\\" + str(self.epochs) + "_epochs_" + str(self.layer1_neurons) + "_neurons.png",
# dpi=100)
# plt.close()
plt.show()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format='png')
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def generate_2d_scatterplot(self):
"""Generate a 2d scatterplot of the predictions
Plots three, 2-dimensional scatterplots of the predictions as a function of the inputs
The 3 scatterplots are plotting: predictions vs se1_frc and water temperature, predictions
vs water conductivity and water temperature, and predictions vs se1_frc and water conductivity.
A histogram of the prediction set is also generated and plotted. A prediction using the
predict() method must be made first.
Returns: a base64 data represenation of the image."""
df = self.results
# Uncomment the following line to load the results direclty from an csv file
# df = pd.read_csv('results.csv')
# Filter out outlier values
df = df.drop(df[df[FRC_IN] > 2.8].index)
frc = df[FRC_IN]
watt = df[WATTEMP]
cond = df[COND]
c = df["median"]
# sort data for the cdf
sorted_data = np.sort(c)
# The following lines of code calculate the width of the histogram bars
# and align the range of the histogram and the pdf
if min(c) < 0:
lo_limit = 0
else:
lo_limit = round(min(c), 2)
logging.info(lo_limit)
if max(c) <= 0.75:
divisions = 16
hi_limit = 0.75
elif max(c) < 1:
divisions = 21
hi_limit = 1
elif max(c) <= 1.5:
divisions = 31
hi_limit = 1.5
elif max(c) <= 2:
divisions = 41
hi_limit = 2
divisions = round((hi_limit - lo_limit) / 0.05, 0) + 1
logging.info(divisions)
# Get the data between the limits
sorted_data = sorted_data[sorted_data > lo_limit]
sorted_data = sorted_data[sorted_data < hi_limit]
# create a colorbar for the se4_frc and divide it in 0.2 mg/L intervals
cmap = plt.cm.jet_r
cmaplist = [cmap(i) for i in range(cmap.N)]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"Custom cmap", cmaplist, cmap.N
)
bounds = np.linspace(0, 1.4, 8)
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
fig = plt.figure(figsize=(19.2, 10.8), dpi=100)
ax = fig.add_subplot(221)
img = ax.scatter(frc, watt, c=c, s=5, cmap=cmap, norm=norm, alpha=1)
ax.set_xlabel("FRC at tapstand (mg/L)")
ax.set_ylabel("Water Temperature (" + "\u00b0" + "C)")
ax.grid(linewidth=0.2)
ax = fig.add_subplot(222)
img = ax.scatter(frc, cond, c=c, s=5, cmap=cmap, norm=norm, alpha=1)
ax.set_xlabel("FRC at tapstand (mg/L)")
ax.set_ylabel("Water Conductivity (\u03BCS/cm)")
ax.grid(linewidth=0.2)
ax = fig.add_subplot(223)
img = ax.scatter(watt, cond, c=c, s=5, cmap=cmap, norm=norm, alpha=1)
ax.set_xlabel("Water Temperature (" + "\u00b0" + "C)")
ax.set_ylabel("Water Conductivity (\u03BCS/cm)")
ax.grid(linewidth=0.2)
ax = fig.add_subplot(224)
img = ax.hist(
c,
bins=np.linspace(lo_limit, hi_limit, divisions),
edgecolor="black",
linewidth=0.1,
)
ax.grid(linewidth=0.1)
line02 = ax.axvline(0.2, color="r", linestyle="dashed", linewidth=2)
line03 = ax.axvline(0.3, color="y", linestyle="dashed", linewidth=2)
ax.set_xlabel("FRC at household (mg/L)")
ax.set_ylabel("# of instances")
axcdf = ax.twinx()
(cdf,) = axcdf.step(sorted_data, np.arange(sorted_data.size), color="g")
ax.legend(
(line02, line03, cdf), ("0.2 mg/L", "0.3 mg/L", "CDF"), loc="center right"
)
ax2 = fig.add_axes([0.93, 0.1, 0.01, 0.75])
cb = mpl.colorbar.ColorbarBase(
ax2,
cmap=cmap,
norm=norm,
spacing="proportional",
ticks=bounds,
boundaries=bounds,
)
cb.ax.set_ylabel("FRC at se4 (mg/L)", rotation=270, labelpad=20)
plt.show()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format="png")
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def generate_input_info_plots(self, filename):
"""Generates histograms of the inputs to the ANN
Plots one histogram for each input field on the neural network
along with the mean and median values."""
df = self.datainputs
# df = df.drop(df[df["se1_frc"] > 2.8].index)
frc = df[FRC_IN]
watt = df[WATTEMP]
cond = df[COND]
dfo = self.file
dfo = dfo.drop(dfo[dfo[FRC_IN] > 2.8].index)
frc4 = dfo[FRC_OUT]
fig = plt.figure(figsize=(19.2, 10.8), dpi=100)
# fig.suptitle('Total samples: '+ str(len(frc))) # +
# "\n" + "SWOT version: " + self.software_version +
# "\n" + "Input Filename: " + os.path.basename(self.input_filename) +
# "\n" + "Generated on: " + self.today)
axInitialFRC = fig.add_subplot(221)
axInitialFRC.hist(frc, bins=20, edgecolor="black", linewidth=0.1)
axInitialFRC.set_xlabel("Initial FRC (mg/L)")
axInitialFRC.set_ylabel("# of instances")
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, <NAME>, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
from __future__ import with_statement # Not required in Python 2.6 any more
import argparse
import numpy
import sys
from pyquaternion import Quaternion as quat
from tools.trajectory.old import associate
def align(model, data):
    """Align two trajectories using the method of Horn (closed-form).

    Input:
    model -- first trajectory (3xn numpy matrix)
    data -- second trajectory (3xn numpy matrix)

    Output:
    rot -- rotation matrix (3x3)
    trans -- translation vector (3x1)
    trans_error -- translational error per point (1xn)
    """
    # NOTE: mutates numpy's global print options as a side effect; preserved
    # from the original implementation.
    numpy.set_printoptions(precision=3, suppress=True)
    model_zerocentered = model - model.mean(1)
    data_zerocentered = data - data.mean(1)
    # Cross-covariance of the zero-centered point sets.
    W = numpy.zeros((3, 3))
    for column in range(model.shape[1]):
        W += numpy.outer(model_zerocentered[:, column], data_zerocentered[:, column])
    # Bug fix: numpy.linalg.linalg was a private alias (removed in modern
    # NumPy releases); the public entry point is numpy.linalg.svd.
    U, d, Vh = numpy.linalg.svd(W.transpose())
    S = numpy.matrix(numpy.identity(3))
    # Guard against a reflection solution: force det(rot) == +1.
    if (numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0):
        S[2, 2] = -1
    rot = U * S * Vh
    trans = data.mean(1) - rot * model.mean(1)
    model_aligned = rot * model + trans
    print('Rotation matrix det is ', numpy.linalg.det(rot))
    alignment_error = model_aligned - data
    trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error, alignment_error), 0)).A[0]
    return rot, trans, trans_error
def R(motion):
    """Return the 3x3 rotation part (top-left block) of a homogeneous transform."""
    rotation_block = motion[0:3, 0:3]
    return rotation_block
def plot_traj(ax, stamps, traj, style, color, label):
    """
    Plot a trajectory using matplotlib.

    The trajectory is split into segments wherever consecutive timestamps are
    more than twice the median sampling interval apart, so gaps in the data
    are not bridged by a line.

    Input:
    ax -- the plot
    stamps -- time stamps (1xn); sorted in place
    traj -- trajectory (3xn)
    style -- line style
    color -- line color
    label -- plot legend (attached to the first segment only)
    """
    stamps.sort()
    interval = numpy.median([s - t for s, t in zip(stamps[1:], stamps[:-1])])
    x = []
    y = []
    last = stamps[0]
    for i in range(len(stamps)):
        if stamps[i] - last < 2 * interval:
            x.append(traj[i][0])
            y.append(traj[i][1])
        elif len(x) > 0:
            ax.plot(x, y, style, color=color, label=label)
            label = ""
            x = []
            y = []
        # Bug fix: track the previous timestamp on every iteration (as in the
        # reference TUM evaluate_ate.py). Previously `last` was only updated
        # inside the elif branch, which broke the curve into segments at fixed
        # time offsets from the start instead of at real gaps in the data.
        last = stamps[i]
    if len(x) > 0:
        ax.plot(x, y, style, color=color, label=label)
def plot_traj_z(ax, stamps, first_stamp, traj, style, color, label):
    """
    Plot the z coordinate of a trajectory over time using matplotlib.

    The curve is split wherever consecutive timestamps are more than twice the
    median sampling interval apart, so gaps are not bridged by a line.

    Input:
    ax -- the plot
    stamps -- time stamps (1xn); sorted in place
    first_stamp -- time origin subtracted from each stamp for the x axis
    traj -- trajectory (3xn)
    style -- line style
    color -- line color
    label -- plot legend (attached to the first segment only)
    """
    stamps.sort()
    interval = numpy.median([s - t for s, t in zip(stamps[1:], stamps[:-1])])
    x = []
    y = []
    last = stamps[0]
    for i in range(len(stamps)):
        if stamps[i] - last < 2 * interval:
            y.append(traj[i][2])
            x.append(stamps[i] - first_stamp)
        elif len(x) > 0:
            ax.plot(x, y, style, color=color, label=label)
            label = ""
            x = []
            y = []
        # Bug fix: update `last` on every iteration (as in the reference TUM
        # evaluate_ate.py). Previously it was only updated in the elif branch,
        # segmenting the plot at fixed offsets rather than at real gaps.
        last = stamps[i]
    if len(x) > 0:
        ax.plot(x, y, style, color=color, label=label)
def angle(R):
    """Return the rotation angle (radians) of rotation matrix *R*.

    Derived from the trace identity trace(R) = 1 + 2*cos(theta); the cosine is
    clamped to [-1, 1] to guard against floating-point drift before arccos.
    """
    cos_theta = (numpy.trace(R[0:3, 0:3]) - 1) / 2
    clamped = max(-1, min(1, cos_theta))
    return numpy.arccos(clamped)
if __name__ == "__main__":
# parse command line
parser = argparse.ArgumentParser(description='''
This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',
default=0.0)
parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)', default=1.0)
parser.add_argument('--max_difference',
help='maximally allowed time difference for matching entries (default: 0.02)', default=0.02)
parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
parser.add_argument('--save_associations',
help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
parser.add_argument('--plot_z', help='plot the first and the aligned second trajectory z-coordinate to an image (format: png)')
parser.add_argument('--verbose',
help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)',
action='store_true')
parser.add_argument('--show_difference',
help='show in red difference on plot',
action='store_true')
args = parser.parse_args()
first_list = associate.read_file_list(args.first_file)
second_list = associate.read_file_list(args.second_file)
matches = associate.associate(first_list, second_list, float(args.offset), float(args.max_difference))
if len(matches) < 2:
sys.exit(
"Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a, b in matches]).transpose()
second_xyz = numpy.matrix(
[[float(value) * float(args.scale) for value in second_list[b][0:3]] for a, b in matches]).transpose()
first_quat = numpy.matrix([[float(value) for value in first_list[a][3:7]] for a, b in matches])
second_quat = numpy.matrix(
[[float(value) * float(args.scale) for value in second_list[b][3:7]] for a, b in matches])
first_quats = []
second_quats = []
rot, trans, trans_error = align(second_xyz, first_xyz)
second_xyz_aligned = rot * second_xyz + trans
angle_zero = angle(numpy.matmul(rot, numpy.transpose(rot)))
for quat_1 in first_quat:
quat_1_array = numpy.squeeze(numpy.asarray(quat_1))
quat_constr = quat(quat_1_array[3], quat_1_array[0], quat_1_array[1], quat_1_array[2])
first_quats.append(quat_constr)
counter = 0
rot_errors = []
for quat_1 in second_quat:
quat_1_array = numpy.squeeze(numpy.asarray(quat_1))
quat_constr = quat(quat_1_array[3], quat_1_array[0], quat_1_array[1], quat_1_array[2])
orientation_aligned = first_quats[counter].inverse.rotation_matrix * rot * quat_constr.rotation_matrix
rot_errors.append(angle(orientation_aligned))
counter += 1
first_stamps = sorted(first_list)
first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
second_stamps = sorted(second_list)
second_xyz_full = numpy.matrix(
[[float(value) * float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
second_xyz_full_aligned = rot * second_xyz_full + trans
if args.verbose:
print("compared_pose_pairs " + str(len(trans_error)) + " pairs")
print("alignment transformation R + t is")
print(rot)
print(trans)
print("absolute_translational_error.rmse " + str(numpy.sqrt(
numpy.dot(trans_error, trans_error) / len(trans_error))) + " m")
print("absolute_translational_error.mean " + str(numpy.mean(trans_error)) + " m")
print("absolute_translational_error.median " + str(numpy.median(trans_error)) + " m")
print("absolute_translational_error.std " + str(numpy.std(trans_error)) + " m")
print("absolute_translational_error.min " + str(numpy.min(trans_error)) + " m")
print("absolute_translational_error.max " + str(numpy.max(trans_error)) + " m")
print()
print("absolute_rotational_error.rmse " + str(numpy.sqrt(
numpy.dot(rot_errors, rot_errors) / len(rot_errors))) + " rad")
print("absolute_rotational_error.mean " + str(numpy.mean(rot_errors)) + " rad")
print("absolute_rotational_error.median " + str(numpy.median(rot_errors)) + " rad")
print("absolute_rotational_error.std " + str(numpy.std(rot_errors)) + " rad")
print("absolute_rotational_error.min " + str(numpy.min(rot_errors)) + " rad")
print("absolute_rotational_error.max " + str(numpy.max(rot_errors)) + " rad")
else:
print(" + " + str(numpy.sqrt(numpy.dot(trans_error, trans_error) / len(trans_error))))
if args.save_associations:
file = open(args.save_associations, "w")
file.write("\n".join(
[" " % (a, x1, y1, z1, b, x2, y2, z2) for (a, b), (x1, y1, z1), (x2, y2, z2) in
zip(matches, first_xyz.transpose().A, second_xyz_aligned.transpose().A)]))
file.close()
if args.save:
file = open(args.save, "w")
file.write("\n".join([" " % stamp + " ".join(["" % d for d in line]) for stamp, line in
zip(second_stamps, second_xyz_full_aligned.transpose().A)]))
file.close()
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8,5))
ax = fig.add_subplot(111)
plot_traj(ax, first_stamps, first_xyz_full.transpose().A, '-', "green", u'Точная траектория')
plot_traj(ax, second_stamps, second_xyz_full_aligned.transpose().A, '-', "orange", u'Оцененная траектория')
if args.show_difference:
label = u'Разница'
for (a, b), (x1, y1, z1), (x2, y2, z2) in zip(matches, first_xyz.transpose().A,
second_xyz_aligned.transpose().A):
ax.plot([x1, x2], [y1, y2], '-', color="red", label=label)
label = ""
ax.legend()
ax.set_xlabel(u'x [м]')
ax.set_ylabel(u'y [м]')
plt.grid()
# plt.xticks(fontsize=14)
plt.savefig(args.plot, dpi=600)
if args.plot_z:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
plot_traj_z(ax, first_stamps, first_stamps[0], first_xyz_full.transpose().A, '-', "green", u'Точная траектория')
plot_traj_z(ax, second_stamps, second_stamps[0], second_xyz_full_aligned.transpose().A, '-', "orange", u'Оцененная траектория')
if args.show_difference:
label = u'Разница'
for (a, b), (x1, y1, | |
<reponame>iinnovations/iicontrollibs
#!/usr/bin/python3
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Interface Innovations"
__credits__ = ["<NAME>"]
__license__ = "Apache 2.0"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import os
import sys
import inspect
top_folder = \
os.path.split(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])))[0]
if top_folder not in sys.path:
sys.path.insert(0, top_folder)
import cupid.pilib as pilib
import iiutilities.datalib as datalib
from iiutilities import utility
try:
import simplejson as json
except:
import json
# Need to backward compatible this for Pi2 based on hardware version
def write(message, port=None, baudrate=115200, timeout=1):
    """
    Write *message* to a serial port, closing the port afterwards.

    :param message: bytes to send.
    :param port: serial device path (e.g. '/dev/ttyAMA0'); if falsy, nothing
        is sent and a notice is printed.
    :param baudrate: serial baud rate.
    :param timeout: serial read/write timeout in seconds.
    :return: None in all cases.
    """
    if not port:
        print('NO PORT SPECIFIED')
        return None
    import serial
    ser = serial.Serial(port=port, baudrate=baudrate, timeout=timeout)
    # Bug fix: the original never closed the port, leaking the file handle
    # (and keeping the device locked) on every call.
    try:
        ser.write(message)
    finally:
        ser.close()
def getsystemserialport():
    """
    Determine the system serial device from the hardware version stored in
    the system database.

    Defaults to '/dev/ttyAMA0'. On 'RPi 3 Model B' / 'Pi 3 Model B' hardware
    the function returns '/dev/ttyS0' instead.

    :return: device path string.
    """
    port = '/dev/ttyAMA0'
    try:
        # Bug fix: the original also called read_table once *before* the try
        # block; that call was redundant and, being unprotected, defeated the
        # exception handling below.
        versions = pilib.dbs.system.read_table('versions')
        hw_version = ''
        for version in versions:
            print(version['item'])
            if version['item'] == 'versionname':
                print(version)
                hw_version = version['version']
    except:
        utility.log(pilib.dirs.dbs.system, 'Error retrieving hardware version in serial monitor. Reverting to /dev/tty/AMA0')
        print('Error retrieving hardware version in serial monitor. Reverting to /dev/tty/AMA0')
    else:
        print(hw_version, port)
        if hw_version in ['RPi 3 Model B', 'Pi 3 Model B']:
            port = '/dev/ttyS0'
    return port
def monitor(**kwargs):
    """
    Monitor the system serial port for incoming mote messages and dispatch
    queued outgoing commands.

    Reads the port one character at a time; a carriage return (or a read that
    returns nothing, e.g. on timeout) marks the end of a message. Complete
    messages are parsed with processserialdata() and handed to
    processremotedata(); idle reads are used to push queued commands out via
    queue_commands() / runsendhandler().

    Keyword settings (all optional):
        port          -- serial device; auto-detected via getsystemserialport()
                         when None.
        baudrate      -- serial baud rate (default 115200).
        timeout       -- serial read timeout in seconds (default 1).
        checkstatus   -- periodically re-read systemstatus to decide whether
                         to keep running (default True).
        printmessages -- echo parsed messages to stdout (default False).
        debug         -- enable pilib debug mode (default True).
    """
    settings = {
        'port': None,
        'baudrate': 115200,
        'timeout': 1,
        'checkstatus': True,
        'printmessages': False,
        'debug': True
    }
    settings.update(kwargs)
    if not settings['port']:
        settings['port'] = getsystemserialport()
    import serial
    from iiutilities import datalib, dblib
    from time import mktime, localtime
    from time import sleep
    motes_db = pilib.dbs.motes
    system_db = pilib.dbs.system
    if settings['debug']:
        pilib.set_debug()
    if settings['printmessages']:
        print('Message printing is enabled.')
    data = []
    stringmessage = ''
    seriallog = True
    if seriallog:
        print('serial logging is enabled.')
        logfile = open(pilib.dirs.logs.serial, 'a', 1)
        logfile.write('\n' + datalib.gettimestring() + ": Initializing serial log\n")
    if settings['checkstatus']:
        systemstatus = system_db.read_table_row('systemstatus')[0]
        runhandler = systemstatus['serialhandlerenabled']
        checktime = mktime(localtime())
        checkfrequency = 15  # seconds
        if runhandler:
            utility.log(pilib.dirs.logs.io, "Starting monitoring of serial port based on check status", 1, pilib.loglevels.io)
        else:
            utility.log(pilib.dirs.logs.io, "Not starting monitoring of serial port. How did I get here?", 1, pilib.loglevels.serial)
    else:
        runhandler = True
    if runhandler:
        ser = serial.Serial(port=settings['port'], baudrate=settings['baudrate'], timeout=settings['timeout'])
        utility.log(pilib.dirs.logs.io, "Monitoring serial port {}, settings {}/{}".format(ser.name, settings['baudrate'], settings['timeout']), 1, pilib.loglevels.serial)
    else:
        utility.log(pilib.dirs.logs.io, 'not monitoring serial port ', 1, pilib.loglevels.serial)
    while runhandler:
        # This reading has to happen faster than the messages come, or they will all be stuck together
        try:
            ch = ser.read(1).decode('utf-8')
            # Empty read (timeout) or carriage return: message boundary.
            if len(ch) == 0 or ch == '\x0D':
                utility.log(pilib.dirs.logs.io, 'Time to process message ', 5, pilib.loglevels.serial)
                if len(data) > 1:  # This will avoid processing endline characters and other trash.
                    s = ''
                    for x in data:
                        s += '%s' % x
                    # clear data
                    data = []
                    try:
                        # NOTE(review): data was cleared just above, so this
                        # always logs length 0 -- probably meant len(s).
                        utility.log(pilib.dirs.logs.serial, 'processing datadict from serial message of length {}'.format(len(data)), 3, pilib.loglevels.serial)
                        datadicts, messages = processserialdata(s)
                    except:
                        import traceback
                        message = "An exception of occurred (line 99): {}".format(traceback.format_exc())
                        utility.log(pilib.dirs.logs.serial, message, 1, pilib.loglevels.serial)
                    else:
                        for datadict, message in zip(datadicts, messages):
                            if datadict:
                                if (settings['printmessages']):
                                    print("datadict: ")
                                    print(datadict)
                                # Publishing is currently unconditional; the
                                # original per-key filtering was commented out.
                                publish = True
                                if publish:
                                    if (settings['printmessages']):
                                        print('publishing message: ')
                                        print(message)
                                    lograwmessages(message)
                                    motes_db.size_table('read', **{'size': 1000})
                                    try:
                                        processremotedata(datadict, message)
                                    except:
                                        import traceback
                                        message = "An exception of occurred (line 184): {}".format(traceback.format_exc())
                                        utility.log(pilib.dirs.logs.serial, message, 1, pilib.loglevels.serial)
                            else:
                                # Bug fix: this previously read
                                # settings['printmessage'] (missing the final
                                # 's'), raising KeyError whenever a message
                                # arrived without a parsable datadict.
                                if message and settings['printmessages']:
                                    print('message: \n{}'.format(message))
                                    print(message)
                            # Log message
                            if seriallog:
                                try:
                                    logfile.write(datalib.gettimestring() + ' : ' + message + '\n')
                                except:
                                    import traceback
                                    message = "An exception of occurred (line 198): {}".format(traceback.format_exc())
                                    utility.log(pilib.dirs.logs.serial, message, 1, pilib.loglevels.serial)
                else:
                    # no data, let's see if we should send message
                    utility.log(pilib.dirs.logs.serial, 'No data, try sending', 1, pilib.loglevels.serial)
                    data = []
                    # See if there are messages to send.
                    try:
                        queue_commands()
                    except:
                        import traceback
                        print('ERROR IN QUEUE COMMANDS \n {}'.format(traceback.format_exc()))
                    try:
                        runsendhandler(ser)
                    except:
                        import traceback
                        template = "An exception of in runsendhandler (line 142): {} .".format(traceback.format_exc())
                        utility.log(pilib.dirs.logs.serial, "Error in send routine: {}".format(template), 1, 1)
            else:
                # Mid-message character: accumulate it.
                data.append(ch)
                stringmessage += str(ch)
            if settings['checkstatus']:
                print('checking status')
                thetime = mktime(localtime())
                if thetime - checktime > checkfrequency:
                    print('checking control status')
                    systemstatus = dblib.readonedbrow(pilib.dirs.dbs.control, 'systemstatus')[0]
                    runserialhandler = systemstatus['serialhandlerenabled']
                    if runserialhandler:
                        checktime = thetime
                        utility.log(pilib.dirs.logs.io, 'Continuing serialhandler based on status check', 3, pilib.loglevels.io)
                    else:
                        runhandler = False
                        utility.log(pilib.dirs.logs.io, 'Aborting serialhandler based on status check', 3, pilib.loglevels.io)
        except KeyboardInterrupt:
            print('\n Exiting on keyboard interrupt\n')
            logfile.close()
            return
        except:
            # Read failed or decode error; back off briefly and retry.
            sleep(0.5)
    logfile.close()
    ser.close()
    return
def queue_commands(**kwargs):
    """
    This will check the commands table. If it has not been sent or cleared as acknowledged, we will queue the command
    in the queued items for serial send.

    We pass in the actiondatadict for the optional parameters below. These come from the action when it is queued, or
    from default settings in action.onact() or the defaults below.

    We optionally:
      check to ensure we don't already have queued messages of the same type. This is not typically going to be an
      issue, as messages are dispatched very quickly.

    We increment number of retries and terminate request as aborted if we exceed this.
    """
    settings = {
        'destination':None,
        'message':None,
        'retry_time':120.0,
        'retries':10,
        'no_duplicates':True,
    }
    settings.update(kwargs)
    commands = pilib.dbs.motes.read_table('commands')
    for command in commands:
        print('COMMAND {}'.format(command))
        if command['status'] in ['new', 'retrying', 'sending']:  # Redundant status types here?
            message = '~sendmsg;{};;{}'.format(command['destination'], command['message'])
            queue_command = True
            if not command['senttimes']:
                # Bug fix: this was previously set to a Python list ([]), which
                # json.loads() cannot parse (TypeError) and which would also be
                # written back to the database as a non-JSON value. It must be
                # the JSON text for an empty list.
                command['senttimes'] = '[]'
            sent_times = json.loads(command['senttimes'])
            attempts = len(sent_times)
            if attempts >= settings['retries']:
                print('FAIL')
                inc_message = 'Command retries exceeded ({}). '.format(settings['retries'])
                command['status_message'] += inc_message
                utility.log(pilib.dirs.logs.serial, inc_message, 2, pilib.loglevels.serial)
                command['status'] = 'failed'
                queue_command = False
            else:
                print('NOT FAIL')
            if queue_command:
                # Only check to see if existing command contain a duplicate entry if we were going to send it.
                queued_match_commands = pilib.dbs.motes.read_table('queued', condition="message='{}'".format(message))
                if queued_match_commands:
                    inc_message = 'Message is already queued. '
                    utility.log(pilib.dirs.logs.serial, inc_message, 2, pilib.loglevels.serial)
                    command['status_message'] += inc_message
                    queue_command = False
            if queue_command and sent_times:
                # If no sent times, then we don't need to see how long it's been
                # Now check to see that it is time to retry
                most_recent_sent_time = sent_times[-1]
                # Bug fix: the format string was missing its placeholder, so the
                # timestamp was silently dropped from the output.
                print('most recent time: {}'.format(most_recent_sent_time))
                elapsed_time = datalib.mstimestringtoseconds(datalib.getmstimestring()) - datalib.mstimestringtoseconds(most_recent_sent_time)
                print('it has been : {}'.format(elapsed_time))
                if elapsed_time > settings['retry_time']:
                    inc_message = 'It has been {}, longer than retry time of {}. Retrying. '.format(elapsed_time, settings['retry_time'])
                else:
                    inc_message = 'It has been {}, not greater than retry time of {}. '.format(elapsed_time, settings['retry_time'])
                    queue_command = False
                utility.log(pilib.dirs.logs.serial, inc_message, 2, pilib.loglevels.serial)
            if queue_command:
                sent_times.append(datalib.getmstimestring())
                command['senttimes'] = json.dumps(sent_times)
                pilib.dbs.motes.insert('queued', {'queuedtime':datalib.getmstimestring(), 'message':message}, queue=True)
                command['status'] = 'retrying'
            # Update message regardless
            print('\n **INSERTING COMMAND (queued:{}) \n {}\n\n'.format(queue_command, command))
            pilib.dbs.motes.insert('commands', command)
    if pilib.dbs.motes.queued_queries:
        pilib.dbs.motes.execute_queue()
def runsendhandler(ser):
    """
    Dispatch the oldest queued mote message over the serial port.

    Pops the first row (by queuedtime, i.e. FIFO order) from the motes
    'queued' table, writes its message to *ser*, and on success moves the row
    to the 'sent' table (trimmed to 1000 rows). Failures at each stage are
    logged but never raised to the caller.

    :param ser: an open serial object exposing write().
    """
    from iiutilities import dblib, datalib
    from iiutilities import utility
    # print('looking for message to send')
    motes_db = pilib.dbs.motes
    try:
        # Oldest entry first -- FIFO dispatch order.
        last_queued_message = motes_db.get_first_time_row('queued', 'queuedtime')
    except:
        import traceback
        utility.log(pilib.dirs.logs.serial, 'Error getting queued message : {}'.format(traceback.format_exc()), 1, pilib.loglevels.serial)
    else:
        try:
            utility.log(pilib.dirs.logs.serial, 'Sending message : {}'.format(last_queued_message['message']), 3, pilib.loglevels.serial)
            ser.write(last_queued_message['message'].encode())
            # sendserialmessage(ser, lastqueuedmessage['message'])
        except:
            utility.log(pilib.dirs.logs.serial, 'Error sending message', 1, 1)
        else:
            utility.log(pilib.dirs.logs.serial, 'Success sending message', 1, 1)
            # Remove the dispatched row from 'queued' and archive it in 'sent'.
            conditionnames = ['queuedtime', 'message']
            conditionvalues = [last_queued_message['queuedtime'], last_queued_message['message']]
            delquery = dblib.makedeletesinglevaluequery('queued', {'conditionnames':conditionnames, 'conditionvalues':conditionvalues})
            dblib.sqlitequery(pilib.dirs.dbs.motes, delquery)
            dblib.sqliteinsertsingle(pilib.dirs.dbs.motes, 'sent', [last_queued_message['queuedtime'], datalib.gettimestring(), last_queued_message['message']])
            dblib.size_sqlite_table(pilib.dirs.dbs.motes, 'sent', 1000)
    return
def sendserialmessage(serobject, message):
    """Encode *message* as bytes and write it to the open serial object."""
    encoded = message.encode()
    serobject.write(encoded)
def processserialdata(data):
from iiutilities.datalib import parseoptions
datadicts = []
messages = []
# try:
# Break into chunks
print('processing data: ')
print(data)
print('end data')
# RF Message (deprecated, all are of serial form below)
if data.strip().find('BEGIN RECEIVED') > 0:
split1 = data.strip().split('BEGIN RECEIVED')
for split in split1:
if split.find('END RECEIVED') >= 0:
message = split.split('END RECEIVED')[0].replace('\x00', '')
# print(message)
messages.append(message.strip())
try:
datadict = parseoptions(message)
except:
print('error parsing message: | |
config_cached[k.lower()] = v
def get_config_or_none(key):
    """Return the cached git config value for *key* (case-insensitive), or None if unset."""
    ensure_config_loaded()
    normalized = key.lower()
    return config_cached.get(normalized)
def set_config(key, value):
    """Set git config *key* to *value*, mirroring the change in the cache."""
    run_git("config", "--", key, value)
    ensure_config_loaded()
    normalized = key.lower()
    config_cached[normalized] = value
def unset_config(key):
    """Remove git config entry `key` from the repository and the in-process cache.

    No-op when the entry is not set at all.
    """
    ensure_config_loaded()
    # Compare against None explicitly: an entry set to an empty string is still set,
    # and the previous truthiness check silently skipped unsetting such entries.
    if get_config_or_none(key) is not None:
        run_git("config", "--unset", key)
        config_cached.pop(key.lower(), None)
# Lazily-populated list of remote names; None means "not fetched from git yet".
remotes_cached = None
def remotes():
    """Return the list of configured remote names, querying git only on the first call."""
    global remotes_cached
    if remotes_cached is None:
        remotes_cached = non_empty_lines(popen_git("remote"))
    return remotes_cached
# Remotes already fetched in this process, to avoid redundant network round-trips.
fetch_done_for = set()
def fetch_remote(remote):
    """Run `git fetch <remote>`, but at most once per remote within this process."""
    if remote in fetch_done_for:
        return
    run_git("fetch", remote)
    fetch_done_for.add(remote)
def set_upstream_to(rb):
    """Set the current branch's upstream (tracking) branch to remote branch `rb`."""
    run_git("branch", "--set-upstream-to", rb)
def reset_keep(to_revision):
    """Run `git reset --keep <to_revision>`, translating failure into a user-friendly error.

    `--keep` aborts rather than clobbering local modifications, which is the usual
    reason this command fails.
    """
    try:
        run_git("reset", "--keep", to_revision)
    except MacheteException:
        raise MacheteException("Cannot perform `git reset --keep %s`. This is most likely caused by local uncommitted changes." % to_revision)
def push(remote, b, force_with_lease=False):
    """Push branch `b` to `remote` with --set-upstream, optionally forcing.

    When forcing, prefers '--force-with-lease' where supported, falling back to
    plain '--force' on older git.
    """
    if force_with_lease:
        # '--force-with-lease' is the earliest available in git 1.8.5.
        supports_lease = get_git_version() >= (1, 8, 5)
        opt_force = ["--force-with-lease"] if supports_lease else ["--force"]
    else:
        opt_force = []
    run_git("push", "--set-upstream", *(opt_force + [remote, b]))
def pull_ff_only(remote, rb):
    """Fast-forward-only pull of remote branch `rb` from `remote`, then fix up tracking data."""
    fetch_remote(remote)
    run_git("merge", "--ff-only", rb)
    # There's apparently no way to set remote automatically when doing 'git pull' (as opposed to 'git push'),
    # so a separate 'git branch --set-upstream-to' is needed.
    set_upstream_to(rb)
def find_short_commit_sha_by_revision(revision):
    """Resolve `revision` to its abbreviated commit sha via `git rev-parse --short`."""
    # '^{commit}' peels tags etc. down to the underlying commit object.
    raw = popen_git("rev-parse", "--short", revision + "^{commit}")
    return raw.rstrip()
# Memoization table for short-sha lookups (revision -> abbreviated sha).
short_commit_sha_by_revision_cached = {}
def short_commit_sha_by_revision(revision):
    """Memoized variant of find_short_commit_sha_by_revision."""
    try:
        return short_commit_sha_by_revision_cached[revision]
    except KeyError:
        sha = find_short_commit_sha_by_revision(revision)
        short_commit_sha_by_revision_cached[revision] = sha
        return sha
def find_commit_sha_by_revision(revision):
    """Resolve `revision` to a full commit sha, or None if it does not name a commit."""
    # Without ^{commit}, 'git rev-parse --verify' will not only accept references to other kinds of objects (like trees and blobs),
    # but just echo the argument (and exit successfully) even if the argument doesn't match anything in the object store.
    try:
        return popen_git("rev-parse", "--verify", "--quiet", revision + "^{commit}").rstrip()
    except MacheteException:
        return None
# Mapping full ref name -> commit sha; None until primed by load_branches().
commit_sha_by_revision_cached = None
def commit_sha_by_revision(revision, prefix="refs/heads/"):
    """Memoized full-sha lookup for `prefix + revision`; primes the cache from load_branches()."""
    global commit_sha_by_revision_cached
    if commit_sha_by_revision_cached is None:
        load_branches()
    full_revision = prefix + revision
    cache = commit_sha_by_revision_cached
    try:
        return cache[full_revision]
    except KeyError:
        sha = find_commit_sha_by_revision(full_revision)
        cache[full_revision] = sha
        return sha
def is_full_sha(revision):
    """Return a truthy match object iff `revision` looks like a full 40-hex-digit commit sha."""
    return re.match(r"^[0-9a-f]{40}$", revision)
# Mapping full ref name -> committer unix timestamp; None until primed by load_branches().
committer_unix_timestamp_by_revision_cached = None
def committer_unix_timestamp_by_revision(revision, prefix="refs/heads/"):
    """Committer unix timestamp for `prefix + revision`, or 0 when unknown."""
    global committer_unix_timestamp_by_revision_cached
    if committer_unix_timestamp_by_revision_cached is None:
        load_branches()
    return committer_unix_timestamp_by_revision_cached.get(prefix + revision) or 0
def inferred_remote_for_fetching_of_branch(b):
    """Guess which remote branch `b` would be fetched from, or None if no remote has it.

    Since many people don't use the '--set-upstream' flag of 'push', the remote is
    inferred: the first remote `r` for which '<r>/<b>' exists as a remote branch.
    """
    candidates = (r for r in remotes() if r + "/" + b in remote_branches())
    return next(candidates, None)
def strict_remote_for_fetching_of_branch(b):
    """Remote recorded in 'branch.<b>.remote' config, or None when no tracking data exists."""
    remote = get_config_or_none("branch." + b + ".remote")
    if not remote:
        return None
    return remote.rstrip()
def combined_remote_for_fetching_of_branch(b):
    """Remote for branch `b`: the configured one if present, otherwise inferred from remote branches."""
    return strict_remote_for_fetching_of_branch(b) or inferred_remote_for_fetching_of_branch(b)
def inferred_counterpart_for_fetching_of_branch(b):
    """First remote branch of the form '<remote>/<b>', or None if no remote has `b`."""
    candidates = (r + "/" + b for r in remotes())
    return next((rb for rb in candidates if rb in remote_branches()), None)
# Mapping local branch -> its configured remote tracking branch; None until primed by load_branches().
counterparts_for_fetching_cached = None
def strict_counterpart_for_fetching_of_branch(b):
    """Tracking (upstream) remote branch of `b` per git config, or None if not set."""
    global counterparts_for_fetching_cached
    if counterparts_for_fetching_cached is None:
        load_branches()
    return counterparts_for_fetching_cached.get(b)
def combined_counterpart_for_fetching_of_branch(b):
    """Remote counterpart of `b`: configured upstream if present, otherwise inferred."""
    # Since many people don't use '--set-upstream' flag of 'push' or 'branch', we try to infer the remote if the tracking data is missing.
    return strict_counterpart_for_fetching_of_branch(b) or inferred_counterpart_for_fetching_of_branch(b)
def is_am_in_progress():
    """Whether a `git am` session is currently in progress in this repository."""
    # As of git 2.24.1, this is how 'cmd_rebase()' in builtin/rebase.c checks whether am is in progress.
    return os.path.isfile(get_git_subpath("rebase-apply", "applying"))
def is_cherry_pick_in_progress():
    """Whether a cherry-pick is currently in progress (CHERRY_PICK_HEAD exists)."""
    return os.path.isfile(get_git_subpath("CHERRY_PICK_HEAD"))
def is_merge_in_progress():
    """Whether a merge is currently in progress (MERGE_HEAD exists)."""
    return os.path.isfile(get_git_subpath("MERGE_HEAD"))
def is_revert_in_progress():
    """Whether a revert is currently in progress (REVERT_HEAD exists)."""
    return os.path.isfile(get_git_subpath("REVERT_HEAD"))
# Note: while rebase is ongoing, the repository is always in a detached HEAD state,
# so we need to extract the name of the currently rebased branch from the rebase-specific internals
# rather than rely on 'git symbolic-ref HEAD' (i.e. the contents of .git/HEAD).
def currently_rebased_branch_or_none():
    """Name of the branch being rebased, or None when no rebase head-name file is found."""
    # https://stackoverflow.com/questions/3921409
    head_name_file = None
    # .git/rebase-merge directory exists during cherry-pick-powered rebases,
    # e.g. all interactive ones and the ones where '--strategy=' or '--keep-empty' option has been passed
    rebase_merge_head_name_file = get_git_subpath("rebase-merge", "head-name")
    if os.path.isfile(rebase_merge_head_name_file):
        head_name_file = rebase_merge_head_name_file
    # .git/rebase-apply directory exists during the remaining, i.e. am-powered rebases, but also during am sessions.
    rebase_apply_head_name_file = get_git_subpath("rebase-apply", "head-name")
    # Most likely .git/rebase-apply/head-name can't exist during am sessions, but it's better to be safe.
    if not is_am_in_progress() and os.path.isfile(rebase_apply_head_name_file):
        head_name_file = rebase_apply_head_name_file
    if not head_name_file:
        return None
    with open(head_name_file) as f:
        raw = f.read().strip()
        # Strip the 'refs/heads/' prefix so a plain branch name is returned.
        return re.sub("^refs/heads/", "", raw)
def currently_checked_out_branch_or_none():
    """Name of the branch HEAD points at, or None when HEAD is detached."""
    try:
        symbolic_ref = popen_git("symbolic-ref", "--quiet", "HEAD")
    except MacheteException:
        # 'symbolic-ref' fails on a detached HEAD.
        return None
    return re.sub("^refs/heads/", "", symbolic_ref.strip())
def expect_no_operation_in_progress():
    """Raise MacheteException if a rebase, am, cherry-pick, merge or revert is ongoing."""
    rb = currently_rebased_branch_or_none()
    if rb:
        raise MacheteException("Rebase of `%s` in progress. Conclude the rebase first with `git rebase --continue` or `git rebase --abort`." % rb)
    if is_am_in_progress():
        raise MacheteException("`git am` session in progress. Conclude `git am` first with `git am --continue` or `git am --abort`.")
    if is_cherry_pick_in_progress():
        raise MacheteException("Cherry pick in progress. Conclude the cherry pick first with `git cherry-pick --continue` or `git cherry-pick --abort`.")
    if is_merge_in_progress():
        raise MacheteException("Merge in progress. Conclude the merge first with `git merge --continue` or `git merge --abort`.")
    if is_revert_in_progress():
        raise MacheteException("Revert in progress. Conclude the revert first with `git revert --continue` or `git revert --abort`.")
def current_branch_or_none():
    """Current branch name (also while a rebase is ongoing), or None if truly detached."""
    checked_out = currently_checked_out_branch_or_none()
    if checked_out:
        return checked_out
    return currently_rebased_branch_or_none()
def current_branch():
    """Current branch name; raises MacheteException when not on any branch."""
    branch = current_branch_or_none()
    if branch:
        return branch
    raise MacheteException("Not currently on any branch")
# Memoization table for 'git merge-base' results, keyed by an order-normalized sha pair.
merge_base_cached = {}
def merge_base(sha1, sha2):
    """Memoized `git merge-base` of two commit shas."""
    # Normalize the pair ordering so (a, b) and (b, a) share a single cache entry.
    if sha1 > sha2:
        sha1, sha2 = sha2, sha1
    if not (sha1, sha2) in merge_base_cached:
        # Note that we don't pass '--all' flag to 'merge-base', so we'll get only one merge-base
        # even if there is more than one (in the rare case of criss-cross histories).
        # This is still okay from the perspective of is-ancestor checks that are our sole use of merge-base:
        # * if any of sha1, sha2 is an ancestor of another,
        #   then there is exactly one merge-base - the ancestor,
        # * if neither of sha1, sha2 is an ancestor of another,
        #   then none of the (possibly more than one) merge-bases is equal to either of sha1/sha2 anyway.
        merge_base_cached[sha1, sha2] = popen_git("merge-base", sha1, sha2).rstrip()
    return merge_base_cached[sha1, sha2]
# Note: the 'git rev-parse --verify' validation is not performed in case for either of earlier/later
# if the corresponding prefix is empty AND the revision is a 40 hex digit hash.
def is_ancestor(earlier_revision, later_revision, earlier_prefix="refs/heads/", later_prefix="refs/heads/"):
    """Whether `earlier_revision` is an ancestor of (or equal to) `later_revision`."""
    def resolve(revision, prefix):
        # A bare full sha with an empty prefix needs no rev-parse round-trip.
        if prefix == "" and is_full_sha(revision):
            return revision
        return commit_sha_by_revision(revision, prefix)
    earlier_sha = resolve(earlier_revision, earlier_prefix)
    later_sha = resolve(later_revision, later_prefix)
    if earlier_sha == later_sha:
        return True
    # An ancestor is exactly the (sole) merge-base of the pair.
    return merge_base(earlier_sha, later_sha) == earlier_sha
def create_branch(b, out_of_revision):
    """Create and check out branch `b`, optionally starting from `out_of_revision`."""
    run_git("checkout", "-b", b, *([out_of_revision] if out_of_revision else []))
    flush_caches() # the repository state has changed b/c of a successful branch creation, let's defensively flush all the caches
def log_shas(revision, max_count):
    """Commit shas reachable from local branch `revision` (newest first), optionally capped at `max_count`."""
    opts = ["--format=%H", "refs/heads/" + revision]
    if max_count:
        opts.insert(0, "--max-count=" + str(max_count))
    return non_empty_lines(popen_git("log", *opts))
MAX_COUNT_FOR_INITIAL_LOG = 10
# Caches: short history prefix per branch, and the rest of the history per branch.
initial_log_shas_cached = {}
remaining_log_shas_cached = {}
# Since getting the full history of a branch can be an expensive operation for large repositories (compared to all other underlying git operations),
# there's a simple optimization in place: we first fetch only a couple of first commits in the history,
# and only fetch the rest if none of them occurs on reflog of any other branch.
def spoonfeed_log_shas(b):
    """Lazily yield commit shas of branch `b`: a small cached prefix first, then (only if the consumer keeps iterating) the full remaining history."""
    if b not in initial_log_shas_cached:
        initial_log_shas_cached[b] = log_shas(b, max_count=MAX_COUNT_FOR_INITIAL_LOG)
    for sha in initial_log_shas_cached[b]:
        yield sha
    if b not in remaining_log_shas_cached:
        # Skip the prefix already yielded above.
        remaining_log_shas_cached[b] = log_shas(b, max_count=None)[MAX_COUNT_FOR_INITIAL_LOG:]
    for sha in remaining_log_shas_cached[b]:
        yield sha
# Branch-name caches; None means "not loaded yet" (see load_branches()).
local_branches_cached = None
remote_branches_cached = None
def local_branches():
    """List of local branch names (without 'refs/heads/'), loaded lazily via load_branches()."""
    global local_branches_cached, remote_branches_cached
    if local_branches_cached is None:
        load_branches()
    return local_branches_cached
def remote_branches():
    """List of remote branch names (without 'refs/remotes/'), loaded lazily via load_branches()."""
    global local_branches_cached, remote_branches_cached
    if remote_branches_cached is None:
        load_branches()
    return remote_branches_cached
def load_branches():
    """Populate all branch-related caches in one pass over `git for-each-ref`.

    Fills commit_sha_by_revision_cached, committer_unix_timestamp_by_revision_cached,
    counterparts_for_fetching_cached, local_branches_cached and remote_branches_cached.
    Remote refs are loaded first so local branches can validate their upstreams against them.
    """
    global commit_sha_by_revision_cached, committer_unix_timestamp_by_revision_cached, counterparts_for_fetching_cached, local_branches_cached, remote_branches_cached
    commit_sha_by_revision_cached = {}
    committer_unix_timestamp_by_revision_cached = {}
    counterparts_for_fetching_cached = {}
    local_branches_cached = []
    remote_branches_cached = []
    # Using 'committerdate:raw' instead of 'committerdate:unix' since the latter isn't supported by some older versions of git.
    raw_remote = non_empty_lines(popen_git("for-each-ref", "--format=%(refname)\t%(objectname)\t%(committerdate:raw)", "refs/remotes"))
    for line in raw_remote:
        values = line.split("\t")
        if len(values) != 3:  # invalid, shouldn't happen
            continue
        b, sha, committer_unix_timestamp_and_time_zone = values
        b_stripped = re.sub("^refs/remotes/", "", b)
        remote_branches_cached += [b_stripped]
        commit_sha_by_revision_cached[b] = sha
        # 'committerdate:raw' is "<unix timestamp> <time zone offset>"; only the timestamp is kept.
        committer_unix_timestamp_by_revision_cached[b] = int(committer_unix_timestamp_and_time_zone.split(' ')[0])
    raw_local = non_empty_lines(popen_git("for-each-ref", "--format=%(refname)\t%(objectname)\t%(committerdate:raw)\t%(upstream)", "refs/heads"))
    for line in raw_local:
        values = line.split("\t")
        if len(values) != 4:  # invalid, shouldn't happen
            continue
        b, sha, committer_unix_timestamp_and_time_zone, fetch_counterpart = values
        b_stripped = re.sub("^refs/heads/", "", b)
        fetch_counterpart_stripped = re.sub("^refs/remotes/", "", fetch_counterpart)
        local_branches_cached += [b_stripped]
        commit_sha_by_revision_cached[b] = sha
        committer_unix_timestamp_by_revision_cached[b] = int(committer_unix_timestamp_and_time_zone.split(' ')[0])
        # Record the upstream only if it actually exists among the remote branches loaded above.
        if fetch_counterpart_stripped in remote_branches_cached:
            counterparts_for_fetching_cached[b_stripped] = fetch_counterpart_stripped
def get_sole_remote_branch(b):
    """The unique remote branch ending in '/<b>', or None when zero or several match.

    Note that the matcher is deliberately over-inclusive: for 'foo', both 'origin/foo'
    and 'origin/feature/foo' match — this reduces risk wrt. which '/'-separated
    fragments belong to the remote name and which to the branch name.
    FIXME: this is still likely to deliver incorrect results in rare corner cases with compound remote names.
    """
    suffix = "/" + b
    matching = [rb for rb in remote_branches() if rb.endswith(suffix)]
    if len(matching) == 1:
        return matching[0]
    return None
def merged_local_branches():
return | |
<gh_stars>0
"""Test UGrid2dDataExtractor.cpp"""
import unittest
import xmsgridtrace
from xmsgrid.ugrid import UGrid
from xmsgridtrace.gridtrace import GridTrace
import numpy as np
class TestGridTrace(unittest.TestCase):
"""GridTrace tests"""
    def create_default_single_cell(self):
        """Build a GridTrace over a unit square split into two triangles, with uniform (1,1,0) point velocities at times 0 and 10."""
        points = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
        cells = [UGrid.ugrid_celltype_enum.TRIANGLE, 3, 0, 1, 2,
                 UGrid.ugrid_celltype_enum.TRIANGLE, 3, 2, 3, 0]
        ugrid = UGrid(points, cells)
        tracer = GridTrace(ugrid)
        self.assertIsInstance(tracer,GridTrace)
        # Generous limits so that, by default, no stopping criterion interferes with a trace.
        tracer.vector_multiplier = 1
        tracer.max_tracing_time = 100
        tracer.max_tracing_distance = 100
        tracer.min_delta_time = .1
        tracer.max_change_distance = 100
        tracer.max_change_velocity = 100
        tracer.max_change_direction_in_radians = 1.5*np.pi
        scalars = [(1,1,0),(1,1,0),(1,1,0),(1,1,0)]
        point_activity = [True]*4
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",0)
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",10)
        return tracer
    def create_default_two_cell(self):
        """Build a GridTrace over two adjacent unit quads, with per-cell velocities (.1,0,0) and (.2,0,0) at times 0 and 10."""
        points = [ ( 0, 0, 0 ),( 1, 0, 0 ),( 1, 1, 0 ),( 0, 1, 0 ),( 2, 0, 0 ),( 2, 1, 0 ) ]
        cells = [ UGrid.ugrid_celltype_enum.QUAD, 4, 0, 1, 2, 3, UGrid.ugrid_celltype_enum.QUAD, 4, 1, 4, 5, 2 ]
        ugrid = UGrid(points,cells)
        tracer = GridTrace(ugrid)
        self.assertIsInstance(tracer,GridTrace)
        # Generous limits so that, by default, no stopping criterion interferes with a trace.
        tracer.vector_multiplier = 1
        tracer.max_tracing_time = 100
        tracer.max_tracing_distance = 100
        tracer.min_delta_time = .1
        tracer.max_change_distance = 100
        tracer.max_change_velocity = 100
        tracer.max_change_direction_in_radians = 1.5*np.pi
        scalars = [ ( .1,0,0 ),( .2,0,0 ) ]
        point_activity = [True]*2
        tracer.add_grid_scalars_at_time(scalars,"cells",point_activity,"cells",0)
        tracer.add_grid_scalars_at_time(scalars,"cells",point_activity,"cells",10)
        return tracer
    def test_basic_trace_point(self):
        """A point in a uniform (1,1,0) field moves diagonally and exits at the grid corner (1,1,0)."""
        tracer=self.create_default_single_cell()
        start_time=.5
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [(.5,.5,0),(1,1,0)]
        expected_out_times = [.5,1]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_max_change_distance(self):
        """Capping max_change_distance at .25 forces intermediate points along the otherwise-straight path."""
        tracer=self.create_default_single_cell()
        start_time=.5
        tracer.max_change_distance = .25
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [(.5,.5,0 ),
                              ( 0.67677668424809445, 0.67677668424809445, 0.00000000000000000 ),
                              ( 0.85355336849618890, 0.85355336849618890, 0.00000000000000000 ),
                              (1,1,0)]
        expected_out_times = [.5, 0.67677668424809445, 0.85355336849618890, 1]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_small_scalars_trace_point(self):
        """With small (.1,.1,0) velocities the trace takes growing adaptive steps before exiting at (1,1,0)."""
        tracer=self.create_default_single_cell()
        start_time=.5
        tracer.max_change_distance = .25
        # Override the factory's (1,1,0) scalars with much smaller velocities.
        scalars = [(.1,.1,0),(.1,.1,0),(.1,.1,0),(.1,.1,0)]
        point_activity = [True]*4
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",0)
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",10)
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [(.5,.5,0 ),
                              ( 0.60000000149011612, 0.60000000149011612, 0 ),
                              ( 0.72000000327825542, 0.72000000327825542, 0 ),
                              ( 0.86400000542402267, 0.86400000542402267, 0 ),
                              (1,1,0)]
        expected_out_times = [.5, 1.5, 2.7, 4.14, 5.5]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_almost_equal(expected_out_times,result_tuple[1])
    def test_strong_direction_change(self):
        """In a rotational velocity field, limiting direction change per step (0.2*pi) yields an inward-spiraling trace."""
        tracer=self.create_default_single_cell()
        tracer.max_change_direction_in_radians = np.pi*.2
        # NOTE(review): negative value presumably disables the minimum-step clamp — confirm against GridTrace docs.
        tracer.min_delta_time = -1
        start_time=.5
        # Corner velocities describe a rotation around the square's center.
        scalars = [(0,1,0),(-1,0,0),(0,-1,0),(1,0,0)]
        point_activity = [True]*4
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",0)
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",10)
        result_tuple = tracer.trace_point((0,0,0),start_time)
        expected_out_trace = [ ( 0,0,0 ),
                               (0.00000000000000000, 0.25000000000000000, 0.00000000000000000 ),
                               (0.074999999999999997, 0.47499999999999998, 0.00000000000000000 ),
                               (0.21900000214576720, 0.63699999570846555, 0.00000000000000000 ),
                               (0.30928799843788146, 0.66810399758815764, 0.00000000000000000 ),
                               (0.40229310507774352, 0.67396399235725402, 0.00000000000000000 ),
                               (0.48679361495018003, 0.65024498560905453, 0.00000000000000000 ),
                               (0.54780151323509219, 0.59909560095787040, 0.00000000000000000 ),
                               (0.55928876277122497, 0.56619817004051198, 0.00000000000000000 ),
                               (0.56114558691518779, 0.53247499044700608, 0.00000000000000000 ),
                               (0.55189971330840681, 0.50228363992173752, 0.00000000000000000 ),
                               (0.53269911067322617, 0.48131557500677169, 0.00000000000000000 ),
                               (0.52076836142536975, 0.47806150355091476, 0.00000000000000000 ),
                               (0.50886902895577013, 0.47838753608466128, 0.00000000000000000 ),
                               (0.49867742691962913, 0.48264835153512164, 0.00000000000000000 ),
                               (0.49224616907898289, 0.49014090685121131, 0.00000000000000000 ),
                               (0.49173935940609609, 0.49438094923206660, 0.00000000000000000 ),
                               (0.49250246625151450, 0.49839053740482164, 0.00000000000000000 ),
                               (0.49454361321306389, 0.50154755045413602, 0.00000000000000000 ),
                               (0.49745717820065949, 0.50317358562752867, 0.00000000000000000 ),
                               (0.49888395770889871, 0.50301615091938545, 0.00000000000000000 ),
                               (0.50012160117661586, 0.50244704462921241, 0.00000000000000000 ),
                               (0.50095740046883197, 0.50152383477622209, 0.00000000000000000 ),
                               (0.50107955145675120, 0.50098875888952354, 0.00000000000000000 ),
                               (0.50105605626599747, 0.50045352403892940, 0.00000000000000000 ),
                               (0.50086894918345870, 0.49998474718699493, 0.00000000000000000 ),
                               (0.50053945884260675, 0.49966662451478433, 0.00000000000000000 ),
                               (0.50034430627617277, 0.49962054739305783, 0.00000000000000000 ),
                               (0.50015012042108842, 0.49962997721910873, 0.00000000000000000 ),
                               (0.49998265395837810, 0.49970077747304897, 0.00000000000000000 ),
                               (0.49987374966305814, 0.49982308521808211, 0.00000000000000000 ),
                               (0.49986302487024292, 0.49988726006383088, 0.00000000000000000 ),
                               (0.49986815504448728, 0.49994012045071656, 0.00000000000000000 )]
        expected_out_times = [.5,
                              0.75000000000000000,
                              1.0500000000000000,
                              1.4100000000000001,
                              1.6260000000000001,
                              1.8852000000000002,
                              2.1962400000000004,
                              2.5694880000000002,
                              2.7934368000000003,
                              3.0621753600000003,
                              3.3846616320000003,
                              3.7716451584000001,
                              4.0038352742400001,
                              4.2824634132480002,
                              4.6168171800576001,
                              5.0180417002291202,
                              5.2587764123320317,
                              5.5476580668555258,
                              5.8943160522837186,
                              6.3103056347975501,
                              6.5598993843058491,
                              6.8594118837158078,
                              7.2188268830077584,
                              7.4344758825829285,
                              7.6932546820731327,
                              8.0037892414613783,
                              8.3764307127272737,
                              8.6000155954868092,
                              8.8683174547982535,
                              9.1902796859719871,
                              9.5766343633804656,
                              9.7883171816902319,
                              10.000000000000000]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_max_tracing_time(self):
        """The rotational trace stops when elapsed time reaches max_tracing_time (limit is relative to start_time: .5 + 5 = 5.5)."""
        tracer=self.create_default_single_cell()
        tracer.max_change_direction_in_radians = np.pi*.2
        tracer.min_delta_time = -1
        tracer.max_tracing_time = 5
        start_time=.5
        scalars = [(0,1,0),(-1,0,0),(0,-1,0),(1,0,0)]
        point_activity = [True]*4
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",0)
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",10)
        result_tuple = tracer.trace_point((0,0,0),start_time)
        expected_out_trace =[( 0,0,0 ),
                             (0.00000000000000000, 0.25000000000000000, 0.00000000000000000 ),
                             (0.074999999999999997, 0.47499999999999998, 0.00000000000000000 ),
                             (0.21900000214576720, 0.63699999570846555, 0.00000000000000000 ),
                             (0.30928799843788146, 0.66810399758815764, 0.00000000000000000 ),
                             (0.40229310507774352, 0.67396399235725402, 0.00000000000000000 ),
                             (0.48679361495018003, 0.65024498560905453, 0.00000000000000000 ),
                             (0.54780151323509219, 0.59909560095787040, 0.00000000000000000 ),
                             (0.55928876277122497, 0.56619817004051198, 0.00000000000000000 ),
                             (0.56114558691518779, 0.53247499044700608, 0.00000000000000000 ),
                             (0.55189971330840681, 0.50228363992173752, 0.00000000000000000 ),
                             (0.53269911067322617, 0.48131557500677169, 0.00000000000000000 ),
                             (0.52076836142536975, 0.47806150355091476, 0.00000000000000000 ),
                             (0.50886902895577013, 0.47838753608466128, 0.00000000000000000 ),
                             (0.49867742691962913, 0.48264835153512164, 0.00000000000000000 ),
                             (0.49224616907898289, 0.49014090685121131, 0.00000000000000000 ),
                             (0.49173935940609609, 0.49438094923206660, 0.00000000000000000 ),
                             (0.49237657318600692, 0.49772905815126539, 0.00000000000000000 )]
        expected_out_times = [.5,
                              0.75000000000000000,
                              1.0500000000000000,
                              1.4100000000000001,
                              1.6260000000000001,
                              1.8852000000000002,
                              2.1962400000000004,
                              2.5694880000000002,
                              2.7934368000000003,
                              3.0621753600000003,
                              3.3846616320000003,
                              3.7716451584000001,
                              4.0038352742400001,
                              4.2824634132480002,
                              4.6168171800576001,
                              5.0180417002291202,
                              5.2587764123320317,
                              5.5]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
def test_max_tracing_distance(self):
"""test functionality of max tracing distance"""
tracer=self.create_default_single_cell()
tracer.max_change_direction_in_radians = np.pi*.2
tracer.min_delta_time = -1
tracer.max_tracing_distance = 1.0
start_time=.5
scalars = [(0,1,0),(-1,0,0),(0,-1,0),(1,0,0)]
point_activity = [True]*4
tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",0)
tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",10)
result_tuple = tracer.trace_point((0,0,0),start_time)
expected_out_trace = [ ( 0,0,0 ),
( 0.00000000000000000, 0.25000000000000000, 0.00000000000000000 ),
( 0.074999999999999997, 0.47499999999999998, 0.00000000000000000 ),
( 0.21900000214576720, 0.63699999570846555, 0.00000000000000000 ),
( 0.30928799843788146, 0.66810399758815764, 0.00000000000000000 ),
( 0.40229310507774352, 0.67396399235725402, 0.00000000000000000 ),
( 0.48679361495018003, 0.65024498560905453, 0.00000000000000000 ),
( 0.50183556502673621, 0.63763372523131490, 0.00000000000000000 )]
expected_out_times = [ .5,
0.75000000000000000,
1.0500000000000000,
1.4100000000000001,
1.6260000000000001,
1.8852000000000002,
2.1962400000000004,
2.4774609356360582]
print(len(result_tuple[0]))
print(" Length of Expected:")
print(len(expected_out_trace))
np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0],6)
np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_start_out_of_cell(self):
        """Starting outside the grid yields an empty trace and empty times."""
        tracer=self.create_default_single_cell()
        start_time=.5
        result_tuple = tracer.trace_point((-1,0,0),start_time)
        expected_out_trace = []
        expected_out_times = []
        np.testing.assert_equal(0,len(result_tuple[0]))
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_beyond_timestep(self):
        """Starting after the last scalar time step (10) yields an empty trace."""
        tracer=self.create_default_single_cell()
        start_time=10.1
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = []
        expected_out_times = []
        np.testing.assert_equal(0,len(result_tuple[0]))
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_before_timestep(self):
        """Starting slightly before the first scalar time step (0) still traces to the corner."""
        tracer=self.create_default_single_cell()
        start_time=-0.1
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [ ( .5,.5,0 ), (1,1,0)]
        expected_out_times = [-.1,.4]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_vector_multiplier(self):
        """Halving vector_multiplier (0.5) keeps the spiral path shape but stretches the step times."""
        tracer=self.create_default_single_cell()
        tracer.max_change_direction_in_radians = np.pi*.2
        tracer.min_delta_time = -1
        tracer.vector_multiplier = 0.5
        start_time=.5
        scalars = [(0,1,0),(-1,0,0),(0,-1,0),(1,0,0)]
        point_activity = [True]*4
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",0)
        tracer.add_grid_scalars_at_time(scalars,"points",point_activity,"points",10)
        result_tuple = tracer.trace_point((0,0,0),start_time)
        expected_out_trace = [( 0,0,0 ),
                              (0.00000000000000000, 0.25000000000000000, 0.00000000000000000 ),
                              (0.074999999999999997, 0.47499999999999998, 0.00000000000000000 ),
                              (0.21900000214576720, 0.63699999570846555, 0.00000000000000000 ),
                              (0.30928799843788146, 0.66810399758815764, 0.00000000000000000 ),
                              (0.40229310507774352, 0.67396399235725402, 0.00000000000000000 ),
                              (0.48679361495018003, 0.65024498560905453, 0.00000000000000000 ),
                              (0.54780151323509219, 0.59909560095787040, 0.00000000000000000 ),
                              (0.55928876277122497, 0.56619817004051198, 0.00000000000000000 ),
                              (0.56114558691518779, 0.53247499044700608, 0.00000000000000000 ),
                              (0.55189971330840681, 0.50228363992173752, 0.00000000000000000 ),
                              (0.53269911067322617, 0.48131557500677169, 0.00000000000000000 ),
                              (0.52076836142536975, 0.47806150355091476, 0.00000000000000000 ),
                              (0.50886902895577013, 0.47838753608466128, 0.00000000000000000 ),
                              (0.49867742691962913, 0.48264835153512164, 0.00000000000000000 ),
                              (0.49224616907898289, 0.49014090685121131, 0.00000000000000000 ),
                              (0.49175783605462037, 0.49422637094165467, 0.00000000000000000 )]
        expected_out_times = [ .5,
                               1.0000000000000000,
                               1.6000000000000001,
                               2.3200000000000003,
                               2.7520000000000002,
                               3.2704000000000004,
                               3.8924800000000004,
                               4.6389760000000004,
                               5.0868736000000006,
                               5.6243507200000007,
                               6.2693232640000005,
                               7.0432903168000003,
                               7.5076705484800001,
                               8.0649268264960003,
                               8.7336343601152002,
                               9.5360834004582404,
                               10.000000000000000]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_multi_cell(self):
        """A point crosses from the first quad into the second (faster) one and exits at x=2."""
        tracer=self.create_default_two_cell()
        start_time=0
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [ ( .5,.5,0 ),
                               (0.60000000149011612, 0.50000000000000000, 0.00000000000000000 ),
                               (0.73200000077486038, 0.50000000000000000, 0.00000000000000000 ),
                               (0.90940801054239273, 0.50000000000000000, 0.00000000000000000 ),
                               (1.1529537134766579, 0.50000000000000000, 0.00000000000000000 ),
                               (1.4957102079987525, 0.50000000000000000, 0.00000000000000000 ),
                               (1.9923067892670629, 0.50000000000000000, 0.00000000000000000 ),
                               (2,.5,0)]
        expected_out_times = [ 0,
                               1.0000000000000000,
                               2.2000000000000002,
                               3.6400000000000001,
                               5.3680000000000003,
                               7.4416000000000002,
                               9.9299199999999992,
                               9.9683860530914945]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_max_change_velocity(self):
        """A tight max_change_velocity (.01) forces many small steps while crossing the two-cell grid."""
        tracer=self.create_default_two_cell()
        tracer.max_change_velocity = .01
        tracer.min_delta_time = .001
        start_time=0
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [ (.5,.5,0 ),
                               (0.60000000149011612, 0.50000000000000000, 0.00000000000000000 ),
                               (0.66600000113248825, 0.50000000000000000, 0.00000000000000000 ),
                               (0.74995200067758561, 0.50000000000000000, 0.00000000000000000 ),
                               (0.80394992786645891, 0.50000000000000000, 0.00000000000000000 ),
                               (0.87154669338464741, 0.50000000000000000, 0.00000000000000000 ),
                               (0.95686786960840231, 0.50000000000000000, 0.00000000000000000 ),
                               (1.0112451727318765, 0.50000000000000000, 0.00000000000000000 ),
                               (1.0789334834771158, 0.50000000000000000, 0.00000000000000000 ),
                               (1.1637975516948893, 0.50000000000000000, 0.00000000000000000 ),
                               (1.2174527417415202, 0.50000000000000000, 0.00000000000000000 ),
                               (1.2839153379250163, 0.50000000000000000, 0.00000000000000000 ),
                               (1.3667568384715398, 0.50000000000000000, 0.00000000000000000 ),
                               (1.4187699365302351, 0.50000000000000000, 0.00000000000000000 ),
                               (1.4829247317645506, 0.50000000000000000, 0.00000000000000000 ),
                               (1.5624845364724593, 0.50000000000000000, 0.00000000000000000 ),
                               (1.6587784227485147, 0.50000000000000000, 0.00000000000000000 ),
                               (1.7743310862797812, 0.50000000000000000, 0.00000000000000000 ),
                               (1.9129942825173010, 0.50000000000000000, 0.00000000000000000 ),
                               (2,.5,0)]
        expected_out_times = [ 0,
                               1.0000000000000000,
                               1.6000000000000001,
                               2.3200000000000003,
                               2.7520000000000002,
                               3.2704000000000004,
                               3.8924800000000004,
                               4.2657280000000002,
                               4.7136256000000003,
                               5.2511027200000004,
                               5.5735889920000004,
                               5.9605725184000002,
                               6.4249527500800001,
                               6.7035808890880002,
                               7.0379346558976001,
                               7.4391591760691202,
                               7.9206286002749442,
                               8.4983919093219331,
                               9.1917078801783187,
                               9.6267364611093829]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_unique_time_steps(self):
        """Adding a third, faster scalar set at t=20 changes the velocities used when tracing from t=10."""
        tracer=self.create_default_two_cell()
        start_time=10
        scalars = [(.2,0,0),(.3,0,0)]
        point_activity = [True]*2
        tracer.add_grid_scalars_at_time(scalars,"cells",point_activity,"cells",20)
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [ ( .5,.5,0 ),
                               (0.70000000298023224, 0.50000000000000000, 0.00000000000000000 ),
                               (0.95200000226497650, 0.50000000000000000, 0.00000000000000000 ),
                               (1.2734079944372176, 0.50000000000000000, 0.00000000000000000 ),
                               (1.6897536998434066, 0.50000000000000000, 0.00000000000000000 ),
                               (2,.5,0)]
        expected_out_times = [ 10,
                               11.000000000000000,
                               12.199999999999999,
                               13.640000000000001,
                               15.368000000000000,
                               16.627525378316030]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_inactive_cell(self):
        """The trace stops at the boundary of the second cell once that cell is marked inactive."""
        tracer=self.create_default_two_cell()
        start_time=10
        scalars = [(.2,0,0),(99999,0,0)]
        point_activity = [True]*2
        # Deactivate the second cell; its (absurd) scalar must therefore never be used.
        point_activity[1]=False
        tracer.add_grid_scalars_at_time(scalars,"cells",point_activity,"cells",20)
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = [ ( .5,.5,0 ),
                               (0.70000000298023224, 0.50000000000000000, 0.00000000000000000 ),
                               (0.93040000677108770, 0.50000000000000000, 0.00000000000000000 ),
                               (0.99788877571821222, 0.50000000000000000, 0.00000000000000000 )]
        expected_out_times = [ 10,
                               11.000000000000000,
                               12.199999999999999,
                               12.560000000000000]
        np.testing.assert_array_almost_equal(expected_out_trace,result_tuple[0])
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
    def test_start_inactive_cell(self):
        """Starting inside an inactive cell yields an empty trace and empty times."""
        tracer=self.create_default_two_cell()
        start_time=10
        scalars = [(.2,0,0),(99999,0,0)]
        point_activity = [True]*2
        # Deactivate the first cell, which contains the start point.
        point_activity[0]=False
        tracer.add_grid_scalars_at_time(scalars,"cells",point_activity,"cells",20)
        result_tuple = tracer.trace_point((.5,.5,0),start_time)
        expected_out_trace = []
        expected_out_times = []
        np.testing.assert_equal(0,len(result_tuple[0]))
        np.testing.assert_array_equal(expected_out_times,result_tuple[1])
def test_tutorial(self):
"""A test to serve as a tutorial"""
# -> ->
# 6----7----8|
# | | |
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn")
if (evtfbmip == True):
# ----------------------------------------------------------------------------------------------- #
# ----- We now get all possible combinations/permutations with the elements of our equation ----- #
# ----------------------------------------------------------------------------------------------- #
customizedPermutations = combinations.getCustomizedPermutationList()
customizedPermutations.pop(0) # We remove the null value
customizedPermutations.pop(len(customizedPermutations)-1) # We remove the last one because we already did it
for actualPermutation in range(0, len(customizedPermutations)):
newOriginalMatrix_x = []
for row in range(0, rowLengthOfBothMatrixes):
temporalRow = []
for column in range(0, len(customizedPermutations[actualPermutation])):
temporalRow.append(originalMatrix_x[row][customizedPermutations[actualPermutation][column]])
newOriginalMatrix_x.append(temporalRow)
# ----- WE START SEARCHING FOR THE BEST MODELING RESULTS USING CURRENT PERMUTATION ----- #
# We define a variable to save the search patterns in original matrix x
possibleCombinations = []
for n in range (0, len(newOriginalMatrix_x[0])):
possibleCombinations.append(n)
combinations = Combinations(possibleCombinations)
searchPatterns = combinations.getPositionCombinationsList()
# We start to search for the coefficients that give us the best accuracy
for currentSearchPattern in range(0, len(searchPatterns)):
currentMatrix_x = [ [ 0 for i in range(len(newOriginalMatrix_x[0])) ] for j in range(rowLengthOfBothMatrixes) ]
# We assign the current distribution that we want to study of the
# variables of the matrix x, to evaluate its resulting regression
# coefficients
for currentColumnOfMatrix_x in range(0, len(newOriginalMatrix_x[0])):
for column in range(0, len(newOriginalMatrix_x[0])):
if (customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][currentColumnOfMatrix_x]] == column):
for row in range(0, rowLengthOfBothMatrixes):
currentMatrix_x[row][currentColumnOfMatrix_x] = originalMatrix_x[row][column]
# We get the Transposed matrix of matrix X. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = currentMatrix_x
transposedMatrix_X = matrixMath.getTransposedMatrix(temporalMatrix1)
# WE GET MATRIX A. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = currentMatrix_x
matrix_A = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# WE GET MATRIX g. NOTE: We create a temporal
# variable to save matrix x because remember that in python, children
# and parent inheritance is ignored when using clases
temporalMatrix1 = transposedMatrix_X
temporalMatrix2 = matrix_y
matrix_g = matrixMath.getMultiplication(temporalMatrix1, temporalMatrix2)
# We get inverse matrix of matrix A.
inversedMatrix_A = matrixMath.getInverse(matrix_A)
# We get matrix b, which will contain the coeficient values
matrix_b = matrixMath.getMultiplication(inversedMatrix_A, matrix_g)
# ----- WE DETERMINE THE ACCURACY OF THE OBTAINED COEFFICIENTS ----- #
# We re-arrange the obtained coefficients to then evaluate this
# model
currentMatrix_b = [ [ 0 for i in range(1) ] for j in range(len(originalMatrix_x[0])) ]
for row in range(0, len(newOriginalMatrix_x[0])):
trueRowOfCoefficient = customizedPermutations[actualPermutation][searchPatterns[currentSearchPattern][row]]
currentMatrix_b[trueRowOfCoefficient][0] = matrix_b[row][0]
# We obtain the predicted data through the current obtained
# coefficients
newNumberOfIndependentVariables = len(currentMatrix_x[0])
predictedData = []
for row in range(0, len(matrix_y)):
temporalRow = []
actualIc = currentMatrix_b[0][0]
for currentIndependentVariable in range(0, (newNumberOfIndependentVariables-1)):
actualIc = actualIc + currentMatrix_b[currentIndependentVariable+1][0]*self.x_samplesList[row][currentIndependentVariable]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = len(matrix_y)
for row in range(0, numberOfDataPoints):
n2 = matrix_y[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
if (isClassification == True):
if (abs(n1) > abs(n2)): # n2 has to be the one with the highest value with respect to n1
n2 = predictedData[row][0]
n1 = matrix_y[row][0]
if ((n1==0) and (n2>=-1 and n2<=1) and (n2!=0)):
predictionAcurracy = predictionAcurracy + ((1-abs(n2))/(1-n1))
if (n1==n2):
predictionAcurracy = predictionAcurracy + 1
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentMatrix_b)
temporalRow.append(currentMatrix_x)
allAccuracies.append(temporalRow)
# We save the current the modeling results if they are better than
# the actual best
currentBestAccuracy = bestModelingResults[1]
if (predictionAcurracy > currentBestAccuracy):
bestModelingResults = []
bestModelingResults.append(currentMatrix_b)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2")
# We include all the reports of all the models studied to the reporting
# variable that contains the report of the best fitting model and we
# then return it
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getPolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has only 1
independent variable (x) in it, through a polynomial regression solution.
EXAMPLE CODE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[3.468869185343018e-08],
[1.5123521825664843e-07],
[2.2104758041867345e-07],
[1.0817080022072073e-07]]
accuracyFromTraining =
99.99999615014885
predictedData =
[[3.4769003219065136e-11],
[7.199670288280337e-11],
[1.597970024878988e-10],
[3.792980021998557e-10]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getPolynomialRegression(self, orderOfThePolynomial, evtfbmip=False, isClassification=True):
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
x_samples = matrixMath.getTransposedMatrix(self.x_samplesList)[0]
y_samples = matrixMath.getTransposedMatrix(self.y_samplesList)[0]
dataLength = len(y_samples)
matrixLength = orderOfThePolynomial+1
matrix_A = []
# MATRIX A MATHEMATICAL PROCEDURE
for n in range(0, matrixLength):
temporalRow = []
for i in range(0, matrixLength):
# Math process for Matrix_A's Row 1
if ((n==0) and (i==0)):
temporalRow.append(dataLength)
if ((n==0) and (i!=0)):
temporalSum = 0
for j in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
xMultiplicationsResult = 1
for w in range(0, i):
xMultiplicationsResult = xMultiplicationsResult*x_samples[j]
temporalSum = temporalSum + xMultiplicationsResult
temporalRow.append(temporalSum)
# Math process for Matrix_A's Row 2 and above
if (n!=0):
if (i==0):
temporalSum = 0
for j in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
additionalMultiplications = n-1
if (additionalMultiplications < 0):
additionalMultiplications = 0
xMultiplicationsResult = 1
for w in range(0, (i+1+additionalMultiplications)):
xMultiplicationsResult = xMultiplicationsResult*x_samples[j]
temporalSum = temporalSum + xMultiplicationsResult
temporalRow.append(temporalSum)
else:
temporalSum = 0
for j in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
additionalMultiplications = n-1
if (additionalMultiplications < 0):
additionalMultiplications = 0
xMultiplicationsResult = 1
for w in range(0, (i+1+additionalMultiplications)):
xMultiplicationsResult = xMultiplicationsResult*x_samples[j]
temporalSum = temporalSum + xMultiplicationsResult
temporalRow.append(temporalSum)
matrix_A.append(temporalRow)
# MATRIX g MATHEMATICAL PROCEDURE
matrix_g = []
for n in range(0, matrixLength):
temporalRow = []
temporalSum = 0
for i in range(0, dataLength):
# For loop use to get the x_i value elevated to an exponential
xMultiplicationsResult = 1
for w in range(0, n):
xMultiplicationsResult = xMultiplicationsResult*x_samples[i]
temporalSum = temporalSum + xMultiplicationsResult*y_samples[i]
temporalRow.append(temporalSum)
matrix_g.append(temporalRow)
# GET THE INVERSE OF MATRIX A
matrixMath = mLAL.MatrixMath()
inverseMatrix_A = matrixMath.getInverse(matrix_A)
# MULTIPLY INVERSE OF MATRIX A WITH MATRIX g
matrix_b = matrixMath.getMultiplication(inverseMatrix_A, matrix_g)
# We determine the accuracy of the obtained coefficients
predictedData = []
bothMatrixRowLength = len(y_samples)
numberOfIndependentVariables = len(matrix_b)-1
for currentDataPoint in range(0, bothMatrixRowLength):
temporalRow = []
actualIc = matrix_b[0][0]
for currentIndependentVariable in range(0, numberOfIndependentVariables):
actualIc = actualIc + matrix_b[currentIndependentVariable+1][0]*x_samples[currentDataPoint]**(currentIndependentVariable+1)
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = bothMatrixRowLength
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if | |
153.20
27.0 83.47 162.05
28.0 83.00 169.89
29.0 82.41 176.64
30.0 81.74 182.37
31.0 81.53 181.04
32.0 81.43 178.14
33.0 81.30 175.32
34.0 81.08 172.47
35.0 80.66 169.55
36.0 80.22 166.89
37.0 79.76 164.46
38.0 79.29 162.23
39.0 78.80 160.20
40.0 79.13 159.12
41.0 79.45 157.97
42.0 79.77 156.75
43.0 80.08 155.46
44.0 80.39 154.08
45.0 80.69 152.62
46.0 80.98 151.05
47.0 81.13 150.65
48.0 81.19 151.08
49.0 81.25 151.51
50.0 81.31 152.21
51.0 81.38 155.38
52.0 81.43 158.60
53.0 81.44 161.83
54.0 81.44 165.08
55.0 81.40 168.30
56.0 81.33 172.18
57.0 81.22 175.97
58.0 81.07 179.66
59.0 80.89 183.21
60.0 80.67 186.61
61.0 80.49 190.87
62.0 80.37 197.35
63.0 80.14 203.60
64.0 79.80 209.50
65.0 79.85 210.35
66.0 79.90 211.20
67.0 79.94 212.07
68.0 79.99 212.94
69.0 80.20 211.11
70.0 80.46 207.98
71.0 80.69 204.68
72.0 80.89 201.23
73.0 81.05 197.65
74.0 81.18 193.94
75.0 81.27 190.14
76.0 81.59 195.53
77.0 81.79 207.82
78.0 81.61 220.13
79.0 81.07 231.45
80.0 81.02 232.09
81.0 81.05 231.62
82.0 81.07 231.16
83.0 81.09 230.69
84.0 81.26 227.31
85.0 81.47 221.76
86.0 81.59 216.00
87.0 81.63 210.12
88.0 81.58 204.25
89.0 81.45 198.51
90.0 81.23 192.99
91.0 80.94 187.78
92.0 81.02 185.31
93.0 81.39 184.44
94.0 81.76 183.50
95.0 82.43 179.95
96.0 83.10 175.40
97.0 83.71 169.92
98.0 84.25 163.35
99.0 84.71 155.53
100.0 85.05 146.45
101.0 84.53 152.65
102.0 83.71 160.59
103.0 82.79 166.60
104.0 81.81 171.23
105.0 81.32 175.20
106.0 81.60 179.66
107.0 81.82 184.38
108.0 81.98 189.32
109.0 82.08 194.43
110.0 82.12 199.63
111.0 82.03 203.00
112.0 81.66 199.22
113.0 81.26 195.76
114.0 80.83 192.62
115.0 80.37 189.76
116.0 79.90 187.17
117.0 79.19 187.67
118.0 78.34 189.84
119.0 77.47 191.71
120.0 76.59 193.35
121.0 76.23 193.12
122.0 75.94 192.71
123.0 75.74 192.46
124.0 75.95 192.77
125.0 76.16 193.09
126.0 76.38 193.42
127.0 76.59 193.76
128.0 76.79 194.12
129.0 77.00 194.48
130.0 77.21 194.85
131.0 77.38 195.04
132.0 77.22 193.47
133.0 77.04 191.93
134.0 76.86 190.44
135.0 76.26 192.29
136.0 75.46 195.27
137.0 74.62 197.94
138.0 73.76 200.34
139.0 72.87 202.49
140.0 71.59 202.74
141.0 70.15 202.29
142.0 68.70 201.90
143.0 69.87 198.07
144.0 70.94 193.81
145.0 71.91 189.09
146.0 72.75 183.89
147.0 73.44 178.23
148.0 73.97 172.14
149.0 74.31 165.73
150.0 74.47 159.11
151.0 74.42 152.44
152.0 74.17 145.90
153.0 73.74 139.63
154.0 73.26 134.46
155.0 73.88 136.15
156.0 74.11 138.70
157.0 73.41 142.81
158.0 72.65 146.58
159.0 71.89 149.70
160.0 71.74 149.67
161.0 71.60 149.65
162.0 71.46 149.63
163.0 71.31 149.61
164.0 71.17 149.58
165.0 71.03 149.56
166.0 70.89 149.54
167.0 70.74 149.52
168.0 70.60 149.50
169.0 70.64 140.48
170.0 70.23 131.62
171.0 69.98 125.23
172.0 69.67 119.51
173.0 69.17 114.01
174.0 68.51 108.79
175.0 67.69 103.90
176.0 66.74 99.36
177.0 66.01 95.57
178.0 66.01 92.81
179.0 65.66 91.06
180.0 65.09 90.02
181.0 64.52 89.02
182.0 63.93 88.07
183.0 63.89 88.62
184.0 64.20 90.17
185.0 64.49 91.77
186.0 64.77 93.39
187.0 65.02 95.05
188.0 65.26 96.73
189.0 65.48 98.45
190.0 65.67 100.19
191.0 65.85 101.96
192.0 65.88 103.25
193.0 65.85 104.31
194.0 65.82 105.38
195.0 64.95 105.43
196.0 63.53 104.86
197.0 62.11 104.35
198.0 60.68 103.89
"""
if plate == 'GL':
apwp = """
0.0 90.00 0.00
1.0 88.33 180.70
2.0 86.67 180.70
3.0 86.14 175.33
4.0 85.95 173.39
5.0 85.79 171.94
6.0 85.62 170.59
7.0 85.45 169.35
8.0 85.28 168.19
9.0 85.11 167.12
10.0 84.94 166.12
11.0 84.76 165.19
12.0 84.57 164.34
13.0 84.22 163.81
14.0 83.88 163.34
15.0 83.49 162.61
16.0 82.96 160.83
17.0 82.42 159.31
18.0 81.88 157.98
19.0 81.33 156.83
20.0 81.12 156.68
21.0 81.41 157.94
22.0 81.70 159.28
23.0 81.98 160.72
24.0 82.26 162.26
25.0 82.53 163.92
26.0 82.80 165.70
27.0 82.16 172.55
28.0 81.43 178.31
29.0 80.63 183.13
30.0 79.78 187.17
31.0 79.55 186.15
32.0 79.47 183.99
33.0 79.37 181.86
34.0 79.20 179.58
35.0 78.84 176.94
36.0 78.45 174.48
37.0 78.05 172.17
38.0 77.63 170.01
39.0 77.20 168.00
40.0 77.40 168.23
41.0 77.61 168.47
42.0 77.81 168.71
43.0 78.01 168.97
44.0 78.21 169.23
45.0 78.42 169.50
46.0 78.62 169.78
47.0 78.58 170.26
48.0 78.38 170.84
49.0 78.18 171.41
50.0 77.97 172.10
51.0 77.62 174.07
52.0 77.26 175.92
53.0 76.88 177.68
54.0 76.50 179.33
55.0 76.10 180.90
56.0 75.72 182.56
57.0 75.33 184.14
58.0 74.93 185.63
59.0 74.52 187.05
60.0 74.10 188.40
61.0 73.71 190.34
62.0 73.39 193.73
63.0 73.02 196.99
64.0 72.60 200.10
65.0 72.58 200.61
66.0 72.56 201.13
67.0 72.53 201.64
68.0 72.51 202.15
69.0 72.64 201.35
70.0 72.83 199.97
71.0 73.02 198.55
72.0 73.19 197.11
73.0 73.35 195.64
74.0 73.50 194.14
75.0 73.65 192.62
76.0 73.70 196.06
77.0 73.52 202.77
78.0 73.14 209.26
79.0 72.57 215.41
80.0 72.42 216.02
81.0 72.32 216.04
82.0 72.23 216.07
83.0 72.14 216.09
84.0 72.16 214.56
85.0 72.23 211.98
86.0 72.27 209.39
87.0 72.28 206.78
88.0 72.25 204.19
89.0 72.19 201.60
90.0 72.09 199.04
91.0 71.96 196.50
92.0 72.14 195.56
93.0 72.55 195.67
94.0 72.96 195.79
95.0 73.76 195.21
96.0 74.60 194.49
97.0 75.44 193.69
98.0 76.28 192.80
99.0 77.11 191.79
100.0 77.94 190.65
101.0 77.17 190.62
102.0 76.01 190.85
103.0 74.85 191.04
104.0 73.70 191.21
105.0 73.00 192.27
106.0 72.98 194.70
107.0 72.94 197.12
108.0 72.86 199.52
109.0 72.76 201.90
110.0 72.62 204.25
111.0 72.45 205.69
112.0 72.21 203.68
113.0 71.94 201.72
114.0 71.66 199.82
115.0 71.35 197.98
116.0 71.03 196.20
117.0 70.33 195.81
118.0 69.39 196.30
119.0 68.44 196.75
120.0 67.49 197.16
121.0 67.17 196.83
122.0 66.91 196.42
123.0 66.74 196.16
124.0 66.92 196.46
125.0 67.11 196.77
126.0 67.30 197.09
127.0 67.48 197.41
128.0 67.67 197.73
129.0 67.85 198.06
130.0 68.04 198.39
131.0 68.19 198.60
132.0 68.11 197.59
133.0 68.02 196.59
134.0 67.93 195.60
135.0 67.26 196.33
136.0 66.33 197.70
137.0 65.39 198.98
138.0 64.45 200.17
139.0 63.49 201.28
140.0 62.22 201.09
141.0 60.81 200.42
142.0 59.40 199.80
143.0 60.73 197.43
144.0 62.01 194.85
145.0 63.24 192.06
146.0 64.41 189.02
147.0 65.52 185.72
148.0 66.54 182.13
149.0 67.48 178.26
150.0 68.32 174.08
151.0 69.04 169.61
152.0 69.64 164.86
153.0 70.11 159.86
154.0 70.43 155.41
155.0 70.72 157.56
156.0 70.56 159.72
157.0 69.42 161.65
158.0 68.26 163.38
159.0 67.19 164.78
160.0 67.04 164.60
161.0 66.90 164.42
162.0 66.76 164.24
163.0 66.62 164.06
164.0 66.47 163.88
165.0 66.33 163.71
166.0 66.19 163.54
167.0 66.04 163.37
168.0 65.90 163.20
169.0 67.23 156.43
170.0 68.24 148.97
171.0 69.04 143.36
172.0 69.69 138.01
173.0 70.17 132.36
174.0 70.47 126.50
175.0 70.57 120.51
176.0 70.48 114.53
177.0 70.45 109.51
178.0 70.93 106.46
179.0 70.89 104.04
180.0 70.54 102.15
181.0 70.16 100.33
182.0 69.76 98.58
183.0 69.64 99.18
184.0 69.68 101.32
185.0 69.70 103.47
186.0 69.69 105.62
187.0 69.65 107.76
188.0 69.59 109.89
189.0 69.50 112.01
190.0 69.39 114.11
191.0 69.25 116.18
192.0 69.07 117.58
193.0 68.88 118.69
194.0 68.68 119.77
195.0 67.88 118.87
196.0 66.66 116.82
197.0 65.42 114.97
198.0 64.15 113.29
"""
if plate == 'IN':
apwp = """
0.0 90.00 0.00
1.0 88.57 197.10
2.0 87.14 197.10
3.0 86.82 197.10
4.0 86.76 201.35
5.0 86.70 205.94
6.0 86.62 210.32
7.0 86.52 214.48
8.0 86.40 218.38
9.0 86.26 222.02
10.0 86.11 225.39
11.0 85.95 228.51
12.0 85.77 231.10
13.0 85.46 231.14
14.0 85.15 231.18
15.0 84.84 230.71
16.0 84.54 228.40
17.0 84.23 226.34
18.0 83.92 224.49
19.0 83.59 222.82
20.0 83.40 225.11
21.0 83.32 233.06
22.0 83.11 240.67
23.0 82.79 247.72
24.0 82.37 254.09
25.0 81.87 259.74
26.0 81.30 264.70
27.0 79.78 264.31
28.0 78.25 264.03
29.0 76.73 263.81
30.0 75.20 263.63
31.0 74.95 264.21
32.0 75.01 264.98
33.0 75.06 265.75
34.0 75.09 266.19
35.0 75.05 265.83
36.0 75.02 265.47
37.0 74.98 265.11
38.0 74.94 264.75
39.0 74.90 264.40
40.0 74.38 266.62
41.0 73.83 268.69
42.0 73.26 270.63
43.0 72.68 272.45
44.0 72.09 274.14
45.0 71.48 275.74
46.0 70.85 277.23
47.0 70.10 277.93
48.0 69.27 278.14
49.0 68.45 278.34
50.0 67.56 278.48
51.0 66.19 278.29
52.0 64.82 278.12
53.0 63.45 277.97
54.0 62.07 277.83
55.0 60.70 277.70
56.0 58.96 277.66
57.0 57.23 277.62
58.0 55.49 277.58
59.0 53.75 277.55
60.0 52.02 277.52
61.0 50.04 277.70
62.0 47.49 278.32
63.0 44.95 278.88
64.0 42.40 279.40
65.0 41.10 279.80
66.0 39.80 280.18
67.0 38.50 280.54
68.0 37.19 280.90
69.0 36.48 281.11
70.0 36.03 281.27
71.0 35.58 281.43
72.0 35.13 281.58
73.0 34.68 281.74
74.0 34.23 281.89
75.0 33.78 282.04
76.0 32.33 283.04
77.0 30.21 284.54
78.0 28.07 285.98
79.0 25.92 287.36
80.0 25.05 287.84
81.0 24.33 288.22
82.0 23.61 288.59
83.0 22.89 288.95
84.0 22.55 289.11
85.0 22.46 289.12
86.0 22.37 289.13
87.0 22.29 289.15
88.0 22.20 289.16
89.0 22.11 289.17
90.0 22.02 289.18
91.0 21.94 289.20
92.0 21.59 289.76
93.0 21.07 290.69
94.0 20.55 291.61
95.0 20.43 292.63
96.0 20.34 293.67
97.0 20.24 294.70
98.0 20.14 295.73
99.0 20.04 296.76
100.0 19.92 297.79
101.0 18.99 297.44
102.0 17.86 296.75
103.0 16.72 296.07
104.0 15.58 295.40
105.0 14.47 295.17
106.0 13.41 295.58
107.0 12.35 295.98
108.0 11.28 296.39
109.0 10.22 296.79
110.0 9.15 297.18
111.0 8.25 297.49
112.0 7.98 297.44
113.0 7.71 297.38
114.0 7.44 297.33
115.0 7.18 297.27
116.0 6.91 297.22
117.0 6.09 297.02
118.0 4.90 296.72
119.0 3.71 296.43
120.0 2.52 296.13
121.0 1.90 296.20
122.0 1.34 296.31
123.0 0.80 296.53
124.0 0.32 297.16
125.0 -0.16 297.78
126.0 -0.64 298.41
127.0 -1.12 299.04
128.0 -1.60 299.67
129.0 -2.09 300.30
130.0 -2.57 300.93
131.0 -3.01 301.50
132.0 -3.16 301.53
133.0 -3.31 301.56
134.0 -3.46 301.59
135.0 -4.41 301.48
136.0 -5.71 301.30
137.0 -7.01 301.12
138.0 -8.31 300.94
139.0 -9.61 300.75
140.0 -10.62 299.98
141.0 -11.51 298.94
142.0 -12.40 297.90
143.0 -10.89 298.80
144.0 -9.37 299.69
145.0 -7.85 300.58
146.0 -6.33 301.45
147.0 -4.81 302.32
148.0 -3.29 303.19
149.0 -1.76 304.06
150.0 -0.24 304.92
151.0 1.28 305.78
152.0 2.81 306.65
153.0 4.33 307.52
154.0 5.61 308.38
155.0 4.72 309.22
156.0 3.82 309.62
157.0 2.88 309.03
158.0 1.94 308.43
159.0 1.08 307.93
160.0 0.88 308.24
161.0 0.68 308.55
162.0 0.49 308.85
163.0 0.29 309.16
164.0 0.09 309.47
165.0 -0.11 309.78
166.0 -0.30 310.08
167.0 -0.50 310.39
168.0 -0.70 310.70
169.0 1.16 311.47
170.0 3.03 312.25
171.0 4.29 313.12
172.0 5.40 314.02
173.0 6.51 314.92
174.0 7.62 315.83
175.0 8.72 316.74
176.0 9.83 317.65
177.0 10.65 318.58
178.0 10.83 319.52
179.0 11.31 320.00
180.0 11.98 320.18
181.0 12.66 320.35
182.0 13.33 320.53
183.0 13.28 320.28
184.0 12.74 319.76
185.0 12.21 319.24
186.0 11.67 318.72
187.0 11.13 318.20
188.0 10.59 317.69
189.0 10.05 317.17
190.0 9.51 316.66
191.0 8.96 316.15
192.0 8.62 315.75
193.0 8.36 315.40
194.0 8.10 315.04
195.0 8.76 314.48
196.0 10.03 313.78
197.0 11.30 313.07
198.0 12.56 312.35
"""
if plate == 'NA':
apwp = """
0.0 90.00 0.00
1.0 88.33 180.70
2.0 86.67 180.70
3.0 86.14 175.33
4.0 85.95 173.39
5.0 85.79 171.94
6.0 85.62 170.59
7.0 85.45 169.35
8.0 85.28 168.19
9.0 85.11 167.12
10.0 84.94 166.12
11.0 84.76 165.19
12.0 84.57 164.34
13.0 84.22 163.81
14.0 83.88 163.34
15.0 83.49 162.61
16.0 82.96 160.83
17.0 82.42 159.31
18.0 81.88 157.98
19.0 81.33 156.83
20.0 81.12 156.68
21.0 81.41 157.94
22.0 81.70 159.28
23.0 81.98 160.72
24.0 82.26 162.26
25.0 82.53 163.92
26.0 82.80 165.70
27.0 82.16 172.55
28.0 81.43 178.31
29.0 80.63 183.13
30.0 79.78 187.17
31.0 79.55 186.15
32.0 79.47 183.99
33.0 79.37 181.86
34.0 79.20 179.56
35.0 78.86 176.88
36.0 78.50 174.36
37.0 78.12 171.99
38.0 77.72 169.78
39.0 77.30 167.70
40.0 77.61 167.72
41.0 77.92 167.75
42.0 78.23 167.77
43.0 78.54 167.80
44.0 78.85 167.83
45.0 79.16 167.86
46.0 79.48 167.89
47.0 79.55 168.32
48.0 79.47 169.01
49.0 79.38 169.70
50.0 79.28 170.59
51.0 79.05 173.39
52.0 78.79 176.08
53.0 78.52 178.64
54.0 78.22 181.08
55.0 77.90 183.40
56.0 77.51 185.86
57.0 77.09 188.16
58.0 76.65 190.32
59.0 | |
"""
General serializer field tests.
"""
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from uuid import uuid4
from django.core import validators
from django.db import models
from django.test import TestCase
from django.utils.datastructures import SortedDict
from rest_framework import serializers
from rest_framework.tests.models import RESTFrameworkModel
class TimestampedModel(models.Model):
    """Model with Django-managed timestamps, used to test read_only defaults."""
    added = models.DateTimeField(auto_now_add=True)  # set once, on creation
    updated = models.DateTimeField(auto_now=True)  # refreshed on every save()
class CharPrimaryKeyModel(models.Model):
    """Model whose primary key is a CharField rather than an AutoField."""
    id = models.CharField(max_length=20, primary_key=True)
class TimestampedModelSerializer(serializers.ModelSerializer):
    """Default ModelSerializer over TimestampedModel (no field overrides)."""
    class Meta:
        model = TimestampedModel
class CharPrimaryKeyModelSerializer(serializers.ModelSerializer):
    """Default ModelSerializer over CharPrimaryKeyModel (no field overrides)."""
    class Meta:
        model = CharPrimaryKeyModel
class TimeFieldModel(models.Model):
    """Model exposing a bare TimeField."""
    clock = models.TimeField()
class TimeFieldModelSerializer(serializers.ModelSerializer):
    """Default ModelSerializer over TimeFieldModel (no field overrides)."""
    class Meta:
        model = TimeFieldModel
# (stored value, human-readable label) pairs shared by the ChoiceField
# test models below.
SAMPLE_CHOICES = [
    ('red', 'Red'),
    ('green', 'Green'),
    ('blue', 'Blue'),
]
class ChoiceFieldModel(models.Model):
    """Model with a choices field that allows blank but not NULL."""
    choice = models.CharField(choices=SAMPLE_CHOICES, blank=True, max_length=255)
class ChoiceFieldModelSerializer(serializers.ModelSerializer):
    """Default ModelSerializer over ChoiceFieldModel (no field overrides)."""
    class Meta:
        model = ChoiceFieldModel
class ChoiceFieldModelWithNull(models.Model):
    """Variant of ChoiceFieldModel whose choices field also permits NULL."""
    choice = models.CharField(choices=SAMPLE_CHOICES, blank=True, null=True, max_length=255)
class ChoiceFieldModelWithNullSerializer(serializers.ModelSerializer):
    """Default ModelSerializer over ChoiceFieldModelWithNull (no field overrides)."""
    class Meta:
        model = ChoiceFieldModelWithNull
class BasicFieldTests(TestCase):
    """Sanity checks for ModelSerializer's default field behaviour."""
    def test_auto_now_fields_read_only(self):
        """auto_now / auto_now_add fields default to read_only."""
        fields = TimestampedModelSerializer().fields
        self.assertEqual(fields['added'].read_only, True)
    def test_auto_pk_fields_read_only(self):
        """An AutoField primary key defaults to read_only."""
        fields = TimestampedModelSerializer().fields
        self.assertEqual(fields['id'].read_only, True)
    def test_non_auto_pk_fields_not_read_only(self):
        """A non-AutoField primary key is writable by default."""
        fields = CharPrimaryKeyModelSerializer().fields
        self.assertEqual(fields['id'].read_only, False)
    def test_dict_field_ordering(self):
        """
        Field.to_native() must preserve dictionary ordering, if it exists.
        See: https://github.com/tomchristie/django-rest-framework/issues/832
        """
        ordered = SortedDict()
        for key in ('c', 'b', 'a', 'z'):
            ordered[key] = 1
        converted = serializers.Field().to_native(ordered)
        self.assertEqual(list(converted.keys()), ['c', 'b', 'a', 'z'])
class DateFieldTest(TestCase):
    """
    Behavioural tests for serializers.DateField from_native() and to_native().
    """
    def test_from_native_string(self):
        """ISO-8601 date strings are parsed by default."""
        field = serializers.DateField()
        self.assertEqual(field.from_native('1984-07-31'),
                         datetime.date(1984, 7, 31))
    def test_from_native_datetime_date(self):
        """datetime.date instances are accepted as-is."""
        field = serializers.DateField()
        self.assertEqual(field.from_native(datetime.date(1984, 7, 31)),
                         datetime.date(1984, 7, 31))
    def test_from_native_custom_format(self):
        """Custom input_formats are honoured."""
        field = serializers.DateField(input_formats=['%Y -- %d'])
        self.assertEqual(field.from_native('1984 -- 31'),
                         datetime.date(1984, 1, 31))
    def test_from_native_invalid_default_on_custom_format(self):
        """Default formats are rejected once a custom format is set."""
        field = serializers.DateField(input_formats=['%Y -- %d'])
        with self.assertRaises(validators.ValidationError) as ctx:
            field.from_native('1984-07-31')
        self.assertEqual(
            ctx.exception.messages,
            ["Date has wrong format. Use one of these formats instead: YYYY -- DD"])
    def test_from_native_empty(self):
        """from_native() maps the empty string to None."""
        self.assertEqual(serializers.DateField().from_native(''), None)
    def test_from_native_none(self):
        """from_native() maps None to None."""
        self.assertEqual(serializers.DateField().from_native(None), None)
    def test_from_native_invalid_date(self):
        """An out-of-range date raises ValidationError."""
        with self.assertRaises(validators.ValidationError) as ctx:
            serializers.DateField().from_native('1984-13-31')
        self.assertEqual(
            ctx.exception.messages,
            ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]"])
    def test_from_native_invalid_format(self):
        """A string in an unrecognised format raises ValidationError."""
        with self.assertRaises(validators.ValidationError) as ctx:
            serializers.DateField().from_native('1984 -- 31')
        self.assertEqual(
            ctx.exception.messages,
            ["Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]]"])
    def test_to_native(self):
        """By default to_native() returns the date object unchanged."""
        field = serializers.DateField()
        self.assertEqual(field.to_native(datetime.date(1984, 7, 31)),
                         datetime.date(1984, 7, 31))
    def test_to_native_iso(self):
        """format='iso-8601' serialises to an ISO-formatted date string."""
        field = serializers.DateField(format='iso-8601')
        self.assertEqual(field.to_native(datetime.date(1984, 7, 31)),
                         '1984-07-31')
    def test_to_native_custom_format(self):
        """A strftime-style format string is applied by to_native()."""
        field = serializers.DateField(format="%Y - %m.%d")
        self.assertEqual(field.to_native(datetime.date(1984, 7, 31)),
                         '1984 - 07.31')
    def test_to_native_none(self):
        """to_native() passes None through."""
        field = serializers.DateField(required=False)
        self.assertEqual(field.to_native(None), None)
class DateTimeFieldTest(TestCase):
"""
Tests for the DateTimeField from_native() and to_native() behavior
"""
def test_from_native_string(self):
"""
Make sure from_native() accepts default iso input formats.
"""
f = serializers.DateTimeField()
result_1 = f.from_native('1984-07-31 04:31')
result_2 = f.from_native('1984-07-31 04:31:59')
result_3 = f.from_native('1984-07-31 04:31:59.000200')
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31), result_1)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59), result_2)
self.assertEqual(datetime.datetime(1984, 7, 31, 4, 31, 59, 200), result_3)
def test_from_native_datetime_datetime(self):
"""
Make sure from_native() accepts a datetime.datetime instance.
"""
f = serializers.DateTimeField()
result_1 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31))
result_2 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31, 59))
result_3 = f.from_native(datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
self.assertEqual(result_1, datetime.datetime(1984, 7, 31, 4, 31))
self.assertEqual(result_2, datetime.datetime(1984, 7, 31, 4, 31, 59))
self.assertEqual(result_3, datetime.datetime(1984, 7, 31, 4, 31, 59, 200))
def test_from_native_custom_format(self):
"""
Make sure from_native() accepts custom input formats.
"""
f = serializers.DateTimeField(input_formats=['%Y -- %H:%M'])
result = f.from_native('1984 -- 04:59')
self.assertEqual(datetime.datetime(1984, 1, 1, 4, 59), result)
def test_from_native_invalid_default_on_custom_format(self):
"""
Make sure from_native() don't accept default formats if custom format is preset
"""
f = serializers.DateTimeField(input_formats=['%Y -- %H:%M'])
try:
f.from_native('1984-07-31 04:31:59')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: YYYY -- hh:mm"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_empty(self):
"""
Make sure from_native() returns None on empty param.
"""
f = serializers.DateTimeField()
result = f.from_native('')
self.assertEqual(result, None)
def test_from_native_none(self):
"""
Make sure from_native() returns None on None param.
"""
f = serializers.DateTimeField()
result = f.from_native(None)
self.assertEqual(result, None)
def test_from_native_invalid_datetime(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid datetime.
"""
f = serializers.DateTimeField()
try:
f.from_native('04:61:59')
except validators.ValidationError as e:
self.assertEqual(e.messages, ["Datetime has wrong format. Use one of these formats instead: "
"YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HHMM|-HHMM|Z]"])
else:
self.fail("ValidationError was not properly raised")
def test_from_native_invalid_format(self):
    """Verify that from_native() raises ValidationError for an unrecognised format."""
    field = serializers.DateTimeField()
    with self.assertRaises(validators.ValidationError) as ctx:
        field.from_native('04 -- 31')
    self.assertEqual(
        ctx.exception.messages,
        ["Datetime has wrong format. Use one of these formats instead: "
         "YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HHMM|-HHMM|Z]"]
    )
def test_to_native(self):
    """Verify that to_native() returns datetime values unchanged by default."""
    field = serializers.DateTimeField()
    values = [
        datetime.datetime(1984, 7, 31),
        datetime.datetime(1984, 7, 31, 4, 31),
        datetime.datetime(1984, 7, 31, 4, 31, 59),
        datetime.datetime(1984, 7, 31, 4, 31, 59, 200),
    ]
    for value in values:
        self.assertEqual(value, field.to_native(value))
def test_to_native_iso(self):
    """Verify that to_native() with format='iso-8601' returns ISO formatted strings."""
    field = serializers.DateTimeField(format='iso-8601')
    cases = [
        (datetime.datetime(1984, 7, 31), '1984-07-31T00:00:00'),
        (datetime.datetime(1984, 7, 31, 4, 31), '1984-07-31T04:31:00'),
        (datetime.datetime(1984, 7, 31, 4, 31, 59), '1984-07-31T04:31:59'),
        (datetime.datetime(1984, 7, 31, 4, 31, 59, 200),
         '1984-07-31T04:31:59.000200'),
    ]
    for value, expected in cases:
        self.assertEqual(expected, field.to_native(value))
def test_to_native_custom_format(self):
    """Verify that to_native() honours a custom output format string."""
    field = serializers.DateTimeField(format="%Y - %H:%M")
    cases = [
        (datetime.datetime(1984, 7, 31), '1984 - 00:00'),
        (datetime.datetime(1984, 7, 31, 4, 31), '1984 - 04:31'),
        (datetime.datetime(1984, 7, 31, 4, 31, 59), '1984 - 04:31'),
        (datetime.datetime(1984, 7, 31, 4, 31, 59, 200), '1984 - 04:31'),
    ]
    for value, expected in cases:
        self.assertEqual(expected, field.to_native(value))
def test_to_native_none(self):
    """
    Make sure to_native() returns None on None param.
    """
    f = serializers.DateTimeField(required=False)
    self.assertEqual(None, f.to_native(None))
class TimeFieldTest(TestCase):
"""
Tests for the TimeField from_native() and to_native() behavior
"""
def test_from_native_string(self):
    """Verify that from_native() parses the default ISO time formats."""
    field = serializers.TimeField()
    cases = [
        ('04:31', datetime.time(4, 31)),
        ('04:31:59', datetime.time(4, 31, 59)),
        ('04:31:59.000200', datetime.time(4, 31, 59, 200)),
    ]
    for raw, expected in cases:
        self.assertEqual(expected, field.from_native(raw))
def test_from_native_datetime_time(self):
    """Verify that from_native() passes datetime.time instances through unchanged."""
    field = serializers.TimeField()
    for value in (datetime.time(4, 31),
                  datetime.time(4, 31, 59),
                  datetime.time(4, 31, 59, 200)):
        self.assertEqual(field.from_native(value), value)
def test_from_native_custom_format(self):
    """Verify that from_native() parses values using a custom input format."""
    field = serializers.TimeField(input_formats=['%H -- %M'])
    self.assertEqual(datetime.time(4, 31), field.from_native('04 -- 31'))
def test_from_native_invalid_default_on_custom_format(self):
    """Verify that the default formats are rejected once a custom format is set."""
    field = serializers.TimeField(input_formats=['%H -- %M'])
    with self.assertRaises(validators.ValidationError) as ctx:
        field.from_native('04:31:59')
    self.assertEqual(
        ctx.exception.messages,
        ["Time has wrong format. Use one of these formats instead: hh -- mm"]
    )
def test_from_native_empty(self):
    """Verify that from_native() maps the empty string to None."""
    field = serializers.TimeField()
    self.assertEqual(field.from_native(''), None)
def test_from_native_none(self):
    """Verify that from_native() maps None to None."""
    field = serializers.TimeField()
    self.assertEqual(field.from_native(None), None)
def test_from_native_invalid_time(self):
"""
Make sure from_native() raises a ValidationError on passing an invalid time.
"""
f = serializers.TimeField()
try:
f.from_native('04:61:59')
except validators.ValidationError as e:
| |
{"$in": list(extensions)}
if names_by_version_ids is not None:
or_query = []
for version_id, names in names_by_version_ids.items():
if version_id and names:
or_query.append({
"parent": _convert_id(version_id),
"name": {"$in": list(names)}
})
if not or_query:
return []
query_filter["$or"] = or_query
conn = _get_project_connection(project_name)
return conn.find(query_filter, _prepare_fields(fields))
def get_representations_parents(project_name, representations):
    """Prepare parents of representation entities.

    Each value of the returned dictionary is a tuple containing version,
    subset, asset and project documents, in that order. Any of the first
    three may be None when the parent document could not be queried.

    Args:
        project_name (str): Name of project where to look for queried entities.
        representations (list[dict]): Representation entities with at least
            '_id' and 'parent' keys.

    Returns:
        dict[ObjectId, tuple]: Parents by representation id.
    """
    # Group representations by their parent version id.
    repres_by_version_id = collections.defaultdict(list)
    for representation in representations:
        repres_by_version_id[representation["parent"]].append(representation)

    # Query versions and group them by id and by their parent subset id.
    versions_by_version_id = {}
    versions_by_subset_id = collections.defaultdict(list)
    versions = get_versions(
        project_name, version_ids=repres_by_version_id.keys()
    )
    for version in versions:
        versions_by_version_id[version["_id"]] = version
        versions_by_subset_id[version["parent"]].append(version)

    # Query subsets and group them by id and by their parent asset id.
    subsets_by_subset_id = {}
    subsets_by_asset_id = collections.defaultdict(list)
    subsets = get_subsets(
        project_name, subset_ids=versions_by_subset_id.keys()
    )
    for subset in subsets:
        subsets_by_subset_id[subset["_id"]] = subset
        subsets_by_asset_id[subset["parent"]].append(subset)

    assets = get_assets(project_name, asset_ids=subsets_by_asset_id.keys())
    assets_by_id = {
        asset["_id"]: asset
        for asset in assets
    }

    project = get_project(project_name)

    # Walk the hierarchy per version; missing parents stay None.
    output = {}
    for version_id, version_repres in repres_by_version_id.items():
        asset = None
        subset = None
        version = versions_by_version_id.get(version_id)
        if version:
            subset = subsets_by_subset_id.get(version["parent"])
            if subset:
                asset = assets_by_id.get(subset["parent"])
        for representation in version_repres:
            output[representation["_id"]] = (version, subset, asset, project)
    return output
def get_representation_parents(project_name, representation):
    """Prepare parents of a single representation entity.

    Returned value is a tuple containing version, subset, asset and project
    documents, in that order (delegates to 'get_representations_parents').

    Args:
        project_name (str): Name of project where to look for queried entities.
        representation (dict): Representation entity with at least
            '_id' and 'parent' keys.

    Returns:
        tuple: (version, subset, asset, project) parent documents.
        None: If 'representation' is falsy or its parents were not found.
    """
    if not representation:
        return None
    repre_id = representation["_id"]
    parents_by_repre_id = get_representations_parents(
        project_name, [representation]
    )
    return parents_by_repre_id.get(repre_id)
def get_thumbnail_id_from_source(project_name, src_type, src_id):
    """Receive thumbnail id from source entity.

    Args:
        project_name (str): Name of project where to look for queried entities.
        src_type (str): Type of source entity ('asset', 'version').
        src_id (str|ObjectId): Id of source entity.

    Returns:
        ObjectId: Thumbnail id assigned to entity.
        None: If source entity does not have any thumbnail id assigned.
    """
    if not src_type or not src_id:
        return None

    conn = _get_project_connection(project_name)
    src_doc = conn.find_one(
        {"_id": _convert_id(src_id)},
        {"data.thumbnail_id"}
    )
    if not src_doc:
        return None
    return src_doc.get("data", {}).get("thumbnail_id")
def get_thumbnails(project_name, thumbnail_ids, fields=None):
    """Receive thumbnails entity data.

    Thumbnail entity can be used to receive binary content of thumbnail based
    on its content and ThumbnailResolvers.

    Args:
        project_name (str): Name of project where to look for queried entities.
        thumbnail_ids (list[str|ObjectId]): Ids of thumbnail entities.
        fields (list[str]): Fields that should be returned. All fields are
            returned if 'None' is passed.

    Returns:
        cursor: Cursor of queried documents.
    """
    if not thumbnail_ids:
        return []
    thumbnail_ids = _convert_ids(thumbnail_ids)
    if not thumbnail_ids:
        return []
    conn = _get_project_connection(project_name)
    return conn.find(
        {"type": "thumbnail", "_id": {"$in": thumbnail_ids}},
        _prepare_fields(fields)
    )
def get_thumbnail(project_name, thumbnail_id, fields=None):
    """Receive thumbnail entity data.

    Args:
        project_name (str): Name of project where to look for queried entities.
        thumbnail_id (str|ObjectId): Id of thumbnail entity.
        fields (list[str]): Fields that should be returned. All fields are
            returned if 'None' is passed.

    Returns:
        None: If thumbnail with specified id was not found.
        Dict: Thumbnail entity data which can be reduced to specified 'fields'.
    """
    if not thumbnail_id:
        return None
    conn = _get_project_connection(project_name)
    return conn.find_one(
        {"type": "thumbnail", "_id": _convert_id(thumbnail_id)},
        _prepare_fields(fields)
    )
"""
## Custom data storage:
- Settings - OP settings overrides and local settings
- Logging - logs from PypeLogger
- Webpublisher - jobs
- Ftrack - events
- Maya - Shaders
- openpype/hosts/maya/api/shader_definition_editor.py
- openpype/hosts/maya/plugins/publish/validate_model_name.py
## Global launch hooks
- openpype/hooks/pre_global_host_data.py
Query:
- project
- asset
## Global load plugins
- openpype/plugins/load/delete_old_versions.py
Query:
- versions
- representations
- openpype/plugins/load/delivery.py
Query:
- representations
## Global publish plugins
- openpype/plugins/publish/collect_avalon_entities.py
Query:
- asset
- project
- openpype/plugins/publish/collect_anatomy_instance_data.py
Query:
- assets
- subsets
- last version
- openpype/plugins/publish/collect_scene_loaded_versions.py
Query:
- representations
- openpype/plugins/publish/extract_hierarchy_avalon.py
Query:
- asset
- assets
- project
Create:
- asset
Update:
- asset
- openpype/plugins/publish/integrate_hero_version.py
Query:
- version
- hero version
- representations
- openpype/plugins/publish/integrate_new.py
Query:
- asset
- subset
- version
- representations
- openpype/plugins/publish/integrate_thumbnail.py
Query:
- version
- openpype/plugins/publish/validate_editorial_asset_name.py
Query:
- assets
## Lib
- openpype/lib/applications.py
Query:
- project
- asset
- openpype/lib/avalon_context.py
Query:
- project
- asset
- linked assets (new function get_linked_assets?)
- subset
- subsets
- version
- versions
- last version
- representations
- linked representations (new function get_linked_ids_for_representations)
Update:
- workfile data
- openpype/lib/plugin_tools.py
Query:
- asset
- openpype/lib/project_backpack.py
Query:
- project
- everything from mongo
Update:
- project
- openpype/lib/usdlib.py
Query:
- project
- asset
## Pipeline
- openpype/pipeline/load/utils.py
Query:
- project
- assets
- subsets
- version
- versions
- representation
- representations
- openpype/pipeline/mongodb.py
Query:
- project
- openpype/pipeline/thumbnail.py
Query:
- project
## Hosts
### Aftereffects
- openpype/hosts/aftereffects/plugins/create/workfile_creator.py
Query:
- asset
### Blender
- openpype/hosts/blender/api/pipeline.py
Query:
- asset
- openpype/hosts/blender/plugins/publish/extract_layout.py
Query:
- representation
### Celaction
- openpype/hosts/celaction/plugins/publish/collect_audio.py
Query:
- subsets
- last versions
- representations
### Fusion
- openpype/hosts/fusion/api/lib.py
Query:
- asset
- subset
- version
- representation
- openpype/hosts/fusion/plugins/load/load_sequence.py
Query:
- version
- openpype/hosts/fusion/scripts/fusion_switch_shot.py
Query:
- project
- asset
- versions
- openpype/hosts/fusion/utility_scripts/switch_ui.py
Query:
- assets
### Harmony
- openpype/hosts/harmony/api/pipeline.py
Query:
- representation
### Hiero
- openpype/hosts/hiero/api/lib.py
Query:
- project
- version
- versions
- representation
- openpype/hosts/hiero/api/tags.py
Query:
- task types
- assets
- openpype/hosts/hiero/plugins/load/load_clip.py
Query:
- version
- versions
- openpype/hosts/hiero/plugins/publish_old_workflow/collect_assetbuilds.py
Query:
- assets
### Houdini
- openpype/hosts/houdini/api/lib.py
Query:
- asset
- openpype/hosts/houdini/api/usd.py
Query:
- asset
- openpype/hosts/houdini/plugins/create/create_hda.py
Query:
- asset
- subsets
- openpype/hosts/houdini/plugins/publish/collect_usd_bootstrap.py
Query:
- asset
- subset
- openpype/hosts/houdini/plugins/publish/extract_usd_layered.py
Query:
- asset
- subset
- version
- representation
- openpype/hosts/houdini/plugins/publish/validate_usd_shade_model_exists.py
Query:
- asset
- subset
- openpype/hosts/houdini/vendor/husdoutputprocessors/avalon_uri_processor.py
Query:
- project
- asset
### Maya
- openpype/hosts/maya/api/action.py
Query:
- asset
- openpype/hosts/maya/api/commands.py
Query:
- asset
- project
- openpype/hosts/maya/api/lib.py
Query:
- project
- asset
- subset
- subsets
- version
- representation
- openpype/hosts/maya/api/setdress.py
Query:
- version
- representation
- openpype/hosts/maya/plugins/inventory/import_modelrender.py
Query:
- representation
- openpype/hosts/maya/plugins/load/load_audio.py
Query:
- asset
- subset
- version
- openpype/hosts/maya/plugins/load/load_image_plane.py
Query:
- asset
- subset
- version
- openpype/hosts/maya/plugins/load/load_look.py
Query:
- representation
- openpype/hosts/maya/plugins/load/load_vrayproxy.py
Query:
- representation
- openpype/hosts/maya/plugins/load/load_yeti_cache.py
Query:
- representation
- openpype/hosts/maya/plugins/publish/collect_review.py
Query:
- subsets
- openpype/hosts/maya/plugins/publish/validate_node_ids_in_database.py
Query:
- assets
- openpype/hosts/maya/plugins/publish/validate_node_ids_related.py
Query:
- asset
- openpype/hosts/maya/plugins/publish/validate_renderlayer_aovs.py
Query:
- asset
- subset
### Nuke
- openpype/hosts/nuke/api/command.py
Query:
- project
- asset
- openpype/hosts/nuke/api/lib.py
Query:
- project
- asset
- version
- versions
- representation
- openpype/hosts/nuke/plugins/load/load_backdrop.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_camera_abc.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_clip.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_effects_ip.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_effects.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_gizmo_ip.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_gizmo.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_image.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_model.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/load/load_script_precomp.py
Query:
- version
- versions
- openpype/hosts/nuke/plugins/publish/collect_reads.py
Query:
- asset
- openpype/hosts/nuke/plugins/publish/precollect_instances.py
Query:
- asset
- openpype/hosts/nuke/plugins/publish/precollect_writes.py
Query:
- representation
- openpype/hosts/nuke/plugins/publish/validate_script.py
Query:
- asset
- project
### Photoshop
- openpype/hosts/photoshop/plugins/create/workfile_creator.py
Query:
- asset
### Resolve
- openpype/hosts/resolve/plugins/load/load_clip.py
Query:
- version
- versions
### Standalone publisher
- openpype/hosts/standalonepublisher/plugins/publish/collect_bulk_mov_instances.py
Query:
- asset
- openpype/hosts/standalonepublisher/plugins/publish/collect_matching_asset.py
Query:
- assets
- openpype/hosts/standalonepublisher/plugins/publish/collect_hierarchy.py
Query:
- project
- asset
- openpype/hosts/standalonepublisher/plugins/publish/validate_task_existence.py
Query:
- assets
### TVPaint
- openpype/hosts/tvpaint/api/pipeline.py
Query:
- project
- asset
- openpype/hosts/tvpaint/plugins/load/load_workfile.py
Query:
- project
- asset
- openpype/hosts/tvpaint/plugins/publish/collect_instances.py
Query:
- asset
- openpype/hosts/tvpaint/plugins/publish/collect_scene_render.py
Query:
- asset
- openpype/hosts/tvpaint/plugins/publish/collect_workfile.py
Query:
- asset
### Unreal
- openpype/hosts/unreal/plugins/load/load_camera.py
Query:
- asset
- assets
- openpype/hosts/unreal/plugins/load/load_layout.py
Query:
- asset
- assets
- openpype/hosts/unreal/plugins/publish/extract_layout.py
Query:
- representation
### Webpublisher
- openpype/hosts/webpublisher/webserver_service/webpublish_routes.py
Query:
- assets
- openpype/hosts/webpublisher/plugins/publish/collect_published_files.py
Query:
- last versions
## Tools
openpype/tools/assetlinks/widgets.py
- SimpleLinkView
Query:
- get_versions
- get_subsets
- get_assets
- get_output_link_versions
openpype/tools/creator/window.py
- CreatorWindow
Query:
- get_asset_by_name
- get_subsets
openpype/tools/launcher/models.py
- LauncherModel
Query:
- get_project
- get_assets
openpype/tools/libraryloader/app.py
- LibraryLoaderWindow
Query:
- get_project
openpype/tools/loader/app.py
- LoaderWindow
Query:
- get_project
- show
Query:
- get_projects
openpype/tools/loader/model.py
- SubsetsModel
Query:
- get_assets
- get_subsets
- get_last_versions
- get_versions
- get_hero_versions
- get_version_by_name
- RepresentationModel
Query:
- get_representations
- sync server specific queries (separated into multiple functions?)
- NOT REPLACED
openpype/tools/loader/widgets.py
- FamilyModel
Query:
- get_subset_families
- VersionTextEdit
Query:
- get_subset_by_id
- get_version_by_id
- SubsetWidget
Query:
- get_subsets
- get_representations
Update:
- Subset groups (combination of asset id and subset names)
- RepresentationWidget
Query:
- get_subsets
- get_versions
- get_representations
- ThumbnailWidget
Query:
- get_thumbnail_id_from_source
- get_thumbnail
openpype/tools/mayalookassigner/app.py
- MayaLookAssignerWindow
Query:
- get_last_version_by_subset_id
openpype/tools/mayalookassigner/commands.py
- create_items_from_nodes
Query:
- get_asset_by_id
openpype/tools/mayalookassigner/vray_proxies.py
- get_look_relationships
Query:
- get_representation_by_name
- load_look
Query:
- get_representation_by_name
- vrayproxy_assign_look
Query:
- get_last_version_by_subset_name
openpype/tools/project_manager/project_manager/model.py
- HierarchyModel
Query:
- get_asset_ids_with_subsets
- get_project
- get_assets
openpype/tools/project_manager/project_manager/view.py
- ProjectDocCache
Query:
- get_project
openpype/tools/project_manager/project_manager/widgets.py
- CreateProjectDialog
Query:
- get_projects
openpype/tools/publisher/widgets/create_dialog.py
- CreateDialog
Query:
- get_asset_by_name
- get_subsets
openpype/tools/publisher/control.py
- AssetDocsCache
| |
in range(5)
for k in [np.arange(order, dtype=dtype), np.ones(1, dtype), None]))
def testPolyInt(self, a_shape, order, k, dtype):
rng = jtu.rand_default(self.rng())
np_fun = lambda arg1: np.polyint(arg1, m=order, k=k)
jnp_fun = lambda arg1: jnp.polyint(arg1, m=order, k=k)
args_maker = lambda: [rng(a_shape, dtype)]
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_order={}".format(
        jtu.format_shape_dtype_string(a_shape, dtype), order),
     "dtype": dtype, "a_shape": a_shape, "order": order}
    for dtype in default_dtypes
    for a_shape in one_dim_array_shapes
    for order in range(5)))
def testPolyDer(self, a_shape, order, dtype):
    # Check jnp.polyder against np.polyder for the parametrized orders.
    rng = jtu.rand_default(self.rng())
    def np_fun(coeffs):
        return np.polyder(coeffs, m=order)
    def jnp_fun(coeffs):
        return jnp.polyder(coeffs, m=order)
    def args_maker():
        return [rng(a_shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker, check_dtypes=False)
    self._CompileAndCheck(jnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_ptype={}".format(ptype), "ptype": ptype}
    for ptype in ['int', 'np.int', 'jnp.int']))
def testIntegerPower(self, ptype):
    # An integer exponent should lower to a single integer_pow primitive.
    exponents = {'int': 2, 'np.int': np.int32(2), 'jnp.int': jnp.int32(2)}
    jaxpr = api.make_jaxpr(partial(jnp.power, x2=exponents[ptype]))(1)
    eqns = jaxpr.jaxpr.eqns
    self.assertLen(eqns, 1)
    self.assertEqual(eqns[0].primitive, lax.integer_pow_p)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_x={}_y={}".format(x, y), "x": x, "y": y}
    for x in [-1, 0, 1]
    for y in [0, 32, 64, 128]))
def testIntegerPowerOverflow(self, x, y):
    # Regression test for https://github.com/google/jax/issues/5987
    def args_maker():
        return [x, y]
    self._CheckAgainstNumpy(np.power, jnp.power, args_maker)
    self._CompileAndCheck(jnp.power, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in all_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(len(shape)))))
def testCompress(self, shape, dtype, axis):
    rng = jtu.rand_some_zero(self.rng())
    # The condition array must match the flattened size (axis=None),
    # the axis length, or be empty for scalars.
    if shape in scalar_shapes or len(shape) == 0:
        cond_shape = (0,)
    else:
        cond_shape = (prod(shape),) if axis is None else (shape[axis],)
    np_fun = partial(np.compress, axis=axis)
    jnp_fun = partial(jnp.compress, axis=axis)
    def args_maker():
        return [rng(cond_shape, jnp.float32), rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_condition=array[{}]_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), len(condition), axis),
     "shape": shape, "dtype": dtype, "condition": condition, "axis": axis}
    for shape in [(2, 3)]
    for dtype in int_dtypes
    # condition entries beyond axis size must be zero.
    for condition in [[1], [1, 0, 0, 0, 0, 0, 0]]
    for axis in [None, 0, 1]))
def testCompressMismatchedShapes(self, shape, dtype, condition, axis):
    rng = jtu.rand_default(self.rng())
    np_fun = partial(np.compress, axis=axis)
    jnp_fun = partial(jnp.compress, axis=axis)
    def args_maker():
        return [np.array(condition), rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_shape={}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(len(shape)))))
def testCompressMethod(self, shape, dtype, axis):
    rng = jtu.rand_some_zero(self.rng())
    # See testCompress: pick a condition shape that fits the axis.
    if shape in scalar_shapes or len(shape) == 0:
        cond_shape = (0,)
    else:
        cond_shape = (prod(shape),) if axis is None else (shape[axis],)
    def np_fun(condition, x):
        return np.compress(condition, x, axis=axis)
    def jnp_fun(condition, x):
        return x.compress(condition, axis=axis)
    def args_maker():
        return [rng(cond_shape, jnp.float32), rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
        axis, ",".join(str(d) for d in base_shape),
        ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
     "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
    for num_arrs in [3]
    for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, num_arrs)
    for base_shape in [(4,), (3, 4), (2, 3, 4)]
    for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes):
    rng = jtu.rand_default(self.rng())
    wrapped_axis = axis % len(base_shape)
    # Vary only the concatenated axis between the input arrays.
    shapes = [
        base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis + 1:]
        for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)
    ]
    out_dtype = functools.reduce(jnp.promote_types, arg_dtypes)
    def np_fun(*args):
        # numpy has no bfloat16; compute in float32 then cast to the
        # promoted output dtype.
        upcast = [x.astype(np.float32) if x.dtype == jnp.bfloat16 else x
                  for x in args]
        return np.concatenate(upcast, axis=axis).astype(out_dtype)
    def jnp_fun(*args):
        return jnp.concatenate(args, axis=axis)
    def args_maker():
        return [rng(s, d) for s, d in zip(shapes, arg_dtypes)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "shape": shape, "dtype": dtype, "axis": axis}
    for shape in [(4, 1), (4, 3), (4, 5, 6)]
    for dtype in all_dtypes
    for axis in [None] + list(range(1 - len(shape), len(shape) - 1))))
def testConcatenateArray(self, shape, dtype, axis):
    # Concatenating along an axis of a single array argument.
    rng = jtu.rand_default(self.rng())
    def np_fun(x):
        return np.concatenate(x, axis=axis)
    def jnp_fun(x):
        return jnp.concatenate(x, axis=axis)
    def args_maker():
        return [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
def testConcatenateAxisNone(self):
# https://github.com/google/jax/issues/3419
a = jnp.array([[1, 2], [3, 4]])
b = jnp.array([[5]])
jnp.concatenate((a, b), axis=None)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
        axis, ",".join(str(d) for d in base_shape),
        ",".join(np.dtype(dtype).name for dtype in arg_dtypes)),
     "axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes}
    for arg_dtypes in itertools.combinations_with_replacement(default_dtypes, 2)
    for base_shape in [(4,), (3, 4), (2, 3, 4)]
    for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes):
    rng = jtu.rand_default(self.rng())
    wrapped_axis = axis % len(base_shape)
    # Vary only the appended axis between the two input arrays.
    shapes = [
        base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis + 1:]
        for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)
    ]
    def np_fun(arr, values):
        # numpy has no bfloat16; compute in float32 then cast to the
        # promoted output dtype.
        def widen(x):
            return x.astype(np.float32) if x.dtype == jnp.bfloat16 else x
        out = np.append(widen(arr), widen(values), axis=axis)
        return out.astype(jnp.promote_types(*arg_dtypes))
    def jnp_fun(arr, values):
        return jnp.append(arr, values, axis=axis)
    def args_maker():
        return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_idx={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, idx),
     "dtype": dtype, "shape": shape, "axis": axis, "idx": idx}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for idx in (range(-prod(shape), prod(shape))
                if axis is None else
                range(-shape[axis], shape[axis]))))
def testDeleteInteger(self, shape, dtype, idx, axis):
    # jnp.delete with a scalar index should match np.delete.
    rng = jtu.rand_default(self.rng())
    def np_fun(arg):
        return np.delete(arg, idx, axis=axis)
    def jnp_fun(arg):
        return jnp.delete(arg, idx, axis=axis)
    def args_maker():
        return [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_slc={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis, slc),
     "dtype": dtype, "shape": shape, "axis": axis, "slc": slc}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for slc in [slice(None), slice(1, 3), slice(1, 5, 2)]))
def testDeleteSlice(self, shape, dtype, axis, slc):
    # jnp.delete with a slice index should match np.delete.
    rng = jtu.rand_default(self.rng())
    def np_fun(arg):
        return np.delete(arg, slc, axis=axis)
    def jnp_fun(arg):
        return jnp.delete(arg, slc, axis=axis)
    def args_maker():
        return [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_idx={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis,
        jtu.format_shape_dtype_string(idx_shape, int)),
     "dtype": dtype, "shape": shape, "axis": axis, "idx_shape": idx_shape}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))
    for idx_shape in all_shapes))
def testDeleteIndexArray(self, shape, dtype, axis, idx_shape):
    rng = jtu.rand_default(self.rng())
    max_idx = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
    # Previous to numpy 1.19, negative indices were ignored so we don't test this.
    low = 0 if numpy_version < (1, 19, 0) else -max_idx
    idx = jtu.rand_int(self.rng(), low=low, high=max_idx)(idx_shape, int)
    def np_fun(arg):
        return np.delete(arg, idx, axis=axis)
    def jnp_fun(arg):
        return jnp.delete(arg, idx, axis=axis)
    def args_maker():
        return [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@unittest.skipIf(numpy_version < (1, 19), "boolean mask not supported in numpy < 1.19.0")
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}".format(
        jtu.format_shape_dtype_string(shape, dtype), axis),
     "dtype": dtype, "shape": shape, "axis": axis}
    for shape in nonempty_nonscalar_array_shapes
    for dtype in all_dtypes
    for axis in [None] + list(range(-len(shape), len(shape)))))
def testDeleteMaskArray(self, shape, dtype, axis):
    rng = jtu.rand_default(self.rng())
    # Random boolean mask sized to the flattened array or the chosen axis.
    mask_size = np.zeros(shape).size if axis is None else np.zeros(shape).shape[axis]
    mask = jtu.rand_int(self.rng(), low=0, high=2)(mask_size, bool)
    def np_fun(arg):
        return np.delete(arg, mask, axis=axis)
    def jnp_fun(arg):
        return jnp.delete(arg, mask, axis=axis)
    def args_maker():
        return [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_axis={}_out_dims={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        axis, out_dims),
     "shape": shape, "dtype": dtype, "axis": axis, "out_dims": out_dims}
    for shape in nonempty_array_shapes
    for dtype in default_dtypes
    for axis in range(-len(shape), len(shape))
    for out_dims in [0, 1, 2]))
def testApplyAlongAxis(self, shape, dtype, axis, out_dims):
    # The applied function produces a 0-d, 1-d or 2-d result depending on
    # the parametrized 'out_dims'.
    def func(x, out_dims):
        if out_dims == 0:
            return x.sum()
        if out_dims == 1:
            return x * x[0]
        if out_dims == 2:
            return x[:, None] + x[None, :]
        raise NotImplementedError(f"out_dims={out_dims}")
    rng = jtu.rand_default(self.rng())
    def np_fun(arr):
        return np.apply_along_axis(func, axis, arr, out_dims=out_dims)
    def jnp_fun(arr):
        return jnp.apply_along_axis(func, axis, arr, out_dims=out_dims)
    def args_maker():
        return [rng(shape, dtype)]
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
    self._CompileAndCheck(jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
    {"testcase_name": "_{}_func={}_keepdims={}_axes={}".format(
        jtu.format_shape_dtype_string(shape, dtype),
        func, keepdims, axes),
     "shape": shape, "dtype": dtype, "func": func, "keepdims": keepdims, "axes": axes}
    for shape in nonempty_shapes
    for func in ["sum"]
    for keepdims in [True, False]
    for axes in itertools.combinations(range(len(shape)), 2)
    # Avoid low-precision types in sum()
    for dtype in default_dtypes if dtype not in [np.float16, jnp.bfloat16]))
def testApplyOverAxes(self, shape, dtype, func, keepdims, axes):
    # Apply the named reduction method over each axis in turn.
    def reducer(x, axis):
        return getattr(x, func)(axis=axis, keepdims=keepdims)
    rng = jtu.rand_default(self.rng())
    def np_fun(a):
        return np.apply_over_axes(reducer, a, axes)
    def jnp_fun(a):
        return jnp.apply_over_axes(reducer, a, axes)
    def args_maker():
        return (rng(shape, dtype),)
    self._CompileAndCheck(jnp_fun, args_maker)
    self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}_fixed_size={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis, repeats, fixed_size),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
'fixed_size': fixed_size}
for repeats in [0, 1, | |
: lambda x : x * 2.23693629,
'knot2' : lambda x : x * 1.94384449,
'km_per_hour2' : lambda x : x * 3.6},
'cm_per_hour' : {'inch_per_hour' : lambda x : x * 0.393700787,
'mm_per_hour' : lambda x : x * 10.0},
'mm_per_hour' : {'inch_per_hour' : lambda x : x * .0393700787,
'cm_per_hour' : lambda x : x * 0.10},
'cm' : {'inch' : lambda x : x / CM_PER_INCH,
'mm' : lambda x : x * 10.0},
'mm' : {'inch' : lambda x : x / MM_PER_INCH,
'cm' : lambda x : x * 0.10},
'meter' : {'foot' : lambda x : x / METER_PER_FOOT,
'km' : lambda x : x / 1000.0},
'dublin_jd' : {'unix_epoch' : lambda x : (x-25567.5) * 86400.0},
'unix_epoch' : {'dublin_jd' : lambda x : x/86400.0 + 25567.5},
'second' : {'hour' : lambda x : x/3600.0,
'minute' : lambda x : x/60.0,
'day' : lambda x : x/86400.0},
'minute' : {'second' : lambda x : x*60.0,
'hour' : lambda x : x/60.0,
'day' : lambda x : x/1440.0},
'hour' : {'second' : lambda x : x*3600.0,
'minute' : lambda x : x*60.0,
'day' : lambda x : x/24.0},
'day' : {'second' : lambda x : x*86400.0,
'minute' : lambda x : x*1440.0,
'hour' : lambda x : x*24.0},
'gallon' : {'liter' : lambda x : x * 3.78541,
'litre' : lambda x : x * 3.78541,
'cubic_foot' : lambda x : x * 0.133681},
'liter' : {'gallon' : lambda x : x * 0.264172,
'cubic_foot' : lambda x : x * 0.0353147},
'cubic_foot' : {'gallon' : lambda x : x * 7.48052,
'litre' : lambda x : x * 28.3168,
'liter' : lambda x : x * 28.3168},
'bit' : {'byte' : lambda x : x / 8},
'byte' : {'bit' : lambda x : x * 8},
'km' : {'meter' : lambda x : x * 1000.0,
'mile' : lambda x : x * 0.621371192},
'mile' : {'km' : lambda x : x * 1.609344},
'watt' : {'kilowatt' : lambda x : x / 1000.0},
'kilowatt' : {'watt' : lambda x : x * 1000.0},
'watt_second' : {'kilowatt_hour' : lambda x : x / 3.6e6,
'watt_hour' : lambda x : x / 3600.0},
'watt_hour' : {'kilowatt_hour' : lambda x : x / 1000.0,
'watt_second' : lambda x : x * 3600.0},
'kilowatt_hour' : {'watt_second' : lambda x : x * 3.6e6,
'watt_hour' : lambda x : x * 1000.0},
}
# Default unit formatting when nothing specified in skin configuration file.
# Maps each unit name to the printf-style format string used to render a
# datum expressed in that unit. The "NONE" entry is the literal text
# (note the leading space) emitted when the datum itself is missing.
default_unit_format_dict = {
    "amp" : "%.1f",
    "bit" : "%.0f",
    "byte" : "%.0f",
    "centibar" : "%.0f",
    "cm" : "%.2f",
    "cm_per_hour" : "%.2f",
    "count" : "%d",
    "cubic_foot" : "%.1f",
    "day" : "%.1f",
    "degree_C" : "%.1f",
    "degree_C_day" : "%.1f",
    "degree_E" : "%.1f",
    "degree_F" : "%.1f",
    "degree_F_day" : "%.1f",
    "degree_compass" : "%.0f",
    "foot" : "%.0f",
    "gallon" : "%.1f",
    "hPa" : "%.1f",
    "hPa_per_hour" : "%.3f",
    "hour" : "%.1f",
    "inHg" : "%.3f",
    "inHg_per_hour" : "%.5f",
    "inch" : "%.2f",
    "inch_per_hour" : "%.2f",
    "kilowatt_hour" : "%.1f",
    "km" : "%.1f",
    "km_per_hour" : "%.0f",
    "km_per_hour2" : "%.1f",
    "knot" : "%.0f",
    "knot2" : "%.1f",
    "liter" : "%.1f",
    "litre" : "%.1f",
    "mbar" : "%.1f",
    "mbar_per_hour" : "%.4f",
    "meter" : "%.0f",
    "meter_per_second" : "%.0f",
    "meter_per_second2" : "%.1f",
    "microgram_per_meter_cubed": "%.3f",
    "mile" : "%.1f",
    "mile_per_hour" : "%.0f",
    "mile_per_hour2" : "%.1f",
    "mm" : "%.1f",
    "mmHg" : "%.1f",
    "mmHg_per_hour" : "%.4f",
    "mm_per_hour" : "%.1f",
    "percent" : "%.0f",
    "second" : "%.0f",
    "uv_index" : "%.1f",
    "volt" : "%.1f",
    "watt" : "%.1f",
    "watt_second" : "%.0f",
    "watt_hour" : "%.1f",
    "watt_per_meter_squared" : "%.0f",
    "NONE" : u" N/A"
}
# Default unit labels to be used in the absence of a skin configuration file.
# Maps each unit name to the text appended after a formatted value. A 2-tuple
# entry supplies (singular, plural) forms; "uv_index" and "NONE" deliberately
# map to the empty string (no label).
default_unit_label_dict = {
    "amp" : u" amp",
    "bit" : u" b",
    "byte" : u" B",
    "centibar" : u" cb",
    "cm" : u" cm",
    "cm_per_hour" : u" cm/h",
    "cubic_foot" : u" ft\u00b3",
    "day" : (u" day", u" days"),
    "degree_C" : u"\u00b0C",
    "degree_C_day" : u"\u00b0C-day",
    "degree_E" : u"\u00b0E",
    "degree_F" : u"\u00b0F",
    "degree_F_day" : u"\u00b0F-day",
    "degree_compass" : u"\u00b0",
    "foot" : u" feet",
    "gallon" : u" gal",
    "hPa" : u" hPa",
    "hPa_per_hour" : u" hPa/h",
    "inHg" : u" inHg",
    "inHg_per_hour" : u" inHg/h",
    "hour" : (u" hour", u" hours"),
    "inch" : u" in",
    "inch_per_hour" : u" in/h",
    "kilowatt_hour" : u" kWh",
    "km" : u" km",
    "km_per_hour" : u" kph",
    "km_per_hour2" : u" kph",
    "knot" : u" knots",
    "knot2" : u" knots",
    "liter" : u" l",
    "litre" : u" l",
    "mbar" : u" mbar",
    "mbar_per_hour" : u" mbar/h",
    "meter" : u" meters",
    "meter_per_second" : u" m/s",
    "meter_per_second2" : u" m/s",
    "microgram_per_meter_cubed": u"\u00b5g/m\u00b3",
    "mile" : u" mile",
    "mile_per_hour" : u" mph",
    "mile_per_hour2" : u" mph",
    "minute" : (u" minute", u" minutes"),
    "mm" : u" mm",
    "mmHg" : u" mmHg",
    "mm_per_hour" : u" mm/h",
    "mmHg_per_hour" : u" mmHg/h",
    "percent" : u"%",
    "second" : (u" second", u" seconds"),
    "uv_index" : u"",
    "volt" : u" V",
    "watt" : u" W",
    "watt_second" : u" Ws",
    "watt_hour" : u" Wh",
    "watt_per_meter_squared" : u" W/m\u00b2",
    "NONE" : u""
}
# Default strftime formats to fall back on when the skin configuration file
# supplies none. The "delta_time" entry is not an strftime format at all:
# it is a %-interpolation template over day/hour/minute counts and labels.
default_time_format_dict = {
    "day": "%H:%M",
    "week": "%H:%M on %A",
    "month": "%d-%b-%Y %H:%M",
    "year": "%d-%b-%Y %H:%M",
    "rainyear": "%d-%b-%Y %H:%M",
    "current": "%d-%b-%Y %H:%M",
    "ephem_day": "%H:%M",
    "ephem_year": "%d-%b-%Y %H:%M",
    # Special encoding for elapsed-time quantities:
    "delta_time": "%(day)d%(day_label)s, %(hour)d%(hour_label)s, %(minute)d%(minute_label)s",
}
# Default mapping from compass degrees to ordinal (compass-point) names,
# clockwise from north in 22.5-degree steps; the final entry is the
# fallback used when the direction is unknown.
default_ordinate_names = (
    "N NNE NE ENE E ESE SE SSE "
    "S SSW SW WSW W WNW NW NNW "
    "N/A"
).split()
#==============================================================================
# class ValueTuple
#==============================================================================
# A value, along with the unit it is in, can be represented by a 3-way tuple
# called a value tuple. All weewx routines can accept a simple unadorned
# 3-way tuple as a value tuple, but they return the type ValueTuple. It is
# useful because its contents can be accessed using named attributes.
#
# Item attribute Meaning
# 0 value The datum value (eg, 20.2)
# 1 unit The unit it is in ("degree_C")
# 2 group The unit group ("group_temperature")
#
# It is valid to have a datum value of None.
#
# It is also valid to have a unit type of None (meaning there is no information
# about the unit the value is in). In this case, you won't be able to convert
# it to another unit.
class ValueTuple(tuple):
    """An immutable 3-tuple (value, unit, group) describing a scalar datum.

    Item 0 (``value``) is the datum itself (e.g. 20.2) and may be None.
    Item 1 (``unit``) is the unit the datum is in (e.g. "degree_C"); it
    may also be None, in which case nothing is known about the unit and
    the value cannot be converted. Item 2 (``group``) is the unit group
    (e.g. "group_temperature").
    """

    def __new__(cls, *args):
        # Pack the positional arguments directly into the new tuple.
        return tuple.__new__(cls, args)

    @property
    def value(self):
        """The datum (item 0)."""
        return self[0]

    @property
    def unit(self):
        """The unit the datum is expressed in (item 1)."""
        return self[1]

    @property
    def group(self):
        """The unit group (item 2)."""
        return self[2]

    # Modest math abilities: two ValueTuples may be added or subtracted,
    # provided they agree on both unit and unit group.
    def _check_operands(self, other, operation):
        # Raise TypeError unless `other` matches this tuple's unit and group.
        if self[1] != other[1] or self[2] != other[2]:
            raise TypeError("Unsupported operand error for %s: %s and %s"
                            % (operation, self[1], other[1]))

    def __sub__(self, other):
        self._check_operands(other, "subtraction")
        return ValueTuple(self[0] - other[0], self[1], self[2])

    def __add__(self, other):
        self._check_operands(other, "addition")
        return ValueTuple(self[0] + other[0], self[1], self[2])
#==============================================================================
# class Formatter
#==============================================================================
class Formatter(object):
"""Holds formatting information for the various unit types.
Examples (using the default formatters):
>>> import os
>>> os.environ['TZ'] = 'America/Los_Angeles'
>>> time.tzset()
>>> f = Formatter()
>>> print(f.toString((20.0, "degree_C", "group_temperature")))
20.0°C
>>> print(f.toString((83.2, "degree_F", "group_temperature")))
83.2°F
>>> # Try the Spanish locale, which will use comma decimal separators.
>>> # For this to work, the Spanish locale must have been installed.
>>> # You can do this with the command:
>>> # sudo locale-gen es_ES.UTF-8 && sudo update-locale
>>> x = locale.setlocale(locale.LC_NUMERIC, 'es_ES.utf-8')
>>> print(f.toString((83.2, "degree_F", "group_temperature"), localize=True))
83,2°F
>>> # Try it again, but overriding the localization:
>>> print(f.toString((83.2, "degree_F", "group_temperature"), localize=False))
83.2°F
>>> # Set locale back to default
>>> x = locale.setlocale(locale.LC_NUMERIC, '')
>>> print(f.toString((123456789, "unix_epoch", "group_time")))
29-Nov-1973 13:33
>>> print(f.to_ordinal_compass((5.0, "degree_compass", "group_direction")))
N
>>> print(f.to_ordinal_compass((0.0, "degree_compass", "group_direction")))
N
>>> print(f.to_ordinal_compass((12.5, "degree_compass", "group_direction")))
NNE
>>> print(f.to_ordinal_compass((360.0, "degree_compass", "group_direction")))
N
>>> print(f.to_ordinal_compass((None, "degree_compass", "group_direction")))
N/A
>>> print(f.toString((1*86400 + 1*3600 + 16*60 + 42, "second", "group_deltatime")))
1 day, 1 hour, 16 minutes
>>> delta_format = "%(day)d%(day_label)s, %(hour)d%(hour_label)s, "\
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.