input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
in runs_where_y_has_same_sign_as_yaxis]
elif self.xaxis_mode == 'log':
for curve_name in self.curves.keys():
if self.curves[curve_name].yaxis == yaxis:
where_xy_have_same_sign_as_axes = np.where(np.logical_and(self.curves[curve_name].data_x * self.xaxis_sign > 0., self.curves[curve_name].data_y * self.yaxes[yaxis].yaxis_sign > 0.))[0]
runs_where_xy_have_same_sign_as_axes = np.split(where_xy_have_same_sign_as_axes, np.where(np.diff(where_xy_have_same_sign_as_axes) != 1)[0] + 1)
self.curves[curve_name].points_x = [self.xaxis_sign * np.log10(self.xaxis_sign * self.curves[curve_name].data_x[run]) for run in runs_where_xy_have_same_sign_as_axes]
self.curves[curve_name].points_y = [self.yaxes[yaxis].yaxis_sign * np.log10(self.yaxes[yaxis].yaxis_sign * self.curves[curve_name].data_y[run]) for run in runs_where_xy_have_same_sign_as_axes]
if args[0][0] < args[0][1]:
self.yaxes[yaxis].ylim[0] = self.yaxes[yaxis].yaxis_sign * math.log10(self.yaxes[yaxis].yaxis_sign * args[0][0])
self.yaxes[yaxis].ylim[1] = self.yaxes[yaxis].yaxis_sign * math.log10(self.yaxes[yaxis].yaxis_sign * args[0][1])
else:
self.yaxes[yaxis].ylim[0] = self.yaxes[yaxis].yaxis_sign * math.log10(self.yaxes[yaxis].yaxis_sign * args[0][1])
self.yaxes[yaxis].ylim[1] = self.yaxes[yaxis].yaxis_sign * math.log10(self.yaxes[yaxis].yaxis_sign * args[0][0])
elif len(args[0]) < 2:
raise IndexError('did not specify both a lower and an upper limit for the y-axis')
else:
raise IndexError('more than two limits were specified for the y-axis')
else:
raise ValueError("invalid y-limits specification; it must be 'auto', 'tight', or a list of limits")
self.refresh_plot()
def svg(self, filename):
    """Render the current plot to 'filename' as a standalone SVG document.

    The drawing backend is temporarily switched to SVG so that draw_plot()
    emits SVG markup into self.svg_file, then the Tk backend is restored.
    try/finally guarantees the file is closed and the backend restored even
    if drawing raises (the original leaked the open file on error).
    """
    self.svg_backend()
    try:
        # codecs.open with an explicit encoding keeps the output UTF-8 on
        # both Python 2 and 3.
        self.svg_file = codecs.open(filename, encoding = 'utf-8', mode = 'w')
        try:
            self.svg_file.write(u'<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="{width!s}px" height="{height!s}px" viewBox="0 0 {width!s} {height!s}">\n'.format(width = self.canvas_width, height = self.canvas_height))
            self.begin_group()
            self.draw_plot()
            self.end_group()
            self.svg_file.write(u'</svg>\n')
        finally:
            # Always close and detach the output file.
            self.svg_file.close()
            self.svg_file = None
    finally:
        # Always restore the interactive Tk backend.
        self.tk_backend()
def zoom_to_fit(self, **kwargs):
    """Switch the x-limits and selected y-limits to automatic fitting.

    Keyword arguments:
    mode -- 'auto' (the default) or 'tight'
    yaxis -- name of a single y-axis to change, or 'all' (the default)

    Raises ValueError for an invalid mode or a nonexistent y-axis.
    """
    mode = kwargs.get('mode', 'auto')
    if mode not in ('auto', 'tight'):
        raise ValueError("if specified, mode must be 'auto' or 'tight'")
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    self.xlimits_mode = mode
    # One code path for "all axes" and "one named axis" instead of
    # duplicating the assignment in an else branch.
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        self.yaxes[name].ylimits_mode = mode
    self.refresh_plot()
def zoom_in(self, **kwargs):
    """Zoom in on the x-axis and the selected y-axes about a canvas point.

    Keyword arguments:
    factor -- zoom factor (> 1 zooms in); defaults to sqrt(2)
    cx, cy -- canvas coordinates of the zoom center; default to the center
        of the axes area
    yaxis -- name of a single y-axis to zoom, or 'all' (the default)

    Raises ValueError if the named y-axis does not exist.
    """
    factor = kwargs.get('factor', math.sqrt(2))
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    cx = kwargs.get('cx', 0.5 * (self.axes_left + self.axes_right))
    cy = kwargs.get('cy', 0.5 * (self.axes_top + self.axes_bottom))
    x = self.from_canvas_x(cx)
    # Read the range once so both new limits derive from the same pre-zoom
    # span (NOTE(review): if xrange is recomputed from xlim, the original's
    # second read happened after xlim[0] was already mutated).
    half_x = 0.5 * self.xrange / factor
    self.xlimits_mode = 'manual'
    self.xlim[0] = x - half_x
    self.xlim[1] = x + half_x
    # One code path for "all axes" and "one named axis".
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        y = self.from_canvas_y(cy, name)
        half_y = 0.5 * self.yaxes[name].yrange / factor
        self.yaxes[name].ylimits_mode = 'manual'
        self.yaxes[name].ylim[0] = y - half_y
        self.yaxes[name].ylim[1] = y + half_y
    self.refresh_plot()
def zoom_in_x(self, **kwargs):
    """Zoom in on the x-axis only, about canvas x-coordinate 'cx'.

    Keyword arguments:
    factor -- zoom factor (> 1 zooms in); defaults to sqrt(2)
    cx -- canvas x-coordinate of the zoom center; defaults to the middle
        of the axes area
    """
    factor = kwargs.get('factor', math.sqrt(2))
    cx = kwargs.get('cx', 0.5 * (self.axes_left + self.axes_right))
    x = self.from_canvas_x(cx)
    # Read xrange once so both limits use the same pre-zoom span.
    half = 0.5 * self.xrange / factor
    self.xlimits_mode = 'manual'
    self.xlim[0] = x - half
    self.xlim[1] = x + half
    self.refresh_plot()
def zoom_in_y(self, **kwargs):
    """Zoom in on the selected y-axes only, about canvas y-coordinate 'cy'.

    Keyword arguments:
    factor -- zoom factor (> 1 zooms in); defaults to sqrt(2)
    cy -- canvas y-coordinate of the zoom center; defaults to the middle
        of the axes area
    yaxis -- name of a single y-axis to zoom, or 'all' (the default)

    Raises ValueError if the named y-axis does not exist.
    """
    factor = kwargs.get('factor', math.sqrt(2))
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    cy = kwargs.get('cy', 0.5 * (self.axes_top + self.axes_bottom))
    # One code path for "all axes" and "one named axis".
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        y = self.from_canvas_y(cy, name)
        # Read yrange once so both limits use the same pre-zoom span.
        half = 0.5 * self.yaxes[name].yrange / factor
        self.yaxes[name].ylimits_mode = 'manual'
        self.yaxes[name].ylim[0] = y - half
        self.yaxes[name].ylim[1] = y + half
    self.refresh_plot()
def zoom_out(self, **kwargs):
    """Zoom out on the x-axis and the selected y-axes about a canvas point.

    Keyword arguments:
    factor -- zoom factor (> 1 zooms out); defaults to sqrt(2)
    cx, cy -- canvas coordinates of the zoom center; default to the center
        of the axes area
    yaxis -- name of a single y-axis to zoom, or 'all' (the default)

    Raises ValueError if the named y-axis does not exist.
    """
    factor = kwargs.get('factor', math.sqrt(2))
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    cx = kwargs.get('cx', 0.5 * (self.axes_left + self.axes_right))
    cy = kwargs.get('cy', 0.5 * (self.axes_top + self.axes_bottom))
    x = self.from_canvas_x(cx)
    # Read the range once so both new limits derive from the same pre-zoom
    # span (the original re-read xrange after mutating xlim[0]).
    half_x = 0.5 * self.xrange * factor
    self.xlimits_mode = 'manual'
    self.xlim[0] = x - half_x
    self.xlim[1] = x + half_x
    # One code path for "all axes" and "one named axis".
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        y = self.from_canvas_y(cy, name)
        half_y = 0.5 * self.yaxes[name].yrange * factor
        self.yaxes[name].ylimits_mode = 'manual'
        self.yaxes[name].ylim[0] = y - half_y
        self.yaxes[name].ylim[1] = y + half_y
    self.refresh_plot()
def zoom_out_x(self, **kwargs):
    """Zoom out on the x-axis only, about canvas x-coordinate 'cx'.

    Keyword arguments:
    factor -- zoom factor (> 1 zooms out); defaults to sqrt(2)
    cx -- canvas x-coordinate of the zoom center; defaults to the middle
        of the axes area
    """
    factor = kwargs.get('factor', math.sqrt(2))
    cx = kwargs.get('cx', 0.5 * (self.axes_left + self.axes_right))
    x = self.from_canvas_x(cx)
    # Read xrange once so both limits use the same pre-zoom span.
    half = 0.5 * self.xrange * factor
    self.xlimits_mode = 'manual'
    self.xlim[0] = x - half
    self.xlim[1] = x + half
    self.refresh_plot()
def zoom_out_y(self, **kwargs):
    """Zoom out on the selected y-axes only, about canvas y-coordinate 'cy'.

    Keyword arguments:
    factor -- zoom factor (> 1 zooms out); defaults to sqrt(2)
    cy -- canvas y-coordinate of the zoom center; defaults to the middle
        of the axes area
    yaxis -- name of a single y-axis to zoom, or 'all' (the default)

    Raises ValueError if the named y-axis does not exist.
    """
    factor = kwargs.get('factor', math.sqrt(2))
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    cy = kwargs.get('cy', 0.5 * (self.axes_top + self.axes_bottom))
    # One code path for "all axes" and "one named axis".
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        y = self.from_canvas_y(cy, name)
        # Read yrange once so both limits use the same pre-zoom span.
        half = 0.5 * self.yaxes[name].yrange * factor
        self.yaxes[name].ylimits_mode = 'manual'
        self.yaxes[name].ylim[0] = y - half
        self.yaxes[name].ylim[1] = y + half
    self.refresh_plot()
def zoom_rect(self, *args, **kwargs):
    """Zoom the plot to a rectangle given in canvas coordinates.

    The rectangle may be given either as the keyword arguments 'left',
    'top', 'right', 'bottom' (each defaulting to the full axes area) or as
    a single four-element list [left, top, right, bottom].  Degenerate
    rectangles (left >= right or top >= bottom) are ignored.

    Keyword arguments:
    yaxis -- name of a single y-axis to zoom, or 'all' (the default)

    Raises ValueError for a bad rectangle argument or a nonexistent y-axis,
    and IndexError if more than one positional argument is supplied.
    """
    left = kwargs.get('left', self.axes_left)
    right = kwargs.get('right', self.axes_right)
    top = kwargs.get('top', self.axes_top)
    bottom = kwargs.get('bottom', self.axes_bottom)
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    if len(args) == 1:
        # isinstance (rather than an exact type() check) also accepts
        # list subclasses.
        if isinstance(args[0], list) and (len(args[0]) == 4):
            left = float(args[0][0])
            top = float(args[0][1])
            right = float(args[0][2])
            bottom = float(args[0][3])
        else:
            raise ValueError('if specified, the optional argument must be a four-element list specifying the left, top, right, and bottom coordinates of the zoom rectangle')
    elif len(args) > 1:
        raise IndexError('too many arguments supplied to zoom_rect')
    if (left < right) and (top < bottom):
        self.xlimits_mode = 'manual'
        self.xlim[0], self.xlim[1] = self.from_canvas_x(left), self.from_canvas_x(right)
        # One code path for "all axes" and "one named axis".  Canvas y grows
        # downward, so 'bottom' maps to ylim[0] and 'top' to ylim[1].
        axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
        for name in axes:
            self.yaxes[name].ylimits_mode = 'manual'
            self.yaxes[name].ylim[0], self.yaxes[name].ylim[1] = self.from_canvas_y(bottom, name), self.from_canvas_y(top, name)
    self.refresh_plot()
def pan_left(self, **kwargs):
    """Shift the visible x-range to the left.

    Keyword arguments:
    fraction -- fraction of the current x-range to shift by (default 0.1)
    """
    fraction = kwargs.get('fraction', 0.1)
    # Compute the shift once so both limits move by the same amount even
    # if xrange is recomputed from xlim after the first update.
    shift = fraction * self.xrange
    self.xlimits_mode = 'manual'
    self.xlim[0] -= shift
    self.xlim[1] -= shift
    self.refresh_plot()
def pan_right(self, **kwargs):
    """Shift the visible x-range to the right.

    Keyword arguments:
    fraction -- fraction of the current x-range to shift by (default 0.1)
    """
    fraction = kwargs.get('fraction', 0.1)
    # Compute the shift once so both limits move by the same amount even
    # if xrange is recomputed from xlim after the first update.
    shift = fraction * self.xrange
    self.xlimits_mode = 'manual'
    self.xlim[0] += shift
    self.xlim[1] += shift
    self.refresh_plot()
def pan_up(self, **kwargs):
    """Shift the visible range of the selected y-axes upward.

    Keyword arguments:
    fraction -- fraction of each axis' y-range to shift by (default 0.1)
    yaxis -- name of a single y-axis to pan, or 'all' (the default)

    Raises ValueError if the named y-axis does not exist.
    """
    fraction = kwargs.get('fraction', 0.1)
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    # One code path for "all axes" and "one named axis".
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        # Compute the shift once so both limits move by the same amount
        # even if yrange is recomputed from ylim after the first update.
        shift = fraction * self.yaxes[name].yrange
        self.yaxes[name].ylimits_mode = 'manual'
        self.yaxes[name].ylim[0] += shift
        self.yaxes[name].ylim[1] += shift
    self.refresh_plot()
def pan_down(self, **kwargs):
    """Shift the visible range of the selected y-axes downward.

    Keyword arguments:
    fraction -- fraction of each axis' y-range to shift by (default 0.1)
    yaxis -- name of a single y-axis to pan, or 'all' (the default)

    Raises ValueError if the named y-axis does not exist.
    """
    fraction = kwargs.get('fraction', 0.1)
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    # One code path for "all axes" and "one named axis".
    axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
    for name in axes:
        # Compute the shift once so both limits move by the same amount
        # even if yrange is recomputed from ylim after the first update.
        shift = fraction * self.yaxes[name].yrange
        self.yaxes[name].ylimits_mode = 'manual'
        self.yaxes[name].ylim[0] -= shift
        self.yaxes[name].ylim[1] -= shift
    self.refresh_plot()
def pan(self, **kwargs):
    """Pan the plot by (dx, dy) steps of the axes' epsilon increments.

    Keyword arguments:
    dx -- horizontal pan, in multiples of self.x_epsilon (default 0.)
    dy -- vertical pan, in multiples of each axis' y_epsilon (default 0.)
    yaxis -- name of a single y-axis to pan, or 'all' (the default)

    A zero (dx, dy) leaves all limits untouched.  Raises ValueError if the
    named y-axis does not exist.
    """
    dx = kwargs.get('dx', 0.)
    dy = kwargs.get('dy', 0.)
    yaxis = kwargs.get('yaxis', 'all')
    if (yaxis != 'all') and (yaxis not in self.yaxes.keys()):
        raise ValueError('specified y-axis does not exist')
    if (dx != 0.) or (dy != 0.):
        self.xlimits_mode = 'manual'
        # Compute each shift once so both limits of an axis move by the
        # same amount.
        shift_x = dx * self.x_epsilon
        self.xlim[0] -= shift_x
        self.xlim[1] -= shift_x
        # One code path for "all axes" and "one named axis".
        axes = list(self.yaxes.keys()) if yaxis == 'all' else [yaxis]
        for name in axes:
            shift_y = dy * self.yaxes[name].y_epsilon
            self.yaxes[name].ylimits_mode = 'manual'
            self.yaxes[name].ylim[0] += shift_y
            self.yaxes[name].ylim[1] += shift_y
    self.refresh_plot()
def delete_curve(self, name):
    """Remove the named curve from the plot and redraw.

    Raises NameError if no curve with the given name exists.
    """
    # Guard clause: reject unknown names before touching the plot state.
    if name not in self.curves:
        raise NameError('no curve exists with name = {0!r}'.format(name))
    del self.curves[name]
    self.refresh_plot()
def configure_curve(self, name, **kwargs):
    """Update the visual style of an existing curve and redraw.

    The curve may be configured either with the individual keyword
    arguments marker_color, marker, curve_color, and curve_style (each
    defaulting to the curve's current setting) or with a combined 'style'
    string decoded by self.parse_style().  When 'style' is non-empty, it
    takes precedence.

    Raises NameError if no curve with the given name exists.
    """
    # Guard clause: reject unknown names before reading any style kwargs.
    if name not in self.curves:
        raise NameError('no curve exists with name = {0!r}'.format(name))
    curve = self.curves[name]
    style = kwargs.get('style', '')
    if style == '':
        # No combined style string: apply the per-attribute keywords,
        # keeping each current value as its default.
        curve.marker_color = kwargs.get('marker_color', curve.marker_color)
        curve.marker = kwargs.get('marker', curve.marker)
        curve.curve_color = kwargs.get('curve_color', curve.curve_color)
        curve.curve_style = kwargs.get('curve_style', curve.curve_style)
    else:
        (curve.marker_color, curve.marker,
         curve.curve_color, curve.curve_style) = self.parse_style(style)
    self.refresh_plot()
def bindings(self):
    """Install all event bindings on the canvas: keyboard, then mouse."""
    self.key_bindings()
    self.mouse_bindings()
def key_bindings(self):
self.canvas.bind('<Up>', lambda event: self.pan_up())
self.canvas.bind('<Down>', lambda event: self.pan_down())
self.canvas.bind('<Left>', lambda event: self.pan_left())
self.canvas.bind('<Right>', lambda event: self.pan_right())
self.canvas.bind('<Control-Up>', lambda event: self.pan_up(fraction = 0.5))
self.canvas.bind('<Control-Down>', lambda event: self.pan_down(fraction = 0.5))
self.canvas.bind('<Control-Left>', lambda event: self.pan_left(fraction = 0.5))
self.canvas.bind('<Control-Right>', lambda event: self.pan_right(fraction = 0.5))
self.canvas.bind('<Shift-Up>', lambda event: self.pan_up(fraction = 1. / self.axes_height))
| |
properties for the resource.
:type tags: dict[str, str]
:param kind: Required. The kind of the environment.Constant filled by server. Possible values
include: "Gen1", "Gen2".
:type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentKind
:param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
ingress rate, and the billing rate.
:type sku: ~azure.mgmt.timeseriesinsights.models.Sku
"""
_validation = {
'location': {'required': True},
'kind': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
_subtype_map = {
'kind': {'Gen1': 'Gen1EnvironmentCreateOrUpdateParameters', 'Gen2': 'Gen2EnvironmentCreateOrUpdateParameters'}
}
def __init__(
self,
*,
location: str,
sku: "Sku",
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(EnvironmentCreateOrUpdateParameters, self).__init__(location=location, tags=tags, **kwargs)
self.kind = 'EnvironmentCreateOrUpdateParameters' # type: str
self.sku = sku
class EnvironmentListResponse(msrest.serialization.Model):
    """The response of the List Environments operation.

    :param value: Result of the List Environments operation.
    :type value: list[~azure.mgmt.timeseriesinsights.models.EnvironmentResource]
    """

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EnvironmentResource]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["EnvironmentResource"]] = None,
        **kwargs
    ) -> None:
        super(EnvironmentListResponse, self).__init__(**kwargs)
        self.value = value
class TrackedResource(Resource):
    """Time Series Insights resource that is tracked by Azure Resource Manager.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    """

    # msrest validation rules: read-only fields are server-populated;
    # 'location' must be supplied by the caller.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        location: str,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> None:
        super(TrackedResource, self).__init__(**kwargs)
        self.location = location
        self.tags = tags
class EnvironmentResource(TrackedResource):
    """An environment is a set of time-series data available for query, and is the top level Azure Time Series Insights resource.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Gen1EnvironmentResource, Gen2EnvironmentResource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource Id.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param sku: Required. The sku determines the type of environment, either Gen1 (S1 or S2) or
     Gen2 (L1). For Gen1 environments the sku determines the capacity of the environment, the
     ingress rate, and the billing rate.
    :type sku: ~azure.mgmt.timeseriesinsights.models.Sku
    :param kind: Required. The kind of the environment.Constant filled by server. Possible values
     include: "Gen1", "Gen2".
    :type kind: str or ~azure.mgmt.timeseriesinsights.models.EnvironmentResourceKind
    """

    # msrest validation rules: read-only fields are server-populated;
    # 'location', 'sku', and 'kind' are mandatory on outgoing requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'kind': {'required': True},
    }

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'kind': {'key': 'kind', 'type': 'str'},
    }

    # Polymorphic dispatch: the 'kind' discriminator selects which subclass
    # msrest instantiates on deserialization.
    _subtype_map = {
        'kind': {'Gen1': 'Gen1EnvironmentResource', 'Gen2': 'Gen2EnvironmentResource'}
    }

    def __init__(
        self,
        *,
        location: str,
        sku: "Sku",
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> None:
        super(EnvironmentResource, self).__init__(location=location, tags=tags, **kwargs)
        self.sku = sku
        # Discriminator value; per _subtype_map, subclasses are expected to
        # replace this with 'Gen1' or 'Gen2'.
        self.kind = 'EnvironmentResource'  # type: str
class EnvironmentResourceProperties(ResourceProperties):
    """Properties of the environment.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar provisioning_state: Provisioning state of the resource. Possible values include:
     "Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
    :vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
    :ivar creation_time: The time the resource was created.
    :vartype creation_time: ~datetime.datetime
    :ivar data_access_id: An id used to access the environment data, e.g. to query the
     environment's events or upload reference data for the environment.
    :vartype data_access_id: str
    :ivar data_access_fqdn: The fully qualified domain name used to access the environment data,
     e.g. to query the environment's events or upload reference data for the environment.
    :vartype data_access_fqdn: str
    :ivar status: An object that represents the status of the environment, and its internal state
     in the Time Series Insights service.
    :vartype status: ~azure.mgmt.timeseriesinsights.models.EnvironmentStatus
    """

    # msrest validation rules: every field here is server-populated.
    _validation = {
        'provisioning_state': {'readonly': True},
        'creation_time': {'readonly': True},
        'data_access_id': {'readonly': True},
        'data_access_fqdn': {'readonly': True},
        'status': {'readonly': True},
    }

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
        'data_access_id': {'key': 'dataAccessId', 'type': 'str'},
        'data_access_fqdn': {'key': 'dataAccessFqdn', 'type': 'str'},
        'status': {'key': 'status', 'type': 'EnvironmentStatus'},
    }

    def __init__(
        self,
        **kwargs
    ) -> None:
        super(EnvironmentResourceProperties, self).__init__(**kwargs)
        # Read-only attributes start as None and are filled by the server.
        self.data_access_id = None
        self.data_access_fqdn = None
        self.status = None
class EnvironmentStateDetails(msrest.serialization.Model):
    """An object that contains the details about an environment's state.

    :param code: Contains the code that represents the reason of an environment being in a
     particular state. Can be used to programmatically handle specific cases.
    :type code: str
    :param message: A message that describes the state in detail.
    :type message: str
    """

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        code: Optional[str] = None,
        message: Optional[str] = None,
        **kwargs
    ) -> None:
        super(EnvironmentStateDetails, self).__init__(**kwargs)
        self.code = code
        self.message = message
class EnvironmentStatus(msrest.serialization.Model):
    """An object that represents the status of the environment, and its internal state in the Time Series Insights service.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar ingress: An object that represents the status of ingress on an environment.
    :vartype ingress: ~azure.mgmt.timeseriesinsights.models.IngressEnvironmentStatus
    :ivar warm_storage: An object that represents the status of warm storage on an environment.
    :vartype warm_storage: ~azure.mgmt.timeseriesinsights.models.WarmStorageEnvironmentStatus
    """

    # msrest validation rules: both fields are server-populated.
    _validation = {
        'ingress': {'readonly': True},
        'warm_storage': {'readonly': True},
    }

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'ingress': {'key': 'ingress', 'type': 'IngressEnvironmentStatus'},
        'warm_storage': {'key': 'warmStorage', 'type': 'WarmStorageEnvironmentStatus'},
    }

    def __init__(
        self,
        **kwargs
    ) -> None:
        super(EnvironmentStatus, self).__init__(**kwargs)
        # Read-only attributes start as None and are filled by the server.
        self.ingress = None
        self.warm_storage = None
class EnvironmentUpdateParameters(msrest.serialization.Model):
    """Parameters supplied to the Update Environment operation.

    :param tags: A set of tags. Key-value pairs of additional properties for the environment.
    :type tags: dict[str, str]
    """

    # Maps Python attribute names to their JSON keys and msrest type strings.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(
        self,
        *,
        tags: Optional[Dict[str, str]] = None,
        **kwargs
    ) -> None:
        super(EnvironmentUpdateParameters, self).__init__(**kwargs)
        self.tags = tags
class EventHubEventSourceCommonProperties(AzureEventSourceProperties):
"""Properties of the EventHub event source.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Provisioning state of the resource. Possible values include:
"Accepted", "Creating", "Updating", "Succeeded", "Failed", "Deleting".
:vartype provisioning_state: str or ~azure.mgmt.timeseriesinsights.models.ProvisioningState
:ivar creation_time: The time the resource was created.
:vartype creation_time: ~datetime.datetime
:param timestamp_property_name: The event property that will be used as the event source's
timestamp. If a value isn't specified for timestampPropertyName, or if null or empty-string is
specified, the event creation time will be used.
:type timestamp_property_name: str
:param event_source_resource_id: Required. The resource id of the event source in Azure
Resource Manager.
:type event_source_resource_id: str
:param service_bus_namespace: Required. The name of the service bus that contains the event
hub.
:type service_bus_namespace: str
:param event_hub_name: Required. The name of the event hub.
:type event_hub_name: str
:param consumer_group_name: Required. The name of the event hub's consumer group that holds the
partitions from which events will be read.
:type consumer_group_name: str
:param key_name: Required. The name of the SAS key that grants the Time Series Insights service
access to the event hub. The shared access policies for this key must grant 'Listen'
permissions to the event hub.
:type key_name: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'creation_time': {'readonly': True},
'event_source_resource_id': {'required': True},
'service_bus_namespace': {'required': True},
'event_hub_name': {'required': True},
'consumer_group_name': {'required': True},
'key_name': {'required': | |
"""
<Program Name>
repository_lib.py
<Author>
<NAME> <<EMAIL>>
<Started>
June 1, 2014
<Copyright>
See LICENSE for licensing information.
<Purpose>
Provide a library for the repository tool that can create a TUF repository.
The repository tool can be used with the Python interpreter in interactive
mode, or imported directly into a Python module. See 'tuf/README' for the
complete guide to using 'tuf.repository_tool.py'.
"""
# Help with Python 3 compatibility, where the print statement is a function, an
# implicit relative import is invalid, and the '/' operator performs true
# division. Example: print 'hello world' raises a 'SyntaxError' exception.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import errno
import sys
import time
import datetime
import getpass
import logging
import tempfile
import shutil
import json
import gzip
import random
import tuf
import tuf.formats
import tuf.util
import tuf.keydb
import tuf.roledb
import tuf.keys
import tuf.sig
import tuf.log
import tuf.conf
import tuf._vendor.iso8601 as iso8601
import tuf._vendor.six as six
# See 'log.py' to learn how logging is handled in TUF.
logger = logging.getLogger('tuf.repository_lib')

# Recommended RSA key sizes:
# http://www.emc.com/emc-plus/rsa-labs/historical/twirl-and-rsa-key-size.htm#table1
# According to the document above, revised May 6, 2003, RSA keys of
# size 3072 provide security through 2031 and beyond.  2048-bit keys
# are the recommended minimum and are good from the present through 2030.
DEFAULT_RSA_KEY_BITS = 3072

# The extension of TUF metadata.
METADATA_EXTENSION = '.json'

# The targets and metadata directory names.  Metadata files are written
# to the staged metadata directory instead of the "live" one.
METADATA_STAGED_DIRECTORY_NAME = 'metadata.staged'
METADATA_DIRECTORY_NAME = 'metadata'
TARGETS_DIRECTORY_NAME = 'targets'

# The metadata filenames of the top-level roles.
ROOT_FILENAME = 'root' + METADATA_EXTENSION
TARGETS_FILENAME = 'targets' + METADATA_EXTENSION
SNAPSHOT_FILENAME = 'snapshot' + METADATA_EXTENSION
TIMESTAMP_FILENAME = 'timestamp' + METADATA_EXTENSION

# Log a warning when metadata expires in this many seconds, or less.
# root = 1 month, snapshot = 1 day, targets = 10 days, timestamp = 1 day.
ROOT_EXPIRES_WARN_SECONDS = 2630000
SNAPSHOT_EXPIRES_WARN_SECONDS = 86400
TARGETS_EXPIRES_WARN_SECONDS = 864000
TIMESTAMP_EXPIRES_WARN_SECONDS = 86400

# Supported key types.
SUPPORTED_KEY_TYPES = ['rsa', 'ed25519']

# The recognized compression extensions.
SUPPORTED_COMPRESSION_EXTENSIONS = ['.gz']

# The full list of supported TUF metadata extensions (plain and compressed).
METADATA_EXTENSIONS = ['.json', '.json.gz']
def _generate_and_write_metadata(rolename, metadata_filename, write_partial,
                                 targets_directory, metadata_directory,
                                 consistent_snapshot=False, filenames=None):
  """
  Non-public function that can generate and write the metadata of the specified
  top-level 'rolename'.  It also increments version numbers if:
  1.  write_partial==True and the metadata is the first to be written.
  2.  write_partial=False (i.e., write()), the metadata was not loaded as
      partially written, and a write_partial is not needed.

  'filenames' must provide 'root' and 'targets' entries when
  rolename == 'snapshot', and a 'snapshot' entry when rolename == 'timestamp'.
  Returns a (signable, filename) tuple: the signed metadata object and the
  value returned by write_metadata_file().  Raises tuf.UnsignedMetadataError
  if a full (non-partial) write lacks a threshold of signatures.
  """

  metadata = None

  # Retrieve the roleinfo of 'rolename' to extract the needed metadata
  # attributes, such as version number, expiration, etc.
  roleinfo = tuf.roledb.get_roleinfo(rolename)
  # The snapshot role's compression settings are needed for timestamp
  # metadata, which lists the digests of compressed snapshot files.
  snapshot_compressions = tuf.roledb.get_roleinfo('snapshot')['compressions']

  # Generate the appropriate role metadata for 'rolename'.
  # NOTE(review): if 'rolename' matches none of the branches below,
  # 'metadata' remains None when passed to sign_metadata() -- callers appear
  # to pass only top-level role names.
  if rolename == 'root':
    metadata = generate_root_metadata(roleinfo['version'],
                                      roleinfo['expires'], consistent_snapshot)
    _log_warning_if_expires_soon(ROOT_FILENAME, roleinfo['expires'],
                                 ROOT_EXPIRES_WARN_SECONDS)

  # Check for the Targets role, including delegated roles.
  elif rolename.startswith('targets'):
    metadata = generate_targets_metadata(targets_directory,
                                         roleinfo['paths'],
                                         roleinfo['version'],
                                         roleinfo['expires'],
                                         roleinfo['delegations'],
                                         consistent_snapshot)
    # Only the top-level targets role gets an expiration warning here;
    # delegated roles are skipped.
    if rolename == 'targets':
      _log_warning_if_expires_soon(TARGETS_FILENAME, roleinfo['expires'],
                                   TARGETS_EXPIRES_WARN_SECONDS)

  elif rolename == 'snapshot':
    root_filename = filenames['root']
    targets_filename = filenames['targets']
    metadata = generate_snapshot_metadata(metadata_directory,
                                          roleinfo['version'],
                                          roleinfo['expires'], root_filename,
                                          targets_filename,
                                          consistent_snapshot)
    _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'],
                                 SNAPSHOT_EXPIRES_WARN_SECONDS)

  elif rolename == 'timestamp':
    snapshot_filename = filenames['snapshot']
    metadata = generate_timestamp_metadata(snapshot_filename,
                                           roleinfo['version'],
                                           roleinfo['expires'],
                                           snapshot_compressions)
    _log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'],
                                 TIMESTAMP_EXPIRES_WARN_SECONDS)

  signable = sign_metadata(metadata, roleinfo['signing_keyids'],
                           metadata_filename)

  # Check if the version number of 'rolename' may be automatically incremented,
  # depending on whether partial metadata is loaded or if the metadata is
  # written with write() / write_partial().
  # Increment the version number if this is the first partial write.
  if write_partial:
    # Sign with no keys to get a signable carrying only the previously
    # stored signatures, then check whether any of them are still good.
    temp_signable = sign_metadata(metadata, [], metadata_filename)
    temp_signable['signatures'].extend(roleinfo['signatures'])
    status = tuf.sig.get_signature_status(temp_signable, rolename)

    # No good previous signatures: this is the first partial write, so bump
    # the version and re-sign.
    if len(status['good_sigs']) == 0:
      metadata['version'] = metadata['version'] + 1
      signable = sign_metadata(metadata, roleinfo['signing_keyids'],
                               metadata_filename)

  # non-partial write()
  else:
    # A fully verifiable role that was not loaded as partial gets its
    # version bumped and is re-signed with the new version.
    if tuf.sig.verify(signable, rolename) and not roleinfo['partial_loaded']:
      metadata['version'] = metadata['version'] + 1
      signable = sign_metadata(metadata, roleinfo['signing_keyids'],
                               metadata_filename)

  # Write the metadata to file if it contains a threshold of signatures.
  signable['signatures'].extend(roleinfo['signatures'])

  if tuf.sig.verify(signable, rolename) or write_partial:
    _remove_invalid_and_duplicate_signatures(signable)
    compressions = roleinfo['compressions']
    filename = write_metadata_file(signable, metadata_filename, compressions,
                                   consistent_snapshot)

    # The root and timestamp files should also be written without a digest if
    # 'consistent_snapshot' is True.  A client may request a timestamp and
    # root file without knowing its digest and file size.
    if rolename == 'root' or rolename == 'timestamp':
      write_metadata_file(signable, metadata_filename, compressions,
                          consistent_snapshot=False)

  # 'signable' contains an invalid threshold of signatures.
  else:
    message = 'Not enough signatures for ' + repr(metadata_filename)
    raise tuf.UnsignedMetadataError(message, signable)

  return signable, filename
def _prompt(message, result_type=str):
  """
  Non-public function that prompts the user for input by logging 'message',
  converts the entered text to 'result_type', and returns the converted
  value to the caller.
  """

  response = six.moves.input(message)
  return result_type(response)
def _get_password(prompt='Password: ', confirm=False):
  """
  Non-public function that returns the password entered by the user.  If
  'confirm' is True, the user must enter the same password twice before it
  is returned; otherwise the first entry is returned immediately.
  """

  while True:
    # getpass() prompts for a password without echoing the user's input.
    password = getpass.getpass(prompt, sys.stderr)
    if not confirm:
      return password
    if getpass.getpass('Confirm: ', sys.stderr) == password:
      return password
    print('Mismatch; try again.')
def _metadata_is_partially_loaded(rolename, signable, roleinfo):
  """
  Non-public function that determines whether 'rolename' was loaded with an
  insufficient threshold of good signatures (which means 'rolename' was
  written to disk with repository.write_partial()).  A repository maintainer
  may write partial metadata without including a valid signature.  However,
  the final repository.write() must include a threshold number of signatures.

  This function exists to assist in deciding whether a role's version number
  should be incremented when write() or write_partial() is called.  Return
  True if 'rolename' is partially loaded, False otherwise.
  """

  # 'roleinfo' is accepted for interface compatibility with existing callers;
  # the signature status alone determines whether the metadata is partial.
  # The signature status lists the good signatures, as well as the bad,
  # untrusted, unknown, etc.
  status = tuf.sig.get_signature_status(signable, rolename)

  # The original also required len(status['good_sigs']) >= 0, which is a
  # tautology (len() is never negative) and has been dropped.
  return len(status['good_sigs']) < status['threshold']
def _check_directory(directory):
  """
  <Purpose>
    Non-public function that ensures 'directory' is valid and exists.  This
    is not a security check, but a way for the caller to determine the cause
    of an invalid directory provided by the user.  If the directory argument
    is valid, it is returned normalized and as an absolute path.

  <Arguments>
    directory:
      The directory to check.

  <Exceptions>
    tuf.Error, if 'directory' could not be validated.

    tuf.FormatError, if 'directory' is not properly formatted.

  <Side Effects>
    None.

  <Returns>
    The normalized absolutized path of 'directory'.
  """
  # Raises 'tuf.FormatError' if 'directory' does not have the correct format.
  tuf.formats.PATH_SCHEMA.check_match(directory)

  # The directory must actually exist on disk.
  if not os.path.isdir(directory):
    raise tuf.Error(repr(directory) + ' directory does not exist.')

  return os.path.abspath(directory)
def _check_role_keys(rolename):
  """
  Non-public function that verifies the public and signing keys of
  'rolename'.  If either contains an invalid threshold of keys, raise
  tuf.InsufficientKeysError.  'rolename' is the full rolename
  (e.g., 'targets/unclaimed/django').
  """
  # Pull the role's key/signature counts and threshold from 'tuf.roledb'.
  roleinfo = tuf.roledb.get_roleinfo(rolename)
  threshold = roleinfo['threshold']
  total_keyids = len(roleinfo['keyids'])
  total_signatures = len(roleinfo['signatures'])
  total_signing_keys = len(roleinfo['signing_keyids'])

  # Too few public keys to ever satisfy the threshold.
  if total_keyids < threshold:
    raise tuf.InsufficientKeysError(
        repr(rolename) + ' role contains ' + repr(total_keyids) + ' / ' +
        repr(threshold) + ' public keys.')

  # With no pre-existing signatures, the loaded signing keys alone must be
  # able to reach the threshold.
  if total_signatures == 0 and total_signing_keys < threshold:
    raise tuf.InsufficientKeysError(
        repr(rolename) + ' role contains ' + repr(total_signing_keys) + ' / ' +
        repr(threshold) + ' signing keys.')
def _remove_invalid_and_duplicate_signatures(signable):
"""
Non-public function that removes invalid signatures from 'signable'.
'signable' may contain signatures (invalid) from previous versions
of the metadata that were loaded with load_repository(). Invalid, or
duplicate signatures are removed from 'signable'.
"""
# Store the keyids of valid signatures. 'signature_keyids' is checked
# for duplicates rather than comparing signature objects because PSS may
# generate duplicate valid signatures of the same data, yet contain different
# signatures.
signature_keyids | |
codes, indices = ret
else:
codes, indices, values = ret
self.update_self_loop_bonus(values)
codes = self.unpack_x(codes, enc_len)
indices = self.unpack_x(indices, enc_len)
self._log_code_usage(indices, criterion)
if self.needs_global_info:
codes = (codes * fc) + bias
if needs_transpose:
codes = codes.transpose(-1, dim)
indices = indices.transpose(-1, dim)
kl = (torch
.empty(indices.shape, device=codes.device)
.fill_(torch.log(torch.tensor(self.embedding.weight.size(0),
dtype=torch.float32))))
if self.log_input_norms:
norms = torch.norm(codes.view(-1, codes.size(-1)).contiguous(), dim=1)
logger.log_scalar('vq_input_norms_post_bottle/mean', torch.mean(norms))
logger.log_scalar('vq_input_norms_post_bottle/std', torch.std(norms))
if criterion != 'segmental':
return codes, kl, {'indices': indices,
'embeddings': codes.detach(),
'pre_bn_acts': x.transpose(-1, dim)}
else:
return codes, kl, {'indices': indices,
'segmental_values': values,
'embeddings': codes.detach(),
'pre_bn_acts': x.transpose(-1, dim)}
def _log_code_usage(self, indices, criterion):
    """Log the entropy of codebook usage, normalized by the maximum
    possible entropy, to the experiment logger."""
    if not logger.is_currently_logging():
        return
    vocab = self.embedding.weight.size(0)
    # Histogram of selected code indices over the whole batch.
    freqs = torch.histc(
        indices.float(), bins=vocab, min=-0.5, max=vocab - 0.5
    ).float()
    total = np.prod(indices.size())
    if criterion != 'sparse':
        # The sparse criterion marks dropped positions with -1, which fall
        # outside the histogram range; every other criterion must account
        # for all positions.
        assert freqs.sum().item() == total
    freqs /= total
    usage_entropy = distributions.Categorical(freqs).entropy()
    logger.log_scalar("vq_code_usage_frac",
                      usage_entropy.item() / np.log(vocab))
def get_greedy_hmm_matrices(self):
proto_weights = self.embedding.weight
N = proto_weights.shape[0]
neg_inf = -1e20
# Greedy matrix creation
# state 0 is starting, state n + 1 refers to token N
# each state has N outgoing edges
states_mat = torch.arange(
0, N + 1, dtype=torch.int64).view(1, N + 1).repeat((N + 1, 1)).unsqueeze(0)
ilabels_mat = (states_mat.transpose(1, 2) - 1).clamp(0, N)
weights_mat = torch.zeros_like(states_mat, dtype=torch.float)
weights_mat[0, 0, :] = neg_inf
terminal_mat = torch.zeros((N + 1, 1), dtype=torch.float32).unsqueeze(0)
terminal_mat[0, 0] = neg_inf
return [
states_mat, ilabels_mat, weights_mat, terminal_mat
]
def update_self_loop_bonus(self, values):
    """Re-estimate the HMM self-loop bonus from the tail of the segmental
    merge values and fold it into an exponential moving average stored on
    self.self_loop_bonus_estimate."""
    num_merges = self.self_loop_bonus_reestimation_num_merges
    # Average per-merge gain over the last 'num_merges' merges.
    bonus = (values[-1] - values[-num_merges - 1]) / num_merges
    smoothing = self.self_loop_bonus_reestimation_smoothing
    self.self_loop_bonus_estimate = (
        smoothing * self.self_loop_bonus_estimate
        + (1.0 - smoothing) * bonus
    )
    logger.log_scalar('self_loop_bonus', bonus)
    logger.log_scalar('smooth_self_loop_bonus', self.self_loop_bonus_estimate)
def quantize_hmm_segmental(self, x, x_lens=None):
    """Segment x by Viterbi decoding through the greedy HMM over codebook
    prototypes and quantize every frame to its segment's prototype.

    Returns (quantized, seg_idx); seg_idx gains a trailing dim of 1.
    """
    from distsup.modules import fst_utils
    # Keep the cached HMM matrices on the same device as the input.
    self.greedy_hmm_matrices = [m.to(x.device) for m in self.greedy_hmm_matrices]
    (states_mat, ilabels_mat, weights_mat, terminal_mat
     ) = self.greedy_hmm_matrices
    # Add the smoothed self-loop bonus on the transition diagonal to favor
    # staying inside the current segment.
    weights_mat_seg = weights_mat + torch.diag(
        torch.empty(weights_mat.size(-1), device=weights_mat.device
                    ).fill_(self.self_loop_bonus_estimate))
    seg_mats = [
        states_mat, ilabels_mat, weights_mat_seg, terminal_mat
    ]
    proto_weights = self.embedding.weight
    # Emission scores: negative Euclidean distance to each prototype.
    log_probs = -torch.cdist(x.detach().reshape(-1, proto_weights.shape[1]),
                             proto_weights)
    log_probs = log_probs.view(x.shape[:-1] + (-1,)).requires_grad_(True)
    if x_lens is None:
        # Assume full-length sequences when no lengths are provided.
        x_lens = torch.empty(
            log_probs.shape[0], dtype=torch.int64).fill_(log_probs.shape[1])
    # The gradient of the Viterbi score w.r.t. the emissions is an indicator
    # of the best path, so a backward pass recovers the segmentation.
    with torch.enable_grad():
        loss = fst_utils.path_reduction(
            log_probs, x_lens, seg_mats, red_kind='viterbi',)
        loss.sum().backward()
    _, seg_idx = torch.max(log_probs.grad, dim=-1)
    quantized = self.embedding(seg_idx)
    seg_idx.unsqueeze_(-1)
    # Count index changes along time as segment boundaries (for logging).
    num_segments = float((np.diff(seg_idx.view(seg_idx.shape[:2]).cpu().numpy(),
                                  axis=1) != 0).sum())
    logger.log_scalar('hmm_segment_frac', num_segments / float(x_lens.sum()))
    print(f"HMM segmenter self loop bonus {self.self_loop_bonus_estimate} "
          f"num segments: {num_segments}, ratio {num_segments / float(x_lens.sum())}")
    return quantized, seg_idx
class VQBottleneckSparse(VQBottleneck):
    """VQBottleneck preset hard-wired to the 'sparse' quantization criterion."""

    def __init__(self, in_dim, latent_dim, num_tokens, dim=-1, commitment=0.25,
                 criterion_kwargs=None, use_copy_through=False):
        # Avoid a shared mutable default: the sparse criterion reads options
        # out of this dict at forward time, and a module-level default dict
        # would be shared by every instance of this class.
        if criterion_kwargs is None:
            criterion_kwargs = {}
        super(VQBottleneckSparse, self).__init__(
            in_dim, latent_dim, num_tokens, dim=dim, commitment=commitment,
            criterion='sparse', criterion_kwargs=criterion_kwargs,
            use_copy_through=use_copy_through)
class VQBottleneckSegmental(VQBottleneck):
    """VQBottleneck preset hard-wired to the 'segmental' quantization criterion."""

    def __init__(self, in_dim, latent_dim, num_tokens, dim=-1, commitment=0.25,
                 criterion_kwargs=None, use_copy_through=False):
        # Avoid a shared mutable default dict (it would be shared by every
        # instance of this class and by anyone who stores a reference to it).
        if criterion_kwargs is None:
            criterion_kwargs = {}
        super(VQBottleneckSegmental, self).__init__(
            in_dim, latent_dim, num_tokens, dim=dim, commitment=commitment,
            criterion='segmental', criterion_kwargs=criterion_kwargs,
            use_copy_through=use_copy_through)
class VQBottleneckNearest(VQBottleneck):
    """VQBottleneck preset hard-wired to the 'nearest' quantization criterion."""

    def __init__(self, in_dim, latent_dim, num_tokens, dim=-1, commitment=0.25,
                 use_copy_through=False):
        super(VQBottleneckNearest, self).__init__(
            in_dim, latent_dim, num_tokens, dim=dim, commitment=commitment,
            criterion='nearest', use_copy_through=use_copy_through)
class IndicesComputation(object):
    """Namespace of strategies mapping encoder outputs to codebook indices."""

    @staticmethod
    def nearest(inputs, codebook, temperature=None):
        """Assign each input row to a codebook entry.

        inputs: NxD, codebook: KxD.  With temperature=None the assignment is
        the Nx1 argmin of the pairwise distances; otherwise indices are
        sampled from a softmax over negative distances.
        """
        with torch.no_grad():
            # NxK pairwise Euclidean distances.
            dists = torch.cdist(inputs, codebook)
            if temperature is None:
                # Hard assignment: Nx1 index of the closest code.
                return torch.min(dists, dim=-1)[1].unsqueeze(1)
            # Stochastic assignment over the tempered distance softmax.
            probs = F.softmax(-dists / temperature, dim=-1)
            sampler = torch.distributions.Categorical(probs)
            return sampler.sample()

    @staticmethod
    def segmental(inputs, codebook, segment_frac=0.1, segment_threshold=None):
        """Segment-constrained assignment delegated to distsup's calc();
        returns (indices, merge values)."""
        from distsup.modules.segment import calc
        dists = torch.cdist(inputs, codebook)
        indices, values = calc(dists, inputs.shape[0] * segment_frac,
                               threshold=segment_threshold)
        indices = torch.from_numpy(indices).to(device=inputs.device).unsqueeze(1)
        return indices, values
class VectorQuantization(Function):
    """Straight-through vector quantization as an autograd Function.

    forward() snaps encoder outputs to codebook entries chosen by the
    configured criterion; backward() passes the downstream gradient straight
    through to the inputs (plus the commitment term) and scatter-adds a
    displacement gradient onto the selected codebook rows.
    """
    @staticmethod
    def flatten(x):
        # Collapse all leading dims so rows can be matched against the
        # (num_codes, code_dim) codebook.
        code_dim = x.size(-1)
        return x.view(-1, code_dim)
    @staticmethod
    def restore_shapes(codes, indices, target_shape):
        # Codes regain the input's shape; indices keep a trailing dim of 1.
        idx_shape = list(target_shape)
        idx_shape[-1] = 1
        return codes.view(*target_shape), indices.view(*idx_shape)
    @staticmethod
    def forward(ctx, inputs, codebook, commitment=0.25,
                criterion='nearest', criterion_kwargs={},
                use_copy_through=False):
        inputs_flat = VectorQuantization.flatten(inputs)
        # Dispatch on the criterion name ('nearest' / 'segmental' / ...).
        compute_indices = getattr(IndicesComputation, criterion)
        indices = compute_indices(inputs_flat, codebook, **criterion_kwargs)
        if type(indices) is tuple:
            # The segmental criterion also returns per-merge values.
            indices, values = indices
        codes = codebook[indices.view(-1), :]
        codes, indices = VectorQuantization.restore_shapes(
            codes, indices, inputs.shape)
        ctx.save_for_backward(codes, inputs, torch.FloatTensor([commitment]),
                              codebook, indices, torch.tensor([use_copy_through]))
        ctx.mark_non_differentiable(indices)
        if criterion != 'segmental':
            return codes, indices
        else:
            return codes, indices, torch.tensor(values)
    @staticmethod
    def backward(ctx, straight_through, unused_indices, unused_values=None):
        (codes, inputs, beta, codebook, indices, use_copy_through
         ) = ctx.saved_tensors
        # TODO: figure out proper vq loss reduction
        vq_loss = F.mse_loss(inputs, codes).detach()
        logger.log_scalar('vqlayer_loss', vq_loss)
        # gradient of vq_loss
        diff = 2 * (inputs - codes) / inputs.numel()
        commitment = beta.item() * diff
        if use_copy_through.item():
            # Copy-through: the codebook also receives the downstream grad.
            code_disp = VectorQuantization.flatten(-diff + straight_through)
        else:
            code_disp = VectorQuantization.flatten(-diff)
        indices = VectorQuantization.flatten(indices)
        # Scatter-add each position's displacement onto its selected code.
        code_disp = (torch
                     .zeros_like(codebook)
                     .index_add_(0, indices.view(-1), code_disp))
        # One gradient slot per forward argument; non-tensors get None.
        return straight_through + commitment, code_disp, None, None, None, None
# Convenience alias: applies the straight-through VQ autograd Function.
quantize = VectorQuantization.apply
class SparseVectorQuantization(Function):
    """Straight-through VQ that keeps only the K largest-norm input rows;
    dropped positions get zero codes and index -1."""

    @staticmethod
    def sparse(x, K):
        """Return (keep-mask over rows of x, dropped row ids, kept row ids),
        keeping the K rows with the largest L2 norm."""
        vec_len = torch.norm(x, dim=-1)
        _, idx = torch.sort(vec_len, descending=True)
        ret = torch.zeros_like(vec_len)
        ret[idx[:K]] = 1.0
        return ret, idx[K:], idx[:K]

    @staticmethod
    def restore_shapes(codes, indices, indices_weight, target_shape):
        # Codes regain the input's shape; indices/weights keep a trailing
        # dim of 1.
        idx_shape = list(target_shape)
        idx_shape[-1] = 1
        return (codes.view(*target_shape), indices.view(*idx_shape),
                indices_weight.view(*idx_shape))

    @staticmethod
    def forward(ctx, inputs, codebook, commitment=0.25, criterion_kwargs={}):
        # BUG FIX: this used to criterion_kwargs.pop('sparse_ratio', 0.2),
        # which mutated the caller's dict.  The same kwargs dict is
        # typically stored on the module and reused every forward call, so
        # the second call silently fell back to the default ratio.  Read
        # without mutating instead; the default dict is never modified.
        ratio = criterion_kwargs.get('sparse_ratio', 0.2)
        unknown = set(criterion_kwargs) - {'sparse_ratio'}
        assert not unknown, (
            f"unknown criterion_kwargs: {unknown}")
        inputs_flat = VectorQuantization.flatten(inputs)
        indices = IndicesComputation.nearest(inputs_flat, codebook)
        # Keep only the top 'ratio' fraction of positions (by input norm).
        K = int(inputs.numel() / inputs.size(-1) * ratio)
        indices_weight, removed_indices, remain_indices = \
            SparseVectorQuantization.sparse(inputs_flat, K)
        # Dropped positions get an all-zero code ...
        codes = codebook[indices.view(-1), :] * indices_weight.unsqueeze(-1)
        codes, indices, indices_weight = SparseVectorQuantization.restore_shapes(
            codes, indices, indices_weight, inputs.shape)
        # ... and index -1 so downstream code can identify them.
        indices = indices * indices_weight.long() - (1 - indices_weight).long()
        ctx.save_for_backward(codes, inputs, torch.FloatTensor([commitment]),
                              codebook, indices, indices_weight, remain_indices)
        ctx.mark_non_differentiable(indices)
        ctx.mark_non_differentiable(indices_weight)
        return codes, indices

    @staticmethod
    def backward(ctx, straight_through, unused_indices):
        codes, inputs, beta, codebook, indices, indices_weight, remain_indices = ctx.saved_tensors
        # TODO: figure out proper vq loss reduction
        vq_loss = F.mse_loss(inputs, codes).detach()
        logger.log_scalar('vqlayer_loss', vq_loss)
        #diff = 2 * (inputs * indices_weight - codes) / indices_weight.sum() / inputs.size(-1)
        # Gradient of the VQ loss w.r.t. the inputs.
        diff = 2 * (inputs - codes) / inputs.numel()
        commitment = beta.item() * diff
        code_disp = VectorQuantization.flatten(-diff)
        # Only the kept positions contribute gradient to the codebook.
        indices = indices.view(-1)[remain_indices]
        code_disp = code_disp[remain_indices]
        code_disp = (torch
                     .zeros_like(codebook)
                     .index_add_(0, indices, code_disp))
        #return straight_through * indices_weight + commitment, code_disp, None, None
        return straight_through + commitment, code_disp, None, None, None
# Convenience alias: applies the sparse straight-through VQ Function.
sparse_quantize = SparseVectorQuantization.apply
class SOMBottleneck(nn.Module):
def __init__(self, in_dim, latent_dim, num_tokens, dim=-1,
             commitment=0.32, som_loss_mult=1.2,
             prob_loss_mult=1.2,
             smoothness_loss_mult=1.4):
    """Self-Organizing-Map bottleneck.

    num_tokens is a 2-element list giving the SOM grid shape (e.g. [5, 5]);
    the codebook holds prod(num_tokens) embeddings of size latent_dim.
    """
    super(SOMBottleneck, self).__init__()
    assert isinstance(num_tokens, list)
    # num_tokens is a list in this case of the form: [5, 5]
    self.num_tokens = num_tokens
    self.latent_dim = latent_dim
    # total number of embeddings (multiply elements of the token list)
    num_embeddings = np.prod(num_tokens)
    # The codebook
    self.embedding = nn.Embedding(num_embeddings, latent_dim)
    nn.init.xavier_uniform_(self.embedding.weight)
    # transition probabilities in the discrete space
    # p(z_q_t | z_q_t-1)
    self.trans = nn.Linear(self.latent_dim, num_embeddings)
    # Linear projection from encoder output to embedding space
    self.projection = nn.Linear(in_dim, latent_dim)
    self.dim = dim
    self.commitment = commitment
    # NOTE(review): 'som_loss_mutl' looks like a typo of 'som_loss_mult';
    # left unchanged because unseen code may read this attribute name.
    self.som_loss_mutl = som_loss_mult
    self.smoothness_loss_mult = smoothness_loss_mult
    self.prob_loss_mult = prob_loss_mult
def loss_z_prob(self, z_q, z_dist_flat):
    """Smoothness loss for transitions: mean of code distances weighted by
    the transition probabilities predicted from the previous quantized code.

    z_q: (bsz, time, latent); z_dist_flat: (bsz*time, num_codes).
    """
    bsz = z_q.size(0)
    time = z_q.size(1)
    # Shift z_q one step right in time (first frame is repeated).
    prev_codes = torch.cat([z_q[:, 0:1], z_q[:, :-1]], dim=1)
    prev_probs = (F.softmax(self.trans(prev_codes), dim=-1)
                   .contiguous()
                   .view(bsz * time, -1))
    # Aggregate the probability-weighted distances over all positions.
    return torch.mean(z_dist_flat * prev_probs)
def loss_probabilities(self, z_q, k):
    """Cross-entropy between transition logits predicted from the previous
    quantized code and the actually selected code indices k."""
    # Ensure a (bsz, time, latent) layout before the time shift.
    z_q = z_q.contiguous().view(z_q.size(0), z_q.size(1), -1)
    prev_codes = torch.cat([z_q[:, 0:1], z_q[:, :-1]], dim=1)
    logits = self.trans(prev_codes)
    flat_logits = logits.contiguous().view(-1, logits.size(-1))
    return F.cross_entropy(flat_logits, k.contiguous().view(-1))
def z_q_ne(self, z_q, k, codebook):
    """Gather the 2-D SOM grid neighbours (up/down/right/left) of each
    selected code; out-of-grid neighbours become zero vectors.

    Returns a (batch, 5, latent_dim) stack: [self, up, down, right, left].
    """
    # Decompose flat code ids into (row, col) on the num_tokens grid.
    rows = k // self.num_tokens[1]
    cols = k % self.num_tokens[1]
    device = z_q.device.type
    batch_size = z_q.size(0)
    zero_vecs = torch.zeros(batch_size, self.latent_dim).to(device)
    # Which neighbours exist on the grid.
    has_up = torch.lt(rows, self.num_tokens[0] - 1)
    has_down = torch.gt(rows, 0)
    has_right = torch.lt(cols, self.num_tokens[1] - 1)
    has_left = torch.gt(cols, 0)
    # Clamped neighbour coordinates (stay in place at the border).
    row_up = torch.where(has_up, rows + 1, rows)
    row_down = torch.where(has_down, rows - 1, rows)
    col_right = torch.where(has_right, cols + 1, cols)
    col_left = torch.where(has_left, cols - 1, cols)
    up = torch.where(has_up.unsqueeze(-1),
                     codebook[row_up * self.num_tokens[1] + cols], zero_vecs)
    down = torch.where(has_down.unsqueeze(-1),
                       codebook[row_down * self.num_tokens[1] + cols], zero_vecs)
    right = torch.where(has_right.unsqueeze(-1),
                        codebook[rows * self.num_tokens[1] + col_right], zero_vecs)
    left = torch.where(has_left.unsqueeze(-1),
                       codebook[rows * self.num_tokens[1] + col_left], zero_vecs)
    return torch.stack([z_q, up, down, right, left], dim=1)
def forward(self, x):
inp_shape = x.shape[:-1]
dim = self.dim
needs_transpose = dim != -1 or dim != x.dim() - 1
if needs_transpose:
x = x.transpose(-1, dim).contiguous()
# flatten x
x = x.contiguous().view(-1, x.size(-1))
# project to latent space
z_e = self.projection(x)
# compute distance between encodings and each of the embeddings
# ze: N X D, codebook: K x D
z_dist_flat = torch.cdist(z_e, self.embedding.weight)
# z_dist_flat = torch.sum((z_e.unsqueeze(1) - self.embedding.weight.unsqueeze(0)).pow(2), dim=-1)
# ---- Picks the index of the closest embedding for every encoding ----
k = torch.argmin(z_dist_flat, dim=-1)
# ---- Aggregates the respective closest embedding for every encoding ----
z_q = self.embedding.weight[k]
# ---- Get neighbours ----
z_q_ne = self.z_q_ne(z_q, k, self.embedding.weight)
if needs_transpose:
z_q = z_q.transpose(-1, dim)
z_q_ne = z_q_ne.transpose(-1, dim)
k = k.contiguous().view(*inp_shape)
loss = self.loss(z_e, | |
Residues Colored Table for NON bonded################################
# Build, per ligand atom, the sorted de-duplicated list of contacting
# residues (non-bonded contacts), then emit an HTML table with residues
# color-coded by physicochemical class.
NH_templist4graph=[]
NH_graphdic1={}
if bool(Adenin_graphdicNH):
    # Collapse each ligand atom's residue lists into one sorted, unique,
    # comma-joined string.
    for k,v in Adenin_graphdicNH.iteritems():
        #print k
        for value in v:
            NH_templist4graph.append(value)
        samp=sorted(list(set(NH_templist4graph)))
        NH_graphdic1.setdefault('%s'%k,[]).append(', '.join(samp))
        #print temlist
        #print samp
        NH_templist4graph=[]
    # The widest residue list determines the table's colspan.
    length_listofcompiledresidues=[]
    for key,value in NH_graphdic1.iteritems():
        for i in value:
            valu=i.split(', ')
            #print valu
            #print len(valu)
            length_listofcompiledresidues.append(len(valu))
    length_ofcell=max(length_listofcompiledresidues)
    #print "<br/>"
    print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of amino acids: non-bonded contacts","</p>"
    print "<table border='1'>"
    print "<tr>"
    print "<th col width='60'>Ligand Atoms</th>"
    print "<th colspan='%d'>List of residues from analysed protein structures</th>"% length_ofcell
    print "</tr>"
    # One table row per ligand atom; residues are colored by class:
    # hydrophobic=pink, aromatic=orange, basic=red, acidic=green,
    # polar=blue, special=magenta, cysteine-like=yellow.
    for key in sorted(NH_graphdic1.iterkeys()):
        print "<td align='center'>%s</td>" %key
        for g1 in NH_graphdic1[key]:
            dat1= g1.split(', ')
            for NH_k3 in dat1:
                print "<td align='center'>"
                #print k3
                if NH_k3.startswith(('ALA','ILE','LEU','MET','MSE','VAL')):
                    print "<b><font color='pink'>%s</font></b>"%NH_k3
                if NH_k3.startswith(('PHE','TRP', 'TYR')):
                    print " <b><font color='orange'>%s</font></b>"%NH_k3
                if NH_k3.startswith(('LYS','ARG', 'HIS')):
                    print " <b><font color='red'>%s</font></b>"%NH_k3
                if NH_k3.startswith(('GLU','ASP')):
                    print " <b><font color='green'>%s</font></b>"%NH_k3
                if NH_k3.startswith(('ASN','GLN','SER','THR')):
                    print " <b><font color='blue'>%s</font></b>"%NH_k3
                if NH_k3.startswith(('GLY','PRO')):
                    print " <b><font color='magenta'>%s</font></b>"%NH_k3
                if NH_k3.startswith(('CYS','CME')):
                    print " <b><font color='yellow'>%s</font></b>"%NH_k3
                print "</td>"
        #print "<tr>"
        print "</tr>"
    print "</table>"
else:
    print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of amino acids: non-bonded contacts","</p>"
    print "No Interactions"
print """
</div>
</div>
"""#closing of col-2-3 and module
# Open the next layout column/module for the "common residues" tables.
print """
<div class="col-2-3">
<div class="module">
"""
if bool(Adenin_CommonH_Lig_Resdict):
    print "<p style='font-size:20px; color:brown'>List of common residues: hydrogen bonds contacts" ,"</p>"
    # Render the common-residue dict as an HTML table; missing cells -> 'NIL'.
    df_Adenin_CommonH_Lig_Resdict=pd.DataFrame.from_dict(Adenin_CommonH_Lig_Resdict).fillna('NIL')
    print (df_Adenin_CommonH_Lig_Resdict.to_html(justify='center'))
    #print pd.DataFrame.from_dict(Adenin_CommonH_Lig_Resdict).to_html(justify='center')#for common ligand atoms - hydrogen bonded
else:
    print "<p style='font-size:20px; color:brown'>List of common residues: hydrogen bonds contacts" ,"</p>"
    print "<p> No Common Interactions</p>"
####################Common Residues Colored Table for Adenin : H bonded################################
CommH_templist4graph=[]
CommH_graphdic1={}
if bool(Adenin_common_graphdicH):
    # Collapse each ligand atom's common-residue lists into one sorted,
    # unique, comma-joined string.
    for k,v in Adenin_common_graphdicH.iteritems():
        for value in v:
            CommH_templist4graph.append(value)
        samp=sorted(list(set(CommH_templist4graph)))
        CommH_graphdic1.setdefault('%s'%k,[]).append(', '.join(samp))
        CommH_templist4graph=[]
    # The widest residue list determines the table's colspan.
    length_listofcompiled_Common_residues=[]
    for key,value in CommH_graphdic1.iteritems():
        for i in value:
            valu=i.split(', ')
            length_listofcompiled_Common_residues.append(len(valu))
    length_ofcell=max(length_listofcompiled_Common_residues)
    #print "<br/>"
    print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of common amino acids: hydrogen bonds contacts ","</p>"
    print "<table border='1'>"
    print "<tr>"
    print "<th col width='60'>Ligand Atoms</th>"
    print "<th colspan='%d'>List of common residues from analysed protein structures</th>"% length_ofcell
    print "</tr>"
    # One row per ligand atom; residues colored by physicochemical class.
    for key in sorted(CommH_graphdic1.iterkeys()):
        print "<td align='center'>%s</td>" %key
        for g1 in CommH_graphdic1[key]:
            dat1= g1.split(', ')
            for H_k3 in dat1:
                print "<td align='center'>"
                #print k3
                if H_k3.startswith(('ALA','ILE','LEU','MET','MSE','VAL')):
                    print "<b><font color='pink'>%s</font></b>"%H_k3
                if H_k3.startswith(('PHE','TRP', 'TYR')):
                    print " <b><font color='orange'>%s</font></b>"%H_k3
                if H_k3.startswith(('LYS','ARG', 'HIS')):
                    print " <b><font color='red'>%s</font></b>"%H_k3
                if H_k3.startswith(('GLU','ASP')):
                    print " <b><font color='green'>%s</font></b>"%H_k3
                if H_k3.startswith(('ASN','GLN','SER','THR')):
                    print " <b><font color='blue'>%s</font></b>"%H_k3
                if H_k3.startswith(('GLY','PRO')):
                    print " <b><font color='magenta'>%s</font></b>"%H_k3
                if H_k3.startswith(('CYS','CME')):
                    print " <b><font color='yellow'>%s</font></b>"%H_k3
                print "</td>"
        #print "<tr>"
        print "</tr>"
    print "</table>"
else:
    print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of common amino acids: hydrogen bonds contacts ","</p>"
    print "<p> No Common Atoms Identified</p>"
if bool(Adenin_CommonNH_Lig_Resdict):
print "<p style='font-size:20px; color:brown'>List of common residues: non-bonded contacts","</p>"
df_Adenin_CommonNH_Lig_Resdict=pd.DataFrame.from_dict(Adenin_CommonNH_Lig_Resdict).fillna('NIL')
print (df_Adenin_CommonNH_Lig_Resdict.to_html(justify='center'))
#print pd.DataFrame.from_dict(Adenin_CommonNH_Lig_Resdict).to_html(justify='center')#for Common ligand atoms - Non hydrogen bonded
else:
print "<p style='font-size:20px; color:brown'>List of common residues: non-bonded contacts","</p>"
print "No Interactions"
####################Common Residues Colored Table for Adenin: NON bonded################################
CommNH_templist4graph=[]
CommNH_graphdic1={}
if bool(Adenin_common_graphdicNH):
for k,v in Adenin_common_graphdicNH.iteritems():
#print k
for value in v:
CommNH_templist4graph.append(value)
samp=sorted(list(set(CommNH_templist4graph)))
CommNH_graphdic1.setdefault('%s'%k,[]).append(', '.join(samp))
CommNH_templist4graph=[]
length_listofcompile_Common_dresidues=[]
for key,value in CommNH_graphdic1.iteritems():
for i in value:
valu=i.split(', ')
length_listofcompiled_Common_residues.append(len(valu))
length_ofcell=max(length_listofcompiled_Common_residues)
print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of common amino acids: non-bonded contacts","</p>"
print "<table border='1'>"
print "<tr>"
print "<th col width='60'>Ligand Atoms</th>"
print "<th colspan='%d'>List of common residues from analysed protein structures</th>"% length_ofcell
print "</tr>"
for key in sorted(CommNH_graphdic1.iterkeys()):
print "<td align='center'>%s</td>" %key
for g1 in CommNH_graphdic1[key]:
dat1= g1.split(', ')
for NH_k3 in dat1:
print "<td align='center'>"
#print k3
if NH_k3.startswith(('ALA','ILE','LEU','MET','MSE','VAL')):
print "<b><font color='pink'>%s</font></b>"%NH_k3
if NH_k3.startswith(('PHE','TRP', 'TYR')):
print " <b><font color='orange'>%s</font></b>"%NH_k3
if NH_k3.startswith(('LYS','ARG', 'HIS')):
print " <b><font color='red'>%s</font></b>"%NH_k3
if NH_k3.startswith(('GLU','ASP')):
print " <b><font color='green'>%s</font></b>"%NH_k3
if NH_k3.startswith(('ASN','GLN','SER','THR')):
print " <b><font color='blue'>%s</font></b>"%NH_k3
if NH_k3.startswith(('GLY','PRO')):
print " <b><font color='magenta'>%s</font></b>"%NH_k3
if NH_k3.startswith(('CYS','CME')):
print " <b><font color='yellow'>%s</font></b>"%NH_k3
print "</td>"
#print "<tr>"
print "</tr>"
print "</table>"
else:
print "<p style='font-size:20px; color:brown'> Physicochemical property based color-coding of common amino acids: non-bonded contacts","</p>"
print "No Interactions"
print """
</div>
</div>
"""# closinf of column and module divi
###############Web logo for Common Residues Section: H bonding#######################
print """
<div class="col-2-3">
<div class="module">
"""
# Random file-name stem so concurrent CGI requests don't clobber each other.
Adenin_graph_filename = str(uuid.uuid4())
Weblogo_dict_H={}
Weblogo_dict_H1={}
if bool(CommH_graphdic1):
    # Strip the residue numbers ('ALA123' -> 'ALA'), keyed by ligand atom.
    for key in sorted(CommH_graphdic1):
        for i in CommH_graphdic1[key]:
            tems=i.split(', ')
            for items in tems:
                se=re.split('([0-9])' , items)
                Weblogo_dict_H.setdefault('%s'%key,[]).append(se[0])
    # Count residue-type frequencies per ligand atom.
    for m,n in Weblogo_dict_H.iteritems():
        counted=dict(Counter(n))
        Weblogo_dict_H1.setdefault('%s'%m,{}).update(counted)
    zipfilename='tmp/'+Adenin_graph_filename+'_Hbonding'+'.zip'
    Adenin_aminoacid_singlecode={}
    # Three-letter -> one-letter amino-acid codes used by the weblogo input.
    aminoacid_code={'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
                    'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
                    'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
                    'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
    # Translate the per-atom frequency dicts to one-letter keys.
    recoded={}
    for Adenin_ligand_key, Adenin_amino_frequency in Weblogo_dict_H1.iteritems():
        #print ligand_key
        for i in Adenin_ligand_key:
            for Adenin_amino,Adenin_frequency in Adenin_amino_frequency.iteritems():
                for Adenin_amino_3letter,Adenin_code_frequency in aminoacid_code.iteritems():
                    if Adenin_amino == Adenin_amino_3letter:
                        recoded[Adenin_code_frequency]=Adenin_frequency
        Adenin_aminoacid_singlecode.setdefault('%s'%Adenin_ligand_key,{}).update(recoded)
        recoded={}
    Adenin_Frequency=1
    instances=[]
    Adenin_weblogo_collection=[]
    # Repeat each one-letter code 'frequency' times so weblogo stack heights
    # reflect observed counts; one SVG logo per ligand atom.
    for Adenin_ligand_key1, amino_frequency1 in Adenin_aminoacid_singlecode.iteritems():
        for Adenin_Amino1, Adenin_number in amino_frequency1.iteritems():
            Adenin_Frequency=1
            while Adenin_Frequency <= Adenin_number:
                instances.append(Seq(Adenin_Amino1, IUPAC.protein))
                Adenin_Frequency=Adenin_Frequency+1
        Adenin_motif = motifs.create(instances)
        Adenin_mymotif ='tmp/'+ Adenin_graph_filename+ '_H_'+ Adenin_ligand_key1 +'.svg'
        Adenin_motif.weblogo('%s'%Adenin_mymotif,format='SVG',xaxis_label= '%s' %Adenin_ligand_key1,show_errorbars= False, color_scheme= 'color_chemistry')
        Adenin_weblogo_collection.append(Adenin_mymotif)
        instances=[]
    weblogo_images=' '.join(str(x) for x in Adenin_weblogo_collection)
    print "<p style='font-size:20px; color:brown'> Weblogo showing the frequency of residues binding to ligand atoms for the selected structures:</p>"
    print "<div class='weblogo_row'>"
    for Adenin_image in sorted(Adenin_weblogo_collection):
        print "<div class='weblogo_column'>"
        print "<embed src='%s#page=1&view=FitH ' />" %Adenin_image
        #print "<iframe src='%s#page=1&view=FitH ' width='200' height='100' border='0'></iframe>"%Adenin_image
        print "</div>"
    print "</div>"
    ####zip file
    # Bundle all generated logos for download.
    with ZipFile('%s'%zipfilename, 'w') as Adenin_myzip:
        for Adenin_Images in Adenin_weblogo_collection:
            Adenin_myzip.write(Adenin_Images)
else:
    print "<p style='font-size:20px; color:brown'> Weblogo for Common bonded Interactions:</p>"
    print "No Interactions"
###############Web logo for Common Residues Section: NON bonding#######################
Weblogo_dict_NH={}
Weblogo_dict_NH1={}
if bool(CommNH_graphdic1):
    # Strip the residue numbers ('ALA123' -> 'ALA'), keyed by ligand atom.
    for key in sorted(CommNH_graphdic1):
        for i in CommNH_graphdic1[key]:
            tems=i.split(', ')
            for items in tems:
                se=re.split('([0-9])' , items)
                Weblogo_dict_NH.setdefault('%s'%key,[]).append(se[0])
    # Count residue-type frequencies per ligand atom.
    for m,n in Weblogo_dict_NH.iteritems():
        counted=dict(Counter(n))
        Weblogo_dict_NH1.setdefault('%s'%m,{}).update(counted)
    zipfilename='tmp/'+Adenin_graph_filename+'_NHbonding'+'.zip'
    Adenin_aminoacid_singlecode={}
    # NOTE(review): 'aminoacid_code' is defined inside the H-bond weblogo
    # branch above; if that branch was skipped this raises NameError —
    # verify with the full script.
    recoded={}
    for Adenin_ligand_key, Adenin_amino_frequency in Weblogo_dict_NH1.iteritems():
        #print ligand_key
        for i in Adenin_ligand_key:
            for Adenin_amino,Adenin_frequency in Adenin_amino_frequency.iteritems():
                for Adenin_amino_3letter,Adenin_code_frequency in aminoacid_code.iteritems():
                    if Adenin_amino == Adenin_amino_3letter:
                        recoded[Adenin_code_frequency]=Adenin_frequency
        Adenin_aminoacid_singlecode.setdefault('%s'%Adenin_ligand_key,{}).update(recoded)
        recoded={}
    Adenin_Frequency=1
    instances=[]
    Adenin_weblogo_collection=[]
    # Repeat each one-letter code 'frequency' times so weblogo stack heights
    # reflect observed counts; one SVG logo per ligand atom.
    for Adenin_ligand_key1, amino_frequency1 in Adenin_aminoacid_singlecode.iteritems():
        for Adenin_Amino1, Adenin_number in amino_frequency1.iteritems():
            Adenin_Frequency=1
            while Adenin_Frequency <= Adenin_number:
                instances.append(Seq(Adenin_Amino1, IUPAC.protein))
                Adenin_Frequency=Adenin_Frequency+1
        Adenin_motif = motifs.create(instances)
        Adenin_mymotif ='tmp/'+ Adenin_graph_filename+ '_NH_'+ Adenin_ligand_key1 +'.svg'
        Adenin_motif.weblogo('%s'%Adenin_mymotif,format='SVG',xaxis_label= '%s' %Adenin_ligand_key1,show_errorbars= False, color_scheme= 'color_chemistry')
        Adenin_weblogo_collection.append(Adenin_mymotif)
        instances=[]
    weblogo_images=' '.join(str(x) for x in Adenin_weblogo_collection)
    print "<p style='font-size:20px; color:brown'> Weblogo showing the frequency of residues binding to ligand atoms for the selected structures:</p>"
    print "<div class='weblogo_row'>" #initiation of weblog_row
    for Adenin_image in sorted(Adenin_weblogo_collection):
        print "<div class='weblogo_column'>" #initiation of weblog_column
        print "<embed src='%s#page=1&view=FitH ' />" %Adenin_image
        #print "<iframe src='%s#page=1&view=FitH ' width='200' height='200' border='0'></iframe>"%Adenin_image
        print "</div>"#closing of weblog_column
    print "</div>"#closing of weblog_row
    ####zip file
    # Bundle all generated logos for download.
    with ZipFile('%s'%zipfilename, 'w') as Adenin_myzip:
        for Adenin_Images in Adenin_weblogo_collection:
            Adenin_myzip.write(Adenin_Images)
else:
    print "<p style='font-size:20px; color:brown'> Weblogo for Common Nonbonded Interactions:</p>"
    print "No Interactions"
print """
</div>
</div>
</div>
""" # closing of Adenin section
#####################################################
# Ribose sub-group section: download links plus basic statistics for the
# bonded and non-bonded interactions, shown in a collapsible panel.
print "<p align='center'>################################################################","</p>"
print "<p style='font-size:20px; color:blue' align='center'>Ribose sub group structure","</p>"
print '<p style=text-align:center>Download: <a href=%s download>All Bonded,</a>' % Ribose_allH
print ' <a href=%s download>All Non-bonded,</a>' % Ribose_allNH
print ' <a href=%s download>Common Bonded,</a>' % Ribose_CommonH
print ' <a href=%s download>Common Non-bonded,</a>' % Ribose_CommonNH ,'</p>'
print "<p align='center'>################################################################" ,"</p>"
print "<button class='collapsible'>I. All bonded interactions - Click to read basic statistical information</button>"#Start of click drop down
print "<div class='contentsection'>"
print "<p style='font-size:20px; color:black' align='center'>"
print " Number of Ligand atoms:", len(Ribose), "<br/>"
print " Number of PDB IDs:", len(Ribose_allNH_Lig_Resdict.keys()), "<br/>"
print "<div class='row'>"# spliting into two columns
print "<div class='column'>"# spliting into two columns
# Left column: bonded-interaction statistics.
if bool(Ribose_allH_Lig_Resdict):
    print "Statistics of Bonded Intercations"
    print percentage(Ribose_allH_Lig_Resdict,Ribose)
if bool(Ribose_allH_Lig_Resdict_distance):
    print distance_calc(Ribose_allH_Lig_Resdict_distance)
print "</div>"# closing of first columns
print "<div class='column'>"
# Right column: non-bonded-interaction statistics.
if bool(Ribose_allNH_Lig_Resdict):
    print "Statistics of Non-Bonded Intercations", "<br/>"
    print percentage(Ribose_allNH_Lig_Resdict,Ribose)
if bool(Ribose_allNH_Lig_Resdict_distance):
    print distance_calc(Ribose_allNH_Lig_Resdict_distance)
print "</div>"# closing of second columns
print "</div>"#closing of row
print "</div>"#End of click drop down
print "<br/>"
print """
<div | |
"north pole" or the
# "south pole" (after the central lon/lat have been taken into
# account).
if n_parallels == 1:
plat = 90 if standard_parallels[0] > 0 else -90
else:
# Which pole are the parallels closest to? That is the direction
# that the cone converges.
if abs(standard_parallels[0]) > abs(standard_parallels[1]):
poliest_sec = standard_parallels[0]
else:
poliest_sec = standard_parallels[1]
plat = 90 if poliest_sec > 0 else -90
self.cutoff = cutoff
n = 91
lons = np.empty(n + 2)
lats = np.full(n + 2, float(cutoff))
lons[0] = lons[-1] = 0
lats[0] = lats[-1] = plat
if plat == 90:
# Ensure clockwise
lons[1:-1] = np.linspace(central_longitude + 180 - 0.001,
central_longitude - 180 + 0.001, n)
else:
lons[1:-1] = np.linspace(central_longitude - 180 + 0.001,
central_longitude + 180 - 0.001, n)
points = self.transform_points(PlateCarree(), lons, lats)
self._boundary = sgeom.LinearRing(points)
mins = np.min(points, axis=0)
maxs = np.max(points, axis=0)
self._x_limits = mins[0], maxs[0]
self._y_limits = mins[1], maxs[1]
def __eq__(self, other):
res = super(LambertConformal, self).__eq__(other)
if hasattr(other, "cutoff"):
res = res and self.cutoff == other.cutoff
return res
    def __ne__(self, other):
        # Python 2 does not derive ``!=`` from ``__eq__``; delegate explicitly.
        return not self == other
    def __hash__(self):
        # Hash on the proj4 string plus the cutoff, mirroring __eq__ so that
        # equal projections hash equal.
        return hash((self.proj4_init, self.cutoff))
    @property
    def boundary(self):
        # Ring of projected points outlining the valid map area.
        return self._boundary

    @property
    def threshold(self):
        # Resolution used when interpolating projected geometries; fixed at
        # 1e5 projected units.
        return 1e5

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class LambertAzimuthalEqualArea(Projection):
    """
    A Lambert Azimuthal Equal-Area projection.
    """

    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 globe=None):
        """
        Parameters
        ----------
        central_longitude: optional
            The central longitude. Defaults to 0.
        central_latitude: optional
            The central latitude. Defaults to 0.
        false_easting: optional
            X offset from planar origin in metres. Defaults to 0.
        false_northing: optional
            Y offset from planar origin in metres. Defaults to 0.
        globe: optional
            A :class:`cartopy.crs.Globe`. If omitted, a default globe is
            created.
        """
        proj4_params = [('proj', 'laea'),
                        ('lon_0', central_longitude),
                        ('lat_0', central_latitude),
                        ('x_0', false_easting),
                        ('y_0', false_northing)]
        super(LambertAzimuthalEqualArea, self).__init__(proj4_params,
                                                        globe=globe)

        # ``np.float`` was a deprecated alias of the builtin ``float`` and was
        # removed in NumPy 1.24; use the builtin directly.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)

        # Find the antipode, and shift it a small amount in latitude to
        # approximate the extent of the projection:
        lon = central_longitude + 180
        sign = np.sign(central_latitude) or 1
        lat = -central_latitude + sign * 0.01
        x, max_y = self.transform_point(lon, lat, PlateCarree())

        coords = _ellipse_boundary(a * 1.9999, max_y - false_northing,
                                   false_easting, false_northing, 61)
        self._boundary = sgeom.polygon.LinearRing(coords.T)
        mins = np.min(coords, axis=1)
        maxs = np.max(coords, axis=1)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
        self._threshold = np.diff(self._x_limits)[0] * 1e-3

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class Miller(_RectangularProjection):
    _handles_ellipses = False

    def __init__(self, central_longitude=0.0, globe=None):
        if globe is None:
            globe = Globe(semimajor_axis=math.degrees(1), ellipse=None)

        # TODO: Let the globe return the semimajor axis always.
        # ``np.float`` is a deprecated alias (removed in NumPy 1.24); use the
        # builtin ``float`` instead.
        a = float(globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)

        proj4_params = [('proj', 'mill'), ('lon_0', central_longitude)]
        # See Snyder, 1987. Eqs (11-1) and (11-2) substituting maximums of
        # (lambda-lambda0)=180 and phi=90 to get limits.
        super(Miller, self).__init__(proj4_params,
                                     a * np.pi, a * 2.303412543376391,
                                     globe=globe)

    @property
    def threshold(self):
        return 0.5
class RotatedPole(_CylindricalProjection):
    """
    A rotated latitude/longitude projected coordinate system with
    cylindrical topology; projected distance is measured in projection
    metres.

    Implemented with proj's ``ob_tran`` operation: ``pole_longitude`` sets
    ``lon_0`` and two further rotations are derived from ``pole_latitude``
    and ``central_rotated_longitude``.  Equivalently: move the pole to
    (``pole_latitude``, ``pole_longitude``) in the geographic CRS given by
    ``globe``, then rotate that CRS about its new pole by
    ``central_rotated_longitude``.
    """

    def __init__(self, pole_longitude=0.0, pole_latitude=90.0,
                 central_rotated_longitude=0.0, globe=None):
        """
        Parameters
        ----------
        pole_longitude: optional
            Pole longitude position, in unrotated degrees. Defaults to 0.
        pole_latitude: optional
            Pole latitude position, in unrotated degrees. Defaults to 0.
        central_rotated_longitude: optional
            Longitude rotation about the new pole, in degrees. Defaults to 0.
        globe: optional
            An optional :class:`cartopy.crs.Globe`. Defaults to a "WGS84"
            datum.
        """
        proj4_params = [
            ('proj', 'ob_tran'),
            ('o_proj', 'latlon'),
            ('o_lon_p', central_rotated_longitude),
            ('o_lat_p', pole_latitude),
            ('lon_0', 180 + pole_longitude),
            ('to_meter', math.radians(1)),
        ]
        super(RotatedPole, self).__init__(proj4_params, 180, 90, globe=globe)

    @property
    def threshold(self):
        return 0.5
class Gnomonic(Projection):
    _handles_ellipses = False

    def __init__(self, central_latitude=0.0,
                 central_longitude=0.0, globe=None):
        proj4_params = [
            ('proj', 'gnom'),
            ('lat_0', central_latitude),
            ('lon_0', central_longitude),
        ]
        super(Gnomonic, self).__init__(proj4_params, globe=globe)
        # Truncation radius applied to the (otherwise unbounded) plane.
        self._max = 5e7

    @property
    def boundary(self):
        # Circular clip boundary of radius ``_max`` centred on the origin.
        return sgeom.Point(0, 0).buffer(self._max).exterior

    @property
    def threshold(self):
        return 1e5

    @property
    def x_limits(self):
        return (-self._max, self._max)

    @property
    def y_limits(self):
        return (-self._max, self._max)
class Stereographic(Projection):
    def __init__(self, central_latitude=0.0, central_longitude=0.0,
                 false_easting=0.0, false_northing=0.0,
                 true_scale_latitude=None,
                 scale_factor=None, globe=None):
        # Warn when using Stereographic with proj < 5.0.0 due to
        # incorrect transformation with lon_0=0 (see
        # https://github.com/OSGeo/proj.4/issues/194).
        if central_latitude == 0:
            if PROJ4_VERSION != ():
                if PROJ4_VERSION < (5, 0, 0):
                    warnings.warn(
                        'The Stereographic projection in Proj older than '
                        '5.0.0 incorrectly transforms points when '
                        'central_latitude=0. Use this projection with '
                        'caution.',
                        stacklevel=2)
            else:
                warnings.warn(
                    'Cannot determine Proj version. The Stereographic '
                    'projection may be unreliable and should be used with '
                    'caution.',
                    stacklevel=2)

        proj4_params = [('proj', 'stere'), ('lat_0', central_latitude),
                        ('lon_0', central_longitude),
                        ('x_0', false_easting), ('y_0', false_northing)]

        if true_scale_latitude is not None:
            if central_latitude not in (-90., 90.):
                warnings.warn('"true_scale_latitude" parameter is only used '
                              'for polar stereographic projections. Consider '
                              'the use of "scale_factor" instead.',
                              stacklevel=2)
            proj4_params.append(('lat_ts', true_scale_latitude))

        if scale_factor is not None:
            if true_scale_latitude is not None:
                # NOTE(review): despite the "Ignoring" wording, this is a hard
                # error -- the two parameters are mutually exclusive. Message
                # kept as-is for backward compatibility.
                raise ValueError('It does not make sense to provide both '
                                 '"scale_factor" and "true_scale_latitude". '
                                 'Ignoring "scale_factor".')
            else:
                proj4_params.append(('k_0', scale_factor))

        super(Stereographic, self).__init__(proj4_params, globe=globe)

        # TODO: Let the globe return the semimajor axis always.
        # ``np.float`` is a deprecated alias (removed in NumPy 1.24); use the
        # builtin ``float`` instead.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)
        b = float(self.globe.semiminor_axis or WGS84_SEMIMINOR_AXIS)

        # Note: The magic number has been picked to maintain consistent
        # behaviour with a wgs84 globe. There is no guarantee that the scaling
        # should even be linear.
        x_axis_offset = 5e7 / WGS84_SEMIMAJOR_AXIS
        y_axis_offset = 5e7 / WGS84_SEMIMINOR_AXIS
        self._x_limits = (-a * x_axis_offset + false_easting,
                          a * x_axis_offset + false_easting)
        self._y_limits = (-b * y_axis_offset + false_northing,
                          b * y_axis_offset + false_northing)
        coords = _ellipse_boundary(self._x_limits[1], self._y_limits[1],
                                   false_easting, false_northing, 91)
        self._boundary = sgeom.LinearRing(coords.T)
        self._threshold = np.diff(self._x_limits)[0] * 1e-3

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class NorthPolarStereo(Stereographic):
    """Stereographic projection centred on the north pole."""

    def __init__(self, central_longitude=0.0, true_scale_latitude=None,
                 globe=None):
        # A ``true_scale_latitude`` of None is equivalent to +90 (the pole).
        super(NorthPolarStereo, self).__init__(
            central_latitude=90,
            central_longitude=central_longitude,
            true_scale_latitude=true_scale_latitude,
            globe=globe)
class SouthPolarStereo(Stereographic):
    """Stereographic projection centred on the south pole."""

    def __init__(self, central_longitude=0.0, true_scale_latitude=None,
                 globe=None):
        # A ``true_scale_latitude`` of None is equivalent to -90 (the pole).
        super(SouthPolarStereo, self).__init__(
            central_latitude=-90,
            central_longitude=central_longitude,
            true_scale_latitude=true_scale_latitude,
            globe=globe)
class Orthographic(Projection):
    _handles_ellipses = False

    def __init__(self, central_longitude=0.0, central_latitude=0.0,
                 globe=None):
        if PROJ4_VERSION != ():
            if (5, 0, 0) <= PROJ4_VERSION < (5, 1, 0):
                warnings.warn(
                    'The Orthographic projection in the v5.0.x series of Proj '
                    'incorrectly transforms points. Use this projection with '
                    'caution.',
                    stacklevel=2)
        else:
            warnings.warn(
                'Cannot determine Proj version. The Orthographic projection '
                'may be unreliable and should be used with caution.',
                stacklevel=2)

        proj4_params = [('proj', 'ortho'), ('lon_0', central_longitude),
                        ('lat_0', central_latitude)]
        super(Orthographic, self).__init__(proj4_params, globe=globe)

        # TODO: Let the globe return the semimajor axis always.
        # ``np.float`` is a deprecated alias (removed in NumPy 1.24); use the
        # builtin ``float`` instead.
        a = float(self.globe.semimajor_axis or WGS84_SEMIMAJOR_AXIS)

        # To stabilise the projection of geometries, we reduce the boundary by
        # a tiny fraction at the cost of the extreme edges.
        coords = _ellipse_boundary(a * 0.99999, a * 0.99999, n=61)
        self._boundary = sgeom.polygon.LinearRing(coords.T)
        mins = np.min(coords, axis=1)
        maxs = np.max(coords, axis=1)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]
        self._threshold = np.diff(self._x_limits)[0] * 0.02

    @property
    def boundary(self):
        return self._boundary

    @property
    def threshold(self):
        return self._threshold

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class _WarpedRectangularProjection(six.with_metaclass(ABCMeta, Projection)):
    def __init__(self, proj4_params, central_longitude,
                 false_easting=None, false_northing=None, globe=None):
        if false_easting is not None:
            proj4_params += [('x_0', false_easting)]
        if false_northing is not None:
            proj4_params += [('y_0', false_northing)]
        super(_WarpedRectangularProjection, self).__init__(proj4_params,
                                                           globe=globe)

        # Trace the projection outline: up the western edge, down the eastern
        # edge, then a final point to close the ring at the starting corner.
        minlon, maxlon = self._determine_longitude_bounds(central_longitude)
        n = 91
        lon = np.concatenate([
            np.full(n, minlon, dtype=float),
            np.full(n, maxlon, dtype=float),
            [minlon],
        ])
        lat = np.concatenate([
            np.linspace(-90, 90, n),
            np.linspace(90, -90, n),
            [-90.0],
        ])
        points = self.transform_points(self.as_geodetic(), lon, lat)

        self._boundary = sgeom.LinearRing(points)
        mins = np.min(points, axis=0)
        maxs = np.max(points, axis=0)
        self._x_limits = mins[0], maxs[0]
        self._y_limits = mins[1], maxs[1]

    @property
    def boundary(self):
        return self._boundary

    @property
    def x_limits(self):
        return self._x_limits

    @property
    def y_limits(self):
        return self._y_limits
class _Eckert(six.with_metaclass(ABCMeta, _WarpedRectangularProjection)):
"""
An Eckert projection.
This class implements all the | |
REMOTE
)
cmdComplete = runAndCheckCommandComplete(cmd)
""" Check for files with extensions (i.e. .1, .2, ...) """
""" First, get a list of all files in the base directory """
fixupFileExtsList = []
baseDir = seg.primary_data_directory + "/base"
primaryTempFile = seg.primaryTempDir + '/tempfilesindir' + str(seg.mirror_dbid)
cmdComplete = False
while cmdComplete == False:
fdCmd = FilesInDir( name = "gpupgrademirror files in dir %s:%s" % (seg.primary_host, baseDir)
, filePattern = baseDir
, ctxt = REMOTE
, remoteHost = seg.primary_host
, remoteTempFile = primaryTempFile
)
cmdComplete = runAndCheckCommandComplete(fdCmd)
fdCmd.validate()
allBaseList = fdCmd.get_result_list(localTempFile = options.info_data_directory + "/tempallbase" + str(seg.mirror_dbid))
allBaseList.sort()
""" Searh the base directory list for all fixup files that have dots (i.e. 123.1, 123.2) """
dotFiles = []
for file in self.fixupFileList:
fullPathFile = seg.primary_data_directory + '/' + file
tempList = findRelFileDotNodes(fullPathFile, allBaseList)
for suffix in tempList:
dotFiles.append(file + "." + suffix)
allFixupFileList = self.fixupFileList + dotFiles
sph = SpecialFileHandling( sourceList = allFixupFileList
, seg = seg
)
sph.createLinks()
cmdComplete = False
while cmdComplete == False:
fdlCmd = FileDirectoryList( name = "gpupgrademirror find all dirs and files for %s:%s " % (seg.primary_host, sph.fullPathLinkDir)
, filePattern = sph.fullPathLinkDir
, ctxt = REMOTE
, remoteHost = seg.primary_host
)
cmdComplete = runAndCheckCommandComplete(fdlCmd)
fdlCmd.validate()
linkDirList = fdlCmd.get_result_list()
copyList = []
for element in self.specialDirectoryList:
elementList = element.split("/")
if len(elementList) > 1:
elementPrefix = "/" + "/".join(elementList[:-1])
else:
elementPrefix = ""
copyList.append([seg.primary_data_directory + "/" + element, seg.mirror_data_directory + elementPrefix ])
for element in linkDirList:
copyList.append([sph.fullPathLinkDir + "/" + element, seg.mirror_data_directory])
for copyElement in copyList:
cmdComplete = False
while cmdComplete == False:
rcCmd = RemoteCopyPreserve( name = 'gpupgrademirror phase 2 fixup copy : %s:%s to %s:%s' % (seg.primary_host, copyElement[0], seg.mirror_host, copyElement[1])
, srcDirectory = copyElement[0]
, dstHost = seg.mirror_host
, dstDirectory = copyElement[1]
, ctxt = REMOTE
, remoteHost = seg.primary_host
)
cmdComplete = runAndCheckCommandComplete(rcCmd)
rcCmd.validate()
return
#-------------------------------------------------------------------------------
    def run(self):
        # Phase 2 worker entry point: for each mirror segment assigned to this
        # worker, copy the special/catalog files from the primary to the new
        # mirror and advance that segment's status record.  Python 2 syntax
        # (``except Exception, e``) -- this file targets Python 2.
        self.startedRun = True
        try:
            i = 0
            for seg in self.mirrorSegmentList:
                self.logger.debug("started run for Phase 2 for seg mirror: %s" % str(seg.mirror_dbid))
                # statusList is assumed to be parallel to mirrorSegmentList.
                status = self.statusList[i]
                currentStatus = status.get_current_status()
                if status.compare_status('START_PHASE2') <= 0:
                    status.set_status('START_PHASE2')
                ''' Copy catalog tables to mirror '''
                self.newMirrorSpecialFileFixup(seg)
                if status.compare_status('PHASE2_DONE') <= 0:
                    status.set_status('PHASE2_DONE')
                i = i + 1
            self.shutdown()
        except Exception, e:
            # Any failure aborts the whole utility with a pointer to the log.
            self.logger.error('ERROR in processing mirror: %s Exception: %s' % (self.nodeName, str(e)))
            self.logger.error('gpupgradmirror exiting')
            traceback.print_exc()
            sys.exit("The gpupgrademirror log information can be found in " + str(get_logfile()) + "\n")
#-------------------------------------------------------------------------------
def shutdown(self):
for seg in self.mirrorSegmentList:
cmdComplete = False
while cmdComplete == False:
rmCmd = RemoveFiles( name = 'gpupgrademirror remove temp dir: %s:%s' % (seg.primary_host, seg.primaryTempDir)
, directory = seg.primaryTempDir
, ctxt = REMOTE
, remoteHost = seg.primary_host
)
###print "rmCmd = "
###print str(rmCmd)
cmdComplete = runAndCheckCommandComplete(rmCmd)
rmCmd.validate()
#
#-------------------------------------------------------------------------------
#--------------------------------- Main ----------------------------------------
#-------------------------------------------------------------------------------
"""
This the the main body of code for gpupgrademirror. gpupgradmirror has two phases
(phase 1 and phase 2) and two modes (safe and unsafe).
"""
# Module-level state shared by the main body below.
dburl = None
conn = None
remove_pid = True          # delete the pid file on exit unless another run owns it
p1MirrorInfo = None
phase2gparray = None
MirrorNodeList = []
MirrorInfoList = []
phase2MirrorList = []
minionList = []
# Start code-coverage collection for this run.
coverage = GpCoverage()
coverage.start()
try:
# setup signal handlers so we can clean up correctly
signal.signal(signal.SIGTERM, sig_handler)
signal.signal(signal.SIGHUP, sig_handler)
""" If we are a minion, we need to do a lookup of our seg id to properly setup the log file name. """
minionSegdbid = ""
argPrevious = ""
for arg in sys.argv:
if argPrevious == "-i":
segdbidList = arg.split(":")
minionSegdbid = segdbidList[0]
break
else:
argPrevious = arg
logger = get_default_logger()
applicationNameWithExt = EXECNAME + str(minionSegdbid)
setup_tool_logging( appName = applicationNameWithExt
, hostname = getLocalHostname()
, userName = getUserName()
)
options, args = parseargs()
if len(options.ids) > 0:
options.ids = options.ids.split(':')
if options.verbose:
enable_verbose_logging()
if is_gpupgrademirror_running(options.info_data_directory):
logger.error('gpupgrademirror is already running. Only one instance')
logger.error('of gpupgrademirror is allowed at a time.')
remove_pid = False
sys.exit("The gpupgrademirror log information can be found in " + str(get_logfile()) + "\n")
else:
create_pid_file(options.info_data_directory)
if options.phase2 == True:
phase = 2
else:
phase = 1
overallStatus = GpUpgradeMirrorStatus( logger = logger
, info_data_directory = options.info_data_directory
, segment_dbid = 0
, phase = phase
, overall = True
)
if options.continue_upgrade == True and \
len(options.ids) == 0 and \
overallStatus.get_current_status() == (None, None):
""" If there is no status, then we haven't gotten anywhere. Reset options.continue_upgrade to False"""
logger.warning("Although continue was specified, the upgrade are not far enough elong to continue. gpupgrademirror will attempt to restart this phase of upgrade")
options.continue_upgrade = False
"""
We will start the database in master only mode unless we are in phase 1 and
we are rolling back or continuing. If we are in phase 1 and are rolling back
or continuing, we will get the information we need from a flat file we
created on the first phase 1 attempt.
"""
dbUrl = dbconn.DbURL(dbname = 'template1')
phase1gpstartStatus = None
if options.rollback == True:
""" Never start the database in rollback mode. """
pass
elif options.phase2 == False and overallStatus.compare_status("END_PHASE1_SETUP") >= 0:
""" If we are past getting all the information we need, then don't start the database. """
pass
elif len(options.ids) == 0:
if options.phase2 == True:
GpStart.local('gpupgrademirror start database', masterOnly = True)
logging.debug('started database master only with GpStart')
else:
""" We are running in a 4.0 environment, but need to start like we are in a 3.3.x environment. """
env = SetupEnv(options.gphome)
Startup(env = env, utility = True)
logging.debug('started database master only with Startup')
phase1gpstartStatus = "STARTUP_MASTER"
if options.rollback == True and options.phase2 == False:
''' setup for rollback '''
if overallStatus.get_current_status() == (None, None):
logging.warning('There is no upgrade mirror in progress')
rmCmd = RemoveFiles( name = 'gpupgrademirror remove info directory: %s' % (options.info_data_directory)
, directory = options.info_data_directory
, ctxt = LOCAL
, remoteHost = None
)
rmCmd.run(validateAfter = False)
exit(0)
elif overallStatus.compare_status("END_PHASE1_SETUP") < 0:
"""
We have not gotten past the setup phase,
so remove the gpugprademirr info directory and exit.
"""
rmCmd = RemoveFiles( name = 'gpupgrademirror remove info directory: %s' % (options.info_data_directory)
, directory = options.info_data_directory
, ctxt = LOCAL
, remoteHost = None
)
rmCmd.run(validateAfter = False)
exit(0)
elif options.continue_upgrade == True:
pass
elif options.phase2 == False:
if (overallStatus.get_current_status() != (None, None)):
logging.error("Upgrade mirrors already started.")
logging.error("You must either rollback or continue mirror upgrade")
raise ValidationError("Unable to continue")
else:
""" We are in normal phase 1 mode """
overallStatus.create_status_file()
elif options.phase2 == True:
pass
if options.phase2 == True:
"""
Phase 2 of upgrade mirrors
There are a number of special files that we need to copy from the primary in phase 2.
Most of these files are catalog files.
"""
databaseList = []
if options.rollback == True:
raise ValidationError('Rollback is not possible in phase 2 mirror upgrade.')
conn = dbconn.connect( dburl = dbUrl
, utility = True
)
''' Get a list of databases. '''
databaseCursor = dbconn.execSQL(conn, DatabaseRow.query())
for row in databaseCursor:
dbrow = DatabaseRow(row)
databaseList.append(dbrow)
conn.close()
''' Setup gparray object. '''
try:
phase2gparray = GpArray.initFromCatalog(dbUrl, utility = True)
except Exception, e:
logger.warning('Unable to obtain gparray information: ' + str(e))
logger.warning('gpupgradmirror exiting')
exit(1)
''' Setup mirror info object. '''
try:
p2MirrorInfo = Phase1and2MirrorInfo(dbUrl)
except Exception, e:
logger.warning('Unable to obtain mirror information: ' + str(e))
logger.warning('gpupgradmirror exiting')
exit(1)
''' Start all the primary segments '''
StartupPrimaries(phase2gparray)
''' Get list of all segments '''
allSegMirrorInfo = p2MirrorInfo.getAllMirrorInfoList()
''' Go to each segment'''
for mirrorInfo in allSegMirrorInfo:
''' Connect to each segment's database, and make a list of files we will use '''
fileList = []
specialDirList = []
''' always copy the global directory '''
specialDirList.append('global')
''' always copy the pg_xlog directory '''
specialDirList.append('pg_xlog')
for db in databaseList:
if str(db.databaseName) == str("template0"):
''' Special case where we copy the entire database '''
specialDirList.append('base/' + str(db.databaseDirectory))
continue
connectURL = dbconn.DbURL(dbname = db.databaseName, hostname = mirrorInfo.primary_host, port = mirrorInfo.primary_host_port)
connectForDB = | |
<filename>persia/ctx.py<gh_stars>1-10
import os
import io
import socket
from enum import Enum
from queue import Queue
from typing import List, Tuple, Optional, Union
import torch
import persia.env as env
from persia.logger import get_default_logger
from persia.embedding.optim import Optimizer
from persia.embedding import EmbeddingConfig, get_default_embedding_config
from persia.embedding.data import PersiaBatch
from persia.prelude import (
PersiaCommonContext,
PersiaTrainingBatch,
Tensor,
)
from persia.distributed import DistributedBaseOption, get_default_distributed_option
# The innermost active persia context; managed by BaseCtx.__enter__/__exit__.
_CURRENT_CXT = None

_logger = get_default_logger()
def _check_finite(tensors: List[torch.Tensor]) -> bool:
"""Check if all tensors in the input list contain only finite elements.
Arguments:
tensors (List[torch.Tensor]): List of tensor to be checked.
Returns:
bool: ``True`` if all elements in ``tensors`` are finite or None.
"""
return all([torch.isfinite(t).all() if t is not None else True for t in tensors])
def _cast_dlpack2torch_tensor(
    tensor: Tensor, requires_grad: bool = False
) -> torch.Tensor:
    """Convert a DLPack-backed wrapper tensor into a ``torch.Tensor``.

    Arguments:
        Tensor (Tensor): Tensor wrapper that carries its payload as a DLPack
            capsule on ``.dlpack``.
        requires_grad (bool, optional): Whether the resulting tensor should
            track gradients.

    Returns: pytorch tensor
    """
    from torch.utils.dlpack import from_dlpack

    torch_tensor = from_dlpack(tensor.dlpack)
    torch_tensor.requires_grad = requires_grad
    return torch_tensor
class PreprocessMode(Enum):
    r"""Mode of preprocessing.

    Used by ``EmbeddingCtx.prepare_features`` to generate features of
    different datatypes.

    When set to ``TRAIN``, ``prepare_features`` will return torch tensors with
    the ``requires_grad`` attribute set to ``True``.  When set to ``EVAL``, it
    will return torch tensors with ``requires_grad`` set to ``False``.
    ``INFERENCE`` behaves almost identically to ``EVAL``, except that
    ``INFERENCE`` allows ``EmbeddingCtx`` to process a ``PersiaTrainingBatch``
    without a target tensor.
    """

    TRAIN = 1
    EVAL = 2
    INFERENCE = 3
class BaseCtx:
    r"""Common context underlying the other persia contexts (``DataCtx``,
    ``EmbeddingCtx`` and ``TrainCtx``).

    This class should not be instantiated directly.
    """

    def __init__(
        self, threadpool_worker_size: int = 10, device_id: Optional[int] = None
    ):
        """
        Arguments:
            threadpool_worker_size (int): Rpc threadpool worker size.
            device_id (int, optional): The CUDA device to use for this process.
        """
        self.origin_context = None

        # Normalise the device id: anything missing or negative means CPU.
        if device_id is None or device_id < 0:
            device_id = None
        else:
            assert torch.cuda.is_available() and (
                0 <= device_id < torch.cuda.device_count()
            ), f"device_id: {device_id} invalid!"
            torch.cuda.set_device(device_id)
        self.device_id = device_id

        # Initialise PersiaCommonContext with rank/world-size when a
        # distributed launcher provides them, otherwise fall back to the
        # replica information.
        rank = env.get_rank()
        if rank is not None:
            replica_index = rank
            replica_size = env.get_world_size()
        else:
            replica_index = env.get_replica_index()
            replica_size = env.get_replica_size()

        self.common_context = PersiaCommonContext(
            threadpool_worker_size, replica_index, replica_size, device_id
        )
        _logger.info(
            f"init persia context, replica_size: {replica_size} replica_index: {replica_index}"
        )

    def _enter(self):
        """Hook when enter the context"""
        ...

    def _exit(self):
        """Hook when exit the context"""
        ...

    def __enter__(self):
        self._enter()

        # Remember whatever context was active so __exit__ can restore it.
        global _CURRENT_CXT
        self.origin_context = _CURRENT_CXT
        _CURRENT_CXT = self

        return self

    def __exit__(self, exc_type, value, trace):
        self._exit()

        global _CURRENT_CXT
        _CURRENT_CXT = self.origin_context

        if exc_type is not None:
            import traceback

            _logger.error("\n" + traceback.format_exc())
class DataCtx(BaseCtx):
    r"""Data-generator side context: provides the communication channel used
    to ship a ``PersiaBatch`` to the nn worker and embedding worker.

    Example:
        >>> import numpy as np
        >>> ...
        >>> from persia.embedding.data import PersiaBatch, IDTypeFeature
        >>> ...
        >>> def make_simple_loader():
        >>>     yield IDTypeFeature("empty_id_type_feature", [np.array([], np.uint64)])
        >>> ...
        >>> with DataCtx() as ctx:
        >>>     for id_type_feature in make_simple_loader():
        >>>         batch_data = PersiaBatch([id_type_feature], requires_grad=False)
        >>>         ctx.send_data(batch_data)
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super(DataCtx, self).__init__(*args, **kwargs)
        self.prepare()
        _logger.info("Data ctx prepare done.")

    def prepare(self):
        """Do some preparation to init `DataCtx`."""
        self.common_context.init_nats_publisher(None)
        self.common_context.wait_servers_ready()

    def send_data(self, persia_batch: PersiaBatch):
        """Send a ``PersiaBatch`` from the data loader to the embedding worker
        (id-type features) and the nn worker (non-id-type features).

        Arguments:
            persia_batch (PersiaBatch): PersiaBatch that haven't been processed.
        """
        self.common_context.send_id_type_features_to_embedding_worker(persia_batch.data)
        self.common_context.send_non_id_type_features_to_nn_worker(persia_batch.data)
class EmbeddingCtx(BaseCtx):
r"""Provides the embedding-related functionality. EmbeddingCtx can run offline test or online inference
depending on different preprocess_mode. The simplest way to get this context is by using ``persia.ctx.eval_ctx()``
to get the ``EmbeddingCtx`` instance.
Example:
>>> import torch
>>> import numpy as np
>>> ...
>>> from persia.prelude import PersiaBatch
>>> ...
>>> model = torch.nn.Linear(128, 64)
>>> loader = make_dataloader()
>>> embedding_config = EmbeddingConfig()
>>> with EmbeddingCtx(
    ...     PreprocessMode.EVAL,
    ...     model=model,
    ...     embedding_config=embedding_config
... ) as ctx:
>>> for (non_id_type_feature, id_type_features, label) in loader:
>>> persia_batch = PersiaBatch(id_type_features)
>>> persia_batch.add_non_id_type_feature(non_id_type_feature)
>>> persia_batch.add_label(label)
>>> persia_training_batch = ctx.get_embedding_from_data(persia_batch)
>>> (output, label) = ctx.forward(persia_training_batch)
"""
def __init__(
self,
preprocess_mode: PreprocessMode,
model: Optional[torch.nn.Module] = None,
embedding_config: Optional[EmbeddingConfig] = None,
*args,
**kwargs,
):
"""
Arguments:
preprocess_mode (PreprocessMode): Different preprocess mode effect the behavior of ``prepare_features``.
model (torch.nn.Module): Torch model matched with embeddings in this context.
embedding_config (EmbeddingConfig, optional): The embedding configuration that will be sent to the embedding server.
"""
super(EmbeddingCtx, self).__init__(*args, **kwargs)
self.preprocess_mode = preprocess_mode
self.model = model
self.embedding_config = embedding_config or get_default_embedding_config()
self.current_batch = None
    def _enter(self):
        # Push the embedding configuration to the parameter servers as soon as
        # the context is entered.
        if self.embedding_config is not None:
            self.configure_embedding_parameter_servers(self.embedding_config)
def configure_embedding_parameter_servers(
self,
embedding_config: EmbeddingConfig,
):
"""Apply Embedding config to embedding servers.
Arguments:
embedding_config (EmbeddingConfig): The embedding configuration that will be sent to the embedding server.
"""
self.common_context.configure_embedding_parameter_servers(
embedding_config.emb_initialization[0],
embedding_config.emb_initialization[1],
embedding_config.admit_probability,
embedding_config.weight_bound > 0,
embedding_config.weight_bound,
)
def forward(
self, batch: PersiaTrainingBatch
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Call `prepare_features` and then do a forward step of the model in context.
Arguments:
batch (PersiaTrainingBatch): Training data provided by PersiaML upstream including
non_id_type_features ,labels, id_type_feature_embeddings and meta info.
Returns:
the tuple of output data and target data.
"""
assert self.model is not None, "model not found, please init context with model"
non_id_type_tensors, embedding_tensors, labels = self.prepare_features(batch)
output = self.model(non_id_type_tensors, embedding_tensors)
return (output, labels)
    def prepare_features(
        self, batch: PersiaTrainingBatch
    ) -> Tuple[List[torch.Tensor], List[torch.Tensor], Optional[List[torch.Tensor]]]:
        r"""Convert the data in a ``PersiaTrainingBatch`` to ``torch.Tensor``.

        ``PersiaTrainingBatch`` contains non_id_type_features,
        id_type_feature_embeddings and labels, but they cannot be used
        directly in training before converting the ``persia.Tensor`` to
        ``torch.Tensor``.

        Arguments:
            batch (PersiaTrainingBatch): Training data provided by PersiaML
                upstream including non_id_type_features, labels,
                id_type_feature_embeddings and meta info.

        Returns:
            the tuple of non_id_type_features, id_type_feature_embeddings
            and labels.
        """
        # Labels: skipped entirely in INFERENCE mode (no target available).
        if self.preprocess_mode == PreprocessMode.INFERENCE:
            batch.label_torch_tensors = None
        else:
            # pytype: disable=attribute-error
            batch.label_tensors = batch.consume_all_label_tensors()
            # pytype: enable=attribute-error
            batch.label_torch_tensors = [
                _cast_dlpack2torch_tensor(label_tensor)
                for label_tensor in batch.label_tensors
            ]

        is_training = self.preprocess_mode == PreprocessMode.TRAIN  # cache property

        # Dense (non-id-type) features: straight dlpack -> torch conversion.
        # pytype: disable=attribute-error
        batch.non_id_type_feature_tensors = (
            batch.consume_all_non_id_type_feature_tensors()
        )
        # pytype: enable=attribute-error
        batch.non_id_type_feature_torch_tensors = [
            _cast_dlpack2torch_tensor(non_id_type_feature_tensor)
            for non_id_type_feature_tensor in batch.non_id_type_feature_tensors
        ]

        # pytype: disable=attribute-error
        batch.id_type_feature_embedding_tensors = (
            batch.consume_all_id_type_feature_embedding_tensors()
        )
        # pytype: enable=attribute-error

        batch.emb_slots = []  # cache embedding to prevent tensor expired
        id_type_feature_embedding_cache_torch_tensors = (
            []
        )  # cache origin embedding for later backward procedure
        id_type_feature_embedding_torch_tensors = (
            []
        )  # id type tensors for later forward procedure

        for id_type_feature_embedding_tensor in batch.id_type_feature_embedding_tensors:
            if id_type_feature_embedding_tensor.is_raw_embedding():
                # Raw embedding: a distinct-id table plus an index that maps
                # each (sample, slot) position back into that table.
                # no duplicate id in raw_id_tensor
                (
                    raw_embedding,
                    index,
                    non_empty_index,
                    sample_id_num,
                ) = id_type_feature_embedding_tensor.get_raw_embedding()

                batch.emb_slots.append([raw_embedding, index, non_empty_index])
                distinct_id_tensor = _cast_dlpack2torch_tensor(raw_embedding)
                index_tensor = _cast_dlpack2torch_tensor(
                    index
                )  # tensor shape (1, batch_size * sample_fixed_size)
                max_index = index_tensor.max()
                size_of_distinct_id_tensor = distinct_id_tensor.shape[0]
                assert (
                    max_index < size_of_distinct_id_tensor
                ), "raw embedding select index larger than tensor"

                non_empty_index_tensor = _cast_dlpack2torch_tensor(
                    non_empty_index
                )  # tensor shape (-1), variable length

                batch_size = len(sample_id_num)
                dim = distinct_id_tensor.shape[-1]
                sample_fixed_size = index_tensor.shape[-1] // batch_size
                # Gather per-position embeddings from the distinct-id table;
                # gradients flow through this gathered tensor when training.
                index_select_raw_tensor = distinct_id_tensor.index_select(
                    0, index_tensor.view(-1)
                )
                index_select_raw_tensor.requires_grad = is_training

                raw_fixed_size_tensor = index_select_raw_tensor.view(
                    -1, sample_fixed_size, dim
                )
                mask = (
                    index_tensor.view(batch_size, sample_fixed_size, 1) != 0
                ).half()  # generate mask
                raw_fixed_size_tensor_with_mask = torch.cat(
                    [raw_fixed_size_tensor, mask], dim=2
                )
                id_type_feature_embedding_cache_torch_tensors.append(
                    (
                        raw_embedding.name,
                        distinct_id_tensor,
                        index_tensor,
                        non_empty_index_tensor,
                        index_select_raw_tensor,
                    )
                )
                id_type_feature_embedding_torch_tensors.append(
                    raw_fixed_size_tensor_with_mask
                )
            else:
                # Summed embedding: a single pooled tensor per feature.
                embedding = id_type_feature_embedding_tensor.get_sum_embedding()
                batch.emb_slots.append([embedding])
                attention_sum_tensor = _cast_dlpack2torch_tensor(
                    embedding, requires_grad=is_training
                )
                id_type_feature_embedding_torch_tensors.append(attention_sum_tensor)
                id_type_feature_embedding_cache_torch_tensors.append(
                    (embedding.name, None, None, None, attention_sum_tensor)
                )

        batch.id_type_feature_embedding_torch_tensors = (
            id_type_feature_embedding_torch_tensors
        )
        batch.id_type_feature_embedding_cache_torch_tensors = (
            id_type_feature_embedding_cache_torch_tensors
        )
        # Keep a handle to the batch so a later backward step can reach the
        # cached embedding tensors.
        self.current_batch = batch

        return (
            batch.non_id_type_feature_torch_tensors,
            batch.id_type_feature_embedding_torch_tensors,
            batch.label_torch_tensors,
        )
def dump_checkpoint(
    self,
    dst_dir: str,
    dense_filename: str = "dense.pt",
    jit_dense_filename: str = "jit_dense.pt",
    blocking: bool = True,
    with_jit_model: bool = False,
):
    """Write the dense and embedding checkpoints into *dst_dir*.

    Arguments:
        dst_dir (str): Destination directory.
        dense_filename (str, optional): Filename for the dense checkpoint.
        jit_dense_filename (str, optional): Filename for the jit-scripted dense checkpoint.
        blocking (bool, optional): Whether the embedding dump blocks until finished.
        with_jit_model (bool, optional): Also dump a jit-scripted dense checkpoint.
    """
    assert self.model is not None, "model not found, please init context with model"
    if with_jit_model:
        # Jit-scripted copy first, then the regular state dict.
        self.dump_torch_state_dict(self.model, dst_dir, jit_dense_filename, True)
    self.dump_torch_state_dict(self.model, dst_dir, dense_filename)
    self.dump_embedding(dst_dir, blocking=blocking)
def load_checkpoint(
self,
src_dir: str,
map_location: Optional[str] = None,
dense_filename: str = "dense.pt",
blocking: bool = True,
):
"""Load the dense and embedding checkpoint from source directory.
Arguments:
src_dir (str): Source directory.
map_location (str, optional): Load the dense | |
is available immediately, then this function blocks until
a message is ready.
If the remote endpoint closes the connection, then the caller can still
get messages sent prior to closing. Once all pending messages have been
retrieved, additional calls to this method will raise
``ConnectionClosed``. If the local endpoint closes the connection, then
pending messages are discarded and calls to this method will immediately
raise ``ConnectionClosed``.
:rtype: str or bytes
:raises ConnectionClosed: if the connection is closed.
'''
try:
message = await self._recv_channel.receive()
except (trio.ClosedResourceError, trio.EndOfChannel):
raise ConnectionClosed(self._close_reason) from None
return message
async def ping(self, payload=None):
    '''
    Send a WebSocket ping and wait until the matching pong arrives.

    Every in-flight ping needs a distinct payload. Because the remote
    endpoint may answer several pings with a single pong, the order of
    calls to ``ping()`` is tracked and a pong wakes its own ping plus all
    earlier in-flight pings.

    :param payload: The payload to send. If ``None`` then a random 32-bit
        payload is created.
    :type payload: bytes or None
    :raises ConnectionClosed: if connection is closed.
    :raises ValueError: if ``payload`` is identical to another in-flight
        ping.
    '''
    if self._close_reason:
        raise ConnectionClosed(self._close_reason)
    if payload in self._pings:
        raise ValueError('Payload value {} is already in flight.'.format(payload))
    if payload is None:
        payload = struct.pack('!I', random.getrandbits(32))
    pong_received = trio.Event()
    self._pings[payload] = pong_received
    self._wsproto.ping(payload)
    await self._write_pending()
    await pong_received.wait()
async def pong(self, payload=None):
    '''
    Send an unsolicited pong.

    :param payload: The pong's payload. If ``None``, then no payload is
        sent.
    :type payload: bytes or None
    :raises ConnectionClosed: if connection is closed
    '''
    if self._close_reason:
        raise ConnectionClosed(self._close_reason)
    # NOTE(review): this delegates to ``_wsproto.ping`` — confirm the wsproto
    # version in use actually emits a pong frame here rather than a ping.
    self._wsproto.ping(payload)
    await self._write_pending()
async def send_message(self, message):
    '''
    Send one WebSocket message to the remote endpoint.

    :param message: The message to send.
    :type message: str or bytes
    :raises ConnectionClosed: if connection is already closed.
    '''
    if self._close_reason:
        raise ConnectionClosed(self._close_reason)
    self._wsproto.send_data(message)
    await self._write_pending()
def __str__(self):
    ''' Connection ID and type. '''
    return '{}-{}'.format('client' if self.is_client else 'server', self._id)
async def _abort_web_socket(self):
    '''
    Clean up internal state when the stream died outside of our control
    (network failure, or other code closed our stream object) and the
    close handshake therefore cannot be performed.
    '''
    reason = wsframeproto.CloseReason.ABNORMAL_CLOSURE
    if not self._wsproto.closed:
        self._wsproto.close(reason)
    if self._close_reason is None:
        await self._close_web_socket(reason)
    self._reader_running = False
    # No real handshake happened, but wake up any task waiting on this
    # event (e.g. self.aclose()).
    self._close_handshake.set()
async def _accept(self, proposal):
    '''
    Finish the server-side handshake with the attributes of *proposal*
    and return this connection instance.

    :rtype: WebSocketConnection
    '''
    self._subprotocol = proposal.subprotocol
    self._path = proposal.url.path
    self._wsproto.accept(proposal._event, self._subprotocol)
    await self._write_pending()
    self._open_handshake.set()
    return self
async def _close_stream(self):
    ''' Close the underlying TCP stream, tolerating an already-dead peer. '''
    self._reader_running = False
    try:
        await self._stream.aclose()
    except trio.BrokenResourceError:
        # The TCP connection is already gone; nothing left to close.
        pass
async def _close_web_socket(self, code, reason=None):
    '''
    Record the close reason and shut the message channel so that any task
    suspended in get_message() wakes up with a ConnectionClosed exception.
    '''
    self._close_reason = CloseReason(code, reason)
    closed_exc = ConnectionClosed(self._close_reason)
    logger.debug('%s websocket closed %r', self, closed_exc)
    await self._send_channel.aclose()
async def _get_request(self):
    '''
    Wait for and return the proposal for a WebSocket handshake.

    Only valid on server connections, and only callable once.

    :rtype: WebSocketRequest
    '''
    if not self.is_server:
        raise Exception('This method is only valid for server connections.')
    if self._connection_proposal is None:
        raise Exception('No proposal available. Did you call this method'
                        ' multiple times or at the wrong time?')
    proposal = await self._connection_proposal.wait_value()
    # Consume the proposal so a second call fails loudly.
    self._connection_proposal = None
    return proposal
async def _handle_connection_requested_event(self, event):
    '''
    Handle a ConnectionRequested event by publishing a handshake proposal.

    Async only because the event dispatcher awaits every handler; this
    never actually awaits.

    :param event:
    '''
    self._connection_proposal.set_value(WebSocketRequest(self._accept, event))
async def _handle_connection_established_event(self, event):
    '''
    Handle a ConnectionEstablished event: record the negotiated
    subprotocol and release waiters on the open handshake.

    :param event:
    '''
    self._subprotocol = event.subprotocol
    self._open_handshake.set()
async def _handle_connection_closed_event(self, event):
    '''
    Handle a ConnectionClosed event: flush pending data, mark the socket
    closed, and complete the close handshake.

    :param event:
    '''
    await self._write_pending()
    await self._close_web_socket(event.code, event.reason or None)
    self._close_handshake.set()
async def _handle_connection_failed_event(self, event):
    '''
    Handle a ConnectionFailed event: flush pending data, mark the socket
    closed, drop the TCP stream, and release both handshake events.

    :param event:
    '''
    await self._write_pending()
    await self._close_web_socket(event.code, event.reason or None)
    await self._close_stream()
    self._open_handshake.set()
    self._close_handshake.set()
async def _handle_data_received_event(self, event):
    '''
    Handle a BytesReceived or TextReceived event: buffer the fragment
    and, once the message is complete, deliver it on the receive channel.

    :param event:
    '''
    self._message_size += len(event.data)
    self._message_parts.append(event.data)
    if self._message_size > self._max_message_size:
        # Message too large: reject with close code 1009 and stop reading.
        err = 'Exceeded maximum message size: {} bytes'.format(
            self._max_message_size)
        self._message_size = 0
        self._message_parts = []
        self._close_reason = CloseReason(1009, err)
        self._wsproto.close(code=1009, reason=err)
        await self._write_pending()
        await self._recv_channel.aclose()
        self._reader_running = False
    elif event.message_finished:
        joiner = b'' if isinstance(event, BytesReceived) else ''
        msg = joiner.join(self._message_parts)
        self._message_size = 0
        self._message_parts = []
        try:
            await self._send_channel.send(msg)
        except trio.BrokenResourceError:
            # The receive side was closed, probably because somebody called
            # ``aclose()``. Drop the message without aborting the reader
            # task; there's no useful cleanup to do here.
            pass
async def _handle_ping_received_event(self, event):
    '''
    Handle a PingReceived event. Wsproto has already queued a pong frame
    automatically, so this handler just flushes it to the network.

    :param event:
    '''
    logger.debug('%s ping %r', self, event.payload)
    await self._write_pending()
async def _handle_pong_received_event(self, event):
    '''
    Handle a PongReceived event.

    When a pong is received, wake up the ping waiting on this payload as
    well as any earlier in-flight pings the remote endpoint may have
    skipped (a peer receiving multiple pings is allowed to answer with a
    single pong).

    This function is async even though it never awaits, because the other
    event handlers are async, too, and event dispatch would be more
    complicated if some handlers were sync.

    :param event:
    '''
    payload = bytes(event.payload)
    # Fix: the old code did ``event = self._pings[payload]`` purely as a
    # membership probe, shadowing the ``event`` parameter with a value it
    # then discarded; use an explicit membership test instead.
    if payload not in self._pings:
        # A pong that doesn't match any in-flight ping. Nothing we can do
        # with it, so ignore it.
        return
    while self._pings:
        # Pop in FIFO order so skipped pings are released first.
        key, ping_waiter = self._pings.popitem(0)
        skipped = ' [skipped] ' if payload != key else ' '
        logger.debug('%s pong%s%r', self, skipped, key)
        ping_waiter.set()
        if payload == key:
            break
async def _reader_task(self):
    ''' A background task that reads network data and generates events.

    Loops until the connection is torn down: dispatches every queued
    wsproto event to its handler, then reads more bytes from the stream
    and feeds them to wsproto.
    '''
    # Dispatch table: wsproto event class name -> async handler.
    handlers = {
        'ConnectionRequested': self._handle_connection_requested_event,
        'ConnectionFailed': self._handle_connection_failed_event,
        'ConnectionEstablished': self._handle_connection_established_event,
        'ConnectionClosed': self._handle_connection_closed_event,
        'BytesReceived': self._handle_data_received_event,
        'TextReceived': self._handle_data_received_event,
        'PingReceived': self._handle_ping_received_event,
        'PongReceived': self._handle_pong_received_event,
    }
    if self.is_client:
        # Clients need to initiate the negotiation:
        await self._write_pending()
    while self._reader_running:
        # Process events.
        for event in self._wsproto.events():
            event_type = type(event).__name__
            try:
                handler = handlers[event_type]
                logger.debug('%s received event: %s', self,
                    event_type)
                await handler(event)
            except KeyError:
                # Unknown event type: log and keep the reader alive.
                logger.warning('%s received unknown event type: "%s"', self,
                    event_type)
        # Get network data.
        try:
            data = await self._stream.receive_some(RECEIVE_BYTES)
        except (trio.BrokenResourceError, trio.ClosedResourceError):
            await self._abort_web_socket()
            break
        if len(data) == 0:
            logger.debug('%s received zero bytes (connection closed)',
                self)
            # If TCP closed before WebSocket, then record it as an abnormal
            # closure.
            if not self._wsproto.closed:
                await self._abort_web_socket()
            break
        else:
            logger.debug('%s received %d bytes', self, len(data))
            # Don't feed data after close; wsproto would reject it.
            if not self._wsproto.closed:
                self._wsproto.receive_bytes(data)
    logger.debug('%s reader task finished', self)
async def _write_pending(self):
    ''' Flush any protocol bytes queued by wsproto to the network socket. '''
    data = self._wsproto.bytes_to_send()
    if not data:
        logger.debug('%s no pending data to send', self)
        return
    # The reader task and one or more writers might try to send at the
    # same time, so access to the stream is serialized with a lock.
    async with self._stream_lock:
        logger.debug('%s sending %d bytes', self, len(data))
        try:
            await self._stream.send_all(data)
        except (trio.BrokenResourceError, trio.ClosedResourceError):
            await self._abort_web_socket()
            raise ConnectionClosed(self._close_reason) from None
class ListenPort:
    ''' A single listening address/port pair, plus its SSL flag. '''

    def __init__(self, address, port, is_ssl):
        self.address = ip_address(address)
        self.port = port
        self.is_ssl = is_ssl

    def __str__(self):
        ''' Return a compact representation, like 127.0.0.1:80 or [::1]:80. '''
        scheme = 'wss' if self.is_ssl else 'ws'
        if self.address.version == 4:
            host = str(self.address)
        else:
            # IPv6 literals are bracketed in URLs.
            host = '[{}]'.format(self.address)
        return '{}://{}:{}'.format(scheme, host, self.port)
class WebSocketServer:
'''
WebSocket server.
The server class handles incoming connections on one or more ``Listener``
objects. For each incoming connection, it creates a ``WebSocketConnection``
instance and starts some background tasks,
| |
'fast_component_update',
callbacks=[config.INSTALLATION_CONFIG.IsAlternateReleaseChannel])
class _SectionTest(_Section):
    """Properties of the 'test' section."""

    def __init__(self):
        super(_SectionTest, self).__init__('test')
class _SectionDevshell(_Section):
    """Properties of the 'devshell' section."""

    def __init__(self):
        super(_SectionDevshell, self).__init__('devshell')
        # Hidden image properties fall back to the constants shipped with
        # the SDK when not configured.
        self.image = self._Add(
            'image', hidden=True,
            callbacks=[lambda: const_lib.DEFAULT_DEVSHELL_IMAGE])
        self.metadata_image = self._Add(
            'metadata_image', hidden=True,
            callbacks=[lambda: const_lib.METADATA_IMAGE])
class _SectionEndpoints(_Section):
    """Properties of the 'endpoints' section."""

    def _AddEndpointFor(self, api_name):
        # Each endpoint override is a hidden property named after its API.
        setattr(self, api_name, self._Add(api_name, hidden=True))

    def __init__(self):
        super(_SectionEndpoints, self).__init__('endpoints')
        for api in ('compute', 'container', 'dataflow', 'dns', 'testing',
                    'toolresults'):
            self._AddEndpointFor(api)
class _Property(object):
    """A single named property resolved from the properties file.

    Attributes:
      section: str, The name of the section the property appears in in the
        file.
      name: str, The name of the property.
      hidden: bool, True to hide this property from display.
      callbacks: [func], Functions called in order when no value is found
        elsewhere.
      validator: func(str), Called on the value when .Set()'d or .Get()'d.
        For valid values the function does nothing; for invalid values it
        raises InvalidValueError with an explanation.
    """

    def __init__(self, section, name, hidden=False, callbacks=None,
                 validator=None):
        self.__section = section
        self.__name = name
        self.__hidden = hidden
        self.__callbacks = callbacks or []
        self.__validator = validator

    @property
    def section(self):
        return self.__section

    @property
    def name(self):
        return self.__name

    @property
    def is_hidden(self):
        return self.__hidden

    @property
    def callbacks(self):
        return self.__callbacks

    def Validate(self, value):
        """Test to see if the value is valid for this property.

        Args:
          value: str, The value of the property to be validated.

        Raises:
          InvalidValueError: If the value was invalid according to the
            property's validator.
        """
        if self.__validator:
            self.__validator(value)

    def _Fetch(self, getter, required, validate):
        # Shared fetch-then-validate path used by Get/GetBool/GetInt.
        value = getter(self, _PropertiesFile.Load(), required)
        if validate:
            self.Validate(value)
        return value

    def Get(self, required=False, validate=True):
        """Gets the string value for this property.

        Looks first in the environment, then in the workspace config, then
        in the global config, and finally at callbacks.

        Args:
          required: bool, True to raise an exception if the property is not
            set.
          validate: bool, Whether or not to run the fetched value through
            the validation function.

        Returns:
          str, The value for this property.
        """
        return self._Fetch(_GetProperty, required, validate)

    def GetBool(self, required=False, validate=True):
        """Gets the boolean value for this property.

        Resolution order is the same as Get().

        Args:
          required: bool, True to raise an exception if the property is not
            set.
          validate: bool, Whether or not to run the fetched value through
            the validation function.

        Returns:
          bool, The boolean value for this property, or None if it is not
          set.
        """
        return self._Fetch(_GetBoolProperty, required, validate)

    def GetInt(self, required=False, validate=True):
        """Gets the integer value for this property.

        Resolution order is the same as Get().

        Args:
          required: bool, True to raise an exception if the property is not
            set.
          validate: bool, Whether or not to run the fetched value through
            the validation function.

        Returns:
          int, The integer value for this property.
        """
        return self._Fetch(_GetIntProperty, required, validate)

    def Set(self, value):
        """Sets the value for this property as an environment variable.

        Args:
          value: str/bool, The proposed value for this property. If None,
            it is removed from the environment.
        """
        self.Validate(value)
        env_var = self.EnvironmentName()
        if value is None:
            os.environ.pop(env_var, None)
        else:
            os.environ[env_var] = str(value)

    def EnvironmentName(self):
        """Get the name of the environment variable for this property.

        Returns:
          str, The name of the correct environment variable.
        """
        return 'CLOUDSDK_{section}_{name}'.format(
            section=self.__section.upper(),
            name=self.__name.upper(),
        )
VALUES = _Sections()
class Scope(object):
    """An enum class for the different types of property files that can be
    used.
    """

    _SCOPE_TUPLE = collections.namedtuple('_ScopeTuple',
                                          ['id', 'description', 'get_file'])

    INSTALLATION = _SCOPE_TUPLE(
        id='installation',
        description='The installation based configuration file applies to all '
        'users on the system that use this version of the Cloud SDK. If the SDK '
        'was installed by an administrator, you will need administrator rights '
        'to make changes to this file.',
        get_file=lambda: config.Paths().installation_properties_path)
    USER = _SCOPE_TUPLE(
        id='user',
        description='The user based configuration file applies only to the '
        'current user of the system. It will override any values from the '
        'installation configuration.',
        get_file=lambda: config.Paths().user_properties_path)
    WORKSPACE = _SCOPE_TUPLE(
        id='workspace',
        description='The workspace based configuration file is based on your '
        'current working directory. You can set project specific configuration '
        'here that will only take effect when working within that project\'s '
        'directory. You cannot set this value if you are not currently within a '
        'gcloud workspace. This will override all values from any other '
        'configuration files.',
        get_file=lambda: config.Paths().workspace_properties_path)

    # Ordered by precedence: workspace overrides user overrides installation.
    _ALL = [WORKSPACE, USER, INSTALLATION]
    _ALL_SCOPE_NAMES = [s.id for s in _ALL]

    @staticmethod
    def AllValues():
        """Gets all possible enum values.

        Returns:
          [Scope], All the enum values.
        """
        return list(Scope._ALL)

    @staticmethod
    def AllScopeNames():
        """Gets the ids of all possible enum values."""
        return list(Scope._ALL_SCOPE_NAMES)

    @staticmethod
    def FromId(scope_id):
        """Gets the enum corresponding to the given scope id.

        Args:
          scope_id: str, The scope id to parse.

        Raises:
          InvalidScopeValueError: If the given value cannot be parsed.

        Returns:
          One of the Scope constants, or None if the input is None.
        """
        if not scope_id:
            return None
        found = next((s for s in Scope._ALL if s.id == scope_id), None)
        if found is None:
            raise InvalidScopeValueError(scope_id)
        return found

    @staticmethod
    def GetHelpString():
        """Returns help text describing every scope."""
        entries = ['*{0}*::: {1}'.format(s.id, s.description)
                   for s in Scope.AllValues()]
        return '\n\n'.join(entries)
def PersistProperty(prop, value, scope=None, properties_file=None):
    """Sets the given property in the properties file.

    This function should not generally be used as part of normal program
    execution. The property files are user editable config files that they
    should control. This is mostly for initial setup of properties that get
    set during SDK installation.

    Args:
      prop: properties.Property, The property to set.
      value: str, The value to set for the property. If None, the property
        is removed.
      scope: Scope, The config location to set the property in. If given,
        only this location will be updated and it is an error if that
        location does not exist. If not given, it will attempt to update
        the property in first the workspace config (if it exists) but then
        fall back to user level config. It will never fall back to
        installation properties; you must use that scope explicitly to set
        that value.
      properties_file: str, Path to an explicit properties file to use
        (instead of one of the known locations). It is an error to specify
        a scope and an explicit file.

    Raises:
      ValueError: If you give both a scope and a properties file.
      MissingConfigLocationError: If there is not file for the given scope.
    """
    prop.Validate(value)
    if scope and properties_file:
        raise ValueError('You cannot provide both a scope and a specific properties'
                         ' file.')
    if not properties_file:
        if scope:
            # Installation config is shared; make sure we may write to it.
            if scope == Scope.INSTALLATION:
                config.EnsureSDKWriteAccess()
            properties_file = scope.get_file()
            if not properties_file:
                raise MissingConfigLocationError(scope)
        else:
            # Prefer workspace config, fall back to user config.
            properties_file = Scope.WORKSPACE.get_file() or Scope.USER.get_file()
            if not properties_file:
                raise MissingConfigLocationError(Scope.USER)
    parsed_config = ConfigParser.ConfigParser()
    parsed_config.read(properties_file)
    if not parsed_config.has_section(prop.section):
        if value is None:
            # Nothing to remove; don't create an empty section.
            return
        parsed_config.add_section(prop.section)
    if value is None:
        parsed_config.remove_option(prop.section, prop.name)
    else:
        parsed_config.set(prop.section, prop.name, str(value))
    files.MakeDir(os.path.dirname(properties_file))
    with open(properties_file, 'w') as fp:
        parsed_config.write(fp)
    # Drop any cached view of the properties so the change is visible.
    _PropertiesFile.Invalidate()
def _GetProperty(prop, properties_file, required):
"""Gets the given property.
If the property has a designated command line argument and args is provided,
check args for the value first. If the corresponding environment variable is
set, use that second. If still nothing, use the callbacks.
Args:
prop: properties.Property, The property to get.
properties_file: _PropertiesFile, An already loaded properties files to use.
required: bool, True to raise an exception if the property is not set.
Raises:
RequiredPropertyError: If the property was required but unset.
Returns:
str, The value of the property, or None if it is not set.
"""
flag_to_use = None
invocation_stack = VALUES.GetInvocationStack()
if len(invocation_stack) > 1:
# First item is the blank stack entry, second is from the user command args.
first_invocation = invocation_stack[1]
if prop in first_invocation:
flag_to_use = first_invocation.get(prop).flag
value = _GetPropertyWithoutCallback(prop, properties_file)
if value is not None:
return str(value)
# Still nothing, fall back to the callbacks.
for callback in prop.callbacks:
value = callback()
if value is not None:
return str(value)
# Not | |
<filename>kubelet/datadog_checks/kubelet/kubelet.py
# (C) Datadog, Inc. 2016-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
import logging
import re
from urlparse import urljoin
# 3p
import requests
# project
from datadog_checks.checks import AgentCheck
from datadog_checks.errors import CheckException
from datadog_checks.checks.prometheus import PrometheusCheck
from kubeutil import get_connection_info
from tagger import get_tags
# Prometheus metric types this check knows how to process.
METRIC_TYPES = ['counter', 'gauge', 'summary']
# container-specific metrics should have all these labels
CONTAINER_LABELS = ['container_name', 'namespace', 'pod_name', 'name', 'image', 'id']
# Kubelet HTTP API paths queried by the check.
KUBELET_HEALTH_PATH = '/healthz'
NODE_SPEC_PATH = '/spec'
POD_LIST_PATH = '/pods/'
CADVISOR_METRICS_PATH = '/metrics/cadvisor'
# Suffixes per
# https://github.com/kubernetes/kubernetes/blob/8fd414537b5143ab039cb910590237cabf4af783/pkg/api/resource/suffix.go#L108
# Multipliers for Kubernetes resource-quantity suffixes (decimal and binary).
FACTORS = {
    'n': float(1)/(1000*1000*1000),
    'u': float(1)/(1000*1000),
    'm': float(1)/1000,
    'k': 1000,
    'M': 1000*1000,
    'G': 1000*1000*1000,
    'T': 1000*1000*1000*1000,
    'P': 1000*1000*1000*1000*1000,
    'E': 1000*1000*1000*1000*1000*1000,
    'Ki': 1024,
    'Mi': 1024*1024,
    'Gi': 1024*1024*1024,
    'Ti': 1024*1024*1024*1024,
    'Pi': 1024*1024*1024*1024*1024,
    'Ei': 1024*1024*1024*1024*1024*1024,
}
log = logging.getLogger('collector')
class KubeletCheck(PrometheusCheck):
"""
Collect container metrics from Kubelet.
"""
def __init__(self, name, init_config, agentConfig, instances=None):
    """Validate the (single) configured instance and declare which
    prometheus metrics are remapped or ignored."""
    super(KubeletCheck, self).__init__(name, init_config, agentConfig, instances)
    self.NAMESPACE = 'kubernetes'
    # There is only one kubelet per node, so only one instance makes sense.
    if instances is not None and len(instances) > 1:
        raise Exception('Kubelet check only supports one configured instance.')
    inst = instances[0] if instances else None
    # NOTE(review): if no instances are given, inst is None and the .get
    # below would raise — presumably the agent always supplies one instance;
    # confirm against the caller.
    self.kube_node_labels = inst.get('node_labels_to_host_tags', {})
    # Prometheus metric name -> datadog metric name.
    self.metrics_mapper = {
        'kubelet_runtime_operations_errors': 'kubelet.runtime.errors',
    }
    # cAdvisor series we deliberately do not report.
    self.ignore_metrics = [
        'container_cpu_cfs_periods_total',
        'container_cpu_cfs_throttled_periods_total',
        'container_cpu_cfs_throttled_seconds_total',
        'container_cpu_load_average_10s',
        'container_cpu_system_seconds_total',
        'container_cpu_user_seconds_total',
        'container_fs_inodes_free',
        'container_fs_inodes_total',
        'container_fs_io_current',
        'container_fs_io_time_seconds_total',
        'container_fs_io_time_weighted_seconds_total',
        'container_fs_read_seconds_total',
        'container_fs_reads_merged_total',
        'container_fs_reads_total',
        'container_fs_sector_reads_total',
        'container_fs_sector_writes_total',
        'container_fs_write_seconds_total',
        'container_fs_writes_merged_total',
        'container_fs_writes_total',
        'container_last_seen',
        'container_start_time_seconds',
        'container_spec_memory_swap_limit_bytes',
        'container_scrape_error'
    ]
    # these are filled by container_<metric-name>_usage_<metric-unit>
    # and container_<metric-name>_limit_<metric-unit> reads it to compute <metric-name>usage_pct
    self.fs_usage_bytes = {}
    self.mem_usage_bytes = {}
def check(self, instance):
    """Entry point: discover the kubelet endpoint, run the health service
    check, and report node, pod, and container metrics."""
    self.kubelet_conn_info = get_connection_info()
    endpoint = self.kubelet_conn_info.get('url')
    if endpoint is None:
        raise CheckException("Unable to find metrics_endpoint in config "
                             "file or detect the kubelet URL automatically.")
    # Derive all kubelet URLs from the discovered endpoint; the metrics
    # endpoint can be overridden per instance.
    self.metrics_url = instance.get('metrics_endpoint') or urljoin(endpoint, CADVISOR_METRICS_PATH)
    self.kube_health_url = urljoin(endpoint, KUBELET_HEALTH_PATH)
    self.node_spec_url = urljoin(endpoint, NODE_SPEC_PATH)
    self.pod_list_url = urljoin(endpoint, POD_LIST_PATH)
    # By default we send the buckets.
    send_buckets = instance.get('send_histograms_buckets', True)
    if send_buckets is not None and str(send_buckets).lower() == 'false':
        send_buckets = False
    else:
        send_buckets = True
    try:
        self.pod_list = self.retrieve_pod_list()
    except Exception:
        # NOTE(review): the reporters below index pod_list['items'] — a None
        # pod list would make them raise; confirm this fallback is intended.
        self.pod_list = None
    instance_tags = instance.get('tags', [])
    self._perform_kubelet_check(instance_tags)
    self._report_node_metrics(instance_tags)
    self._report_pods_running(self.pod_list, instance_tags)
    self._report_container_spec_metrics(self.pod_list, instance_tags)
    self.process(self.metrics_url, send_histograms_buckets=send_buckets, instance=instance)
def perform_kubelet_query(self, url, verbose=True, timeout=10):
    """
    Perform and return a GET request against kubelet. Support auth and TLS validation.
    """
    headers = None
    client_crt = self.kubelet_conn_info.get('client_crt')
    client_key = self.kubelet_conn_info.get('client_key')
    cert = (client_crt, client_key) if client_crt and client_key else None
    if cert:
        self.ssl_cert = cert  # prometheus check setting
    if self.kubelet_conn_info.get('verify_tls') == 'false':
        verify = False
    else:
        verify = self.kubelet_conn_info.get('ca_cert')
    self.ssl_ca_cert = verify  # prometheus check setting
    # if cert-based auth is enabled, don't use the token.
    if not cert and url.lower().startswith('https') and 'token' in self.kubelet_conn_info:
        headers = {'Authorization': 'Bearer {}'.format(self.kubelet_conn_info['token'])}
    self.extra_headers = headers  # prometheus check setting
    return requests.get(url, timeout=timeout, verify=verify,
                        cert=cert, headers=headers, params={'verbose': verbose})
def retrieve_pod_list(self):
    # Decoded JSON body of the kubelet /pods endpoint.
    return self.perform_kubelet_query(self.pod_list_url).json()
def retrieve_node_spec(self):
    """
    Fetch and return the node spec (decoded JSON) from the kubelet.
    """
    # TODO: report allocatable for cpu, mem, and pod capacity
    # if we can get it locally or thru the DCA instead of the /nodes endpoint directly
    return self.perform_kubelet_query(self.node_spec_url).json()
def _report_node_metrics(self, instance_tags):
    # Report machine-level capacity gauges taken from the node spec.
    spec = self.retrieve_node_spec()
    self.gauge(self.NAMESPACE + '.cpu.capacity',
               float(spec.get('num_cores', 0)), instance_tags)
    self.gauge(self.NAMESPACE + '.memory.capacity',
               float(spec.get('memory_capacity', 0)), instance_tags)
def _perform_kubelet_check(self, instance_tags):
    """Runs local service checks against the kubelet /healthz endpoint."""
    service_check_base = self.NAMESPACE + '.kubelet.check'
    is_ok = True
    url = self.kube_health_url
    try:
        response = self.perform_kubelet_query(url)
        for line in response.iter_lines():
            # avoid noise; this check is expected to fail since we override the container hostname
            if line.find('hostname') != -1:
                continue
            matches = re.match(r'\[(.)\]([^\s]+) (.*)?', line)
            if not matches or len(matches.groups()) < 2:
                continue
            status = matches.group(1)
            check_name = service_check_base + '.' + matches.group(2)
            if status == '+':
                self.service_check(check_name, AgentCheck.OK, tags=instance_tags)
            else:
                self.service_check(check_name, AgentCheck.CRITICAL, tags=instance_tags)
                is_ok = False
    except Exception as e:
        self.log.warning('kubelet check %s failed: %s' % (url, str(e)))
        self.service_check(service_check_base, AgentCheck.CRITICAL,
                           message='Kubelet check %s failed: %s' % (url, str(e)), tags=instance_tags)
    else:
        overall = AgentCheck.OK if is_ok else AgentCheck.CRITICAL
        self.service_check(service_check_base, overall, tags=instance_tags)
def _report_pods_running(self, pods, instance_tags):
    """
    Reports the number of running pods on this node
    tagged by service and creator.
    """
    tag_counter = {}
    for pod in pods['items']:
        pod_id = pod.get('metadata', {}).get('uid')
        tags = get_tags('kubernetes_pod://%s' % pod_id, False) or None
        if not tags:
            continue
        hash_tags = tuple(sorted(tags))
        # Fix: the old `hash_tags in tag_counter.keys()` built a fresh key
        # list for every pod on Python 2 (O(n) per lookup); dict.get is O(1).
        tag_counter[hash_tags] = tag_counter.get(hash_tags, 0) + 1
    for tags, count in tag_counter.iteritems():
        self.gauge(self.NAMESPACE + '.pods.running', count, list(tags))
def _report_container_spec_metrics(self, pod_list, instance_tags):
    """Reports pod requests & limits by looking at pod specs."""
    for pod in pod_list['items']:
        pod_name = pod.get('metadata', {}).get('name')
        if not pod_name:
            continue
        for ctr in pod['spec']['containers']:
            if not ctr.get('resources'):
                continue
            c_name = ctr.get('name', '')
            cid = None
            # Match the spec container to its runtime status to recover the
            # container id (needed to tag the metric).
            for ctr_status in pod['status'].get('containerStatuses', []):
                if ctr_status.get('name') == c_name:
                    # it is already prefixed with 'docker://'
                    cid = ctr_status.get('containerID')
                    break
            if not cid:
                continue
            tags = get_tags('%s' % cid, True)
            try:
                for resource, value_str in ctr.get('resources', {}).get('requests', {}).iteritems():
                    value = self.parse_quantity(value_str)
                    self.gauge('{}.{}.requests'.format(self.NAMESPACE, resource), value, tags)
            except (KeyError, AttributeError) as e:
                self.log.debug("Unable to retrieve container requests for %s: %s", c_name, e)
            try:
                for resource, value_str in ctr.get('resources', {}).get('limits', {}).iteritems():
                    value = self.parse_quantity(value_str)
                    self.gauge('{}.{}.limits'.format(self.NAMESPACE, resource), value, tags)
            except (KeyError, AttributeError) as e:
                self.log.debug("Unable to retrieve container limits for %s: %s", c_name, e)
@staticmethod
def parse_quantity(s):
    # Split a Kubernetes quantity such as '250m' or '64Mi' into its numeric
    # part and suffix, then scale by the suffix factor (1 if unknown).
    digits = []
    suffix = []
    for ch in s:
        if ch.isdigit() or ch == '.':
            digits.append(ch)
        else:
            suffix.append(ch)
    return float(''.join(digits)) * FACTORS.get(''.join(suffix), 1)
def _is_container_metric(self, metric):
    """
    Return whether a metric is about a container or not.
    It can be about pods, or even higher levels in the cgroup hierarchy
    and we don't want to report on that.
    """
    present = [ml.name for ml in metric.label]
    for lbl in CONTAINER_LABELS:
        if lbl == 'container_name':
            # An empty or 'POD' container_name marks a pod-level series.
            for ml in metric.label:
                if ml.name == lbl and (ml.value == '' or ml.value == 'POD'):
                    return False
        if lbl not in present:
            return False
    return True
def _is_pod_metric(self, metric):
    """
    Return whether a metric is about a pod or not.
    It can be about containers, pods, or higher levels in the cgroup hierarchy
    and we don't want to report on that.
    """
    for ml in metric.label:
        if ml.name == 'container_name' and ml.value == 'POD':
            return True
        # container_cpu_usage_seconds_total has an id label that is a cgroup path
        # eg: /kubepods/burstable/pod531c80d9-9fc4-11e7-ba8b-42010af002bb
        # FIXME: this was needed because of a bug:
        # https://github.com/kubernetes/kubernetes/pull/51473
        # starting from k8s 1.8 we can remove this
        if ml.name == 'id' and ml.value.split('/')[-1].startswith('pod'):
            return True
    return False
def _get_container_label(self, labels, l_name):
for label in labels:
if label.name == l_name:
return label.value
def _get_container_id(self, labels):
"""
Should only be called on a container-scoped metric
as it doesn't do any validation of the container id.
It simply returns the last part of the cgroup hierarchy.
"""
for label in labels:
if label.name == 'id':
return label.value.split('/')[-1]
@staticmethod
def _get_pod_uid(labels):
for label in labels:
if label.name == 'id':
for part in label.value.split('/'):
if part.startswith('pod'):
return part[3:]
def _is_pod_host_networked(self, pod_uid):
for pod in self.pod_list['items']:
if pod.get('metadata', {}).get('uid', '') == pod_uid:
return pod.get('spec', {}).get('hostNetwork', False)
return False
def _get_pod_by_metric_label(self, labels):
"""
:param labels: metric labels: iterable
:return:
"""
pod_uid = self._get_pod_uid(labels)
for pod in self.pod_list["items"]:
try:
if pod["metadata"]["uid"] == pod_uid:
return pod
except KeyError:
continue
return None
@staticmethod
def _is_static_pending_pod(pod):
"""
Return if the pod is a static pending pod
See https://github.com/kubernetes/kubernetes/pull/57106
:param pod: dict
:return: bool
"""
try:
if pod["metadata"]["annotations"]["kubernetes.io/config.source"] == "api":
return False
pod_status = pod["status"]
if pod_status["phase"] != "Pending":
return False
return "containerStatuses" not in pod_status
except KeyError:
return False
@staticmethod
def _get_tags_from_labels(labels):
"""
Get extra tags from metric labels
label {
name: "container_name"
value: "kube-proxy"
}
:param labels: metric labels: iterable
:return: list
"""
tags = []
for label in labels:
if label.name == "container_name":
tags.append("kube_container_name:%s" % label.value)
return tags
return tags
def _process_container_rate(self, metric_name, message):
"""Takes a simple metric about a container, reports it as a rate."""
if message.type >= len(METRIC_TYPES):
self.log.error("Metric type %s unsupported for metric %s" % (message.type, message.name))
return
for metric in message.metric:
if self._is_container_metric(metric):
c_id = self._get_container_id(metric.label)
tags = get_tags('docker://%s' % c_id, True)
# FIXME we are forced to do that because the Kubelet PodList isn't updated
# for static pods, see https://github.com/kubernetes/kubernetes/pull/59948
pod = self._get_pod_by_metric_label(metric.label)
if pod is not None and self._is_static_pending_pod(pod):
tags += get_tags('kubernetes_pod://%s' | |
<gh_stars>0
#!/usr/bin/env python
# Copyright (c) 2014 Johns Hopkins University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
# - Neither the name of the copyright holders nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import matplotlib.pyplot as plt
import networkx as nx
import sqlite3
import sys
import random
from math import log
import pdb
class TestbedMap(object):
    """Floorplan map of testbed nodes rendered as a networkx graph over a
    background image.

    Node positions come from an NSLU-location file plus a node->NSLU port
    mapping file. Subclasses attach a per-node numeric attribute (depth,
    degree, ...) via setAttr/setColAttr and the nodes are colored by it.
    """

    def __init__(self, nsluFile='config/nslu_locations.txt',
                 nodeFile='config/node_map.txt',
                 mapFile='static/floorplan.50.png',
                 scriptDir=None):
        """Set up a floorplan map of testbed nodes.

        nsluFile:  whitespace-separated rows of "nslu port x y".
        nodeFile:  whitespace-separated rows of "nslu port nodeId".
        mapFile:   background floorplan image.
        scriptDir: base directory of the data files; defaults to the
                   directory containing the running script.
        """
        if not scriptDir:
            scriptDir = '/'.join(sys.argv[0].split('/')[:-1])
            scriptDir = scriptDir + '/'
        # Background image.
        self.im = plt.imread(scriptDir + mapFile)
        # Read NSLU locations. BUG FIX: the files were previously opened
        # and never closed; 'with' guarantees the handles are released.
        nslus = {}
        with open(scriptDir + nsluFile) as f:
            for l in f.readlines():
                if not l.startswith("#"):
                    [nslu, port, x, y] = [int(v) for v in l.split()]
                    if nslu not in nslus:
                        nslus[nslu] = {}
                    nslus[nslu][port] = (x, y)
        # Read node->NSLU mapping and derive node positions.
        nodes = {}
        with open(scriptDir + nodeFile) as f:
            for l in f.readlines():
                if not l.startswith("#"):
                    [nslu, port, nodeId] = [int(v) for v in l.split()]
                    if nslu in nslus:
                        nodes[nodeId] = {'pos': nslus[nslu][port], 'nslu': nslu}
                    else:
                        # TODO: log missing node error
                        pass
        self.G = nx.DiGraph()
        self.G.add_nodes_from([(n, nodes[n]) for n in nodes])
        self.labelMap = None

    def drawCMapNodes(self, node_size, palette):
        """Draw nodes, colored by self.colAttr through the given palette.

        Nodes are grouped by their 'shape' attribute: circles (the
        default), boxes, and stars; boxes and stars are drawn at twice
        node_size. Other shapes ('empty', 'none') are not drawn at all.
        """
        byShape = {'circle': [], 'box': [], 'star': []}
        for (n, m) in self.G.nodes(data=True):
            shape = m.get('shape', 'circle')
            if shape in byShape:
                byShape[shape].append(n)
        self._drawShapeGroup(byShape['circle'], node_size, palette, None)
        self._drawShapeGroup(byShape['box'], 2 * node_size, palette, 's')
        self._drawShapeGroup(byShape['star'], 2 * node_size, palette, (5, 1))

    def _drawShapeGroup(self, members, size, palette, shape):
        """Draw one same-shaped group of nodes; no-op when members is empty.

        shape is a matplotlib marker spec, or None for the default marker.
        """
        if not members:
            return
        ordered = [n for n in reversed(members)]
        extra = {} if shape is None else {'node_shape': shape}
        nx.draw_networkx_nodes(self.G,
                               pos=nx.get_node_attributes(self.G, 'pos'),
                               node_size=size,
                               nodelist=ordered,
                               node_color=[self.G.node[n][self.colAttr]
                                           for n in ordered],
                               cmap=palette,
                               linewidths=[self.G.node[n].get('linewidth', 1.0)
                                           for n in ordered],
                               **extra)

    def drawEdges(self, alpha=0.2):
        """Draw the graph's edges.

        BUG FIX: alpha was previously hard-coded to 0.2 in the call,
        silently ignoring this parameter.
        """
        nx.draw_networkx_edges(self.G,
                               pos=nx.get_node_attributes(self.G, 'pos'),
                               edgelist=self.G.edges(),
                               arrows=False,
                               alpha=alpha)

    def drawLabels(self, labelAll=True):
        """Draw node labels (self.labelMap; node ids when it is None).

        When labelAll is False, only nodes with shape 'empty' (used as
        legend text in subclasses) are labelled.
        """
        if labelAll:
            nx.draw_networkx_labels(self.G,
                                    pos=nx.get_node_attributes(self.G, 'pos'),
                                    font_size=10,
                                    labels=self.labelMap)
        else:
            emptyNodes = [n for (n, m) in self.G.nodes(data=True)
                          if m.get('shape', 'circle') == 'empty']
            if emptyNodes:
                lm = dict((n, self.labelMap[n]) for n in emptyNodes)
                nx.draw_networkx_labels(self.G,
                                        pos=nx.get_node_attributes(self.G, 'pos'),
                                        font_size=10,
                                        labels=lm)

    def setAttr(self, attr, attrMap, defaultVal=None):
        """Attach attrMap to the graph as node attribute attr.

        NOTE: when defaultVal is given, attrMap is filled in (mutated in
        place) so that every node has a value; callers may observe the
        completed map afterwards, so this mutation is kept.
        """
        if defaultVal is not None:
            for n in self.G.nodes():
                if n not in attrMap:
                    attrMap[n] = defaultVal
        nx.set_node_attributes(self.G, attr, attrMap)

    def setColAttr(self, colAttr):
        """Select which node attribute drives the color map."""
        self.colAttr = colAttr

    def setLabels(self, labelMap):
        """Set the node-id -> label-text map used by drawLabels."""
        self.labelMap = labelMap

    def addOutlined(self, nodeId, width):
        """Outline a single node by increasing its border line width."""
        self.G.node[nodeId]['linewidth'] = width

    def draw(self, outFile=None, node_size=200, palette=plt.cm.jet,
             labelAll=True, bgImage=True):
        """Render the map; show interactively unless outFile is given."""
        if bgImage:
            implot = plt.imshow(self.im)
        self.drawCMapNodes(node_size, palette)
        self.drawEdges()
        self.drawLabels(labelAll)
        self.postDraw()
        if not outFile:
            plt.show()
        else:
            # Infer the output format from the file extension.
            fmt = outFile.split('.')[-1]
            F = plt.gcf()
            plt.ylim(0, 1000)
            plt.xlim(0, 1000)
            F.set_size_inches([8, 8])
            plt.savefig(outFile, format=fmt)

    def postDraw(self):
        """Hook for subclasses to draw extra elements before output."""
        pass

    def textOutput(self):
        """Dump "node,<colAttr>" CSV rows to stdout."""
        # Single-argument print() is equivalent under Python 2 and 3.
        print('node,%s' % self.colAttr)
        for n in self.G.nodes():
            print('%d,%f' % (n, self.G.node[n][self.colAttr]))
class SingleTXDepth(TestbedMap):
    """Color nodes by single-transmitter shortest-path depth from a root."""

    def __init__(self, root, dbFile, sr, txp, packetLen,
                 prr_threshold=0.0, rssi_threshold=-100,
                 distanceLabels=False, addKey=True,
                 **kwargs):
        # Debug trace of the parameters; formatted so the output matches
        # the old py2 "print a, b, ..." while remaining py3-valid.
        print(' '.join(str(v) for v in (root, dbFile, sr, txp, packetLen,
                                        prr_threshold, rssi_threshold,
                                        distanceLabels)))
        super(SingleTXDepth, self).__init__(**kwargs)
        self.loadPrrEdges(dbFile, sr, txp, packetLen, prr_threshold,
                          rssi_threshold)
        self.distances = self.computeSPs(root)
        self.distances[root] = 1
        self.G.node[root]['shape'] = 'star'
        if addKey:
            self._addKey()
        self.setAttr('distance', self.distances)
        self.setColAttr('distance')
        self.addOutlined(root, 2)
        if distanceLabels:
            self.setLabels(self.distances)

    def _addKey(self):
        """Add the color-key legend: box nodes 70-74 at depths 1..5 and
        matching 'empty' label nodes 80-84 beside them."""
        key = {}
        for i, nodeId in enumerate([70, 71, 72, 73, 74]):
            key[nodeId] = {'pos': (25, 30 + 50 * i), 'shape': 'box'}
            self.distances[nodeId] = i + 1
        for i, nodeId in enumerate([80, 81, 82, 83, 84]):
            key[nodeId] = {'pos': (65, 30 + 50 * i), 'shape': 'empty'}
            self.distances[nodeId] = i + 1
        self.G.add_nodes_from([(k, key[k]) for k in key])

    def loadPrrEdges(self, dbFile, sr, txp, packetLen, prr_threshold,
                     rssi_threshold):
        """Load links above the PRR/RSSI thresholds as directed edges."""
        c = sqlite3.connect(dbFile)
        try:
            links = c.execute('SELECT src, dest, prr FROM link WHERE sr=? AND txPower=? AND len=? AND prr >=? and avgRssi >=?',
                              (sr, txp, packetLen, prr_threshold,
                               rssi_threshold)).fetchall()
        finally:
            # BUG FIX: the connection was previously never closed.
            c.close()
        for (src, dest, prr) in links:
            self.G.add_edge(src, dest, prr=prr)

    def computeSPs(self, root, unreachableVal=0):
        """Compute length of shortest paths from root to each other node.

        Unreachable nodes are assigned the maximum observed depth.
        NOTE(review): the unreachableVal parameter is accepted but unused
        (maxDepth is used instead); kept for interface compatibility —
        confirm intent before wiring it in.
        """
        p = nx.single_source_shortest_path_length(self.G, root)
        maxDepth = max([v for v in p.values()])
        for nodeId in self.G.nodes():
            if nodeId not in p:
                p[nodeId] = maxDepth
        return p
class Degree(TestbedMap):
    """Color nodes by in-degree of the PRR-thresholded connectivity graph."""

    def __init__(self, dbFile, sr, txp, packetLen, prr_threshold=0.0,
                 degreeLabels=False, **kwargs):
        super(Degree, self).__init__(**kwargs)
        self.loadPrrEdges(dbFile, sr, txp, packetLen, prr_threshold)
        self.degrees = self.computeDegrees()
        self.setAttr('degree', self.degrees)
        self.setColAttr('degree')
        if degreeLabels:
            self.setLabels(self.degrees)

    def loadPrrEdges(self, dbFile, sr, txp, packetLen, prr_threshold):
        """Load links above the PRR threshold as directed edges."""
        c = sqlite3.connect(dbFile)
        try:
            links = c.execute('SELECT src, dest, prr FROM link WHERE sr=? AND txPower=? AND len=? AND prr >=?', (sr, txp, packetLen, prr_threshold)).fetchall()
        finally:
            # BUG FIX: the connection was previously never closed.
            c.close()
        for (src, dest, prr) in links:
            self.G.add_edge(src, dest, prr=prr)

    def computeDegrees(self):
        """Return the node -> in-degree mapping for the loaded graph."""
        return self.G.in_degree()
class CXDistance(TestbedMap):
    """Color nodes by average CX depth to (or from) a reference node."""

    def __init__(self, dbFile, node, distanceFrom, **kwargs):
        super(CXDistance, self).__init__(**kwargs)
        self.loadDistances(node, distanceFrom, dbFile)
        self.loadErrors(dbFile)
        self.setAttr('distance', self.distances, 0)
        self.setColAttr('distance')
        self.addOutlined(node, 10)
        for nodeId in self.errors:
            self.addOutlined(nodeId, 3)
        rounded = dict([(k, "%.1f" % self.distances[k])
                        for k in self.distances])
        self.setLabels(rounded)

    def loadDistances(self, node, distanceFrom, dbFile):
        """Load avg depth from (or to) node, keyed by the other endpoint."""
        c = sqlite3.connect(dbFile)
        try:
            if distanceFrom:
                self.distances = dict(c.execute('SELECT dest, avgDepth FROM agg_depth WHERE src=?', (node,)))
            else:
                self.distances = dict(c.execute('SELECT src, avgDepth FROM agg_depth WHERE dest=?', (node,)))
            self.distances[node] = 0
        finally:
            c.close()

    def loadErrors(self, dbFile):
        """Load the list of nodes that reported error events."""
        c = sqlite3.connect(dbFile)
        try:
            self.errors = [nodeId for (nodeId,) in c.execute('SELECT node from error_events')]
        finally:
            # BUG FIX: unlike loadDistances, this connection was leaked.
            c.close()
class CXForwarders(TestbedMap):
def __init__(self, src, dest, dbFile, outlineErrors=False,
addKey=True, **kwargs):
super(CXForwarders, self).__init__(**kwargs)
self.loadForwarders(src, dest, dbFile)
self.loadErrors(dbFile)
self.fwdRatio[src]=1.0
self.fwdRatio[dest]=1.0
self.G.node[src]['shape']='star'
self.G.node[dest]['shape']='star'
self.addOutlined(src, 2)
self.addOutlined(dest, 2)
for n in self.errors:
self.G.node[n]['shape'] = 'none'
if addKey:
self.addKeyToFigure()
self.setAttr('fwdRatio', self.fwdRatio, 0)
self.setColAttr('fwdRatio')
if outlineErrors:
for nodeId in self.errors:
self.addOutlined(nodeId, 3)
rounded = dict([ (k, "%.2f"%self.fwdRatio[k])
for k in self.fwdRatio])
self.setLabels(rounded)
def addKeyToFigure(self):
key = {}
key[70] = {'pos': (25, 30), 'shape':'box'}
key[71] = {'pos': (25, 80), 'shape':'box'}
key[72] = {'pos': (25, 130), 'shape':'box'}
key[73] = {'pos': (25, 180), 'shape':'box'}
key[74] = {'pos': (25, 230), 'shape':'box'}
self.fwdRatio[70] = 0
self.fwdRatio[71] = 0.25
self.fwdRatio[72] = 0.50
self.fwdRatio[73] = 0.75
self.fwdRatio[74] = 1.0
key[80] = {'pos': (75, 30), 'shape':'empty'}
key[81] = {'pos': (75, 80), 'shape':'empty'}
key[82] = {'pos': (75, 130), 'shape':'empty'}
| |
obj._deserialize(item)
self.SelectedTables.append(obj)
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteSnapshotsResponse(AbstractModel):
    """DeleteSnapshots response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Number of snapshots deleted in the batch
        :type TotalCount: int
        :param TableResults: Results of the batch snapshot deletion
        :type TableResults: list of SnapshotResult
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TableResults = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_results = params.get("TableResults")
        if raw_results is not None:
            self.TableResults = []
            for raw in raw_results:
                parsed = SnapshotResult()
                parsed._deserialize(raw)
                self.TableResults.append(parsed)
        self.RequestId = params.get("RequestId")
class DeleteTableDataFlowRequest(AbstractModel):
    """DeleteTableDataFlow request structure.

    """

    def __init__(self):
        r"""
        :param ClusterId: ID of the cluster instance the tables belong to
        :type ClusterId: str
        :param SelectedTables: List of tables whose data flow is to be deleted
        :type SelectedTables: list of SelectedTableInfoNew
        """
        self.ClusterId = None
        self.SelectedTables = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        if params.get("SelectedTables") is not None:
            self.SelectedTables = []
            for item in params.get("SelectedTables"):
                obj = SelectedTableInfoNew()
                obj._deserialize(item)
                self.SelectedTables.append(obj)
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteTableDataFlowResponse(AbstractModel):
    """DeleteTableDataFlow response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Number of table data-flow deletion results
        :type TotalCount: int
        :param TableResults: List of table data-flow deletion results
        :type TableResults: list of TableResultNew
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TableResults = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_results = params.get("TableResults")
        if raw_results is not None:
            self.TableResults = []
            for raw in raw_results:
                parsed = TableResultNew()
                parsed._deserialize(raw)
                self.TableResults.append(parsed)
        self.RequestId = params.get("RequestId")
class DeleteTableGroupRequest(AbstractModel):
    """DeleteTableGroup request structure.

    """

    def __init__(self):
        r"""
        :param ClusterId: ID of the cluster the table group belongs to
        :type ClusterId: str
        :param TableGroupId: Table group ID
        :type TableGroupId: str
        """
        self.ClusterId = None
        self.TableGroupId = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.TableGroupId = params.get("TableGroupId")
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteTableGroupResponse(AbstractModel):
    """DeleteTableGroup response structure.

    """

    def __init__(self):
        r"""
        :param TaskId: ID of the task created by the table-group deletion
        :type TaskId: str
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TaskId = None
        self.RequestId = None

    def _deserialize(self, params):
        for attr in ("TaskId", "RequestId"):
            setattr(self, attr, params.get(attr))
class DeleteTableIndexRequest(AbstractModel):
    """DeleteTableIndex request structure.

    """

    def __init__(self):
        r"""
        :param ClusterId: ID of the cluster instance the tables belong to
        :type ClusterId: str
        :param SelectedTables: List of tables whose distributed indexes are to be deleted
        :type SelectedTables: list of SelectedTableInfoNew
        """
        self.ClusterId = None
        self.SelectedTables = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        if params.get("SelectedTables") is not None:
            self.SelectedTables = []
            for item in params.get("SelectedTables"):
                obj = SelectedTableInfoNew()
                obj._deserialize(item)
                self.SelectedTables.append(obj)
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteTableIndexResponse(AbstractModel):
    """DeleteTableIndex response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Number of distributed-index deletion results
        :type TotalCount: int
        :param TableResults: List of distributed-index deletion results
        :type TableResults: list of TableResultNew
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TableResults = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_results = params.get("TableResults")
        if raw_results is not None:
            self.TableResults = []
            for raw in raw_results:
                parsed = TableResultNew()
                parsed._deserialize(raw)
                self.TableResults.append(parsed)
        self.RequestId = params.get("RequestId")
class DeleteTablesRequest(AbstractModel):
    """DeleteTables request structure.

    """

    def __init__(self):
        r"""
        :param ClusterId: ID of the cluster containing the tables to delete
        :type ClusterId: str
        :param SelectedTables: List of tables to delete
        :type SelectedTables: list of SelectedTableInfoNew
        """
        self.ClusterId = None
        self.SelectedTables = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        if params.get("SelectedTables") is not None:
            self.SelectedTables = []
            for item in params.get("SelectedTables"):
                obj = SelectedTableInfoNew()
                obj._deserialize(item)
                self.SelectedTables.append(obj)
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteTablesResponse(AbstractModel):
    """DeleteTables response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Number of table deletion results
        :type TotalCount: int
        :param TableResults: List of table deletion result details
        :type TableResults: list of TableResultNew
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.TableResults = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_results = params.get("TableResults")
        if raw_results is not None:
            self.TableResults = []
            for raw in raw_results:
                parsed = TableResultNew()
                parsed._deserialize(raw)
                self.TableResults.append(parsed)
        self.RequestId = params.get("RequestId")
class DescribeApplicationsRequest(AbstractModel):
    """DescribeApplications request structure.

    """

    def __init__(self):
        r"""
        :param ClusterId: Cluster ID, used to fetch the applications of the specified cluster
        :type ClusterId: str
        :param Limit: Pagination: maximum number of records to return
        :type Limit: int
        :param Offset: Pagination: offset into the result list
        :type Offset: int
        :param CensorStatus: Application (approval) status, used for filtering
        :type CensorStatus: int
        :param TableGroupId: Table group ID, used for filtering
        :type TableGroupId: str
        :param TableName: Table name, used for filtering
        :type TableName: str
        :param Applicant: Applicant UIN, used for filtering
        :type Applicant: str
        :param ApplyType: Application type, used for filtering
        :type ApplyType: int
        """
        self.ClusterId = None
        self.Limit = None
        self.Offset = None
        self.CensorStatus = None
        self.TableGroupId = None
        self.TableName = None
        self.Applicant = None
        self.ApplyType = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
        self.CensorStatus = params.get("CensorStatus")
        self.TableGroupId = params.get("TableGroupId")
        self.TableName = params.get("TableName")
        self.Applicant = params.get("Applicant")
        self.ApplyType = params.get("ApplyType")
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeApplicationsResponse(AbstractModel):
    """DescribeApplications response structure.

    """

    def __init__(self):
        r"""
        :param Applications: List of applications
        :type Applications: list of Application
        :param TotalCount: Number of applications
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.Applications = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_apps = params.get("Applications")
        if raw_apps is not None:
            self.Applications = []
            for raw in raw_apps:
                app = Application()
                app._deserialize(raw)
                self.Applications.append(app)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeClusterTagsRequest(AbstractModel):
    """DescribeClusterTags request structure.

    """

    def __init__(self):
        r"""
        :param ClusterIds: List of cluster IDs
        :type ClusterIds: list of str
        """
        self.ClusterIds = None

    def _deserialize(self, params):
        self.ClusterIds = params.get("ClusterIds")
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeClusterTagsResponse(AbstractModel):
    """DescribeClusterTags response structure.

    """

    def __init__(self):
        r"""
        :param Rows: List of cluster tag information
        Note: this field may return null, indicating that no valid value was found.
        :type Rows: list of TagsInfoOfCluster
        :param TotalCount: Number of returned results
        Note: this field may return null, indicating that no valid value was found.
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.Rows = None
        self.TotalCount = None
        self.RequestId = None

    def _deserialize(self, params):
        raw_rows = params.get("Rows")
        if raw_rows is not None:
            self.Rows = []
            for raw in raw_rows:
                row = TagsInfoOfCluster()
                row._deserialize(raw)
                self.Rows.append(row)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class DescribeClustersRequest(AbstractModel):
    """DescribeClusters request structure.

    """

    def __init__(self):
        r"""
        :param ClusterIds: List of cluster IDs to query
        :type ClusterIds: list of str
        :param Filters: Query filter conditions
        :type Filters: list of Filter
        :param Offset: Offset into the result list
        :type Offset: int
        :param Limit: Maximum number of records to return, default 20
        :type Limit: int
        :param Ipv6Enable: Whether IPv6 is enabled
        :type Ipv6Enable: int
        """
        self.ClusterIds = None
        self.Filters = None
        self.Offset = None
        self.Limit = None
        self.Ipv6Enable = None

    def _deserialize(self, params):
        self.ClusterIds = params.get("ClusterIds")
        if params.get("Filters") is not None:
            self.Filters = []
            for item in params.get("Filters"):
                obj = Filter()
                obj._deserialize(item)
                self.Filters.append(obj)
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.Ipv6Enable = params.get("Ipv6Enable")
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeClustersResponse(AbstractModel):
    """DescribeClusters response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Number of cluster instances
        :type TotalCount: int
        :param Clusters: List of cluster instances
        :type Clusters: list of ClusterInfo
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.Clusters = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_clusters = params.get("Clusters")
        if raw_clusters is not None:
            self.Clusters = []
            for raw in raw_clusters:
                cluster = ClusterInfo()
                cluster._deserialize(raw)
                self.Clusters.append(cluster)
        self.RequestId = params.get("RequestId")
class DescribeIdlFileInfosRequest(AbstractModel):
    """DescribeIdlFileInfos request structure.

    """

    def __init__(self):
        r"""
        :param ClusterId: ID of the cluster the files belong to
        :type ClusterId: str
        :param TableGroupIds: IDs of the table groups the files belong to
        :type TableGroupIds: list of str
        :param IdlFileIds: List of file IDs to query
        :type IdlFileIds: list of str
        :param Offset: Offset into the result list
        :type Offset: int
        :param Limit: Maximum number of records to return
        :type Limit: int
        """
        self.ClusterId = None
        self.TableGroupIds = None
        self.IdlFileIds = None
        self.Offset = None
        self.Limit = None

    def _deserialize(self, params):
        self.ClusterId = params.get("ClusterId")
        self.TableGroupIds = params.get("TableGroupIds")
        self.IdlFileIds = params.get("IdlFileIds")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        # Warn about request keys that do not map to any known attribute.
        # BUG FIX: warning text previously read "fileds".
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeIdlFileInfosResponse(AbstractModel):
    """DescribeIdlFileInfos response structure.

    """

    def __init__(self):
        r"""
        :param TotalCount: Number of files
        :type TotalCount: int
        :param IdlFileInfos: List of file details
        :type IdlFileInfos: list of IdlFileInfo
        :param RequestId: Unique request ID, returned with every request. The RequestId is needed when reporting an issue.
        :type RequestId: str
        """
        self.TotalCount = None
        self.IdlFileInfos = None
        self.RequestId = None

    def _deserialize(self, params):
        self.TotalCount = params.get("TotalCount")
        raw_infos = params.get("IdlFileInfos")
        if raw_infos is not None:
            self.IdlFileInfos = []
            for raw in raw_infos:
                info = IdlFileInfo()
                info._deserialize(raw)
                self.IdlFileInfos.append(info)
        self.RequestId = params.get("RequestId")
class DescribeMachineRequest(AbstractModel):
"""DescribeMachine请求参数结构体
"""
def __init__(self):
r"""
:param Ipv6Enable: 不为0,表示查询支持ipv6的机器
:type Ipv6Enable: int
"""
self.Ipv6Enable = None
def _deserialize(self, params):
self.Ipv6Enable = params.get("Ipv6Enable")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if | |
nbar.x_index {sort}, nbar.y_index {sort}
""".format(sort=sort.value)
params = {"tile_type": [TILE_TYPE.value],
"tile_class": [tile_class.value for tile_class in TILE_CLASSES],
"satellite": [satellite.value for satellite in satellites],
"geom": bytearray(wkb),
"acq_min": acq_min, "acq_max": acq_max,
"level_nbar": ProcessingLevel.NBAR.value}
if DatasetType.PQ25 in dataset_types:
params["level_pqa"] = ProcessingLevel.PQA.value
if DatasetType.FC25 in dataset_types:
params["level_fc"] = ProcessingLevel.FC.value
if DatasetType.DSM in dataset_types:
params["level_dsm"] = ProcessingLevel.DSM.value
if DatasetType.DEM in dataset_types:
params["level_dem"] = ProcessingLevel.DEM.value
if DatasetType.DEM_HYDROLOGICALLY_ENFORCED in dataset_types:
params["level_dem_h"] = ProcessingLevel.DEM_H.value
if DatasetType.DEM_SMOOTHED in dataset_types:
params["level_dem_s"] = ProcessingLevel.DEM_S.value
return sql, params
# TODO - disabling this for now as I don't think the queries perform very well and nothing is currently using them
# as the AOI filtering is done at the CELL level rather than the TILE level.
# I'll do some work on the queries shortly!
# # TILE
#
# def list_tiles_vector_file(vector_file, vector_layer, vector_feature, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC, config=None):
#
# """
# Return a list of tiles matching the criteria as a SINGLE-USE generator
#
# .. warning::
# Deprecated: use either datacube.api.query.list_tiles_wkb_as_list() or datacube.api.query.list_tiles_wkb_as_generator()
#
# :param vector_file: Vector (ESRI Shapefile, KML, ...) file containing the shape
# :type vector_file: str
# :param vector_layer: Layer (0 based index) within the vector file
# :type vector_layer: int
# :param vector_feature: Feature (0 based index) within the layer
# :type vector_feature: int
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
#
# :return: List of tiles
# :rtype: list[datacube.api.model.Tile]
# """
# return list_tiles_vector_file_as_generator(vector_file, vector_layer, vector_feature, satellites, acq_min, acq_max, dataset_types, sort, config)
#
#
# def list_tiles_vector_file_as_list(vector_file, vector_layer, vector_feature, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC, config=None):
#
# """
# Return a list of tiles matching the criteria AS A REUSABLE LIST rather than as a one-use-generator
#
# :param vector_file: Vector (ESRI Shapefile, KML, ...) file containing the shape
# :type vector_file: str
# :param vector_layer: Layer (0 based index) within the vector file
# :type vector_layer: int
# :param vector_feature: Feature (0 based index) within the layer
# :type vector_feature: int
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
#
# :return: List of tiles
# :rtype: list[datacube.api.model.Tile]
# """
# return list(list_tiles_vector_file_as_generator(vector_file, vector_layer, vector_feature, satellites, acq_min, acq_max, dataset_types, sort, config))
#
#
# def list_tiles_vector_file_as_generator(vector_file, vector_layer, vector_feature, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC, config=None):
#
# """
# Return a list of cells matching the criteria AS A REUSABLE LIST rather than as a one-use-generator
#
# :param vector_file: Vector (ESRI Shapefile, KML, ...) file containing the shape
# :type vector_file: str
# :param vector_layer: Layer (0 based index) within the vector file
# :type vector_layer: int
# :param vector_feature: Feature (0 based index) within the layer
# :type vector_feature: int
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
#
# :return: List of tiles
# :rtype: list[datacube.api.model.Tile]
# """
#
# return list_tiles_wkb_as_generator(extract_feature_geometry_wkb(vector_file, vector_layer, vector_feature),
# satellites, acq_min, acq_max, dataset_types, sort, config)
#
#
# def list_tiles_wkb(wkb, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC, config=None):
#
# """
# Return a list of tiles matching the criteria as a SINGLE-USE generator
#
# .. warning::
# Deprecated: use either datacube.api.query.list_tiles_as_list() or datacube.api.query.list_tiles_as_generator()
#
# :param wkb: Shape as WKB format
# :type wkb: WKB
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
#
# :return: List of tiles
# :rtype: list[datacube.api.model.Tile]
# """
# return list_tiles_wkb_as_generator(wkb, satellites, acq_min, acq_max, dataset_types, sort, config)
#
#
# def list_tiles_wkb_as_list(wkb, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC, config=None):
#
# """
# Return a list of cells matching the criteria AS A REUSABLE LIST rather than as a one-use-generator
#
# :param wkb: Shape as WKB format
# :type wkb: WKB
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
#
# :return: List of tiles
# :rtype: list[datacube.api.model.Tile]
# """
# return list(list_tiles_wkb_as_generator(wkb, satellites, acq_min, acq_max, dataset_types, sort))
#
#
# def list_tiles_wkb_as_generator(wkb, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC, config=None):
#
# """
# Return a list of tiles matching the criteria as a SINGLE-USE generator
#
# :param wkb: Shape as WKB format
# :type wkb: WKB
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
#
# :return: List of tiles
# :rtype: list[datacube.api.model.Tile]
# """
#
# conn, cursor = None, None
#
# try:
# # connect to database
#
# conn, cursor = connect_to_db(config=config)
#
# sql, params = build_list_tiles_wkb_sql_and_params(wkb, satellites, acq_min, acq_max, dataset_types, sort)
#
# _log.debug(cursor.mogrify(sql, params))
#
# cursor.execute(sql, params)
#
# for record in result_generator(cursor):
# _log.debug(record)
# yield Tile.from_db_record(record)
#
# except Exception as e:
#
# _log.error("Caught exception %s", e)
# conn.rollback()
# raise
#
# finally:
#
# conn = cursor = None
#
#
# def list_tiles_wkb_to_file(wkb, satellites, acq_min, acq_max, dataset_types, filename, sort=SortType.ASC, config=None):
#
# """
# Write the list of tiles matching the criteria to the specified file
#
# :param wkb: Shape as WKB format
# :type wkb: WKB
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param filename: The output file
# :type filename: str
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
# :param config: Config
# :type config: datacube.config.Config
# """
#
# conn = cursor = None
#
# try:
# # connect to database
#
# conn, cursor = connect_to_db(config=config)
#
# sql, params = build_list_tiles_wkb_sql_and_params(wkb, satellites, acq_min, acq_max, dataset_types, sort)
#
# sql = to_file_ify_sql(sql)
#
# if filename:
# with open(filename, "w") as f:
# cursor.copy_expert(cursor.mogrify(sql, params), f)
# else:
# cursor.copy_expert(cursor.mogrify(sql, params), sys.stdout)
#
# except Exception as e:
#
# _log.error("Caught exception %s", e)
# conn.rollback()
# raise
#
# finally:
#
# conn = cursor = None
#
#
# def build_list_tiles_wkb_sql_and_params(wkb, satellites, acq_min, acq_max, dataset_types, sort=SortType.ASC):
#
# """
# Build the SQL query string and parameters required to return the tiles matching the criteria
#
# :param wkb: Shape as WKB format
# :type wkb: WKB
# :param satellites: Satellites
# :type satellites: list[datacube.api.model.Satellite]
# :param acq_min: Acquisition date range
# :type acq_min: datetime.datetime
# :param acq_max: Acquisition date range
# :type acq_max: datetime.datetime
# :param dataset_types: Dataset types
# :type dataset_types: list[datacube.api.model.DatasetType]
# :param sort: Sort order
# :type sort: datacube.api.query.SortType
#
# :return: The SQL query and params
# :rtype: (str, dict)
# """
#
# sql = """
# select
# acquisition.acquisition_id, satellite_tag as satellite, start_datetime, end_datetime,
# extract(year from end_datetime) as end_datetime_year, extract(month from end_datetime) as end_datetime_month,
# nbar.x_index, nbar.y_index, point(nbar.x_index, nbar.y_index) as xy,
# """
#
# sql += """
# ARRAY[
# """
#
# sql += """
# ['ARG25', nbar.tile_pathname]
# """
#
# if DatasetType.PQ25 in dataset_types:
# sql += """
# ,['PQ25', pqa.tile_pathname]
# """
#
# if DatasetType.FC25 in dataset_types:
# sql += """
# ,['FC25', fc.tile_pathname]
# """
#
# if DatasetType.NDVI in dataset_types:
# sql += """
# ,['NDVI', nbar.tile_pathname]
# """
#
# if DatasetType.EVI in dataset_types:
# sql += """
# ,['EVI', nbar.tile_pathname]
# """
#
# if DatasetType.NBR in dataset_types:
# sql += """
# ,['NBR', nbar.tile_pathname]
# """
#
# if DatasetType.TCI in dataset_types:
# sql += """
# ,['TCI', nbar.tile_pathname]
# """
#
# if DatasetType.DSM in dataset_types:
# sql += """
# ,['DSM', dsm.tile_pathname]
# """
#
# if DatasetType.DEM in dataset_types:
# sql += """
# ,['DEM', dem.tile_pathname]
# """
#
# if DatasetType.DEM_HYDROLOGICALLY_ENFORCED in dataset_types:
# sql += """
# ,['DEM_HYDROLOGICALLY_ENFORCED', dem_h.tile_pathname]
# """
#
# if DatasetType.DEM_SMOOTHED in dataset_types:
# sql += """
# ,['DEM_SMOOTHED', dem_s.tile_pathname]
# """
#
# sql += """
# ] as datasets
# """
#
# sql += """
# from acquisition
# join satellite on satellite.satellite_id=acquisition.satellite_id
# """
#
# sql += """
# join
# (
# select
# dataset.acquisition_id, tile.dataset_id, tile.x_index, tile.y_index, tile.tile_pathname, tile.tile_type_id, tile.tile_class_id
# from tile
# join dataset on dataset.dataset_id=tile.dataset_id
# where dataset.level_id = %(level_nbar)s
# ) as nbar on nbar.acquisition_id=acquisition.acquisition_id
# """
#
# sql += """
# join tile_footprint on tile_footprint.x_index=nbar.x_index and tile_footprint.y_index=nbar.y_index
# """
#
# if DatasetType.PQ25 in dataset_types:
# sql += """
# join
# (
# select
# dataset.acquisition_id, tile.dataset_id, tile.x_index, tile.y_index, tile.tile_pathname, tile.tile_type_id, tile.tile_class_id
# from tile
# join dataset on dataset.dataset_id=tile.dataset_id
# where dataset.level_id = %(level_pqa)s
# ) as pqa on
# pqa.acquisition_id=acquisition.acquisition_id
# and pqa.x_index=nbar.x_index and pqa.y_index=nbar.y_index
# and pqa.tile_type_id=nbar.tile_type_id and pqa.tile_class_id=nbar.tile_class_id
#
# """
#
# if DatasetType.FC25 in dataset_types:
# sql += """
# join
# (
# select
# dataset.acquisition_id, tile.dataset_id, tile.x_index, | |
them
# Per-segment ("continental") diagnostics: for each longitude segment in
# seg_dict, compute the same drag terms as the global budget below, weight
# them by the segment's share of longitudes, vertically integrate, and save.
Nlon=dict()
# N_tot is the total number of longitude points; used to weight each segment.
N_tot= Nlon['N_total'] =float(D.longitude.size)
all_CD_rep=dict()
all_CD_bud=dict()
for kk in seg_dict.keys():
    CD_storage_rep=dict()
    CD_storage_bud=dict()
    lon_seg=seg_dict[kk]
    print(kk)
    # Restrict the surface fields and beta masks to this segment's longitudes.
    Dsurface_segment = Dsurface.sel(longitude=lon_seg)
    #ps_zm_seg = ps.sel(longitude=lon_seg).mean('longitude').compute()
    BATA_seg = BATA.sel(longitude=lon_seg)
    #BATA_zm_seg = BATA_seg.mean('longitude').compute()
    BATA_01_zm_seg = BATA_01.sel(longitude=lon_seg).mean('longitude').compute()
    Nlon[kk] = float(Dsurface_segment.longitude.size)
    # Fraction of the full longitude circle covered by this segment.
    N_weight = Nlon[kk]/N_tot
    # a) surface torque
    #sp_dlambda_seg = ddlambda_spherical(Dsurface_segment.sp, None, r_e, lon, lat )
    #gph_sp_dlambda_seg= Dsurface_segment.z * sp_dlambda_seg / 9.81
    #better copy from above and select
    gph_sp_dlambda_seg= gph_sp_dlambda.sel(longitude=lon_seg)
    # take zonal mean, at this point treat it as a surface variable
    gph_sp_dlambda_zm_rep_seg = (gph_sp_dlambda_seg).mean('longitude')
    gph_sp_dlambda_zm_rep_seg.name='zonal mean surface mountain torque'
    gph_sp_dlambda_zm_rep_seg.attrs['units']='N m**-2'
    # Budget version = representative version scaled by zonal-mean surface pressure.
    gph_sp_dlambda_zm_budget_seg = gph_sp_dlambda_zm_rep_seg * ps_zm
    gph_sp_dlambda_zm_budget_seg.attrs['units']='N**2 m**-4'
    CD_storage_rep['torque_srf_zm']= gph_sp_dlambda_zm_rep_seg * N_weight
    CD_storage_bud['torque_srf_zm']= gph_sp_dlambda_zm_budget_seg * N_weight
    # b) GPh level
    a =gph_bata_div.sel(longitude=lon_seg)
    a_zm_rep = (a * BATA_seg).mean('longitude') / BATA_zm
    a_zm_conventional = a.mean('longitude')
    # apply definition of representative mean: fall back to the conventional
    # zonal mean where the beta weight BATA_zm vanishes.
    a_zm_rep = a_zm_rep.where(BATA_zm != 0, a_zm_conventional)
    a_zm_budget = a_zm_rep * BATA_zm #(data * BATA).mean('longitude')
    CD_storage_rep['torque_lev_zm']= (a_zm_rep * N_weight)
    CD_storage_bud['torque_lev_zm']= (a_zm_budget * N_weight)
    # c) gravity wave drag
    # NOTE(review): 'megwss' is presumably the eastward gravity-wave surface
    # stress variable of the surface dataset — confirm against the data source.
    F_gravity_zm_data_seg =(Dsurface_segment['megwss']* Dsurface_segment.sp).mean('longitude')/ ps_zm
    F_gravity_zm_rep_seg = xr.DataArray(data=F_gravity_zm_data_seg, name='Zonal mean zonal gravity wave stress', attrs= gravity_drag.attrs)
    F_gravity_zm_rep_seg.attrs['units']='N m**-2'
    F_gravity_zm_budget_seg = F_gravity_zm_rep_seg * ps_zm
    F_gravity_zm_budget_seg.attrs['units']='N**2 m**-4'
    CD_storage_rep['F_gwave_zm']= (F_gravity_zm_rep_seg * N_weight)
    CD_storage_bud['F_gwave_zm']= (F_gravity_zm_budget_seg * N_weight)
    # d) save: vertically integrate both variants and write one netCDF per segment.
    CD_storage_rep = xr.Dataset(CD_storage_rep)
    G_CD_int =vertical_integal_Hbeta(CD_storage_rep,BATA_01_zm_seg ).compute()
    G_CD_int.attrs['long_name'], G_CD_int.attrs['units'] = 'Continental Surface Drag as Representetive Mean', 'Pa'
    CD_storage_bud = xr.Dataset(CD_storage_bud)
    GB_CD_int =vertical_integal_Hbeta(CD_storage_bud, BATA_01_zm_seg).compute()
    GB_CD_int.attrs['long_name'], GB_CD_int.attrs['units'] = 'Continental Surface Drag as Budget Mean', 'Pa**2'
    save_path_local = save_path + '/drag_on_continents_zm/repres/'
    M.mkdirs_r(save_path_local)
    G_CD_int.to_netcdf(save_path_local + key_name + 'repres_'+ kk+ '_'+ date_str + '.nc')
    save_path_local = save_path + '/drag_on_continents_zm/budget/'
    M.mkdirs_r(save_path_local)
    GB_CD_int.to_netcdf(save_path_local + key_name + 'budget_'+ kk+ '_'+ date_str + '.nc')
    all_CD_rep[kk]=G_CD_int
    #all_CD_bud[kk]=GB_CD_int
# Free the accumulators and last-iteration temporaries when the continental
# plot is not requested (all_CD_rep is consumed later only under plot_cont).
if plot_cont is False:
    del all_CD_rep
    del all_CD_bud
    del GB_CD_int
    del G_CD_int
    del Dsurface_segment
    del BATA_seg
    del BATA_01_zm_seg
    del CD_storage_rep
    del CD_storage_bud
    del a_zm_rep
    del a_zm_budget
    del a_zm_conventional
tend=time.time()
hist = logger(hist, 'continental sepeparatio time: ' + str(tend-tstart) )
tstart=time.time()
""" end of continental excurse """
# %% 2. c) finish up global mean of surface vars. compute and store them
# 2. c) 1 surface torque: force evaluation of the lazy zonal means computed
# earlier and store them in the representative/budget dictionaries.
repres['torque_srf_zm']= gph_sp_dlambda_zm_rep.compute()
budget['torque_srf_zm']= gph_sp_dlambda_zm_budget.compute()
del gph_sp_dlambda_zm_rep
del gph_sp_dlambda_zm_budget
del sp_dlambda
del gph_sp_dlambda
# 2. c) 2. gph level
repres['torque_lev_zm']= gph_bata_div_zm_rep.compute()
budget['torque_lev_zm']= gph_bata_div_zm_budget.compute()
del gph_bata_div
del gph_bata_div_zm_conventional
del gph_bata_div_zm_rep
del gph_bata_div_zm_budget
tend=time.time()
hist = logger(hist, 'store and compute global surface tourque part II : ' + str(tend-tstart) )
tstart=time.time()
# %% 3 . global surface drag terms
# %% a) surface var: 1. turbulent stress
# Representative mean: surface-pressure-weighted zonal mean of the turbulent flux.
F_srf_zm_data =(fluxes* ps).mean('longitude')/ ps_zm
F_srf_zm_rep = xr.DataArray(data=F_srf_zm_data, name='Zonal mean zonal Surface Stress', attrs= fluxes.attrs)
F_srf_zm_rep.attrs['units']='N m**-2'
# Budget mean carries an extra factor of zonal-mean surface pressure.
F_srf_zm_budget = F_srf_zm_rep * ps_zm
F_srf_zm_budget.attrs['units']='N**2 m**-4'
repres['F_tur_zm'] = F_srf_zm_rep
budget['F_tur_zm'] = F_srf_zm_budget
BATAD['ps_zm'] = ps_zm
# %% b) surface var: 2. gravity drag (same weighting as the turbulent stress)
F_gravity_zm_data =(gravity_drag* ps).mean('longitude')/ ps_zm
F_gravity_zm_rep = xr.DataArray(data=F_gravity_zm_data, name='Zonal mean zonal gravity wave stress', attrs= gravity_drag.attrs)
F_gravity_zm_rep.attrs['units']='N m**-2'
F_gravity_zm_budget = F_gravity_zm_rep * ps_zm
F_gravity_zm_budget.attrs['units']='N**2 m**-4'
repres['F_gwave_zm'] = F_gravity_zm_rep
budget['F_gwave_zm'] = F_gravity_zm_budget
# %% 4 . representative average
# Beta-weighted zonal mean of the full 3-D fields; where the weight BATA_zm
# is zero the representative mean is undefined and the conventional zonal
# mean is substituted instead.
data_zm_rep = (data * BATA).mean('longitude') / BATA_zm
data_zm_conventional = data.mean('longitude')
#levmask =data.level < 0.1e5
#data_zm_conventional = data.sel(level=~levmask).mean('longitude')
#data.sel(level=levmask)*np.nan
# apply definition of representative mean
data_zm_rep = data_zm_rep.where(BATA_zm != 0, data_zm_conventional).compute()
data_zm_budget = (data_zm_rep * BATA_zm).compute() #(data * BATA).mean('longitude')
# store in dict
repres['data_zm'] = data_zm_rep
budget['data_zm'] = data_zm_budget
tend=time.time()
hist = logger(hist, 'Surface stresses and representative means: ' + str(tend-tstart) )
tstart=time.time()
# %% eddy terms
# a) mean flow: momentum flux carried by the zonal-mean circulation.
uzm_vzm_rep = data_zm_rep.u * data_zm_rep.v
uzm_vzm_budget = BATA_zm * uzm_vzm_rep
# b) eddies: deviations of the full fields from the representative mean.
data_p =data - data_zm_rep
# test if primes are 0 , see Boer eq. sec. 4b.
#(data_p * BATA).mean()
upvp_zm_budget = (data_p.u * data_p.v * BATA_zm).mean('longitude').compute()
upvp_zm_conventional = (data_p.u * data_p.v).mean('longitude')
# apply definition of representative mean (fall back where BATA_zm == 0)
upvp_zm_rep = (upvp_zm_budget / BATA_zm)
upvp_zm_rep =upvp_zm_rep.where(BATA_zm != 0 , upvp_zm_conventional).compute()
#upvp_zm_rep
# store in dict
repres['uzm_vzm'] = uzm_vzm_rep
repres['uprime_vprime_zm'] = upvp_zm_rep
budget['uzm_vzm'] = uzm_vzm_budget
budget['uprime_vprime_zm'] = upvp_zm_budget
tend=time.time()
hist = logger(hist, 'eddy terms compute: ' + str(tend-tstart) )
tstart=time.time()
# %%
# 2. zonal derivative
# Meridional divergence of the momentum fluxes; the .where calls zero the
# values at the poles, where the spherical divergence is singular.
repres['uzm_vzm_div'] = ddphi_spherical_zm(uzm_vzm_rep ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
repres['uprime_vprime_zm_div'] = ddphi_spherical_zm(upvp_zm_rep ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
budget['uzm_vzm_div'] = ddphi_spherical_zm(uzm_vzm_budget ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
budget['uprime_vprime_zm_div'] = ddphi_spherical_zm(upvp_zm_budget ,ps_zm, r_e, lat ).where(lat !=-90, 0).where(lat !=90, 0).compute()
tend=time.time()
hist = logger(hist, 'phi gradients compute: ' + str(tend-tstart) )
tstart=time.time()
# 3. tendency term: du/dt from a centered difference in time (seconds).
repres['dudt'] = data_zm_rep.u.differentiate('time', edge_order=2, datetime_unit='s').compute()
budget['dudt'] = data_zm_budget.u.differentiate('time', edge_order=2, datetime_unit='s').compute()
del data_zm_rep
del data_zm_budget
del data
tend=time.time()
hist = logger(hist, 'tendency term compute: ' + str(tend-tstart) )
tstart=time.time()
# %% 4. also process gph (geopotential), same representative/budget split.
Phi_zm_rep = (Phi * BATA).mean('longitude') / BATA_zm
repres['phi_zm'] = Phi_zm_rep.where(BATA_zm != 0, Phi.mean('longitude'))
repres['phi_zm'].attrs =Phi.attrs
budget['phi_zm'] = Phi_zm_rep * BATA_zm #(data * BATA).mean('longitude')
budget['phi_zm'].attrs =Phi.attrs
tend=time.time()
hist = logger(hist, 'Phi single var compute time: ' + str(tend-tstart) )
tstart=time.time()
# %% 4. merge data to xr.DataSets
print('Repack data and Cal data')
# a) representative means: collect every term of the representative-mean
# momentum budget into the Dataset G, attaching names/long_names/units.
G = repres['dudt']
G.name , G.attrs['long_name'], G.attrs['units'] = 'dudt' , 'Zonal Mean Tendency', '(m s**-2)'
G =repres['dudt'].to_dataset()
G.attrs['long_name'], G.attrs['units'] = 'Terms for Representative Zonal Mean Momentum Budget', 'm s**-2 (fields var) or N m**-2 (surface var)'
key= 'uzm_vzm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uzm_vzm' , 'Zonal Mean mean-momentum flux', '(m**2 s**-2)'
G['uzm_vzm']=repres[key]
key= 'uprime_vprime_zm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uprime_vprime_zm' , 'Zonal Mean eddy-moemntum flux', '(m**2 s**-2)'
G['uprime_vprime_zm']=repres[key]
# FIX: this key was the placeholder '<KEY>', which does not exist in repres
# and would raise a KeyError; the attribute assignments and the G[...] line
# below show the intended key is 'uzm_vzm_div'.
key= 'uzm_vzm_div'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uzm_vzm_div' , 'Zonal Mean mean-flux divergence', '(m s**-2)'
G['uzm_vzm_div']=repres[key]
key= 'uprime_vprime_zm_div'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'uprime_vprime_zm_div' , 'Zonal Mean eddy-flux divergence', '(m s**-2)'
G['uprime_vprime_zm_div']=repres[key]
key= 'torque_lev_zm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'torque_lev_zm' , 'Zonal Mean Geopotential Height Torque', '(m s**-2)'
G['torque_lev_zm']=repres[key].T
key= 'torque_srf_zm'
repres[key].name , repres[key].attrs['long_name'], repres[key].attrs['units'] = 'torque_srf_zm' , 'Zonal Mean Surface Torque', '(m s**-2)'
G['torque_srf_zm']=repres[key]
key= 'F_tur_zm'
repres[key].name , repres[key].attrs['long_name'] = 'F_tur_zm' , 'Zonal Mean Turbulent Surface Stress'
G['F_tur_zm']=repres[key]
key= 'F_gwave_zm'
repres[key].name , repres[key].attrs['long_name'] = 'F_gwave_zm' , 'Zonal Mean Zonal Gravity Wave Stress'
G['F_gwave_zm']=repres[key]
key= 'data_zm'
# Coriolis term: f times the representative-mean meridional wind.
tempv =repres[key].v * f
tempv.name , tempv.attrs['long_name'], tempv.attrs['units'] = 'Zonal Mean Advection of Planetary Momentum' , 'Zonal Mean Advection of Planetary Momentum', '(m s**-2)'
G['v_f_zm']=tempv
# save also zonal mean winds and GPH
G_others = repres['data_zm'].u
G_others.name , G_others.attrs['long_name'], G_others.attrs['units'] = 'u_repres' , 'Representative Zonal Mean Zonal Wind' , '(m s**-1)'
G_others = G_others.to_dataset()
key='v_repres'
G_others[key]=repres['data_zm'].v
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Representative Zonal Mean Meridional Wind' , '(m s**-1)'
key= 'phi_repres'
G_others[key]=repres['phi_zm'].compute()
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Representative Zonal Mean Geopotential Height' , '(m**2 s**-2)'
# b) budget means: same repacking as for G, but for the budget-mean terms.
GB = budget['dudt']
GB.name , GB.attrs['long_name'], GB.attrs['units'] = 'dudt' , 'Zonal Mean Tendency', '(Pa m * s**-2)'
GB =budget['dudt'].to_dataset()
GB.attrs['long_name'], GB.attrs['units'] = 'Terms for Zonal Mean Momentum Budget', '(Pa m * s**-2)'
key= 'uzm_vzm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uzm_vzm' , 'Zonal Mean mean-flux', '(Pa m**2 s**-2)'
GB['uzm_vzm']=budget[key]
key= 'uprime_vprime_zm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uprime_vprime_zm' , 'Zonal Mean eddy-flux', '(Pa m**2 s**-2)'
GB['uprime_vprime_zm']=budget[key]
# FIX: this key was the placeholder '<KEY>', which does not exist in budget
# and would raise a KeyError; the attribute assignments and the GB[...] line
# below show the intended key is 'uzm_vzm_div'.
key= 'uzm_vzm_div'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uzm_vzm_div' , 'Zonal Mean mean-flux divergence', '(Pa m s**-2)'
GB['uzm_vzm_div']=budget[key]
key= 'uprime_vprime_zm_div'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'uprime_vprime_zm_div' , 'Zonal Mean Eddy-flux divergence', '(Pa m s**-2)'
GB['uprime_vprime_zm_div']=budget[key]
key= 'torque_lev_zm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'torque_lev_zm' , 'Zonal Mean Geopotential Height Torque', '(Pa m s**-2)'
GB['torque_lev_zm']=budget[key].T
key= 'torque_srf_zm'
budget[key].name , budget[key].attrs['long_name'], budget[key].attrs['units'] = 'torque_srf_zm' , 'Zonal Mean Surface Torque', '(Pa m s**-2)'
GB['torque_srf_zm']=budget[key]
key= 'F_tur_zm'
budget[key].name , budget[key].attrs['long_name'] = 'F_tur_zm' , 'Zonal Mean Turbulent Surface Stress'
GB['F_tur_zm']=budget[key]
key= 'F_gwave_zm'
budget[key].name , budget[key].attrs['long_name'] = 'F_gwave_zm' , 'Zonal Mean Zonal Gravity Wave Stress'
GB['F_gwave_zm']=budget[key]
key= 'data_zm'
# Coriolis term of the budget mean.
tempv =budget[key].v * f
tempv.name , tempv.attrs['long_name'], tempv.attrs['units'] = 'Zonal Mean Advection of Planetary Momentum' , 'Zonal Mean Advection of Planetary Momentum', '(Pa m s**-2)'
GB['v_f_zm']=tempv
# save also zonal mean winds and GPH
key='u_budget'
G_others[key]=budget['data_zm'].u
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Budget Zonal Mean Zonal Wind' , '(Pa m s**-1)'
key='v_budget'
G_others[key]=budget['data_zm'].v
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Budget Zonal Mean Meridional Wind' , '(Pa m s**-1)'
key= 'phi_budget'
G_others[key]=budget['phi_zm'].compute()
G_others[key].name , G_others[key].attrs['long_name'], G_others[key].attrs['units'] = key , 'Budget Zonal Mean Geopotential Height' , '(Pa m**2 s**-2)'
# close original files
Dsurface.close()
D.close()
tend=time.time()
print('define and zm process time :' + str(tend- tstart))
hist = logger(hist, 'define and zm process time :'+ str(tend- tstart))
tstart=time.time()
# %% 5. Vertical integrals
print('5. Vertical integrals')
G_int =vertical_integal_Hbeta(G, BATA_01_zm) # BATA_01_zm not computed yet.
G_int.attrs['long_name'], G_int.attrs['units'] = 'Momentum Budget in the Representetive Mean', 'Pa'
GB_int =vertical_integal_Hbeta(GB, BATA_01_zm)
GB_int.attrs['long_name'], GB_int.attrs['units'] = 'Momentum Budget in the Budget Mean', 'Pa**2'
# Split the variables into the raw fluxes (which keep an extra length unit
# after integration) and everything else, and stamp units accordingly.
level_vars = list(G.keys())
flux_vars=['uprime_vprime_zm','uzm_vzm' ]
#level_vars.remove('F_tur_zm')
#level_vars.remove('F_gwave_zm')
for k in flux_vars:
    level_vars.remove(k)
for k in level_vars:
    G_int[k].attrs = G[k].attrs
    G_int[k].attrs['units'] = 'Pa'
    GB_int[k].attrs = GB[k].attrs
    GB_int[k].attrs['units'] = 'Pa**2'
for k in flux_vars:
    G_int[k].attrs = G[k].attrs
    G_int[k].attrs['units'] = 'Pa m'
    GB_int[k].attrs = GB[k].attrs
    GB_int[k].attrs['units'] = 'Pa**2 m'
# %% 5.b optional plotting of the continental separation; the per-segment
# results accumulated above are only kept alive for this branch.
if plot_cont:
    F =plot_continent_seperation(all_CD_rep, G_int)
    F.save(name='exmpl_repres_'+ date_str, path=plot_path+'contitent_separ/')
    del all_CD_rep
    del all_CD_bud
    del GB_CD_int
    del G_CD_int
# %% 5.b save zonal mean data
# NOTE(review): the bare `date_str` expression below is a no-op (likely a
# leftover from interactive use); it is kept for byte-compatibility.
date_str
save_path_local = save_path + '/instantanious_zm/repres/'
os.makedirs(save_path_local, exist_ok = True)
G.to_netcdf(save_path_local + key_name +'repres_zm_'+ date_str + '.nc')
save_path_local = save_path + '/instantanious_zm/budget/'
os.makedirs(save_path_local, exist_ok = True)
GB.to_netcdf(save_path_local + key_name +'budget_zm_'+ date_str + '.nc')
save_path_local = save_path + '/instantanious_eulerian_zm/'
os.makedirs(save_path_local, exist_ok = True)
G_others.to_netcdf(save_path_local + key_name +'zm_others_'+ date_str + '.nc')
# Pack the beta-mask diagnostics into their own Dataset with metadata.
G_bata = xr.Dataset(BATAD)
key='BATA_zm'
G_bata[key].name , G_bata[key].attrs['long_name'], G_bata[key].attrs['units'] = 'BATA_zm' , 'Zonal mean rho_beta', 'Pa'
key='BATA_zm_01'
G_bata[key].name , G_bata[key].attrs['long_name'], G_bata[key].attrs['units'] = 'BATA_zm_01' , 'Zonal mean H_beta', 'binary'
key='ps_zm'
G_bata[key].name , G_bata[key].attrs['long_name'], G_bata[key].attrs['units'] = 'ps_zm' , 'Zonal mean surface pressure', 'Pa'
save_path_local = save_path + '/instantanious_bata/'
os.makedirs(save_path_local, exist_ok = True)
G_bata.to_netcdf(save_path_local + | |
is fixed.
if not aggressive:
return self._export_capsule(repository, includes_dependencies=includes_dependencies)
else:
try:
return self._export_capsule(repository, includes_dependencies=includes_dependencies)
except Exception:
# Empirically this fails occasionally, we don't know
# why however.
time.sleep(1)
return self._export_capsule( repository, includes_dependencies=includes_dependencies)
def _export_capsule( self, repository, includes_dependencies=None ):
    """Export *repository* as a capsule through the web UI.

    Visits the export page for the repository's tip revision, submits the
    export form, and returns the path of a temporary file holding the
    downloaded capsule.  ``includes_dependencies`` may be True, False, or
    None; only an explicit True/False triggers the dependency page check.
    """
    encoded_repository_id = self.security.encode_id( repository.id )
    tip_revision = self.get_repository_tip( repository )
    url = '/repository/export?repository_id=%s&changeset_revision=%s' % \
        ( encoded_repository_id, tip_revision )
    self.visit_url( url )
    log.info( "Visited url %s looking for export capsule button" % url )
    self.check_page_for_string( "Repository '" )
    self.check_page_for_string( "Export" )
    # Explicit check for True/False since None means we don't know if this
    # includes dependencies and so we skip both checks...
    if includes_dependencies is True:
        self.check_page_for_string( "Export repository dependencies?" )
    elif includes_dependencies is False:
        self.check_page_for_string( "No repository dependencies are defined for revision" )
    self.submit_form( 'export_repository', 'export_repository_button' )
    handle, capsule_path = tempfile.mkstemp()
    os.close( handle )
    with open( capsule_path, 'w' ) as capsule_file:
        capsule_file.write( self.last_page() )
    return capsule_path
def fetch_repository_metadata( self, repository, strings_displayed=None, strings_not_displayed=None ):
    """Visit the repository metadata API endpoint and verify the page text."""
    encoded_repository_id = self.security.encode_id( repository.id )
    self.visit_url( '/api/repositories/%s/metadata' % encoded_repository_id )
    self.check_for_strings( strings_displayed, strings_not_displayed )
def fill_review_form( self, review_contents_dict, strings_displayed=None, strings_not_displayed=None ):
    """Fill out and submit a repository review form.

    ``review_contents_dict`` maps a review label to a dict with keys
    'comment', 'rating', 'approved' and optionally 'private'; a falsy value
    marks that label as not applicable.  After submission, if any label was
    actually reviewed, the page is re-checked for the save confirmation.
    """
    kwd = dict()
    changed = False
    for label, contents in review_contents_dict.items():
        if contents:
            changed = True
            kwd[ '%s__ESEP__comment' % label ] = contents[ 'comment' ]
            kwd[ '%s__ESEP__rating' % label ] = contents[ 'rating' ]
            if 'private' in contents:
                kwd[ '%s__ESEP__private' % label ] = contents[ 'private' ]
            kwd[ '%s__ESEP__approved' % label ] = contents[ 'approved' ]
        else:
            kwd[ '%s__ESEP__approved' % label ] = 'not_applicable'
    self.check_for_strings( strings_displayed, strings_not_displayed )
    self.submit_form( 1, 'Workflows__ESEP__review_button', **kwd )
    if changed:
        # BUG FIX: the original called strings_displayed.append() here, which
        # raised AttributeError whenever the default None was used, and also
        # mutated the caller's list; build a fresh list instead.
        strings_displayed = ( strings_displayed or [] ) + [ 'Reviews were saved' ]
        self.check_for_strings( strings_displayed, strings_not_displayed )
def galaxy_login( self, email='<EMAIL>', password='<PASSWORD>', username='admin-user', redirect='', logout_first=True ):
    """Log the test browser into Galaxy, creating the account first if needed.

    The login form is only submitted when the account already existed;
    a freshly created account is already logged in.
    """
    if logout_first:
        self.galaxy_logout()
    previously_created, _username_taken, _invalid_username = \
        self.create_user_in_galaxy( email=email, password=password, username=username, redirect=redirect )
    if not previously_created:
        return
    self.visit_galaxy_url( "/user/login?use_panels=False" )
    self.submit_form( '1', 'login_button', login=email, redirect=redirect, password=password )
    def galaxy_logout( self ):
        """Log the current user out of Galaxy and clear the browser session."""
        self.visit_galaxy_url( "/user/logout" )
        self.check_page_for_string( "You have been logged out" )
        # Clear twill's cookie jar so no stale session carries over to the next login.
        tc.browser.cj.clear()
def generate_complex_dependency_xml( self, filename, filepath, repository_tuples, package, version ):
file_path = os.path.join( filepath, filename )
dependency_entries = []
template = string.Template( common.new_repository_dependencies_line )
for toolshed_url, name, owner, changeset_revision in repository_tuples:
dependency_entries.append( template.safe_substitute( toolshed_url=toolshed_url,
owner=owner,
repository_name=name,
changeset_revision=changeset_revision,
prior_installation_required='' ) )
if not os.path.exists( filepath ):
os.makedirs( filepath )
dependency_template = string.Template( common.complex_repository_dependency_template )
repository_dependency_xml = dependency_template.safe_substitute( package=package, version=version, dependency_lines='\n'.join( dependency_entries ) )
# Save the generated xml to the specified location.
open( file_path, 'w' ).write( repository_dependency_xml )
def generate_simple_dependency_xml( self,
repository_tuples,
filename,
filepath,
dependency_description='',
complex=False,
package=None,
version=None,
prior_installation_required=False ):
if not os.path.exists( filepath ):
os.makedirs( filepath )
dependency_entries = []
if prior_installation_required:
prior_installation_value = ' prior_installation_required="True"'
else:
prior_installation_value = ''
for toolshed_url, name, owner, changeset_revision in repository_tuples:
template = string.Template( common.new_repository_dependencies_line )
dependency_entries.append( template.safe_substitute( toolshed_url=toolshed_url,
owner=owner,
repository_name=name,
changeset_revision=changeset_revision,
prior_installation_required=prior_installation_value ) )
if dependency_description:
description = ' description="%s"' % dependency_description
else:
description = dependency_description
template_parser = string.Template( common.new_repository_dependencies_xml )
repository_dependency_xml = template_parser.safe_substitute( description=description, dependency_lines='\n'.join( dependency_entries ) )
# Save the generated xml to the specified location.
full_path = os.path.join( filepath, filename )
open( full_path, 'w' ).write( repository_dependency_xml )
def generate_temp_path( self, test_script_path, additional_paths=[] ):
temp_path = os.path.join( self.tool_shed_test_tmp_dir, test_script_path, os.sep.join( additional_paths ) )
if not os.path.exists( temp_path ):
os.makedirs( temp_path )
return temp_path
def get_datatypes_count( self ):
url = '/api/datatypes?upload_only=false'
self.visit_galaxy_url( url )
html = self.last_page()
datatypes = loads( html )
return len( datatypes )
def get_env_sh_path( self, tool_dependency_name, tool_dependency_version, repository ):
'''Return the absolute path to an installed repository's env.sh file.'''
env_sh_path = os.path.join( self.get_tool_dependency_path( tool_dependency_name, tool_dependency_version, repository ),
'env.sh' )
return env_sh_path
def get_filename( self, filename, filepath=None ):
if filepath is not None:
return os.path.abspath( os.path.join( filepath, filename ) )
else:
return os.path.abspath( os.path.join( self.file_dir, filename ) )
    def get_hg_repo( self, path ):
        """Return a mercurial repository object for the given on-disk repository path."""
        return hg.repository( ui.ui(), path )
def get_last_reviewed_revision_by_user( self, user, repository ):
changelog_tuples = self.get_repository_changelog_tuples( repository )
reviews = test_db_util.get_reviews_ordered_by_changeset_revision( repository.id, changelog_tuples, reviewer_user_id=user.id )
if reviews:
last_review = reviews[ -1 ]
else:
last_review = None
return last_review
def get_repositories_category_api( self, categories, strings_displayed=None, strings_not_displayed=None ):
for category in categories:
url = '/api/categories/%s/repositories' % self.security.encode_id( category.id )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
def get_tool_dependency_path( self, tool_dependency_name, tool_dependency_version, repository ):
'''Return the absolute path for an installed tool dependency.'''
return os.path.join( self.galaxy_tool_dependency_dir,
tool_dependency_name,
tool_dependency_version,
repository.owner,
repository.name,
repository.installed_changeset_revision )
def get_or_create_repository( self, owner=None, strings_displayed=None, strings_not_displayed=None, **kwd ):
# If not checking for a specific string, it should be safe to assume that
# we expect repository creation to be successful.
if strings_displayed is None:
strings_displayed = [ 'Repository', kwd[ 'name' ], 'has been created' ]
if strings_not_displayed is None:
strings_not_displayed = []
repository = test_db_util.get_repository_by_name_and_owner( kwd[ 'name' ], owner )
if repository is None:
self.visit_url( '/repository/create_repository' )
self.submit_form( 1, 'create_repository_button', **kwd )
self.check_for_strings( strings_displayed, strings_not_displayed )
repository = test_db_util.get_repository_by_name_and_owner( kwd[ 'name' ], owner )
return repository
def get_repo_path( self, repository ):
# An entry in the hgweb.config file looks something like: repos/test/mira_assembler = database/community_files/000/repo_123
lhs = "repos/%s/%s" % ( repository.user.username, repository.name )
try:
return self.hgweb_config_manager.get_entry( lhs )
except:
raise Exception( "Entry for repository %s missing in hgweb config file %s." % ( lhs, self.hgweb_config_manager.hgweb_config ) )
def get_repository_changelog_tuples( self, repository ):
repo = self.get_hg_repo( self.get_repo_path( repository ) )
changelog_tuples = []
for changeset in repo.changelog:
ctx = repo.changectx( changeset )
changelog_tuples.append( ( ctx.rev(), repo.changectx( changeset ) ) )
return changelog_tuples
def get_repository_datatypes_count( self, repository ):
metadata = self.get_repository_metadata( repository )[0].metadata
if 'datatypes' not in metadata:
return 0
else:
return len( metadata[ 'datatypes' ] )
    def get_repository_file_list( self, repository, base_path, current_path=None ):
        '''Recursively load repository folder contents and append them to a list. Similar to os.walk but via /repository/open_folder.'''
        # Returned entries are paths relative to base_path; folder names
        # themselves are included as well as the files inside them.
        if current_path is None:
            request_param_path = base_path
        else:
            request_param_path = os.path.join( base_path, current_path )
        # Get the current folder's contents.
        params = dict( folder_path=request_param_path, repository_id=self.security.encode_id( repository.id ) )
        url = '/repository/open_folder'
        self.visit_url( url, params=params )
        file_list = loads( self.last_page() )
        returned_file_list = []
        if current_path is not None:
            returned_file_list.append( current_path )
        # Loop through the json dict returned by /repository/open_folder.
        for file_dict in file_list:
            if file_dict[ 'isFolder' ]:
                # This is a folder. Get the contents of the folder and append it to the list,
                # prefixed with the path relative to the repository root, if any.
                if current_path is None:
                    returned_file_list.extend( self.get_repository_file_list( repository=repository, base_path=base_path, current_path=file_dict[ 'title' ] ) )
                else:
                    sub_path = os.path.join( current_path, file_dict[ 'title' ] )
                    returned_file_list.extend( self.get_repository_file_list( repository=repository, base_path=base_path, current_path=sub_path ) )
            else:
                # This is a regular file, prefix the filename with the current path and append it to the list.
                if current_path is not None:
                    returned_file_list.append( os.path.join( current_path, file_dict[ 'title' ] ) )
                else:
                    returned_file_list.append( file_dict[ 'title' ] )
        return returned_file_list
def get_repository_metadata( self, repository ):
return [ metadata_revision for metadata_revision in repository.metadata_revisions ]
    def get_repository_metadata_by_changeset_revision( self, repository, changeset_revision ):
        """Return the repository_metadata record for the given changeset revision, if any."""
        return test_db_util.get_repository_metadata_for_changeset_revision( repository.id, changeset_revision )
def get_repository_metadata_revisions( self, repository ):
return [ str( repository_metadata.changeset_revision ) for repository_metadata in repository.metadata_revisions ]
def get_repository_tip( self, repository ):
repo = self.get_hg_repo( self.get_repo_path( repository ) )
return str( repo.changectx( repo.changelog.tip() ) )
def get_sniffers_count( self ):
url = '/api/datatypes/sniffers'
self.visit_galaxy_url( url )
html = self.last_page()
sniffers = loads( html )
return len( sniffers )
def get_tools_from_repository_metadata( self, repository, include_invalid=False ):
'''Get a list of valid and (optionally) invalid tool dicts from the repository metadata.'''
valid_tools = []
invalid_tools = []
for repository_metadata in repository.metadata_revisions:
if 'tools' in repository_metadata.metadata:
valid_tools.append( dict( tools=repository_metadata.metadata[ 'tools' ], changeset_revision=repository_metadata.changeset_revision ) )
if include_invalid and 'invalid_tools' in repository_metadata.metadata:
invalid_tools.append( dict( tools=repository_metadata.metadata[ 'invalid_tools' ], changeset_revision=repository_metadata.changeset_revision ) )
return valid_tools, invalid_tools
def get_tool_panel_section_from_api( self, metadata ):
tool_metadata = metadata[ 'tools' ]
tool_guid = quote_plus( tool_metadata[ 0 ][ 'guid' ], safe='' )
api_url = '/%s' % '/'.join( [ 'api', 'tools', tool_guid ] )
self.visit_galaxy_url( api_url )
tool_dict = loads( self.last_page() )
tool_panel_section = tool_dict[ 'panel_section_name' ]
return tool_panel_section
def get_tool_panel_section_from_repository_metadata( self, metadata ):
tool_metadata = metadata[ 'tools' ]
tool_guid = tool_metadata[ 0 ][ 'guid' ]
assert 'tool_panel_section' in metadata, 'Tool panel section not found in metadata: %s' % metadata
tool_panel_section_metadata = metadata[ 'tool_panel_section' ]
# tool_section_dict = dict( | |
list_extensions(self, **_params):
"""Fetch a list of all extensions on server side."""
return self.get(self.extensions_path, params=_params)
def show_extension(self, ext_alias, **_params):
"""Fetches information of a certain extension."""
return self.get(self.extension_path % ext_alias, params=_params)
def list_ports(self, retrieve_all=True, **_params):
"""Fetches a list of all ports for a project."""
# Pass filters in "params" argument to do_request
return self.list('ports', self.ports_path, retrieve_all,
**_params)
def show_port(self, port, **_params):
"""Fetches information of a certain port."""
return self.get(self.port_path % (port), params=_params)
def create_port(self, body=None):
"""Creates a new port."""
return self.post(self.ports_path, body=body)
def update_port(self, port, body=None):
"""Updates a port."""
return self.put(self.port_path % (port), body=body)
def delete_port(self, port):
"""Deletes the specified port."""
return self.delete(self.port_path % (port))
def list_networks(self, retrieve_all=True, **_params):
"""Fetches a list of all networks for a project."""
# Pass filters in "params" argument to do_request
return self.list('networks', self.networks_path, retrieve_all,
**_params)
def show_network(self, network, **_params):
"""Fetches information of a certain network."""
return self.get(self.network_path % (network), params=_params)
def create_network(self, body=None):
"""Creates a new network."""
return self.post(self.networks_path, body=body)
def update_network(self, network, body=None):
"""Updates a network."""
return self.put(self.network_path % (network), body=body)
def delete_network(self, network):
"""Deletes the specified network."""
return self.delete(self.network_path % (network))
def list_subnets(self, retrieve_all=True, **_params):
"""Fetches a list of all subnets for a project."""
return self.list('subnets', self.subnets_path, retrieve_all,
**_params)
def show_subnet(self, subnet, **_params):
"""Fetches information of a certain subnet."""
return self.get(self.subnet_path % (subnet), params=_params)
def create_subnet(self, body=None):
"""Creates a new subnet."""
return self.post(self.subnets_path, body=body)
def update_subnet(self, subnet, body=None):
"""Updates a subnet."""
return self.put(self.subnet_path % (subnet), body=body)
def delete_subnet(self, subnet):
"""Deletes the specified subnet."""
return self.delete(self.subnet_path % (subnet))
def list_subnetpools(self, retrieve_all=True, **_params):
"""Fetches a list of all subnetpools for a project."""
return self.list('subnetpools', self.subnetpools_path, retrieve_all,
**_params)
def show_subnetpool(self, subnetpool, **_params):
"""Fetches information of a certain subnetpool."""
return self.get(self.subnetpool_path % (subnetpool), params=_params)
def create_subnetpool(self, body=None):
"""Creates a new subnetpool."""
return self.post(self.subnetpools_path, body=body)
def update_subnetpool(self, subnetpool, body=None):
"""Updates a subnetpool."""
return self.put(self.subnetpool_path % (subnetpool), body=body)
def delete_subnetpool(self, subnetpool):
"""Deletes the specified subnetpool."""
return self.delete(self.subnetpool_path % (subnetpool))
def list_routers(self, retrieve_all=True, **_params):
"""Fetches a list of all routers for a project."""
# Pass filters in "params" argument to do_request
return self.list('routers', self.routers_path, retrieve_all,
**_params)
def show_router(self, router, **_params):
"""Fetches information of a certain router."""
return self.get(self.router_path % (router), params=_params)
def create_router(self, body=None):
"""Creates a new router."""
return self.post(self.routers_path, body=body)
def update_router(self, router, body=None):
"""Updates a router."""
return self.put(self.router_path % (router), body=body)
def delete_router(self, router):
"""Deletes the specified router."""
return self.delete(self.router_path % (router))
def list_address_scopes(self, retrieve_all=True, **_params):
"""Fetches a list of all address scopes for a project."""
return self.list('address_scopes', self.address_scopes_path,
retrieve_all, **_params)
def show_address_scope(self, address_scope, **_params):
"""Fetches information of a certain address scope."""
return self.get(self.address_scope_path % (address_scope),
params=_params)
def create_address_scope(self, body=None):
"""Creates a new address scope."""
return self.post(self.address_scopes_path, body=body)
def update_address_scope(self, address_scope, body=None):
"""Updates a address scope."""
return self.put(self.address_scope_path % (address_scope), body=body)
def delete_address_scope(self, address_scope):
"""Deletes the specified address scope."""
return self.delete(self.address_scope_path % (address_scope))
def add_interface_router(self, router, body=None):
"""Adds an internal network interface to the specified router."""
return self.put((self.router_path % router) + "/add_router_interface",
body=body)
def remove_interface_router(self, router, body=None):
"""Removes an internal network interface from the specified router."""
return self.put((self.router_path % router) +
"/remove_router_interface", body=body)
def add_gateway_router(self, router, body=None):
"""Adds an external network gateway to the specified router."""
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': body}})
def remove_gateway_router(self, router):
"""Removes an external network gateway from the specified router."""
return self.put((self.router_path % router),
body={'router': {'external_gateway_info': {}}})
def list_floatingips(self, retrieve_all=True, **_params):
"""Fetches a list of all floatingips for a project."""
# Pass filters in "params" argument to do_request
return self.list('floatingips', self.floatingips_path, retrieve_all,
**_params)
def show_floatingip(self, floatingip, **_params):
"""Fetches information of a certain floatingip."""
return self.get(self.floatingip_path % (floatingip), params=_params)
def create_floatingip(self, body=None):
"""Creates a new floatingip."""
return self.post(self.floatingips_path, body=body)
def update_floatingip(self, floatingip, body=None):
"""Updates a floatingip."""
return self.put(self.floatingip_path % (floatingip), body=body)
def delete_floatingip(self, floatingip):
"""Deletes the specified floatingip."""
return self.delete(self.floatingip_path % (floatingip))
def create_security_group(self, body=None):
"""Creates a new security group."""
return self.post(self.security_groups_path, body=body)
def update_security_group(self, security_group, body=None):
"""Updates a security group."""
return self.put(self.security_group_path %
security_group, body=body)
def list_security_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all security groups for a project."""
return self.list('security_groups', self.security_groups_path,
retrieve_all, **_params)
def show_security_group(self, security_group, **_params):
"""Fetches information of a certain security group."""
return self.get(self.security_group_path % (security_group),
params=_params)
def delete_security_group(self, security_group):
"""Deletes the specified security group."""
return self.delete(self.security_group_path % (security_group))
def create_security_group_rule(self, body=None):
"""Creates a new security group rule."""
return self.post(self.security_group_rules_path, body=body)
def delete_security_group_rule(self, security_group_rule):
"""Deletes the specified security group rule."""
return self.delete(self.security_group_rule_path %
(security_group_rule))
def list_security_group_rules(self, retrieve_all=True, **_params):
"""Fetches a list of all security group rules for a project."""
return self.list('security_group_rules',
self.security_group_rules_path,
retrieve_all, **_params)
def show_security_group_rule(self, security_group_rule, **_params):
"""Fetches information of a certain security group rule."""
return self.get(self.security_group_rule_path % (security_group_rule),
params=_params)
def list_endpoint_groups(self, retrieve_all=True, **_params):
"""Fetches a list of all VPN endpoint groups for a project."""
return self.list('endpoint_groups', self.endpoint_groups_path,
retrieve_all, **_params)
def show_endpoint_group(self, endpointgroup, **_params):
"""Fetches information for a specific VPN endpoint group."""
return self.get(self.endpoint_group_path % endpointgroup,
params=_params)
def create_endpoint_group(self, body=None):
"""Creates a new VPN endpoint group."""
return self.post(self.endpoint_groups_path, body=body)
def update_endpoint_group(self, endpoint_group, body=None):
"""Updates a VPN endpoint group."""
return self.put(self.endpoint_group_path % endpoint_group, body=body)
def delete_endpoint_group(self, endpoint_group):
"""Deletes the specified VPN endpoint group."""
return self.delete(self.endpoint_group_path % endpoint_group)
def list_vpnservices(self, retrieve_all=True, **_params):
"""Fetches a list of all configured VPN services for a project."""
return self.list('vpnservices', self.vpnservices_path, retrieve_all,
**_params)
def show_vpnservice(self, vpnservice, **_params):
"""Fetches information of a specific VPN service."""
return self.get(self.vpnservice_path % (vpnservice), params=_params)
def create_vpnservice(self, body=None):
"""Creates a new VPN service."""
return self.post(self.vpnservices_path, body=body)
def update_vpnservice(self, vpnservice, body=None):
"""Updates a VPN service."""
return self.put(self.vpnservice_path % (vpnservice), body=body)
def delete_vpnservice(self, vpnservice):
"""Deletes the specified VPN service."""
return self.delete(self.vpnservice_path % (vpnservice))
def list_ipsec_site_connections(self, retrieve_all=True, **_params):
"""Fetches all configured IPsecSiteConnections for a project."""
return self.list('ipsec_site_connections',
self.ipsec_site_connections_path,
retrieve_all,
**_params)
def show_ipsec_site_connection(self, ipsecsite_conn, **_params):
"""Fetches information of a specific IPsecSiteConnection."""
return self.get(
self.ipsec_site_connection_path % (ipsecsite_conn), params=_params
)
def create_ipsec_site_connection(self, body=None):
"""Creates a new IPsecSiteConnection."""
return self.post(self.ipsec_site_connections_path, body=body)
def update_ipsec_site_connection(self, ipsecsite_conn, body=None):
"""Updates an IPsecSiteConnection."""
return self.put(
self.ipsec_site_connection_path % (ipsecsite_conn), body=body
)
def delete_ipsec_site_connection(self, ipsecsite_conn):
"""Deletes the specified IPsecSiteConnection."""
return self.delete(self.ipsec_site_connection_path % (ipsecsite_conn))
def list_ikepolicies(self, retrieve_all=True, **_params):
"""Fetches a list of all configured IKEPolicies for a project."""
return self.list('ikepolicies', self.ikepolicies_path, retrieve_all,
**_params)
def show_ikepolicy(self, ikepolicy, **_params):
"""Fetches information of a specific IKEPolicy."""
return self.get(self.ikepolicy_path % (ikepolicy), params=_params)
def create_ikepolicy(self, body=None):
"""Creates a new IKEPolicy."""
return self.post(self.ikepolicies_path, body=body)
def update_ikepolicy(self, ikepolicy, body=None):
"""Updates an IKEPolicy."""
return self.put(self.ikepolicy_path % (ikepolicy), body=body)
def delete_ikepolicy(self, ikepolicy):
"""Deletes the specified IKEPolicy."""
return self.delete(self.ikepolicy_path % (ikepolicy))
def list_ipsecpolicies(self, retrieve_all=True, **_params):
"""Fetches a list of all configured IPsecPolicies for a project."""
return self.list('ipsecpolicies',
self.ipsecpolicies_path,
retrieve_all,
**_params)
def show_ipsecpolicy(self, ipsecpolicy, **_params):
"""Fetches information of a specific IPsecPolicy."""
return self.get(self.ipsecpolicy_path % (ipsecpolicy), params=_params)
def create_ipsecpolicy(self, body=None):
"""Creates a new IPsecPolicy."""
return self.post(self.ipsecpolicies_path, body=body)
def update_ipsecpolicy(self, ipsecpolicy, body=None):
"""Updates an IPsecPolicy."""
return self.put(self.ipsecpolicy_path % (ipsecpolicy), body=body)
def delete_ipsecpolicy(self, ipsecpolicy):
"""Deletes the specified IPsecPolicy."""
return self.delete(self.ipsecpolicy_path % (ipsecpolicy))
def list_loadbalancers(self, retrieve_all=True, **_params):
"""Fetches a list of all loadbalancers for a project."""
return self.list('loadbalancers', self.lbaas_loadbalancers_path,
retrieve_all, **_params)
def show_loadbalancer(self, lbaas_loadbalancer, **_params):
"""Fetches information for a load balancer."""
return self.get(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
params=_params)
def create_loadbalancer(self, body=None):
"""Creates a new load balancer."""
return self.post(self.lbaas_loadbalancers_path, body=body)
def update_loadbalancer(self, lbaas_loadbalancer, body=None):
"""Updates a load balancer."""
return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
body=body)
def delete_loadbalancer(self, lbaas_loadbalancer):
"""Deletes the specified load balancer."""
return self.delete(self.lbaas_loadbalancer_path %
(lbaas_loadbalancer))
def retrieve_loadbalancer_stats(self, loadbalancer, **_params):
"""Retrieves stats for a certain load balancer."""
return self.get(self.lbaas_loadbalancer_path_stats % (loadbalancer),
params=_params)
def retrieve_loadbalancer_status(self, loadbalancer, **_params):
"""Retrieves status for a certain load balancer."""
return self.get(self.lbaas_loadbalancer_path_status % (loadbalancer),
params=_params)
def list_listeners(self, retrieve_all=True, **_params):
"""Fetches a list of all lbaas_listeners for a project."""
return self.list('listeners', self.lbaas_listeners_path,
retrieve_all, **_params)
def show_listener(self, lbaas_listener, **_params):
"""Fetches information for a lbaas_listener."""
return self.get(self.lbaas_listener_path % (lbaas_listener),
params=_params)
def create_listener(self, body=None):
"""Creates a new lbaas_listener."""
return self.post(self.lbaas_listeners_path, body=body)
def update_listener(self, lbaas_listener, body=None):
"""Updates a lbaas_listener."""
return self.put(self.lbaas_listener_path % (lbaas_listener),
body=body)
def delete_listener(self, lbaas_listener):
"""Deletes the specified lbaas_listener."""
return self.delete(self.lbaas_listener_path % (lbaas_listener))
def list_lbaas_l7policies(self, retrieve_all=True, **_params):
"""Fetches a list of all L7 policies for a listener."""
return self.list('l7policies', self.lbaas_l7policies_path,
retrieve_all, **_params)
def show_lbaas_l7policy(self, l7policy, **_params):
"""Fetches | |
18, 11),
r.period_end)
elif (r.groupby == {'resource_metadata.instance_type': '83'} and
r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
self.assertEqual(1, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(4, r.min)
self.assertEqual(4, r.max)
self.assertEqual(4, r.sum)
self.assertEqual(4, r.avg)
self.assertEqual(0, r.duration)
self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
r.duration_start)
self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
r.duration_end)
self.assertEqual(7200, r.period)
self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
r.period_end)
elif (r.groupby == {'resource_metadata.instance_type': '83'} and
r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
self.assertEqual(1, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(2, r.sum)
self.assertEqual(2, r.avg)
self.assertEqual(0, r.duration)
self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
r.duration_start)
self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
r.duration_end)
self.assertEqual(7200, r.period)
self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
r.period_end)
elif (r.groupby == {'resource_metadata.instance_type': '84'} and
r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
self.assertEqual(2, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(4, r.sum)
self.assertEqual(2, r.avg)
self.assertEqual(4260, r.duration)
self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
r.duration_start)
self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
r.duration_end)
self.assertEqual(7200, r.period)
self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
r.period_end)
else:
self.assertNotEqual([{'resource_metadata.instance_type': '82'},
datetime.datetime(2013, 8, 1, 14, 11)],
[r.groupby, r.period_start])
self.assertNotEqual([{'resource_metadata.instance_type': '83'},
datetime.datetime(2013, 8, 1, 16, 11)],
[r.groupby, r.period_start])
self.assertNotEqual([{'resource_metadata.instance_type': '84'},
datetime.datetime(2013, 8, 1, 10, 11)],
[r.groupby, r.period_start])
self.assertNotEqual([{'resource_metadata.instance_type': '84'},
datetime.datetime(2013, 8, 1, 16, 11)],
[r.groupby, r.period_start])
    def test_group_by_with_query_filter_and_period(self):
        # Group statistics by project_id with a source query filter and a
        # two-hour (7200 s) period; three (group, period) buckets are expected.
        f = storage.SampleFilter(
            meter='instance',
            source='source-1',
        )
        results = list(self.conn.get_meter_statistics(f,
                                                      period=7200,
                                                      groupby=['project_id']))
        self.assertEqual(3, len(results))
        groupby_list = [r.groupby for r in results]
        groupby_keys_set = set(x for sub_dict in groupby_list
                               for x in sub_dict.keys())
        groupby_vals_set = set(x for sub_dict in groupby_list
                               for x in sub_dict.values())
        self.assertEqual(set(['project_id']), groupby_keys_set)
        self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
        period_start_set = set([r.period_start for r in results])
        period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11),
                                  datetime.datetime(2013, 8, 1, 14, 11),
                                  datetime.datetime(2013, 8, 1, 16, 11)])
        self.assertEqual(period_start_valid, period_start_set)
        # Verify the aggregates of each expected bucket; the else branch
        # asserts no unexpected (group, period) combination was produced.
        for r in results:
            if (r.groupby == {'project_id': 'project-1'} and
                    r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
                self.assertEqual(2, r.count)
                self.assertEqual('s', r.unit)
                self.assertEqual(1, r.min)
                self.assertEqual(1, r.max)
                self.assertEqual(2, r.sum)
                self.assertEqual(1, r.avg)
                self.assertEqual(1740, r.duration)
                self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
                                 r.duration_start)
                self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
                                 r.duration_end)
                self.assertEqual(7200, r.period)
                self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
                                 r.period_end)
            elif (r.groupby == {'project_id': 'project-1'} and
                    r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
                self.assertEqual(1, r.count)
                self.assertEqual('s', r.unit)
                self.assertEqual(2, r.min)
                self.assertEqual(2, r.max)
                self.assertEqual(2, r.sum)
                self.assertEqual(2, r.avg)
                self.assertEqual(0, r.duration)
                self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
                                 r.duration_start)
                self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
                                 r.duration_end)
                self.assertEqual(7200, r.period)
                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
                                 r.period_end)
            elif (r.groupby == {'project_id': 'project-2'} and
                    r.period_start == datetime.datetime(2013, 8, 1, 16, 11)):
                self.assertEqual(1, r.count)
                self.assertEqual('s', r.unit)
                self.assertEqual(4, r.min)
                self.assertEqual(4, r.max)
                self.assertEqual(4, r.sum)
                self.assertEqual(4, r.avg)
                self.assertEqual(0, r.duration)
                self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
                                 r.duration_start)
                self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
                                 r.duration_end)
                self.assertEqual(7200, r.period)
                self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11),
                                 r.period_end)
            else:
                self.assertNotEqual([{'project_id': 'project-1'},
                                     datetime.datetime(2013, 8, 1, 16, 11)],
                                    [r.groupby, r.period_start])
                self.assertNotEqual([{'project_id': 'project-2'},
                                     datetime.datetime(2013, 8, 1, 10, 11)],
                                    [r.groupby, r.period_start])
    def test_group_by_metadata_with_query_filter_and_period(self):
        # This test checks grouping with metadata fields in combination
        # with a query filter and period grouping.
        f = storage.SampleFilter(
            meter='instance',
            project='project-1',
        )
        results = list(
            self.conn.get_meter_statistics(
                f, period=7200, groupby=['resource_metadata.instance_type']))
        self.assertEqual(3, len(results))
        groupby_list = [r.groupby for r in results]
        groupby_keys_set = set(x for sub_dict in groupby_list
                               for x in sub_dict.keys())
        groupby_vals_set = set(x for sub_dict in groupby_list
                               for x in sub_dict.values())
        self.assertEqual(set(['resource_metadata.instance_type']),
                         groupby_keys_set)
        self.assertEqual(set(['82', '83', '84']), groupby_vals_set)
        period_start_set = set([r.period_start for r in results])
        period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11),
                                  datetime.datetime(2013, 8, 1, 14, 11)])
        self.assertEqual(period_start_valid, period_start_set)
        # Verify the aggregates of each expected (instance_type, period)
        # bucket; the else branch asserts no unexpected bucket was produced.
        for r in results:
            if (r.groupby == {'resource_metadata.instance_type': '82'} and
                    r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
                self.assertEqual(2, r.count)
                self.assertEqual('s', r.unit)
                self.assertEqual(1, r.min)
                self.assertEqual(1, r.max)
                self.assertEqual(2, r.sum)
                self.assertEqual(1, r.avg)
                self.assertEqual(1740, r.duration)
                self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
                                 r.duration_start)
                self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
                                 r.duration_end)
                self.assertEqual(7200, r.period)
                self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
                                 r.period_end)
            elif (r.groupby == {'resource_metadata.instance_type': '83'} and
                    r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
                self.assertEqual(1, r.count)
                self.assertEqual('s', r.unit)
                self.assertEqual(4, r.min)
                self.assertEqual(4, r.max)
                self.assertEqual(4, r.sum)
                self.assertEqual(4, r.avg)
                self.assertEqual(0, r.duration)
                self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
                                 r.duration_start)
                self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
                                 r.duration_end)
                self.assertEqual(7200, r.period)
                self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
                                 r.period_end)
            elif (r.groupby == {'resource_metadata.instance_type': '84'} and
                    r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
                self.assertEqual(2, r.count)
                self.assertEqual('s', r.unit)
                self.assertEqual(2, r.min)
                self.assertEqual(2, r.max)
                self.assertEqual(4, r.sum)
                self.assertEqual(2, r.avg)
                self.assertEqual(4260, r.duration)
                self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
                                 r.duration_start)
                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
                                 r.duration_end)
                self.assertEqual(7200, r.period)
                self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
                                 r.period_end)
            else:
                self.assertNotEqual([{'resource_metadata.instance_type': '82'},
                                     datetime.datetime(2013, 8, 1, 14, 11)],
                                    [r.groupby, r.period_start])
                self.assertNotEqual([{'resource_metadata.instance_type': '83'},
                                     datetime.datetime(2013, 8, 1, 14, 11)],
                                    [r.groupby, r.period_start])
                self.assertNotEqual([{'resource_metadata.instance_type': '84'},
                                     datetime.datetime(2013, 8, 1, 10, 11)],
                                    [r.groupby, r.period_start])
def test_group_by_start_timestamp_after(self):
f = storage.SampleFilter(
meter='instance',
start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1),
)
results = list(self.conn.get_meter_statistics(f,
groupby=['project_id']))
self.assertEqual([], results)
def test_group_by_end_timestamp_before(self):
f = storage.SampleFilter(
meter='instance',
end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59),
)
results = list(self.conn.get_meter_statistics(f,
groupby=['project_id']))
self.assertEqual([], results)
def test_group_by_start_timestamp(self):
f = storage.SampleFilter(
meter='instance',
start_timestamp=datetime.datetime(2013, 8, 1, 14, 58),
)
results = list(self.conn.get_meter_statistics(f,
groupby=['project_id']))
self.assertEqual(2, len(results))
groupby_list = [r.groupby for r in results]
groupby_keys_set = set(x for sub_dict in groupby_list
for x in sub_dict.keys())
groupby_vals_set = set(x for sub_dict in groupby_list
for x in sub_dict.values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
for r in results:
if r.groupby == {'project_id': 'project-1'}:
self.assertEqual(2, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(4, r.sum)
self.assertEqual(2, r.avg)
elif r.groupby == {'project_id': 'project-2'}:
self.assertEqual(2, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(4, r.max)
self.assertEqual(6, r.sum)
self.assertEqual(3, r.avg)
def test_group_by_end_timestamp(self):
f = storage.SampleFilter(
meter='instance',
end_timestamp=datetime.datetime(2013, 8, 1, 11, 45),
)
results = list(self.conn.get_meter_statistics(f,
groupby=['project_id']))
self.assertEqual(1, len(results))
groupby_list = [r.groupby for r in results]
groupby_keys_set = set(x for sub_dict in groupby_list
for x in sub_dict.keys())
groupby_vals_set = set(x for sub_dict in groupby_list
for x in sub_dict.values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1']), groupby_vals_set)
for r in results:
if r.groupby == {'project_id': 'project-1'}:
self.assertEqual(3, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(1, r.min)
self.assertEqual(4, r.max)
self.assertEqual(6, r.sum)
self.assertEqual(2, r.avg)
def test_group_by_start_end_timestamp(self):
f = storage.SampleFilter(
meter='instance',
start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3),
end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59),
)
results = list(self.conn.get_meter_statistics(f,
groupby=['project_id']))
self.assertEqual(2, len(results))
groupby_list = [r.groupby for r in results]
groupby_keys_set = set(x for sub_dict in groupby_list
for x in sub_dict.keys())
groupby_vals_set = set(x for sub_dict in groupby_list
for x in sub_dict.values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
for r in results:
if r.groupby == {'project_id': 'project-1'}:
self.assertEqual(5, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(1, r.min)
self.assertEqual(4, r.max)
self.assertEqual(10, r.sum)
self.assertEqual(2, r.avg)
elif r.groupby == {'project_id': 'project-2'}:
self.assertEqual(2, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(4, r.max)
self.assertEqual(6, r.sum)
self.assertEqual(3, r.avg)
def test_group_by_start_end_timestamp_with_query_filter(self):
f = storage.SampleFilter(
meter='instance',
project='project-1',
start_timestamp=datetime.datetime(2013, 8, 1, 11, 1),
end_timestamp=datetime.datetime(2013, 8, 1, 20, 0),
)
results = list(self.conn.get_meter_statistics(f,
groupby=['resource_id']))
groupby_list = [r.groupby for r in results]
groupby_keys_set = set(x for sub_dict in groupby_list
for x in sub_dict.keys())
groupby_vals_set = set(x for sub_dict in groupby_list
for x in sub_dict.values())
self.assertEqual(set(['resource_id']), groupby_keys_set)
self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set)
for r in results:
if r.groupby == {'resource_id': 'resource-1'}:
self.assertEqual(2, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(4, r.sum)
self.assertEqual(2, r.avg)
elif r.groupby == {'resource_id': 'resource-3'}:
self.assertEqual(1, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(4, r.min)
self.assertEqual(4, r.max)
self.assertEqual(4, r.sum)
self.assertEqual(4, r.avg)
def test_group_by_start_end_timestamp_with_period(self):
f = storage.SampleFilter(
meter='instance',
start_timestamp=datetime.datetime(2013, 8, 1, 14, 0),
end_timestamp=datetime.datetime(2013, 8, 1, 17, 0),
)
results = list(self.conn.get_meter_statistics(f,
period=3600,
groupby=['project_id']))
self.assertEqual(3, len(results))
groupby_list = [r.groupby for r in results]
groupby_keys_set = set(x for sub_dict in groupby_list
for x in sub_dict.keys())
groupby_vals_set = set(x for sub_dict in groupby_list
for x in sub_dict.values())
self.assertEqual(set(['project_id']), groupby_keys_set)
self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
period_start_set = set([r.period_start for r in results])
period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0),
datetime.datetime(2013, 8, 1, 15, 0),
datetime.datetime(2013, 8, 1, 16, 0)])
self.assertEqual(period_start_valid, period_start_set)
for r in results:
if (r.groupby == {'project_id': 'project-1'} and
r.period_start == datetime.datetime(2013, 8, 1, 14, 0)):
self.assertEqual(1, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(2, r.sum)
self.assertEqual(2, r.avg)
self.assertEqual(0, r.duration)
self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
r.duration_start)
self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
r.duration_end)
self.assertEqual(3600, r.period)
self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0),
r.period_end)
elif (r.groupby == {'project_id': 'project-1'} and
r.period_start == datetime.datetime(2013, 8, 1, 16, 0)):
self.assertEqual(1, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(2, r.sum)
self.assertEqual(2, r.avg)
self.assertEqual(0, r.duration)
self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
r.duration_start)
self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
r.duration_end)
self.assertEqual(3600, r.period)
self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0),
r.period_end)
elif (r.groupby == {'project_id': 'project-2'} and
r.period_start == datetime.datetime(2013, 8, 1, 15, 0)):
self.assertEqual(1, r.count)
self.assertEqual('s', r.unit)
self.assertEqual(2, r.min)
self.assertEqual(2, r.max)
self.assertEqual(2, r.sum)
| |
# Copyright (c) 2020 The Foundry Visionmongers Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import logging
import time
from Katana import NodegraphAPI, Utils
import LookFileBakeAPI
from LookFileBakeAPI import LookFileBaker
from Nodes3DAPI import LookFileBaking
log = logging.getLogger("UsdMaterialBake.Node")
__all__ = ['UsdMaterialBakeNode']
class UsdMaterialBakeNode(NodegraphAPI.SuperTool):
    """
    UsdMaterialBake node, with extra methods to allow for baking features
    and ability to change and re-order inputs driven by the Editor UI, or
    by calling these directly. Used for baking USD data using the UsdExport
    OutputFormat plug-in.
    """

    # The first input ("orig") is the reference input and must always exist;
    # only ports at index >= MIN_PORTS are treated as variant inputs.
    MIN_PORTS = 1

    def __init__(self):
        try:
            # Node construction must not be undoable: disable capture around
            # the whole internal-network setup and always re-enable it.
            Utils.UndoStack.DisableCapture()
            try:
                self.setName("UsdMaterialBake")
                self.setType("UsdMaterialBake")
                networkVersion = "1.0"
                parameters_XML = _parameters_XML.format(
                    networkVersion=networkVersion)
                self.getParameters().parseXML(parameters_XML)
                self.addInputPort("orig")
                self.addInputPort("default")
                self.addOutputPort("out")
                # An internal Dot node forwards the "orig" input straight to
                # "out", so the SuperTool passes the scene through untouched.
                dot = NodegraphAPI.CreateNode('Dot', self)
                dot.getInputPortByIndex(0).connect(self.getSendPort("orig"))
                dot.getOutputPortByIndex(0).connect(self.getReturnPort("out"))
                NodegraphAPI.SetNodePosition(dot, (0, 200))
                NodegraphAPI.SetNodeShapeAttr(self, 'basicDisplay', 1)
                NodegraphAPI.SetNodeShapeAttr(self, 'iconName', '')
            finally:
                Utils.UndoStack.EnableCapture()
        except Exception:
            log.exception("CREATE UsdMaterialBake FAILED.")
            raise
        # UI-only state; populated by bake() when run with a parentWidget.
        self.__timer = None
        self.__interruptWidget = None

    # --- Public node API -------------------------------------------
    def addVariantInput(self, variantName):
        """
        Adds a new input port for a new variant name.

        @type variantName: C{str}
        @param variantName: New name to give to the new port.
        """
        self.addInputPort(variantName)

    def deleteVariantInput(self, index):
        """
        Deletes the existing variant from the index provided. Indices below
        C{MIN_PORTS} (the reference input) or out of range are ignored.

        @type index: C{int}
        @param index: Index of the input port to delete.
        """
        if index < self.MIN_PORTS or index >= self.getNumInputPorts():
            return
        portName = self.getInputPortByIndex(index).getName()
        self.removeInputPort(portName)

    def reorderInput(self, index, newIndex):
        """
        Reorders two input variants by deleting the old port and recreating
        it at the new index, restoring its previous connections.

        @type index: C{int}
        @type newIndex: C{int}
        @param index: Index of the input port to reposition.
        @param newIndex: New position of the input port. Assuming that the
            current index does not exist.
        """
        if index < self.MIN_PORTS or index >= self.getNumInputPorts():
            return
        if newIndex < self.MIN_PORTS or newIndex >= self.getNumInputPorts():
            return
        oldPort = self.getInputPortByIndex(index)
        # Remember the connections before the port object is destroyed.
        connections = oldPort.getConnectedPorts()
        oldPortName = oldPort.getName()
        self.removeInputPort(oldPortName)
        del oldPort
        newPort = self.addInputPortAtIndex(oldPortName, newIndex)
        for connectedPort in connections:
            newPort.connect(connectedPort)

    def renameVariantInput(self, index, newName):
        """
        Renames an existing input. Does not change the input order.

        @type index: C{int}
        @type newName: C{str}
        @param index: Index of the input to rename.
        @param newName: New name to change the port to.
        """
        if index < self.MIN_PORTS or index >= self.getNumInputPorts():
            return
        oldPort = self.getInputPortByIndex(index)
        if newName == oldPort.getName():
            return
        # Ports cannot be renamed in place; re-create at the same index and
        # restore the previous connections.
        connections = oldPort.getConnectedPorts()
        self.removeInputPort(oldPort.getName())
        del oldPort
        newPort = self.addInputPortAtIndex(newName, index)
        for connectedPort in connections:
            newPort.connect(connectedPort)

    def addParameterHints(self, attrName, inputDict):
        """
        Adds parameter hints to the given dictionary of hints for a
        GenericAssign parameter that shows the value of an attribute from the
        incoming scene.

        @type attrName: C{str}
        @type inputDict: C{dict}
        @param attrName: The name of the scene graph attribute from the
            incoming scene for which to add parameter hints.
        @param inputDict: The dictionary to which to add parameter hints.
        """
        inputDict.update(_ExtraHints.get(attrName, {}))

    def require3DInput(self, portName, graphState):
        """
        A Method from C{Node3D} used inside the look file bake code to gather
        the input port given the port name and graph state.

        @type portName: C{str}
        @type graphState: C{NodegraphAPI.GraphState}
        @param portName: Port name to read the 3dInput from.
        @param graphState: Graph state used to get the input source.
        @raise TypeError: If C{graphState} provided is not valid.
        @raise RuntimeError: If the port related to the C{portName} does
            not point to a valid node.
        @return: Tuple of the source Node, port and graphState.
        """
        # Required for the current iteration of LookFileBakeAPI; it expects
        # the node it is working on to be able to provide the 3dInput.
        from Nodes3DAPI import Node3D
        if not isinstance(graphState, NodegraphAPI.GraphState):
            raise TypeError('Node3D.require3DInput(): Given graphState object '
                            'is not a NodegraphAPI.GraphState instance: %s'
                            % repr(graphState))
        sourcePort, sourceGraphState = self.getInputSource(portName,
                                                           graphState)
        sourceNode = None
        if sourcePort is not None:
            sourceNode = sourcePort.getNode()
            if not isinstance(sourceNode, Node3D):
                sourceNode = None
        if sourceNode is None:
            raise RuntimeError('The required input "%s" was not connected on '
                               'the node "%s".' % (portName, self.getName()))
        return (sourceNode, sourcePort, sourceGraphState)

    def bake(self, parentWidget=None):
        """
        Performs the bake based on the settings of this current node
        parameter settings. If a parentWidget is provided, we create a
        progress widget and setup callbacks to update it. If no parentWidget
        is provided we can run this without calling any UI code.

        @type parentWidget: C{QtWidgets.QWidget}
        @param parentWidget: Parent for the progress widget. If set to None,
            the progress callback is not produced.
        """
        graphState = NodegraphAPI.GetCurrentGraphState()
        frameTime = graphState.getTime()
        inputPorts = self.getInputPorts()
        numPorts = len(inputPorts)
        if numPorts < 2:
            log.error("Requires at least two input ports to bake a USD Look")
            # Fix: previously execution fell through and attempted to bake
            # with no variant inputs; abort instead.
            return
        variantSetName = self.getParameter(
            "variantSetName").getValue(frameTime)
        rootPrimName = self.getParameter("rootPrimName").getValue(frameTime)
        alwaysCreateVariantSet = bool(self.getParameter(
            "alwaysCreateVariantSet").getValue(frameTime) == "Yes")
        looksFilename = self.getParameter("looksFilename").getValue(frameTime)
        looksFileFormat = self.getParameter("looksFileFormat").getValue(
            frameTime)
        createCompleteUsdAssemblyFile = bool(self.getParameter(
            "createCompleteUsdAssemblyFile").getValue(frameTime))
        assemblyFilename = self.getParameter("assemblyFilename").getValue(
            frameTime)
        payloadFilename = self.getParameter("payloadFilename").getValue(
            frameTime)
        # A variant set is authored either on explicit request, or when more
        # than one variant input is connected.
        createVariantSet = alwaysCreateVariantSet or (len(inputPorts) > 2)
        additionalSettings = {
            "variantSetName": variantSetName,
            "rootPrimName": rootPrimName,
            "createVariantSet": createVariantSet,
            "looksFileFormat": looksFileFormat,
            "looksFilename": looksFilename,
            "createCompleteUsdAssemblyFile": createCompleteUsdAssemblyFile,
            "assemblyFilename": assemblyFilename,
            "payloadFilename": payloadFilename,
        }
        # Ensure the interruptWidget is only created in a UI session
        if parentWidget:
            import UI4
            self.__timer = time.time()
            self.__interruptWidget = UI4.Widgets.ModalProcessInterruptWidget(
                self.__interruptCallback, minWidth=512)
        else:
            self.__interruptWidget = None
            self.__timer = None
        assetId = self.getParameter("saveTo").getValue(frameTime)
        rootLocationsParam = self.getParameter("rootLocations")
        rootLocations = [x.getValue(frameTime) for x in
                         rootLocationsParam.getChildren()]
        # Retrieve the Ops for each of the inputs
        referenceOp, passNamesAndOps = self.__getBakerOps(
            self._getPassInputPortNames(), graphState)
        sourceFile = NodegraphAPI.GetOriginalSourceFile()
        if not sourceFile:
            # Use legacy API call in case this file was created in a very old
            # version of Katana (1.6.11 or earlier)
            sourceFile = NodegraphAPI.GetSourceFile()
        sourceAsset = NodegraphAPI.GetKatanaSceneName()
        # When updating to the later version of the LookFileBakeAPI, don't
        # forget to update require3DInput from the Node.py
        baker = LookFileBaker("UsdExport")
        baker.progressCallback = self.__progressCallback
        baker.additionalSettings = additionalSettings
        baker.sourceAsset = sourceAsset
        baker.sourceFile = sourceFile
        if self.__interruptWidget:
            self.__interruptWidget.show()
            self.__interruptWidget.update("Saving Materials To %s" % assetId,
                                          True)
        try:
            baker.bakeAndPublish(
                referenceOp, passNamesAndOps, rootLocations, assetId)
        finally:
            # Always tear the modal widget down, even if baking raised.
            if self.__interruptWidget:
                self.__interruptWidget.close()
                self.__interruptWidget.setParent(None)
                self.__interruptWidget.deleteLater()
                self.__interruptWidget = None

    def __progressCallback(self, message=None):
        """
        The Callback for displaying progress in the interruptWidget whilst in
        UI mode. This is passed to the look file bake functionality code
        which handles passing back the text to write.

        @type message: C{str}
        @param message: The message to display in the progress bar.
        """
        # Fix: the guard previously read "or self.__timer", which returned
        # early whenever the timer WAS set (i.e. during every UI bake), so
        # the progress widget never updated. Bail out only when there is no
        # widget or no timer.
        if not self.__interruptWidget or self.__timer is None:
            return
        tick = False
        # Throttle event processing to at most once every 100ms.
        if (time.time() - self.__timer) > 0.1:
            # Ensure we import QtWidgets if we know we have the interrupt
            # widget, which means we must be in a UI session.
            from Katana import QtWidgets
            QtWidgets.QApplication.processEvents()
            self.__timer = time.time()
            tick = True
        return self.__interruptWidget.update(message, tick)

    def __interruptCallback(self):
        # Raised when the user cancels from the progress widget; the
        # exception propagates out of bakeAndPublish() to abort the bake.
        class InterruptionException(Exception):
            pass
        raise InterruptionException()

    def _getReferenceInputPort(self):
        # The reference ("orig") input is always port 0.
        return self.getInputPortByIndex(0)

    def _getPassInputPorts(self):
        # Every port after the reference input is a variant pass input.
        return self.getInputPorts()[1:]

    def _getReferenceInputPortName(self):
        return self._getReferenceInputPort().getName()

    def _getPassInputPortNames(self):
        return [port.getName() for port in self._getPassInputPorts()]

    def __getBakerOps(self, passInputPortNames, graphState):
        # Build Ops for the reference input plus every variant input; the
        # first returned op corresponds to the reference port.
        ops = LookFileBaking.GetLookFileBakeOps(
            self, [self._getReferenceInputPortName()] + passInputPortNames,
            graphState)
        referenceOp = ops[0]
        # Materialise the pairs so callers may iterate them more than once
        # (zip() returns a single-use iterator on Python 3).
        passNamesAndOps = list(zip(passInputPortNames, ops[1:]))
        return (referenceOp, passNamesAndOps)
_parameters_XML = """
<group_parameter>
<string_parameter name="__nodePanel" value="UsdMaterialBake"/>
<number_parameter name="__networkVersion" value="{networkVersion}"/>
<stringarray_parameter name="rootLocations" size="1" tupleSize="1">
<string_parameter name="i0" value="/root/world"/>
</stringarray_parameter>
<string_parameter name="saveTo"/>
<string_parameter name="looksFilename" value="shadingVariants"/>
<string_parameter name="looksFileFormat" value="usd"/>
<number_parameter name="createCompleteUsdAssemblyFile" value="0"/>
<string_parameter name="assemblyFilename" value="assembly"/>
<string_parameter name="payloadFilename" value=""/>
<string_parameter name="rootPrimName" value="/root"/>
<string_parameter name="variantSetName" value="shadingVariants"/>
<string_parameter name="alwaysCreateVariantSet" value="No"/>
<stringarray_parameter name="variants" size="0" tupleSize="1"/>
</group_parameter>
"""
_ExtraHints = {
"UsdMaterialBake": {
},
"UsdMaterialBake.rootLocations": {
"widget": "scenegraphLocationArray",
"help": """ Specify the rootLocations from which to read the scene
graph difference from.
"""
},
"UsdMaterialBake.variants": {
| |
<reponame>yohanesnuwara/seismic-deeplearning
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# commitHash: c76bf579a0d5090ebd32426907d051d499f3e847
# url: https://github.com/olivesgatech/facies_classification_benchmark
"""Script to generate train and validation sets for Netherlands F3 dataset
"""
import itertools
import logging
import logging.config
import math
import warnings
from os import path, mkdir
import fire
import numpy as np
from sklearn.model_selection import train_test_split
def _write_split_files(splits_path, train_list, val_list, loader_type):
if not path.isdir(splits_path):
mkdir(splits_path)
file_object = open(path.join(splits_path,
loader_type + "_train_val.txt"), "w")
file_object.write("\n".join(train_list + val_list))
file_object.close()
file_object = open(path.join(splits_path,
loader_type + "_train.txt"), "w")
file_object.write("\n".join(train_list))
file_object.close()
file_object = open(path.join(splits_path,
loader_type + "_val.txt"), "w")
file_object.write("\n".join(val_list))
file_object.close()
def _get_aline_range(aline, per_val, slice_steps):
try:
if slice_steps < 1:
raise ValueError('slice_steps cannot be zero or a negative number')
# Inline and Crossline sections
val_aline = math.floor(aline * per_val / 2)
val_aline_range = itertools.chain(range(0, val_aline),
range(aline - val_aline, aline))
train_aline_range = range(val_aline, aline - val_aline, slice_steps)
print("aline: ", aline)
print("val_aline: ", val_aline)
return train_aline_range, val_aline_range
except (Exception, ValueError):
raise
def split_section_train_val(data_dir, output_dir, label_file, per_val=0.2,
                            log_config=None, slice_steps=1):
    """Generate train and validation files for Netherlands F3 dataset.

    Args:
        data_dir (str): data directory path
        output_dir (str): directory under data_dir to store the split files
        label_file (str): npy files with labels. Stored in data_dir
        per_val (float, optional): the fraction of the volume to use for
            validation. Defaults to 0.2.
        log_config (str): path to log configurations
        slice_steps (int): increment to the slices count.
            If slice_steps > 1 the function will skip:
            slice_steps - 1 slice.
            Defaults to 1, do not skip any slice.
    """
    if log_config is not None:
        logging.config.fileConfig(log_config)
    logger = logging.getLogger(__name__)
    logger.info("Splitting data into sections .... ")
    logger.info(f"Reading data from {data_dir}")
    logger.info(f"Loading {label_file}")
    labels = np.load(label_file)
    logger.debug(f"Data shape [iline|xline|depth] {labels.shape}")
    iline, xline, _ = labels.shape
    # Compute the per-axis ranges (inline first, then crossline, matching
    # the original evaluation order) and keep them keyed by prefix.
    per_axis = {}
    for prefix, axis_len in (("i_", iline), ("x_", xline)):
        axis_train, axis_val = _get_aline_range(axis_len, per_val,
                                                slice_steps)
        per_axis[prefix] = ([prefix + str(n) for n in axis_train],
                            [prefix + str(n) for n in axis_val])
    # Crossline entries precede inline entries in the output lists.
    train_list = per_axis["x_"][0] + per_axis["i_"][0]
    val_list = per_axis["x_"][1] + per_axis["i_"][1]
    # write to files to disk
    logger.info(f"Writing {output_dir}")
    _write_split_files(output_dir, train_list, val_list, "section")
def split_patch_train_val(data_dir, output_dir, label_file, stride, patch_size,
                          slice_steps=1, per_val=0.2, log_config=None):
    """Generate train and validation files for Netherlands F3 dataset.

    Args:
        data_dir (str): data directory path
        output_dir (str): directory under data_dir to store the split files
        label_file (str): npy files with labels. Stored in data_dir
        stride (int): stride to use when sectioning of the volume
        patch_size (int): size of patch to extract
        per_val (float, optional): the fraction of the volume to use for
            validation. Defaults to 0.2.
        log_config (str): path to log configurations
        slice_steps (int): increment to the slices count.
            If slice_steps > 1 the function will skip:
            slice_steps - 1 slice.
            Defaults to 1, do not skip any slice.
    """
    if log_config is not None:
        logging.config.fileConfig(log_config)
    logger = logging.getLogger(__name__)
    logger.info("Splitting data into patches .... ")
    logger.info(f"Reading data from {data_dir}")
    logger.info(f"Loading {label_file}")
    labels = np.load(label_file)
    logger.debug(f"Data shape [iline|xline|depth] {labels.shape}")
    iline, xline, depth = labels.shape
    # Inline sections
    train_iline_range, val_iline_range = _get_aline_range(iline,
                                                          per_val,
                                                          slice_steps)
    # Xline sections
    train_xline_range, val_xline_range = _get_aline_range(xline,
                                                          per_val,
                                                          slice_steps)
    # Generate patches from sections.
    # Vertical locations are common to all patches processed.
    vert_locations = range(0, depth - patch_size, patch_size)
    logger.debug(vert_locations)

    # Yield patch ids "i_<iline>_<horz>_<vert>" for every inline slice.
    def _i_extract_patches(iline_range, horz_locations, vert_locations):
        for i in iline_range:
            locations = ([j, k] for j in horz_locations
                         for k in vert_locations)
            for j, k in locations:
                yield "i_" + str(i) + "_" + str(j) + "_" + str(k)

    # Process inlines
    logger.debug("Generating Inline patches")
    logger.debug("Generating Inline patches - Train")
    # An inline section is xline x depth, so horizontal locations run
    # along the crossline axis.
    val_iline = math.floor(xline * per_val / 2)
    logger.debug(val_iline)
    # Process ilines - train
    horz_locations_train = range(val_iline, xline - val_iline,
                                 max(1, patch_size))
    logger.debug(horz_locations_train)
    train_i_list = list(_i_extract_patches(train_iline_range,
                                           horz_locations_train,
                                           vert_locations))
    # Process inlines - validation. val_iline defines the size of the
    # validation band at each end of the axis.
    # Fix: the chain must be materialised into a list; a bare
    # itertools.chain is a single-use iterator that would be exhausted
    # after the first inline, leaving every later inline with no
    # validation patches.
    horz_locations_val = list(itertools.chain(
        range(0, val_iline, max(1, patch_size)),
        range(xline - val_iline, xline, max(1, patch_size))))
    val_iline_range = list(val_iline_range)
    val_i_list = list(_i_extract_patches(val_iline_range,
                                         horz_locations_val,
                                         vert_locations))
    logger.debug(train_iline_range)
    logger.debug(val_iline_range)

    # Yield patch ids "x_<horz>_<xline>_<vert>" for every crossline slice.
    def _x_extract_patches(xline_range, horz_locations, vert_locations):
        for j in xline_range:
            locations = ([i, k] for i in horz_locations
                         for k in vert_locations)
            for i, k in locations:
                yield "x_" + str(i) + "_" + str(j) + "_" + str(k)

    logger.debug("Generating Crossline patches")
    logger.debug("Generating Crossline patches - Train")
    # A crossline section is iline x depth, so horizontal locations run
    # along the inline axis.
    val_xline = math.floor(iline * per_val / 2)
    logger.debug(val_xline)
    # Process xlines - train
    horz_locations_train = range(val_xline, iline - val_xline,
                                 max(1, patch_size))
    logger.debug(horz_locations_train)
    train_x_list = list(_x_extract_patches(train_xline_range,
                                           horz_locations_train,
                                           vert_locations))
    # Process xlines - validation (same single-use-iterator fix as above).
    horz_locations_val = list(itertools.chain(
        range(0, val_xline, max(1, patch_size)),
        range(iline - val_xline, iline, max(1, patch_size))))
    val_xline_range = list(val_xline_range)
    val_x_list = list(_x_extract_patches(val_xline_range,
                                         horz_locations_val,
                                         vert_locations))
    logger.debug(train_xline_range)
    logger.debug(val_xline_range)
    train_list = train_x_list + train_i_list
    val_list = val_x_list + val_i_list
    logger.debug(train_list)
    logger.debug(val_list)
    # write to files to disk:
    # NOTE: This isn't quite right we should calculate the patches
    # again for the whole volume
    logger.info(f"Writing {output_dir}")
    _write_split_files(output_dir, train_list, val_list, "patch")
# Registry mapping loader-type names to their split implementations;
# consumed by get_split_function().
_LOADER_TYPES = {"section": split_section_train_val,
                 "patch": split_patch_train_val}
def get_split_function(loader_type):
    """Return the split function registered for *loader_type*.

    Unknown loader types fall back to the patch-based split.
    """
    try:
        return _LOADER_TYPES[loader_type]
    except KeyError:
        return split_patch_train_val
def run_split_func(loader_type, *args, **kwargs):
    """Look up the split function for *loader_type* and invoke it with the
    remaining positional and keyword arguments."""
    get_split_function(loader_type)(*args, **kwargs)
def split_alaudah_et_al_19(data_dir, stride, patch_size, fraction_validation=0.2, loader_type="patch", log_config=None):
    """Generate train and validation files (with overlap) for Netherlands F3 dataset.

    The original split method from https://github.com/olivesgatech/facies_classification_benchmark
    DON'T USE, SEE NOTES BELOW

    Args:
        data_dir (str): data directory path
        stride (int): stride to use when sectioning of the volume
        patch_size (int): size of patch to extract
        fraction_validation (float, optional): the fraction of the volume to use for validation.
            Defaults to 0.2.
        loader_type (str, optional): type of data loader, can be "patch" or "section".
            Defaults to "patch".
        log_config (str, optional): path to log config. Defaults to None.

    Notes:
        Only kept for reproducibility. It generates overlapping train and val which makes
        validation results unreliable.
    """
    if log_config is not None:
        logging.config.fileConfig(log_config)
    warnings.warn("THIS CREATES OVERLAPPING TRAINING AND VALIDATION SETS")
    assert loader_type in [
        "section",
        "patch",
    ], f"Loader type {loader_type} is not valid. \
        Please specify either 'section' or 'patch' for loader_type"
    # create inline and crossline patches for training and validation:
    logger = logging.getLogger(__name__)
    # Fix: both messages below were missing the f-prefix and logged the
    # literal placeholder text instead of the values.
    logger.info(f"Reading data from {data_dir}")
    # NOTE(review): _get_labels_path/_get_splits_path are not defined in
    # this module - presumably provided elsewhere; confirm before use.
    labels_path = _get_labels_path(data_dir)
    logger.info(f"Loading {labels_path}")
    labels = np.load(labels_path)
    iline, xline, depth = labels.shape
    logger.debug(f"Data shape [iline|xline|depth] {labels.shape}")
    if loader_type == "section":
        i_list = ["i_" + str(i) for i in range(iline)]
        x_list = ["x_" + str(x) for x in range(xline)]
    elif loader_type == "patch":
        i_list = []
        horz_locations = range(0, xline - patch_size + 1, stride)
        vert_locations = range(0, depth - patch_size + 1, stride)
        logger.debug("Generating Inline patches")
        logger.debug(horz_locations)
        logger.debug(vert_locations)
        for i in range(iline):
            # for every inline:
            # images are references by top-left corner:
            locations = [[j, k] for j in horz_locations for k in vert_locations]
            patches_list = ["i_" + str(i) + "_" + str(j) + "_" + str(k) for j, k in locations]
            i_list.append(patches_list)
        # flatten the list
        i_list = list(itertools.chain(*i_list))
        x_list = []
        horz_locations = range(0, iline - patch_size + 1, stride)
        vert_locations = range(0, depth - patch_size + 1, stride)
        for j in range(xline):
            # for every xline:
            # images are references by top-left corner:
            locations = [[i, k] for i in horz_locations for k in vert_locations]
            patches_list = ["x_" + str(i) + "_" + str(j) + "_" + str(k) for i, k in locations]
            x_list.append(patches_list)
        # flatten the list
        x_list = list(itertools.chain(*x_list))
    list_train_val = i_list + x_list
    # create train and validation splits:
    # Fix: train_test_split takes "test_size", not "val_size"; the original
    # keyword raised a TypeError.
    train_list, val_list = train_test_split(
        list_train_val, test_size=fraction_validation, shuffle=True)
    # write to files to disk:
    splits_path = _get_splits_path(data_dir)
    _write_split_files(splits_path, train_list, val_list, loader_type)
# TODO: Try https://github.com/Chilipp/docrep for doscstring reuse
class SplitTrainValCLI(object):
def section(self, data_dir, label_file, per_val=0.2,
log_config="logging.conf", output_dir=None,
slice_steps=1):
"""Generate section based train and validation files for Netherlands F3
dataset.
Args:
data_dir (str): data directory path
output_dir (str): directory under data_dir to store the split files
label_file (str): npy files with labels. Stored in data_dir
per_val (float, optional): the fraction of the volume to use for
validation. Defaults to 0.2.
log_config (str): path to log configurations
slice_steps | |
False == false_res.json()['enable']
req['build_index_resources'] = -3.222
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_16(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['build_index_resources'] = ' '
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_17(self, args):
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
if self.get_mode(args) == 'CPU':
pytest.skip('this API do not support CPU version')
base_url = 'http://%s:%s/' % (args['ip'], args['port'])
res = requests.get(base_url + 'config/gpu_resources')
original_req = res.json()
req = copy.deepcopy(original_req)
req['enable'] = False
res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
false_res = requests.get(base_url + 'config/gpu_resources')
assert False == false_res.json()['enable']
req['build_index_resources'] = '汉子'
requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
res = requests.get(base_url + 'config/gpu_resources')
assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_18(self, args):
    """A tab-character build_index_resources must be rejected and the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then disable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = False
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    false_res = requests.get(base_url + 'config/gpu_resources')
    assert false_res.json()['enable'] is False
    # Invalid value: the stored config must not change after this PUT.
    req['build_index_resources'] = '\t'
    requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    res = requests.get(base_url + 'config/gpu_resources')
    assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_19(self, args):
    """The endpoint with a trailing '#' should behave like the plain endpoint."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then disable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources#')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = False
    res = requests.put(base_url + 'config/gpu_resources#', data=json.dumps(req))
    false_res = requests.get(base_url + 'config/gpu_resources#')
    assert false_res.json()['enable'] is False
    # A second identical PUT must be idempotent.
    requests.put(base_url + 'config/gpu_resources#', data=json.dumps(req))
    res = requests.get(base_url + 'config/gpu_resources#')
    assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_20(self, args):
    """The endpoint with a trailing '?' should behave like the plain endpoint."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then disable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources?')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = False
    res = requests.put(base_url + 'config/gpu_resources?', data=json.dumps(req))
    false_res = requests.get(base_url + 'config/gpu_resources?')
    assert false_res.json()['enable'] is False
    # A second identical PUT must be idempotent.
    requests.put(base_url + 'config/gpu_resources?', data=json.dumps(req))
    res = requests.get(base_url + 'config/gpu_resources?')
    assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_21(self, args):
    """The endpoint with a trailing '/' should behave like the plain endpoint."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then disable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources/')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = False
    res = requests.put(base_url + 'config/gpu_resources/', data=json.dumps(req))
    false_res = requests.get(base_url + 'config/gpu_resources/')
    assert false_res.json()['enable'] is False
    # A second identical PUT must be idempotent.
    requests.put(base_url + 'config/gpu_resources/', data=json.dumps(req))
    res = requests.get(base_url + 'config/gpu_resources/')
    assert false_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_22(self, args):
    """Log the server's response when build_index_resources is set to 0."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    logging.getLogger().info(true_res.json())
    # Start again from the original config and probe the value 0; only log the outcome.
    req = copy.deepcopy(original_req)
    req['build_index_resources'] = 0
    requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    res = requests.get(base_url + 'config/gpu_resources')
    logging.getLogger().info(res.json())
def test_put_gpu_resources_build_index_resources_23(self, args):
    """An integer build_index_resources (5) must yield HTTP 500 and leave the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = 5
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_24(self, args):
    """A large integer build_index_resources must yield HTTP 500 and leave the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = 2147386
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_25(self, args):
    """A negative build_index_resources (-10) must yield HTTP 500 and leave the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = -10
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_26(self, args):
    """An integer build_index_resources (5) must yield HTTP 500 and leave the config unchanged.

    NOTE(review): this case is identical to test _23 — confirm whether a
    different value was intended here.
    """
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = 5
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_27(self, args):
    """A float build_index_resources (0.0) must yield HTTP 500 and leave the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = 0.0
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_28(self, args):
    """An empty-string build_index_resources must yield HTTP 500 and leave the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = ''
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_29(self, args):
    """A garbage-string build_index_resources must yield HTTP 500 and leave the config unchanged."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = 'xxxx'
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_30(self, args):
    """A URL-encoded-style string must yield HTTP 500 and must not be stored."""
    if self.get_mode(args) == 'CPU':
        pytest.skip('this API do not support CPU version')
    base_url = 'http://%s:%s/' % (args['ip'], args['port'])
    # Snapshot the current config, then enable GPU usage as the baseline state.
    res = requests.get(base_url + 'config/gpu_resources')
    original_req = res.json()
    req = copy.deepcopy(original_req)
    req['enable'] = True
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    true_res = requests.get(base_url + 'config/gpu_resources')
    assert true_res.json()['enable'] is True
    # Invalid value: the server must answer 500 and keep the stored config.
    req['build_index_resources'] = '_%20'
    res = requests.put(base_url + 'config/gpu_resources', data=json.dumps(req))
    assert res.status_code == 500
    assert 'Internal Server Error' in res.text
    res = requests.get(base_url + 'config/gpu_resources')
    # The rejected value must not appear in the stored config.
    assert req['build_index_resources'] != res.json()['build_index_resources']
    assert true_res.json() == res.json()
def test_put_gpu_resources_build_index_resources_31(self, args):
if self.get_mode(args) == | |
from pollination_dsl.alias import OutputAlias
from queenbee.io.common import IOAliasHandler
"""Alias for daylight factor recipe output."""
daylight_factor_results = [
OutputAlias.any(
name='results',
description='Daylight factor values. These can be plugged into the "LB '
'Spatial Heatmap" component along with meshes of the sensor grids to '
'visualize results.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_df_from_folder'
)
]
),
# Revit alias
OutputAlias.any(
name='results',
description='Daylight factor values.',
platform=['revit'],
handler=[
IOAliasHandler(
language='csharp',
module='Pollination.RevitHandlers',
function='ReadDaylightFactorResultsFromFolder'
),
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_df_from_folder'
)
]
),
# Rhino alias
OutputAlias.linked(
name='results',
platform=['rhino'],
handler=[
# Preload results
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_df_from_folder'
),
# load preloaded outputs to Rhino with following method
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='LoadMeshBasedResultsToRhino'
)
]
)
]
"""Alias for sky view recipe output."""
sky_view_results = [
OutputAlias.any(
name='results',
description='Numbers for the sky view or sky exposure at each sensor. These '
'can be plugged into the "LB Spatial Heatmap" component along with meshes of '
'the sensor grids to visualize results. Values are in percent (between 0 '
'and 100).',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_df_from_folder'
)
]
)
]
"""Point-in-time grid-based results."""
point_in_time_grid_results = [
OutputAlias.any(
name='results',
description='Numbers for the point-in-time value at each sensor. These can be '
'plugged into the "LB Spatial Heatmap" component along with meshes of the '
'sensor grids to visualize results. Values are in the standard SI '
'units of the requested input metric.\n* illuminance = lux'
'\n* irradiance = W/m2\n* luminance = cd/m2\n* radiance = W/m2-sr',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""Point-in-time view-based results."""
point_in_time_view_results = [
OutputAlias.any(
name='results',
description='High Dynamic Range (HDR) images for each View in the model. These '
'can be plugged into the Ladybug "Image Viewer" component to preview the image. '
'They can also be plugged into the "HB False Color" component to convert '
'the image into a false color version. Lastly, it can be connected to '
'the "HB HDR to GIF" component to get a GIF image that is more portable '
'and easily previewed by different software. Pixel values are '
'in the standard SI units of the requested input metric.\n* illuminance = lux'
'\n* irradiance = W/m2\n* luminance = cd/m2\n* radiance = W/m2-sr',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_images_from_folder'
)
]
)
]
"""Cumulative sun hours output from the direct sun hours recipe."""
cumulative_sun_hour_results = [
OutputAlias.any(
name='hours',
description='The cumulative number of timesteps that each sensor sees the sun. '
'If the input wea timestep is 1 (the default), then this is the number of '
'direct sun hours for each sensor. These can be plugged into the "LB '
'Spatial Heatmap" component along with meshes of the sensor grids to '
'visualize results.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_hours_from_folder'
)
]
)
]
"""Direct sun hours recipe output."""
direct_sun_hours_results = [
OutputAlias.any(
name='results',
description='Raw result files (.ill) that contain the number of timesteps '
'that each sensor is exposed to sun. The units are the timestep of '
'input wea file. For an hourly wea, each value corresponds to an hour '
'of direct sun.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='sort_ill_from_folder'
)
]
)
]
"""Annual daylight recipe output."""
annual_daylight_results = [
OutputAlias.any(
name='results',
description='Raw result files (.ill) that contain illuminance matrices.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='sort_ill_from_folder'
)
]
)
]
daylight_autonomy_results = [
OutputAlias.any(
name='DA',
description='Daylight autonomy values for each sensor. These can be plugged '
'into the "LB Spatial Heatmap" component along with meshes of the sensor '
'grids to visualize results.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_da_from_folder'
)
]
),
# Rhino alias
OutputAlias.linked(
name='DA Results',
platform=['rhino'],
handler=[
# Preload results
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_da_from_folder'
),
# load preloaded outputs to Rhino with following method
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='LoadMeshBasedResultsToRhino'
)
]
)
]
continuous_daylight_autonomy_results = [
OutputAlias.any(
name='cDA',
description='Continuous daylight autonomy values for each sensor. These can '
'be plugged into the "LB Spatial Heatmap" component along with meshes of '
'the sensor grids to visualize results.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_cda_from_folder'
)
]
),
# Rhino alias
OutputAlias.linked(
name='cDA Results',
platform=['rhino'],
handler=[
# Preload results
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_cda_from_folder'
),
# load preloaded outputs to Rhino with following method
IOAliasHandler(
language='csharp', module='Pollination.RhinoHandlers',
function='LoadMeshBasedResultsToRhino'
)
]
)
]
# Useful daylight illuminance (UDI) outputs of the annual daylight recipe.
udi_results = [
    # Grasshopper alias
    OutputAlias.any(
        name='UDI',
        description='Useful daylight autonomy values for each sensor. These can be '
        'plugged into the "LB Spatial Heatmap" component along with meshes of the '
        'sensor grids to visualize results.',
        platform=['grasshopper'],
        handler=[
            IOAliasHandler(
                language='python',
                module='pollination_handlers.outputs.daylight',
                function='read_udi_from_folder'
            )
        ]
    ),
    # Rhino alias
    OutputAlias.linked(
        name='UDI Results',
        platform=['rhino'],
        handler=[
            # Preload results
            IOAliasHandler(
                language='python',
                module='pollination_handlers.outputs.daylight',
                function='read_udi_from_folder'
            ),
            # load preloaded outputs to Rhino with following method
            IOAliasHandler(
                language='csharp', module='Pollination.RhinoHandlers',
                function='LoadMeshBasedResultsToRhino'
            )
        ]
    )
]
# Percent of time below the lower UDI threshold.
udi_lower_results = [
    # Grasshopper alias
    OutputAlias.any(
        name='UDI_low',
        description='Values for the percent of time that is below the lower threshold '
        'of useful daylight illuminance. These can be plugged into the "LB '
        'Spatial Heatmap" component along with meshes of the sensor grids to '
        'visualize results.',
        platform=['grasshopper'],
        handler=[
            IOAliasHandler(
                language='python',
                module='pollination_handlers.outputs.daylight',
                function='read_udi_from_folder'
            )
        ]
    ),
    # Rhino alias
    OutputAlias.linked(
        name='UDI low Results',
        platform=['rhino'],
        handler=[
            # Preload results
            IOAliasHandler(
                language='python',
                module='pollination_handlers.outputs.daylight',
                function='read_udi_from_folder'
            ),
            # load preloaded outputs to Rhino with following method
            IOAliasHandler(
                language='csharp', module='Pollination.RhinoHandlers',
                function='LoadMeshBasedResultsToRhino'
            )
        ]
    )
]
# Percent of time above the upper UDI threshold.
udi_upper_results = [
    # Grasshopper alias
    OutputAlias.any(
        name='UDI_up',
        description='Values for the percent of time that is above the upper threshold '
        'of useful daylight illuminance. These can be plugged into the "LB '
        'Spatial Heatmap" component along with meshes of the sensor grids to '
        'visualize results.',
        platform=['grasshopper'],
        handler=[
            IOAliasHandler(
                language='python',
                module='pollination_handlers.outputs.daylight',
                function='read_udi_from_folder'
            )
        ]
    ),
    # Rhino alias
    OutputAlias.linked(
        name='UDI up Results',
        platform=['rhino'],
        handler=[
            # Preload results
            IOAliasHandler(
                language='python',
                module='pollination_handlers.outputs.daylight',
                function='read_udi_from_folder'
            ),
            # load preloaded outputs to Rhino with following method
            IOAliasHandler(
                language='csharp', module='Pollination.RhinoHandlers',
                function='LoadMeshBasedResultsToRhino'
            )
        ]
    )
]
"""Total Irradiance results from the Annual Irradiance recipe."""
total_radiation_results = [
OutputAlias.any(
name='results',
description='Raw result files (.ill) that contain irradiance matrices '
'for the total radiation at each sensor and timestep.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='sort_ill_from_folder'
)
]
)
]
"""Direct Irradiance results from the Annual Irradiance recipe."""
direct_radiation_results = [
OutputAlias.any(
name='direct',
description='Raw result files (.ill) that contain irradiance matrices '
'for the direct radiation at each sensor and timestep.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='sort_ill_from_folder'
)
]
)
]
"""Average Irradiance from the Annual Irradiance recipe."""
average_irradiance_results = [
OutputAlias.any(
name='avg_irr',
description='The average irradiance in W/m2 for each sensor over the Wea '
'time period.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""Peak Irradiance from the Annual Irradiance recipe."""
peak_irradiance_results = [
OutputAlias.any(
name='peak_irr',
description='The highest irradiance value in W/m2 during the Wea time period. '
'This is suitable for assessing the worst-case solar load of clear skies on '
'cooling design days. It can also be used to determine the highest radiant '
'temperatures that occupants might experience in over the time period of '
'the Wea.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""Peak Irradiance from the Annual Irradiance recipe."""
cumulative_radiation_results = [
OutputAlias.any(
name='radiation',
description='The cumulative radiation in kWh/m2 over the Wea time period.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""LEED Daylight Illuminance 9AM recipe output."""
illuminance_9am_results = [
OutputAlias.any(
name='ill_9am',
description='Illuminance results for the 9AM simulation in lux.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""LEED Daylight Illuminance 3PM recipe output."""
illuminance_3pm_results = [
OutputAlias.any(
name='ill_3pm',
description='Illuminance results for the 3PM simulation in lux.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""LEED Daylight Pass/Fail 9AM recipe output."""
pass_fail_9am_results = [
OutputAlias.any(
name='passing_9am',
description='Pass/Fail results for the 9AM simulation as one/zero values.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""LEED Daylight Pass/Fail 9AM recipe output."""
pass_fail_3pm_results = [
OutputAlias.any(
name='passing_3pm',
description='Pass/Fail results for the 3PM simulation as one/zero values.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""LEED Daylight Pass/Fail combined recipe output."""
pass_fail_comb_results = [
OutputAlias.any(
name='passing_comb',
description='Pass/Fail results for the combined simulation as one/zero values.',
platform=['grasshopper'],
handler=[
IOAliasHandler(
language='python',
module='pollination_handlers.outputs.daylight',
function='read_pit_from_folder'
)
]
)
]
"""LEED daylight illuminance credit summary output.
The result is a JSON with a summary of the credits achieved.
"""
leed_ill_credit_summary_results = [
OutputAlias.any(
name='credits',
description='The number of LEED daylight | |
158, 157.949316, False),
'Lu-159': Iso('Lu-159', 'lutetium-159', 71, 159, 158.946636, False),
'Lu-160': Iso('Lu-160', 'lutetium-160', 71, 160, 159.946033, False),
'Lu-161': Iso('Lu-161', 'lutetium-161', 71, 161, 160.943572, False),
'Lu-162': Iso('Lu-162', 'lutetium-162', 71, 162, 161.943283, False),
'Lu-163': Iso('Lu-163', 'lutetium-163', 71, 163, 162.941179, False),
'Lu-164': Iso('Lu-164', 'lutetium-164', 71, 164, 163.941339, False),
'Lu-165': Iso('Lu-165', 'lutetium-165', 71, 165, 164.939407, False),
'Lu-166': Iso('Lu-166', 'lutetium-166', 71, 166, 165.939859, False),
'Lu-167': Iso('Lu-167', 'lutetium-167', 71, 167, 166.938270, False),
'Lu-168': Iso('Lu-168', 'lutetium-168', 71, 168, 167.938736, False),
'Lu-169': Iso('Lu-169', 'lutetium-169', 71, 169, 168.9376441, False),
'Lu-170': Iso('Lu-170', 'lutetium-170', 71, 170, 169.938478, False),
'Lu-171': Iso('Lu-171', 'lutetium-171', 71, 171, 170.9379170, False),
'Lu-172': Iso('Lu-172', 'lutetium-172', 71, 172, 171.9390891, False),
'Lu-173': Iso('Lu-173', 'lutetium-173', 71, 173, 172.9389340, False),
'Lu-174': Iso('Lu-174', 'lutetium-174', 71, 174, 173.9403409, False),
'Lu-175': Iso('Lu-175', 'lutetium-175', 71, 175, 174.9407752, True,
isotopic_abundance=0.97401),
'Lu-176': Iso('Lu-176', 'lutetium-176', 71, 176, 175.9426897, False,
isotopic_abundance=0.02599),
'Lu-177': Iso('Lu-177', 'lutetium-177', 71, 177, 176.9437615, False,
half_life=573696.0),
'Lu-178': Iso('Lu-178', 'lutetium-178', 71, 178, 177.9459580, False),
'Lu-179': Iso('Lu-179', 'lutetium-179', 71, 179, 178.9473309, False),
'Lu-180': Iso('Lu-180', 'lutetium-180', 71, 180, 179.949888, False),
'Lu-181': Iso('Lu-181', 'lutetium-181', 71, 181, 180.95191, False),
'Lu-182': Iso('Lu-182', 'lutetium-182', 71, 182, 181.95504, False),
'Lu-183': Iso('Lu-183', 'lutetium-183', 71, 183, 182.957363, False),
'Lu-184': Iso('Lu-184', 'lutetium-184', 71, 184, 183.96091, False),
'Lu-185': Iso('Lu-185', 'lutetium-185', 71, 185, 184.96362, False),
'Hf-153': Iso('Hf-153', 'hafnium-153', 72, 153, 152.97069, False),
'Hf-154': Iso('Hf-154', 'hafnium-154', 72, 154, 153.96486, False),
'Hf-155': Iso('Hf-155', 'hafnium-155', 72, 155, 154.96311, False),
'Hf-156': Iso('Hf-156', 'hafnium-156', 72, 156, 155.95935, False),
'Hf-157': Iso('Hf-157', 'hafnium-157', 72, 157, 156.95824, False),
'Hf-158': Iso('Hf-158', 'hafnium-158', 72, 158, 157.954801, False),
'Hf-159': Iso('Hf-159', 'hafnium-159', 72, 159, 158.953996, False),
'Hf-160': Iso('Hf-160', 'hafnium-160', 72, 160, 159.950691, False),
'Hf-161': Iso('Hf-161', 'hafnium-161', 72, 161, 160.950278, False),
'Hf-162': Iso('Hf-162', 'hafnium-162', 72, 162, 161.9472148, False),
'Hf-163': Iso('Hf-163', 'hafnium-163', 72, 163, 162.947113, False),
'Hf-164': Iso('Hf-164', 'hafnium-164', 72, 164, 163.944371, False),
'Hf-165': Iso('Hf-165', 'hafnium-165', 72, 165, 164.944567, False),
'Hf-166': Iso('Hf-166', 'hafnium-166', 72, 166, 165.942180, False),
'Hf-167': Iso('Hf-167', 'hafnium-167', 72, 167, 166.942600, False),
'Hf-168': Iso('Hf-168', 'hafnium-168', 72, 168, 167.940568, False),
'Hf-169': Iso('Hf-169', 'hafnium-169', 72, 169, 168.941259, False),
'Hf-170': Iso('Hf-170', 'hafnium-170', 72, 170, 169.939609, False),
'Hf-171': Iso('Hf-171', 'hafnium-171', 72, 171, 170.940492, False),
'Hf-172': Iso('Hf-172', 'hafnium-172', 72, 172, 171.939450, False),
'Hf-173': Iso('Hf-173', 'hafnium-173', 72, 173, 172.940513, False),
'Hf-174': Iso('Hf-174', 'hafnium-174', 72, 174, 173.9400461, False,
isotopic_abundance=0.0016),
'Hf-175': Iso('Hf-175', 'hafnium-175', 72, 175, 174.9415092, False),
'Hf-176': Iso('Hf-176', 'hafnium-176', 72, 176, 175.9414076, True,
isotopic_abundance=0.0526),
'Hf-177': Iso('Hf-177', 'hafnium-177', 72, 177, 176.9432277, True,
isotopic_abundance=0.1860),
'Hf-178': Iso('Hf-178', 'hafnium-178', 72, 178, 177.9437058, True,
isotopic_abundance=0.2728),
'Hf-179': Iso('Hf-179', 'hafnium-179', 72, 179, 178.9458232, True,
isotopic_abundance=0.1362),
'Hf-180': Iso('Hf-180', 'hafnium-180', 72, 180, 179.9465570, True,
isotopic_abundance=0.3508),
'Hf-181': Iso('Hf-181', 'hafnium-181', 72, 181, 180.9491083, False),
'Hf-182': Iso('Hf-182', 'hafnium-182', 72, 182, 181.9505612, False),
'Hf-183': Iso('Hf-183', 'hafnium-183', 72, 183, 182.953530, False),
'Hf-184': Iso('Hf-184', 'hafnium-184', 72, 184, 183.955446, False),
'Hf-185': Iso('Hf-185', 'hafnium-185', 72, 185, 184.958862, False),
'Hf-186': Iso('Hf-186', 'hafnium-186', 72, 186, 185.960897, False),
'Hf-187': Iso('Hf-187', 'hafnium-187', 72, 187, 186.96477, False),
'Hf-188': Iso('Hf-188', 'hafnium-188', 72, 188, 187.96685, False),
'Hf-189': Iso('Hf-189', 'hafnium-189', 72, 189, 188.97084, False),
'Ta-155': Iso('Ta-155', 'tantalum-155', 73, 155, 154.97424, False),
'Ta-156': Iso('Ta-156', 'tantalum-156', 73, 156, 155.97203, False),
'Ta-157': Iso('Ta-157', 'tantalum-157', 73, 157, 156.96818, False),
'Ta-158': Iso('Ta-158', 'tantalum-158', 73, 158, 157.96654, False),
'Ta-159': Iso('Ta-159', 'tantalum-159', 73, 159, 158.963023, False),
'Ta-160': Iso('Ta-160', 'tantalum-160', 73, 160, 159.961488, False),
'Ta-161': Iso('Ta-161', 'tantalum-161', 73, 161, 160.958452, False),
'Ta-162': Iso('Ta-162', 'tantalum-162', 73, 162, 161.957294, False),
'Ta-163': Iso('Ta-163', 'tantalum-163', 73, 163, 162.954337, False),
'Ta-164': Iso('Ta-164', 'tantalum-164', 73, 164, 163.953534, False),
'Ta-165': Iso('Ta-165', 'tantalum-165', 73, 165, 164.950781, False),
'Ta-166': Iso('Ta-166', 'tantalum-166', 73, 166, 165.950512, False),
'Ta-167': Iso('Ta-167', 'tantalum-167', 73, 167, 166.948093, False),
'Ta-168': Iso('Ta-168', 'tantalum-168', 73, 168, 167.948047, False),
'Ta-169': Iso('Ta-169', 'tantalum-169', 73, 169, 168.946011, False),
'Ta-170': Iso('Ta-170', 'tantalum-170', 73, 170, 169.946175, False),
'Ta-171': Iso('Ta-171', 'tantalum-171', 73, 171, 170.944476, False),
'Ta-172': Iso('Ta-172', 'tantalum-172', 73, 172, 171.944895, False),
'Ta-173': Iso('Ta-173', 'tantalum-173', 73, 173, 172.943750, False),
'Ta-174': Iso('Ta-174', 'tantalum-174', 73, 174, 173.944454, False),
'Ta-175': Iso('Ta-175', 'tantalum-175', 73, 175, 174.943737, False),
'Ta-176': Iso('Ta-176', 'tantalum-176', 73, 176, 175.944857, False),
'Ta-177': Iso('Ta-177', 'tantalum-177', 73, 177, 176.9444795, False),
'Ta-178': Iso('Ta-178', 'tantalum-178', 73, 178, 177.945678, False),
'Ta-179': Iso('Ta-179', 'tantalum-179', 73, 179, 178.9459366, False),
'Ta-180': Iso('Ta-180', 'tantalum-180', 73, 180, 179.9474648, True,
isotopic_abundance=0.0001201),
'Ta-181': Iso('Ta-181', 'tantalum-181', 73, 181, 180.9479958, True,
isotopic_abundance=0.9998799),
'Ta-182': Iso('Ta-182', 'tantalum-182', 73, 182, 181.9501519, False),
'Ta-183': Iso('Ta-183', 'tantalum-183', 73, 183, 182.9513726, False),
'Ta-184': Iso('Ta-184', 'tantalum-184', 73, 184, 183.954008, False),
'Ta-185': Iso('Ta-185', 'tantalum-185', 73, 185, 184.955559, False),
'Ta-186': Iso('Ta-186', 'tantalum-186', 73, 186, 185.958551, False),
'Ta-187': Iso('Ta-187', 'tantalum-187', 73, 187, 186.960386, False),
'Ta-188': Iso('Ta-188', 'tantalum-188', 73, 188, 187.963916, False),
'Ta-189': Iso('Ta-189', 'tantalum-189', 73, 189, 188.96583, False),
'Ta-190': Iso('Ta-190', 'tantalum-190', 73, 190, 189.96939, False),
'Ta-191': Iso('Ta-191', 'tantalum-191', 73, 191, 190.97156, False),
'Ta-192': Iso('Ta-192', 'tantalum-192', 73, 192, 191.97514, False),
'W-157': Iso('W-157', 'tungsten-157', 74, 157, 156.97884, False),
'W-158': Iso('W-158', 'tungsten-158', 74, 158, 157.97456, False),
'W-159': Iso('W-159', 'tungsten-159', 74, 159, 158.97264, False),
'W-160': Iso('W-160', 'tungsten-160', 74, 160, 159.96846, False),
'W-161': Iso('W-161', 'tungsten-161', 74, 161, 160.96720, False),
'W-162': Iso('W-162', 'tungsten-162', 74, 162, 161.963499, False),
'W-163': Iso('W-163', 'tungsten-163', 74, 163, 162.962524, False),
'W-164': Iso('W-164', 'tungsten-164', 74, 164, 163.958961, False),
'W-165': Iso('W-165', 'tungsten-165', 74, 165, 164.958281, False),
'W-166': Iso('W-166', 'tungsten-166', 74, 166, 165.955031, False),
'W-167': Iso('W-167', 'tungsten-167', 74, 167, 166.954805, False),
'W-168': Iso('W-168', 'tungsten-168', 74, 168, 167.951806, False),
'W-169': Iso('W-169', 'tungsten-169', 74, 169, 168.951779, False),
'W-170': Iso('W-170', 'tungsten-170', 74, 170, 169.949232, False),
'W-171': Iso('W-171', 'tungsten-171', 74, 171, 170.949451, False),
'W-172': Iso('W-172', 'tungsten-172', 74, 172, 171.947292, False),
'W-173': Iso('W-173', 'tungsten-173', 74, 173, 172.947689, False),
'W-174': Iso('W-174', 'tungsten-174', 74, 174, 173.946079, False),
'W-175': Iso('W-175', 'tungsten-175', 74, 175, 174.946717, False),
'W-176': Iso('W-176', 'tungsten-176', 74, 176, 175.945634, False),
'W-177': Iso('W-177', 'tungsten-177', 74, 177, 176.946643, False),
'W-178': Iso('W-178', 'tungsten-178', 74, 178, 177.945883, False),
'W-179': Iso('W-179', 'tungsten-179', 74, 179, 178.947077, False),
'W-180': Iso('W-180', 'tungsten-180', 74, 180, 179.9467108, False,
isotopic_abundance=0.0012),
'W-181': Iso('W-181', 'tungsten-181', 74, 181, 180.9481978, False,
half_life=10462608.0),
'W-182': Iso('W-182', 'tungsten-182', 74, 182, 181.94820394, True,
isotopic_abundance=0.2650),
'W-183': Iso('W-183', 'tungsten-183', 74, 183, 182.95022275, True,
isotopic_abundance=0.1431),
'W-184': Iso('W-184', 'tungsten-184', 74, 184, 183.95093092, True,
isotopic_abundance=0.3064),
'W-185': Iso('W-185', 'tungsten-185', 74, 185, 184.95341897, False),
'W-186': Iso('W-186', 'tungsten-186', 74, 186, 185.9543628, True,
isotopic_abundance=0.2843),
'W-187': Iso('W-187', 'tungsten-187', 74, 187, 186.9571588, False),
'W-188': Iso('W-188', 'tungsten-188', 74, 188, 187.9584862, False,
half_life=6029251.2),
'W-189': Iso('W-189', 'tungsten-189', 74, 189, 188.961763, False),
'W-190': Iso('W-190', 'tungsten-190', 74, 190, 189.963091, False),
'W-191': Iso('W-191', 'tungsten-191', 74, 191, 190.966531, False),
'W-192': Iso('W-192', 'tungsten-192', 74, 192, 191.96817, False),
'W-193': Iso('W-193', 'tungsten-193', 74, 193, 192.97178, False),
'W-194': Iso('W-194', 'tungsten-194', 74, 194, 193.97367, False),
'Re-159': Iso('Re-159', 'rhenium-159', 75, 159, 158.98418, False),
'Re-160': Iso('Re-160', 'rhenium-160', 75, 160, 159.98182, False),
'Re-161': Iso('Re-161', 'rhenium-161', 75, 161, 160.97757, False),
'Re-162': Iso('Re-162', 'rhenium-162', 75, 162, 161.97584, False),
'Re-163': Iso('Re-163', 'rhenium-163', 75, 163, 162.972080, False),
'Re-164': Iso('Re-164', 'rhenium-164', 75, 164, 163.970453, False),
'Re-165': Iso('Re-165', 'rhenium-165', 75, 165, 164.967103, False),
'Re-166': Iso('Re-166', 'rhenium-166', 75, 166, 165.965761, False),
'Re-167': Iso('Re-167', 'rhenium-167', 75, 167, 166.962595, False),
'Re-168': Iso('Re-168', 'rhenium-168', 75, 168, 167.961573, False),
'Re-169': Iso('Re-169', 'rhenium-169', 75, 169, 168.958766, False),
'Re-170': Iso('Re-170', 'rhenium-170', 75, 170, 169.958220, False),
'Re-171': Iso('Re-171', 'rhenium-171', 75, 171, 170.955716, False),
'Re-172': Iso('Re-172', 'rhenium-172', 75, 172, 171.955420, False),
'Re-173': Iso('Re-173', 'rhenium-173', 75, 173, 172.953243, False),
'Re-174': Iso('Re-174', 'rhenium-174', 75, 174, 173.953115, False),
'Re-175': Iso('Re-175', 'rhenium-175', 75, 175, 174.951381, False),
'Re-176': Iso('Re-176', 'rhenium-176', 75, 176, 175.951623, False),
'Re-177': Iso('Re-177', 'rhenium-177', 75, 177, 176.950328, False),
'Re-178': Iso('Re-178', 'rhenium-178', 75, 178, 177.950989, False),
'Re-179': Iso('Re-179', 'rhenium-179', 75, 179, 178.949989, False),
'Re-180': Iso('Re-180', 'rhenium-180', 75, 180, 179.950792, False),
'Re-181': Iso('Re-181', 'rhenium-181', 75, 181, 180.950058, False),
'Re-182': Iso('Re-182', 'rhenium-182', 75, 182, 181.95121, False),
'Re-183': Iso('Re-183', 'rhenium-183', 75, 183, 182.9508196, False),
'Re-184': Iso('Re-184', 'rhenium-184', 75, 184, 183.9525228, False),
'Re-185': Iso('Re-185', 'rhenium-185', 75, 185, 184.9529545, True,
isotopic_abundance=0.3740),
'Re-186': Iso('Re-186', 'rhenium-186', 75, 186, 185.9549856, False,
half_life=321292.8),
'Re-187': Iso('Re-187', 'rhenium-187', 75, 187, 186.9557501, False,
isotopic_abundance=0.6260),
'Re-188': Iso('Re-188', 'rhenium-188', 75, 188, 187.9581115, False,
half_life=61203.600000000006),
'Re-189': Iso('Re-189', 'rhenium-189', 75, 189, 188.9592260, False),
'Re-190': Iso('Re-190', 'rhenium-190', 75, 190, 189.961744, False),
'Re-191': Iso('Re-191', 'rhenium-191', 75, 191, 190.963122, False),
'Re-192': Iso('Re-192', 'rhenium-192', 75, 192, 191.966088, False),
'Re-193': Iso('Re-193', 'rhenium-193', 75, 193, 192.967541, False),
'Re-194': Iso('Re-194', 'rhenium-194', 75, 194, 193.97076, False),
'Re-195': Iso('Re-195', 'rhenium-195', 75, 195, 194.97254, False),
'Re-196': Iso('Re-196', 'rhenium-196', 75, 196, 195.97580, False),
'Re-197': Iso('Re-197', 'rhenium-197', 75, 197, 196.97799, False),
'Re-198': Iso('Re-198', | |
self.splitter_5_pop.setOrientation(QtCore.Qt.Vertical)
self.splitter_5_pop.setObjectName("splitter_5_pop")
self.layoutWidget_6 = QtWidgets.QWidget(self.splitter_5_pop)
self.layoutWidget_6.setObjectName("layoutWidget_6")
self.horizontalLayout_ExampleImgs_pop = QtWidgets.QHBoxLayout(self.layoutWidget_6)
self.horizontalLayout_ExampleImgs_pop.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_ExampleImgs_pop.setObjectName("horizontalLayout_ExampleImgs_pop")
self.comboBox_ShowTrainOrValid_pop = QtWidgets.QComboBox(self.layoutWidget_6)
self.comboBox_ShowTrainOrValid_pop.setObjectName("comboBox_ShowTrainOrValid_pop")
self.horizontalLayout_ExampleImgs_pop.addWidget(self.comboBox_ShowTrainOrValid_pop)
self.comboBox_ShowWOrWoAug_pop = QtWidgets.QComboBox(self.layoutWidget_6)
self.comboBox_ShowWOrWoAug_pop.setObjectName("comboBox_ShowWOrWoAug_pop")
self.horizontalLayout_ExampleImgs_pop.addWidget(self.comboBox_ShowWOrWoAug_pop)
self.label_ShowIndex_pop = QtWidgets.QLabel(self.layoutWidget_6)
self.label_ShowIndex_pop.setObjectName("label_ShowIndex_pop")
self.label_ShowIndex_pop.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.horizontalLayout_ExampleImgs_pop.addWidget(self.label_ShowIndex_pop)
self.spinBox_ShowIndex_pop = QtWidgets.QSpinBox(self.layoutWidget_6)
self.spinBox_ShowIndex_pop.setObjectName("spinBox_ShowIndex_pop")
self.horizontalLayout_ExampleImgs_pop.addWidget(self.spinBox_ShowIndex_pop)
self.pushButton_ShowExamleImgs_pop = QtWidgets.QPushButton(self.layoutWidget_6)
self.pushButton_ShowExamleImgs_pop.setObjectName("pushButton_ShowExamleImgs_pop")
self.horizontalLayout_ExampleImgs_pop.addWidget(self.pushButton_ShowExamleImgs_pop)
self.widget_ViewImages_pop = QtWidgets.QWidget(self.splitter_5_pop)
self.widget_ViewImages_pop.setObjectName("widget_ViewImages_pop")
self.gridLayout_18.addWidget(self.splitter_5_pop, 0, 0, 1, 1)
self.tabWidget_DefineModel_pop.addTab(self.tab_ExampleImgs_pop, "")
self.tab_expertMode_pop = QtWidgets.QWidget()
#self.tab_expertMode_pop.setEnabled(True)
self.tab_expertMode_pop.setObjectName("tab_expertMode_pop")
self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_expertMode_pop)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBox_expertMode_pop = QtWidgets.QGroupBox(self.tab_expertMode_pop)
self.groupBox_expertMode_pop.setEnabled(True)
self.groupBox_expertMode_pop.setCheckable(True)
self.groupBox_expertMode_pop.setChecked(True)
self.groupBox_expertMode_pop.setObjectName("groupBox_expertMode_pop")
self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_expertMode_pop)
self.gridLayout_3.setObjectName("gridLayout_3")
self.scrollArea_expertMode_pop = QtWidgets.QScrollArea(self.groupBox_expertMode_pop)
self.scrollArea_expertMode_pop.setEnabled(True)
self.scrollArea_expertMode_pop.setWidgetResizable(True)
self.scrollArea_expertMode_pop.setObjectName("scrollArea_expertMode_pop")
self.scrollAreaWidgetContents_pop = QtWidgets.QWidget()
self.scrollAreaWidgetContents_pop.setGeometry(QtCore.QRect(0, -186, 697, 505))
self.scrollAreaWidgetContents_pop.setObjectName("scrollAreaWidgetContents_pop")
self.gridLayout_4 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents_pop)
self.gridLayout_4.setObjectName("gridLayout_4")
self.groupBox_modelKerasFit_pop = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_pop)
self.groupBox_modelKerasFit_pop.setObjectName("groupBox_modelKerasFit_pop")
self.gridLayout_11 = QtWidgets.QGridLayout(self.groupBox_modelKerasFit_pop)
self.gridLayout_11.setObjectName("gridLayout_11")
self.label_batchSize_pop = QtWidgets.QLabel(self.groupBox_modelKerasFit_pop)
self.label_batchSize_pop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_batchSize_pop.setObjectName("label_batchSize_pop")
self.gridLayout_11.addWidget(self.label_batchSize_pop, 0, 0, 1, 1)
self.spinBox_batchSize = QtWidgets.QSpinBox(self.groupBox_modelKerasFit_pop)
self.spinBox_batchSize.setMinimum(1)
self.spinBox_batchSize.setMaximum(999999999)
self.spinBox_batchSize.setProperty("value", 32)
self.spinBox_batchSize.setObjectName("spinBox_batchSize")
self.gridLayout_11.addWidget(self.spinBox_batchSize, 0, 1, 1, 1)
self.label_epochs_pop = QtWidgets.QLabel(self.groupBox_modelKerasFit_pop)
self.label_epochs_pop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_epochs_pop.setObjectName("label_epochs_pop")
self.gridLayout_11.addWidget(self.label_epochs_pop, 0, 2, 1, 1)
self.spinBox_epochs = QtWidgets.QSpinBox(self.groupBox_modelKerasFit_pop)
self.spinBox_epochs.setMinimum(1)
self.spinBox_epochs.setMaximum(999999999)
self.spinBox_epochs.setObjectName("spinBox_epochs")
self.gridLayout_11.addWidget(self.spinBox_epochs, 0, 3, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_modelKerasFit_pop, 0, 0, 1, 1)
self.groupBox_regularization_pop = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_pop)
self.groupBox_regularization_pop.setObjectName("groupBox_regularization_pop")
self.gridLayout_12 = QtWidgets.QGridLayout(self.groupBox_regularization_pop)
self.gridLayout_12.setObjectName("gridLayout_12")
self.horizontalLayout_43_pop = QtWidgets.QHBoxLayout()
self.horizontalLayout_43_pop.setObjectName("horizontalLayout_43_pop")
self.checkBox_trainLastNOnly_pop = QtWidgets.QCheckBox(self.groupBox_regularization_pop)
self.checkBox_trainLastNOnly_pop.setLayoutDirection(QtCore.Qt.LeftToRight)
self.checkBox_trainLastNOnly_pop.setCheckable(True)
self.checkBox_trainLastNOnly_pop.setObjectName("checkBox_trainLastNOnly_pop")
self.horizontalLayout_43_pop.addWidget(self.checkBox_trainLastNOnly_pop)
self.spinBox_trainLastNOnly_pop = QtWidgets.QSpinBox(self.groupBox_regularization_pop)
self.spinBox_trainLastNOnly_pop.setEnabled(False)
self.spinBox_trainLastNOnly_pop.setMaximum(9999)
self.spinBox_trainLastNOnly_pop.setObjectName("spinBox_trainLastNOnly_pop")
self.horizontalLayout_43_pop.addWidget(self.spinBox_trainLastNOnly_pop)
self.checkBox_trainDenseOnly_pop = QtWidgets.QCheckBox(self.groupBox_regularization_pop)
self.checkBox_trainDenseOnly_pop.setObjectName("checkBox_trainDenseOnly_pop")
self.horizontalLayout_43_pop.addWidget(self.checkBox_trainDenseOnly_pop)
self.gridLayout_12.addLayout(self.horizontalLayout_43_pop, 0, 0, 1, 1)
self.horizontalLayout_3_pop = QtWidgets.QHBoxLayout()
self.horizontalLayout_3_pop.setObjectName("horizontalLayout_3_pop")
self.checkBox_dropout_pop = QtWidgets.QCheckBox(self.groupBox_regularization_pop)
self.checkBox_dropout_pop.setObjectName("checkBox_dropout_pop")
self.horizontalLayout_3_pop.addWidget(self.checkBox_dropout_pop)
self.lineEdit_dropout_pop = QtWidgets.QLineEdit(self.groupBox_regularization_pop)
self.lineEdit_dropout_pop.setEnabled(False)
self.lineEdit_dropout_pop.setObjectName("lineEdit_dropout_pop")
self.horizontalLayout_3_pop.addWidget(self.lineEdit_dropout_pop)
self.gridLayout_12.addLayout(self.horizontalLayout_3_pop, 1, 0, 1, 1)
self.horizontalLayout_pTr_pop = QtWidgets.QHBoxLayout()
self.horizontalLayout_pTr_pop.setObjectName("horizontalLayout_pTr_pop")
# self.checkBox_pTr_pop = QtWidgets.QCheckBox(self.groupBox_regularization_pop)
# self.checkBox_pTr_pop.setObjectName("checkBox_pTr_pop")
# self.horizontalLayout_pTr_pop.addWidget(self.checkBox_pTr_pop)
# self.lineEdit_pTr_pop = QtWidgets.QLineEdit(self.groupBox_regularization_pop)
# self.lineEdit_pTr_pop.setEnabled(False)
# self.lineEdit_pTr_pop.setObjectName("lineEdit_pTr_pop")
# self.horizontalLayout_pTr_pop.addWidget(self.lineEdit_pTr_pop)
# self.pushButton_pTr_pop = QtWidgets.QPushButton(self.groupBox_regularization_pop)
# self.pushButton_pTr_pop.setEnabled(False)
# self.pushButton_pTr_pop.setMaximumSize(QtCore.QSize(40, 16777215))
# self.pushButton_pTr_pop.setObjectName("pushButton_pTr_pop")
# self.horizontalLayout_pTr_pop.addWidget(self.pushButton_pTr_pop)
self.gridLayout_12.addLayout(self.horizontalLayout_pTr_pop, 2, 0, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_regularization_pop, 3, 0, 1, 1)
self.groupBox_lossOptimizer = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_pop)
self.groupBox_lossOptimizer.setObjectName("groupBox_lossOptimizer")
self.gridLayout_14 = QtWidgets.QGridLayout(self.groupBox_lossOptimizer)
self.gridLayout_14.setObjectName("gridLayout_14")
self.pushButton_optimizer_pop = QtWidgets.QPushButton(self.groupBox_lossOptimizer)
self.pushButton_optimizer_pop.setEnabled(False)
self.pushButton_optimizer_pop.setMaximumSize(QtCore.QSize(40, 16777215))
self.pushButton_optimizer_pop.setObjectName("pushButton_optimizer_pop")
self.gridLayout_14.addWidget(self.pushButton_optimizer_pop, 0, 4, 1, 1)
self.checkBox_lossW = QtWidgets.QCheckBox(self.groupBox_lossOptimizer)
self.checkBox_lossW.setLayoutDirection(QtCore.Qt.RightToLeft)
self.checkBox_lossW.setObjectName("checkBox_lossW")
self.gridLayout_14.addWidget(self.checkBox_lossW, 1, 0, 1, 1)
self.checkBox_expt_loss_pop = QtWidgets.QCheckBox(self.groupBox_lossOptimizer)
self.checkBox_expt_loss_pop.setLayoutDirection(QtCore.Qt.RightToLeft)
self.checkBox_expt_loss_pop.setObjectName("checkBox_expt_loss_pop")
self.gridLayout_14.addWidget(self.checkBox_expt_loss_pop, 0, 0, 1, 1)
self.comboBox_expt_loss_pop = QtWidgets.QComboBox(self.groupBox_lossOptimizer)
self.comboBox_expt_loss_pop.setEnabled(False)
self.comboBox_expt_loss_pop.setObjectName("comboBox_expt_loss_pop")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.comboBox_expt_loss_pop.addItem("")
self.gridLayout_14.addWidget(self.comboBox_expt_loss_pop, 0, 1, 1, 1)
self.checkBox_optimizer_pop = QtWidgets.QCheckBox(self.groupBox_lossOptimizer)
self.checkBox_optimizer_pop.setLayoutDirection(QtCore.Qt.RightToLeft)
self.checkBox_optimizer_pop.setObjectName("checkBox_optimizer_pop")
self.gridLayout_14.addWidget(self.checkBox_optimizer_pop, 0, 2, 1, 1)
self.comboBox_optimizer = QtWidgets.QComboBox(self.groupBox_lossOptimizer)
self.comboBox_optimizer.setEnabled(False)
self.comboBox_optimizer.setObjectName("comboBox_optimizer")
self.comboBox_optimizer.addItem("")
self.comboBox_optimizer.addItem("")
self.comboBox_optimizer.addItem("")
self.comboBox_optimizer.addItem("")
self.comboBox_optimizer.addItem("")
self.comboBox_optimizer.addItem("")
self.comboBox_optimizer.addItem("")
self.gridLayout_14.addWidget(self.comboBox_optimizer, 0, 3, 1, 1)
self.lineEdit_lossW = QtWidgets.QLineEdit(self.groupBox_lossOptimizer)
self.lineEdit_lossW.setEnabled(False)
self.lineEdit_lossW.setObjectName("lineEdit_lossW")
self.gridLayout_14.addWidget(self.lineEdit_lossW, 1, 1, 1, 3)
self.pushButton_lossW = QtWidgets.QPushButton(self.groupBox_lossOptimizer)
self.pushButton_lossW.setEnabled(False)
self.pushButton_lossW.setMaximumSize(QtCore.QSize(40, 16777215))
self.pushButton_lossW.setObjectName("pushButton_lossW")
self.gridLayout_14.addWidget(self.pushButton_lossW, 1, 4, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_lossOptimizer, 1, 0, 1, 1)
self.groupBox_learningRate_pop = QtWidgets.QGroupBox(self.scrollAreaWidgetContents_pop)
self.groupBox_learningRate_pop.setEnabled(True)
self.groupBox_learningRate_pop.setCheckable(True)
self.groupBox_learningRate_pop.setChecked(False)
self.groupBox_learningRate_pop.setObjectName("groupBox_learningRate_pop")
self.gridLayout_16 = QtWidgets.QGridLayout(self.groupBox_learningRate_pop)
self.gridLayout_16.setObjectName("gridLayout_16")
self.radioButton_LrConst = QtWidgets.QRadioButton(self.groupBox_learningRate_pop)
self.radioButton_LrConst.setChecked(True)
self.radioButton_LrConst.setObjectName("radioButton_LrConst")
self.gridLayout_16.addWidget(self.radioButton_LrConst, 0, 0, 1, 1)
self.label_LrConst_pop = QtWidgets.QLabel(self.groupBox_learningRate_pop)
self.label_LrConst_pop.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_LrConst_pop.setObjectName("label_LrConst_pop")
self.gridLayout_16.addWidget(self.label_LrConst_pop, 0, 1, 1, 1)
self.doubleSpinBox_learningRate = QtWidgets.QDoubleSpinBox(self.groupBox_learningRate_pop)
self.doubleSpinBox_learningRate.setEnabled(False)
self.doubleSpinBox_learningRate.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.doubleSpinBox_learningRate.setDecimals(6)
self.doubleSpinBox_learningRate.setMaximum(999.0)
self.doubleSpinBox_learningRate.setSingleStep(0.0001)
self.doubleSpinBox_learningRate.setProperty("value", 0.001)
self.doubleSpinBox_learningRate.setObjectName("doubleSpinBox_learningRate")
self.gridLayout_16.addWidget(self.doubleSpinBox_learningRate, 0, 2, 1, 1)
self.radioButton_LrCycl = QtWidgets.QRadioButton(self.groupBox_learningRate_pop)
self.radioButton_LrCycl.setObjectName("radioButton_LrCycl")
self.gridLayout_16.addWidget(self.radioButton_LrCycl, 1, 0, 1, 1)
self.label_cycLrMin = QtWidgets.QLabel(self.groupBox_learningRate_pop)
self.label_cycLrMin.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_cycLrMin.setObjectName("label_cycLrMin")
self.gridLayout_16.addWidget(self.label_cycLrMin, 1, 1, 1, 1)
self.lineEdit_cycLrMin = QtWidgets.QLineEdit(self.groupBox_learningRate_pop)
self.lineEdit_cycLrMin.setObjectName("lineEdit_cycLrMin")
self.lineEdit_cycLrMin.setEnabled(False)
validator = QtGui.QRegExpValidator(QtCore.QRegExp("^[0-9 . , e -]+$")) #validator allows numbers, dots, commas, e and -
self.lineEdit_cycLrMin.setValidator(validator)
self.gridLayout_16.addWidget(self.lineEdit_cycLrMin, 1, 2, 1, 1)
self.lineEdit_cycLrMax = QtWidgets.QLineEdit(self.groupBox_learningRate_pop)
self.lineEdit_cycLrMax.setObjectName("lineEdit_cycLrMax")
self.lineEdit_cycLrMax.setEnabled(False)
self.lineEdit_cycLrMax.setValidator(validator)
self.gridLayout_16.addWidget(self.lineEdit_cycLrMax, 1, 3, 1, 1)
self.label_cycLrMethod = QtWidgets.QLabel(self.groupBox_learningRate_pop)
self.label_cycLrMethod.setObjectName("label_cycLrMethod")
self.label_cycLrMethod.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.gridLayout_16.addWidget(self.label_cycLrMethod, 1, 4, 1, 1)
self.comboBox_cycLrMethod = QtWidgets.QComboBox(self.groupBox_learningRate_pop)
self.comboBox_cycLrMethod.setEnabled(False)
self.comboBox_cycLrMethod.setMinimumSize(QtCore.QSize(80, 0))
self.comboBox_cycLrMethod.setObjectName("comboBox_cycLrMethod")
self.comboBox_cycLrMethod.addItem("")
self.comboBox_cycLrMethod.addItem("")
self.comboBox_cycLrMethod.addItem("")
self.gridLayout_16.addWidget(self.comboBox_cycLrMethod, 1, 6, 1, 1)
self.pushButton_cycLrPopup = QtWidgets.QPushButton(self.groupBox_learningRate_pop)
self.pushButton_cycLrPopup.setEnabled(False)
self.pushButton_cycLrPopup.setMaximumSize(QtCore.QSize(50, 16777215))
self.pushButton_cycLrPopup.setObjectName("pushButton_cycLrPopup")
self.gridLayout_16.addWidget(self.pushButton_cycLrPopup, 1, 7, 1, 1)
self.radioButton_LrExpo = QtWidgets.QRadioButton(self.groupBox_learningRate_pop)
self.radioButton_LrExpo.setObjectName("radioButton_LrExpo")
self.gridLayout_16.addWidget(self.radioButton_LrExpo, 2, 0, 2, 1)
self.label_expDecInitLr = QtWidgets.QLabel(self.groupBox_learningRate_pop)
self.label_expDecInitLr.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_expDecInitLr.setObjectName("label_expDecInitLr")
self.gridLayout_16.addWidget(self.label_expDecInitLr, 2, 1, 1, 1)
self.doubleSpinBox_expDecInitLr = QtWidgets.QDoubleSpinBox(self.groupBox_learningRate_pop)
self.doubleSpinBox_expDecInitLr.setEnabled(False)
self.doubleSpinBox_expDecInitLr.setMaximumSize(QtCore.QSize(63, 16777215))
self.doubleSpinBox_expDecInitLr.setDecimals(6)
self.doubleSpinBox_expDecInitLr.setSingleStep(0.0001)
self.doubleSpinBox_expDecInitLr.setProperty("value", 0.001)
self.doubleSpinBox_expDecInitLr.setObjectName("doubleSpinBox_expDecInitLr")
self.gridLayout_16.addWidget(self.doubleSpinBox_expDecInitLr, 2, 2, 2, 1)
self.label_expDecSteps = QtWidgets.QLabel(self.groupBox_learningRate_pop)
self.label_expDecSteps.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_expDecSteps.setObjectName("label_expDecSteps")
self.gridLayout_16.addWidget(self.label_expDecSteps, 2, 3, 1, 1)
self.spinBox_expDecSteps = QtWidgets.QSpinBox(self.groupBox_learningRate_pop)
self.spinBox_expDecSteps.setEnabled(False)
self.spinBox_expDecSteps.setMaximumSize(QtCore.QSize(63, 16777215))
self.spinBox_expDecSteps.setMaximum(999999999)
self.spinBox_expDecSteps.setObjectName("spinBox_expDecSteps")
self.gridLayout_16.addWidget(self.spinBox_expDecSteps, 2, 4, 2, 2)
self.label_expDecRate = QtWidgets.QLabel(self.groupBox_learningRate_pop)
self.label_expDecRate.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_expDecRate.setObjectName("label_expDecRate")
self.gridLayout_16.addWidget(self.label_expDecRate, 2, 6, 1, 1)
self.doubleSpinBox_expDecRate = QtWidgets.QDoubleSpinBox(self.groupBox_learningRate_pop)
self.doubleSpinBox_expDecRate.setEnabled(False)
self.doubleSpinBox_expDecRate.setMaximumSize(QtCore.QSize(63, 16777215))
self.doubleSpinBox_expDecRate.setDecimals(6)
self.doubleSpinBox_expDecRate.setMaximum(1.0)
self.doubleSpinBox_expDecRate.setSingleStep(0.01)
self.doubleSpinBox_expDecRate.setProperty("value", 0.96)
self.doubleSpinBox_expDecRate.setObjectName("doubleSpinBox_expDecRate")
self.gridLayout_16.addWidget(self.doubleSpinBox_expDecRate, 2, 7, 1, 1)
self.line = QtWidgets.QFrame(self.groupBox_learningRate_pop)
self.line.setLayoutDirection(QtCore.Qt.RightToLeft)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.gridLayout_16.addWidget(self.line, 4, 5, 1, 1)
self.pushButton_LR_finder = QtWidgets.QPushButton(self.groupBox_learningRate_pop)
self.pushButton_LR_finder.setObjectName("pushButton_LR_finder")
self.gridLayout_16.addWidget(self.pushButton_LR_finder, 4, 6, 1, 1)
self.pushButton_LR_plot = QtWidgets.QPushButton(self.groupBox_learningRate_pop)
self.pushButton_LR_plot.setObjectName("pushButton_LR_plot")
self.gridLayout_16.addWidget(self.pushButton_LR_plot, 4, 7, 1, 1)
self.gridLayout_4.addWidget(self.groupBox_learningRate_pop, 2, 0, 1, 1)
self.scrollArea_expertMode_pop.setWidget(self.scrollAreaWidgetContents_pop)
self.gridLayout_3.addWidget(self.scrollArea_expertMode_pop, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupBox_expertMode_pop, 0, 0, 1, 1)
self.tabWidget_DefineModel_pop.addTab(self.tab_expertMode_pop, "")
self.gridLayout_3_pop.addWidget(self.tabWidget_DefineModel_pop, 1, 0, 1, 1)
self.verticalLayout_3_pop.addWidget(self.splitter_pop)
self.verticalLayout_4_pop.addLayout(self.verticalLayout_3_pop)
self.gridLayout_slider_pop.addLayout(self.verticalLayout_4_pop, 0, 0, 1, 1)
######################ICONS############################################
os.path.join(dir_root,"art",Default_dict["Icon theme"],"color_mode.png")
self.label_colorModeIcon_pop.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"color_mode.png")))
self.label_NormalizationIcon_pop.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"normalization.png")))
self.label_Crop_NrEpochsIcon_pop.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"nr_epochs.png")))
self.label_zoomIcon.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"zoom_order.png")))
self.label_ModelGeomIcon_pop.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"model_architecture.png")))
self.label_padIcon_pop.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"padding.png")))
self.label_CropIcon_pop.setPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"cropping.png")))
self.label_Crop_pop.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"gpu.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.radioButton_gpu_pop.setIcon(icon)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"cpu.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.radioButton_cpu_pop.setIcon(icon1)
self.checkBox_ApplyNextEpoch.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"thumb.png")))
self.checkBox_saveEpoch_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"save_epoch.png")))
self.pushButton_Pause_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"pause.png")))
self.pushButton_Stop_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"stop.png")))
self.checkBox_HorizFlip_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"horizontal_flip.png")))
self.checkBox_VertFlip_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"vertical_flip.png")))
self.label_Rotation_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"rotation.png")))
self.label_Rotation_pop.setChecked(True)
self.label_Rotation_pop.stateChanged.connect(self.keras_changed_rotation_pop)
self.label_width_shift_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"width_shift.png")))
self.label_width_shift_pop.setChecked(True)
self.label_width_shift_pop.stateChanged.connect(self.keras_changed_width_shift_pop)
self.label_height_shift_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"height_shift.png")))
self.label_height_shift_pop.setChecked(True)
self.label_height_shift_pop.stateChanged.connect(self.keras_changed_height_shift_pop)
self.label_zoom_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"zoom.png")))
self.label_zoom_pop.setChecked(True)
self.label_zoom_pop.stateChanged.connect(self.keras_changed_zoom_pop)
self.label_shear_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"shear.png")))
self.label_shear_pop.setChecked(True)
self.label_shear_pop.stateChanged.connect(self.keras_changed_shear_pop)
self.label_Plus_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"brightness_plus.png")))
self.label_Plus_pop.setChecked(True)
self.label_Plus_pop.stateChanged.connect(self.keras_changed_brightplus_pop)
self.label_Mult_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"brightness_mult.png")))
self.label_Mult_pop.setChecked(True)
self.label_Mult_pop.stateChanged.connect(self.keras_changed_brightmult_pop)
self.label_GaussianNoiseMean_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"gaussian_noise_mean.png")))
self.label_GaussianNoiseMean_pop.setChecked(True)
self.label_GaussianNoiseMean_pop.stateChanged.connect(self.keras_changed_noiseMean_pop)
self.label_GaussianNoiseScale_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"gaussian_noise_scale.png")))
self.label_GaussianNoiseScale_pop.setChecked(True)
self.label_GaussianNoiseScale_pop.stateChanged.connect(self.keras_changed_noiseScale_pop)
self.checkBox_contrast_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"contrast.png")))
self.checkBox_contrast_pop.stateChanged.connect(self.keras_changed_contrast_pop)
self.checkBox_saturation_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"saturation.png")))
self.checkBox_saturation_pop.stateChanged.connect(self.keras_changed_saturation_pop)
self.checkBox_hue_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"hue.png")))
self.checkBox_hue_pop.stateChanged.connect(self.keras_changed_hue_pop)
self.checkBox_avgBlur_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"average_blur.png")))
#self.checkBox_avgBlur_pop.stateChanged.connect(self.changed_averageBlur_pop)
self.checkBox_gaussBlur_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"gaussian_blur.png")))
#self.checkBox_gaussBlur_pop.stateChanged.connect(self.changed_gaussBlur_pop)
self.checkBox_motionBlur_pop.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"motion_blur.png")))
#self.checkBox_motionBlur_pop.stateChanged.connect(self.changed_motionBlur_pop)
validator = QtGui.QRegExpValidator(QtCore.QRegExp("^[0-9 . ,]+$")) #validator allows numbers, dots and commas
    #alternatively, "^[0-9 . , \[ \] ]+$" could be used — this would also let the user type the brackets, but the brackets are added programmatically anyway
self.lineEdit_dropout_pop.setValidator(validator)
############Icons Expert tab########
#self.pushButton_LR_finder.setIcon(QtGui.QIcon(os.path.join(dir_root,"art",Default_dict["Icon theme"],"lr_screen.png")))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"lr_screen.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_LR_finder.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"lr_plot.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.pushButton_LR_plot.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"lr_const.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.radioButton_LrConst.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"lr_cycle.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.radioButton_LrCycl.setIcon(icon)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(os.path.join(dir_root,"art",Default_dict["Icon theme"],"lr_exponential.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.radioButton_LrExpo.setIcon(icon)
#####################Some manual settings##############################
#######################################################################
###########################Variables###################################
self.Histories = [] #List container for the fitting histories, that are produced by the keras.fit function that is controlled by this popup
self.RealTime_Acc,self.RealTime_ValAcc,self.RealTime_Loss,self.RealTime_ValLoss = [],[],[],[]
self.RealTime_OtherMetrics = {} #provide dictionary where AID can save all other metrics in case there are some (like precision...)
self.X_batch_aug = []#list for storing augmented image, created by some parallel processes
self.threadpool_quad = QtCore.QThreadPool()#Threadpool for image augmentation
self.threadpool_quad.setMaxThreadCount(4)#Maximum 4 threads
self.threadpool_quad_count = 0 #count nr. of threads in queue;
self.clr_settings = {} #variable to store step_size and gamma, will be filled with information when starting to fit
self.optimizer_settings = {} #dict to store advanced optimizer settings
self.epoch_counter = 0 #Counts the nr. of epochs
self.tableWidget_HistoryInfo_pop.setMinimumSize(QtCore.QSize(0, 100))
self.tableWidget_HistoryInfo_pop.setMaximumSize(QtCore.QSize(16777215, 140))
self.tableWidget_HistoryInfo_pop.setColumnCount(7)
self.tableWidget_HistoryInfo_pop.setRowCount(0)
self.spinBox_imagecrop_pop.setMinimum(1)
self.spinBox_imagecrop_pop.setMaximum(9E8)
#self.comboBox_colorMode_pop.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter)
self.label_Normalization_pop.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.label_Crop_NrEpochs_pop.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.spinBox_RefreshAfterEpochs_pop.setMinimum(1)
self.spinBox_RefreshAfterEpochs_pop.setMaximum(9E8)
self.spinBox_RefreshAfterNrEpochs_pop.setMinimum(1)
self.spinBox_RefreshAfterNrEpochs_pop.setMaximum(9E8)
self.spinBox_PlusLower_pop.setMinimum(-255)
self.spinBox_PlusLower_pop.setMaximum(255)
self.spinBox_PlusLower_pop.setSingleStep(1)
self.spinBox_PlusUpper_pop.setMinimum(-255)
self.spinBox_PlusUpper_pop.setMaximum(255)
self.spinBox_PlusUpper_pop.setSingleStep(1)
self.doubleSpinBox_MultLower_pop.setMinimum(0)
self.doubleSpinBox_MultLower_pop.setMaximum(10)
self.doubleSpinBox_MultLower_pop.setSingleStep(0.1)
self.doubleSpinBox_MultUpper_pop.setMinimum(0)
self.doubleSpinBox_MultUpper_pop.setMaximum(10)
self.doubleSpinBox_MultUpper_pop.setSingleStep(0.1)
self.doubleSpinBox_GaussianNoiseMean_pop.setMinimum(-255)
self.doubleSpinBox_GaussianNoiseMean_pop.setMaximum(255)
self.doubleSpinBox_GaussianNoiseMean_pop.setSingleStep(0.1)
self.doubleSpinBox_GaussianNoiseScale_pop.setMinimum(0)
self.doubleSpinBox_GaussianNoiseScale_pop.setMaximum(99.9)
self.doubleSpinBox_GaussianNoiseScale_pop.setSingleStep(0.1)
self.spinBox_avgBlurMin_pop.setMinimum(0)
self.spinBox_avgBlurMin_pop.setMaximum(255)
self.spinBox_avgBlurMax_pop.setMinimum(0)
self.spinBox_avgBlurMax_pop.setMaximum(255)
self.spinBox_gaussBlurMin_pop.setMinimum(0)
self.spinBox_gaussBlurMin_pop.setMaximum(255)
self.spinBox_gaussBlurMax_pop.setMinimum(0)
self.spinBox_gaussBlurMax_pop.setMaximum(255)
self.comboBox_ShowTrainOrValid_pop.addItems(["Training","Validation"])
self.comboBox_ShowWOrWoAug_pop.addItems(["With Augmentation","Original image"])
# self.groupBox_expertMode_pop.setEnabled(True)
# self.groupBox_expertMode_pop.setCheckable(True)
# self.groupBox_expertMode_pop.setChecked(False)
# self.scrollArea_expertMode_pop.setWidgetResizable(True)
#Adjust some QObjects manually
self.spinBox_batchSize.setMinimum(1)
self.spinBox_batchSize.setMaximum(1E6)
self.spinBox_batchSize.setValue(32)
self.spinBox_epochs.setMinimum(1)
self.spinBox_epochs.setMaximum(1E6)
self.spinBox_epochs.setValue(1)
self.doubleSpinBox_learningRate.setDecimals(9)
self.doubleSpinBox_learningRate.setMinimum(0.0)
self.doubleSpinBox_learningRate.setMaximum(1E6)
self.doubleSpinBox_learningRate.setValue(0.001)
self.doubleSpinBox_learningRate.setSingleStep(0.0001)
self.spinBox_trainLastNOnly_pop.setMinimum(0)
self.spinBox_trainLastNOnly_pop.setMaximum(1E6)
self.spinBox_trainLastNOnly_pop.setValue(0)
self.checkBox_trainDenseOnly_pop.setChecked(False)
self.spinBox_NrEpochs.setMinimum(1)
self.spinBox_NrEpochs.setMaximum(9E8)
self.spinBox_realTimeEpochs.setSingleStep(1)
self.spinBox_realTimeEpochs.setMinimum(1)
self.spinBox_realTimeEpochs.setMaximum(9999999)
self.spinBox_realTimeEpochs.setValue(250)
self.pushButton_Pause_pop.setMinimumSize(QtCore.QSize(60, 30))
self.pushButton_Pause_pop.setMaximumSize(QtCore.QSize(60, 30))
self.pushButton_Stop_pop.setMinimumSize(QtCore.QSize(60, 30))
self.pushButton_Stop_pop.setMaximumSize(QtCore.QSize(60, 30))
#######################################################################
######################Connections######################################
self.doubleSpinBox_learningRate.setEnabled(False)
self.spinBox_trainLastNOnly_pop.setEnabled(False)
self.lineEdit_dropout_pop.setEnabled(False)
self.pushButton_LR_finder.setEnabled(False)
#self.pushButton_LR_plot.setEnabled(False)
self.radioButton_LrConst.toggled['bool'].connect(self.doubleSpinBox_learningRate.setEnabled)
self.radioButton_LrCycl.toggled['bool'].connect(self.lineEdit_cycLrMin.setEnabled)
self.radioButton_LrCycl.toggled['bool'].connect(self.lineEdit_cycLrMax.setEnabled)
self.radioButton_LrCycl.toggled['bool'].connect(self.comboBox_cycLrMethod.setEnabled)
self.radioButton_LrCycl.toggled['bool'].connect(self.pushButton_cycLrPopup.setEnabled)
self.radioButton_LrExpo.toggled['bool'].connect(self.doubleSpinBox_expDecInitLr.setEnabled)
self.radioButton_LrExpo.toggled['bool'].connect(self.spinBox_expDecSteps.setEnabled)
self.radioButton_LrExpo.toggled['bool'].connect(self.doubleSpinBox_expDecRate.setEnabled)
self.groupBox_learningRate_pop.toggled['bool'].connect(self.doubleSpinBox_learningRate.setEnabled)
self.checkBox_expt_loss_pop.toggled['bool'].connect(self.comboBox_expt_loss_pop.setEnabled)
self.checkBox_optimizer_pop.toggled['bool'].connect(self.comboBox_optimizer.setEnabled)
self.checkBox_optimizer_pop.toggled['bool'].connect(self.pushButton_optimizer_pop.setEnabled)
self.checkBox_trainLastNOnly_pop.toggled['bool'].connect(self.spinBox_trainLastNOnly_pop.setEnabled)
self.checkBox_dropout_pop.toggled['bool'].connect(self.lineEdit_dropout_pop.setEnabled)
self.checkBox_avgBlur_pop.clicked['bool'].connect(self.spinBox_avgBlurMin_pop.setEnabled)
self.checkBox_avgBlur_pop.clicked['bool'].connect(self.spinBox_avgBlurMax_pop.setEnabled)
self.checkBox_gaussBlur_pop.clicked['bool'].connect(self.spinBox_gaussBlurMin_pop.setEnabled)
self.checkBox_gaussBlur_pop.clicked['bool'].connect(self.spinBox_gaussBlurMax_pop.setEnabled)
self.checkBox_motionBlur_pop.clicked['bool'].connect(self.label_motionBlurKernel_pop.setEnabled)
self.checkBox_motionBlur_pop.clicked['bool'].connect(self.lineEdit_motionBlurKernel_pop.setEnabled)
self.checkBox_motionBlur_pop.clicked['bool'].connect(self.label_motionBlurAngle_pop.setEnabled)
self.checkBox_motionBlur_pop.clicked['bool'].connect(self.lineEdit_motionBlurAngle_pop.setEnabled)
self.checkBox_gaussBlur_pop.clicked['bool'].connect(self.label_gaussBlurMin_pop.setEnabled)
self.checkBox_gaussBlur_pop.clicked['bool'].connect(self.label_gaussBlurMax_pop.setEnabled)
self.checkBox_avgBlur_pop.clicked['bool'].connect(self.label_avgBlurMin_pop.setEnabled)
self.checkBox_avgBlur_pop.clicked['bool'].connect(self.label_avgBlurMax_pop.setEnabled)
self.checkBox_optimizer_pop.toggled['bool'].connect(self.comboBox_optimizer.setEnabled)
self.checkBox_expt_loss_pop.toggled['bool'].connect(self.comboBox_expt_loss_pop.setEnabled)
#self.comboBox_optimizer.currentTextChanged.connect(lambda: self.expert_optimizer_changed())
self.checkBox_optimizer_pop.stateChanged.connect(self.expert_optimizer_off_pop)
self.groupBox_learningRate_pop.toggled.connect(self.expert_learningrate_off_pop)
self.checkBox_expt_loss_pop.stateChanged.connect(self.expert_loss_off_pop)
self.groupBox_expertMode_pop.toggled.connect(self.expert_mode_off_pop)
self.retranslateUi(Form)
self.tabWidget_DefineModel_pop.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.pushButton_UpdatePlot_pop.setText(_translate("Form", "Update Plot", None))
self.checkBox_realTimePlotting_pop.setToolTip(_translate("Form", tooltips["checkBox_realTimePlotting_pop"], None))
self.checkBox_realTimePlotting_pop.setText(_translate("Form", "Real-time plotting", None))
self.label_realTimeEpochs_pop.setText(_translate("Form", "Nr. of epochs for RT", None))
self.label_realTimeEpochs_pop.setToolTip(_translate("Form",tooltips["label_realTimeEpochs_pop"] , None))
self.spinBox_realTimeEpochs.setToolTip(_translate("Form", tooltips["label_realTimeEpochs_pop"], None))
self.groupBox_FittingInfo_pop.setTitle(_translate("Form", "Fitting Info", None))
self.pushButton_saveTextWindow_pop.setText(_translate("Form", "Save text ", None))
self.pushButton_clearTextWindow_pop.setToolTip(_translate("Form",tooltips["pushButton_clearTextWindow_pop"] , None))
self.pushButton_clearTextWindow_pop.setText(_translate("Form", "Clear text", None))
self.groupBox_ChangeModel_pop.setTitle(_translate("Form", "Change fitting parameters", None))
self.label_ModelGeom_pop.setText(_translate("Form", "Model Architecture", None))
self.label_ModelGeom_pop.setToolTip(_translate("Form", tooltips["comboBox_ModelSelection"], None))
self.label_ModelGeomIcon_pop.setToolTip(_translate("Form", tooltips["comboBox_ModelSelection"], None))
self.comboBox_ModelSelection_pop.setToolTip(_translate("Form", tooltips["comboBox_ModelSelection"], None))
self.label_colorMode_pop.setToolTip(_translate("Form", "Color mode used for this model", None))
self.label_colorMode_pop.setText(_translate("Form", "Color Mode", None))
self.label_colorModeIcon_pop.setToolTip(_translate("Form", "Color mode used for this model", None))
self.comboBox_colorMode_pop.setToolTip(_translate("Form", "Color mode used for this model", None))
self.label_Normalization_pop.setToolTip(_translate("Form", tooltips["label_Normalization"], None))
self.label_Normalization_pop.setText(_translate("Form", "Normalization", None))
self.label_NormalizationIcon_pop.setToolTip(_translate("Form", tooltips["label_Normalization"], None))
self.comboBox_Normalization_pop.setToolTip(_translate("Form", tooltips["label_Normalization"], None))
self.label_zoomOrder.setText(_translate("Form", "Zoom order", None))
self.label_Crop_pop.setToolTip(_translate("Form", tooltips["label_Crop"], None))
self.label_Crop_pop.setText(_translate("Form", "Input image size", None))
self.label_CropIcon_pop.setToolTip(_translate("Form", tooltips["label_Crop"], None))
self.groupBox_system_pop.setTitle(_translate("Form", "Training", None))
self.label_padIcon_pop.setToolTip(_translate("Form", tooltips["label_paddingMode"], None))
self.comboBox_paddingMode_pop.setToolTip(_translate("Form", tooltips["label_paddingMode"], None))
self.label_paddingMode_pop.setToolTip(_translate("Form", tooltips["label_paddingMode"], None))
self.spinBox_imagecrop_pop.setToolTip(_translate("Form", tooltips["label_Crop"], None))
self.label_Crop_NrEpochs_pop.setToolTip(_translate("Form", "Total number of training iterations", None))
self.label_Crop_NrEpochs_pop.setText(_translate("Form", "Nr. epochs", None))
self.spinBox_NrEpochs.setToolTip(_translate("Form", "Total number of training iterations", None))
self.label_Crop_NrEpochsIcon_pop.setToolTip(_translate("Form", "Total | |
<gh_stars>10-100
""" A container for all information about the field: geometry and labels, as well as convenient API. """
import os
import re
from glob import glob
from difflib import get_close_matches
from concurrent.futures import ThreadPoolExecutor
import numpy as np
from ...batchflow.notifier import Notifier
from .visualization import VisualizationMixin
from ..geometry import SeismicGeometry
from ..labels import Horizon, Fault
from ..metrics import FaciesMetrics
from ..utils import AugmentedList
class Field(VisualizationMixin):
""" A common container for all information about the field: cube geometry and various labels.
To initialize, one must provide:
- geometry-like entity, which can be a path to a seismic cube or instance of `:class:SeismicGeometry`;
additional parameters of geometry instantiation can be passed via `geometry_kwargs` parameters.
- optionally, `labels` in one of the following formats:
- dictionary with keys defining attribute to store loaded labels in and values as
sequences of label-like entities (path to a label or instance of label class)
- sequence with label-like entities. This way, labels will be stored in `labels` attribute
- string to define path(s) to labels (same as those paths wrapped in a list)
- None as a signal that no labels are provided for a field.
- `labels_class` defines the class to use for loading and can be supplied in one of the following formats:
- dictionary with same keys as in `labels`. Values are either string (e.g. `horizon`) or
the type to initialize label itself (e.g. `:class:.Horizon`)
- a single string or type to use for all of the labels
- if not provided, we try to infer the class from name of the attribute to store the labels in.
The guess is based on a similarity between passed name and a list of pre-defined label types.
For example, `horizons` will be treated as `horizon` and loaded as such.
>>> {'horizons': 'path/to/horizons/*'}
would be loaded as instances of `:class:.Horizon`.
- `labels_kwargs` are passed for instantiation of every label.
Examples
--------
Initialize field with only geometry:
>>> Field(geometry='path/to/cube.qblosc')
>>> Field(geometry=SeismicGeometry(...))
The most complete labels definition:
>>> Field(geometry=..., labels={'horizons': ['path/to/horizon', Horizon(...)],
'fans': 'paths/to/fans/*',
'faults': ['path/to/fault1', 'path/to/fault2', ],
'lift_geometry': 'path/to/geometry_target.hdf5'})
Use a `labels_class` instead; this way, all of the labels are stored as `labels` attribute, no matter the class:
>>> Field(geometry=..., labels='paths/*', labels_class='horizon')
>>> Field(geometry=..., labels=['paths/1', 'paths/2', 'paths/3'], labels_class='fault')
"""
#pylint: disable=redefined-builtin
def __init__(self, geometry, labels=None, labels_class=None, geometry_kwargs=None, labels_kwargs=None, **kwargs):
# Attributes
self.labels = []
self.horizons, self.facies, self.fans, self.channels, self.faults = [], [], [], [], []
self.loaded_labels = []
# Geometry: description and convenient API to a seismic cube
if isinstance(geometry, str):
geometry_kwargs = geometry_kwargs or {}
geometry = SeismicGeometry(geometry, **{**kwargs, **geometry_kwargs})
self.geometry = geometry
# Labels: objects on a field
if labels:
labels_kwargs = labels_kwargs or {}
self.load_labels(labels, labels_class, **{**kwargs, **labels_kwargs})
# Label initialization inner workings
METHOD_TO_NAMES = {
'_load_horizons': ['horizon', 'facies', 'fans', 'channels', Horizon],
'_load_faults': ['fault', Fault],
'_load_geometries': ['geometries', 'geometry', SeismicGeometry],
}
NAME_TO_METHOD = {name: method for method, names in METHOD_TO_NAMES.items() for name in names}
def load_labels(self, labels=None, labels_class=None, **labels_kwargs):
""" Load labels and store them in the instance. Refer to the class documentation for details. """
if isinstance(labels, str):
labels = self.make_path(labels, makedirs=False)
labels = glob(labels)
if isinstance(labels, (tuple, list)):
labels = {'labels': labels}
if not isinstance(labels, dict):
raise TypeError(f'Labels type should be `str`, `sequence` or `dict`, got {type(labels)} instead!')
# Labels class: make a dictionary
if labels_class is None:
labels_class_dict = {label_dst : None for label_dst in labels.keys()}
if isinstance(labels_class, (type, str)):
labels_class_dict = {label_dst : labels_class for label_dst in labels.keys()}
if isinstance(labels_class, dict):
labels_class_dict = labels_class
for label_dst, label_src in labels.items():
# Try getting provided `labels_class`, else fallback on NAME_TO_METHOD closest match
label_class = labels_class_dict.get(label_dst)
if label_class is None:
# Roughly equivalent to ``label_class = self.NAME_TO_METHOD.get(label_dst)``
str_names = [name for name in (self.NAME_TO_METHOD.keys())
if isinstance(name, str)]
matched = get_close_matches(label_dst, str_names, n=1)
if matched:
label_class = matched[0]
if label_class is None:
raise TypeError(f"Can't determine the label class for `{label_dst}`!")
# Process paths: get rid of service files
if isinstance(label_src, str):
label_src = self.make_path(label_src, makedirs=False)
label_src = glob(label_src)
if not isinstance(label_src, (tuple, list)):
label_src = [label_src]
label_src = self._filter_paths(label_src)
# Load desired labels, based on class
method_name = self.NAME_TO_METHOD[label_class]
method = getattr(self, method_name)
result = method(label_src, **labels_kwargs)
setattr(self, label_dst, result)
self.loaded_labels.append(label_dst)
if 'labels' not in labels and not self.labels:
setattr(self, 'labels', result)
@staticmethod
def _filter_paths(paths):
""" Remove paths fors service files. """
return [path for path in paths
if not isinstance(path, str) or \
not any(ext in path for ext in ['.dvc', '.gitignore', '.meta'])]
def _load_horizons(self, paths, max_workers=4, filter=True, interpolate=False, sort=True, **kwargs):
""" Load horizons from paths or re-use already created ones. """
# Separate paths from ready-to-use instances
horizons, paths_to_load = [], []
for item in paths:
if isinstance(item, str):
paths_ = self._filter_paths(glob(item))
paths_to_load.extend(paths_)
elif isinstance(item, Horizon):
item.field = self
horizons.append(item)
# Load from paths in multiple threads
with ThreadPoolExecutor(max_workers=min(max_workers, len(paths_to_load) or 1)) as executor:
function = lambda path: self._load_horizon(path, filter=filter, interpolate=interpolate, **kwargs)
loaded = list(executor.map(function, paths_to_load))
horizons.extend(loaded)
if sort:
sort = sort if isinstance(sort, str) else 'h_mean'
horizons.sort(key=lambda label: getattr(label, sort))
return horizons
def _load_horizon(self, path, filter=True, interpolate=False, **kwargs):
""" Load a single horizon from path. """
horizon = Horizon(path, field=self, **kwargs)
if filter:
horizon.filter()
if interpolate:
horizon.interpolate()
return horizon
def _load_faults(self, paths, max_workers=4, pbar=True, filter=True, fix=True, **kwargs):
""" Load faults from paths. """
with ThreadPoolExecutor(max_workers=min(max_workers, len(paths) or 1)) as executor:
function = lambda path: self._load_fault(path, filter=filter, fix=fix, **kwargs)
loaded = list(Notifier(pbar, total=len(paths))(executor.map(function, paths)))
faults = [fault for fault in loaded if len(fault) > 0]
return faults
def _load_fault(self, path, filter=True, fix=True, **kwargs):
""" Load a single fault from path. """
fault = Fault(path, field=self, fix=fix, **kwargs)
if filter and fault.format != 'file-npz':
fault.filter()
return fault
def _load_geometries(self, paths, **kwargs):
if isinstance(paths, str):
path = paths
if isinstance(paths, (tuple, list)):
if len(paths) > 1:
raise ValueError(f'Path for Geometry loading is non-unique!, {paths}')
path = paths[0]
return SeismicGeometry(path, **kwargs)
# Other methods of initialization
@classmethod
def from_horizon(cls, horizon):
""" Create a field from a single horizon. """
return cls(geometry=horizon.geometry, labels={'horizons': horizon})
    @classmethod
    def from_dvc(cls, tag, dvc_path=''):
        """ Create a field from a dvc tag. """
        # NOTE(review): stub — the body is empty, so this currently returns None.
        # Presumably meant to resolve `tag` inside the dvc repo at `dvc_path`
        # and build a Field from it; confirm before relying on this method.
# Inner workings
def __getattr__(self, key):
""" Redirect calls for missing attributes, properties and methods to `geometry`. """
if hasattr(self.geometry, key):
return getattr(self.geometry, key)
raise AttributeError(f'Attribute `{key}` does not exist in either Field or associated Geometry!')
    def __getattribute__(self, key):
        """ Wrap every accessed list with `AugmentedList`.
        The wrapped attribute is re-stored in the instance, so that we return the same object as in the instance. """
        result = super().__getattribute__(key)
        # Plain lists are transparently upgraded to AugmentedList on access
        if isinstance(result, list) and not isinstance(result, AugmentedList):
            result = AugmentedList(result)
            # Don't shadow class-level properties with an instance attribute; for
            # plain attributes, store the wrapper back so that repeated accesses
            # return the very same object rather than a fresh wrapper each time.
            if not (key in vars(self.__class__) and isinstance(getattr(self.__class__, key), property)):
                setattr(self, key, result)
        return result
# Public methods. Usually, used by Batch class
def load_seismic(self, location, native_slicing=False, src='geometry', **kwargs):
""" Load data from cube.
Parameters
----------
location : sequence
A triplet of slices to define exact location in the cube.
native_slicing : bool
if True, crop will be loaded as a slice of geometry. Prefered for 3D crops to speed up loading.
If False, use `load_crop` method to load crops.
src : str
Attribute with desired geometry.
"""
geometry = getattr(self, src)
if native_slicing:
seismic_crop = geometry[tuple(location)]
else:
seismic_crop = geometry.load_crop(location, **kwargs)
return seismic_crop
def make_mask(self, location, axis=None, indices='all', width=3, src='labels', **kwargs):
""" Create masks from labels.
Parameters
----------
location : int or sequence
If integer, then location along specified `axis`.
Otherwise, a triplet of slices to define exact location in the cube.
axis : int or str
Axis identifier. must be provided if `location` is integer.
indices : str, int or sequence of ints
Which labels to use in mask creation.
If 'all', then use all labels.
If 'single' or `random`, then use one random label.
If int or array-like, then element(s) are interpreted as indices of desired labels.
width : int
Width of the resulting label.
src : str
Attribute with desired labels.
"""
# Parse parameters
if isinstance(location, (int, np.integer)):
location | |
# C2SMART Lab, NYU
# NCHRP 03-137
# @file TTCD_Calculation_Online.py
# @author <NAME>
# @author <NAME>
# @date 2020-10-18
import pandas as pd
import numpy as np
from shapely.geometry import Polygon
import math
import time
import multiprocessing as mp
from itertools import repeat
from scipy import spatial
import sys
def frange(start, stop=None, step=None):
    """Yield the values of a float range, formatted as '%g' strings.

    Mirrors `range` semantics: with a single argument it counts from 0.0 up to
    `start`; `step` defaults to 1.0 and may be negative. Note that values are
    yielded as strings (e.g. '0.5'), not floats, matching the original contract.

    Raises:
        ValueError: if `step` is zero (the original looped forever here).
    """
    if stop is None:              # `is None`, not `== None` (PEP 8)
        start, stop = 0.0, start + 0.0
    if step is None:
        step = 1.0
    if step == 0:
        raise ValueError('frange() step must not be zero')
    # Run while `start` has not yet crossed `stop` in the direction of `step`
    while (step > 0 and start < stop) or (step < 0 and start > stop):
        yield "%g" % start
        start = start + step
def dist(x1, y1, x2, y2):
    """
    Return the Euclidean distance between two points, rounded to 6 decimals.

    Keyword arguments:
    >>> x1: float value for X for first point (ft.)
    >>> y1: float value for Y for first point (ft.)
    >>> x2: float value for X for 2nd point (ft.)
    >>> y2: float value for Y for 2nd point (ft.)
    RETURN: The euclidean distance (float, ft.).
    """
    dx = x2 - x1
    dy = y2 - y1
    raw = math.sqrt(dx ** 2 + dy ** 2)
    return float("{:.6f}".format(raw))
def get_heading(x1, y1, x2, y2):
    """
    Return the compass heading from point 1 to point 2 (degrees; 0 = north, clockwise).

    Keyword arguments:
    >>> x1: Float value for X for first point
    >>> y1: Float value for Y for first point
    >>> x2: Float value for X for 2nd point
    >>> y2: Float value for Y for 2nd point
    RETURN: heading in [0, 360); 0 when both points coincide.
    """
    dx = x2 - x1
    dy = y2 - y1
    if dx == 0:
        # Purely vertical move: straight north, straight south,
        # or coincident points (0 by convention)
        if dy > 0:
            return 0
        if dy < 0:
            return 180
        return 0
    angle = (90 - math.degrees(math.atan2(dy, dx)) + 360) % 360
    return float("{:.6f}".format(angle))
def ttc_location(data_check, distance, start_time):
    """
    Return the TTCmax location by walking `distance` feet along the recorded trajectory.

    Keyword arguments:
    >>> data_check: The working data frame selected from the main data frame.
    >>> distance: The projecting distance based on the current speed (ft.).
    >>> start_time: The time stamp of the processing step.
    RETURN: TTCmax point X, TTCmax point Y, the nearest time stamp before the TTCmax
    location projected, heading of the vehicle at the TTCmax point. X, Y and heading
    are NaN when the recorded trajectory is shorter than `distance`.
    """
    remaining = distance
    cur_x = data_check.at[0, 'X']
    cur_y = data_check.at[0, 'Y']
    ttc_x, ttc_y, heading = np.nan, np.nan, np.nan

    for idx in range(1, len(data_check)):
        nxt_x = data_check.at[idx, 'X']
        nxt_y = data_check.at[idx, 'Y']
        segment = dist(cur_x, cur_y, nxt_x, nxt_y)
        if segment <= remaining:
            # Consume this whole segment and advance one 0.1 s step
            remaining = remaining - segment
            cur_x, cur_y = nxt_x, nxt_y
            start_time = float("{:.1f}".format(start_time + 0.1))
        else:
            # TTCmax falls inside this segment: project the leftover distance
            heading = get_heading(cur_x, cur_y, nxt_x, nxt_y)
            rad = math.pi / 2 - math.radians(heading)
            ttc_x = cur_x + remaining * math.cos(rad)
            ttc_y = cur_y + remaining * math.sin(rad)
            start_time = float("{:.1f}".format(start_time + 0.1))
            break

    return [ttc_x, ttc_y, float("{:.1f}".format(start_time - 0.1)), heading]
def ttc_location_online(data_check, distance, start_time):
    """
    Online (single-step) variant of [ttc_location]: project the TTCmax point straight
    ahead along the heading defined by the first two trajectory points, without
    walking the full potential trajectory. Replaces [ttc_location] in the online
    single-step-length updating process.

    Keyword arguments:
    >>> data_check: The working data frame selected from the main data frame.
    >>> distance: The projecting distance based on the current speed (ft.).
    >>> start_time: The time stamp of the processing step.
    RETURN: TTCmax point X, TTCmax point Y, the nearest time stamp before the TTCmax
    location projected, heading of the vehicle at the TTCmax point.
    """
    origin_x = data_check.at[0, 'X']
    origin_y = data_check.at[0, 'Y']
    ahead_x = data_check.at[1, 'X']
    ahead_y = data_check.at[1, 'Y']

    heading = get_heading(origin_x, origin_y, ahead_x, ahead_y)
    rad = math.pi / 2 - math.radians(heading)
    ttc_x = origin_x + distance * math.cos(rad)
    ttc_y = origin_y + distance * math.sin(rad)

    # The time stamp is bumped by 0.1 s and immediately subtracted back; both
    # roundings are kept so the float behavior matches the offline version.
    start_time = float("{:.1f}".format(start_time + 0.1))
    return [ttc_x, ttc_y, float("{:.1f}".format(start_time - 0.1)), heading]
def next_location(data_check, point, X_now, Y_now, speed_now, accel, heading):
    """Returns next time step's location of the target vehicle.
    Keyword arguments:
    >>> data_check: The working data frame selected from the main dataframe.
    >>> point: The index of the target vehicle's trajectory in the working data frame.
    >>> X_now: Current X of the target vehicle (ft.).
    >>> Y_now: Current Y of the target vehicle (ft.).
    >>> speed_now: Current speed of the target vehicle (ft/s).
    >>> accel: Acceleration of the target vehicle (fpss).
    >>> heading: Current heading angle of the target vehicle.
    RETURN: Next X, Next Y, Next heading, Next speed, the index of the reference trajectory for next time stamp.
    """
    # Degenerate trajectory (one point or empty): nothing to walk along, stay put.
    if len(data_check) <= 1:
        return (X_now, Y_now, heading, speed_now, point)
    else:
        Start_X = X_now
        Start_Y = Y_now
        Check_X = data_check.at[point, 'X']
        Check_Y = data_check.at[point, 'Y']
        count = 0
        # Distance covered in one 0.1 s step: v*dt + 0.5*a*dt^2 with dt = 0.1.
        dist1 = float("{:.6f}".format(speed_now * 0.1 + 0.5 * accel * 0.01))
        # NOTE(review): speed update uses 0.5*a*dt instead of a*dt — looks like a
        # half-step (midpoint) speed; confirm this is intentional.
        speed_next = float("{:.6f}".format(speed_now + 0.5 * accel * 0.1))
        # Distance from the current position to the next reference point.
        dist2 = dist(Start_X, Start_Y, Check_X, Check_Y)
        if accel < 0:
            # Decelerating: check whether the vehicle stops within this step.
            # NOTE(review): time-to-stop under constant deceleration is -v/a;
            # the factor 2 here gives twice that — confirm against the TTCD spec.
            time0 = float("{:.6f}".format(-2 * speed_now / accel))
            dist0 = float("{:.6f}".format(speed_now * time0 + 0.5 * accel * time0 * time0))
            if (time0 < 0.1) and (dist0 <= dist2):
                # Stops before reaching the next reference point: project the
                # stopping distance along the current heading; speed becomes 0.
                Heading = heading
                rad = math.pi / 2 - math.radians(Heading)
                Des_X = Start_X + dist0 * math.cos(rad)
                Des_Y = Start_Y + dist0 * math.sin(rad)
                return (Des_X, Des_Y, Heading, 0, point)
            elif (time0 < 0.1) and (dist0 > dist2):
                # Stops during this step but past the next reference point:
                # walk the trajectory consuming the stopping distance.
                while (dist0 > dist2):
                    count += 1
                    if point + count < len(data_check):
                        dist0 = dist0 - dist2
                        Start_X = Check_X
                        Start_Y = Check_Y
                        Check_X = data_check.at[point + count, 'X']
                        Check_Y = data_check.at[point + count, 'Y']
                        dist2 = dist(Start_X, Start_Y, Check_X, Check_Y)
                    else:
                        # Ran out of reference trajectory: stop at the last segment.
                        break
                Heading = get_heading(Start_X, Start_Y, Check_X, Check_Y)
                rad = math.pi / 2 - math.radians(Heading)
                Des_X = Start_X + dist0 * math.cos(rad)
                Des_Y = Start_Y + dist0 * math.sin(rad)
                return (Des_X, Des_Y, Heading, 0, point + count)
            else:
                # Still moving at the end of the step: walk the trajectory
                # consuming this step's travel distance, updating speed per segment.
                while (dist1 > dist2):
                    count += 1
                    if point + count < len(data_check):
                        dist1 = dist1 - dist2
                        # dist0 = dist0 - dist2
                        Start_X = Check_X
                        Start_Y = Check_Y
                        Check_X = data_check.at[point + count, 'X']
                        Check_Y = data_check.at[point + count, 'Y']
                        dist2 = dist(Start_X, Start_Y, Check_X, Check_Y)
                        # NOTE(review): speed is incremented by 0.5*a*dt once per
                        # consumed segment, not per time step — confirm intended.
                        speed_next = float("{:.6f}".format(speed_next + 0.5 * accel * 0.1))
                    else:
                        break
                Heading = get_heading(Start_X, Start_Y, Check_X, Check_Y)
                rad = math.pi / 2 - math.radians(Heading)
                Des_X = Start_X + dist1 * math.cos(rad)
                Des_Y = Start_Y + dist1 * math.sin(rad)
                return (Des_X, Des_Y, Heading, speed_next, point + count)
        elif (accel == 0) & (speed_now == 0):
            # Standing still with no acceleration: position and state unchanged.
            return (Start_X, Start_Y, heading, speed_now, point)
        else:
            # Constant speed or accelerating: same segment-walking scheme as above.
            while (dist1 > dist2):
                count += 1
                if point + count < len(data_check):
                    dist1 = dist1 - dist2
                    Start_X = Check_X
                    Start_Y = Check_Y
                    #print ("LengthCheck",point, count, len(data_check))
                    Check_X = data_check.at[point + count, 'X']
                    Check_Y = data_check.at[point + count, 'Y']
                    dist2 = dist(Start_X, Start_Y, Check_X, Check_Y)
                    speed_next = float("{:.6f}".format(speed_next + 0.5 * accel * 0.1))
                else:
                    break
            Heading = get_heading(Start_X, Start_Y, Check_X, Check_Y)
            rad = math.pi / 2 - math.radians(Heading)
            Des_X = Start_X + dist1 * math.cos(rad)
            Des_Y = Start_Y + dist1 * math.sin(rad)
            return (Des_X, Des_Y, Heading, speed_next, point + count)
def next_location_online(data_check, point, X_now, Y_now, speed_now, accel, heading):
"""Returns next time step's location of the target vehicle without projections potential trajectory.
This is the online version, can be used for single step length updating process.
Replace all the function [next_location] for the online version.
Keyword arguments:
>>> data_check: The working data frame selected from the main dataframe.
>>> point: The index of the target vehicle's trajectory in the working data frame.
>>> X_now: Current X of the target vehicle (ft.).
>>> Y_now: Current Y of the target vehicle (ft.).
>>> speed_now: Current speed of the target vehicle (ft/s).
>>> accel: Acceleration of the target vehicel (fpss).
>>> heading: Current heading angle of the target vehicle.
RETURN: Next X, Next Y, Next heading, Next speed, the index of the reference trajectory for next time stamp.
"""
Start_X = X_now
Start_Y = Y_now
dist1 = float("{:.6f}".format(speed_now * 0.1 + 0.5 * accel * 0.01))
speed_next = float("{:.6f}".format(speed_now + 0.5 * accel * 0.1))
if (accel == 0) & | |
#Adventure Game
#<NAME>, <NAME>, <NAME> (C)2010
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import textwrap
def pprint(text, width=80):
    """Print *text* word-wrapped to *width* columns.

    Generalization: the wrap width used to be hard-coded to 80; it is now a
    keyword parameter defaulting to 80, so all existing calls are unchanged.
    """
    # print(...) with a single argument behaves identically under Python 2
    # (parenthesized expression) and Python 3 (function call).
    print(textwrap.fill(text, width))
# Opening narration: explains the quest and lists every command the parser
# understands (north/south/east/west, use, inventory, look, search, commands).
pprint('''Welcome to the Adventure Game, here you will embark on a dangerous journey
to defeat the evil Dragon named Kibbles, and rescue Princess Catherine, You're story
begins in the small town of Jersey, type the commands north, south, east, and west
in order to navigate,type the command use followed by the item name to use an item, type
the command inventory to look at the items in your posession, type the command look
to look around you, type the command search followed by a target to search for people
or items, type the command commands to repeat these directions. Good luck''')
# Player inventory: plain list of item-name strings, mutated as the story progresses.
items=[]
# Location constants: `place` holds one of these values and selects which
# location loop below is active.
place=1
town=2
forest=3
mine=4
lair=5
dead=6
# Game-state constants: `game` stays `on` while the main loop should keep running.
# NOTE(review): `game=0` is immediately overwritten by `game=on` below.
game=0
on=90
win=91
game=on
place=town
lose=92
while game==on:
while place==town:
direction=raw_input("What would you like to do?\n")
if direction=="west":
if "iron ore" and "wood" and "3 Gold Pieces" in items:
pprint("the blacksmith greets you, and you tell him that you have the items and money he requires, you also give him the saw to make up for some of the difference, he then forges you a battleaxe and wishes you luck on the rest of your quest")
items.remove ("saw")
items.remove ("3 Gold Pieces")
items.remove ("iron ore")
items.remove ("wood")
items.append ("battleaxe")
else:
pprint('''You are at the blacksmith shop, many different kinds of weapons decorate the walls, the blacksmith is a tall,
hairy man who smiles as you enter the door. You tell him that you need a weapon to kill Kibbles the Evil Dragon.
He laughs and says 'Mah boy! Killing Kibbles is what all true warriors strive for! But you can't do it with any of my weapons,
you need Atari, the magic sword that lies in the cave east of the forest. Many have tried to get it, but all have failed as
it is guarded by the evil wizard Gwonam! If you're looking to fight Gwonam, I can make you an axe, but you need to bring
me iron ore, wood, and some gold for my troubles.' You then decide to head into the forest to seek out the materials for the blacksmith''')
elif direction=="north":
pprint('''You walk up the gates of the king's castle, the guards stop and ask you to
state your business, you tell them that you want to rescue <NAME>. They laugh and
tell you that you should probably obtain a weapon first, you head back to the center of town''')
elif direction=="east":
print "You head into the residential district of town, a few huts line the streets, but there isn't much else of note here, you decide to head back to the center of town"
elif direction=="south":
print "You head deep into the forest"
place=forest
elif direction=="commands":
print "type the commands north, south, east, and west in order to navigate,type the command use followed by the item name to use an item, type the command inventory to look at the items in your posession, type the command look to look around you, type the command search followed by a target to search for people or items, type the command commands to repeat these directions"
elif direction=="inventory":
print items
elif direction=="look":
print "You're located in the small town of Jersey, here you see a blacksmith shop to the west, the king's castle to the north, houses to the east, and the town's exit to the forest to your south"
elif "use" in direction:
print "You have nothing to use"
elif "search" in direction:
print "There's nothing of importance to search"
else:
print "Please type a command"
while place==forest:
direction=raw_input("What would you like to do?\n")
if direction=="west":
print "You head into the mine"
place=mine
elif direction=="south":
print "The mountains look too treacherous to try and pass through. It might not hurt to try and look for that man though."
elif direction=="east":
if "battleaxe" in items:
print "You head into Gwonam's Lair"
place=lair
else:
print "it's not a good idea to go to Gwonam's lair unprotected"
elif direction=="north":
print "You head back to Jersey"
place=town
elif direction=="look":
print "You are at the center of a vast forest, surrounded by many tall trees, you could defintiely obtain some wood from some of them, but you would need the proper tools. To your west lies a mine, to your south a group of impassable mountains, but you can hear a person in the distance, to your east lies the evil gwonam's lair, and to the north lies Jersey"
elif "use" in direction:
if "pickaxe" in direction:
if "iron ore" in items:
if "man" or "person" in direction:
print "The man looks at the pickaxe with sorrow and tells you that this was his brothers pickaxe, he offers you his saw for it and you accept."
items.append ("saw")
items.remove ("pickaxe")
if "saw" in direction:
if "saw" in items:
if "tree" in direction:
print "you use the saw to cut some wood off of a nearby tree"
items.append ("wood")
else:
print "you can't use that"
elif direction=="inventory":
print items
elif "search" in direction:
if direction== "search person":
print "You find the man, he appears to be a lumberjack and is carrying a large saw. You tell him about your quest and the items you are looking for. He directs you to the mine for the iron ore and tells you that he's always wanted to be a miner like his brother. He tells you that his brother is in the mines right now if you should need any help."
else:
print "You can't search for that"
elif direction=="commands":
print "type the commands north, south, east, and west in order to navigate,type the command use followed by the item name to use an item, type the command inventory to look at the items in your posession, type the command look to look around you, type the command search followed by a target to search for people or items, type the command commands to repeat these directions"
while place==mine:
direction=raw_input("What would you like to do?\n")
if direction=="west":
print "The cavern is too dark to travel down, you head back to the center of the mine"
elif direction=="east":
print "You head back to the forest"
place=forest
elif direction=="south":
print "You are at a desposit of rich iron, this is perfect for the blacksmith, the only problem is you don't have a way to mine it"
elif direction=="north":
print "You are in a small cavern with a dead body on the floor, you are not sure how he died. You see a pickaxe underneath him and a bag around his waist"
elif direction=="look":
print "You find yourself in the center of a large mine. To your east lies the exit back to the forest, to your north lies a cavern with a dead body, to your south lies an iron deposit, and to your west lies a very dark cavern."
elif "search" in direction:
if "body" in direction:
if "pickaxe" not in items:
print "You take the pickaxe and the bag which contained 3 Gold Pieces"
items.append ("pickaxe")
items.append ("3 Gold Pieces")
else:
print "You cannot search that"
elif "use" in direction:
if "pickaxe" in direction:
if "pickaxe" in items:
if "iron" in direction:
print "You use the pickaxe to mine the iron ore"
items.append ("iron ore")
else:
| |
A.B")
temp_buck_distr_B_virt_err_self = temp_buck_distr_B_self + self.virtual_error
temp_buck_distr_B_virt_err_pb = temp_buck_distr_B_pb + pb.virtual_error
temp_virtual_error = self.convolve_same(temp_buck_distr_B_virt_err_self, temp_buck_distr_B_virt_err_pb) - temp_buck_distr_B_convolved
self.logger.info(" Compute virtual_error A.B_under")
temp_virtual_error_under = self.convolve_full(temp_buck_distr_B_virt_err_self[0:self.one_index+1], temp_buck_distr_B_virt_err_pb[0:pb.one_index+1])
temp_virtual_error_under = np.sum(temp_virtual_error_under[0:self.one_index+1])
temp_virtual_error[0] = max(0, temp_virtual_error_under)
self.virtual_error = temp_virtual_error
self.u = self.u + pb.u
# decrease reference counters
del temp_buck_distr_B_self
del temp_buck_distr_B_pb
del temp_buck_distr_B_convolved
del temp_buck_distr_B_virt_err_self
del temp_buck_distr_B_virt_err_pb
del temp_virtual_error_under
# overwrites
self.bucket_distribution = temp_bucket_distribution
self.infty_bucket = delta_infty_bucket
# clean up
del temp_over
del temp_under
gc.collect()
return pb
@classmethod
def load_state(cls, state_directory):
params = pickle.load(open(os.path.join(state_directory, "non_ndarray"),'rb'))
# If you search bugs with incomplete loaded states, look here.
# The "instance.__init__()" method is *NOT* called when loading!
instance = cls.__new__(cls)
instance.__dict__.update(params)
instance.logger_setup(instance.logging_level)
instance.bucket_distribution = np.fromfile(os.path.join(state_directory, "bucket_distribution"), dtype=np.float64)
if instance.error_correction:
instance.real_error = np.fromfile(os.path.join(state_directory, "real_error"), dtype=np.float64)
instance.virtual_error = np.fromfile(os.path.join(state_directory, "virtual_error"), dtype=np.float64)
return instance
def save_state(self, state_directory):
self.mkdir_p(state_directory)
excluded_attributes = ['logger', 'bucket_distribution', 'real_error', 'virtual_error']
# excluding np arrays from dumping in "non_ndarray" without copying large arrays:
save_dict = {}
for key in self.__dict__.keys():
if key not in excluded_attributes:
save_dict[key] = self.__dict__[key]
pickle.dump(save_dict, open(os.path.join(state_directory, "non_ndarray"),'wb'))
self.bucket_distribution.tofile(open(os.path.join(state_directory, "bucket_distribution"), 'w'))
if self.error_correction:
self.real_error.tofile(open(os.path.join(state_directory, "real_error"), 'w'))
self.virtual_error.tofile(open(os.path.join(state_directory, "virtual_error"), 'w'))
    def compose(self, nr_of_compositions):
        """Compose this distribution with itself *nr_of_compositions* times.

        Uses repeated squaring: the power-of-two states 2**0, 2**1, ... are
        computed once each, cached on disk under caching_directory, and then
        the states selected by the binary decomposition of
        nr_of_compositions are folded together.  Returns the composed state
        (a separate instance), or None when caching is disabled.
        """
        if not self.caching_directory:
            self.logger.critical("This method requires caching. Abort")
            return None
        state_filepath_base = os.path.join(self.caching_directory, "compositions-")
        get_state_filepath = lambda needed_exp: state_filepath_base + str(int(2**needed_exp))
        target_state_filepath = state_filepath_base + str(nr_of_compositions)
        if os.path.isdir(target_state_filepath):
            self.logger.info("Target state is cached. Loading it")
            return self.load_state(target_state_filepath)
        # which compositions do we need: the exponents of the set bits of
        # nr_of_compositions (its binary decomposition)
        target_exp = int( np.log2(nr_of_compositions) )
        needed_compositions = [ x if (nr_of_compositions & (2**x) != 0) else -1 for x in range(target_exp + 1)]
        needed_compositions = list(filter(lambda a: a != -1, needed_compositions))
        self.logger.info("Needed compositions: " + ", ".join(map(str, needed_compositions)))
        # start with a copy of the current state (= 2**0 compositions)
        previous_state = self.copy()
        avoided_self_composition = False
        # which compositions already exist? Generate each missing 2**i state by
        # squaring the 2**(i-1) state, caching every intermediate on disk.
        for needed_exp in range(target_exp + 1):
            state_filepath = get_state_filepath(needed_exp)
            if not os.path.isdir(state_filepath):
                self.logger.info("[*] State 2**" + str(needed_exp) + " does not exist. Creating it.")
                if not needed_exp == 0:
                    if avoided_self_composition: # only load from disk when it differs from current state
                        previous_state_filepath = get_state_filepath(needed_exp - 1)
                        previous_state = self.load_state(previous_state_filepath)
                        avoided_self_composition = False
                    previous_state.compose_with(previous_state)
                    previous_state.print_state()
                # previous_state.print_state()
                previous_state.save_state(state_filepath)
                gc.collect()
            else:
                avoided_self_composition = True
        self.logger.info("[*] All intermediate states up to 2**" + str(target_exp) + " exist now")
        previous_state = self.load_state( get_state_filepath(needed_compositions[0]) )
        self.logger.info("[*] Loaded state 2**" + str(needed_compositions[0]))
        # compose to the desired state: fold in every remaining needed power of two
        for i in needed_compositions[1:]:
            self.logger.info("[*] Compose with state 2**{}".format(i))
            current_state = self.load_state(get_state_filepath(i))
            # while the factor of previous state and current state is not same
            # (compose_with presumably requires matching factors -- TODO confirm)
            while(current_state.factor != previous_state.factor):
                self.logger.info("factors are unequal ( {} != {} ), squaring".format(current_state.factor, previous_state.factor))
                if current_state.factor > previous_state.factor:
                    previous_state.squaring()
                else:
                    current_state.squaring()
            # now the factor should be the same
            previous_state.compose_with(current_state)
            previous_state.print_state()
        previous_state.save_state(target_state_filepath) # caching..
        return previous_state
    def print_state(self):
        """ prints some information about the current state

        Logs (at INFO level) a human-readable summary of the bucket
        distribution, its total mass, and the delta bounds at eps=0.
        The error-correction section is appended only when enabled.
        """
        # Mass in the finite buckets; together with the infinity bucket and the
        # distinguishing events this should sum to ~1.0 (a probability mass).
        sum_of_bucket_distr = np.sum(self.bucket_distribution)
        summary = ( 'Summary:\n'
                    ' caching_directoy : {}\n'
                    ' number_of_buckets : {}\n'
                    ' factor : {}\n'
                    ' infty_bucket : {} (max 1.0, numerical error, should be < {:g})\n'
                    ' disting_events : {} (max 1.0)\n'
                    ' minus-n-bucket : {}\n'
                    ' sum bucket_distr : {:.30f}\n'
                    ' sum of all buckets : {:.30f} (should be 1.000000)\n'
                    ' delta_upper(eps=0) : {}'
                    .format(
                        self.caching_directory,
                        self.number_of_buckets,
                        self.factor,
                        self.infty_bucket,
                        _infty_bucket_warning_bound,
                        self.distinguishing_events,
                        self.bucket_distribution[0],
                        sum_of_bucket_distr,
                        sum_of_bucket_distr + self.infty_bucket + self.distinguishing_events,
                        self.delta_of_eps_upper_bound(0),
                    ))
        if self.error_correction:
            summary += ('\n'
                        ' delta_lower(eps=0) : {}\n'
                        ' sum(virtual_error) : {} (max 1.0, should be < {:g})\n'
                        ' sum(real_error) : {} (max 1.0, should be < {:g})\n'
                        ' the u : {}'
                        .format(
                            self.delta_of_eps_lower_bound(0),
                            np.sum(self.virtual_error),
                            _virtual_error_warning_bound,
                            np.sum(self.real_error),
                            _virtual_error_warning_bound,
                            self.u,
                        ))
        self.logger.info(summary)
def convolve_same(self, x, y):
return np.convolve(x, y, mode = 'same')
def convolve_full(self, x, y):
return np.convolve(x, y, mode = 'full')
def delta_PDP(self, eps):
""" Returns upper bound for tight delta for probabilistic differential privacy. Error correction not supported. """
if self.error_correction:
self.logger.warning("Error correction for PDP delta not supported. Omit error correction.")
k = int(np.floor(eps / self.log_factor))
if k > self.number_of_buckets // 2: # eps is above of bucket_distribution range
return self.infty_bucket + self.distinguishing_events
if k < -self.number_of_buckets // 2: # eps is below of bucket_distribution range
k = -self.number_of_buckets // 2
return np.sum(self.bucket_distribution[self.one_index + k + 1:]) + self.infty_bucket + self.distinguishing_events
def _g_func(self, l):
return (1 - self.factor**-l)
    def delta_ADP(self, eps):
        """ Returns an upper bound for tight delta for approximate differential privacy. Error correction is supported.

        Thin alias for delta_ADP_upper_bound(); see that method for details.
        """
        return self.delta_ADP_upper_bound(eps)
    def delta_ADP_upper_bound(self, eps):
        """Upper bound on the tight ADP delta(eps).

        Sums the bucket mass above the eps threshold, weighted by
        g(l) = 1 - factor**(-l), then adds the infinity-bucket and
        distinguishing-event mass.  When error correction is enabled the
        real-error tail is subtracted to tighten the bound.
        """
        # Use np.floor to guarantee an upper bound for the delta(eps) graph
        k = int(np.floor(eps / self.log_factor))
        if k > self.number_of_buckets // 2: # eps is above of bucket_distribution range
            return self.infty_bucket + self.distinguishing_events
        if k < -self.number_of_buckets // 2: # eps is below of bucket_distribution range
            k = -self.number_of_buckets // 2
        # Weighted tail mass of all buckets strictly above index one_index + k.
        ret = np.sum(self._g_func(np.arange(1, self.one_index - k + 1)) * self.bucket_distribution[self.one_index + k + 1:])
        if self.error_correction:
            ret -= np.exp(eps) * np.sum( self.real_error[ min(self.one_index + k + self.u, self.number_of_buckets): ] )
            # check:
            # a = np.sum(self._g_func(np.arange( self.one_index - k)) * self.bucket_distribution[self.one_index + k + 1:][self.u-1:])
            # a -= np.exp(eps) * np.sum( self.real_error[ min(self.one_index + k + self.u, self.number_of_buckets): ] )
            # assert np.all( a >= 0)
        return ret + self.infty_bucket + self.distinguishing_events
    def delta_ADP_lower_bound(self, eps):
        """Lower bound on the tight ADP delta(eps).

        Requires error correction: the virtual-error tail is subtracted per
        bucket and negative contributions are clamped to zero.  Returns None
        (after logging an error) when error correction is disabled.
        """
        if not self.error_correction:
            self.logger.error("Error correction required for lower bound")
            return None
        # Use np.ceil to guarantee a lower bound for the delta(eps) graph
        k = int(np.ceil(eps / self.log_factor))
        if k > self.number_of_buckets // 2: # eps is above of bucket_distribution range
            return self.distinguishing_events
        if k <= -self.number_of_buckets // 2: # eps is below of bucket_distribution range
            k = -self.number_of_buckets // 2 + 1
        vals = self._g_func(np.arange(1, self.one_index - k + 1)) * self.bucket_distribution[self.one_index + k + 1:]
        vals -= np.exp(eps) * self.virtual_error[self.one_index + k + 1:]
        vals[vals < 0] = 0 # = max(0, vals)
        return np.sum(vals) + self.distinguishing_events
    def eps_ADP_upper_bound(self, delta):
        """
        Returns an upper bound of epsilon for a target delta. Throws descriptive errors if it cannot satisfy the
        conditions. Without error correction, the optimizer might not converge properly.
        """
        if not self.error_correction:
            self.logger.warning("Error-correction disabled, optimizer might not converge properly.")
        # The maximal epsilon we can serve. For any epsilons larger, self.delta_ADP_upper_bound will return the
        # delta of the infinity bucket+dist_events only, rendering upper-bound-epsilon=np.inf
        max_eps = (self.number_of_buckets // 2 ) * self.log_factor
        try:
            # delta_ADP_upper_bound is monotonically decreasing in eps, so a sign
            # change of (delta_upper(eps) - delta) is bracketed by [0, max_eps].
            root = optimize.bisect(lambda eps: self.delta_ADP_upper_bound(eps) - delta, 0, max_eps)
        except ValueError as e:
            # bisect raises ValueError when f(0) and f(max_eps) share a sign;
            # distinguish the two possible causes before re-raising.
            if self.delta_ADP_upper_bound(eps=max_eps) > delta:
                raise ValueError("Required target-delta is smaller than self.delta_ADP_upper_bound(max_eps) can serve. "
                                 "For an instant remedy, increase number_of_buckets, or increase factor.")
            elif self.delta_ADP_upper_bound(eps=0.0) < delta:
                self.logger.warning("Returning over-approximation eps=0. "
                                    "(self.delta_ADP_upper_bound(eps=0) < target-delta)")
                return 0.0
            raise e
        return root
    def renyi_divergence_upper_bound(self, alpha):
        """
        returns an upper bound on the alpha renyi-divergence for a given alpha >= 1
        R(alpha) = 1/(alpha - 1) * log_e E_{x~B} (A/B)^(alpha) if alpha > 1
        R(alpha) = E_{x~B} log_e (A/B)^(alpha) if alpha == 1
        """
        assert self.distinguishing_events == 0 and self.infty_bucket == 0, \
            "Nonzero infty bucket or distingushing events not supported"
        # if alpha == 1, the divergence is reduced to KL-divergence
        if alpha == 1:
            return self.KL_divergence()
        # else, compute the renyi-divergence
        lam = alpha - 1
        # the alpha renyi moments are the alpha-1 moments of the exponentiated bucket_distribution. Informal,
        # i.e. R(alpha) = 1/lam * ln E_{y~buckets} exp(y*lam)
        #
        # for additional details, see Lemma 8 in
        # Sommer et al. "Privacy loss classes: The central limit theorem in differential privacy." PoPETS 2019.2
        # to provide a upper bound, we assume that all content of a specific bucket manifests at the position
        # with the highest leakage (most right)
        summands = np.exp((np.arange(self.number_of_buckets) - self.one_index)*self.log_factor*lam)
        expectation_value = np.sum(self.bucket_distribution*summands)
        renyi_div = np.log(expectation_value) / lam
        return renyi_div
def KL_divergence(self):
return np.sum(self.bucket_distribution * ((np.arange(self.number_of_buckets) - self.one_index)*self.log_factor))
# needed for pickle (and deepcopy)
def __getstate__(self):
d = self.__dict__.copy() # copy the dict since we change it
del d['logger'] # remove logger instance entry
return d
def __setstate__(self, dict):
if not hasattr(self, 'logger'):
self.logger_setup(dict['logging_level'])
self.__dict__.update(dict)
# utility functions
def copy(self):
# I hope this is | |
'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = | |
<gh_stars>1-10
#! /usr/bin/python3
r'''###############################################################################
###################################################################################
#
#
# Tegridy MIDI X Module (TMIDI X / tee-midi eks)
# Version 1.0
#
# NOTE: TMIDI X Module starts after the partial MIDI.py module @ line 1342
#
# Based upon MIDI.py module v.6.7. by <NAME> / pjb.com.au
#
# Project Los Angeles
#
# Tegridy Code 2021
#
# https://github.com/Tegridy-Code/Project-Los-Angeles
#
#
###################################################################################
###################################################################################
# Copyright 2021 Project Los Angeles / Tegridy Code
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###################################################################################
###################################################################################
#
# PARTIAL MIDI.py Module v.6.7. by <NAME>
# Please see TMIDI 2.3/tegridy-tools repo for full MIDI.py module code
#
# Or you can always download the latest full version from:
# https://pjb.com.au/
#
# Copyright 2020 <NAME>
#
###################################################################################
###################################################################################'''
import sys, struct, copy
Version = '6.7'  # version of the upstream MIDI.py library embedded here
VersionDate = '20201120'  # release date of that upstream version
# Shared state used by _warn()/_clean_up_warnings() to collapse runs of
# identical warnings into a single "repeated N times" message.
_previous_warning = '' # 5.4
_previous_times = 0 # 5.4
#------------------------------- Encoding stuff --------------------------
def opus2midi(opus=[], text_encoding='ISO-8859-1'):
    r'''Serialise an "opus" structure into a MIDI bytestring.

    *opus* is a list whose first element is the ticks-per-quarter value
    and whose remaining elements are tracks (each a list of opus events,
    with delta times).  The returned bytes can be written to a file
    opened in binary mode ('wb') or to sys.stdout.buffer.

    my_opus = [
        96,
        [   # track 0:
            ['patch_change', 0, 1, 8],
            ['note_on',   5, 1, 25, 96],
            ['note_off', 96, 1, 25,  0],
        ],
    ]
    my_midi = opus2midi(my_opus)
    '''
    if len(opus) < 2:
        opus = [1000, [],]
    working = copy.deepcopy(opus)
    ticks = int(working.pop(0))
    ntracks = len(working)
    # MIDI file format 0 for a single track, format 1 for multi-track.
    midi_format = 0 if ntracks == 1 else 1
    chunks = [b"MThd\x00\x00\x00\x06" + struct.pack('>HHH', midi_format, ntracks, ticks)]
    for track in working:
        payload = _encode(track, text_encoding=text_encoding)
        chunks.append(b'MTrk' + struct.pack('>I', len(payload)) + payload)
    _clean_up_warnings()
    return b''.join(chunks)
def score2opus(score=None, text_encoding='ISO-8859-1'):
    r'''Convert a "score" into the equivalent "opus".

    *score* is a list: the first item is the ticks parameter, the rest
    are tracks of score-events.  In a score, times are absolute ticks
    from the start of the track, and note_on/note_off pairs are folded
    into one event: ['note', start_time, duration, channel, pitch,
    velocity].  Returns the equivalent opus (delta times, separate
    note_on/note_off events).

    *text_encoding* is accepted for signature symmetry with
    opus2midi()/score2midi(); it is not used here.
    '''
    # Bug fix: the default argument (None) used to raise TypeError on
    # len(score); treat None the same as a too-short score.
    if score is None or len(score) < 2:
        score = [1000, [],]
    tracks = copy.deepcopy(score)
    ticks = int(tracks.pop(0))
    opus_tracks = []
    for scoretrack in tracks:
        time2events = dict([])

        def _register(event):
            # Bucket the event by its absolute start time (event[1]).
            time2events.setdefault(event[1], []).append(event)

        for scoreevent in scoretrack:
            if scoreevent[0] == 'note':
                # Split the abstract 'note' into its on/off pair.
                _register(['note_on', scoreevent[1],
                           scoreevent[3], scoreevent[4], scoreevent[5]])
                _register(['note_off', scoreevent[1] + scoreevent[2],
                           scoreevent[3], scoreevent[4], scoreevent[5]])
            else:
                _register(scoreevent)
        # Flatten the buckets in ascending time order.
        sorted_events = []
        for time in sorted(time2events.keys()):
            sorted_events.extend(time2events[time])
        # Convert absolute times to delta times, in place.
        abs_time = 0
        for event in sorted_events:
            delta_time = event[1] - abs_time
            abs_time = event[1]
            event[1] = delta_time
        opus_tracks.append(sorted_events)
    opus_tracks.insert(0, ticks)
    _clean_up_warnings()
    return opus_tracks
def score2midi(score=None, text_encoding='ISO-8859-1'):
    r'''Translate a "score" into a MIDI bytestring, via score2opus()
    followed by opus2midi().
    '''
    intermediate_opus = score2opus(score, text_encoding)
    return opus2midi(intermediate_opus, text_encoding)
#--------------------------- Decoding stuff ------------------------
def midi2opus(midi=b''):
    r'''Parse a MIDI bytestring into an "opus" list.  For a description
    of the "opus" format, see opus2midi().
    Returns [1000, []] when the data is too short or malformed.
    '''
    data = bytearray(midi)
    if len(data) < 4:
        _clean_up_warnings()
        return [1000,[],]
    header_id = bytes(data[0:4])
    if header_id != b'MThd':
        _warn("midi2opus: midi starts with "+str(header_id)+" instead of 'MThd'")
        _clean_up_warnings()
        return [1000,[],]
    length, midi_format, tracks_expected, ticks = struct.unpack(
        '>IHHH', bytes(data[4:14]))
    if length != 6:
        _warn("midi2opus: midi header length was "+str(length)+" instead of 6")
        _clean_up_warnings()
        return [1000,[],]
    my_opus = [ticks,]
    data = data[14:]
    track_num = 1 # 5.1
    # Walk the chunk list; each chunk is a 4-byte id, a 4-byte length
    # and the track payload.
    while len(data) >= 8:
        chunk_id = bytes(data[0:4])
        if chunk_id != b'MTrk':
            # Unknown chunk types are tolerated silently.
            pass
        (track_length,) = struct.unpack('>I', data[4:8])
        data = data[8:]
        if track_length > len(data):
            _warn('midi2opus: track #'+str(track_num)+' length '+str(track_length)+' is too large')
            _clean_up_warnings()
            return my_opus # 5.0: salvage what was parsed so far
        my_opus.append(_decode(data[0:track_length]))
        data = data[track_length:]
        track_num += 1 # 5.1
    _clean_up_warnings()
    return my_opus
def opus2score(opus=[]):
    r'''Convert an "opus" into the equivalent "score": delta times become
    absolute times, and each note_on/note_off pair is fused into a single
    ['note', start, duration, channel, pitch, velocity] event.
    For a description of both formats see opus2midi() and score2opus().
    '''
    if len(opus) < 2:
        _clean_up_warnings()
        return [1000,[],]
    tracks = copy.deepcopy(opus) # couple of slices probably quicker...
    ticks = int(tracks.pop(0))
    score = [ticks,]
    for opus_track in tracks:
        ticks_so_far = 0
        score_track = []
        # Maps channel*128+pitch -> FIFO of pending 'note' events still
        # awaiting their note_off (handles overlapping same-pitch notes).
        chapitch2note_on_events = dict([]) # 4.0
        for opus_event in opus_track:
            ticks_so_far += opus_event[1]
            # A note_on with velocity 0 counts as a note_off (4.8).
            if opus_event[0] == 'note_off' or (opus_event[0] == 'note_on' and opus_event[4] == 0): # 4.8
                cha = opus_event[2]
                pitch = opus_event[3]
                key = cha*128 + pitch
                if chapitch2note_on_events.get(key):
                    # Close the oldest pending note on this channel/pitch.
                    new_event = chapitch2note_on_events[key].pop(0)
                    new_event[2] = ticks_so_far - new_event[1]
                    score_track.append(new_event)
                elif pitch > 127:
                    pass #_warn('opus2score: note_off with no note_on, bad pitch='+str(pitch))
                else:
                    pass #_warn('opus2score: note_off with no note_on cha='+str(cha)+' pitch='+str(pitch))
            elif opus_event[0] == 'note_on':
                cha = opus_event[2]
                pitch = opus_event[3]
                key = cha*128 + pitch
                # Duration (index 2) is filled in when the note_off arrives.
                new_event = ['note',ticks_so_far,0,cha,pitch, opus_event[4]]
                if chapitch2note_on_events.get(key):
                    chapitch2note_on_events[key].append(new_event)
                else:
                    chapitch2note_on_events[key] = [new_event,]
            else:
                # Non-note events keep their place, with absolute time.
                opus_event[1] = ticks_so_far
                score_track.append(opus_event)
        # check for unterminated notes (Oisín) -- 5.2
        # Any note still pending gets closed at the end of the track.
        for chapitch in chapitch2note_on_events:
            note_on_events = chapitch2note_on_events[chapitch]
            for new_e in note_on_events:
                new_e[2] = ticks_so_far - new_e[1]
                score_track.append(new_e)
                pass #_warn("opus2score: note_on with no note_off cha="+str(new_e[3])+' pitch='+str(new_e[4])+'; adding note_off at end')
        score.append(score_track)
    _clean_up_warnings()
    return score
def midi2score(midi=b''):
    r'''Translate MIDI bytes into a "score", via midi2opus() followed by
    opus2score().
    '''
    return opus2score(midi2opus(midi))
def midi2ms_score(midi=b''):
    r'''Translate MIDI bytes into a "score" whose times are milliseconds
    (one beat per second, one tick per millisecond), via midi2opus(),
    to_millisecs() and opus2score().
    '''
    the_opus = midi2opus(midi)
    return opus2score(to_millisecs(the_opus))
#------------------------ Other Transformations ---------------------
def to_millisecs(old_opus=None, desired_time_in_ms=1):
    r'''Recallibrates all the times in an "opus" to use one beat
    per second and one tick per millisecond. This makes it
    hard to retrieve any information about beats or barlines,
    but it does make it easy to mix different scores together.

    The input's set_tempo events are consumed by the conversion and
    replaced by a single leading set_tempo in each output track.
    '''
    if old_opus == None:
        return [1000 * desired_time_in_ms,[],]
    try:
        old_tpq = int(old_opus[0])  # input's ticks-per-quarter-note
    except IndexError: # 5.0
        _warn('to_millisecs: the opus '+str(type(old_opus))+' has no elements')
        return [1000 * desired_time_in_ms,[],]
    new_opus = [1000 * desired_time_in_ms,]
    # 6.7 first go through building a table of set_tempos by absolute-tick
    ticks2tempo = {}
    itrack = 1
    while itrack < len(old_opus):
        ticks_so_far = 0
        for old_event in old_opus[itrack]:
            if old_event[0] == 'note':
                raise TypeError('to_millisecs needs an opus, not a score')
            ticks_so_far += old_event[1]
            if old_event[0] == 'set_tempo':
                ticks2tempo[ticks_so_far] = old_event[2]
        itrack += 1
    # then get the sorted-array of their keys
    tempo_ticks = [] # list of keys
    for k in ticks2tempo.keys():
        tempo_ticks.append(k)
    tempo_ticks.sort()
    # then go through converting to millisec, testing if the next
    # set_tempo lies before the next track-event, and using it if so.
    itrack = 1
    while itrack < len(old_opus):
        ms_per_old_tick = 400 / old_tpq # float: will round later 6.3
        i_tempo_ticks = 0
        ticks_so_far = 0
        ms_so_far = 0.0
        previous_ms_so_far = 0.0
        new_track = [['set_tempo',0,1000000 * desired_time_in_ms],] # new "crochet" is 1 sec
        for old_event in old_opus[itrack]:
            # detect if ticks2tempo has something before this event
            # 20160702 if ticks2tempo is at the same time, leave it
            event_delta_ticks = old_event[1] * desired_time_in_ms
            if (i_tempo_ticks < len(tempo_ticks) and
              tempo_ticks[i_tempo_ticks] < (ticks_so_far + old_event[1]) * desired_time_in_ms):
                # A tempo change (possibly from another track) falls inside
                # this event's delta: advance up to it, switch tempo, and
                # carry the remaining ticks forward.
                delta_ticks = tempo_ticks[i_tempo_ticks] - ticks_so_far
                ms_so_far += (ms_per_old_tick * delta_ticks * desired_time_in_ms)
                ticks_so_far = tempo_ticks[i_tempo_ticks]
                ms_per_old_tick = ticks2tempo[ticks_so_far] / (1000.0*old_tpq * desired_time_in_ms)
                i_tempo_ticks += 1
                event_delta_ticks -= delta_ticks
            new_event = copy.deepcopy(old_event) # now handle the new event
            ms_so_far += (ms_per_old_tick * old_event[1] * desired_time_in_ms)
            new_event[1] = round(ms_so_far - previous_ms_so_far)
            # set_tempo events are dropped here: the leading set_tempo in
            # new_track already fixes the output at one tick per ms.
            if old_event[0] != 'set_tempo':
                previous_ms_so_far = ms_so_far
                new_track.append(new_event)
            ticks_so_far += event_delta_ticks
        new_opus.append(new_track)
        itrack += 1
    _clean_up_warnings()
    return new_opus
def event2alsaseq(event=None): # 5.5
r'''Converts an event into the format needed by | |
<gh_stars>1-10
# Program to program a Science of Cambridge MK14 with a 7Bot
# An MK14 is a very old micro-computer from what became Sinclair Research
# A 7Bot is a 7 degrees of freedom robot arm which orignated here:
# More information on this project is here: http://robdobson.com/2016/10/mk14-meets-7bot/
from __future__ import print_function
import serial
import numpy as np
import time
import threading
import sys
import tkinter
# Name of the program to "send" to the MK14 (entered physically by robot arm)
programToRun = "Duck Shoot"
# Setup of the 7Bot position and speed in relation to the MK14
# Per-servo speed codes for setSpeed() (protocol value/10; 10 == 100 deg/s).
normalSpeeds = [80, 80, 80, 150, 100, 100, 100]
# Seven servo angles (degrees) for each named arm pose.
homePositionAngles = [10, 150, 75, 83, 121, 89.64, 56]
readyPositionAngles = [7, 115, 65, 83, 121, 89.64, 56]
keyboardClearAngles = [7, 107, 90, 90, 118, 89.64, 56]
# Distance to move between hovering over a key and pressing it
# the three values are azimuth (0 means no change), shoulder position (+ve value) and
# elbow position (-ve value)
keyPunchDownAngleDeltas = [0, 5, -5]
# Enter a sequence here to override sending the MK14 a program and instead just press keys
testKeySequence = []
#testKeySequence = ["0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","TRM","MEM","ABT","GO"]
# Positions of keys on the MK14 keypad
# Each entry is the 7 servo angles (degrees) with the effector resting on
# that key; the small +/- terms appear to be hand-tuned per-key tweaks
# left visible rather than folded into the constants.
keyPositions = {
    "0": [12, 103-2, 102+2, 90, 115.56, 89.64, 56],
    "1": [7.92, 94+2, 103.52-2, 84.06, 116.1, 89.46, 56],
    "2": [12.6, 95, 101.52, 83.52, 116.64, 89.46, 56],
    "3": [18.2, 95, 102, 90, 114.3, 89.46, 56],
    "4": [7.2, 89, 101-0.5, 81.0, 117.9, 89.46, 56],
    "5": [11.7, 87+1.5, 102-1.5, 79.92, 118.8, 89.46, 56],
    "6": [15.5, 86+1, 102-1, 79.92, 120.42, 89.46, 56],
    "7": [9, 84, 101, 90, 114, 90, 56],
    "8": [13, 81, 100, 90, 121.86, 89.46, 56],
    "9": [16.8, 80+.5, 100-.5, 90, 122.22, 89.64, 57],
    "A": [24, 77-1, 92+5.5, 82.98, 124.2, 89.64, 56],
    "B": [27.72, 77-2.5, 95+1.5, 86.94, 127.44, 89.64, 56],
    "C": [30+1.5, 75-4, 95+1, 87.48, 126.18, 89.46, 56],
    "D": [25.56, 86-3.5, 98+2.5, 87.48, 122.04, 89.28, 56],
    "E": [28+1.5, 84-4, 98+2.5, 87.48, 121.5, 89.64, 56],
    "F": [33, 81.0-3.5, 98+1.5, 87.84, 121.14, 89.64, 56],
    "ABT": [27, 88, 103.34, 90, 117.9, 89.64, 56],
    "TRM": [30.5, 86-1, 103, 90, 117.9, 89.64, 56],
    "MEM": [28.62, 94, 105-1, 90.0, 117.72, 89.46, 56],
    "GO": [33, 92, 105.14, 90, 115.74, 89.46, 56],
    }
# Programs to be sent to the MK14
# Each program has an execution start address and its machine code as
# Intel-HEX-style record strings (":" + byte count + load address +
# record type + data bytes + checksum).
programs = {
    "Duck Shoot":
        {
        "execAddr": "0F12",
        "hexLines": [
            ":180F1200C40D35C40031C401C8F4C410C8F1C400C8EEC40801C0E71EB2",
            ":180F2A00C8E49404C4619002C400C9808F01C0D89C0EC180E4FF980811",
            ":160F4200C8CEC0CAE480C8C64003FC0194D6B8BF98C8C40790CEDD"
        ]
        },
    "Moon Landing":
        {
        "execAddr": "0F52",
        "hexLines": [
            ":180F14000850009980009998000258003EC8E3C40135C8DFC40B31C877",
            ":180F2C00DBC0D702D40F01C180CF01C4008F04C0C91C1C1C1C010603EA",
            ":180F440094EDC400CF01C0BB35C0B93190CEC40F35C41431C40F36C4EA",
            ":180F5C002032C40CCAE4C10BCDFFBAE49CF8C40C37C4FF33C401CAE473",
            ":180F7400C5069404C5049032C402CAE302C5FFE902C900BAE39CF6C19A",
            ":180F8C00029402C499EDFFC900BAE494E3C50CAAE303C5FFF9FEC900A9",
            ":180FA40008BAE394F50694029004C400C9FFC1FF03EC94C9FDC499ECF9",
            ":180FBC0000C9FCC1003EC1F9940AC49903F9FA03EC009002C1FA3EC173",
            ":180FD400F73EC7FFC5F63EC40ACAE4C7FF940AE4DF9A31BAE49CF492E3",
            ":0A0FEC0049C109980333C90992496D"
        ]
        }
    }
# Servo info
NUM_SERVOS = 7
# The three values below are updated by the botPortRead() reader thread
# from the arm's status packets; the rest of the program only reads them.
isAllConverge = False
measuredForces = [0] * NUM_SERVOS
measuredRotationDegs = [0] * NUM_SERVOS
# Abort flag
# Set elsewhere (UI) to make the movement wait-loops bail out early.
globalAbortFlag = False
# Read data from 7bot - this is done in a separate thread and global variables are altered as the means
# of communication between the thread and the main program - ugly !
def botPortRead(ser):
    # Reader-thread loop: parses the 7Bot serial protocol from *ser* and
    # publishes results through module globals (measuredForces,
    # measuredRotationDegs, isAllConverge).  Frames start with 0xFE,
    # followed by an instruction byte (0xF0 + n); instruction 6 carries a
    # force status byte, instruction 9 carries NUM_SERVOS 14-bit
    # position/force codes plus a convergence flag.  Bytes outside a
    # frame are echoed to stdout (printable ones verbatim, others as
    # <XX> hex).  Exits when serialIsClosing is set or the port closes.
    global isAllConverge, measuredRotationDegs, measuredForces, NUM_SERVOS
    global serialIsClosing
    rxBuf = [0] * (NUM_SERVOS * 2 + 1)
    beginFlag = False    # True while inside a 0xFE-framed packet
    instruction = 0      # decoded instruction number (0 = not yet read)
    cnt = 0              # payload bytes received for instruction 9
    while True:
        # Handle closing down
        if serialIsClosing or not ser.isOpen():
            break
        # Get a char if there is one
        val = ser.read(1)
        if len(val) == 0:
            continue
        # print("Rx", "%02X " % ord(val))
        if not beginFlag:
            beginFlag = (ord(val) == 0xFE)
            if not beginFlag:
                # Out-of-frame byte: pass the arm's text output through.
                if (ord(val) < 0x20 or ord(val) > 0x7e) and ord(val) != 0x0d and ord(val) != 0x0a:
                    print("<%02X>" % ord(val))
                    sys.stdout.flush()
                else:
                    print(val.decode("utf-8"), end="")
                    sys.stdout.flush()
            # Reset the frame decoder state either way.
            instruction = 0
            cnt = 0
        elif instruction == 0:
            instruction = ord(val) - 240
        elif instruction == 6:
            forceStatus = ord(val)
            print("<== ForceStatus", forceStatus)
            beginFlag = False
            instruction = 0
            cnt = 0
        elif instruction == 9:
            rxBuf[cnt] = ord(val)
            cnt += 1
            if cnt >= NUM_SERVOS * 2 + 1:
                # Full status packet received; decode and reset.
                beginFlag = False
                instruction = 0
                cnt = 0
                for i in range(NUM_SERVOS):
                    # Two 7-bit bytes per servo, high septet first.
                    posCode = rxBuf[i*2] * 128 + rxBuf[i*2+1]
                    measuredForces[i] = posCode % 16384 / 1024
                    if posCode / 16384 > 0:
                        # Top bit set means a negative force reading.
                        measuredForces[i] = -measuredForces[i]
                    # Convert 0-1000 code to 0-180 deg
                    measuredRotationDegs[i] = (posCode % 1024) * 9 / 50
                # Final byte flags that all servos have converged.
                isAllConverge = (rxBuf[(NUM_SERVOS-1)*2+2] == 1)
                # print("Forces:", measuredForces, ",Angles:", measuredRotationDegs, isAllConverge)
        else:
            beginFlag = False
# Utility functions
def appendTwoByteVal(buf, val):
    """Append *val* to *buf* as two 7-bit bytes, high septet first."""
    whole = int(val)
    buf.append((whole // 128) & 0x7F)
    buf.append(whole & 0x7F)
def appendVecToSend(buf, vec):
    """Append each vector component as magnitude plus a sign flag.

    Negative components are encoded by adding 1024 to the absolute value
    before the two-byte split.
    """
    for component in vec:
        encoded = int(abs(component)) + (0 if component >= 0 else 1024)
        appendTwoByteVal(buf, encoded)
# Called while waiting for the robot arm to reach its destination
# Also allows the TKINTER UI to have some time to operate
def waitAndFlush(timeInSecs):
    """Sleep for timeInSecs in 1 ms slices, keeping stdout flushed and
    giving the Tk UI a chance to process events."""
    for _ in range(int(timeInSecs * 1000)):
        sys.stdout.flush()
        masterTk.update_idletasks()
        masterTk.update()
        time.sleep(0.001)
# Limit value between two thresholds
def constrain(val, valMin, valMax):
    """Clamp *val* into the inclusive range [valMin, valMax]."""
    return max(valMin, min(val, valMax))
# Set Servo angles
def setServoAngles(servoAngles):
    """Command absolute target angles (degrees) for all 7 servos.

    Clears the isAllConverge flag; botPortRead() sets it again once the
    arm reports that every servo has reached its target.
    """
    global isAllConverge
    isAllConverge = False
    packet = bytearray([0xFE, 0xF9])
    for angle in servoAngles:
        # Degrees -> 0..1000 protocol code (deg * 50 / 9).
        appendTwoByteVal(packet, angle * 50 // 9)
    botPort.write(packet)
# set motor force status: 0-forceless, 1-normal servo, 2-protection
def setForceStatus(status):
    """Set motor force mode: 0 forceless, 1 normal servo, 2 protection."""
    botPort.write(bytearray([0xFE, 0xF5, status]))
# get servo angles
def getForceStatus():
    """Request a force/angle status report from the arm (0xF6 command)."""
    botPort.write(bytearray([0xFE, 0xF6, 0x00]))
# set motion fluency & speeds (0~250 ---> 0~25)
def setSpeed(fluentEnables, speeds):
    """Set per-servo motion speed and fluency.

    Packet layout: 0xFE 0xF7 then one byte per motor where bit 6 enables
    fluency and bits 5..0 hold the speed code (input 0~250 mapped to
    0~25; 10 means 100 degrees per second).
    """
    packet = bytearray([0xFE, 0xF7])
    for servoIdx, speed in enumerate(speeds):
        code = constrain(speed, 0, 250) // 10
        if fluentEnables[servoIdx]:
            code += 64  # set the fluency bit
        packet.append(code)
    botPort.write(packet)
# IK6(6 angles)
# j6:mm(-500~500), vec:(-1.0~1.0)--->(-500~500), theta:Degrees
def setIK6(j6, vec56, vec67, theta6):
    """Command the arm via 6-axis inverse kinematics.

    j6     -- target position in mm; each axis clamped to [-500, 500].
    vec56, vec67 -- direction vectors; normalised then scaled to 500 for
                    the wire format.
    theta6 -- end effector angle in degrees.
    Clears isAllConverge; the reader thread sets it on arrival.
    """
    global isAllConverge
    isAllConverge = False
    clamped = np.array([constrain(j6[0], -500, 500),
                        constrain(j6[1], -500, 500),
                        constrain(j6[2], -500, 500)])
    unit56 = np.copy(vec56)
    unit56 /= np.linalg.norm(unit56)
    unit56 *= 500
    unit67 = np.copy(vec67)
    unit67 /= np.linalg.norm(unit67)
    unit67 *= 500
    packet = bytearray([0xFE, 0xFA])
    appendVecToSend(packet, clamped)
    appendVecToSend(packet, unit56)
    appendVecToSend(packet, unit67)
    appendTwoByteVal(packet, int(theta6 * 50 / 9))
    # for dat in packet:
    #     print("%02X " % dat, end = "")
    botPort.write(packet)
# Move to a specific azimuth - the 7Bot standard firmware doesn't seem to do this in one go even though it
# can tell that the arm has not reached the desired position - not sure why this is but this is a fix which
# iterates towards the correct point by requesting positions greater or lesser than actually required until
# the arm gets near enough to the desired point
def moveToAzimuth(anglesDown):
    # Rotate the base (servo 0) to the azimuth of the requested key while
    # keeping the arm at the keyboard-clear pose.  The stock 7Bot firmware
    # reports convergence without always reaching the requested azimuth
    # exactly, so this nudges the commanded angle one degree at a time
    # (up to 5 attempts) until the measured error is below 1 degree.
    # Returns the commanded azimuth that got the arm close enough.
    azimAngles = keyboardClearAngles[:]
    azimAngles[0] = anglesDown[0]
    implAngles = azimAngles[:]
    for i in range(5):
        setServoAngles(implAngles)
        while not isAllConverge and not globalAbortFlag:
            waitAndFlush(0.1)
        waitAndFlush(0.1)
        if globalAbortFlag:
            break
        angleErrs = calcAngleError(azimAngles)
        print("Azimuth attempt", i)
        dispAngleError(azimAngles)
        if abs(angleErrs[0]) < 1:
            break
        # Overshoot the request in the direction of the residual error.
        implAngles[0] += 1 if (angleErrs[0] > 0) else -1
    return implAngles[0]
# Send the robot arm to predefined locations
def goToHome():
    """Move the arm to the parked pose and block until it converges."""
    setServoAngles(homePositionAngles)
    while not (isAllConverge or globalAbortFlag):
        waitAndFlush(0.1)
    waitAndFlush(0.1)
def goToReady():
    """Move the arm to the ready pose and block until it converges."""
    setServoAngles(readyPositionAngles)
    while not (isAllConverge or globalAbortFlag):
        waitAndFlush(0.1)
    waitAndFlush(0.1)
def punchDownOnKey(armAngles, tryAdjustments):
    """Press the key at *armAngles*: hover above it, push down, pull up.

    armAngles -- the 7 servo angles with the effector resting on the key.
    tryAdjustments -- accepted for interface compatibility; not used.
    Returns early (leaving the arm where it is) if globalAbortFlag is set.

    Fix: removed the dead local acceptedErrorDegs, which was assigned
    but never read.
    """
    if globalAbortFlag:
        return
    print(" hovering ", formatAngles(armAngles), "TryAdjust", tryAdjustments)
    # Hover position: the key position offset by the punch-down deltas.
    hoverAngles = armAngles[:]
    for i in range(len(keyPunchDownAngleDeltas)):
        hoverAngles[i] += keyPunchDownAngleDeltas[i]

    def _settle(angles):
        # Command the arm and block until the servos report convergence
        # (or an abort is requested).
        setServoAngles(angles)
        while not isAllConverge and not globalAbortFlag:
            waitAndFlush(0.1)
        waitAndFlush(0.1)

    _settle(hoverAngles)
    if globalAbortFlag:
        return
    print(" punchDown ")
    _settle(armAngles)
    if globalAbortFlag:
        return
    dispAngleError(armAngles)
    print(" pullUp ")
    _settle(hoverAngles)
    if globalAbortFlag:
        return
# Press a single key
def pressKey(key):
    """Press one MK14 key: align the base azimuth first, then punch down."""
    print("pressKey", key, "...................")
    target = keyPositions[key]
    settledAzimuth = moveToAzimuth(target)
    angles = target[:]
    angles[0] = settledAzimuth
    punchDownOnKey(angles, False)
# Send a sequence of keys (in a list)
def sendKeySequence(keys):
    """Press each key in *keys*, stopping as soon as an abort is requested."""
    for key in keys:
        if globalAbortFlag:
            return
        pressKey(key)
# Set address on MK14
def setAddress(addr, forExec=False):
print("Setting address | |
<reponame>CrankySupertoon01/Toontown-2
# File: D (Python 2.4)
from direct.distributed.DistributedNodeAI import DistributedNodeAI
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.fsm import StateData
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
class DistributedFindFourAI(DistributedNodeAI):
def __init__(self, air, parent, name, x, y, z, h, p, r):
DistributedNodeAI.__init__(self, air)
self.name = name
self.air = air
self.setPos(x, y, z)
self.setHpr(h, p, r)
self.myPos = (x, y, z)
self.myHpr = (h, p, r)
self.board = [
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0],
[
0,
0,
0,
0,
0,
0,
0]]
self.parent = self.air.doId2do[parent]
self.parentDo = parent
self.wantStart = []
self.playersPlaying = []
self.playersSitting = 0
self.playersTurn = 1
self.movesMade = 0
self.playerNum = 1
self.winDirection = None
self.playersGamePos = [
None,
None]
self.wantTimer = True
self.timerEnd = 0
self.turnEnd = 0
self.playersObserving = []
self.winLaffPoints = 20
self.movesRequiredToWin = 10
self.zoneId = self.air.allocateZone()
self.generateOtpObject(air.districtId, self.zoneId, optionalFields = [
'setX',
'setY',
'setZ',
'setH',
'setP',
'setR'])
self.parent.setCheckersZoneId(self.zoneId)
self.timerStart = None
self.fsm = ClassicFSM.ClassicFSM('Checkers', [
State.State('waitingToBegin', self.enterWaitingToBegin, self.exitWaitingToBegin, [
'playing']),
State.State('playing', self.enterPlaying, self.exitPlaying, [
'gameOver']),
State.State('gameOver', self.enterGameOver, self.exitGameOver, [
'waitingToBegin'])], 'waitingToBegin', 'waitingToBegin')
self.fsm.enterInitialState()
def announceGenerate(self):
self.parent.setGameDoId(self.doId)
def getTableDoId(self):
return self.parentDo
def delete(self):
self.fsm.requestFinalState()
self.parent = None
self.parentDo = None
del self.board
del self.fsm
DistributedNodeAI.delete(self)
def informGameOfPlayer(self):
self.playersSitting += 1
if self.playersSitting < 2:
self.timerEnd = 0
elif self.playersSitting == 2:
self.timerEnd = globalClock.getRealTime() + 20
self.parent.isAccepting = False
self.parent.sendUpdate('setIsPlaying', [
1])
elif self.playersSitting > 2:
pass
self.sendUpdate('setTimer', [
globalClockDelta.localToNetworkTime(self.timerEnd)])
def informGameOfPlayerLeave(self):
self.playersSitting -= 1
if self.playersSitting < 2 and self.fsm.getCurrentState().getName() == 'waitingToBegin':
self.timerEnd = 0
self.parent.isAccepting = True
self.parent.sendUpdate('setIsPlaying', [
0])
if self.playersSitting > 2 and self.fsm.getCurrentState().getName() == 'waitingToBegin':
pass
1
self.timerEnd = 0
if self.timerEnd != 0:
self.sendUpdate('setTimer', [
globalClockDelta.localToNetworkTime(self.timerEnd)])
else:
self.sendUpdate('setTimer', [
0])
def setGameCountdownTime(self):
self.timerEnd = globalClock.getRealTime() + 10
def setTurnCountdownTime(self):
self.turnEnd = globalClock.getRealTime() + 25
def getTimer(self):
if self.timerEnd != 0:
return 0
else:
return 0
def getTurnTimer(self):
return globalClockDelta.localToNetworkTime(self.turnEnd)
def requestTimer(self):
avId = self.air.getAvatarIdFromSender()
self.sendUpdateToAvatarId(avId, 'setTimer', [
globalClockDelta.localToNetworkTime(self.timerEnd)])
def handlePlayerExit(self, avId):
if avId in self.wantStart:
self.wantStart.remove(avId)
if self.fsm.getCurrentState().getName() == 'playing':
gamePos = self.playersGamePos.index(avId)
self.playersGamePos[gamePos] = None
self.fsm.request('gameOver')
def handleEmptyGame(self):
self.movesMade = 0
self.playersPlaying = []
self.playersTurn = 1
self.playerNum = 1
self.fsm.request('waitingToBegin')
self.parent.isAccepting = True
def requestWin(self, pieceNum):
avId = self.air.getAvatarIdFromSender()
playerNum = self.playersGamePos.index(avId) + 1
x = pieceNum[0]
y = pieceNum[1]
if self.checkWin(x, y, playerNum) == True:
self.sendUpdate('announceWinnerPosition', [
x,
y,
self.winDirection,
playerNum])
winnersSequence = Sequence(Wait(5.0), Func(self.fsm.request, 'gameOver'), Func(self.parent.announceWinner, 'Find Four', avId))
winnersSequence.start()
else:
self.sendUpdateToAvatarId(avId, 'illegalMove', [])
def distributeLaffPoints(self):
for x in self.parent.seats:
if x != None:
av = self.air.doId2do.get(x)
av.toonUp(self.winLaffPoints)
continue
def enterWaitingToBegin(self):
self.setGameCountdownTime()
self.parent.isAccepting = True
def exitWaitingToBegin(self):
self.turnEnd = 0
def enterPlaying(self):
self.parent.isAccepting = False
for x in self.playersGamePos:
if x != None:
self.playersTurn = self.playersGamePos.index(x)
self.d_sendTurn(self.playersTurn + 1)
break
continue
self.setTurnCountdownTime()
self.sendUpdate('setTurnTimer', [
globalClockDelta.localToNetworkTime(self.turnEnd)])
def exitPlaying(self):
pass
def enterGameOver(self):
self.timerEnd = 0
isAccepting = True
self.parent.handleGameOver()
self.playersObserving = []
self.playersTurn = 1
self.playerNum = 1
self.playersPlaying = []
self.movesMade = 0
self.playersGamePos = [
None,
None]
self.parent.isAccepting = True
self.fsm.request('waitingToBegin')
def exitGameOver(self):
pass
def requestBegin(self):
avId = self.air.getAvatarIdFromSender()
if avId not in self.wantStart:
self.wantStart.append(avId)
numPlayers = 0
for x in self.parent.seats:
if x != None:
numPlayers = numPlayers + 1
continue
if len(self.wantStart) == numPlayers and numPlayers >= 2:
self.d_gameStart(avId)
self.parent.sendIsPlaying()
def d_gameStart(self, avId):
for x in self.playersObserving:
self.sendUpdateToAvatarId(x, 'gameStart', [
255])
zz = 0
numPlayers = 0
for x in self.parent.seats:
if x != None:
numPlayers += 1
self.playersPlaying.append(x)
continue
if numPlayers == 2:
player1 = self.playersPlaying[0]
self.sendUpdateToAvatarId(player1, 'gameStart', [
1])
self.playersGamePos[0] = player1
player2 = self.playersPlaying[1]
self.sendUpdateToAvatarId(player2, 'gameStart', [
2])
self.playersGamePos[1] = player2
self.wantStart = []
self.fsm.request('playing')
self.parent.getTableState()
def d_sendTurn(self, playersTurn):
self.sendUpdate('sendTurn', [
playersTurn])
def advancePlayerTurn(self):
if self.playersTurn == 0:
self.playersTurn = 1
self.playerNum = 2
else:
self.playerNum = 1
self.playersTurn = 0
def requestMove(self, moveColumn):
avId = self.air.getAvatarIdFromSender()
turn = self.playersTurn
if avId in self.playersGamePos:
if self.playersGamePos.index(avId) != self.playersTurn:
pass
if self.board[0][moveColumn] != 0:
self.sendUpdateToAvatarId(avId, 'illegalMove', [])
for x in xrange(6):
if self.board[x][moveColumn] == 0:
movePos = x
continue
self.board[movePos][moveColumn] = self.playersTurn + 1
if self.checkForTie() == True:
self.sendUpdate('setGameState', [
self.board,
moveColumn,
movePos,
turn])
self.sendUpdate('tie', [])
winnersSequence = Sequence(Wait(8.0), Func(self.fsm.request, 'gameOver'))
winnersSequence.start()
return None
self.movesMade += 1
self.advancePlayerTurn()
self.setTurnCountdownTime()
self.sendUpdate('setTurnTimer', [
globalClockDelta.localToNetworkTime(self.turnEnd)])
self.d_sendTurn(self.playersTurn + 1)
self.sendUpdate('setGameState', [
self.board,
moveColumn,
movePos,
turn])
def checkForTie(self):
for x in xrange(7):
if self.board[0][x] == 0:
return False
continue
return True
def getState(self):
return self.fsm.getCurrentState().getName()
def getName(self):
return self.name
def getGameState(self):
return [
self.board,
0,
0,
0]
def clearBoard(self):
for x in self.board.squareList:
x.setState(0)
def getPosHpr(self):
return self.posHpr
def tempSetBoardState(self):
self.board = [
[
0,
0,
0,
0,
0,
0,
0],
[
1,
2,
1,
2,
2,
2,
1],
[
2,
2,
1,
2,
1,
2,
1],
[
2,
1,
1,
2,
2,
1,
2],
[
1,
2,
2,
1,
2,
1,
1],
[
1,
2,
1,
2,
1,
2,
1]]
self.sendUpdate('setGameState', [
self.board,
0,
0,
1])
def checkWin(self, rVal, cVal, playerNum):
if self.checkHorizontal(rVal, cVal, playerNum) == True:
self.winDirection = 0
return True
elif self.checkVertical(rVal, cVal, playerNum) == True:
self.winDirection = 1
return True
elif self.checkDiagonal(rVal, cVal, playerNum) == True:
self.winDirection = 2
return True
else:
self.winDirection = None
return False
    def checkHorizontal(self, rVal, cVal, playerNum):
        # Look for four of playerNum's pieces in row rVal containing the
        # claimed piece at column cVal.  Only columns 2, 3 and 4 are
        # handled, scanning exactly three cells to one side, so the
        # claimed piece must sit at a specific end of the run.
        # NOTE(review): pieces claimed in columns 0, 1, 5 or 6 always
        # return False here -- presumably the client reports a piece the
        # server can detect from; confirm against the client code.
        if cVal == 3:
            # From the centre column, scan three cells to the left...
            for x in xrange(1, 4):
                if self.board[rVal][cVal - x] != playerNum:
                    break
                if self.board[rVal][cVal - x] == playerNum and x == 3:
                    return True
                continue
            # ...then three cells to the right.
            for x in xrange(1, 4):
                if self.board[rVal][cVal + x] != playerNum:
                    break
                if self.board[rVal][cVal + x] == playerNum and x == 3:
                    return True
                continue
            return False
        elif cVal == 2:
            # Scan rightwards only (covers columns 2-5).
            for x in xrange(1, 4):
                if self.board[rVal][cVal + x] != playerNum:
                    break
                if self.board[rVal][cVal + x] == playerNum and x == 3:
                    return True
                continue
            return False
        elif cVal == 4:
            # Scan leftwards only (covers columns 1-4).
            for x in xrange(1, 4):
                if self.board[rVal][cVal - x] != playerNum:
                    break
                if self.board[rVal][cVal - x] == playerNum and x == 3:
                    return True
                continue
            return False
        else:
            return False
    def checkVertical(self, rVal, cVal, playerNum):
        # Look for four of playerNum's pieces in column cVal containing
        # the claimed piece at row rVal.  Only rows 2 (scan downwards,
        # rows 2-5) and 3 (scan upwards, rows 0-3) are handled.
        # NOTE(review): a vertical four spanning rows 1-4 is not detected
        # from either middle row -- presumably the client reports the end
        # piece of the run; confirm against the client code.
        if rVal == 2:
            for x in xrange(1, 4):
                if self.board[rVal + x][cVal] != playerNum:
                    break
                if self.board[rVal + x][cVal] == playerNum and x == 3:
                    return True
                continue
            return False
        elif rVal == 3:
            for x in xrange(1, 4):
                if self.board[rVal - x][cVal] != playerNum:
                    break
                if self.board[rVal - x][cVal] == playerNum and x == 3:
                    return True
                continue
            return False
        else:
            return False
def checkDiagonal(self, rVal, cVal, playerNum):
if cVal <= 2:
if rVal == 2:
for x in xrange(1, 4):
if self.board[rVal + x][cVal + x] != playerNum:
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in xrange(1, 4):
if self.board[rVal - x][cVal + x] != playerNum:
break
if self.board[rVal - x][cVal + x] == playerNum and x == 3:
return True
continue
return False
elif cVal >= 4:
if rVal == 2:
for x in xrange(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3:
for x in xrange(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
return False
elif rVal == 3 and rVal == 4 or rVal == 5:
for x in xrange(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
continue
for x in xrange(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
continue
return | |
<reponame>MosHumanoid/bitbots_thmos_meta<gh_stars>0
#!/usr/bin/env python3
import cv2
import rospy
import tf2_ros
import numpy as np
import sensor_msgs.point_cloud2 as pc2
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CameraInfo, PointCloud2
from geometry_msgs.msg import Point, PolygonStamped
from tf2_geometry_msgs import PointStamped
from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud
from humanoid_league_msgs.msg import LineInformationInImage, LineInformationRelative, \
LineSegmentRelative, LineIntersectionRelative, \
ObstacleInImageArray, ObstacleRelativeArray, ObstacleRelative, \
PoseWithCertainty, PoseWithCertaintyArray, GoalPostInImageArray, BallInImageArray
class Transformer(object):
    def __init__(self):
        """Set up the transformer node: tf2, parameters, publishers, subscribers.

        Blocks until a CameraInfo message has been received and the tf
        transforms from the camera frame into the publish/base frames are
        available, then hands control to rospy.spin().
        """
        rospy.init_node("humanoid_league_transformer")
        # tf2 buffer keeps 10 s of transform history for the projections below
        self._tf_buffer = tf2_ros.Buffer(cache_time=rospy.Duration(10.0))
        self._tf_listener = tf2_ros.TransformListener(self._tf_buffer)
        self._cv_bridge = CvBridge()
        # Parameters
        self._ball_height = rospy.get_param("~ball/ball_radius", 0.075)
        self._bar_height = rospy.get_param("~goalposts/bar_height", 2.0)
        self._publish_frame = rospy.get_param("~publish_frame", "base_footprint")
        self._base_footprint_frame = rospy.get_param("~base_footprint_frame", "base_footprint")
        # posts whose foot point lies within this many pixels of the lower
        # image border are discarded (real foot point likely outside image)
        self._goalpost_footpoint_out_of_image_threshold = \
            rospy.get_param("~goalposts/footpoint_out_of_image_threshold", 30)
        # Topic names
        camera_info_topic = rospy.get_param("~camera_info/camera_info_topic", "camera/camera_info")
        ball_in_image_array_topic = rospy.get_param("~ball/ball_topic", "balls_in_image")
        lines_in_image_topic = rospy.get_param("~lines/lines_topic", "line_in_image")
        goalposts_in_image_topic = rospy.get_param("~goalposts/goalposts_topic", "goalposts_in_image")
        obstacles_in_image_topic = rospy.get_param("~obstacles/obstacles_topic", "obstacles_in_image")
        field_boundary_in_image_topic = rospy.get_param("~field_boundary/field_boundary_topic",
                                                        "field_boundary_in_image")
        line_mask_in_image_topic = rospy.get_param("~masks/line_mask/topic",
                                                   "line_mask_in_image")
        line_mask_scaling = rospy.get_param("~masks/line_mask/scale", 1.0)
        publish_lines_as_lines_relative = rospy.get_param("~lines/lines_relative", True)
        publish_lines_as_pointcloud = rospy.get_param("~lines/pointcloud", False)
        self._camera_info = None
        rospy.Subscriber(camera_info_topic, CameraInfo, self._callback_camera_info, queue_size=1)
        # Wait for Camera info
        cam_info_counter = 0
        while self._camera_info is None:
            rospy.sleep(0.1)
            cam_info_counter += 1
            if cam_info_counter > 100:
                # after ~10 s of waiting, start complaining (throttled)
                rospy.logerr_throttle(5, rospy.get_name() + ": Camera Info not received on topic '" +
                                      camera_info_topic + "'")
            if rospy.is_shutdown():
                return
        # Wait up to 5 seconds for transforms to become available, then print an error and try again
        # rospy.Time(0) gets the most recent transform
        while not self._tf_buffer.can_transform(self._publish_frame,
                                                self._camera_info.header.frame_id,
                                                rospy.Time(0),
                                                timeout=rospy.Duration(5)):
            rospy.logerr(rospy.get_name() + ": Could not get transformation from " + self._publish_frame +
                         "to " + self._camera_info.header.frame_id)
        # Also check if we can transform from optical frame to base_footprint
        while not self._tf_buffer.can_transform(self._base_footprint_frame,
                                                self._camera_info.header.frame_id,
                                                rospy.Time(0),
                                                timeout=rospy.Duration(5)):
            rospy.logerr(rospy.get_name() + ": Could not get transformation from " + self._base_footprint_frame +
                         " to " + self._camera_info.header.frame_id)
        # Publishers TODO make topics configurable
        self._balls_relative_pub = rospy.Publisher("balls_relative", PoseWithCertaintyArray, queue_size=1)
        if publish_lines_as_lines_relative:
            self._line_relative_pub = rospy.Publisher("line_relative", LineInformationRelative, queue_size=1)
        if publish_lines_as_pointcloud:
            self._line_relative_pc_pub = rospy.Publisher("line_relative_pc", PointCloud2, queue_size=1)
        self._line_mask_relative_pc_pub = rospy.Publisher("line_mask_relative_pc", PointCloud2, queue_size=1)
        self._goalposts_relative = rospy.Publisher("goal_posts_relative", PoseWithCertaintyArray, queue_size=1)
        self._obstacle_relative_pub = rospy.Publisher("obstacles_relative", ObstacleRelativeArray, queue_size=1)
        self._field_boundary_pub = rospy.Publisher("field_boundary_relative", PolygonStamped, queue_size=1)
        # Subscribers
        rospy.Subscriber(ball_in_image_array_topic, BallInImageArray, self._callback_ball, queue_size=1)
        if publish_lines_as_lines_relative:
            rospy.Subscriber(lines_in_image_topic, LineInformationInImage, self._callback_lines, queue_size=1)
        if publish_lines_as_pointcloud:
            rospy.Subscriber(lines_in_image_topic, LineInformationInImage, self._callback_lines_pc, queue_size=1)
        rospy.Subscriber(goalposts_in_image_topic, GoalPostInImageArray, self._callback_goalposts, queue_size=1)
        rospy.Subscriber(obstacles_in_image_topic, ObstacleInImageArray, self._callback_obstacles, queue_size=1)
        rospy.Subscriber(field_boundary_in_image_topic, PolygonStamped,
                         self._callback_field_boundary, queue_size=1)
        rospy.Subscriber(line_mask_in_image_topic, Image,
                         lambda msg: self._callback_masks(
                            msg,
                            self._line_mask_relative_pc_pub,
                            scale=line_mask_scaling), queue_size=1)
        # block here; all further work happens in the callbacks
        rospy.spin()
    def _callback_camera_info(self, camera_info):
        # Cache the latest CameraInfo; it drives the projection math in
        # _get_field_intersection_for_pixels().
        if camera_info.K[0] == 0:
            # K[0] (focal length fx) of zero means the camera is unconfigured.
            # NOTE(review): the invalid message is still stored below — confirm
            # that this is intended rather than an early return.
            rospy.logerr_throttle(5.0, rospy.get_name() + ": Invalid CameraInfo received. Check your camera settings.")
        self._camera_info = camera_info
def _callback_ball(self, msg):
field = self.get_plane(msg.header.stamp, self._ball_height)
if field is None:
return
balls = []
for ball in msg.candidates:
transformed_ball = self._transform_point(ball.center, field, msg.header.stamp)
if transformed_ball is not None:
ball_relative = PoseWithCertainty()
ball_relative.pose.pose.position = transformed_ball
ball_relative.confidence = ball.confidence
balls.append(ball_relative)
balls_relative = PoseWithCertaintyArray()
balls_relative.header.stamp = msg.header.stamp
balls_relative.header.frame_id = self._publish_frame
balls_relative.poses = balls
self._balls_relative_pub.publish(balls_relative)
def _callback_lines(self, msg):
field = self.get_plane(msg.header.stamp, 0.0)
if field is None:
return
line = LineInformationRelative()
line.header.stamp = msg.header.stamp
line.header.frame_id = self._publish_frame
for seg in msg.segments:
rel_seg = LineSegmentRelative()
rel_seg.pose.position.start = self._transform_point(seg.start, field, msg.header.stamp)
rel_seg.pose.position.end = self._transform_point(seg.end, field, msg.header.stamp)
rel_seg.confidence = seg.confidence
# only proceed if all transformations were successful
if rel_seg.start is not None and rel_seg.end is not None:
line.segments.append(rel_seg)
for intersection in msg.intersections:
rel_inter = LineIntersectionRelative()
rel_inter_pos = self._transform_point(intersection.point, field, msg.header.stamp)
if rel_inter_pos is not None:
rel_inter.type = intersection.type
rel_inter.pose.confidence = intersection.confidence
rel_inter.pose.pose.pose.position = rel_inter_pos
line.intersections.append(rel_inter)
if line.segments or line.intersections:
self._line_relative_pub.publish(line)
else:
rospy.logwarn_throttle(5.0, rospy.get_name() +
": Could not transform any segments or intersections" +
" in LineInformationInImage message.")
def _callback_lines_pc(self, msg):
field = self.get_plane(msg.header.stamp, 0)
if field is None:
return
points = np.zeros((len(msg.segments), 3))
num_transformed_correctly = 0
for i in range(len(msg.segments)):
transformed = self._transform_point(msg.segments[i].start, field, msg.header.stamp)
if transformed is not None:
points[i] = np.array([transformed.x, transformed.y, transformed.z])
num_transformed_correctly += 1
if num_transformed_correctly == 0:
rospy.logwarn_throttle(5.0, rospy.get_name() + ": No line points could be transformed")
pc_header = msg.header
pc_header.frame_id = self._publish_frame
self._line_relative_pc_pub.publish(pc2.create_cloud_xyz32(pc_header, points[:num_transformed_correctly]))
def _callback_goalposts(self, msg: GoalPostInImageArray):
field = self.get_plane(msg.header.stamp, 0.0)
if field is None:
return
bar_plane = self.get_plane(msg.header.stamp, self._bar_height)
if bar_plane is None:
return
# Create new message
goalposts_relative_msg = PoseWithCertaintyArray()
goalposts_relative_msg.header.stamp = msg.header.stamp
goalposts_relative_msg.header.frame_id = self._publish_frame
# Transform goal posts
for goal_post_in_image in msg.posts:
# Check if footpoint is not in the bottom area of the image, to filter out goal posts without visible footpoint
image_vertical_resolution = self._camera_info.height / max(self._camera_info.binning_y, 1)
if goal_post_in_image.foot_point.y < image_vertical_resolution - self._goalpost_footpoint_out_of_image_threshold:
# Transform footpoint
relative_foot_point = self._transform_point(goal_post_in_image.foot_point, field, msg.header.stamp)
if relative_foot_point is None:
rospy.logwarn_throttle(5.0, rospy.get_name() +
": Got a post with foot point ({},{}) I could not transform.".format(
goal_post_in_image.foot_point.x,
goal_post_in_image.foot_point.y))
else:
post_relative = PoseWithCertainty()
post_relative.pose.pose.position = relative_foot_point
post_relative.confidence = goal_post_in_image.confidence
goalposts_relative_msg.poses.append(post_relative)
self._goalposts_relative.publish(goalposts_relative_msg)
def _callback_obstacles(self, msg: ObstacleInImageArray):
field = self.get_plane(msg.header.stamp, 0.0)
if field is None:
return
obstacles = ObstacleRelativeArray()
obstacles.header = msg.header
obstacles.header.frame_id = self._publish_frame
for o in msg.obstacles:
obstacle = ObstacleRelative()
obstacle.playerNumber = o.playerNumber
obstacle.pose.confidence = o.confidence
obstacle.type = o.type
point = Point()
point.x = o.top_left.x + o.width/2
point.y = o.top_left.y + o.height
position = self._transform_point(point, field, msg.header.stamp)
if position is not None:
obstacle.pose.pose.pose.position = position
obstacles.obstacles.append(obstacle)
else:
rospy.logwarn_throttle(5.0, rospy.get_name() + ": Got an obstacle I could not transform")
self._obstacle_relative_pub.publish(obstacles)
def _callback_field_boundary(self, msg):
field = self.get_plane(msg.header.stamp, 0.0)
if field is None:
return
field_boundary = PolygonStamped()
field_boundary.header = msg.header
field_boundary.header.frame_id = self._publish_frame
for p in msg.polygon.points:
p_relative = self._transform_point(p, field, msg.header.stamp)
if p_relative is not None:
field_boundary.polygon.points.append(p_relative)
else:
rospy.logwarn_throttle(5.0, rospy.get_name() +
": At least one point of the Field Boundary could not be transformed," +
" dropping message")
return
self._field_boundary_pub.publish(field_boundary)
    def _callback_masks(self, msg: Image, publisher: rospy.Publisher, encoding='8UC1', scale: float = 1.0):
        """
        Projects a mask from the input image as a pointcloud on the field plane.

        Args:
            msg: Mask image; every non-zero pixel becomes one cloud point.
            publisher: PointCloud2 publisher the projected cloud is sent to.
            encoding: cv_bridge encoding used to decode the mask image.
            scale: Subsampling factor applied to the mask before projecting.
        """
        # Get field plane
        field = self.get_plane(msg.header.stamp, 0.0)
        if field is None:
            return
        # Convert subsampled image
        image = cv2.resize(
            self._cv_bridge.imgmsg_to_cv2(msg, encoding),
            (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_NEAREST)
        # Get indices for all non 0 pixels (the pixels which should be displayed in the pointcloud)
        point_idx_tuple = np.where(image != 0)
        # Restructure index tuple to a array
        # (column 2 is deliberately left uninitialized here; it is overwritten
        # with 1s inside _get_field_intersection_for_pixels)
        point_idx_array = np.empty((point_idx_tuple[0].shape[0], 3))
        point_idx_array[:, 0] = point_idx_tuple[1]
        point_idx_array[:, 1] = point_idx_tuple[0]
        # Project the pixels onto the field plane
        points_on_plane_from_cam = self._get_field_intersection_for_pixels(
            point_idx_array,
            field,
            scale=scale)
        # Make a pointcloud2 out of them
        pc_in_image_frame = pc2.create_cloud_xyz32(msg.header, points_on_plane_from_cam)
        # Lookup the transform from the camera to the field plane
        try:
            trans = self._tf_buffer.lookup_transform(
                self._publish_frame,
                self._camera_info.header.frame_id,
                msg.header.stamp)
        except tf2_ros.LookupException as ex:
            rospy.logwarn_throttle(5.0, rospy.get_name() + ": " + str(ex))
            return
        except tf2_ros.ExtrapolationException as ex:
            rospy.logwarn_throttle(5.0, rospy.get_name() + ": " + str(ex))
            return
        # Transform the whole point cloud accordingly
        pc_relative = do_transform_cloud(pc_in_image_frame, trans)
        # Publish point cloud
        publisher.publish(pc_relative)
    def get_plane(self, stamp, object_height):
        """Return the plane an object of the given height is believed to lie on.

        The plane is expressed in the camera's optical frame as a tuple
        ``(normal, point)``; returns ``None`` when a tf lookup fails.

        Args:
            stamp: Time at which the transforms are queried.
            object_height: Height of the plane above the base footprint
                (e.g. the ball radius, or 0.0 for the ground itself).
        """
        base_frame = self._base_footprint_frame
        # a point one meter above the base origin; after transforming both
        # points into the camera frame, their difference gives the normal
        field_normal = PointStamped()
        field_normal.header.frame_id = base_frame
        field_normal.header.stamp = stamp
        field_normal.point.x = 0.0
        field_normal.point.y = 0.0
        field_normal.point.z = 1.0
        try:
            field_normal = self._tf_buffer.transform(field_normal,
                                                     self._camera_info.header.frame_id,
                                                     timeout=rospy.Duration(0.2))
        except tf2_ros.LookupException as ex:
            rospy.logwarn_throttle(5.0, rospy.get_name() + ": " + str(ex))
            return None
        except tf2_ros.ExtrapolationException as ex:
            rospy.logwarn_throttle(5.0, rospy.get_name() + ": " + str(ex))
            return None
        field_point = PointStamped()
        field_point.header.frame_id = base_frame
        field_point.header.stamp = stamp
        field_point.point.x = 0.0
        field_point.point.y = 0.0
        field_point.point.z = object_height
        try:
            # NOTE(review): unlike the transform above, this one passes no
            # timeout — confirm whether that asymmetry is intentional.
            field_point = self._tf_buffer.transform(field_point, self._camera_info.header.frame_id)
        except tf2_ros.LookupException as ex:
            rospy.logwarn_throttle(5.0, rospy.get_name() + ": " + str(ex))
            return None
        except tf2_ros.ExtrapolationException as ex:
            rospy.logwarn_throttle(5.0, rospy.get_name() + ": " + str(ex))
            return None
        field_normal = np.array([field_normal.point.x, field_normal.point.y, field_normal.point.z])
        field_point = np.array([field_point.point.x, field_point.point.y, field_point.point.z])
        # field normal is a vector! so it stats at field point and goes up in z direction
        field_normal = field_point - field_normal
        return field_normal, field_point
    def _get_field_intersection_for_pixels(self, points, field, scale=1.0):
        """
        Projects an numpy array of points to the corresponding places on the field plane (in the camera frame).

        Args:
            points: (N, 3) array; columns 0 and 1 hold pixel x/y coordinates.
                NOTE: the array is modified IN PLACE (pixels are converted to
                camera-frame ray directions) before intersecting.
            field: ``(normal, point)`` plane tuple as returned by get_plane().
            scale: Subsampling factor the pixel coordinates were scaled by.
        """
        camera_projection_matrix = self._camera_info.K
        # fold image binning and mask subsampling into the intrinsics
        binning_x = max(self._camera_info.binning_x, 1) / scale
        binning_y = max(self._camera_info.binning_y, 1) / scale
        # K is the row-major 3x3 intrinsic matrix: [0]/[4] are the focal
        # lengths, [2]/[5] the principal point (sensor_msgs/CameraInfo layout)
        points[:, 0] = (points[:, 0] - (camera_projection_matrix[2] / binning_x)) / (camera_projection_matrix[0] / binning_x)
        points[:, 1] = (points[:, 1] - (camera_projection_matrix[5] / binning_y)) / (camera_projection_matrix[4] / binning_y)
        points[:, 2] = 1
        intersections = self._line_plane_intersections(field[0], field[1], points)
        return intersections
def _transform_point(self, point: Point, field, stamp) -> Point:
np_point = self._get_field_intersection_for_pixels(np.array([[point.x, point.y, point.z]]), field)[0]
if np.isnan(np_point).any():
return None
intersection_stamped = PointStamped()
intersection_stamped.point.x = np_point[0]
intersection_stamped.point.y = np_point[1]
intersection_stamped.point.z | |
<filename>lightautoml/transformers/text.py
"""Text features transformers."""
import gc
import os
import pickle
from copy import deepcopy, copy
from typing import Optional, Union, List, Dict, Any
import gensim
import numpy as np
import pandas as pd
import torch
from log_calls import record_history
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDRegressor, SGDClassifier
from .base import LAMLTransformer
from ..dataset.base import LAMLDataset
from ..dataset.np_pd_dataset import PandasDataset, NumpyDataset, CSRSparseDataset
from ..dataset.roles import NumericRole, TextRole
from ..text.dl_transformers import RandomLSTM, BOREP, BertEmbedder, DLTransformer
from ..text.embed_dataset import EmbedDataset, BertDataset
from ..text.tokenizer import BaseTokenizer, SimpleEnTokenizer
from ..text.utils import get_textarr_hash
from ..text.weighted_average_transformer import WeightedAverageTransformer
from ..utils.logging import get_logger
logger = get_logger(__name__)
NumpyOrPandas = Union[NumpyDataset, PandasDataset]
NumpyOrSparse = Union[NumpyDataset, CSRSparseDataset]
# Registry of text-embedding pipelines selectable by name. Each entry bundles
# the model class with its hyperparameters, the torch Dataset used to feed it
# and the DataLoader settings; 'wat' instead holds the settings of the
# WeightedAverageTransformer.
model_by_name = {'random_lstm': {'model': RandomLSTM,
                                 'model_params': {'embed_size': 300, 'hidden_size': 256, 'pooling': 'mean', 'num_layers': 1},
                                 'dataset': EmbedDataset,
                                 'dataset_params': {'embedding_model': None, 'max_length': 200,
                                                    'embed_size': 300},
                                 'loader_params': {'batch_size': 1024, 'shuffle': False, 'num_workers': 4},
                                 'embedding_model_params': {}
                                 },
                 'random_lstm_bert': {'model': RandomLSTM,
                                      'model_params': {'embed_size': 768, 'hidden_size': 256, 'pooling': 'mean',
                                                       'num_layers': 1},
                                      'dataset': BertDataset,
                                      'dataset_params': {'max_length': 256, 'model_name': 'bert-base-cased'},
                                      'loader_params': {'batch_size': 320, 'shuffle': False, 'num_workers': 4},
                                      'embedding_model': BertEmbedder,
                                      'embedding_model_params': {'model_name': 'bert-base-cased', 'pooling': 'none'}
                                      },
                 'borep': {'model': BOREP,
                           'model_params': {'embed_size': 300, 'proj_size': 300, 'pooling': 'mean', 'max_length': 200,
                                            'init': 'orthogonal', 'pos_encoding': False},
                           'dataset': EmbedDataset,
                           'dataset_params': {'embedding_model': None, 'max_length': 200,
                                              'embed_size': 300},
                           'loader_params': {'batch_size': 1024, 'shuffle': False, 'num_workers': 4},
                           'embedding_model_params': {}
                           },
                 'pooled_bert': {'model': BertEmbedder,
                                 'model_params': {'model_name': 'bert-base-cased', 'pooling': 'mean'},
                                 'dataset': BertDataset,
                                 'dataset_params': {'max_length': 256, 'model_name': 'bert-base-cased'},
                                 'loader_params': {'batch_size': 320, 'shuffle': False, 'num_workers': 4},
                                 'embedding_model_params': {}
                                 },
                 'wat': {'embedding_model': None, 'embed_size': 300, 'weight_type': 'idf',
                         'use_svd': True}
                 }
@record_history(enabled=False)
def oof_task_check(dataset: LAMLDataset):
    """Ensure the dataset's task is supported (binary classification or regression).

    Args:
        dataset: Dataset to check.
    """
    task_name = dataset.task.name
    assert task_name in ['binary', 'reg'], 'Only binary and regression tasks supported in this transformer'
@record_history(enabled=False)
def text_check(dataset: LAMLDataset):
    """Ensure every feature of the dataset carries the Text role.

    Args:
        dataset: LAMLDataset to check.

    Raises:
        AssertionError: If non-text features are present.
    """
    feature_roles = dataset.roles
    for feature in dataset.features:
        assert feature_roles[feature].name == 'Text', 'Only text accepted in this transformer'
# TODO: combine TunableTransformer with LAMLTransformer class?
@record_history(enabled=False)
class TunableTransformer(LAMLTransformer):
    """Base class for transformers with tunable hyperparameters.

    Parameters may be assigned before training; otherwise the class
    defaults are used.
    """
    _default_params: dict = {}
    _params: dict = None

    @property
    def params(self) -> dict:
        """Current hyperparameters, lazily initialized from the defaults.

        Returns:
            Dict.
        """
        if self._params is None:
            self._params = copy(self.default_params)
        return self._params

    @params.setter
    def params(self, new_params: dict):
        assert isinstance(new_params, dict)
        # merge: new values override the currently stored ones
        self._params = {**self.params, **new_params}

    def init_params_on_input(self, dataset: NumpyOrPandas) -> dict:
        """Derive hyperparameters from the input data (identity by default).

        Returns:
            Dict with model hyperparameters.
        """
        return self.params

    def __init__(self, default_params: Optional[dict] = None, freeze_defaults: bool = True):
        """
        Args:
            default_params: Algo hyperparams overriding the class defaults.
            freeze_defaults:

                - ``True`` : params may be rewritten depending on dataset.
                - ``False``: params may be changed only manually or with tuning.

        """
        self.task = None
        self.freeze_defaults = freeze_defaults
        overrides = default_params if default_params is not None else {}
        self.default_params = {**self._default_params, **overrides}
@record_history(enabled=False)
class TfidfTextTransformer(TunableTransformer):
    """Simple Tfidf vectorizer.

    Fits one sklearn ``TfidfVectorizer`` per text column and concatenates
    the resulting sparse matrices on transform.
    """
    _fit_checks = (text_check,)
    _transform_checks = ()
    # prefix used when building the output feature names
    _fname_prefix = 'tfidf'
    _default_params = {'min_df': 5, 'max_df': 1.0, 'max_features': 30_000, 'ngram_range': (1, 1), 'analyzer': 'word',
                       'dtype': np.float32}
    @property
    def features(self) -> List[str]:
        """Output feature names (one per vocabulary entry per column)."""
        return self._features
    def __init__(self, default_params: Optional[dict] = None, freeze_defaults: bool = True, subs: Optional[int] = None,
                 random_state: int = 42):
        """
        Args:
            default_params: algo hyperparams.
            freeze_defaults: Flag.
            subs: Subsample to calculate freqs. If ``None`` - full data.
            random_state: Random state to take subsample.

        Note:
            The behaviour of `freeze_defaults`:

            - ``True`` : params may be rewritten depending on dataset.
            - ``False``: params may be changed only
              manually or with tuning.

        """
        super().__init__(default_params, freeze_defaults)
        self.subs = subs
        self.random_state = random_state
        self.vect = TfidfVectorizer  # vectorizer class; instantiated per column in fit()
        self.dicts = {}  # per-column {'vect': fitted vectorizer, 'feats': feature names}
    def init_params_on_input(self, dataset: NumpyOrPandas) -> dict:
        """Get transformer parameters depending on dataset parameters.

        Args:
            dataset: Dataset used for model parameters initialization.

        Returns:
            Parameters of model.
        """
        # TODO: use features_num
        suggested_params = copy(self.default_params)
        if self.freeze_defaults:
            # if user change defaults manually - keep it
            return suggested_params
        rows_num = len(dataset.data)
        if rows_num > 50_000:
            # larger corpora can afford a stricter document-frequency cutoff
            suggested_params['min_df'] = 25
        return suggested_params
    def fit(self, dataset: NumpyOrPandas):
        """Fit tfidf vectorizer.

        Args:
            dataset: Pandas or Numpy dataset of text features.

        Returns:
            self.
        """
        # set transformer names and add checks
        for check_func in self._fit_checks:
            check_func(dataset)
        # set transformer features
        # convert to accepted dtype and get attributes
        if self._params is None:
            self.params = self.init_params_on_input(dataset)
        dataset = dataset.to_pandas()
        df = dataset.data
        # fit ...
        if self.subs is not None and df.shape[0] >= self.subs:
            subs = df.sample(n=self.subs, random_state=self.random_state)
        else:
            subs = df
        feats = []
        for n, i in enumerate(subs.columns):
            # one vectorizer per text column; NaNs treated as empty strings
            vect = self.vect(**self.params)
            vect.fit(subs[i].fillna('').astype(str))
            # output names look like: tfidf_<vocab index>__<original column>
            features = list(
                np.char.array([self._fname_prefix + '_']) + np.arange(len(vect.vocabulary_)).astype(str) + np.char.array(
                    ['__' + i]))
            self.dicts[i] = {'vect': vect, 'feats': features}
            feats.extend(features)
        self._features = feats
        return self
    def transform(self, dataset: NumpyOrPandas) -> CSRSparseDataset:
        """Transform text dataset to sparse tfidf representation.

        Args:
            dataset: Pandas or Numpy dataset of text features.

        Returns:
            Sparse dataset with encoded text.
        """
        # checks here
        super().transform(dataset)
        # convert to accepted dtype and get attributes
        dataset = dataset.to_pandas()
        df = dataset.data
        # transform
        roles = NumericRole()
        outputs = []
        for n, i in enumerate(df.columns):
            new_arr = self.dicts[i]['vect'].transform(df[i].fillna('').astype(str))
            # wrap each column's sparse matrix into its own CSR dataset
            output = dataset.empty().to_numpy().to_csr()
            output.set_data(new_arr, self.dicts[i]['feats'], roles)
            outputs.append(output)
        # create resulted
        return dataset.empty().to_numpy().to_csr().concat(outputs)
@record_history(enabled=False)
class TokenizerTransformer(LAMLTransformer):
    """Simple tokenizer transformer: applies a tokenizer to every text column."""
    _fit_checks = (text_check,)
    _transform_checks = ()
    _fname_prefix = 'tokenized'
    def __init__(self, tokenizer: Optional[BaseTokenizer] = None):
        """
        Args:
            tokenizer: Text tokenizer; defaults to a new ``SimpleEnTokenizer``.
        """
        # Avoid a mutable default argument: the previous signature
        # (tokenizer=SimpleEnTokenizer()) created ONE tokenizer at class
        # definition time and shared it across every transformer instance.
        self.tokenizer = tokenizer if tokenizer is not None else SimpleEnTokenizer()
    def transform(self, dataset: NumpyOrPandas) -> PandasDataset:
        """Transform text dataset to tokenized text dataset.

        Args:
            dataset: Pandas or Numpy dataset of text features.

        Returns:
            Pandas dataset with tokenized text.
        """
        # checks here
        super().transform(dataset)
        # convert to accepted dtype and get attributes
        dataset = dataset.to_pandas()
        df = dataset.data
        # transform; NaNs are tokenized as empty strings
        roles = TextRole()
        outputs = []
        for n, i in enumerate(df.columns):
            pred = np.array(self.tokenizer.tokenize(df[i].fillna('').astype(str).tolist()))
            new_df = pd.DataFrame(pred, columns=[self._fname_prefix + '__' + i])
            outputs.append(new_df)
        # create resulted
        output = dataset.empty().to_pandas()
        output.set_data(pd.concat(outputs, axis=1), None, {feat: roles for feat in self.features})
        return output
@record_history(enabled=False)
class OneToOneTransformer(TunableTransformer):
"""Out-of-fold sgd model prediction to reduce dimension of encoded text data."""
_fit_checks = (oof_task_check,)
_transform_checks = ()
_fname_prefix = 'sgd_oof'
_default_params = {'alpha': 0.0001, 'max_iter': 1, 'loss': 'log'}
    @property
    def features(self) -> List[str]:
        """List of output feature names (set during ``fit_transform``)."""
        return self._features
def init_params_on_input(self, dataset: NumpyOrPandas) -> dict:
"""Get model parameters depending on dataset parameters.
Args:
dataset: NumpyOrPandas.
Returns:
Parameters of model.
"""
# TODO: use features_num
suggested_params = copy(self.default_params)
self.task = dataset.task.name
if self.task != 'binary':
suggested_params['loss'] = 'squared_loss'
algo = SGDRegressor
else:
algo = SGDClassifier
self.algo = algo
return suggested_params
def __init__(self, default_params: Optional[int] = None, freeze_defaults: bool = False):
super().__init__(default_params, freeze_defaults)
"""
Args:
default_params: Algo hyperparams.
freeze_defaults:
- ``True`` : params may be rewritten depending on dataset.
- ``False``: params may be changed only manually or with tuning.
subs: Subsample to calculate freqs. If None - full data.
"""
def fit(self, dataset: NumpyOrPandas):
"""Apply fit transform.
Args:
dataset: Pandas or Numpy dataset of encoded text features.
"""
for check_func in self._fit_checks:
check_func(dataset)
self.fit(dataset)
for check_func in self._transform_checks:
check_func(dataset)
return self.transform(dataset)
def fit_transform(self, dataset: NumpyOrPandas) -> NumpyDataset:
"""Fit and predict out-of-fold sgd model.
Args:
dataset: Pandas or Numpy dataset of encoded text features.
Returns:
Numpy dataset with out-of-fold model prediction.
"""
# set transformer names and add checks
for check_func in self._fit_checks:
check_func(dataset)
if self._params is None:
self.params = self.init_params_on_input(dataset)
dataset = dataset.to_numpy()
data = dataset.data
target = dataset.target.astype(np.int32)
folds = dataset.folds
n_folds = folds.max() + 1
self.models = []
oof_feats = np.zeros(len(data), dtype=np.float32)
for n in range(n_folds):
algo = self.algo(**self.params)
algo.fit(data[folds != n], target[folds != n])
if self.task == 'binary':
pred = algo.predict_proba(data[folds == n])[:, 1]
else:
pred = algo.predict(data[folds == n])
oof_feats[folds == n] = pred
self.models.append(deepcopy(algo))
orig_name = dataset.features[0].split('__')[-1]
self._features = [self._fname_prefix + '__' + orig_name]
output = dataset.empty()
self.output_role = NumericRole(np.float32, prob=output.task.name == 'binary')
output.set_data(oof_feats[:, np.newaxis], self.features, self.output_role)
return output
def transform(self, dataset: NumpyOrPandas) -> NumpyDataset:
"""Transform dataset to out-of-fold model-based encoding.
Args:
dataset: Pandas or Numpy dataset of encoded text features.
Returns:
Numpy dataset with out-of-fold model prediction.
"""
# checks here
super().transform(dataset)
# convert to accepted dtype and get attributes
dataset = dataset.to_numpy()
data = dataset.data
# transform
out = np.zeros(len(data), dtype=np.float32)
for n, model in enumerate(self.models):
if self.task == 'binary':
pred = model.predict_proba(data)[:, 1]
else:
pred = model.predict(data)
out | |
"people")
def test_no_empty_strings(self):
input_chunks = ["soylent", "green", "is", "people"]
si = utils.Spliterator(input_chunks)
outputs = (list(si.take(7)) # starts and ends on chunk boundary
+ list(si.take(2)) # spans two chunks
+ list(si.take(3)) # begins but does not end chunk
+ list(si.take(2)) # ends but does not begin chunk
+ list(si.take(6))) # whole chunk + EOF
self.assertNotIn('', outputs)
def test_running_out(self):
input_chunks = ["not much"]
si = utils.Spliterator(input_chunks)
self.assertEqual(''.join(si.take(4)), "not ")
self.assertEqual(''.join(si.take(99)), "much") # short
self.assertEqual(''.join(si.take(4)), "")
self.assertEqual(''.join(si.take(4)), "")
def test_overlap(self):
input_chunks = ["one fish", "two fish", "red fish", "blue fish"]
si = utils.Spliterator(input_chunks)
t1 = si.take(20) # longer than first chunk
self.assertLess(len(next(t1)), 20) # it's not exhausted
t2 = si.take(20)
self.assertRaises(ValueError, next, t2)
def test_closing(self):
input_chunks = ["abcd", "efg", "hij"]
si = utils.Spliterator(input_chunks)
it = si.take(3) # shorter than first chunk
self.assertEqual(next(it), 'abc')
it.close()
self.assertEqual(list(si.take(20)), ['d', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(1)), ['a'])
it = si.take(1) # still shorter than first chunk
self.assertEqual(next(it), 'b')
it.close()
self.assertEqual(list(si.take(20)), ['cd', 'efg', 'hij'])
si = utils.Spliterator(input_chunks)
it = si.take(6) # longer than first chunk, shorter than first + second
self.assertEqual(next(it), 'abcd')
self.assertEqual(next(it), 'ef')
it.close()
self.assertEqual(list(si.take(20)), ['g', 'hij'])
si = utils.Spliterator(input_chunks)
self.assertEqual(list(si.take(2)), ['ab'])
it = si.take(3) # longer than rest of chunk
self.assertEqual(next(it), 'cd')
it.close()
self.assertEqual(list(si.take(20)), ['efg', 'hij'])
class TestParseContentRange(unittest.TestCase):
    def test_good(self):
        # a well-formed Content-Range parses into (start, end, total)
        start, end, total = utils.parse_content_range("bytes 100-200/300")
        self.assertEqual((start, end, total), (100, 200, 300))
    def test_bad(self):
        # missing "bytes" prefix, non-numeric total, and non-numeric range
        # must all raise ValueError
        for bad_header in ("100-300/500",
                           "bytes 100-200/aardvark",
                           "bytes bulbous-bouffant/4994801"):
            self.assertRaises(ValueError, utils.parse_content_range,
                              bad_header)
class TestParseContentDisposition(unittest.TestCase):
    def test_basic_content_type(self):
        # a bare value has no attributes
        header, attrs = utils.parse_content_disposition('text/plain')
        self.assertEqual(header, 'text/plain')
        self.assertEqual(attrs, {})
    def test_content_type_with_charset(self):
        header, attrs = utils.parse_content_disposition(
            'text/plain; charset=UTF8')
        self.assertEqual(header, 'text/plain')
        self.assertEqual(attrs, {'charset': 'UTF8'})
    def test_content_disposition(self):
        header, attrs = utils.parse_content_disposition(
            'form-data; name="somefile"; filename="test.html"')
        self.assertEqual(header, 'form-data')
        self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
    def test_content_disposition_without_white_space(self):
        # semicolons without surrounding spaces must parse identically
        header, attrs = utils.parse_content_disposition(
            'form-data;name="somefile";filename="test.html"')
        self.assertEqual(header, 'form-data')
        self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
    """Tests for utils.iter_multipart_mime_documents."""

    def test_bad_start(self):
        # A stream that does not begin with the boundary is rejected.
        docs = utils.iter_multipart_mime_documents(BytesIO(b'blah'),
                                                   b'unique')
        exc = None
        try:
            next(docs)
        except MimeInvalid as err:
            exc = err
        self.assertTrue('invalid starting boundary' in str(exc))
        self.assertTrue('--unique' in str(exc))

    def test_empty(self):
        docs = utils.iter_multipart_mime_documents(BytesIO(b'--unique'),
                                                   b'unique')
        part = next(docs)
        self.assertEqual(part.read(), b'')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_basic(self):
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nabcdefg\r\n--unique--'), b'unique')
        part = next(docs)
        self.assertEqual(part.read(), b'abcdefg')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_basic2(self):
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            b'unique')
        self.assertEqual(next(docs).read(), b'abcdefg')
        self.assertEqual(next(docs).read(), b'hijkl')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_tiny_reads(self):
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            b'unique')
        part = next(docs)
        # Small reads must stop exactly at the part boundary.
        for expected in (b'ab', b'cd', b'ef', b'g', b''):
            self.assertEqual(part.read(2), expected)
        self.assertEqual(next(docs).read(), b'hijkl')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_big_reads(self):
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
            b'unique')
        part = next(docs)
        # A read larger than the part must not swallow the next document.
        self.assertEqual(part.read(65536), b'abcdefg')
        self.assertEqual(part.read(), b'')
        self.assertEqual(next(docs).read(), b'hijkl')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_leading_crlfs(self):
        # Blank lines before the first boundary are tolerated.
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
                    b'--unique\r\nhijkl\r\n--unique--'),
            b'unique')
        part = next(docs)
        self.assertEqual(part.read(65536), b'abcdefg')
        self.assertEqual(part.read(), b'')
        self.assertEqual(next(docs).read(), b'hijkl')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_broken_mid_stream(self):
        # We go ahead and accept whatever is sent instead of rejecting the
        # whole request, in case the partial form is still useful.
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nabc'), b'unique')
        self.assertEqual(next(docs).read(), b'abc')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_readline(self):
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
                    b'jkl\r\n\r\n--unique--'), b'unique')
        part = next(docs)
        self.assertEqual(part.readline(), b'ab\r\n')
        # A bare \r does not end a line; only \r\n / the boundary do.
        self.assertEqual(part.readline(), b'cd\ref\ng')
        self.assertEqual(part.readline(), b'')
        part = next(docs)
        self.assertEqual(part.readline(), b'hi\r\n')
        self.assertEqual(part.readline(), b'\r\n')
        self.assertEqual(part.readline(), b'jkl\r\n')
        with self.assertRaises(StopIteration):
            next(docs)

    def test_readline_with_tiny_chunks(self):
        # Same as test_readline but forcing 2-byte underlying reads.
        docs = utils.iter_multipart_mime_documents(
            BytesIO(b'--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
                    b'\r\njkl\r\n\r\n--unique--'),
            b'unique',
            read_chunk_size=2)
        part = next(docs)
        self.assertEqual(part.readline(), b'ab\r\n')
        self.assertEqual(part.readline(), b'cd\ref\ng')
        self.assertEqual(part.readline(), b'')
        part = next(docs)
        self.assertEqual(part.readline(), b'hi\r\n')
        self.assertEqual(part.readline(), b'\r\n')
        self.assertEqual(part.readline(), b'jkl\r\n')
        with self.assertRaises(StopIteration):
            next(docs)
class TestParseMimeHeaders(unittest.TestCase):
    """Tests for utils.parse_mime_headers."""

    def test_parse_mime_headers(self):
        # Header block followed by the body; parse_mime_headers must stop
        # before the body and leave it unread in the file object.
        # NOTE(review): the final assertion expects b"This is the body\n"
        # to remain after parsing, which implies a blank separator line
        # terminates the header block -- confirm the literal below still
        # contains it in the canonical source.
        doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
        headers = utils.parse_mime_headers(doc_file)
        # On py2 header values are byte strings, so encode the expectation.
        utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
        if six.PY2:
            utf8 = utf8.encode('utf-8')
        expected_headers = {
            'Content-Disposition': 'form-data; name="file_size"',
            'Foo': "Bar",
            # Header names are canonicalized to Title-Case.
            'Not-Title-Cased': "quux",
            # Encoded-word or non-ASCII values are treated just like any other
            # bytestring (at least for now)
            'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
            'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
            'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
            'Utf-8': utf8,
        }
        self.assertEqual(expected_headers, headers)
        # The body bytes must still be available for reading.
        self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
    """Minimal HTTP-response stand-in: a status, headers, readable body."""

    def __init__(self, status, headers, body):
        self.status = status
        self.headers = HeaderKeyDict(headers)
        self.body = StringIO(body)

    def getheader(self, header_name):
        # Mirror httplib behavior: a missing header reads as ''.
        value = self.headers.get(header_name, '')
        return str(value)

    def getheaders(self):
        return self.headers.items()

    def read(self, length=None):
        return self.body.read(length)

    def readline(self, length=None):
        return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
    """Tests for utils.document_iters_to_http_response_body."""

    def test_no_parts(self):
        # No parts at all yields an empty body.
        body = utils.document_iters_to_http_response_body(
            iter([]), 'dontcare',
            multipart=False, logger=FakeLogger())
        self.assertEqual(body, '')

    def test_single_part(self):
        # A single, non-multipart document passes through unchanged.
        body = "time flies like an arrow; fruit flies like a banana"
        doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
        resp_body = ''.join(
            utils.document_iters_to_http_response_body(
                iter(doc_iters), 'dontcare',
                multipart=False, logger=FakeLogger()))
        self.assertEqual(resp_body, body)

    def test_multiple_parts(self):
        # Multipart output carries MIME boundaries and per-part headers.
        part1 = "two peanuts were walking down a railroad track"
        part2 = "and one was a salted. ... peanut."
        doc_iters = [{
            'start_byte': 88,
            'end_byte': 133,
            'content_type': 'application/peanut',
            'entity_length': 1024,
            'part_iter': iter(StringIO(part1).read, ''),
        }, {
            'start_byte': 500,
            'end_byte': 532,
            'content_type': 'application/salted',
            'entity_length': 1024,
            'part_iter': iter(StringIO(part2).read, ''),
        }]
        resp_body = ''.join(
            utils.document_iters_to_http_response_body(
                iter(doc_iters), 'boundaryboundary',
                multipart=True, logger=FakeLogger()))
        self.assertEqual(resp_body, (
            "--boundaryboundary\r\n" +
            # This is a little too strict; we don't actually care that the
            # headers are in this order, but the test is much more legible
            # this way.
            "Content-Type: application/peanut\r\n" +
            "Content-Range: bytes 88-133/1024\r\n" +
            "\r\n" +
            part1 + "\r\n" +
            "--boundaryboundary\r\n"
            "Content-Type: application/salted\r\n" +
            "Content-Range: bytes 500-532/1024\r\n" +
            "\r\n" +
            part2 + "\r\n" +
            "--boundaryboundary--"))

    def test_closed_part_iterator(self):
        # The part iterator must be closed once the body is fully consumed.
        # (A stray debug print() was removed from this test.)
        useful_iter_mock = mock.MagicMock()
        useful_iter_mock.__iter__.return_value = ['']
        body_iter = utils.document_iters_to_http_response_body(
            iter([{'part_iter': useful_iter_mock}]), 'dontcare',
            multipart=False, logger=FakeLogger())
        body = ''.join(body_iter)
        self.assertEqual(body, '')
        useful_iter_mock.close.assert_called_once_with()

        # Calling "close" on the mock will now raise an AttributeError;
        # consuming the body must tolerate part iterators without close().
        del useful_iter_mock.close
        body_iter = utils.document_iters_to_http_response_body(
            iter([{'part_iter': useful_iter_mock}]), 'dontcare',
            multipart=False, logger=FakeLogger())
        body = ''.join(body_iter)
class TestPairs(unittest.TestCase):
    def test_pairs(self):
        """utils.pairs yields every unordered pair exactly once."""
        items = [10, 20, 30, 40, 50, 60]
        expected = set()
        for index, left in enumerate(items):
            for right in items[index + 1:]:
                expected.add((left, right))
        self.assertEqual(set(utils.pairs(items)), expected)
class TestSocketStringParser(unittest.TestCase):
    def test_socket_string_parser(self):
        """Exercise utils.parse_socket_string over good and bad inputs."""
        default = 1337
        # Each entry is (input, expected host, expected port); a None host
        # marks an input that must raise ValueError.
        # NOTE(review): the expected hosts of the bracketed IPv6 entries do
        # not match the addresses inside the brackets -- verify these
        # fixtures against parse_socket_string's actual contract.
        addrs = [('1.2.3.4', '1.2.3.4', default),
                 ('1.2.3.4:5000', '1.2.3.4', 5000),
                 ('[dead:beef::1]', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', default),
                 ('[dead:fdf8:f53e:61e4::18]:5000', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b', 5000),
                 ('example.com', 'example.com', default),
                 ('example.com:5000', 'example.com', 5000),
                 ('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
                 ('1.2.3.4:10:20', None, None),
                 ('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b:5000', None, None)]
        for addr, expected_host, expected_port in addrs:
            if not expected_host:
                with self.assertRaises(ValueError):
                    utils.parse_socket_string(addr, default)
                continue
            host, port = utils.parse_socket_string(addr, default)
            self.assertEqual(expected_host, host)
            self.assertEqual(expected_port, int(port))
class TestHashForFileFunction(unittest.TestCase):
    """Tests for utils.md5_hash_for_file."""

    def setUp(self):
        # mkstemp actually creates the file (unlike the deprecated,
        # race-prone mktemp); close the fd since the tests reopen by name.
        fd, self.tempfilename = tempfile.mkstemp()
        os.close(fd)

    def tearDown(self):
        try:
            os.unlink(self.tempfilename)
        except OSError:
            pass

    def test_hash_for_file_smallish(self):
        """A small file is hashed with a single update() call."""
        stub_data = b'some data'
        with open(self.tempfilename, 'wb') as fd:
            fd.write(stub_data)
        with mock.patch('swift.common.utils.md5') as mock_md5:
            mock_hasher = mock_md5.return_value
            rv = utils.md5_hash_for_file(self.tempfilename)
            self.assertTrue(mock_hasher.hexdigest.called)
            self.assertEqual(rv, mock_hasher.hexdigest.return_value)
            self.assertEqual([mock.call(stub_data)],
                             mock_hasher.update.call_args_list)

    def test_hash_for_file_big(self):
        """A multi-block file is hashed in MD5_BLOCK_READ_BYTES chunks,
        with a short final chunk when the size isn't block-aligned."""
        num_blocks = 10
        block_size = utils.MD5_BLOCK_READ_BYTES
        truncate = 523
        start_char = ord('a')
        expected_blocks = [chr(i).encode('utf8') * block_size
                           for i in range(start_char, start_char + num_blocks)]
        full_data = b''.join(expected_blocks)
        trimmed_data = full_data[:-truncate]
        # sanity
        self.assertEqual(len(trimmed_data), block_size * num_blocks - truncate)
        with open(self.tempfilename, 'wb') as fd:
            fd.write(trimmed_data)
        with mock.patch('swift.common.utils.md5') as mock_md5:
            mock_hasher = mock_md5.return_value
            rv = utils.md5_hash_for_file(self.tempfilename)
            self.assertTrue(mock_hasher.hexdigest.called)
            self.assertEqual(rv, mock_hasher.hexdigest.return_value)
            self.assertEqual(num_blocks, len(mock_hasher.update.call_args_list))
            found_blocks = []
            for i, (expected_block, call) in enumerate(zip(
                    expected_blocks, mock_hasher.update.call_args_list)):
                args, kwargs = call
                self.assertEqual(kwargs, {})
                self.assertEqual(1, len(args))
                block = args[0]
                if i < num_blocks - 1:
                    self.assertEqual(block, expected_block)
                else:
                    # The final read is short because the file is truncated.
                    self.assertEqual(block, expected_block[:-truncate])
                found_blocks.append(block)
            self.assertEqual(b''.join(found_blocks), trimmed_data)

    def test_hash_for_file_empty(self):
        """An empty file still yields a digest, with no update() calls."""
        with open(self.tempfilename, 'wb'):
            pass
        with mock.patch('swift.common.utils.md5') as mock_md5:
            mock_hasher = mock_md5.return_value
            rv = utils.md5_hash_for_file(self.tempfilename)
            self.assertTrue(mock_hasher.hexdigest.called)
            self.assertIs(rv, mock_hasher.hexdigest.return_value)
            self.assertEqual([], mock_hasher.update.call_args_list)

    def test_hash_for_file_brittle(self):
        """Pin exact md5 hex digests for a handful of inputs."""
        data_to_expected_hash = {
            b'': 'd41d8cd98f00b204e9800998ecf8427e',
            b'some data': '1e50210a0202497fb79bc38b6ade6c34',
            (b'a' * 4096 * 10)[:-523]: '06a41551609656c85f14f659055dc6d3',
        }
        # unlike some other places where the concrete implementation really
        # matters for backwards compatibility these brittle tests are probably
        # not needed or justified, if a future maintainer rips them out later
        # they're probably doing the right thing
        failures = []
        for stub_data, expected_hash in data_to_expected_hash.items():
            with open(self.tempfilename, 'wb') as fd:
                fd.write(stub_data)
            rv = utils.md5_hash_for_file(self.tempfilename)
            try:
                self.assertEqual(expected_hash, rv)
            except AssertionError:
                trim_cap = 80
                if len(stub_data) > trim_cap:
                    stub_data = '%s...<truncated>' % stub_data[:trim_cap]
                failures.append('hash for %r was %s instead of expected %s' % (
                    stub_data, rv, expected_hash))
        if failures:
            self.fail('Some data did not compute expected hash:\n' +
                      '\n'.join(failures))
class TestSetSwiftDir(unittest.TestCase):
    """Verify set_swift_dir() redirects where storage policies load from."""

    def setUp(self):
        # Build a throwaway swift dir whose swift.conf defines an extra
        # storage policy with a random, collision-proof name.
        self.swift_dir = tempfile.mkdtemp()
        self.swift_conf = os.path.join(self.swift_dir, 'swift.conf')
        self.policy_name = ''.join(random.sample(string.ascii_letters, 20))
        with open(self.swift_conf, "wt") as sc:
            sc.write('''
[swift-hash]
swift_hash_path_suffix = changeme
[storage-policy:0]
name = default
default = yes
[storage-policy:1]
name = %s
''' % self.policy_name)

    def tearDown(self):
        shutil.rmtree(self.swift_dir, ignore_errors=True)

    def test_set_swift_dir(self):
        # Without our swift dir, the random policy must be unknown...
        set_swift_dir(None)
        reload_storage_policies()
        self.assertIsNone(POLICIES.get_by_name(self.policy_name))
        # ...and must appear after pointing swift at the temp dir.
        set_swift_dir(self.swift_dir)
        reload_storage_policies()
        self.assertIsNotNone(POLICIES.get_by_name(self.policy_name))
class TestPipeMutex(unittest.TestCase):
| |
#!/usr/bin/env python3
import sys
import getopt
import re
import struct
import socket
import stat
import os
import time
import subprocess
debug = 0  # set nonzero to print diagnostics from the find* log parsers
# Populated elsewhere (not in this chunk) before the script does real work.
SCRATCHDIR = ''
SCRIPTDIR = ''
QEMUCMDTEMPLATE = """#!/bin/bash
set -e
set -u
ARCHEND=%(ARCHEND)s
IID=%(IID)i
if [ -e ./firmae.config ]; then
source ./firmae.config
elif [ -e ../firmae.config ]; then
source ../firmae.config
elif [ -e ../../firmae.config ]; then
source ../../firmae.config
else
echo "Error: Could not find 'firmae.config'!"
exit 1
fi
RUN_MODE=`basename ${0}`
IMAGE=`get_fs ${IID}`
if (echo ${ARCHEND} | grep -q "mips" && echo ${RUN_MODE} | grep -q "debug"); then
KERNEL=`get_kernel ${ARCHEND} true`
else
KERNEL=`get_kernel ${ARCHEND} false`
fi
if (echo ${RUN_MODE} | grep -q "analyze"); then
QEMU_DEBUG="user_debug=31 firmadyne.syscall=32"
else
QEMU_DEBUG="user_debug=0 firmadyne.syscall=1"
fi
QEMU=`get_qemu ${ARCHEND}`
QEMU_MACHINE=`get_qemu_machine ${ARCHEND}`
QEMU_ROOTFS=`get_qemu_disk ${ARCHEND}`
WORK_DIR=`get_scratch ${IID}`
DEVICE=`add_partition "${WORK_DIR}/image.raw"`
mount ${DEVICE} ${WORK_DIR}/image > /dev/null
echo "%(NETWORK_TYPE)s" > ${WORK_DIR}/image/firmadyne/network_type
echo "%(NET_BRIDGE)s" > ${WORK_DIR}/image/firmadyne/net_bridge
echo "%(NET_INTERFACE)s" > ${WORK_DIR}/image/firmadyne/net_interface
sleep 1
sync
umount ${WORK_DIR}/image > /dev/null
del_partition ${DEVICE:0:$((${#DEVICE}-2))}
%(START_NET)s
echo -n "Starting emulation of firmware... "
%(QEMU_ENV_VARS)s ${QEMU} -m 1024 -M ${QEMU_MACHINE} -kernel ${KERNEL} \\
%(QEMU_DISK)s -append "root=${QEMU_ROOTFS} console=ttyS0 nandsim.parts=64,64,64,64,64,64,64,64,64,64 %(QEMU_INIT)s rw debug ignore_loglevel print-fatal-signals=1 FIRMAE_NETWORK=${FIRMAE_NETWORK} FIRMAE_NVRAM=${FIRMAE_NVRAM} FIRMAE_KERNEL=${FIRMAE_KERNEL} FIRMAE_ETC=${FIRMAE_ETC} ${QEMU_DEBUG}" \\
-serial file:${WORK_DIR}/qemu.final.serial.log \\
-serial unix:/tmp/qemu.${IID}.S1,server,nowait \\
-monitor unix:/tmp/qemu.${IID},server,nowait \\
-display none \\
%(QEMU_NETWORK)s | true
%(STOP_NET)s
echo "Done!"
"""
def mountImage(targetDir):
    """Attach the scratch image as a loop device and mount it.

    Returns the loop-device path reported by add_partition so the caller
    can pass it to umountImage() later.
    """
    cmd = 'source firmae.config && add_partition %s/image.raw' % targetDir
    loopFile = subprocess.check_output(['bash', '-c', cmd]).decode().strip()
    os.system('mount %s %s/image > /dev/null' % (loopFile, targetDir))
    time.sleep(1)  # give the kernel a moment to settle the new mount
    return loopFile
def umountImage(targetDir, loopFile):
    """Unmount the scratch image and detach its loop device."""
    os.system('umount %s/image > /dev/null' % targetDir)
    # del_partition wants the base device, i.e. the loop path without
    # its trailing "pN" partition suffix.
    base_device = loopFile.rsplit('p', 1)[0]
    subprocess.check_output(
        ['bash', '-c', 'source firmae.config && del_partition %s' % base_device])
def checkVariable(key):
    """Return True iff environment variable *key* is set to 'true'.

    Uses os.environ.get so a missing variable reads as False instead of
    raising KeyError (the previous os.environ[key] lookup crashed when
    the variable was unset).
    """
    return os.environ.get(key) == 'true'
def stripTimestamps(data):
    """Split kernel-log bytes into lines with firmadyne prefixes removed.

    Each line like b"[ 1.23] firmadyne: msg" becomes b"msg"; lines
    without the prefix pass through unchanged.
    """
    # Raw bytes pattern: "\[" in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning on modern Pythons).
    prog = re.compile(rb"^\[[^\]]*\] firmadyne: ")
    return [prog.sub(b"", line) for line in data.split(b"\n")]
def findMacChanges(data, endianness):
    """Extract (interface, mac) pairs from ioctl_SIOCSIFHWADDR log lines.

    :param data: raw serial/kernel log bytes
    :param endianness: "eb" (big endian) or "el" (little endian); any
        other value raises ValueError up front instead of failing later
        with an unbound ``fmt`` (the original bug).
    :return: list of (iface_name, "aa:bb:cc:dd:ee:ff") tuples
    """
    lines = stripTimestamps(data)
    # Materialize the filter so the debug print shows the actual lines
    # (printing a filter object only shows its repr).
    candidates = [l for l in lines if l.startswith(b"ioctl_SIOCSIFHWADDR")]
    if debug:
        print("Mac Changes %r" % candidates)
    if endianness == "eb":
        fmt = ">I"
    elif endianness == "el":
        fmt = "<I"
    else:
        raise ValueError("invalid endianness: %r" % (endianness,))
    prog = re.compile(rb"^ioctl_SIOCSIFHWADDR\[[^\]]+\]: dev:([^ ]+) mac:0x([0-9a-f]+) 0x([0-9a-f]+)")
    result = []
    for c in candidates:
        g = prog.match(c)
        if not g:
            continue
        (iface, mac0, mac1) = g.groups()
        iface = iface.decode('utf-8')
        # The MAC is logged as two 32-bit words; only the low two bytes of
        # the first word belong to the 6-byte MAC.
        m0 = struct.pack(fmt, int(mac0, 16))[2:]
        m1 = struct.pack(fmt, int(mac1, 16))
        mac = "%02x:%02x:%02x:%02x:%02x:%02x" % struct.unpack("BBBBBB", m0 + m1)
        result.append((iface, mac))
    return result
def findPorts(data, endianness):
    """Extract (proto, ip, port) tuples from inet_bind log lines.

    Duplicate ports are reported once (first binding wins).

    :param endianness: "eb" or "el"; any other value raises ValueError
        up front instead of failing later with an unbound ``fmt``.
    """
    lines = stripTimestamps(data)
    # logs for the inconfig process
    candidates = [l for l in lines if l.startswith(b"inet_bind")]
    if endianness == "eb":
        fmt = ">I"
    elif endianness == "el":
        fmt = "<I"
    else:
        raise ValueError("invalid endianness: %r" % (endianness,))
    prog = re.compile(rb"^inet_bind\[[^\]]+\]: proto:SOCK_(DGRAM|STREAM), ip:port: 0x([0-9a-f]+):([0-9]+)")
    result = []
    seen_ports = set()  # a real set replaces the original dict-used-as-set
    for c in candidates:
        g = prog.match(c)
        if not g:
            continue
        (proto, addr, port) = g.groups()
        proto = "tcp" if proto == b"STREAM" else "udp"
        addr = socket.inet_ntoa(struct.pack(fmt, int(addr, 16)))
        port = int(port.decode())
        if port not in seen_ports:
            result.append((proto, addr, port))
            seen_ports.add(port)
    return result
# Get the network interfaces in the router, except loopback/unconfigured.
def findNonLoInterfaces(data, endianness):
    """Extract (interface, ip) pairs from __inet_insert_ifa log lines,
    skipping 127.0.0.1 and 0.0.0.0.

    :param endianness: "eb" or "el"; any other value raises ValueError
        up front instead of failing later with an unbound ``fmt``.
    """
    lines = stripTimestamps(data)
    # logs for the inconfig process
    candidates = [l for l in lines if l.startswith(b"__inet_insert_ifa")]
    if debug:
        print("Candidate ifaces: %r" % candidates)
    if endianness == "eb":
        fmt = ">I"
    elif endianness == "el":
        fmt = "<I"
    else:
        raise ValueError("invalid endianness: %r" % (endianness,))
    prog = re.compile(rb"^__inet_insert_ifa\[[^\]]+\]: device:([^ ]+) ifa:0x([0-9a-f]+)")
    result = []
    for c in candidates:
        g = prog.match(c)
        if not g:
            continue
        (iface, addr) = g.groups()
        iface = iface.decode('utf-8')
        addr = socket.inet_ntoa(struct.pack(fmt, int(addr, 16)))
        if addr != "127.0.0.1" and addr != "0.0.0.0":
            result.append((iface, addr))
    return result
def findIfacesForBridge(data, brif):
    """Return the names of interfaces enslaved to bridge *brif*.

    The bridge itself is excluded: some images call
    "brctl addif br0 br0" (e.g., 5152).
    """
    lines = stripTimestamps(data)
    candidates = [l for l in lines
                  if l.startswith(b"br_dev_ioctl") or l.startswith(b"br_add_if")]
    patterns = [rb"^br_dev_ioctl\[[^\]]+\]: br:%s dev:(.*)",
                rb"^br_add_if\[[^\]]+\]: br:%s dev:(.*)"]
    progs = [re.compile(p % brif.encode()) for p in patterns]
    result = []
    for line in candidates:
        for prog in progs:
            g = prog.match(line)
            if g:
                iface = g.group(1).decode('utf-8')
                if iface != brif:
                    result.append(iface.strip())
    return result
def findVlanInfoForDev(data, dev):
    """Return the VLAN ids registered on device *dev* in the logs."""
    lines = stripTimestamps(data)
    prog = re.compile(rb"register_vlan_dev\[[^\]]+\]: dev:%s vlan_id:([0-9]+)" % dev.encode())
    results = []
    for line in lines:
        if not line.startswith(b"register_vlan_dev"):
            continue
        g = prog.match(line)
        if g:
            results.append(int(g.group(1)))
    return results
def ifaceNo(dev):
    """Return the interface number embedded in *dev* ("eth2" -> 2), or -1."""
    match = re.match(r"[^0-9]+([0-9]+)", dev)
    if match is None:
        return -1
    return int(match.group(1))
def isDhcpIp(ip):
    """Heuristic: does *ip* look like a DHCP-assigned client address?"""
    # QEMU user-mode networking hands out addresses in 10.0.2.0/24.
    if ip.startswith("10.0.2."):
        return True
    # netgear armel R6900 series
    return ip.endswith(".190")
def qemuArchNetworkConfig(i, tap_num, arch, n, isUserNetwork, ports):
    """Build the QEMU -device/-netdev arguments for one emulated NIC.

    :param i: index of the emulated interface (net<i>)
    :param tap_num: index of the host TAP device to attach to
    :param arch: guest architecture; "arm" uses virtio, others e1000
    :param n: network tuple (ip, dev, vlan, mac, brif), or None to emit a
        placeholder socket netdev that keeps guest NIC numbering stable
    :param isUserNetwork: use QEMU user-mode networking with port
        forwarding instead of a TAP device
    :param ports: list of (proto, ip, port) to forward in user mode
    :return: the command-line fragment as a string

    The unused ``vlan_id``/``mac_str`` locals of the original were removed;
    neither value ever reached the generated command line.
    """
    device = "virtio-net-device" if arch == "arm" else "e1000"
    if not n:
        return "-device %(DEVICE)s,netdev=net%(I)i -netdev socket,id=net%(I)i,listen=:200%(I)i" % {'DEVICE': device, 'I': i}
    if isUserNetwork:
        # User-mode networking: always forward the web ports, then every
        # other observed inet_bind port. (ICMP cannot be forwarded.)
        portfwd = "hostfwd=tcp::80-:80,hostfwd=tcp::443-:443,"
        for (proto, ip, port) in ports:
            if port in [80, 443]:
                continue
            portfwd += "hostfwd=%(TYPE)s::%(PORT)i-:%(PORT)i," % {"TYPE": proto, "PORT": port}
        # portfwd[:-1] drops the trailing comma.
        return "-device %(DEVICE)s,netdev=net%(I)i -netdev user,id=net%(I)i,%(FWD)s" % {'DEVICE': device, 'I': i, "FWD": portfwd[:-1]}
    return "-device %(DEVICE)s,netdev=net%(I)i -netdev tap,id=net%(I)i,ifname=${TAPDEV_%(TAP_NUM)i},script=no" % {'I': i, 'DEVICE': device, 'TAP_NUM': tap_num}
def qemuNetworkConfig(arch, network, isUserNetwork, ports):
    """Assemble the -device/-netdev arguments for all emulated NICs.

    First tries to line up emulated NIC i with the guest interface whose
    name carries number i (via ifaceNo); unmatched slots get placeholder
    socket netdevs, then leftover guest interfaces are assigned by
    position as a best guess.

    :param arch: guest architecture ("arm" or "mips")
    :param network: list of (ip, dev, vlan, mac, brif) tuples
    :param isUserNetwork: forwarded to qemuArchNetworkConfig
    :param ports: forwarded to qemuArchNetworkConfig
    :return: space-joined QEMU command-line fragments
    """
    output = []
    assigned = []
    # ARM with full network emulation exposes a single virtio NIC.
    interfaceNum = 4
    if arch == "arm" and checkVariable("FIRMAE_NETWORK"):
        interfaceNum = 1
    for i in range(0, interfaceNum):
        for j, n in enumerate(network):
            # need to connect the jth emulated network interface to the corresponding host interface
            if i == ifaceNo(n[1]):
                output.append(qemuArchNetworkConfig(i, j, arch, n, isUserNetwork, ports))
                assigned.append(n)
                break
        # otherwise, put placeholder socket connection
        if len(output) <= i:
            output.append(qemuArchNetworkConfig(i, i, arch, None, isUserNetwork, ports))
    # find unassigned interfaces
    for j, n in enumerate(network[:interfaceNum]):
        if n not in assigned:
            # guess assignment
            print("Warning: Unmatched interface: %s" % (n,))
            output[j] = qemuArchNetworkConfig(j, j, arch, n, isUserNetwork, ports)
            assigned.append(n)
    return ' '.join(output)
def buildConfig(brif, iface, vlans, macs):
    """Combine bridge, interface, VLAN and MAC findings into one tuple.

    :param brif: (bridge_name, ip) pair -- there should be only one ip
    :param iface: member interface name, possibly VLAN-tagged ("eth2.2")
    :param vlans: VLAN ids found for the device (first one wins)
    :param macs: list of (device, mac) override pairs
    :return: (ip, dev, vlan_id, mac, bridge_name)
    """
    bridge_name = brif[0]
    ip = brif[1]
    # Strip the VLAN id suffix from the interface name (eth2.2 -> eth2).
    dev = iface.split(".")[0]
    # Prefer a MAC override recorded for the bridge, then for the device.
    overrides = dict(macs)
    if bridge_name in overrides:
        mac = overrides[bridge_name]
    elif dev in overrides:
        mac = overrides[dev]
    else:
        mac = None
    vlan_id = vlans[0] if vlans else None
    return (ip, dev, vlan_id, mac, bridge_name)
def convertToHostIp(ip):
    """Pick a host-side address adjacent to guest *ip* in the same /24.

    Normally the host takes guest-1; when the last octet is 0 or 1 we go
    up instead (some firmware uses .0, e.g. asus FW_RT_AC3100_300438432738).
    """
    octets = [int(part) for part in ip.split(".")]
    octets[3] += -1 if octets[3] > 1 else 1
    return ".".join(str(part) for part in octets)
# iterating the networks
def startNetwork(network):
    """Emit shell commands creating one TAP device (plus optional VLAN
    subinterface) per guest interface, with host-side addressing.

    With FIRMAE_NETWORK enabled a simpler template is used (no explicit
    guest route); otherwise a host route to the guest ip is added.
    """
    template_1 = """
TAPDEV_%(I)i=tap${IID}_%(I)i
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}
echo "Creating TAP device ${TAPDEV_%(I)i}..."
sudo tunctl -t ${TAPDEV_%(I)i} -u ${USER}
"""
    if checkVariable("FIRMAE_NETWORK"):
        template_vlan = """
echo "Initializing VLAN..."
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}.%(VLANID)i
sudo ip link add link ${TAPDEV_%(I)i} name ${HOSTNETDEV_%(I)i} type vlan id %(VLANID)i
sudo ip link set ${TAPDEV_%(I)i} up
"""
        template_2 = """
echo "Bringing up TAP device..."
sudo ip link set ${HOSTNETDEV_%(I)i} up
sudo ip addr add %(HOSTIP)s/24 dev ${HOSTNETDEV_%(I)i}
"""
    else:
        template_vlan = """
echo "Initializing VLAN..."
HOSTNETDEV_%(I)i=${TAPDEV_%(I)i}.%(VLANID)i
sudo ip link add link ${TAPDEV_%(I)i} name ${HOSTNETDEV_%(I)i} type vlan id %(VLANID)i
sudo ip link set ${HOSTNETDEV_%(I)i} up
"""
        template_2 = """
echo "Bringing up TAP device..."
sudo ip link set ${HOSTNETDEV_%(I)i} up
sudo ip addr add %(HOSTIP)s/24 dev ${HOSTNETDEV_%(I)i}
echo "Adding route to %(GUESTIP)s..."
sudo ip route add %(GUESTIP)s via %(GUESTIP)s dev ${HOSTNETDEV_%(I)i}
"""
    chunks = []
    for idx, (ip, dev, vlan, mac, brif) in enumerate(network):
        chunks.append(template_1 % {'I': idx})
        if vlan is not None:
            chunks.append(template_vlan % {'I': idx, 'VLANID': vlan})
        chunks.append(template_2 % {'I': idx, 'HOSTIP': convertToHostIp(ip), 'GUESTIP': ip})
    return '\n'.join(chunks)
def stopNetwork(network):
    """Emit shell commands tearing down the TAP/VLAN devices that
    startNetwork() created, in the same per-interface order."""
    template_1 = """
echo "Bringing down TAP device..."
sudo ip link set ${TAPDEV_%(I)i} down
"""
    template_vlan = """
echo "Removing VLAN..."
sudo ip link delete ${HOSTNETDEV_%(I)i}
"""
    template_2 = """
echo "Deleting TAP device ${TAPDEV_%(I)i}..."
sudo tunctl -d ${TAPDEV_%(I)i}
"""
    chunks = []
    for idx, (ip, dev, vlan, mac, brif) in enumerate(network):
        chunks.append(template_1 % {'I': idx})
        if vlan is not None:
            chunks.append(template_vlan % {'I': idx})
        chunks.append(template_2 % {'I': idx})
    return '\n'.join(chunks)
def qemuCmd(iid, network, ports, network_type, arch, endianness, qemuInitValue, isUserNetwork):
network_bridge = ""
network_iface = ""
if arch == "mips":
qemuEnvVars = ""
qemuDisk = "-drive if=ide,format=raw,file=${IMAGE}"
if endianness != "eb" and endianness != "el":
raise Exception("You didn't specify a valid endianness")
elif arch == "arm":
qemuDisk = "-drive if=none,file=${IMAGE},format=raw,id=rootfs -device virtio-blk-device,drive=rootfs"
if endianness == "el":
qemuEnvVars = "QEMU_AUDIO_DRV=none"
elif endianness == "eb":
raise Exception("armeb currently not supported")
else:
raise Exception("You didn't specify a valid endianness")
else:
raise Exception("Unsupported architecture")
for (ip, dev, vlan, mac, brif) in network:
network_bridge = | |
#
# Copyright (C) 2012 - 2018 <NAME> <<EMAIL>>
# License: MIT
#
# pylint: disable=unused-import,import-error,invalid-name
r"""Public APIs of anyconfig module.
.. versionadded:: 0.9.8
- Added new API load_plugins to [re-]load plugins
.. versionadded:: 0.9.5
- Added pathlib support. Now all of load and dump APIs can process
pathlib.Path object basically.
- 'ignore_missing' keyword option for load APIs are now marked as deprecated
and will be removed soon.
- Allow to load data other than mapping objects for some backends such as
JSON and YAML.
.. versionadded:: 0.8.3
- Added ac_dict keyword option to pass dict factory (any callable like
function or class) to make dict-like object in backend parsers.
- Added ac_query keyword option to query data with JMESPath expression.
- Added experimental query api to query data with JMESPath expression.
- Removed ac_namedtuple keyword option.
- Export :func:`merge`.
- Stop exporting :func:`to_container` which was deprecated and removed.
.. versionadded:: 0.8.2
- Added new API, version to provide version information.
.. versionadded:: 0.8.0
- Removed set_loglevel API as it does not help much.
- Added :func:`open` API to open files with appropriate open mode.
- Added custom exception classes, :class:`UnknownProcessorTypeError` and
:class:`UnknownFileTypeError` to express specific errors.
- Change behavior of the API :func:`find_loader` and others to make them
  fail first and raise exceptions (ValueError, UnknownProcessorTypeError or
  UnknownFileTypeError) as much as possible if wrong parser type for unknown
  file type was given.
.. versionadded:: 0.5.0
- Most keyword arguments passed to APIs are now position independent.
- Added ac_namedtuple parameter to \*load and \*dump APIs.
.. versionchanged:: 0.3
- Replaced `forced_type` optional argument of some public APIs with
`ac_parser` to allow skip of config parser search by passing parser object
previously found and instantiated.
Also removed some optional arguments, `ignore_missing`, `merge` and
`marker`, from definitions of some public APIs as these may not be changed
from default in common use cases.
.. versionchanged:: 0.2
- Now APIs :func:`find_loader`, :func:`single_load`, :func:`multi_load`,
:func:`load` and :func:`dump` can process a file/file-like object or a
list of file/file-like objects instead of a file path or a list of file
paths.
.. versionadded:: 0.2
- Export factory method (create) of anyconfig.mergeabledict.MergeableDict
"""
from __future__ import absolute_import
import os.path
import warnings
# Import some global constants will be re-exported:
from anyconfig.globals import (
LOGGER, IOI_PATH_OBJ, UnknownProcessorTypeError, UnknownFileTypeError
)
import anyconfig.query
import anyconfig.globals
import anyconfig.dicts
import anyconfig.ioinfo
import anyconfig.template
import anyconfig.utils
from anyconfig.dicts import (
MS_REPLACE, MS_NO_REPLACE, MS_DICTS, MS_DICTS_AND_LISTS, MERGE_STRATEGIES,
get, set_, merge # flake8: noqa
)
from anyconfig.backends import Parsers
from anyconfig.schema import validate, gen_schema
def version():
    """
    :return: A list of version info parts as strings, [major, minor,
        release], e.g. ['0', '8', '2'] -- the '.'-split of
        anyconfig.globals.VERSION (note: strings, not a tuple of ints).
    """
    return anyconfig.globals.VERSION.split('.')
def load_plugins():
    """[Re-]load pluggable backend parsers into the Parsers registry."""
    registry = Parsers()
    registry.load_plugins()
def list_types():
    """
    :return: A list of parser type names the Parsers registry supports.
    """
    return Parsers().list_types()
def _try_validate(cnf, schema, **options):
    """
    Validate `cnf` against `schema` when one is given.

    :param cnf: Mapping object represents configuration data
    :param schema: JSON schema object, or a falsy value to skip validation
    :param options: Keyword options passed to :func:`~jsonschema.validate`
    :return: `cnf` unchanged if validation succeeds (or was skipped),
        else None
    """
    if not schema:
        return cnf
    (valid, msg) = validate(cnf, schema, **options)
    if msg:
        LOGGER.warning(msg)
    return cnf if valid else None
def find_loader(path, parser_or_type=None):
    """
    Find out parser object appropriate to load configuration from a file of
    given path or file or file-like object.

    :param path:
        Configuration file path or file or file-like object or pathlib.Path
        object if it's available
    :param parser_or_type:
        Forced configuration parser type or parser object itself
    :return:
        An instance of a class inherits :class:`~anyconfig.backend.base.Parser`
        or None
    :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
    """
    # The previous try/except here caught those three exceptions only to
    # re-raise them unchanged; letting them propagate is equivalent.
    return Parsers().find(path, forced_type=parser_or_type)
def _maybe_schema(**options):
    """
    Load the JSON schema referenced by the 'ac_schema' option, if any.

    :param options: Optional keyword arguments such as
        - ac_template: Assume configuration file may be a template file and
          try to compile it AAR if True
        - ac_context: Mapping object presents context to instantiate template
        - ac_schema: JSON schema file path to validate configuration files
    :return: Mapping object or None means some errors
    """
    schema_path = options.get("ac_schema", None)
    if schema_path is None:
        return None
    # Let the loader auto-detect the schema file's format (it may differ
    # from the config file's own format), and clear ac_schema so loading
    # the schema does not itself trigger validation (infinite loop).
    options["ac_parser"] = None
    options["ac_schema"] = None
    LOGGER.info("Loading schema: %s", schema_path)
    return load(schema_path, **options)
# pylint: disable=redefined-builtin
def open(path, mode=None, ac_parser=None, **options):
    """
    Open given configuration file with appropriate open flag.

    :param path: Configuration file path
    :param mode:
        Can be 'r' and 'rb' for reading (default) or 'w', 'wb' for writing.
        Please note that even if you specify 'r' or 'w', it will be changed to
        'rb' or 'wb' if selected backend, xml and configobj for example, for
        given config file prefer that.
    :param options:
        Optional keyword arguments passed to the internal file opening APIs of
        each backends such like 'buffering' optional parameter passed to
        builtin 'open' function.
    :return: A file object or None on any errors
    :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
    """
    parser = Parsers().find(path, forced_type=ac_parser)
    writing = mode is not None and mode.startswith('w')
    if writing:
        return parser.wopen(path, **options)
    return parser.ropen(path, **options)
def _single_load(input_, ac_parser=None, ac_template=False,
                 ac_context=None, **options):
    """
    Internal worker for :func:`single_load` (no schema/query handling).

    :param input_:
        File path or file or file-like object or pathlib.Path object represents
        the file or a namedtuple `~anyconfig.globals.IOInfo` object represents
        some input to load some data from
    :param ac_parser: Forced parser type or parser object itself
    :param ac_template:
        Assume configuration file may be a template file and try to compile it
        AAR if True
    :param ac_context: A dict presents context to instantiate template
    :param options:
        Optional keyword arguments :func:`single_load` supports except for
        ac_schema and ac_query
    :return: Mapping object
    :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
    """
    ioi = anyconfig.ioinfo.make(input_, forced_type=ac_parser)
    psr = Parsers().find(ioi, forced_type=ac_parser)
    filepath = ioi.path

    # .. note::
    #    This will be kept for backward compatibility until 'ignore_missing'
    #    option is deprecated and removed completely.
    if "ignore_missing" in options:
        warnings.warn("keyword option 'ignore_missing' is deprecated, use "
                      "'ac_ignore_missing' instead", DeprecationWarning)
        options["ac_ignore_missing"] = options["ignore_missing"]

    LOGGER.info("Loading: %s", filepath)
    # Template rendering needs a real filesystem path; fall through to a
    # plain load when rendering is disabled or yields nothing.
    if ac_template and filepath is not None:
        content = anyconfig.template.try_render(filepath=filepath,
                                                ctx=ac_context, **options)
        if content is not None:
            return psr.loads(content, **options)

    return psr.load(ioi, **options)
def single_load(input_, ac_parser=None, ac_template=False,
                ac_context=None, **options):
    """
    Load single configuration file.

    .. note::
       :func:`load` is a preferable alternative and this API should be used
       only if there is a need to emphasize given input `input_` is single
       one.

    :param input_:
        File path or file or file-like object or pathlib.Path object
        represents the file or a namedtuple `~anyconfig.globals.IOInfo`
        object represents some input to load some data from
    :param ac_parser: Forced parser type or parser object itself
    :param ac_template:
        Assume configuration file may be a template file and try to compile
        it AAR if True
    :param ac_context: A dict presents context to instantiate template
    :param options: Optional keyword arguments such as:

        - Options common in :func:`single_load`, :func:`multi_load`,
          :func:`load` and :func:`loads`:

          - ac_dict: callable (function or class) to make mapping objects
            from loaded data if the selected backend can customize that
            such as JSON which supports that with 'object_pairs_hook'
            option, or None. If this option was not given or None, dict or
            :class:`OrderedDict` will be used to make result as mapping
            object depends on if ac_ordered (see below) is True and
            selected backend can keep the order of items loaded. See also
            :meth:`_container_factory` of
            :class:`~anyconfig.backend.base.Parser` for more implementation
            details.
          - ac_ordered: True if you want to keep resuls ordered. Please
            note that order of items may be lost depends on the selected
            backend.
          - ac_schema: JSON schema file path to validate given config file
          - ac_query: JMESPath expression to query data

        - Common backend options:

          - ac_ignore_missing:
            Ignore and just return empty result if given file ``input_``
            does not exist actually.

        - Backend specific options such as {"indent": 2} for JSON backend

    :return: Mapping object
    :raises: ValueError, UnknownProcessorTypeError, UnknownFileTypeError
    """
    cnf = _single_load(input_, ac_parser=ac_parser, ac_template=ac_template,
                       ac_context=ac_context, **options)
    schema = _maybe_schema(ac_template=ac_template, ac_context=ac_context,
                           **options)
    validated = _try_validate(cnf, schema, **options)
    return anyconfig.query.query(validated, **options)
def multi_load(inputs, ac_parser=None, ac_template=False, ac_context=None,
**options):
r"""
Load multiple config files.
.. note::
:func:`load` is a preferable alternative and this API should be used
only if there is a need to emphasize given inputs are multiple ones.
The first argument `inputs` may be a list of a file paths or a glob pattern
| |
f_result = directions.ParaclinicResult(issledovaniye=i, field=gi, value="")
else:
f_result = directions.ParaclinicResult.objects.filter(issledovaniye=i, field=gi)[0]
if f_result.value != content:
f_result.value = content
f_result.save()
if i.doc_save != doc or i.time_save != date or i.doc_confirmation != doc or i.time_confirmation != date:
i.doc_save = doc
i.time_save = date
i.doc_confirmation = doc
i.time_confirmation = date
if i.napravleniye:
i.napravleniye.qr_check_token = None
i.napravleniye.save(update_fields=['qr_check_token'])
i.save()
if not i.napravleniye.visit_who_mark or not i.napravleniye.visit_date:
i.napravleniye.visit_who_mark = doc
i.napravleniye.visit_date = date
i.napravleniye.save()
slog.Log(key=dpk, type=13, body=json.dumps({"content": content, "doc_f": doc_f}), user=None).save()
return JsonResponse({"ok": ok})
def search_template(request):
    """Suggest up to 10 personal (non-global) assignment templates whose title starts with ``q``."""
    q = request.GET.get('q', '')
    result = []
    if q:
        matches = users.AssignmentTemplates.objects.filter(title__istartswith=q, global_template=False).order_by('title')[:10]
        result = [
            {
                "pk": tpl.pk,
                "title": tpl.title,
                "researches": [x.research.pk for x in users.AssignmentResearches.objects.filter(template=tpl, research__hide=False)],
            }
            for tpl in matches
        ]
    return JsonResponse({"result": result, "q": q})
def load_templates(request):
    """List assignment templates; ``type=1`` (the default) selects global templates, anything else personal ones."""
    is_global = request.GET.get('type', '1') == '1'
    result = [
        {
            "pk": tpl.pk,
            "title": tpl.title,
            "researches": [x.research.pk for x in users.AssignmentResearches.objects.filter(template=tpl, research__hide=False)],
        }
        for tpl in users.AssignmentTemplates.objects.filter(global_template=is_global).order_by('title')
    ]
    return JsonResponse({"result": result})
def get_template(request):
    """Return title, visible researches and the global flag of one assignment template (empty form when no pk)."""
    pk = request.GET.get('pk')
    title = ''
    researches = []
    global_template = False
    if pk:
        template = users.AssignmentTemplates.objects.get(pk=pk)
        title = template.title
        global_template = template.global_template
        researches = [x.research_id for x in users.AssignmentResearches.objects.filter(template=template, research__hide=False)]
    return JsonResponse({"title": title, "researches": researches, "global_template": global_template})
@login_required
@group_required("Конструктор: Настройка шаблонов")
def update_template(request):
    """Create (pk == -1) or update an assignment template and sync its research list.

    Expects a JSON body: {"pk": int, "title": str, "researches": [int], "global_template": bool}.
    Returns {"ok": bool}; ok stays False when the payload is incomplete or empty.
    """
    response = {"ok": False}
    request_data = json.loads(request.body)
    pk = request_data.get("pk", -2)
    if pk > -2:
        # Defensive defaults: a payload missing "title"/"researches"/"global_template"
        # previously raised AttributeError/KeyError (HTTP 500); now it yields ok=False.
        title = (request_data.get("title") or "").strip()
        researches = request_data.get("researches") or []
        global_template = request_data.get("global_template", False)
        if len(title) > 0 and len(researches) > 0:
            t = None
            if pk == -1:
                # Create a fresh template, then fall through to the common update path below.
                t = users.AssignmentTemplates(title=title, global_template=global_template)
                t.save()
                pk = t.pk
            if users.AssignmentTemplates.objects.filter(pk=pk).exists():
                t = users.AssignmentTemplates.objects.get(pk=pk)
                t.title = title
                t.global_template = global_template
                t.save()
            if t:
                # Remove researches that were deselected, then add the missing ones.
                users.AssignmentResearches.objects.filter(template=t).exclude(research__pk__in=researches).delete()
                to_add = [x for x in researches if not users.AssignmentResearches.objects.filter(template=t, research__pk=x).exists()]
                for ta in to_add:
                    if DResearches.objects.filter(pk=ta).exists():
                        users.AssignmentResearches(template=t, research=DResearches.objects.get(pk=ta)).save()
                response["ok"] = True
    return JsonResponse(response)
def modules_view(request):
    """Report which optional L2 modules are enabled for the frontend."""
    l2_cards = SettingManager.get("l2_cards_module", default='false', default_type='b')
    return JsonResponse({"l2_cards": l2_cards})
def autocomplete(request):
    """Generic autocomplete endpoint: ``type`` selects the data source, ``value`` is the typed prefix."""
    t = request.GET.get("type")
    v = request.GET.get("value", "")
    limit = int(request.GET.get("limit", 10))
    data = []
    # (model, field) pairs for the straightforward prefix-search sources.
    simple_sources = {
        "harmful": (Card, "harmful_factor"),
        "name": (Individual, "name"),
        "family": (Individual, "family"),
        "patronymic": (Individual, "patronymic"),
        "work_place": (Card, "work_place"),
        "main_diagnosis": (Card, "main_diagnosis"),
        "work_position": (Card, "work_position"),
    }
    if v != "" and limit > 0:
        if t in simple_sources:
            model, field = simple_sources[t]
            rows = model.objects.filter(**{f"{field}__istartswith": v}).distinct(field)[:limit]
            if rows.exists():
                data = [getattr(row, field) for row in rows]
        elif t == "fias":
            data = fias.suggest(v)
        elif t == "fias-extended":
            data = fias.suggest(v, count=limit, detalized=True)
        elif "who_give:" in t:
            # type looks like "who_give:<document_type_pk>"
            tpk = t.split(":")[1]
            rows = Document.objects.filter(document_type__pk=tpk, who_give__istartswith=v).distinct('who_give')[:limit]
            if rows.exists():
                data = [row.who_give for row in rows]
        elif t == "fsli":
            filters = Q(code_fsli__startswith=v) | Q(title__icontains=v) | Q(english_title__icontains=v) | Q(short_title__icontains=v) | Q(synonym__istartswith=v)
            if v == "HGB":
                # Hemoglobin is commonly typed as HGB but stored with the synonym Hb.
                filters = filters | Q(synonym='Hb')
            rows = FsliRefbookTest.objects.filter(filters).filter(active=True).distinct('code_fsli').order_by('code_fsli', 'ordering')[:limit]
            if rows.exists():
                data = [{"code_fsli": x.code_fsli, "short_title": x.short_title, "title": x.title, "sample": x.sample, "synonym": x.synonym, "nmu": x.code_nmu} for x in rows]
        elif t == "drugs":
            drugs = Drugs.objects.filter(Q(mnn__istartswith=v) | Q(trade_name__istartswith=v)).order_by('mnn', 'trade_name').distinct('mnn', 'trade_name')[:limit]
            data = [
                {
                    "title": str(x),
                    "pk": x.pk,
                }
                for x in drugs
            ]
    return JsonResponse({"data": data})
def laborants(request):
    """List laboratory staff for the results form; the first entry is the 'not selected' stub."""
    data = []
    if SettingManager.l2('results_laborants'):
        profiles = users.DoctorProfile.objects.filter(
            user__groups__name="Лаборант", podrazdeleniye__p_type=users.Podrazdeleniya.LABORATORY
        ).order_by('fio')
        data = [{"pk": '-1', "fio": 'Не выбрано'}] + [{"pk": str(p.pk), "fio": p.get_full_fio()} for p in profiles]
    return JsonResponse({"data": data, "doc": request.user.doctorprofile.has_group("Врач-лаборант")})
@login_required
def load_docprofile_by_group(request):
    """Return doctor profiles of the current hospital grouped by department.

    Body JSON: {"group": "*" | group name}; "*" selects every user of the hospital.
    """
    request_data = json.loads(request.body)
    hospital_id = request.user.doctorprofile.get_hospital_id()
    # Keep the query result in `rows`, not `users`: the original local shadowed
    # the module-level `users` import used throughout this module.
    if request_data['group'] == '*':
        rows = users_all(hospital_id)
    else:
        rows = users_by_group(request_data['group'], hospital_id)
    users_grouped = {}
    for row in rows:
        # row layout inferred from usage: (profile_pk, fio, department_pk,
        # department_title, department_alt_title) — TODO confirm against users_all/users_by_group.
        if row[2] not in users_grouped:
            users_grouped[row[2]] = {'id': f"pord-{row[2]}", 'label': row[4] or row[3], 'children': []}
        users_grouped[row[2]]['children'].append({'id': str(row[0]), 'label': row[1], 'podr': row[4] or row[3]})
    return JsonResponse({"users": list(users_grouped.values())})
@login_required
@group_required("Создание и редактирование пользователей")
def users_view(request):
    """List users of a hospital grouped by department, plus all specialities."""
    request_data = json.loads(request.body)
    user_hospital_pk = request.user.doctorprofile.get_hospital_id()
    hospital_pk = request_data.get('selected_hospital', user_hospital_pk)
    can_edit = request.user.is_superuser or request.user.doctorprofile.all_hospitals_users_control or hospital_pk == user_hospital_pk
    data = []
    if can_edit:
        departments = Podrazdeleniya.objects.filter(Q(hospital_id=hospital_pk) | Q(hospital__isnull=True)).exclude(p_type=Podrazdeleniya.HIDDEN, hospital__isnull=True).order_by("title")
        for department in departments:
            profiles = users.DoctorProfile.objects.filter(podrazdeleniye=department, hospital_id=hospital_pk).order_by('fio')
            if not request.user.is_superuser:
                # Non-superusers must not see superuser accounts.
                profiles = profiles.filter(user__is_superuser=False)
            data.append(
                {
                    "pk": department.pk,
                    "title": department.title,
                    "users": [{"pk": p.pk, "fio": p.get_fio(), "username": p.user.username} for p in profiles],
                }
            )
    spec_data = [{"pk": s.pk, "title": s.title} for s in users.Speciality.objects.all().order_by("title")]
    return JsonResponse({"departments": data, "specialities": spec_data})
@login_required
@group_required("Создание и редактирование пользователей")
def user_view(request):
    """Return editable profile data for one user; pk == -1 yields a blank form template.

    Fix: the blank-form branch was missing the ``personal_code`` and
    ``speciality`` keys that the existing-user branch returns, so the two
    payload shapes disagreed. Both branches now share one key set
    (adding keys is backward-compatible for JSON consumers).
    """
    request_data = json.loads(request.body)
    pk = request_data["pk"]
    groups_list = [{"pk": x.pk, "title": x.name} for x in Group.objects.all()]
    if pk == -1:
        data = {
            "family": '',
            "name": '',
            "patronymic": '',
            "username": '',
            "department": '',
            "groups": [],
            "restricted_to_direct": [],
            "users_services": [],
            "groups_list": groups_list,
            "password": '',
            "rmis_location": '',
            "rmis_login": '',
            "rmis_password": '',
            "rmis_resource_id": '',
            "doc_pk": -1,
            "doc_code": -1,
            "personal_code": 0,
            "speciality": None,
            "rmis_employee_id": '',
            "rmis_service_id_time_table": '',
        }
    else:
        doc = users.DoctorProfile.objects.get(pk=pk)
        fio_parts = doc.get_fio_parts()
        data = {
            "family": fio_parts[0],
            "name": fio_parts[1],
            "patronymic": fio_parts[2],
            "username": doc.user.username,
            "department": doc.podrazdeleniye_id,
            "groups": [x.pk for x in doc.user.groups.all()],
            "restricted_to_direct": [x.pk for x in doc.restricted_to_direct.all()],
            "users_services": [x.pk for x in doc.users_services.all()],
            "groups_list": groups_list,
            "password": '',
            "rmis_location": doc.rmis_location or '',
            "rmis_login": doc.rmis_login or '',
            "rmis_resource_id": doc.rmis_resource_id or '',
            # Never echo the stored RMIS password back to the client.
            "rmis_password": '',
            "doc_pk": doc.user.pk,
            "personal_code": doc.personal_code,
            "speciality": doc.specialities_id,
            "rmis_employee_id": doc.rmis_employee_id,
            "rmis_service_id_time_table": doc.rmis_service_id_time_table,
        }
    return JsonResponse({"user": data})
@login_required
@group_required("Создание и редактирование пользователей")
def user_save_view(request):
    """Create (pk == -1) or update a user account and its doctor profile.

    Body JSON: {"pk": int, "hospital_pk": int?, "user_data": {...}}.
    Returns {"ok": bool, "npk": saved profile pk, "message": error text}.
    Editing is allowed only for the requester's own hospital unless the
    requester is a superuser or has all-hospitals control.
    """
    request_data = json.loads(request.body)
    pk = request_data["pk"]
    ok = True
    message = ""
    ud = request_data["user_data"]
    username = ud["username"]
    # Empty/whitespace-only RMIS fields are normalized to None.
    rmis_location = str(ud["rmis_location"]).strip() or None
    rmis_employee_id = str(ud["rmis_employee_id"]).strip() or None
    rmis_service_id_time_table = str(ud["rmis_service_id_time_table"]).strip() or None
    rmis_login = ud["rmis_login"].strip() or None
    rmis_password = ud["rmis_password"].strip() or None
    personal_code = ud.get("personal_code", 0)
    rmis_resource_id = ud["rmis_resource_id"].strip() or None
    user_hospital_pk = request.user.doctorprofile.get_hospital_id()
    hospital_pk = request_data.get('hospital_pk', user_hospital_pk)
    can_edit = request.user.is_superuser or request.user.doctorprofile.all_hospitals_users_control or hospital_pk == user_hospital_pk
    if not can_edit:
        return JsonResponse({"ok": False})
    npk = pk
    if pk == -1:
        # Creating a new account: the username must be free.
        if not User.objects.filter(username=username).exists():
            user = User.objects.create_user(username)
            user.is_active = True
            user.save()
            doc = users.DoctorProfile(user=user, fio=f'{ud["family"]} {ud["name"]} {ud["patronymic"]}')
            doc.save()
            doc.get_fio_parts()
            npk = doc.pk
        else:
            ok = False
            message = "Имя пользователя уже занято"
            doc = None
    else:
        doc = users.DoctorProfile.objects.get(pk=pk)
    # Superuser accounts may only be modified by superusers.
    if pk and doc and (not doc.user.is_superuser or request.user.is_superuser):
        # Password is only changed when a non-empty value was submitted.
        if ud["password"] != '':
            doc.user.set_password(ud["password"])
            doc.user.save()
        # Renaming an existing account also requires the new username to be free.
        if pk != -1 and doc.user.username != ud['username']:
            if not User.objects.filter(username=username).exists():
                doc.user.username = username
                doc.user.save()
            else:
                ok = False
                message = "Имя пользователя уже занято"
        if ok:
            # Replace group membership and per-user research restrictions wholesale.
            doc.user.groups.clear()
            for g in ud["groups"]:
                group = Group.objects.get(pk=g)
                doc.user.groups.add(group)
            doc.user.save()
            doc.restricted_to_direct.clear()
            for r in ud["restricted_to_direct"]:
                doc.restricted_to_direct.add(DResearches.objects.get(pk=r))
            doc.users_services.clear()
            for r in ud["users_services"]:
                doc.users_services.add(DResearches.objects.get(pk=r))
            doc.podrazdeleniye_id = ud['department']
            doc.specialities_id = ud.get('speciality', None)
            doc.family = ud["family"]
            doc.name = ud["name"]
            doc.patronymic = ud["patronymic"]
            doc.fio = f'{ud["family"]} {ud["name"]} {ud["patronymic"]}'
            doc.rmis_location = rmis_location
            doc.rmis_employee_id = rmis_employee_id
            doc.rmis_service_id_time_table = rmis_service_id_time_table
            doc.personal_code = personal_code
            doc.rmis_resource_id = rmis_resource_id
            doc.hospital_id = hospital_pk
            if rmis_login:
                doc.rmis_login = rmis_login
                # An empty password field keeps the stored RMIS password.
                if rmis_password:
                    doc.rmis_password = rmis_password
            else:
                # No login means the RMIS credentials are cleared entirely.
                doc.rmis_login = None
                doc.rmis_password = None
            doc.save()
    return JsonResponse({"ok": ok, "npk": npk, "message": message})
def slot_status(x):
    """Map an RMIS slot record to its local booking state.

    Returns {"code": 0|1|2, "direction": pk or None}:
    0 — no direction linked to the slot, 1 — direction exists, 2 — fully confirmed.
    """
    direction = directions.Napravleniya.objects.filter(rmis_slot_id=x["slot"]).first()
    if direction is None:
        return {"code": 0, "direction": None}
    code = 2 if direction.is_all_confirm() else 1
    return {"code": code, "direction": direction.pk}
@login_required
def user_location(request):
    """Return RMIS reserves for the current doctor's location on the requested date."""
    request_data = json.loads(request.body)
    date = request_data["date"]
    reserves = {}
    rl = request.user.doctorprofile.rmis_location
    if rl and SettingManager.get("l2_rmis_queue", default='false', default_type='b'):
        if rl == 1337 and request.user.is_superuser:
            # Magic location 1337: fake data for superuser testing.
            from rmis_integration.client import Patients
            reserves = Patients.get_fake_reserves()
        else:
            from rmis_integration.client import Client
            client = Client(modules=['patients'])
            reserves = client.patients.get_reserves(date, rl)
        # Annotate every slot with its local booking status.
        reserves = [{**slot, "status": slot_status(slot)} for slot in reserves]
    return JsonResponse({"data": reserves})
@login_required
def user_get_reserve(request):
    """Return details of one RMIS slot plus any linked local direction.

    Body JSON: {"pk": slot id, "patient": patient uid}.
    Responds with the slot data extended with a formatted datetime, or {}
    when the doctor has no RMIS location configured.
    """
    request_data = json.loads(request.body)
    pk = request_data["pk"]
    patient_uid = request_data["patient"]
    rl = request.user.doctorprofile.rmis_location
    if rl:
        if rl == 1337 and request.user.is_superuser:
            # Magic location 1337: fake slot for superuser testing.
            from rmis_integration.client import Patients
            d = Patients.get_fake_slot()
        else:
            from rmis_integration.client import Client
            c = Client(modules=['patients'])
            d = c.patients.get_slot(pk)
        # Attach the local direction (if any) linked to this RMIS slot.
        n = directions.Napravleniya.objects.filter(rmis_slot_id=pk).first()
        d["direction"] = n.pk if n else None
        ds = directions.Issledovaniya.objects.filter(napravleniye=n, napravleniye__isnull=False).first()
        d['direction_service'] = ds.research_id if ds else -1
        if d:
            # NOTE(review): numeric pk appears to carry a one-char type prefix that
            # is stripped here — confirm against the RMIS client's slot id format.
            return JsonResponse({**d, "datetime": d["datetime"].strftime('%d.%m.%Y %H:%M'), "patient_uid": patient_uid, "pk": int(str(pk)[1:]) if str(pk).isdigit() else str(pk)})
    return JsonResponse({})
@login_required
def user_fill_slot(request):
    """Attach a local direction to an RMIS slot, creating one if none exists yet.

    Body JSON: {"slot": {"id": ..., "card_pk": ..., "data": {"direction_service": ...}}}.
    Returns {"direction": pk} of the existing or newly created direction.
    """
    slot = json.loads(request.body).get('slot', {})
    slot_data = slot.get('data', {})
    # Single query via .first() instead of exists() + indexing (two queries).
    existing = directions.Napravleniya.objects.filter(rmis_slot_id=slot["id"]).first()
    if existing is not None:
        direction = existing.pk
    else:
        result = directions.Napravleniya.gen_napravleniya_by_issledovaniya(
            slot["card_pk"],
            "",
            "ОМС",
            "",
            None,
            request.user.doctorprofile,
            {-1: [slot_data["direction_service"]]},
            {},
            False,
            {},
            vich_code="",
            count=1,
            discount=0,
            parent_iss=None,
            rmis_slot=slot["id"],
        )
        direction = result["list_id"][0]
    return JsonResponse({"direction": direction})
@login_required
def | |
not found!
return None
def get_color(self, type_category, type_name):
    """Return a PlantUML color suffix (' #<color>') for the type, or '' when none is configured."""
    color = self.get_representation(type_category, type_name, 'color')
    return '' if color is None else ' #%s' % color
def get_label(self, type_category, node_name, type_name):
    """Build a PlantUML node label: icon plus name when an icon is configured, else 'name: ShortType'."""
    icon = self.get_representation(type_category, type_name, 'icon')
    if icon is None:
        return '%s: %s' % (node_name, short_type_name(type_name))
    return '<img:%s>\\n%s' % (icon, node_name)
def generate_UML2_component_diagram(self, topology_template, with_relationships):
    """Emit a PlantUML UML2 component diagram for a TOSCA topology template.

    :param topology_template: parsed TOSCA topology_template mapping
    :param with_relationships: when True, requirement relationships are drawn
        as their own <<relationship>> components between nodes; when False,
        nodes connect directly to target capabilities.

    Output is written line by line via self.generate(). Emission order:
    header, substitution-mappings wrapper (opens a '{' closed later), node
    components + capability interfaces, requirement connections, policies,
    then the substitution-mappings boundary wiring and '@enduml'.
    """
    self.generate('@startuml')
    self.generate('skinparam componentStyle uml2')
    if with_relationships:
        self.generate('skinparam component {')
        self.generate(' backgroundColor<<relationship>> White')
        self.generate('}')
    self.generate()
    substitution_mappings = topology_template.get(SUBSTITUTION_MAPPINGS)
    if substitution_mappings:
        substitution_mappings_uml_id = SUBSTITUTION_MAPPINGS
        substitution_mappings_node_type = substitution_mappings.get(NODE_TYPE)
        merged_substitution_mappings_type = self.type_system.merge_node_type(substitution_mappings_node_type)
        for capability_name, capability_yaml in get_dict(merged_substitution_mappings_type, CAPABILITIES).items():
            capability_uml_id = substitution_mappings_uml_id + '_' + normalize_name(capability_name)
            # Declare an UML interface for the substitution_mappings capability.
            self.generate('interface "', capability_name, '" as ', capability_uml_id, sep='')
        # Open the substitution-mappings component; its '}' is generated near the end.
        self.generate('component ": ', substitution_mappings_node_type, '" <<node>> as ', substitution_mappings_uml_id,
                      self.get_color('node', substitution_mappings_node_type), ' {', sep='')
    relationship_templates = get_dict(topology_template, RELATIONSHIP_TEMPLATES)
    # Tracks interface declarations already emitted so none is declared twice.
    already_generated_interfaces = {}
    # Iterate over all node templates.
    node_templates = get_dict(topology_template, NODE_TEMPLATES)
    for node_template_name, node_template_yaml in node_templates.items():
        node_template_type = node_template_yaml.get(TYPE)
        merged_node_template_type = self.type_system.merge_node_type(node_template_type)
        node_template_uml_id = 'node_' + normalize_name(node_template_name)
        # Declare an UML component for the node template.
        icon = self.get_representation('node', node_template_type, 'icon')
        if icon is not None:
            self.generate('component "<img:', icon, '>" <<node>> as ', node_template_uml_id, self.get_color('node', node_template_type), ' {', sep='')
            self.generate('label "**', node_template_name, '**" as ', node_template_uml_id, '_label', sep='')
            self.generate('}')
        else:
            self.generate('component "', self.get_label('node', node_template_name, node_template_type), '" <<node>> as ', node_template_uml_id, self.get_color('node', node_template_type), sep='')
        # Iterate over all capabilities of the node template.
        for capability_name, capability_yaml in get_dict(merged_node_template_type, CAPABILITIES).items():
            if type(capability_yaml) == dict:
                capability_occurrences = capability_yaml.get(OCCURRENCES)
            else:
                capability_occurrences = None
            # Without relationships, only capabilities with a mandatory occurrence are drawn up front.
            if with_relationships or (capability_occurrences and capability_occurrences[0] > 0):
                capability_uml_id = node_template_uml_id + '_' + normalize_name(capability_name)
                # Declare an UML interface for the node template capability.
                self.generate('interface "', capability_name, '" as ', capability_uml_id, sep='')
                # Connect the capability UML interface to the node template UML component.
                self.generate(capability_uml_id, '--', node_template_uml_id)
                already_generated_interfaces[capability_uml_id] = capability_uml_id
        if with_relationships:
            # Iterate over all requirements of the node template.
            index = 0
            for requirement in get_list(node_template_yaml, REQUIREMENTS):
                for requirement_name, requirement_yaml in requirement.items():
                    requirement_uml_id = node_template_uml_id + '_' + normalize_name(requirement_name) + '_relationship' + str(index)
                    index = index + 1
                    requirement_node = get_requirement_node_template(requirement_yaml)
                    if requirement_node == None:
                        continue
                    relationship_component_name = '' # No name.
                    relationship_component_type = None
                    # Resolve the relationship type: inline dict, named relationship template,
                    # or a bare type name.
                    if type(requirement_yaml) == dict:
                        requirement_relationship = syntax.get_requirement_relationship(requirement_yaml)
                        if type(requirement_relationship) == dict:
                            relationship_component_type = syntax.get_relationship_type(requirement_relationship)
                        else:
                            relationship_template = relationship_templates.get(requirement_relationship)
                            if relationship_template:
                                relationship_component_name = requirement_relationship
                                relationship_component_type = relationship_template.get(TYPE)
                            else:
                                relationship_component_type = requirement_relationship
                    if relationship_component_type == None:
                        # Fall back to the relationship declared on the node type's requirement.
                        requirement = get_dict(merged_node_template_type, REQUIREMENTS).get(requirement_name, {})
                        tmp = syntax.get_requirement_relationship(requirement)
                        relationship_component_type = syntax.get_relationship_type(tmp)
                    if relationship_component_type == None:
                        continue
                    # Declare an UML component for the node template requirement relationship.
                    self.generate('component "', relationship_component_name, ': ', short_type_name(relationship_component_type), '" <<relationship>> as ', requirement_uml_id, sep='')
                    # Declare an UML interface for the node template requirement relationship.
                    self.generate('interface " " as ', requirement_uml_id, '_source', sep='')
                    # Connect the UML interface to the relationship UML component.
                    self.generate(requirement_uml_id, '_source', ' -- ', requirement_uml_id, sep='')
                    # Connect the node template UML component to the relationship UML component.
                    self.generate(node_template_uml_id, ' --( ', requirement_uml_id, '_source', ' : ', requirement_name, sep='')
    self.generate()
    # Iterate over all node templates.
    for node_template_name, node_template_yaml in node_templates.items():
        node_template_uml_id = 'node_' + normalize_name(node_template_name)
        node_template_type = node_template_yaml.get(TYPE)
        merged_node_template_type = self.type_system.merge_node_type(node_template_type)
        # Iterate over all requirements of the node template.
        index = 0
        for requirement in get_list(node_template_yaml, REQUIREMENTS):
            for requirement_name, requirement_yaml in requirement.items():
                source_uml_id = node_template_uml_id
                if with_relationships:
                    # The connection starts from the relationship component declared above.
                    source_uml_id = source_uml_id + '_' + normalize_name(requirement_name) + '_relationship' + str(index)
                    index = index + 1
                requirement_node = get_requirement_node_template(requirement_yaml)
                if requirement_node == None:
                    continue
                requirement_node_template = node_templates.get(requirement_node)
                if requirement_node_template == None:
                    continue
                requirement_node_type_name = requirement_node_template.get(TYPE)
                if requirement_node_type_name == None:
                    continue
                # Find a capability of the target node that satisfies the requirement.
                requirement_capability = syntax.get_requirement_capability(get_dict(merged_node_template_type, REQUIREMENTS).get(requirement_name))
                capability_found = False
                for ( capability_name, capability_yaml ) in get_dict(self.type_system.merge_node_type(requirement_node_type_name), CAPABILITIES).items():
                    if self.type_system.is_derived_from(syntax.get_capability_type(capability_yaml), requirement_capability):
                        capability_found = True
                        break
                if capability_found:
                    target_node_uml_id = 'node_' + normalize_name(requirement_node)
                    target_capability_uml_id = target_node_uml_id + '_' + normalize_name(capability_name)
                    if with_relationships:
                        self.generate(source_uml_id, ' --( ', target_capability_uml_id, sep='')
                    else:
                        if already_generated_interfaces.get(target_capability_uml_id) == None:
                            self.generate('interface "', capability_name, '" as ', target_capability_uml_id, sep='')
                            # Connect the capability UML interface to the node template UML component.
                            self.generate(target_capability_uml_id, '--', target_node_uml_id)
                            already_generated_interfaces[target_capability_uml_id] = target_capability_uml_id
                        self.generate(source_uml_id, ' "' + requirement_name + '" --( ', target_capability_uml_id, sep='')
    # generate UML representation for TOSCA policies
    for policy in topology_template.get(POLICIES, []):
        for policy_name, policy_yaml in policy.items():
            policy_type = policy_yaml.get(TYPE)
            policy_uml_id = 'policy_' + normalize_name(policy_name)
            self.generate('agent %s <<policy>> #AliceBlue [' % policy_uml_id)
            if policy_type in [ 'tosca.policies.nfv.VnfIndicator' ]:
                self.generate(policy_name, ': ', short_type_name(policy_type), sep='')
            else:
                self.generate(short_type_name(policy_type), sep='')
            self.generate('---')
            properties = policy_yaml.get('properties', {})
            if len(properties) > 0:
                self.generate('.. properties ..')
                for prop_name, prop_value in properties.items():
                    self.generate(prop_name, '=', self.stringify_value(prop_value))
            triggers = policy_yaml.get('triggers', {})
            if len(triggers) > 0:
                self.generate('.. triggers ..')
                for trigger_name, trigger in triggers.items():
                    # Each trigger links to its own generated sequence-diagram SVG.
                    filename = self.get_filename(self.tosca_service_template)
                    filename = filename[:filename.rfind('.')] + '-' + policy_name + '-' + trigger_name + '-sequence-diagram.svg'
                    self.generate('[[%s %s]]' % (filename, trigger_name))
            self.generate(']')
            # Connect the policy to each of its targets (node templates or group members).
            for target in policy_yaml.get(TARGETS, []):
                if node_templates.get(target) != None:
                    target_uml_id = 'node_' + normalize_name(target)
                    self.generate(policy_uml_id, ' -up-> ', target_uml_id, sep='')
                else:
                    target_group = topology_template.get(GROUPS, {}).get(target)
                    if target_group is None:
                        self.error(target + " - undefined node template or group")
                        continue
                    for member in target_group.get(MEMBERS, []):
                        member_uml_id = 'node_' + normalize_name(member)
                        self.generate(policy_uml_id, ' -up-> ', member_uml_id, sep='')
    if substitution_mappings:
        # Wire the substitution-mappings boundary: exposed capabilities first.
        capabilities = get_dict(substitution_mappings, CAPABILITIES)
        for capability_name, capability_yaml in get_dict(merged_substitution_mappings_type, CAPABILITIES).items():
            capability = capabilities.get(capability_name)
            if capability != None:
                if type(capability) != list:
                    continue # TODO when capability is not a list
                target_node_uml_id = 'node_' + normalize_name(capability[0])
                target_uml_id = target_node_uml_id + '_' + normalize_name(capability[1])
                if already_generated_interfaces.get(target_uml_id) == None:
                    self.generate('interface "', normalize_name(capability[1]), '" as ', target_uml_id, sep='')
                    # Connect the capability UML interface to the node template UML component.
                    self.generate(target_uml_id, '--', target_node_uml_id)
                    already_generated_interfaces[target_uml_id] = target_uml_id
        # Close the substitution-mappings component opened near the top.
        self.generate('}')
        for capability_name, capability_yaml in get_dict(merged_substitution_mappings_type, CAPABILITIES).items():
            capability_uml_id = substitution_mappings_uml_id + '_' + normalize_name(capability_name)
            # Connect the capability UML interface to the node template UML component.
            capability = capabilities.get(capability_name)
            if capability != None:
                if type(capability) != list:
                    continue # TODO when capability is not a list
                target_node_uml_id = 'node_' + normalize_name(capability[0])
                target_uml_id = target_node_uml_id + '_' + normalize_name(capability[1])
                self.generate(capability_uml_id, '--(', target_uml_id)
            else:
                self.generate(capability_uml_id, '--', substitution_mappings_uml_id)
        index = 0
        # Expose the substitution-mappings requirements as boundary interfaces.
        requirements = syntax.get_substitution_mappings_requirements(substitution_mappings)
        for requirement_name, requirement_def in merged_substitution_mappings_type.get(REQUIREMENTS, {}).items():
            interface_uml_id = substitution_mappings_uml_id + '_' + normalize_name(requirement_name) + str(index)
            index = index + 1
            self.generate('interface "', requirement_name, '" as ', interface_uml_id)
            requirement_yaml = requirements.get(requirement_name)
            if requirement_yaml:
                source_uml_id = 'node_' + normalize_name(requirement_yaml[0])
                self.generate(source_uml_id, ' "' + requirement_yaml[1] + '" --( ', interface_uml_id, sep='')
            else:
                self.generate(substitution_mappings_uml_id, ' --( ', interface_uml_id)
    self.generate('@enduml')
def generate_UML2_deployment_diagram(self, topology_template):
self.generate('@startuml')
self.generate('skinparam componentStyle uml2')
self.generate('allowmixing')
self.generate()
node_templates = get_dict(topology_template, NODE_TEMPLATES)
non_containeds = list(node_templates.keys())
containers = {}
contained_containers = []
# Iterate over all node templates to find containers.
for node_template_name, node_template_yaml in node_templates.items():
merged_node_template_type = self.type_system.merge_node_type(node_template_yaml.get(TYPE))
# Iterate over all capabilities of the node template type.
for capability_name, capability_yaml in get_dict(merged_node_template_type, CAPABILITIES).items():
capability_type = get_capability_type(capability_yaml)
if self.type_system.is_derived_from(capability_type, 'tosca.capabilities.Container'):
containers[node_template_name] = containers.get(node_template_name, dict())
try:
non_containeds.remove(node_template_name)
except ValueError:
pass
# Iterate over all node templates to find containeds.
for node_template_name, node_template_yaml in node_templates.items():
merged_node_template_type = self.type_system.merge_node_type(node_template_yaml.get(TYPE))
# Iterate over all requirements of the node template.
for requirement in get_list(node_template_yaml, REQUIREMENTS):
for requirement_name, requirement_yaml in requirement.items():
requirement_definition = get_dict(merged_node_template_type, REQUIREMENTS).get(requirement_name)
requirement_relationship = syntax.get_requirement_relationship(requirement_definition)
requirement_relationship_type = syntax.get_relationship_type(requirement_relationship)
if self.type_system.is_derived_from(requirement_relationship_type, 'tosca.relationships.HostedOn'):
requirement_node = get_requirement_node_template(requirement_yaml)
if requirement_node != None:
try:
containers[requirement_node][node_template_name] = containers.get(node_template_name, dict())
except KeyError as e:
self.error(e)
contained_containers.append(node_template_name)
try:
non_containeds.remove(node_template_name)
except ValueError:
pass
# TODO: Remove containers contained by other containers.
for contained_container_name in contained_containers:
if containers.get(contained_container_name) != None:
del containers[contained_container_name]
# Iterate over all containers.
def get_uml2_kind(tosca_type):
uml2_kind = 'component'
for tt, kind in self.configuration.get(UML2, 'kinds').items():
if self.type_system.is_derived_from(tosca_type, tt):
uml2_kind = kind
break
return uml2_kind
def generate_container(self, container_name, containeds):
node_template = node_templates.get(container_name)
node_template_type = node_template.get(TYPE)
uml2_kind = get_uml2_kind(node_template_type)
node_template_artifacts = get_dict(node_template, ARTIFACTS)
properties = self.get_representation('node', node_template_type, 'properties')
if len(containeds) == 0 and len(node_template_artifacts) == 0 and properties is None:
icon = self.get_representation('node', node_template_type, 'icon')
if icon is not None:
self.generate(uml2_kind, ' "<img:', icon, '>" as node_', normalize_name(container_name), self.get_color('node', node_template_type), ' {', sep='')
self.generate('label "**', container_name, '**" as ', normalize_name(container_name), '_label', sep='')
self.generate('}')
else:
self.generate(uml2_kind, ' "', self.get_label('node', container_name, node_template_type), '" as node_', normalize_name(container_name), | |
-2, -3, -3, -2, -3, -3]
victimization -2.3 0.78102 [-1, -3, -3, -2, -3, -1, -3, -2, -3, -2]
victimizations -1.5 1.85742 [-2, -3, -3, -1, -2, 2, 2, -2, -3, -3]
victimize -2.5 0.67082 [-3, -2, -4, -2, -2, -2, -2, -3, -3, -2]
victimized -1.8 1.53623 [-2, -1, -3, -3, -3, 1, 1, -2, -3, -3]
victimizer -1.8 1.72047 [-3, -2, -3, -3, -2, 2, 1, -2, -3, -3]
victimizers -1.6 1.68523 [-3, -2, -3, 1, -3, -1, -2, 2, -2, -3]
victimizes -1.5 1.9105 [-2, -1, -4, -3, -2, 2, 2, -2, -3, -2]
victimizing -2.6 0.4899 [-2, -3, -3, -3, -2, -3, -2, -3, -2, -3]
victimless 0.6 0.4899 [0, 1, 0, 1, 1, 0, 0, 1, 1, 1]
victimologies -0.6 1.35647 [-2, 0, -2, -1, 0, 2, 1, 0, -2, -2]
victimologist -0.5 0.67082 [0, -1, -1, 0, 0, 0, 0, -1, -2, 0]
victimologists -0.4 0.91652 [0, 1, 0, -2, -2, 0, 0, 0, -1, 0]
victimology 0.3 1.00499 [0, 0, 0, -1, 0, 1, 0, 0, 3, 0]
victims -1.3 2.05183 [-3, -1, -3, -3, -3, 2, 1, -2, 2, -3]
vigilant 0.7 0.9 [0, 2, 0, 2, 0, -1, 1, 1, 1, 1]
vigor 1.1 1.37477 [0, 3, 2, 1, 2, 2, 0, 1, -2, 2]
vigorish -0.4 1.2 [0, -3, -1, -1, 0, -1, 0, 0, 2, 0]
vigorishes 0.4 1.56205 [0, 0, 2, 1, 2, 0, -2, -2, 0, 3]
vigoroso 1.5 0.67082 [2, 0, 1, 2, 2, 1, 2, 1, 2, 2]
vigorously 0.5 0.92195 [0, 0, 0, 1, 2, 0, 2, -1, 1, 0]
vigorousness 0.4 1.11355 [0, 3, 0, -1, -1, 0, 0, 1, 1, 1]
vigors 1.0 1.0 [0, 1, 0, 1, 0, 0, 1, 3, 2, 2]
vigour 0.9 0.9434 [0, 2, 2, 2, 1, 1, 0, 1, -1, 1]
vigours 0.4 1.68523 [-4, 1, 1, 1, -1, 1, 2, 2, 0, 1]
vile -3.1 0.83066 [-4, -2, -4, -4, -2, -3, -3, -3, -2, -4]
villain -2.6 0.4899 [-3, -2, -2, -3, -2, -2, -3, -3, -3, -3]
villainess -2.9 0.53852 [-3, -2, -3, -4, -3, -2, -3, -3, -3, -3]
villainesses -2.0 1.18322 [-2, -3, -2, -2, -2, -3, 1, -3, -1, -3]
villainies -2.3 1.00499 [-3, -2, -3, -3, -3, -1, -3, -2, -3, 0]
villainous -2.0 0.63246 [-3, -2, -1, -2, -2, -2, -2, -1, -2, -3]
villainously -2.9 0.53852 [-3, -3, -3, -3, -3, -4, -2, -3, -2, -3]
villainousness -2.7 0.9 [-4, -3, -4, -3, -1, -3, -2, -2, -2, -3]
villains -3.4 0.91652 [-4, -3, -4, -3, -4, -3, -4, -4, -1, -4]
villainy -2.6 0.4899 [-3, -2, -3, -3, -2, -2, -2, -3, -3, -3]
vindicate 0.3 1.95192 [2, -1, -2, -3, -1, 3, 0, 3, 1, 1]
vindicated 1.8 1.16619 [1, 3, -1, 2, 2, 1, 3, 2, 2, 3]
vindicates 1.6 0.66332 [2, 3, 2, 2, 2, 1, 1, 1, 1, 1]
vindicating -1.1 1.97231 [-3, -2, 2, -3, -2, 1, 1, 1, -3, -3]
violate -2.2 0.6 [-3, -3, -2, -3, -2, -2, -1, -2, -2, -2]
violated -2.4 0.66332 [-3, -3, -3, -3, -2, -3, -1, -2, -2, -2]
violater -2.6 0.91652 [-3, -3, -4, -4, -2, -3, -2, -2, -2, -1]
violaters -2.4 0.8 [-1, -3, -1, -2, -3, -3, -3, -2, -3, -3]
violates -2.3 0.9 [-3, -2, -4, -3, -2, -3, -2, -2, -1, -1]
violating -2.5 0.92195 [-2, -3, -3, -1, -3, -2, -4, -1, -3, -3]
violation -2.2 0.9798 [-3, -1, -1, -3, -3, -2, -1, -2, -2, -4]
violations -2.4 0.66332 [-2, -2, -2, -3, -2, -4, -2, -2, -2, -3]
violative -2.4 0.66332 [-2, -3, -3, -3, -1, -3, -2, -2, -2, -3]
violator -2.4 1.0198 [-1, -4, -3, -2, -3, -2, -2, -1, -4, -2]
violators -1.9 1.51327 [-2, 2, -3, -4, -1, -2, -3, -2, -2, -2]
violence -3.1 0.53852 [-2, -3, -3, -3, -3, -4, -4, -3, -3, -3]
violent -2.9 0.53852 [-3, -3, -3, -3, -3, -4, -3, -2, -2, -3]
violently -2.8 0.74833 [-3, -3, -2, -3, -3, -3, -4, -1, -3, -3]
virtue 1.8 0.74833 [1, 2, 3, 2, 2, 2, 3, 1, 1, 1]
virtueless -1.4 1.0198 [-2, 0, -2, -3, -1, -3, -1, -1, -1, 0]
virtues 1.5 0.80623 [2, 2, 2, 1, 0, 1, 3, 1, 2, 1]
virtuosa 1.7 1.48661 [0, 4, 2, 3, 2, 3, 0, 2, -1, 2]
virtuosas 1.8 0.87178 [2, 3, 1, 2, 1, 0, 3, 2, 2, 2]
virtuose 1.0 1.41421 [2, 1, 0, 2, 1, -1, 1, 1, -1, 4]
virtuosi 0.9 1.37477 [2, 0, 0, 2, 1, 0, 0, 1, -1, 4]
virtuosic 2.2 1.07703 [2, 2, 4, 1, 0, 3, 3, 2, 2, 3]
virtuosity 2.1 0.83066 [3, 3, 3, 2, 1, 3, 2, 2, 1, 1]
virtuoso 2.0 1.0 [2, 2, 3, 2, 1, 0, 3, 3, 3, 1]
virtuosos 1.8 1.16619 [2, 3, 1, -1, 2, 1, 3, 3, 2, 2]
virtuous 2.4 1.2 [0, 3, 2, 1, 3, 4, 2, 2, 4, 3]
virtuously 1.8 1.16619 [3, 2, 3, 1, 3, 1, -1, 2, 2, 2]
virtuousness 2.0 1.09545 [3, 4, 2, 2, 0, 1, 2, 3, 2, 1]
virulent -2.7 0.64031 [-3, -2, -4, -2, -3, -3, -2, -3, -2, -3]
vision 1.0 1.0 [0, 0, 0, 2, 1, 3, 2, 1, 1, 0]
visionary 2.4 1.0198 [1, 3, 1, 2, 4, 1, 3, 3, 3, 3]
visioning 1.1 0.9434 [1, 2, 0, 0, 3, 0, 1, 1, 2, 1]
visions 0.9 0.9434 [2, 0, 0, 0, 0, 1, 2, 2, 0, 2]
vital 1.2 1.46969 [-3, 2, 1, 1, 2, 2, 1, 2, 2, 2]
vitalise 1.1 0.9434 [1, 2, 0, 2, 2, 0, 2, 0, 2, 0]
vitalised 0.6 1.49666 [1, -2, 2, 0, 2, 1, -2, 2, 0, 2]
vitalises 1.1 1.3 [1, 2, 2, 0, 2, 2, -2, 2, 0, 2]
vitalising 2.1 0.53852 [2, 2, 3, 2, 3, 2, 2, 1, 2, 2]
vitalism 0.2 0.6 [0, 0, 0, 0, 0, 0, 0, 0, 2, 0]
vitalist 0.3 0.64031 [0, 0, 0, 0, 0, 0, 1, 0, 2, 0]
vitalists 0.3 1.34536 [2, -3, 1, 0, 0, 0, 1, 0, 2, 0]
vitalities 1.2 0.87178 [2, 1, 3, 1, 1, 0, 2, 1, 0, 1]
vitality 1.3 0.9 [3, 2, 0, 1, 1, 1, 2, 0, 2, 1]
vitalization 1.6 0.91652 [2, 3, 3, 1, 2, 1, 0, 2, 1, 1]
vitalizations 0.8 0.74833 [0, 1, 1, 2, 0, 0, 0, 2, 1, 1]
vitalize 1.6 0.66332 [3, 2, 2, 1, 2, 1, 1, 1, 2, 1]
vitalized 1.5 0.67082 [1, 1, 2, 2, 0, 2, 1, 2, 2, 2]
vitalizes 1.4 0.4899 [2, 1, 1, 2, 1, 2, 1, 2, 1, 1]
vitalizing 1.3 0.9 [3, 1, 0, 0, 1, 1, 2, 2, 2, 1]
vitally 1.1 0.53852 [0, 2, 1, 1, 1, 1, 2, 1, 1, 1]
vitals 1.1 0.7 [1, 0, 1, 2, 2, 0, 2, 1, 1, 1]
vitamin 1.2 0.87178 [3, 1, 0, 0, 1, 2, 1, 2, 1, 1]
vitriolic -2.1 0.83066 [-2, -2, -2, -4, -3, -2, -1, -1, -2, -2]
vivacious 1.8 0.9798 [0, 1, 3, 3, 3, 2, 2, 2, 1, 1]
vociferous -0.8 0.9798 [1, -2, -1, -2, -1, -1, 1, -1, -1, -1]
vulnerabilities -0.6 1.49666 [0, -3, -1, -1, -1, 2, -2, 2, -1, -1]
vulnerability -0.9 1.75784 [1, -1, -1, -2, 1, -3, -1, 2, -4, -1]
vulnerable -0.9 1.37477 [-2, -2, 2, -1, -3, -1, 1, -1, -1, -1]
vulnerableness -1.1 1.04403 [-1, -1, -2, -3, -1, 1, 0, -2, -1, -1]
vulnerably -1.2 1.46969 [-2, -2, 2, -1, -2, -3, 1, -1, -2, -2]
vulture -2.0 0.89443 [-2, -3, -1, -2, -1, -1, -1, -3, -3, -3]
vultures -1.3 1.55242 [-2, -3, -2, 2, -2, -2, -3, -1, -1, 1]
w00t 2.2 1.32665 [3, 2, 3, 2, 0, 4, 0, 4, 2, 2]
walkout -1.3 0.9 [-1, -2, -2, -1, -1, -2, -1, 1, -2, -2]
walkouts -0.7 1.00499 [-2, -2, -1, 0, -1, -1, -1, 1, -1, 1]
wanker -2.5 0.67082 [-2, -3, -3, -2, -3, -3, -2, -1, -3, -3]
want 0.3 1.18743 [0, -2, 0, 1, 2, -1, 2, 1, 0, 0]
war -2.9 1.13578 [-1, -3, -4, -4, -3, -1, -2, -3, -4, -4]
warfare -1.2 1.16619 [-2, 0, -1, -2, 0, -3, 1, -2, -2, -1]
warfares -1.8 0.87178 [-2, -1, -2, -2, -3, -1, -3, 0, -2, -2]
warm 0.9 0.7 [1, 0, 0, 1, 1, 2, 1, 2, 1, 0]
warmblooded 0.2 0.6 [0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
warmed 1.1 0.53852 [2, 0, 1, 1, 1, 2, 1, 1, 1, 1]
warmer 1.2 0.9798 [2, 2, 2, 1, -1, 0, 2, 1, 1, 2]
warmers 1.0 0.44721 [1, 1, 1, 2, 1, 1, 1, 0, 1, 1]
warmest 1.7 1.34536 [3, 2, 1, 2, 3, 2, 2, 2, -2, 2]
warmhearted 1.8 0.6 [3, 2, 2, 2, 2, 1, 2, 1, 1, 2]
warmheartedness 2.7 0.64031 [2, 4, 3, 2, 3, 3, 3, 2, 2, 3]
warming 0.6 0.8 [0, 0, 2, 2, 1, 1, 0, 0, 0, 0]
warmish 1.4 0.66332 [1, 3, 2, 1, 1, 1, 1, 2, 1, 1]
warmly 1.7 0.64031 [2, 1, 2, 1, 2, 1, 2, 1, 2, 3]
warmness 1.5 0.92195 [3, 1, 2, 1, 0, 1, 3, 2, 1, 1]
warmonger -2.9 1.13578 [-3, 0, -4, -4, -2, -3, -4, -3, -3, -3]
warmongering -2.5 0.67082 [-2, -3, -3, -1, -2, -3, -3, -2, -3, -3]
warmongers -2.8 0.87178 [-2, -3, -4, -4, -3, -1, -2, -3, -3, -3]
warmouth 0.4 0.66332 [0, 0, 2, 0, 0, 0, 0, 1, 1, 0]
warmouths -0.8 1.32665 [-1, -1, -2, -1, -2, -2, 0, 2, 1, -2]
warms 1.1 0.7 [2, 2, 1, 2, 1, 0, 1, 1, 0, 1]
warmth 2.0 0.44721 [2, 2, 2, 2, 1, 2, 2, 3, 2, 2]
warmup 0.4 0.66332 [0, 2, 0, 1, 1, 0, 0, 0, 0, 0]
warmups 0.8 0.9798 [0, 2, 0, 0, 0, 2, 2, 0, 2, 0]
warn -0.4 1.35647 [0, -1, 0, -2, -1, 2, 1, -2, 1, -2]
warned -1.1 0.53852 [-1, -1, -2, 0, -1, -1, -1, -2, -1, -1]
warning -1.4 1.0198 [-2, -1, -1, -1, -2, 0, -4, -1, -1, -1]
warnings -1.2 0.9798 [-2, -1, 0, -1, -2, 0, 0, -1, -3, -2]
warns -0.4 1.0198 [1, -1, -1, 1, -1, -1, -2, 1, 0, -1]
warred -2.4 0.8 [-2, -2, -4, -1, -3, -2, -3, -2, -3, -2]
warring -1.9 1.04403 [-3, -3, 0, -1, -2, -2, -3, -1, -1, -3]
wars -2.6 0.8 [-2, -3, -1, -3, -2, -4, -3, -3, -2, -3]
warsaw -0.1 0.3 [0, -1, 0, 0, 0, 0, 0, 0, 0, 0]
warsaws -0.2 0.4 [0, 0, 0, -1, 0, 0, 0, 0, -1, 0]
warship -0.7 0.9 [0, 0, 0, 0, 0, -2, -1, 0, -2, -2]
warships -0.5 0.80623 [0, -1, 0, 0, -2, 0, 0, -2, 0, 0]
warstle 0.1 0.7 [0, 0, 0, 0, 0, 0, | |
data_newref_mask,
table_trans['X_POS'], table_trans['Y_POS'], psffit=psffit,
moffat=moffat, gauss=gauss, psfex_bintable_ref=fits_ref_psf,
header_new=header_new, header_ref=header_ref,
Scorr_peak=table_trans['SCORR_PEAK'], log=log)
return results
# determine optimal flux in D, directly added as columns to table_trans
colnames = ['E_FLUX_OPT_D', 'E_FLUXERR_OPT_D']
table_trans.add_columns(help_psffit_D (False, False, False), names=colnames)
# PSF fit to D, directly added as columns to table_trans
colnames = ['E_FLUX_OPT_D_alt1', 'E_FLUXERR_OPT_D_alt1', 'E_FLUX_PSF_D',
'E_FLUXERR_PSF_D', 'X_PSF_D', 'Y_PSF_D', 'CHI2_PSF_D',
'XERR_PSF_D', 'YERR_PSF_D']
table_trans.add_columns(help_psffit_D (True, False, False), names=colnames)
log.info ('[get_trans] time after PSF fit to D: {}'.format(time.time()-t))
# filter on CHI2_PSF_D
# ====================
if get_par(set_zogy.make_plots,tel):
ds9_rad += 2
result = prep_ds9regions(
'{}_ds9regions_trans_filt3_elong.txt'.format(base),
table_trans['X_POS'], table_trans['Y_POS'],
radius=ds9_rad, width=2, color='blue',
value=table_trans['CHI2_PSF_D'])
# filter out transient candidates with high chi2 and non-finite values
chi2_max = get_par(set_zogy.chi2_max,tel)
mask_keep = (table_trans['CHI2_PSF_D'] <= chi2_max)
# discard rows where fit values are infinite or NaN
for col in colnames:
mask_finite = np.isfinite(table_trans[col])
nbad = np.sum(~mask_finite)
if nbad > 0:
mask_keep &= mask_finite
log.warning ('column {} contains {} infinite or NaN values for image '
'{}; discarding the corresponding row(s)'
.format(col, nbad, new_fits))
# filter
table_trans = table_trans[mask_keep]
log.info('ntrans after PSF_D fit chi2 filter: {}'.format(len(table_trans)))
# filter on S/N_PSF_D
# ===================
# check S/N of E_FLUX_PSF_D
s2n_psfD = np.copy(table_trans['E_FLUX_PSF_D'])
mask_nonzero = (table_trans['E_FLUXERR_PSF_D'] != 0)
s2n_psfD[mask_nonzero] /= table_trans['E_FLUXERR_PSF_D'][mask_nonzero]
if get_par(set_zogy.make_plots,tel):
ds9_rad += 2
result = prep_ds9regions(
'{}_ds9regions_trans_filt4_chi2_PSF_D.txt'.format(base),
table_trans['X_POS'], table_trans['Y_POS'],
radius=ds9_rad, width=2, color='yellow',
value=s2n_psfD)
mask_keep = (np.abs(s2n_psfD) >= get_par(set_zogy.transient_nsigma,tel))
table_trans = table_trans[mask_keep]
log.info('ntrans after PSF_D fit S/N filter: {}'.format(len(table_trans)))
# Gauss fit to D
# ==============
# Gauss fit to D, directly added as columns to table_trans
colnames = ['E_FLUX_OPT_D_alt3', 'E_FLUXERR_OPT_D_alt3', 'X_GAUSS_D',
'XERR_GAUSS_D', 'Y_GAUSS_D', 'YERR_GAUSS_D', 'FWHM_GAUSS_D',
'ELONG_GAUSS_D', 'CHI2_GAUSS_D']
table_trans.add_columns(help_psffit_D (False, False, True), names=colnames)
log.info ('[get_trans] time after Gauss fit to D: {}'.format(time.time()-t))
if get_par(set_zogy.make_plots,tel):
ds9_rad += 2
result = prep_ds9regions(
'{}_ds9regions_trans_filt5_s2n_PSF_D.txt'.format(base),
table_trans['X_POS'], table_trans['Y_POS'],
radius=ds9_rad, width=2, color='blue',
value=table_trans['CHI2_GAUSS_D'])
# filter out transient candidates with high chi2 values
mask_keep = (table_trans['CHI2_GAUSS_D'] <= chi2_max)
# discard rows where fit values are infinite or NaN
for col in colnames:
mask_finite = np.isfinite(table_trans[col])
nbad = np.sum(~mask_finite)
if nbad > 0:
mask_keep &= mask_finite
log.warning ('column {} contains {} infinite or NaN values for image '
'{}; discarding the corresponding row(s)'
.format(col, nbad, new_fits))
# filter
# switch off for the moment; CHECK!!!
if False:
table_trans = table_trans[mask_keep]
log.info('ntrans after Gauss fit chi2 filter: {}'.format(len(table_trans)))
if get_par(set_zogy.make_plots,tel):
ds9_rad += 2
result = prep_ds9regions(
'{}_ds9regions_trans_filt6_chi2_GAUSS.txt'.format(base),
table_trans['X_POS'], table_trans['Y_POS'],
radius=ds9_rad, width=2, color='green')
# determine RAs and DECs
wcs_new = WCS(header_new)
wcs_ref = WCS(header_ref)
ra_peak, dec_peak = wcs_new.all_pix2world(table_trans['X_PEAK'],
table_trans['Y_PEAK'], 1)
# determine RA and DEC corresponding to x_psf_D and y_psf_D
ra_psf_D, dec_psf_D = wcs_new.all_pix2world(table_trans['X_PSF_D'],
table_trans['Y_PSF_D'], 1)
if False:
# determine RA and DEC corresponding to x_moffat and y_moffat
ra_moffat, dec_moffat = wcs_new.all_pix2world(table_trans['X_MOFFAT_D'],
table_trans['Y_MOFFAT_D'],
1)
# determine RA and DEC corresponding to x_moffat and y_moffat
ra_gauss, dec_gauss = wcs_new.all_pix2world(table_trans['X_GAUSS_D'],
table_trans['Y_GAUSS_D'], 1)
# determine RA and DEC corresponding to x_moffat and y_moffat
ra_D, dec_D = wcs_new.all_pix2world(table_trans['X_POS'],
table_trans['Y_POS'], 1)
# adding RAs and DECs to table
table_trans.add_columns([ra_peak, dec_peak,
ra_psf_D, dec_psf_D,
#ra_moffat, dec_moffat,
ra_gauss, dec_gauss,
ra_D, dec_D],
names=['RA_PEAK', 'DEC_PEAK',
'RA_PSF_D', 'DEC_PSF_D',
#'RA_MOFFAT_D','DEC_MOFFAT_D',
'RA_GAUSS_D', 'DEC_GAUSS_D',
'RA_SCORR', 'DEC_SCORR'])
# need to convert psf fluxes to magnitudes by applying the zeropoint
keywords = ['exptime', 'filter', 'obsdate']
exptime, filt, obsdate = read_header (header_new, keywords, log=log)
# get zeropoint from [header_new]
if 'PC-ZP' in header_new:
zp = header_new['PC-ZP']
else:
zp = get_par(set_zogy.zp_default,tel)[filt]
# get airmass from [header_new]
if 'AIRMASSC' in header_new:
airmass = header_new['AIRMASSC']
elif 'PC-AIRM' in header_new:
airmass = header_new['PC-AIRM']
# determine individual airmasses of transients to be able to
# determine their magnitudes accurately also at high airmass
lat = get_par(set_zogy.obs_lat,tel)
lon = get_par(set_zogy.obs_lon,tel)
height = get_par(set_zogy.obs_height,tel)
airmass_trans = get_airmass(table_trans['RA_PEAK'], table_trans['DEC_PEAK'],
obsdate, lat, lon, height, log=log)
# get magnitudes corresponding to absolute fluxes; fluxes, which
# can be negative for e.g. an object detected in the reference
# image but not in the new image, are first converted to positive
# fluxes. That it was a negative flux object is still clear from
# the sign of Scorr_peak.
data_Fpsf = read_hdulist (fits_Fpsf)
data_Fpsferr = read_hdulist (fits_Fpsferr)
# read off fluxes and errors at X_PEAK and Y_PEAK indices
flux_peak = data_Fpsf[table_trans['Y_PEAK']-1, table_trans['X_PEAK']-1]
fluxerr_peak = data_Fpsferr[table_trans['Y_PEAK']-1, table_trans['X_PEAK']-1]
del data_Fpsf, data_Fpsferr
mag_peak, magerr_peak = apply_zp (
np.abs(flux_peak), zp, airmass_trans, exptime, filt, log, zp_std=None,
fluxerr=np.abs(fluxerr_peak))
mag_psf_D, magerr_psf_D = apply_zp (
np.abs(table_trans['E_FLUX_PSF_D']), zp, airmass_trans, exptime, filt, log,
fluxerr=np.abs(table_trans['E_FLUXERR_PSF_D']), zp_std=None)
mag_opt_D, magerr_opt_D = apply_zp (
np.abs(table_trans['E_FLUX_OPT_D']), zp, airmass_trans, exptime, filt, log,
fluxerr=np.abs(table_trans['E_FLUXERR_OPT_D']), zp_std=None)
log.info ('[get_trans] time after converting flux to mag: {}'.format(time.time()-t))
# adding magnitudes and also flux_peak to table
table_trans.add_columns([flux_peak, fluxerr_peak,
mag_peak, magerr_peak,
mag_psf_D, magerr_psf_D,
mag_opt_D, magerr_opt_D],
names=['E_FLUX_PEAK', 'E_FLUXERR_PEAK',
'MAG_PEAK', 'MAGERR_PEAK',
'MAG_PSF_D', 'MAGERR_PSF_D',
'MAG_OPT_D', 'MAGERR_OPT_D'])
# change some of the column names
colnames_new = {'X_POS': 'X_POS_SCORR',
'Y_POS': 'Y_POS_SCORR',
'XVAR_POS': 'XVAR_POS_SCORR',
'YVAR_POS': 'YVAR_POS_SCORR',
'XYCOV_POS': 'XYCOV_POS_SCORR',
'ELONGATION': 'ELONG_SCORR',
'FLAGS': 'FLAGS_SCORR',
'FLAGS_MASK': 'FLAGS_MASK_SCORR'}
for key in colnames_new.keys():
if key in table_trans.colnames:
table_trans.rename_column (key, colnames_new[key])
table_trans['NUMBER'] = np.arange(len(table_trans))+1
ntrans = len(table_trans)
# create output fits catalog; final table definition is determined
# in [format_cat]
table_trans.write('{}.transcat'.format(base_newref), format='fits',
overwrite=True)
# extract the thumbnail images corresponding to the transients in
# case either thumbnail data is being saved or MeerCRAB
# probabilities need to be calculated for ML/BG
if (get_par(set_zogy.save_thumbnails,tel) or
(get_par(set_zogy.ML_calc_prob,tel) and
tel in ['ML1', 'BG2', 'BG3', 'BG4'])):
data_full_list = [data_new, data_ref, data_D, data_Scorr]
keys_thumbnails = ['THUMBNAIL_RED', 'THUMBNAIL_REF',
'THUMBNAIL_D', 'THUMBNAIL_SCORR']
n_thumbnails = len(keys_thumbnails)
# coordinates to loop
xcoords = table_trans['X_PEAK']
ycoords = table_trans['Y_PEAK']
ncoords = len(xcoords)
# thumbnail size
size_thumbnails = get_par(set_zogy.size_thumbnails,tel)
# initialise output thumbnail columns
data_thumbnails = np.zeros((n_thumbnails, ncoords,
size_thumbnails, size_thumbnails))
# size of full input images; assuming they have identical shapes
ysize, xsize = data_full_list[0].shape
# loop x,y coordinates
for i_pos in range(ncoords):
# get index around x,y position using function [get_index_around_xy]
index_full, index_tn = (get_index_around_xy(
ysize, xsize, ycoords[i_pos], xcoords[i_pos], size_thumbnails))
# loop thumbnails and record pixels from full image to
# data_thumbnails
for i_tn, key in enumerate(keys_thumbnails):
try:
data_thumbnails[i_tn][i_pos][index_tn] = (
data_full_list[i_tn][index_full])
# if [orient_thumbnails] is switched on,
# orient the thumbnails in North-up, East left
# orientation
if get_par(set_zogy.orient_thumbnails,tel):
# input reference data is the remapped
# reference image and its orientation is
# the same as that of the new, D and Scorr
# images, and so the same header
# (header_toadd=header_newzogy) should be
# used rather than the reference image
# header header_ref
#data_thumbnails[i_tn, i_pos] = orient_data (
# data_thumbnails[i_tn, i_pos], header_new,
# MLBG_rot90_flip=True, tel=tel, log=log)
data_thumbnails[i_tn][i_pos] = orient_data (
data_thumbnails[i_tn][i_pos], header_new,
MLBG_rot90_flip=True, tel=tel, log=log)
except Exception as e:
if log is not None:
log.info('skipping remapping of thumbnail at x,y: '
'{:.0f},{:.0f} due to exception: {}'.
format(xcoords[i_pos], ycoords[i_pos], e))
else:
data_thumbnails = None
if get_par(set_zogy.timing,tel):
log_timing_memory (t0=t, label='get_trans_alt', log=log)
return table_trans, data_thumbnails
################################################################################
def get_trans (data_new, data_ref, data_D, data_Scorr, data_Fpsf, data_Fpsferr,
data_new_mask, data_ref_mask, data_new_bkg_std, data_ref_bkg_std,
header_new, header_ref, header_trans,
psfex_bintable_new, psfex_bintable_ref,
fits_cat_new, fits_cat_ref, log):
"""Function that selects transient candidates from the significance
array (data_Scorr), and determines all regions with peak Scorr
values above the set threshold, and down to where the region wings
reaches the 2 sigma isochrone. Regions are discarded if they:
- are too big or too small
- contain both negatively and positively significant Scorr values
(at 5 sigma level) within the same region
- contain both negatively and positively significant Scorr values
(at 5 sigma level) in this region and region at a pixel position
that is due to the shift between the new and reference image
(e.g. an artefact that is present in both the new and reference
image will create transients with opposite significance at
pixel positions equal to the shift between the images)
- contain more flagged pixels than the maximum indicated in
[set_zogy.transient_mask_max]
Futhermore, a PSF fit to the D image is performed at the location
of the filtered transients, using combination of the PSFs of the
new and ref image, i.e. P_D in ZOGY-speak. This results | |
<filename>src/ecs_tasks_ops_qt5/qt5_ecs.py
"""Qt5 Tree Model for ecs."""
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from ecs_tasks_ops import ecs_conf
from ecs_tasks_ops import ecs_data
from ecs_tasks_ops import ecs_facade
from ecs_tasks_ops import ecs_ssh
from ecs_tasks_ops import pretty_json
class ECSTreeItem(QtWidgets.QTreeWidgetItem):
    """Base tree-widget item wrapping a generic ECS entity.

    Stores the entity's display name, its ARN/identifier, a short type
    tag and the raw detail dictionary the item was built from.
    """

    def __init__(self, name, identifier, detail_type, detail, parent=None):
        """Create the item, showing *name* and *identifier* as its two columns."""
        super().__init__(parent, [name, identifier])
        self.name = name
        self.identifier = identifier
        self.detail_type = detail_type
        self.detail = detail

    def refresh_children(self):
        """Rebuild child items; the base element has none."""

    def get_attributes(self):
        """Return the (label, value) pairs shown in the detail pane."""
        return [
            ("Name", self.name),
            ("Identifier", self.identifier),
            ("Detail Type", self.detail_type),
        ]

    def clear_children(self):
        """Remove every child item, last first."""
        while self.childCount():
            self.removeChild(self.child(self.childCount() - 1))

    def get_context_menu(self, menu):
        """Append the context-menu actions shared by every ECS item to *menu*."""
        menu.addSeparator()
        menu.addAction("Refresh Children", self.refresh_children)
        menu.addAction("Show Detail", self.command_show_detail)

    def command_show_detail(self):
        """Ask the owning tree widget to display this item's detail."""
        self.treeWidget().command_show_detail(self)
class ECSClusterTreeItem(ECSTreeItem):
    """Tree item representing one ECS cluster."""

    def __init__(self, detail, parent=None):
        """Build the item from a cluster description dictionary."""
        super().__init__(
            name=detail["clusterName"],
            identifier=detail["clusterArn"],
            detail_type="cluster",
            detail=detail,
            parent=parent,
        )

    def refresh_children(self):
        """Replace children with the three per-cluster listing nodes."""
        self.clear_children()
        for listing_cls in (
            ECSListTasksClusterTreeItem,
            ECSListServicesClusterTreeItem,
            ECSListContainersClusterTreeItem,
        ):
            self.addChild(listing_cls(self.detail, self))

    def get_attributes(self):
        """Return the cluster summary counters for the detail pane."""
        detail = self.detail
        return [
            ("Name", self.name),
            ("Identifier", self.identifier),
            ("Detail Type", self.detail_type),
            ("Status", detail["status"]),
            ("Active Services", detail["activeServicesCount"]),
            ("Running Tasks", detail["runningTasksCount"]),
            ("Pending Tasks", detail["pendingTasksCount"]),
            ("Containers", detail["registeredContainerInstancesCount"]),
        ]
class ECSListServicesClusterTreeItem(ECSTreeItem):
    """Folder item holding the services of one cluster."""

    def __init__(self, detail, parent=None):
        """Build the folder from the parent cluster's description dictionary."""
        cluster_name = detail["clusterName"]
        super().__init__(
            name=f"Services on '{cluster_name}'",
            identifier=cluster_name,
            detail_type="list_services",
            detail=detail,
            parent=parent,
        )

    def refresh_children(self):
        """Repopulate with one child item per service in the cluster."""
        self.clear_children()
        for service_detail in ecs_data.get_services(self.identifier):
            self.addChild(ECSServiceTreeItem(service_detail, self))
class ECSListTasksClusterTreeItem(ECSTreeItem):
    """Folder item holding the tasks of one cluster."""

    def __init__(self, detail, parent=None):
        """Initialize info to get a list of tasks for a cluster.

        Args:
            detail: cluster description dict; must contain 'clusterName'.
            parent: parent tree item, normally the cluster node.
        """
        super(ECSListTasksClusterTreeItem, self).__init__(
            name=f"Tasks on '{detail['clusterName']}'",
            identifier=detail["clusterName"],
            # BUG FIX: was "list_services", copy-pasted from the services
            # folder item; this node lists tasks.
            detail_type="list_tasks",
            detail=detail,
            parent=parent,
        )

    def refresh_children(self):
        """Repopulate with one child item per task in the cluster."""
        self.clear_children()
        for task in ecs_data.get_tasks_cluster(self.identifier):
            self.addChild(ECSTaskTreeItem(task, self))
class ECSListContainersClusterTreeItem(ECSTreeItem):
    """Folder item holding the container instances of one cluster."""

    def __init__(self, detail, parent=None):
        """Initialize info to get a list of container instances for a cluster.

        Args:
            detail: cluster description dict; must contain 'clusterName'.
            parent: parent tree item, normally the cluster node.
        """
        super(ECSListContainersClusterTreeItem, self).__init__(
            name=f"Containers on '{detail['clusterName']}'",
            identifier=detail["clusterName"],
            # BUG FIX: was "list_services", copy-pasted from the services
            # folder item; this node lists container instances.
            detail_type="list_containers",
            detail=detail,
            parent=parent,
        )

    def refresh_children(self):
        """Repopulate with one child item per container instance in the cluster."""
        self.clear_children()
        for container in ecs_data.get_containers_instances(self.identifier):
            self.addChild(ECSContainerTreeItem(container, self.identifier, self))
class ECSServiceTreeItem(ECSTreeItem):
    """Tree item representing one ECS service."""

    def __init__(self, detail, parent=None):
        """Build the item from a service description dictionary."""
        super().__init__(
            name=detail["serviceName"],
            identifier=detail["serviceArn"],
            detail_type="service",
            detail=detail,
            parent=parent,
        )
        # Remember the owning cluster for task lookups.
        self.cluster_identifier = detail["clusterArn"]

    def refresh_children(self):
        """Repopulate with one child item per task run by this service."""
        self.clear_children()
        service_tasks = ecs_data.get_tasks_service(
            self.cluster_identifier, self.identifier
        )
        for task_detail in service_tasks:
            self.addChild(ECSTaskTreeItem(task_detail, self))

    def get_attributes(self):
        """Return service counters and deployment limits for the detail pane."""
        deploy_cfg = self.detail["deploymentConfiguration"]
        lower_pct = deploy_cfg["minimumHealthyPercent"]
        upper_pct = deploy_cfg["maximumPercent"]
        bracket_text = "Min: " + str(lower_pct) + "%, Max: " + str(upper_pct) + "%"
        return [
            ("Name", self.name),
            ("Identifier", self.identifier),
            ("Detail Type", self.detail_type),
            ("Status", self.detail["status"]),
            ("Task Definition", self.detail["taskDefinition"]),
            ("Running", self.detail["runningCount"]),
            ("Pending", self.detail["pendingCount"]),
            ("Desired", self.detail["desiredCount"]),
            ("Redeployment bracket", bracket_text),
        ]

    def get_context_menu(self, menu):
        """Add service-specific actions, then the shared ones."""
        menu.addAction("Show Events", self.command_service_show_events)
        menu.addAction("Force Restart Service", self.command_service_restart)
        super().get_context_menu(menu)

    def command_service_restart(self):
        """Ask the tree widget to force-restart this service."""
        self.treeWidget().command_service_restart(self)

    def command_service_show_events(self):
        """Ask the tree widget to display this service's event list."""
        self.treeWidget().command_service_show_events(self)
class ECSTaskTreeItem(ECSTreeItem):
    """Tree item representing one ECS task."""

    def __init__(self, detail, parent=None):
        """Build the item from a task description dictionary."""
        super().__init__(
            name=detail["name"],
            identifier=detail["taskArn"],
            detail_type="task",
            detail=detail,
            parent=parent,
        )
        # Remember the owning cluster for container lookups.
        self.cluster_identifier = self.detail["clusterArn"]

    def refresh_children(self):
        """Repopulate with one child item per docker container of this task."""
        self.clear_children()
        task_containers = ecs_data.get_containers_tasks(
            self.cluster_identifier, self.identifier
        )
        for container_detail in task_containers:
            self.addChild(
                ECSDockerContainerTreeItem(
                    container_detail, self.identifier, self.cluster_identifier, self
                )
            )

    def get_attributes(self):
        """Return task status, placement and network info for the detail pane."""
        detail = self.detail
        # The container-instance id is the suffix of its ARN after the first '/'.
        instance_id = detail["containerInstanceArn"].split("/", 1)[1]
        return [
            ("Name", self.name),
            ("Identifier", self.identifier),
            ("Detail Type", self.detail_type),
            ("Cluster Arn", detail["clusterArn"]),
            ("Status", detail["lastStatus"]),
            ("Desired Status", detail["desiredStatus"]),
            ("EC2 Instance", detail["ec2InstanceId"]),
            ("Availability Zone", detail["availabilityZone"]),
            ("Connectivity", detail.get("connectivity", "")),
            ("Task Definition", detail["taskDefinitionArn"]),
            ("Container Instance ID", instance_id),
            ("N. Docker images", len(detail["containers"])),
            ("Networks", "\n".join(detail["networks"])),
            ("Connectivity Time", detail["connectivityAt"]),
        ]

    def get_context_menu(self, menu):
        """Add task-specific actions, then the shared ones."""
        menu.addAction("Stop Task", self.command_task_stop)
        menu.addAction("SSH Instance Container", self.command_container_ssh)
        menu.addAction("Docker Log (First Task)", self.command_task_log)
        super().get_context_menu(menu)

    def command_task_stop(self):
        """Ask the tree widget to stop this task."""
        self.treeWidget().command_task_stop(self)

    def command_container_ssh(self):
        """Ask the tree widget to ssh into this task's container instance."""
        self.treeWidget().command_container_ssh(self)

    def command_task_log(self):
        """Ask the tree widget to show the log of this task's first container."""
        self.treeWidget().command_task_log(self)
class ECSContainerTreeItem(ECSTreeItem):
    """Tree item representing one ECS container instance (EC2 host)."""

    def __init__(self, detail, cluster_identifier, parent=None):
        """Build the item from a container-instance description dictionary."""
        super().__init__(
            name=detail["ec2InstanceId"],
            identifier=detail["containerInstanceArn"],
            detail_type="container",
            detail=detail,
            parent=parent,
        )
        self.cluster_identifier = cluster_identifier

    def refresh_children(self):
        """Repopulate with one child item per task placed on this instance."""
        self.clear_children()
        placed_tasks = ecs_data.get_tasks_container_instance(
            self.cluster_identifier, self.identifier
        )
        for task_detail in placed_tasks:
            self.addChild(ECSTaskTreeItem(task_detail, self))

    def get_attributes(self):
        """Return instance hardware and capacity info for the detail pane."""
        detail = self.detail
        memory_text = (
            "Available: "
            + str(detail["available_memory"])
            + " Total: "
            + str(detail["total_memory"])
        )
        cpu_text = (
            "Available: "
            + str(detail["available_cpu"])
            + " Total: "
            + str(detail["total_cpu"])
        )
        return [
            ("Name", self.name),
            ("Identifier", self.identifier),
            ("Detail Type", self.detail_type),
            ("Status", detail["status"]),
            ("EC2 Instance Id", detail["ec2InstanceId"]),
            ("Running Tasks", detail["runningTasksCount"]),
            ("Pending Tasks", detail["pendingTasksCount"]),
            ("AMI Id", detail["ami_id"]),
            ("Instance Type", detail["instance_type"]),
            ("Availability Zone", detail["availability_zone"]),
            ("Memory", memory_text),
            ("CPU", cpu_text),
            ("Taken ports", detail["taken_ports"]),
        ]

    def get_context_menu(self, menu):
        """Add the instance ssh action, then the shared ones."""
        menu.addAction("SSH Instance Container", self.command_container_ssh)
        super().get_context_menu(menu)

    def command_container_ssh(self):
        """Ask the tree widget to open an ssh session on this instance."""
        self.treeWidget().command_container_ssh(self)
class ECSDockerContainerTreeItem(ECSTreeItem):
    """Tree item representing one docker container inside a task."""

    def __init__(self, detail, task_identifier, cluster_identifier, parent=None):
        """Build the item from a docker-container description dictionary."""
        super().__init__(
            name=detail["name"],
            identifier=detail["containerArn"],
            detail_type="docker_container",
            detail=detail,
            parent=parent,
        )
        # Keep the owning task and cluster for command routing.
        self.cluster_identifier = cluster_identifier
        self.task_identifier = task_identifier

    def get_attributes(self):
        """Return docker runtime info for the detail pane."""
        detail = self.detail
        return [
            ("Name", self.name),
            ("Identifier", self.identifier),
            ("Detail Type", self.detail_type),
            ("Container Arn", detail["containerArn"]),
            ("Status", detail["lastStatus"]),
            ("Health Status", detail["healthStatus"]),
            ("Docker id", detail["runtimeId"]),
            ("Docker Image", detail["image"]),
            ("CPU", detail["cpu"]),
            ("Instance ID", detail["ec2InstanceId"]),
            ("Networks", detail["networks"]),
        ]

    def get_context_menu(self, menu):
        """Add docker-specific actions, then the shared ones."""
        menu.addAction("SSH Instance Container", self.command_container_ssh)
        menu.addAction("Docker log", self.command_docker_log)
        menu.addAction("Docker exec", self.command_docker_exec)
        super().get_context_menu(menu)

    def command_container_ssh(self):
        """Ask the tree widget to ssh into the hosting container instance."""
        self.treeWidget().command_container_ssh(self)

    def command_docker_log(self):
        """Ask the tree widget to show this container's docker log."""
        self.treeWidget().command_docker_log(self)

    def command_docker_exec(self):
        """Ask the tree widget to exec a command inside this container."""
        self.treeWidget().command_docker_exec(self)
class ECSElementsTreeWidget(QtWidgets.QTreeWidget):
    """Tree widget showing all ECS elements (clusters, services, tasks, ...).

    Every ``command_*`` method follows the same pattern: validate the item,
    report progress on the status bar, then forward the item through the
    matching signal.  The shared body lives in ``_forward_item_command``.
    """

    sig_status_changed = QtCore.pyqtSignal(str)
    sig_command_show_detail = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_service_restart = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_service_show_events = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_task_stop = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_task_log = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_container_ssh = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_docker_log = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)
    sig_command_docker_exec = QtCore.pyqtSignal(QtWidgets.QTreeWidgetItem)

    def __init__(self, parent=None):
        """Load the cluster list and wire selection/activation handlers."""
        super(ECSElementsTreeWidget, self).__init__(parent)
        self.reload_cluster_info()
        self.currentItemChanged.connect(
            lambda item: self.show_status_on_selection(item)
        )
        # Both activation paths lazily (re)load the item's children.
        self.itemActivated.connect(lambda item: item.refresh_children())
        self.itemDoubleClicked.connect(lambda item: item.refresh_children())

    def contextMenuEvent(self, event):
        """Open the context menu of the item under the cursor, if any."""
        item = self.itemAt(event.pos())
        if item:
            menu = QtWidgets.QMenu(self)
            item.get_context_menu(menu)
            menu.exec_(event.globalPos())
        else:
            super(ECSElementsTreeWidget, self).contextMenuEvent(event)

    def keyPressEvent(self, e):
        """Process key presses on this widget (default handling only)."""
        super(ECSElementsTreeWidget, self).keyPressEvent(e)

    @QtCore.pyqtSlot()
    def reload_cluster_info(self):
        """Clear the tree and reload the list of clusters from ECS."""
        self.clear()
        for cluster in ecs_data.get_clusters():
            self.addTopLevelItem(ECSClusterTreeItem(cluster))

    def show_status_on_selection(self, item):
        """Update the status bar with info about the selected item."""
        if item:
            self.sig_status_changed.emit(f"Selecting {item.name}: {item.identifier}")

    def _forward_item_command(self, item, message_prefix, signal):
        """Shared command body: validate *item*, report status, emit *signal*."""
        if item and isinstance(item, ECSTreeItem):
            self.sig_status_changed.emit(
                f"{message_prefix} {item.name}: {item.identifier}"
            )
            signal.emit(item)

    def command_show_detail(self, item):
        """Show attributes on the detail view widget."""
        self._forward_item_command(item, "Show details for", self.sig_command_show_detail)

    def command_service_restart(self, item):
        """Restart a service."""
        self._forward_item_command(item, "Restarting service", self.sig_command_service_restart)

    def command_task_stop(self, item):
        """Stop a task."""
        self._forward_item_command(item, "Stopping task", self.sig_command_task_stop)

    def command_task_log(self, item):
        """Show the log of a task."""
        self._forward_item_command(item, "Log for task", self.sig_command_task_log)

    def command_container_ssh(self, item):
        """Open an SSH session to a container instance."""
        self._forward_item_command(item, "Access to ssh for", self.sig_command_container_ssh)

    def command_docker_log(self, item):
        """Show the log of a docker container."""
        self._forward_item_command(item, "Docker log for", self.sig_command_docker_log)

    def command_docker_exec(self, item):
        """Execute a command on a docker container."""
        self._forward_item_command(item, "Docker exec for", self.sig_command_docker_exec)

    def command_service_show_events(self, item):
        """Show the events of a service."""
        self._forward_item_command(item, "Show Events for", self.sig_command_service_show_events)
class ECSAttributesTreeWidget(QtWidgets.QTreeWidget):
"""Widget to show item attributes."""
sig_status_changed = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
"""Initialize attributes view."""
super(ECSAttributesTreeWidget, self).__init__(parent)
| |
from tkinter import *
import time, mysql.connector,csv,os,subprocess
from PIL import Image
from ast import literal_eval
from mysql.connector import errorcode
from tkinter import messagebox
from tkinter.filedialog import asksaveasfilename
class window(Tk):
    """Main application window.

    Owns the MySQL connection (``self.cnx``), the status bar and a handful
    of widget-factory helpers used by the rest of the program.
    """

    def __init__(self):
        super().__init__()
        # Safe defaults so the connection attributes always exist, even when
        # the configuration file is missing or malformed (the original code
        # raised AttributeError on connect in that case).
        self.username = ""
        self.password = ""
        self.host = ""
        self.database = ""
        self.tablename = "medicalbill"
        # Read the connection settings (one CSV row: user,password,host,db).
        try:
            with open("dbconn.csv") as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    self.username = str(row[0])
                    self.password = str(row[1])
                    self.host = str(row[2])
                    self.database = str(row[3])
                    break
        except Exception:
            messagebox.showerror("Error","Please configure database connection. Goto- Options->DB Connection")
        # Open the DB connection; translate common failures into dialogs.
        try:
            self.cnx = mysql.connector.connect(user=self.username, password=self.password, host=self.host, database=self.database)
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                messagebox.showerror(title="Error",message="Invalid user name or Password")
                print("Something is wrong with your user name or password. Goto: Options->DB Connection to setup connection")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                messagebox.showerror(title="Error",message="Database does not exist. Goto: Options->DB Connection to setup connection")
                print("Database does not exist")
            else:
                messagebox.showerror(title="Error",message="Connection Error. Goto: Options->DB Connection to setup connection")
                print(err)
        # initializing window
        self.geometry("852x600+0+0")
        self.minsize(852,600)
        self.title("Medical Billing System")
        self.config(bg="powder blue")
        try:
            self.iconbitmap("icon.ico")
        except Exception:
            pass  # icon is optional (missing file / non-Windows platform)

    def commit_db(self,query):
        """Execute *query* against the configured database and commit it."""
        self.cnx.database = self.database
        cursor = self.cnx.cursor()
        cursor.execute(query)
        self.cnx.commit()
        cursor.close()  # release the cursor once the transaction is committed

    def statusbar(self):
        '''
        To show status at bottom of the GUI.
        '''
        self.status = StringVar()
        self.status.set("Ready")
        self.sbar = Label(self,textvariable=self.status,relief=SUNKEN,anchor="w")
        self.sbar.pack(side=BOTTOM,fill=X)

    def update_status(self,state="Ready",freeze=0):
        '''
        Update the status bar text; when *freeze* > 0, keep it on screen for
        that many seconds (note: blocks the UI thread).
        '''
        self.status.set(state)
        self.sbar.update()
        if freeze > 0:
            time.sleep(freeze)

    def create_button(self,master=None,btntxt="Button",bg="sky blue",relief=RAISED,bd=6,funcname=None,side=None,padx=3,pady=3,anchor=None,ipadx=10,ipady=None,**kwargs):
        '''
        Create and pack a Button; extra keyword arguments go to Button().
        '''
        btn = Button(master,text=btntxt,command=funcname,bg=bg,relief=relief,bd=bd,**kwargs)
        btn.pack(side=side,padx=padx,pady=pady,anchor=anchor,ipadx=ipadx,ipady=ipady)

    def create_grid_label(self,master=None,text="unknown",bg=None,relief=SUNKEN,bd=None,padx=None,pady=None,ipady=None,ipadx=None,column=None,row=None,columnspan=None,rowspan=None,**kwargs):
        '''
        Create a Label placed with grid(); extra keyword arguments go to Label().
        NOTE: padx/pady are accepted but (as before) not forwarded to grid().
        '''
        label = Label(master,text=text,bg=bg,relief=relief,bd=bd,**kwargs)
        label.grid(ipadx=ipadx,ipady=ipady,columnspan=columnspan,rowspan=rowspan,row=row,column=column)

    def create_grid_entry(self,master=None,bg=None,variable=None,relief=SUNKEN,bd=None,padx=None,pady=None,ipady=None,ipadx=None,column=None,row=None,columnspan=None,rowspan=None,**kwargs):
        '''
        Create an Entry placed with grid(); extra keyword arguments go to Entry().
        '''
        entry = Entry(master,bg=bg,relief=relief,bd=bd,textvariable=variable,**kwargs)
        entry.grid(ipadx=ipadx,ipady=ipady,padx=padx,pady=pady,columnspan=columnspan,rowspan=rowspan,row=row,column=column)

    def exit(self):
        """Close the database connection, then destroy the window."""
        self.cnx.close()  # Closing the connection before exit
        self.destroy()
class conn_window(Frame):
    """Dialog that lets the user configure and test the MySQL connection.

    Fixes the sanitised ``<PASSWORD>`` placeholders left in the source,
    which made the file unparseable: the entered password (``passwd``) is
    now passed to every connect/persist call.
    """

    def __init__(self):
        Frame.__init__(self)
        window1 = Toplevel(self)
        window1.title("Database connection wizard")
        window1.geometry("500x150+100+100")
        try:
            window1.iconbitmap("dbconn.ico")
        except Exception:
            pass  # icon is optional

        # --- helpers --------------------------------------------------
        def updateconnfile(username,password,host,database):
            """Persist the connection settings as a single CSV row."""
            with open("dbconn.csv","w") as csv_file:
                file=csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                file.writerow([username,password,host,database])

        def updateConnectionData():
            """Test the entered settings, creating database/table if needed."""
            user=username.get()
            passwd=password.get()
            hst=host.get()
            db=database.get()
            try:
                cnx = mysql.connector.connect(user=user, password=passwd, host=hst, database=db)
            except mysql.connector.Error as err:
                if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                    Label(frame1,text="Invalid user name or Password", bg="red").pack(side=TOP,pady=3)
                elif err.errno == errorcode.ER_BAD_DB_ERROR:
                    Label(frame1,text="Database doesnot exit. Creating Database...",bg="red").pack(side=TOP,pady=3)
                    # The database is missing: connect without one and create it.
                    cnx = mysql.connector.connect(user=user, password=passwd, host=hst)
                    cursor = cnx.cursor()
                    cursor.execute(f"CREATE DATABASE IF NOT EXISTS {db} DEFAULT CHARACTER SET 'utf8'")
                    # updating database details
                    cnx.database = db
                    Label(frame1,text=f"Success!!! Please restart the program to continue...",bg="green").pack(side=TOP,pady=3)
                    # Create the invoice table inside the new database.
                    cursor.execute(f"CREATE TABLE `{db}`.`medicalbill` ( `srl_no` INT NOT NULL AUTO_INCREMENT,`invoice_no` INT NOT NULL , `customer` VARCHAR(50) NOT NULL , `address` VARCHAR(150) NOT NULL , `city` VARCHAR(50) NOT NULL , `state` VARCHAR(50) NOT NULL , `doctor` VARCHAR(50) NOT NULL , `purchage_data` TEXT NOT NULL , PRIMARY KEY (`srl_no`)) ENGINE = InnoDB")
                    # Invoice numbering could be seeded here, e.g.:
                    # cursor.execute("ALTER TABLE medicalbill AUTO_INCREMENT=1001;")
                    cursor.close()
                    cnx.close()
                else:
                    Label(frame1,text=f"Error{err}", bg="red").pack(side=TOP,pady=3)
            else:
                Label(frame1,text="Success!!! Restart the program.",bg="green").pack(side=TOP)
                cnx.close()
            # Settings are persisted even when the test failed, matching the
            # original behaviour (the user can retry after a restart).
            updateconnfile(username=user,password=passwd,host=hst,database=db)

        # --- state & prefill ------------------------------------------
        username=StringVar()
        password=StringVar()
        host=StringVar()
        database=StringVar()
        try:
            with open("dbconn.csv") as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    username.set(str(row[0]))
                    password.set(str(row[1]))
                    host.set(str(row[2]))
                    database.set(str(row[3]))
                    break
        except Exception:
            messagebox.showinfo("Info","please enter appropriate details to continue")
            # Seed a default configuration file and restart the wizard.
            with open("dbconn.csv","w") as csv_file:
                file=csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                file.writerow(["lipun","1234","127.0.0.1","invoice"])
            window1.destroy()
            new=conn_window()

        # --- layout ---------------------------------------------------
        frame=Frame(window1)
        frame.pack(anchor="w",padx=10,pady=10)
        Label(frame,text="User Name",font=font1).grid(row=0,column=0,padx=2,sticky="w")
        Entry(frame,textvariable=username).grid(row=0,column=1,padx=2)
        Label(frame,text="Password",font=font1).grid(row=1,column=0,padx=2,sticky="w")
        Entry(frame,textvariable=password).grid(row=1,column=1,padx=2)
        Label(frame,text="Host",font=font1).grid(row=0,column=2,padx=2,sticky="w")
        Entry(frame,textvariable=host).grid(row=0,column=3,padx=2)
        Label(frame,text="Database Name",font=font1).grid(row=1,column=2,padx=2,sticky="w")
        Entry(frame,textvariable=database).grid(row=1,column=3,padx=2)
        frame1=Frame(window1)
        frame1.pack(anchor="w",padx=10,pady=10)
        Button(frame1,text="update",font=font1,bg="skyblue",command=updateConnectionData).pack(padx=10,pady=5,side=LEFT)
        Button(frame1,text="close",font=font1,bg="skyblue",command=window1.destroy).pack(side=LEFT,padx=10)
class print_window(Frame):
    """Invoice preview window with print and save-as-PDF actions."""

    def __init__(self):
        Frame.__init__(self)
        window1 = Toplevel(self)

        # --- callbacks ------------------------------------------------
        def savedata():
            """Save the invoice as PDF; push it to the DB unless a duplicate."""
            if check_duplicate_invoice(invoice=inv_no.get()):
                response=messagebox.askquestion("Question","Do you want to save DUPLICATE INVOICE?")
                if response == "yes":
                    savePDF()
                    clear_entries()
            else:
                savePDF()
                push_invoice()

        def savePDF():
            """Export the preview canvas to PostScript, then convert to PDF."""
            try:
                self.canvas.postscript(file="tmp.ps",colormode='color')
                pdf_path=asksaveasfilename(initialfile=f"{inv_no.get()}.pdf",defaultextension=".pdf",filetypes=[("All Files","*.*"),("PDF Documents","*.pdf")])
                # ps2pdf ships with Ghostscript; shell=True kept as before for
                # Windows PATH resolution.
                process = subprocess.Popen(["ps2pdf", "tmp.ps", pdf_path], shell=True)
                process.wait()
            except Exception:
                messagebox.showerror("Error","Install Ghost Script and add it's bin and lib file to system envronment.")

        def printdata():
            """Render the canvas to an image and send it to the default printer."""
            try:
                self.canvas.postscript(file="tmp.ps",colormode='color')
                img=Image.open("tmp.ps")
                img.save("tmp.png")
                os.startfile("tmp.png","print")
            except Exception:
                messagebox.showerror("Error","Install Ghost Script and add it's bin and lib file to system envronment.")

        def checkprintdata():
            """Print the invoice; push it to the DB unless it is a duplicate."""
            if check_duplicate_invoice(invoice=inv_no.get()):
                response=messagebox.askquestion("Question","Do you want to print DUPLICATE INVOICE?")
                if response == "yes":
                    printdata()
                    clear_entries()
            else:
                push_invoice()
                printdata()

        # --- window setup ---------------------------------------------
        window1.geometry("650x620+100+20")
        window1.minsize(650,600)
        window1.configure(bg="gray20")
        window1.title("Print")
        try:
            window1.iconbitmap("print.ico")
        except Exception:
            pass  # icon is optional

        # Snapshot of the current form values.
        invoice=inv_no.get()
        customer=customer_name.get()
        c_address= local_add.get()
        c_city= city.get()
        c_state= state.get()
        doc= doctor.get()
        purchage_data= get_table()
        total= totalInvoiceValue()

        # FONTS
        fontlabel="Eras 9 bold"
        fontinvoice= "Lucida 13 bold"
        fontdata= "Lucida 13 normal"
        fontdata1= "Lucida 8 bold"
        fontdata2= "Lucida 8 normal"

        # Preview frame and button bar.
        frame1=Frame(window1)
        frame1.pack(fill=BOTH)
        frame2=Frame(window1,bg="gray30")
        frame2.pack(side=BOTTOM,fill=X)
        Button(frame2,text="Print",font=font2,bd=3,relief=RAISED,command=checkprintdata).pack(side=LEFT,padx=10,pady=5)
        Button(frame2,text="Save As PDF",font=font2,bd=3,relief=RAISED,command=savedata).pack(side=LEFT,padx=10,pady=5)
        Button(frame2,text="Cancel",font=font2,bd=3,relief=RAISED, command=window1.destroy).pack(side=LEFT,padx=10,pady=5)

        # Draw the invoice preview on a canvas.
        self.canvaswidth=600
        self.canvasheight=550
        self.canvas=Canvas(window1,height=self.canvasheight,width=self.canvaswidth,bd=1,relief=GROOVE)
        self.canvas.create_text(300,25,font="Rockwell 17 bold",text="PAYMENT RECEIPT")
        self.canvas.create_line(0,45,600,45,dash=(200,1))
        self.canvas.create_text(10,70,font=fontlabel,text="INVOICE NUMBER:",anchor="w")
        self.canvas.create_text(120,70,font=fontinvoice,text=invoice,anchor="w")
        self.canvas.create_text(10,100,font=fontlabel,text="CUSTOMER NAME:",anchor="w")
        self.canvas.create_text(120,100,font=fontdata,text=customer,anchor="w")
        self.canvas.create_text(10,130,font=fontlabel,text="ADDRESS:",anchor="w")
        self.canvas.create_text(73,130,font=fontdata,text=c_address,anchor="w")
        self.canvas.create_text(392,130,font=fontlabel,text="CITY:",anchor="w")
        self.canvas.create_text(425,130,font=fontdata,text=c_city,anchor="w")
        self.canvas.create_text(380,160,font=fontlabel,text="STATE:",anchor="w")
        self.canvas.create_text(425,160,font=fontdata,text=c_state,anchor="w")
        self.canvas.create_text(10,160,font=fontlabel,text="REFERRED BY:",anchor="w")
        self.canvas.create_text(93,160,font=fontdata,text=doc,anchor="w")
        # Item table: solid outer border, dashed column and row separators.
        self.canvas.create_line(10,200,590,200) #table upper line
        self.canvas.create_line(10,400,590,400) #table bottom line
        self.canvas.create_line(10,200,10,400) #table left line
        self.canvas.create_line(590,200,590,400)#table right line
        self.canvas.create_line(50,200,50,400,dash=(4,1))#table column1
        self.canvas.create_line(340,200,340,400,dash=(4,1))#table column2
        self.canvas.create_line(415,200,415,400,dash=(4,1))#table column3
        self.canvas.create_line(490,200,490,400,dash=(4,1))#table column4
        ycord=230
        a=0
        while a<10:
            self.canvas.create_line(10,ycord,590,ycord,dash=(4,1))
            ycord+=17
            a+=1
        self.canvas.create_text(20,210,font=fontdata1,text="Srl.",anchor="w")
        self.canvas.create_text(70,210,font=fontdata1,text="MEDICINE NAME",anchor="w")
        self.canvas.create_text(365,210,font=fontdata1,text="QTY",anchor="w")
        self.canvas.create_text(425,210,font=fontdata1,text="UNIT PRICE",anchor="w")
        self.canvas.create_text(505,210,font=fontdata1,text="TOTAL PRICE",anchor="w")
        self.canvas.create_text(370,415,font=fontdata,text="GRAND TOTAL:",anchor="w")
        self.canvas.create_text(500,415,font=fontdata,text="₹"+total,anchor="w")
        # Serial numbers 1..10 down the first column (the loop variable is the
        # serial number itself; the old manual srl counter was redundant).
        ycord=238
        for srl in range(1,11):
            self.canvas.create_text(25,ycord,font=fontdata1,text=str(srl),anchor="w")
            ycord+=17
        # Item rows: name, qty, unit price, total price.
        ycord=238
        xcord=[60,345,420,500]
        for row in range(10):
            for column in range(4):
                self.canvas.create_text(xcord[column],ycord,font=fontdata2,text=purchage_data[row][column],anchor="w")
            ycord+=17
        self.canvas.create_text(450,460,font=fontlabel,text="SIGNATURE",anchor="w")
        self.canvas.create_line(400,450,570,450)
        self.canvas.create_text(300,490,font="Bookman 14 bold",text="JENA MEDICAL STORE")
        self.canvas.create_text(300,510,font="Arial 11 normal",text="25/10 New market (Near SBI ATM), Choota Govindpur, Jamshedpur, pin-831010")
        self.canvas.create_text(300,525,font="Arial 8 normal",text="Busniess timing: 8AM - 8PM (online delivery abailabe on phone. Call 999999999 for enquiry)")
        self.canvas.create_text(300,542,font="Arial 8 normal",text="Tel- 0657 669 6652 Mobile- 999999999 / 88888888888")
        self.canvas.pack(padx=10,pady=10)
# GUI constants shared by all widgets below.
font1="Arial 12 normal"  # default label/entry font
font2="Arial 13 normal"  # button font used on dialogs
theme="powder blue"      # background colour of the main window
labelwidth=15            # uniform width for form labels
# Main Program
if __name__ == "__main__":
# Definations here
def totalInvoiceValue():
'''Retuns total invoice value'''
table=get_table()
total="0"
for row in range(10):
for column in range(3,4):
# total+=int(table[row][column])
val=table[row][column]
if val=="":
total+="+0"
else:
total+="+"+val
# Evaluating concatinated string data eg. "1+2+3+4" will be evaluated as 10.
total=eval(total)
return str(total)
def calTotal(event):
'''Multiply qty with price and return value'''
# Requsting Table
table= get_table()
# Extracting qty and unit price information from table
# Making list of tupples (qty,price) format
data=[]
for val in range(10):
data.append((table[val][1],table[val][2]))
# calculating total
total=[]
for qty,price in data:
if qty=="" or price=="":
total.append("")
else:
total.append(float(qty)*float(price))
# Displaying total on screen
for row in range(1,11):
for column in range(4,5):
index=(row,column)
entry[index].delete(0,END)
entry[index].insert(0,str(total[row-1]))
def clear_entries(row=10,column=4):
'''Clear Previous entries'''
rows=row+1
columns=column+1
customer_name.set("")
local_add.set("")
doctor.set("Dr. ")
for row in range(1,rows):
for column in range(1,columns):
index = (row, column)
entry[index].delete(0,'end')
def encodeList(list):
'''Encoding algorith of list for database entry'''
string=str(list)
prepared_data=""
for char in string:
if char=="\'":
prepared_data += "\\'"
elif char==",":
prepared_data += "\\,"
else:
prepared_data += char
return prepared_data
def decodeList(str):
'''decoding algorithm for encoded list'''
list=str.replace("\\","")
list=literal_eval(list) #convering string to list
return list
def plot_invoice():
'''Recreate invoice from invoice number. It connect to data base and fetch respective data from database.'''
try:
inv=inv_no.get()
cursor= root.cnx.cursor()
query=f"SELECT * FROM `medicalbill` where invoice_no={inv}"
cursor.execute(query)
row=cursor.fetchall()
if row:
clear_entries()
cursor.execute(query)
for(srl_no,invoice_no,customer,address,cty,ste,doc,purchage_data) in cursor:
customer_name.set(customer)
local_add.set(address)
city.set(cty)
state.set(ste)
doctor.set(doc)
decoded_list=decodeList(purchage_data)
set_table(decoded_list)
cursor.close()
else:
messagebox.showerror("Error","No Entry Found on database!!!")
cursor.close()
except:
messagebox.showerror("Error","Please Enter a Value. If value entered, Please check database connection.")
def set_invoice_no():
inv=get_invoice_no()
inv_no.set(str(inv))
frame1.update()
def get_invoice_no():
'''It coonect to database then fetch the last invoice number and return next invoice number'''
try:
cursor= root.cnx.cursor()
query="SELECT * FROM `medicalbill` ORDER BY `invoice_no` DESC"
cursor.execute(query)
list= cursor.fetchall() #retuns a list of tupples(each result in a tuple).
if not list:
lastinv="1001"
else:
l=1
for row in list:
if l==1:
inv=row[1]
l+=1
else:
continue
lastinv=inv+1
cursor.close()
except:
cursor.close()
messagebox.showerror("Error","Internal Error")
return lastinv
def create_invoice():
'''Create a invoice without printing'''
try:
push_invoice()
except:
messagebox.showerror("error","Internal Error")
else:
set_invoice_no()
def check_duplicate_invoice(invoice):
cursor= root.cnx.cursor()
query=f"SELECT * FROM `medicalbill` WHERE invoice_no={invoice}"
cursor.execute(query)
row= cursor.fetchall()
cursor.close()
if row:
return True #duplicate exist
else:
return False
def validate_form():
'''Form Validation'''
invoice= inv_no.get()
customer=customer_name.get()
table= get_table()
count=1
if invoice=="":
messagebox.showwarning("Information","Invoice Number Required!")
return False
elif customer=="":
messagebox.showwarning("Information","Customer Name Required!")
return False
elif table[0][0]=="":
messagebox.showwarning("Information","Atleast 1 Item Required!")
return False
elif count==1:
rowcount=1
for row in table:
if row[0] != "":
if row[1]=="" or row[2]=="" or row[3]=="":
messagebox.showwarning("Information",f"Fill all details in Row No-{rowcount}")
return False
rowcount+=1
return True
def push_invoice():
'''It push invoice data entered by user to database'''
root.update_status("Pushing data to database...")
invoice= get_invoice_no()
if check_duplicate_invoice(invoice):
messagebox.showerror("Error","Duplicate Entry Found")
elif validate_form():
customer=customer_name.get()
purchage_data= get_table()
c_address= local_add.get()
c_city= city.get()
c_state= state.get()
doc= doctor.get()
#encoding list data to insert into db
prepared_data= encodeList(purchage_data)
try:
root.commit_db(query=f"INSERT INTO `{root.database}`.`{root.tablename}` (`invoice_no`,`customer`, `address`, `city`, `state`, `doctor`, `purchage_data`) VALUES ('{invoice}','{customer}', '{c_address}', '{c_city}', '{c_state}', '{doc}', '{prepared_data}')")
messagebox.showinfo("Information","Invoice Created")
except:
messagebox.showerror("Error","Error | |
<filename>AnalyzeControl.py
#!/usr/bin/env python
"""
Tencent is pleased to support the open source community by making HaboMalHunter available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in
compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.
"""
"""
Author:
Date: August 08, 2016
Description: Linux Malware Analysis System
"""
"""
usage: AnalyzeControl.py [-h] [-v] [-t] [-z] [-i TIME_LIMIT_DYNAMIC] [-s] [-c]
[-e CONFIG_PATH] -l TARGET
Linux Malware Analysis System
optional arguments:
-h, --help show this help message and exit
-v, --verbose Display debug messages
-t, --test Only init and output Tag files
-z, --zip Indicate the target is a compressed package
-i TIME_LIMIT_DYNAMIC, --time_limit_dynamic TIME_LIMIT_DYNAMIC
Set the timeout limitation (seconds) for dynamic
analysis
-s, --static_only Only static analysis
-c, --clean Clean the workspace
-e CONFIG_PATH, --config_path CONFIG_PATH
Set the configuration path
-l TARGET, --target TARGET
Set the absolute path of the target
"""
import sys
import os
import stat
import hashlib
import subprocess
import argparse
import ConfigParser
import logging
import logging.handlers
import shutil
import tempfile
import json
# Customised Package
import static
import dynamic
import metrics
import base
TIME_LIMIT_DYNAMIC_DEF = 60 # default timeout (seconds) for dynamic analysis
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__)) # directory of this file
BASE_HOME = PROJECT_DIR  # all analyses run relative to the project directory
SECTION_DEF = 'main'  # section of config.ini that holds every setting
CONFIG_PATH_DEF = os.path.join(PROJECT_DIR, 'config.ini')  # default for -e/--config_path
ENV_PREFIX = 'HABO_'  # env vars with this prefix override config.ini values
LOG_FMT = "%(asctime)s [%(filename)s:%(lineno)d %(funcName)s] %(levelname)s: %(message)s"
# global variables
log = logging.getLogger()  # root logger; handlers are attached in init_log()
class Config_OBJ:
    """Plain attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
def strTobool(value):
    """Map a truthy string to True; everything else is False.

    Recognised truthy spellings: yes/y/true/t/1 (case-insensitive).  The
    documented falsey spellings (no/n/false/f/0/0.0/""/none) and any
    unrecognised value all map to False.
    """
    truthy = ("yes", "y", "true", "t", "1")
    normalised = str(value).lower()
    return normalised in truthy
def init_cfg(cfg_path, args):
    """
    Build the configuration object for one analysis run.

    The priority level:
    define > args > env > ini
    (later writes into ``cfg`` overwrite earlier ones, so the ini file is
    read first and the hard-coded defines are applied last).
    """
    cfg = {}
    # file -- lowest priority: values from the .ini file, section SECTION_DEF
    conf_parser = ConfigParser.ConfigParser()
    if os.path.exists(cfg_path):
        conf_parser.read(cfg_path)
        #print(conf_parser.items('main'))
        for k,v in conf_parser.items(SECTION_DEF):
            cfg[k]=v
    # env -- HABO_-prefixed environment variables override the ini file
    for k,v in os.environ.items():
        if k.startswith(ENV_PREFIX):
            cfg_key = k[len(ENV_PREFIX):]  # strip the HABO_ prefix
            cfg[cfg_key]=v
    # args -- command-line arguments override env and ini
    args_dict = vars(args)
    for k,v in args_dict.iteritems():
        cfg[k]=v
    # define -- highest priority: values fixed by this script
    cfg["BASE_HOME"] = BASE_HOME
    # adjustment: turn the relative log locations into absolute paths
    file_log_dir = os.path.join(BASE_HOME, cfg["log_dir"])
    cfg["file_log_dir"] = file_log_dir
    cfg["static_finished_fname"] = os.path.join(file_log_dir,cfg["static_finished_fname"])
    cfg["dynamic_finished_fname"] = os.path.join(file_log_dir,cfg["dynamic_finished_fname"])
    # wrap the dict so values can be accessed like cfg.log_dir
    cfg = Config_OBJ(**cfg)
    # force verbose
    cfg.verbose=True
    # convert string to int (everything read from ini/env arrives as a string)
    cfg.time_limit_dynamic = int(cfg.time_limit_dynamic)
    cfg.strings_limit = int(cfg.strings_limit)
    cfg.decompress_limit = int(cfg.decompress_limit)
    cfg.tcpdump_limit = int(cfg.tcpdump_limit)
    cfg.sysdig_limit = int(cfg.sysdig_limit)
    cfg.trace_limit = int(cfg.trace_limit)
    # convert string to bool
    cfg.is_inplace = strTobool(cfg.is_inplace)
    cfg.enable_inetsim = strTobool(cfg.enable_inetsim)
    cfg.enable_prefix_remove = strTobool(cfg.enable_prefix_remove)
    cfg.enable_mem_analysis = strTobool(cfg.enable_mem_analysis)
    return cfg
def init_arguments(argv):
    """Parse the command line and return the merged configuration object.

    NOTE: *argv* is currently unused; argparse reads sys.argv directly.
    """
    parser = argparse.ArgumentParser(description='Linux Malware Analysis System')
    parser.add_argument('-v', '--verbose', help='Display debug messages', action='store_true', required=False)
    parser.add_argument('-t', '--test', help='Only init and output Tag files', action='store_true', required=False)
    parser.add_argument('-z', '--zip', help='Indicate the target is a compressed package', action='store_true', required=False)
    #please config time out in ini file
    #parser.add_argument('-i', '--time_limit_dynamic', help='Set the timeout limitation (seconds) for dynamic analysis', type=int, default=TIME_LIMIT_DYNAMIC_DEF, required=False)
    parser.add_argument('-s', '--static_only', help='Only static analysis', action='store_true', required=False)
    parser.add_argument('-c', '--clean', help='Clean the workspace', action='store_true', required=False)
    parser.add_argument('-e', '--config_path', help='Set the configuration path', type=str, default=CONFIG_PATH_DEF, required=False)
    # required arg
    parser.add_argument('-l', '--target', help='Set the absolute path of the target', type=str, required=True)
    args = parser.parse_args()
    # cd into base_home so relative paths in the configuration resolve
    os.chdir(BASE_HOME)
    # configuration init
    # The chdir must happen before parsing the configuration.
    cfg = init_cfg(args.config_path,args)
    return cfg
def init_log(cfg):
    """Attach file + console handlers to the global logger and create the
    log/report directories used later in the run."""
    # dir: make sure the file-log directory exists before the handler opens it
    file_log_dir = cfg.file_log_dir
    if not os.path.exists(file_log_dir):
        os.mkdir(file_log_dir)
    fmt = logging.Formatter(LOG_FMT)
    # file handler (WatchedFileHandler survives external log rotation)
    file_log_path = os.path.join(BASE_HOME, cfg.log_dir, cfg.file_log)
    file_handler = logging.handlers.WatchedFileHandler(file_log_path)
    file_handler.setFormatter(fmt)
    log.addHandler(file_handler)
    # Console
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(fmt)
    log.addHandler(console_handler)
    if cfg.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.ERROR)
    log.info("Linux Malware Analysis System. version:%s", cfg.version)
    log.debug("Configuration: ")
    log.debug(vars(cfg))
    # LD_DEBUG: absolute path of the loader-debug log used by dynamic analysis
    cfg.ld_debug_log_abs = os.path.join(file_log_dir, cfg.ld_debug_log)
    # inetsim dir: simulated-network log/report directories
    if not os.path.exists(cfg.inetsim_log_dir):
        os.mkdir(cfg.inetsim_log_dir)
    if not os.path.exists(cfg.inetsim_log_report_dir):
        os.mkdir(cfg.inetsim_log_report_dir)
def init_workspace_inplace(cfg):
    """Analyse the target where it already lives (no copy is made).

    Records the workspace directory and the target's absolute path on *cfg*
    and makes the target owner-read-only.  Exits the process when the
    target is missing.
    """
    workspace_dir = os.path.dirname(cfg.target)
    workspace_dir = os.path.abspath(workspace_dir)
    cfg.workspace_dir = workspace_dir
    if os.path.exists(cfg.target):
        target_abs_path = os.path.abspath(cfg.target)
        cfg.target_abs_path = target_abs_path
        # Owner read-only: the sample must not be modified in place.
        os.chmod(target_abs_path, stat.S_IRUSR)
    else:
        log.critical("%s does not exist.", cfg.target)
        os._exit(1)
    log.info("Target absolute path: %s", cfg.target_abs_path)
def init_workspace(cfg, is_inplace=False):
    """Prepare the directory the analysers will run in.

    Either analyses the target in place, or copies it into a freshly
    re-created workspace directory.  Sets ``cfg.workspace_dir`` and
    ``cfg.target_abs_path``; exits the process when the target is missing.
    """
    log.info("init workspace is_inplace:%r",is_inplace)
    if is_inplace:
        init_workspace_inplace(cfg)
    else:
        # Re-create the workspace from scratch so files from a previous
        # analysis never leak into this one.
        workspace_dir = os.path.join(BASE_HOME,cfg.exec_home)
        if not os.path.exists(workspace_dir):
            os.mkdir(workspace_dir)
        else:
            shutil.rmtree(workspace_dir)
            os.mkdir(workspace_dir)
        cfg.workspace_dir = workspace_dir
        # copy target file into workspace
        if os.path.exists(cfg.target):
            shutil.copy(cfg.target, workspace_dir)
            target_abs_path = os.path.join(workspace_dir, os.path.basename(cfg.target))
            cfg.target_abs_path = target_abs_path
            # Owner read-only: the sample must not modify itself on disk.
            os.chmod(target_abs_path, stat.S_IRUSR)
        else:
            log.critical("%s does not exist.", cfg.target)
            os._exit(1)
    log.info("Target absolute path: %s", cfg.target_abs_path)
    # The working directory cannot be changed here; each analyser changes it
    # when it starts.
# I can not change dir at here, the dir will be changed everytime when analyzer starts.
def get_filetype(file_path):
    """Return ``(file_type, full_info)`` as reported by /usr/bin/file.

    ``file_type`` is the first word of the description (e.g. 'ELF'),
    ``full_info`` the whole description.  Returns ("UNKNOWN","") when the
    path does not exist.
    """
    if os.path.exists(file_path):
        output = subprocess.check_output(['/usr/bin/file', file_path])
        # `file` prints "<path>: <description>"; keep the part after the colon.
        parts = output.split(":")
        file_type = "UNKNOWN"
        full_info = ""
        if len(parts) > 1:
            full_info = parts[1].strip()
            detailed_parts = parts[1].split()
            if len(detailed_parts) > 1:
                file_type = detailed_parts[0].strip()
        log.debug("file_type: %s, full_info: %s",file_type, full_info)
        return (file_type, full_info)
    else:
        log.critical("%s does not exist.", file_path)
        return ("UNKNOWN","")
def is_compressed(file_path):
    """Return True when /usr/bin/file identifies *file_path* as an archive."""
    (file_type, full_info) = get_filetype(file_path)
    compressed_type_list = ['7-zip', 'bzip2', 'gzip', 'XZ', 'Zip']
    # Either a known archive type, or a description mentioning "tar".
    ret = file_type in compressed_type_list or full_info.find("tar") != -1
    log.info("file %s compressed: %r",file_path,ret)
    return ret
def is_executable(file_path):
    """Return True for an ELF executable (the only supported type so far)."""
    (file_type, full_info) = get_filetype(file_path)
    exec_type_list = ['ELF']
    # Must be an ELF *and* describe itself as executable (not a .so etc.).
    ret = file_type in exec_type_list and full_info.find("executable") != -1
    log.info("file %s executable: %r",file_path, ret)
    return ret
def do_work(cfg, do_static, do_dynamic):
    """Run static analysis and, when requested, dynamic analysis of cfg.target."""
    # Dynamic analysis consumes static results (hashes etc.), so it always
    # implies a static run first.
    if do_dynamic:
        do_static = True
        log.info("set do_static True, since dynamic analyzer needs info from static analyzer")
    if do_static:
        log.info("will do_static analysis")
        static_analyzer = static.StaticAnalyzer(cfg)
        static_analyzer.start()
        static_analyzer.output('json')
        static_analyzer.end()
        if (cfg.target == cfg.main_target):
            # Remember the main sample's md5 for later correlation.
            cfg.main_target_md5 = static_analyzer.info["hash_md5"]
            log.info("main target md5: %s", cfg.main_target_md5)
    if do_dynamic:
        # Record whether the target can actually be executed; the dynamic
        # analyser still runs either way and decides what to do with it.
        cfg.is_executable = is_executable(cfg.target)
        if not cfg.is_executable:
            log.info("The target %s is not executable. do_dynamic: %r",cfg.target,do_dynamic)
        runner = dynamic.DynamicAnalyzer(cfg)
        # Hand over the static results (md5 and friends) before starting.
        runner.info = static_analyzer.info
        runner.start()
        runner.output('json')
        runner.end()
    else:
        log.info("skip dynamic analysis since do_dynamic is False")
def de_compress_top_lev(file_path):
    """
    1. create dir with the format {file_path}_7zdump.
    2. only decompress the top level.

    Returns (bfs_list, temp_list): bfs_list holds the original path plus
    every extracted file as absolute paths; temp_list holds the dump
    directory (if any) so callers can clean it up later.
    """
    bfs_list = [file_path]
    temp_list = []
    if is_compressed(file_path):
        dump_dir = file_path + "_7zdump"
        if not os.path.exists(dump_dir):
            os.mkdir(dump_dir)
        log.info("top_lev decompress dir: %s", dump_dir)
        temp_list.append(dump_dir)
        cmd_list = ["/usr/bin/7z", "x", "-y", "-o" + dump_dir, file_path]
        log.info("call 7z command: %s", str(cmd_list))
        try:
            subprocess.check_output(cmd_list)
        except subprocess.CalledProcessError as e:
            # 7z exits non-zero on partial failures; keep whatever it extracted
            log.error("CalledProcessError: %s", str(e))
        for root, _dirs, files in os.walk(dump_dir):
            for entry in files:
                path = os.path.abspath(os.path.join(root, entry))
                # make sure any file will be enqueue only once
                if path not in bfs_list:
                    bfs_list.append(path)
    return (bfs_list, temp_list)
def de_compress(file_path, is_inplace=False, decompress_limit=100):
    """
    It will decompress the target as bfs order.

    Each compressed file popped from the queue is extracted (in place
    next to the archive, or into a fresh temp dir) and the extracted
    files are enqueued.  Stops after `decompress_limit` pops.
    Returns (bfs_list, temp_list).
    """
    queue = [file_path]
    visited = []    # every path dequeued so far, in BFS order
    temp_dirs = []  # extraction directories to clean up later
    pops = 0
    while queue:
        current = queue.pop(0)
        visited.append(current)
        pops += 1
        if pops >= decompress_limit:
            log.info("pop_cnt:%d, limit:%d ,break", pops, decompress_limit)
            break
        if not is_compressed(current):
            continue
        if is_inplace:
            out_dir = os.path.abspath(os.path.dirname(current))
        else:
            out_dir = tempfile.mkdtemp()
        log.info("is_inplace %r decompress dir: %s", is_inplace, out_dir)
        temp_dirs.append(out_dir)
        cmd_list = ["/usr/bin/7z", "x", "-y", "-o" + out_dir, current]
        log.info("call 7z command: %s", str(cmd_list))
        try:
            subprocess.check_output(cmd_list)
        except subprocess.CalledProcessError as e:
            # 7z exits non-zero on partial failures; keep whatever it extracted
            log.error("CalledProcessError: %s", str(e))
        for root, _dirs, files in os.walk(out_dir):
            for entry in files:
                path = os.path.abspath(os.path.join(root, entry))
                # make sure any file will be enqueue only once
                if (not os.path.samefile(current, path)) and path not in queue and path not in visited:
                    queue.append(path)
    return (visited, temp_dirs)
def clean_temp(temp_list):
    """Recursively delete every directory in `temp_list`, ignoring errors."""
    for directory in temp_list:
        log.info("clean dir: %s", directory)
        shutil.rmtree(directory, True)
def init_target_loader(cfg):
    """Locate the 32/64-bit target loader ELFs, make them world-executable,
    and record their paths on cfg.  Aborts the process when missing."""
    loader_32 = cfg.target_loader + ".32.elf"
    loader_64 = cfg.target_loader + ".64.elf"
    if not (os.path.exists(loader_32) and os.path.exists(loader_64)):
        log.critical("failed to locate target loader: %s,%s", loader_32, loader_64)
        os._exit(2)
    log.info("target loader: %s, %s", loader_32, loader_64)
    # chmod 0777 targeT_loader
    mode_0777 = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
    os.chmod(loader_64, mode_0777)
    os.chmod(loader_32, mode_0777)
    cfg.target_loader_64 = loader_64
    cfg.target_loader_32 = loader_32
def generate_tag_file(cfg, do_static, do_dynamic):
    """Touch per-phase marker files so external watchers can tell that the
    requested analyses have finished."""
    if do_static:
        base.BaseAnalyzer.touchFile(cfg.static_finished_fname)
        log.info("static tag file: %s is generated.", cfg.static_finished_fname)
    if do_dynamic:
        base.BaseAnalyzer.touchFile(cfg.dynamic_finished_fname)
        log.info("dynamic tag file: %s is generated.", cfg.dynamic_finished_fname)
def combine_static_perfile(cfg):
    """Merge per-file static logs into the main target's static log.

    For every file discovered by the decompression BFS (cfg.bfs_list) the
    per-file log <md5>.static is loaded and its first BaseInfo record is
    appended to the main log's "SubBaseInfo" list, after fixing up its
    Name/__full_path/ID fields.  Logs an error and returns when the main
    log is missing.
    """
    main_log_path = os.path.join(cfg.file_log_dir, cfg.main_target_md5 + ".static")
    if not os.path.exists(main_log_path):
        log.error("main log file: %s is missing", main_log_path)
        return
    # bug fix: the logs are JSON text; they were opened "rb"/"wb", and
    # json.dump() into a binary-mode file raises TypeError on Python 3.
    # Text mode works on both Python 2 and 3; `with` guarantees the
    # handles are closed even when json parsing fails.
    with open(main_log_path, "r") as fi:
        main_info = json.load(fi)
    main_info["SubBaseInfo"] = []
    for item in cfg.bfs_list:
        if not os.path.isfile(item):
            continue
        file_md5 = base.BaseAnalyzer.get_md5_by_fname(item)
        node_path = os.path.join(cfg.file_log_dir, file_md5 + ".static")
        log.info("combine %s, md5:%s, node:%s", item, file_md5, node_path)
        if os.path.exists(node_path) and node_path != main_log_path:
            with open(node_path, "r") as sub_f:
                sub_info = json.load(sub_f)
            if len(sub_info["BaseInfo"]) > 0:
                node = sub_info["BaseInfo"][0]
                # fix Name info
                if cfg.enable_prefix_remove:
                    node["Name"] = base.BaseAnalyzer.prefix_remove(item)
                else:
                    node["Name"] = item
                # fix "__full_path"
                node["__full_path"] = item
                node["ID"] = metrics.S_ID_SUB_BASE_INFO
                main_info["SubBaseInfo"].append(node)
    with open(main_log_path, "w") as fo:
        json.dump(main_info, fo, indent=4, sort_keys=False)
    log.info("main static log updated %s", main_log_path)
def combine_static_log(cfg):
    """Merge every <md5>.static log under cfg.file_log_dir into the main log.

    Unlike combine_static_perfile() this walks the log directory directly
    and does not fix up the Name/__full_path fields of the sub records.
    Logs an error and returns when the main log is missing.
    """
    main_log_path = os.path.join(cfg.file_log_dir, cfg.main_target_md5 + ".static")
    if not os.path.exists(main_log_path):
        log.error("main log file: %s is missing", main_log_path)
        return
    # bug fix: JSON logs were opened "rb"/"wb"; json.dump() into a
    # binary-mode file raises TypeError on Python 3.  Text mode works on
    # both 2 and 3; `with` guarantees the handles are closed.
    with open(main_log_path, "r") as fi:
        main_info = json.load(fi)
    main_info["SubBaseInfo"] = []
    for root, dirs, files in os.walk(cfg.file_log_dir):
        for item in files:
            node_path = os.path.join(root, item)
            if node_path.endswith(".static") and node_path != main_log_path:
                log.info("combine %s", node_path)
                with open(node_path, "r") as sub_f:
                    sub_info = json.load(sub_f)
                if len(sub_info["BaseInfo"]) > 0:
                    node = sub_info["BaseInfo"][0]
                    node["ID"] = metrics.S_ID_SUB_BASE_INFO
                    main_info["SubBaseInfo"].append(node)
    with open(main_log_path, "w") as fo:
        json.dump(main_info, fo, indent=4, sort_keys=False)
    log.info("main static log updated %s", main_log_path)
def exratc_file_size(main_info, node_md5):
    """Return the recorded SizeInfo for `node_md5` from main_info's
    SubBaseInfo records, or 0 when the md5 is not present.

    (Function name kept as-is; callers elsewhere rely on it.)
    """
    for record in main_info["SubBaseInfo"]:
        if record["MD5"] == node_md5:
            return record["SizeInfo"]
    return 0
def pick_largest_elf(cfg):
    """
    Pick the largest in file size

    Scans the main static log's SubBaseInfo records for executable ELF
    files and returns (md5, path) of the biggest one.  Falls back to the
    first record when no ELF is found, and to ("", "") when the main log
    is missing or has no records.
    """
    # bug fix: initialize the results up front so the return below cannot
    # hit a NameError when the main static log does not exist
    target_md5 = ""
    target_path = ""
    max_size = 0
    main_static = os.path.join(cfg.file_log_dir, cfg.main_target_md5 + ".static")
    if os.path.exists(main_static):
        # JSON is text; open in text mode (binary mode is unnecessary here)
        with open(main_static, "r") as fi:
            main_info = json.load(fi)
        sub_info_list = main_info["SubBaseInfo"]
        for item in sub_info_list:
            full_path = item["__full_path"]
            if item["FileType"].startswith("ELF") and is_executable(full_path):
                node_md5 = item["MD5"]
                file_size = item["SizeInfo"]
                log.debug("file %s size: %d", node_md5, file_size)
                if max_size < file_size:
                    max_size = file_size
                    target_md5 = node_md5
                    target_path = full_path
        if len(target_md5) > 0:
            log.info("found ELF %s, md5 %s with file size: %d", target_path, target_md5, max_size)
        elif len(sub_info_list) > 0:
            # no executable ELF found: fall back to the first record
            item = sub_info_list[0]
            target_path = item["__full_path"]
            target_md5 = item["MD5"]
            max_size = item["SizeInfo"]
            log.info("Failed to find a ELF, pick first one: %s", target_path)
        else:
            log.info("Failed to pick any file.")
    else:
        log.error("main static log %s is missing", main_static)
    return (target_md5, target_path)
def generate_main_dyn_log(cfg, target_md5):
    """Promote the dynamic log of `target_md5` to be the main target's
    dynamic log by copying it over <main_md5>.dynamic."""
    main_dynamic = os.path.join(cfg.file_log_dir, cfg.main_target_md5 + ".dynamic")
    if not target_md5:
        return
    src_file = os.path.join(cfg.file_log_dir, target_md5 + ".dynamic")
    if not os.path.exists(src_file):
        log.error("dynamic log %s can not be found.", src_file)
        return
    log.info("found dynamic log: %s", src_file)
    if src_file != main_dynamic:
        shutil.copyfile(src_file, main_dynamic)
        log.info("copy file from %s to %s", src_file, main_dynamic)
    log.info("main dynamic log updated %s", main_dynamic)
#discard
def pick_dynamic_log(cfg):
    """
    Pick the largest in file size

    Deprecated (#discard): superseded by pick_largest_elf() plus
    generate_main_dyn_log().  Kept for reference.
    """
    # bug fix: main_dynamic was commented out but still used below, which
    # raised NameError whenever a target was found
    main_dynamic = os.path.join(cfg.file_log_dir, cfg.main_target_md5 + ".dynamic")
    main_static = os.path.join(cfg.file_log_dir, cfg.main_target_md5 + ".static")
    log_dir = cfg.file_log_dir
    if os.path.exists(main_static):
        # JSON is text; open in text mode
        with open(main_static, "r") as fi:
            main_info = json.load(fi)
        max_size = 0
        target_md5 = ""
        for root, dirs, files in os.walk(log_dir):
            for item in files:
                node = os.path.join(root, item)
                if node.endswith(".static"):
                    # per-file log names start with the 32-char md5
                    node_md5 = os.path.basename(node)[0:32]
                    file_size = exratc_file_size(main_info, node_md5)
                    log.debug("file %s size: %d", node_md5, file_size)
                    if max_size < file_size:
                        max_size = file_size
                        target_md5 = node_md5
        if len(target_md5) > 0:
            log.info("found dynamic log %s with file size: %d", target_md5, max_size)
            src_file = os.path.join(cfg.file_log_dir, target_md5 + ".dynamic")
            dest_file = main_dynamic
            # TODO why they are the same
            if src_file != dest_file:
                shutil.copyfile(src_file, dest_file)
            log.info("main dynamic log updated %s", main_dynamic)
def generate_output_log(cfg, do_static, do_dynamic):
"""
generate output log as:
output.static
output.dynamic
"""
if do_static:
main_static = os.path.join(cfg.file_log_dir,cfg.main_target_md5+".static")
output_static = os.path.join(cfg.file_log_dir, cfg.static_log)
if os.path.exists(main_static):
shutil.copyfile(main_static, output_static)
log.info("output static logs %s have been generated", output_static)
if do_dynamic:
main_dynamic = | |
df.shape[0])
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# test transform scenarios on data frames
r = tile(df.transform(lambda x: list(range(len(x)))))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0])
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: list(range(len(x))), axis=1))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == df.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (20 // df.shape[1], df.shape[1])
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(['cumsum', 'cummax', lambda x: x + 1]))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], df.shape[1] * 3)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 20 // df.shape[0] * 3)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform({'A': 'cumsum', 'D': ['cumsum', 'cummax'], 'F': lambda x: x + 1}))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], 4)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (df.shape[0], 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# test agg scenarios on series
r = tile(df.transform(lambda x: x.iloc[:-1], _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1])
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (df.shape[0], np.nan)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (2, np.nan)
assert r.chunks[0].inputs[0].shape[1] == df_raw.shape[1]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = tile(df.transform(fn_list, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, df.shape[1] * 2)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 2)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
r = tile(df.transform(lambda x: x.sum(), _call_agg=True))
assert r.dtype == np.dtype('int64')
assert r.shape == (df.shape[1],)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (20 // df.shape[0],)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
fn_dict = {
'A': rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
'D': [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)],
'F': lambda x: x.iloc[:-1].reset_index(drop=True),
}
r = tile(df.transform(fn_dict, _call_agg=True))
assert all(v == np.dtype('int64') for v in r.dtypes) is True
assert r.shape == (np.nan, 4)
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.dataframe
assert r.chunks[0].shape == (np.nan, 1)
assert r.chunks[0].inputs[0].shape[0] == df_raw.shape[0]
assert r.chunks[0].inputs[0].op._op_type_ == opcodes.CONCATENATE
# SERIES CASES
# test transform scenarios on series
r = tile(series.transform(lambda x: x + 1))
assert np.dtype('int64') == r.dtype
assert r.shape == series.shape
assert r.op._op_type_ == opcodes.TRANSFORM
assert r.op.output_types[0] == OutputType.series
assert r.chunks[0].shape == (5,)
assert r.chunks[0].inputs[0].shape == (5,)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_string_method():
    """Tile-level checks for the Series.str accessor.

    Covers dtype/name/index propagation for `contains`, shape handling
    for `split(expand=True)`, the scalar result of `cat(sep=...)`, both
    `extract` modes, and the error cases of `cat`.
    """
    s = pd.Series(['a', 'b', 'c'], name='s')
    series = from_pandas_series(s, chunk_size=2)
    with pytest.raises(AttributeError):
        _ = series.str.non_exist
    r = series.str.contains('c')
    assert r.dtype == np.bool_
    assert r.name == s.name
    pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
    assert r.shape == s.shape
    r = tile(r)
    for i, c in enumerate(r.chunks):
        assert c.index == (i,)
        assert c.dtype == np.bool_
        assert c.name == s.name
        pd.testing.assert_index_equal(c.index_value.to_pandas(),
                                      s.index[i * 2: (i + 1) * 2])
        # bug fix: the conditional must be parenthesized — the previous
        # `assert c.shape == (2,) if i == 0 else (1,)` evaluated the
        # truthy tuple `(1,)` for every chunk after the first and thus
        # never actually checked those shapes
        assert c.shape == ((2,) if i == 0 else (1,))
    r = series.str.split(',', expand=True, n=1)
    assert r.op.output_types[0] == OutputType.dataframe
    assert r.shape == (3, 2)
    pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
    pd.testing.assert_index_equal(r.columns_value.to_pandas(), pd.RangeIndex(2))
    r = tile(r)
    for i, c in enumerate(r.chunks):
        assert c.index == (i, 0)
        pd.testing.assert_index_equal(c.index_value.to_pandas(),
                                      s.index[i * 2: (i + 1) * 2])
        pd.testing.assert_index_equal(c.columns_value.to_pandas(), pd.RangeIndex(2))
        # bug fix: parenthesized conditional (see above)
        assert c.shape == ((2, 2) if i == 0 else (1, 2))
    with pytest.raises(TypeError):
        _ = series.str.cat([['1', '2']])
    with pytest.raises(ValueError):
        _ = series.str.cat(['1', '2'])
    with pytest.raises(ValueError):
        _ = series.str.cat(',')
    with pytest.raises(TypeError):
        _ = series.str.cat({'1', '2', '3'})
    r = series.str.cat(sep=',')
    assert r.op.output_types[0] == OutputType.scalar
    assert r.dtype == s.dtype
    r = tile(r)
    assert len(r.chunks) == 1
    assert r.chunks[0].op.output_types[0] == OutputType.scalar
    assert r.chunks[0].dtype == s.dtype
    r = series.str.extract(r'[ab](\d)', expand=False)
    assert r.op.output_types[0] == OutputType.series
    assert r.dtype == s.dtype
    r = tile(r)
    for i, c in enumerate(r.chunks):
        assert c.index == (i,)
        assert c.dtype == s.dtype
        assert c.name == s.name
        pd.testing.assert_index_equal(c.index_value.to_pandas(),
                                      s.index[i * 2: (i + 1) * 2])
        # bug fix: parenthesized conditional (see above)
        assert c.shape == ((2,) if i == 0 else (1,))
    r = series.str.extract(r'[ab](\d)', expand=True)
    assert r.op.output_types[0] == OutputType.dataframe
    assert r.shape == (3, 1)
    pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
    pd.testing.assert_index_equal(r.columns_value.to_pandas(), pd.RangeIndex(1))
    r = tile(r)
    for i, c in enumerate(r.chunks):
        assert c.index == (i, 0)
        pd.testing.assert_index_equal(c.index_value.to_pandas(),
                                      s.index[i * 2: (i + 1) * 2])
        pd.testing.assert_index_equal(c.columns_value.to_pandas(), pd.RangeIndex(1))
        # bug fix: parenthesized conditional (see above)
        assert c.shape == ((2, 1) if i == 0 else (1, 1))
    assert 'lstrip' in dir(series.str)
def test_datetime_method():
    """Tile-level checks for the Series.dt accessor (dtype, name and
    index propagation of `dt.year` chunks)."""
    s = pd.Series([pd.Timestamp('2020-1-1'),
                   pd.Timestamp('2020-2-1'),
                   pd.Timestamp('2020-3-1')],
                  name='ss')
    series = from_pandas_series(s, chunk_size=2)
    r = series.dt.year
    assert r.dtype == s.dt.year.dtype
    pd.testing.assert_index_equal(r.index_value.to_pandas(), s.index)
    assert r.shape == s.shape
    assert r.op.output_types[0] == OutputType.series
    assert r.name == s.dt.year.name
    r = tile(r)
    for i, c in enumerate(r.chunks):
        assert c.index == (i,)
        assert c.dtype == s.dt.year.dtype
        assert c.op.output_types[0] == OutputType.series
        # bug fix: check the chunk's name, not the (already-verified)
        # tiled result's name
        assert c.name == s.dt.year.name
        pd.testing.assert_index_equal(c.index_value.to_pandas(),
                                      s.index[i * 2: (i + 1) * 2])
        # bug fix: the conditional must be parenthesized — the previous
        # `assert c.shape == (2,) if i == 0 else (1,)` asserted the
        # truthy tuple `(1,)` for chunks after the first
        assert c.shape == ((2,) if i == 0 else (1,))
    with pytest.raises(AttributeError):
        _ = series.dt.non_exist
    assert 'ceil' in dir(series.dt)
def test_series_isin():
    """Tiling checks for Series.isin under various chunking combinations."""

    def _check_chunks(tiled, chunk_len, values_len):
        # every chunk is a boolean series fed by one data chunk plus the
        # values chunk (always a single chunk after rechunking)
        for idx, chunk in enumerate(tiled.chunks):
            assert chunk.index == (idx,)
            assert chunk.dtype == np.dtype('bool')
            assert chunk.shape == (chunk_len,)
            assert len(chunk.op.inputs) == 2
            assert chunk.op.output_types[0] == OutputType.series
            assert chunk.op.inputs[0].index == (idx,)
            assert chunk.op.inputs[0].shape == (chunk_len,)
            assert chunk.op.inputs[1].index == (0,)
            assert chunk.op.inputs[1].shape == (values_len,)

    # one chunk in multiple chunks
    a = from_pandas_series(pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), chunk_size=10)
    b = from_pandas_series(pd.Series([2, 1, 9, 3]), chunk_size=2)
    _check_chunks(tile(a.isin(b)), 10, 4)  # b has been rechunked
    # multiple chunk in one chunks
    a = from_pandas_series(pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), chunk_size=2)
    b = from_pandas_series(pd.Series([2, 1, 9, 3]), chunk_size=4)
    _check_chunks(tile(a.isin(b)), 2, 4)
    # multiple chunk in multiple chunks
    a = from_pandas_series(pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), chunk_size=2)
    b = from_pandas_series(pd.Series([2, 1, 9, 3]), chunk_size=2)
    _check_chunks(tile(a.isin(b)), 2, 4)  # b has been rechunked
    with pytest.raises(TypeError):
        _ = a.isin('sth')
    with pytest.raises(TypeError):
        _ = a.to_frame().isin('sth')
def test_cut():
    """Tiling checks for cut() on series, tensors and plain lists."""
    s = from_pandas_series(pd.Series([1., 2., 3., 4.]), chunk_size=2)
    # invalid bin counts / inputs must be rejected up front
    with pytest.raises(ValueError):
        _ = cut(s, -1)
    with pytest.raises(ValueError):
        _ = cut([[1, 2], [3, 4]], 3)
    with pytest.raises(ValueError):
        _ = cut([], 3)
    binned, bins = cut(s, [1.5, 2.5], retbins=True)
    assert isinstance(binned, SERIES_TYPE)
    assert isinstance(bins, TENSOR_TYPE)
    tiled = tile(binned)
    assert len(tiled.chunks) == 2
    for chunk in tiled.chunks:
        assert isinstance(chunk, SERIES_CHUNK_TYPE)
        assert chunk.shape == (2,)
    cat = cut(s.to_tensor(), [1.5, 2.5])
    assert isinstance(cat, CATEGORICAL_TYPE)
    assert len(cat) == len(s)
    assert 'Categorical' in repr(cat)
    tiled = tile(cat)
    assert len(tiled.chunks) == 2
    for chunk in tiled.chunks:
        assert isinstance(chunk, CATEGORICAL_CHUNK_TYPE)
        assert chunk.shape == (2,)
        assert chunk.ndim == 1
    coded = cut([0, 1, 1, 2], bins=4, labels=False)
    assert isinstance(coded, TENSOR_TYPE)
    expected = pd.cut([0, 1, 1, 2], bins=4, labels=False)
    assert coded.dtype == expected.dtype
def test_to_numeric():
    """Argument validation and tiling checks for to_numeric()."""
    raw = pd.DataFrame({"a": [1.0, 2, 3, -3]})
    df = from_pandas_df(raw, chunk_size=2)
    # dataframes, multi-dimensional lists and empty input are rejected
    with pytest.raises(ValueError):
        _ = to_numeric(df)
    with pytest.raises(ValueError):
        _ = to_numeric([['1.0', 1]])
    with pytest.raises(ValueError):
        _ = to_numeric([])
    source = from_pandas_series(pd.Series(['1.0', '2.0', 1, -2]), chunk_size=2)
    tiled = tile(to_numeric(source))
    assert len(tiled.chunks) == 2
    assert isinstance(tiled, SERIES_TYPE)
    tiled = tile(to_numeric(['1.0', '2.0', 1, -2]))
    assert isinstance(tiled, TENSOR_TYPE)
def test_astype():
s = from_pandas_series(pd.Series([1, 2, 1, 2], name='a'), chunk_size=2)
with pytest.raises(KeyError):
astype(s, {'b': 'str'})
df = from_pandas_df(pd.DataFrame({'a': [1, 2, 1, 2],
'b': ['a', 'b', | |
OperationalError:
# something error occured
break
ln = _bytes_to_bint(self._read(4)) - 4
data = self._read(ln)
if code == 90:
self._trans_status = data
DEBUG_OUTPUT("-> ReadyForQuery('Z'):{}".format(data))
break
elif code == 82:
auth_method = _bytes_to_bint(data[:4])
DEBUG_OUTPUT("-> Authentication('R'):{}".format(auth_method))
if auth_method == 0: # trust
pass
elif auth_method == 5: # md5
salt = data[4:]
hash1 = hashlib.md5(self.password.encode('ascii') + self.user.encode("ascii")).hexdigest().encode("ascii")
hash2 = hashlib.md5(hash1+salt).hexdigest().encode("ascii")
self._send_data(b'p', b''.join([b'md5', hash2, b'\x00']))
# accept
code = ord(self._read(1))
assert code == 82
ln = _bytes_to_bint(self._read(4)) - 4
data = self._read(ln)
assert _bytes_to_bint(data[:4]) == 0
elif auth_method == 10: # SASL
assert data[4:-2].decode('utf-8') == 'SCRAM-SHA-256'
printable = string.ascii_letters + string.digits + '+/'
client_nonce = ''.join(
printable[random.randrange(0, len(printable))]
for i in range(24)
)
# send client first message
first_message = 'n,,n=,r=' + client_nonce
self._send_data(b'p', b''.join([
b'SCRAM-SHA-256\x00',
_bint_to_bytes(len(first_message)),
first_message.encode('utf-8')
]))
code = ord(self._read(1))
assert code == 82
ln = _bytes_to_bint(self._read(4)) - 4
data = self._read(ln)
_bytes_to_bint(data[:4]) == 11 # SCRAM first
server = {
kv[0]: kv[2:]
for kv in data[4:].decode('utf-8').split(',')
}
# r: server nonce
# s: servre salt
# i: iteration count
assert server['r'][:len(client_nonce)] == client_nonce
# send client final message
salted_pass = hashlib.pbkdf2_hmac(
'<PASSWORD>',
self.password.encode('utf-8'),
base64.standard_b64decode(server['s']),
int(server['i']),
)
client_key = hmac.HMAC(
salted_pass, b"Client Key", hashlib.sha256
).digest()
client_first_message_bare = "n=,r=" + client_nonce
server_first_message = "r=%s,s=%s,i=%s" % (server['r'], server['s'], server['i'])
client_final_message_without_proof = "c=biws,r=" + server['r']
auth_msg = ','.join([
client_first_message_bare,
server_first_message,
client_final_message_without_proof
])
client_sig = hmac.HMAC(
hashlib.sha256(client_key).digest(),
auth_msg.encode('utf-8'),
hashlib.sha256
).digest()
proof = base64.standard_b64encode(
b"".join([bytes([x ^ y]) for x, y in zip(client_key, client_sig)])
)
self._send_data(
b'p',
(client_final_message_without_proof + ",p=").encode('utf-8') + proof
)
code = ord(self._read(1))
assert code == 82
ln = _bytes_to_bint(self._read(4)) - 4
data = self._read(ln)
_bytes_to_bint(data[:4]) == 12 # SCRAM final
# accept
code = ord(self._read(1))
assert code == 82
ln = _bytes_to_bint(self._read(4)) - 4
data = self._read(ln)
assert _bytes_to_bint(data[:4]) == 0
else:
errobj = InterfaceError("Authentication method %d not supported." % (auth_method,))
elif code == 83:
k, v, _ = data.split(b'\x00')
DEBUG_OUTPUT("-> ParameterStatus('S'):{}:{}".format(k, v))
if k == b'server_encoding':
self.encoding = v.decode('ascii')
elif k == b'server_version':
version = v.decode('ascii').split('(')[0].split('.')
self.server_version = int(version[0]) * 10000
if len(version) > 0:
try:
self.server_version += int(version[1]) * 100
except Exception:
pass
if len(version) > 1:
try:
self.server_version += int(version[2])
except Exception:
pass
elif k == b'TimeZone':
self.tz_name = v.decode('ascii')
self.tzinfo = None
elif code == 75:
DEBUG_OUTPUT("-> BackendKeyData('K')")
pass
elif code == 67:
if not obj:
DEBUG_OUTPUT("-> CommandComplete('C')")
continue
command = data[:-1].decode('ascii')
DEBUG_OUTPUT("-> CommandComplete('C'):{}".format(command))
if command == 'SHOW':
obj._rowcount = 1
else:
for k in ('SELECT', 'UPDATE', 'DELETE', 'INSERT'):
if command[:len(k)] == k:
obj._rowcount = int(command.split(' ')[-1])
break
elif code == 84:
if not obj:
continue
count = _bytes_to_bint(data[0:2])
obj.description = [None] * count
n = 2
idx = 0
for i in range(count):
name = data[n:n+data[n:].find(b'\x00')]
n += len(name) + 1
try:
name = name.decode(self.encoding)
except UnicodeDecodeError:
pass
type_code = _bytes_to_bint(data[n+6:n+10])
if type_code == PG_TYPE_VARCHAR:
size = _bytes_to_bint(data[n+12:n+16]) - 4
precision = -1
scale = -1
elif type_code == PG_TYPE_NUMERIC:
size = _bytes_to_bint(data[n+10:n+12])
precision = _bytes_to_bint(data[n+12:n+14])
scale = precision - _bytes_to_bint(data[n+14:n+16])
else:
size = _bytes_to_bint(data[n+10:n+12])
precision = -1
scale = -1
# table_oid = _bytes_to_bint(data[n:n+4])
# table_pos = _bytes_to_bint(data[n+4:n+6])
# size = _bytes_to_bint(data[n+10:n+12])
# modifier = _bytes_to_bint(data[n+12:n+16])
# format = _bytes_to_bint(data[n+16:n+18]),
field = Description(name, type_code, None, size, precision, scale, None)
n += 18
obj.description[idx] = field
idx += 1
DEBUG_OUTPUT("-> RowDescription('T'):{}".format(obj.description))
elif code == 68:
if not obj:
DEBUG_OUTPUT("-> DataRow('D')")
continue
n = 2
row = []
while n < len(data):
if data[n:n+4] == b'\xff\xff\xff\xff':
row.append(None)
n += 4
else:
ln = _bytes_to_bint(data[n:n+4])
n += 4
row.append(data[n:n+ln])
n += ln
for i in range(len(row)):
row[i] = self._decode_column(row[i], obj.description[i][1])
obj._rows.append(tuple(row))
DEBUG_OUTPUT("-> DataRow('D'):{}".format(tuple(row)))
elif code == 78:
DEBUG_OUTPUT("-> NoticeResponse('N')")
pass
elif code == 69 and not errobj:
err = data.split(b'\x00')
# http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html
errcode = err[2][1:].decode('utf-8')
message = "{}:{}".format(self.query, err[3][1:].decode(self.encoding))
DEBUG_OUTPUT("-> ErrorResponse('E'):{}:{}".format(errcode, message))
if errcode[:2] == '0A':
errobj = NotSupportedError(message, errcode)
elif errcode[:2] in ('20', '21'):
errobj = ProgrammingError(message, errcode)
elif errcode[:2] in ('22', ):
errobj = DataError(message, errcode)
elif errcode[:2] == '23':
errobj = IntegrityError(message, errcode)
elif errcode[:2] in('24', '25'):
errobj = InternalError(message, errcode)
elif errcode[:2] in('26', '27', '28'):
errobj = OperationalError(message, errcode)
elif errcode[:2] in('2B', '2D', '2F'):
errobj = InternalError(message, errcode)
elif errcode[:2] == '34':
errobj = OperationalError(message, errcode)
elif errcode[:2] in ('38', '39', '3B'):
errobj = InternalError(message, errcode)
elif errcode[:2] in ('3D', '3F'):
errobj = ProgrammingError(message, errcode)
elif errcode[:2] in ('40', '42', '44'):
errobj = ProgrammingError(message, errcode)
elif errcode[:1] == '5':
errobj = OperationalError(message, errcode)
elif errcode[:1] in 'F':
errobj = InternalError(message, errcode)
elif errcode[:1] in 'H':
errobj = OperationalError(message, errcode)
elif errcode[:1] in ('P', 'X'):
errobj = InternalError(message, errcode)
else:
errobj = DatabaseError(message, errcode)
elif code == 72: # CopyOutputResponse('H')
pass
elif code == 100: # CopyData('d')
obj.write(data)
elif code == 99: # CopyDataDone('c')
pass
elif code == 71: # CopyInResponse('G')
while True:
buf = obj.read(8192)
if not buf:
break
# send CopyData
self._write(b'd' + _bint_to_bytes(len(buf) + 4))
self._write(buf)
# send CopyDone and Sync
self._write(b'c\x00\x00\x00\x04S\x00\x00\x00\x04')
else:
DEBUG_OUTPUT("-> Unknown({}):{}{}".format(code, ln, binascii.b2a_hex(data)))
pass
return errobj
def process_messages(self, obj):
err = self._process_messages(obj)
if err:
raise err
def _read(self, ln):
if not self.sock:
raise InterfaceError("Lost connection", "08003")
r = b''
while len(r) < ln:
b = self.sock.recv(ln-len(r))
if not b:
raise InterfaceError("Can't recv packets", "08003")
r += b
return r
def _write(self, b):
if not self.sock:
raise InterfaceError("Lost connection", "08003")
n = 0
while (n < len(b)):
n += self.sock.send(b[n:])
def _open(self):
self.sock = socket.create_connection((self.host, self.port), self.timeout)
DEBUG_OUTPUT("Connection._open() socket %s:%d" % (self.host, self.port))
if self.use_ssl:
import ssl
self._write(_bint_to_bytes(8))
self._write(_bint_to_bytes(80877103)) # SSL request
if self._read(1) == b'S':
self.sock = ssl.wrap_socket(self.sock)
else:
raise InterfaceError("Server refuses SSL")
# protocol version 3.0
v = b'\x00\x03\x00\x00'
v += b'user\x00' + self.user.encode('ascii') + b'\x00'
if self.database:
v += b'database\x00' + self.database.encode('ascii') + b'\x00'
v += b'\x00'
self._write(_bint_to_bytes(len(v) + 4) + v)
self.process_messages(None)
self._begin()
if self.tz_name and self.tzinfo is None:
self.set_timezone(self.tz_name)
def escape_parameter(self, v):
if isinstance(v, enum.Enum):
v = v.value
t = type(v)
func = self.encoders.get(t)
if func:
return func(self, v)
if v is None:
return 'NULL'
elif t == str:
return u"'" + v.replace(u"'", u"''") + u"'"
elif t == bytearray or t == bytes: # binary
return "'" + ''.join(['\\%03o' % (c, ) for c in v]) + "'::bytea"
elif t == bool:
return u"TRUE" if v else u"FALSE"
elif t == time.struct_time:
return u'%04d-%02d-%02d %02d:%02d:%02d' % (
v.tm_year, v.tm_mon, v.tm_mday, v.tm_hour, v.tm_min, v.tm_sec)
elif t == datetime.datetime:
if v.tzinfo:
return "timestamp with time zone '" + v.isoformat() + "'"
else:
return "timestamp '" + v.isoformat() + "'"
elif t == datetime.date:
return "date '" + str(v) + "'"
elif t == datetime.timedelta:
return u"interval '" + str(v) + "'"
elif t == int or t == float:
return str(v)
elif t == decimal.Decimal:
return "decimal '" + str(v) + "'"
elif t == list or t == tuple:
return u'ARRAY[' + u','.join([self.escape_parameter(e) for e in v]) + u']'
else:
return "'" + str(v) + "'"
def is_connect(self):
return bool(self.sock)
    def cursor(self, factory=Cursor):
        """Return a new cursor over this connection.

        `factory` lets callers substitute a custom Cursor class.
        """
        return factory(self)
def execute(self, query, obj=None):
self.query = query
self._send_message(b'Q', query.encode(self.encoding) + b'\x00')
self.process_messages(obj)
if self.autocommit:
self.commit()
    def get_parameter_status(self, s):
        """Return the server's current value of run-time parameter `s`
        (issues `SHOW <s>` and reads the single result cell)."""
        with self.cursor() as cur:
            cur.execute('SHOW {}'.format(s))
            return cur.fetchone()[0]
    def set_timezone(self, timezone_name):
        """Set the session time zone on the server and mirror it locally
        as a zoneinfo.ZoneInfo in self.tzinfo."""
        self.tz_name = timezone_name
        with self.cursor() as cur:
            cur.execute("SET TIME ZONE %s", [self.tz_name])
        self.tzinfo = zoneinfo.ZoneInfo(self.tz_name)
    @property
    def isolation_level(self):
        """Current transaction isolation level as reported by the server."""
        return self.get_parameter_status('TRANSACTION ISOLATION LEVEL')
    def set_autocommit(self, autocommit):
        """Toggle autocommit; when True, execute() commits after each query."""
        self.autocommit = autocommit
    def _begin(self):
        """Issue BEGIN without raising (_process_messages returns errors
        rather than raising them)."""
        self._send_message(b'Q', b"BEGIN\x00")
        self._process_messages(None)
    def begin(self):
        """Public wrapper around _begin() with debug tracing."""
        if DEBUG:
            DEBUG_OUTPUT('BEGIN')
        self._begin()
def commit(self):
if DEBUG:
DEBUG_OUTPUT('COMMIT')
if self.sock:
self._send_message(b'Q', b"COMMIT\x00")
self.process_messages(None)
self._begin()
    def _rollback(self):
        """Issue ROLLBACK; any server error is discarded (_process_messages
        returns errors rather than raising them)."""
        self._send_message(b'Q', b"ROLLBACK\x00")
        self._process_messages(None)
def rollback(self):
if DEBUG:
DEBUG_OUTPUT('ROLLBACK')
if self.sock:
self._rollback()
self._begin()
    def reopen(self):
        """Close and re-establish the connection."""
        self.close()
        self._open()
def close(self):
if DEBUG:
DEBUG_OUTPUT('Connection::close()')
if self.sock:
# send Terminate
self._write(b'X\x00\x00\x00\x04')
self.sock.close()
self.sock = None
def connect(host, user, password='', database=None, port=None, timeout=None, use_ssl=False):
    """Open a Connection; the PostgreSQL default port 5432 is used when
    `port` is falsy."""
    return Connection(user, password, database, host, port or 5432, timeout, use_ssl)
def create_database(database, host, user, password='', port=None, use_ssl=False):
    """Connect to the server (no database selected) and issue CREATE DATABASE.

    The implicit transaction is rolled back first because CREATE DATABASE
    cannot run inside a transaction block.
    """
    with connect(host, user, password, None, port, None, use_ssl) as conn:
        conn._rollback()
        stmt = 'CREATE DATABASE {}'.format(database)
        conn._send_message(b'Q', stmt.encode('utf-8') + b'\x00')
        conn.process_messages(None)
def drop_database(database, host, | |
"""Android Calculator App Test: Addition"""
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
import allure
from tests.android_native.calculator_tests.calculator_base_testcase import AndroidCalculatorBaseTestCase
@allure.epic('Android Native App')
@allure.parent_suite('Functional Test')
@allure.suite("Calculator Test Suite")
@allure.sub_suite("Positive Tests")
@allure.feature("Addition Calculation")
@allure.story('Addition Button')
class TestAdditionCase(AndroidCalculatorBaseTestCase):
"""
Android Calculator App Test: Addition
This suite tests the current code in the calculator.
Terminology
Addend + Addend = Sum
Test Suite List:
Should be able to add two positive integers numbers
Should be able to add a negative integer to a positive floating point number
Should be able to add a floating point number to an integer
Should be able to add an integer to a floating point number
Should be able to add two floating point numbers
Should be able to add a negative integer and zero
Should be able to add zero and a positive integer
Should be able to add a negative integer with a positive number
Should be able to add two large positive integers
Should be able to add a negative floating point and a positive integer
Should be able to add a positive integer to the results of a previous operation
Should be able to add a positive floating point number to the results of a previous operation
Should be able to add a floating point number with many decimal places to a previous result
Should be able to add a large integer to a previous result
Source: https://mozilla.github.io/calculator/test/
"""
def test_able_to_add_two_positive_integers(self):
"""
Should be able to add two positive integers numbers and display the result
Steps:
1. Key in a valid integer from - 9999999999 to +9999999999
2. Key in operator +
3. Key in second operand,a valid integer from - 9999999999 To +999999999
4. Result verification > check the screen output
Source: https://www.ques10.com/p/38809/prepare-and-write-six-test-cases-for-simple-calcul/?
:return:
"""
allure.dynamic.title("Add two positive integers numbers and display the result")
allure.dynamic.severity(allure.severity_level.BLOCKER)
# Should be able to add two positive integers numbers
numbers = [1, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == str(sum(numbers))
numbers = [1, 9]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == str(sum(numbers))
numbers = [19, 2]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == str(sum(numbers))
numbers = [19, 37]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == str(sum(numbers))
numbers = [1500, 2000]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
assert self.app.screen_result.formatted_text == str(sum(numbers))
numbers = [999999, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == str(sum(numbers))
numbers = [9999999, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [99999999, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [999999999, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [9999999999, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_a_negative_integer_to_a_positive_floating_point_number(self):
"""
Should be able to add a negative integer to a positive floating point number
:return:
"""
allure.dynamic.title("Add a negative integer to a positive floating point number")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [-1, 1.000]
with allure.step("Check the addition of negative integer to a positive floating point number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [-2, 1.000]
with allure.step(
"Check the addition of negative integer to a positive floating point number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [-1, 2.000]
with allure.step(
"Check the addition of negative integer to a positive floating point number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_a_floating_point_number_to_an_integer(self):
"""
Should be able to add a floating point number to an integer
:return:
"""
allure.dynamic.title("Add a floating point number to an integer")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [10.1, 2]
with allure.step(
"Check the addition of floating point number to an integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [10.000, 2]
with allure.step(
"Check the addition of floating point number to an integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [0.0, 2]
with allure.step(
"Check the addition of floating point number to an integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_an_integer_to_a_floating_point_number(self):
"""
Should be able to add an integer to a floating point number
:return:
"""
allure.dynamic.title("Add an integer to a floating point number")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [10, 9.9999]
with allure.step(
"Check the addition of an integer to a floating point number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [9, 10.9999]
with allure.step(
"Check the addition of an integer to a floating point number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_two_floating_point_numbers(self):
"""
Should be able to add two floating point numbers
:return:
"""
allure.dynamic.title("Add two floating point numbers")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [34.999, 1.0]
with allure.step(
"Check the addition of two floating point numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [1.0, 34.999]
with allure.step(
"Check the addition of two floating point numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [1.0000, 34.999]
with allure.step(
"Check the addition of two floating point numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_a_negative_integer_and_zero(self):
"""
Should be able to add a negative integer and zero
:return:
"""
allure.dynamic.title("Add a negative integer and zero")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [0, -5]
with allure.step(
"Check the addition of negative integer and zero: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [-5, 0]
with allure.step(
"Check the addition of negative integer and zero: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [5, -0]
with allure.step(
"Check the addition of negative integer and zero: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [-0, 5]
with allure.step(
"Check the addition of negative integer and zero: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_zero_and_a_positive_integer(self):
"""
Should be able to add zero and a positive integer
:return:
"""
allure.dynamic.title("Add zero and a positive integer")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [0, 0]
with allure.step("Check the addition of zero and a positive integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [0, 1]
with allure.step("Check the addition of zero and a positive integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [1, 0]
with allure.step("Check the addition of zero and a positive integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [0, 5]
with allure.step("Check the addition of zero and a positive integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_a_negative_integer_with_a_positive_number(self):
"""
Should be able to add a negative integer with a positive number
:return:
"""
allure.dynamic.title("Add a negative integer with a positive number")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [-5, 5]
with allure.step("Check the addition of a negative integer with a positive number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [5, -5]
with allure.step("Check the addition of a negative integer with a positive number: {}".format(numbers)):
self.perform_addition(numbers)
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [6, -5]
with allure.step("Check the addition of a negative integer with a positive number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [-6, 5]
with allure.step("Check the addition of a negative integer with a positive number: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_two_large_positive_integers(self):
"""
Should be able to add two large positive integers
:return:
"""
allure.dynamic.title("Add two large positive integers")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [999999, 999999]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [999999, 99999]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [99999, 99999]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [53645567, 78967875]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [9999999999, 567457362343241]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [300000000, 900000000]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [900000000, 900000000]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [999999999, 1]
with allure.step("Check the addition of integer numbers: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_a_negative_floating_point_and_a_positive_integer(self):
"""
Should be able to add a negative floating point and a positive integer
:return:
"""
allure.dynamic.title("Add a negative floating point and a positive integer")
allure.dynamic.severity(allure.severity_level.BLOCKER)
numbers = [-1987.50, 1987]
with allure.step("Check the addition of negative floating point and a positive integer: {}".format(numbers)):
self.perform_addition(numbers)
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
numbers = [1987, -1987.50]
with allure.step("Check the addition of negative floating point and a positive integer: {}".format(numbers)):
self.perform_addition(numbers)
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
def test_able_to_add_a_positive_integer_to_the_results_of_a_previous_operation(self):
"""
Should be able to add a positive integer to the results of a previous operation
:return:
"""
allure.dynamic.title("Add a positive integer to the results of a previous operation")
allure.dynamic.severity(allure.severity_level.BLOCKER)
# 1500 - 2000 = -500 + 500 =
numbers = [1500, -2000, -500, 500]
with allure.step("Enter following: 1500 - 2000 = -500 + 500 ="):
self.enter_digit(numbers[0])
self.app.plus.tap()
self.enter_digit(numbers[1])
self.app.plus.tap()
self.enter_digit(numbers[2])
self.app.plus.tap()
self.enter_digit(numbers[3])
self.app.equal.tap()
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(sum(numbers))
# 6 * 2 + 8 = 20
numbers = [6, 2, 8]
with allure.step("Enter following: 6 * 2 + 8 ="):
self.enter_digit(numbers[0])
self.app.multiplication.tap()
self.enter_digit(numbers[1])
self.app.plus.tap()
self.enter_digit(numbers[2])
self.app.equal.tap()
with allure.step("Check the result"):
assert self.app.screen_result.formatted_text == self.eval_formula(6 * 2 + 8)
def test_able_to_add_a_positive_floating_point_number_to_the_results_of_a_previous_operation(self):
"""
Should | |
actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.move_book),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(library.Book(
name='name_value',
author='author_value',
title='title_value',
read=True,
))
response = await client.move_book(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == library.MoveBookRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, library.Book)
assert response.name == 'name_value'
assert response.author == 'author_value'
assert response.title == 'title_value'
assert response.read is True
@pytest.mark.asyncio
async def test_move_book_async_from_dict():
    # Re-run the async MoveBook test with a plain dict request to cover
    # proto-plus dict-to-message coercion.
    await test_move_book_async(request_type=dict)
def test_move_book_field_headers():
    """Verify MoveBook forwards the resource name as a routing header."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the request a non-empty resource name.
    request = library.MoveBookRequest()
    request.name = 'name/value'
    # Patch the stub-level callable so no RPC actually goes out.
    with mock.patch.object(
            type(client.transport.move_book),
            '__call__') as rpc_call:
        rpc_call.return_value = library.Book()
        client.move_book(request)
        # Exactly one stub invocation, carrying the original request object.
        assert len(rpc_call.mock_calls) == 1
        _, call_args, _ = rpc_call.mock_calls[0]
        assert call_args[0] == request
    # The routing metadata must include the request-params header.
    _, _, call_kwargs = rpc_call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in call_kwargs['metadata']
@pytest.mark.asyncio
async def test_move_book_field_headers_async():
    """Async variant: MoveBook forwards the resource name as a routing header."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give the request a non-empty resource name.
    request = library.MoveBookRequest()
    request.name = 'name/value'
    # Patch the stub-level callable so no RPC actually goes out.
    with mock.patch.object(
            type(client.transport.move_book),
            '__call__') as rpc_call:
        rpc_call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(library.Book())
        await client.move_book(request)
        # The stub was invoked, carrying the original request object.
        assert len(rpc_call.mock_calls)
        _, call_args, _ = rpc_call.mock_calls[0]
        assert call_args[0] == request
    # The routing metadata must include the request-params header.
    _, _, call_kwargs = rpc_call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'name=name/value',
    ) in call_kwargs['metadata']
def test_move_book_flattened():
    """Verify the flattened-argument overload populates the request fields."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Patch the stub-level callable so no RPC actually goes out.
    with mock.patch.object(
            type(client.transport.move_book),
            '__call__') as rpc_call:
        rpc_call.return_value = library.Book()
        # Invoke the method with keyword (flattened) arguments only.
        client.move_book(
            name='name_value',
            other_shelf_name='other_shelf_name_value',
        )
        # The request object handed to the stub must carry both values.
        assert len(rpc_call.mock_calls) == 1
        _, call_args, _ = rpc_call.mock_calls[0]
        assert call_args[0].name == 'name_value'
        assert call_args[0].other_shelf_name == 'other_shelf_name_value'
def test_move_book_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client.move_book(
            library.MoveBookRequest(),
            name='name_value',
            other_shelf_name='other_shelf_name_value',
        )
@pytest.mark.asyncio
async def test_move_book_flattened_async():
    """Async variant: the flattened overload populates the request fields."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.move_book),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # FIX: dropped a dead `call.return_value = library.Book()` that was
        # immediately overwritten by the awaitable fake below.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(library.Book())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.move_book(
            name='name_value',
            other_shelf_name='other_shelf_name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].name == 'name_value'
        assert args[0].other_shelf_name == 'other_shelf_name_value'
@pytest.mark.asyncio
async def test_move_book_flattened_error_async():
    """Async variant: request object plus flattened fields raises ValueError."""
    client = LibraryServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        await client.move_book(
            library.MoveBookRequest(),
            name='name_value',
            other_shelf_name='other_shelf_name_value',
        )
def test_credentials_transport_error():
    """Conflicting credential/transport constructor options must raise."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.LibraryServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        # FIX: the constructed clients were bound to unused locals (F841);
        # the constructor is called purely for its side effect (the raise).
        LibraryServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.LibraryServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        LibraryServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.LibraryServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        LibraryServiceClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A client accepts and keeps a pre-built transport instance."""
    grpc_transport = transports.LibraryServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = LibraryServiceClient(transport=grpc_transport)
    assert client.transport is grpc_transport
def test_transport_get_channel():
    """Both gRPC transports expose the channel they were built with."""
    for transport_cls in (
            transports.LibraryServiceGrpcTransport,
            transports.LibraryServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize("transport_class", [
    transports.LibraryServiceGrpcTransport,
    transports.LibraryServiceGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
    """Transports fall back to application-default credentials (ADC)."""
    with mock.patch.object(google.auth, 'default') as default_creds:
        default_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        default_creds.assert_called_once()
def test_transport_grpc_default():
    """With no transport argument, clients use the sync gRPC transport."""
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(client.transport, transports.LibraryServiceGrpcTransport)
def test_library_service_base_transport_error():
    """Supplying both credentials and credentials_file must raise."""
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        # FIX: dropped the unused `transport =` binding (F841); only the
        # constructor's raise matters here.
        transports.LibraryServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )
def test_library_service_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport with its initializer stubbed out so no
    # credentials machinery runs.
    with mock.patch('google.example.library_v1.services.library_service.transports.LibraryServiceTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.LibraryServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # The full generated surface of the service.
        rpc_names = (
            'create_shelf',
            'get_shelf',
            'list_shelves',
            'delete_shelf',
            'merge_shelves',
            'create_book',
            'get_book',
            'list_books',
            'delete_book',
            'update_book',
            'move_book',
        )
        for rpc_name in rpc_names:
            with pytest.raises(NotImplementedError):
                getattr(transport, rpc_name)(request=object())
        with pytest.raises(NotImplementedError):
            transport.close()
@requires_google_auth_gte_1_25_0
def test_library_service_base_transport_with_credentials_file():
    """A credentials file is loaded through google.auth with the right scopes."""
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.example.library_v1.services.library_service.transports.LibraryServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.LibraryServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(),
            quota_project_id="octopus",
        )
@requires_google_auth_lt_1_25_0
def test_library_service_base_transport_with_credentials_file_old_google_auth():
    """Pre-1.25 google.auth: the credentials file is loaded without default_scopes."""
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.example.library_v1.services.library_service.transports.LibraryServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.LibraryServiceTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=(),
            quota_project_id="octopus",
        )
def test_library_service_base_transport_with_adc():
    """With neither credentials nor a file, the base transport uses ADC."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.example.library_v1.services.library_service.transports.LibraryServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.LibraryServiceTransport()
        adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_library_service_auth_adc():
    """Client construction without credentials consults ADC with no scopes."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        LibraryServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(),
            quota_project_id=None,
        )
@requires_google_auth_lt_1_25_0
def test_library_service_auth_adc_old_google_auth():
    """Pre-1.25 google.auth: ADC is consulted without default_scopes."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        LibraryServiceClient()
        adc.assert_called_once_with(
            scopes=(),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.LibraryServiceGrpcTransport,
        transports.LibraryServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_gte_1_25_0
def test_library_service_transport_auth_adc(transport_class):
    """Transports pass user scopes and quota project through to ADC."""
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=(),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.LibraryServiceGrpcTransport,
        transports.LibraryServiceGrpcAsyncIOTransport,
    ],
)
@requires_google_auth_lt_1_25_0
def test_library_service_transport_auth_adc_old_google_auth(transport_class):
    """Pre-1.25 google.auth: transports consult ADC without default_scopes."""
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus")
        adc.assert_called_once_with(
            scopes=(),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.LibraryServiceGrpcTransport, grpc_helpers),
        (transports.LibraryServiceGrpcAsyncIOTransport, grpc_helpers_async)
    ],
)
def test_library_service_transport_create_channel(transport_class, grpc_helpers):
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(
            quota_project_id="octopus",
            scopes=["1", "2"]
        )
        # The channel must be created against the service's default endpoint
        # with the ADC credentials and this exact keyword set; the expectation
        # below is kept byte-for-byte in sync with the generated transport.
        create_channel.assert_called_with(
            "library-example.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=(
            ),
            scopes=["1", "2"],
            default_host="library-example.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("transport_class", [transports.LibraryServiceGrpcTransport, transports.LibraryServiceGrpcAsyncIOTransport])
def test_library_service_grpc_transport_client_cert_source_for_mtls(
    transport_class
):
    """mTLS setup: explicit ssl_channel_credentials takes precedence;
    otherwise the client_cert_source callback is used to build them."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds
        )
        # The supplied SSL credentials must be forwarded unchanged, together
        # with the standard unlimited-message-size channel options.
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback
            )
            expected_cert, expected_key = client_cert_source_callback()
            # grpc.ssl_channel_credentials must receive exactly the cert/key
            # pair produced by the callback.
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert,
                private_key=expected_key
            )
def test_library_service_host_no_port():
    """An endpoint without a port gets the default :443 appended."""
    endpoint_options = client_options.ClientOptions(api_endpoint='library-example.googleapis.com')
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=endpoint_options,
    )
    assert client.transport._host == 'library-example.googleapis.com:443'
def test_library_service_host_with_port():
    """An endpoint with an explicit port keeps that port."""
    endpoint_options = client_options.ClientOptions(api_endpoint='library-example.googleapis.com:8000')
    client = LibraryServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=endpoint_options,
    )
    assert client.transport._host == 'library-example.googleapis.com:8000'
def test_library_service_grpc_transport_channel():
    """A user-supplied channel is used verbatim and no mTLS creds are set."""
    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.LibraryServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fixed: compare to None with identity, not equality (PEP 8 / E711).
    assert transport._ssl_channel_credentials is None
def test_library_service_grpc_asyncio_transport_channel():
    """A user-supplied asyncio channel is used verbatim and no mTLS creds are set."""
    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.LibraryServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fixed: compare to None with identity, not equality (PEP 8 / E711).
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, | |
"""
Module containing all necessary calculations, functions and classes, for ionization calculations from
Budapest-Florida code output.
It uses the interface given in tcdata.py module.
Most important feature is:
Ionization.IonizationForRawprofile static method calculates ionization for given rawProfile object.
"""
#TODO:
# - Callable functions from outside
# - Testing utilites
# - resolve circular dependencies
from math import pi
from math import exp
from math import sqrt
import astropy.constants as consts
import astropy.units as u
from astropy.units.core import _recreate_irreducible_unit
import numpy as np
#from . import tcdata as tcdata
#import __init__
#import tcdata.tcdata as tcdata
try:
from . import tcdata as tcdata
except (SystemError,ImportError):
#this should be removed when all tests are defined in tests
import tcdata as tcdata
class IterationError(Exception):
    """Raised when an iterative solver fails to converge.

    Fixed: removed the redundant ``__init__`` that only forwarded its
    arguments to ``Exception.__init__`` — the inherited constructor already
    does exactly that.
    """
class ConstantsCalculator:
    """
    Evaluates the right-hand sides of the Saha ionization equations and the
    hydrogen/helium number densities for a given composition (X, Y).

    All quantities are in CGS units.
    """

    # Class-level defaults; every instance re-assigns these in __init__.
    R_HII = 0.0
    R_HeII = 0.0
    R_HeIII = 0.0
    n_H = 0.0
    n_He = 0.0
    M_He = 6.6464764e-24  # helium atom mass [g]
    M_H = 1.6735575e-24   # hydrogen atom mass [g]
    X = 0.75
    Y = 0.2499

    def __init__(self, X=0, Y=0):
        """Store the mass fractions X (hydrogen) and Y (helium)."""
        self.R_HII = 0.0
        self.R_HeII = 0.0
        self.R_HeIII = 0.0
        self.n_H = 0.0
        self.n_He = 0.0
        self.M_He = 6.6464764e-24
        self.M_H = 1.6735575e-24
        self.X = X
        self.Y = Y

    def RHS_calc(self, T, khi, g_factor):
        """Saha right-hand side for temperature T [K], ionization energy khi [erg]
        and statistical-weight ratio g_factor."""
        electron_mass = consts.m_e.cgs.value
        boltzmann = consts.k_B.cgs.value
        planck = consts.h.cgs.value
        thermal_term = (2 * pi * electron_mass * boltzmann * T) ** 1.5 / planck ** 3
        return thermal_term * g_factor * exp(-khi / boltzmann / T)

    def Calc_R_HII(self, Temp):
        """RHS for hydrogen ionization (HI -> HII)."""
        khi = 13.54 * u.eV
        self.R_HII = self.RHS_calc(Temp, khi.to_value(u.erg), 1)

    def Calc_R_HeII(self, Temp):
        """RHS for first helium ionization (HeI -> HeII)."""
        khi = 24.48 * u.eV
        self.R_HeII = self.RHS_calc(Temp, khi.to_value(u.erg), 4)

    def Calc_R_HeIII(self, Temp):
        """RHS for second helium ionization (HeII -> HeIII)."""
        khi = 54.17 * u.eV
        self.R_HeIII = self.RHS_calc(Temp, khi.to_value(u.erg), 1)

    def Calc_n_H(self, rho):
        """Hydrogen number density for mass density rho [g/cm^3]."""
        self.n_H = self.X * rho / self.M_H

    def Calc_n_He(self, rho):
        """Helium number density for mass density rho [g/cm^3]."""
        self.n_He = self.Y * rho / self.M_He

    def Calc_consts(self, rho, Temp):
        """Refresh every cached quantity for the given density and temperature."""
        self.Calc_n_H(rho)
        self.Calc_n_He(rho)
        self.Calc_R_HII(Temp)
        self.Calc_R_HeII(Temp)
        self.Calc_R_HeIII(Temp)
class IonizationData(tcdata.BaseData):
    """
    Thin data carrier that moves ionization-fraction columns into tcdata objects.
    """

    def __init__(self):
        """Create an empty record with all three fractions set to 0."""
        columns = ('HII_fraction', 'HeII_fraction', 'HeIII_fraction')
        super().__init__({name: 0 for name in columns}, columns)

    @classmethod
    def initWithData(cls, x, y, z):
        """Build a record holding the HII (x), HeII (y) and HeIII (z) fractions."""
        record = IonizationData()
        record.datablock['HII_fraction'] = x
        record.datablock['HeII_fraction'] = y
        record.datablock['HeIII_fraction'] = z
        return record

    def injectToDataPoint(self, datapoint_obj: tcdata.DataPoint):
        """Insert the ionization columns into *datapoint_obj* and return it."""
        datapoint_obj.insertColumn(self.datablock, self.column_names)
        return datapoint_obj
class Ionization:
    """
    Class mainly responsible for ionization calculations.

    Solves the coupled Saha equations for the HII fraction (x), the HeII
    fraction (y) and the HeIII fraction (z) of a single cell.  Two
    bootstrap passes produce a starting guess which is then refined by
    NewtonianIterator.
    """
    def __init__(self,tc_cell_object : tcdata.DataPoint,X,Y,rho=None,T=None ):
        """
        Prepare the Saha constants for one cell.

        If rho or T is omitted they are taken from the tcdata cell
        (density as 1 / specific volume, temperature directly).
        """
        self.Constants = ConstantsCalculator(X,Y)
        self.x = 0
        self.y = 0
        self.z = 0
        self.tc_object = tc_cell_object
        self.Constants.X = X
        self.Constants.Y = Y
        if rho is None or T is None:
            self.Constants.Calc_consts(1/self.tc_object.spec_vol,self.tc_object.temperature)
        else:
            self.Constants.Calc_consts(rho,T)
    @staticmethod
    def diff(x1,y1,z1,x2,y2,z2):
        """Euclidean distance between points (x1, y1, z1) and (x2, y2, z2)."""
        dx=x2-x1
        dy=y2-y1
        dz=z2-z1
        difference=sqrt(dx**2+dy**2+dz**2)
        return difference
    @staticmethod
    def __Discriminant(b,c):
        """Discriminant of t**2 + b*t + c = 0 (leading coefficient 1)."""
        return b ** 2 - 4 *c
    def __CalcSecondOrder(self,b,c):
        """Larger root of t**2 + b*t + c = 0.

        NOTE(review): sqrt raises ValueError for a negative discriminant;
        the Saha coefficients used here appear to always have c <= 0, which
        keeps the discriminant non-negative — TODO confirm.
        """
        det = self.__Discriminant(b,c)
        return (- b + sqrt(det))/2
    def __FirstIteration(self):
        """
        Bootstrap step: calculate x without y and z, calculate y without z,
        and calculate z assuming y ~ 1 - z.
        """
        x = y = z = 0
        b_x = self.Constants.R_HII / self.Constants.n_H
        c_x = - self.Constants.R_HII / self.Constants.n_H
        x = self.__CalcSecondOrder(b_x, c_x)
        b_y = (x* self.Constants.n_H + self.Constants.R_HeII) / self.Constants.n_He
        c_y = - self.Constants.R_HeII / self.Constants.n_He
        y = self.__CalcSecondOrder(b_y,c_y)
        b_z = (x* self.Constants.n_H + self.Constants.n_He + self.Constants.R_HeIII) / self.Constants.n_He
        c_z = - self.Constants.R_HeIII / self.Constants.n_He
        z = self.__CalcSecondOrder(b_z, c_z)
        #print(x,y,z)
        return x,y,z
    def __SecondIteration(self,x0,y0,z0):
        """
        Refinement step: reuse the previous x0, y0, z0 values with the full
        electron density.  y is solved first and its fresh value feeds the
        z coefficients; x uses only the old y0 and z0.
        """
        # x-coefficients use the *old* y0, z0 on purpose.
        b_x = (y0 * self.Constants.n_He + 2 * z0 * self.Constants.n_He + self.Constants.R_HII) / self.Constants.n_H
        c_x = - self.Constants.R_HII / self.Constants.n_H
        b_y = (x0* self.Constants.n_H + self.Constants.R_HeII + 2 * z0 * self.Constants.n_He) / self.Constants.n_He
        c_y = (z0 - 1) * self.Constants.R_HeII / self.Constants.n_He
        y = self.__CalcSecondOrder(b_y,c_y)
        # z-coefficients use the freshly computed y.
        b_z = (x0* self.Constants.n_H + y * self.Constants.n_He) / (2 * self.Constants.n_He)
        c_z = - y * self.Constants.R_HeIII / (2* self.Constants.n_He)
        x = self.__CalcSecondOrder(b_x, c_x)
        z = self.__CalcSecondOrder(b_z, c_z)
        self.x = x
        self.y = y
        self.z = z
        return x,y,z
    def Calculation(self):
        """
        Compute the ionization fractions (x, y, z): two bootstrap passes,
        then Newton-Raphson refinement.
        """
        x,y,z = self.__FirstIteration()
        x,y,z = self.__SecondIteration(x,y,z)
        next_iterations = NewtonianIterator(x,y,z,self.Constants)
        x,y,z = next_iterations.NewtonianIteration()
        self.x = x
        self.y = y
        self.z = z
        return x,y,z
    def Reload(self, tc_cell_object : tcdata.DataPoint,X,Y,rho=None, T=None):
        """Re-initialize the solver for a new cell without re-allocating it."""
        self.x = 0
        self.y = 0
        self.z = 0
        self.tc_object = tc_cell_object
        self.Constants.X = X
        self.Constants.Y = Y
        if rho is None or T is None:
            self.Constants.Calc_consts(1/self.tc_object.spec_vol,self.tc_object.temperature)
        else:
            self.Constants.Calc_consts(rho,T)
    @staticmethod
    def IonizationForRawprofile(raw_obj : tcdata.RawProfiles,X,Y):
        """
        Attach ionization-fraction columns to every cell of *raw_obj*.

        Cells with non-positive temperature get zero fractions.
        NOTE(review): raw_obj[1] is used only to seed the calculator —
        presumably the first physical zone; verify against tcdata indexing.
        """
        calculator = Ionization(raw_obj[1],X,Y)
        for cell_obj in raw_obj:
            #print(cell_obj.zone)
            if cell_obj.temperature <= 0:
                # No meaningful Saha solution at T <= 0: store zeros.
                iondata = IonizationData.initWithData(0,0,0)
                iondata.injectToDataPoint(cell_obj)
                continue
            calculator.Reload(cell_obj,X,Y)
            x,y,z = calculator.Calculation()
            iondata = IonizationData.initWithData(x,y,z)
            iondata.injectToDataPoint(cell_obj)
            # print(iondata.datablock)
            # print(cell_obj.data('HeII_fraction'))
        #print(raw_obj[15].HII_fraction)
        return raw_obj
class NewtonianIterator:
    """
    Newton-Raphson refinement of the coupled Saha ionization equations.

    The unknowns are x (HII fraction), y (HeII fraction) and z (HeIII
    fraction); each residual is quadratic in its own unknown.
    """

    def __init__(self, x0, y0, z0, _Constants: "ConstantsCalculator"):
        """Store the starting guess (x0, y0, z0) and the precomputed constants."""
        self.Constants = _Constants
        self.__x_vec_n = np.array([x0, y0, z0], dtype=float)
        # Fixed: was np.array([x0, x0, x0]) — a copy/paste typo.
        self.__x_vec_np1 = np.array([x0, y0, z0], dtype=float)

    def __f_vector(self, x_vec) -> np.ndarray:
        """Residual vector of the three quadratic Saha equations at x_vec."""
        x, y, z = x_vec
        C = self.Constants
        b_x = (y * C.n_He + 2 * z * C.n_He + C.R_HII) / C.n_H
        c_x = -C.R_HII / C.n_H
        b_y = (x * C.n_H + C.R_HeII + 2 * z * C.n_He) / C.n_He
        c_y = (z - 1) * C.R_HeII / C.n_He
        b_z = (x * C.n_H + y * C.n_He) / (2 * C.n_He)
        c_z = -y * C.R_HeIII / (2 * C.n_He)
        return np.array([
            x ** 2 + x * b_x + c_x,
            y ** 2 + y * b_y + c_y,
            z ** 2 + z * b_z + c_z,
        ])

    def __jacobian(self, x_vec=None) -> np.ndarray:
        """Jacobian matrix of __f_vector with respect to (x, y, z)."""
        if x_vec is None:
            x_vec = self.__x_vec_n
        x, y, z = x_vec
        C = self.Constants
        b_x = (y * C.n_He + 2 * z * C.n_He + C.R_HII) / C.n_H
        b_y = (x * C.n_H + C.R_HeII + 2 * z * C.n_He) / C.n_He
        b_z = (x * C.n_H + y * C.n_He) / (2 * C.n_He)
        return np.array([
            [2 * x + b_x, x * C.n_He / C.n_H, 2 * x * C.n_He / C.n_H],
            [y * C.n_H / C.n_He, 2 * y + b_y, 2 * y + C.R_HeII / C.n_He],
            [0.5 * z * C.n_H / C.n_He, 0.5 * (z - C.R_HeIII / C.n_He), 2 * z + b_z],
        ])

    def NewtonianIteration(self, x_vec=None):
        """
        Run Newton-Raphson steps from *x_vec* (or the stored starting guess)
        until successive iterates differ by less than 1e-8.

        Returns:
            tuple: the converged (x, y, z).

        Raises:
            IterationError: if no convergence within 1000 iterations.
        """
        if x_vec is None:
            x_vec = self.__x_vec_n
        else:
            # Fixed: the original referenced the undefined names x0, y0, z0
            # here, which raised NameError whenever a guess was passed in.
            x_vec = np.asarray(x_vec, dtype=float)
        for _ in range(1000):
            jac = self.__jacobian(x_vec)
            x_vec_np1 = x_vec - np.linalg.inv(jac).dot(self.__f_vector(x_vec))
            # Euclidean step size; equivalent to the former Ionization.diff
            # call but without the cross-class dependency.
            step = np.linalg.norm(x_vec_np1 - x_vec)
            x_vec = x_vec_np1
            if step < 1e-8:
                break
        else:
            raise IterationError("Iteration not converge")
        return x_vec[0], x_vec[1], x_vec[2]
if __name__ == '__main__':
    # Stand-alone driver: read whitespace-separated numeric rows from
    # adat_ready.txt and write T plus the ionization fractions per row.
    infile = open("adat_ready.txt", "r")
    lines = [line.strip("\n") for line in infile]
    infile.close()
    data=list()
    for line in lines:
        data.append(list(float(words) for words in line.split()))
    # datablock columns: [0]=T, [1]=x (HII), [2]=y (HeII), [3]=z (HeIII)
    datablock = [[] for i in range(4) ]
    print(len(lines))
    line= None
    # NOTE(review): outfile is never closed; relies on interpreter exit.
    outfile = open("kimenet.txt","w")
    # data rows
    for line in data:
        if len(line) == 0:
            continue
        # Column meanings assume the adat_ready.txt layout:
        # col 8 = specific volume, col 10 = temperature — TODO confirm.
        rho = 1/line[8] #adat_ready.txt
        T = line[10] # adat_ready.txt
        if T <= 0:
            # Non-physical temperature: skip the row entirely.
            continue
        datablock[0].append(T)
        # x0,y0,z0 = FirstIteration()
        # x0,y0,z0 = SecondIteration(x0,y0,z0)
        # x0,y0,z0 = NewtonianIteration(x0,y0,z0)
        # A dummy DataPoint is enough here because rho and T are passed
        # explicitly, so the cell object is never queried.
        ionization_obj = Ionization(tcdata.DataPoint(""),0.75,0.2499,rho,T)
        x0,y0,z0 = ionization_obj.Calculation()
        datablock[1].append(x0)
        datablock[2].append(y0)
        datablock[3].append(z0)
        outfile.write("{0} {1:8.6E} {2:8.6E} {3:8.6E} {4:8.6E} {5:8.6E} {6:8.6E}\n".format(T,x0,y0,z0,line[2],line[3],line[4]))
    ###Checking
    """n_e=x2*Constants.n_H + (y2 + 2 * z2) * Constants.n_He
    n_e0=x0*Constants.n_H + (y0 + 2 * z0) * Constants.n_He
    egy1 = x2 * n_e / (1 - x2) - Constants.R_HII
    egy2 = y2 * n_e / (1 - y2 - z2) - Constants.R_HeII
    egy3 = z2 * n_e / y2 - Constants.R_HeIII
    egy01 = x0 * n_e0 / (1 - x0) - Constants.R_HII
    egy02 = y0 * n_e0 / (1 - y0 - z0) - Constants.R_HeII
    egy03 = z0 * n_e0 / y0 - Constants.R_HeIII
    """
    #print(" pontosság: ", (egy1/Constants.R_HII+egy1/(x0 * n_e / (1 - x0)))/2)
    #y0 = #fsolve(f02, 0.675486)
    #z0 = #fsolve(f03, 0.664258)
    #print(x0,y0,z0)
    #x,y,z, = -1, -1,-1
    #if T > 100000:
    #    x0,y0,z0 = 0.99 , 0.01, 0.99
    #elif T > 50000:
    #    x0,y0,z0 = 0.9576, 0.6548, 0.4012
| |
# Source: matthewpipie/vectra_api_tools
import functools
import html
import json
import re
import warnings

import requests
warnings.filterwarnings('always', '.*', PendingDeprecationWarning)
class HTTPException(Exception):
    def __init__(self, response):
        """
        Custom exception class to report possible API errors
        The body is constructed by extracting the API error detail from the
        requests.Response object; falls back to the raw content.
        """
        try:
            payload = response.json()
            if 'detail' in payload:
                detail = payload['detail']
            elif 'errors' in payload:
                detail = payload['errors'][0]['title']
            elif '_meta' in payload:
                detail = payload['_meta']['message']
            else:
                detail = response.content
        except Exception:
            # Body was not JSON (or had an unexpected shape): use it raw.
            detail = response.content
        message = 'Status code: {code} - {detail}'.format(code=str(response.status_code), detail=detail)
        super().__init__(message)
def request_error_handler(func):
    """
    Decorator: pass the requests.Response through on success
    (200/201/204), raise HTTPException for any other status code.
    """
    # Fixed: preserve the wrapped function's metadata (__name__, __doc__)
    # so introspection and debugging show the real endpoint method.
    @functools.wraps(func)
    def request_handler(self, *args, **kwargs):
        response = func(self, *args, **kwargs)
        if response.status_code in [200, 201, 204]:
            return response
        else:
            raise HTTPException(response)
    return request_handler
def validate_api_v2(func):
    """
    Decorator gating methods that exist only in API v2: call through when
    the client was built with a token (version == 2), otherwise raise.
    """
    # Fixed: preserve the wrapped function's metadata (__name__, __doc__).
    @functools.wraps(func)
    def api_validator(self, *args, **kwargs):
        if self.version == 2:
            return func(self, *args, **kwargs)
        else:
            raise NotImplementedError('Method only accessible via v2 of API')
    return api_validator
def deprecation(message):
    """Emit *message* as a PendingDeprecationWarning."""
    warnings.warn(message, PendingDeprecationWarning)
def param_deprecation(key):
    """Warn that query parameter *key* belongs to the deprecated Vectra API v1."""
    # Fixed typo in the user-facing message: 'annouced' -> 'announced'.
    message = '{0} will be deprecated with Vectra API v1 which will be announced in an upcoming release'.format(key)
    warnings.warn(message, PendingDeprecationWarning)
class VectraClient(object):
def __init__(self, url=None, token=None, user=None, password=None, verify=False):
    """
    Initialize Vectra client
    :param url: IP or hostname of Vectra brain (ex https://www.example.com) - required
    :param token: API token for authentication when using API v2*
    :param user: Username to authenticate to Vectra brain when using API v1*
    :param password: Password when using username to authenticate using API v1*
    :param verify: Verify SSL (default: False) - optional
    *Either token or user are required
    """
    self.url = url
    self.version = 2 if token else 1
    self.verify = verify
    base_url = VectraClient._remove_trailing_slashes(url)
    if token:
        # API v2: token-based authentication via request headers.
        self.url = '{url}/api/v2'.format(url=base_url)
        self.headers = {
            'Authorization': "Token " + token.strip(),
            'Content-Type': "application/json",
            'Cache-Control': "no-cache"
        }
    elif user and password:
        # API v1: HTTP basic auth; scheduled for deprecation.
        self.url = '{url}/api'.format(url=base_url)
        self.auth = (user, password)
        deprecation('Deprecation of the Vectra API v1 will be announced in an upcoming release. Migrate to API v2'
                    ' when possible')
    else:
        raise RuntimeError("At least one form of authentication is required. Please provide a token or username"
                           " and password")
@staticmethod
def _remove_trailing_slashes(url):
    """
    Strip all trailing '/' characters from *url*.

    Fixed: the original removed only one trailing slash even though the
    name promises removing all of them, so 'https://x//' kept a slash and
    produced a malformed API base URL.
    """
    return url.rstrip('/')
@staticmethod
def _generate_campaign_params(args):
    """
    Generate query parameters for campaigns based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'fields', 'dst_ip', 'target_domain', 'state', 'name', 'last_updated_gte',
                  'note_modified_timestamp_gte', 'page', 'page_size'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid campaign query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_host_params(args):
    """
    Generate query parameters for hosts based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'active_traffic', 'all', 'c_score', 'c_score_gte', 'certainty', 'certainty_gte',
                  'fields', 'has_active_traffic', 'include_detection_summaries', 'is_key_asset', 'is_targeting_key_asset',
                  'key_asset', 'last_detection_timestamp', 'last_source', 'mac_address', 'max_id', 'min_id',
                  'name', 'note_modified_timestamp_gte', 'ordering', 'page', 'page_size', 'privilege_category',
                  'privilege_level', 'privilege_level_gte', 'state', 't_score', 't_score_gte', 'tags',
                  'targets_key_asset', 'threat', 'threat_gte'}
    deprecated_keys = {'c_score', 'c_score_gte', 'key_asset', 't_score', 't_score_gte', 'targets_key_asset'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid host query parameter'.format(str(key)))
        if key in deprecated_keys:
            param_deprecation(key)
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_host_by_id_params(args):
    """
    Generate query parameters for a single-host lookup based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'fields', 'include_external', 'include_ldap'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid host query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_detection_params(args):
    """
    Generate query parameters for detections based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'c_score', 'c_score_gte', 'category', 'certainty', 'certainty_gte', 'description',
                  'detection', 'detection_category', 'detection_type', 'fields', 'host_id', 'is_targeting_key_asset',
                  'is_triaged', 'last_timestamp', 'max_id', 'min_id', 'note_modified_timestamp_gte', 'ordering',
                  'page', 'page_size', 'src_ip', 'state', 't_score', 't_score_gte', 'tags', 'targets_key_asset',
                  'threat', 'threat_gte'}
    deprecated_keys = {'c_score', 'c_score_gte', 'category', 'detection', 't_score', 't_score_gte', 'targets_key_asset'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid detection query parameter'.format(str(key)))
        if key in deprecated_keys:
            param_deprecation(key)
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_group_params(args):
    """
    Generate query parameters for groups based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'description', 'domains', 'host_ids', 'host_names', 'last_modified_by',
                  'last_modified_timestamp', 'name', 'page', 'page_size', 'type'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid group query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_rule_params(args):
    """
    Generate query parameters for rules based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'contains', 'fields', 'include_templates', 'page', 'page_size', 'ordering'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:raise ValueError('argument {} is an invalid rule query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_rule_by_id_params(args):
    """
    Generate query parameters for a single-rule lookup based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'fields'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid rule query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_user_params(args):
    """
    Generate query parameters for users based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'username', 'role', 'account_type', 'authentication_profile', 'last_login_gte'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid user query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_ip_address_params(args):
    """
    Generate query parameters for ip address queries based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'include_ipv4', 'include_ipv6'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid ip address query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_subnet_params(args):
    """
    Generate query parameters for subnet queries based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'ordering', 'search'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid subnet query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@staticmethod
def _generate_internal_network_params(args):
    """
    Generate query parameters for internal network queries based on provided args
    :param args: dict of keys to generate query params
    :rtype: dict
    """
    valid_keys = {'include_ipv4', 'include_ipv6'}
    params = {}
    for key, value in args.items():
        if key not in valid_keys:
            raise ValueError('argument {} is an invalid internal network query parameter'.format(str(key)))
        if value is not None:
            params[key] = value
    return params
@validate_api_v2
@request_error_handler
def _get_request(self, url, **kwargs):
    """
    Do a get request on the provided URL
    This is used by paginated endpoints
    :rtype: requests.Response
    """
    params = dict(kwargs)
    if self.version == 2:
        return requests.get(url, headers=self.headers, params=params, verify=self.verify)
    # Unreachable in practice: validate_api_v2 rejects version != 2 first.
    return requests.get(url, auth=self.auth, params=params, verify=self.verify)
@validate_api_v2
@request_error_handler
def get_campaigns(self, **kwargs):
    """
    Query all campaigns - all parameters are optional
    :param dst_ip: filter on campaign destination IP
    :param target_domain: filter on campaign destination domain
    :param state: campaign state, possible values are: init, active, closed, closed_never_active
    :param name: filter on campaign name
    :param last_updated_gte: return only campaigns with a last updated timestamp gte (datetime)
    :param note_modified_timestamp_gte: return only campaigns with a last updated timestamp on their note gte (datetime)
    :param fields: comma separated string of fields to be filtered and returned
        possible values are: id, dst_ip, target_domain, state, name, last_updated,
        note, note_modified_by, note_modified_timestamp
    :param page: page number to return (int)
    :param page_size: number of objects to return in response (int)
    """
    params = self._generate_campaign_params(kwargs)
    return requests.get('{url}/campaigns'.format(url=self.url), headers=self.headers,
                        params=params, verify=self.verify)
def | |
= [slice(None), slice(tl_y, br_y), slice(tl_x, br_x)]
>>> # Note: I'm not 100% sure this work right with non-intergral slices
>>> outputs = kwimage.subpixel_slice(inputs, index)
Example:
>>> inputs = np.arange(5 * 5 * 3).reshape(5, 5, 3)
>>> index = [slice(0, 3), slice(0, 3)]
>>> outputs = subpixel_slice(inputs, index)
>>> index = [slice(0.5, 3.5), slice(-0.5, 2.5)]
>>> outputs = subpixel_slice(inputs, index)
>>> inputs = np.arange(5 * 5).reshape(1, 5, 5).astype(float)
>>> index = [slice(None), slice(3, 6), slice(3, 6)]
>>> outputs = subpixel_slice(inputs, index)
>>> print(outputs)
[[[18. 19. 0.]
[23. 24. 0.]
[ 0. 0. 0.]]]
>>> index = [slice(None), slice(3.5, 6.5), slice(2.5, 5.5)]
>>> outputs = subpixel_slice(inputs, index)
>>> print(outputs)
[[[20. 21. 10.75]
[11.25 11.75 6. ]
[ 0. 0. 0. ]]]
"""
subpixel_starts = np.array(
[0 if sl.start is None else sl.start for sl in index])
subpixel_stops = np.array(
[inputs.shape[i] if sl.stop is None else sl.stop
for i, sl in enumerate(index)])
is_fractional = ((subpixel_starts % 1) + (subpixel_stops % 1)) > 0
if not np.any(is_fractional):
# If none of the slices are fractional just do the simple thing
int_index = [slice(int(s), int(t)) for s, t in
zip(subpixel_starts, subpixel_stops)]
outputs, _ = _padded_slice(inputs, int_index)
else:
interp_axes = np.where(is_fractional)[0]
shift = -subpixel_starts[interp_axes]
output_shape = subpixel_stops - subpixel_starts
if np.any(output_shape % 1 > 0):
output_shape = np.ceil(output_shape)
# raise ValueError('the slice length must be integral')
output_shape = output_shape.astype(int)
outputs = subpixel_translate(inputs, shift, interp_axes=interp_axes,
output_shape=output_shape)
return outputs
def subpixel_translate(inputs, shift, interp_axes=None, output_shape=None):
    """
    Translates an image by a subpixel shift value using bilinear interpolation

    Args:
        inputs (ArrayLike): data to translate

        shift (Sequence):
            amount to translate each dimension specified by `interp_axes`.
            Note: if inputs contains more than one "image" then all "images" are
            translated by the same amount. This function contains no mechanism
            for translating each image differently. Note that by default
            this is a y,x shift for 2 dimensions.

        interp_axes (Sequence, default=None):
            axes to perform interpolation on, if not specified the final
            `n` axes are interpolated, where `n=len(shift)`

        output_shape (tuple, default=None):
            if specified the output is returned with this shape, otherwise
            the input's shape is used.

    Notes:
        This function powers most other functions in this file.
        Speedups here can go a long way.

    Example:
        >>> inputs = np.arange(5) + 1
        >>> print(inputs.tolist())
        [1, 2, 3, 4, 5]
        >>> outputs = subpixel_translate(inputs, 1.5)
        >>> print(outputs.tolist())
        [0.0, 0.5, 1.5, 2.5, 3.5]

    Example:
        >>> # xdoctest: +REQUIRES(module:torch)
        >>> inputs = torch.arange(9).view(1, 1, 3, 3).float()
        >>> print(inputs.long())
        tensor([[[[0, 1, 2],
                  [3, 4, 5],
                  [6, 7, 8]]]])
        >>> outputs = subpixel_translate(inputs, (-.4, .5), output_shape=(1, 1, 2, 5))
        >>> print(outputs)
        tensor([[[[0.6000, 1.7000, 2.7000, 1.6000, 0.0000],
                  [2.1000, 4.7000, 5.7000, 3.1000, 0.0000]]]])

    Ignore:
        >>> inputs = np.arange(5)
        >>> shift = -.6
        >>> interp_axes = None
        >>> subpixel_translate(inputs, -.6)
        >>> subpixel_translate(inputs[None, None, None, :], -.6)
        >>> inputs = np.arange(25).reshape(5, 5)
        >>> shift = (-1.6, 2.3)
        >>> interp_axes = (0, 1)
        >>> subpixel_translate(inputs, shift, interp_axes, output_shape=(9, 9))
        >>> subpixel_translate(inputs, shift, interp_axes, output_shape=(3, 4))
    """
    # kwarray.ArrayAPI dispatches the same operations to numpy or torch
    # depending on the input type.
    impl = kwarray.ArrayAPI.impl(inputs)
    if output_shape is None:
        output_shape = inputs.shape
    if interp_axes is None:
        # Default: interpolate over the trailing len(shift) axes.
        shift = _ensure_arraylike(shift)
        interp_axes = np.arange(-len(shift), 0)
    else:
        interp_axes = _ensure_arraylike(interp_axes)
        shift = _ensure_arraylike(shift, len(interp_axes))
    ndims = len(inputs.shape)  # number of inputs dimensions
    interp_order = len(interp_axes)  # number of interpolated dimensions
    output_dims = [output_shape[i] for i in interp_axes]
    # The negative shift defines the new start coordinate
    start = -shift
    # Sample values (using padded slice to deal with borders)
    # border_mode = 'zeros'
    # if border_mode == 'zeros':
    #     padkw = dict(pad_mode='constant', constant_value=0)
    # if border_mode == 'edge':
    #     padkw = dict(pad_mode='edge')
    padkw = {}
    if np.all(start % 1 == 0):
        # short circuit common simple cases where no interpolation is needed
        relevant_slice = [slice(None)] * ndims
        for i, x, d in zip(interp_axes, map(int, start), output_dims):
            relevant_slice[i] = slice(x, x + d)
        subpxl_vals, _ = _padded_slice(inputs, relevant_slice, **padkw)
    elif interp_order == 1:
        # 1D case: linear interpolation along a single axis.
        i, = interp_axes
        width, = output_dims
        x, = start
        # Get quantized pixel locations near subpixel pts
        x0 = int(np.floor(x))
        x1 = x0 + 1
        # Find linear weights (each weight is the distance to the *other*
        # integer neighbor, so they sum to 1).
        wa = (x1 - x)
        wb = (x - x0)
        # Create a (potentially negative) slice containing the relevant area
        relevant_slice = [slice(None)] * ndims
        relevant_slice[i] = slice(x0, x1 + width)
        relevant, _ = _padded_slice(inputs, relevant_slice, **padkw)
        if impl.dtype_kind(relevant) != 'f':
            # Interpolation must happen in floating point.
            relevant = impl.astype(relevant, 'float32')
        # Take subslices of the relevant area
        sl_a = [slice(None)] * ndims
        sl_b = [slice(None)] * ndims
        # Sample values (using padded slice to deal with borders)
        sl_a[i] = slice(0, width)
        sl_b[i] = slice(1, width + 1)
        Ia = relevant[tuple(sl_a)]
        Ib = relevant[tuple(sl_b)]
        # Perform the linear interpolation
        subpxl_vals = (wa * Ia) + (wb * Ib)
    elif interp_order == 2:
        # 2D case: bilinear interpolation over axes (j, i) = (y, x).
        j, i = interp_axes
        height, width = output_dims
        y, x = start
        # Get quantized pixel locations near subpixel pts
        start0 = kwarray.ArrayAPI.ifloor(start)
        start1 = start0 + 1
        alpha = start1 - start
        beta = start - start0
        # Find bilinear weights (products of the 1D weights; sum to 1).
        wa = alpha[1] * alpha[0]
        wb = alpha[1] * beta[0]
        wc = beta[1] * alpha[0]
        wd = beta[1] * beta[0]
        # Create a (potentially negative) slice containing the relevant area
        relevant_slice = [slice(None)] * ndims
        y0, x0 = start0
        y1, x1 = start1
        relevant_slice[j] = slice(y0, y1 + height)
        relevant_slice[i] = slice(x0, x1 + width)
        relevant, _ = _padded_slice(inputs, relevant_slice, **padkw)
        if impl.dtype_kind(relevant) != 'f':
            # Interpolation must happen in floating point.
            relevant = impl.astype(relevant, 'float32')
        # Take subslices of the relevant area
        sl_a = [slice(None)] * ndims
        sl_b = [slice(None)] * ndims
        sl_c = [slice(None)] * ndims
        sl_d = [slice(None)] * ndims
        # Sample values (using padded slice to deal with borders)
        sl_a[j] = slice(0, height)
        sl_a[i] = slice(0, width)
        sl_b[j] = slice(1, height + 1)
        sl_b[i] = slice(0, width)
        sl_c[j] = slice(0, height)
        sl_c[i] = slice(1, width + 1)
        sl_d[j] = slice(1, height + 1)
        sl_d[i] = slice(1, width + 1)
        Ia = relevant[tuple(sl_a)]
        Ib = relevant[tuple(sl_b)]
        Ic = relevant[tuple(sl_c)]
        Id = relevant[tuple(sl_d)]
        # Perform the bilinear interpolation
        subpxl_vals = (wa * Ia) + (wb * Ib) + (wc * Ic) + (wd * Id)
    else:
        raise NotImplementedError('trilinear interpolation is not implemented')
    return subpxl_vals
def _padded_slice(data, in_slice, ndim=None, pad_slice=None,
pad_mode='constant', **padkw):
"""
Allows slices with out-of-bound coordinates. Any out of bounds coordinate
will be sampled via padding.
Note:
Negative slices have a different meaning here then they usually do.
Normally, they indicate a wrap-around or a reversed stride, but here
they index into out-of-bounds space (which depends on the pad mode).
For example a slice of -2:1 literally samples two pixels to the left of
the data and one pixel from the data, so you get two padded values and
one data value.
Args:
data (Sliceable[T]): data to slice into. Any channels must be the last dimension.
in_slice (Tuple[slice, ...]): slice for each dimensions
ndim (int): number of spatial dimensions
pad_slice (List[int|Tuple]): additional padding of the slice
Returns:
Tuple[Sliceable, List] :
data_sliced: subregion of the input data (possibly with padding,
depending on if the original slice went out of bounds)
st_dims : a list indicating the low and high space-time coordinate
values of the returned data slice.
Example:
>>> data = np.arange(5)
>>> in_slice = [slice(-2, 7)]
>>> data_sliced, st_dims = _padded_slice(data, in_slice)
>>> print(ub.repr2(data_sliced, with_dtype=False))
>>> print(st_dims)
np.array([0, 0, 0, 1, 2, 3, 4, 0, 0])
[(-2, 7)]
>>> data_sliced, st_dims = _padded_slice(data, in_slice, pad_slice=(3, 3))
>>> print(ub.repr2(data_sliced, with_dtype=False))
>>> print(st_dims)
np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
[(-5, 10)]
>>> data_sliced, st_dims = _padded_slice(data, slice(3, 4), pad_slice=[(1, 0)])
>>> print(ub.repr2(data_sliced, with_dtype=False))
>>> print(st_dims)
np.array([2, 3])
[(2, 4)]
"""
# TODO: use kwarray instead
if isinstance(in_slice, slice):
in_slice = [in_slice]
ndim = len(in_slice)
data_dims = data.shape[:ndim]
low_dims = [sl.start for sl in in_slice]
high_dims = [sl.stop for sl in in_slice]
data_slice, extra_padding = _rectify_slice(data_dims, low_dims, high_dims,
pad_slice=pad_slice)
in_slice_clipped = tuple(slice(*d) for d in data_slice)
# Get the parts of the image | |
# pandaharvester/harvestermonitor/htcondor_monitor.py
import re
import time
import datetime
import threading
import random
import xml.etree.ElementTree as ET
try:
import subprocess32 as subprocess
except Exception:
import subprocess
try:
from threading import get_ident
except ImportError:
from thread import get_ident
import six
from concurrent.futures import ThreadPoolExecutor as Pool
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore.core_utils import SingletonWithID
from pandaharvester.harvestercore.work_spec import WorkSpec
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestercore.fifos import SpecialFIFOBase
try:
import htcondor
except ImportError:
CONDOR_API = 'command'
else:
CONDOR_API = 'python'
# logger
baseLogger = core_utils.setup_logger('htcondor_monitor')
## Run shell function
def _runShell(cmd):
cmd = str(cmd)
p = subprocess.Popen(cmd.split(), shell=False, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdOut, stdErr = p.communicate()
retCode = p.returncode
return (retCode, stdOut, stdErr)
## Native HTCondor status map
# Maps the integer JobStatus classad value (as a string) to a
# human-readable status name.
CONDOR_JOB_STATUS_MAP = {
    '1': 'idle',
    '2': 'running',
    '3': 'removed',
    '4': 'completed',
    '5': 'held',
    '6': 'transferring_output',
    '7': 'suspended',
}

## List of job ads required
# Only these classad attributes are requested from condor_q / condor_history.
CONDOR_JOB_ADS_LIST = [
    'ClusterId', 'ProcId', 'JobStatus', 'LastJobStatus',
    'JobStartDate', 'EnteredCurrentStatus', 'ExitCode',
    'HoldReason', 'LastHoldReason', 'RemoveReason',
]

## harvesterID
harvesterID = harvester_config.master.harvester_id
## generate condor job id with schedd host from workspec
def condor_job_id_from_workspec(workspec):
    """Return the condor job id '<submissionHost>#<batchID>' for a workspec."""
    schedd_host = workspec.submissionHost
    batch_id = workspec.batchID
    return '{0}#{1}'.format(schedd_host, batch_id)
## Condor queue cache fifo
class CondorQCacheFifo(six.with_metaclass(SingletonWithID, SpecialFIFOBase)):
    """FIFO used as a shared cache of condor_q results, one instance per schedd.

    A reserved FIFO id (global_lock_id) is used as a cooperative lock so that
    only one thread refreshes the cache at a time.
    """
    # reserved FIFO object id acting as the cooperative global lock
    global_lock_id = -1

    def __init__(self, target, *args, **kwargs):
        # FIFO title derived from the first dot-separated token of the target host
        self.titleName = 'CondorQCache_{0}'.format(target.split('.')[0])
        SpecialFIFOBase.__init__(self)

    def lock(self, score=None):
        """Try to acquire the global lock; return the lock key on success, else None."""
        lock_key = format(int(random.random() * 2**32), 'x')
        effective_score = time.time() if score is None else score
        if self.putbyid(self.global_lock_id, lock_key, effective_score):
            return lock_key
        return None

    def unlock(self, key=None, force=False):
        """Release the global lock when *key* matches the stored one (or *force*).

        Returns True when the lock is absent or successfully released.
        """
        peeked_tuple = self.peekbyid(id=self.global_lock_id)
        if peeked_tuple.score is None or peeked_tuple.item is None:
            # no lock present
            return True
        if force or self.decode(peeked_tuple.item) == key:
            self.release([self.global_lock_id])
            return True
        return False
## Condor job ads query
class CondorJobQuery(six.with_metaclass(SingletonWithID, object)):
    """Queries HTCondor job ads for one submission host (one singleton per host)."""
    ## class lock serializing first-time initialization across threads
    classLock = threading.Lock()
    ## CLI query commands used when the python bindings are unavailable
    orig_comStr_list = [
        'condor_q -xml',
        'condor_history -xml',
    ]
    # Bad text of redundant xml roots to eliminate from concatenated condor XML output
    badtext = """
</classads>
<?xml version="1.0"?>
<!DOCTYPE classads SYSTEM "classads.dtd">
<classads>
"""
    def __init__(self, cacheEnable=False, cacheRefreshInterval=None, useCondorHistory=True, *args, **kwargs):
        """One-time initialization of the per-submission-host query singleton.

        :param cacheEnable: if True, share condor_q results between threads via a FIFO cache
        :param cacheRefreshInterval: seconds before a cached condor_q result is considered stale
        :param useCondorHistory: if True, also query condor_history for jobs missing from condor_q
        """
        # the 'id' kwarg is expected to carry the submission host string
        # (presumably set by the SingletonWithID machinery — TODO confirm)
        self.submissionHost = str(kwargs.get('id'))
        # Make logger
        tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.__init__')
        # Initialize under the class-wide lock so concurrent first use is safe
        with self.classLock:
            tmpLog.debug('Start')
            # per-instance lock guarding session renewal in get_all
            self.lock = threading.Lock()
            self.condor_api = CONDOR_API
            self.condor_schedd = None
            self.condor_pool = None
            # caching disabled by default; enabled from the parameter below
            self.cacheEnable = False
            if self.submissionHost in ('LOCAL', 'None'):
                tmpLog.debug('submissionHost is {0}, treated as local schedd. Skipped'.format(self.submissionHost))
            else:
                try:
                    # submissionHost is expected to be '<schedd>,<pool>[,...]'
                    self.condor_schedd, self.condor_pool = self.submissionHost.split(',')[0:2]
                except ValueError:
                    tmpLog.error('Invalid submissionHost: {0} . Skipped'.format(self.submissionHost))
            if self.condor_api == 'python':
                try:
                    self.secman = htcondor.SecMan()
                    self.renew_session()
                except Exception as e:
                    # fall back to condor CLI commands when the python bindings are unusable
                    self.condor_api = 'command'
                    tmpLog.warning('Using condor command instead due to exception from unsupported version of python or condor api: {0}'.format(e))
            self.cacheEnable = cacheEnable
            if self.cacheEnable:
                # local cache as (list of job ads dicts, timestamp of last update)
                self.cache = ([], 0)
                self.cacheRefreshInterval = cacheRefreshInterval
            self.useCondorHistory = useCondorHistory
            tmpLog.debug('Initialize done')
def get_all(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.get_all')
# Get all
tmpLog.debug('Start')
job_ads_all_dict = {}
if self.condor_api == 'python':
try:
if self.cacheEnable:
job_ads_all_dict = self.query_with_python_cached(batchIDs_list)
else:
job_ads_all_dict = self.query_with_python(batchIDs_list)
except RuntimeError as e:
tmpLog.error(e)
if self.lock.acquire(False):
self.renew_session()
self.lock.release()
except Exception as e:
tmpLog.warning('Using condor command instead due to exception from unsupported version of python or condor api: {0}'.format(e))
job_ads_all_dict = self.query_with_command(batchIDs_list)
else:
job_ads_all_dict = self.query_with_command(batchIDs_list)
return job_ads_all_dict
    def query_with_command(self, batchIDs_list=[]):
        """Query condor job ads by running condor_q / condor_history commands.

        The XML output of each command is parsed into per-job ads dicts keyed
        by '<submissionHost>#<batchID>'. Batch IDs found by neither command are
        recorded with an empty dict so harvester can treat the corresponding
        workers as missing.

        NOTE(review): mutable default argument; it is only read here so it is
        harmless, but callers should pass an explicit list.

        :param batchIDs_list: batch IDs to query (default: empty list)
        :return: dict mapping '<submissionHost>#<batchID>' to a job-ads dict
        """
        # Make logger
        tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_command')
        ## Start query
        tmpLog.debug('Start query')
        job_ads_all_dict = {}
        batchIDs_set = set(batchIDs_list)
        for orig_comStr in self.orig_comStr_list:
            ## Space-separated string of batchIDs still to be found
            batchIDs_str = ' '.join(list(batchIDs_set))
            ## Command: condor_q always runs; condor_history only while some IDs are still unfound
            if 'condor_q' in orig_comStr or ('condor_history' in orig_comStr and batchIDs_set):
                name_opt = '-name {0}'.format(self.condor_schedd) if self.condor_schedd else ''
                pool_opt = '-pool {0}'.format(self.condor_pool) if self.condor_pool else ''
                ids = batchIDs_str
                comStr = '{cmd} {name_opt} {pool_opt} {ids}'.format(cmd=orig_comStr,
                                                                    name_opt=name_opt,
                                                                    pool_opt=pool_opt,
                                                                    ids=ids)
            else:
                # tmpLog.debug('No batch job left to query in this cycle by this thread')
                continue
            tmpLog.debug('check with {0}'.format(comStr))
            (retCode, stdOut, stdErr) = _runShell(comStr)
            if retCode == 0:
                ## Command succeeded; strip the redundant XML root markers
                ## (badtext) so the concatenated output parses as one document
                job_ads_xml_str = '\n'.join(str(stdOut).split(self.badtext))
                if '<c>' in job_ads_xml_str:
                    ## Found at least one job
                    ## XML parsing
                    xml_root = ET.fromstring(job_ads_xml_str)
                    def _getAttribute_tuple(attribute_xml_element):
                        # Convert one <a n="name">value</a> element to a (name, text) pair
                        ## Attribute name
                        _n = str(attribute_xml_element.get('n'))
                        ## Attribute value text
                        _t = ' '.join(attribute_xml_element.itertext())
                        return (_n, _t)
                    ## Every batch job (each <c> element is one job's classad)
                    for _c in xml_root.findall('c'):
                        job_ads_dict = dict()
                        ## Every attribute
                        attribute_iter = map(_getAttribute_tuple, _c.findall('a'))
                        job_ads_dict.update(attribute_iter)
                        batchid = str(job_ads_dict['ClusterId'])
                        condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
                        job_ads_all_dict[condor_job_id] = job_ads_dict
                        ## Remove batch jobs already gotten from the list
                        if batchid in batchIDs_set:
                            batchIDs_set.discard(batchid)
                else:
                    ## Job not found
                    tmpLog.debug('job not found with {0}'.format(comStr))
                    continue
            else:
                ## Command failed
                errStr = 'command "{0}" failed, retCode={1}, error: {2} {3}'.format(comStr, retCode, stdOut, stdErr)
                tmpLog.error(errStr)
        if len(batchIDs_set) > 0:
            ## Job unfound via both condor_q or condor_history, marked as unknown worker in harvester
            for batchid in batchIDs_set:
                condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
                job_ads_all_dict[condor_job_id] = dict()
            tmpLog.info( 'Unfound batch jobs of submissionHost={0}: {1}'.format(
                            self.submissionHost, ' '.join(list(batchIDs_set)) ) )
        ## Return
        return job_ads_all_dict
def query_with_python(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_python')
## Start query
tmpLog.debug('Start query')
job_ads_all_dict = {}
batchIDs_set = set(batchIDs_list)
if self.useCondorHistory:
query_method_list = [self.schedd.xquery, self.schedd.history]
else:
query_method_list = [self.schedd.xquery]
for query_method in query_method_list:
## Make requirements
batchIDs_str = ','.join(list(batchIDs_set))
requirements = 'member(ClusterID, {{{0}}})'.format(batchIDs_str)
tmpLog.debug('Query method: {0} ; batchIDs: "{1}"'.format(query_method.__name__, batchIDs_str))
## Query
jobs_iter = query_method(requirements=requirements, projection=CONDOR_JOB_ADS_LIST)
for job in jobs_iter:
job_ads_dict = dict(job)
batchid = str(job_ads_dict['ClusterId'])
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = job_ads_dict
## Remove batch jobs already gotten from the list
batchIDs_set.discard(batchid)
if len(batchIDs_set) == 0:
break
## Remaining
if len(batchIDs_set) > 0:
## Job unfound via both condor_q or condor_history, marked as unknown worker in harvester
for batchid in batchIDs_set:
condor_job_id = '{0}#{1}'.format(self.submissionHost, batchid)
job_ads_all_dict[condor_job_id] = dict()
tmpLog.info( 'Unfound batch jobs of submissionHost={0}: {1}'.format(
self.submissionHost, ' '.join(list(batchIDs_set)) ) )
## Return
return job_ads_all_dict
def query_with_python_cached(self, batchIDs_list=[]):
# Make logger
tmpLog = core_utils.make_logger(baseLogger, 'submissionHost={0}'.format(self.submissionHost), method_name='CondorJobQuery.query_with_python_cached')
## Start query
tmpLog.debug('Start query')
cache_fifo = CondorQCacheFifo(target=self.submissionHost, id='{0},{1}'.format(self.submissionHost, get_ident()))
job_ads_all_dict = {}
batchIDs_set = set(batchIDs_list)
## query from cache
def cache_query(requirements=None, projection=CONDOR_JOB_ADS_LIST, timeout=60):
# query from condor xquery and update cache to fifo
def update_cache(lockInterval=90):
tmpLog.debug('update_cache')
# acquire lock with score timestamp
score = time.time() - self.cacheRefreshInterval + lockInterval
lock_key = cache_fifo.lock(score=score)
if lock_key is not None:
# acquired lock, update from condor schedd
tmpLog.debug('got lock, updating cache')
jobs_iter_orig = self.schedd.xquery(requirements=requirements, projection=projection)
jobs_iter = [ dict(job) for job in jobs_iter_orig ]
timeNow = time.time()
cache_fifo.put(jobs_iter, timeNow)
self.cache = (jobs_iter, timeNow)
# release lock
retVal = cache_fifo.unlock(key=lock_key)
if retVal:
tmpLog.debug('done update cache and unlock')
else:
tmpLog.warning('cannot unlock... Maybe something wrong')
return jobs_iter
else:
tmpLog.debug('cache fifo locked by other thread. Skipped')
return None
# remove invalid or outdated caches from fifo
def cleanup_cache(timeout=60):
tmpLog.debug('cleanup_cache')
id_list = list()
attempt_timestamp = time.time()
n_cleanup = 0
while True:
if time.time() > attempt_timestamp + timeout:
tmpLog.debug('time is up when cleanup cache. Skipped')
break
peeked_tuple = cache_fifo.peek(skip_item=True)
if peeked_tuple is None:
tmpLog.debug('empty cache fifo')
break
elif peeked_tuple.score is not None \
and time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
tmpLog.debug('nothing expired')
break
elif peeked_tuple.id is not None:
retVal = cache_fifo.release([peeked_tuple.id])
if isinstance(retVal, int):
n_cleanup += retVal
else:
# problematic
tmpLog.warning('got nothing when cleanup cache, maybe problematic. Skipped')
break
tmpLog.debug('cleaned up {0} objects in cache fifo'.format(n_cleanup))
# start
jobs_iter = tuple()
try:
attempt_timestamp = time.time()
while True:
if time.time() > attempt_timestamp + timeout:
# skip cache_query if too long
tmpLog.debug('cache_query got timeout ({0} seconds). Skipped '.format(timeout))
break
# get latest cache
peeked_tuple = cache_fifo.peeklast(skip_item=True)
if peeked_tuple is not None and peeked_tuple.score is not None:
# got something
if peeked_tuple.id == cache_fifo.global_lock_id:
if time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
# lock
tmpLog.debug('got fifo locked. Wait and retry...')
time.sleep(random.uniform(1, 5))
continue
else:
# expired lock
tmpLog.debug('got lock expired. Clean up and retry...')
cleanup_cache()
continue
elif time.time() <= peeked_tuple.score + self.cacheRefreshInterval:
# got valid cache
_obj, _last_update = self.cache
if _last_update >= peeked_tuple.score:
# valid local cache
tmpLog.debug('valid local cache')
jobs_iter = _obj
else:
# valid fifo cache
tmpLog.debug('update local cache from fifo')
peeked_tuple_with_item = cache_fifo.peeklast()
if peeked_tuple_with_item is not None \
and peeked_tuple.id != cache_fifo.global_lock_id \
and peeked_tuple_with_item.item is not None:
jobs_iter = cache_fifo.decode(peeked_tuple_with_item.item)
self.cache = (jobs_iter, peeked_tuple_with_item.score)
else:
tmpLog.debug('peeked invalid cache fifo object. Wait and retry...')
time.sleep(random.uniform(1, 5))
continue
else:
# cache expired
tmpLog.debug('update cache in fifo')
retVal = update_cache()
if retVal is not None:
jobs_iter = retVal
cleanup_cache()
break
else:
# no cache in fifo, check with size again
if cache_fifo.size() == 0:
if time.time() > attempt_timestamp + random.uniform(10, 30):
# have waited for long enough, update cache
tmpLog.debug('waited enough, update | |
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1024 'Z' (7 pixels wide)
0x00, #
0xFE, # OOOOOOO
0x02, # O
0x04, # O
0x08, # O
0x08, # O
0x10, # O
0x20, # O
0x20, # O
0x40, # O
0x80, # O
0xFE, # OOOOOOO
0x00, #
0x00, #
0x00, #
0x00, #
# @1040 '[' (2 pixels wide)
0x00, #
0xC0, # OO
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0xC0, # OO
0x00, #
# @1056 '\' (4 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x20, # O
0x20, # O
0x20, # O
0x10, # O
0x10, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @1072 ']' (2 pixels wide)
0x00, #
0xC0, # OO
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0xC0, # OO
0x00, #
# @1088 '^' (5 pixels wide)
0x00, #
0x20, # O
0x20, # O
0x50, # O O
0x50, # O O
0x50, # O O
0x88, # O O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1104 '_' (7 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0xFE, # OOOOOOO
0x00, #
0x00, #
# @1120 '`' (2 pixels wide)
0x00, #
0x80, # O
0x40, # O
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
0x00, #
# @1136 'a' (5 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x60, # OO
0x90, # O O
0x88, # O O
0x18, # OO
0x68, # OO O
0x88, # O O
0x98, # O OO
0x68, # OO O
0x00, #
0x00, #
0x00, #
0x00, #
# @1152 'b' (6 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0xB0, # O OO
0xC8, # OO O
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0xC8, # OO O
0xB0, # O OO
0x00, #
0x00, #
0x00, #
0x00, #
# @1168 'c' (5 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x30, # OO
0x48, # O O
0x88, # O O
0x80, # O
0x80, # O
0x88, # O O
0x48, # O O
0x30, # OO
0x00, #
0x00, #
0x00, #
0x00, #
# @1184 'd' (6 pixels wide)
0x00, #
0x04, # O
0x04, # O
0x04, # O
0x34, # OO O
0x4C, # O OO
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x4C, # O OO
0x34, # OO O
0x00, #
0x00, #
0x00, #
0x00, #
# @1200 'e' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x30, # OO
0x48, # O O
0x84, # O O
0xFC, # OOOOOO
0x80, # O
0x84, # O O
0x48, # O O
0x30, # OO
0x00, #
0x00, #
0x00, #
0x00, #
# @1216 'f' (3 pixels wide)
0x00, #
0x60, # OO
0x40, # O
0x40, # O
0xE0, # OOO
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @1232 'g' (6 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, #
0x34, # OO O
0x4C, # O OO
0x84, # O O
0x84, # O O
0x84, # O O
0x84, # O O
0x4C, # O OO
0x34, # OO O
0x84, # O O
0x88, # O O
0x70, # OOO
0x00, #
# @1248 'h' (5 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0xB0, # O OO
0xC8, # OO O
0x88, # O O
0x88, # O O
0x88, # O O
0x88, # O O
0x88, # O O
0x88, # O O
0x00, #
0x00, #
0x00, #
0x00, #
# @1264 'i' (1 pixels wide)
0x00, #
0x80, # O
0x00, #
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @1280 'j' (2 pixels wide)
0x00, #
0x40, # O
0x00, #
0x00, #
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0x40, # O
0xC0, # OO
0x00, #
# @1296 'k' (5 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x88, # O O
0x90, # O O
0xA0, # O O
0xE0, # OOO
0x90, # O O
0x90, # O O
0x88, # O O
0x88, # O O
0x00, #
0x00, #
0x00, #
0x00, #
# @1312 'l' (1 pixels wide)
0x00, #
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x80, # O
0x00, #
0x00, #
0x00, #
0x00, #
# @1328 'm' (9 pixels wide)
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0xB7, 0x00, # O OO OOO
0xCC, 0x80, # OO OO O
0x88, 0x80, # O O O
0x88, 0x80, # O O O
0x88, 0x80, # O O O
0x88, 0x80, # O O O
0x88, 0x80, # O O O
0x88, 0x80, # O O O
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
0x00, 0x00, #
# @1360 'n' (5 pixels wide)
0x00, #
0x00, #
0x00, #
0x00, | |
"CSF", "GM", "WM"],
"title": "MRBrainS Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "ABIDE Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "ABIDE Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Runtime", PlotType.TEXT_PLOT,
params={"opts": {"title": "Runtime"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Dice score per class per epoch", every=1,
params={"title": "Dice score on test patches per class per epoch",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff Distance per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Hausdorff Distance per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed iSEG image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed iSEG image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed MRBrainS image", every=1,
params={
"title": "Dice score per class per epoch on reconstructed MRBrainS image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Dice score per class per epoch on reconstructed ABIDE image",
every=1,
params={
"title": "Dice score per class per epoch on reconstructed ABIDE image",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth iSEG Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth iSEG Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth MRBrainS Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Per Dataset Mean Hausdorff Distance", every=1,
params={"title": "Per Dataset Mean Hausdorff Distance",
"legend": list(dataset_configs.keys())}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
Checkpoint(save_folder, monitor_fn=lambda model_trainer: model_trainer.valid_loss, delta=0.01,
mode=MonitorMode.MIN), Event.ON_EPOCH_END) \
.with_event_handler(PlotAvgGradientPerLayer(visdom_logger, every=25), Event.ON_TRAIN_BATCH_END)
return trainer
elif self._trainer == TrainerType.WGAN_Multimodal:
trainer = WGANMultimodalTrainer(training_config, model_trainers, dataloaders[0], dataloaders[1],
dataloaders[2],
reconstruction_datasets, normalized_reconstructor, input_reconstructor,
segmentation_reconstructor, augmented_input_reconstructor,
gt_reconstructor,
run_config, dataset_configs, save_folder) \
.with_event_handler(PrintTrainingStatus(every=25), Event.ON_BATCH_END) \
.with_event_handler(PrintMonitors(every=25), Event.ON_BATCH_END) \
.with_event_handler(PlotMonitors(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(PlotLR(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Segmented Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Segmentation Ground Truth Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Label Map Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "GM Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "GM Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "WM Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "WM Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Inputs Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "GM Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "GM Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "WM Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "WM Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Pie Plot", PlotType.PIE_PLOT,
params={"opts": {"title": "Classification hit per classes",
"legend": list(map(lambda key: key,
dataset_configs.keys())) + [
"Fake Class"]}},
every=25), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Pie Plot True", PlotType.PIE_PLOT,
params={"opts": {"title": "Batch data distribution",
"legend": list(map(lambda key: key,
dataset_configs.keys())) + [
"Fake Class"]}},
every=25), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Mean Hausdorff Distance", PlotType.LINE_PLOT,
params={"opts": | |
<reponame>lrivallain/pyvcloud
# VMware vCloud Director Python SDK
# Copyright (c) 2014-2018 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import shutil
import tarfile
import tempfile
import time
import traceback
import urllib
from lxml import etree
from lxml import objectify
from pyvcloud.vcd.acl import Acl
from pyvcloud.vcd.client import ApiVersion
from pyvcloud.vcd.client import E
from pyvcloud.vcd.client import E_OVF
from pyvcloud.vcd.client import EntityType
from pyvcloud.vcd.client import find_link
from pyvcloud.vcd.client import get_links
from pyvcloud.vcd.client import MetadataDomain
from pyvcloud.vcd.client import MetadataValueType
from pyvcloud.vcd.client import MetadataVisibility
from pyvcloud.vcd.client import NSMAP
from pyvcloud.vcd.client import QueryResultFormat
from pyvcloud.vcd.client import RelationType
from pyvcloud.vcd.client import ResourceType
from pyvcloud.vcd.exceptions import DownloadException
from pyvcloud.vcd.exceptions import EntityNotFoundException
from pyvcloud.vcd.exceptions import InvalidParameterException
from pyvcloud.vcd.exceptions import OperationNotSupportedException
from pyvcloud.vcd.exceptions import UploadException
from pyvcloud.vcd.metadata import Metadata
from pyvcloud.vcd.system import System
from pyvcloud.vcd.utils import extract_id
from pyvcloud.vcd.utils import get_admin_href
from pyvcloud.vcd.utils import get_non_admin_href
from pyvcloud.vcd.utils import get_safe_members_in_tar_file
from pyvcloud.vcd.utils import is_admin
from pyvcloud.vcd.utils import retrieve_compute_policy_id_from_href
from pyvcloud.vcd.utils import to_dict
from pyvcloud.vcd.utils import update_vm_compute_policy_element
from pyvcloud.vcd.utils import VDC_COMPUTE_POLICY_MAX_API_VERSION
from pyvcloud.vcd.utils import VDC_COMPUTE_POLICY_MIN_API_VERSION
from pyvcloud.vcd.utils import VM_SIZING_POLICY_MIN_API_VERSION
# Until pyvcloud v20.0.0, 1 MB was the default chunk size;
# in contrast, the vCD H5 UI uses 50 MB for the upload chunk size.
# 10 MB is a happy medium between 50 MB and 1 MB.
# 10 MB per upload chunk; see the rationale in the comment above.
DEFAULT_CHUNK_SIZE = 10 * 1024 * 1024
# HTTP header carrying the tenant (org) context on admin API calls.
TENANT_CONTEXT_HDR = 'X-VMWARE-VCLOUD-TENANT-CONTEXT'
class Org(object):
def __init__(self, client, href=None, resource=None):
"""Constructor for Org objects.
:param pyvcloud.vcd.client.Client client: the client that will be used
to make REST calls to vCD.
:param str href: URI of the entity.
:param lxml.objectify.ObjectifiedElement resource: object
containing EntityType.ORG XML data representing the organization.
"""
self.client = client
if href is None and resource is None:
raise InvalidParameterException('Org initialization failed as '
'arguments are either invalid '
'or None')
self.href = href
self.resource = resource
if resource is not None:
self.href = resource.get('href')
self.href_admin = get_admin_href(self.href)
    def reload(self):
        """Reloads the resource representation of the organization.

        This method should be called in between two method invocations on the
        Org object, if the former call changes the representation of the
        organization in vCD.
        """
        # Re-fetch the EntityType.ORG XML from vCD using the stored href.
        self.resource = self.client.get_resource(self.href)
    def get_name(self):
        """Retrieves the name of the organization.

        :return: name of the organization.

        :rtype: str
        """
        # Fetch the org representation lazily on first access.
        if self.resource is None:
            self.reload()
        return self.resource.get('name')
def create_catalog(self, name, description):
"""Create a catalog in the organization.
:param str name: name of the catalog to be created.
:param str description: description of the catalog to be created.
:return: an object containing EntityType.ADMIN_CATALOG XML data
representing a sparsely populated catalog element.
:rtype: lxml.objectify.ObjectifiedElement
"""
if self.resource is None:
self.reload()
payload = E.AdminCatalog(E.Description(description), name=name)
extra_headers = {TENANT_CONTEXT_HDR:
extract_id(self.resource.attrib['id'])}
return self.client.post_linked_resource(
self.resource, RelationType.ADD, EntityType.ADMIN_CATALOG.value,
payload, extra_headers=extra_headers)
def delete_catalog(self, name):
"""Delete a catalog in the organization.
:param str name: name of the catalog to be deleted.
:raises: EntityNotFoundException: if the named catalog can not be
found.
:raises: sub-class of VcdResponseException: if the REST call is not
successful.
"""
catalog_admin_resource = self.get_catalog(
name=name, is_admin_operation=True)
self.client.delete_linked_resource(
catalog_admin_resource, RelationType.REMOVE, media_type=None)
def list_catalogs(self):
"""List all catalogs in the organization.
:return: a list of dictionaries, where each item contains information
about a catalog in the organization.
:rtype: list
"""
if self.client.is_sysadmin():
resource_type = ResourceType.ADMIN_CATALOG.value
else:
resource_type = ResourceType.CATALOG.value
result = []
q = self.client.get_typed_query(
resource_type, query_result_format=QueryResultFormat.ID_RECORDS)
records = list(q.execute())
for r in records:
result.append(
to_dict(
r, resource_type=resource_type, exclude=['owner', 'org']))
return result
def get_catalog(self, name, is_admin_operation=False):
"""Retrieves a catalog by name.
:param str name: name of the catalog to be retrieved.
:param bool is_admin_operation: if set True, will return the admin
view of the catalog.
:return: an object containing EntityType.CATALOG or
EntityType.ADMIN_CATALOG XML data representing the catalog.
:rtype: lxml.objectify.ObjectifiedElement
:raises: EntityNotFoundException: if the named catalog can not be
found.
"""
if self.resource is None:
self.reload()
if self.client.get_api_version() < ApiVersion.VERSION_33.value:
links = get_links(
self.resource,
rel=RelationType.DOWN,
media_type=EntityType.CATALOG.value)
else:
if hasattr(self.resource, "Catalogs"):
catalogs = self.resource.Catalogs
for catalog in catalogs:
if hasattr(catalog, "CatalogReference"):
if name == catalog.CatalogReference.get("name"):
href = catalog.CatalogReference.get("href")
return self.client.get_resource(href)
else:
links = self.client.get_resource_link_from_query_object(
self.resource,
media_type=EntityType.RECORDS.value,
type='catalog')
if links:
for link in links:
if name == link.name:
if is_admin_operation:
href = get_admin_href(link.href)
else:
href = link.href
return self.client.get_resource(href)
raise EntityNotFoundException('Catalog not found (or)'
' Access to resource is forbidden')
def update_catalog(self, old_catalog_name, new_catalog_name, description):
"""Update the name and/or description of a catalog.
:param str old_catalog_name: current name of the catalog.
:param str new_catalog_name: new name of the catalog.
:param str description: new description of the catalog.
:return: an object containing EntityType.ADMIN_CATALOG XML data
describing the updated catalog.
:rtype: lxml.objectify.ObjectifiedElement
:raises: EntityNotFoundException: if the named catalog can not be
found.
"""
admin_catalog_resource = self.get_catalog(
old_catalog_name, is_admin_operation=True)
if new_catalog_name is not None:
admin_catalog_resource.set('name', new_catalog_name)
if description is not None:
admin_catalog_resource['Description'] = E.Description(description)
return self.client.put_linked_resource(
admin_catalog_resource,
rel=RelationType.EDIT,
media_type=EntityType.ADMIN_CATALOG.value,
contents=admin_catalog_resource)
def share_catalog(self, name, share=True):
"""Share a catalog with all org-admins of all organizations.
This operation can be performed by only System Administrators.
:param str name: name of the catalog to be shared.
:raises: EntityNotFoundException: if the named catalog can not be
found.
"""
# The link to publish catalog was moved to /api/catalog/{id}'s body in
# vCD 9.1 (api v30.0). In previous versions the link to share catalog
# was present in the /api/admin/catalog/{id}'s body.
# Currently the lowest supported vCD api version is 29.0, which is the
# only version for which we need to look in /api/admin/catalog/{id}'s
# body while trying to share a catalog.
# TODO() remove this conditional logic once we stop supporting api
# version 29.0.
if self.client.get_api_version() == ApiVersion.VERSION_29.value:
catalog_resource = self.get_catalog(name, is_admin_operation=True)
else:
catalog_resource = self.get_catalog(name)
is_published = 'true' if share else 'false'
params = E.PublishCatalogParams(E.IsPublished(is_published))
return self.client.post_linked_resource(
resource=catalog_resource,
rel=RelationType.PUBLISH,
media_type=EntityType.PUBLISH_CATALOG_PARAMS.value,
contents=params)
def change_catalog_owner(self, catalog_name, user_name):
"""Change the ownership of catalog to a given user.
This operation can be performed by only users with admin privileges.
:param str catalog_name: name of the catalog whose ownership needs
to be changed
:param str user_name: name of the new owner of the catalog
"""
catalog_admin_resource = self.get_catalog(
catalog_name, is_admin_operation=True)
new_user_resource = self.get_user(user_name)
owner_resource = catalog_admin_resource.Owner
owner_resource.User.set('href', new_user_resource.get('href'))
objectify.deannotate(owner_resource)
return self.client.put_linked_resource(
resource=catalog_admin_resource,
rel=RelationType.DOWN,
media_type=EntityType.OWNER.value,
contents=owner_resource)
def list_catalog_items(self, name):
"""Retrieve all items in a catalog.
:param str name: name of the catalog whose items need to be retrieved.
:return: a list of dictionaries. Each dict object contains 'name' and
'id' of an item in the catalog.
:rtype: dict
:raises: EntityNotFoundException: if the named catalog can not be
found.
"""
catalog_resource = self.get_catalog(name)
items = []
for item in catalog_resource.CatalogItems.getchildren():
items.append({'name': item.get('name'), 'id': item.get('id')})
return items
def get_catalog_item(self, name, item_name):
"""Retrieve an item in a catalog.
:param str name: name of the catalog whose item needs to be retrieved.
:param str item_name: name of the item which needs to be retrieved.
:return: an object containing EntityType.MEDIA or
EntityTYPE.VAPP_TEMPLATE XML data describing the entity
corresponding to the catalog item.
:rtype: lxml.objectify.ObjectifiedElement
:raises: EntityNotFoundException: if the catalog/named item can not be
found.
"""
catalog_resource = self.get_catalog(name)
for item in catalog_resource.CatalogItems.getchildren():
if item.get('name') == item_name:
return self.client.get_resource(item.get('href'))
raise EntityNotFoundException('Catalog item not found.')
def delete_catalog_item(self, name, item_name):
"""Delete an item from a catalog.
:param str name: name of the catalog whose item needs to be deleted.
:param str item_name: name of the item which needs to be deleted.
:raises: EntityNotFoundException: if the catalog/named item can not be
found.
"""
catalog_resource = self.get_catalog(name)
for item in catalog_resource.CatalogItems.getchildren():
if item.get('name') == item_name:
self.client.delete_resource(item.get('href'))
return
raise EntityNotFoundException('Catalog item not found.')
def _is_enable_download_required(self, entity_resource, item_type):
"""Helper method to determine need for download enabling.
:param lxml.objectify.ObjectifiedElement entity_resource: an object
containing EntityType.MEDIA or EntityType.VAPP_TEMPLATE XML data
describing the entity corresponding to the catalog item which
needs to be downloaded.
:param str item_type: type of entity we are trying to enable for
download. Valid values are EntityType.VAPP_TEMPLATE and
EntityType.MEDIA.
return: True, if the entity needs to | |
from https://github.com/google/jax/issues/1907
key = jax.random.PRNGKey(0)
key, split = jax.random.split(key)
n = 5
def func(D0):
def shift(R, dR, **unused_kwargs):
return R + dR
def apply_fn(R):
return D0 * R
Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,
dtype=jnp.float32)
def move(R,i):
F = apply_fn(R)
return shift(R, 0.001 * F), jnp.array([0.])
move = api.remat(move)
R, temp = lax.scan(move, Rinit, jnp.arange(2))
return R[0, 0]
api.grad(func)(5.0) # doesn't crash
  def test_remat_jit2(self):
    # jit of a function whose remat'd inner closure captures a traced
    # intermediate; must produce the right value, not crash.
    @api.jit
    def f(x):
      y = 2 * x
      @api.remat
      def g():
        return y
      return g()
    self.assertAllClose(f(3), 6, check_dtypes=False)
  def test_remat_nontrivial_env(self):
    # simplified from https://github.com/google/jax/issues/2030
    # remat through jit + scan with a nontrivial environment (keyword
    # defaults and static_argnums) should run without raising.
    @api.remat
    def foo(state, dt=0.5, c=1):
      u, u_t = state
      u_tt = c**2 * u
      u_t = u_t + u_tt * dt
      return (u, u_t)
    @partial(api.jit, static_argnums=(1,))
    def _multi_step(state, count, dt, c):
      f = lambda s, _: (foo(s, dt, c), _)
      return lax.scan(f, state, None, count)
    def multi_step(state, count, dt=1/jnp.sqrt(2), c=1):
      return _multi_step(state, count, dt, c)
    def loss(u0, target, steps, dt=1/jnp.sqrt(2), c=1):
      init = (u0, jnp.zeros_like(u0))
      (uf, _), _ = multi_step(init, steps, dt, c)
      return ((uf - target) ** 2).mean()
    target = jnp.zeros((128, 128))
    u0 = jnp.ones_like(target)
    loss(u0, target, 10)  # doesn't crash
  def test_remat_jit3(self):
    # https://github.com/google/jax/issues/2180
    # grad-of-remat where an intermediate is consumed twice (einsum on
    # itself); must not crash.
    def f(w, x):
      a = jnp.dot(x, w)
      b = jnp.einsum("btd,bTd->btT", a, a)
      c = jnp.einsum("btT,btd->btd", b, a)
      return jnp.sum(c)
    w = jnp.ones([1, 1])
    x = jnp.ones([1, 1, 1])
    f = api.remat(f)
    api.grad(f)(w, x)  # doesn't crash
    # Same shape of computation, but with a jit-wrapped primitive inside
    # the remat'd function.
    @api.jit
    def mul(a, b):
      return a * b
    def f(w, x):
      a = mul(w, x)
      b = mul(a, a)
      return b
    w = 1.
    x = 1.
    f = api.remat(f)
    api.grad(f)(w, x)  # doesn't crash
def test_remat_scan2(self):
# https://github.com/google/jax/issues/1963
def scan_bug(x0):
f = lambda x, _: (x + 1, None)
def scanned_f(x, _):
return lax.scan(f, x, xs=None, length=1)[0], None
x, _ = jax.remat(scanned_f)(x0, None)
return x
jax.grad(scan_bug)(1.0) # doesn't crash
  def test_remat_jit_static_argnum_omnistaging(self):
    # https://github.com/google/jax/issues/2833
    # A Python-level bool branch on a static argument, routed through a
    # core.call_p wrapper, must work under jit with static_argnums.
    def named_call(f):
      def named_f(*args):
        f_ = lu.wrap_init(lambda: (f(*args),))
        out, = core.call_p.bind(f_)
        return out
      return named_f
    def f(a_bool, y):
      if a_bool:
        return y + 1
      else:
        return y
    api.jit(named_call(f), static_argnums=0)(True, 1)  # no crash
  def test_remat_eval_counter(self):
    # https://github.com/google/jax/issues/2737
    # Counts primitive impl evaluations to pin down exactly how often
    # remat re-runs the forward pass during vjp and transposition.
    add_one_p = Primitive('add_one')
    add_one = add_one_p.bind
    num_evals = 0
    @contextmanager
    def assertEvals(n):
      start = num_evals
      yield
      assert num_evals - start == n
    def add_one_impl(x):
      nonlocal num_evals
      num_evals += 1
      return x + 1
    add_one_p.def_impl(add_one_impl)
    def add_one_jvp(pin, tin):
      pout = add_one(pin[0])
      return pout, pout * tin[0]
    ad.primitive_jvps[add_one_p] = add_one_jvp
    add_one_p.def_abstract_eval(lambda x: x)
    v = np.zeros((1,))
    f = jax.remat(add_one)
    g = jax.remat(lambda x: add_one(f(x)))
    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, 1 call made while transposing f
    with assertEvals(3):
      vjp(v)
    @jax._src.util.curry
    def call(f, *args):
      return jax.core.call(
          jax.linear_util.wrap_init(lambda *args: [f(*args)]),
          *args, name='foo')[0]
    # Wrapping the inner function in a plain call (instead of remat)
    # avoids the extra forward re-evaluation during transposition.
    f = call(add_one)
    g = jax.remat(lambda x: add_one(f(x)))
    # 2 calls needed to evaluate g
    with assertEvals(2):
      _, vjp = jax.vjp(g, v)
    # 2 calls made while transposing g, no reevaluation for transposition of f
    with assertEvals(2):
      vjp(v)
  def test_escaped_tracer_remat(self):
    # b/169779185
    # Mutating closed-over state from inside remat leaks a tracer; this
    # must surface as UnexpectedTracerError mentioning "global state".
    def f():
      seq = [jnp.zeros([])]
      def g():
        seq[0] += 1  # this is line 7 btw
        return seq[0]
      api.remat(g)()
      api.remat(g)()
    with self.assertRaisesRegex(core.UnexpectedTracerError, "global state"):
      api.jit(f)()
class JaxprTest(jtu.JaxTestCase):
  # Tests for api.make_jaxpr: tracing Python callables to jaxpr form.

  def test_scalar_literals(self):
    # Scalar literals are inlined, not hoisted into constvars.
    jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
    self.assertLen(jaxpr.jaxpr.constvars, 0)

  def test_abstract_inputs(self):
    # make_jaxpr accepts duck-typed abstract values (shape/dtype only).
    jaxpr = api.make_jaxpr(lambda x: x + 2.)(
        types.SimpleNamespace(shape=(), dtype=np.float32))
    self.assertEqual(jaxpr.in_avals[0].shape, ())
    self.assertEqual(jaxpr.in_avals[0].dtype, np.float32)

  def test_const(self):
    # A numpy array constant becomes a constvar ('a' below).
    def fun(x):
      return (x, 1., np.zeros(1))
    expected = """
    { lambda a ; b.
      let
      in (b, 1.0, a) }
    """
    jaxpr = api.make_jaxpr(fun)(0.)
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_cond(self):
    # lax.cond traces both branches into a single cond primitive.
    def f(x):
      return lax.cond(x >= 0.,
                      x + 1.,
                      lambda xt: xt + x,
                      x + 2.,
                      lambda xf: xf - x)
    expected = """
    { lambda ; a.
      let b = ge a 0.0
          c = add a 1.0
          d = add a 2.0
          e = convert_element_type[ new_dtype=int32
                                    weak_type=False ] b
          f = cond[ branches=( { lambda ; e_ a b c.
                                 let d = sub c a
                                 in (d,) }
                               { lambda ; a f_ b c.
                                 let d = add b a
                                 in (d,) } )
                    linear=(False, False, False, False) ] e a a c d
      in (f,) }
    """
    jaxpr = api.make_jaxpr(f)(3.)
    self.assertMultiLineStrippedEqual(expected, str(jaxpr))

  def test_make_jaxpr_static_argnums(self):
    def f(x, y):
      return x + y
    jaxpr = api.make_jaxpr(f, static_argnums=(1,))(2, 3)
    # The static argument is baked into the jaxpr as a literal.
    self.assertIn('3', str(jaxpr))

  def test_make_jaxpr_return_shape(self):
    _, shape_tree = api.make_jaxpr(lambda x: (x + 1, jnp.zeros(2, jnp.float32)),
                                   return_shape=True)(np.int32(1))
    expected = (api.ShapeDtypeStruct(shape=(), dtype=jnp.int32),
                api.ShapeDtypeStruct(shape=(2,), dtype=jnp.float32))
    self.assertEqual(shape_tree, expected)

  def test_make_jaxpr_axis_env(self):
    # axis_env lets collectives like psum trace outside pmap.
    def f(x):
      return x - lax.psum(x, 'i')
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 4)])(2)
    self.assertIn('psum', str(jaxpr))

  def test_make_jaxpr_named(self):
    # Named shapes propagate into the avals of traced equations.
    def f(x):
      return x - lax.psum(x, 'i')
    x = types.SimpleNamespace(
        shape=(2, 3), dtype=jnp.float32, named_shape={'i': 10})
    jaxpr = api.make_jaxpr(f, axis_env=[('i', 10)])(x)
    named_shapes = [v.aval.named_shape for v in jaxpr.jaxpr.eqns[1].invars]
    self.assertEqual(named_shapes, [{'i': 10}, {}])
class CustomJVPTest(jtu.JaxTestCase):
  def test_basic(self):
    # custom_jvp: the primal call, jvp, and grad must all use the custom
    # rule (tangent 2*cos(x)*g, deliberately not the true derivative).
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    x = 3.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2 * jnp.cos(x)))
    self.assertAllClose(api.grad(f)(x), 2 * jnp.cos(x))
  def test_invariance(self):
    # The custom rule must be respected under nested api.jvp as well:
    # re-differentiating f through one or two jvp layers gives the same
    # result as differentiating f directly.
    @api.custom_jvp
    def f(x):
      return jnp.cos(2 * x) / 2.
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return (f(x), 3 * g)
    f.defjvp(f_jvp)
    def f2(x):
      y, _ = api.jvp(f, (x,), (x,))
      return y
    def f3(x):
      y, _ = api.jvp(f2, (x,), (x,))
      return y
    x = 1.
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f2, (x,), (x,)),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        api.jvp(f3, (x,), (x,)),
                        check_dtypes=False)
  def test_python_control_flow(self):
    # Python-level branching on concrete values works in both the primal
    # function and its custom jvp rule.
    @api.custom_jvp
    def f(x):
      if x > 0:
        return jnp.sin(x)
      else:
        return jnp.cos(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      if x > 0:
        return f(x), 2 * g
      else:
        return f(x), 3 * g
    f.defjvp(f_jvp)
    x = 2.
    self.assertAllClose(f(x), jnp.sin(x))
    self.assertAllClose(f(-x), jnp.cos(-x))
    self.assertAllClose(api.jvp(f, (x,), (1.,)),
                        (jnp.sin(x), 2.),
                        check_dtypes=False)
    self.assertAllClose(api.jvp(f, (-x,), (1.,)),
                        (jnp.cos(-x), 3.),
                        check_dtypes=False)
    self.assertAllClose(api.grad(f)(x), 2., check_dtypes=False)
    self.assertAllClose(api.grad(f)(-x), 3., check_dtypes=False)
  def test_vmap(self):
    # custom_jvp composes with vmap in every nesting order; the asserts
    # inside f/f_jvp check the rule always sees scalar (unbatched) values.
    @api.custom_jvp
    def f(x):
      assert jnp.ndim(x) == 0
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      assert jnp.ndim(x) == jnp.ndim(g) == 0
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    x = jnp.arange(3.)
    xx = jnp.arange(6.).reshape(2, 3)
    # vmap of f
    self.assertAllClose(api.vmap(f)(x), jnp.sin(x))
    self.assertAllClose(api.vmap(api.vmap(f))(xx), jnp.sin(xx))
    # vmap of jvp of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.vmap(api.vmap(lambda x: api.jvp(f, (x,), (x,))))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
    # jvp of vmap of f
    self.assertAllClose(api.jvp(api.vmap(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x))
    self.assertAllClose(api.jvp(api.vmap(api.vmap(f)), (xx,), (xx,)),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
    # vmap of jvp of vmap of f
    self.assertAllClose(api.vmap(lambda x: api.jvp(api.vmap(f), (x,), (x,)))(xx),
                        (jnp.sin(xx), 2 * jnp.cos(xx) * xx))
  def test_jit(self):
    # custom_jvp composes with jit in either nesting order.
    @api.custom_jvp
    def f(x):
      return jnp.sin(x)
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), 2 * jnp.cos(x) * g
    f.defjvp(f_jvp)
    x = 3.
    # jit
    self.assertAllClose(api.jit(f)(x), jnp.sin(x))
    self.assertAllClose(api.jit(api.jit(f))(x), jnp.sin(x))
    # jit of jvp
    self.assertAllClose(api.jit(lambda x: api.jvp(f, (x,), (x,)))(x),
                        (jnp.sin(x), 2 * jnp.cos(x) * x),
                        check_dtypes=False)
    # jvp of jit
    self.assertAllClose(api.jvp(api.jit(f), (x,), (x,)),
                        (jnp.sin(x), 2 * jnp.cos(x) * x),
                        check_dtypes=False)
  def test_pytrees(self):
    # custom_jvp functions may take and return pytrees (dicts here).
    @api.custom_jvp
    def f(x):
      return {'b': jnp.sin(x['a'])}
    def f_jvp(primals, tangents):
      x, = primals
      g, = tangents
      return f(x), {'b': 2 * jnp.cos(x['a']) * g['a']}
    f.defjvp(f_jvp)
    x = {'a': 3.}
    self.assertAllClose(f(x)['b'], jnp.sin(x['a']))
    self.assertAllClose(api.jvp(f, (x,), (x,)),
                        ({'b': jnp.sin(x['a'])},
                         {'b': 2 * jnp.cos(x['a']) * x['a']}),
                        check_dtypes=False)
  def test_kwargs(self):
    # from https://github.com/google/jax/issues/1938
    # Keyword arguments with defaults are passed to the jvp rule as
    # ordinary primals; calling through jvp must not crash.
    @api.custom_jvp
    def my_fun(x, y, c=1.):
      return c * (x + y)
    def my_jvp(primals, tangents):
      x, y, c = primals
      t_x, t_y, t_c = tangents
      return my_fun(x, y, c), t_c
    my_fun.defjvp(my_jvp)
    f = lambda x, y: jnp.square(my_fun(x, y, c=2.)).sum()
    f(10., 5.)  # doesn't crash
    api.jvp(f, (10., 5.), (1., 1.))  # doesn't crash
def test_initial_style(self):
@api.custom_jvp
def f(x):
return 3 * x
def f_jvp(primals, tangents):
x, = primals
g, = tangents
return f(x), 2 * g
f.defjvp(f_jvp)
def foo(x):
out, _ = lax.scan(lambda c, _: (f(c), None), x, None, length=1)
return out
ans = api.grad(foo)(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.jit(foo))(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(foo))(3.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(api.grad(foo))(3.)
expected = 0.
self.assertAllClose(ans, expected, check_dtypes=False)
ans = | |
import json
import logging
from cryptojwt.exception import BadSignature
from cryptojwt.jws.exception import JWSException
from cryptojwt.key_jar import KeyJar
from fedoidcmsg import ClientMetadataStatement
from fedoidcmsg import DoNotCompare
from fedoidcmsg import IgnoreKeys
from fedoidcmsg import MetadataStatementError
from fedoidcmsg import is_lesser
from fedoidcmsg import unfurl
from oidcmsg.exception import MissingSigningKey
from oidcmsg.oauth2 import Message
from oidcmsg.time_util import utc_time_sans_frac
__author__ = 'roland'
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class ParseError(Exception):
    """Raised when a metadata statement cannot be parsed or fetched."""
    pass
class ParseInfo(object):
    """Accumulator for the outcome of unpacking a metadata statement."""

    def __init__(self):
        # The raw input (metadata statement as a dictionary).
        self.input = None
        # Successfully parsed inner metadata statements.
        self.parsed_statement = []
        # Maps a statement (JWS string) to the exception it triggered.
        self.error = {}
        # The verified statement, set once parsing succeeds.
        self.result = None
        # Maps a statement to the ParseInfo produced while unpacking it.
        self.branch = {}
        self.keyjar = None
        # Signing keys propagated from the inner statements.
        self.signing_keys = None
class LessOrEqual(object):
    """
    Class in which to store the parse result from flattening a compounded
    metadata statement.
    """

    def __init__(self, iss='', sup=None, exp=0, keyjar=None, **kwargs):
        """
        :param iss: Issuer ID
        :param sup: Superior
        :type sup: LessOrEqual instance
        :param exp: Expiration time
        :param keyjar: Keys associated with this statement
        """
        # The federation operator ID is inherited from the superior when
        # one exists; otherwise this statement's issuer is the FO.
        if sup:
            self.fo = sup.fo
        else:
            self.fo = iss
        self.iss = iss
        self.sup = sup
        # NOTE(review): eval() replaces this dict with a *list* of error
        # records — the attribute's type is not stable.
        self.err = {}
        # Flattened (less-or-equal) claims, populated by eval().
        self.le = {}
        self.exp = exp
        self.keyjar = keyjar

    def __setitem__(self, key, value):
        # Dict-style write access to the flattened claims.
        self.le[key] = value

    def keys(self):
        """Return the claim names of the flattened result."""
        return self.le.keys()

    def items(self):
        """Return (claim, value) pairs of the flattened result."""
        return self.le.items()

    def __getitem__(self, item):
        # Dict-style read access to the flattened claims.
        return self.le[item]

    def __contains__(self, item):
        return item in self.le

    def sup_items(self):
        """
        Items (key+values) from the superior
        """
        if self.sup:
            return self.sup.le.items()
        else:
            # Empty dict iterates like an empty items() view.
            return {}

    def eval(self, orig):
        """
        Apply the less or equal algorithm on the ordered list of metadata
        statements

        :param orig: Start values
        :return:
        """
        _le = {}
        _err = []
        # A claim asserted by this statement survives only if its value is
        # 'lesser' (no broader) than what the superior allows.
        for k, v in self.sup_items():
            if k in DoNotCompare:
                continue
            if k in orig:
                if is_lesser(orig[k], v):
                    _le[k] = orig[k]
                else:
                    _err.append({'claim': k, 'policy': orig[k], 'err': v,
                                 'signer': self.iss})
            else:
                # Claims only the superior asserted pass through unchanged.
                _le[k] = v
        # Claims the superior did not mention are kept as self-asserted.
        for k, v in orig.items():
            if k in DoNotCompare:
                continue
            if k not in _le:
                _le[k] = v
        self.le = _le
        self.err = _err

    def protected_claims(self):
        """
        Someone in the list of signers has said this information is OK
        """
        # NOTE(review): implicitly returns None when there is no superior.
        if self.sup:
            return self.sup.le

    def unprotected_and_protected_claims(self):
        """
        This is both verified and self asserted information. As expected
        verified information beats self-asserted so if there is both
        self-asserted and verified values for a claim then only the verified
        will be returned.
        """
        if self.sup:
            res = {}
            for k, v in self.le.items():
                if k not in self.sup.le:
                    res[k] = v
                else:
                    # Verified (superior) value wins over self-asserted.
                    res[k] = self.sup.le[k]
            return res
        else:
            return self.le

    def is_expired(self):
        """Return True if this statement or any superior has expired."""
        now = utc_time_sans_frac()
        if self.exp < now:
            return True
        if self.sup:
            # Expiry anywhere up the chain invalidates the whole chain.
            return self.sup.is_expired()
        else:
            return False
def le_dict(les):
    """Index LessOrEqual instances by their federation operator ID.

    :param les: iterable of LessOrEqual instances
    :return: dict mapping each instance's ``fo`` attribute to the instance
    """
    return {le.fo: le for le in les}
def get_fo(ms):
    """Return the Federation Operator ID of a compounded metadata statement.

    Descends through nested 'metadata_statements' until the innermost
    statement is reached; that statement's 'iss' is the FO ID.

    :param ms: Metadata statement as a dictionary
    :return: issuer ID of the innermost statement
    :raises MetadataStatementError: if any level contains more than one
        (or zero) inner statements, since branching is not allowed here.
    """
    try:
        _mds = ms['metadata_statements']
    except KeyError:
        return ms['iss']

    # Fix: was 'assert len(_mds) == 1' caught as AssertionError — asserts
    # vanish under 'python -O', which would silently accept a branched
    # statement. Validate explicitly instead.
    if len(_mds) != 1:
        raise MetadataStatementError('Branching not allowed')

    return get_fo(list(_mds.values())[0])
class Operator(object):
"""
An operator in a OIDC federation.
"""
    def __init__(self, self_signer=None, jwks_bundle=None, httpcli=None,
                 iss=None, lifetime=3600, verify_ssl=True):
        """
        :param self_signer: A Signing Service instance
        :param jwks_bundle: Contains the federation operators signing keys
            for all the federations this instance wants to talk to.
            If present it MUST be a JWKSBundle instance.
        :param httpcli: A http client to use when information has to be
            fetched from somewhere else
        :param iss: Issuer ID
        :param lifetime: Default lifetime of signed statements produced
            by this signer.
        :param verify_ssl: Whether SSL certificates should be verified
        """
        self.self_signer = self_signer
        self.jwks_bundle = jwks_bundle
        self.httpcli = httpcli
        self.iss = iss
        # NOTE(review): presumably a record of failed operations, but it is
        # never written in the visible code — confirm before relying on it.
        self.failed = {}
        self.lifetime = lifetime
        self.verify_ssl = verify_ssl
    def signing_keys_as_jwks(self):
        """
        Build a JWKS from the signing keys belonging to the self signer

        :return: Dictionary
        """
        _l = [x.serialize() for x in self.self_signer.keyjar.get_signing_key()]
        if not _l:
            # Fall back to keys stored under this operator's own issuer ID.
            _l = [x.serialize() for x in
                  self.self_signer.keyjar.get_signing_key(owner=self.iss)]
        return {'keys': _l}
    def signing_keys_as_jwks_json(self):
        """Return the self signer's JWKS serialized as a JSON string."""
        return json.dumps(self.signing_keys_as_jwks())
    def _ums(self, pr, meta_s, keyjar):
        """Unpack one inner metadata statement and record the outcome.

        :param pr: ParseInfo accumulator to update
        :param meta_s: the inner metadata statement as a signed JWT
        :param keyjar: keys used for signature verification
        :return: the updated ParseInfo instance
        """
        try:
            _pi = self.unpack_metadata_statement(
                jwt_ms=meta_s, keyjar=keyjar)
        except (JWSException, BadSignature,
                MissingSigningKey) as err:
            # Verification failures are recorded, not propagated.
            logger.error('Encountered: {}'.format(err))
            pr.error[meta_s] = err
        else:
            pr.branch[meta_s] = _pi
            if _pi.result:
                pr.parsed_statement.append(_pi.result)
            pr.signing_keys = _pi.signing_keys
        return pr
    def self_signed(self, ms_dict, jwt_ms, cls):
        """Verify a self-signed metadata statement.

        Uses the statement's own embedded 'signing_keys' to verify the JWT.

        :param ms_dict: metadata statement as a dictionary
        :param jwt_ms: the same statement as a signed JWT
        :param cls: message class to deserialize the verified content into
        :return: instance of *cls* with the verified content
        """
        kj = KeyJar()
        kj.import_jwks_as_json(ms_dict['signing_keys'], ms_dict['iss'])
        return cls().from_jwt(jwt_ms, keyjar=kj)
    def _unpack(self, ms_dict, keyjar, cls, jwt_ms=None, liss=None):
        """
        Recursively unpack and verify a compounded metadata statement.

        :param ms_dict: Metadata statement as a dictionary
        :param keyjar: A keyjar with the necessary FO keys
        :param cls: What class to map the metadata into
        :param jwt_ms: Metadata statement as a JWS
        :param liss: List of FO issuer IDs
        :return: ParseInfo instance
        """
        if liss is None:
            liss = []
        _pr = ParseInfo()
        _pr.input = ms_dict
        # ms_flag records that inner statements were announced; if none of
        # them can be parsed we bail out before verifying the outer JWT.
        ms_flag = False
        if 'metadata_statements' in ms_dict:
            ms_flag = True
            for iss, _ms in ms_dict['metadata_statements'].items():
                # Skip federations the caller is not interested in.
                if liss and iss not in liss:
                    continue
                _pr = self._ums(_pr, _ms, keyjar)
        if 'metadata_statement_uris' in ms_dict:
            ms_flag = True
            if self.httpcli:
                for iss, url in ms_dict['metadata_statement_uris'].items():
                    if liss and iss not in liss:
                        continue
                    rsp = self.httpcli(method='GET', url=url,
                                       verify=self.verify_ssl)
                    if rsp.status_code == 200:
                        _pr = self._ums(_pr, rsp.text, keyjar)
                    else:
                        raise ParseError(
                            'Could not fetch jws from {}'.format(url))
        # Load the signing keys delivered by the verified inner statements
        # so the outer JWT's signature can be checked against them.
        for _ms in _pr.parsed_statement:
            if _ms:  # can be None
                loaded = False
                try:
                    keyjar.import_jwks_as_json(_ms['signing_keys'],
                                               ms_dict['iss'])
                except KeyError:
                    # No 'signing_keys' claim present; nothing to load.
                    pass
                except TypeError:
                    # The keys were a dict rather than a JSON string.
                    try:
                        keyjar.import_jwks(_ms['signing_keys'], ms_dict['iss'])
                    except Exception as err:
                        logger.error(err)
                        raise
                    else:
                        loaded = True
                else:
                    loaded = True
                if loaded:
                    logger.debug(
                        'Loaded signing keys belonging to {} into the '
                        'keyjar'.format(ms_dict['iss']))
        if ms_flag is True and not _pr.parsed_statement:
            # Inner statements were announced but none verified: give up.
            return _pr
        if jwt_ms:
            logger.debug("verifying signed JWT: {}".format(jwt_ms))
            try:
                _pr.result = cls().from_jwt(jwt_ms, keyjar=keyjar)
            except MissingSigningKey:
                # Fall back to verification with the statement's own
                # embedded keys (self-signed statement).
                if 'signing_keys' in ms_dict:
                    try:
                        _pr.result = self.self_signed(ms_dict, jwt_ms, cls)
                    except MissingSigningKey as err:
                        logger.error('Encountered: {}'.format(err))
                        _pr.error[jwt_ms] = err
            except (JWSException, BadSignature, KeyError) as err:
                logger.error('Encountered: {}'.format(err))
                _pr.error[jwt_ms] = err
        else:
            _pr.result = ms_dict
        # Re-attach the parsed inner statements to the result, keyed by FO.
        if _pr.result and _pr.parsed_statement:
            # NOTE(review): _prr is assigned but never used.
            _prr = _pr.result
            _res = {}
            for x in _pr.parsed_statement:
                if x:
                    _res[get_fo(x)] = x
            _msg = Message(**_res)
            logger.debug('Resulting metadata statement: {}'.format(_msg))
            _pr.result['metadata_statements'] = _msg
        return _pr
    def unpack_metadata_statement(self, ms_dict=None, jwt_ms='', keyjar=None,
                                  cls=ClientMetadataStatement, liss=None):
        """
        Starting with a signed JWT or a JSON document unpack and verify all
        the separate metadata statements.

        :param ms_dict: Metadata statement as a dictionary
        :param jwt_ms: Metadata statement as JWT
        :param keyjar: Keys that should be used to verify the signature of the
            document
        :param cls: What type (Class) of metadata statement this is
        :param liss: list of FO identifiers that matters. The rest will be
            ignored
        :return: A ParseInfo instance
        """
        if not keyjar:
            # Default to the federation operators' keys from the bundle,
            # or an empty keyjar when no bundle was configured.
            if self.jwks_bundle:
                keyjar = self.jwks_bundle.as_keyjar()
            else:
                keyjar = KeyJar()
        if jwt_ms:
            # A JWT input is decoded (without verification) to a dict
            # first; _unpack later verifies the signature.
            try:
                ms_dict = unfurl(jwt_ms)
            except JWSException as err:
                logger.error('Could not unfurl jwt_ms due to {}'.format(err))
                raise
        if ms_dict:
            return self._unpack(ms_dict, keyjar, cls, jwt_ms, liss)
        else:
            raise AttributeError('Need one of ms_dict or jwt_ms')
    def pack_metadata_statement(self, metadata, receiver='', iss='', lifetime=0,
                                sign_alg=''):
        """
        Given a MetadataStatement instance create a signed JWT.

        :param metadata: Original metadata statement as a MetadataStatement
            instance
        :param receiver: Receiver (audience) of the JWT
        :param iss: Issuer ID if different from default
        :param lifetime: JWT signature life time
        :param sign_alg: JWT signature algorithm
        :return: A JWT
        """
        # Delegate the actual signing to the configured signing service.
        return self.self_signer.sign(metadata, receiver=receiver, iss=iss,
                                     lifetime=lifetime, sign_alg=sign_alg)
def evaluate_metadata_statement(self, metadata, keyjar=None):
"""
Computes the resulting metadata statement from a compounded metadata
statement.
If something goes wrong during the evaluation an exception is raised
:param metadata: The compounded metadata statement as a dictionary
:return: A list of :py:class:`fedoidc.operator.LessOrEqual`
instances, one per FO.
"""
# start from the innermost metadata statement and work outwards
res = dict([(k, v) for k, v in metadata.items() if k not in IgnoreKeys])
les = []
if 'metadata_statements' in metadata:
for fo, ms in metadata['metadata_statements'].items():
if isinstance(ms, str):
ms = json.loads(ms)
for _le in self.evaluate_metadata_statement(ms):
if isinstance(ms, Message):
le = LessOrEqual(sup=_le, **ms.to_dict())
else: # Must be a dict
le = LessOrEqual(sup=_le, **ms)
if le.is_expired():
logger.error(
'This metadata statement has expired: {}'.format(ms)
)
logger.info('My time: {}'.format(utc_time_sans_frac()))
continue
le.eval(res)
les.append(le)
return les
else: # this is the innermost
try:
_iss = metadata['iss']
except:
le = LessOrEqual()
le.eval(res)
else:
le = LessOrEqual(iss=_iss, exp=metadata['exp'])
le.eval(res)
les.append(le)
return les
def correct_usage(self, metadata, federation_usage):
"""
Remove MS paths that are marked to be used for another usage
:param metadata: Metadata statement as dictionary
:param federation_usage: In which context this is expected to used.
:return: Filtered Metadata statement.
"""
if 'metadata_statements' in metadata:
_msl = {}
for fo, ms in metadata['metadata_statements'].items():
if not isinstance(ms, | |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from __future__ import division, print_function
from .image import Image
import os
import pygame
import numpy as np
# NOTE(review): presumably flipped to True once pygame initialization has
# run elsewhere in this module — confirm before relying on it.
PYGAME_INITIALIZED = False
# Public API of this module.
__all__ = [
    'Display'
]
class Display(object):
"""
    Display opens a window (Pygame Display Surface) to which you
can write images. The default resolution is (640, 480) but you can
also specify (0, 0) which will maximize the display. Flags are
pygame constants, including:
By default display will attempt to scale the input image to fit neatly
on the screen with minimal distortion. This means that if the aspect
ratio matches the screen it will scale cleanly. If your image does not
match the screen aspect ratio we will scale it to fit nicely while
maintaining its natural aspect ratio. Because PhloxAR performs this
scaling there are two sets of input mouse coordinates, the
(mouse_x, mouse_y) which scale to the image, and (mouse_raw_x, mouse_raw_y)
    which are the actual screen coordinates.
pygame.FULLSCREEN: create a fullscreen display.
pygame.DOUBLEBUF: recommended for HWSURFACE or OPENGL.
pygame.HWSURFACE: hardware accelerated, only in FULLSCREEN.
pygame.OPENGL: create an opengl renderable display.
pygame.RESIZABLE: display window should be sizeable.
pygame.NOFRAME: display window will have no border or controls.
Display should be used in a while loop with the isDone() method,
which checks events and sets the following internal state controls:
mouse_x: the x position of the mouse cursor on the input image.
mouse_y: the y position of the mouse cursor on the input image.
mouse_raw_x: The position of the mouse on the screen.
mouse_raw_y: The position of the mouse on the screen.
Note:
The mouse position on the screen is not the mouse position on the
image. If you are trying to draw on the image or take in coordinates
use mouse_x and mouse_y as these values are scaled along with the image.
mouse_l: the state of the left button.
mouse_r: the state of the right button.
mouse_m: the state of the middle button.
mouse_wheel_u: scroll wheel has been moved up.
mouse_wheel_d: the wheel has been clicked towards the bottom of the mouse.
"""
res = ''
src_res = ''
src_offset = ''
screen = ''
event_handler = ''
mq = ''
done = False
mouse_x = 0
mouse_y = 0
# actual (x, y) position on the screen
mouse_raw_x = 0
mouse_raw_y = 0
mouse_l = 0
mouse_r = 0
mouse_wheel_u = 0
mouse_wheel_d = 0
scale_x = 1.0
scale_y = 1.0
offset_x = 0
offset_y = 0
img_w = 0
img_h = 0
# lb for last left button & rb for right button
last_lb = 0
last_rb = 0
lb_down = None
lb_up = None
rb_down = None
rb_up = None
display_type = None
do_clamp = None
pressed = []
def __init__(self, res=(640, 480), flags=0, title='PhloxAR',
disptype='standard', headless=False):
"""
This is the generic display object. You are able to set the
display type.
The standard display type will pop up a window.
The notebook display type is to be used in conjunction with
IPython Notebooks. If you have IPython Notebooks installed you
just need to start IPython Notebooks an open in your browser.
:param res: the size of the display in pixels
:param flags: pygame flags
:param title: the title bar on the display
:param disptype: type of display. Options are as follows:
'standard': a pygame window
'notebook': IPython web notebook output.
:param headless: if False we ignore headless mode. If True, all
rendering is suspended.
"""
global PYGAME_INITIALIZED
if headless:
os.environ['SDL_VIDEODRIVER'] = 'dummy'
if not PYGAME_INITIALIZED:
if not disptype == 'notebook':
pygame.init()
PYGAME_INITIALIZED = True
self.scale_x = 1.0
self.scale_y = 1.0
self.offset_x = 0
self.offset_y = 0
self.last_lb = 0
self.last_rb = 0
self.lb_down = 0
self.rb_down = 0
self.lb_up = 0
self.rb_up = 0
self.pressed = None
self.display_type = disptype
self.mouse_raw_x = 0
self.mouse_raw_y = 0
self.res = res
self.do_clamp = False
if not disptype == 'notebook':
self.screen = pygame.display.set_mode(res, flags)
# checks if phloxar.png exists
if os.path.isfile(os.path.join(LAUNCH_PATH, 'sample_images', 'phloxar.png')):
plxlogo = Image('phloxar').scale(32, 32)
pygame.display.set_icon(plxlogo.surface())
if flags != pygame.FULLSCREEN and flags != pygame.NOFRAME:
pygame.display.set_caption(title)
def left_button_up_pos(self):
"""
Returns the position where the left mouse button go up.
:return: an (x, y) mouse position tuple.
Note:
You must call 'check_events' or 'is_done' in you main display loop
for this method to work.
"""
return self.lb_up
def left_button_down_pos(self):
"""
Returns the position where the left mouse button go down.
:return: an (x, y) mouse position tuple.
Note:
You must call 'check_events' or 'is_done' in you main display loop
for this method to work.
"""
return self.lb_down
def right_button_up_pos(self):
"""
Returns the position where the right mouse button go up.
:return: an (x, y) mouse position tuple.
Note:
You must call 'check_events' or 'is_done' in you main display loop
for this method to work.
"""
return self.rb_up
def right_button_down_pos(self):
"""
Returns the position where the right mouse button go down.
:return: an (x, y) mouse position tuple.
Note:
You must call 'check_events' or 'is_done' in you main display loop
for this method to work.
"""
return self.rb_down
def points2boundingbox(self, pt0, pt1):
"""
Given two screen coordinates return the bounding box in x, y, w, h
format. This is helpful for drawing regions on the display.
:param pt0: first points
:param pt1: second points
:return: (x, y, w, h) tuple
"""
max_x = np.max((pt0[0], pt1[0]))
max_y = np.max((pt0[1], pt1[1]))
min_x = np.min((pt0[0], pt1[0]))
min_y = np.min((pt0[1], pt1[1]))
return min_x, min_y, max_x-min_x, max_y-min_y
def write_frame(self, img, fit=True):
"""
Copies the given Image object to the display, you can also use
Image.save()
Write frame try to fit the image to the display with the minimum
amount of distortion possible. When fit=True write frame will decide
how to scale the image such that aspect ratio is maintained and the
smallest amount of distortion possible is completed. This means the
axis that has the minimum scaling needed will be shrunk or enlarged
to match the display.
:param img: the PhloxAR Image to save to the display
:param fit: if False, write frame will crop and center the image
as best it can. If the image is too big it is cropped
and centered. If it is too small it is centered. If
it is too big along one axis that axis is cropped and
the other axis is centered if necessary.
:return: None
"""
wnd_ratio = self.res[0] / self.res[1]
img_ratio = img.width / img.height
self.src_res = img.size()
self.img_w = img.width
self.img_h = img.height
self.scale_x = 1.0
self.scale_y = 1.0
self.offset_x = 0
self.offset_y = 0
if img.size() == self.res:
s = img.surface()
self.screen.blit(s, s.get_rect())
pygame.display.flip()
elif img_ratio == wnd_ratio:
self.scale_x = img.width / self.res[0]
self.scale_y = img.height / self.res[1]
img = img.scale(self.res[0], self.res[1])
s = img.surface()
self.screen.blit(s, s.get_rect())
pygame.display.flip()
elif fit:
# scale factors
wscale = img.width / self.res[0]
hscale = img.height / self.res[1]
w = img.width
h = img.height
# shrink what is the percent reduction
if wscale > 1:
wscale = 1.0 - (1 / wscale)
else:
# grow the image by a percentage
wscale = 1.0 - wscale
if hscale > 1:
hscale = 1.0 - (1 / hscale)
else:
hscale = 1.0 - hscale
if wscale == 0:
x = 0
y = (self.res[1] - img.height) / 2
w = img.width
h = img.height
s = img.surface()
elif hscale == 0:
x = (self.res[0] - img.width) / 2
y = 0
w = img.width
h = img.height
s = img.surface()
elif wscale < hscale:
# width has less distortion
sfactor = self.res[0] / img.width
w = int(img.width * sfactor)
h = int(img.height * sfactor)
if w > self.res[0] or h > self.res[1]:
sfactor = self.res[1] / img.heigt
w = int(img.width * sfactor)
h = int(img.height * sfactor)
x = (self.res[0] - w) / 2
y = 0
else:
x = 0
y = (self.res[1] - h) / 2
| |
optional
A constant that specify the scaling factor for the features of this
namespace.
Examples
--------
>>> from vowpalwabbit.DFtoVW import Namespace, Feature
>>> ns_one_feature = Namespace(Feature("a"))
>>> ns_multi_features = Namespace([Feature("a"), Feature("b")])
>>> ns_one_feature_with_name = Namespace(Feature("a"), name="FirstNamespace")
>>> ns_one_feature_with_name_and_value = Namespace(Feature("a"), name="FirstNamespace", value=2)
Returns
-------
self : Namespace
"""
if (value is not None) and (name is None):
raise ValueError(
"Namespace can't have a 'value' argument without a 'name' argument"
)
self.name = name
self.value = value
self.features = (
list(features) if isinstance(features, (list, set)) else [features]
)
self.check_attributes_type()
def check_attributes_type(self):
"""Check if attributes are of valid type.
Raises
------
TypeError
If one of the attribute is not valid.
"""
for attribute_name in ["name", "value"]:
attribute_value = getattr(self, attribute_name)
if attribute_value is not None and not isinstance(
attribute_value, self.expected_type[attribute_name]
):
raise TypeError(
"In Namespace, argument '{attribute_name}' should be either of the following type(s): {types}".format(
attribute_name=attribute_name,
types=repr(
[
x.__name__
for x in self.expected_type[attribute_name]
]
)[1:-1],
)
)
valid_feature = all(
[
isinstance(feature, self.expected_type["features"])
for feature in self.features
]
)
if not valid_feature:
raise TypeError(
"In Namespace, argument 'features' should be a Feature or a list of Feature."
)
def process(self):
"""Returns the Namespace string representation"""
out = ["|"]
if self.name is not None:
out += str(self.name)
if self.value is not None:
out += [":", str(self.value)]
return "".join(out)
class DFtoVW:
"""Convert a pandas DataFrame to a suitable VW format.
Instances of this class are built with classes such as SimpleLabel,
MulticlassLabel, Feature or Namespace.
The class also provided a convenience constructor to initialize the class
based on the target/features column names only.
"""
def __init__(
self,
df,
features=None,
namespaces=None,
label=None,
tag=None,
):
"""Initialize a DFtoVW instance.
Parameters
----------
df : pandas.DataFrame
The dataframe to convert to VW input format.
features: Feature/list of Feature
One or more Feature object(s).
namespaces : Namespace/list of Namespace
One or more Namespace object(s), each of being composed of one or
more Feature object(s).
label : SimpleLabel/MulticlassLabel/MultiLabel
The label.
tag : str/int/float
The tag (used as identifiers for examples).
Examples
--------
>>> from vowpalwabbit.DFtoVW import DFtoVW, SimpleLabel, Feature
>>> import pandas as pd
>>> df = pd.DataFrame({"y": [1], "a": [2], "b": [3], "c": [4]})
>>> conv1 = DFtoVW(df=df,
label=SimpleLabel("y"),
features=Feature("a"))
>>> conv1.convert_df()
>>> conv2 = DFtoVW(df=df,
label=SimpleLabel("y"),
features=[Feature(col) for col in ["a", "b"]])
>>> conv2.convert_df()
>>> conv3 = DFtoVW(df=df,
label=SimpleLabel("y"),
namespaces=Namespace(
name="DoubleIt", value=2,
features=Feature(value="a", rename_feature="feat_a")))
>>> conv3.convert_df()
>>> conv4 = DFtoVW(df=df,
label=SimpleLabel("y"),
namespaces=[Namespace(name="NS1", features=[Feature(col) for col in ["a", "c"]]),
Namespace(name="NS2", features=Feature("b"))])
>>> conv4.convert_df()
Returns
-------
self : DFtoVW
"""
self.df = df
self.n_rows = df.shape[0]
self.label = label
self.tag = _Tag(tag) if tag else None
if features is not None:
self.check_features_type(features)
self.set_namespaces_or_features(namespaces, features)
self.check_label_type()
self.check_namespaces_type()
self.check_columns_existence_in_df()
self.check_columns_type_and_values()
@classmethod
def from_colnames(cls, y, x, df, label_type="simple_label"):
"""Build DFtoVW instance using column names only.
Parameters
----------
y : str/list of str
The column for the label.
x : str/list of str
The column(s) for the feature(s).
df : pandas.DataFrame
The dataframe used.
label_type: str (default: 'simple_label')
The type of the label. Available labels: 'simple_label', 'multiclass', 'multilabel'.
Raises
------
TypeError
If argument label is not of valid type.
ValueError
If argument label_type is not valid.
Examples
--------
>>> from vowpalwabbit.DFtoVW import DFtoVW
>>> import pandas as pd
>>> df = pd.DataFrame({"y": [1], "x": [2]})
>>> conv = DFtoVW.from_colnames(y="y", x="x", df=df)
>>> conv.convert_df()
>>> df2 = pd.DataFrame({"y": [1], "x1": [2], "x2": [3], "x3": [4]})
>>> conv2 = DFtoVW.from_colnames(y="y", x=set(df2.columns) - set("y"), df=df2)
>>> conv2.convert_df()
Returns
-------
DFtoVW
A initialized DFtoVW instance.
"""
dict_label_type = {
"simple_label": SimpleLabel,
"multiclass": MulticlassLabel,
"multilabel": MultiLabel,
}
if label_type not in dict_label_type:
raise ValueError(
"'label_type' should be either of the following string: {label_types}".format(
label_types=repr(list(dict_label_type.keys()))[1:-1]
)
)
y = y if isinstance(y, list) else [y]
if not all(isinstance(yi, str) for yi in y):
raise TypeError(
"Argument 'y' should be a string or a list of string(s)."
)
if label_type != "multilabel":
if len(y) == 1:
y = y[0]
else:
raise ValueError(
"When label_type is 'simple_label' or 'multiclass', argument 'y' should be a string or a list of exactly one string."
)
label = dict_label_type[label_type](y)
x = x if isinstance(x, list) else [x]
if not all(isinstance(xi, str) for xi in x):
raise TypeError(
"Argument 'x' should be a string or a list of string."
)
namespaces = Namespace(
features=[Feature(value=colname) for colname in x]
)
return cls(namespaces=namespaces, label=label, df=df)
def check_features_type(self, features):
"""Check if the features argument is of type Feature.
Parameters
----------
features: (list of) Feature,
The features argument to check.
Raises
------
TypeError
If the features is not a Feature of a list of Feature.
"""
if isinstance(features, list):
valid_feature = all(
[isinstance(feature, Feature) for feature in features]
)
else:
valid_feature = isinstance(features, Feature)
if not valid_feature:
raise TypeError(
"Argument 'features' should be a Feature or a list of Feature."
)
def set_namespaces_or_features(self, namespaces, features):
"""Set namespaces attributes
Parameters
----------
namespaces: Namespace / list of Namespace objects
The namespaces argument.
features: Feature / list of Feature objects
The features argument.
Raise
-----
ValueError:
If argument 'features' or 'namespaces' are not valid.
"""
if (features is None) and (namespaces is None):
raise ValueError("Missing 'features' or 'namespace' argument")
if (features is not None) and (namespaces is not None):
raise ValueError(
"Arguments supplied for both 'features' and 'namespaces', only one of the these arguments should be supplied."
)
if features is not None:
namespaces = Namespace(features=features)
namespaces = (
list(namespaces)
if isinstance(namespaces, (list, set))
else [namespaces]
)
self.namespaces = namespaces
def check_label_type(self):
"""Check label type.
Raises
------
TypeError
If label is not of type SimpleLabel or MulticlassLabel.
"""
available_labels = (SimpleLabel, MulticlassLabel, MultiLabel)
if self.label is None:
pass
else:
if not isinstance(self.label, available_labels):
raise TypeError(
"Argument 'label' should be either of the following type: {label_types}.".format(
label_types=repr(
[x.__name__ for x in available_labels]
)[1:-1]
)
)
def check_namespaces_type(self):
"""Check if namespaces arguments are of type Namespace.
Raises
------
TypeError
If namespaces are not of type Namespace or list of Namespace.
"""
wrong_type_namespaces = [
not isinstance(namespace, Namespace)
for namespace in self.namespaces
]
if any(wrong_type_namespaces):
raise TypeError(
"Argument 'namespaces' should be a Namespace or a list of Namespace."
)
def check_columns_existence_in_df(self):
"""Check if the columns are in the dataframe."""
absent_cols = {}
df_colnames = set(self.df.columns)
try:
missing_cols = self.label.columns
except AttributeError:
pass
else:
if missing_cols is not None:
type_label = type(self.label).__name__
absent_cols[type_label] = list(missing_cols - df_colnames)
try:
missing_cols = self.tag.columns
except AttributeError:
pass
else:
if missing_cols is not None:
absent_cols["tag"] = list(missing_cols - df_colnames)
all_features = [
feature
for namespace in self.namespaces
for feature in namespace.features
]
missing_features_cols = []
for feature in all_features:
try:
missing_cols = feature.columns
except AttributeError:
pass
else:
if missing_cols is not None:
missing_features_cols += list(missing_cols - df_colnames)
absent_cols["Feature"] = sorted(list(set(missing_features_cols)))
absent_cols = {
key: value
for (key, value) in absent_cols.items()
if len(value) > 0
}
self.generate_missing_col_error(absent_cols)
def generate_missing_col_error(self, absent_cols_dict):
"""Generate error if some columns are missing
Raises
------
ValueError
If one or more columns are not in the dataframe.
"""
if absent_cols_dict:
msg_error = ""
for attribute_name, missing_cols in absent_cols_dict.items():
missing_cols = (
repr(missing_cols)[1:-1]
if isinstance(missing_cols, list)
else missing_cols
)
if len(msg_error) > 0:
msg_error += " "
msg_error += "In '{attribute}': column(s) {colnames} not found in dataframe.".format(
attribute=attribute_name, colnames=missing_cols,
)
raise ValueError(msg_error)
def check_columns_type_and_values(self):
"""Check columns type and values range"""
for instance in [self.tag, self.label]:
self.check_instance_columns(instance)
for namespace in self.namespaces:
for feature in namespace.features:
self.check_instance_columns(feature)
def check_instance_columns(self, instance):
"""Check the columns type and values of a given instance.
The method iterate through the attributes and look for _Col type
attribute. Once found, the method use the _Col methods to check the
type and the value range of the column. Also, the instance type in
which the errors occur are prepend to the error message to be more
explicit about where the error occurs in the formula.
Raises
------
TypeError
If a column is not of valid type.
ValueError
If a column | |
import pytest
import os, thread
from hippy.debugger import Connection, Message
from hippy.objspace import ObjSpace
from testing.test_interpreter import MockInterpreter, preparse
class TestDebugger(object):
def setup_method(self, meth):
self.read_fd1, self.write_fd1 = os.pipe()
self.read_fd2, self.write_fd2 = os.pipe()
def teardown_method(self, meth):
os.close(self.read_fd1)
os.close(self.write_fd1)
os.close(self.read_fd2)
os.close(self.write_fd2)
def run(self, interp, source):
source = preparse(source)
bc = interp.compile(source)
interp.run_bytecode(bc)
    def test_basic_pipe_communication(self):
        """Messages on the wire are ';'-terminated, space-separated words;
        partial writes are buffered until the terminator arrives, and
        several messages in one write are queued and read one at a time."""
        d = Connection(self.read_fd1, self.write_fd2)
        os.write(self.write_fd1, "breakpoint a;")
        assert d.read() == Message("breakpoint", ["a"])
        # 'echo a;' is complete; the trailing 'echo' is an unterminated tail.
        os.write(self.write_fd1, "echo a;echo")
        assert d.read() == Message("echo", ["a"])
        # ' b;' completes the buffered tail into 'echo b'.
        os.write(self.write_fd1, " b;echo a;echo b;echo c;")
        assert d.read() == Message("echo", ["b"])
        assert d.read() == Message("echo", ["a"])
        assert d.read() == Message("echo", ["b"])
        assert d.read() == Message("echo", ["c"])
        # Outbound messages are serialized the same way: 'name args...;'.
        d.write(Message("foo", ["a", "b"]))
        s = os.read(self.read_fd2, 100)
        assert s == "foo a b;"
    def test_basic_debugger(self):
        """Set a breakpoint on function f through the debugger connection,
        then check the message exchange when f() is actually called."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        # NOTE(review): ok() is a module-level helper defined outside this
        # excerpt; presumably it acknowledges one pending reply -- confirm.
        c.write(Message("force_breakpoint", ["f"])); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["Breakpoint set f"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["Continuing"])
        self.run(interp, """
        function f() {
        }
        """)
        # The breakpoint fires inside self.run() below, so the conversation
        # with the stopped interpreter happens on a helper thread.
        def c1():
            assert c.read() == Message("echo", ["stop breakpoint f"])
            ok(c)
            assert c.read() == Message(">", None)
            c.write(Message("continue", None)); ok(c)
        thread.start_new_thread(c1, ())
        self.run(interp, "f();")
        # Nothing may be left unread on either side of the connection.
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_eval(self):
        """At a breakpoint, 'eval' inspects a local variable and 'backtrace'
        reports the call stack (file, function, line, source per frame)."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        c.write(Message("force_breakpoint", ["f"])); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        self.run(interp, """
        function f($a, $b, $x) {
        }
        """)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["Breakpoint set f"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["Continuing"])
        ok(c)
        # Queue the commands to run once the breakpoint is hit.
        c.write(Message("eval", ["$x"])); ok(c)
        c.write(Message("backtrace", None)); ok(c)
        c.write(Message("continue", None)); ok(c)
        self.run(interp, """
        f(1, 2, 3);
        """)
        assert c.read() == Message("echo", ["stop breakpoint f"])
        assert c.read() == Message(">", None)
        # $x is f's third parameter, bound to 3.
        assert c.read() == Message("echo", ["int(3)"])
        assert c.read() == Message(">", None)
        # Traceback entries come in frame order, innermost first.
        exp_traceback = ["<input>", "f", "2", "function f($a, $b, $x) {",
                         "<input>", "<main>", "2", "f(1, 2, 3);"]
        msg = c.read()
        assert msg == Message("traceback", exp_traceback)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_empty_backtrace(self):
        """A 'backtrace' request with no frame executing is answered with a
        warning rather than a traceback message."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        c.write(Message("backtrace", None)); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        # Empty source: the debugger still answers the queued commands.
        self.run(interp, "")
        assert c.read() == Message(">", None)
        assert c.read() == Message("warn", ["empty backtrace"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_class_breakpoint_and_traceback(self):
        """Breakpoints can target methods with the 'Class::method' syntax;
        'eval' at the stop sees the method's own arguments."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        c.write(Message("force_breakpoint", ["klass::method"])); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        # Interaction with the stopped interpreter happens on a helper
        # thread while self.run() blocks in the main thread.
        def c1():
            assert c.read() == Message(">", None)
            assert c.read() == Message("echo", ["Breakpoint set klass::method"])
            assert c.read() == Message(">", None)
            assert c.read() == Message("echo", ["Continuing"])
            assert c.read() == Message("echo", ["stop breakpoint klass::method"])
            ok(c)
            assert c.read() == Message(">", None)
            c.write(Message("eval", ["$x"])); ok(c)
            c.write(Message("continue", None)); ok(c)
        thread.start_new_thread(c1, ())
        self.run(interp, """
        class klass {
            function method($x) {
            }
        }
        $x = new klass();
        $x->method(13);
        """)
        # Inside the method, $x is the argument 13, not the outer object.
        assert c.read() == Message("echo", ["int(13)"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_next_step(self):
        """'next' moves line by line inside one frame while 'step' descends
        into callees; the body below documents the intended protocol."""
        # Marked as expected-to-fail: the behavior is not implemented yet.
        pytest.xfail()
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        c.write(Message("force_breakpoint", ["f"])); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        def c1():
            assert c.read() == Message(">", None)
            assert c.read() == Message("echo", ["Breakpoint set f"])
            assert c.read() == Message(">", None)
            assert c.read() == Message("echo", ["Continuing"])
            assert c.read() == Message("echo", ["stop breakpoint f"])
            ok(c)
            assert c.read() == Message(">", None)
            c.write(Message("eval", ["$a"]))
            assert c.read() == Message("echo", ["int(1)"])
            ok(c)
            assert c.read() == Message(">", None)
            # 'next': advance one line, staying in f's frame.
            c.write(Message("next", None))
            assert c.read() == Message("linechange", ["<input>", "3", "f",
                                                      " $z = $a + $b + $c;"])
            ok(c)
            assert c.read() == Message(">", None)
            c.write(Message("next", None))
            assert c.read() == Message("linechange", ["<input>", "4", "f",
                                                      " $a = $z - 3 + g();"])
            ok(c)
            assert c.read() == Message(">", None)
            c.write(Message("eval", ["$z"]))
            assert c.read() == Message("echo", ["int(6)"])
            ok(c)
            assert c.read() == Message(">", None)
            # 'step': descend into the call to g().
            c.write(Message("step", None))
            assert c.read() == Message("echo", ["enter g"])
            ok(c)
            assert c.read() == Message("linechange", ["<input>", "8", "g",
                                                      " return 13;"])
            ok(c)
            c.write(Message("continue", None))
            ok(c)
        thread.start_new_thread(c1, ())
        self.run(interp, """
        function f($a, $b, $c) {
            $z = $a + $b + $c;
            $a = $z - 3 + g();
            return $a;
        }
        function g() {
            return 13;
        }
        f(1, 2, 3);
        """)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_step_precise_position(self):
        """Each 'step' reports the exact upcoming line via a 'linechange'
        message before that line executes."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        # Queue three steps and a continue before the program starts.
        c.write(Message("step", None)); ok(c); ok(c)
        c.write(Message("step", None)); ok(c)
        c.write(Message("step", None)); ok(c); ok(c)
        c.write(Message("continue", None)); ok(c); ok(c)
        interp.debugger.run_debugger_loop(interp)
        interp.echo = interp.print_expr  # <= hack
        self.run(interp, """
        $a = 5;
        echo 'foobar';
        echo 'baz';
        """)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["enter <main>"])
        assert c.read() == Message("linechange", ["<input>", "2", "<main>",
                                                  "$a = 5;"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("linechange", ["<input>", "3", "<main>",
                                                  "echo 'foobar';"])
        assert c.read() == Message(">", None)
        # Program output is interleaved with the debugger protocol.
        assert c.read() == Message("echo", ['string(6) "foobar"'])
        assert c.read() == Message("linechange", ["<input>", "4", "<main>",
                                                  "echo 'baz';"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert c.read() == Message("echo", ['string(3) "baz"'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_step_enter_leave(self):
        """Stepping announces 'enter'/'leave' around every function call,
        including repeated calls on the same source line."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        # Queue the whole command script up front.
        c.write(Message("step", None)); ok(c); ok(c)
        c.write(Message("step", None)); ok(c); ok(c)
        c.write(Message("backtrace", None)); ok(c)
        c.write(Message("step", None)); ok(c); ok(c); ok(c); ok(c)
        c.write(Message("step", None)); ok(c); ok(c); ok(c)
        c.write(Message("step", None)); ok(c); ok(c)
        c.write(Message("continue", None)); ok(c); ok(c)
        interp.debugger.run_debugger_loop(interp)
        interp.echo = interp.print_expr  # <= hack
        self.run(interp, """
        function F() {
            echo 'fff';
        }
        F(); F();
        F();
        """)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["enter <main>"])
        assert c.read() == Message("linechange", ["<input>", "5", "<main>",
                                                  "F(); F();"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["enter F"])
        assert c.read() == Message("linechange", ["<input>", "3", "F",
                                                  " echo 'fff';"])
        assert c.read() == Message(">", None)
        exp_traceback = ["<input>", "F", "3", " echo 'fff';",
                         "<input>", "<main>", "5", "F(); F();"]
        assert c.read() == Message("traceback", exp_traceback)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['string(3) "fff"'])
        # Leaving the first F() steps straight into the second call on the
        # same line.
        assert c.read() == Message("echo", ["leave F"])
        assert c.read() == Message("echo", ["enter F"])
        assert c.read() == Message("linechange", ["<input>", "3", "F",
                                                  " echo 'fff';"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['string(3) "fff"'])
        assert c.read() == Message("echo", ["leave F"])
        assert c.read() == Message("linechange", ["<input>", "6", "<main>",
                                                  "F();"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["enter F"])
        assert c.read() == Message("linechange", ["<input>", "3", "F",
                                                  " echo 'fff';"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert c.read() == Message("echo", ['string(3) "fff"'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_next_is_step_if_not_a_call(self):
        """On a line without a function call, 'next' behaves exactly like
        'step': it just advances to the following line."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        c.write(Message("next", None)); ok(c); ok(c)
        c.write(Message("next", None)); ok(c); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        interp.echo = interp.print_expr  # <= hack
        self.run(interp, """
        echo 'fff';
        $a = 5;
        """)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["enter <main>"])
        assert c.read() == Message("linechange", ["<input>", "2", "<main>",
                                                  "echo 'fff';"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['string(3) "fff"'])
        assert c.read() == Message("linechange", ["<input>", "3", "<main>",
                                                  "$a = 5;"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
    def test_next_skips_all_nested_calls(self):
        """'next' over a call runs the whole call chain (F calling G) to
        completion without reporting any of the inner frames."""
        space = ObjSpace()
        interp = MockInterpreter(space)
        interp.setup_debugger(self.read_fd1, self.write_fd2)
        c = Connection(self.read_fd2, self.write_fd1)
        c.write(Message("next", None)); ok(c); ok(c)
        c.write(Message("next", None)); ok(c)
        c.write(Message("continue", None)); ok(c)
        interp.debugger.run_debugger_loop(interp)
        interp.echo = interp.print_expr  # <= hack
        self.run(interp, """
        function F() {
            return G();
        }
        function G() {
            return 42;
        }
        $a = F();
        $a++;
        """)
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ["enter <main>"])
        assert c.read() == Message("linechange", ["<input>", "8", "<main>",
                                                  "$a = F();"])
        assert c.read() == Message(">", None)
        # No enter/leave messages for F or G: 'next' stays in <main>.
        assert c.read() == Message("linechange", ["<input>", "9", "<main>",
                                                  "$a++;"])
        assert c.read() == Message(">", None)
        assert c.read() == Message("echo", ['Continuing'])
        assert not c.more_pending_messages()
        assert not interp.debugger.conn.more_pending_messages()
        assert not interp.msgs
        interp.shutdown()
def test_next_passes_over_calls_on_the_same_line(self):
space = ObjSpace()
interp = MockInterpreter(space)
interp.setup_debugger(self.read_fd1, self.write_fd2)
c = Connection(self.read_fd2, self.write_fd1)
c.write(Message("next", None)); ok(c); ok(c)
c.write(Message("next", None)); ok(c); ok(c); ok(c)
| |
= Vertex(name = 'V_311',
particles = [ P.G0, P.G__plus__, P.sl3__minus__, P.sv3__tilde__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1134})
# NOTE(review): this vertex table looks machine-generated (UFO-format model
# output, MSSM-style slepton/sneutrino interaction vertices) — confirm the
# generator before hand-editing.  Each Vertex lists its interacting
# particles, colour and Lorentz structures, and coupling constants.
V_312 = Vertex(name = 'V_312',
particles = [ P.A0, P.H__plus__, P.sl3__minus__, P.sv3__tilde__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_2247})
V_313 = Vertex(name = 'V_313',
particles = [ P.a, P.sl3__plus__, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.VSS2 ],
couplings = {(0,0):C.GC_412})
V_314 = Vertex(name = 'V_314',
particles = [ P.vt__tilde__, P.x1__plus__, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_861})
V_315 = Vertex(name = 'V_315',
particles = [ P.vt__tilde__, P.x2__plus__, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3 ],
couplings = {(0,0):C.GC_879})
V_316 = Vertex(name = 'V_316',
particles = [ P.tau__plus__, P.n1, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,1):C.GC_951,(0,0):C.GC_102})
V_317 = Vertex(name = 'V_317',
particles = [ P.tau__plus__, P.n2, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,1):C.GC_952,(0,0):C.GC_125})
V_318 = Vertex(name = 'V_318',
particles = [ P.tau__plus__, P.n3, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,1):C.GC_953,(0,0):C.GC_148})
V_319 = Vertex(name = 'V_319',
particles = [ P.tau__plus__, P.n4, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS3, L.FFS4 ],
couplings = {(0,1):C.GC_954,(0,0):C.GC_171})
V_320 = Vertex(name = 'V_320',
particles = [ P.sl1__plus__, P.sl1__minus__, P.sl3__plus__, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_424})
V_321 = Vertex(name = 'V_321',
particles = [ P.sl2__plus__, P.sl2__minus__, P.sl3__plus__, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_425})
V_322 = Vertex(name = 'V_322',
particles = [ P.sl3__plus__, P.sl3__plus__, P.sl3__minus__, P.sl3__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_426})
V_323 = Vertex(name = 'V_323',
particles = [ P.sl4__plus__, P.sl4__minus__, P.sv1__tilde__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_445})
V_324 = Vertex(name = 'V_324',
particles = [ P.sl4__plus__, P.sl4__minus__, P.sv2__tilde__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_445})
V_325 = Vertex(name = 'V_325',
particles = [ P.sl4__plus__, P.sl4__minus__, P.sv3__tilde__, P.sv3 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_445})
V_326 = Vertex(name = 'V_326',
particles = [ P.a, P.a, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_444})
V_327 = Vertex(name = 'V_327',
particles = [ P.h02, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_1867})
V_328 = Vertex(name = 'V_328',
particles = [ P.h01, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_1866})
V_329 = Vertex(name = 'V_329',
particles = [ P.h01, P.h01, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_990})
V_330 = Vertex(name = 'V_330',
particles = [ P.h02, P.h02, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_989})
V_331 = Vertex(name = 'V_331',
particles = [ P.A0, P.A0, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1094})
V_332 = Vertex(name = 'V_332',
particles = [ P.G0, P.G0, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1093})
V_333 = Vertex(name = 'V_333',
particles = [ P.G__minus__, P.G__plus__, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1093})
V_334 = Vertex(name = 'V_334',
particles = [ P.H__minus__, P.H__plus__, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1094})
V_335 = Vertex(name = 'V_335',
particles = [ P.a, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.VSS2 ],
couplings = {(0,0):C.GC_443})
V_336 = Vertex(name = 'V_336',
particles = [ P.e__plus__, P.n1, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_62})
V_337 = Vertex(name = 'V_337',
particles = [ P.e__plus__, P.n2, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_63})
V_338 = Vertex(name = 'V_338',
particles = [ P.e__plus__, P.n3, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_64})
V_339 = Vertex(name = 'V_339',
particles = [ P.e__plus__, P.n4, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_65})
V_340 = Vertex(name = 'V_340',
particles = [ P.sl1__plus__, P.sl1__minus__, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_460})
V_341 = Vertex(name = 'V_341',
particles = [ P.sl2__plus__, P.sl2__minus__, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_461})
V_342 = Vertex(name = 'V_342',
particles = [ P.sl3__plus__, P.sl3__minus__, P.sl4__plus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_462})
V_343 = Vertex(name = 'V_343',
particles = [ P.sl4__plus__, P.sl4__plus__, P.sl4__minus__, P.sl4__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_463})
V_344 = Vertex(name = 'V_344',
particles = [ P.sl5__plus__, P.sl5__minus__, P.sv1__tilde__, P.sv1 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_466})
V_345 = Vertex(name = 'V_345',
particles = [ P.sl5__plus__, P.sl5__minus__, P.sv2__tilde__, P.sv2 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_466})
V_346 = Vertex(name = 'V_346',
particles = [ P.sl5__plus__, P.sl5__minus__, P.sv3__tilde__, P.sv3 ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_466})
V_347 = Vertex(name = 'V_347',
particles = [ P.a, P.a, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.VVSS1 ],
couplings = {(0,0):C.GC_465})
V_348 = Vertex(name = 'V_348',
particles = [ P.h02, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_1869})
V_349 = Vertex(name = 'V_349',
particles = [ P.h01, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSS1 ],
couplings = {(0,0):C.GC_1868})
V_350 = Vertex(name = 'V_350',
particles = [ P.h01, P.h01, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_992})
V_351 = Vertex(name = 'V_351',
particles = [ P.h02, P.h02, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_991})
V_352 = Vertex(name = 'V_352',
particles = [ P.A0, P.A0, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1096})
V_353 = Vertex(name = 'V_353',
particles = [ P.G0, P.G0, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1095})
V_354 = Vertex(name = 'V_354',
particles = [ P.G__minus__, P.G__plus__, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1095})
V_355 = Vertex(name = 'V_355',
particles = [ P.H__minus__, P.H__plus__, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_1096})
V_356 = Vertex(name = 'V_356',
particles = [ P.a, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.VSS2 ],
couplings = {(0,0):C.GC_464})
V_357 = Vertex(name = 'V_357',
particles = [ P.mu__plus__, P.n1, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_66})
V_358 = Vertex(name = 'V_358',
particles = [ P.mu__plus__, P.n2, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_67})
V_359 = Vertex(name = 'V_359',
particles = [ P.mu__plus__, P.n3, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_68})
V_360 = Vertex(name = 'V_360',
particles = [ P.mu__plus__, P.n4, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.FFS4 ],
couplings = {(0,0):C.GC_69})
V_361 = Vertex(name = 'V_361',
particles = [ P.sl1__plus__, P.sl1__minus__, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
couplings = {(0,0):C.GC_481})
V_362 = Vertex(name = 'V_362',
particles = [ P.sl2__plus__, P.sl2__minus__, P.sl5__plus__, P.sl5__minus__ ],
color = [ '1' ],
lorentz = [ L.SSSS1 ],
| |
# targeter/targeter.py
import argparse
import functools
import re
from datetime import datetime, timezone
import aiohttp
import discord
from dateutil.parser import parse
from redbot.core import checks, commands
from redbot.core.commands import BadArgument, Converter, RoleConverter
from redbot.core.utils.chat_formatting import humanize_list, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
# Whitelist of permission names accepted by the --perms / --any-perm /
# --not-perms / --not-any-perm filters in Args.convert (compared lowercased).
# NOTE(review): these appear to mirror discord.Permissions flag names —
# confirm against the discord library version in use.
PERMS = [
    "add_reactions",
    "administrator",
    "attach_files",
    "ban_members",
    "change_nickname",
    "connect",
    "create_instant_invite",
    "deafen_members",
    "embed_links",
    "external_emojis",
    "kick_members",
    "manage_channels",
    "manage_emojis",
    "manage_guild",
    "manage_messages",
    "manage_nicknames",
    "manage_roles",
    "manage_webhooks",
    "mention_everyone",
    "move_members",
    "mute_members",
    "priority_speaker",
    "read_message_history",
    "read_messages",
    "send_messages",
    "send_tts_messages",
    "speak",
    "stream",
    "use_voice_activation",
    "view_audit_log",
]
# Large pieces of the argument parser are taken from Sinbad's cogs. I based mine off of https://github.com/mikeshardmind/SinbadCogs/blob/v3/scheduler/converters.py#L23
class NoExitParser(argparse.ArgumentParser):
    """ArgumentParser variant that never calls sys.exit().

    argparse's default ``error`` prints usage and terminates the process;
    inside a bot command we instead surface the failure to the command
    framework as a BadArgument.
    """

    def error(self, message):
        # BUG FIX: previously raised BadArgument() with no arguments, which
        # discarded argparse's diagnostic text; keep the message.
        raise BadArgument(message)
class Args(Converter):
    """Convert a targeter flag string into a dict of validated filters.

    The argument string is parsed with argparse (via NoExitParser, so parse
    failures surface as BadArgument), quoted multi-word values are re-joined,
    and each filter family (names, discriminators, roles, dates, activities,
    permissions, output format) is validated and converted in place.  The
    returned dict is keyed by the argparse ``dest`` names declared below.
    """

    async def convert(self, ctx, argument):
        # Mobile keyboards often auto-correct "--" into an em-dash; undo it.
        argument = argument.replace("—", "--")
        parser = NoExitParser(description="Targeter argument parser", add_help=False)
        # Nicknames / Usernames
        names = parser.add_argument_group()
        names.add_argument("--nick", nargs="*", dest="nick", default=[])
        names.add_argument("--user", nargs="*", dest="user", default=[])
        names.add_argument("--name", nargs="*", dest="name", default=[])
        names.add_argument("--not-nick", nargs="*", dest="not-nick", default=[])
        names.add_argument("--not-user", nargs="*", dest="not-user", default=[])
        names.add_argument("--not-name", nargs="*", dest="not-name", default=[])
        names.add_argument("--a-nick", dest="a-nick", action="store_true")
        names.add_argument("--no-nick", dest="no-nick", action="store_true")
        discs = parser.add_mutually_exclusive_group()
        discs.add_argument("--disc", nargs="*", dest="disc", default=[])
        discs.add_argument("--not-disc", nargs="*", dest="ndisc", default=[])
        # Roles
        parser.add_argument("--roles", nargs="*", dest="roles", default=[])
        parser.add_argument("--any-role", nargs="*", dest="any-role", default=[])
        parser.add_argument("--not-roles", nargs="*", dest="not-roles", default=[])
        parser.add_argument("--not-any-role", nargs="*", dest="not-any-role", default=[])
        single = parser.add_mutually_exclusive_group()
        single.add_argument("--a-role", dest="a-role", action="store_true")
        single.add_argument("--no-role", dest="no-role", action="store_true")
        # Date stuff
        jd = parser.add_argument_group()
        jd.add_argument("--joined-on", nargs="*", dest="joined-on", default=[])
        jd.add_argument("--joined-before", nargs="*", dest="joined-be", default=[])
        # BUG FIX: default was "" here while every other date filter uses [].
        jd.add_argument("--joined-after", nargs="*", dest="joined-af", default=[])
        cd = parser.add_argument_group()
        cd.add_argument("--created-on", nargs="*", dest="created-on", default=[])
        cd.add_argument("--created-before", nargs="*", dest="created-be", default=[])
        cd.add_argument("--created-after", nargs="*", dest="created-af", default=[])
        # Status / Activity / Device / Just Basically Profile Stuff
        parser.add_argument("--status", nargs="*", dest="status", default=[])
        parser.add_argument("--device", nargs="*", dest="device", default=[])
        bots = parser.add_mutually_exclusive_group()
        bots.add_argument("--only-bots", dest="bots", action="store_true")
        bots.add_argument("--no-bots", dest="nbots", action="store_true")
        parser.add_argument("--activity-type", nargs="*", dest="at", default=[])
        parser.add_argument("--activity", nargs="*", dest="a", default=[])
        at = parser.add_mutually_exclusive_group()
        at.add_argument("--no-activity", dest="na", action="store_true")
        at.add_argument("--an-activity", dest="aa", action="store_true")
        # Permissions
        parser.add_argument("--perms", nargs="*", dest="perms", default=[])
        parser.add_argument("--any-perm", nargs="*", dest="any-perm", default=[])
        parser.add_argument("--not-perms", nargs="*", dest="not-perms", default=[])
        parser.add_argument("--not-any-perm", nargs="*", dest="not-any-perm", default=[])
        # Extra
        parser.add_argument("--format", nargs="*", dest="format", default=["menu"])

        try:
            vals = vars(parser.parse_args(argument.split(" ")))
        except Exception as exc:
            raise BadArgument() from exc

        # argparse split on spaces, so re-join values the user wrapped in
        # quotes ('"foo bar"' -> one entry) while honouring \" escapes.
        try:
            for key, value in vals.items():
                if not isinstance(value, list):
                    continue
                split_words = value
                word_list = []
                tmp = ""
                for word in split_words:
                    if not word.startswith('"') and not word.endswith('"') and not tmp:
                        # Plain word outside any quote run.
                        if word.startswith(r"\""):
                            word = word[1:]
                        word_list.append(word)
                    else:
                        echanged = False
                        if word.endswith(r"\""):
                            word = word[:-2] + '"'
                            echanged = True
                        schanged = False
                        if word.startswith(r"\""):
                            word = word[1:]
                            schanged = True
                        if word.startswith('"') and not schanged:
                            if word.startswith('"') and word.endswith('"') and len(word) > 1:
                                # Fully quoted single word, e.g. "foo".
                                word_list.append(word)
                            else:
                                if tmp.endswith(" "):
                                    word_list.append(tmp)
                                    tmp = ""
                                    continue
                                tmp += word[1:] + " "
                        elif word.endswith('"') and not echanged:
                            # Closing quote: flush the accumulated phrase.
                            tmp += word[:-1]
                            word_list.append(tmp)
                            tmp = ""
                        else:
                            if schanged or echanged:
                                word_list.append(word)
                                continue
                            tmp += word + " "
                if tmp:
                    raise BadArgument("A quote was started but never finished.")
                vals[key] = word_list
        except Exception as e:
            raise BadArgument(str(e))

        # Statuses
        if any(s for s in vals["status"] if s.lower() not in ["online", "dnd", "idle", "offline"]):
            raise BadArgument(
                "Invalid status. Must be either `online`, `dnd`, `idle` or `offline`."
            )

        # Discriminators: both filters get the same 4-digit-integer check.
        for key in ("disc", "ndisc"):
            new = []
            for disc in vals[key]:
                if len(disc) != 4:
                    raise BadArgument("Discriminators must have the length of 4")
                try:
                    new.append(int(disc))
                except ValueError:
                    raise BadArgument("Discriminators must be valid integers")
            vals[key] = new

        # Roles: every role filter resolves its entries through RoleConverter.
        rc = RoleConverter()
        for key in ("roles", "any-role", "not-roles", "not-any-role"):
            new = []
            for role in vals[key]:
                r = await rc.convert(ctx, role)
                if not r:
                    raise BadArgument(f"Couldn't find a role matching: {role}")
                new.append(r)
            vals[key] = new

        # Dates: parse each word list into a datetime.  Error messages name
        # the user-facing flag (BUG FIX: some previously reported the
        # internal dest name, e.g. "--joined-be"), and the bare "except:"
        # clauses no longer swallow KeyboardInterrupt/SystemExit.
        date_flags = {
            "joined-on": "--joined-on",
            "joined-be": "--joined-before",
            "joined-af": "--joined-after",
            "created-on": "--created-on",
            "created-be": "--created-before",
            "created-af": "--created-after",
        }
        for key, flag in date_flags.items():
            if vals[key]:
                try:
                    vals[key] = parse(" ".join(vals[key]))
                except Exception:
                    raise BadArgument(f"Failed to parse {flag} argument")

        # Devices / activities
        if vals["device"]:
            if not all(d in ["desktop", "mobile", "web"] for d in vals["device"]):
                raise BadArgument("Bad device. Must be `desktop`, `mobile` or `web`.")
        if vals["at"]:
            at = discord.ActivityType
            switcher = {
                "unknown": at.unknown,
                "playing": at.playing,
                "streaming": at.streaming,
                "listening": at.listening,
                "watching": at.watching,
                "competing": at.competing
            }
            if not all([a.lower() in switcher for a in vals["at"]]):
                raise BadArgument(
                    "Invalid Activity Type. Must be either `unknown`, `playing`, `streaming`, `listening`, `competing` or `watching`."
                )
            new = [switcher[name.lower()] for name in vals["at"]]
            vals["at"] = new

        # Permissions: normalise "ban members" -> "ban_members" and validate
        # against the PERMS whitelist; same rule for all four filters.
        for key in ("perms", "any-perm", "not-perms", "not-any-perm"):
            new = []
            for perm in vals[key]:
                perm = perm.replace(" ", "_")
                if perm.lower() not in PERMS:
                    raise BadArgument(
                        f"Invalid permission. Run `{ctx.prefix}target permissions` to see a list of valid permissions."
                    )
                new.append(perm)
            vals[key] = new

        # Output format
        if vals["format"]:
            if not vals["format"][0].lower() in ["page", "menu"]:
                raise BadArgument(
                    "Invalid format. Must be `page` for in a bin or `menu` for in an embed."
                )
            vals["format"] = vals["format"][0].lower()

        return vals
class Targeter(commands.Cog):
"""Target members and get a list of them based on the passed arguments"""
def __init__(self, bot):
    self.bot = bot
    # Converter instance kept so evals/debug sessions can reuse it directly.
    self.conv = Args()  # For evals
    # NOTE(review): this session is created at cog construction and is not
    # closed anywhere visible in this chunk — confirm a cog_unload/close
    # path exists, otherwise the connector leaks on every cog reload.
    self.s = aiohttp.ClientSession()
async def red_delete_data_for_user(self, **kwargs):
    """No-op: this cog keeps no end-user data, so there is nothing to delete."""
async def post(self, string):
    # Upload *string* (UTF-8 encoded) to the bin.doyle.la pastebin via HTTP
    # PUT and return the response body — presumably the paste URL; confirm
    # against the service's API.
    async with self.s.put("http://bin.doyle.la", data=string.encode("utf-8")) as post:
        text = await post.text()
    return text
def lookup(self, ctx, args):
matched = ctx.guild.members
passed = []
# --- Go through each possible argument ---
# -- Nicknames/Usernames --
if args["nick"]:
matched_here = []
for user in matched:
if any(
[user.nick and piece.lower() in user.nick.lower() for piece in args["nick"]]
):
matched_here.append(user)
passed.append(matched_here)
if args["user"]:
matched_here = []
for user in matched:
if any([piece.lower() in user.name.lower() for piece in args["user"]]):
matched_here.append(user)
passed.append(matched_here)
if args["name"]:
matched_here = []
for user in matched:
if any([piece.lower() in user.display_name.lower() for piece in args["name"]]):
matched_here.append(user)
passed.append(matched_here)
if args["not-nick"]:
matched_here = []
for user in matched:
if not any(
[
user.nick and piece.lower() in user.nick.lower()
for piece in args["not-nick"]
]
):
matched_here.append(user)
passed.append(matched_here)
if args["not-user"]:
matched_here = []
for user in matched:
if not any([piece.lower() in user.name.lower() for piece in args["not-user"]]):
matched_here.append(user)
passed.append(matched_here)
if args["not-name"]:
matched_here = []
for user in matched:
if not any(
[piece.lower() in user.display_name.lower() for piece in args["not-name"]]
):
matched_here.append(user)
passed.append(matched_here)
if args["a-nick"]:
matched_here = []
for user in matched:
if user.nick:
matched_here.append(user)
passed.append(matched_here)
if args["no-nick"]:
matched_here = []
for user in matched:
if not user.nick:
matched_here.append(user)
passed.append(matched_here)
if args["disc"]:
matched_here = []
for user in matched:
if any([disc == int(user.discriminator) for disc in args["disc"]]):
matched_here.append(user)
passed.append(matched_here)
if args["ndisc"]:
matched_here = []
for user in matched:
if not any([disc == int(user.discriminator) for disc in args["ndisc"]]):
matched_here.append(user)
passed.append(matched_here)
# -- End Nicknames/Usernames --
# -- Roles --
if args["roles"]:
matched_here = []
for user in matched:
ur = [role.id for role in user.roles]
if all(role.id in ur for | |
1 or hroots[0] != root_join_order[nridx]:
break
if nridx == 0: return partitions
# Now merge all other partitions from 0 to nridx-1 into nridx
bigone = []
tozero=[]
for idx,part in (enumerate(partitions)):
if idx > nridx: break
if part:
bigone = list(bigone) + list(part)
tozero.append(idx)
break
if not bigone: return partitions
for idx in tozero: partitions[idx] = OrderByBlock([])
partitions[nridx] = OrderByBlock(bigone + list(partitions[nridx]))
return partitions
# ------------------------------------------------------------------------------
# guaranteed to return a list the same size as the root_join_order. The ideal
# case is that the orderbys are in the same order as the root_join_order. BUG
# NOTE: This partitioning scheme is flawed. It only works in a special case
# (when the join clause matches the ordering statement). See below for temporary
# fix.
# ------------------------------------------------------------------------------
def partition_orderbys(root_join_order, orderbys=None):
    """Partition *orderbys* against *root_join_order* (one block per root).

    Guaranteed to return a list the same size as root_join_order.  See the
    BUG NOTE above: this scheme only works when the join clause matches the
    ordering statement.
    """
    # BUG FIX: mutable default argument ([]) replaced with None sentinel.
    orderbys = [] if orderbys is None else orderbys
    partitions = make_orderby_partitions(root_join_order, orderbys)
    partitions = remove_orderby_gaps(partitions)
    partitions = merge_orderby_partitions(root_join_order, partitions)
    return partitions
# ------------------------------------------------------------------------------
# Because of the logical bug generating valid sorts (what I was doing previously
# only works for a special case), a temporary solution is to merge all
# partitions into the lowest root with an orderby statement.
# ------------------------------------------------------------------------------
def partition_orderbys_simple(root_join_order, orderbys=None):
    """Merge all orderby statements into one partition.

    Temporary workaround for the partitioning bug noted above: returns a
    list the same size as *root_join_order* where every slot is an empty
    OrderByBlock except the deepest root that any orderby refers to, which
    receives a single OrderByBlock holding all of *orderbys*.
    """
    # BUG FIX: mutable default argument ([]) replaced with None sentinel.
    # NOTE(review): the "*" replication shares one empty OrderByBlock across
    # all slots — fine only if OrderByBlock is never mutated; confirm.
    partitions = [OrderByBlock([])]*len(root_join_order)
    if not orderbys: return partitions
    visited = set([hashable_path(ob.path.meta.root) for ob in orderbys])
    # Walk the join order until every orderby's root has been seen; attach
    # the whole orderby block at that (deepest) position.
    for i, root in enumerate(root_join_order):
        rp = hashable_path(root)
        visited.discard(rp)
        if not visited:
            partitions[i] = OrderByBlock(orderbys)
            return partitions
    raise RuntimeError("Shouldn't reach here")
#------------------------------------------------------------------------------
# QuerySpec stores all the parameters needed to generate a query plan in one
# data-structure
# ------------------------------------------------------------------------------
class QuerySpec(object):
    """Immutable bag of parameters needed to generate a query plan.

    Parameters may only come from ``QuerySpec.allowed`` and may never be
    None; an unset parameter reads back as None via attribute access.  All
    mutator-style methods (newp/modp/delp/bindp/fill_defaults) return a new
    QuerySpec and leave the receiver untouched.
    """

    # The only parameter names a QuerySpec may carry.
    allowed = [ "roots", "join", "where", "order_by", "ordered",
                "group_by", "tuple", "distinct", "bind", "select",
                "heuristic", "joh" ]

    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            if k not in QuerySpec.allowed:
                raise ValueError("Trying to set unknown parameter '{}'".format(k))
            if v is None:
                raise ValueError(("Error for QuerySpec parameter '{}': 'None' "
                                  "values are not allowed").format(k))
        self._params = dict(kwargs)

    def newp(self, **kwargs):
        """Return a new QuerySpec with the given parameters added.

        Raises ValueError if a parameter is already set or a value is None.
        """
        if not kwargs: return self
        nparams = dict(self._params)
        for k, v in kwargs.items():
            if v is None:
                # BUG FIX: the message previously interpolated the value
                # (always None) instead of the offending parameter name.
                raise ValueError("Cannot specify empty '{}'".format(k))
            if k in self._params:
                raise ValueError("Cannot specify '{}' multiple times".format(k))
            nparams[k] = v
        return QuerySpec(**nparams)

    def modp(self, **kwargs):
        """Return a new QuerySpec with the given parameters added/overwritten."""
        if not kwargs: return self
        nparams = dict(self._params)
        for k, v in kwargs.items():
            if v is None:
                # BUG FIX: message used to show the value, not the name.
                raise ValueError("Cannot specify empty '{}'".format(k))
            nparams[k] = v
        return QuerySpec(**nparams)

    def delp(self, keys=()):
        """Return a new QuerySpec with the given parameters removed (if set)."""
        # BUG FIX: mutable default argument ([]) replaced with a tuple.
        if not keys: return self
        nparams = dict(self._params)
        for k in keys: nparams.pop(k, None)
        return QuerySpec(**nparams)

    def getp(self, name, default=None):
        """Return parameter *name*, or *default* if it is not set.

        Differs from attribute access only in the settable default.
        """
        return self._params.get(name, default)

    def bindp(self, *args, **kwargs):
        """Ground the placeholders in the 'where' clause.

        Positional args bind PositionalPlaceholder instances and keyword
        args bind NamedPlaceholder instances; binding a value with no
        matching placeholder raises ValueError.  Returns a new QuerySpec
        with the grounded clause and ``bind=True``.
        """
        where = self.where
        if where is None:
            raise ValueError("'where' must be specified before binding placeholders")
        np = {}
        pp = {}
        for p in where.placeholders:
            if isinstance(p, NamedPlaceholder): np[p.name] = p
            elif isinstance(p, PositionalPlaceholder): pp[p.posn] = p
        for idx, v in enumerate(args):
            if idx not in pp:
                raise ValueError(("Trying to bind value '{}' to positional "
                                  "argument '{}' but there is no corresponding "
                                  "positional placeholder in where clause "
                                  "'{}'").format(v, idx, where))
        for k, v in kwargs.items():
            if k not in np:
                raise ValueError(("Trying to bind value '{}' to named "
                                  "argument '{}' but there is no corresponding "
                                  "named placeholder in where clause "
                                  "'{}'").format(v, k, where))
        nwhere = where.ground(*args, **kwargs)
        return self.modp(where=nwhere, bind=True)

    def fill_defaults(self):
        """Return a QuerySpec with defaults filled in for unset parameters."""
        toadd = dict(self._params)
        for n in [ "roots", "join", "where", "order_by" ]:
            v = self._params.get(n, None)
            if v is None: toadd[n] = []
        toadd["group_by"] = self._params.get("group_by", [])
        toadd["bind"] = self._params.get("bind", {})
        toadd["tuple"] = self._params.get("tuple", False)
        toadd["distinct"] = self._params.get("distinct", False)
        toadd["heuristic"] = self._params.get("heuristic", False)
        toadd["joh"] = self._params.get("joh", oppref_join_order)
        # Note: No default values for "select" so calling its attribute will
        # return None
        if toadd: return QuerySpec(**toadd)
        else: return self

    def __getattr__(self, item):
        if item not in QuerySpec.allowed:
            # NOTE(review): raising ValueError (not AttributeError) here is
            # unconventional but preserved — callers may rely on it.
            raise ValueError(("Trying to get the value of unknown parameter "
                              "'{}'").format(item))
        return self._params.get(item, None)

    def __str__(self):
        return str(self._params)

    def __repr__(self):
        return repr(self._params)
# Replace any None with a []
def fix_query_spec(inspec):
    """Return a QuerySpec where unset join/where/order_by become []."""
    return QuerySpec(
        roots=inspec.roots,
        join=inspec.join or [],
        where=inspec.where or [],
        order_by=inspec.order_by or [],
    )
# ------------------------------------------------------------------------------
# Takes a list of paths that have an index, then based on a
# list of root paths and a query specification, builds the queryplan.
# ------------------------------------------------------------------------------
def make_query_plan_preordered_roots(indexed_paths, root_join_order,
                                     qspec):
    """Build a QueryPlan for a pre-computed root join order.

    For each root (outermost first) collects the join/where expressions
    that only reference roots visited so far, pairs them with that root's
    orderby partition, and emits one JoinQueryPlan per root.
    """
    qspec = fix_query_spec(qspec)
    joins = qspec.join
    whereclauses = qspec.where
    orderbys = qspec.order_by
    joinset=set(joins)
    clauseset=set(whereclauses)
    visited=set({})
    orderbys=list(orderbys)
    if not root_join_order:
        raise ValueError("Cannot make query plan with empty root join order")
    # orderbygroups = partition_orderbys(root_join_order, orderbys)
    orderbygroups = partition_orderbys_simple(root_join_order, orderbys)
    # For a set of visited root paths and a set of comparator
    # statements return the subset of join statements that only reference paths
    # that have been visited. Removes these joins from the original set.
    # NOTE: deliberately destructive — joinset/clauseset shrink as roots are
    # consumed, so each comparator is assigned to exactly one root.
    def visitedsubset(visited, inset):
        outlist=[]
        for comp in inset:
            if visited.issuperset([hashable_path(r) for r in comp.roots]):
                outlist.append(comp)
        for comp in outlist: inset.remove(comp)
        return outlist
    # Generate a list of JoinQueryPlan consisting of a root path and join
    # comparator and clauses that only reference previous plans in the list.
    output=[]
    for idx,(root,rorderbys) in enumerate(zip(root_join_order,orderbygroups)):
        if rorderbys: rorderbys = OrderByBlock(rorderbys)
        visited.add(hashable_path(root))
        rpjoins = visitedsubset(visited, joinset)
        rpclauses = visitedsubset(visited, clauseset)
        if rpclauses: rpclauses = ClauseBlock(rpclauses)
        # make_join_pair may fold some clauses into the join comparator.
        joinsc, rpclauses = make_join_pair(rpjoins, rpclauses,rorderbys)
        if not rpclauses: rpclauses = []
        rpjoins = [joinsc] if joinsc else []
        output.append(JoinQueryPlan.from_specification(indexed_paths,
                                                       root_join_order[:idx],
                                                       root,rpjoins,rpclauses,
                                                       rorderbys))
    return QueryPlan(output)
# ------------------------------------------------------------------------------
# Join-order heuristics. The heuristic is a function that takes a set of
# indexes, and a query specification with a set of roots and join/where/order_by
# expressions. It then returns an ordering over the roots that are used to
# determine how the joins are built. To interpret the returned list of root
# paths: the first element will be the outer loop query and the last will be the
# inner loop query.
#
# Providing two fixed heuristics: 1) fixed_join_order is a heuristic generator
# and the user specifies the exact ordering, 2) basic_join_order simply retains
# the ordering given as part of the query specification.
#
# The default heuristic, oppref_join_order, is a operator preference heuristic.
# The idea is to assign a preference value to each join expression based on the
# number of join expressions connected with a root path and the operator
# preference. The higher the value the further it is to the outer loop. The
# intuition is that the joins reduce the number of tuples, so by assigning the
# joins early you generate the fewest tuples. Note: not sure about my intuitions
# here. Need to look more closely at the mysql discussion on query execution.
# ------------------------------------------------------------------------------
def fixed_join_order(*roots):
    """Return a join-order heuristic that always uses the given root order.

    Each element of *roots* must be a root path; the returned heuristic
    additionally insists that the query's roots are exactly this set.
    """
    def validate(r):
        r = path(r)
        if not r.meta.is_root:
            raise ValueError(("Bad query roots specification '{}': '{}' is not "
                              "a root path").format(roots, r))
        return r

    if not roots:
        raise ValueError("Missing query roots specification: cannot create "
                         "a fixed join order heuristic from an empty list")
    paths = [validate(r) for r in roots]
    # BUG FIX: was built from the raw, unvalidated inputs and then redundantly
    # re-wrapped in set() on every heuristic invocation.
    hashables = set(hashable_path(p) for p in paths)

    def fixed_join_order_heuristic(indexed_paths, qspec):
        hps = set(hashable_path(r) for r in qspec.roots)
        if hps != hashables:
            # BUG FIX: the message was missing the closing quote after the
            # roots list.
            raise ValueError(("Mis-matched query roots: fixed join order "
                              "heuristic '{}' must contain exactly the "
                              "roots '{}'").format(roots, qspec.roots))
        return list(paths)
    return fixed_join_order_heuristic
def basic_join_order(indexed_paths, qspec):
    """Join-order heuristic that simply keeps the query's declared root order."""
    return list(map(path, qspec.roots))
def oppref_join_order(indexed_paths, qspec):
    """Operator-preference join-order heuristic (the default).

    Sums the preference of every join expression touching each root and
    orders roots by descending total, so the most constrained roots end up
    in the outer loops (see the discussion in the comment block above).
    """
    roots = qspec.roots
    joins = qspec.join
    root2val = { hashable_path(rp) : 0 for rp in roots }
    for join in joins:
        for rp in join.roots:
            hrp = hashable_path(rp)
            # setdefault keeps roots referenced only by joins in the tally.
            # BUG FIX: the return value was previously bound to an unused
            # local variable.
            root2val.setdefault(hrp, 0)
            root2val[hrp] += join.preference
    return [path(hrp) for hrp in
            sorted(root2val.keys(), key=lambda k: root2val[k], reverse=True)]
# ------------------------------------------------------------------------------
# Take a join order heuristic, a list of joins, and a list of clause blocks and
# and generates a query.
# ------------------------------------------------------------------------------
def make_query_plan(indexed_paths, qspec):
    """Generate a query plan from a query spec using its join-order heuristic."""
    filled = qspec.fill_defaults()
    # The spec's joh attribute is the join-order heuristic function.
    ordering = filled.joh(indexed_paths, filled)
    return make_query_plan_preordered_roots(indexed_paths, ordering, filled)
#------------------------------------------------------------------------------
# Implementing Queries - taking a QuerySpec, QueryPlan, and a FactMap and
# generating an actual query.
# ------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Creates a mechanism for sorting using the order_by statements within queries.
#
# Works by creating a | |
version_dict['version']['disks'][disk_name]['type_of_disk'] = \
m.groupdict()['type_of_disk']
continue
# BIOS Flash Firmware Hub @ 0x0, 0KB
m = p10.match(line)
if m:
bios_flash = m.groupdict()['bios_flash']
version_dict['version']['bios_flash'] = bios_flash
# 0: Ext: Management0/0 : address is 5001.0003.0000, irq 11
# 1: Ext: GigabitEthernet0/0 : address is 5001.0003.0001, irq 11
# 2: Ext: GigabitEthernet0/1 : address is 5001.0003.0002, irq 10
# 3: Ext: GigabitEthernet0/2 : address is 5001.0003.0003, irq 10
# 4: Ext: GigabitEthernet0/3 : address is 5001.0003.0004, irq 11
# 5: Ext: GigabitEthernet0/4 : address is 5001.0003.0005, irq 11
# 6: Ext: GigabitEthernet0/5 : address is 5001.0003.0006, irq 10
# 7: Ext: GigabitEthernet0/6 : address is 5001.0003.0007, irq 10
m = p11.match(line)
if m:
intf_number = int(m.groupdict()['intf_number'])
if 'interfaces' not in version_dict['version']:
version_dict['version']['interfaces'] = {}
if intf_number not in version_dict['version']['interfaces']:
version_dict['version']['interfaces'][intf_number] = {}
version_dict['version']['interfaces'][intf_number]['interface'] = \
m.groupdict()['interface']
version_dict['version']['interfaces'][intf_number]['mac_addr'] = \
m.groupdict()['mac_addr']
version_dict['version']['interfaces'][intf_number]['intf_irq'] = \
int(m.groupdict()['intf_irq'])
continue
# 5: Int: Not used : irq 11
# 6: Int: Not used : irq 5
m = p11_1.match(line)
if m:
intf_number = int(m.groupdict()['intf_number'])
if 'interfaces' not in version_dict['version']:
version_dict['version']['interfaces'] = {}
if intf_number not in version_dict['version']['interfaces']:
version_dict['version']['interfaces'][intf_number] = {}
version_dict['version']['interfaces'][intf_number]['interface'] = \
m.groupdict()['interface'].strip()
version_dict['version']['interfaces'][intf_number]['intf_irq'] = \
int(m.groupdict()['intf_irq'])
continue
# License mode: Smart Licensing
m = p12.match(line)
if m:
license_mode = m.groupdict()['license_mode']
version_dict['version']['license_mode'] = license_mode
continue
# ASAv Platform License State: Unlicensed
m = p13.match(line)
if m:
license_state = m.groupdict()['license_state']
version_dict['version']['license_state'] = license_state
continue
# No active entitlement: no feature tier and no throughput level configured
m = p14.match(line)
if m:
entitlement = m.groupdict()['entitlement']
version_dict['version']['entitlement'] = entitlement
continue
# *Memory resource allocation is more than the permitted limit.
m = p15.match(line)
if m:
mem_allocation = m.groupdict()['mem_allocation']
version_dict['version']['mem_allocation'] = mem_allocation
continue
# Maximum VLANs : 50
# Maximum VLANs : 150 perpetual
m = p16.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'max_vlans' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['max_vlans'] = {}
version_dict['version']['licensed_features']['max_vlans']['status'] = \
m.groupdict()['max_vlans']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['max_vlans']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Inside Hosts : Unlimited
# Inside Hosts : Unlimited perpetual
m = p17.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'inside_hosts' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['inside_hosts'] = {}
version_dict['version']['licensed_features']['inside_hosts']['status'] = \
m.groupdict()['inside_hosts']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['inside_hosts']['time_remaining'] = \
m.groupdict()['time_remaining']
# Failover : Active/Standby
# Failover : Active/Active
m = p18.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'failover' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['failover'] = {}
version_dict['version']['licensed_features']['failover']['status'] = \
m.groupdict()['failover']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['failover']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Encryption-DES : Enabled
# VPN-DES : Enabled perpetual
m = p19.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'crypto_des' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['crypto_des'] = {}
version_dict['version']['licensed_features']['crypto_des']['status'] = \
m.groupdict()['crypto_des']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['crypto_des']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Encryption-3DES-AES : Enabled
# VPN-3DES-AES : Enabled perpetual
m = p20.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'crypto_3des_aes' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['crypto_3des_aes'] = {}
version_dict['version']['licensed_features']['crypto_3des_aes']['status'] = \
m.groupdict()['crypto_3des_aes']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['crypto_3des_aes']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Security Contexts : 10
m = p21.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'security_contexts' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['security_contexts'] = {}
version_dict['version']['licensed_features']['security_contexts']['status'] = \
m.groupdict()['security_contexts']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['security_contexts']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Carrier : Disabled
m = p22.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'carrier' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['carrier'] = {}
version_dict['version']['licensed_features']['carrier']['status'] = \
m.groupdict()['carrier']
continue
# AnyConnect Premium Peers : 2
# AnyConnect Premium Peers : 2 perpetual
m = p23.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'anyconnect_premium_peers' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['anyconnect_premium_peers'] = {}
version_dict['version']['licensed_features']['anyconnect_premium_peers']['status'] = \
m.groupdict()['anyconnect_premium_peers']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['anyconnect_premium_peers']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# AnyConnect Essentials : Disabled
# AnyConnect Essentials : Disabled perpetual
m = p24.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'anyconnect_essentials' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['anyconnect_essentials'] = {}
version_dict['version']['licensed_features']['anyconnect_essentials']['status'] = \
m.groupdict()['anyconnect_essentials']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['anyconnect_essentials']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Other VPN Peers : 250
# Other VPN Peers : 750 perpetual
m = p25.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'other_vpn_peers' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['other_vpn_peers'] = {}
version_dict['version']['licensed_features']['other_vpn_peers']['status'] = \
m.groupdict()['other_vpn_peers']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['other_vpn_peers']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Total VPN Peers : 250
# Total VPN Peers : 750 perpetual
m = p26.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'total_vpn_peers' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['total_vpn_peers'] = {}
version_dict['version']['licensed_features']['total_vpn_peers']['status'] = \
m.groupdict()['total_vpn_peers']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['total_vpn_peers']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# AnyConnect for Mobile : Disabled
# AnyConnect for Mobile : Disabled perpetual
m = p27.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'anyconnect_for_mobile' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['anyconnect_for_mobile'] = {}
version_dict['version']['licensed_features']['anyconnect_for_mobile']['status'] = \
m.groupdict()['anyconnect_for_mobile']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['anyconnect_for_mobile']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# AnyConnect for Cisco VPN Phone : Disabled
# AnyConnect for Cisco VPN Phone : Disabled perpetual
m = p28.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
m.groupdict()['anyconnect_for_cisco_vpn_phone']
if 'anyconnect_for_cisco_vpn_phone' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['anyconnect_for_cisco_vpn_phone'] = {}
version_dict['version']['licensed_features']['anyconnect_for_cisco_vpn_phone']['status'] = \
m.groupdict()['anyconnect_for_cisco_vpn_phone']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['anyconnect_for_cisco_vpn_phone']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Advanced Endpoint Assessment : Disabled
# Advanced Endpoint Assessment : Disabled perpetual
m = p29.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'advanced_endpoint_assessment' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['advanced_endpoint_assessment'] = {}
version_dict['version']['licensed_features']['advanced_endpoint_assessment']['status'] = \
m.groupdict()['advanced_endpoint_assessment']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['advanced_endpoint_assessment']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Shared License : Disabled
# Shared License : Enabled perpetual
m = p30.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'shared_license' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['shared_license'] = {}
version_dict['version']['licensed_features']['shared_license']['status'] = \
m.groupdict()['shared_license']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['shared_license']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Total TLS Proxy Sessions : 2
m = p31.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'total_tls_proxy_sessions' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['total_tls_proxy_sessions'] = {}
version_dict['version']['licensed_features']['total_tls_proxy_sessions']['status'] = \
m.groupdict()['total_tls_proxy_sessions']
continue
# Botnet Traffic Filter : Enabled
# Botnet Traffic Filter : Enabled 646 days
m = p32.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'botnet_traffic_filter' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['botnet_traffic_filter'] = {}
version_dict['version']['licensed_features']['botnet_traffic_filter']['status'] = \
m.groupdict()['botnet_traffic_filter']
if m.groupdict()['time_remaining'] is not None:
version_dict['version']['licensed_features']['botnet_traffic_filter']['time_remaining'] = \
m.groupdict()['time_remaining']
continue
# Cluster : Disabled
m = p33.match(line)
if m:
if 'licensed_features' not in version_dict['version']:
version_dict['version']['licensed_features'] = {}
if 'cluster' not in version_dict['version']['licensed_features']:
version_dict['version']['licensed_features']['cluster'] = {}
version_dict['version']['licensed_features']['cluster']['status'] = \
m.groupdict()['cluster']
continue
# Serial Number: 9A5BHB00D2D
m = p34.match(line)
if m:
serial_number = m.groupdict()['serial_number']
version_dict['version']['serial_number'] = serial_number
continue
# Image type : Release
m = p35.match(line)
if m:
image_type = m.groupdict()['image_type']
version_dict['version']['image_type'] = image_type
continue
# Key version : A
m = p36.match(line)
if m:
key_version = m.groupdict()['key_version']
version_dict['version']['key_version'] = key_version
continue
# Configuration last modified by enable_15 at 20:39:39.869 UTC Mon Jun 7 2021
m = p37.match(line)
# Configuration has not been modified since last system restart.
m2 = p37_1.match(line)
if m:
last_modified_by = m.groupdict()['last_modified_by']
last_modified_date = m.groupdict()['last_modified_date']
version_dict['version']['last_modified_by'] = last_modified_by
version_dict['version']['last_modified_date'] = last_modified_date
elif m2:
# We get here if the config hasn't been changed since last restart
last_modified_by = ''
last_modified_date = m2.groupdict()['last_modified_date']
version_dict['version']['last_modified_by'] = last_modified_by
version_dict['version']['last_modified_date'] = last_modified_date
continue
# Encryption hardware device : Cisco ASA-55x0 on-board accelerator (revision 0x0)
m = p38.match(line)
if m:
if 'encryption_hardware' not in version_dict['version']:
version_dict['version']['encryption_hardware'] = {}
version_dict['version']['encryption_hardware']['encryption_device'] = \
m.groupdict()['encryption_device']
continue
# Boot microcode : CN1000-MC-BOOT-2.00
m = p39.match(line)
if m:
if 'encryption_hardware' not in version_dict['version']:
version_dict['version']['encryption_hardware'] = {}
version_dict['version']['encryption_hardware']['boot_microcode'] = \
m.groupdict()['boot_microcode']
continue
# SSL/IKE microcode: CNLite-MC-SSLm-PLUS-2.03
m = p40.match(line)
if m:
if 'encryption_hardware' not in version_dict['version']:
version_dict['version']['encryption_hardware'] = {}
version_dict['version']['encryption_hardware']['ssl_ike_microcode'] = \
m.groupdict()['ssl_ike_microcode']
continue
# IPsec microcode : CNlite-MC-IPSECm-MAIN-2.06
m = p41.match(line)
if m:
if 'encryption_hardware' not in version_dict['version']:
version_dict['version']['encryption_hardware'] = {}
version_dict['version']['encryption_hardware']['ipsec_microcode'] = \
m.groupdict()['ipsec_microcode']
continue
# GTP/GPRS : | |
an exponential fit to the intensity histogram.
Intensity threshold will be set at where the exponential function will have dropped
to exp_intensity_filter (Default = 0.01).
min_peaks: int
Minimum number of peaks to keep, unless less are present from the start.
Default = 10.
max_peaks: int
Maximum number of peaks to keep. Set to 'None' to ignore (Default = 'None').
aim_min_peaks: int
Minium number of peaks to keep (if present) during exponential filtering.
"""
# Fixed parameters:
num_bins = 100 # number of bins for histogram
min_peaks_for_exp_fit = 25 # With less peaks exponential fit doesn't make any sense.
if aim_min_peaks is None: # aim_min_peaks is not given
aim_min_peaks = min_peaks
if isinstance(peaks, list):
peaks = np.array(peaks)
if peaks.shape[1] != 2:
print("Peaks were given in unexpected format...")
# Remove peaks outside min_frag <-> max_frag window:
keep_idx = np.where((peaks[:, 0] > min_frag) & (peaks[:, 0] < max_frag))[0]
peaks = peaks[keep_idx, :]
# Remove peaks based on relative intensity below min_intensity_perc/100 * max_intensity
if min_intensity_perc > 0:
intensity_thres = np.max(peaks[:, 1]) * min_intensity_perc/100
keep_idx = np.where((peaks[:, 0] > min_frag) & (peaks[:, 0] < max_frag)
& (peaks[:, 1] > intensity_thres))[0]
if len(keep_idx) > min_peaks:
peaks = peaks[keep_idx, :]
# Fit exponential to peak intensity distribution
if (exp_intensity_filter is not None) and len(peaks) >= min_peaks_for_exp_fit:
peaks = exponential_peak_filter(peaks,
exp_intensity_filter,
aim_min_peaks,
num_bins)
# Sort by peak intensity
peaks = peaks[np.lexsort((peaks[:, 0], peaks[:, 1])), :]
if max_peaks is not None:
# TODO: now array is transfered back to list (to store as json later). Seems weird.
return [(x[0], x[1]) for x in peaks[-max_peaks:, :]]
else:
return [(x[0], x[1]) for x in peaks]
else:
# Sort by peak intensity
peaks = peaks[np.lexsort((peaks[:, 0], peaks[:, 1])), :]
if max_peaks is not None:
return [(x[0], x[1]) for x in peaks[-max_peaks:, :]]
else:
return [(x[0], x[1]) for x in peaks]
def exponential_peak_filter(peaks,
                            exp_intensity_filter,
                            aim_min_peaks,
                            num_bins):
    """Filter peaks by fitting an exponential to the intensity histogram.

    An exponential ``a0 * exp(-alpha * x)`` is fitted to the lower half of
    the peak-intensity histogram; peaks below the derived intensity
    threshold are removed. If the fit fails, half the mean intensity is
    used as the threshold instead. When fewer than ``aim_min_peaks`` peaks
    survive, the ``aim_min_peaks`` most intense peaks are kept instead.

    Args:
    -------
    peaks: numpy array
        Array of (m/z, intensity) rows.
    exp_intensity_filter: float
        Intensity threshold will be set where the exponential fit to the
        intensity histogram drops below 1 - exp_intensity_filter.
    aim_min_peaks: int
        Desired minimum number of peaks; filtering stops removing peaks
        once this count is reached.
    num_bins: int
        Number of histogram bins used for the exponential fit.

    Returns
    -------
    Filtered numpy array of (m/z, intensity) rows.
    """
    def _exp_model(x, a0, alpha):
        return a0 * np.exp(-alpha * x)

    # Work on a copy with the highest-intensity peak(s) zeroed out so the
    # histogram fit is not dominated by a single extreme value.
    masked = peaks.copy()
    masked[np.where(masked[:, 1] == np.max(masked[:, 1])), :] = 0

    hist, bin_edges = np.histogram(masked[:, 1], bins=num_bins)
    # Start the fit at the most populated bin; use only the lower half of
    # the histogram.
    start = np.where(hist == np.max(hist))[0][0]
    stop = int(num_bins / 2)
    bins_select = bin_edges[start:stop]
    hist_select = hist[start:stop]

    try:
        popt, _ = curve_fit(_exp_model,
                            bins_select,
                            hist_select,
                            p0=(peaks.shape[0], 1e-4))
        # Threshold: one bin below the fit start, shifted by where the
        # exponential has decayed to exp_intensity_filter.
        threshold = bin_edges[max(0, start - 1)] \
            - np.log(1 - exp_intensity_filter) / popt[1]
    except RuntimeError:
        print("RuntimeError for ", len(peaks),
              " peaks. Use 1/2 mean intensity as threshold.")
        threshold = np.mean(masked[:, 1]) / 2
    except TypeError:
        print("Unclear TypeError for ", len(peaks),
              " peaks. Use 1/2 mean intensity as threshold.")
        print("Bins:", bins_select, "and hist:", hist_select)
        threshold = np.mean(masked[:, 1]) / 2

    survivors = np.where(peaks[:, 1] > threshold)[0]
    if len(survivors) < aim_min_peaks:
        # Too few peaks pass the threshold: keep the aim_min_peaks most
        # intense peaks (lexsort: intensity primary, m/z secondary).
        order = np.lexsort((peaks[:, 0], peaks[:, 1]))
        return peaks[order, :][-aim_min_peaks:]
    return peaks[survivors, :]
# ----------------------------------------------------------------------------
# -------------------------- Functions to load MS data------------------------
# ----------------------------------------------------------------------------
def load_ms_data(path_data,
                 path_json,
                 filefilter="*.*",
                 results_file=None,
                 num_decimals=3,
                 min_frag=0.0, max_frag=1000.0,
                 min_loss=5.0, max_loss=500.0,
                 min_intensity_perc=0.0,
                 exp_intensity_filter=0.01,
                 min_keep_peaks_0=10,
                 min_keep_peaks_per_mz=20/200,
                 min_peaks=10,
                 max_peaks=None,
                 aim_min_peak=None,
                 peak_loss_words=['peak_', 'loss_']):
    """Collect spectra from a set of files.

    Partly taken from ms2ldaviz.
    Prototype. Needs to be replaced by a more versatile parser, accepting
    more MS data formats.

    If results_file is given and the corresponding files exist under
    path_json, previously processed data is loaded from disk. Otherwise all
    files in path_data matching filefilter are imported, filtered, turned
    into peak/loss documents and -- when results_file is given -- stored.

    Returns
    -------
    Tuple of (spectra, spectra_dict, ms_documents, ms_documents_intensity,
    spectra_metadata).

    # TODO: add documentation for the individual arguments.
    # TODO: consider removing this function altogether and only allow for MGF input.
    """
    spectra = []
    spectra_dict = {}
    ms_documents = []
    ms_documents_intensity = []
    # BUG FIX: collect_new_data was previously assigned only inside the
    # try/except below, leaving it unbound (NameError) when results_file
    # is None.
    collect_new_data = True

    dirs = os.listdir(path_data)
    spectra_files = fnmatch.filter(dirs, filefilter)

    if results_file is not None:
        try:
            spectra_dict = functions.json_to_dict(path_json + results_file)
            spectra_metadata = pd.read_csv(path_json + results_file[:-5] + "_metadata.csv")
            print("Spectra json file found and loaded.")
            spectra = dict_to_spectrum(spectra_dict)
            collect_new_data = False

            with open(path_json + results_file[:-4] + "txt", "r") as f:
                for line in f:
                    line = line.replace('"', '').replace("'", "").replace(
                        "[", "").replace("]", "").replace("\n", "")
                    ms_documents.append(line.split(", "))

            with open(path_json + results_file[:-5] + "_intensity.txt", "r") as f:
                for line in f:
                    line = line.replace("[", "").replace("]", "")
                    ms_documents_intensity.append([int(x) for x in line.split(", ")])
        except FileNotFoundError:
            print("Could not find file ", path_json, results_file)
            print("New data from ", path_data, " will be imported.")
            collect_new_data = True

    # Read data from files if no pre-stored data is found:
    if spectra_dict == {} or results_file is None:
        # Run over all spectrum files:
        for i, filename in enumerate(spectra_files):
            # Show progress
            if (i+1) % 10 == 0 or i == len(spectra_files)-1:
                print('\r', ' Load spectrum ', i+1, ' of ', len(spectra_files), ' spectra.', end="")

            if min_keep_peaks_per_mz != 0 and min_keep_peaks_0 > min_peaks:
                # TODO: remove following BAD BAD hack:
                # Import a throwaway spectrum first (actually only the
                # PRECURSOR MASS is needed) to scale the min_peaks filter.
                spec = Spectrum(min_frag=min_frag,
                                max_frag=max_frag,
                                min_loss=min_loss,
                                max_loss=max_loss,
                                min_intensity_perc=min_intensity_perc,
                                exp_intensity_filter=exp_intensity_filter,
                                min_peaks=min_peaks,
                                max_peaks=max_peaks,
                                aim_min_peak=aim_min_peak)
                # Load spectrum data from file:
                spec.read_spectrum(path_data, filename, i)

                # Scale the min_peak filter linearly with the precursor mass
                def min_peak_scaling(x, a, b):
                    return int(a + b * x)

                min_peaks_scaled = min_peak_scaling(spec.precursor_mz,
                                                    min_keep_peaks_0,
                                                    min_keep_peaks_per_mz)
            else:
                min_peaks_scaled = min_peaks

            spectrum = Spectrum(min_frag=min_frag,
                                max_frag=max_frag,
                                min_loss=min_loss,
                                max_loss=max_loss,
                                min_intensity_perc=min_intensity_perc,
                                exp_intensity_filter=exp_intensity_filter,
                                min_peaks=min_peaks,
                                max_peaks=max_peaks,
                                aim_min_peak=min_peaks_scaled)
            # Load spectrum data from file:
            spectrum.read_spectrum(path_data, filename, i)

            # Get precursor mass (later used to calculate losses!)
            # BUG FIX: this block previously operated on 'spec', which is a
            # throwaway object and is unbound (NameError) whenever the
            # scaling branch above is skipped; it must use 'spectrum'.
            if spectrum.precursor_mz is not None:
                if 'Precursor_MZ' in spectrum.metadata:
                    spectrum.precursor_mz = float(spectrum.metadata['Precursor_MZ'])
                else:
                    spectrum.precursor_mz = spectrum.parent_mz

            # Calculate losses:
            spectrum.get_losses()

            # Collect in form of list of spectrum objects, and as dictionary
            spectra.append(spectrum)
            spectra_dict[filename] = spectrum.__dict__

        ms_documents, ms_documents_intensity, spectra_metadata = create_ms_documents(
            spectra,
            num_decimals,
            peak_loss_words,
            min_loss, max_loss)

        # Add filenames to metadata
        spectra_metadata["filename"] = [s.filename for s in spectra]

        # Save collected data
        # BUG FIX: only attempt to save when a results_file name was given;
        # slicing None (results_file[:-5]) would raise a TypeError.
        if collect_new_data and results_file is not None:
            spectra_metadata.to_csv(path_json + results_file[:-5] + "_metadata.csv", index=False)
            functions.dict_to_json(spectra_dict, path_json + results_file)
            # Store documents
            with open(path_json + results_file[:-4] + "txt", "w") as f:
                for s in ms_documents:
                    f.write(str(s) + "\n")
            with open(path_json + results_file[:-5] + "_intensity.txt", "w") as f:
                for s in ms_documents_intensity:
                    f.write(str(s) + "\n")
    return spectra, spectra_dict, ms_documents, ms_documents_intensity, spectra_metadata
def load_MGF_data(file_mgf,
file_json=None,
num_decimals=2,
min_frag=0.0, max_frag=1000.0,
min_loss=10.0, max_loss=200.0,
min_intensity_perc=0.0,
exp_intensity_filter=0.01,
min_keep_peaks_0=10,
min_keep_peaks_per_mz=20/200,
min_peaks=10,
max_peaks=None,
peak_loss_words=['peak_', 'loss_'],
ignore_losses=False,
create_docs=True):
"""Collect spectra from MGF file.
1) Importing MGF file - based on pyteomics parser.
2) Filter spectra: can be based on mininum relative intensity or based on
and exponential intenstiy distribution.
3) Create documents with peaks (and losses) as words. Words are constructed
from peak mz values and restricted to 'num_decimals' decimals.
Args:
-------
file_mgf: str
MGF file that should be imported.
file_json: str
File under which already processed data is stored. If not None and if it
exists, data will simply be imported from that file.
Otherwise data will be imported from file_mgf and final results are stored
under file_json.(default= None).
num_decimals: int
Number of decimals to keep from each peak-position for creating words.
min_frag: float
Lower limit of m/z to take into account (Default = 0.0).
max_frag: float
Upper limit of m/z to take into account (Default = 1000.0).
min_loss: float
Lower limit of losses to take into account (Default = 10.0).
max_loss: float
Upper limit of losses to take into account (Default = 200.0).
min_intensity_perc: float
Filter out peaks with intensities lower than the min_intensity_perc percentage
of the highest peak intensity. min_intensity_perc = 1.0 will lead to removal of
all peaks with intensities below 1% of the maximum intensity.
(Default = 0.0, essentially meaning: OFF).
exp_intensity_filter: float
Filter out peaks by applying an exponential fit to the intensity histogram.
Intensity threshold will be set at where the exponential function will have dropped
to exp_intensity_filter (Default = 0.01).
min_keep_peaks_0: float
Factor to describe constant of mininum peaks per spectrum with increasing
parentmass. Formula is: int(min_keep_peaks_0 + min_keep_peaks_per_mz * parentmass).
min_keep_peaks_per_mz: float
Factor to describe linear increase of mininum peaks per spectrum with increasing
parentmass. Formula is: int(min_keep_peaks_0 | |
<filename>tests/modules/test_blazeMeterUploader.py
import json
import logging
import math
import os
import shutil
import time
from io import BytesIO
from bzt import TaurusException
from bzt.bza import Master, Session
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules.blazemeter import BlazeMeterUploader, ResultsFromBZA
from bzt.modules.blazemeter import MonitoringBuffer
from bzt.six import HTTPError
from bzt.six import iteritems, viewvalues
from tests import BZTestCase, random_datapoint, RESOURCES_DIR
from tests.mocks import EngineEmul, BZMock
class TestBlazeMeterUploader(BZTestCase):
def test_some_errors(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []},
'https://a.blazemeter.com/api/v4/sessions/1': {"result": {'id': 1, "note": "somenote"}},
'https://a.blazemeter.com/api/v4/masters/1': {"result": {'id': 1, "note": "somenote"}},
})
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
"session": {'id': 1, "testId": 1, "userId": 1},
"master": {'id': 1},
"signature": "sign"
}},
'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1' +
'&pq=0&target=labels_bulk&update=1': {},
'https://a.blazemeter.com/api/v4/sessions/1/stop': {"result": True},
'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {'result': {'session': {}}}
})
mock.mock_patch.update({
'https://a.blazemeter.com/api/v4/sessions/1': {"result": {"id": 1, "note": "somenote"}},
'https://a.blazemeter.com/api/v4/masters/1': {"result": {"id": 1, "note": "somenote"}},
})
obj = BlazeMeterUploader()
mock.apply(obj._user)
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '<PASSWORD>'
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
obj.prepare()
obj.startup()
obj.engine.stopping_reason = ValueError('wrong value')
obj.aggregated_second(random_datapoint(10))
obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
{'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [], KPISet.RESP_CODES: '111'},
{'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [], KPISet.RESP_CODES: '222'}]
obj.post_process()
obj.log.info("Requests: %s", mock.requests)
# check for note appending in _postproc_phase3()
reqs = mock.requests[-4:]
self.assertIn('api/v4/sessions/1', reqs[0]['url'])
self.assertIn('api/v4/sessions/1', reqs[1]['url'])
self.assertIn('api/v4/masters/1', reqs[2]['url'])
self.assertIn('api/v4/masters/1', reqs[3]['url'])
self.assertIn('ValueError: wrong value', str(reqs[1]['data']))
self.assertIn('ValueError: wrong value', str(reqs[3]['data']))
labels = mock.requests[8]['data']
if not isinstance(labels, str):
labels = labels.decode("utf-8")
obj.log.info("Labels: %s", labels)
data = json.loads(str(labels))
self.assertEqual(1, len(data['labels']))
total_item = data['labels'][0]
self.assertEqual('ALL', total_item['name'])
self.assertEqual(total_item['assertions'],
[{'failureMessage': 'Forbidden', 'failures': 10, 'name': 'All Assertions'}])
self.assertEqual(total_item['errors'], [{'m': 'Allowed', 'count': 20, 'rc': '222'}])
def test_no_notes_for_public_reporting(self):
mock = BZMock()
mock.mock_post.update({
'https://a.blazemeter.com/api/v4/sessions/1/terminate-external': {},
'https://data.blazemeter.com/submit.php?session_id=1&signature=None&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
})
obj = BlazeMeterUploader()
obj.parameters['project'] = 'Proj name'
obj.settings['token'] = '' # public reporting
obj.settings['browser-open'] = 'none'
obj.engine = EngineEmul()
mock.apply(obj._user)
obj.prepare()
obj._session = Session(obj._user, {'id': 1, 'testId': 1, 'userId': 1})
obj._master = Master(obj._user, {'id': 1})
obj.engine.stopping_reason = ValueError('wrong value')
obj.aggregated_second(random_datapoint(10))
obj.kpi_buffer[-1][DataPoint.CUMULATIVE][''][KPISet.ERRORS] = [
{'msg': 'Forbidden', 'cnt': 10, 'type': KPISet.ERRTYPE_ASSERT, 'urls': [], KPISet.RESP_CODES: '111'},
{'msg': 'Allowed', 'cnt': 20, 'type': KPISet.ERRTYPE_ERROR, 'urls': [], KPISet.RESP_CODES: '222'}]
obj.send_monitoring = False
obj.post_process()
# TODO: looks like this whole block of checks is useless
# check for note appending in _postproc_phase3()
reqs = [{'url': '', 'data': ''} for _ in range(4)] # add template for minimal size
reqs = (reqs + mock.requests)[-4:]
self.assertNotIn('api/v4/sessions/1', reqs[0]['url'])
self.assertNotIn('api/v4/sessions/1', reqs[1]['url'])
self.assertNotIn('api/v4/masters/1', reqs[2]['url'])
self.assertNotIn('api/v4/masters/1', reqs[3]['url'])
if reqs[1]['data']:
self.assertNotIn('ValueError: wrong value', reqs[1]['data'])
if reqs[3]['data']:
self.assertNotIn('ValueError: wrong value', reqs[3]['data'])
def test_check(self):
    """Full happy-path lifecycle: prepare/startup/aggregate/check/shutdown/
    post_process against a mocked BlazeMeter API, including a transient
    monitoring-push failure and a server-initiated ENDED status that must
    surface as KeyboardInterrupt from check()."""
    mock = BZMock()
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []},
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {"result": []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {
            "id": 1,
            "name": "boo",
            "userId": 2,
            "description": None,
            "created": time.time(),
            "updated": time.time(),
            "organizationId": None
        }},
        'https://a.blazemeter.com/api/v4/tests': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests/1/start-external': {"result": {
            'session': {'id': 1, 'userId': 1, 'testId': 1},
            'master': {'id': 1, 'userId': 1},
            'signature': 'sign'}},
        # second labels push reports ENDED -> check() must raise KeyboardInterrupt
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': [
            {},
            {"result": {'session': {"statusCode": 140, 'status': 'ENDED'}}},
            {},
        ],
        # first monitoring push fails (IOError) and must be survived
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': [
            IOError("monitoring push expected fail"),
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
            {"result": True},
        ],
        'https://a.blazemeter.com/api/v4/sessions/1/stop': {},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1':
            {"result": {'session': {}}}
    })
    obj = BlazeMeterUploader()
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '123'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    # put a real file into artifacts so post_process() has something to upload
    shutil.copy(__file__, os.path.join(obj.engine.artifacts_dir, os.path.basename(__file__)))
    mock.apply(obj._user)
    obj._user.timeout = 0.1
    obj.prepare()
    obj.startup()
    for x in range(0, 31):
        obj.aggregated_second(random_datapoint(x))
    mon = [{"ts": 1, "source": "local", "cpu": 1, "mem": 2, "bytes-recv": 100, "other": 0}]
    obj.monitoring_data(mon)
    obj.check()
    for x in range(32, 65):
        obj.aggregated_second(random_datapoint(x))
    # force a dispatch; the mocked ENDED status should abort the run
    obj.last_dispatch = time.time() - 2 * obj.send_interval
    self.assertRaises(KeyboardInterrupt, obj.check)
    obj.aggregated_second(random_datapoint(10))
    obj.shutdown()
    log_file = obj.engine.create_artifact('log', '.tmp')
    handler = logging.FileHandler(log_file)
    obj.engine.log.parent.addHandler(handler)
    obj.engine.config.get('modules').get('shellexec').get('env')['TAURUS_INDEX_ALL'] = 1
    obj.post_process()
    # total number of HTTP calls over the whole lifecycle is fixed
    self.assertEqual(20, len(mock.requests))
    obj.engine.log.parent.removeHandler(handler)
def test_monitoring_buffer_limit_option(self):
    """The per-source monitoring buffer must never exceed the configured cap."""
    uploader = BlazeMeterUploader()
    uploader.engine = EngineEmul()
    stub = BZMock(uploader._user)
    uploader.settings["monitoring-buffer-limit"] = 100
    uploader.prepare()
    for tick in range(1000):
        sample = [{"ts": tick, "source": "local", "cpu": float(tick) / 1000 * 100,
                   "mem": 2, "bytes-recv": 100, "other": 0}]
        uploader.monitoring_data(sample)
        # after every single datapoint, each source buffer must respect the cap
        for points in uploader.monitoring_buffer.data.values():
            self.assertLessEqual(len(points), 100)
    self.assertEqual(1, len(stub.requests))
def test_direct_feeding(self):
    """When session-id and signature are supplied directly, the uploader must
    reuse the existing session end-to-end and finish online cleanly."""
    obj = BlazeMeterUploader()
    self.sniff_log(obj.log)
    obj.engine = EngineEmul()
    mock = BZMock(obj._user)
    mock.mock_post.update({
        'https://data.blazemeter.com/submit.php?session_id=direct&signature=sign&test_id=None&user_id=None&pq=0&target=labels_bulk&update=1': {},
        'https://data.blazemeter.com/api/v4/image/direct/files?signature=sign': {"result": True},
        'https://a.blazemeter.com/api/v4/sessions/direct/stop': {"result": True},
        'https://data.blazemeter.com/submit.php?session_id=direct&signature=sign&test_id=None&user_id=None&pq=0&target=engine_health&update=1': {'result': {'session': {}}}
    })
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/sessions/direct': {"result": {}}
    })
    mock.mock_patch.update({
        'https://a.blazemeter.com/api/v4/sessions/direct': {"result": {}}
    })
    obj.parameters['session-id'] = 'direct'
    obj.parameters['signature'] = 'sign'
    obj.settings['token'] = '<PASSWORD>'
    obj.prepare()
    obj.startup()
    obj.check()
    obj.shutdown()
    # a stopping reason exercises the note-appending code path
    obj.engine.stopping_reason = TaurusException("To cover")
    obj.post_process()
    self.assertNotIn("Failed to finish online", self.log_recorder.warn_buff.getvalue())
    # the externally supplied session id must have been adopted verbatim
    self.assertEquals('direct', obj._session['id'])
    self.assertEqual(9, len(mock.requests), "Requests were: %s" % mock.requests)
def test_anonymous_feeding(self):
    """Without a token the uploader must create an anonymous session via the
    public /sessions endpoint and run the whole lifecycle against it."""
    obj = BlazeMeterUploader()
    obj.engine = EngineEmul()
    obj.browser_open = False
    mock = BZMock(obj._user)
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/sessions': {"result": {
            "signature": "sign",
            "publicTokenUrl": "publicUrl",
            "session": {"id": 1, "testId": 1, "userId": 1},
            "master": {"id": 1},
        }},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {},
        'https://data.blazemeter.com/api/v4/image/1/files?signature=sign': {"result": True},
        'https://data.blazemeter.com/submit.php?session_id=1&signature=sign&test_id=1&user_id=1&pq=0&target=engine_health&update=1': {'result': {'session': {}}},
    })
    obj.prepare()
    obj.startup()
    obj.check()
    obj.shutdown()
    obj.post_process()
    # the session id returned by the anonymous endpoint must be adopted
    self.assertEquals(1, obj._session['id'])
    self.assertEqual(6, len(mock.requests), "Requests were: %s" % mock.requests)
def test_401(self):
    """prepare() must propagate an HTTP error raised by the version probe."""
    uploader = BlazeMeterUploader()
    uploader.engine = EngineEmul()
    stub = BZMock(uploader._user)
    # make the very first API call fail with an (unauthorized-style) HTTPError
    stub.mock_get['https://a.blazemeter.com/api/v4/web/version'] = HTTPError(None, None, None, None, None)
    self.assertRaises(HTTPError, uploader.prepare)
def test_multiple_reporters_one_monitoring(self):
    """Two uploaders may consume the same monitoring stream without clashing."""
    first = BlazeMeterUploader()
    first.engine = EngineEmul()
    BZMock(first._user)
    second = BlazeMeterUploader()
    second.engine = EngineEmul()
    BZMock(second._user)
    first.prepare()
    second.prepare()
    for tick in range(10):
        sample = [{"ts": tick, "source": "local", "cpu": float(tick) / 1000 * 100,
                   "mem": 2, "bytes-recv": 100, "other": 0}]
        first.monitoring_data(sample)
        second.monitoring_data(sample)
def test_public_report(self):
    """When public-report is enabled, a public token must be requested and the
    resulting share link logged at INFO level."""
    mock = BZMock()
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {"result": []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {'id': 1}},
        'https://a.blazemeter.com/api/v4/tests': {'result': {'id': 'unittest1'}},
        'https://a.blazemeter.com/api/v4/tests/unittest1/start-external': {"result": {
            'session': {'id': 'sess1', 'userId': 1, 'testId': 1},
            'master': {'id': 'master1', 'userId': 1},
            'signature': ''
        }},
        'https://a.blazemeter.com/api/v4/masters/master1/public-token': {'result': {'publicToken': '<PASSWORD>Token'}},
        'https://data.blazemeter.com/submit.php?session_id=sess1&signature=&test_id=1&user_id=1&pq=0&target=labels_bulk&update=1': {
            "result": {'session': {}}},
        'https://data.blazemeter.com/api/v4/image/sess1/files?signature=': {'result': True},
    })
    obj = BlazeMeterUploader()
    obj.settings['token'] = '<PASSWORD>'
    obj.settings['browser-open'] = 'none'
    obj.settings['public-report'] = True
    obj.settings['send-monitoring'] = False
    obj.engine = EngineEmul()
    mock.apply(obj._user)
    self.sniff_log(obj.log)
    obj.prepare()
    obj.startup()
    obj.aggregated_second(random_datapoint(10))
    obj.check()
    obj.shutdown()
    obj.post_process()
    # the public link built from the token must appear in the INFO log
    log_buff = self.log_recorder.info_buff.getvalue()
    log_line = "Public report link: https://a.blazemeter.com/app/?public-token=publicToken#/masters/master1/summary"
    self.assertIn(log_line, log_buff)
    logging.warning("\n".join([x['url'] for x in mock.requests]))
    self.assertEqual(14, len(mock.requests))
def test_new_project_existing_test(self):
    """A test with the right name exists in the workspace but not in the new
    project: the uploader must create the project AND a fresh test in it."""
    obj = BlazeMeterUploader()
    mock = BZMock(obj._user)
    mock.mock_get.update({
        # workspace lookup finds a test, but it belongs to no project we own
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': [
            {'id': 1, 'name': '<NAME>', 'configuration': {"type": 'external'}}
        ]},
        # project-scoped lookup comes up empty -> a new test is required
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {'result': []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
    })
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '<PASSWORD>'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    obj.prepare()
    # request #4 creates the project, request #6 creates the test
    self.assertEquals('https://a.blazemeter.com/api/v4/projects', mock.requests[4]['url'])
    self.assertEquals('POST', mock.requests[4]['method'])
    self.assertEquals('https://a.blazemeter.com/api/v4/tests', mock.requests[6]['url'])
    self.assertEquals('POST', mock.requests[6]['method'])
def test_new_project_new_test(self):
    """Neither a matching test nor any project exists: the uploader must
    create both (project first, then test)."""
    obj = BlazeMeterUploader()
    mock = BZMock(obj._user)
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?workspaceId=1&name=Taurus+Test': {'result': []},
        'https://a.blazemeter.com/api/v4/projects?workspaceId=1': {'result': []}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
    })
    obj.settings['token'] = '<PASSWORD>'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    obj.prepare()
    # request #6 creates the project, request #7 creates the test
    self.assertEquals('https://a.blazemeter.com/api/v4/projects', mock.requests[6]['url'])
    self.assertEquals('POST', mock.requests[6]['method'])
    self.assertEquals('https://a.blazemeter.com/api/v4/tests', mock.requests[7]['url'])
    self.assertEquals('POST', mock.requests[7]['method'])
def test_existing_project_new_test(self):
    """The named project already exists but has no matching test: only the
    test must be created (no project POST)."""
    obj = BlazeMeterUploader()
    mock = BZMock(obj._user)
    mock.mock_get.update({
        'https://a.blazemeter.com/api/v4/tests?projectId=1&name=Taurus+Test': {'result': []},
        # the workspace already contains the requested project
        'https://a.blazemeter.com/api/v4/projects?workspaceId=1': {'result': [
            {'id': 1, 'name': 'Proj name'}
        ]}
    })
    mock.mock_post.update({
        'https://a.blazemeter.com/api/v4/projects': {"result": {"id": 1}},
        'https://a.blazemeter.com/api/v4/tests': {"result": {"id": 1}},
    })
    obj.parameters['project'] = 'Proj name'
    obj.settings['token'] = '<PASSWORD>'
    obj.settings['browser-open'] = 'none'
    obj.engine = EngineEmul()
    obj.prepare()
    # request #6 is the test creation; the existing project is reused
    self.assertEquals('https://a.blazemeter.com/api/v4/tests', mock.requests[6]['url'])
    self.assertEquals('POST', mock.requests[6]['method'])
class TestBlazeMeterClientUnicode(BZTestCase):
    """Regression tests for unicode handling in file uploads to BlazeMeter."""

    def test_unicode_request(self):
        """
        test UnicodeDecodeError in BlazeMeterClient._request()
        """
        session = Session(data={'id': 1})
        # BZMock(session) patches the transport as a side effect; the variable
        # itself is kept only to register the POST response below
        mock = BZMock(session)
        mock.mock_post['https://data.blazemeter.com/api/v4/image/1/files?signature=None'] = {"result": 1}
        # must not raise despite non-ASCII content in the uploaded file
        session.upload_file(RESOURCES_DIR + "jmeter/unicode_file")

    def test_binary_unicode_error(self):
        """Uploading raw binary (zip) content must not trip unicode decoding."""
        session = Session(data={'id': 1})
        mock = BZMock(session)
        mock.mock_post['https://data.blazemeter.com/api/v4/image/1/files?signature=None'] = {"result": 1}
        with open(RESOURCES_DIR + "jmeter/jmeter-dist-2.13.zip", 'rb') as fds:
            zip_content = fds.read()
        session.upload_file("jtls_and_more.zip", zip_content)
class DummyHttpResponse(object):
    """Minimal stand-in for an HTTP response: replays a unicode fixture file.

    The fixture is loaded once into an in-memory buffer; each read() serves
    up to 1024 bytes from the start of that buffer.
    """

    def __init__(self):
        self.fake_socket = BytesIO()
        # BUGFIX: use a context manager so the fixture file handle is closed
        # (the original open(...).read() leaked the handle)
        with open(RESOURCES_DIR + "unicode_file", 'rb') as fixture:
            self.fake_socket.write(fixture.read())

    def read(self):
        """Return up to 1024 bytes, always starting from the beginning."""
        self.fake_socket.seek(0)
        return self.fake_socket.read(1024)
def dummy_urlopen(*_unused_args, **_unused_kwargs):
    """urlopen() replacement: ignore every argument, hand back a canned response."""
    return DummyHttpResponse()
class TestResultsFromBZA(BZTestCase):
@staticmethod
def convert_kpi_errors(errors):
    """Flatten KPISet error records into {msg: {'count': ..., 'rc': ...}}.

    Makes order-insensitive comparison of error lists straightforward.
    Duplicate messages overwrite earlier entries.
    """
    result = {}
    for error in errors:
        result[error['msg']] = {'count': error['cnt'], 'rc': error['rc']}
    return result
@staticmethod
def get_errors_mock(errors, assertions=None):
    # return mock of server response for errors specified in internal format (see __get_errors_from_BZA())
    """Build a mocked errorsreport API response from internal-format dicts.

    :param errors: {label_id: {msg: {'count': int, 'rc': str}}}
    :param assertions: same shape, failure counts keyed by message
    :return: dict mapping the errorsreport URL to its canned JSON body
    """
    result = []
    if not assertions:
        assertions = {}
    for _id in list(set(list(errors.keys()) + list(assertions.keys()))):  # unique keys from both dictionaries
        errors_list = []
        if errors.get(_id):
            for msg in errors[_id]:
                errors_list.append({
                    "m": msg,
                    "count": errors[_id][msg]["count"],
                    "rc": errors[_id][msg]["rc"]})
        assertions_list = []
        if assertions.get(_id):
            for msg in assertions[_id]:
                assertions_list.append({
                    "failureMessage": msg,
                    "failures": assertions[_id][msg]["count"],
                    "name": "All Assertions"})
        result.append({
            "_id": _id,
            "name": _id,
            "assertions": assertions_list,
            "samplesNotCounted": 0,
            "assertionsNotCounted": 0,
            "otherErrorsCount": 0,
            "errors": errors_list})
    return {
        "https://a.blazemeter.com/api/v4/masters/1/reports/errorsreport/data?noDataError=false": {
            "api_version": 4,
            "error": None,
            "result": result}}
def test_get_errors(self):
mock = BZMock()
mock.mock_get.update({
'https://a.blazemeter.com/api/v4/data/labels?master_id=1': {
"api_version": 4,
"error": None,
"result": [{
"sessions": ["r-t-5746a8e38569a"],
"id": "ALL",
"name": "ALL"
}, {
"sessions": ["r-t-5746a8e38569a"],
"id": "e843ff89a5737891a10251cbb0db08e5",
"name": "http://blazedemo.com/"}]},
'https://a.blazemeter.com/api/v4/data/kpis?interval=1&from=0&master_ids%5B%5D=1&kpis%5B%5D=t&kpis%5B%5D=lt&kpis%5B%5D=by&kpis%5B%5D=n&kpis%5B%5D=ec&kpis%5B%5D=ts&kpis%5B%5D=na&labels%5B%5D=ALL&labels%5B%5D=e843ff89a5737891a10251cbb0db08e5': {
"api_version": 4,
"error": None,
"result": [{
"labelId": "ALL",
"labelName": "ALL",
"label": "ALL",
"kpis": [{
"n": 1, "na": 1, "ec": 0, "p90": 0, "t_avg": 817, "lt_avg": 82,
"by_avg": 0, "n_avg": 1, "ec_avg": 0, "ts": 1464248743
}, {"n": 1, "na": | |
<reponame>alsyz/genieparser<filename>src/genie/libs/parser/iosxe/tests/test_show_bgp.py
# Python
import unittest
from unittest.mock import Mock
# ATS
from pyats.topology import Device
from pyats.topology import loader
# Metaparser
from genie.metaparser.util.exceptions import SchemaEmptyParserError, \
SchemaMissingKeyError
# iosxe show_bgp
from genie.libs.parser.iosxe.show_bgp import ShowIpBgpSummary,\
ShowIpBgpAllSummary,\
ShowIpBgpNeighborsAdvertisedRoutes,\
ShowBgpSummary
# ==============================
# Unit test for
# * 'show ip bgp all summary'
# ==============================
class TestShowIpBgpAllSummary(unittest.TestCase):
# shared dummy device; each test replaces/mocks its execute() method
device = Device(name='aDevice')
# simulates a device that returns no output at all (empty-schema case)
empty_output = {'execute.return_value': ''}
golden_parsed_output1 = {
'bgp_id': 5918,
'vrf':
{'default':
{'neighbor':
{'192.168.10.253':
{'address_family':
{'vpnv4 unicast':
{'activity_paths': '5564772/1540171',
'activity_prefixes': '2722567/700066',
'as': 60103,
'attribute_entries': '5098/4898',
'bgp_table_version': 9370482,
'cache_entries':
{'filter-list':
{'memory_usage': 0,
'total_entries': 0},
'route-map':
{'memory_usage': 0,
'total_entries': 0}},
'community_entries':
{'memory_usage': 60056,
'total_entries': 2301},
'entries':
{'AS-PATH':
{'memory_usage': 4824,
'total_entries': 201},
'rrinfo':
{'memory_usage': 20080,
'total_entries': 502}},
'input_queue': 0,
'local_as': 5918,
'msg_rcvd': 0,
'msg_sent': 0,
'output_queue': 0,
'path':
{'memory_usage': 482879760,
'total_entries': 4023998},
'prefixes':
{'memory_usage': 517657344,
'total_entries': 2022099},
'route_identifier': '10.169.197.254',
'routing_table_version': 9370482,
'scan_interval': 60,
'state_pfxrcd': 'Idle',
'tbl_ver': 1,
'total_memory': 1001967936,
'up_down': 'never',
'version': 4}}}}}}}
golden_output1 = {'execute.return_value': '''
Router#show ip bgp all summary
Load for five secs: 2%/0%; one minute: 10%; five minutes: 9%
Time source is NTP, 20:34:39.724 EST Wed Jun 2 2016
For address family: VPNv4 Unicast
BGP router identifier 10.169.197.254, local AS number 5918
BGP table version is 9370482, main routing table version 9370482
2022099 network entries using 517657344 bytes of memory
4023998 path entries using 482879760 bytes of memory
5098/4898 BGP path/bestpath attribute entries using 1345872 bytes of memory
502 BGP rrinfo entries using 20080 bytes of memory
201 BGP AS-PATH entries using 4824 bytes of memory
2301 BGP extended community entries using 60056 bytes of memory
0 BGP route-map cache entries using 0 bytes of memory
0 BGP filter-list cache entries using 0 bytes of memory
BGP using 1001967936 total bytes of memory
BGP activity 2722567/700066 prefixes, 5564772/1540171 paths, scan interval 60 secs
Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd
192.168.10.253 4 65555 299 332 9370482 0 0 02:27:39 100
192.168.10.253 4 60001 299 333 9370482 0 0 02:27:46 100
192.168.10.253 4 60002 299 333 9370482 0 0 02:27:45 100
192.168.10.253 4 60003 299 331 9370482 0 0 02:27:40 100
192.168.10.253 4 60004 299 334 9370482 0 0 02:27:39 100
192.168.10.253 4 60005 299 334 9370482 0 0 02:27:41 100
192.168.10.253 4 60006 299 333 9370482 0 0 02:27:43 100
192.168.10.253 4 60007 299 332 9370482 0 0 02:27:41 100
192.168.10.253 4 60100 0 0 1 0 0 never Idle
192.168.10.253 4 60101 0 0 1 0 0 never Idle
192.168.10.253 4 60102 0 0 1 0 0 never Idle
192.168.10.253 4 60103 0 0 1 0 0 never Idle
'''}
golden_parsed_output2 = {
"bgp_id": 65109,
"vrf": {
"VRF1": {
"neighbor": {
"192.168.10.253": {
"address_family": {
"vpnv4": {
"version": 4,
"as": 65555,
"msg_rcvd": 10112,
"msg_sent": 10107,
"tbl_ver": 263,
"input_queue": 0,
"output_queue": 0,
"up_down": "3d05h",
"state_pfxrcd": "13",
"route_identifier": "10.169.197.254",
"local_as": 65109,
"bgp_table_version": 263,
"routing_table_version": 263,
"attribute_entries": "106/104",
"prefixes": {
"total_entries": 126,
"memory_usage": 32256
},
"path": {
"total_entries": 189,
"memory_usage": 25704
},
"total_memory": 92688,
"activity_prefixes": "226/0",
"activity_paths": "4035/3696",
"scan_interval": 60,
"cache_entries": {
"route-map": {
"total_entries": 0,
"memory_usage": 0
},
"filter-list": {
"total_entries": 0,
"memory_usage": 0
}
},
"entries": {
"rrinfo": {
"total_entries": 1,
"memory_usage": 40
},
"AS-PATH": {
"total_entries": 2,
"memory_usage": 64
}
},
"community_entries": {
"total_entries": 102,
"memory_usage": 3248
}
}
}
}
}
},
"default": {
"neighbor": {
"192.168.10.253": {
"address_family": {
"vpnv4": {
"version": 4,
"as": 65555,
"msg_rcvd": 0,
"msg_sent": 0,
"tbl_ver": 1,
"input_queue": 0,
"output_queue": 0,
"up_down": "never",
"state_pfxrcd": "Idle",
"route_identifier": "10.169.197.254",
"local_as": 65109,
"bgp_table_version": 263,
"routing_table_version": 263,
"attribute_entries": "106/104",
"prefixes": {
"total_entries": 126,
"memory_usage": 32256
},
"path": {
"total_entries": 189,
"memory_usage": 25704
},
"total_memory": 92688,
"activity_prefixes": "226/0",
"activity_paths": "4035/3696",
"scan_interval": 60,
"cache_entries": {
"route-map": {
"total_entries": 0,
"memory_usage": 0
},
"filter-list": {
"total_entries": 0,
"memory_usage": 0
}
},
"entries": {
"rrinfo": {
"total_entries": 1,
"memory_usage": 40
},
"AS-PATH": {
"total_entries": 2,
"memory_usage": 64
}
},
"community_entries": {
"total_entries": 102,
"memory_usage": 3248
}
}
}
},
"192.168.36.119": {
"address_family": {
"vpnv4": {
"version": 4,
"as": 65109,
"msg_rcvd": 10293,
"msg_sent": 10213,
"tbl_ver": 263,
"input_queue": 0,
"output_queue": 0,
"up_down": "3d05h",
"state_pfxrcd": "62",
"route_identifier": "10.169.197.254",
"local_as": 65109,
"bgp_table_version": 263,
"routing_table_version": 263,
"attribute_entries": "106/104",
"prefixes": {
"total_entries": 126,
"memory_usage": 32256
},
"path": {
"total_entries": 189,
"memory_usage": 25704
},
"total_memory": 92688,
"activity_prefixes": "226/0",
"activity_paths": "4035/3696",
"scan_interval": 60,
"cache_entries": {
"route-map": {
"total_entries": 0,
"memory_usage": 0
},
"filter-list": {
"total_entries": 0,
"memory_usage": 0
}
},
"entries": {
"rrinfo": {
"total_entries": 1,
"memory_usage": 40
},
"AS-PATH": {
"total_entries": 2,
"memory_usage": 64
}
},
"community_entries": {
"total_entries": 102,
"memory_usage": 3248
}
}
}
},
"192.168.36.120": {
"address_family": {
"vpnv4": {
"version": 4,
"as": 65109,
"msg_rcvd": 9930,
"msg_sent": 9826,
"tbl_ver": 263,
"input_queue": 0,
"output_queue": 0,
"up_down": "3d02h",
"state_pfxrcd": "62",
"route_identifier": "10.169.197.254",
"local_as": 65109,
"bgp_table_version": 263,
"routing_table_version": 263,
"attribute_entries": "106/104",
"prefixes": {
"total_entries": 126,
"memory_usage": 32256
},
"path": {
"total_entries": 189,
"memory_usage": 25704
},
"total_memory": 92688,
"activity_prefixes": "226/0",
"activity_paths": "4035/3696",
"scan_interval": 60,
"cache_entries": {
"route-map": {
"total_entries": 0,
"memory_usage": 0
},
"filter-list": {
"total_entries": 0,
"memory_usage": 0
}
},
"entries": {
"rrinfo": {
"total_entries": 1,
"memory_usage": 40
},
"AS-PATH": {
"total_entries": 2,
"memory_usage": 64
}
},
"community_entries": {
"total_entries": 102,
"memory_usage": 3248
}
}
}
}
}
}
}
}
def test_show_ip_bgp_all_summary_empty(self):
    """Empty device output must raise SchemaEmptyParserError."""
    self.device1 = Mock(**self.empty_output)
    parser = ShowIpBgpAllSummary(device=self.device1)
    with self.assertRaises(SchemaEmptyParserError):
        parser.parse()
def test_show_ip_bgp_all_summary_golden1(self):
    """Golden output #1 must parse into the expected structure exactly."""
    self.maxDiff = None
    self.device = Mock(**self.golden_output1)
    parser = ShowIpBgpAllSummary(device=self.device)
    self.assertEqual(parser.parse(), self.golden_parsed_output1)
def test_show_ip_bgp_all_summary_golden2(self):
    """Golden output #2: multi-command flow (summary + running-config) routed
    through a command->output mapper instead of a single canned response."""
    def mapper(key):
        # dispatch each executed CLI command to its canned output
        return self.outputs[key]
    raw1 = '''\
address-family ipv4 vrf VRF1
bgp router-id 192.168.10.254
redistribute connected
redistribute static
neighbor 192.168.10.253 remote-as 65555
neighbor 192.168.10.253 timers 30 90 15
neighbor 192.168.10.253 activate
neighbor 192.168.10.253 as-override
neighbor 192.168.10.253 route-map prepend in
'''
    golden_output2 = '''\
PE1#show ip bgp vpnv4 all summary
Load for five secs: 1%/0%; one minute: 1%; five minutes: 1%
Time source is NTP, 05:46:49.882 EST Tue May 28 2019
BGP router identifier 10.169.197.254, local AS number 65109
BGP table version is 263, main routing table version 263
126 network entries using 32256 bytes of memory
189 path entries using 25704 bytes of memory
106/104 BGP path/bestpath attribute entries using 31376 bytes of memory
1 BGP rrinfo entries using 40 bytes of memory
2 BGP AS-PATH entries using 64 bytes of memory
102 BGP extended community entries using 3248 bytes of memory
0 BGP route-map cache entries using 0 bytes of memory
0 BGP filter-list cache entries using 0 bytes of memory
BGP using 92688 total bytes of memory
BGP activity 226/0 prefixes, 4035/3696 paths, scan interval 60 secs
Neighbor        V           AS MsgRcvd MsgSent   TblVer  InQ OutQ Up/Down  State/PfxRcd
192.168.10.253  4        65555   10112   10107      263    0    0 3d05h          13
192.168.10.253  4        65555       0       0        1    0    0 never    Idle
192.168.36.119  4        65109   10293   10213      263    0    0 3d05h          62
192.168.36.120  4        65109    9930    9826      263    0    0 3d02h          62
'''
    self.outputs = {}
    self.maxDiff = None
    self.outputs['show ip bgp vpnv4 all summary'] = golden_output2
    self.outputs['show run | sec address-family ipv4 vrf'] = raw1
    self.outputs['show run | sec address-family ipv6 vrf'] = ''
    self.device.execute = Mock()
    self.device.execute.side_effect = mapper
    obj = ShowIpBgpAllSummary(device=self.device)
    parsed_output = obj.parse(address_family='vpnv4')
    self.assertEqual(parsed_output, self.golden_parsed_output2)
def test_show_ip_bgp_all_summary_golden3(self):
def mapper(key):
return self.outputs[key]
raw1 = '''
[2019-06-05 09:46:16,345] +++ R1_xe: executing command 'show ip bgp all summary' +++
show ip bgp all summary
For address family: IPv4 Unicast
BGP router identifier 10.4.1.1, local AS number 65000
BGP table version is 4, main routing table version 4
3 network entries using 744 bytes of memory
3 path entries using 408 bytes of memory
3/3 BGP path/bestpath attribute entries using 840 bytes of memory
2 BGP extended community entries using 500 bytes of memory
0 BGP route-map cache entries using 0 bytes of memory
0 BGP filter-list cache entries using 0 bytes of memory
BGP using 2492 total bytes of memory
BGP activity 12/0 prefixes, 12/0 paths, scan interval 60 secs
Neighbor V AS MsgRcvd MsgSent | |
<reponame>Wouter-Bekker-AI/yolov5-dataset-builder<filename>scripts/Helper.py
import cv2 as cv
import shutil
import numpy as np
import os
import pandas as pd
from tqdm import tqdm
import random
import time
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import skewnorm
from PIL import Image, ExifTags
import torch
from pathlib import Path
def read_label(label_path):
    """Return every line of a YOLO label file, newlines included."""
    with open(label_path) as handle:
        return handle.readlines()
def rescale_frame(frame, x=640, y=640):
    """Resize *frame* to (x, y) pixels using area interpolation."""
    return cv.resize(frame, (x, y), interpolation=cv.INTER_AREA)
def draw_box(image, label_filepath, normalized=True):
    """Draw the bounding boxes from a YOLO label file onto *image* (in place).

    Label lines are 'class x y w h [conf]'. With normalized=True the
    coordinates are treated as fractions of the image size; otherwise as
    absolute pixels. Returns the annotated image.
    """
    lines = read_label(label_filepath)
    new = []
    for line in lines:
        new.append(line.split(' '))
    # .shape[0] = Height
    # .shape[1] = Width
    # .shape[2] = Colour Channels
    for line in new:
        # a sixth column means a detector confidence is present
        if len(line) == 6:
            conf = float(line[5])
            conf = (round(conf * 100)) / 100  # round to two decimals
        else:
            conf = 0
        if normalized:
            # convert normalized center/size to pixel coordinates
            x = float(line[1]) * image.shape[1]
            y = float(line[2]) * image.shape[0]
            w = float(line[3]) * image.shape[1]
            h = float(line[4]) * image.shape[0]
        else:
            x = float(line[1])
            y = float(line[2])
            w = float(line[3])
            h = float(line[4])
        # center/size -> corner coordinates
        x1 = round(x - (w / 2))
        y1 = round(y - (h / 2))
        x2 = round(x + (w / 2))
        y2 = round(y + (h / 2))
        # b g r
        cv.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), thickness=2)
        font = cv.FONT_HERSHEY_SIMPLEX
        if conf == 0:
            image = cv.putText(image, line[0], (x1, y1), font, 0.75, (0, 0, 255), thickness=2)
        else:  # add conf to label if available
            image = cv.putText(image, f"{line[0]} {conf}", (x1, y1), font, 0.75, (0, 0, 255), thickness=2)
    return image
def empty_folder(folder):
    """Delete every entry inside *folder* (files and subtrees); the folder itself survives.

    Assumes *folder* ends with a path separator, as used throughout this module.
    """
    for entry in tqdm(os.listdir(folder), desc=f"Emptying {folder}..."):
        target = f"{folder}{entry}"
        try:
            shutil.rmtree(target)   # handles directories
        except OSError:
            os.remove(target)       # fall back for plain files
def integrity_check(images_folder, labels_folder):
    """Cross-validate a YOLO dataset so images and labels match one-to-one.

    Steps:
      1. Re-encode every non-.jpg image as .jpg.
      2. Delete image/label pairs whose label is empty, duplicated, or missing.
      3. Delete labels that have no matching image.
      4. Normalize trailing-space line endings and drop pairs with values > 1.

    Both folder arguments are expected to end with a path separator.
    """
    for filename in tqdm(os.listdir(images_folder), desc='Converting all images to .jpg'):
        filepath = images_folder + filename
        base_name = os.path.splitext(os.path.basename(filepath))[0]
        extension = os.path.splitext(os.path.basename(filepath))[1]
        if extension != '.jpg':
            img = cv.imread(filepath)
            os.remove(filepath)
            cv.imwrite(f"{images_folder}{base_name}.jpg", img, [int(cv.IMWRITE_JPEG_QUALITY), 100])
    print('\nChecking if there is a label for every image')  # Check if there is a label for every image
    for filename in tqdm(os.listdir(images_folder)):
        base_name = os.path.splitext(os.path.basename(filename))[0]
        # BUGFIX: the image path contained a corrupted "(unknown)" placeholder;
        # rebuild it from the actual file name being iterated
        image_filepath = f"{images_folder}{filename}"
        label_filepath = f"{labels_folder}{base_name}.txt"
        try:
            with open(label_filepath) as f:
                lines = f.readlines()
            if len(lines) < 1:  # empty file
                print(
                    f"\nFile -{labels_folder}{base_name}.txt- is empty."
                    f" Deleting this file and image at {image_filepath}")
                os.remove(image_filepath)
                os.remove(label_filepath)
            if len(np.unique(lines)) < len(lines):  # duplicate rows todo NEED SOME FIXING LOOK AT 010000096.txt
                print(
                    f"\nFile -{label_filepath}- has duplicate lines."
                    f" Deleting this file and image at {image_filepath}")
                os.remove(image_filepath)
                os.remove(label_filepath)
        except Exception as e:
            print(f"\nFile -{label_filepath}- not found. Deleting image at {image_filepath}. Exception is {e}")
            os.remove(image_filepath)
    print('\nChecking if there is a image for every label')  # Check if there is a image for every label
    for filename in tqdm(os.listdir(labels_folder)):
        base_name = os.path.splitext(os.path.basename(filename))[0]
        image_filepath = f"{images_folder}{base_name}.jpg"
        # BUGFIX: corrupted "(unknown)" placeholder -> real label file name
        label_filepath = f"{labels_folder}{filename}"
        if not os.path.isfile(image_filepath):
            print(f"\nImage at -{image_filepath}- not found. Deleting label at -{label_filepath}")
            os.remove(label_filepath)
    print('\nChecking label format')
    for filename in tqdm(os.listdir(labels_folder)):  # remove space at the end of a line
        base_name = os.path.splitext(os.path.basename(filename))[0]
        # BUGFIX: corrupted "(unknown)" placeholder -> real label file name
        filepath = f"{labels_folder}{filename}"
        with open(filepath, 'r+') as f:
            lines = f.readlines()
        new_lines = []
        error_found = 0
        # BUGFIX: the original kept a counter i that was never incremented, so
        # the "last line" branch only fired for single-line files; enumerate
        # gives the real position
        for position, line in enumerate(lines, start=1):
            if line.endswith(" \n"):
                error_found = 1
                if position == len(lines):
                    line = line.replace(' \n', '')  # last line: drop the newline too
                else:
                    line = line.replace(' \n', '\n')
                print(f"\nFound a double space in {filepath} and fixed it")
            new_lines.append(line)
        if error_found == 1:
            os.remove(filepath)
            with open(filepath, 'a+') as g:  # write the correct file
                for line in new_lines:
                    g.write(line)
        for test_line in new_lines:  # check if the label is correct and if not then delete image and label
            test_line = test_line.split(' ')
            for item in test_line:
                try:
                    # NOTE(review): this also flags class ids > 1, which would
                    # delete valid multi-class labels — confirm single-class intent
                    if float(item) > 1:
                        print(f"\nError in {filepath}")
                        print(f"\nLine is {test_line}")
                        print(f"\nDeleted both the label file and image file")
                        os.remove(filepath)
                        os.remove(f"{images_folder}{base_name}.jpg")
                except Exception as e:
                    print(f"\nCan't fix {filepath} the item is {item} and test_line is {test_line}."
                          f" Exception is {e}")
                    # BUGFIX: images are stored as .jpg (see conversion pass); '.jpeg' never matched
                    os.remove(filepath)
                    os.remove(f"{images_folder}{base_name}.jpg")
def label_images(images_folder, labels_folder, labeled_images_folder):
    """Render every labeled image with its boxes drawn, into labeled_images_folder.

    The output folder is emptied first; images without a label file are skipped.
    All folder arguments are expected to end with a path separator.
    """
    empty_folder(labeled_images_folder)
    for filename in tqdm(os.listdir(images_folder), desc='Labeling Images...'):
        base_name = os.path.splitext(os.path.basename(filename))[0]
        # BUGFIX: the image path contained a corrupted "(unknown)" placeholder;
        # rebuild it from the file name being iterated
        image_filepath = f"{images_folder}{filename}"
        label_filepath = f"{labels_folder}{base_name}.txt"
        if os.path.isfile(label_filepath):
            img = cv.imread(image_filepath)
            img = draw_box(img, label_filepath)
            # imwrite into the target folder, then restore the working directory
            basedir = os.getcwd()
            os.chdir(labeled_images_folder)
            cv.imwrite(base_name + '.jpg', img, [int(cv.IMWRITE_JPEG_QUALITY), 100])
            os.chdir(basedir)
def rename(images_folder, labels_folder, name):
    """Rename every image/label pair to '<name>_<i><ext>' / '<name>_<i>.txt'.

    Assumes both folder paths end with a path separator (this module's
    convention) and that every image has a matching .txt label.
    """
    for i, filename in enumerate(os.listdir(images_folder)):
        base_name, extension = os.path.splitext(filename)
        # rename with full paths instead of the original chdir() juggling,
        # which was fragile (an exception would leave the CWD changed)
        os.rename(f"{images_folder}{filename}", f"{images_folder}{name}_{i}{extension}")
        os.rename(f"{labels_folder}{base_name}.txt", f"{labels_folder}{name}_{i}.txt")
def check_resolution(folder, w_threshold=320, h_threshold=180, less_more='less',
                     show=False, rescale_save=False, delete=False):
    """Scan a folder for images smaller/larger than a threshold resolution.

    less_more='less' flags images below either threshold; anything else flags
    images above. Matches can optionally be shown, rescaled in place to the
    threshold, or deleted. Prints a summary count at the end.
    """
    # or_and = 'or' todo implement or_and
    t0 = time.time()
    count = 0
    found = False
    print(f"\nChecking image resolution...")
    for image in tqdm(os.listdir(folder)):
        # base_name = os.path.splitext(os.path.basename(image))[0]
        image_filepath = f"{folder}{image}"
        # label_filepath = f"More weapons/Labels/{base_name}.txt"
        img = cv.imread(image_filepath)
        # shape[1] is width, shape[0] is height; start from the current size
        w_scale = img.shape[1]
        h_scale = img.shape[0]
        if less_more == 'less':
            if img.shape[1] < w_threshold or img.shape[0] < h_threshold:
                found = True
                # clamp each offending dimension up to its threshold
                if img.shape[1] < w_threshold:
                    w_scale = w_threshold
                else:
                    w_scale = img.shape[1]
                if img.shape[0] < h_threshold:
                    h_scale = h_threshold
                else:
                    h_scale = img.shape[0]
        else:
            if img.shape[1] > w_threshold or img.shape[0] > h_threshold:
                found = True
                # clamp each offending dimension down to its threshold
                if img.shape[1] > w_threshold:
                    w_scale = w_threshold
                else:
                    w_scale = img.shape[1]
                if img.shape[0] > h_threshold:
                    h_scale = h_threshold
                else:
                    h_scale = img.shape[0]
        if found:
            count += 1
            found = False  # reset for the next image
            if show:
                cv.imshow(f"{image} has Width: {img.shape[1]} Height: {img.shape[0]}", img)
                cv.waitKey(0)
                cv.destroyAllWindows()
            # if count == 10:
            #     break
            if rescale_save:
                img = rescale_frame(img, w_scale, h_scale)
                cv.imwrite(image_filepath, img, [int(cv.IMWRITE_JPEG_QUALITY), 100])
            if delete:
                os.remove(image_filepath)
    t1 = time.time()
    print(f"Found {count} images in {t1 - t0} sec.")
def plot_distribution(target_folder):
    """Plot the distribution of detection confidences from a folder of labels.

    Reads the 6th column of every label line (assumes 'class x y w h conf'
    format — TODO confirm all labels carry a confidence), then shows a
    histogram with a fitted skew-normal curve plus PDF/CDF overlays.
    """
    dist = []
    for label in tqdm(os.listdir(target_folder)):
        label_filepath = f"{target_folder}{label}"
        lines = read_label(label_filepath)
        split = []
        for line in lines:
            split.append(line.split(' '))
        for part in split:
            dist.append(part[5])
    sns.set_style('white')
    dist = np.array(dist)
    # BUGFIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement
    dist = dist.astype(float)
    print(len(dist))
    count, bins_count = np.histogram(dist, bins=10)
    # finding the PDF of the histogram using count values
    pdf = count / sum(count)
    # using numpy np.cumsum to calculate the CDF
    # We can also find using the PDF values by looping and adding
    cdf = np.cumsum(pdf)
    # plotting PDF and CDF
    plt.plot(bins_count[1:] * 100 - 8, pdf / 10, color="red", label="PDF")
    plt.plot(bins_count[1:] * 100 - 8, cdf / 10, label="CDF")
    plt.legend()
    dist = dist * 100  # work in percent for the axis labels below
    sns.distplot(dist, kde_kws={"color": "lime"}, fit=skewnorm, bins=90)
    plt.xticks(list(range(0, 102, 2)))
    plt.ylim(0, 0.02)
    plt.xlim(30, 70)
    for xc in range(101):
        plt.axvline(x=xc, color='black', alpha=0.1)
    plt.xlabel('% Certainty')
    plt.show()
def video_to_frames(video_path = f"data/videos/Hunting Trespasser gets Painted.mp4",
                    video_labels = f"data/videos/video_labels/", video_frames = f"data/videos/video_frames/",
                    labeled_video_frames = f"More weapons/videos/labeled_video_frames/", show = False,
                    check = True, label = True):
    """Split a video into grayscale JPEG frames, one file per frame.

    Args:
        video_path: source video file.
        video_labels: folder of per-frame label files (used by check/label).
        video_frames: output folder for the extracted frames (emptied first).
        labeled_video_frames: output folder for labeled frames (label step).
        show: display each frame with a frame counter overlay; 'q' quits.
        check: run integrity_check(video_frames, video_labels) afterwards.
        label: run label_images(...) afterwards.
    """
    empty_folder(video_frames)
    frame_n = 1
    cap = cv.VideoCapture(video_path)
    max_frames = int(cap.get(cv.CAP_PROP_FRAME_COUNT))
    while cap.isOpened():
        print(f"Processing frame {frame_n}/{max_frames}")
        ret, frame = cap.read()
        # if frame is read correctly ret is True
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        # FIX: write with a full path instead of os.chdir-ing into the output
        # folder and back — os.chdir mutates process-global state and breaks
        # any concurrent relative-path use
        cv.imwrite(os.path.join(video_frames, f"{frame_n}.jpg"), frame, [int(cv.IMWRITE_JPEG_QUALITY), 100])
        if show:
            font = cv.FONT_HERSHEY_SIMPLEX
            # NOTE(review): frame is single-channel here, so the BGR red
            # (0, 0, 255) renders as gray — confirm whether the overlay
            # should be drawn before the grayscale conversion
            frame = cv.putText(frame, f"{str(frame_n)}/{max_frames}", (5, 25), font, 1, (0, 0, 255), thickness=2)
            cv.imshow('frame', frame)
            if cv.waitKey(1) == ord('q'):
                break
        frame_n += 1
    cap.release()
    cv.destroyAllWindows()
    if check:
        integrity_check(video_frames, video_labels)
    if label:
        label_images(video_frames, video_labels, labeled_video_frames)
def check_bbox_size(images_folder=f"data/Images/", labels_folder=f"data/Labels/", show=False, less_more='less',
                    w_ratio=0.0, h_ratio=0.0):
    """Count images whose label boxes are smaller/larger than given ratios.

    Args:
        images_folder: folder of images (trailing separator expected).
        labels_folder: folder of matching <basename>.txt label files.
        show: display each matching image with its boxes drawn.
        less_more: 'less' counts boxes below the thresholds, anything else
            counts boxes above them.
        w_ratio: width threshold (field 3 of each label line); 0 disables.
        h_ratio: height threshold (field 4 of each label line); 0 disables.
    """
    print('\nScanning')
    i = 0
    for filename in tqdm(os.listdir(images_folder)):
        base_name = os.path.splitext(os.path.basename(filename))[0]
        # BUG FIX: the path was the literal f"{images_folder}(unknown)",
        # which can never exist; it must reference the current image file
        image_filepath = f"{images_folder}{filename}"
        label_filepath = f"{labels_folder}{base_name}.txt"
        found = False
        lines = read_label(label_filepath)
        boxes = [line.split(' ') for line in lines]
        # box[3] = Width, box[4] = Height
        for box in boxes:
            if w_ratio != 0:
                if less_more == 'less':
                    if float(box[3]) < w_ratio:
                        found = True
                else:
                    if float(box[3]) > w_ratio:
                        found = True
            if h_ratio != 0:
                if less_more == 'less':
                    if float(box[4]) < h_ratio:
                        found = True
                else:
                    if float(box[4]) > h_ratio:
                        found = True
        if found:
            i += 1
            if show:
                img = cv.imread(image_filepath)
                img = draw_box(img, label_filepath)
                cv.imshow('1', img)
                cv.waitKey(0)
    if less_more == 'less':
        print(f"found a total of {i} files with x < {w_ratio} and y < {h_ratio}")
    else:
        print(f"found a total of {i} files with x > {w_ratio} and y > {h_ratio}")
# Locate the numeric EXIF tag id whose name is 'Orientation'; the loop
# leaves that id bound in `orientation` for later EXIF lookups.
for orientation in ExifTags.TAGS:
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
# Returns exif-corrected PIL | |
def forward(self, input_token, target_token, timestep, *inputs):
        """
        Decoder step inputs correspond one-to-one to encoder outputs.

        Runs one decoding step for every model in the ensemble, averages
        their log-probabilities, and returns the averaged score of
        ``target_token`` followed by all recurrent state tensors needed by
        the next step. Also records ``self.input_names`` /
        ``self.output_names`` for export bookkeeping.

        Args:
            input_token: previous output token; used as
                ``input_token.view(1, 1)``.
            target_token: token id(s) whose averaged log-probability is
                returned as the score.
            timestep: current decoding step (not used in the computation
                here; part of the exported interface).
            *inputs: flat sequence of per-model encoder outputs, then the
                optional possible_translation_tokens, then per model: one
                (hidden, cell) pair per decoder layer, the input feed, and
                optionally the two precomputed reduced output weights.
        """
        log_probs_per_model = []
        state_outputs = []
        # index into *inputs just past the per-model encoder outputs
        next_state_input = len(self.models)
        # underlying assumption is each model has same vocab_reduction_module
        vocab_reduction_module = self.models[0].decoder.vocab_reduction_module
        if vocab_reduction_module is not None:
            possible_translation_tokens = inputs[len(self.models)]
            next_state_input += 1
        else:
            possible_translation_tokens = None
        for i, model in enumerate(self.models):
            encoder_output = inputs[i]
            prev_hiddens = []
            prev_cells = []
            # consume one (hidden, cell) pair per decoder layer
            for _ in range(len(model.decoder.layers)):
                prev_hiddens.append(inputs[next_state_input])
                prev_cells.append(inputs[next_state_input + 1])
                next_state_input += 2
            prev_input_feed = inputs[next_state_input].view(1, -1)
            next_state_input += 1
            if (
                self.enable_precompute_reduced_weights
                and hasattr(model.decoder, "_precompute_reduced_weights")
                and possible_translation_tokens is not None
            ):
                # (output_projection_w, output_projection_b)
                reduced_output_weights = inputs[next_state_input : next_state_input + 2]
                next_state_input += 2
            else:
                reduced_output_weights = None
            # no batching, we only care about "max" length
            src_length_int = int(encoder_output.size()[0])
            src_length = torch.LongTensor(np.array([src_length_int]))
            # notional, not actually used for decoder computation
            src_tokens = torch.LongTensor(np.array([[0] * src_length_int]))
            src_embeddings = encoder_output.new_zeros(encoder_output.shape)
            encoder_out = (
                encoder_output,
                prev_hiddens,
                prev_cells,
                src_length,
                src_tokens,
                src_embeddings,
            )
            # store cached states, use evaluation mode
            model.decoder._is_incremental_eval = True
            model.eval()
            # placeholder
            incremental_state = {}
            # cache previous state inputs
            utils.set_incremental_state(
                model.decoder,
                incremental_state,
                "cached_state",
                (prev_hiddens, prev_cells, prev_input_feed),
            )
            decoder_output = model.decoder(
                input_token.view(1, 1),
                encoder_out,
                incremental_state=incremental_state,
                possible_translation_tokens=possible_translation_tokens,
            )
            logits, _, _ = decoder_output
            log_probs = F.log_softmax(logits, dim=2)
            log_probs_per_model.append(log_probs)
            # pull the updated recurrent state back out for the caller
            (next_hiddens, next_cells, next_input_feed) = utils.get_incremental_state(
                model.decoder, incremental_state, "cached_state"
            )
            for h, c in zip(next_hiddens, next_cells):
                state_outputs.extend([h, c])
            state_outputs.append(next_input_feed)
            if reduced_output_weights is not None:
                state_outputs.extend(reduced_output_weights)
        # ensemble by averaging the per-model log-probabilities
        average_log_probs = torch.mean(
            torch.cat(log_probs_per_model, dim=0), dim=0, keepdim=True
        )
        if possible_translation_tokens is not None:
            # map full-vocab target ids into the reduced vocabulary; ids
            # outside the reduced set fall back to the unk-token row
            reduced_indices = torch.zeros(self.vocab_size).long().fill_(self.unk_token)
            # ONNX-exportable arange (ATen op)
            possible_translation_token_range = torch._dim_arange(
                like=possible_translation_tokens, dim=0
            )
            reduced_indices[
                possible_translation_tokens
            ] = possible_translation_token_range
            reduced_index = reduced_indices.index_select(dim=0, index=target_token)
            score = average_log_probs.view((-1,)).index_select(
                dim=0, index=reduced_index
            )
        else:
            score = average_log_probs.view((-1,)).index_select(
                dim=0, index=target_token
            )
        word_reward = self.word_rewards.index_select(0, target_token)
        score += word_reward
        # record flat input/output names for export bookkeeping
        self.input_names = ["prev_token", "target_token", "timestep"]
        for i in range(len(self.models)):
            self.input_names.append(f"fixed_input_{i}")
        if possible_translation_tokens is not None:
            self.input_names.append("possible_translation_tokens")
        outputs = [score]
        self.output_names = ["score"]
        for i in range(len(self.models)):
            self.output_names.append(f"fixed_input_{i}")
            outputs.append(inputs[i])
        if possible_translation_tokens is not None:
            self.output_names.append("possible_translation_tokens")
            outputs.append(possible_translation_tokens)
        for i, state in enumerate(state_outputs):
            outputs.append(state)
            self.output_names.append(f"state_output_{i}")
            self.input_names.append(f"state_input_{i}")
        return tuple(outputs)
class CharSourceEncoderEnsemble(nn.Module):
    """Runs the encoders of several character-source models concurrently
    (via torch.jit fork/wait) and returns their outputs, the optional
    reduced-vocabulary tokens, and the initial decoder states as one flat
    tuple suitable for export."""

    def __init__(self, models, src_dict=None):
        """
        Args:
            models: list of translation models; each is prepared for ONNX
                export and registered as a submodule.
            src_dict: optional source dictionary, stored as-is.
        """
        super().__init__()
        self.models = models
        self.src_dict = src_dict
        for i, model in enumerate(self.models):
            model.prepare_for_onnx_export_()
            self._modules[f"model_{i}"] = model
        # reduced-weight precomputation is disabled by default
        self.enable_precompute_reduced_weights = False

    def forward(self, src_tokens, src_lengths, char_inds, word_lengths):
        """Encode the source with every model in parallel.

        Returns a flat tuple: one encoder output per model, then (when
        vocab reduction is configured) possible_translation_tokens, then
        the initial decoder states. Records self.output_names to match.
        """
        outputs = []
        output_names = []
        states = []
        # (seq_length, batch_size) for compatibility with Caffe2
        src_tokens_seq_first = src_tokens.t()
        futures = []
        for model in self.models:
            # evaluation mode
            model.eval()
            futures.append(
                torch.jit._fork(
                    model.encoder,
                    src_tokens_seq_first,
                    src_lengths,
                    char_inds,
                    word_lengths,
                )
            )
        # underlying assumption is each model has same vocab_reduction_module
        vocab_reduction_module = self.models[0].decoder.vocab_reduction_module
        possible_translation_tokens = None
        if vocab_reduction_module is not None:
            possible_translation_tokens = vocab_reduction_module(
                src_tokens=src_tokens, decoder_input_tokens=None
            )
        # Precompute reduced decoder weight matrices.
        # Once we have possible_translation_tokens, we need to gather rows
        # out of each output_projection_{w,b} tensor for the decoders to
        # use. We do it here because these reduced matrices are used on each
        # step of the beam search, and this turns out to be a relatively
        # expensive operation.
        reduced_weights = {}
        for i, model in enumerate(self.models):
            if (
                self.enable_precompute_reduced_weights
                and hasattr(model.decoder, "_precompute_reduced_weights")
                and possible_translation_tokens is not None
            ):
                reduced_weights[i] = torch.jit._fork(
                    model.decoder._precompute_reduced_weights,
                    possible_translation_tokens,
                )
        # XXX: This loop is where we wait() for each encoder's output to be
        # ready. If you're trying to add more ops, they should probably not
        # go in this loop!
        for i, (model, future) in enumerate(zip(self.models, futures)):
            encoder_out = torch.jit._wait(future)
            # "primary" encoder output (vector representations per source token)
            encoder_outputs = encoder_out[0]
            outputs.append(encoder_outputs)
            output_names.append(f"encoder_output_{i}")
            if hasattr(model.decoder, "_init_prev_states"):
                states.extend(model.decoder._init_prev_states(encoder_out))
            if (
                self.enable_precompute_reduced_weights
                and hasattr(model.decoder, "_precompute_reduced_weights")
                and possible_translation_tokens is not None
            ):
                states.extend(torch.jit._wait(reduced_weights[i]))
        if possible_translation_tokens is not None:
            outputs.append(possible_translation_tokens)
            output_names.append("possible_translation_tokens")
        for i, state in enumerate(states):
            outputs.append(state)
            output_names.append(f"initial_state_{i}")
        self.output_names = output_names
        return tuple(outputs)

    @classmethod
    def build_from_checkpoints(
        cls,
        checkpoint_filenames,
        src_dict_filename,
        dst_dict_filename,
        lexical_dict_paths=None,
    ):
        """Load models and the source dictionary from checkpoint files and
        wrap them in an ensemble."""
        models, src_dict, _ = load_models_from_checkpoints(
            checkpoint_filenames,
            src_dict_filename,
            dst_dict_filename,
            lexical_dict_paths,
        )
        return cls(models, src_dict=src_dict)
class BeamSearchAndDecode(torch.jit.ScriptModule):
    """
    Combines the functionality of BeamSearch and BeamDecode
    """

    def __init__(
        self,
        models,
        tgt_dict,
        src_tokens,
        src_lengths,
        eos_token_id,
        length_penalty,
        nbest,
        beam_size,
        stop_at_eos,
        word_reward=0,
        unk_reward=0,
        quantize=False,
    ):
        """Build the beam-search + decode pipeline.

        src_tokens / src_lengths are example inputs used when constructing
        the underlying BeamSearch module; the remaining arguments are
        forwarded to BeamSearch and BeamDecode respectively.
        """
        super().__init__()
        self.beam_search = BeamSearch(
            models,
            tgt_dict,
            src_tokens,
            src_lengths,
            beam_size,
            word_reward,
            unk_reward,
            quantize,
        )
        self.beam_decode = BeamDecode(
            eos_token_id, length_penalty, nbest, beam_size, stop_at_eos
        )
        # export bookkeeping: names of the flat inputs/outputs
        self.input_names = [
            "src_tokens",
            "src_lengths",
            "prev_token",
            "prev_scores",
            "attn_weights",
            "prev_hypos_indices",
            "num_steps",
        ]
        self.output_names = [
            "beam_output",
            "hypothesis_score",
            "token_level_scores",
            "back_alignment_weights",
            "best_indices",
        ]

    @torch.jit.script_method
    def forward(
        self,
        src_tokens: torch.Tensor,
        src_lengths: torch.Tensor,
        prev_token: torch.Tensor,
        prev_scores: torch.Tensor,
        attn_weights: torch.Tensor,
        prev_hypos_indices: torch.Tensor,
        num_steps: int,
    ) -> List[Tuple[Tensor, float, List[float], Tensor, Tensor]]:
        # run the beam search, then decode its flat outputs into hypotheses
        beam_search_out = self.beam_search(
            src_tokens,
            src_lengths,
            prev_token,
            prev_scores,
            attn_weights,
            prev_hypos_indices,
            num_steps,
        )
        all_tokens, all_scores, all_weights, all_prev_indices = beam_search_out
        # NOTE: this annotated empty list is immediately overwritten below;
        # it only pins the TorchScript type of `outputs`
        outputs = torch.jit.annotate(
            List[Tuple[Tensor, float, List[float], Tensor, Tensor]], []
        )
        outputs = self.beam_decode(
            all_tokens, all_scores, all_weights, all_prev_indices, num_steps
        )
        return outputs

    @classmethod
    def build_from_checkpoints(
        cls,
        checkpoint_filenames,
        src_dict_filename,
        dst_dict_filename,
        beam_size,
        length_penalty,
        nbest,
        word_reward=0,
        unk_reward=0,
        lexical_dict_paths=None,
    ):
        """Load checkpoint models and build a quantized module using a
        dummy length-10 source input, stopping at EOS."""
        length = 10
        models, _, tgt_dict = load_models_from_checkpoints(
            checkpoint_filenames,
            src_dict_filename,
            dst_dict_filename,
            lexical_dict_paths,
        )
        # dummy all-ones source used only for module construction
        src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64"))
        src_lengths = torch.IntTensor(np.array([length], dtype="int32"))
        eos_token_id = tgt_dict.eos()
        return cls(
            models,
            tgt_dict,
            src_tokens,
            src_lengths,
            eos_token_id,
            length_penalty=length_penalty,
            nbest=nbest,
            beam_size=beam_size,
            stop_at_eos=True,
            word_reward=word_reward,
            unk_reward=unk_reward,
            quantize=True,
        )

    def save_to_pytorch(self, output_path):
        """Serialize the scripted module to *output_path*, packing any
        quantized submodules before saving and unpacking afterwards."""
        def pack(s):
            if hasattr(s, "_pack"):
                s._pack()

        def unpack(s):
            if hasattr(s, "_unpack"):
                s._unpack()

        self.apply(pack)
        torch.jit.save(self, output_path)
        self.apply(unpack)
@torch.jit.script
def finalize_hypos_loop_tokens(
    finalized_tokens_list: List[Tensor],
    finalized_idxs,
    pad_idx: int,
    finalized_tokens,
    finalized_scores,
):
    """Strip pad positions from each finalized token row and store it in
    finalized_tokens_list at the index given by finalized_idxs[i].

    finalized_scores is unused here; the signature is kept parallel with
    the other finalize_hypos_loop_* helpers.
    """
    for i in range(finalized_idxs.size(0)):
        # keep only the non-pad positions of this hypothesis
        cutoff = finalized_tokens[i].ne(pad_idx)
        tokens = finalized_tokens[i][cutoff]
        finalized_tokens_list[finalized_idxs[i]] = tokens
    return finalized_tokens_list
@torch.jit.script
def finalize_hypos_loop_scores(
    finalized_scores_list: List[Tensor],
    finalized_idxs,
    pad_idx: int,
    finalized_tokens,
    finalized_scores,
):
    """Store each hypothesis' non-pad scores in finalized_scores_list at
    the index given by finalized_idxs[i].

    finalized_tokens is unused here; the signature is kept parallel with
    the other finalize_hypos_loop_* helpers.
    """
    for i in range(finalized_idxs.size(0)):
        # NOTE(review): the mask compares the scores themselves against
        # pad_idx (not the tokens) — confirm this is intended
        cutoff = finalized_scores[i].ne(pad_idx)
        scores = finalized_scores[i][cutoff]
        finalized_scores_list[finalized_idxs[i]] = scores
    return finalized_scores_list
@torch.jit.script
def finalize_hypos_loop_attns(
    finalized_attns_list: List[Tensor],
    finalized_alignments_list: List[Tensor],
    finalized_idxs,
    pad_idx: int,
    finalized_tokens,
    finalized_scores,
    finalized_attn,
):
    """Store each hypothesis' attention rows (restricted to non-pad token
    positions) and the corresponding argmax alignments in the two output
    lists at the index given by finalized_idxs[i].

    finalized_scores is unused here; the signature is kept parallel with
    the other finalize_hypos_loop_* helpers.
    """
    for i in range(finalized_idxs.size(0)):
        cutoff = finalized_tokens[i].ne(pad_idx)
        hypo_attn = finalized_attn[i][cutoff]
        # hard alignment: index of the max attention value along dim 1
        alignment = hypo_attn.max(dim=1)[1]
        finalized_attns_list[finalized_idxs[i]] = hypo_attn
        finalized_alignments_list[finalized_idxs[i]] = alignment
    return finalized_attns_list, finalized_alignments_list
class IterativeRefinementGenerateAndDecode(torch.jit.ScriptModule):
    """Scripted wrapper around IterativeRefinementGenerator that traces the
    generator on a small dummy input and returns, per sentence, a
    (tokens, score, attention) tuple."""

    def __init__(self, models, tgt_dict, max_iter=1, quantize=True, check_trace=True):
        """Build (and by default dynamically quantize) the traced generator.

        The trace uses a fixed dummy source ([[4, 2]] with length 2).
        """
        super().__init__()
        src_tokens = torch.tensor([[4, 2]])
        src_lengths = torch.tensor([2])
        self.models = models
        generator = IterativeRefinementGenerator(
            self.models, tgt_dict, max_iter=max_iter
        )
        if quantize:
            # in-place dynamic quantization of all Linear layers
            generator = torch.quantization.quantize_dynamic(
                generator, {torch.nn.Linear}, dtype=torch.qint8, inplace=True
            )
        enc_inputs = (src_tokens, src_lengths)
        self.generator = torch.jit.trace(
            generator, enc_inputs, _force_outplace=True, check_trace=check_trace
        )

    @torch.jit.script_method
    def forward(
        self, src_tokens: torch.Tensor, src_lengths: torch.Tensor
    ) -> List[Tuple[Tensor, float, Tensor]]:
        # transpose src_tokens before calling the traced generator and cast
        # each (tokens, score, attention) triple to export-friendly types
        return [
            (x.long(), float(y), at)
            for x, y, at in list(self.generator(src_tokens.t(), src_lengths))
        ]

    def save_to_pytorch(self, output_path):
        """Serialize the scripted module to *output_path*, packing any
        quantized submodules before saving and unpacking afterwards."""
        def pack(s):
            if hasattr(s, "_pack"):
                s._pack()

        def unpack(s):
            if hasattr(s, "_unpack"):
                s._unpack()

        self.apply(pack)
        torch.jit.save(self, output_path)
        self.apply(unpack)

    @classmethod
    def build_from_checkpoints(
        cls,
        checkpoint_filenames,
        src_dict_filename,
        tgt_dict_filename,
        lexical_dict_paths=None,
        max_iter=1,
    ):
        """Load checkpoint models and wrap them for generation."""
        models, _, tgt_dict = load_models_from_checkpoints(
            checkpoint_filenames,
            src_dict_filename,
            tgt_dict_filename,
            lexical_dict_paths,
        )
        return cls(models, tgt_dict=tgt_dict, max_iter=max_iter)
@torch.jit.script
def is_a_loop(pad_idx: int, x, y, s, a):
    """Pad the shorter of x/y (plus s and a alongside y) to equal length,
    then report which rows of x and y are identical.

    Returns (per-row equality mask, y, s, a) after padding. Token tensors
    are padded with pad_idx, scores with zeros; attention a is only padded
    when it is non-empty.
    """
    b, l_x, l_y = x.size(0), x.size(1), y.size(1)
    if l_x > l_y:
        # grow y/s (and a, if present) up to x's length
        y = torch.cat([y, torch.zeros([b, l_x - l_y]).to(y).fill_(pad_idx)], 1)
        s = torch.cat([s, torch.zeros([b, l_x - l_y]).to(s)], 1)
        if a.size()[0] > 0:
            a = torch.cat([a, torch.zeros([b, l_x - l_y, a.size(2)]).to(a)], 1)
    elif l_x < l_y:
        x = torch.cat([x, torch.zeros([b, l_y - l_x]).to(x).fill_(pad_idx)], 1)
    return (x == y).all(1), y, s, a
@torch.jit.script
def last_step(step: int, max_iter: int, terminated):
    """Force every hypothesis to terminated when the last iteration is
    reached; otherwise return the mask unchanged (mutated in place)."""
    if step == max_iter:  # reach last iteration, terminate
        terminated.fill_(1)
    return terminated
class IterativeRefinementGenerator(nn.Module):
    """Generates translations by iteratively refining an initial output
    (non-autoregressive decoding); see generate() for the main loop."""

    def __init__(
        self,
        models,
        tgt_dict,
        eos_penalty=0.0,
        max_iter=2,
        max_ratio=2,
        decoding_format=None,
        retain_dropout=False,
        adaptive=True,
    ):
        """
        Generates translations based on iterative refinement.

        Args:
            models: list of models. Each is put in eval mode, prepared for
                ONNX export, and swapped for its student model when it
                exposes get_student_model().
            tgt_dict: target dictionary
            eos_penalty: stored for use during generation
            max_iter: maximum number of refinement iterations
            max_ratio: stored for use during generation
            decoding_format: stored for use during generation
            retain_dropout: stored for use during generation
            adaptive: stored for use during generation
        """
        super().__init__()
        self.models = models
        # special token ids and vocabulary size from the target dictionary
        self.bos = tgt_dict.bos()
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos()
        self.vocab_size = len(tgt_dict)
        self.eos_penalty = eos_penalty
        self.max_iter = max_iter
        self.max_ratio = max_ratio
        self.decoding_format = decoding_format
        self.retain_dropout = retain_dropout
        self.adaptive = adaptive
        for i, model in enumerate(self.models):
            model.prepare_for_onnx_export_()
            model.eval()
            # distilled models expose the smaller student network
            if hasattr(model, "get_student_model"):
                model = model.get_student_model()
                self.models[i] = model
            self._modules[f"model_{i}"] = model
    def forward(
        self, src_tokens: torch.Tensor, src_lengths: torch.Tensor
    ) -> Tuple[Tuple[Tensor, Tensor, Tensor]]:
        """Generate and return one (tokens, mean score, attention) triple
        per sentence.

        NOTE(review): the annotated return type looks like it should be a
        variadic/List of 3-tuples — confirm against the traced callers.
        """
        o1, o2, o3, _ = self.generate(self.models, src_tokens, src_lengths)
        return tuple((x, y.float().mean(), z) for x, y, z in zip(o1, o2, o3))
@torch.no_grad()
def generate(self, models, src_tokens, src_lengths, prefix_tokens=None):
# TODO: model ensemble
assert len(models) == 1, "only support single model"
model = models[0]
bsz, src_len = src_tokens.size()
sent_idxs = torch.arange(bsz)
# encoding
encoder_out = model.encoder(src_tokens, src_lengths)
# initialize buffers (very model specific, with length prediction or not)
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
finalized_tokens_list = [torch.tensor(0) for _ in range(bsz)]
finalized_scores_list = [torch.tensor(0) for | |
the attr
while not a == '':
a = a.split(":")
a, new = ':'.join(a[:-1]), a[-1]
attr = new
if k in form:
# If the form has the split key get its value
tainted_split_key = k
if should_be_tainted(k):
tainted_split_key = taint_string(k)
item = form[k]
if isinstance(item, record):
# if the value is mapped to a record, check if it
# has the attribute, if it has it, convert it to
# a tuple and set it
if hasattr(item, attr):
value = tuple(getattr(item, attr))
setattr(item, attr, value)
else:
# It is mapped to a list of records
for x in item:
# loop through the records
if hasattr(x, attr):
# If the record has the attribute
# convert it to a tuple and set it
value = tuple(getattr(x, attr))
setattr(x, attr, value)
# Do the same for the tainted counterpart
if tainted_split_key in taintedform:
tainted = taintedform[tainted_split_key]
if isinstance(item, record):
seq = tuple(getattr(tainted, attr))
setattr(tainted, attr, seq)
else:
for trec in tainted:
if hasattr(trec, attr):
seq = getattr(trec, attr)
seq = tuple(seq)
setattr(trec, attr, seq)
else:
# the form does not have the split key
tainted_key = key
if should_be_tainted(key):
tainted_key = taint_string(key)
if key in form:
# if it has the original key, get the item
# convert it to a tuple
item = form[key]
item = tuple(form[key])
form[key] = item
if tainted_key in taintedform:
tainted = tuple(taintedform[tainted_key])
taintedform[tainted_key] = tainted
if meth:
if 'PATH_INFO' in environ:
path = environ['PATH_INFO']
while path[-1:] == '/':
path = path[:-1]
else:
path = ''
other['PATH_INFO'] = path = "%s/%s" % (path, meth)
self._hacked_path = 1
def postProcessInputs(self):
"""Process the values in request.form to decode strings to unicode.
"""
for name, value in self.form.items():
self.form[name] = _decode(value, default_encoding)
    def resolve_url(self, url):
        """Resolve *url* into an object in the Zope namespace.

        The url must be fully qualified and lie within this request's
        namespace (it must start with self.script); otherwise ValueError
        is raised. Traversal is performed on a clone of this request so
        the current request/transaction is untouched. On traversal failure
        the clone's response exception handling is invoked and the original
        exception class is re-raised with the response's error message.
        If traversal returned a default object (e.g. index_html) rather
        than the object actually named by the url, the parent object is
        returned instead.
        """
        if url.find(self.script) != 0:
            raise ValueError('Different namespace.')
        # strip the script prefix and any leading/trailing slashes
        path = url[len(self.script):]
        while path and path[0] == '/':
            path = path[1:]
        while path and path[-1] == '/':
            path = path[:-1]
        req = self.clone()
        rsp = req.response
        req['PATH_INFO'] = path
        object = None
        # Try to traverse to get an object. Note that we call
        # the exception method on the response, but we don't
        # want to actually abort the current transaction
        # (which is usually the default when the exception
        # method is called on the response).
        try:
            object = req.traverse(path)
        except Exception as exc:
            rsp.exception()
            req.clear()
            raise exc.__class__(rsp.errmsg)
        # The traversal machinery may return a "default object"
        # like an index_html document. This is not appropriate
        # in the context of the resolve_url method so we need
        # to ensure we are getting the actual object named by
        # the given url, and not some kind of default object.
        if hasattr(object, 'id'):
            # id may be a callable or a plain attribute
            if callable(object.id):
                name = object.id()
            else:
                name = object.id
        elif hasattr(object, '__name__'):
            name = object.__name__
        else:
            name = ''
        if name != os.path.split(path)[-1]:
            # name mismatch: traversal hit a default object — use its parent
            object = req.PARENTS[0]
        req.clear()
        return object
def clone(self):
# Return a clone of the current request object
# that may be used to perform object traversal.
environ = self.environ.copy()
environ['REQUEST_METHOD'] = 'GET'
if self._auth:
environ['HTTP_AUTHORIZATION'] = self._auth
if self.response is not None:
response = self.response.__class__()
else:
response = None
clone = self.__class__(None, environ, response, clean=1)
clone['PARENTS'] = [self['PARENTS'][-1]]
directlyProvides(clone, *directlyProvidedBy(self))
return clone
def getHeader(self, name, default=None, literal=False):
"""Return the named HTTP header, or an optional default
argument or None if the header is not found. Note that
both original and CGI-ified header names are recognized,
e.g. 'Content-Type', 'CONTENT_TYPE' and 'HTTP_CONTENT_TYPE'
should all return the Content-Type header, if available.
"""
environ = self.environ
if not literal:
name = name.replace('-', '_').upper()
val = environ.get(name, None)
if val is not None:
return val
if name[:5] != 'HTTP_':
name = 'HTTP_%s' % name
return environ.get(name, default)
get_header = getHeader # BBB
    def get(self, key, default=None, returnTaints=0,
            URLmatch=re.compile('URL(PATH)?([0-9]+)$').match,
            BASEmatch=re.compile('BASE(PATH)?([0-9]+)$').match,
            ):
        """Get a variable value

        Return a value for the required variable name.
        The value will be looked up from one of the request data
        categories. The search order is environment variables,
        other variables, form data, and then cookies.

        Synthetic URLn/URLPATHn and BASEn/BASEPATHn keys are computed from
        the traversal steps and cached in ``other`` once publishing
        traversal is done. 'BODY'/'BODYFILE' expose the request body text
        and file. When returnTaints is true, tainted (suspect) form and
        cookie values are consulted before their untainted counterparts.
        """
        other = self.other
        if key in other:
            # 'REQUEST' always resolves to the request itself
            if key == 'REQUEST':
                return self
            return other[key]
        if key[:1] == 'U':
            match = URLmatch(key)
            if match is not None:
                # URLn / URLPATHn: the URL (or path) truncated by n steps
                pathonly, n = match.groups()
                path = self._script + self._steps
                n = len(path) - int(n)
                if n < 0:
                    raise KeyError(key)
                if pathonly:
                    path = [''] + path[:n]
                else:
                    path = [other['SERVER_URL']] + path[:n]
                URL = '/'.join(path)
                if 'PUBLISHED' in other:
                    # Don't cache URLs until publishing traversal is done.
                    other[key] = URL
                    self._urls = self._urls + (key,)
                return URL
        if key in isCGI_NAMEs or key[:5] == 'HTTP_':
            # CGI/HTTP environment variables (hidden keys excluded)
            environ = self.environ
            if key in environ and (key not in hide_key):
                return environ[key]
            return ''
        if key == 'REQUEST':
            return self
        if key[:1] == 'B':
            match = BASEmatch(key)
            if match is not None:
                # BASEn / BASEPATHn: base URL (or path) through step n
                pathonly, n = match.groups()
                path = self._steps
                n = int(n)
                if n:
                    n = n - 1
                    if len(path) < n:
                        raise KeyError(key)
                    v = self._script + path[:n]
                else:
                    v = self._script[:-1]
                if pathonly:
                    v.insert(0, '')
                else:
                    v.insert(0, other['SERVER_URL'])
                URL = '/'.join(v)
                if 'PUBLISHED' in other:
                    # Don't cache URLs until publishing traversal is done.
                    other[key] = URL
                    self._urls = self._urls + (key,)
                return URL
            if key == 'BODY' and self._file is not None:
                # read the whole body, restoring the file position afterwards
                p = self._file.tell()
                self._file.seek(0)
                v = self._file.read()
                self._file.seek(p)
                self.other[key] = v
                return v
            if key == 'BODYFILE' and self._file is not None:
                v = self._file
                self.other[key] = v
                return v
        v = self.common.get(key, _marker)
        if v is not _marker:
            return v
        if self._lazies:
            v = self._lazies.get(key, _marker)
            if v is not _marker:
                if callable(v):
                    v = v()
                self[key] = v  # Promote lazy value
                del self._lazies[key]
                return v
        # Return tainted data first (marked as suspect)
        if returnTaints:
            v = self.taintedform.get(key, _marker)
            if v is not _marker:
                other[key] = v
                return v
        # Untrusted data *after* trusted data
        v = self.form.get(key, _marker)
        if v is not _marker:
            other[key] = v
            return v
        # Return tainted data first (marked as suspect)
        if returnTaints:
            v = self.taintedcookies.get(key, _marker)
            if v is not _marker:
                other[key] = v
                return v
        # Untrusted data *after* trusted data
        v = self.cookies.get(key, _marker)
        if v is not _marker:
            other[key] = v
            return v
        return default
def __getitem__(self, key, default=_marker, returnTaints=0):
v = self.get(key, default, returnTaints=returnTaints)
if v is _marker:
raise KeyError(key)
return v
# Using the getattr protocol to retrieve form values and similar
# is discouraged and is likely to be deprecated in the future.
# request.get(key) or request[key] should be used instead
def __getattr__(self, key, default=_marker, returnTaints=0):
v = self.get(key, default, returnTaints=returnTaints)
if v is _marker:
if key == 'locale':
# we only create the _locale on first access, as setting it
# up might be slow and we don't want to slow down every
# request
if self._locale is _marker:
self.setupLocale()
return self._locale
if key == 'debug':
return self._debug
raise AttributeError(key)
return v
def set_lazy(self, key, callable):
self._lazies[key] = callable
    def __contains__(self, key, returnTaints=0):
        """Support the ``in`` operator by delegating to has_key."""
        return self.has_key(key, returnTaints=returnTaints)  # NOQA
def has_key(self, key, returnTaints=0):
try:
self.__getitem__(key, returnTaints=returnTaints)
except Exception:
return 0
else:
return 1
def keys(self, returnTaints=0):
keys = {}
keys.update(self.common)
keys.update(self._lazies)
for key in self.environ.keys():
if ((key in isCGI_NAMEs or key[:5] == 'HTTP_') and
(key not in hide_key)):
keys[key] = 1
# Cache URLN | |
gapxy)
# set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
for i in range(1,3):
landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
# set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
landmark_curved[index][3][2], landmark_curved[index][3][1], landmark_curved[index][4][2], landmark_curved[index][4][1] = get_points_perpendicular_to_curve(polyy, polyy.deriv(), iz_curved[index], gapxy)
# set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
for i in range(3,5):
landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
elif centerline_fitting=='splines':
for index in range(0, n_iz_curved, 1):
# calculate d (ax+by+cz+d=0)
# print iz_curved[index]
a=x_centerline_deriv[iz_curved[index]]
b=y_centerline_deriv[iz_curved[index]]
c=z_centerline_deriv[iz_curved[index]]
x=x_centerline_fit[iz_curved[index]]
y=y_centerline_fit[iz_curved[index]]
z=iz_curved[index]
d=-(a*x+b*y+c*z)
#print a,b,c,d,x,y,z
# set coordinates for landmark at the center of the cross
landmark_curved[index][0][0], landmark_curved[index][0][1], landmark_curved[index][0][2] = x_centerline_fit[iz_curved[index]], y_centerline_fit[iz_curved[index]], iz_curved[index]
# set y coordinate to y_centerline_fit[iz] for elements 1 and 2 of the cross
for i in range(1,3):
landmark_curved[index][i][1] = y_centerline_fit[iz_curved[index]]
# set x and z coordinates for landmarks +x and -x, forcing de landmark to be in the orthogonal plan and the distance landmark/curve to be gapxy
x_n=Symbol('x_n')
landmark_curved[index][2][0],landmark_curved[index][1][0]=solve((x_n-x)**2+((-1/c)*(a*x_n+b*y+d)-z)**2-gapxy**2,x_n) #x for -x and +x
landmark_curved[index][1][2]=(-1/c)*(a*landmark_curved[index][1][0]+b*y+d) #z for +x
landmark_curved[index][2][2]=(-1/c)*(a*landmark_curved[index][2][0]+b*y+d) #z for -x
# set x coordinate to x_centerline_fit[iz] for elements 3 and 4 of the cross
for i in range(3,5):
landmark_curved[index][i][0] = x_centerline_fit[iz_curved[index]]
# set coordinates for landmarks +y and -y. Here, x coordinate is 0 (already initialized).
y_n=Symbol('y_n')
landmark_curved[index][4][1],landmark_curved[index][3][1]=solve((y_n-y)**2+((-1/c)*(a*x+b*y_n+d)-z)**2-gapxy**2,y_n) #y for -y and +y
landmark_curved[index][3][2]=(-1/c)*(a*x+b*landmark_curved[index][3][1]+d)#z for +y
landmark_curved[index][4][2]=(-1/c)*(a*x+b*landmark_curved[index][4][1]+d)#z for -y
# #display
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'g')
# ax.plot(x_centerline, y_centerline,z_centerline, 'r')
# ax.plot([landmark_curved[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_curved[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_curved[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
# Get coordinates of landmarks along straight centerline
#==========================================================================================
print '\nGet coordinates of landmarks along straight centerline...'
landmark_straight = [ [ [ 0 for i in range(0,3)] for i in range (0,5) ] for i in iz_curved ] # same structure as landmark_curved
# calculate the z indices corresponding to the Euclidean distance between two consecutive points on the curved centerline (approximation curve --> line)
iz_straight = [0 for i in range (0,gapz+1)]
#print iz_straight,len(iz_straight)
for index in range(1, n_iz_curved, 1):
# compute vector between two consecutive points on the curved centerline
vector_centerline = [x_centerline_fit[iz_curved[index]] - x_centerline_fit[iz_curved[index-1]], \
y_centerline_fit[iz_curved[index]] - y_centerline_fit[iz_curved[index-1]], \
iz_curved[index] - iz_curved[index-1]]
# compute norm of this vector
norm_vector_centerline = numpy.linalg.norm(vector_centerline, ord=2)
# round to closest integer value
norm_vector_centerline_rounded = int(round(norm_vector_centerline,0))
# assign this value to the current z-coordinate on the straight centerline
iz_straight[index] = iz_straight[index-1] + norm_vector_centerline_rounded
# initialize x0 and y0 to be at the center of the FOV
x0 = int(round(nx/2))
y0 = int(round(ny/2))
for index in range(0, n_iz_curved, 1):
# set coordinates for landmark at the center of the cross
landmark_straight[index][0][0], landmark_straight[index][0][1], landmark_straight[index][0][2] = x0, y0, iz_straight[index]
# set x, y and z coordinates for landmarks +x
landmark_straight[index][1][0], landmark_straight[index][1][1], landmark_straight[index][1][2] = x0 + gapxy, y0, iz_straight[index]
# set x, y and z coordinates for landmarks -x
landmark_straight[index][2][0], landmark_straight[index][2][1], landmark_straight[index][2][2] = x0-gapxy, y0, iz_straight[index]
# set x, y and z coordinates for landmarks +y
landmark_straight[index][3][0], landmark_straight[index][3][1], landmark_straight[index][3][2] = x0, y0+gapxy, iz_straight[index]
# set x, y and z coordinates for landmarks -y
landmark_straight[index][4][0], landmark_straight[index][4][1], landmark_straight[index][4][2] = x0, y0-gapxy, iz_straight[index]
# # display
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# #ax.plot(x_centerline_fit, y_centerline_fit,z_centerline, 'r')
# ax.plot([landmark_straight[i][j][0] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_straight[i][j][1] for i in range(0, n_iz_curved) for j in range(0, 5)], \
# [landmark_straight[i][j][2] for i in range(0, n_iz_curved) for j in range(0, 5)], '.')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# plt.show()
#
# Create NIFTI volumes with landmarks
#==========================================================================================
# Pad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV
# N.B. IT IS VERY IMPORTANT TO PAD ALSO ALONG X and Y, OTHERWISE SOME LANDMARKS MIGHT GET OUT OF THE FOV!!!
print '\nPad input volume to deal with the fact that some landmarks on the curved centerline might be outside the FOV...'
sct.run('c3d '+fname_centerline_orient+' -pad '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox '+str(padding)+'x'+str(padding)+'x'+str(padding)+'vox 0 -o tmp.centerline_pad.nii.gz')
# TODO: don't pad input volume: no need for that! instead, try to increase size of hdr when saving landmarks.
# Open padded centerline for reading
print '\nOpen padded centerline for reading...'
# Build landmark volumes from the curved/straight landmark coordinate lists,
# then estimate and compose the curve<->straight warping fields with ANTS.
# NOTE(review): Python 2 code (print statements, `commands` module).
file = nibabel.load('tmp.centerline_pad.nii.gz')
data = file.get_data()
hdr = file.get_header()
# Create volumes containing curved and straight landmarks
data_curved_landmarks = data * 0
data_straight_landmarks = data * 0
# initialize landmark value
landmark_value = 1
# Loop across cross index
for index in range(0, n_iz_curved, 1):
    # loop across cross element index
    for i_element in range(0, 5, 1):
        # get x, y and z coordinates of curved landmark (rounded to closest integer)
        x, y, z = int(round(landmark_curved[index][i_element][0])), int(round(landmark_curved[index][i_element][1])), int(round(landmark_curved[index][i_element][2]))
        # attribute landmark_value to the voxel and its neighbours (3x3x3 cube, offset by the pad width)
        data_curved_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
        # get x, y and z coordinates of straight landmark (rounded to closest integer)
        x, y, z = int(round(landmark_straight[index][i_element][0])), int(round(landmark_straight[index][i_element][1])), int(round(landmark_straight[index][i_element][2]))
        # attribute landmark_value to the voxel and its neighbours
        data_straight_landmarks[x+padding-1:x+padding+2, y+padding-1:y+padding+2, z+padding-1:z+padding+2] = landmark_value
        # increment landmark value so curved/straight landmarks stay paired by value
        landmark_value = landmark_value + 1
# Write NIFTI volumes
hdr.set_data_dtype('uint32')  # set voxel type to uint32 #TODO: maybe use int32
print '\nWrite NIFTI volumes...'
img = nibabel.Nifti1Image(data_curved_landmarks, None, hdr)
nibabel.save(img, 'tmp.landmarks_curved.nii.gz')
print '.. File created: tmp.landmarks_curved.nii.gz'
img = nibabel.Nifti1Image(data_straight_landmarks, None, hdr)
nibabel.save(img, 'tmp.landmarks_straight.nii.gz')
print '.. File created: tmp.landmarks_straight.nii.gz'
# Estimate deformation field by pairing landmarks
#==========================================================================================
# Dilate landmarks (because nearest neighbour interpolation will be later used, therefore some landmarks may "disapear" if they are single points)
#print '\nDilate landmarks...'
#sct.run(fsloutput+'fslmaths tmp.landmarks_curved.nii -kernel box 3x3x3 -dilD tmp.landmarks_curved_dilated -odt short')
#sct.run(fsloutput+'fslmaths tmp.landmarks_straight.nii -kernel box 3x3x3 -dilD tmp.landmarks_straight_dilated -odt short')
# Estimate rigid transformation
print '\nEstimate rigid transformation between paired landmarks...'
sct.run('ANTSUseLandmarkImagesToGetAffineTransform tmp.landmarks_straight.nii.gz tmp.landmarks_curved.nii.gz rigid tmp.curve2straight_rigid.txt')
# Apply rigid transformation
print '\nApply rigid transformation to curved landmarks...'
sct.run('WarpImageMultiTransform 3 tmp.landmarks_curved.nii.gz tmp.landmarks_curved_rigid.nii.gz -R tmp.landmarks_straight.nii.gz tmp.curve2straight_rigid.txt --use-NN')
# Estimate b-spline transformation curve --> straight
print '\nEstimate b-spline transformation: curve --> straight...'
sct.run('ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_straight.nii.gz tmp.landmarks_curved_rigid.nii.gz tmp.warp_curve2straight.nii.gz 5x5x5 3 2 0')
# Concatenate rigid and non-linear transformations...
print '\nConcatenate rigid and non-linear transformations...'
#sct.run('ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
cmd = 'ComposeMultiTransform 3 tmp.curve2straight.nii.gz -R tmp.landmarks_straight.nii.gz tmp.warp_curve2straight.nii.gz tmp.curve2straight_rigid.txt'
print('>> '+cmd)
commands.getstatusoutput(cmd)
# Estimate b-spline transformation straight --> curve
# TODO: invert warping field instead of estimating a new one
print '\nEstimate b-spline transformation: straight --> curve...'
sct.run('ANTSUseLandmarkImagesToGetBSplineDisplacementField tmp.landmarks_curved_rigid.nii.gz tmp.landmarks_straight.nii.gz tmp.warp_straight2curve.nii.gz 5x5x5 3 2 0')
# Concatenate rigid and non-linear transformations...
print '\nConcatenate rigid and non-linear transformations...'
#sct.run('ComposeMultiTransform 3 tmp.warp_rigid.nii -R tmp.landmarks_straight.nii tmp.warp.nii tmp.curve2straight_rigid.txt')
# TODO: use sct.run() when output from the following command will be different from 0 (currently there seem to be a bug)
cmd = 'ComposeMultiTransform 3 tmp.straight2curve.nii.gz -R tmp.landmarks_straight.nii.gz -i tmp.curve2straight_rigid.txt tmp.warp_straight2curve.nii.gz'
print('>> '+cmd)
commands.getstatusoutput(cmd)
#print '\nPad input image...'
#sct.run('c3d '+fname_anat+' -pad '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox '+str(padz)+'x'+str(padz)+'x'+str(padz)+'vox 0 -o tmp.anat_pad.nii')
# Unpad landmarks...
# THIS WAS REMOVED ON 2014-06-03 because the output data was cropped at the edge, which caused landmarks to sometimes disappear
# print '\nUnpad landmarks...'
# sct.run('fslroi tmp.landmarks_straight.nii.gz tmp.landmarks_straight_crop.nii.gz '+str(padding)+' '+str(nx)+' '+str(padding)+' '+str(ny)+' '+str(padding)+' '+str(nz))
# Apply deformation to input image
print '\nApply transformation to input image...'
sct.run('WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
# sct.run('WarpImageMultiTransform 3 '+fname_anat+' tmp.anat_rigid_warp.nii.gz -R tmp.landmarks_straight_crop.nii.gz '+interpolation_warp+ ' tmp.curve2straight.nii.gz')
# Generate output file (in current folder)
# TODO: do not uncompress the warping field, it is too time consuming!
print '\nGenerate output file (in current folder)...'
sct.generate_output_file('tmp.curve2straight.nii.gz','./','warp_curve2straight',ext_anat) # warping field
sct.generate_output_file('tmp.straight2curve.nii.gz','./','warp_straight2curve',ext_anat) # warping field
sct.generate_output_file('tmp.anat_rigid_warp.nii.gz','./',file_anat+'_straight',ext_anat) # straightened anatomic
# Delete
queryset=RackReservation.objects.all(),
widget=forms.MultipleHiddenInput()
)
user = forms.ModelChoiceField(
queryset=User.objects.order_by(
'username'
),
required=False
)
tenant = forms.ModelChoiceField(
queryset=Tenant.objects.all(),
required=False
)
description = forms.CharField(
max_length=100,
required=False
)
class Meta:
nullable_fields = []
#
# Manufacturers
#
class ManufacturerForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a single Manufacturer."""
    slug = SlugField()  # auto-populated from 'name'

    class Meta:
        model = Manufacturer
        fields = [
            'name', 'slug',
        ]
class ManufacturerCSVForm(forms.ModelForm):
    """Form backing CSV bulk import of Manufacturers."""

    class Meta:
        model = Manufacturer
        fields = Manufacturer.csv_headers  # columns mirror the model's CSV export
        help_texts = {
            'name': 'Manufacturer name',
            'slug': 'URL-friendly slug',
        }
#
# Device types
#
class DeviceTypeForm(BootstrapMixin, CustomFieldForm):
    """Create/edit form for a DeviceType, with custom fields and tags."""
    slug = SlugField(
        slug_source='model'  # slug is derived from the model name
    )
    tags = TagField(
        required=False
    )

    class Meta:
        model = DeviceType
        fields = [
            'manufacturer', 'model', 'slug', 'part_number', 'u_height', 'is_full_depth', 'subdevice_role', 'comments',
            'tags',
        ]
class DeviceTypeCSVForm(forms.ModelForm):
    """Form backing CSV bulk import of DeviceTypes."""
    # Manufacturer is matched by name rather than primary key
    manufacturer = forms.ModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=True,
        to_field_name='name',
        help_text='Manufacturer name',
        error_messages={
            'invalid_choice': 'Manufacturer not found.',
        }
    )
    subdevice_role = CSVChoiceField(
        choices=SUBDEVICE_ROLE_CHOICES,
        required=False,
        help_text='Parent/child status'
    )

    class Meta:
        model = DeviceType
        fields = DeviceType.csv_headers
        help_texts = {
            'model': 'Model name',
            'slug': 'URL-friendly slug',
        }
class DeviceTypeBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for DeviceTypes; unset optional fields are left unchanged."""
    # Selected objects are carried as hidden inputs
    pk = forms.ModelMultipleChoiceField(
        queryset=DeviceType.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    manufacturer = forms.ModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False
    )
    u_height = forms.IntegerField(
        min_value=1,
        required=False
    )
    is_full_depth = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect(),
        label='Is full depth'
    )

    class Meta:
        # No fields may be blanked out in bulk
        nullable_fields = []
class DeviceTypeFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """Sidebar filter form for the DeviceType list view.

    The component-presence fields (console ports, interfaces, etc.) are
    tri-state: blank = no filter, true/false = has/has-not.
    """
    model = DeviceType
    q = forms.CharField(
        required=False,
        label='Search'
    )
    manufacturer = FilterChoiceField(
        # annotate each manufacturer with its device-type count for display
        queryset=Manufacturer.objects.annotate(
            filter_count=Count('device_types')
        ),
        to_field_name='slug'
    )
    subdevice_role = forms.NullBooleanField(
        required=False,
        label='Subdevice role',
        widget=forms.Select(
            choices=add_blank_choice(SUBDEVICE_ROLE_CHOICES)
        )
    )
    console_ports = forms.NullBooleanField(
        required=False,
        label='Has console ports',
        widget=forms.Select(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    console_server_ports = forms.NullBooleanField(
        required=False,
        label='Has console server ports',
        widget=forms.Select(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_ports = forms.NullBooleanField(
        required=False,
        label='Has power ports',
        widget=forms.Select(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    power_outlets = forms.NullBooleanField(
        required=False,
        label='Has power outlets',
        widget=forms.Select(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    interfaces = forms.NullBooleanField(
        required=False,
        label='Has interfaces',
        widget=forms.Select(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    pass_through_ports = forms.NullBooleanField(
        required=False,
        label='Has pass-through ports',
        widget=forms.Select(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
#
# Device component templates
#
class ConsolePortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a ConsolePortTemplate; parent device type is fixed."""

    class Meta:
        model = ConsolePortTemplate
        fields = [
            'device_type', 'name',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class ConsolePortTemplateCreateForm(ComponentForm):
    """Bulk-creation form: name_pattern expands to one console port per name."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
class ConsoleServerPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a ConsoleServerPortTemplate; parent device type is fixed."""

    class Meta:
        model = ConsoleServerPortTemplate
        fields = [
            'device_type', 'name',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class ConsoleServerPortTemplateCreateForm(ComponentForm):
    """Bulk-creation form: name_pattern expands to one console server port per name."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
class PowerPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a PowerPortTemplate; parent device type is fixed."""

    class Meta:
        model = PowerPortTemplate
        fields = [
            'device_type', 'name',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class PowerPortTemplateCreateForm(ComponentForm):
    """Bulk-creation form: name_pattern expands to one power port per name."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
class PowerOutletTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a PowerOutletTemplate; parent device type is fixed."""

    class Meta:
        model = PowerOutletTemplate
        fields = [
            'device_type', 'name',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class PowerOutletTemplateCreateForm(ComponentForm):
    """Bulk-creation form: name_pattern expands to one power outlet per name."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
class InterfaceTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for an InterfaceTemplate; parent device type is fixed."""

    class Meta:
        model = InterfaceTemplate
        fields = [
            'device_type', 'name', 'form_factor', 'mgmt_only',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class InterfaceTemplateCreateForm(ComponentForm):
    """Bulk-creation form for InterfaceTemplates with a shared form factor."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
    form_factor = forms.ChoiceField(
        choices=IFACE_FF_CHOICES
    )
    mgmt_only = forms.BooleanField(
        required=False,
        label='Management only'
    )
class InterfaceTemplateBulkEditForm(BootstrapMixin, BulkEditForm):
    """Bulk-edit form for InterfaceTemplates; blank fields are left unchanged."""
    pk = forms.ModelMultipleChoiceField(
        queryset=InterfaceTemplate.objects.all(),
        widget=forms.MultipleHiddenInput()
    )
    form_factor = forms.ChoiceField(
        choices=add_blank_choice(IFACE_FF_CHOICES),
        required=False
    )
    mgmt_only = forms.NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect,
        label='Management only'
    )

    class Meta:
        nullable_fields = []
class FrontPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a FrontPortTemplate; parent device type is fixed."""

    class Meta:
        model = FrontPortTemplate
        fields = [
            'device_type', 'name', 'type', 'rear_port', 'rear_port_position',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class FrontPortTemplateCreateForm(ComponentForm):
    """Bulk-creation form for FrontPortTemplates.

    Each front port created from name_pattern must be paired with a free
    (rear port, position) slot on the same device type; occupied slots are
    excluded from the selectable choices.
    """
    name_pattern = ExpandableNameField(
        label='Name'
    )
    type = forms.ChoiceField(
        choices=PORT_TYPE_CHOICES
    )
    rear_port_set = forms.MultipleChoiceField(
        choices=[],  # populated per-instance in __init__
        label='Rear ports',
        help_text='Select one rear port assignment for each front port being created.'
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Determine which rear port positions are occupied. These will be excluded from the list of available mappings.
        occupied_port_positions = [
            (front_port.rear_port_id, front_port.rear_port_position)
            for front_port in self.parent.frontport_templates.all()
        ]

        # Populate rear port choices as "pk:position" values
        choices = []
        rear_ports = RearPortTemplate.objects.filter(device_type=self.parent)
        for rear_port in rear_ports:
            for i in range(1, rear_port.positions + 1):
                if (rear_port.pk, i) not in occupied_port_positions:
                    choices.append(
                        ('{}:{}'.format(rear_port.pk, i), '{}:{}'.format(rear_port.name, i))
                    )
        self.fields['rear_port_set'].choices = choices

    def clean(self):
        # Validate that the number of ports being created equals the number of selected (rear port, position) tuples.
        # Use .get() so that a field-level validation failure upstream doesn't raise KeyError here.
        front_port_count = len(self.cleaned_data.get('name_pattern', []))
        rear_port_count = len(self.cleaned_data.get('rear_port_set', []))
        if front_port_count != rear_port_count:
            raise forms.ValidationError({
                'rear_port_set': 'The provided name pattern will create {} ports, however {} rear port assignments '
                                 'were selected. These counts must match.'.format(front_port_count, rear_port_count)
            })

    def get_iterative_data(self, iteration):
        """Return the rear port mapping for the iteration-th created port."""
        # Assign rear port and position from selected set
        rear_port, position = self.cleaned_data['rear_port_set'][iteration].split(':')
        return {
            'rear_port': int(rear_port),
            'rear_port_position': int(position),
        }
class RearPortTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a RearPortTemplate; parent device type is fixed."""

    class Meta:
        model = RearPortTemplate
        fields = [
            'device_type', 'name', 'type', 'positions',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class RearPortTemplateCreateForm(ComponentForm):
    """Bulk-creation form for RearPortTemplates."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
    type = forms.ChoiceField(
        choices=PORT_TYPE_CHOICES
    )
    positions = forms.IntegerField(
        min_value=1,
        max_value=64,
        initial=1,
        help_text='The number of front ports which may be mapped to each rear port'
    )
class DeviceBayTemplateForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a DeviceBayTemplate; parent device type is fixed."""

    class Meta:
        model = DeviceBayTemplate
        fields = [
            'device_type', 'name',
        ]
        widgets = {
            'device_type': forms.HiddenInput(),
        }
class DeviceBayTemplateCreateForm(ComponentForm):
    """Bulk-creation form: name_pattern expands to one device bay per name."""
    name_pattern = ExpandableNameField(
        label='Name'
    )
#
# Device roles
#
class DeviceRoleForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a DeviceRole."""
    slug = SlugField()

    class Meta:
        model = DeviceRole
        fields = [
            'name', 'slug', 'color', 'vm_role',
        ]
class DeviceRoleCSVForm(forms.ModelForm):
    """Form backing CSV bulk import of DeviceRoles."""
    slug = SlugField()

    class Meta:
        model = DeviceRole
        fields = DeviceRole.csv_headers
        help_texts = {
            'name': 'Name of device role',
            'color': 'RGB color in hexadecimal (e.g. 00ff00)'
        }
#
# Platforms
#
class PlatformForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a Platform."""
    slug = SlugField()

    class Meta:
        model = Platform
        fields = [
            'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args',
        ]
        widgets = {
            # napalm_args holds a JSON blob; give it a compact textarea
            'napalm_args': SmallTextarea(),
        }
class PlatformCSVForm(forms.ModelForm):
    """Form backing CSV bulk import of Platforms."""
    slug = SlugField()
    # Manufacturer is matched by name rather than primary key
    manufacturer = forms.ModelChoiceField(
        queryset=Manufacturer.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Manufacturer name',
        error_messages={
            'invalid_choice': 'Manufacturer not found.',
        }
    )

    class Meta:
        model = Platform
        fields = Platform.csv_headers
        help_texts = {
            'name': 'Platform name',
        }
#
# Devices
#
class DeviceForm(BootstrapMixin, TenancyForm, CustomFieldForm):
site = forms.ModelChoiceField(
queryset=Site.objects.all(),
widget=forms.Select(
attrs={
'filter-for': 'rack',
}
)
)
rack = ChainedModelChoiceField(
queryset=Rack.objects.all(),
chains=(
('site', 'site'),
),
required=False,
widget=APISelect(
api_url='/api/dcim/racks/?site_id={{site}}',
display_field='display_name',
attrs={
'filter-for': 'position',
}
)
)
position = forms.TypedChoiceField(
required=False,
empty_value=None,
help_text="The lowest-numbered unit occupied by the device",
widget=APISelect(
api_url='/api/dcim/racks/{{rack}}/units/?face={{face}}',
disabled_indicator='device'
)
)
manufacturer = forms.ModelChoiceField(
queryset=Manufacturer.objects.all(),
widget=forms.Select(
attrs={
'filter-for': 'device_type',
}
)
)
device_type = ChainedModelChoiceField(
queryset=DeviceType.objects.all(),
chains=(
('manufacturer', 'manufacturer'),
),
label='Device type',
widget=APISelect(
api_url='/api/dcim/device-types/?manufacturer_id={{manufacturer}}',
display_field='model'
)
)
cluster_group = forms.ModelChoiceField(
queryset=ClusterGroup.objects.all(),
required=False,
widget=forms.Select(
attrs={'filter-for': 'cluster', 'nullable': 'true'}
)
)
cluster = ChainedModelChoiceField(
queryset=Cluster.objects.all(),
chains=(
('group', 'cluster_group'),
),
required=False,
widget=APISelect(
api_url='/api/virtualization/clusters/?group_id={{cluster_group}}',
)
)
comments = CommentField()
tags = TagField(required=False)
local_context_data = JSONField(required=False)
class Meta:
model = Device
fields = [
'name', 'device_role', 'device_type', 'serial', 'asset_tag', 'site', 'rack', 'position', 'face',
'status', 'platform', 'primary_ip4', 'primary_ip6', 'cluster_group', 'cluster', 'tenant_group', 'tenant',
'comments', 'tags', 'local_context_data'
]
help_texts = {
'device_role': "The function this device serves",
'serial': "Chassis serial number",
'local_context_data': "Local config context data overwrites all source contexts in the final rendered "
"config context",
}
widgets = {
'face': forms.Select(
attrs={
'filter-for': 'position',
}
),
}
def __init__(self, *args, **kwargs):
# Initialize helper selectors
instance = kwargs.get('instance')
# Using hasattr() instead of "is not None" to avoid RelatedObjectDoesNotExist on required field
if instance and hasattr(instance, 'device_type'):
initial = kwargs.get('initial', {}).copy()
initial['manufacturer'] = instance.device_type.manufacturer
kwargs['initial'] = initial
super().__init__(*args, **kwargs)
if self.instance.pk:
# Compile list of choices for primary IPv4 and IPv6 addresses
for family in [4, 6]:
ip_choices = [(None, '---------')]
# Gather PKs of all interfaces belonging to this Device or a peer VirtualChassis member
interface_ids = self.instance.vc_interfaces.values('pk')
# Collect interface IPs
interface_ips = IPAddress.objects.select_related('interface').filter(
family=family, interface_id__in=interface_ids
)
if interface_ips:
ip_list = [(ip.id, '{} ({})'.format(ip.address, ip.interface)) for ip in interface_ips]
ip_choices.append(('Interface IPs', ip_list))
# Collect NAT IPs
nat_ips = IPAddress.objects.select_related('nat_inside').filter(
family=family, nat_inside__interface__in=interface_ids
)
if nat_ips:
ip_list = [(ip.id, '{} ({})'.format(ip.address, ip.nat_inside.address)) for ip in nat_ips]
ip_choices.append(('NAT IPs', ip_list))
self.fields['primary_ip{}'.format(family)].choices = ip_choices
# If editing an existing device, exclude it from the list of occupied rack units. This ensures that a device
# can be flipped from one face to another.
self.fields['position'].widget.attrs['api-url'] += '&exclude={}'.format(self.instance.pk)
# Limit platform by manufacturer
self.fields['platform'].queryset = Platform.objects.filter(
Q(manufacturer__isnull=True) | Q(manufacturer=self.instance.device_type.manufacturer)
)
else:
# An object that doesn't exist yet can't have any IPs assigned to it
self.fields['primary_ip4'].choices = []
self.fields['primary_ip4'].widget.attrs['readonly'] = True
self.fields['primary_ip6'].choices = []
self.fields['primary_ip6'].widget.attrs['readonly'] = True
# Rack position
pk = self.instance.pk if self.instance.pk else None
try:
if self.is_bound and self.data.get('rack') and str(self.data.get('face')):
position_choices = Rack.objects.get(pk=self.data['rack']) \
.get_rack_units(face=self.data.get('face'), exclude=pk)
elif self.initial.get('rack') and str(self.initial.get('face')):
position_choices = Rack.objects.get(pk=self.initial['rack']) \
.get_rack_units(face=self.initial.get('face'), exclude=pk)
else:
position_choices = []
except Rack.DoesNotExist:
position_choices = []
self.fields['position'].choices = [('', '---------')] + [
(p['id'], {
'label': p['name'],
'disabled': bool(p['device'] and p['id'] != self.initial.get('position')),
}) for p in position_choices
]
# Disable rack assignment if this is a | |
not data.positive:
msg = "Can't use negative index %s" % value
if msg:
global error_occurred
error_occurred = True
print("ERROR - %s near line %i" % (msg, t.lineno(1)))
def p_type_def_1(t):
    '''type_def : TYPEDEF declaration SEMI'''
    # declarations is a type_info
    # Reject 'typedef void'; otherwise register the declaration in name_dict,
    # expanding inline enum/struct/union bodies into their info classes.
    d = t[2]
    lineno = t.lineno(1)
    sortno = t.lineno(3) + 0.5  # sort just after the closing SEMI's line
    if d.type == 'void':
        global error_occurred
        error_occurred = True
        print("ERROR - can't use void in typedef at line %i" % lineno)
        return
    d.lineno = lineno
    if id_unique(d.id, d.type, lineno):
        if d.type == 'enum':
            info = d.create_enum(lineno, sortno)
        elif d.type == 'struct':
            info = d.create_struct(lineno, sortno)
        elif d.type == 'union':
            info = d.create_union(lineno, sortno)
        else:
            info = d
        name_dict[d.id] = info
def p_type_def_2(t):
    '''type_def : ENUM ID enum_body SEMI'''
    # Named enum definition: register an enum_info under the enum's name.
    id = t[2]
    body = t[3]
    lineno = t.lineno(1)
    sortno = t.lineno(4) + 0.5
    if id_unique(id, 'enum', lineno):
        name_dict[id] = enum_info(id, body, lineno, sortno)
def p_type_def_3(t):
    '''type_def : STRUCT ID struct_body SEMI'''
    # Named struct definition: register a struct_info under the struct's name.
    id = t[2]
    body = t[3]
    lineno = t.lineno(1)
    if id_unique(id, 'struct', lineno):
        name_dict[id] = struct_info(id, body, lineno)
def p_type_def_4(t):
    '''type_def : UNION ID union_body SEMI'''
    # Named union definition: register a union_info under the union's name.
    id = t[2]
    body = t[3]
    lineno = t.lineno(1)
    if id_unique(id, 'union', lineno):
        name_dict[id] = union_info(id, body, lineno)
def p_declaration_1(t):
    '''declaration : type_specifier ID'''
    # Scalar declaration: attach the identifier to the type_info and pass it up.
    t[1].id = t[2]
    t[0] = t[1]
def p_declaration_2(t):
    '''declaration : type_specifier ID LBRACKET value RBRACKET
                   | type_specifier ID LT optional_value GT
                   | OPAQUE ID LBRACKET value RBRACKET
                   | OPAQUE ID LT optional_value GT
                   | STRING ID LT optional_value GT'''
    # Array declaration: '[n]' is fixed-length, '<n>' variable-length.
    # OPAQUE/STRING arrive as plain tokens and must be wrapped in a type_info.
    if not isinstance(t[1], type_info):
        t[1] = type_info(t[1], t.lineno(1))
    t[1].id = t[2]
    t[1].array = True
    if t[3] == '[':
        t[1].fixed = True
    else:
        t[1].fixed = False
    t[1].len = t[4]
    t[0] = t[1]
def p_declaration_3(t):
    '''declaration : type_specifier STAR ID'''
    # encode this as the equivalent 'type_specifier ID LT 1 GT'
    # (an XDR optional pointer is a variable-length array of at most one)
    if not isinstance(t[1], type_info):
        t[1] = type_info(t[1], t.lineno(1))
    t[1].id = t[3]
    t[1].array = True
    t[1].fixed = False
    t[1].len = '1'
    t[0] = t[1]
def p_declaration_4(t):
    '''declaration : VOID'''
    # A bare 'void' declaration (valid only in unions and typedef checks).
    t[0] = type_info(t[1], t.lineno(1))
def p_type_specifier_1(t):
    '''type_specifier : UNSIGNED INT
                      | UNSIGNED HYPER'''
    # 'unsigned int' -> 'uint', 'unsigned hyper' -> 'uhyper'
    t[0] = type_info('u' + t[2], t.lineno(1))
def p_type_specifier_2(t):
    '''type_specifier : INT
                      | HYPER
                      | FLOAT
                      | DOUBLE
                      | QUADRUPLE
                      | BOOL
                      | ID
                      | UNSIGNED
                      | enum_type_spec
                      | struct_type_spec
                      | union_type_spec'''
    # FRED - Note UNSIGNED is not in spec
    # Inline enum/struct/union specs are already type_info; bare tokens are wrapped.
    if isinstance(t[1], type_info):
        t[0] = t[1]
    else:
        t[0] = type_info(t[1], t.lineno(1))
def p_enum_type_spec(t):
    '''enum_type_spec : ENUM enum_body'''
    # Anonymous inline enum used directly as a type specifier.
    t[0] = type_info("enum", t.lineno(1), body=t[2])
def p_struct_type_spec(t):
    '''struct_type_spec : STRUCT struct_body'''
    # Anonymous inline struct used directly as a type specifier.
    t[0] = type_info("struct", t.lineno(1), body=t[2])
def p_union_type_spec(t):
    '''union_type_spec : UNION union_body'''
    # Anonymous inline union used directly as a type specifier.
    t[0] = type_info("union", t.lineno(1), body=t[2])
def p_union_body(t):
    '''union_body : SWITCH LPAREN declaration RPAREN LBRACE switch_body RBRACE'''
    # Prepend the discriminant as a pseudo-case 'switch' before the real arms.
    t[0] = [Case_Spec(['switch'], [t[3]])] + t[6]
def p_switch_body(t):
    '''switch_body : case_spec_list default_declaration'''
    # default_declaration is a list of type_info
    # Append the default arm (possibly empty) as a pseudo-case 'default'.
    t[0] = t[1] + [Case_Spec(['default'], t[2])]
def p_case_spec(t):
    '''case_spec : case_statement_list declaration SEMI'''
    # Note a declaration is a type_info
    # case_* are both lists of strings (values)
    t[0] = [Case_Spec(t[1], [t[2]])]
def p_nonempty_lists(t):
    '''case_spec_list : case_spec case_spec_list
                      | case_spec
       case_statement_list : case_statement case_statement_list
                      | case_statement'''
    # Shared right-recursive list builder: each element rule already yields a
    # list, so concatenation flattens the recursion into one Python list.
    if len(t) == 2:
        t[0] = t[1]
    else:
        t[0] = t[1] + t[2]
def p_case_statement(t):
    '''case_statement : CASE value COLON'''
    # A single 'case value:' label, wrapped in a list for easy concatenation.
    t[0] = [t[2]]
def p_default_declaration_1(t):
    '''default_declaration : empty'''
    # No default arm present.
    t[0] = []
def p_default_declaration_(t):
    '''default_declaration : DEFAULT COLON declaration SEMI'''
    # Explicit default arm: a single declaration in a list.
    t[0] = [t[3]]
def p_struct_body(t):
    '''struct_body : LBRACE declaration_list RBRACE'''
    # Returns a list of type_info declarations
    t[0] = t[2]
def p_declaration_list_1(t):
    '''declaration_list : declaration SEMI'''
    # Base case: single declaration.
    t[0] = [t[1]]
def p_declaration_list_2(t):
    '''declaration_list : declaration SEMI declaration_list'''
    # Right recursion: prepend this declaration to the rest of the list.
    t[0] = [t[1]] + t[3]
def p_enum_body(t):
    '''enum_body : LBRACE enum_constant_list RBRACE'''
    # Returns a list of const_info
    t[0] = t[2]
def p_enum_constant(t):
    '''enum_constant : ID EQUALS value'''
    # Register one enum member as a named constant. Yields [const_info] on
    # success, [] if the name clashes; symbolic values must already resolve
    # to a previously defined constant.
    global name_dict, error_occurred
    id = t[1]
    value = t[3]
    lineno = t.lineno(1)
    if id_unique(id, 'enum', lineno):
        info = name_dict[id] = const_info(id, value, lineno, enum=True)
        if not (value[0].isdigit() or value[0] == '-'):
            # We have a name instead of a constant, make sure it is defined
            if value not in name_dict:
                error_occurred = True
                # (fixed typo in message: "derefence" -> "dereference")
                print("ERROR - can't dereference %s at line %s" % (value, lineno))
            elif not isinstance(name_dict[value], const_info):
                error_occurred = True
                print("ERROR - reference to %s at line %s is not a constant" %\
                      (value, lineno))
            else:
                # Propagate the sign of the referenced constant.
                info.positive = name_dict[value].positive
        t[0] = [info]
    else:
        t[0] = []
def p_enum_constant_list_1(t):
    '''enum_constant_list : enum_constant'''
    # Base case: enum_constant already yields a list.
    t[0] = t[1]
def p_enum_constant_list_2(t):
    '''enum_constant_list : enum_constant COMMA enum_constant_list'''
    # Right recursion: concatenate this constant with the rest.
    t[0] = t[1] + t[3]
def p_empty(t):
    'empty :'
    # The empty production; yields None implicitly.
def p_error(t):
    # PLY error hook: record that parsing failed and report the offending
    # token (t is None when the error is an unexpected end-of-file).
    global error_occurred
    error_occurred = True
    if t:
        print("Syntax error at '%s' (lineno %d)" % (t.value, t.lineno))
    else:
        print("Syntax error: unexpectedly hit EOF")
#
# RPC specific routines follow
#
def p_program_def(t):
    '''program_def : PROGRAM ID LBRACE version_def version_def_list RBRACE EQUALS constant SEMI'''
    # RPC program blocks are not translated; only the program number is kept
    # as a named constant.
    print("Ignoring program %s = %s" % (t[2], t[8]))
    global name_dict
    id = t[2]
    value = t[8]
    lineno = t.lineno(1)
    if id_unique(id, 'program', lineno):
        name_dict[id] = const_info(id, value, lineno)
def p_version_def(t):
    '''version_def : VERSION ID LBRACE procedure_def procedure_def_list RBRACE EQUALS constant SEMI'''
    # Keep the version number as a named constant; procedures are handled below.
    global name_dict
    id = t[2]
    value = t[8]
    lineno = t.lineno(1)
    if id_unique(id, 'version', lineno):
        name_dict[id] = const_info(id, value, lineno)
def p_version_def_list(t):
    '''version_def_list : version_def version_def_list
                        | empty'''
    # List rule only; nothing to build.
def p_procedure_def(t):
    '''procedure_def : proc_return ID LPAREN proc_firstarg type_specifier_list RPAREN EQUALS constant SEMI'''
    # Keep the procedure number as a named constant; signature is discarded.
    global name_dict
    id = t[2]
    value = t[8]
    lineno = t.lineno(1)
    if id_unique(id, 'procedure', lineno):
        name_dict[id] = const_info(id, value, lineno)
def p_procedure_def_list(t):
    '''procedure_def_list : procedure_def procedure_def_list
                          | empty'''
    # List rule only; nothing to build.
def p_proc_return(t):
    '''proc_return : type_specifier
                   | VOID'''
    # Return type of an RPC procedure; value is ignored.
def p_proc_firstarg(t):
    '''proc_firstarg : type_specifier
                     | VOID'''
    # First argument of an RPC procedure; value is ignored.
def p_type_specifier_list(t):
    '''type_specifier_list : COMMA type_specifier type_specifier_list
                           | empty'''
    # Remaining comma-separated procedure arguments; values are ignored.
##########################################################################
# #
# Global Variables #
# #
##########################################################################
error_occurred = False  # Parsing of infile status
INDENT = 4              # Number of spaces for each indent level
indent = ' ' * INDENT   # one indentation level, as a string
indent2 = indent * 2    # two indentation levels
##########################################################################
# #
# Helper classes and functions #
# #
##########################################################################
def id_unique(id, name, lineno):
    """Returns True if id not already used. Otherwise, invokes error"""
    # Guard clause: the common case is an unused identifier.
    if id not in name_dict:
        return True
    global error_occurred
    error_occurred = True
    print("ERROR - %s definition %s at line %s conflicts with %s" % \
          (name, id, lineno, name_dict[id]))
    return False
class Case_Spec(object):
    """One arm of an XDR union switch: the case values together with the
    declarations those cases select."""

    def __init__(self, cases, declarations):
        self.cases = cases
        self.declarations = declarations

    def __str__(self):
        return "cases {0}: {1}".format(self.cases, self.declarations)
class Info(object):
    def __init__(self):
        """Initialize common bookkeeping fields for a parsed XDR entity."""
        self.lineno = None   # source line of the definition
        self.sortno = None   # ordering key used by __cmp__
        self.type = None     # XDR type name ('enum', 'struct', 'int', ...)
        self.array = False   # True when the declaration is an array
        self.parent = False
        # NOTE(review): self.id is read by __str__/getTypeHintStr but is set
        # by subclasses/callers, not here — confirm before relying on it.
    def __str__(self):
        # Human-readable summary used in duplicate-definition error messages.
        return "%s %s at line %s" % (self.type, self.id, self.lineno)
def __cmp__(self, other):
"""Sort on lineno, but send None to end"""
# FRED - not used
if self.lineno == other.lineno == None:
return 0
if self.lineno == None:
return 1
if other.lineno == None:
return -1
if self.lineno < other.lineno:
return -1
elif self.lineno == other.lineno:
return 0
else:
return 1
def __cmp__(self, other):
"""Sort on lineno, but send None to end"""
if self.sortno < other.sortno:
return -1
elif self.sortno == other.sortno:
return 0
else:
return 1
    def const_output(self):
        # Default hook: base Info contributes no constant-definition code.
        return None
    def type_output(self):
        # Default hook: base Info contributes no type-definition code.
        return None
def pack_output(Self):
return None
    def _get_filter(self):
        # Emit the optional per-field filter hook lines for generated
        # pack/unpack methods; 'use_filters' is a module-level switch
        # (defined elsewhere in this file).
        if use_filters:
            filter1 = "%sif hasattr(self, 'filter_%s'):\n" % (indent2, self.id)
            filter2 = "%sdata = getattr(self, 'filter_%s')(data)\n" % (indent*3, self.id)
            return filter1 + filter2
        else:
            return ''
    def getTypeHintStr(self, inTypeClass:bool=False) -> str:
        """Return a Python type-hint string for this XDR type.

        Primitive XDR types map to builtins; arrays are wrapped in List[...];
        struct/union/typedef names are resolved recursively through name_dict.
        When inTypeClass is True, unresolved names are emitted as forward
        references ('Name'), otherwise qualified as types.Name.
        """
        hint = None
        # Types which have no "list"
        if(self.type == "opaque"):
            return "bytes"
        if(self.type == "string"):
            return "bytes"
        # Types which may be arrayed:
        if(self.type in ["uint","int"]):
            hint = "int"
        if(self.type == "bool"):
            hint = "bool"
        if(hint is not None):
            if(self.array):
                return "List[%s]"% (hint)
            return hint
        # struct/union members are looked up by field id; everything else by type name
        if(self.type in ["struct","union"]):
            typeInfo = name_dict.get(self.id)
        else:
            typeInfo = name_dict.get(self.type)
        # Recurse unless the lookup failed or points back at ourselves
        if(typeInfo is not None and typeInfo is not self):
            return typeInfo.getTypeHintStr(inTypeClass)
        print(f"{typeInfo}: Unknown type {self.type} {self.id}")
        if(inTypeClass):
            return "'%s'" %(self.type)
        else:
            return "types.%s" % (self.type)
    def _get_pack_header(self):
        # Emit the generated 'def pack_<id>' signature plus the filter hook.
        header = "%sdef pack_%s(self, data: %s) -> None:\n" % (indent, self.id, self.getTypeHintStr())
        return header + self._get_filter()
    def unpack_output(self):
        # Default hook: base Info contributes no unpacker code.
        return None
    def _get_unpack_footer(self):
        # Emit the filter hook followed by the generated 'return data' line.
        footer = "%sreturn data\n" % indent2
        return self._get_filter() + footer
def brackets(self):
if self.array:
if self.fixed:
out = "[%s]"
else:
out = "<%s>"
if self.len is None:
out = out % ''
else:
out = out % self.len
else:
out = ''
return out
def fullname(self, value):
"""Put 'const.' in front if needed"""
if value[0].isdigit() or value[0]=='-':
return value
else:
return | |
<filename>AnDe/AnDe.py
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 11:36:48 2021
@author: ChandrimaBiswas
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import os
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import MinMaxScaler
from sklearn import preprocessing
import hdbscan
import tracemalloc
from sklearn.metrics import ConfusionMatrixDisplay
clear = lambda:os.system('clear')
# extract data from csv files
# seperate the Catagorical data and Numaric Data.
# Handel the missing value from data set.
def extractData():
    """Interactively load a CSV dataset and return (Xdata, Ydata).

    Prompts for the CSV path, reports/handles missing values (columns with
    >= 30% missing are dropped), optionally one-hot-encodes categorical
    columns, label-encodes the target, and optionally min-max scales X.
    The last column is assumed to be the label. NOTE(review): the outer
    'while True' never iterates more than once — every path returns.
    """
    DP = input("Please, Input the Location of CSV:")
    while True:
        DS = pd.read_csv(DP,low_memory=False)
        missingId = []
        missingValue = str(DS.isnull().values.any())
        if missingValue == "True":
            print("There has some missing value.")
            # 'Infinity' strings are treated as missing
            DS = DS.replace('Infinity', np.nan)
            totalMissing = DS.isnull().sum().sum()
            percentMissing = (totalMissing / (DS.count().sum() + DS.isnull().sum().sum())) * 100
            for rows in DS:
                if DS[rows].isnull().sum() != 0:
                    missingId.append(rows)
                    percentMissingRow=(DS[rows].isnull().sum()/DS[rows].count().sum())* 100
                    # Drop columns that are >= 30% missing
                    if percentMissingRow >= 30:
                        DS = DS.drop(rows, 1)
            print("The missing values are in the columns:",missingId)
            print("Total number of missing Values:" , totalMissing)
            print(percentMissing,"%")
        num_cols = DS.columns.get_indexer(DS._get_numeric_data().columns)
        total_cols=DS.columns.get_indexer(DS.columns)
        while True:
            withCat = input("Do you want to include Catagorical data [y/n]:")
            if withCat == "y" or withCat == "n":
                break
            else:
                print("Give your answer only in y or n .\n\n")
        if withCat == "y":
            Xdata = DS.iloc[:,total_cols[:-1]].values
            # NOTE(review): one-hot encodes hard-coded columns 1-3 — verify
            # these are the categorical columns for the target dataset.
            transform = ColumnTransformer([("Servers", OneHotEncoder(categories = "auto"), [1,2,3])], remainder="passthrough")
            Xdata = transform.fit_transform(Xdata)
            Ydata = DS.iloc[:,total_cols[-1]].values
            le = preprocessing.LabelEncoder()
            le.fit(Ydata)
            Ydata=le.transform(Ydata)
        elif withCat == "n":
            Xdata = DS.iloc[:,num_cols[:-1]].values
            Ydata = DS.iloc[:,num_cols[-1]].values
        while True:
            decision = input("Scaling data with MinMaxScaler [y/n]:")
            if decision == "y" or decision == "n":
                break
            else:
                print("Give your answer only in y or n.\n\n")
        if decision == "y":
            Xdata = MinMaxScaler(feature_range=(0, 1)).fit_transform(Xdata)
            return Xdata,Ydata
        else:
            return Xdata,Ydata
#Hierarchical Clustering
def hierarchicalClustering(data,Ydata):
    """Interactively run Agglomerative clustering and flag anomalies.

    Prompts for the cluster count and linkage, fits AgglomerativeClustering,
    and marks the top 10% largest merge distances as anomalies. Returns
    (labels, cluster ids, predictions, k, anomaly flags, runtime,
    majority ground-truth label per cluster).
    """
    from sklearn.cluster import AgglomerativeClustering
    while True:
        print("Agglomerative Clustering")
        clusterNumber = input("How many clusters you want?:")
        try:
            clusterNumber = int(clusterNumber)
        except ValueError:
            print("Error\n\n")
        if type(clusterNumber) == int:
            n = 0
            clusters = []
            while n < clusterNumber:#Converting nClusters into an array of n clusters [n] for use it later
                clusters.append(n)
                n+=1
            break
    while True:
        linkage = input("The linkage criterion determines which distance to use [‘ward’, ‘complete’, ‘average’, ‘single’]:")
        if linkage == "ward" or linkage == "complete"or linkage == "average"or linkage == "single":
            break
        else:
            print("Give your answer Correctly.\n\n")
    print("\nClustering...\n")
    outliers_fraction = 0.1  # fraction of merge distances treated as outliers
    start_time = time.time()
    Aggo = AgglomerativeClustering(n_clusters = clusterNumber,affinity='euclidean',linkage=linkage,compute_distances=True)
    print("Data Successfully Clustered By Agglomerative Clustering")
    # NOTE(review): the model is fitted twice (fit + fit_predict) — redundant work.
    AggoData = Aggo.fit(data)
    Xdata_Aggo = Aggo.fit_predict(data)
    Zdata = AggoData.labels_
    runTime=(time.time() - start_time)
    # Merge distances from the dendrogram; the largest 10% define the anomaly threshold
    distance = Aggo.distances_
    number_of_outliers = int(outliers_fraction*len(distance))
    sorted_index_array = np.argsort(distance)
    sorted_array = distance[sorted_index_array]
    rslt = sorted_array[-number_of_outliers : ]
    threshold = rslt.min()
    anomaly = (distance >= threshold).astype(int)
    # Cross-tabulate predicted clusters vs ground truth to name each cluster
    HR = pd.crosstab(Ydata,Zdata)
    maxVal = HR.idxmax()
    return Zdata,clusters,Xdata_Aggo,clusterNumber,anomaly,runTime,maxVal
#K-Means Clustering
def kmeansClustering(data, labels):
    """Run K-means interactively and flag centroid-distance outliers.

    Prompts for the number of clusters, fits the model once, and marks the
    top ``outliers_fraction`` of points (by distance to their centroid) as
    anomalies.

    :param data: feature matrix to cluster
    :param labels: ground-truth labels, used only for the crosstab summary
    :return: (labels, cluster-id list, predicted labels, centroids, inertia,
              n clusters, anomaly flags, runtime in seconds,
              majority true label per cluster)
    """
    from sklearn.cluster import KMeans
    # Prompt until a valid integer cluster count is supplied.
    while True:
        print("Kmeans Clustering")
        clusterNumber = input("How many clusters you want?:")
        try:
            clusterNumber = int(clusterNumber)
        except ValueError:
            print("Error\n\n")
        if type(clusterNumber) == int:
            clusters = list(range(clusterNumber))  # cluster ids [0..n-1] for later use
            break
    outliers_fraction = 0.01
    start_time = time.time()
    KMEANS = KMeans(n_clusters=clusterNumber, init="k-means++", max_iter=300,
                    n_init=10, random_state=0)
    # Fix: fit once; the original fit() + fit_predict() ran the whole
    # algorithm twice on the same data.
    Xdata_Kmeans = KMEANS.fit_predict(data)
    Zdata = KMEANS.labels_
    print("Data Successfully Clustered with K-means")
    runTime = time.time() - start_time
    inertia = KMEANS.inertia_
    distance = getDistanceByPoint(data, KMEANS)
    number_of_outliers = int(outliers_fraction * len(distance))
    # Threshold = smallest distance among the farthest ``number_of_outliers`` points.
    threshold = distance.nlargest(number_of_outliers).min()
    anomaly = (distance >= threshold).astype(int)
    ClusterCenter = KMEANS.cluster_centers_
    # K-means results vs. ground truth
    kmeansR = pd.crosstab(labels, Zdata)
    maxVal = kmeansR.idxmax()
    return Zdata, clusters, Xdata_Kmeans, ClusterCenter, inertia, clusterNumber, anomaly, runTime, maxVal
# return Series of distance between each point and his distance with the closest centroid
def getDistanceByPoint(data, model):
    """Return a Series of distances from each point to its assigned cluster centroid.

    :param data: indexable collection of points (e.g. 2-D numpy array, one row per sample)
    :param model: fitted clustering model exposing ``labels_`` and ``cluster_centers_``
    :return: pd.Series of Euclidean distances, indexed by sample position
    """
    distance = pd.Series(dtype=float)  # explicit dtype avoids the pandas empty-Series warning
    for i in range(len(data)):
        Xa = data[i]
        # Fix: labels_ are already 0-based centroid indices; the previous
        # ``labels_[i] - 1`` mapped cluster 0 onto the *last* centroid.
        Xb = model.cluster_centers_[model.labels_[i]]
        distance.at[i] = np.linalg.norm(Xa - Xb)
    return distance
#HDBSCAN Algorithm
def hdbscanClustering(Xdata, Ydata):
    """Run HDBSCAN interactively and flag GLOSH-score outliers.

    :param Xdata: feature matrix to cluster
    :param Ydata: ground-truth labels, used only for the crosstab summary
    :return: (labels, cluster-id list, outlier indices, runtime in seconds,
              majority true label per cluster)
    """
    print("HDBSCAN Clustering")
    # Fix: prompt until a valid integer is supplied; the original fell
    # through with a string on bad input and crashed inside HDBSCAN.
    while True:
        clusterNumber = input("Minimun size of clusters you want?:")
        try:
            clusterNumber = int(clusterNumber)
            break
        except ValueError:
            print("Error\n\n")
    start_time = time.time()
    hdb = hdbscan.HDBSCAN(min_cluster_size=clusterNumber)
    Zdata = hdb.fit_predict(Xdata) + 1  # shift so noise label -1 becomes 0
    print("Data Successfully Clustered with HDBSCAN")
    runTime = time.time() - start_time
    # Points in the top 10% of GLOSH outlier scores are flagged as outliers.
    threshold = pd.Series(hdb.outlier_scores_).quantile(0.9)
    outliers = np.where(hdb.outlier_scores_ > threshold)[0]
    n_clusters = len(set(Zdata))
    clusters = list(range(n_clusters))
    HDBR = pd.crosstab(Ydata, Zdata)
    maxVal = HDBR.idxmax()  # majority ground-truth label per cluster
    return Zdata, clusters, outliers, runTime, maxVal
#DBSCAN Algorithm
def dbscanClustering(Xdata, Ydata):
    """Run DBSCAN interactively and summarise clusters and noise.

    :param Xdata: feature matrix to cluster
    :param Ydata: ground-truth labels, used only for the crosstab summary
    :return: (labels, cluster-id list, noise count, runtime in seconds,
              majority true label per cluster)
    """
    from sklearn.cluster import DBSCAN
    # Keep prompting until epsilon parses as a float.
    while True:
        print("DBSCAN Clustering")
        epsilon = input("epsilon in Decimal:")
        try:
            epsilon = float(epsilon)
            break
        except ValueError:
            print("Decimal numbers only")
    # Keep prompting until min_samples parses as an int.
    while True:
        minSamples = input("Min Samples In Integer:")
        try:
            minSamples = int(minSamples)
            break
        except ValueError:
            print("Integer Numbers only")
    # Computing DBSCAN
    start_time = time.time()
    db = DBSCAN(eps=epsilon, min_samples=minSamples, algorithm='auto').fit(Xdata)
    print("Data Successfully Clustered by DBSCAN")
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    Zdata = db.labels_
    runTime = time.time() - start_time
    nClusters = len(set(Zdata))
    noise_ = list(Zdata).count(-1)
    # With noise present the label ids start at -1, otherwise at 0.
    first_label = -1 if noise_ > 0 else 0
    clusters = list(range(first_label, first_label + nClusters))
    # DBSCAN results vs. ground truth
    dbscanR = pd.crosstab(Ydata, Zdata)
    maxVal = dbscanR.idxmax()
    return Zdata, clusters, noise_, runTime, maxVal
def isolationForest(Xdata, Ydata):  # Isolation Forest algorithm
    """Run IsolationForest interactively and summarise the -1/1 labels.

    :param Xdata: feature matrix
    :param Ydata: ground-truth labels, used only for the crosstab summary
    :return: (labels, label-value list, runtime in seconds,
              majority true label per label value)
    """
    from sklearn.ensemble import IsolationForest
    print("Isolation Forest Clustering")
    while True:
        contamination = input("Contamination value between [0,0.5]: ")
        try:
            contamination = float(contamination)
        except ValueError:
            print("Enter a Number between [0,0.5]")
        # Fix: sklearn requires contamination in (0, 0.5]; the original accepted
        # 0, which IsolationForest rejects at fit time (now matches LFO()).
        if type(contamination) == float and 0 < contamination <= 0.5:
            break
    start_time = time.time()
    Zdata = IsolationForest(max_samples="auto", contamination=contamination).fit_predict(Xdata)
    print("Data Successfully Clustered by Isolation Forest")
    runTime = time.time() - start_time
    Zdata = np.array(Zdata, dtype=object)
    IFR = pd.crosstab(Ydata, Zdata)
    maxVal = IFR.idxmax()
    # Predicted labels are -1 (anomaly) and 1 (normal): enumerate them to
    # match the crosstab columns.
    clusters = list(range(-1, len(IFR.columns), 2))
    return Zdata, clusters, runTime, maxVal
def LFO(Xdata, Ydata):  # Local Outlier Factor algorithm
    """Run LocalOutlierFactor interactively and summarise the -1/1 labels.

    :param Xdata: feature matrix
    :param Ydata: ground-truth labels, used only for the crosstab summary
    :return: (labels, label-value list, runtime in seconds,
              majority true label per label value)
    """
    from sklearn.neighbors import LocalOutlierFactor
    print("Local Outlier Factor Clustering")
    # Keep prompting until a valid contamination fraction is supplied.
    while True:
        contamination = input("Contamination value between [0,0.5]: ")
        try:
            contamination = float(contamination)
        except ValueError:
            print("Enter a Number")
        if type(contamination) == float and 0 < contamination <= 0.5:
            break
    start_time = time.time()
    lof = LocalOutlierFactor(contamination=contamination, algorithm='auto').fit_predict(Xdata)
    print("Data Successfully Clustered by Local Outlier Factor")
    runTime = time.time() - start_time
    LOFR = pd.crosstab(Ydata, lof)
    maxVal = LOFR.idxmax()
    # Predicted labels are -1 (outlier) and 1 (inlier): enumerate them to
    # match the crosstab columns.
    clusters = list(range(-1, len(LOFR.columns), 2))
    return lof, clusters, runTime, maxVal
def hierarchicalVisualization(Xdata, Xdata_aggo, nClusters, anomaly):
    """Scatter-plot agglomerative clusters plus outliers, then an anomaly bar chart.

    :param Xdata: 2-D feature array (first two columns are plotted)
    :param Xdata_aggo: per-sample cluster labels
    :param nClusters: number of clusters to draw
    :param anomaly: 0/1 outlier flags; one element shorter than ``Xdata``
                    (derived from merge distances), hence the ``Xdata[:-1]`` slice
    """
    length = len(Xdata)  # NOTE(review): used as the marker size — confirm intended
    # Fix: valid matplotlib color names; the original list contained invalid
    # entries ('Antique ruby', 'Blush') that raised ValueError from the 7th
    # cluster on.
    color = ['yellow', 'blue', 'green', 'cyan', 'magenta', 'violet', 'orange', 'aqua', 'pink']
    for i in range(nClusters):
        # Modulo keeps large cluster counts from running off the palette
        # (the original raised IndexError for 10+ clusters).
        plt.scatter(Xdata[Xdata_aggo == i, 0], Xdata[Xdata_aggo == i, 1],
                    s=length, c=color[i % len(color)], label='Cluster' + str(i + 1))
    plt.scatter(Xdata[:-1][anomaly == 1, 0], Xdata[:-1][anomaly == 1, 1],
                s=length, c='red', label='Outliers')
    plt.title('Agglomerative Clustering')
    plt.show()
    bars = ('anomaly', 'normal')
    Xdata_pos = range(len(bars))
    # Create bars: anomaly count vs. normal count.
    barlist = plt.bar(Xdata_pos, [len(anomaly[anomaly == 1]), len(anomaly[anomaly == 0])])
    plt.xticks(Xdata_pos, bars)
    barlist[0].set_color('r')
    plt.title('Agglomerative Bar Chart')
    plt.show()
def hdbscanVisualization(Xdata, outliers):
    """Plot all points in gray with HDBSCAN outliers overlaid in red, then a bar chart.

    :param Xdata: 2-D feature array
    :param outliers: index array of the points flagged as outliers
    """
    outlier_points = Xdata[outliers]
    plt.scatter(Xdata.T[0], Xdata.T[1], s=100, linewidth=0, c='gray', alpha=0.25)
    plt.scatter(outlier_points.T[0], outlier_points.T[1], s=100, linewidth=0, c='red', alpha=0.5)
    plt.title('HDBSCAN Clustering')
    plt.show()
    bar_names = ('anomaly', 'normal')
    positions = range(len(bar_names))
    # Bar heights: number of outliers vs. total number of points.
    heights = [len(outlier_points.T[0]), len(Xdata.T[0])]
    bar_artists = plt.bar(positions, heights)
    plt.xticks(positions, bar_names)
    plt.title('HDBSCAN Bar Chart')
    bar_artists[0].set_color('r')
    plt.show()
def kmeansVisualization(Xdata, Xdata_Kmeans, ClusterCenter, nClusters, anomaly):
    """Scatter-plot K-means clusters, centroids and outliers, then an anomaly bar chart.

    :param Xdata: 2-D feature array (first two columns are plotted)
    :param Xdata_Kmeans: per-sample cluster labels
    :param ClusterCenter: centroid coordinates from the fitted model
    :param nClusters: number of clusters to draw
    :param anomaly: per-sample 0/1 outlier flags
    """
    # Fix: valid matplotlib color names; 'ruby' and 'Blush' in the original
    # raised ValueError once enough clusters were requested.
    color = ['yellow', 'blue', 'green', 'cyan', 'magenta', 'violet', 'orange', 'aqua', 'pink']
    for i in range(nClusters):
        # Modulo keeps large cluster counts from running off the palette
        # (consistent with dbscanVisualization).
        plt.scatter(Xdata[Xdata_Kmeans == i, 0], Xdata[Xdata_Kmeans == i, 1],
                    s=100, c=color[i % len(color)], label='Cluster' + str(i + 1))
    plt.scatter(ClusterCenter[:, 0], ClusterCenter[:, 1], s=300, c='Black', label='Centroids')
    plt.scatter(Xdata[anomaly == 1, 0], Xdata[anomaly == 1, 1], s=100, c='red', label='Outliers')
    plt.title('K-means Clustering')
    plt.show()
    bars = ('anomaly', 'normal')
    Xdata_pos = range(len(bars))
    # Create bars: anomaly count vs. normal count.
    barlist = plt.bar(Xdata_pos, [len(anomaly[anomaly == 1]), len(anomaly[anomaly == 0])])
    plt.xticks(Xdata_pos, bars)
    plt.title('K-means Bar Chart')
    barlist[0].set_color('r')
    plt.show()
def dbscanVisualization(Xdata, dblabels, dbClusters):
    """Scatter-plot DBSCAN clusters (noise in red), then an anomaly/normal bar chart.

    :param Xdata: 2-D feature array (first two columns are plotted)
    :param dblabels: per-sample DBSCAN labels (-1 means noise)
    :param dbClusters: ordered list of the distinct label values
    """
    palette = ['yellow', 'blue', 'green', 'cyan', 'magenta', 'violet', 'orange']
    start = 0
    if dbClusters[start] == -1:
        # Noise points (label -1) are drawn in red and skipped by the color loop.
        noise_mask = dblabels == dbClusters[0]
        plt.scatter(Xdata[noise_mask, 0], Xdata[noise_mask, 1],
                    s=100, c='red', label='Cluster' + str(1))
        start = 1
    for idx in range(start, len(dbClusters)):
        cluster_mask = dblabels == dbClusters[idx]
        plt.scatter(Xdata[cluster_mask, 0], Xdata[cluster_mask, 1], s=100,
                    c=palette[idx % len(palette)], label='Cluster' + str(idx + 1))
    plt.title('DBSCAN Clustering')
    plt.show()
    bar_names = ('anomaly', 'normal')
    positions = range(len(bar_names))
    # Bar heights: noise points vs. clustered points.
    heights = [len(Xdata[dblabels == -1]), len(Xdata[dblabels != -1])]
    bar_artists = plt.bar(positions, heights)
    plt.xticks(positions, bar_names)
    plt.title('DBSCAN Bar Chart')
    bar_artists[0].set_color('r')
    plt.show()
def isolationForestVisualization(Xdata,ifLabels,ifNclusters):
# Visualising the | |
# Repository: Vladimir-Ivanov-Git/raw_packet
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# region Description
"""
dhcp_fuzz.py: DHCPv4 fuzzing script
Author: <NAME>
License: MIT
Copyright 2020, Raw-packet Project
"""
# endregion
# region Import
from sys import path
from os.path import dirname, abspath
from argparse import ArgumentParser
from socket import socket, AF_PACKET, SOCK_RAW
from struct import pack
from socket import inet_aton
from time import sleep
from typing import Union, List, Dict
from re import sub
from paramiko import RSAKey
from pathlib import Path
from os.path import isfile
from os import remove
from scapy.all import rdpcap, BOOTP
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
tested_index: int = 0
transactions: List[int] = list()
send_transactions: Dict[int, List[int]] = {}
# region Get address of IPv4 gateway over ssh
def get_ipv4_gateway_over_ssh(ssh_user: str = 'root',
                              ssh_password: Union[None, str] = None,
                              ssh_pkey: Union[None, RSAKey] = None,
                              ssh_host: str = '192.168.0.1',
                              os: str = 'MacOS',
                              network_interface: str = 'en0') -> Union[None, str]:
    """
    Get IPv4 gateway address over SSH
    :param ssh_user: SSH Username
    :param ssh_password: SSH Password
    :param ssh_pkey: SSH Private key
    :param ssh_host: SSH Host
    :param os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)
    :param network_interface: Network interface
    :return: IPv4 gateway address or None if error
    """
    gateway_ipv4_address: Union[None, str] = None
    try:
        # Pick the route-table command for the target OS.
        if os == 'MacOS':
            route_table_command: str = 'netstat -nr | grep default | grep ' + network_interface + \
                                       ' | awk \'{print $2}\''
        elif os == 'Linux':
            route_table_command: str = 'route -n | grep UG | grep ' + network_interface + \
                                       ' | awk \'{print $2}\''
        else:
            route_table_command: str = 'ipconfig | findstr /i "Gateway"'
        # Fix: forward the ssh_password argument (was a broken '<PASSWORD>'
        # placeholder left by anonymization).
        route_table_result: str = base.exec_command_over_ssh(command=route_table_command,
                                                             ssh_user=ssh_user,
                                                             ssh_password=ssh_password,
                                                             ssh_pkey=ssh_pkey,
                                                             ssh_host=ssh_host)
        route_table_result: List[str] = route_table_result.splitlines()
        route_table_result: str = route_table_result[0]
        if os == 'Windows':
            # Windows output: strip dotted padding and ':' separators, collapse
            # whitespace, then take the third token (the gateway address).
            route_table_result: str = route_table_result.replace(' .', '').replace(' :', '')
            route_table_result: str = sub(r' +', ' ', route_table_result)
            route_table_result: List[str] = route_table_result.split()
            route_table_result: str = route_table_result[2]
        assert base.ip_address_validation(route_table_result), \
            'Bad IPv4 address: ' + base.error_text(route_table_result)
        # NOTE(review): first_ip_address / last_ip_address appear to be globals
        # set in the __main__ block — confirm.
        assert base.ip_address_in_range(route_table_result, first_ip_address, last_ip_address), \
            'Router IPv4 address: ' + base.error_text(route_table_result) + \
            ' not in range: ' + base.info_text(first_ip_address + ' - ' + last_ip_address)
        return route_table_result
    except AssertionError as Error:
        base.print_error(Error.args[0])
        return gateway_ipv4_address
    except IndexError:
        # Empty command output (e.g. no default route yet).
        return gateway_ipv4_address
# endregion
# region Start DHCPv4 client over ssh
def dhclient_over_ssh(ssh_user: str = 'root',
                      ssh_password: Union[None, str] = None,
                      ssh_pkey: Union[None, RSAKey] = None,
                      ssh_host: str = '192.168.0.1',
                      os: str = 'MacOS',
                      network_interface: str = 'en0') -> bool:
    """
    Start DHCPv4 client over ssh
    :param ssh_user: SSH Username
    :param ssh_password: SSH Password
    :param ssh_pkey: SSH Private key
    :param ssh_host: SSH Host
    :param os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)
    :param network_interface: Network interface
    :return: True if success or False if error
    """
    # Pick the DHCP renew command for the target OS.
    if os == 'MacOS':
        dhclient_command: str = 'ipconfig set ' + network_interface + ' DHCP'
    elif os == 'Linux':
        dhclient_command: str = 'rm -f /var/lib/dhcp/dhclient.leases; dhclient ' + network_interface
    else:
        dhclient_command: str = 'ipconfig /release && ipconfig /renew'
    # Fix: forward the ssh_password argument (was a broken '<PASSWORD>'
    # placeholder left by anonymization).
    return base.exec_command_over_ssh(command=dhclient_command,
                                      ssh_user=ssh_user,
                                      ssh_password=ssh_password,
                                      ssh_pkey=ssh_pkey,
                                      ssh_host=ssh_host,
                                      need_output=False)
# endregion
# region Start tshark over ssh
def start_tshark_over_ssh(ssh_user: str = 'root',
                          ssh_password: Union[None, str] = None,
                          ssh_pkey: Union[None, RSAKey] = None,
                          ssh_host: str = '192.168.0.1',
                          os: str = 'MacOS',
                          network_interface: str = 'en0') -> bool:
    """
    Start tshark over ssh
    :param ssh_user: SSH Username
    :param ssh_password: SSH Password
    :param ssh_pkey: SSH Private key
    :param ssh_host: SSH Host
    :param os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)
    :param network_interface: Network interface
    :return: True if success or False if error
    """
    # Capture only frames originating from our MAC (your_mac_address is a
    # global set in the __main__ block).
    if os == 'Linux' or os == 'MacOS':
        start_tshark_command: str = 'rm -f /tmp/dhcp.pcap; tshark -i ' + network_interface + \
                                    ' -w /tmp/dhcp.pcap -f "ether src ' + your_mac_address + '"'
    else:
        # Fix: backslashes escaped ('\W', '\T' are invalid escape sequences);
        # the resulting string bytes are unchanged.
        start_tshark_command: str = 'cd C:\\Windows\\Temp && del /f dhcp.pcap && tshark -i ' + network_interface + \
                                    ' -w dhcp.pcap -f "ether src ' + your_mac_address + '"'
    # Fix: forward the ssh_password argument (was a broken '<PASSWORD>'
    # placeholder left by anonymization).
    return base.exec_command_over_ssh(command=start_tshark_command,
                                      ssh_user=ssh_user,
                                      ssh_password=ssh_password,
                                      ssh_pkey=ssh_pkey,
                                      ssh_host=ssh_host,
                                      need_output=False)
# endregion
# region Stop tshark over ssh
def stop_tshark_over_ssh(ssh_user: str = 'root',
                         ssh_password: Union[None, str] = None,
                         ssh_pkey: Union[None, RSAKey] = None,
                         ssh_host: str = '192.168.0.1',
                         os: str = 'MacOS') -> bool:
    """
    Stop tshark over ssh
    :param ssh_user: SSH Username
    :param ssh_password: SSH Password
    :param ssh_pkey: SSH Private key
    :param ssh_host: SSH Host
    :param os: MacOS, Linux or Windows (Installation of OpenSSH For Windows: https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)
    :return: True if success or False if error
    """
    # Pick the kill command for the target OS.
    if os == 'Linux' or os == 'MacOS':
        stop_tshark_command: str = 'pkill tshark'
    else:
        stop_tshark_command: str = 'taskkill /IM "tshark.exe" /F'
    # Fix: forward the ssh_password argument (was a broken '<PASSWORD>'
    # placeholder left by anonymization).
    return base.exec_command_over_ssh(command=stop_tshark_command,
                                      ssh_user=ssh_user,
                                      ssh_password=ssh_password,
                                      ssh_pkey=ssh_pkey,
                                      ssh_host=ssh_host,
                                      need_output=False)
# endregion
# region Make DHCPv4 reply packet
def make_reply(bootp_transaction_id: int = 1,
               dhcpv4_message_type: int = 2) -> bytes:
    """Build a raw Ethernet/IPv4/UDP/BOOTP/DHCPv4 reply frame for the current fuzz case.

    All field values come from the module-level fuzz table
    ``tested_parameters[tested_index]``; only the BOOTP transaction id and the
    DHCPv4 message type are supplied by the caller. The concatenation order of
    the ``pack`` results defines the wire layout, so statement order matters.

    :param bootp_transaction_id: transaction id echoed back to the client
    :param dhcpv4_message_type: DHCPv4 option-53 value (2 = Offer, 5 = ACK)
    :return: complete raw frame bytes ready to send on an AF_PACKET socket
    """
    # All fuzz cases exhausted -> stop the script.
    # NOTE(review): info_text() appears to format a string without printing it —
    # confirm whether a print_info() call was intended here.
    if tested_index == len(tested_parameters):
        base.info_text('Exit ...')
        exit(0)
    # BOOTP fixed header (RFC 951 layout).
    bootp_packet: bytes = pack('!B', tested_parameters[tested_index]['BOOTP']['message_type'])  # Message type
    bootp_packet += pack('!B', tested_parameters[tested_index]['BOOTP']['hardware_type'])  # Hardware type: 1 - Ethernet
    bootp_packet += pack('!B', tested_parameters[tested_index]['BOOTP']['hardware_length'])  # Hardware address length: 6 - Ethernet header length
    bootp_packet += pack('!B', tested_parameters[tested_index]['BOOTP']['hops'])  # Number of hops
    bootp_packet += pack('!L', bootp_transaction_id)  # Transaction ID
    bootp_packet += pack('!H', 0)  # Seconds elapsed
    bootp_packet += pack('!H', tested_parameters[tested_index]['BOOTP']['flags'])  # Flags
    bootp_packet += pack('!4s', inet_aton(tested_parameters[tested_index]['BOOTP']['client_ip']))  # CIADDR - Client IP address
    bootp_packet += pack('!4s', inet_aton(tested_parameters[tested_index]['BOOTP']['your_ip']))  # YIADDR - Your client IP address
    bootp_packet += pack('!4s', inet_aton(tested_parameters[tested_index]['BOOTP']['next_server_ip']))  # SIADDR - Next server IP address
    bootp_packet += pack('!4s', inet_aton(tested_parameters[tested_index]['BOOTP']['relay_agent_ip']))  # GIADDR - Relay agent IP address
    bootp_packet += eth.convert_mac(mac_address=tested_parameters[tested_index]['BOOTP']['client_mac'])  # CHADDR - Client hardware address
    bootp_packet += b''.join(pack('B', 0) for _ in range(10))  # Client hardware address padding
    bootp_packet += b''.join(pack('B', 0) for _ in range(64))  # Server host name
    bootp_packet += b''.join(pack('B', 0) for _ in range(128))  # Boot file name
    bootp_packet += dhcpv4.dhcp_magic_cookie  # DHCPv4 magic cookie
    # DHCPv4 options (code, length, value triplets), terminated by option 255.
    dhcpv4_packet: bytes = pack('!3B', 53, 1, dhcpv4_message_type)  # 53 DHCPv4 message type
    dhcpv4_packet += pack('!' '2B' '4s', 54, 4, inet_aton(tested_parameters[tested_index]['DHCP']['server_identifier']))  # 54 DHCPv4 server identifier
    dhcpv4_packet += pack('!' '2B' 'L', 51, 4, tested_parameters[tested_index]['DHCP']['lease_time'])  # 51 - DHCPv4 IP address lease time option
    dhcpv4_packet += pack('!' '2B' '4s', 1, 4, inet_aton(tested_parameters[tested_index]['DHCP']['subnet_mask']))  # 1 - DHCPv4 Subnet mask option
    dhcpv4_packet += pack('!' '2B' '4s', 3, 4, inet_aton(tested_parameters[tested_index]['DHCP']['router']))  # 3 - DHCPv4 Router option (Router IPv4 address)
    dhcpv4_packet += pack('!' '2B' '4s', 6, 4, inet_aton(tested_parameters[tested_index]['DHCP']['dns_server']))  # 6 - DHCPv4 DNS option (Domain name server IPv4 address)
    dhcpv4_packet += pack('!' '2B', 15, len(tested_parameters[tested_index]['DHCP']['domain'])) + \
                     tested_parameters[tested_index]['DHCP']['domain']  # 15 - DHCPv4 Domain name option
    dhcpv4_packet += pack('B', 255)  # 255 - End of DHCPv4 options
    # Lower-layer headers built by the raw_packet helpers (eth / ipv4 / udp
    # are module globals created in the __main__ block).
    eth_header: bytes = eth.make_header(source_mac=tested_parameters[tested_index]['Ethernet']['source_mac_address'],
                                        destination_mac=tested_parameters[tested_index]['Ethernet']['destination_mac_address'],
                                        network_type=tested_parameters[tested_index]['Ethernet']['network_type'])
    ip_header: bytes = ipv4.make_header(source_ip=tested_parameters[tested_index]['Network']['source_ip_address'],
                                        destination_ip=tested_parameters[tested_index]['Network']['destination_ip_address'],
                                        data_len=len(bootp_packet + dhcpv4_packet),
                                        transport_protocol_len=udp.header_length,
                                        transport_protocol_type=udp.header_type)
    udp_header: bytes = udp.make_header(source_port=tested_parameters[tested_index]['Transport']['source_port'],
                                        destination_port=tested_parameters[tested_index]['Transport']['destination_port'],
                                        data_length=len(bootp_packet + dhcpv4_packet))
    return eth_header + ip_header + udp_header + bootp_packet + dhcpv4_packet
# endregion
# region DHCPv4 reply
def reply(packet: Dict):
    """Handle one sniffed DHCPv4 packet: answer Discover with Offer and Request with ACK.

    Tracks BOOTP transaction ids to detect client retries (a repeated id means
    the previous fuzz case broke the client's configuration, so the script
    advances to the next case). After each ACK the target's default gateway is
    checked over SSH to decide whether the fuzz case succeeded.

    :param packet: parsed packet dict from RawSniff
                   (keys observed here: 'Ethernet', 'BOOTP', 'DHCPv4')
    """
    global tested_index
    if 'DHCPv4' in packet.keys():
        # DHCPv4 Discover
        if packet['DHCPv4'][53] == 1:
            base.print_info('Index of tested parameters: ', str(tested_index))
            if packet['BOOTP']['transaction-id'] not in transactions:
                transactions.append(packet['BOOTP']['transaction-id'])
            else:
                # Repeated transaction id: the client retried, move on to the
                # next fuzz case.
                tested_index += 1
            reply_packet = make_reply(bootp_transaction_id=packet['BOOTP']['transaction-id'], dhcpv4_message_type=2)
            raw_socket.send(reply_packet)
            base.print_info('DHCPv4 Discover from: ', packet['Ethernet']['source'])
        # DHCPv4 Request
        if packet['DHCPv4'][53] == 3:
            reply_packet = make_reply(bootp_transaction_id=packet['BOOTP']['transaction-id'], dhcpv4_message_type=5)
            raw_socket.send(reply_packet)
            base.print_info('DHCPv4 Request from: ', packet['Ethernet']['source'])
            sleep(2)  # give the client time to apply the lease
            # Fix: pass the real SSH password (was a broken '<PASSWORD>'
            # placeholder), consistent with the dhclient_over_ssh() call below.
            current_gateway_ipv4_address = get_ipv4_gateway_over_ssh(ssh_user=args.target_ssh_user,
                                                                     ssh_password=args.target_ssh_pass,
                                                                     ssh_pkey=private_key,
                                                                     ssh_host=args.target_ip,
                                                                     os=args.target_os,
                                                                     network_interface=args.target_interface)
            if current_gateway_ipv4_address is not None:
                base.print_success('Index: ', str(tested_index),
                                   ' Gateway: ', current_gateway_ipv4_address,
                                   ' Parameters: ', str(tested_parameters[tested_index]))
            else:
                base.print_error('Index: ', str(tested_index),
                                 ' Gateway: ', 'None',
                                 ' Parameters: ', str(tested_parameters[tested_index]))
            tested_index += 1
            # Restart the target's DHCP client so the next case gets a fresh exchange.
            dhclient_over_ssh(ssh_user=args.target_ssh_user,
                              ssh_password=args.target_ssh_pass,
                              ssh_pkey=private_key,
                              ssh_host=args.target_ip,
                              os=args.target_os,
                              network_interface=args.target_interface)
# endregion
# region Main function
if __name__ == '__main__':
# region Import Raw-packet classes
path.append(dirname(dirname(dirname(abspath(__file__)))))
from raw_packet.Utils.base import Base
from raw_packet.Utils.network import RawEthernet, RawIPv4, RawUDP, RawDHCPv4, RawSniff
from raw_packet.Utils.tm import ThreadManager
base: Base = Base()
eth: RawEthernet = RawEthernet()
ipv4: RawIPv4 = RawIPv4()
udp: RawUDP = RawUDP()
sniff: RawSniff = RawSniff()
dhcpv4: RawDHCPv4 = RawDHCPv4()
thread_manager: ThreadManager = ThreadManager(2)
# endregion
# region Raw socket
raw_socket: socket = socket(AF_PACKET, SOCK_RAW)
# endregion
# region Check user and platform
base.check_user()
base.check_platform()
# endregion
# region Parse script arguments
parser: ArgumentParser = ArgumentParser(description='DHCPv4 fuzzing script')
parser.add_argument('-i', '--interface', help='Set interface name for send ARP packets', default=None)
parser.add_argument('-m', '--target_mac', help='Set target MAC address', required=True)
parser.add_argument('-t', '--target_ip', help='Set target IPv4 address', required=True)
parser.add_argument('-o', '--target_os', help='Set target OS (MacOS, Linux, | |
* cenrace', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
cty_tr_bg_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('cenrace', 'hispanic', 'votingage', 'hhinstlevels', 'hhgq', 'votingage * hispanic',
'hhgq', 'hispanic * cenrace', 'votingage * cenrace', 'votingage * hispanic',
'votingage * hispanic * cenrace',
'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('cenrace', 'hispanic', 'votingage', 'hhinstlevels', 'hhgq', 'votingage * hispanic',
'hhgq', 'hispanic * cenrace', 'votingage * cenrace', 'votingage * hispanic',
'votingage * hispanic * cenrace',
'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total',),
1: ('hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
default_ordering = {
CC.L2_QUERY_ORDERING: {
0: {
0: ('total', 'cenrace', 'hispanic', 'votingage', 'hhinstlevels', 'hhgq', 'votingage * hispanic',
'hhgq', 'hispanic * cenrace', 'votingage * cenrace', 'votingage * hispanic',
'votingage * hispanic * cenrace', 'detailed'),
},
},
CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
0: {
0: ('total', 'cenrace', 'hispanic', 'votingage', 'hhinstlevels', 'hhgq', 'votingage * hispanic',
'hhgq', 'hispanic * cenrace', 'votingage * cenrace', 'votingage * hispanic',
'votingage * hispanic * cenrace', 'detailed'),
},
},
CC.ROUNDER_QUERY_ORDERING: {
0: {
0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
'detailed'),
},
},
}
query_ordering = {}
for geolevel in levels:
if geolevel in ("State"):
query_ordering[geolevel] = st_ordering
elif geolevel in ("County", "Tract", "Block_Group"):
query_ordering[geolevel] = cty_tr_bg_ordering
else:
query_ordering[geolevel] = default_ordering
return query_ordering
class Strategy2a_St_Cty_isoTot(PL94Strategy, USLevelStrategy):
    """Strategy 2a with a separately tuned 'total' rho at State and County.

    The US level omits the 'total' query entirely; every other geolevel
    prepends it to the shared query list.
    """

    @staticmethod
    def make(levels):
        """Return per-geolevel DP query tuples and budget proportions (each sums to 1)."""
        shared_queries = ("hispanic * cenrace11cats", "votingage", "hhinstlevels",
                          "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
        geos_qs_props_dict = defaultdict(lambda: defaultdict(dict))
        geos_qs_props_dict.update({CC.GEODICT_GEOLEVELS: levels})
        for level in geos_qs_props_dict[CC.GEODICT_GEOLEVELS]:
            if level == "US":
                # No 'total' query at the US level; equal split over the rest.
                queries = shared_queries
                props = tuple([Fr(1, 6)] * 6)
            elif level == "State":
                # Separately tuned rho on 'total' (Redistricting).
                queries = ("total",) + shared_queries
                props = (Fr(509, 512), Fr(1, 1024), Fr(1, 1024), Fr(1, 1024),
                         Fr(1, 1024), Fr(1, 1024), Fr(1, 1024))
            elif level == "County":
                # Separately tuned rho on 'total' (Redistricting).
                queries = ("total",) + shared_queries
                props = tuple([Fr(1, 7)] * 7)
            else:
                # Only per-geolevel rho actively tuned elsewhere; equal defaults here.
                queries = ("total",) + shared_queries
                props = tuple([Fr(1, 7)] * 7)
            geos_qs_props_dict[CC.DPQUERIES][level] = queries
            geos_qs_props_dict[CC.QUERIESPROP][level] = props
        return geos_qs_props_dict
class Strategy2a_St_Cty_isoTot_Ordering:
    """Query-ordering companion to Strategy2a_St_Cty_isoTot.

    Supplies, per geolevel, the L2 optimization ordering, the constrain-to
    ordering, and the rounder ordering consumed by the DAS sequential optimizer.
    """

    @staticmethod
    def make(levels):
        """Return {geolevel: {ordering-kind: {pass: {group: (query names, ...)}}}}.

        US has no 'total' query; State/County isolate 'total' in its own first
        group; sub-County geolevels fold 'total' into the first group.
        """
        # levels = USGeolevelsNoTractGroup.getLevels()
        # Ordering used at the US level (no 'total' query).
        us_ordering = {
            CC.L2_QUERY_ORDERING: {
                0: {
                    0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq'),
                    1: ('hhgq',),
                    2: ('hispanic * cenrace11cats * votingage',),
                    3: ('detailed',),
                },
            },
            CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
                0: {
                    0: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels'),
                    1: ('hhgq',),
                    2: ('hispanic * cenrace11cats * votingage',),
                    3: ('detailed',),
                },
            },
            CC.ROUNDER_QUERY_ORDERING: {
                0: {
                    0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
                        'detailed'),
                },
            },
        }
        # Ordering used at State and County ('total' isolated in its own group).
        st_cty_ordering = {
            CC.L2_QUERY_ORDERING: {
                0: {
                    0: ('total',),
                    1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq'),
                    2: ('hhgq',),
                    3: ('hispanic * cenrace11cats * votingage',),
                    4: ('detailed',),
                },
            },
            CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
                0: {
                    0: ('total',),
                    1: ('hispanic * cenrace11cats', 'votingage', 'hhinstlevels'),
                    2: ('hhgq',),
                    3: ('hispanic * cenrace11cats * votingage',),
                    4: ('detailed',),
                },
            },
            CC.ROUNDER_QUERY_ORDERING: {
                0: {
                    0: ('total',),
                    1: ('hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
                        'detailed'),
                },
            },
        }
        # Ordering used below County ('total' folded into the first group).
        subCounty_ordering = {
            CC.L2_QUERY_ORDERING: {
                0: {
                    0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels', 'hhgq'),
                    1: ('hhgq',),
                    2: ('hispanic * cenrace11cats * votingage',),
                    3: ('detailed',),
                },
            },
            CC.L2_CONSTRAIN_TO_QUERY_ORDERING: {
                0: {
                    0: ('total', 'hispanic * cenrace11cats', 'votingage', 'hhinstlevels'),
                    1: ('hhgq',),
                    2: ('hispanic * cenrace11cats * votingage',),
                    3: ('detailed',),
                },
            },
            CC.ROUNDER_QUERY_ORDERING: {
                0: {
                    0: ('total', 'hhgq', 'hhgq * hispanic', 'hhgq * hispanic * cenrace', 'hhgq * votingage * hispanic * cenrace',
                        'detailed'),
                },
            },
        }
        query_ordering = {}
        for geolevel in levels:
            if geolevel == "US":
                query_ordering[geolevel] = us_ordering
            elif geolevel in ("State", "County"):
                query_ordering[geolevel] = st_cty_ordering
            else:
                query_ordering[geolevel] = subCounty_ordering
        return query_ordering
class Strategy2b_St_Cty_isoTot(PL94Strategy, USLevelStrategy):
    """Strategy 2b with a separately tuned 'total' rho at State and County.

    Budget allocations lean heavily on the 'detailed' query; the US level
    omits the 'total' query entirely.
    """

    @staticmethod
    def make(levels):
        """Return per-geolevel DP query tuples and budget proportions (each sums to 1)."""
        shared_queries = ("hispanic * cenrace11cats", "votingage", "hhinstlevels",
                          "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
        unit = Fr(1, 1024)  # smallest allocation slice used throughout
        geos_qs_props_dict = defaultdict(lambda: defaultdict(dict))
        geos_qs_props_dict.update({CC.GEODICT_GEOLEVELS: levels})
        for level in geos_qs_props_dict[CC.GEODICT_GEOLEVELS]:
            if level == "US":
                # No 'total' query at the US level; nearly all rho on 'detailed'.
                queries = shared_queries
                props = (unit,) * 5 + (Fr(1019, 1024),)
            elif level == "State":
                # Separately tuned rho on 'total' (Redistricting).
                queries = ("total",) + shared_queries
                props = (Fr(905, 1024), unit, unit, unit, unit, unit, Fr(57, 512))
            elif level == "County":
                # Separately tuned rho on 'total' (Redistricting).
                queries = ("total",) + shared_queries
                props = (unit,) * 6 + (Fr(1018, 1024),)
            else:
                # Only per-geolevel rho actively tuned elsewhere; defaults here.
                queries = ("total",) + shared_queries
                props = (unit,) * 6 + (Fr(1018, 1024),)
            geos_qs_props_dict[CC.DPQUERIES][level] = queries
            geos_qs_props_dict[CC.QUERIESPROP][level] = props
        return geos_qs_props_dict
class Strategy2b_St_Cty_B_optSpine_ppmfCandidate(PL94Strategy, USLevelStrategy):
    """Strategy 2b ppmf candidate for the optimized spine.

    Separately tuned 'total' rho at State, County and Block; the US level
    omits the 'total' query.
    """

    @staticmethod
    def make(levels):
        """Return per-geolevel DP query tuples and budget proportions (each sums to 1)."""
        shared_queries = ("hispanic * cenrace11cats", "votingage", "hhinstlevels",
                          "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
        unit = Fr(1, 1024)  # smallest allocation slice used throughout
        # ('total' proportion, 'detailed' proportion) for the tuned geolevels;
        # all intermediate queries receive one unit each.
        tuned_levels = {
            "State": (Fr(815, 1024), Fr(204, 1024)),
            "County": (Fr(342, 1024), Fr(677, 1024)),
            "Block": (Fr(190, 1024), Fr(829, 1024)),
        }
        geos_qs_props_dict = defaultdict(lambda: defaultdict(dict))
        geos_qs_props_dict.update({CC.GEODICT_GEOLEVELS: levels})
        for level in geos_qs_props_dict[CC.GEODICT_GEOLEVELS]:
            if level == "US":
                # No 'total' query at the US level; nearly all rho on 'detailed'.
                geos_qs_props_dict[CC.DPQUERIES][level] = shared_queries
                geos_qs_props_dict[CC.QUERIESPROP][level] = (unit,) * 5 + (Fr(1019, 1024),)
            else:
                geos_qs_props_dict[CC.DPQUERIES][level] = ("total",) + shared_queries
                total_prop, detailed_prop = tuned_levels.get(level, (unit, Fr(1018, 1024)))
                geos_qs_props_dict[CC.QUERIESPROP][level] = (total_prop,) + (unit,) * 5 + (detailed_prop,)
        return geos_qs_props_dict
class Strategy2b_St_Cty_B_aianSpine_ppmfCandidate(PL94Strategy, USLevelStrategy):
    """Strategy 2b ppmf candidate for the AIAN spine.

    Identical in structure to the optimized-spine candidate, except the Block
    level allocates 208/1024 to 'total' and 811/1024 to 'detailed'.
    """

    @staticmethod
    def make(levels):
        """Return per-geolevel DP query tuples and budget proportions (each sums to 1)."""
        geos_qs_props_dict = defaultdict(lambda: defaultdict(dict))
        geos_qs_props_dict.update({
            CC.GEODICT_GEOLEVELS: levels,
        })
        for level in geos_qs_props_dict[CC.GEODICT_GEOLEVELS]:
            if level == "US":  # No 'total' query
                geos_qs_props_dict[CC.DPQUERIES][level] = ("hispanic * cenrace11cats", "votingage", "hhinstlevels", "hhgq",
                                                           "hispanic * cenrace11cats * votingage", "detailed")
                geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(1,1024),Fr(1,1024),Fr(1,1024),
                                                             Fr(1,1024),Fr(1,1024),Fr(1019,1024))
            elif level == "State":  # In Redistricting, separately tuned rho on 'total'
                geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
                                                           "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
                geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(815,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
                                                             Fr(1,1024),Fr(1,1024),Fr(204,1024))
            elif level == "County":  # In Redistricting, separately tuned rho on 'total'
                geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
                                                           "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
                geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(342,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
                                                             Fr(1,1024),Fr(1,1024),Fr(677,1024))
            elif level == "Block":
                # AIAN-spine-specific Block allocation (differs from optSpine's 190/829).
                geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
                                                           "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
                geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(208,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
                                                             Fr(1,1024),Fr(1,1024),Fr(811,1024))
            else:  # Only per-geolevel rho actively tuned in other geolevels; query allocations left at default
                geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
                                                           "hhgq", "hispanic * cenrace11cats * votingage", "detailed")
                geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(1,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
                                                             Fr(1,1024),Fr(1,1024),Fr(1018,1024))
        return geos_qs_props_dict
class Strategy2b_St_Cty_BG_optSpine_ppmfCandidate(PL94Strategy, USLevelStrategy):
@staticmethod
def make(levels):
geos_qs_props_dict = defaultdict(lambda: defaultdict(dict))
geos_qs_props_dict.update({
CC.GEODICT_GEOLEVELS: levels,
})
for level in geos_qs_props_dict[CC.GEODICT_GEOLEVELS]:
if level == "US": # No 'total' query
geos_qs_props_dict[CC.DPQUERIES][level] = ("hispanic * cenrace11cats", "votingage", "hhinstlevels", "hhgq",
"hispanic * cenrace11cats * votingage", "detailed")
geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(1,1024),Fr(1,1024),Fr(1,1024),
Fr(1,1024),Fr(1,1024),Fr(1019,1024))
elif level == "State": # In Redistricting, separately tuned rho on 'total'
geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
"hhgq", "hispanic * cenrace11cats * votingage", "detailed")
geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(815,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
Fr(1,1024),Fr(1,1024),Fr(204,1024))
elif level == "County": # In Redistricting, separately tuned rho on 'total'
geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
"hhgq", "hispanic * cenrace11cats * votingage", "detailed")
geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(342,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
Fr(1,1024),Fr(1,1024),Fr(677,1024))
elif level == "Block_Group":
geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
"hhgq", "hispanic * cenrace11cats * votingage", "detailed")
geos_qs_props_dict[CC.QUERIESPROP][level] = (Fr(530,1024),Fr(1,1024),Fr(1,1024),Fr(1,1024),
Fr(1,1024),Fr(1,1024),Fr(489,1024))
else: # Only per-geolevel rho actively tuned in other geolevels; query allocations left at default
geos_qs_props_dict[CC.DPQUERIES][level] = ("total", "hispanic * cenrace11cats", "votingage", "hhinstlevels",
"hhgq", "hispanic * | |
tree
header_all_in_i, unheader_all_in_i = check_header(item)
# Logic for applying and removing headers
if any([header_all_in_p, header_all_in_s, header_all_in_i]):
if item['content'][0] != '*':
item.update(content='* ' + item['content'])
for ci in child_items:
if not ci['content'].startswith('*'):
ci.update(content='* ' + ci['content'])
if any([unheader_all_in_p, unheader_all_in_s]):
if item['content'][0] == '*':
item.update(content=item['content'][2:])
if unheader_all_in_i:
[ci.update(content=ci['content'][2:])
for ci in child_items]
# Logic for recurring lists
if not args.regeneration:
try:
# If old label is present, reset it
if item['r_tag'] == 1:
item['r_tag'] = 0
api.items.update(item['id'])
except:
pass
# If options turned on, start recurring lists logic
if args.regeneration is not None or args.end:
run_recurring_lists_logic(
args, api, item, child_items, child_items_all, regen_labels_id)
# If options turned on, start labelling logic
if label_id is not None:
# Skip processing an item if it has already been checked or is a header
if item['checked'] == 1:
continue
if item['content'].startswith('*'):
# Remove next action label if it's still present
remove_label(item, label_id, overview_item_ids,overview_item_labels)
continue
# Check item type
item_type, item_type_changed = get_item_type(
args, item, project_type)
if item_type is not None:
logging.debug('Identified \'%s\' as %s type',
item['content'], item_type)
# Determine hierarchy types for logic
hierarchy_types = [item_type,
section_type, project_type]
active_types = [type(x) != type(None)
for x in hierarchy_types]
# If it is a parentless task
if item['parent_id'] == 0:
if active_types[0]:
# Do item types
active_type = item_type
add_label(
item, label_id, overview_item_ids, overview_item_labels)
elif active_types[1]:
# Do section types
active_type = section_type
if section_type == 'sequential' or section_type == 's-p':
if not first_found_section:
add_label(
item, label_id, overview_item_ids, overview_item_labels)
first_found_section = True
elif section_type == 'parallel' or section_type == 'p-s':
add_label(
item, label_id, overview_item_ids, overview_item_labels)
elif active_types[2]:
# Do project types
active_type = project_type
if project_type == 'sequential' or project_type == 's-p':
if not first_found_project:
add_label(
item, label_id, overview_item_ids, overview_item_labels)
first_found_project = True
elif project_type == 'parallel' or project_type == 'p-s':
add_label(
item, label_id, overview_item_ids, overview_item_labels)
# Mark other conditions too
if first_found_section == False and active_types[1]:
first_found_section = True
if first_found_project is False and active_types[2]:
first_found_project = True
# If there are children
if len(child_items) > 0:
# Check if item state has changed, if so clean children for good measure
if item_type_changed == 1:
[remove_label(child_item, label_id, overview_item_ids, overview_item_labels)
for child_item in child_items]
# If a sub-task, inherit parent task type
if item['parent_id'] !=0:
try:
active_type = item['parent_type']
except:
pass
# Process sequential tagged items (item_type can overrule project_type)
if active_type == 'sequential' or active_type == 'p-s':
for child_item in child_items:
# Ignore headered children
if child_item['content'].startswith('*'):
continue
# Pass item_type down to the children
child_item['parent_type'] = active_type
# Pass label down to the first child
if child_item['checked'] == 0 and label_id in item['labels']:
add_label(
child_item, label_id, overview_item_ids, overview_item_labels)
remove_label(
item, label_id, overview_item_ids, overview_item_labels)
else:
# Clean for good measure
remove_label(
child_item, label_id, overview_item_ids, overview_item_labels)
# Process parallel tagged items or untagged parents
elif active_type == 'parallel' or (active_type == 's-p' and label_id in item['labels']):
remove_label(
item, label_id, overview_item_ids, overview_item_labels)
for child_item in child_items:
# Ignore headered children
if child_item['content'].startswith('*'):
continue
child_item['parent_type'] = active_type
if child_item['checked'] == 0:
# child_first_found = True
add_label(
child_item, label_id, overview_item_ids, overview_item_labels)
# Remove labels based on start / due dates
# If item is too far in the future, remove the next_action tag and skip
try:
if args.hide_future > 0 and 'due' in item.data and item['due'] is not None:
due_date = datetime.strptime(
item['due']['date'], "%Y-%m-%d")
future_diff = (
due_date - datetime.today()).days
if future_diff >= args.hide_future:
remove_label(
item, label_id, overview_item_ids, overview_item_labels)
continue
except:
# Hide-future not set, skip
continue
# If start-date has not passed yet, remove label
try:
f1 = item['content'].find('start=')
f2 = item['content'].find('start=due-')
if f1 > -1 and f2 == -1:
f_end = item['content'][f1+6:].find(' ')
if f_end > -1:
start_date = item['content'][f1 +
6:f1+6+f_end]
else:
start_date = item['content'][f1+6:]
# If start-date hasen't passed, remove all labels
start_date = datetime.strptime(
start_date, args.dateformat)
future_diff = (
datetime.today()-start_date).days
if future_diff < 0:
remove_label(
item, label_id, overview_item_ids, overview_item_labels)
[remove_label(child_item, label_id, overview_item_ids,
overview_item_labels) for child_item in child_items]
continue
except:
logging.warning(
'Wrong start-date format for item: "%s". Please use "start=<DD-MM-YYYY>"', item['content'])
continue
# Recurring task friendly - remove label with relative change from due date
try:
f = item['content'].find('start=due-')
if f > -1:
f1a = item['content'].find(
'd') # Find 'd' from 'due'
f1b = item['content'].rfind(
'd') # Find 'd' from days
f2 = item['content'].find('w')
f_end = item['content'][f+10:].find(' ')
if f_end > -1:
offset = item['content'][f+10:f+10+f_end-1]
else:
offset = item['content'][f+10:-1]
try:
item_due_date = item['due']['date']
item_due_date = datetime.strptime(
item_due_date, '%Y-%m-%d')
except:
logging.warning(
'No due date to determine start date for item: "%s".', item['content'])
continue
if f1a != f1b and f1b > -1: # To make sure it doesn't trigger if 'w' is chosen
td = timedelta(days=int(offset))
elif f2 > -1:
td = timedelta(weeks=int(offset))
# If we're not in the offset from the due date yet, remove all labels
start_date = item_due_date - td
future_diff = (
datetime.today()-start_date).days
if future_diff < 0:
remove_label(
item, label_id, overview_item_ids, overview_item_labels)
[remove_label(child_item, label_id, overview_item_ids,
overview_item_labels) for child_item in child_items]
continue
except:
logging.warning(
'Wrong start-date format for item: %s. Please use "start=due-<NUM><d or w>"', item['content'])
continue
# call overdue_recurring_completed
if args.overdue is not None and args.overdue:
overdue_recurring_completed(api)
return overview_item_ids, overview_item_labels
# Main
def main():
# Version
current_version = 'v1.5'
# Main process functions.
parser = argparse.ArgumentParser(
formatter_class=make_wide(argparse.HelpFormatter, w=120, h=60))
parser.add_argument('-a', '--api_key',
help='takes your Todoist API Key.', type=str)
parser.add_argument(
'-l', '--label', help='enable next action labelling. Define which label to use.', type=str)
parser.add_argument(
'-r', '--regeneration', help='enable regeneration of sub-tasks in recurring lists. Chose overall mode: 0 - regen off, 1 - regen all (default), 2 - regen only if all sub-tasks are completed. Task labels can be used to overwrite this mode.', nargs='?', const='1', default=None, type=int)
parser.add_argument(
'-o', '--overdue', help='overdue daily tasks next occurence gets set to today instead of tomorrow. Chose mode: 0 - off, 1 - on', nargs='?', const='1', default=None, type=int)
parser.add_argument(
'-e', '--end', help='enable alternative end-of-day time instead of default midnight. Enter a number from 1 to 24 to define which hour is used.', type=int)
parser.add_argument(
'-d', '--delay', help='specify the delay in seconds between syncs (default 5).', default=5, type=int)
parser.add_argument(
'-pp', '--pp_suffix', help='change suffix for parallel-parallel labeling (default "//").', default='//')
parser.add_argument(
'-ss', '--ss_suffix', help='change suffix for sequential-sequential labeling (default "--").', default='--')
parser.add_argument(
'-ps', '--ps_suffix', help='change suffix for parallel-sequential labeling (default "/-").', default='/-')
parser.add_argument(
'-sp', '--sp_suffix', help='change suffix for sequential-parallel labeling (default "-/").', default='-/')
parser.add_argument(
'-df', '--dateformat', help='strptime() format of starting date (default "%%d-%%m-%%Y").', default='%d-%m-%Y')
parser.add_argument(
'-hf', '--hide_future', help='prevent labelling of future tasks beyond a specified number of days.', default=0, type=int)
parser.add_argument(
'--onetime', help='update Todoist once and exit.', action='store_true')
parser.add_argument(
'--nocache', help='disables caching data to disk for quicker syncing.', action='store_true')
parser.add_argument('--debug', help='enable debugging and store detailed to a log file.',
action='store_true')
parser.add_argument('--inbox', help='the method the Inbox should be processed with.',
default=None, choices=['parallel', 'sequential'])
args = parser.parse_args()
print(args)
# Addition of regeneration labels
args.regen_label_names = ('Regen_off', 'Regen_all',
'Regen_all_if_completed')
# Set debug
if args.debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
handlers=[logging.FileHandler(
'debug.log', 'w+', 'utf-8'),
logging.StreamHandler()]
)
# Check for updates
check_for_update(current_version)
# Initialise api
api, label_id, regen_labels_id = initialise(args)
# Start main loop
while True:
start_time = time.time()
sync(api)
# Evaluate projects, sections, and items
overview_item_ids, overview_item_labels = autodoist_magic(
args, api, label_id, regen_labels_id)
# Commit the queue with changes
if label_id is not None:
update_labels(api, label_id, overview_item_ids,
overview_item_labels)
if len(api.queue):
len_api_q = len(api.queue)
api.commit()
if len_api_q == 1:
logging.info(
'%d change committed to Todoist.', len_api_q)
else:
logging.info(
'%d changes committed to Todoist.', len_api_q)
else:
logging.info('No changes in queue, skipping sync.')
# If onetime is set, exit after first execution.
if args.onetime:
break
# Set a delay before next sync
end_time = time.time()
delta_time = end_time - start_time
if args.delay - delta_time < 0:
logging.debug(
'Computation time %d is larger than the specified delay %d. Sleeping skipped.', delta_time, args.delay)
elif args.delay >= 0:
sleep_time = args.delay - delta_time
logging.debug('Sleeping | |
silent_option))
else: os.system("chown -R {} {}".format(owner+":"+group, path))
else:
if sudo: os.system("sudo chown {} {} {}".format(owner+":"+group, path, silent_option))
else: os.system("chown {} {} {}".format(owner+":"+group, path, silent_option))
def check(self, owner=None, group=None, sudo=False, silent=False, iterate=False, recursive=False, path=None): # combine [recursive] and [iterate] to walk all set all files in an directory and check it with the given permission.
	# Ensure [path] is owned by [owner]:[group]; chown it when it is not.
	if path is None: path = self.path
	# default group depends on the operating system.
	if group is None:
		if OS in ["macos"]: group = "wheel"
		elif OS in ["linux"]: group = "root"
		else: raise ValueError("Unsupported operating system [{}].".format(OS))
	_owner_, _group_ = self.get(path=path)
	# NOTE(review): when [owner] is left None this comparison is almost always
	# unequal, so a chown to "None:<group>" would be attempted — callers are
	# expected to pass an explicit owner; confirm before relying on the default.
	if _owner_ != owner or _group_ != group:
		self.set(owner=owner, group=group, sudo=sudo, silent=silent, recursive=recursive, path=path)
	# bug fix: walk the [path] actually being checked instead of always walking
	# self.path (matches the behaviour of Permission.check).
	if recursive and iterate and Files.directory(path):
		for dirpath, subdirs, files in os.walk(path):
			# separate loop variable so the [path] argument is not shadowed.
			for name in subdirs:
				# skip system directories such as lost+found.
				if name not in ["lost+found"]:
					file_path = Formats.FilePath(dirpath+"/"+name)
					file_path.ownership.check(owner=owner, group=group, sudo=sudo, silent=silent)
			for name in files:
				file_path = Formats.FilePath(dirpath+"/"+name)
				file_path.ownership.check(owner=owner, group=group, sudo=sudo, silent=silent)
class Permission(object):
	# Manage the unix permission (chmod) of a file path.
	def __init__(self, path=None, load=False):
		# docs.
		DOCS = {
			"module":"FilePath.Permission",
			"initialized":False,
			"description":[],
			"chapter": "Defaults", }
		# init.
		self.path = path
		self.permission = None
		# optionally read the current permission straight away.
		if load: self.permission = self.get()
	# - info:
	def get(self, path=None):
		# Return the octal permission string of [path], e.g. "644".
		if path is None: path = self.path
		status = os.stat(path)
		permission = oct(status.st_mode)[-3:]
		return permission
	def set(self,
		# the permission (int) (#1).
		permission=None,
		# the path (optional) (overwrites self.path) (#2).
		path=None,
		# root permission required.
		sudo=False,
		# recursive.
		recursive=False,
		# silent.
		silent=False,
	):
		# Apply [permission] to [path] through chmod.
		if path is None: path = self.path
		silent_option = ""
		if silent: silent_option = ' 2> /dev/null'
		if recursive:
			if sudo: os.system("sudo chmod -R {} {} {}".format(permission, path, silent_option))
			else: os.system("chmod -R {} {} {}".format(permission, path, silent_option))
		else:
			if sudo: os.system("sudo chmod {} {} {}".format(permission, path, silent_option))
			else: os.system("chmod {} {} {}".format(permission, path, silent_option))
	def check(self, permission=None, sudo=False, silent=False, iterate=False, recursive=False, path=None): # combine [recursive] and [iterate] to walk all set all files in an directory and check it with the given permission.
		# Ensure [path] has [permission]; set it when it does not.
		# NOTE(review): get() returns a string (e.g. "644"), so pass [permission]
		# as a string for the comparison to ever match — confirm against callers.
		if path is None: path = self.path
		if self.get(path=path) != permission:
			self.set(permission=permission, sudo=sudo, silent=silent, recursive=recursive, path=path)
		if recursive and iterate and Files.directory(path):
			for dirpath, subdirs, files in os.walk(path):
				# separate loop variable so the [path] argument is not shadowed.
				for name in subdirs:
					# skip system directories such as lost+found.
					if name not in ["lost+found"]:
						file_path = Formats.FilePath(dirpath+"/"+name)
						file_path.permission.check(permission=permission, sudo=sudo, silent=silent)
				for name in files:
					file_path = Formats.FilePath(dirpath+"/"+name)
					file_path.permission.check(permission=permission, sudo=sudo, silent=silent)
#
# the string object class.
class String(object):
	# A str wrapper with optional file persistence plus misc string utilities.
	def __init__(self,
		# the string's value (str) (#1).
		string="",
		# the path (str, FilePath) (param #2).
		path=False,
		# load the data on initialization.
		load=False,
		# the default string (will be created if file path does not exist).
		default=None,
	):
		# docs.
		DOCS = {
			"module":"String",
			"initialized":False,
			"description":[],
			"chapter": "Defaults", }
		# init.
		self.string = str(string)
		# path.
		if path == False: self.file_path = self.fp = None # used in local memory (not fysical)
		else: self.file_path = self.fp = Formats.FilePath(path)
		# bug fix: save() has no [array] parameter; passing array= raised a TypeError.
		# NOTE(review): [default] is only meaningful together with a [path];
		# with path=False the Files.exists call would fail on file_path == None.
		if default is not None and not Files.exists(self.file_path.path): self.save(string=default)
		if load: self.load()
	#
	def save(self, string=None, path=None, sudo=False):
		# Write [string] to [path] (both default to self) and cache it.
		if string is None: string = self.string
		if path is None: path = self.file_path.path
		utils.__check_memory_only__(path)
		self.string = str(string)
		return Files.save(path, str(string), format="str", sudo=sudo)
	def load(self, default=None, sudo=False):
		# Read the string from the file path; create it with [default] when missing.
		utils.__check_memory_only__(self.file_path.path)
		if not os.path.exists(self.file_path.path) and default is not None:
			self.save(default, sudo=sudo)
		self.string = Files.load(self.file_path.path, format="str", sudo=sudo)
		return self.string
	def is_numerical(self):
		# True when the string contains no ascii letters; note that symbols and
		# whitespace still count as "numerical".
		for i in ["q", "w", "e", "r", "t", "y", "u", "i", "o", "p", "a", "s", "d", "f", "g", "h", "j", "k", "l", "z", "x", "c", "v", "b", "n", "m"]:
			if i in self.string.lower(): return False
		return True
	def bash(self):
		# Escape shell-special characters for interpolation into a bash command.
		a = self.string.replace('(','\(').replace(')','\)').replace("'","\'").replace(" ","\ ").replace("$","\$").replace("!","\!").replace("?","\?").replace("@","\@").replace("$","\$").replace("%","\%").replace("^","\^").replace("&","\&").replace("*","\*").replace("'","\'").replace('"','\"')
		return a
	def identifier(self):
		# Lower-case, dash-separated identifier version of the string.
		x = self.string.lower().replace(' ','-')
		return x
	def variable_format(self,
		# read-only mapping of special-cased results (never mutated, so safe as a default).
		exceptions={
			"smart_card":"smartcard",
			"smart_cards":"smartcards" ,
			"web_server":"webserver" ,
		},
	):
		# Convert CamelCase to snake_case (e.g. "WebServer" -> "web_server").
		s, c = "", 0
		for i in self.string:
			try:
				n = self.string[c+1]
			except:
				n = "none"
			try:
				p = self.string[c-1]
			except:
				p = "none"
			# insert an underscore before an upper-case char surrounded by lower-case chars.
			if s != "" and i.lower() != i and str(n).lower() == str(n) and str(p).lower() == str(p):
				s += "_"
			s += i.lower()
			c += 1
		if s in list(exceptions.keys()):
			return exceptions[s]
		else:
			return s
	def class_format(self):
		# Convert snake_case to camelCase (e.g. "camel_case" -> "camelCase").
		s, next_capital = "", False
		for i in self.string:
			if i == "_":
				next_capital = True
			elif next_capital:
				s += i.upper()
				# bug fix: reset the flag, otherwise every character after the
				# first underscore was upper-cased.
				next_capital = False
			else:
				s += i
		return s
	def capitalized_scentence(self):
		# Capitalize every word ("hello world" -> "Hello World").
		x = self.string.split(" ")
		cap = [y.capitalize() for y in x]
		return " ".join(cap)
	def capitalized_word(self):
		# Capitalize only the first character ("hello world" -> "Hello world").
		try:
			new = self.string[0].upper()
			c = 0
			for i in self.string:
				if c > 0: new += i
				c += 1
			return new
		except IndexError: return self.string
	def generate(self,
		# the length of the generated string.
		length=6,
		# include digits.
		digits=False,
		# include capital letters.
		capitalize=False,
		# include special characters.
		special=False,
	):
		# Generate a random string (NOT cryptographically secure; uses random,
		# not secrets — do not use for tokens or passwords).
		charset = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
		if capitalize:
			for i in ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]: charset.append(i.upper())
		if digits: digits = ["1","2","3","4","5","6","7","8","9","0"]
		else: digits = []
		if special: special = ["!", "?", "&", "#","@", "*"]
		else: special = []
		s = ""
		for i in range(length):
			# ~40% chance of a digit, ~10% of a special char, otherwise a letter.
			if len(digits) > 0 and random.randrange(1,101) <= 40:
				s += digits[random.randrange(0, len(digits))]
			elif len(special) > 0 and random.randrange(1,101) <= 10:
				s += special[random.randrange(0, len(special))]
			else:
				s += charset[random.randrange(0, len(charset))]
		return s
	#
	# iterate a string (backwards) to check the first occurency of a specified charset.
	def first_occurence(self, charset=[" ", "\n"], reversed=False, string=None):
		# Return the first character from [charset] found in [string]
		# (scanning backwards when [reversed] is True), or None when absent.
		# NOTE: [reversed] shadows the builtin; kept for interface compatibility.
		if string is None: string = self.string
		if reversed:
			c = len(string)-1
			for _ in string:
				char = string[c]
				if char in charset:
					return char
				c -= 1
			return None
		else:
			c = 0
			for _ in string:
				char = string[c]
				if char in charset:
					return char
				c += 1
			return None
	# splice a string into before/after by a first occurence.
	# if include is True and both include_before and inluce_after are False it includes at before.
	def before_after_first_occurence(self, slicer=" ", include=True, include_before=False, include_after=False, string=None):
		if isinstance(slicer, list):
			# resolve a list of slicers to whichever occurs first in the string.
			first = self.first_occurence(charset=slicer, string=string)
			return self.before_after_first_occurence(slicer=first, include=include, include_before=include_before, include_after=include_after, string=string)
		else:
			if string is None: string = self.string
			before, after, slice_count, slices, _last_ = "", "", string.count(slicer), 0, ""
			for char in string:
				# keep a sliding window of len(slicer) characters.
				if len(_last_) >= len(slicer): _last_ = _last_[1:]
				_last_ += char
				if _last_ == slicer:
					slices += 1
					if include:
						if slices != slice_count or include_before:
							before += char
						elif include_after:
							after += char
						else:
							before += char
				elif slices > 0:
					after += char
				else:
					before += char
			return before, after
	# splice a string into before/selected/after by a first occurence.
	def before_selected_after_first_occurence(self, slicer=" ", string=None):
		if string is None: string = self.string
		before, selected, after, slice_count, open, _last_ = "", "", "", string.count(slicer), False, ""
		selected_sliced_count = 0
		for char in string:
			# sliding window sized to the current slicer.
			if isinstance(slicer, str) and len(_last_) >= len(slicer): _last_ = _last_[1:]
			elif isinstance(slicer, list) and len(_last_) >= len(slicer[selected_sliced_count]): _last_ = _last_[1:]
			_last_ += char
			if (isinstance(slicer, str) and _last_ == slicer) or (isinstance(slicer, list) and _last_ == slicer[selected_sliced_count]):
				selected_sliced_count += 1
				selected += char
				if open: open = False
				else: open = True
			elif open:
				after += char
			else:
				before += char
		return before, selected, after
	# splice a string into before/after by a last occurence.
	# if include is True and both include_before and inluce_after are False it includes at before.
	def before_after_last_occurence(self, slicer=" ", include=True, include_before=False, include_after=False, string=None):
		if string is None: string = self.string
		before, after, slice_count, slices, _last_ = "", "", string.count(slicer), 0, ""
		for char in string:
			if len(_last_) >= len(slicer): _last_ = _last_[1:]
			_last_ += char
			if _last_ == slicer:
				slices += 1
				if include:
					if slices != slice_count or include_before:
						before += char
					elif include_after:
						after += char
					else:
						before += char
			elif slices == slice_count:
				after += char
			else:
				before += char
		return before, after
	# splice a string into before/selected/after by a last occurence.
	def before_selected_after_last_occurence(self, slicer=" ", string=None):
		if string is None: string = self.string
		before, selected, after, slice_count, slices, _last_ = "", "", "", string.count(slicer), 0, ""
		for char in string:
			if len(_last_) >= len(slicer): _last_ = _last_[1:]
			_last_ += char
			if _last_ == slicer:
				slices += 1
				selected += char
			elif slices == slice_count:
				after += char
			else:
				before += char
		return before, selected, after
	# get the first text between an 2 string identifiers [start,end] by depth.
	# identifiers must be parameter number 1.
	def between(self, identifiers=["{","}"], depth=1, include=True, string=None):
		# vars.
		if string is None: string = self.string
		keep_last = [len(identifiers[0]), len(identifiers[1])]
		last = ["", ""]
		unadded = ""
		s, open, opened, first_open = "", 0, False, False
		# iterate.
		for i in string:
			# set last & unadded.
			unadded += i
			last[0] += i
			last[1] += i
			if len(last[0]) > keep_last[0]:
				last[0] = str(String(last[0]).remove_first(1))
			if len(last[1]) > keep_last[1]:
				last[1] = str(String(last[1]).remove_first(1))
			# check ids.
			if last[0] == identifiers[0]:
				open += 1
				first_open = True
			elif last[1] == identifiers[1]:
				open -= 1
			if open >= depth:
				if include or open == depth:
					if include and first_open:
						s += identifiers[0]
						unadded = ""
						first_open = False
					else:
						s += unadded
						unadded = ""
				opened = True
			if opened and open < depth:
				if include:
					s += unadded
					unadded = ""
				break
		# remainders.
		if unadded != "" and opened and open < depth:
			if include:
				s += unadded
				unadded = ""
		# handler.
		return Formats.String(s)
	#
	# get the text with betwee & replace the inside between str with a new str.
	def replace_between(self,
		# the between identifiers (list) (#1).
		identifiers=["{","}"],
		# the new string (str) (#2).
		to="",
		# the identifiers depth.
		depth=1,
		# the optional string.
		string=None,
	):
		# Replace the first [identifiers]-delimited span with [to]; updates
		# self.string only when no explicit [string] was passed.
		update = False
		if string is None:
			update = True
			string = self.string
		sliced = self.between(identifiers, depth=depth, include=True, string=string)
		string = string.replace(str(sliced), to)
		if update:
			self.string = string
		return string
	#
	# increase version.
	def increase_version(self):
		# Bump the last numeric component of a version string (e.g. "1.1.9" -> "1.1.10")
		# by delegating to an awk one-liner executed through bash.
		path = "/tmp/increase_version"
		Files.save(path, f"""version='{self.string}"""+"""' && echo $version | awk -F. -v OFS=. 'NF==1{print ++$NF}; NF>1{if(length($NF+1)>length($NF))$(NF-1)++; $NF=sprintf("%0*d", length($NF), ($NF+1)%(10^length($NF))); print}'""")
		return subprocess.check_output([f"bash", path]).decode().replace("\n","")
	#
# | |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""
oef.messages
~~~~~~~~~~~~
This module contains classes to manage serialization of data in Protobuf messages.
"""
from abc import ABC, abstractmethod
from typing import Optional, Union, List
from enum import Enum
from oef.schema import Description
from oef import agent_pb2, fipa_pb2
from oef.query import Query
# Type aliases used by the message-class signatures below.
NoneType = type(None)  # alias; types.NoneType is not available on older Pythons
CFP_TYPES = Union[Query, bytes, NoneType]  # payload types accepted by a CFP message
PROPOSE_TYPES = Union[bytes, List[Description]]  # payload types accepted by a Propose message
class OEFErrorOperation(Enum):
    """Operation code for the OEF. It is returned in the OEF Error messages."""

    # Numeric codes identify which requested operation failed; presumably they
    # mirror the OEF node's protocol enum — confirm against the protobuf schema.
    REGISTER_SERVICE = 0
    UNREGISTER_SERVICE = 1
    REGISTER_DESCRIPTION = 2
    UNREGISTER_DESCRIPTION = 3
class BaseMessage(ABC):
    """
    Abstract base for every message exchanged with the OEF.

    Subclasses must implement :func:`~oef.messages.to_envelope`, which packs
    the message payload into a protobuf envelope.
    """

    def __init__(self, msg_id):
        """
        Store the message identifier.

        :param msg_id: the identifier of the message.
        """
        self.msg_id = msg_id

    @abstractmethod
    def to_envelope(self) -> agent_pb2.Envelope:
        """
        Serialize this message into a protobuf message.

        :return: the envelope.
        """
class RegisterDescription(BaseMessage):
    """
    Message for adding an agent to the Agent Directory of an OEF Node.

    The agent is described by a :class:`~oef.schema.Description` object.
    Used by :func:`~oef.core.OEFCoreInterface.register_agent`.
    """

    def __init__(self, msg_id: int, agent_description: Description):
        """
        Initialize a RegisterDescription message.

        :param msg_id: the identifier of the message.
        :param agent_description: the agent's description.
        """
        super().__init__(msg_id)
        self.agent_description = agent_description

    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack this registration into a protobuf envelope."""
        env = agent_pb2.Envelope()
        env.register_description.CopyFrom(self.agent_description.to_agent_description_pb())
        env.msg_id = self.msg_id
        return env
class RegisterService(BaseMessage):
    """
    Message for adding a service agent to the Service Directory of an OEF Node.

    The service agent is described by a :class:`~oef.schema.Description` object.
    Used by :func:`~oef.core.OEFCoreInterface.register_service`.
    """

    def __init__(self, msg_id: int, service_description: Description):
        """
        Initialize a RegisterService message.

        :param msg_id: the identifier of the message.
        :param service_description: the service agent's description.
        """
        super().__init__(msg_id)
        self.service_description = service_description

    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack this registration into a protobuf envelope."""
        env = agent_pb2.Envelope()
        env.register_service.CopyFrom(self.service_description.to_agent_description_pb())
        env.msg_id = self.msg_id
        return env
class UnregisterDescription(BaseMessage):
    """
    Message for removing an agent from the Agent Directory of an OEF Node.

    Used by :func:`~oef.core.OEFCoreInterface.unregister_agent`.
    """

    def __init__(self, msg_id: int):
        """
        Initialize a UnregisterDescription message.

        :param msg_id: the identifier of the message.
        """
        super().__init__(msg_id)

    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack the (payload-less) unregistration into a protobuf envelope."""
        env = agent_pb2.Envelope()
        env.unregister_description.CopyFrom(agent_pb2.Envelope.Nothing())
        env.msg_id = self.msg_id
        return env
class UnregisterService(BaseMessage):
    """
    This message is used for unregistering a `(service agent, description)` in the Service Directory of an OEF Node.
    The service agent is described by a :class:`~oef.schema.Description` object.
    It is used in the method :func:`~oef.core.OEFCoreInterface.unregister_service`.
    """

    # Consistency fix: annotate [service_description] like RegisterService does.
    def __init__(self, msg_id: int, service_description: Description):
        """
        Initialize a UnregisterService message.

        :param msg_id: the identifier of the message.
        :param service_description: the service agent's description.
        """
        super().__init__(msg_id)
        self.service_description = service_description

    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack the unregistration into a protobuf envelope."""
        envelope = agent_pb2.Envelope()
        envelope.msg_id = self.msg_id
        envelope.unregister_service.CopyFrom(self.service_description.to_agent_description_pb())
        return envelope
class SearchAgents(BaseMessage):
    """
    This message is used for searching agents in the Agent Directory of an OEF Node.
    It contains:

    * a search id, that identifies the search query. This id will be used
      by the sender in order to distinguish different incoming search results.
    * a query, i.e. a list of constraints defined over a data model.

    If everything works correctly, eventually, the sender of the message will receive a
    search result message and the agent's :func:`~oef.core.OEFCoreInterface.on_search_result` will be executed.
    It is used in the method :func:`~oef.core.OEFCoreInterface.search_agents`.
    """

    def __init__(self, msg_id: int, query: Query):
        """
        Initialize a SearchAgents message.

        :param msg_id: the identifier of the message.
        :param query: the query that describe the agent we are looking for.
        """
        super().__init__(msg_id)
        self.query = query

    # Consistency fix: add the return annotation used by every sibling message class.
    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack the search query into a protobuf envelope."""
        envelope = agent_pb2.Envelope()
        envelope.msg_id = self.msg_id
        envelope.search_agents.query.CopyFrom(self.query.to_pb())
        return envelope
class SearchServices(BaseMessage):
    """
    Message for querying the Service Directory of an OEF Node.

    It carries:

    * a search id, used by the sender to tell different incoming search
      results apart.
    * a query: a list of constraints defined over a data model.

    On success the sender eventually receives a search result message, and the
    agent's :func:`~oef.core.OEFCoreInterface.on_search_result` is executed.
    Used by :func:`~oef.core.OEFCoreInterface.search_services`.
    """

    def __init__(self, msg_id: int, query: Query):
        """
        Initialize a SearchServices message.

        :param msg_id: the identifier of the message.
        :param query: the query that describe the agent we are looking for.
        """
        super().__init__(msg_id)
        self.query = query

    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack the search query into a protobuf envelope."""
        env = agent_pb2.Envelope()
        env.search_services.query.CopyFrom(self.query.to_pb())
        env.msg_id = self.msg_id
        return env
class AgentMessage(BaseMessage, ABC):
    """
    This type of message is used for interacting with other agents, via an OEF Node.
    There are five different types of agent message:
    1. :class:`.Message`, to convey a generic message (that is, a sequence of bytes).
    2. :class:`.CFP`, to make a `Call For Proposals` for some resources.
    3. :class:`.Propose`, to make a `Proposal` about a specific resource.
    4. :class:`.Accept`, to accept a previous `Proposal`.
    5. :class:`.Decline`, to decline the negotiation.
    Using message 1 is the most generic way to interact with other OEF agents. It is flexible, but requires
    extra development effort to come up with a working protocol.
    Messages 2-5 are used in the negotiation protocol, where some agents are buyers and others are sellers.
    The protocol is compliant with FIPA specifications.
    """
class Message(AgentMessage):
    """
    A generic agent-to-agent message routed through an OEF Node.
    It carries:
    * a dialogue id, identifying the dialogue the message belongs to.
    * a destination, the public key of the receiving agent.
    * the raw message content, as a sequence of bytes.
    On success the recipient receives the content and its
    :func:`~oef.core.OEFCoreInterface.on_message` callback is executed.
    Sent via :func:`~oef.core.OEFCoreInterface.send_message`.
    """
    def __init__(self, msg_id: int,
                 dialogue_id: int,
                 destination: str,
                 msg: bytes):
        """
        Initialize a simple message.
        :param msg_id: the identifier of the message.
        :param dialogue_id: the identifier of the dialogue.
        :param destination: the public key of the recipient agent.
        :param msg: the content of the message.
        """
        super().__init__(msg_id)
        self.dialogue_id = dialogue_id
        self.destination = destination
        self.msg = msg
    def to_envelope(self) -> agent_pb2.Envelope:
        """Pack this message into an Envelope protobuf carrying a send_message payload."""
        payload = agent_pb2.Agent.Message()
        payload.dialogue_id = self.dialogue_id
        payload.destination = self.destination
        payload.content = self.msg
        envelope = agent_pb2.Envelope()
        envelope.msg_id = self.msg_id
        envelope.send_message.CopyFrom(payload)
        return envelope
class CFP(AgentMessage):
"""
This message is used to send a `Call For Proposals`.
It contains:
* a message id, that is an unique identifier for a message, given the dialogue.
* a dialogue id, that identifies the dialogue in which the message is sent.
* a destination, that is the public key of the recipient of the message.
* a target id, that is, the identifier of the message to whom this message is targeting, in a given dialogue.
* a query, that describes the resources the sender is interested in.
If everything works correctly, eventually, the recipient will receive the content of the message
and the recipient's :func:`~oef.core.OEFCoreInterface.on_cfp` is executed.
It is used in the method :func:`~oef.core.OEFCoreInterface.send_cfp`.
"""
def __init__(self, msg_id: int,
dialogue_id: int,
destination: str,
target: int,
query: CFP_TYPES):
"""
Initialize a `Call For Proposal` message.
:param msg_id: the unique identifier of the message in the dialogue denoted by ``dialogue_id``.
:param dialogue_id: the identifier of the dialogue.
:param destination: the public key of the recipient agent.
:param target: the identifier of the message to whom this message is targeting.
:param query: the query, an instance of `~oef.schema.Query`, ``bytes``, or ``None``.
"""
super().__init__(msg_id)
self.dialogue_id = dialogue_id
self.destination = destination
self.query = query
self.target = target
def to_envelope(self) -> agent_pb2.Agent.Message:
fipa_msg | |
in parms:
hstrainRef = wx.CheckBox(DData,wx.ID_ANY,label=Pa)
hstrainRef.thisown = False
hstrainRef.SetValue(ref)
Indx[hstrainRef.GetId()] = [G2frame.hist,Id]
hstrainRef.Bind(wx.EVT_CHECKBOX, OnHstrainRef)
hstrainSizer.Add(hstrainRef,0,WACV|wx.LEFT,5)
# azmthOff = G2G.ValidatedTxtCtrl(G2frame.dataDisplay,data,'azmthOff',nDig=(10,2),typeHint=float,OnLeave=OnAzmthOff)
hstrainVal = wx.TextCtrl(DData,wx.ID_ANY,'%.3g'%(val),style=wx.TE_PROCESS_ENTER)
Indx[hstrainVal.GetId()] = [G2frame.hist,Id]
hstrainVal.Bind(wx.EVT_TEXT_ENTER,OnHstrainVal)
hstrainVal.Bind(wx.EVT_KILL_FOCUS,OnHstrainVal)
hstrainSizer.Add(hstrainVal,0,WACV)
return hstrainSizer
def PoTopSizer(POData):
    '''Build the top row of the preferred-orientation section: the model
    selector plus, for the spherical-harmonics model, the harmonic order
    selector and a Refine? checkbox.

    POData indices used here: [0] model code ('MD' or 'SH'),
    [2] refine flag, [4] harmonic order.
    '''
    poSizer = wx.FlexGridSizer(0,6,5,5)
    choice = ['March-Dollase','Spherical harmonics']
    # map the stored short code ('MD'/'SH') onto its display name
    POtype = choice[['MD','SH'].index(POData[0])]
    poSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Preferred orientation model '),0,WACV)
    POType = wx.ComboBox(DData,wx.ID_ANY,value=POtype,choices=choice,
        style=wx.CB_READONLY|wx.CB_DROPDOWN)
    POType.Bind(wx.EVT_COMBOBOX, OnPOType)
    poSizer.Add(POType)
    if POData[0] == 'SH':
        # extra controls only meaningful for spherical harmonics:
        # even harmonic orders 0..34 are offered
        poSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Harmonic order: '),0,WACV)
        poOrder = wx.ComboBox(DData,wx.ID_ANY,value=str(POData[4]),choices=[str(2*i) for i in range(18)],
            style=wx.CB_READONLY|wx.CB_DROPDOWN)
        poOrder.Bind(wx.EVT_COMBOBOX,OnPOOrder)
        poSizer.Add(poOrder,0,WACV)
        poRef = wx.CheckBox(DData,wx.ID_ANY,label=' Refine? ')
        poRef.SetValue(POData[2])
        poRef.Bind(wx.EVT_CHECKBOX,OnPORef)
        poSizer.Add(poRef,0,WACV)
    return poSizer
def MDDataSizer(POData):
    '''Build the March-Dollase preferred-orientation controls: the MD ratio
    (with a Refine? checkbox) and its unique axis as "h k l" text.

    POData indices used here: [1] MD ratio, [2] refine flag,
    [3] unique axis (h,k,l).
    '''
    poSizer = wx.BoxSizer(wx.HORIZONTAL)
    poRef = wx.CheckBox(DData,wx.ID_ANY,label=' March-Dollase ratio: ')
    poRef.SetValue(POData[2])
    poRef.Bind(wx.EVT_CHECKBOX,OnPORef)
    poSizer.Add(poRef,0,WACV|wx.LEFT,5)
    # MD ratio is validated in-place; must be non-negative
    poVal = G2G.ValidatedTxtCtrl(DData,POData,1,nDig=(10,3),typeHint=float,min=0.)
    poSizer.Add(poVal,0,WACV)
    poSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Unique axis, H K L: '),0,WACV)
    h,k,l =POData[3]
    # free-text axis entry; parsed/normalized by the OnPOAxis handler
    poAxis = wx.TextCtrl(DData,wx.ID_ANY,'%3d %3d %3d'%(h,k,l),style=wx.TE_PROCESS_ENTER)
    poAxis.Bind(wx.EVT_TEXT_ENTER,OnPOAxis)
    poAxis.Bind(wx.EVT_KILL_FOCUS,OnPOAxis)
    poSizer.Add(poAxis,0,WACV)
    return poSizer
def SHDataSizer(POData):
    '''Build a grid of entry fields for the spherical-harmonics ODF
    coefficients stored in the dict POData[5] (one field per coefficient,
    keys sorted for a stable layout).
    '''
    def OnODFValue(invalid,value,tc):
        # replot size/strain/preferred-orientation whenever a coefficient changes
        G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
    ODFSizer = wx.FlexGridSizer(0,8,2,2)
    ODFkeys = list(POData[5].keys())
    ODFkeys.sort()
    for odf in ODFkeys:
        ODFSizer.Add(wx.StaticText(DData,wx.ID_ANY,odf),0,WACV)
        ODFval = G2G.ValidatedTxtCtrl(DData,POData[5],odf,nDig=(8,3),typeHint=float,OnLeave=OnODFValue)
        ODFSizer.Add(ODFval,0,WACV|wx.LEFT,5)
    return ODFSizer
def SHPenalty(POData):
    '''Build the spherical-harmonics penalty controls: the list of hkls that
    carry a negative-MRD penalty (POData[6]) and the zero-MRD tolerance
    (POData[7]).
    '''
    def OnHKLList(event):
        '''Let the user pick penalty hkls from those generated for this cell.'''
        dlg = G2G.G2MultiChoiceDialog(G2frame, 'Select penalty hkls',
            'Penalty hkls',hkls,filterBox=False)
        try:
            if dlg.ShowModal() == wx.ID_OK:
                POData[6] = [hkls[i] for i in dlg.GetSelections()]
                if not POData[6]:
                    # keep at least one (empty) entry so the combo box below stays valid
                    POData[6] = ['',]
            else:
                return
        finally:
            dlg.Destroy()
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    # candidate hkls are generated from the unit cell and space group
    A = G2lat.cell2A(generalData['Cell'][1:7])
    hkls = G2lat.GenPfHKLs(10,SGData,A)
    shPenalty = wx.BoxSizer(wx.HORIZONTAL)
    shPenalty.Add(wx.StaticText(DData,wx.ID_ANY,' Negative MRD penalty list: '),0,WACV)
    shPenalty.Add(wx.ComboBox(DData,value=POData[6][0],choices=POData[6],
        style=wx.CB_DROPDOWN),0,WACV|wx.LEFT,5)
    hklList = wx.Button(DData,label='Select penalty hkls')
    hklList.Bind(wx.EVT_BUTTON,OnHKLList)
    shPenalty.Add(hklList,0,WACV)
    shPenalty.Add(wx.StaticText(DData,wx.ID_ANY,' Zero MRD tolerance: '),0,WACV)
    shToler = G2G.ValidatedTxtCtrl(DData,POData,7,nDig=(10,2),typeHint=float)
    shPenalty.Add(shToler,0,WACV)
    return shPenalty
def ExtSizer(Type):
    '''Build the extinction-correction controls.

    Two layouts depending on the histogram type:
    * 'HKLF' (single crystal): extinction type and approximation selectors,
      optional Tbar/cos(2ThM) entries (absent for TOF data), and refinable
      Ep/Es/Eg coefficients chosen by extinction type.
    * otherwise (powder): a single refine checkbox plus extinction value.

    Data layout used here: UseList[hist]['Extinction'] is
    [approximation, type, {coefficient: [value, refine-flag], ...}].
    '''
    def OnSCExtType(event):
        '''Store a new extinction type or approximation and rebuild the panel.'''
        Obj = event.GetEventObject()
        item = Indx[Obj.GetId()]
        UseList[item[0]]['Extinction'][item[1]] = Obj.GetValue()
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    def OnEref(event):
        '''Toggle the refine flag of one extinction coefficient (Ep/Es/Eg).'''
        Obj = event.GetEventObject()
        item = Indx[Obj.GetId()]
        UseList[item[0]]['Extinction'][2][item[1]][1] = Obj.GetValue()
    def OnExtRef(event):
        '''Toggle the powder extinction refine flag.'''
        Obj = event.GetEventObject()
        UseList[G2frame.hist]['Extinction'][1] = Obj.GetValue()
    if Type == 'HKLF':
        extSizer = wx.BoxSizer(wx.VERTICAL)
        typeSizer = wx.BoxSizer(wx.HORIZONTAL)
        typeSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Extinction type: '),0,WACV)
        Choices = ['None','Primary','Secondary Type I','Secondary Type II',] # remove 'Secondary Type I & II'
        typeTxt = wx.ComboBox(DData,wx.ID_ANY,choices=Choices,value=UseList[G2frame.hist]['Extinction'][1],
            style=wx.CB_READONLY|wx.CB_DROPDOWN)
        Indx[typeTxt.GetId()] = [G2frame.hist,1]
        typeTxt.Bind(wx.EVT_COMBOBOX,OnSCExtType)
        typeSizer.Add(typeTxt)
        typeSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Approx: '),0,WACV)
        Choices=['Lorentzian','Gaussian']
        approxTxT = wx.ComboBox(DData,wx.ID_ANY,choices=Choices,value=UseList[G2frame.hist]['Extinction'][0],
            style=wx.CB_READONLY|wx.CB_DROPDOWN)
        Indx[approxTxT.GetId()] = [G2frame.hist,0]
        approxTxT.Bind(wx.EVT_COMBOBOX,OnSCExtType)
        typeSizer.Add(approxTxT)
        if UseList[G2frame.hist]['Extinction'][1] == 'None':
            extSizer.Add(typeSizer,0,WACV)
        else:
            extSizer.Add(typeSizer,0,WACV|wx.BOTTOM,5)
            if 'Tbar' in UseList[G2frame.hist]['Extinction'][2]: #skipped for TOF
                valSizer =wx.BoxSizer(wx.HORIZONTAL)
                valSizer.Add(wx.StaticText(DData,wx.ID_ANY,' Tbar(mm):'),0,WACV)
                tbarVal = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Extinction'][2],'Tbar',
                    min=0.,nDig=(10,3),typeHint=float)
                valSizer.Add(tbarVal,0,WACV)
                valSizer.Add(wx.StaticText(DData,wx.ID_ANY,' cos(2ThM):'),0,WACV)
                cos2tm = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Extinction'][2],'Cos2TM',
                    min=0.,max=1.,nDig=(10,3),typeHint=float)
                valSizer.Add(cos2tm,0,WACV)
                extSizer.Add(valSizer,0,WACV)
            val2Sizer =wx.BoxSizer(wx.HORIZONTAL)
            # which coefficients are shown depends on the extinction type
            if 'Primary' in UseList[G2frame.hist]['Extinction'][1]:
                Ekey = ['Ep',]
            elif 'Secondary Type II' == UseList[G2frame.hist]['Extinction'][1]:
                Ekey = ['Es',]
            elif 'Secondary Type I' == UseList[G2frame.hist]['Extinction'][1]:
                Ekey = ['Eg',]
            else:
                Ekey = ['Eg','Es']
            for ekey in Ekey:
                Eref = wx.CheckBox(DData,wx.ID_ANY,label=ekey+' : ')
                Eref.SetValue(UseList[G2frame.hist]['Extinction'][2][ekey][1])
                Indx[Eref.GetId()] = [G2frame.hist,ekey]
                Eref.Bind(wx.EVT_CHECKBOX, OnEref)
                val2Sizer.Add(Eref,0,WACV|wx.LEFT,5)
                Eval = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Extinction'][2][ekey],0,
                    min=0.,nDig=(10,3,'g'),typeHint=float)
                val2Sizer.Add(Eval,0,WACV)
            extSizer.Add(val2Sizer,0,WACV)
    else: #PWDR
        extSizer = wx.BoxSizer(wx.HORIZONTAL)
        extRef = wx.CheckBox(DData,wx.ID_ANY,label=' Extinction: ')
        extRef.SetValue(UseList[G2frame.hist]['Extinction'][1])
        extRef.Bind(wx.EVT_CHECKBOX, OnExtRef)
        extSizer.Add(extRef,0,WACV|wx.LEFT,5)
        extVal = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Extinction'],0,
            min=0.,nDig=(10,2),typeHint=float)
        extSizer.Add(extVal,0,WACV)
    return extSizer
def BabSizer():
    '''Build the row of Babinet solvent-scattering controls: for each of the
    BabA and BabU terms a Refine? checkbox plus a value entry.
    '''
    def OnBabRef(event):
        # store the new refine-flag state for the clicked Babinet term
        checkbox = event.GetEventObject()
        hist,term = Indx[checkbox.GetId()]
        UseList[hist]['Babinet']['Bab'+term][1] = checkbox.GetValue()
    babSizer = wx.BoxSizer(wx.HORIZONTAL)
    for term in ('A','U'):
        refBox = wx.CheckBox(DData,wx.ID_ANY,label=' Babinet '+term+': ')
        refBox.SetValue(UseList[G2frame.hist]['Babinet']['Bab'+term][1])
        Indx[refBox.GetId()] = [G2frame.hist,term]
        refBox.Bind(wx.EVT_CHECKBOX, OnBabRef)
        babSizer.Add(refBox,0,WACV|wx.LEFT,5)
        valBox = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Babinet']['Bab'+term],0,
            nDig=(10,3),min=0.,typeHint=float)
        babSizer.Add(valBox,0,WACV)
    return babSizer
def FlackSizer():
    '''Build the Flack absolute-structure parameter row: a Refine? checkbox
    plus the parameter value entry.
    '''
    def OnFlackRef(event):
        # store the new refine-flag state for the Flack parameter
        UseList[G2frame.hist]['Flack'][1] = event.GetEventObject().GetValue()
    flackSizer = wx.BoxSizer(wx.HORIZONTAL)
    refBox = wx.CheckBox(DData,wx.ID_ANY,label=' Flack parameter: ')
    refBox.SetValue(UseList[G2frame.hist]['Flack'][1])
    refBox.Bind(wx.EVT_CHECKBOX, OnFlackRef)
    flackSizer.Add(refBox,0,WACV|wx.LEFT,5)
    valBox = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Flack'],0,nDig=(10,3),typeHint=float)
    flackSizer.Add(valBox,0,WACV)
    return flackSizer
def twinSizer():
    '''Build the twin-law section.

    Data layout used here (UseList[hist]['Twins'] is a list):
    * element 0 is [matrix-or-bool, [fraction, refine-flag, nNonM]] where
      nNonM counts nonmerohedral twin components and the fraction is
      derived as 1 - (sum of the other fractions);
    * each further element is [twin matrix (or bool for nonmerohedral
      enantiomorph flag), fraction].
    '''
    def OnAddTwin(event):
        '''Append a new twin law (inversion by default) plus the
        nonmerohedral placeholder entries, then rebuild the panel.'''
        twinMat = np.array([[-1,0,0],[0,-1,0],[0,0,-1]])    #inversion by default
        twinVal = 0.0
        UseList[G2frame.hist]['Twins'].append([twinMat,twinVal])
        nNonM = UseList[G2frame.hist]['Twins'][0][1][2]
        for i in range(nNonM):
            UseList[G2frame.hist]['Twins'].append([False,0.0])
        addtwin.SetValue(False)
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    def OnMat(event):
        '''Parse one edited row of a twin matrix; keep the previous row on
        a parse failure and normalize the displayed text.'''
        event.Skip()
        Obj = event.GetEventObject()
        it,im = Indx[Obj.GetId()]
        newMat = Obj.GetValue().split()
        try:
            uvw = [int(newMat[i]) for i in range(3)]
        except ValueError:
            uvw = UseList[G2frame.hist]['Twins'][it][0][im]
        UseList[G2frame.hist]['Twins'][it][0][im] = uvw
        Obj.SetValue('%3d %3d %3d'%(uvw[0],uvw[1],uvw[2]))
    def OnTwinVal(invalid,value,tc):
        '''Rebalance the first twin's fraction so all fractions sum to 1.'''
        # NOTE(review): the Indx lookup result is immediately shadowed by
        # the loop variable below
        it = Indx[tc.GetId()]
        sumTw = 0.
        for it,twin in enumerate(UseList[G2frame.hist]['Twins']):
            if it:
                sumTw += twin[1]
        UseList[G2frame.hist]['Twins'][0][1][0] = 1.-sumTw
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    def OnTwinRef(event):
        '''Toggle the twin-fraction refine flag (stored on element 0).'''
        Obj = event.GetEventObject()
        UseList[G2frame.hist]['Twins'][0][1][1] = Obj.GetValue()
    def OnTwinInv(event):
        '''Toggle the use-enantiomorph flag of a nonmerohedral component.'''
        Obj = event.GetEventObject()
        it = Indx[Obj.GetId()]
        UseList[G2frame.hist]['Twins'][it][0] = Obj.GetValue()
    def OnTwinDel(event):
        '''Delete a twin law plus its nonmerohedral placeholders, then
        rebalance the remaining fractions.'''
        Obj = event.GetEventObject()
        it = Indx[Obj.GetId()]
        nNonM = UseList[G2frame.hist]['Twins'][0][1][2]
        for i in range(nNonM):
            del UseList[G2frame.hist]['Twins'][1+i+it]
        del UseList[G2frame.hist]['Twins'][it]
        sumTw = 0.
        for it,twin in enumerate(UseList[G2frame.hist]['Twins']):
            if it:
                sumTw += twin[1]
        UseList[G2frame.hist]['Twins'][0][1][0] = 1.-sumTw
        if len(UseList[G2frame.hist]['Twins']) == 1:
            # last twin removed: nothing left to refine
            UseList[G2frame.hist]['Twins'][0][1][1] = False
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    nTwin = len(UseList[G2frame.hist]['Twins'])
    twinsizer = wx.BoxSizer(wx.VERTICAL)
    topsizer = wx.BoxSizer(wx.HORIZONTAL)
    topsizer.Add(wx.StaticText(DData,wx.ID_ANY,' Merohedral twins: '),0,WACV)
    #temporary - add twin not allowed if nonmerohedral twins present
#        if nTwin == 1 or 'bool' not in str(type(UseList[G2frame.hist]['Twins'][1][0])):
    addtwin = wx.CheckBox(DData,wx.ID_ANY,label=' Add Twin Law')
    addtwin.Bind(wx.EVT_CHECKBOX, OnAddTwin)
    topsizer.Add(addtwin,0,WACV|wx.LEFT,5)
    twinsizer.Add(topsizer)
    Indx = {}
    if nTwin > 1:
        for it,Twin in enumerate(UseList[G2frame.hist]['Twins']):
            twinMat,twinVal = Twin
            matSizer = wx.BoxSizer(wx.HORIZONTAL)
            if it:
                Style = wx.TE_PROCESS_ENTER
                TwVal = Twin[1]
            else:
                # first entry: derived fraction, shown read-only
                Style = wx.TE_READONLY
                TwVal = Twin[1][0]
            if 'bool' not in str(type(Twin[0])):
                # a real twin matrix: show its three rows
                matSizer.Add(wx.StaticText(DData,-1,' Twin Law: '),0,WACV|wx.LEFT,5)
                for im,Mat in enumerate(twinMat):
                    mat = wx.TextCtrl(DData,wx.ID_ANY,'%3d %3d %3d'%(Mat[0],Mat[1],Mat[2]),
                        style=Style)
                    if it:
                        Indx[mat.GetId()] = [it,im]
                        mat.Bind(wx.EVT_TEXT_ENTER,OnMat)
                        mat.Bind(wx.EVT_KILL_FOCUS,OnMat)
                    else:
                        mat.SetBackgroundColour(VERY_LIGHT_GREY)
                    matSizer.Add(mat,0,WACV|wx.LEFT,5)
            else:
                # bool entry: a nonmerohedral twin component
                matSizer.Add(wx.StaticText(DData,-1,' Nonmerohedral twin component %d: '%(it)),0,WACV|wx.LEFT,5)
                if not SGData['SGInv']:
                    twinv = wx.CheckBox(DData,wx.ID_ANY,label=' Use enantiomorph?')
                    twinv.SetValue(Twin[0])
                    Indx[twinv.GetId()] = it
                    twinv.Bind(wx.EVT_CHECKBOX, OnTwinInv)
                    matSizer.Add(twinv,0,WACV)
            twinsizer.Add(matSizer,0,WACV|wx.LEFT,5)
            valSizer = wx.BoxSizer(wx.HORIZONTAL)
            valSizer.Add(wx.StaticText(DData,-1,label=' Twin element fraction:'),0,WACV)
            if it:
                twinval = G2G.ValidatedTxtCtrl(DData,UseList[G2frame.hist]['Twins'][it],1,nDig=(10,3),
                    min=0.,max=1.,typeHint=float,OnLeave=OnTwinVal)
                Indx[twinval.GetId()] = it
            else:
                twinval = wx.TextCtrl(DData,-1,'%.3f'%(TwVal),style=Style)
                twinval.SetBackgroundColour(VERY_LIGHT_GREY)
            valSizer.Add(twinval,0,WACV)
            if it and 'bool' not in str(type(Twin[0])):
                twindel = wx.CheckBox(DData,wx.ID_ANY,label=' Delete?')
                Indx[twindel.GetId()] = it
                twindel.Bind(wx.EVT_CHECKBOX, OnTwinDel)
                valSizer.Add(twindel,0,WACV)
            elif not it:
                twinref = wx.CheckBox(DData,wx.ID_ANY,label=' Refine?')
                twinref.SetValue(Twin[1][1])
                twinref.Bind(wx.EVT_CHECKBOX, OnTwinRef)
                valSizer.Add(twinref,0,WACV)
            twinsizer.Add(valSizer,0,WACV|wx.LEFT,5)
    return twinsizer
def OnSelect(event):
    '''Handle selection of a different histogram: switch G2frame.hist,
    replot the size/strain/preferred-orientation info, schedule a repaint of
    the histogram section, and restore the keyboard focus afterwards.
    '''
    G2frame.hist = G2frame.dataWindow.HistsInPhase[DData.select.GetSelection()]
    # remember focus before the replot/repaint steals it
    oldFocus = wx.Window.FindFocus()
    G2plt.PlotSizeStrainPO(G2frame,data,G2frame.hist)
    wx.CallLater(100,RepaintHistogramInfo)
    if oldFocus: wx.CallAfter(oldFocus.SetFocus)
def RepaintHistogramInfo(Scroll=0):
    '''Rebuild the histogram-specific (bottom) part of the panel.

    Replaces G2frame.bottomSizer with a freshly built sizer and restores the
    previous vertical scroll position.  Both branches guard against being
    called after the DData window has been destroyed (e.g. when another data
    tree item was selected before the delayed call fires).

    :param Scroll: vertical scroll position to restore after the rebuild.
    '''
    if 'phoenix' in wx.version():
        # wxPython 4 ("phoenix"): Clear(True) also destroys the child windows
        G2frame.bottomSizer.Clear(True)
        # deal with case where this is called after another tree item has been selected
        try:
            DData.Shown
        except RuntimeError:
            if GSASIIpath.GetConfigValue('debug'):
                print('DBG: DData window deleted. Ignoring RepaintHistogramInfo, forcing redraw')
            # Repaint called while DData window deleted, force redraw of entire window
            import GSASIIdataGUI
            G2frame.PickIdText = ''
            wx.CallLater(100,GSASIIdataGUI.SelectDataTreeItem,G2frame,G2frame.GPXtree.Selection)
            return
    else:
        # deal with case where this is called after another tree item has been selected
        if DData.__class__ is not wx._windows.ScrolledWindow:
            # fix bug where this is called after the Window is deleted
            return
        G2frame.bottomSizer.DeleteWindows()
    # stale widget ids must not survive the rebuild
    Indx.clear()
    G2frame.bottomSizer = ShowHistogramInfo()
    mainSizer.Add(G2frame.bottomSizer)
    mainSizer.Layout()
    G2frame.dataWindow.Refresh()
    DData.SetVirtualSize(mainSizer.GetMinSize())
    DData.Scroll(0,Scroll)
    G2frame.dataWindow.SendSizeEvent()
def ShowHistogramInfo():
'''This creates a sizer with all the information pulled out from the Phase/data dict
'''
    def OnUseData(event):
        '''Toggle whether this histogram is used in the refinement and repaint.'''
        Obj = event.GetEventObject()
        UseList[G2frame.hist]['Use'] = Obj.GetValue()
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    def OnLeBail(event):
        '''Toggle LeBail extraction; turning it on from the off state also
        flags a fresh extraction and relabels the checkbox accordingly.'''
        Obj = event.GetEventObject()
        if not UseList[G2frame.hist]['LeBail']:
            UseList[G2frame.hist]['newLeBail'] = True
            Obj.SetLabel('Do new LeBail extraction?')
        UseList[G2frame.hist]['LeBail'] = Obj.GetValue()
    def OnResetSize(event):
        '''Reset the size parameters of the chosen model to their defaults,
        replot, and repaint.  The checkbox acts as a momentary button, so it
        is immediately unchecked again.'''
        Obj = event.GetEventObject()
        Obj.SetValue(False)
        item,name = Indx[Obj.GetId()]
        if name == 'isotropic':
            UseList[item]['Size'][1][0] = 1.0
        elif name == 'uniaxial':
            UseList[item]['Size'][1][0] = 1.0
            UseList[item]['Size'][1][1] = 1.0
        elif name == 'ellipsoidal':
            # diagonal terms back to 1, off-diagonal terms back to 0
            for i in range(3):
                UseList[item]['Size'][4][i] = 1.0
                UseList[item]['Size'][4][i+3] = 0.0
        G2plt.PlotSizeStrainPO(G2frame,data,item)
        wx.CallLater(100,RepaintHistogramInfo,DData.GetScrollPos(wx.VERTICAL))
    def OnSizeAxis(event):
        '''Parse the "h k l" size-axis text; fall back to the stored axis on
        a parse failure or an all-zero entry, then normalize the display.'''
        event.Skip()
        Obj = event.GetEventObject()
        Saxis = Obj.GetValue().split()
        try:
            hkl = [int(Saxis[i]) for i in range(3)]
        except (ValueError,IndexError):
            hkl = UseList[G2frame.hist]['Size'][3]
        if not np.any(np.array(hkl)):
            # a zero vector is not a valid axis; keep the previous one
            hkl = UseList[G2frame.hist]['Size'][3]
        UseList[G2frame.hist]['Size'][3] = hkl
        h,k,l = hkl
        Obj.SetValue('%3d %3d %3d'%(h,k,l))
    def OnFixVals(event):
        '''Record which parameter classes (F/X/U/M combinations) are held
        fixed for this histogram in sequential refinements.'''
        Obj = event.GetEventObject()
        UseList[G2frame.hist]['Fix FXU'] = Obj.GetValue()
if G2frame.hist not in UseList:
G2frame.ErrorDialog('Missing data error',
G2frame.hist+' not in GSAS-II data tree')
return
if 'Use' not in UseList[G2frame.hist]: #patch
UseList[G2frame.hist]['Use'] = True
if 'LeBail' not in UseList[G2frame.hist]:
UseList[G2frame.hist]['LeBail'] = False
if 'newLeBail' not in UseList[G2frame.hist]:
UseList[G2frame.hist]['newLeBail'] = True
if 'Babinet' not in UseList[G2frame.hist]:
UseList[G2frame.hist]['Babinet'] = {'BabA':[0.0,False],'BabU':[0.0,False]}
if 'Fix FXU' not in UseList[G2frame.hist]:
UseList[G2frame.hist]['Fix FXU'] = ' '
bottomSizer = wx.BoxSizer(wx.VERTICAL)
useBox = wx.BoxSizer(wx.HORIZONTAL)
useData = wx.CheckBox(DData,wx.ID_ANY,label='Use Histogram: '+G2frame.hist+' ?')
useData.Bind(wx.EVT_CHECKBOX, OnUseData)
useData.SetValue(UseList[G2frame.hist]['Use'])
useBox.Add(useData,0,WACV)
if not generalData['doPawley'] and 'PWDR' in G2frame.hist[:4]:
lbLabel = 'Redo LeBail extraction? '
if UseList[G2frame.hist]['newLeBail']:
lbLabel = 'Do new LeBail extraction?'
lebail = wx.CheckBox(DData,wx.ID_ANY,label=lbLabel)
lebail.Bind(wx.EVT_CHECKBOX, OnLeBail)
lebail.SetValue(UseList[G2frame.hist]['LeBail'])
useBox.Add(lebail,0,WACV)
if UseList[G2frame.hist]['LeBail']:
G2frame.SetStatusText('To reset LeBail, cycle LeBail check box.',1)
bottomSizer.Add(useBox,0,WACV|wx.TOP|wx.BOTTOM|wx.LEFT,5)
fixBox = wx.BoxSizer(wx.HORIZONTAL)
parmChoice = [' ','X','XU','U','F','FX','FXU','FU']
if generalData['Type'] == 'magnetic':
parmChoice += ['M','MX','MXU','MU','MF','MFX','MFXU','MFU']
fixBox.Add(wx.StaticText(DData,label=' In sequential refinement, fix these in '+generalData['Name']+' for this histogram: '),0,WACV)
fixVals = wx.ComboBox(DData,value=UseList[G2frame.hist]['Fix FXU'],choices=parmChoice,
style=wx.CB_DROPDOWN)
fixVals.Bind(wx.EVT_COMBOBOX,OnFixVals)
fixBox.Add(fixVals,0,WACV)
bottomSizer.Add(fixBox)
#TODO - put Sequential refinement fix F? fix X? fix U? CheckBox here
bottomSizer.Add(ScaleSizer(),0,WACV|wx.BOTTOM,5)
if G2frame.hist[:4] == 'PWDR':
if UseList[G2frame.hist]['Size'][0] == 'isotropic':
isoSizer = wx.BoxSizer(wx.HORIZONTAL)
isoSizer.Add(TopSizer(' Domain size model: ',['isotropic','uniaxial','ellipsoidal'],
'Size',OnSizeType),0,WACV)
isoSizer.Add(LGmixSizer('Size',OnLGmixVal,OnLGmixRef))
isoSizer.Add(ResetSizer('isotropic',OnResetSize),0,WACV)
bottomSizer.Add(isoSizer)
bottomSizer.Add(IsoSizer(u'size(\xb5m): ','Size','%.3f',
OnSizeVal,OnSizeRef),0,WACV|wx.BOTTOM,5)
elif UseList[G2frame.hist]['Size'][0] == 'uniaxial':
uniSizer = wx.BoxSizer(wx.HORIZONTAL)
uniSizer.Add(TopSizer(' Domain size model: ',['isotropic','uniaxial','ellipsoidal'],
'Size',OnSizeType),0,WACV)
uniSizer.Add(LGmixSizer('Size',OnLGmixVal,OnLGmixRef))
uniSizer.Add(ResetSizer('uniaxial',OnResetSize),0,WACV)
bottomSizer.Add(UniSizer('Size',OnSizeAxis),0,WACV)
bottomSizer.Add(uniSizer)
bottomSizer.Add(UniDataSizer(u'size(\xb5m): ','Size','%.3f',OnSizeVal,OnSizeRef)
,0,WACV|wx.BOTTOM,5)
elif UseList[G2frame.hist]['Size'][0] | |
# Repository: florianjehn/IPCC-Reports-Focus-Overview
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 17 10:12:26 2021
@author: <NAME>
"""
import os
import pandas as pd
import numpy as np
import re
import random
def read_ipcc_counts_temp():
"""reads all counts of temperatures for all reports and makes on df"""
files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "temperatures")
all_df = pd.DataFrame()
for file in files:
file_df = pd.read_csv("Results" + os.sep + "temperatures" + os.sep + file, sep=";", index_col=0)
file_df.columns = [file[:-4]]
all_df = pd.concat([all_df, file_df], axis=1)
return all_df.transpose()
def read_ipcc_counts_dates():
"""reads all counts of dates for all reports and makes on df"""
files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "dates")
all_df = pd.DataFrame()
for file in files:
file_df = pd.read_csv("Results" + os.sep + "dates" + os.sep + file, sep=";", index_col=0)
file_df.columns = [file[:-4]]
all_df = pd.concat([all_df, file_df], axis=1)
return all_df.transpose()
def read_ipcc_counts_rfc():
"""reads all counts of reasons of concern for all reports and makes on df"""
files = os.listdir(os.getcwd()+os.sep+"Results"+ os.sep + "reasons_for_concern")
all_df = pd.DataFrame()
for file in files:
file_df = pd.read_csv("Results" + os.sep + "reasons_for_concern" + os.sep + file, sep=";", index_col=0)
file_df.columns = [file[:-4]]
all_df = pd.concat([all_df, file_df], axis=1)
return all_df.transpose()
def read_false_positive():
    """Read the manually counted false/true positives for the temperature
    mentions in the IPCC reports and derive a true-positive rate per entry."""
    folder = "Results" + os.sep + "false_positive_check_files"
    combined = pd.DataFrame()
    for name in os.listdir(os.getcwd() + os.sep + folder):
        # only the "results" files hold counting outcomes; skip everything else
        if "results" not in name:
            continue
        counts = pd.read_csv(folder + os.sep + name, sep=",", index_col=0)
        true_pos = counts["n true positive"]
        false_pos = counts["n false positive"]
        counts["True Positive Rate [%]"] = true_pos / (true_pos + false_pos) * 100
        # move the temperature out of the index so seaborn can use it as a column
        counts["Temperature [°C]"] = counts.index
        counts.reset_index(inplace=True, drop=True)
        combined = pd.concat([combined, counts])
    return combined
def scale_counts(ipcc_counts):
    """Rescale each row to percentages of its own total.

    The df is modified in place (column by column) and also returned.
    """
    # totals are computed once, before any column is overwritten
    row_totals = ipcc_counts.sum(axis=1)
    for column in ipcc_counts.columns:
        ipcc_counts[column] = ipcc_counts[column] / row_totals * 100
    return ipcc_counts
def read_meta():
    """Load the per-report metadata table.

    The year is converted to a string so it can serve as a categorical
    label when plotting.
    """
    path = "Reports" + os.sep + "meta_data_reports.tsv"
    meta = pd.read_csv(path, sep="\t")
    meta["Year"] = meta["Year"].astype("str")
    return meta
def group_temps(ipcc_counts):
    """Collapse the 20 single-temperature count columns into three broader
    warming categories and return only those grouped columns.

    The input df is also modified in place: the three category columns are
    appended to it.
    """
    def _labels(temps):
        # column labels follow the counting convention: leading space,
        # integers without a decimal point (" 1°C"), halves with one (" 1.5°C")
        return [" " + (str(int(t)) if t == int(t) else str(t)) + "°C" for t in temps]
    groups = {
        "0.5°C - 2°C": _labels(np.arange(0.5, 2.1, 0.5)),
        "2.5°C - 4°C": _labels(np.arange(2.5, 4.1, 0.5)),
        "≥ 4.5°C": _labels(np.arange(4.5, 10.1, 0.5)),
    }
    for group_name, columns in groups.items():
        ipcc_counts[group_name] = ipcc_counts[columns].sum(axis=1)
    # select the new columns by name instead of the fragile iloc[:, 20:]
    return ipcc_counts[list(groups)]
def merge_counts_meta(ipcc_counts, meta):
    """Attach the report metadata to the counted temperatures/rfcs.

    The counts df is indexed by count-file name; the metadata df carries the
    matching name in its "count_names" column.
    """
    return meta.merge(ipcc_counts, left_on="count_names", right_index=True)
def lookup_names():
    """Return a lookup dict mapping report file names to count file names
    so the different naming schemes can be merged."""
    lookup_dict = {
        "IPCC_AR6_WGI_Full_Report":"counts_IPCC_AR6_WGI_Full_Report_parsed",
        "IPCC_AR6_WGII_Full_Report":"counts_IPCC_AR6_WGII_FinalDraft_FullReport_parsed",
        "IPCC_AR6_WGIII_FinalDraft_FullReport":"counts_IPCC_AR6_WGIII_FinalDraft_FullReport_parsed",
        "SROCC_FullReport_FINAL":"counts_SROCC_FullReport_FINAL_parsed",
        "210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES":"counts_210714-IPCCJ7230-SRCCL-Complete-BOOK-HRES_parsed",
        "SR15_Full_Report_Low_Res":"counts_SR15_Full_Report_Low_Res_parsed",
        "SYR_AR5_FINAL_full":"counts_SYR_AR5_FINAL_full_wcover_parsed",
        "ipcc_wg3_ar5_full":"counts_ipcc_wg3_ar5_full_parsed",
        "WGIIAR5-PartA_FINAL":"counts_WGIIAR5-PartA_FINAL_parsed",
        "WGIIAR5-PartB_FINAL":"counts_WGIIAR5-PartB_FINAL_parsed",
        "WG1AR5_all_final":"counts_WG1AR5_all_final_parsed",
        "SREX_Full_Report-1":"counts_SREX_Full_Report-1_parsed",
        "SRREN_Full_Report-1":"counts_SRREN_Full_Report-1_parsed",
        "ar4_syr_full_report":"counts_ar4_syr_full_report_parsed",
        "ar4_wg2_full_report":"counts_ar4_wg2_full_report_parsed",
        "ar4_wg1_full_report-1":"counts_ar4_wg1_full_report-1_parsed",
        "ar4_wg3_full_report-1":"counts_ar4_wg3_full_report-1_parsed",
        "sroc_full-1":"counts_sroc_full-1_parsed",
        "srccs_wholereport-1":"counts_srccs_wholereport-1_parsed",
        "SYR_TAR_full_report":"counts_SYR_TAR_full_report_parsed",
        "WGII_TAR_full_report-2":"counts_WGII_TAR_full_report-2_parsed",
        "WGI_TAR_full_report":"counts_WGI_TAR_full_report_parsed",
        "WGIII_TAR_full_report":"counts_WGIII_TAR_full_report_parsed",
        "srl-en-1":"counts_srl-en-1_parsed",
        # fixed typo: value previously ended in "_parsedd" while every other
        # entry ends in "_parsed" — TODO confirm against the actual file name
        "srtt-en-1":"counts_srtt-en-1_parsed",
        "emissions_scenarios-1":"counts_emissions_scenarios-1_parsed",
        "av-en-1":"counts_av-en-1_parsed",
        "The-Regional-Impact":"counts_The-Regional-Impact_parsed",
        "2nd-assessment-en-1":"counts_2nd-assessment-en-1_parsed",
        "ipcc_sar_wg_III_full_report":"counts_ipcc_sar_wg_III_full_report_parsed",
        "ipcc_sar_wg_II_full_report":"counts_ipcc_sar_wg_II_full_report_parsed",
        "ipcc_sar_wg_I_full_report":"counts_ipcc_sar_wg_I_full_report_parsed",
        "climate_change_1994-2":"counts_climate_change_1994-2_parsed",
        # "ipcc-technical-guidelines-1994n-1":"", # could not read in, but also contains no temp mentions
        "ipcc_wg_I_1992_suppl_report_full_report":"counts_ipcc_wg_I_1992_suppl_report_full_report_parsed",
        "ipcc_wg_II_1992_suppl_report_full_report":"counts_ipcc_wg_II_1992_suppl_report_full_report_parsed",
        "ipcc_90_92_assessments_far_full_report":"counts_ipcc_90_92_assessments_far_full_report_parsed",
        "ipcc_far_wg_III_full_report":"counts_ipcc_far_wg_III_full_report_parsed",
        "ipcc_far_wg_II_full_report":"counts_ipcc_far_wg_II_full_report_parsed",
        "ipcc_far_wg_I_full_report":"counts_ipcc_far_wg_I_full_report_parsed",
        }
    return lookup_dict
def create_temp_keys():
    """Build the list of temperature labels the paper looked at
    (0.5°C to 10°C in half-degree steps)."""
    temps = []
    for temp in np.arange(0.5, 10.1, 0.5):
        # whole degrees are written without a decimal point (" 1°C"),
        # halves keep it (" 1.5°C"); the leading space avoids e.g.
        # matching "1.5°C" as "5°C"
        label = str(int(temp)) if temp == int(temp) else str(temp)
        temps.append(" " + label + "°C")
    return temps
def combine_all_raw_strings():
    """Concatenate the raw text of every report into one big search file."""
    folder = os.getcwd() + os.sep + "Raw IPCC Strings"
    csv_files = [name for name in os.listdir(folder) if name[-4:] == ".csv"]
    merged = " "
    for name in csv_files:
        print("Starting with " + name)
        frame = pd.read_csv(folder + os.sep + name, sep="\t", usecols=[0])
        rows = frame[frame.columns[0]].tolist()
        merged += " ".join(str(row) for row in rows)
    # this file is not included in the repository, as it is too large for Github
    with open(folder + os.sep + "all_ipcc_strings.csv", 'w', encoding='utf-8') as f:
        f.write(merged)
def create_temp_dict():
    """Build a zero-initialised counter for every temperature label from
    0.5°C to 10°C in half-degree steps."""
    temp_dict = {}
    for value in np.arange(0.5, 10.5, 0.5):
        # whole degrees lose the decimal point; the leading space keeps e.g.
        # "1.5°C" from also being counted as "5°C"
        if value == int(value):
            label = str(int(value))
        else:
            label = str(value)
        temp_dict[" " + label + "°C"] = 0
    return temp_dict
def get_all_string(report):
    """Return the full text of one raw report file."""
    path = os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()
def count_temperatures(report):
    """Count every temperature between 0.5°C and 10°C (0.5°C steps) in one
    report and save the tally to Results/temperatures."""
    temp_counts = create_temp_dict()
    frame = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
    text = " ".join(str(item) for item in frame[frame.columns[0]].tolist())
    for temp in temp_counts:
        occurrences = text.count(temp)
        print("Found " + temp + " " + str(occurrences) + " time(s)")
        temp_counts[temp] += occurrences
    # save the results for this single report
    result = pd.DataFrame.from_dict(temp_counts, orient="index")
    result.to_csv("Results" + os.sep + "temperatures" + os.sep + "counts_" + report[:-4] + ".csv", sep=";")
def count_temp_in_all_reports():
    """Run the temperature count over every raw report file."""
    for report in os.listdir(os.getcwd() + os.sep + "Raw IPCC Strings"):
        if report[-4:] != ".csv":
            continue
        print("Starting with " + report)
        count_temperatures(report)
def create_rfc_dict():
    """Build a zero-initialised counter for the five IPCC "reasons for concern"."""
    reasons = (
        "unique and threatened systems",
        "extreme climate events",
        "distribution of impacts",
        "aggregate impacts",
        "large-scale singular event",
    )
    return {reason: 0 for reason in reasons}
def count_rfc(report):
    """Count how often each reason for concern appears in one report and
    save the tally to Results/reasons_for_concern."""
    rfc_counts = create_rfc_dict()
    frame = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
    text = " ".join(str(item) for item in frame[frame.columns[0]].tolist())
    for reason in rfc_counts:
        occurrences = text.count(reason)
        print("Found " + reason + " " + str(occurrences) + " time(s)")
        rfc_counts[reason] += occurrences
    # save the results for this single report
    result = pd.DataFrame.from_dict(rfc_counts, orient="index")
    result.to_csv("Results" + os.sep + "reasons_for_concern" + os.sep + "counts_" + report[:-4] + ".csv", sep=";")
def count_rfc_in_all_reports():
    """Run the reason-for-concern count over every raw report file."""
    for report in os.listdir(os.getcwd() + os.sep + "Raw IPCC Strings"):
        if report[-4:] == ".csv":
            print("Starting with " + report)
            count_rfc(report)
def read_ipcc_string():
    """Load the concatenated text of all reports.

    Returns:
        str: ``str(f.readlines())`` -- the repr of the file's line list,
        i.e. a string that still contains brackets, quotes and literal
        ``\\n`` escapes.
    """
    path = os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + "all_ipcc_strings.csv"
    # NOTE(review): str(readlines()) returns the list's repr, not the raw
    # text, so downstream searches run over that repr. If raw text was
    # intended, f.read() would be the fix -- confirm before changing.
    with open(path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    return str(lines)
def find_all_temp_occurence(ipcc_string):
    """Find the start indices of every temperature mention in the text.

    Searches for 1, 1.5 and 2..10 degrees Celsius. Each key carries a
    leading space so that e.g. the "5°C" inside "1.5°C" is not counted.

    Args:
        ipcc_string (str): Text of all reports joined together.

    Returns:
        dict: Maps temperature key (e.g. " 1.5°C") to the list of match
        start indices within ipcc_string.
    """
    temp_dict = {}
    for i in [1, 1.5, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
        # Test if it is a float or not to format it right
        if i == int(i):
            # Add an empty space at the beginning to make sure this is not counting e.g. 1.5°C as 5°C
            key = " " + str(int(i)) + "°C"
        else:
            key = " " + str(i) + "°C"
        # re.escape: the key is used as a regex pattern, and the "." in
        # "1.5" would otherwise match any character (e.g. " 1x5°C").
        temp_dict[key] = [m.start() for m in re.finditer(re.escape(key), ipcc_string)]
    return temp_dict
def get_strings_around_temps(temp_dict, ipcc_string, n_temp_sample=10, sample_length=250):
    """Write random context snippets around temperature mentions to CSV files.

    For each of ``amount_files`` output files, draws up to ``n_temp_sample``
    random occurrences per temperature and writes the surrounding
    ``sample_length`` characters, for manual false-positive checking.

    Args:
        temp_dict (dict): Temperature key -> list of match indices
            (as returned by find_all_temp_occurence).
        ipcc_string (str): Text of all reports joined together.
        n_temp_sample (int): Max number of samples drawn per temperature.
        sample_length (int): Number of characters of context per sample.
    """
    # number of files created with independent samples
    amount_files = 6
    for file_no in range(amount_files):
        out_path = (os.getcwd() + os.sep + "Results" + os.sep + "false_positive_check_files"
                    + os.sep + "false_positive_" + str(file_no + 1) + ".csv")
        with open(out_path, 'w', encoding='utf-8') as f:
            for temp, indices in temp_dict.items():
                # Clamp the sample size: random.sample() raises ValueError
                # when fewer occurrences exist than requested.
                sample = random.sample(indices, min(n_temp_sample, len(indices)))
                for index in sample:
                    # max(0, ...) keeps an early match from producing a
                    # negative slice start (which would wrap around and
                    # yield an empty/wrong snippet).
                    start = max(0, int(index - (sample_length / 2)))
                    f.write(ipcc_string[start:int(index + (sample_length / 2))] + "\n\n")
def count_dates(report):
"""counts all dates between 2000 and 2100 in 10 year steps"""
dates = {str(year):0 for year in range(2000,2105,10)}
report_df = pd.read_csv(os.getcwd() + os.sep + "Raw IPCC Strings" + os.sep + report, sep="\t", usecols=[0])
report_list = report_df[report_df.columns[0]].tolist()
report_str = " ".join([str(item) for item in report_list])
# count how often a temperature occures
for date in dates.keys():
number_of_occurences = report_str.count(date)
print("Found " + date + " " + str(number_of_occurences) + " time(s)")
dates[date] += number_of_occurences
# Save the results for the single pdf
temp_counts_pdf = pd.DataFrame.from_dict(dates, | |
import pickle, glob, sys, csv, warnings
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.metrics import accuracy_score, confusion_matrix, auc, roc_curve
from feature_extraction_utils import _load_file, _save_file, _get_node_info
from scipy.stats import multivariate_normal
from scipy.ndimage.filters import median_filter
from time import time
from mpi4py import MPI
import numpy as np
from utils import *
# Do not display warnings in the output file
warnings.filterwarnings('ignore')
def _LOO(X_tr_, Z_tr_, W_tr_, theta_):
    """Leave-one-out cross-validation score for the MRF segmentation model.

    For every sample along the last axis, trains on the remaining samples
    and evaluates on the held-out one.

    Args:
        X_tr_: Feature array, samples along the last axis.
        Z_tr_: Soft labels, samples along the last axis.
        W_tr_: Hard labels, samples along the last axis.
        theta_: Hyper-parameter vector forwarded to _train/_test.

    Returns:
        float: Mean held-out score, or -1. if any fold fails.
    """
    def __validation_dataset(X_, Z_, W_, i):
        # The i-th sample along the last axis is the validation fold.
        return X_[..., i], Z_[..., i], W_[..., i]
    def __training_dataset(X_, Z_, W_, i):
        # All remaining samples form the training fold.
        x_ = np.delete(X_, i, axis = 2)
        z_ = np.delete(Z_, i, axis = 1)
        w_ = np.delete(W_, i, axis = 1)
        return x_, z_, w_
    try:
        n = X_tr_.shape[-1]
        e_ = np.zeros((n,))
        for i in range(n):
            x_tr_, z_tr_, w_tr_ = __training_dataset(X_tr_, Z_tr_, W_tr_, i)
            x_val_, z_val_, w_val_ = __validation_dataset(X_tr_, Z_tr_, W_tr_, i)
            model_ = _train(x_tr_, w_tr_, theta_)[0]
            e_[i] = _test(x_val_, w_val_, theta_, model_)
        return e_.mean()
    except Exception:
        # Best-effort: a failed fold (singular covariance, bad shapes, ...)
        # flags the whole parameter set as invalid. `except Exception`
        # (instead of the original bare except) still lets
        # KeyboardInterrupt/SystemExit propagate.
        return -1.
def _train(X_, W_, theta_, n_init = 3):
    """Fit the MRF class distributions via ICM and time the fit.

    Args:
        X_: Features, reshaped internally to the (60, 80, D, n) image grid.
        W_: Initial labels per pixel and sample.
        theta_: Hyper-parameters; theta_[1] is gamma, theta_[-1] is beta.
        n_init (int): Number of random ICM restarts.

    Returns:
        tuple: ([[_N_0, _N_1], False], elapsed_seconds); the False flag
        marks the label polarity as not inverted.
    """
    started = time()
    # Model Fit on the 60x80 pixel grid.
    stacked_ = X_.reshape(60, 80, X_.shape[-2], X_.shape[-1])
    _N_0, _N_1 = _Iterated_Conditional_Modes(stacked_, W_, _cliques, beta = theta_[-1], gamma = theta_[1], n_eval = 100, n_init = n_init)
    return [[_N_0, _N_1], False], time() - started
def _test(X_, W_, theta_, model_):
    """Segment one sample with a fitted model and score it against W_.

    Returns the last element of _scores(), i.e. the metric used for
    model selection.
    """
    # Predict Probabilities on the 60x80 image grid.
    image_ = X_.reshape(60, 80, X_.shape[-1])[..., np.newaxis]
    proba_ = _predict_proba(image_, _cliques, beta = theta_[-1], _N_0 = model_[0][0], _N_1 = model_[0][1], T_init = 4., T_min = 1e-4, epsilon = theta_[3], n_eval = 100)
    flat_ = proba_[..., 0].reshape(proba_.shape[0]*proba_.shape[1], proba_.shape[2])
    labels_ = _classify(flat_, prob = theta_[0], invert_label = model_[1])
    return _scores(W_, labels_)[-1]
# Prediction entry point; also reports per-frame computing time.
def _predict(X_, theta_, model_):
    """Segment one sample and report the prediction wall-clock time.

    Returns:
        tuple: (predicted labels flattened per pixel, seconds elapsed).
    """
    image_ = X_.reshape(60, 80, X_.shape[-1])[..., np.newaxis]
    # Start the per-frame timer.
    started = time()
    # Run the segmentation and flatten pixel probabilities.
    proba_ = _predict_proba(image_, _cliques, beta = theta_[-1], _N_0 = model_[0][0], _N_1 = model_[0][1], T_init = 4., T_min = 1e-4, epsilon = theta_[3], n_eval = 100)
    proba_ = np.swapaxes(proba_, 2, 3)
    proba_ = proba_.reshape(proba_.shape[0]*proba_.shape[1]*proba_.shape[2], proba_.shape[3])
    labels_ = _classify(proba_, prob = theta_[0], invert_label = model_[-1])
    # Elapsed time for this frame.
    return labels_, time() - started
# ICM Optimization for Markov Random Field Model
def _Iterated_Conditional_Modes(X_, W_, _cliques, beta, gamma, n_eval, n_init):
    """Fit the two-class MRF segmentation model with Iterated Conditional Modes.

    Alternates between (1) fitting one multivariate normal per class to the
    currently labelled pixels and (2) relabelling every pixel by maximising
    log-likelihood + neighbourhood prior, until the total energy stops
    increasing. The procedure restarts ``n_init`` times from random
    labelings and the class distributions of the best-energy restart win.

    Args:
        X_: Features shaped (M, N, D, n): image rows/cols, feature dim, samples.
        W_: Pixel labels; NOTE the passed-in value is discarded -- each
            restart overwrites it with a random initialization below.
        _cliques: Index (0..2) selecting a clique template from the
            module-level C_0_/C_1_/C_2_ neighbourhood definitions.
        beta: Neighbourhood coupling weight of the prior.
        gamma: Diagonal regularizer added to each class covariance.
        n_eval: Maximum number of ICM sweeps per initialization.
        n_init: Number of random restarts.

    Returns:
        list: [_N_0, _N_1] frozen multivariate normals of the restart that
        reached the highest total energy.
    """
    # Fit Multivariate Normal Distribution to each class samples
    def __class_distribution(X_, W_, M, N, D, n, gamma):
        x_ = X_.swapaxes(2, 3).reshape(M*N*n, D)
        w_ = W_.reshape(M*N*n)
        # Find each class elements
        idx_0_ = w_ == 0
        idx_1_ = w_ == 1
        # sample mean for each class
        mu_0_ = np.mean(x_[idx_0_, :], axis = 0)
        mu_1_ = np.mean(x_[idx_1_, :], axis = 0)
        # sample covariance for each class (gamma*I keeps it invertible)
        E_0_ = np.cov(x_[idx_0_, :].T) + np.eye(D)*gamma
        E_1_ = np.cov(x_[idx_1_, :].T) + np.eye(D)*gamma
        # Define Normal Distribution for each clasee
        return multivariate_normal(mu_0_, E_0_), multivariate_normal(mu_1_, E_1_)
    # Evaluate Likelihood
    def __likelihood(X_, _N_0, _N_1, M, N, D):
        x_ = X_.reshape(M*N, D)
        return _N_0.logpdf(x_), _N_1.logpdf(x_)
    # Energy Potential Function
    def __prior(W_, cliques_, beta, M, N):
        # Prior based on neigborhood class: +beta per agreeing neighbour,
        # -beta per disagreeing one; off-grid neighbours are skipped.
        def ___neigborhood(w, W_, i, j, cliques_, beta, M, N):
            prior = 0
            # Loop over neigbors
            for clique_ in cliques_:
                k = i + clique_[0]
                m = j + clique_[1]
                if k < 0 or m < 0 or k >= M or m >= N:
                    pass
                else:
                    if w == W_[k, m]:
                        prior += beta
                    else:
                        prior -= beta
            return prior
        # Variable Initialization
        prior_0_ = np.zeros((M, N))
        prior_1_ = np.zeros((M, N))
        # Loop over Pixels in an Image
        for i in range(M):
            for j in range(N):
                # Energy function Value and Prior Probability
                prior_0_[i, j] = ___neigborhood(0, W_, i, j, cliques_, beta, M, N)
                prior_1_[i, j] = ___neigborhood(1, W_, i, j, cliques_, beta, M, N)
        return prior_0_, prior_1_
    # Evaluate Energy and Classification
    def __energy(lik_, pri_, M, N):
        # Variables Initialization
        W_ = np.zeros((M, N))
        Z_ = np.zeros((M, N, 2))
        # Per-pixel energy of each class = log-likelihood + prior.
        Z_[..., 0] = lik_[0].reshape(M, N) + pri_[0]
        Z_[..., 1] = lik_[1].reshape(M, N) + pri_[1]
        # Maximum Energy Classification
        idx_ = Z_[..., 0] < Z_[..., 1]
        W_[idx_] = 1
        # Maximum Pixel's Energy
        U_ = Z_[..., 0].copy()
        U_[idx_] = Z_[idx_, 1]
        return U_, W_
    # Random Initialization: roughly half the pixels start as class 1.
    def __rand_init(X_):
        M, N, D, n = X_.shape
        W_init_ = np.zeros((M*N, n))
        for i in range(n):
            # NOTE(review): randint's upper bound is exclusive, so the last
            # pixel index is never drawn, and duplicate draws make the
            # class-1 fraction at most one half -- likely harmless here.
            idx_ = np.random.randint(0, M*N - 1, size = M*N//2)
            W_init_[idx_, i] = 1.
        return W_init_.reshape(M, N, n)
    def __run_icm(X_, W_, cliques_, beta, gamma, n_eval):
        # Constants Initialization
        M, N, D, n = X_.shape
        # Stopping criteria Initialization
        u_k = - np.inf
        U_k_1_ = np.zeros((M, N, n))
        W_k_1_ = np.zeros((M, N, n))
        # Class Distribution Inference
        _N_0, _N_1 = __class_distribution(X_, W_, M, N, D, n, gamma)
        # Loop over No. of evaluation
        for k in range(n_eval):
            # loop over No. of samples
            for i in range(n):
                # Evaluate likelihood function
                lik_ = __likelihood(X_[..., i], _N_0, _N_1, M, N, D)
                # Evaluate prior function
                prior_ = __prior(W_[..., i], cliques_, beta, M, N)
                # Current Evaluation Weights Initialization
                U_k_1_[..., i], W_k_1_[..., i] = __energy(lik_, prior_, M, N)
            # Current Evaluation Total Energy
            u_k_1 = U_k_1_.sum()
            #print('>>> No Iter.: {} Energy: {}'.format(k, u_k_1))
            # Stop if it is a minima (i.e. energy did not increase this sweep)
            if u_k >= u_k_1:
                break
            # If not keep optimizing
            else:
                # Update next iteration objective functions
                W_ = W_k_1_.copy()
                u_k = u_k_1.copy()
                # Class Distribution Inference
                _N_0, _N_1 = __class_distribution(X_, W_, M, N, D, n, gamma)
        return _N_0, _N_1, u_k
    # Variables Initialization
    # Clique templates C_0_/C_1_/C_2_ are module-level constants.
    cliques_ = [C_0_, C_1_, C_1_ + C_2_][_cliques]
    energy_ = np.zeros((n_init,))
    class_ = []
    for i in range(n_init):
        #print('>>> No. Init.: {} '.format(i))
        W_ = __rand_init(X_)
        _N_0, _N_1, u = __run_icm(X_, W_, cliques_, beta, gamma, n_eval)
        energy_[i] = u
        class_.append([_N_0, _N_1])
    # Keep the class distributions of the best-energy restart.
    return class_[np.argmax(energy_)]
# Stochastic Optimization of the Markov Random Field Model
def _predict_proba(X_, cliques_, beta, _N_0, _N_1, n_eval = 5, T_init = 4., T_min = 1e-4, epsilon = 0.75):
# Cooling Function
def __exp_cooling(T, epsilon):
return T * epsilon
# Perturbation Generate from Importance Sampling
def __importance_perturbation(U_, M, N):
E_ = U_.copy()
E_ = np.absolute(E_ - np.max(E_))
E_ = E_ / np.sum(E_)
i_, j_ = np.where(E_ == np.random.choice(E_.flatten(), 1, p = E_.flatten()))
return i_[0], j_[0]
# Evaluate Likelhood
def __likelihood(X_, _N_0, _N_1, M, N, D):
x_ = X_.reshape(M*N, D)
Z_ = np.zeros((M, N, 2))
Z_[..., 0] = _N_0.logpdf(x_).reshape(M, N)
Z_[..., 1] = _N_1.logpdf(x_).reshape(M, N)
return Z_
# Compute the Pixels' Energy
def __eval_energy(Z_, M, N):
# Variables initialization
W_ = np.zeros((M, N))
E_ = Z_[..., 0].copy()
# Maximum Energy Energy Classification
idx_ = Z_[..., 0] < Z_[..., 1]
E_[idx_] = Z_[idx_, 1]
W_[idx_] = 1
return E_, W_
# Prior based on neigborhood class
def __neigborhood(w, W_, i, j, cliques_, beta):
M, N = W_.shape
prior = 0
# Loop over neigbors
for clique_ in cliques_:
k = i + clique_[0]
m = j + clique_[1]
if k < 0 or m < 0 or k >= M or m >= N:
pass
else:
if w == W_[k, m]:
prior += beta
else:
prior -= beta
return prior
# Calcualate the pixels' posterior energy
def __eval_posterior(Z_, U_, W_, i, j, cliques_, beta):
E_ = U_.copy()
G_ = W_.copy()
Z_[i, j, 0] = Z_[i, j, 0] + __neigborhood(0, W_, i, j, cliques_, beta)
Z_[i, j, 1] = Z_[i, j, 1] + __neigborhood(1, W_, i, j, cliques_, beta)
if G_[i, j] == 1:
G_[i, j] = 0
E_[i, j] = Z_[i, j, 0]
else:
G_[i, j] = 1
E_[i, j] = Z_[i, j, 1]
return E_, G_, Z_
# Softmax function to transform energy to probabilities
def __softmax(x_):
z_ = np.exp(x_)
return z_ / np.tile( np.sum(z_, axis = 1)[:, np.newaxis], (1, 2))
# Compute the sum of the values to add to 1
def __hardmax(x_):
x_ = x_ - x_.min()
return np.nan_to_num(x_ / np.tile(np.sum(x_, axis = 1)[:, np.newaxis], (1, 2)))
# Main Run Simulate Anniling Initialization
def __run_optimization(X_, W_, cliques_, beta, _N_0, _N_1, M, N, D):
# Eval Likelihood
Z_ = __likelihood(X_, _N_0, _N_1, M, N, D)
# Initialization of Energy Function
U_, W_ = __eval_energy(Z_, M, N)
# Variables initialization
n_accept = 0
T = | |
"Kernel", lineno),
"Complex",
[self._parse_numeric_string(s)],
None,
lineno
)
def _parse_int(self, s):
    """Parse an integer literal into a ConstantInt (or ConstantBigInt) node.

    The base is inferred from the prefix letter: "X" -> 16, "O" -> 8,
    "B" -> 2, otherwise decimal. Digits are accumulated into an rbigint so
    arbitrarily large literals work; toint() overflow falls back to a
    big-int AST node.
    NOTE(review): the prefix tests are uppercase-only ("X"/"O"/"B"), so
    this presumably runs on lexer-normalised text -- confirm lowercase
    "0x.." prefixes cannot reach here.
    """
    if "X" in s:
        base = 16
    elif "O" in s:
        base = 8
    elif "B" in s:
        base = 2
    else:
        base = 10
    if base != 10:
        # Strip off the leading 0[xob]
        s = s[2:]
    val = rbigint()
    i = 0
    while i < len(s):
        c = ord(s[i])
        # Map the character to its digit value; any character that is not
        # a digit valid in this base terminates the literal.
        if ord("a") <= c <= ord("z"):
            digit = c - ord("a") + 10
        elif ord("A") <= c <= ord("Z"):
            digit = c - ord("A") + 10
        elif ord("0") <= c <= ord("9"):
            digit = c - ord("0")
        else:
            break
        if digit >= base:
            break
        # val = val * base + digit, kept as rbigint to avoid overflow.
        val = val.mul(rbigint.fromint(base)).add(rbigint.fromint(digit))
        i += 1
    try:
        # toint() raises OverflowError when the value exceeds a machine int.
        return ast.ConstantInt(val.toint())
    except OverflowError:
        return ast.ConstantBigInt(val)
# Grammar tables for the rply-generated LR parser: first the full token set
# produced by the lexer, then the operator precedence declarations
# (yacc-style: earlier entries bind less tightly than later ones -- do not
# reorder). `cache_id` lets rply cache the generated parser tables.
pg = ParserGenerator([
    "CLASS", "MODULE", "DEF", "UNDEF", "BEGIN", "RESCUE", "ENSURE", "END",
    "IF", "UNLESS", "THEN", "ELSIF", "ELSE", "CASE", "WHEN", "WHILE",
    "UNTIL", "FOR", "BREAK", "NEXT", "REDO", "RETRY", "IN", "DO",
    "DO_COND", "DO_BLOCK", "RETURN", "YIELD", "SUPER", "SELF", "NIL",
    "TRUE", "FALSE", "AND", "OR", "NOT", "IF_MOD", "UNLESS_MOD",
    "WHILE_MOD", "UNTIL_MOD", "RESCUE_MOD", "ALIAS", "DEFINED",
    "lBEGIN", "lEND", "__LINE__", "__FILE__", "__ENCODING__", "DO_LAMBDA",
    "IDENTIFIER", "FID", "GVAR", "IVAR", "CONSTANT", "CVAR", "LABEL",
    "CHAR", "UPLUS", "UMINUS", "UMINUS_NUM", "POW", "CMP", "EQ", "EQQ",
    "NEQ", "GEQ", "LEQ", "ANDOP", "OROP", "MATCH", "NMATCH", "DOT", "DOT2",
    "DOT3", "AREF", "ASET", "LSHFT", "RSHFT", "COLON2", "COLON3", "ANDDOT",
    "OP_ASGN", "ASSOC", "LPAREN", "LPAREN2", "RPAREN", "LPAREN_ARG",
    "LBRACK", "RBRACK", "LBRACE", "LBRACE_ARG", "STAR", "STAR2", "DSTAR",
    "AMPER", "AMPER2", "TILDE", "PERCENT", "DIVIDE", "PLUS", "MINUS",
    "LT", "GT", "PIPE", "BANG", "CARET", "LCURLY", "RCURLY", "BACK_REF2",
    "SYMBEG", "STRING_BEG", "XSTRING_BEG", "REGEXP_BEG", "WORDS_BEG",
    "QWORDS_BEG", "STRING_DBEG", "STRING_DVAR", "STRING_END", "LAMBDA",
    "LAMBEG", "NTH_REF", "BACK_REF", "STRING_CONTENT", "INTEGER", "FLOAT",
    "REGEXP_END", "SYMBOLS_BEG", "QSYMBOLS_BEG", "RATIONAL", "IMAGINARY",
    "LABEL_END",
    "LITERAL_EQUAL", "LITERAL_COLON", "LITERAL_COMMA", "LITERAL_LBRACKET",
    "LITERAL_SEMICOLON", "LITERAL_QUESTION_MARK", "LITERAL_SPACE",
    "LITERAL_NEWLINE",
], precedence=[
    ("nonassoc", ["LOWEST"]),
    ("nonassoc", ["LBRACE_ARG"]),
    ("nonassoc", ["IF_MOD", "UNLESS_MOD", "WHILE_MOD", "UNTIL_MOD"]),
    ("left", ["OR", "AND"]),
    ("right", ["NOT"]),
    ("nonassoc", ["DEFINED"]),
    ("right", ["LITERAL_EQUAL", "OP_ASGN"]),
    ("left", ["RESCUE_MOD"]),
    ("right", ["LITERAL_QUESTION_MARK", "LITERAL_COLON"]),
    ("nonassoc", ["DOT2", "DOT3"]),
    ("left", ["OROP"]),
    ("left", ["ANDOP"]),
    ("nonassoc", ["CMP", "EQ", "EQQ", "NEQ", "MATCH", "NMATCH"]),
    ("left", ["GT", "GEQ", "LT", "LEQ"]),
    ("left", ["PIPE", "CARET"]),
    ("left", ["AMPER2"]),
    ("left", ["LSHFT", "RSHFT"]),
    ("left", ["PLUS", "MINUS"]),
    ("left", ["STAR2", "DIVIDE", "PERCENT"]),
    ("right", ["UMINUS_NUM", "UMINUS"]),
    ("right", ["POW"]),
    ("right", ["BANG", "TILDE", "UPLUS"]),
], cache_id="topaz")
def error_handler(state, token):
    # Surface parse failures as a ParsingError carrying the offending
    # token's type/text and its source position.
    description = "Token(%s, %s)" % (token.gettokentype(), token.getstr())
    raise ParsingError(description, token.getsourcepos())
pg.error(error_handler)
# --- Top-level productions. The triple-quoted strings inside several
# actions are the corresponding JRuby (Java) grammar actions, kept as a
# reference for the intended semantics.
@pg.production("program : top_compstmt")
def program(self, p):
    """
    program : {
        lexer.setState(LexState.EXPR_BEG);
        support.initTopLocalVariables();
    } top_compstmt {
        // ENEBO: Removed !compile_for_eval which probably is to reduce warnings
        if ($2 != null) {
            /* last expression should not be void */
            if ($2 instanceof BlockNode) {
                support.checkUselessStatement($<BlockNode>2.getLast());
            } else {
                support.checkUselessStatement($2);
            }
        }
        support.getResult().setAST(support.addRootNode($2, support.getPosition($2)));
    }
    """
    # TODO: sym table setup, and useless statement
    # Root node: wrap the top statement list in Main; an empty program
    # becomes Main(Nil()).
    return BoxAST(ast.Main(ast.Block(p[0].getastlist()) if p[0] is not None else ast.Nil()))
@pg.production("top_compstmt : top_stmts opt_terms")
def top_compstmt(self, p):
    # Trailing terminators carry no semantics; forward the statement list.
    return p[0]
@pg.production("top_stmts : none")
def top_stmts_none(self, p):
    return p[0]
@pg.production("top_stmts : top_stmt")
def top_stmts_top_stmt(self, p):
    # A single statement starts a new statement list.
    return self.new_list(p[0])
@pg.production("top_stmts : top_stmts terms top_stmt")
def top_stmts(self, p):
    # Grow the existing statement list.
    return self.append_to_list(p[0], p[2])
@pg.production("top_stmts : error top_stmt")
def top_stmts_error(self, p):
    # Error recovery: drop the bad prefix, keep the following statement.
    return p[1]
@pg.production("top_stmt : stmt")
def top_stmt_stmt(self, p):
    return p[0]
@pg.production("top_stmt : lBEGIN LCURLY top_compstmt RCURLY")
def top_stmt_lbegin(self, p):
    """
    top_stmt : stmt
    | klBEGIN {
        if (support.isInDef() || support.isInSingle()) {
            support.yyerror("BEGIN in method");
        }
    } tLCURLY top_compstmt tRCURLY {
        support.getResult().addBeginNode(new PreExe19Node($1.getPosition(), support.getCurrentScope(), $4));
        $$ = null;
    }
    """
    # BEGIN { } blocks are not implemented yet.
    raise NotImplementedError(p)
@pg.production("bodystmt : compstmt opt_rescue opt_else opt_ensure")
def bodystmt(self, p):
    # Assemble body / rescue / else / ensure into Try* nodes.
    body = ast.Block(p[0].getastlist()) if p[0] is not None else ast.Nil()
    if p[1] is not None:
        except_handlers = p[1].getastlist()
        body = ast.TryExcept(body, except_handlers, ast.Nil())
    elif p[2] is not None:
        # NOTE(review): the `else` clause is only honoured when there is
        # no rescue clause; with both present the else body p[2] is
        # silently dropped by the branch above -- confirm intended.
        body = ast.TryExcept(body, [], p[2].getast())
    if p[3] is not None:
        # `ensure` always runs: wrap everything in TryFinally.
        body = ast.TryFinally(body, ast.Block(p[3].getastlist()))
    return BoxAST(body)
@pg.production("compstmt : stmts opt_terms")
def compstmt(self, p):
    """
    compstmt : stmts opt_terms {
        if ($1 instanceof BlockNode) {
            support.checkUselessStatements($<BlockNode>1);
        }
        $$ = $1;
    }
    """
    # TODO: checkUslessStatements?
    return p[0]
@pg.production("stmts : none")
def stmts_none(self, p):
    return p[0]
@pg.production("stmts : stmt_or_begin")
def stmts_stmt(self, p):
    return self.new_list(p[0])
@pg.production("stmts : stmts terms stmt_or_begin")
def stmts(self, p):
    return self.append_to_list(p[0], p[2])
@pg.production("stmts : error stmt")
def stmts_error(self, p):
    # Error recovery mirrors top_stmts_error above.
    return p[1]
@pg.production("stmt_or_begin : stmt")
def stmt_or_begin(self, p):
    return p[0]
@pg.production("stmt_or_begin : lBEGIN LCURLY top_compstmt RCURLY")
def stmt_or_begin_curly(self, p):
    # Nested BEGIN { } blocks are not implemented.
    raise NotImplementedError
@pg.production("stmt : ALIAS fitem alias_after_fitem fitem")
def stmt_alias_fitem(self, p):
    # `alias new old` for method names.
    return BoxAST(ast.Alias(p[1].getast(), p[3].getast(), p[0].getsourcepos().lineno))
@pg.production("alias_after_fitem : ")
def alias_after_fitem(self, p):
    # Mid-rule action: the token after `alias x` must lex as a method name.
    self.lexer.state = self.lexer.EXPR_FNAME
@pg.production("stmt : ALIAS GVAR GVAR")
def stmt_alias_gvar(self, p):
    """
    kALIAS tGVAR tGVAR {
        $$ = new VAliasNode($1.getPosition(), (String) $2.getValue(), (String) $3.getValue());
    }
    """
    raise NotImplementedError(p)
@pg.production("stmt : ALIAS GVAR BACK_REF")
def stmt_alias_gvar_backref(self, p):
    """
    kALIAS tGVAR tBACK_REF {
        $$ = new VAliasNode($1.getPosition(), (String) $2.getValue(), "$" + $<BackRefNode>3.getType());
    }
    """
    raise NotImplementedError(p)
@pg.production("stmt : ALIAS GVAR NTH_REF")
def stmt_alias_gvar_nref(self, p):
    """
    kALIAS tGVAR tNTH_REF {
        support.yyerror("can't make alias for the number variables");
    }
    """
    raise NotImplementedError(p)
@pg.production("stmt : UNDEF undef_list")
def stmt_undef(self, p):
    return BoxAST(ast.Undef(p[1].getastlist(), p[0].getsourcepos().lineno))
@pg.production("stmt : stmt IF_MOD expr_value")
def stmt_ifmod(self, p):
    # `stmt if cond`: the statement becomes the then-branch.
    return self._new_stmt(ast.If(
        p[2].getast(),
        ast.Block([p[0].getast()]),
        ast.Nil(),
    ))
@pg.production("stmt : stmt UNLESS_MOD expr_value")
def stmt_unlessmod(self, p):
    # `stmt unless cond`: the statement becomes the else-branch.
    return self._new_stmt(ast.If(
        p[2].getast(),
        ast.Nil(),
        ast.Block([p[0].getast()]),
    ))
@pg.production("stmt : BEGIN bodystmt END WHILE_MOD expr_value")
def stmt_while_mod(self, p):
    # `begin ... end while cond` runs the body before the first check
    # (post_check=True), i.e. do-while semantics.
    return self._new_stmt(ast.While(
        p[4].getast(),
        p[1].getast(),
        post_check=True
    ))
# NOTE: this redefines the Python name `stmt_while_mod`, but both
# productions were already registered by the decorator at definition time,
# so the grammar keeps both rules.
@pg.production("stmt : stmt WHILE_MOD expr_value")
def stmt_while_mod(self, p):
    return self._new_stmt(ast.While(
        p[2].getast(),
        ast.Block([p[0].getast()])
    ))
@pg.production("stmt : BEGIN bodystmt END UNTIL_MOD expr_value")
def stmt_until_mod(self, p):
    # do-until variant; see the while form above.
    return self._new_stmt(ast.Until(
        p[4].getast(),
        p[1].getast(),
        post_check=True
    ))
@pg.production("stmt : stmt UNTIL_MOD expr_value")
def stmt_until_mod(self, p):
    return self._new_stmt(ast.Until(
        p[2].getast(),
        ast.Block([p[0].getast()])
    ))
@pg.production("stmt : stmt RESCUE_MOD stmt")
def stmt_rescue_mod(self, p):
    # `stmt rescue handler`: catches StandardError only (see the
    # LookupConstant below), matching the modifier-rescue form.
    lineno = p[1].getsourcepos().lineno
    return self._new_stmt(ast.TryExcept(
        ast.Block([p[0].getast()]),
        [
            ast.ExceptHandler(
                [ast.LookupConstant(ast.Scope(lineno), "StandardError", lineno)],
                None,
                ast.Block([p[2].getast()]),
            )
        ],
        ast.Nil()
    ))
@pg.production("stmt : lEND LCURLY compstmt RCURLY")
def stmt_lend(self, p):
    """
    klEND tLCURLY compstmt tRCURLY {
        if (support.isInDef() || support.isInSingle()) {
            support.warn(ID.END_IN_METHOD, $1.getPosition(), "END in method; use at_exit");
        }
        $$ = new PostExeNode($1.getPosition(), $3);
    }
    """
    raise NotImplementedError(p)
@pg.production("stmt : command_asgn")
def stmt_command_assign(self, p):
    return self.new_stmt(p[0])
@pg.production("stmt : mlhs LITERAL_EQUAL command_call")
def stmt_mlhs_equal_command_call(self, p):
    # Destructuring assignment from a command call: a, b = foo 1, 2
    return self._new_stmt(ast.MultiAssignment(
        p[0].getassignment(),
        p[2].getast()
    ))
@pg.production("stmt : lhs LITERAL_EQUAL mrhs")
def stmt_lhs_equal_mrhs(self, p):
    # Single target with several values: the rhs list becomes an Array.
    return self._new_stmt(ast.Assignment(p[0].getast(), ast.Array(p[2].getastlist())))
@pg.production("stmt : mlhs LITERAL_EQUAL mrhs_arg")
def stmt_mlhs_equal_arg_value(self, p):
    return self._new_stmt(ast.MultiAssignment(
        p[0].getassignment(),
        p[2].getast()
    ))
@pg.production("stmt : expr")
def stmt_expr(self, p):
    return self.new_stmt(p[0])
@pg.production("command_asgn : lhs LITERAL_EQUAL command_rhs")
def command_asgn_lhs_equal_command_call(self, p):
    return BoxAST(ast.Assignment(
        p[0].getast(),
        p[2].getast()
    ))
@pg.production("command_asgn : var_lhs OP_ASGN command_rhs")
def command_asgn_var(self, p):
    # Augmented assignment (+=, ||=, ...) to a plain variable.
    return self.new_augmented_assignment(p[1], p[0], p[2])
# The remaining command_asgn forms (subscript, attribute, constant and
# scoped targets) are not implemented yet.
@pg.production("command_asgn : primary_value LITERAL_LBRACKET opt_call_args rbracket OP_ASGN command_rhs")
def command_asgn_subscript_op_asgn_command_call(self, p):
    raise NotImplementedError(p)
@pg.production("command_asgn : primary_value call_op IDENTIFIER OP_ASGN command_rhs")
def command_asgn_method_op_asgn_command_call(self, p):
    raise NotImplementedError(p)
@pg.production("command_asgn : primary_value call_op CONSTANT OP_ASGN command_rhs")
def command_asgnmethod_constant_op_asgn_command_call(self, p):
    raise NotImplementedError(p)
@pg.production("command_asgn : primary_value COLON2 CONSTANT OP_ASGN command_rhs")
def command_asgnprimary_value_colon_constant_op_asgn_command_call(self, p):
    raise NotImplementedError(p)
@pg.production("command_asgn : primary_value COLON2 IDENTIFIER OP_ASGN command_rhs")
def command_asgnconstant_op_asgn_command_call(self, p):
    raise NotImplementedError(p)
@pg.production("command_asgn : backref OP_ASGN command_rhs")
def command_asgnbackref_op_asgn_command_call(self, p):
    """Augmented assignment to a back-reference ($1 op= rhs) -- unimplemented.

    When this production is implemented it should report the error via
    self.backref_assign_error(p[0]).
    """
    # The original had `self.backref_assign_error(p[0])` *after* the raise,
    # i.e. unreachable dead code; removed and recorded in the docstring.
    raise NotImplementedError(p)
@pg.production("command_rhs : command_call", precedence="OP_ASGN")
def command_rhs_call(self, p):
    return p[0]
@pg.production("command_rhs : command_call RESCUE_MOD stmt")
def command_rhs_call_rescue(self, p):
    # rhs with rescue modifier (x = foo rescue bar): catches StandardError.
    lineno = p[1].getsourcepos().lineno
    return self._new_stmt(ast.TryExcept(
        ast.Block([p[0].getast()]),
        [
            ast.ExceptHandler(
                [ast.LookupConstant(ast.Scope(lineno), "StandardError", lineno)],
                None,
                ast.Block([p[2].getast()]),
            )
        ],
        ast.Nil()
    ))
@pg.production("command_rhs : command_asgn")
def command_rhs_asgn(self, p):
    return p[0]
@pg.production("expr : command_call")
def expr_command_call(self, p):
    return p[0]
@pg.production("expr : expr AND expr")
def expr_and(self, p):
    # Low-precedence `and` keyword.
    return self.new_and(p[0], p[2])
@pg.production("expr : expr OR expr")
def expr_or(self, p):
    return self.new_or(p[0], p[2])
@pg.production("expr : NOT opt_nl expr")
def expr_not(self, p):
    # `not x` is compiled as the method call x.!
    return self.new_call(p[2], self.new_token(p[0], "!", "!"), None)
@pg.production("expr : BANG command_call")
def expr_bang_command_call(self, p):
    return self.new_call(p[1], self.new_token(p[0], "!", "!"), None)
@pg.production("expr : arg")
def expr_arg(self, p):
    return p[0]
@pg.production("expr_value : expr")
def expr_value(self, p):
    """
    expr {
        support.checkExpression($1);
    }
    """
    # TODO: checkExpression?
    return p[0]
@pg.production("command_call : command")
def command_call_command(self, p):
    return p[0]
@pg.production("command_call : block_command")
def command_call_block_command(self, p):
    return p[0]
@pg.production("block_command : block_call")
def block_command_block_call(self, p):
    return p[0]
@pg.production("block_command : block_call call_op2 operation2 command_args")
def block_command_dot(self, p):
    # Method call chained onto a block call: foo { }.bar args
    return self.new_call(p[0], p[2], p[3])
@pg.production("cmd_brace_block : LBRACE_ARG brace_body RCURLY")
def cmd_brace_block(self, p):
    # Brace block attached to a command; the block body opened its own
    # scope, which is saved onto the node and popped here.
    box = self.new_send_block(p[0].getsourcepos().lineno, p[1].getblockparam(), p[1].getblockstmts())
    self.save_and_pop_scope(box.getast())
    return box
@pg.production("fcall : operation")
def fcall(self, p):
    return p[0]
@pg.production("command : fcall command_args", precedence="LOWEST")
def command_operation_command_args(self, p):
    # Paren-less method call with arguments: foo 1, 2
    return self.new_fcall(p[0], p[1])
@pg.production("command : fcall command_args cmd_brace_block")
def command_operation_command_args_cmd_brace_block(self, p):
    return self.combine_send_block(self.new_fcall(p[0], p[1]), p[2])
@pg.production("command : primary_value call_op operation2 command_args", precedence="LOWEST")
def command_method_call_args(self, p):
    return self.new_call(p[0], p[2], p[3])
@pg.production("command : primary_value call_op operation2 command_args | |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Process wind data for adjacent years and save processed data to a NetCDF file per subset. Settings, such as target file name,
are imported from config.py.
Example::
$ python process_data.py : process all latitudes (all subsets)
$ python process_data.py -s subsetID : process individual subset with ID subsetID
$ python process_data.py -s ID1 -e ID2 : process range of subsets starting at subset ID1 until ID2 (inclusively)
$ python process_data.py -h : display this help
"""
import xarray as xr
import numpy as np
from timeit import default_timer as timer
from scipy.stats import percentileofscore
from os.path import join as path_join
import sys
import getopt
import dask
try:
from .utils import compute_level_heights, flatten_dict
from ..utils.convenience_utils import hour_to_date_str
from .config import start_year, final_year, era5_data_dir, model_level_file_name_format, surface_file_name_format,\
output_file_name, output_file_name_subset, read_n_lats_per_subset
from ..utils.wind_resource_utils import calc_power
except ImportError:
# TODO hotfix to be able to run single location plotting...
from utils import compute_level_heights, flatten_dict
from convenience_utils import hour_to_date_str
from config import start_year, final_year, era5_data_dir, model_level_file_name_format, surface_file_name_format,\
output_file_name, output_file_name_subset, read_n_lats_per_subset
from wind_resource_utils import calc_power
# Overwrite default with single-threaded scheduler.
dask.config.set(scheduler='synchronous')
# Set the relevant heights for the different analysis types in meter.
analyzed_heights = {
    'floor': 50.,
    'ceilings': [200., 300., 400., 500., 1000., 1250.],
    'fixed': [100.],
    'integration_ranges': [(50., 150.), (10., 500.)],
}
# Number of entries per multi-valued analysis dimension.
dimension_sizes = {
    'ceilings': len(analyzed_heights['ceilings']),
    'fixed': len(analyzed_heights['fixed']),
    'integration_ranges': len(analyzed_heights['integration_ranges']),
}
integration_range_ids = list(range(dimension_sizes['integration_ranges']))
# Heights above the ground at which the wind speed is evaluated (using interpolation).
heights_of_interest = [500., 440.58, 385.14, 334.22, 287.51, 244.68, 200., 169.50, 136.62,
                       100., 80., 50., 30.96, 10.]
# Add the analyzed_heights values to the heights_of_interest list and remove duplicates.
heights_of_interest = set(heights_of_interest + [analyzed_heights['floor']] + analyzed_heights['ceilings'] +
                          analyzed_heights['fixed'] +
                          [h for int_range in analyzed_heights['integration_ranges'] for h in int_range])
# Sorted from highest to lowest height.
heights_of_interest = sorted(heights_of_interest, reverse=True)
# Determine the analyzed_heights ids in heights_of_interest.
analyzed_heights_ids = {
    'floor': heights_of_interest.index(analyzed_heights['floor']),
    'ceilings': [heights_of_interest.index(h) for h in analyzed_heights['ceilings']],
    'fixed': [heights_of_interest.index(h) for h in analyzed_heights['fixed']],
    'integration_ranges': []
}
# Each integration range maps to the index pair of its two bounds.
for int_range in analyzed_heights['integration_ranges']:
    analyzed_heights_ids['integration_ranges'].append([heights_of_interest.index(int_range[0]),
                                                       heights_of_interest.index(int_range[1])])
def get_statistics(vals):
    """Determine mean and 5th, 32nd, and 50th percentile of input values.

    Args:
        vals (list): Series of floats.

    Returns:
        tuple of float: Tuple with mean and 5th, 32nd, and 50th percentile of series.
    """
    perc5, perc32, perc50 = np.percentile(vals, [5., 32., 50.])
    return np.mean(vals), perc5, perc32, perc50
def get_percentile_ranks(vals, scores=(150., 200., 250.)):
""""Determine percentile ranks corresponding to the scores.
Args:
vals (list): Series of floats.
scores (tuple of float, optional): Percentile scores. Defaults to [5., 32., 50.].
Returns:
list of float: List with percentile ranks.
"""
ranks = [percentileofscore(vals, s) for s in scores]
return ranks
def read_raw_data(start_year, final_year):
    """Read ERA5 wind data for adjacent years.

    Args:
        start_year (int): Read data starting from this year.
        final_year (int): Read data up to this year.

    Returns:
        tuple of Dataset, ndarray, ndarray, ndarray, ndarray, and int: Tuple containing reading object of multiple
        wind data (netCDF) files, longitudes of grid, latitudes of grid, model level numbers, timestamps in hours
        since 1900-01-01 00:00:0.0, and the index of the first evaluated model level.
    """
    # Construct the list of input NetCDF files
    ml_files = []
    sfc_files = []
    for y in range(start_year, final_year+1):
        for m in range(1, 13):
            ml_files.append(path_join(era5_data_dir, model_level_file_name_format.format(y, m)))
            sfc_files.append(path_join(era5_data_dir, surface_file_name_format.format(y, m)))
    # Load the data from the NetCDF files.
    ds = xr.open_mfdataset(ml_files+sfc_files, decode_times=False)
    lons = ds['longitude'].values
    lats = ds['latitude'].values
    levels = ds['level'].values  # Model level numbers.
    hours = ds['time'].values  # Hours since 1900-01-01 00:00:0.0, see: print(nc.variables['time']).
    dlevels = np.diff(levels)
    if not (np.all(dlevels == 1) and levels[-1] == 137):
        # Keep only the trailing run of consecutive levels: scanning the
        # level gaps from the end, i_highest_level is the index of the
        # level just above the last (highest) gap.
        i_highest_level = len(levels) - np.argmax(dlevels[::-1] > 1) - 1
        print("Not all the downloaded model levels are consecutive. Only model levels up to {} are evaluated."
              .format(levels[i_highest_level]))
        levels = levels[i_highest_level:]
    else:
        i_highest_level = 0
    return ds, lons, lats, levels, hours, i_highest_level
def merge_output_files(start_year, final_year, max_subset_id):
    """Merge the per-subset output files into one total output NetCDF file.

    Args:
        start_year (int): First year of processing.
        final_year (int): Final year of processing.
        max_subset_id (int): Maximal subset id (inclusive).

    Returns:
        int: 0 on completion.
    """
    common = {'start_year': start_year,
              'final_year': final_year,
              'max_lat_subset_id': max_subset_id}
    subset_files = []
    for subset_id in range(max_subset_id + 1):
        subset_files.append(output_file_name_subset.format(lat_subset_id=subset_id, **common))
    print('All data for the years {} to {} is read from subset_files from 0 to {}'.format(start_year, final_year,
                                                                                          max_subset_id))
    # Concatenate the subsets back together along latitude.
    merged = xr.open_mfdataset(subset_files, concat_dim='latitude')
    merged.to_netcdf(output_file_name.format(start_year=start_year, final_year=final_year))
    return 0
def check_for_missing_data(hours):
    """Print a message for every gap in the hourly timestamp series.

    Args:
        hours (list): Hour timestamps (hours since 1900-01-01); consecutive
            entries are expected to differ by exactly one hour.
    """
    d_hours = np.diff(hours)
    gaps = d_hours != 1
    if np.any(gaps):
        # np.argwhere returns an (n, 1) array; flatten() yields plain
        # scalar indices so hours[i] / hours[i+1] are scalars rather than
        # 1-element arrays (the original passed arrays to the format call).
        for i in np.argwhere(gaps).flatten():
            print("Gap found between {} and {}.".format(hour_to_date_str(hours[i]),
                                                        hour_to_date_str(hours[i+1])))
def process_grid_subsets(output_file, start_subset_id=0, end_subset_id=-1):
""""Execute analyses on the data of the complete grid and save the processed data to a netCDF file.
By default all subsets are analyzed
Args:
output_file (str): Name of netCDF file to which the results are saved for the respective
subset. (including format {} placeholders)
start_subset_id (int): Starting subset id to be analyzed
end_subset_id (int): Last subset id to be analyzed
(set to -1 to process all subsets after start_subset_id)
"""
ds, lons, lats, levels, hours, i_highest_level = read_raw_data(start_year, final_year)
check_for_missing_data(hours)
# Reading the data of all grid points from the NetCDF file all at once requires a lot of memory. On the other hand,
# reading the data of all grid points one by one takes up a lot of CPU. Therefore, the dataset is analysed in
# pieces: the subsets are read and processed consecutively.
n_subsets = int(np.ceil(float(len(lats)) / read_n_lats_per_subset))
# Define subset range to be processed in this run
if end_subset_id == -1:
subset_range = range(start_subset_id, n_subsets)
else:
subset_range = range(start_subset_id, end_subset_id+1)
if subset_range[-1] > (n_subsets-1):
raise ValueError("Requested subset ID ({}) is higher than maximal subset ID {}."
.format(subset_range[-1], (n_subsets-1)))
# Loop over all specified subsets to write processed data to the output file.
counter = 0
total_iters = len(lats) * len(lons)*len(subset_range)/n_subsets
start_time = timer()
for i_subset in subset_range:
# Find latitudes corresponding to the current i_subset
i_lat0 = i_subset * read_n_lats_per_subset
if i_lat0+read_n_lats_per_subset < len(lats):
lat_ids_subset = range(i_lat0, i_lat0 + read_n_lats_per_subset)
else:
lat_ids_subset = range(i_lat0, len(lats))
lats_subset = lats[lat_ids_subset]
print("Subset {}, Latitude(s) analysed: {} to {}".format(i_subset, lats_subset[0], lats_subset[-1]))
# Initialize result arrays for this subset
res = initialize_result_dict(lats_subset, lons)
print(' Result array configured, reading subset input now, time lapsed: {:.2f} hrs'
.format(float(timer()-start_time)/3600))
# Read data for the subset latitudes
v_levels_east = ds.variables['u'][:, i_highest_level:, lat_ids_subset, :].values
v_levels_north = ds.variables['v'][:, i_highest_level:, lat_ids_subset, :].values
v_levels = (v_levels_east**2 + v_levels_north**2)**.5
t_levels = ds.variables['t'][:, i_highest_level:, lat_ids_subset, :].values
q_levels = ds.variables['q'][:, i_highest_level:, lat_ids_subset, :].values
try:
surface_pressure = ds.variables['sp'][:, lat_ids_subset, :].values
except KeyError:
surface_pressure = np.exp(ds.variables['lnsp'][:, lat_ids_subset, :].values)
print(' Input read, performing statistical analysis now, time lapsed: {:.2f} hrs'
.format(float(timer()-start_time)/3600))
for i_lat_in_subset in range(len(lat_ids_subset)): # Saves a file for each subset.
for i_lon in range(len(lons)):
if (i_lon % 20) == 0: # Give processing info every 20 longitudes
print(' {} of {} longitudes analyzed, satistical analysis of longitude {}, time lapsed: '
'{:.2f} hrs'.format(i_lon, len(lons), lons[i_lon], float(timer()-start_time)/3600))
counter += 1
level_heights, density_levels = compute_level_heights(levels,
surface_pressure[:, i_lat_in_subset, i_lon],
t_levels[:, :, i_lat_in_subset, i_lon],
q_levels[:, :, i_lat_in_subset, i_lon])
# Determine wind at altitudes of interest by means of interpolating the raw wind data.
v_req_alt = np.zeros((len(hours), len(heights_of_interest))) # Interpolation results array.
rho_req_alt = np.zeros((len(hours), len(heights_of_interest)))
for i_hr in range(len(hours)):
if not np.all(level_heights[i_hr, 0] > heights_of_interest):
raise ValueError("Requested height ({:.2f} m) is higher than height of highest model level."
.format(level_heights[i_hr, 0]))
v_req_alt[i_hr, :] = np.interp(heights_of_interest, level_heights[i_hr, ::-1],
v_levels[i_hr, ::-1, i_lat_in_subset, i_lon])
rho_req_alt[i_hr, :] = np.interp(heights_of_interest, level_heights[i_hr, ::-1],
density_levels[i_hr, ::-1])
p_req_alt = calc_power(v_req_alt, rho_req_alt)
# Determine wind statistics at fixed heights of interest.
for i_out, fixed_height_id in enumerate(analyzed_heights_ids['fixed']):
v_mean, v_perc5, v_perc32, v_perc50 = get_statistics(v_req_alt[:, fixed_height_id])
res['fixed']['wind_speed']['mean'][i_out, i_lat_in_subset, i_lon] = v_mean
res['fixed']['wind_speed']['percentile'][5][i_out, i_lat_in_subset, i_lon] = v_perc5
res['fixed']['wind_speed']['percentile'][32][i_out, i_lat_in_subset, i_lon] = v_perc32
res['fixed']['wind_speed']['percentile'][50][i_out, i_lat_in_subset, i_lon] = v_perc50
v_ranks = get_percentile_ranks(v_req_alt[:, fixed_height_id], [4., 8., 14., 25.])
res['fixed']['wind_speed']['rank'][4][i_out, i_lat_in_subset, i_lon] = v_ranks[0]
res['fixed']['wind_speed']['rank'][8][i_out, i_lat_in_subset, i_lon] = v_ranks[1]
res['fixed']['wind_speed']['rank'][14][i_out, i_lat_in_subset, i_lon] = v_ranks[2]
res['fixed']['wind_speed']['rank'][25][i_out, i_lat_in_subset, i_lon] = v_ranks[3]
p_fixed_height = p_req_alt[:, fixed_height_id]
p_mean, p_perc5, p_perc32, p_perc50 = get_statistics(p_fixed_height)
res['fixed']['wind_power_density']['mean'][i_out, i_lat_in_subset, i_lon] = p_mean
res['fixed']['wind_power_density']['percentile'][5][i_out, i_lat_in_subset, i_lon] = p_perc5
res['fixed']['wind_power_density']['percentile'][32][i_out, i_lat_in_subset, i_lon] = p_perc32
res['fixed']['wind_power_density']['percentile'][50][i_out, i_lat_in_subset, i_lon] = p_perc50
p_ranks = get_percentile_ranks(p_fixed_height, [40., 300., 1600., 9000.])
res['fixed']['wind_power_density']['rank'][40][i_out, i_lat_in_subset, i_lon] | |
# pubsub completion notification
def test_cron_abort_expired_task_to_run_retry(self):
    """BOT_DIED from the first try is kept when the retry expires.

    Reaps an idempotent task, fakes the bot dying (the cron job re-enqueues
    the task as PENDING), then lets the retry expire.
    cron_abort_expired_task_to_run() must keep the BOT_DIED state instead of
    EXPIRED, and every state transition must publish a PubSub notification.
    """
    pub_sub_calls = self.mock_pub_sub()
    run_result = self._quick_reap(
        1,
        0,
        pubsub_topic='projects/abc/topics/def',
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(idempotent=True),
                wait_for_capacity=False),
        ])
    # Fake first try bot died.
    self.assertEqual(1, len(pub_sub_calls))  # PENDING -> RUNNING
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    self.assertEqual(([], 1, 0), task_scheduler.cron_handle_bot_died())
    self.assertEqual(State.BOT_DIED, run_result.key.get().state)
    self.assertEqual(State.PENDING, run_result.result_summary_key.get().state)
    self.assertEqual(1, self.execute_tasks())
    self.assertEqual(2, len(pub_sub_calls))  # RUNNING -> PENDING
    # BOT_DIED is kept instead of EXPIRED.
    abandoned_ts = self.mock_now(
        self.now, run_result.request_key.get().expiration_secs+1)
    self.assertEqual(
        (['1d69b9f088008910'], []),
        task_scheduler.cron_abort_expired_task_to_run())
    # Only the first (dead) try exists; no second TaskRunResult was created.
    self.assertEqual(1, len(task_result.TaskRunResult.query().fetch()))
    expected = self._gen_result_summary_reaped(
        abandoned_ts=abandoned_ts,
        completed_ts=abandoned_ts,
        costs_usd=[0.],
        id='1d69b9f088008910',
        internal_failure=True,
        modified_ts=abandoned_ts,
        started_ts=self.now,
        state=State.BOT_DIED)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    self.assertEqual(1, self.execute_tasks())
    self.assertEqual(3, len(pub_sub_calls))  # PENDING -> BOT_DIED
def test_cron_abort_expired_fallback(self):
    """An expired slice falls through to the next slice that has capacity.

    Slices 2 and 3 have no matching bot, so expiring slice 0 must jump the
    task straight to slice 3 (index 3, dimension item=4) while staying
    PENDING.
    """
    # 1 and 4 have capacity.
    self.bot_dimensions[u'item'] = [u'1', u'4']
    self._register_bot(0, self.bot_dimensions)
    result_summary = self._quick_schedule(
        4,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(
                    dimensions={u'pool': [u'default'], u'item': [u'1']})),
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(
                    dimensions={u'pool': [u'default'], u'item': [u'2']})),
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(
                    dimensions={u'pool': [u'default'], u'item': [u'3']})),
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(
                    dimensions={u'pool': [u'default'], u'item': [u'4']})),
        ])
    self.assertEqual(State.PENDING, result_summary.state)
    self.assertEqual(0, result_summary.current_task_slice)
    # Expire the first slice.
    self.mock_now(self.now, 601)
    # cron job 'expires' the task slices but not the whole task.
    self.assertEqual(
        ([], ['1d69b9f088008910']),
        task_scheduler.cron_abort_expired_task_to_run())
    result_summary = result_summary.key.get()
    self.assertEqual(State.PENDING, result_summary.state)
    # Skipped the second and third TaskSlice.
    self.assertEqual(3, result_summary.current_task_slice)
def test_cron_abort_expired_fallback_wait_for_capacity(self):
    """A slice with wait_for_capacity=True is selected even without capacity.

    After slice 0 expires, the task must move to slice 1 despite no bot
    matching its dimensions, because that slice asks to wait for capacity.
    """
    # 1 has capacity.
    self.bot_dimensions[u'item'] = [u'1']
    self._register_bot(0, self.bot_dimensions)
    result_summary = self._quick_schedule(
        2,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(
                    dimensions={u'pool': [u'default'], u'item': [u'1']}),
                wait_for_capacity=False),
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(
                    dimensions={u'pool': [u'default'], u'item': [u'2']}),
                wait_for_capacity=True),
        ])
    self.assertEqual(State.PENDING, result_summary.state)
    self.assertEqual(0, result_summary.current_task_slice)
    # Expire the first slice.
    self.mock_now(self.now, 601)
    self.assertEqual(
        ([], ['1d69b9f088008910']),
        task_scheduler.cron_abort_expired_task_to_run())
    result_summary = result_summary.key.get()
    self.assertEqual(State.PENDING, result_summary.state)
    # Wait for the second TaskSlice even if there is no capacity.
    self.assertEqual(1, result_summary.current_task_slice)
def test_cron_handle_bot_died(self):
    """First bot dies, task is re-enqueued, a second bot reaps and completes it.

    Also checks the TaskToRun negative lookup cache and PubSub notifications
    at every state transition.
    """
    pub_sub_calls = self.mock_pub_sub()
    # Test first retry, then success.
    run_result = self._quick_reap(
        1,
        0,
        pubsub_topic='projects/abc/topics/def',
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(idempotent=True),
                wait_for_capacity=False),
        ])
    self.assertEqual(1, len(pub_sub_calls))  # PENDING -> RUNNING
    request = run_result.request_key.get()
    # Helper: is try number t marked as taken in the negative lookup cache?
    def is_in_negative_cache(t):
        to_run_key = task_to_run.request_to_task_to_run_key(request, t, 0)
        return task_to_run._lookup_cache_is_taken_async(to_run_key).get_result()
    self.assertEqual(True, is_in_negative_cache(1))  # Was just reaped.
    self.assertEqual(False, is_in_negative_cache(2))
    # Bot stops pinging past the tolerance; the cron job declares it dead.
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    self.assertEqual(([], 1, 0), task_scheduler.cron_handle_bot_died())
    self.assertEqual(1, self.execute_tasks())
    self.assertEqual(2, len(pub_sub_calls))  # RUNNING -> PENDING
    self.assertEqual(False, is_in_negative_cache(1))
    self.assertEqual(False, is_in_negative_cache(2))
    # Refresh and compare:
    expected = self._gen_result_summary_reaped(
        costs_usd=[0.],
        id='1d69b9f088008910',
        modified_ts=now_1,
        state=State.PENDING,
        try_number=1)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    expected = self._gen_run_result(
        abandoned_ts=now_1,
        completed_ts=now_1,
        id='1d69b9f088008911',
        internal_failure=True,
        modified_ts=now_1,
        state=State.BOT_DIED)
    self.assertEqual(expected, run_result.key.get().to_dict())
    # Task was retried.
    now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
    bot_dimensions_second = self.bot_dimensions.copy()
    bot_dimensions_second[u'id'] = [u'localhost-second']
    self._register_bot(1, bot_dimensions_second)
    _request, _, run_result = task_scheduler.bot_reap_task(
        bot_dimensions_second, 'abc', None)
    self.assertEqual(1, self.execute_tasks())
    self.assertEqual(3, len(pub_sub_calls))  # PENDING -> RUNNING
    self.assertEqual(2, run_result.try_number)
    self.assertEqual(False, is_in_negative_cache(1))
    self.assertEqual(True, is_in_negative_cache(2))  # Was just reaped.
    # The second bot completes the task successfully.
    self.assertEqual(
        State.COMPLETED,
        task_scheduler.bot_update_task(
            run_result_key=run_result.key,
            bot_id='localhost-second',
            cipd_pins=None,
            output='Foo1',
            output_chunk_start=0,
            exit_code=0,
            duration=0.1,
            hard_timeout=False,
            io_timeout=False,
            cost_usd=0.1,
            outputs_ref=None,
            performance_stats=None))
    expected = self._gen_result_summary_reaped(
        bot_dimensions=bot_dimensions_second,
        bot_id=u'localhost-second',
        completed_ts=now_2,
        costs_usd=[0., 0.1],
        duration=0.1,
        exit_code=0,
        id='1d69b9f088008910',
        modified_ts=now_2,
        started_ts=now_2,
        state=State.COMPLETED,
        try_number=2)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    self.assertEqual(0.1, run_result.key.get().cost_usd)
    self.assertEqual(4, len(pub_sub_calls))  # RUNNING -> COMPLETED
def test_cron_handle_bot_died_no_update_not_idempotent(self):
    """A non-idempotent task whose bot went MIA is retried and completes.

    Same flow as test_cron_handle_bot_died but without idempotent=True, to
    confirm a non-idempotent task is still retried after BOT_DIED.
    """
    # A bot reaped a task but the handler returned HTTP 500, leaving the task in
    # a lingering state.
    pub_sub_calls = self.mock_pub_sub()
    # Test first try, then success.
    run_result = self._quick_reap(
        1,
        0,
        pubsub_topic='projects/abc/topics/def',
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(),
                wait_for_capacity=False),
        ])
    self.assertEqual(1, len(pub_sub_calls))  # PENDING -> RUNNING
    # Bot becomes MIA.
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    self.assertEqual(([], 1, 0), task_scheduler.cron_handle_bot_died())
    self.assertEqual(1, self.execute_tasks())
    self.assertEqual(2, len(pub_sub_calls))  # RUNNING -> PENDING
    # Refresh and compare:
    expected = self._gen_result_summary_reaped(
        costs_usd=[0.],
        id='1d69b9f088008910',
        modified_ts=now_1,
        state=State.PENDING,
        try_number=1)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    expected = self._gen_run_result(
        abandoned_ts=now_1,
        completed_ts=now_1,
        id='1d69b9f088008911',
        internal_failure=True,
        modified_ts=now_1,
        state=task_result.State.BOT_DIED)
    self.assertEqual(expected, run_result.key.get().to_dict())
    # Task was retried.
    now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
    bot_dimensions_second = self.bot_dimensions.copy()
    bot_dimensions_second[u'id'] = [u'localhost-second']
    self._register_bot(1, bot_dimensions_second)
    _request, _, run_result = task_scheduler.bot_reap_task(
        bot_dimensions_second, 'abc', None)
    self.assertEqual(1, self.execute_tasks())
    self.assertEqual(3, len(pub_sub_calls))  # PENDING -> RUNNING
    self.assertEqual(2, run_result.try_number)
    # The second bot completes the task successfully.
    self.assertEqual(
        task_result.State.COMPLETED,
        task_scheduler.bot_update_task(
            run_result_key=run_result.key,
            bot_id='localhost-second',
            cipd_pins=None,
            output='Foo1',
            output_chunk_start=0,
            exit_code=0,
            duration=0.1,
            hard_timeout=False,
            io_timeout=False,
            cost_usd=0.1,
            outputs_ref=None,
            performance_stats=None))
    expected = self._gen_result_summary_reaped(
        bot_dimensions=bot_dimensions_second,
        bot_id=u'localhost-second',
        completed_ts=now_2,
        costs_usd=[0., 0.1],
        duration=0.1,
        exit_code=0,
        id='1d69b9f088008910',
        modified_ts=now_2,
        started_ts=now_2,
        state=task_result.State.COMPLETED,
        try_number=2)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    self.assertEqual(0.1, run_result.key.get().cost_usd)
    self.assertEqual(4, len(pub_sub_calls))  # RUNNING -> COMPLETED
def test_cron_handle_bot_died_broken_task(self):
    """cron_handle_bot_died() must not throw when the TaskRequest is missing."""
    # Not sure why, but this was observed on the fleet: the TaskRequest is
    # missing from the DB. This test ensures the cron job doesn't throw in this
    # situation.
    run_result = self._quick_reap(
        1,
        0,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=60,
                properties=_gen_properties(),
                wait_for_capacity=False),
        ])
    to_run_key = task_to_run.request_to_task_to_run_key(
        run_result.request_key.get(), 1, 0)
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    # Very unusual, the TaskRequest disappeared:
    run_result.request_key.delete()
    # The run is reported dead without an exception, and not retried.
    self.assertEqual(
        (['1d69b9f088008911'], 0, 0), task_scheduler.cron_handle_bot_died())
def test_bot_poll_http_500_but_bot_reapears_after_BOT_PING_TOLERANCE(self):
    """A bot that wakes up after its task was re-enqueued is denied the update.

    The old try stays BOT_DIED, the summary stays PENDING, and a fresh
    TaskToRun (try 2) is enqueued instead of reusing the old one.
    """
    # A bot reaped a task, sleeps for over BOT_PING_TOLERANCE (2 minutes), then
    # sends a ping.
    # In the meantime the cron job ran, saw the job idle with 0 update for more
    # than BOT_PING_TOLERANCE, re-enqueue it.
    run_result = self._quick_reap(
        1,
        0,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=
                    3*int(task_result.BOT_PING_TOLERANCE.total_seconds()),
                properties=_gen_properties(),
                wait_for_capacity=False),
        ])
    to_run_key_1 = task_to_run.request_to_task_to_run_key(
        run_result.request_key.get(), 1, 0)
    self.assertIsNone(to_run_key_1.get().queue_number)
    # See _handle_dead_bot() with special case about non-idempotent task that
    # were never updated.
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    self.assertEqual(([], 1, 0), task_scheduler.cron_handle_bot_died())
    # Now the task is available. Bot magically wakes up (let's say a laptop that
    # went to sleep). The update is denied.
    self.assertEqual(
        None,
        task_scheduler.bot_update_task(
            run_result_key=run_result.key,
            bot_id='localhost-second',
            cipd_pins=None,
            output='Foo1',
            output_chunk_start=0,
            exit_code=0,
            duration=0.1,
            hard_timeout=False,
            io_timeout=False,
            cost_usd=0.1,
            outputs_ref=None,
            performance_stats=None))
    # Confirm it is denied.
    run_result = run_result.key.get()
    self.assertEqual(State.BOT_DIED, run_result.state)
    result_summary = run_result.result_summary_key.get()
    self.assertEqual(State.PENDING, result_summary.state)
    # The old TaskToRun is not reused.
    self.assertIsNone(to_run_key_1.get().queue_number)
    to_run_key_2 = task_to_run.request_to_task_to_run_key(
        run_result.request_key.get(), 2, 0)
    self.assertTrue(to_run_key_2.get().queue_number)
def test_cron_handle_bot_died_same_bot_denied(self):
    """After BOT_DIED, the same bot that died may not reap the retry."""
    # Test first retry, then success.
    run_result = self._quick_reap(
        1,
        0,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(idempotent=True),
                wait_for_capacity=False),
        ])
    self.assertEqual(1, run_result.try_number)
    self.assertEqual(State.RUNNING, run_result.state)
    # Bot stops pinging; cron declares it dead and re-enqueues the task.
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    self.assertEqual(([], 1, 0), task_scheduler.cron_handle_bot_died())
    # Refresh and compare:
    # The interesting point here is that even though the task is PENDING, it has
    # worker information from the initial BOT_DIED task.
    expected = self._gen_run_result(
        abandoned_ts=now_1,
        completed_ts=now_1,
        id='1d69b9f088008911',
        internal_failure=True,
        modified_ts=now_1,
        state=State.BOT_DIED)
    self.assertEqual(expected, run_result.key.get().to_dict())
    expected = self._gen_result_summary_pending(
        bot_dimensions=self.bot_dimensions.copy(),
        bot_version=u'abc',
        bot_id=u'localhost',
        costs_usd=[0.],
        id='1d69b9f088008910',
        modified_ts=now_1,
        try_number=1)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
    # Task was retried but the same bot polls again, it's denied the task.
    now_2 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
    request, _, run_result = task_scheduler.bot_reap_task(
        self.bot_dimensions, 'abc', None)
    self.assertIsNone(request)
    self.assertIsNone(run_result)
def test_cron_handle_bot_died_second(self):
    """Two successive bot deaths end the task as BOT_DIED (no third retry)."""
    # Test two tries internal_failure's leading to a BOT_DIED status.
    run_result = self._quick_reap(
        1,
        0,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(idempotent=True),
                wait_for_capacity=False),
        ])
    request = run_result.request_key.get()
    # Helper: is try number t marked as taken in the negative lookup cache?
    def is_in_negative_cache(t):
        to_run_key = task_to_run.request_to_task_to_run_key(request, t, 0)
        return task_to_run._lookup_cache_is_taken_async(to_run_key).get_result()
    self.assertEqual(1, run_result.try_number)
    self.assertEqual(True, is_in_negative_cache(1))  # Was just reaped.
    self.assertEqual(False, is_in_negative_cache(2))
    self.assertEqual(State.RUNNING, run_result.state)
    # First bot dies; the task is re-enqueued.
    self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
    self.assertEqual(([], 1, 0), task_scheduler.cron_handle_bot_died())
    self.assertEqual(False, is_in_negative_cache(1))
    self.assertEqual(False, is_in_negative_cache(2))
    # A second bot comes to reap the task.
    now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
    bot_dimensions_second = self.bot_dimensions.copy()
    bot_dimensions_second[u'id'] = [u'localhost-second']
    self._register_bot(1, bot_dimensions_second)
    _request, _, run_result = task_scheduler.bot_reap_task(
        bot_dimensions_second, 'abc', None)
    self.assertTrue(run_result)
    self.assertEqual(False, is_in_negative_cache(1))
    # Was just tried to be reaped.
    self.assertEqual(True, is_in_negative_cache(2))
    # Second bot also dies; the task ends as BOT_DIED and the second call is
    # a no-op.
    now_2 = self.mock_now(self.now + 2 * task_result.BOT_PING_TOLERANCE, 3)
    self.assertEqual(
        (['1d69b9f088008912'], 0, 0), task_scheduler.cron_handle_bot_died())
    self.assertEqual(([], 0, 0), task_scheduler.cron_handle_bot_died())
    self.assertEqual(False, is_in_negative_cache(1))
    self.assertEqual(False, is_in_negative_cache(2))
    expected = self._gen_result_summary_reaped(
        abandoned_ts=now_2,
        completed_ts=now_2,
        bot_dimensions=bot_dimensions_second,
        bot_id=u'localhost-second',
        costs_usd=[0., 0.],
        id='1d69b9f088008910',
        internal_failure=True,
        modified_ts=now_2,
        started_ts=now_1,
        state=State.BOT_DIED,
        try_number=2)
    self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
def test_cron_handle_bot_died_ignored_expired(self):
    """A dead bot whose task already expired kills the task without retry."""
    run_result = self._quick_reap(
        1,
        0,
        task_slices=[
            task_request.TaskSlice(
                expiration_secs=600,
                properties=_gen_properties(),
                wait_for_capacity=False),
        ])
    self.assertEqual(1, run_result.try_number)
    self.assertEqual(State.RUNNING, run_result.state)
    # Move past both BOT_PING_TOLERANCE and the 600s expiration.
    self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 601)
    # The run is reported as killed (first return element), not retried.
    self.assertEqual(
        (['1d69b9f088008911'], 0, 0), task_scheduler.cron_handle_bot_died())
def test_cron_handle_external_cancellations(self):
    """Cancellations from enabled external schedulers are enqueued.

    Three scheduler configs are registered but only the two enabled ones
    must be queried, each producing one enqueued cancellation.
    """
    es_address = 'externalscheduler_address'
    es_id = 'es_id'
    external_schedulers = [
        pools_config.ExternalSchedulerConfig(es_address, es_id, None, True),
        pools_config.ExternalSchedulerConfig(es_address, es_id, None, True),
        pools_config.ExternalSchedulerConfig(es_address, es_id, None, False),
    ]
    self.mock_pool_config('es-pool', external_schedulers=external_schedulers)
    known_pools = pools_config.known()
    self.assertEqual(len(known_pools), 1)
    calls = []
    # Mocked external scheduler: records the call and returns one
    # cancellation.
    def mock_get_cancellations(es_cfg):
        calls.append(es_cfg)
        c = plugin_pb2.GetCancellationsResponse.Cancellation()
        # Note: This task key is invalid, but that helps to exercise
        # the exception handling in the handler.
        # Also, in the wild we would not be making duplicate calls with the same
        # task and bot; this is simply convenient for testing.
        c.task_id = "task1"
        c.bot_id = "bot1"
        return [c]
    self.mock(external_scheduler, 'get_cancellations', mock_get_cancellations)
    task_scheduler.cron_handle_external_cancellations()
    self.execute_tasks()
    # Only the two enabled scheduler configs are queried.
    self.assertEqual(len(calls), 2)
    self.assertEqual(len(self._enqueue_calls), 2)
def test_cron_handle_external_cancellations_none(self):
    """Nothing is enqueued when external schedulers return no cancellations."""
    es_address = 'externalscheduler_address'
    es_id = 'es_id'
    external_schedulers = [
        pools_config.ExternalSchedulerConfig(es_address, es_id, None, True),
        pools_config.ExternalSchedulerConfig(es_address, es_id, None, True),
        pools_config.ExternalSchedulerConfig(es_address, es_id, None, False),
    ]
    self.mock_pool_config('es-pool', external_schedulers=external_schedulers)
    known_pools = pools_config.known()
    self.assertEqual(len(known_pools), 1)
    calls = []
    # Mocked external scheduler: records the call, returns no cancellations.
    def mock_get_cancellations(es_cfg):
        calls.append(es_cfg)
        return None
    self.mock(external_scheduler, 'get_cancellations', mock_get_cancellations)
    task_scheduler.cron_handle_external_cancellations()
    # Only the two enabled scheduler configs are queried; nothing enqueued.
    self.assertEqual(len(calls), 2)
    self.assertEqual(len(self._enqueue_calls), 0)
def mock_pool_config(
self,
name,
scheduling_users=None,
scheduling_groups=None,
trusted_delegatees=None,
service_accounts=None,
service_accounts_groups=None,
external_schedulers=None):
self._known_pools = self._known_pools or set()
self._known_pools.add(name)
def mocked_get_pool_config(pool):
if pool == name:
return pools_config.PoolConfig(
name=name,
rev='rev',
scheduling_users=frozenset(scheduling_users or []),
scheduling_groups=frozenset(scheduling_groups or | |
<filename>Empirical_Roofline_Tool-1.1.0/Python/ert_core.py
import sys,operator,subprocess,os,glob,filecmp,math
import socket,platform,time,json,optparse,ast
from ert_utils import *
def text_list_2_string(text_list):
    """Join a list of strings into one string, each item followed by a space.

    Args:
        text_list: iterable of strings.

    Returns:
        The concatenation "item1 item2 ... itemN " (note the trailing
        space), or "" for an empty input.  The previous reduce()-based
        implementation raised TypeError on an empty list and relied on the
        Python-2-only builtin reduce.
    """
    return "".join(t + " " for t in text_list)
class ert_core:
def __init__(self):
    """Initialize ERT bookkeeping: version, config dict and host metadata."""
    self.ert_version = "1.1.0"
    self.dict = {}      # parsed configuration (filled by configure())
    self.metadata = {}  # run metadata appended to the output files
    self.metadata["ERT_VERSION"] = self.ert_version
    hostname = socket.gethostname()
    try:
        # gethostbyaddr() returns (hostname, aliaslist, ipaddrlist); only
        # the canonical name is wanted.  The previous code stored the whole
        # tuple, so the HOSTNAME metadata was a tuple repr.
        hostname = socket.gethostbyaddr(hostname)[0]
    except (socket.herror, socket.gaierror):
        # Name could not be resolved: keep the bare gethostname() result.
        pass
    # On NERSC systems prefer the canonical machine name from the env.
    hostname = os.getenv("NERSC_HOST", hostname)
    self.metadata["HOSTNAME"] = hostname
    self.metadata["UNAME"] = platform.uname()
def build_only(self, option, opt, value, parser):
    """optparse callback for --build-only: build, but skip run and post."""
    parser.values.build, parser.values.run, parser.values.post = (
        True, False, False)
def run_only(self, option, opt, value, parser):
    """optparse callback for --run-only: run, but skip build and post."""
    parser.values.build, parser.values.run, parser.values.post = (
        False, True, False)
def post_only(self, option, opt, value, parser):
    """optparse callback for --post-only: post-process, skip build and run."""
    parser.values.build, parser.values.run, parser.values.post = (
        False, False, True)
def flags(self):
    """Parse the ERT command line.

    Builds the optparse parser (build/run/post phase toggles, gnuplot and
    verbosity options), stores the parsed options in self.options and the
    single positional argument in self.configure_filename.

    Returns:
        0 on success; optparse exits the process itself on usage errors.
    """
    parser = optparse.OptionParser(usage="%prog [-h] [--help] [options] config_file",version="%prog " + self.ert_version)
    # Build phase options.
    build_group = optparse.OptionGroup(parser,"Build options");
    build_group.add_option("--build",dest="build",action="store_true",default=True,help="Build the micro-kernels [default]")
    build_group.add_option("--no-build",dest="build",action="store_false",default=True,help="Don't build the micro-kernels")
    build_group.add_option("--build-only",dest="build",action="callback",callback=self.build_only,help="Only build the micro-kernels")
    parser.add_option_group(build_group)
    # Run phase options.
    run_group = optparse.OptionGroup(parser,"Run options");
    run_group.add_option("--run",dest="run",action="store_true",default=True,help="Run the micro-kernels [default]")
    run_group.add_option("--no-run",dest="run",action="store_false",default=True,help="Don't run the micro-kernels")
    run_group.add_option("--run-only",dest="run",action="callback",callback=self.run_only,help="Only run the micro-kernels")
    parser.add_option_group(run_group)
    # Post-processing options.
    post_group = optparse.OptionGroup(parser,"Post-processing options");
    post_group.add_option("--post",dest="post",action="store_true",default=True,help="Run the post-processing [default]")
    post_group.add_option("--no-post",dest="post",action="store_false",default=True,help="Don't run the post-processing")
    post_group.add_option("--post-only",dest="post",action="callback",callback=self.post_only,help="Only run the post-processing")
    post_group.add_option("--gnuplot",dest="gnuplot",action="store_true",default=True,help="Generate graphs using GNUplot [default]")
    post_group.add_option("--no-gnuplot",dest="gnuplot",action="store_false",default=True,help="Don't generate graphs using GNUplot")
    parser.add_option_group(post_group)
    parser.add_option("--verbose",dest="verbose",action="store",nargs=1,default=1,type=int,help="Set the verbosity of the screen output [default = %default]. = 0 : no output, = 1 : outlines progress, = 2 : good for debugging (prints all commands)")
    parser.add_option("--quiet",dest="verbose",action="store_const",const=0,help="Don't generate any screen output, '--verbose=0'")
    (options,args) = parser.parse_args()
    # Replace the usage line printed by parser.error() with a blank line,
    # since the full help was already printed.
    def nullusage(fd):
        fd.write("\n")
    if len(args) < 1:
        parser.print_help()
        parser.print_usage = nullusage
        parser.error("no configuration file given")
    if len(args) > 1:
        parser.print_help()
        parser.print_usage = nullusage
        parser.error("more than one configuration file given")
    self.options = options
    self.configure_filename = args[0]
    return 0
def configure(self):
    """Read the ERT configuration file and select/create the run directory.

    Parses whitespace-separated "KEY value..." lines (skipping comments)
    into self.dict["CONFIG"], fills in defaults for optional keys, then
    looks for an existing Run.NNN directory under ERT_RESULTS whose
    config.ert matches the current configuration; otherwise a new run
    directory is created (when building/running).

    Returns:
        0 on success, 1 on any error.
    """
    if self.options.verbose > 0:
        print
        print "Reading configuration from '%s'..." % self.configure_filename
    try:
        configure_file = open(self.configure_filename,"r")
    except IOError:
        sys.stderr.write("Unable to open '%s'...\n" % self.configure_filename)
        return 1
    self.dict["CONFIG"] = {}
    # Parse "KEY value value..." lines; '#' starts a comment line.
    for line in configure_file:
        line = line[:-1]
        if len(line) > 0 and line[0] != "#":
            line = line.split()
            if len(line) > 0:
                target = line[0]
                value = line[1:]
                if len(target) > 0:
                    if target == "ERT_PRECISION":
                        # ERT_PRECISION is a comma-separated list, not
                        # whitespace-separated like the other keys.
                        prec_list = value[0].split(",")
                        self.dict["CONFIG"][target] = prec_list
                    else:
                        self.dict["CONFIG"][target] = value
    # Defaults for optional configuration keys.
    if "ERT_MPI" not in self.dict["CONFIG"]:
        self.dict["CONFIG"]["ERT_MPI"] = [False]
    if "ERT_OPENMP" not in self.dict["CONFIG"]:
        self.dict["CONFIG"]["ERT_OPENMP"] = [False]
    if "ERT_GPU" not in self.dict["CONFIG"]:
        self.dict["CONFIG"]["ERT_GPU"] = [False]
    if "ERT_PRECISION" not in self.dict["CONFIG"]:
        self.dict["CONFIG"]["ERT_PRECISION"] = ["FP64"]
    if "ERT_OCL" not in self.dict["CONFIG"]:
        self.dict["CONFIG"]["ERT_OCL"] = [False]
    self.results_dir = self.dict["CONFIG"]["ERT_RESULTS"][0]
    made_results = make_dir_if_needed(self.results_dir,"results",False)
    if self.options.verbose > 0:
        if made_results:
            print "  Making new results directory, %s..." % self.results_dir
        else:
            print "  Using existing results directory, %s..." % self.results_dir
    # Look for an existing Run.NNN directory whose saved config matches.
    run_files = glob.glob("%s/Run.[0-9][0-9][0-9]" % self.results_dir)
    used_run_files = []
    used_run_list = []
    no_dir = True
    for run_file in run_files:
        run_configure_filename = "%s/config.ert" % run_file
        if os.path.exists(run_configure_filename):
            if filecmp.cmp(self.configure_filename,run_configure_filename):
                # Same configuration: reuse this run directory.
                self.results_dir = run_file
                no_dir = False
                if self.options.verbose > 0:
                    print "  Using existing run directory, %s..." % self.results_dir
                break
            else:
                used_run_files.append(run_file)
                used_run_list.append(int(run_file[-3:]))
        else:
            used_run_files.append(run_file)
            used_run_list.append(int(run_file[-3:]))
    if no_dir:
        if self.options.build or self.options.run:
            # Create the lowest-numbered unused Run.NNN directory.
            if len(used_run_list) == 0:
                used_run_list = [0]
            for n in xrange(1,max(used_run_list)+2):
                if n not in used_run_list:
                    self.results_dir = "%s/Run.%03d" % (self.results_dir,n)
                    if self.options.verbose > 0:
                        if made_results:
                            print "  Making new run directory, '%s'..." % self.results_dir
                        else:
                            print
                            print "*** WARNING ***"
                            print "**"
                            print "** Making new run directory, '%s'," % self.results_dir
                            print "** because the current connfiguration file, '%s' " % self.configure_filename
                            print "** doesn't match the configuration files, 'config.ert', under:"
                            print "**"
                            for u in sorted(used_run_files):
                                print "** %s" % u
                            print "**"
                            print "*** WARNING ***"
                    command = ["mkdir",self.results_dir]
                    if execute_noshell(command,self.options.verbose > 1) != 0:
                        sys.stderr.write("Unable to make new run directory, '%s'\n" % self.results_dir)
                        return 1
                    # Save the configuration so future runs can be matched.
                    command = ["cp",self.configure_filename,"%s/config.ert" % self.results_dir]
                    if execute_noshell(command,self.options.verbose > 1) != 0:
                        sys.stderr.write("Unable to copy configuration file, '%s', into new run directory, %s\n" % (self.configure_filename,self.results_dir))
                        return 1
                    break
        else:
            # Post-processing only, but no matching run directory exists.
            sys.stderr.write("\nNo run directory for '%s' found under '%s'\n" % (self.configure_filename,self.results_dir))
            return 1
    if self.options.verbose > 0:
        print
    return 0
def build(self):
    """Compile and link the ERT micro-kernel binary for the current flop count.

    Uses the ERT_* configuration entries to assemble compiler/linker command
    lines (MPI/OpenMP/GPU/precision flags), compiles the driver and - unless
    OpenCL is enabled - the kernel, then links them into self.flop_dir.
    No-op when --no-build was given.

    Returns:
        0 on success (or when building is disabled), 1 on any failure.
    """
    if self.options.build:
        if self.options.verbose > 0:
            if self.options.verbose > 1:
                print
            print "  Building ERT core code..."
        # Common compile flags shared by the driver and kernel compiles.
        command_prefix = \
            self.dict["CONFIG"]["ERT_CC"] + \
            self.dict["CONFIG"]["ERT_CFLAGS"] + \
            ["-I%s/Kernels" % self.exe_path] + \
            ["-DERT_FLOP=%d" % self.flop] + \
            ["-DERT_ALIGN=%s" % self.dict["CONFIG"]["ERT_ALIGN"][0]] + \
            ["-DERT_MEMORY_MAX=%s" % self.dict["CONFIG"]["ERT_MEMORY_MAX"][0]] + \
            ["-DERT_WORKING_SET_MIN=%s" % self.dict["CONFIG"]["ERT_WORKING_SET_MIN"][0]] + \
            ["-DERT_TRIALS_MIN=%s" % self.dict["CONFIG"]["ERT_TRIALS_MIN"][0]]
        if self.dict["CONFIG"]["ERT_MPI"][0] == "True":
            command_prefix += ["-DERT_MPI"] + self.dict["CONFIG"]["ERT_MPI_CFLAGS"]
        if self.dict["CONFIG"]["ERT_OPENMP"][0] == "True":
            command_prefix += ["-DERT_OPENMP"] + self.dict["CONFIG"]["ERT_OPENMP_CFLAGS"]
        if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
            command_prefix += ["-DERT_GPU"] + self.dict["CONFIG"]["ERT_GPU_CFLAGS"]
        for p in self.dict["CONFIG"]["ERT_PRECISION"]:
            command_prefix += ["-DERT_%s" % p]
        # Compile the driver.
        command = command_prefix + \
            ["-c","%s/Drivers/%s.cxx" % (self.exe_path,self.dict["CONFIG"]["ERT_DRIVER"][0])] + \
            ["-o","%s/%s.o" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0])]
        if execute_noshell(command,self.options.verbose > 1) != 0:
            sys.stderr.write("Compiling driver, %s, failed\n" % self.dict["CONFIG"]["ERT_DRIVER"][0])
            return 1
        # With OpenCL the kernel is compiled at runtime, so only compile it
        # here for the non-OpenCL path.
        if self.dict["CONFIG"]["ERT_OCL"][0] != "True":
            command = command_prefix + \
                ["-c","%s/Kernels/%s.cxx" % (self.exe_path,self.dict["CONFIG"]["ERT_KERNEL"][0])] + \
                ["-o","%s/%s.o" % (self.flop_dir,self.dict["CONFIG"]["ERT_KERNEL"][0])]
            if execute_noshell(command,self.options.verbose > 1) != 0:
                sys.stderr.write("Compiling kernel, %s, failed\n" % self.dict["CONFIG"]["ERT_KERNEL"][0])
                return 1
        # Link step.
        command = self.dict["CONFIG"]["ERT_LD"] + \
            self.dict["CONFIG"]["ERT_LDFLAGS"]
        if self.dict["CONFIG"]["ERT_MPI"][0] == "True":
            command += self.dict["CONFIG"]["ERT_MPI_LDFLAGS"]
        if self.dict["CONFIG"]["ERT_OPENMP"][0] == "True":
            command += self.dict["CONFIG"]["ERT_OPENMP_LDFLAGS"]
        if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
            command += self.dict["CONFIG"]["ERT_GPU_LDFLAGS"]
        if self.dict["CONFIG"]["ERT_OCL"][0] != "True":
            command += ["%s/%s.o" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0])] + \
                ["%s/%s.o" % (self.flop_dir,self.dict["CONFIG"]["ERT_KERNEL"][0])] + \
                self.dict["CONFIG"]["ERT_LDLIBS"] + \
                ["-o","%s/%s.%s" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0],self.dict["CONFIG"]["ERT_KERNEL"][0])]
        else:
            command += ["%s/%s.o" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0])] + \
                self.dict["CONFIG"]["ERT_LDLIBS"] + \
                ["-o","%s/%s" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0])]
        if execute_noshell(command,self.options.verbose > 1) != 0:
            sys.stderr.write("Linking code failed\n")
            return 1
    return 0
def add_metadata(self, outputname):
    """Append run metadata and configuration key/value pairs to a results file.

    :param outputname: path of the results file to append to
    :return: 0 on success, 1 if the file could not be opened
    """
    try:
        output = open(outputname, "a")
    except IOError:
        # BUG FIX: the original referenced the undefined name 'outputfile'
        # here, raising NameError instead of reporting the open failure.
        sys.stderr.write("Unable to open output file, %s, to add metadata\n" % outputname)
        return 1
    try:
        # First the run metadata (timestamps etc.), then the full config dict.
        for k, v in self.metadata.items():
            output.write("%s %s\n" % (k, v))
        for k, v in self.dict.items():
            output.write("%s %s\n" % (k, v))
    finally:
        # Ensure the handle is released even if a write fails.
        output.close()
    return 0
def run(self):
if self.options.run:
if self.options.verbose > 0:
if self.options.verbose > 1:
print
print " Running ERT core code..."
self.run_list = []
if self.dict["CONFIG"]["ERT_MPI"][0] == "True":
mpi_procs_list = parse_int_list(self.dict["CONFIG"]["ERT_MPI_PROCS"][0])
else:
mpi_procs_list = [1]
if self.dict["CONFIG"]["ERT_OPENMP"][0] == "True":
openmp_threads_list = parse_int_list(self.dict["CONFIG"]["ERT_OPENMP_THREADS"][0])
else:
openmp_threads_list = [1]
if self.dict["CONFIG"]["ERT_MPI"][0] == "True":
if self.dict["CONFIG"]["ERT_OPENMP"][0] == "True":
procs_threads_list = parse_int_list(self.dict["CONFIG"]["ERT_PROCS_THREADS"][0])
else:
procs_threads_list = mpi_procs_list
else:
if self.dict["CONFIG"]["ERT_OPENMP"][0] == "True":
procs_threads_list = openmp_threads_list
else:
procs_threads_list = [1]
if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
gpu_blocks_list = parse_int_list(self.dict["CONFIG"]["ERT_GPU_BLOCKS"][0])
else:
gpu_blocks_list = [1]
if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
gpu_threads_list = parse_int_list(self.dict["CONFIG"]["ERT_GPU_THREADS"][0])
else:
gpu_threads_list = [1]
if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
blocks_threads_list = parse_int_list(self.dict["CONFIG"]["ERT_BLOCKS_THREADS"][0])
else:
blocks_threads_list = [1]
num_experiments = int(self.dict["CONFIG"]["ERT_NUM_EXPERIMENTS"][0])
base_command = list_2_string(self.dict["CONFIG"]["ERT_RUN"])
for mpi_procs in mpi_procs_list:
for openmp_threads in openmp_threads_list:
if mpi_procs * openmp_threads in procs_threads_list:
for gpu_blocks in gpu_blocks_list:
for gpu_threads in gpu_threads_list:
if gpu_blocks * gpu_threads in blocks_threads_list:
print_str = ""
if self.dict["CONFIG"]["ERT_MPI"][0] == "True":
mpi_dir = "%s/MPI.%04d" % (self.flop_dir,mpi_procs)
print_str += "MPI %d, " % mpi_procs
else:
mpi_dir = self.flop_dir
if self.options.run:
make_dir_if_needed(mpi_dir,"run",self.options.verbose > 1)
if self.dict["CONFIG"]["ERT_OPENMP"][0] == "True":
openmp_dir = "%s/OpenMP.%04d" % (mpi_dir,openmp_threads)
print_str += "OpenMP %d, " % openmp_threads
else:
openmp_dir = mpi_dir
if self.options.run:
make_dir_if_needed(openmp_dir,"run",self.options.verbose > 1)
if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
gpu_dir = "%s/GPU_Blocks.%04d" % (openmp_dir,gpu_blocks)
print_str += "GPU blocks %d, " % gpu_blocks
else:
gpu_dir = openmp_dir
if self.options.run:
make_dir_if_needed(gpu_dir,"run",self.options.verbose > 1)
if self.dict["CONFIG"]["ERT_GPU"][0] == "True":
run_dir = "%s/GPU_Threads.%04d" % (gpu_dir,gpu_threads)
print_str += "GPU threads %d, " % gpu_threads
else:
run_dir = gpu_dir
if self.options.run:
make_dir_if_needed(run_dir,"run",self.options.verbose > 1)
self.run_list.append(run_dir)
if print_str == "":
print_str = "serial"
else:
print_str = print_str[:-2]
if self.options.run:
if os.path.exists("%s/run.done" % run_dir):
if self.options.verbose > 1:
print " Skipping %s - already run" % print_str
else:
if self.options.verbose > 0:
print " %s" % print_str
command = base_command
command = command.replace("ERT_OPENMP_THREADS",str(openmp_threads))
command = command.replace("ERT_MPI_PROCS",str(mpi_procs))
if self.dict["CONFIG"]["ERT_OCL"][0] == "True":
command = command.replace("ERT_CODE","%s/%s" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0]))
elif self.dict["CONFIG"]["ERT_GPU"][0] == "True":
command = command.replace("ERT_CODE","%s/%s.%s %d %d" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0],self.dict["CONFIG"]["ERT_KERNEL"][0],gpu_blocks,gpu_threads))
else:
command = command.replace("ERT_CODE","%s/%s.%s" % (self.flop_dir,self.dict["CONFIG"]["ERT_DRIVER"][0],self.dict["CONFIG"]["ERT_KERNEL"][0]))
command = "(" + command + ") > %s/try.ERT_TRY_NUM 2>&1 " % run_dir
for t in xrange(1,num_experiments+1):
output = "%s/try.%03d" % (run_dir,t)
cur_command = command
cur_command = cur_command.replace("ERT_TRY_NUM","%03d" % t)
self.metadata["TIMESTAMP_DATA"] = time.time()
if execute_shell(cur_command,self.options.verbose > 1) != 0:
sys.stderr.write("Unable to complete %s, experiment %d\n" % (run_dir,t))
return 1
if self.add_metadata(output) != 0:
return 1
command = ["touch","%s/run.done" % run_dir]
if execute_noshell(command,self.options.verbose > 1) != 0:
sys.stderr.write("Unable to make 'run.done' file in %s\n" % run_dir)
return 1
if self.options.verbose > 1:
print
| |
<reponame>bentley/tools<filename>se/spelling.py
#!/usr/bin/env python3
"""
Defines various spelling-related helper functions.
"""
from pathlib import Path
from pkg_resources import resource_filename
import regex
import se
DICTIONARY = [] # Store our hyphenation dictionary so we don't re-read the file on every pass
def modernize_hyphenation(xhtml: str) -> str:
    """
    Convert old-timey hyphenated compounds into single words based on the passed DICTIONARY.

    INPUTS
    xhtml: A string of XHTML to modernize

    OUTPUTS:
    A string representing the XHTML with its hyphenation modernized
    """
    # First, initialize our dictionary if we haven't already
    if not se.spelling.DICTIONARY:
        # BUG FIX: the original opened the word list inline and leaked the
        # file handle; use a context manager so it is always closed.
        with open(resource_filename("se", str(Path("data") / "words"))) as word_file:
            se.spelling.DICTIONARY = set(line.strip().lower() for line in word_file)

    # Easy fix for a common case
    xhtml = regex.sub(r"\b([Nn])ow-a-days\b", r"\1owadays", xhtml)  # now-a-days -> nowadays

    # Find hyphenated letter-letter candidates (e.g. "to-day").
    result = regex.findall(r"\b[^\W\d_]+\-[^\W\d_]+\b", xhtml)

    for word in set(result):  # set() removes duplicates
        new_word = word.replace("-", "").lower()
        if new_word in se.spelling.DICTIONARY:
            # To preserve capitalization of the first word, we get the individual parts
            # then replace the original match with them joined together and titlecased.
            lhs = regex.sub(r"\-.+$", r"", word)
            rhs = regex.sub(r"^.+?\-", r"", word)
            # Parts match [^\W\d_]+ so they contain no regex metacharacters.
            xhtml = regex.sub(r"" + lhs + "-" + rhs, lhs + rhs.lower(), xhtml)

    # Quick fixes for common error cases
    xhtml = xhtml.replace("z3998:nonfiction", "z3998:non-fiction")
    xhtml = regex.sub(r"\b([Dd])og’seared", r"\1og’s-eared", xhtml)
    xhtml = regex.sub(r"\b([Mm])anat-arms", r"\1an-at-arms", xhtml)
    xhtml = regex.sub(r"\b([Tt])abled’hôte", r"\1able-d’hôte", xhtml)

    return xhtml
def modernize_spelling(xhtml: str) -> str:
"""
Convert old-timey spelling on a case-by-case basis.
INPUTS
xhtml: A string of XHTML to modernize
language: The IETF language tag of the XHTML, like "en-US" or "en-GB"
OUTPUTS:
A string representing the XHTML with its spelling modernized
"""
# What language are we using?
language = regex.search(r"<html[^>]+?xml:lang=\"([^\"]+)\"", xhtml)
if language is None or (language.group(1) != "en-US" and language.group(1) != "en-GB"):
raise se.InvalidLanguageException("No valid xml:lang attribute in <html> root. Only en-US and en-GB are supported.")
# ADDING NEW WORDS TO THIS LIST:
# A good way to check if a word is "archaic" is to do a Google N-Gram search: https://books.google.com/ngrams/graph?case_insensitive=on&year_start=1800&year_end=2000&smoothing=3
# Remember that en-US and en-GB differ significantly, and just because a word might seem strange to you, doesn't mean it's not the common case in the other variant.
# If Google N-Gram shows that a word has declined significantly in usage in BOTH en-US and en-GB (or the SE editor-in-chief makes an exception) then it may be a good candidate to add to this list.
xhtml = regex.sub(r"\b([Dd])evelope\b", r"\1evelop", xhtml) # develope -> develop
xhtml = regex.sub(r"\b([Oo])ker\b", r"\1cher", xhtml) # oker -> ocher
xhtml = regex.sub(r"\b([Ww])ellnigh\b", r"\1ell-nigh", xhtml) # wellnigh -> well-nigh
xhtml = regex.sub(r"\b([Tt]he|[Aa]nd|[Oo]r) what not(?! to)\b", r"\1 whatnot", xhtml) # what not -> whatnot
xhtml = regex.sub(r"\b([Gg])ood\-bye?\b", r"\1oodbye", xhtml) # good-by -> goodbye
xhtml = regex.sub(r"\b([Hh])ind(u|oo)stanee", r"\1industani", xhtml) # hindoostanee -> hindustani
xhtml = regex.sub(r"\b([Hh])indoo", r"\1indu", xhtml) # hindoo -> hindu
xhtml = regex.sub(r"\b([Ee])xpence", r"\1xpense", xhtml) # expence -> expense
xhtml = regex.sub(r"\b([Ll])otos", r"\1otus", xhtml) # lotos -> lotus
xhtml = regex.sub(r"\b([Ss])collop", r"\1callop", xhtml) # scollop -> scallop
xhtml = regex.sub(r"\b([Ss])ubtil(?!(ize|izing))", r"\1ubtle", xhtml) # subtil -> subtle (but "subtilize" and "subtilizing")
xhtml = regex.sub(r"\bQuoiff", r"Coif", xhtml) # quoiff -> coif
xhtml = regex.sub(r"\bquoiff", r"coif", xhtml) # quoiff -> coif
xhtml = regex.sub(r"\bIndorse", r"Endorse", xhtml) # indorse -> endorse
xhtml = regex.sub(r"\bindorse", r"endorse", xhtml) # indorse -> endorse
xhtml = regex.sub(r"\bIntrust", r"Entrust", xhtml) # Intrust -> Entrust
xhtml = regex.sub(r"\bintrust", r"entrust", xhtml) # intrust -> entrust
xhtml = regex.sub(r"\bPhantas(y|ie)", r"Fantasy", xhtml) # phantasie -> fantasy
xhtml = regex.sub(r"\bphantas(y|ie)", r"fantasy", xhtml) # phantasie -> fantasy
xhtml = regex.sub(r"\bPhantastic", r"Fantastic", xhtml) # phantastic -> fantastic
xhtml = regex.sub(r"\bphantastic", r"fantastic", xhtml) # phantastic -> fantastic
xhtml = regex.sub(r"\bPhrensy", r"Frenzy", xhtml) # Phrensy -> Frenzy
xhtml = regex.sub(r"\bphrensy", r"frenzy", xhtml) # phrensy -> frenzy
xhtml = regex.sub(r"\b([Mm])enage\b", r"\1énage", xhtml) # menage -> ménage
xhtml = regex.sub(r"([Hh])ypothenuse", r"\1ypotenuse", xhtml) # hypothenuse -> hypotenuse
xhtml = regex.sub(r"[‘’]([Bb])us\b", r"\1us", xhtml) # ’bus -> bus
xhtml = regex.sub(r"([Nn])aïve", r"\1aive", xhtml) # naïve -> naive
xhtml = regex.sub(r"([Nn])a[ïi]vet[ée]", r"\1aivete", xhtml) # naïveté -> naivete
xhtml = regex.sub(r"&c\.", r"etc.", xhtml) # &c. -> etc.
xhtml = regex.sub(r"([Pp])rot[ée]g[ée]", r"\1rotégé", xhtml) # protege -> protégé
xhtml = regex.sub(r"([Tt])ete-a-tete", r"\1ête-à-tête", xhtml) # tete-a-tete -> tête-à-tête
xhtml = regex.sub(r"([Vv])is-a-vis", r"\1is-à-vis", xhtml) # vis-a-vis _> vis-à-vis
xhtml = regex.sub(r"([Ff])acade", r"\1açade", xhtml) # facade -> façade
xhtml = regex.sub(r"([Cc])h?ateau(s?\b)", r"\1hâteau\2", xhtml) # chateau -> château
xhtml = regex.sub(r"([Hh])abitue", r"\1abitué", xhtml) # habitue -> habitué
xhtml = regex.sub(r"\b([Bb])lase\b", r"\1lasé", xhtml) # blase -> blasé
xhtml = regex.sub(r"\b([Bb])bee[’']s[ \-]wax\b", r"\1eeswax", xhtml) # bee’s-wax -> beeswax
xhtml = regex.sub(r"\b([Cc])afe\b", r"\1afé", xhtml) # cafe -> café
xhtml = regex.sub(r"\b([Cc])afes\b", r"\1afés", xhtml) # cafes -> cafés; We break up cafe so that we don't catch 'cafeteria'
xhtml = regex.sub(r"([Mm])êlée", r"\1elee", xhtml) # mêlée -> melee
xhtml = regex.sub(r"\b([Ff])ete([sd])?\b", r"\1ête\2", xhtml) # fete -> fête
xhtml = regex.sub(r"\b([Rr])ôle\b", r"\1ole", xhtml) # rôle -> role
xhtml = regex.sub(r"\b([Cc])oö", r"\1oo", xhtml) # coö -> coo (as in coöperate)
xhtml = regex.sub(r"\b([Rr])eë", r"\1ee", xhtml) # reë -> ree (as in reëvaluate)
xhtml = regex.sub(r"\b([Dd])aïs\b", r"\1ais", xhtml) # daïs -> dais
xhtml = regex.sub(r"\b([Cc])oup\-de\-grace", r"\1oup-de-grâce", xhtml) # coup-de-grace -> coup-de-grâce
xhtml = regex.sub(r"\b([Cc])anape", r"\1anapé", xhtml) # canape -> canapé
xhtml = regex.sub(r"\b([Pp])recis\b", r"\1récis", xhtml) # precis -> précis
xhtml = regex.sub(r"\b([Gg])ood\-by([^e])", r"\1oodbye\2", xhtml) # good-by -> goodbye
xhtml = regex.sub(r"\b([Gg])ood\-night", r"\1ood night", xhtml) # good-night -> good night
xhtml = regex.sub(r"\b([Gg])ood\-morning", r"\1ood morning", xhtml) # good-morning -> good morning
xhtml = regex.sub(r"\b([Gg])ood\-evening", r"\1ood evening", xhtml) # good-evening -> good evening
xhtml = regex.sub(r"\b([Gg])ood\-day", r"\1ood day", xhtml) # good-day -> good day
xhtml = regex.sub(r"\b([Gg])ood\-afternoon", r"\1ood afternoon", xhtml) # good-afternoon -> good afternoon
xhtml = regex.sub(r"\b([Bb])ete noir", r"\1ête noir", xhtml) # bete noir -> bête noir
xhtml = regex.sub(r"\bEclat\b", r"Éclat", xhtml) # eclat -> éclat
xhtml = regex.sub(r"\beclat\b", r"éclat", xhtml) # eclat -> éclat
xhtml = regex.sub(r"\ba la\b", r"à la", xhtml) # a la -> à la
xhtml = regex.sub(r"\ba propos\b", r"apropos", xhtml) # a propos -> apropos
xhtml = regex.sub(r"\bper cent(s?)\b", r"percent\1", xhtml) # per cent -> percent
xhtml = regex.sub(r"\bpercent\.(\s+[a-z])", r"percent\1", xhtml) # percent. followed by lowercase -> percent
xhtml = regex.sub(r"\bpercent\.,\b", r"percent,", xhtml) # per cent. -> percent
xhtml = regex.sub(r"\b([Ff])iance", r"\1iancé", xhtml) # fiance -> fiancé
xhtml = regex.sub(r"\b([Oo])utre\b", r"\1utré", xhtml) # outre -> outré
xhtml = regex.sub(r"\b([Ff])etich", r"\1etish", xhtml) # fetich -> fetish
xhtml = regex.sub(r"\b([Pp])igstye\b", r"\1igsty", xhtml) # pigstye -> pigsty
xhtml = regex.sub(r"\b([Pp])igstyes\b", r"\1igsties", xhtml) # pigstyes -> pigsties
xhtml = regex.sub(r"\b([Cc])lew(s?)\b", r"\1lue\2", xhtml) # clew -> clue
xhtml = regex.sub(r"\b[ÀA]\s?propos\b", r"Apropos", xhtml) # à propos -> apropos
xhtml = regex.sub(r"\b[àa]\s?propos\b", r"apropos", xhtml) # à propos -> apropos
xhtml = regex.sub(r"\b([Nn])ew comer(s?)\b", r"\1ewcomer\2", xhtml) # new comer -> newcomer
xhtml = regex.sub(r"\b([Pp])ease\b(?![ \-]pudding)", r"\1eas", xhtml) # pease -> peas (but "pease pudding")
xhtml = regex.sub(r"\b([Ss])uch like\b", r"\1uchlike", xhtml) # such like -> suchlike
xhtml = regex.sub(r"\b([Ee])mployé", r"\1mployee", xhtml) # employé -> employee
xhtml = regex.sub(r"\b(?<!ancien )([Rr])égime", r"\1egime", xhtml) # régime -> regime (but "ancien régime")
xhtml = regex.sub(r"\b([Bb])urthen", r"\1urden", xhtml) # burthen -> burden
xhtml = regex.sub(r"\b([Dd])isburthen", r"\1isburden", xhtml) # disburthen -> disburthen
xhtml = regex.sub(r"\b[EÉ]lys[eé]e", r"Élysée", xhtml) # Elysee -> Élysée
xhtml = regex.sub(r"\b([Ll])aw suit", r"\1awsuit", xhtml) # law suit -> lawsuit
xhtml = regex.sub(r"\bIncase", r"Encase", xhtml) # incase -> encase
xhtml = regex.sub(r"\bincase", r"encase", xhtml) # incase -> encase
xhtml = regex.sub(r"\b([Cc])ocoa-?nut", r"\1oconut", xhtml) # cocoanut / cocoa-nut -> coconut
xhtml = regex.sub(r"\b([Ww])aggon", r"\1agon", xhtml) # waggon -> wagon
xhtml = regex.sub(r"\b([Ss])wop", r"\1wap", xhtml) # swop -> swap
xhtml = regex.sub(r"\b([Ll])acquey", r"\1ackey", xhtml) # lacquey -> lackey
xhtml = regex.sub(r"\b([Bb])ric-à-brac", r"\1ric-a-brac", xhtml) # bric-à-brac -> bric-a-brac
xhtml = regex.sub(r"\b([Kk])iosque", r"\1iosk", xhtml) # kiosque -> kiosk
xhtml = regex.sub(r"\b([Dd])epôt", r"\1epot", xhtml) # depôt -> depot
xhtml = regex.sub(r"(?<![Cc]ompl)exion", r"ection", xhtml) # -extion -> -exction (connexion, reflexion, etc., but "complexion")
xhtml = regex.sub(r"\b([Dd])ulness", r"\1ullness", xhtml) # dulness -> dullness
xhtml = regex.sub(r"\b([Ff])iord", r"\1jord", xhtml) # fiord -> fjord
xhtml = regex.sub(r"\b([Ff])ulness\b", r"\1ullness", xhtml) # fulness -> fullness (but not for ex. thoughtfulness)
xhtml = regex.sub(r"\b’([Pp])hone", r"\1hone", xhtml) # ’phone -> phone
xhtml = regex.sub(r"\b([Ss])hew", r"\1how", xhtml) # shew -> show
xhtml = regex.sub(r"\b([Tt])rowsers", r"\1rousers", xhtml) # trowsers -> trousers
xhtml = regex.sub(r"\b([Bb])iass", r"\1ias", xhtml) # biass -> bias
xhtml = regex.sub(r"\b([Cc])huse", r"\1hoose", xhtml) # chuse -> choose
xhtml = regex.sub(r"\b([Cc])husing", r"\1hoosing", xhtml) # chusing -> choosing
xhtml = regex.sub(r"\b([Cc])ontroul(s?)\b", r"\1ontrol\2", xhtml) # controul -> control
xhtml = regex.sub(r"\b([Cc])ontroul(ing|ed)", r"\1ontroll\2", xhtml) # controuling/ed -> controlling/ed
xhtml = regex.sub(r"\b([Ss])urpriz(e|ing)", r"\1urpris\2", xhtml) # surprize->surprise, surprizing->surprising
xhtml = regex.sub(r"\b([Dd])oat\b", r"\1ote", xhtml) # doat -> dote
xhtml = regex.sub(r"\b([Dd])oat(ed|ing)", r"\1ot\2", xhtml) # doating -> doting
xhtml = regex.sub(r"\b([Ss])topt", r"\1topped", xhtml) # stopt -> stopped
xhtml = regex.sub(r"\b([Ss])tept", r"\1tepped", xhtml) # stept -> stepped
xhtml = regex.sub(r"\b([Ss])ecresy", r"\1ecrecy", xhtml) # secresy -> secrecy
xhtml = regex.sub(r"\b([Mm])esalliance", r"\1ésalliance", xhtml) # mesalliance -> mésalliance
xhtml = regex.sub(r"\b([Ss])ate\b", r"\1at", xhtml) # sate -> sat
xhtml = regex.sub(r"\b([Aa])ttache\b", r"\1ttaché", xhtml) # attache -> attaché
xhtml = regex.sub(r"\b([Pp])orte[\- ]coch[eè]re\b", r"\1orte-cochère", xhtml) # porte-cochere -> porte-cochère
xhtml = regex.sub(r"\b([Nn])égligée?(s?)\b", r"\1egligee\2", xhtml) # négligée -> negligee
xhtml = regex.sub(r"\b([Ss])hort cut(s?)\b", r"\1hortcut\2", xhtml) # short cut -> shortcut
xhtml = regex.sub(r"\b([Ff])ocuss", r"\1ocus", xhtml) # focuss -> focus
xhtml = regex.sub(r"\b([Mm])ise[ \-]en[ \-]sc[eè]ne", r"\1ise-en-scène", xhtml) # mise en scene -> mise-en-scène
xhtml = regex.sub(r"\b([Nn])ee\b", r"\1ée", xhtml) # nee -> née
xhtml = regex.sub(r"\b([Ee])au[ \-]de[ \-]Cologne\b", r"\1au de cologne", xhtml) # eau de Cologne -> eau de cologne
xhtml = regex.sub(r"\b([Ss])enor", r"\1eñor", xhtml) # senor -> señor (senores, senorita/s, etc.)
xhtml = regex.sub(r"\b([Gg])ramme?(s)?\b", r"\1ram\2", xhtml) # gramm/grammes -> gram/grams
xhtml = regex.sub(r"\b([Aa])larum\b", r"\1larm", xhtml) # alarum -> alarm
xhtml = regex.sub(r"\b([Bb])owlder(s?)\b", r"\1oulder\2", xhtml) # bowlder/bowlders -> boulder/boulders
xhtml = regex.sub(r"\b([Dd])istingue\b", r"\1istingué", xhtml) # distingue -> distingué
xhtml = regex.sub(r"\b[EÉ]cart[eé]\b", r"Écarté", xhtml) # ecarte -> écarté
xhtml = regex.sub(r"\b[eé]cart[eé]\b", | |
#
# Copyright (c) 2014 NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author : <NAME> <<EMAIL>>
#
"""
Module handling authentication of users. Also applies login policies
such as rate limiting.
"""
from __future__ import annotations
import logging
from dataclasses import asdict, dataclass, field
from datetime import datetime
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type
from bson import ObjectId
from eduid_common.api import exceptions
from eduid_common.authn import get_vccs_client
from eduid_common.misc.timeutil import utc_now
from eduid_userdb import MongoDB
from eduid_userdb.credentials import Credential, Password
from eduid_userdb.exceptions import UserHasNotCompletedSignup
from eduid_userdb.idp import IdPUser, IdPUserDb
from vccs_client import VCCSClientHTTPError, VCCSPasswordFactor
from eduid_webapp.idp.settings.common import IdPConfig
logger = logging.getLogger(__name__)
@dataclass
class AuthnData(object):
    """
    Data about a successful authentication.

    Returned from functions performing authentication.
    """

    cred_id: str
    timestamp: datetime = field(default_factory=utc_now)

    def to_dict(self) -> Dict[str, Any]:
        """ Return the object in dict format (serialized for storing in MongoDB). """
        serialized = asdict(self)
        # Legacy database documents store the timestamp under 'authn_ts',
        # so rename the field before handing the dict to MongoDB.
        serialized['authn_ts'] = serialized.pop('timestamp')
        return serialized

    @classmethod
    def from_dict(cls: Type[AuthnData], data: Mapping[str, Any]) -> AuthnData:
        """ Construct element from a data dict in database format. """
        kwargs = dict(data)  # copy, so the caller's mapping is left untouched
        # 'timestamp' is called 'authn_ts' in the database for legacy reasons
        if 'authn_ts' in kwargs:
            kwargs['timestamp'] = kwargs.pop('authn_ts')
        return cls(**kwargs)
class IdPAuthn(object):
    """
    Authentication backend for the IdP.

    Verifies username/password logins against the VCCS backend and applies
    login policies (rate limiting, credential expiry) using usage data kept
    in an AuthnInfoStore.

    :param config: IdP configuration data
    :param userdb: Database of IdP users
    """

    def __init__(
        self, config: IdPConfig, userdb: IdPUserDb,
    ):
        self.config = config
        self.userdb = userdb
        # Client used to talk to the VCCS credential verification backend
        self.auth_client = get_vccs_client(config.vccs_url)
        # already checked with isinstance in app init
        assert config.mongo_uri is not None
        self.authn_store = AuthnInfoStore(uri=config.mongo_uri)

    def password_authn(self, username: str, password: str) -> Tuple[Optional[IdPUser], Optional[AuthnData]]:
        """
        Authenticate someone using a username and password.

        :param username: Username as entered by the user
        :param password: Plaintext password as entered by the user
        :returns: The IdPUser found, and AuthnData on success; (None, None) on failure
        """
        try:
            user = self.userdb.lookup_user(username)
        except UserHasNotCompletedSignup:
            # XXX Redirect user to some kind of info page
            return None, None
        if not user:
            logger.info(f'Unknown user : {repr(username)}')
            # XXX we effectively disclose there was no such user by the quick
            # response in this case. Maybe send bogus auth request to backends?
            return None, None
        logger.debug(f'Found user {user}')
        cred = self._verify_username_and_password2(user, password)
        if not cred:
            return None, None
        return user, AuthnData(cred_id=cred.key)

    def _verify_username_and_password2(self, user: IdPUser, password: str) -> Optional[Password]:
        """
        Attempt to verify that a password is valid for a specific user.

        Currently, the naive approach of looping through all the users password credentials
        is taken. This is bad because the more passwords a user has, the more likely an
        online attacker is to guess any one of them.

        :param user: User whose password credentials should be tried
        :param password: Plaintext password to verify
        :return: The Password credential that verified, or None on failure
        :raises exceptions.EduidTooManyRequests: if the user exceeded the monthly failure limit
        """
        pw_credentials = user.credentials.filter(Password).to_list()
        if self.authn_store:  # requires optional configuration
            authn_info = self.authn_store.get_user_authn_info(user)
            # Rate limiting: refuse to even try authenticating users with too
            # many failed attempts this month.
            if authn_info.failures_this_month > self.config.max_authn_failures_per_month:
                logger.info(
                    "User {!r} AuthN failures this month {!r} > {!r}".format(
                        user, authn_info.failures_this_month, self.config.max_authn_failures_per_month
                    )
                )
                raise exceptions.EduidTooManyRequests("Too Many Requests")
            # Optimize list of credentials to try based on which credentials the
            # user used in the last successful authentication. This optimization
            # is based on plain assumption, no measurements whatsoever.
            last_creds = authn_info.last_used_credentials
            sorted_creds = sorted(pw_credentials, key=lambda x: x.credential_id not in last_creds)
            if sorted_creds != pw_credentials:
                logger.debug(
                    "Re-sorted list of credentials into\n{}\nbased on last-used {!r}".format(sorted_creds, last_creds)
                )
                pw_credentials = sorted_creds
        return self._authn_passwords(user, password, pw_credentials)

    def _authn_passwords(self, user: IdPUser, password: str, pw_credentials: Sequence[Password]) -> Optional[Password]:
        """
        Perform the final actual authentication of a user based on a list of (password) credentials.

        :param user: User object
        :param password: Plaintext password to verify
        :param pw_credentials: Password credentials to try
        :return: Credential used, or None if authentication failed
        """
        for cred in pw_credentials:
            try:
                factor = VCCSPasswordFactor(password, str(cred.credential_id), str(cred.salt))
            except ValueError as exc:
                # Could not construct a password factor from this credential - skip it
                logger.info(f'User {user} password factor {cred.credential_id} unusable: {exc}')
                continue
            logger.debug(f"Password-authenticating {user}/{cred.credential_id} with VCCS: {factor}")
            user_id = str(user.user_id)
            try:
                if self.auth_client.authenticate(user_id, [factor]):
                    logger.debug(f'VCCS authenticated user {user}')
                    # Verify that the credential had been successfully used in the last 18 months
                    # (Kantara AL2_CM_CSM#050).
                    if self.credential_expired(cred):
                        logger.info(f'User {user} credential {cred.key} has expired')
                        raise exceptions.EduidForbidden('CREDENTIAL_EXPIRED')
                    self.log_authn(user, success=[cred.credential_id], failure=[])
                    return cred
            except VCCSClientHTTPError as exc:
                if exc.http_code == 500:
                    logger.debug(f'VCCS credential {cred.credential_id} might be revoked')
                    continue
                # NOTE(review): non-500 HTTP errors are silently ignored and the
                # next credential is tried - confirm this is intentional.
        logger.debug(f'VCCS username-password authentication FAILED for user {user}')
        self.log_authn(user, success=[], failure=[cred.credential_id for cred in pw_credentials])
        return None

    def credential_expired(self, cred: Password) -> bool:
        """
        Check that a credential hasn't been unused for too long according to Kantara AL2_CM_CSM#050.

        :param cred: Authentication credential
        :return: True if the credential was last used ~18 months (547 days) or more ago
        """
        if not self.authn_store:  # requires optional configuration
            logger.debug(f"Can't check if credential {cred.key} is expired, no authn_store available")
            return False
        last_used = self.authn_store.get_credential_last_used(cred.credential_id)
        if last_used is None:
            # Can't disallow this while there is a short-path from signup to dashboard unforch...
            logger.debug('Allowing never-used credential {!r}'.format(cred))
            return False
        now = utc_now()
        delta = now - last_used
        logger.debug(f'Credential {cred.key} last used {delta.days} days ago')
        # 18 months interpreted as one and a half 365-day years
        return delta.days >= int(365 * 1.5)

    def log_authn(self, user: IdPUser, success: Sequence[str], failure: Sequence[str]) -> None:
        """
        Log user authn success as well as failures.

        :param user: User
        :param success: List of successfully authenticated credentials
        :param failure: List of failed credentials
        """
        if not self.authn_store:  # requires optional configuration
            return None
        if success:
            self.authn_store.credential_success(success)
        if success or failure:
            self.authn_store.update_user(user.user_id, success, failure)
        return None
class AuthnInfoStore:
"""
In this database, information about users have ObjectId _id's corresponding to user.user_id,
and information about credentials have string _id's.
Example:
User info:
{
"_id" : ObjectId("5fc5f6a318e93a5e90212c0e"),
"success_ts" : ISODate("2020-12-01T07:54:24.309Z"),
"last_credential_ids" : [
"5fc5f6ab18e93a5e90212c11"
],
"fail_count" : {
"202012" : 0
},
"success_count" : {
"202012" : 1
}
}
Credential info:
{
"_id" : "5fc5f74618e93a5e90212c16",
"success_ts" : ISODate("2020-12-01T07:56:58.665Z")
}
"""
def __init__(self, uri: str, db_name: str = 'eduid_idp_authninfo', collection_name: str = 'authn_info'):
    """
    Set up the MongoDB-backed store for authentication usage information.

    :param uri: MongoDB connection URI
    :param db_name: Name of the database holding the authn info
    :param collection_name: Name of the collection holding the authn info
    """
    logger.debug('Setting up AuthnInfoStore')
    self._db = MongoDB(db_uri=uri, db_name=db_name)
    self.collection = self._db.get_collection(collection_name)
def credential_success(self, cred_ids: Sequence[str], ts: Optional[datetime] = None) -> None:
    """
    Record the timestamp of the latest successful use of each credential.

    Kantara AL2_CM_CSM#050 requires that any credential that is not used for
    a period of 18 months is disabled (taken to mean revoked).
    Therefore we need to log all successful authentications and have a cron
    job handling the revoking of unused credentials.

    :param cred_ids: List of Credential ID
    :param ts: Optional timestamp
    :return: None
    """
    if ts is None:
        ts = utc_now()
    # Update all existing entries in one go would've been nice, but pymongo does not
    # return meaningful data for multi=True, so it is not possible to figure out
    # which entries were actually updated :(
    for this in cred_ids:
        # BUG FIX: Collection.save() is deprecated and removed in PyMongo 4;
        # replace_one(..., upsert=True) is the equivalent upsert-by-_id.
        self.collection.replace_one({'_id': this}, {'_id': this, 'success_ts': ts}, upsert=True)
    return None
def update_user(
self, user_id: ObjectId, success: Sequence[str], failure: Sequence[str], ts: Optional[datetime] = None
) -> None:
"""
Log authentication result data for this user.
The fail_count.month is logged to be able to lock users out after too
many failed authentication attempts in a month (yet unspecific Kantara
requirement).
The success_count.month is logged for symmetry.
The last_credential_ids are logged so that the IdP can sort
the list of credentials giving preference | |
self.colors.keys():
cr = Color(c, v, p.get_shades())
cr.default = p.get_core_shade()
self._colors[c] = cr
self.log.info(
f"Color '{c}' extracted from '{p.get_palette_name()}'")
self.log.info(f"Total of {len(self.colors) - previous} new colors "
f"added to the current color list")
def _extract(self, name: str) -> Color:
    """
    Look up a Color object by name.

    If the name is unknown in the current palette, colors from the other
    available palettes are pulled in and the lookup is retried, falling
    back to the SYNONYM table before reporting an error.
    """
    name = name.strip()
    if name not in self.colors:
        # Unknown in the current palette; pull in colors from every other
        # available palette and try again.
        self.log.warn(f"Color '{name}' not found in current palette. "
                      f"Checking if it is present in other available "
                      f"palettes.")
        self._generate_additional_colors()
        if name not in self.colors and name in SYNONYM:
            self.log.info(f"{SYNONYM[name]} is used instead {name}")
            return self.colors[SYNONYM[name]]
    # If color is still not present, then it is an error
    if name not in self.colors:
        self.log.error(f"Unable to find '{name}'. Please check "
                       f"spelling error. Currently available colors: "
                       f"{list(self.colors.keys())}", exception=KeyError)
    return self.colors[name]
def _extract_color_list(self, name, no_of_colors: int, *,
                        starting_shade: float = None,
                        ending_shade: float = None,
                        reverse: bool = False,
                        **kwargs) -> List[ColorString]:
    """
    Produce `no_of_colors` shades of one color, evenly spaced from
    `starting_shade` (inclusive) towards `ending_shade` (exclusive),
    optionally in descending order.
    """
    # When no explicit bounds are given, fall back to the extremes of
    # the default shades.
    if starting_shade is None:
        starting_shade = min(self._value.get_shades())
    if ending_shade is None:
        ending_shade = max(self._value.get_shades())
    shade_values = [starting_shade + i * (ending_shade - starting_shade) / no_of_colors
                    for i in range(no_of_colors)]
    if reverse:
        shade_values.reverse()
    return [self._extract(name).shade(value) for value in shade_values]
def random(self, no_of_colors: int = 1, *,
shade: float = None,
alpha: float = None,
starting_shade: float = None,
ending_shade: float = None,
gradient: bool = True,
avoid: list = None,
reverse: bool = False,
force_list: bool = False,
print_colors: bool = False,
seed=None,
**kwargs):
"""
Generate random color(s) from current palette.
>>> p = Palette()
>>> p.random() # Single random color
>>> p.random(no_of_colors=5) # 5 random colors
>>> p.random(shade=20) # Random color whos shade is 20
>>> p.random(shade=60, no_of_colors=8) # 8 random colors whose shade is 60
>>> p.random(starting_shade=50, ending_shade=80, no_of_colors=4) #Random colors whos shades are between 50-80
>>> p.random(no_of_colors=4, gradient=False) # 4 completely random colors with random shades
>>> p.random(no_of_colors=40, avoid=["blue"]) # 40 random colors but do not use "blue"
>>> p.random(no_of_colors=10, seed=100) # Generate 10 random color with seed 100
>>> p.random(force_list=True) # List containing single random color
>>> p.random(print_colors=True) # Print color on the console
>>> p.random(no_of_colors=5, reverse=True) # 5 random colors whos shades should arange in darker to lighter
:param no_of_colors: Number of colors (default: 1)
:param shade: Shade of the color (default: palette's default). This
will be ignored when number of colors are greater than 1 and
starting_shade /ending_shade arguments are provided
:param alpha: Transparency value (beteen 0-1). This will only be
considered if palette 'color_mode' supports Alpha channel. This will
be applied to all colors.
:param starting_shade: Starting shade of colors (used when number of
colors are more than 1.)
:param ending_shade: Ending shade of colors (used when number of
colors are more than 1.)
:param gradient: If True, all shades (not colors) will be sorted in
ascending order. (default: True)
:param avoid: List of colors which should not be considered while
generating random numbers. (default: white, black)
:param reverse: If True, shades will be ordered in descending order
:param force_list: If True, return type will always be list. Else
when no of colors is 1, this function will return str/tuple.
:param print_colors: If True, colors generated will be printed on
the console.
:param seed: Seed for random number generator (will override the
global palette seed)
:param kwargs: Other named arguments
:return: Str/Tuple/list of random colors depending above options
"""
_param_deprecation(self.log, "ignore_gray", **kwargs)
_param_deprecation(self.log, "force_gray", **kwargs)
if seed is not None:
self.seed = seed
if avoid is None:
avoid = ["white", "black"]
use_only_shade = True
if shade is not None and starting_shade is not None:
use_only_shade = False
if shade is not None and ending_shade is not None:
use_only_shade = False
if starting_shade is None:
starting_shade = min(self._value.get_shades())
if ending_shade is None:
ending_shade = max(self._value.get_shades())
# remove the restricted colors provided by 'avoid'
accepted_colors = []
for x in self.colors.values():
if x.name not in avoid:
accepted_colors.append(x)
# Select random colors
colors = []
for i in range(no_of_colors):
colors.append(random.choice(accepted_colors))
# If shade is specified, directly return the selected colors
if shade is not None:
if no_of_colors == 1 and not force_list:
return self._send(colors[0].shade(shade), alpha=alpha,
print_colors=print_colors)
return self._send([x.shade(shade) for x in colors], alpha=alpha,
print_colors=print_colors)
# Select the shade
# First check if we can return one of the standard shade.
# This will provide proper palette shade and will also reduce the
# computation time of all conversion
possible_shades = [x for x in self._value.get_shades() if
starting_shade <= x <= ending_shade]
shades = []
if no_of_colors <= len(possible_shades):
for i in range(no_of_colors):
shades.append(random.choice(possible_shades))
else:
for i in range(no_of_colors):
shades = random.randint(starting_shade, ending_shade)
# If gradient is true, sort the shades
if gradient:
shades = list(sorted(shades))
if reverse:
shades = list(reversed(shades))
if use_only_shade and shade is not None:
shades = [shade for _ in range(len(shades))]
if no_of_colors == 1 and not force_list:
return self._send(colors[0].shade(shades[0]), alpha=alpha,
print_colors=print_colors)
return self._send([x[0].shade(x[1]) for x in zip(colors, shades)],
alpha=alpha, print_colors=print_colors)
def random_balanced(self, no_of_colors: int = 1):
"""
Generates balanced random colors by defining shade. It essentially
just predefines the shade to palettes default shade.
:param no_of_colors: Number of colors
:return: str/tuple/list based on number of colors and global
'color_mode'
"""
return self.random(no_of_colors, shade=self._value.get_core_shade())
def random_gradient(self, no_of_colors: int = 3, *,
shade: float = None,
alpha: float = 1,
print_colors: bool = False,
complementary=True,
**kwargs):
"""
Generates random gradient between two colors
>>> p = Palette()
>>> p.random_gradient() # Random gradient between two colors
>>> p.random_gradient(no_of_colors=5) # Generates gradient between 2 colors and also adds 3 more colors between them
>>> p.random_gradient(complementary=False) # Use totally random colors for the gradient
:param no_of_colors: Number of in-between colors (default: 3)
:param shade: Shades of color
:param alpha: Alpha value between 0-1 (will ne applied to all colors)
:param print_colors: If True, prints colors on the console
:param complementary: If True, generates gradient between two
complementary colors. (default: True)
:param kwargs: Other named arguments
:return: List of colors representing gradient
"""
_param_deprecation(self.log, "ignore_gray", **kwargs)
colors = set()
colors.add(random.choice(list(self.colors.values())))
colors.add(random.choice(list(self.colors.values())))
if shade is None:
shade = self._value.get_core_shade()
colors = [x.shade(shade) for x in colors]
if complementary:
colors[1] = ColorString(get_complementary(colors[0]))
if no_of_colors < 3:
return self._send(colors[:no_of_colors], alpha=alpha,
print_colors=print_colors)
no_of_colors = no_of_colors - 2
mid_colors = color_in_between(colors[0], colors[1], no_of_colors)
mid_colors = [ColorString(x) for x in mid_colors]
mid_colors.insert(0, colors[0])
mid_colors.append(colors[1])
return self._send(mid_colors, alpha=alpha, print_colors=print_colors)
    @staticmethod
    @deprecated("This method will be removed from Palette class in future. "
                "Please use ColorMap class for this purpose")
    def cmap_from(matplotlib, hex_color_list: list):
        """Creates custom cmap from given hex_color list.
        Use :class:`~ColorMap` for more refined control. Color inputs should
        be HEX format.
        :param matplotlib: matplotlib object (https://matplotlib.org/)
        :param hex_color_list: List of colors in Hex format
        :return: *LinearSegmentedColormap* segment which can be used with
        *matplotlib* plots.
        :raises Exception: when hex_color_list is not a list, or when the
        first argument is not the matplotlib module (AttributeError on
        `.colors` is translated into a usage hint).
        """
        # Strict list check (intentionally not isinstance, to match the
        # historical behavior of this deprecated helper).
        if type(hex_color_list) is not list:
            raise Exception("Please provide list of colors")
        try:
            return matplotlib.colors.LinearSegmentedColormap.from_list(
                "cMap_secret_colors", hex_color_list)
        except AttributeError:
            # The caller most likely passed something other than the
            # matplotlib module as the first argument.
            raise Exception("Add 'matplotlib' as a first argument. For "
                            "example, import matplotlib; palette.cmap_from("
                            "matplotlib, "
                            "palette.red());")
def _common_color(self, name, kwargs):
del kwargs["self"]
color = self._extract(name)
if kwargs["no_of_colors"] > 1:
if kwargs["gradient"]:
colors = self._extract_color_list(name, **kwargs)
else:
shades = []
for i in range(int(kwargs["no_of_colors"])):
shades.append(random.randint(
self._value.get_shades()[-1],
self._value.get_shades()[0]
))
colors = [color.shade(x) for x in shades]
return self._send(colors, **kwargs)
else:
shade = color.default
if kwargs["shade"]:
shade = kwargs["shade"]
return self._send(color.shade(shade), **kwargs)
def _named_color(self, name: str, system: str, strict: bool):
both = [W3_DATA, X11_DATA]
if system == "x11":
both = list(reversed(both))
if strict:
# If it is strict search, take only named data
data = both[0]
else:
# If it is not strict search, merge both names such that one
# will get priority over another
data = {**both[1], **both[0]}
if name not in data.keys():
raise KeyError(f"Unfortunately, '{name}' is not found in "
f"available naming datasets. Please check "
f"spelling mistake. This search is case sensitive"
f" if 'strict_search' option is enabled.")
return Color(name, [self.white(), data[name], self.black()],
[0, 50, 100])
| |
without extension
# NOTE: not compatible with use of the -i option
xtitle_filename = os.path.split(xfile)[1]
xtitle_filename = os.path.splitext(xtitle_filename)[0]
logging.info('Title:[%s]', NP.strunicodeout(xtitle_filename))
# For each pic found on Flickr 1st check title and then Sets
pic_index = 0
for pic in search_is_uploaded.find('photos').findall('photo'):
pic_index += 1
logging.debug('idx=[%s] pic.id=[%s] '
'pic.title=[%s] pic.tags=[%s]',
pic_index,
pic.attrib['id'],
NP.strunicodeout(pic.attrib['title']),
NP.strunicodeout(pic.attrib['tags']))
# Use NP.strunicodeout in comparison to avoid warning:
# "UnicodeWarning: Unicode equal comparison failed to
# convert both arguments to Unicode"
logging.debug('xtitle_filename/type=[%s]/[%s] '
'pic.attrib[title]/type=[%s]/[%s]',
NP.strunicodeout(xtitle_filename),
type(xtitle_filename),
NP.strunicodeout(pic.attrib['title']),
type(pic.attrib['title']))
logging.info('Compare Titles=[%s]',
(NP.strunicodeout(xtitle_filename) ==
NP.strunicodeout(pic.attrib['title'])))
# if pic with checksum has a different title, continue
if not (NP.strunicodeout(xtitle_filename) ==
NP.strunicodeout(pic.attrib['title'])):
logging.info('Different titles: File:[%s] Flickr:[%s]',
NP.strunicodeout(xtitle_filename),
NP.strunicodeout(pic.attrib['title']))
continue
ctx_success, resp, ctx_errcode = faw.flickrapi_fn(
self.nuflickr.photos.getAllContexts, (),
dict(photo_id=pic.attrib['id']),
3, 8, True, caughtcode='195')
if not (ctx_success and ctx_errcode == 0):
# CODING: how to indicate an error?
# Possibly raising an exception?
# raise Exception('photos_getAllContexts: '
# 'Max attempts exhausted.')
NP.niceprint(' IS_UPLOADED=[ERROR#2]',
fname='isuploaded', verbosity=3,
logalso=logging.WARNING)
return ret_is_photo_uploaded, ret_photos_uploaded, \
ret_photo_id, ret_uploaded_no_set
logging.info('len(resp.findall(''set'')):[%s]',
len(resp.findall('set')))
# B) checksum, title, empty setname, Count=1
# THEN EXISTS, ASSIGN SET IF tag album IS FOUND
if not resp.findall('set'):
# CODING: Consider one additional result for PHOTO UPLOADED
# WITHOUT SET WITH ALBUM TAG when row exists on DB. Mark
# such row on the database files.set_id to null
# to force re-assigning to Album/Set on flickr.
tfind, _ = self.photos_find_tag(
photo_id=pic.attrib['id'],
intag='album:{}'
.format(xsetname))
if tfind:
NP.niceprint(' IS_UPLOADED=[UPLOADED WITHOUT'
' SET WITH ALBUM TAG]',
fname='isuploaded', verbosity=2,
logalso=logging.WARNING)
ret_is_photo_uploaded = True
ret_photo_id = pic.attrib['id']
ret_uploaded_no_set = True
return ret_is_photo_uploaded, ret_photos_uploaded, \
ret_photo_id, ret_uploaded_no_set
# B1) checksum, title, empty setname, Count=1
# THEN NOT EXISTS, IGNORE IF tag album NOT FOUND
else:
NP.niceprint('IS_UPLOADED=[UPLOADED WITHOUT'
' SET WITHOUT ALBUM TAG]',
fname='isuploaded', verbosity=2,
logalso=logging.WARNING)
ret_is_photo_uploaded = False
ret_uploaded_no_set = True
for setinlist in resp.findall('set'):
logging.warning('Output for setinlist: %s',
xml.etree.ElementTree.tostring(
setinlist,
encoding='utf-8',
method='xml'))
logging.warning(
'\nCheck : id:[%s] File:[%s]\n'
'Check : Title:[%s] Set:[%s]\n'
'Flickr: Title:[%s] Set:[%s] Tags:[%s]\n',
pic.attrib['id'],
NP.strunicodeout(xfile),
NP.strunicodeout(xtitle_filename),
NP.strunicodeout(xsetname),
NP.strunicodeout(pic.attrib['title']),
NP.strunicodeout(setinlist.attrib['title']),
NP.strunicodeout(pic.attrib['tags']))
logging.warning(
'Compare Sets=[%s]',
(NP.strunicodeout(xsetname) ==
NP.strunicodeout(setinlist.attrib['title'])))
# C) checksum, title, setname (1 or more), Count>=1
# THEN EXISTS
if (NP.strunicodeout(xsetname) ==
NP.strunicodeout(setinlist.attrib['title'])):
NP.niceprint(' IS_UPLOADED=[TRUE WITH SET]',
fname='isuploaded', verbosity=2,
logalso=logging.WARNING)
ret_is_photo_uploaded = True
ret_photo_id = pic.attrib['id']
ret_uploaded_no_set = False
return ret_is_photo_uploaded, ret_photos_uploaded, \
ret_photo_id, ret_uploaded_no_set
else:
# D) checksum, title, other setname, Count>=1
# THEN NOT EXISTS
NP.niceprint(' IS_UPLOADED=[FALSE OTHER SET, '
'CONTINUING SEARCH IN SETS]',
fname='isuploaded', verbosity=2,
logalso=logging.WARNING)
continue
return ret_is_photo_uploaded, ret_photos_uploaded, \
ret_photo_id, ret_uploaded_no_set
# -------------------------------------------------------------------------
# photos_find_tag
#
# Determines if tag is assigned to a pic.
#
def photos_find_tag(self, photo_id, intag):
""" photos_find_tag
Determines if intag is assigned to a pic.
Returns:
found_tag = False/True
tag_id = tag_id if found
"""
logging.info('find_tag: photo:[%s] intag:[%s]', photo_id, intag)
tag_success, tag_result, tag_errcode = faw.flickrapi_fn(
self.nuflickr.tags.getListPhoto, (),
dict(photo_id=photo_id),
3, 15, True, caughtcode='205')
if tag_success and tag_errcode == 0:
tag_id = None
for tag in tag_result.find('photo').find('tags').findall('tag'):
logging.info('photos_find_tag tag:[%s]', tag.attrib['raw'])
if (NP.strunicodeout(tag.attrib['raw']) ==
NP.strunicodeout(intag)):
tag_id = tag.attrib['id']
logging.info('Found tag_id:[%s] for intag:[%s]',
tag_id, intag)
return True, tag_id
return False, ''
# -------------------------------------------------------------------------
# photos_remove_tag
#
# Local Wrapper for Flickr photos.removeTag
# The tag to remove from the photo. This parameter should contain
# a tag id, as returned by flickr.photos.getInfo.
#
def photos_remove_tag(self, tag_id):
""" photos_remove_tag
Local Wrapper for Flickr photos.removeTag
The tag to remove from the photo. This parameter should contain
a tag id, as returned by flickr.photos.getInfo.
"""
logging.info('remove_tag: tag_id:[%s]', tag_id)
get_success, _, _ = faw.flickrapi_fn(
self.nuflickr.tags.removeTag, (),
dict(tag_id=tag_id),
3, 5, False, caughtcode='206')
return get_success
# -------------------------------------------------------------------------
# photos_set_dates
#
# Update Date/Time Taken on Flickr for Video files
#
def photos_set_dates(self, photo_id, datetxt):
""" photos_set_dates
Update Date/Time Taken on Flickr for Video files
"""
logging.warning(' Setting Date:[%s] Id=[%s]', datetxt, photo_id)
get_success, get_result, get_errcode = faw.flickrapi_fn(
self.nuflickr.photos.setdates, (),
dict(photo_id=photo_id,
date_taken='{!s}'.format(datetxt),
date_taken_granularity=0),
3, 15, True, caughtcode='210')
if get_success and get_errcode == 0:
logging.debug('Set Date Response: OK!')
else:
logging.error('Set Date Response: NOK!')
return get_result
    # -------------------------------------------------------------------------
    # madd_albums_tag
    #
    # madd_albums_tag wrapper for multiprocessing purposes
    #
    def madd_albums_tag(self, lock, running, mutex, filelist, c_total, cur):
        """ madd_albums_tag
        Wrapper function for multiprocessing support to call add_albums_tag
        with a chunk of the files.
        lock = for database access control in multiprocessing
        running = shared value to count processed files in
        multiprocessing
        mutex = for running access control in multiprocessing
        cur = cursor for database access control in multiprocessing
        Remarks:
        code, cur = Not used as function does not update database.
        pylint: disable=unused-argument
        """
        # CODING pylint
        # pylint: disable=unused-argument
        for i, afile in enumerate(filelist):
            logging.warning('===Element of Chunk:[%s] file:[%s]', i, afile)
            # CODING: ALBUM_TAGS_01: Refactor with code at ALBUM_TAGS_02
            # afile[0] = files_id
            # afile[1] = path
            # afile[2] = set_name
            # afile[3] = set_id
            NP.niceprint('ID:[{!s}] Path:[{!s}] Set:[{!s}] SetID:[{!s}]'
                         .format(str(afile[0]), afile[1], afile[2], afile[3]),
                         fname='addAlbumMigrate')
            # row[1] = path for the file from table files
            setname = faw.set_name_from_file(afile[1],
                                             self.xcfg.FILES_DIR,
                                             self.xcfg.FULL_SET_NAME)
            # Look for an existing album tag; prefer the set name recorded
            # in the DB, falling back to the name derived from the path.
            tfind, tid = self.photos_find_tag(
                photo_id=afile[0],
                intag='album:{}'.format(afile[2]
                                        if afile[2] is not None
                                        else setname))
            NP.niceprint(' Find Tag:[{!s}] TagId:[{!s}]'
                         .format(tfind, tid), verbosity=1,
                         logalso=logging.WARNING)
            if not tfind:
                # Tag missing on Flickr: add album:"<set name>" to the pic.
                get_success, _, get_errcode = faw.flickrapi_fn(
                    self.nuflickr.photos.addTags, (),
                    dict(photo_id=afile[0],
                         tags='album:"{}"'.format(afile[2]
                                                  if afile[2] is not None
                                                  else setname)),
                    2, 2, False, caughtcode='214')
                a_result = get_success and get_errcode == 0
                NP.niceprint('{!s}: Photo_id:[{!s}] File:[{!s}]'
                             .format('Added album tag'
                                     if a_result
                                     else ' Failed tagging',
                                     str(afile[0]),
                                     NP.strunicodeout(afile[1])),
                             fname='addAlbumMigrate',
                             logalso=logging.WARNING)
            else:
                NP.niceprint(' Found Tag:[{!s}] TagId:[{!s}]'
                             .format(tfind, tid), verbosity=1,
                             logalso=logging.WARNING)
            logging.debug('===Multiprocessing=== in.mutex.acquire(w)')
            # Atomically bump the shared processed-files counter.
            mutex.acquire()
            running.value += 1
            xcount = running.value
            mutex.release()
            logging.info('===Multiprocessing=== out.mutex.release(w)')
            # Show number of files processed so far
            NP.niceprocessedfiles(xcount, c_total, False,
                                  msg='Album tag Added')
            # Control pace (rate limit)of each proceess
            rate_limited.rate_5_callspersecond()
    # -------------------------------------------------------------------------
    # add_albums_tag
    #
    # Prepare for version 2.7.0 Add album info to loaded pics
    #
    def add_albums_tag(self):
        """ add_albums_tag
        Adds tag:album to pics.
        Returns False if the initial database SELECT fails, True otherwise.
        """
        # ---------------------------------------------------------------------
        # Local Variables
        #
        # mlockdb = multiprocessing Lock for access to Database
        # mmutex = multiprocessing mutex for access to value mrunning
        # mrunning = multiprocessing Value to count processed photos
        mlockdb = None
        mmutex = None
        mrunning = None
        if not self.check_token():
            # authenticate sys.exits in case of failure
            self.authenticate()
        con, cur = litedb.connect(self.xcfg.DB_PATH)
        # Fetch every file together with its set (album) name/id, when one
        # is assigned (LEFT OUTER JOIN: unassigned files yield NULL set).
        if not litedb.execute(con,
                              'SELECT#215',
                              None, self.args.processes, # No need for lock
                              cur,
                              'SELECT files_id, path, sets.name, sets.set_id '
                              'FROM files LEFT OUTER JOIN sets ON '
                              'files.set_id = sets.set_id',
                              dbcaughtcode='215'):
            return False
        existing_media = cur.fetchall()
        logging.info('len(existing_media)=[%s]', len(existing_media))
        count_total = len(existing_media)
        # running in multi processing mode
        if self.args.processes and self.args.processes > 0:
            logging.debug('Running [%s] processes pool.',
                          self.args.processes)
            logging.debug('__name__:[%s] to prevent recursive calling)!',
                          __name__)
            # To prevent recursive calling, check if __name__ == '__main__'
            # if __name__ == '__main__':
            mp.mprocessing(self.args.processes,
                           mlockdb,
                           mrunning,
                           mmutex,
                           existing_media,
                           self.madd_albums_tag,
                           cur)
        # running in single processing mode
        else:
            count = 0
            count_total = len(existing_media)
            for row in existing_media:
                count += 1
                # CODING: ALBUM_TAGS_02: Refactor with code at ALBUM_TAGS_01
                # row[0] = files_id
                # row[1] = path
                # row[2] = set_name
                # row[3] = set_id
                NP.niceprint('ID:[{!s}] Path:[{!s}] Set:[{!s}] SetID:[{!s}]'
                             .format(str(row[0]), row[1], row[2], row[3]),
                             fname='addAlbumMigrate')
                # row[1] = path for the file from table files
                setname = faw.set_name_from_file(row[1],
                                                 self.xcfg.FILES_DIR,
                                                 self.xcfg.FULL_SET_NAME)
                # Prefer the set name recorded in the DB; fall back to the
                # name derived from the file path.
                tfind, tid = self.photos_find_tag(
                    photo_id=row[0],
                    intag='album:{}'.format(row[2]
                                            if row[2] is not None
                                            else setname))
                NP.niceprint(' Find Tag:[{!s}] TagId:[{!s}]'
                             .format(tfind, tid), verbosity=1,
                             logalso=logging.WARNING)
                if not tfind:
                    # Tag missing on Flickr: add album:"<set name>".
                    get_success, _, get_errcode = faw.flickrapi_fn(
                        self.nuflickr.photos.addTags, (),
                        dict(photo_id=row[0],
                             tags='album:"{}"'.format(row[2]
                                                      if row[2] is not None
                                                      else setname)),
                        2, 2, False, caughtcode='218')
                    a_result = get_success and get_errcode == 0
                    NP.niceprint('{!s}: Photo_id:[{!s}] [{!s}]'
                                 .format('Added album tag'
                                         if a_result
                                         else ' Failed tagging',
                                         str(row[0]),
                                         NP.strunicodeout(row[1])),
                                 fname='addAlbumMigrate',
                                 logalso=logging.WARNING)
                else:
                    NP.niceprint(' Found Tag:[{!s}] TagId:[{!s}]'
                                 .format(tfind, tid), verbosity=1,
                                 logalso=logging.WARNING)
                NP.niceprocessedfiles(count, count_total, False,
                                      msg='Album tag Added')
            NP.niceprocessedfiles(count, count_total, True,
                                  msg='Album tag Added')
        return True
# -------------------------------------------------------------------------
# list_bad_files
#
# List badfiles recorded on Local DB from previous loads
#
def list_bad_files(self):
""" list_bad_files
List badfiles recorded on Local DB from previous loads
"""
NP.niceprint('*****Listing badfiles: Start.*****',
fname='list_bad_files')
con, cur = litedb.connect(self.xcfg.DB_PATH)
if not litedb.execute(con,
'SELECT#218',
None, self.args.processes, # No need for lock
cur,
'SELECT files_id, path, set_id, md5, tagged, '
'last_modified '
'FROM badfiles ORDER BY | |
bool = False,
modules_query: Optional[Tuple[str, str]] = None,
extensions_ver: Optional[str] = None,
architectures_ver: Optional[str] = None,
archives_query: Optional[List[str]] = None,
tool_name: Optional[str] = None,
is_long_listing: bool = False,
):
"""
Construct MetadataFactory.
:param spec: When set, the MetadataFactory will filter out all versions of
Qt that don't fit this SimpleSpec.
:param is_latest_version: When True, the MetadataFactory will find all versions of Qt
matching filters, and only print the most recent version
:param modules_query: [Version of Qt, architecture] for which to list modules
:param extensions_ver: Version of Qt for which to list extensions
:param architectures_ver: Version of Qt for which to list architectures
:param archives_query: [Qt_Version, architecture, *module_names]: used to print list of archives
:param tool_name: Name of a tool, without architecture, ie "tools_qtcreator" or "tools_ifw"
:param is_long_listing: If true, long listing is used for tools output
"""
self.logger = getLogger("aqt.metadata")
self.archive_id = archive_id
self.spec = spec
if archive_id.is_tools():
if tool_name:
if not tool_name.startswith("tools_"):
tool_name = "tools_" + tool_name
if is_long_listing:
self.request_type = "tool long listing"
self._action = lambda: self.fetch_tool_long_listing(tool_name)
else:
self.request_type = "tool variant names"
self._action = lambda: self.fetch_tool_modules(tool_name)
else:
self.request_type = "tools"
self._action = self.fetch_tools
elif is_latest_version:
self.request_type = "latest version"
self._action = lambda: Versions(self.fetch_latest_version())
elif modules_query:
self.request_type = "modules"
version, arch = modules_query
self._action = lambda: self.fetch_modules(self._to_version(version), arch)
elif extensions_ver:
self.request_type = "extensions"
self._action = lambda: self.fetch_extensions(self._to_version(extensions_ver))
elif architectures_ver:
self.request_type = "architectures"
self._action = lambda: self.fetch_arches(self._to_version(architectures_ver))
elif archives_query:
if len(archives_query) < 2:
raise CliInputError("The '--archives' flag requires a 'QT_VERSION' and an 'ARCHITECTURE' parameter.")
self.request_type = "archives for modules" if len(archives_query) > 2 else "archives for qt"
version, arch, modules = archives_query[0], archives_query[1], archives_query[2:]
self._action = lambda: self.fetch_archives(self._to_version(version), arch, modules)
else:
self.request_type = "versions"
self._action = self.fetch_versions
    def getList(self) -> Union[List[str], Versions, ToolData]:
        """Run the action selected in __init__ and return its result."""
        return self._action()
def fetch_arches(self, version: Version) -> List[str]:
self.validate_extension(version)
if self.archive_id.extension == "src_doc_examples":
return []
qt_ver_str = self._get_qt_version_str(version)
modules = self._fetch_module_metadata(self.archive_id.to_folder(qt_ver_str))
arches = []
for name in modules.keys():
ver, arch = name.split(".")[-2:]
if ver == qt_ver_str:
arches.append(arch)
return arches
def fetch_extensions(self, version: Version) -> List[str]:
versions_extensions = MetadataFactory.get_versions_extensions(
self.fetch_http(self.archive_id.to_url()), self.archive_id.category
)
filtered = filter(
lambda ver_ext: ver_ext[0] == version and ver_ext[1],
versions_extensions,
)
return list(map(lambda ver_ext: ver_ext[1], filtered))
def fetch_versions(self) -> Versions:
def filter_by(ver_ext: Tuple[Optional[Version], str]) -> bool:
version, extension = ver_ext
return version and (self.spec is None or version in self.spec) and (self.archive_id.extension == extension)
def get_version(ver_ext: Tuple[Version, str]):
return ver_ext[0]
versions_extensions = MetadataFactory.get_versions_extensions(
self.fetch_http(self.archive_id.to_url()), self.archive_id.category
)
versions = sorted(filter(None, map(get_version, filter(filter_by, versions_extensions))))
iterables = itertools.groupby(versions, lambda version: version.minor)
return Versions(iterables)
    def fetch_latest_version(self) -> Optional[Version]:
        """Return the most recent Qt version matching the filters, or None."""
        return self.fetch_versions().latest()
def fetch_tools(self) -> List[str]:
html_doc = self.fetch_http(self.archive_id.to_url())
return list(MetadataFactory.iterate_folders(html_doc, "tools"))
def fetch_tool_modules(self, tool_name: str) -> List[str]:
tool_data = self._fetch_module_metadata(tool_name)
return list(tool_data.keys())
    def fetch_tool_by_simple_spec(self, tool_name: str, simple_spec: SimpleSpec) -> Optional[Dict[str, str]]:
        """Return metadata for the highest version of *tool_name* within *simple_spec*, or None."""
        # Get data for all the tool modules
        all_tools_data = self._fetch_module_metadata(tool_name)
        return self.choose_highest_version_in_spec(all_tools_data, simple_spec)
    def fetch_tool_long_listing(self, tool_name: str) -> ToolData:
        """Return the full per-module metadata table for *tool_name*."""
        return ToolData(self._fetch_module_metadata(tool_name))
    def validate_extension(self, qt_ver: Version) -> None:
        """
        Checks extension, and raises CliInputError if invalid.
        Rules:
        1. On Qt6 for Android, an extension for processor architecture is required.
        2. On any platform other than Android, or on Qt5, an extension for
        processor architecture is forbidden.
        3. The "wasm" extension only works on desktop targets for Qt 5.13-5.15, or for 6.2+
        """
        # Rule 1: Qt6 Android needs an arch extension.
        if (
            self.archive_id.target == "android"
            and qt_ver.major == 6
            and self.archive_id.extension not in ArchiveId.EXTENSIONS_REQUIRED_ANDROID_QT6
        ):
            raise CliInputError(
                "Qt 6 for Android requires one of the following extensions: "
                f"{ArchiveId.EXTENSIONS_REQUIRED_ANDROID_QT6}. "
                "Please add your extension using the `--extension` flag."
            )
        # Rule 2: arch extensions are forbidden everywhere else.
        if self.archive_id.extension in ArchiveId.EXTENSIONS_REQUIRED_ANDROID_QT6 and (
            self.archive_id.target != "android" or qt_ver.major != 6
        ):
            raise CliInputError(f"The extension '{self.archive_id.extension}' is only valid for Qt 6 for Android")
        # Rule 3: wasm requires a desktop target and Qt 5.13-5.15 or 6.2+.
        is_in_wasm_range = qt_ver in SimpleSpec(">=5.13,<6") or qt_ver in SimpleSpec(">=6.2.0")
        if "wasm" in self.archive_id.extension and (self.archive_id.target != "desktop" or not is_in_wasm_range):
            raise CliInputError(
                f"The extension '{self.archive_id.extension}' is only available in Qt 5.13-5.15 and 6.2+ on desktop."
            )
@staticmethod
def choose_highest_version_in_spec(
all_tools_data: Dict[str, Dict[str, str]], simple_spec: SimpleSpec
) -> Optional[Dict[str, str]]:
# Get versions of all modules. Fail if version cannot be determined.
try:
tools_versions = [
(name, tool_data, Version.permissive(tool_data["Version"])) for name, tool_data in all_tools_data.items()
]
except ValueError:
return None
# Remove items that don't conform to simple_spec
tools_versions = filter(lambda tool_item: tool_item[2] in simple_spec, tools_versions)
try:
# Return the conforming item with the highest version.
# If there are multiple items with the same version, the result will not be predictable.
return max(tools_versions, key=operator.itemgetter(2))[1]
except ValueError:
# There were no tools that fit the simple_spec
return None
def _to_version(self, qt_ver: str) -> Version:
"""
Turns a string in the form of `5.X.Y | latest` into a semantic version.
If the string does not fit either of these forms, CliInputError will be raised.
If qt_ver == latest, and no versions exist corresponding to the filters specified,
then CliInputError will be raised.
If qt_ver == latest, and an HTTP error occurs, requests.RequestException will be raised.
:param qt_ver: Either the literal string `latest`, or a semantic version
with each part separated with dots.
"""
assert qt_ver
if qt_ver == "latest":
latest_version = self.fetch_latest_version()
if not latest_version:
msg = "There is no latest version of Qt with the criteria '{}'".format(self.describe_filters())
raise CliInputError(msg)
return latest_version
try:
version = Version(qt_ver)
except ValueError as e:
raise CliInputError(e) from e
return version
@staticmethod
def fetch_http(rest_of_url: str) -> str:
base_urls = Settings.baseurl, random.choice(Settings.fallbacks)
for i, base_url in enumerate(base_urls):
try:
url = posixpath.join(base_url, rest_of_url)
return getUrl(
url=url,
timeout=(Settings.connection_timeout, Settings.response_timeout),
)
except (ArchiveDownloadError, ArchiveConnectionError) as e:
if i == len(base_urls) - 1:
raise e from e
@staticmethod
def iterate_folders(html_doc: str, filter_category: str = "") -> Generator[str, None, None]:
def table_row_to_folder(tr: bs4.element.Tag) -> str:
try:
return tr.find_all("td")[1].a.contents[0].rstrip("/")
except (AttributeError, IndexError):
return ""
soup: bs4.BeautifulSoup = bs4.BeautifulSoup(html_doc, "html.parser")
for row in soup.body.table.find_all("tr"):
content: str = table_row_to_folder(row)
if not content or content == "Parent Directory":
continue
if content.startswith(filter_category):
yield content
@staticmethod
def get_versions_extensions(html_doc: str, category: str) -> Iterator[Tuple[Optional[Version], str]]:
def folder_to_version_extension(folder: str) -> Tuple[Optional[Version], str]:
components = folder.split("_", maxsplit=2)
ext = "" if len(components) < 3 else components[2]
ver = "" if len(components) < 2 else components[1]
return (
get_semantic_version(qt_ver=ver, is_preview="preview" in ext),
ext,
)
return map(
folder_to_version_extension,
MetadataFactory.iterate_folders(html_doc, category),
)
@staticmethod
def _has_nonempty_downloads(element: ElementTree.Element) -> bool:
"""Returns True if the element has an empty '<DownloadableArchives/>' tag"""
downloads = element.find("DownloadableArchives")
return downloads is not None and downloads.text
    def _get_qt_version_str(self, version: Version) -> str:
        """Returns a Qt version, without dots, that works in the Qt repo urls and Updates.xml files"""
        # NOTE: The url at `<base>/<host>/<target>/qt5_590/` does not exist; the real one is `qt5_59`
        # Drop the patch component for prereleases, previews, and 5.9.0 (see
        # NOTE above); otherwise append it, e.g. 5.12.3 -> "5123".
        patch = (
            ""
            if version.prerelease or self.archive_id.is_preview() or version in SimpleSpec("5.9.0")
            else str(version.patch)
        )
        return f"{version.major}{version.minor}{patch}"
def _fetch_module_metadata(self, folder: str, predicate: Optional[Callable[[ElementTree.Element], bool]] = None):
rest_of_url = posixpath.join(self.archive_id.to_url(), folder, "Updates.xml")
xml = self.fetch_http(rest_of_url)
return xml_to_modules(
xml,
predicate=predicate if predicate else MetadataFactory._has_nonempty_downloads,
)
    def fetch_modules(self, version: Version, arch: str) -> List[str]:
        """Returns list of modules"""
        self.validate_extension(version)
        qt_ver_str = self._get_qt_version_str(version)
        # Example: re.compile(r"^(preview\.)?qt\.(qt5\.)?590\.(.+)$")
        pattern = re.compile(r"^(preview\.)?qt\.(qt" + str(version.major) + r"\.)?" + qt_ver_str + r"\.(.+)$")
        modules_meta = self._fetch_module_metadata(self.archive_id.to_folder(qt_ver_str))
        def to_module_arch(name: str) -> Tuple[Optional[str], Optional[str]]:
            # Split a package name into (module, arch); (None, None) when the
            # name does not belong to this Qt version at all.
            _match = pattern.match(name)
            if not _match:
                return None, None
            module_with_arch = _match.group(3)
            if self.archive_id.is_no_arch() or "." not in module_with_arch:
                return module_with_arch, None
            # NOTE(review): this local `arch` shadows the method parameter;
            # harmless here since the parameter is only read in the loop below.
            module, arch = module_with_arch.rsplit(".", 1)
            if module.startswith("addons."):
                # Strip the "addons." prefix so addons list as plain modules.
                module = module[len("addons.") :]
            return module, arch
        # Collect modules whose arch component matches the requested arch.
        modules = set()
        for name in modules_meta.keys():
            module, _arch = to_module_arch(name)
            if _arch == arch:
                modules.add(module)
        return sorted(modules)
def fetch_archives(self, version: Version, arch: str, modules: List[str]) -> List[str]:
qt_version_str = self._get_qt_version_str(version)
nonempty = MetadataFactory._has_nonempty_downloads
def all_modules(element: ElementTree.Element) -> bool:
_module, _arch = element.find("Name").text.split(".")[-2:]
return _arch == arch and _module != qt_version_str and nonempty(element)
def specify_modules(element: ElementTree.Element) -> bool:
_module, _arch = element.find("Name").text.split(".")[-2:]
return _arch == arch and _module in modules and nonempty(element)
def no_modules(element: ElementTree.Element) -> bool:
name: Optional[str] = element.find("Name").text
return name and name.endswith(f".{qt_version_str}.{arch}") and nonempty(element)
predicate = no_modules if not modules else all_modules if "all" in modules else specify_modules
try:
mod_metadata = self._fetch_module_metadata(self.archive_id.to_folder(qt_version_str), | |
# Repo: ChameleonCloud/portal — file: tas/forms.py
import re
from django import forms
import logging
from pytas.http import TASClient
logger = logging.getLogger(__name__)
# PI (principal investigator) eligibility states stored on user profiles.
ELIGIBLE = "Eligible"
INELIGIBLE = "Ineligible"
REQUESTED = "Requested"
# Django-style choices: (stored value, human-readable label).
PI_ELIGIBILITY = (
    ("", "Choose One"),
    (ELIGIBLE, ELIGIBLE),
    (INELIGIBLE, INELIGIBLE),
    (REQUESTED, REQUESTED),
)
# Job-title choices offered on the user profile form.
USER_PROFILE_TITLES = (
    ("", "Choose one"),
    ("Center Non-Researcher Staff", "Center Non-Researcher Staff"),
    ("Center Researcher Staff", "Center Researcher Staff"),
    ("Faculty", "Faculty"),
    ("Government User", "Government User"),
    ("Graduate Student", "Graduate Student"),
    ("High School Student", "High School Student"),
    ("High School Teacher", "High School Teacher"),
    ("Industrial User", "Industrial User"),
    ("Unaffiliated User", "Unaffiliated User"),
    ("Nonprofit User", "Nonprofit User"),
    ("NSF Graduate Research Fellow", "NSF Graduate Research Fellow"),
    ("Other User", "Other User"),
    ("Postdoctorate", "Postdoctorate"),
    ("Undergraduate Student", "Undergraduate Student"),
    ("Unknown", "Unknown"),
    ("University Non-Research Staff", "University Non-Research Staff"),
    (
        "University Research Staff",
        "University Research Staff (excluding postdoctorates)",
    ),
)
# ISO-3166 list; this matches what is in the keycloak-chameleon extension.
# TODO(jason): pull this list from an endpoint exposed by Keycloak? Hard to keep
# this in sync (but how often do new countries come around?)
# Every real entry pairs the country name with itself, so keep a single flat
# tuple of names and derive the (value, label) choice pairs from it.
# NOTE: the "Saint Helena ..." and "Svalbard ..." entries previously contained
# a literal "<NAME>" scaffold placeholder; restored to the ISO 3166-1 names.
_COUNTRY_NAMES = (
    "Afghanistan",
    "Åland Islands",
    "Albania",
    "Algeria",
    "American Samoa",
    "Andorra",
    "Angola",
    "Anguilla",
    "Antarctica",
    "Antigua and Barbuda",
    "Argentina",
    "Armenia",
    "Aruba",
    "Australia",
    "Austria",
    "Azerbaijan",
    "Bahamas",
    "Bahrain",
    "Bangladesh",
    "Barbados",
    "Belarus",
    "Belgium",
    "Belize",
    "Benin",
    "Bermuda",
    "Bhutan",
    "Bolivia (Plurinational State of)",
    "Bonaire, Sint Eustatius and Saba",
    "Bosnia and Herzegovina",
    "Botswana",
    "Bouvet Island",
    "Brazil",
    "British Indian Ocean Territory",
    "Brunei Darussalam",
    "Bulgaria",
    "Burkina Faso",
    "Burundi",
    "Cabo Verde",
    "Cambodia",
    "Cameroon",
    "Canada",
    "Cayman Islands",
    "Central African Republic",
    "Chad",
    "Chile",
    "China",
    "Christmas Island",
    "Cocos (Keeling) Islands",
    "Colombia",
    "Comoros",
    "Congo",
    "Congo, Democratic Republic of the",
    "Cook Islands",
    "Costa Rica",
    "Côte d'Ivoire",
    "Croatia",
    "Cuba",
    "Curaçao",
    "Cyprus",
    "Czechia",
    "Denmark",
    "Djibouti",
    "Dominica",
    "Dominican Republic",
    "Ecuador",
    "Egypt",
    "El Salvador",
    "Equatorial Guinea",
    "Eritrea",
    "Estonia",
    "Eswatini",
    "Ethiopia",
    "Falkland Islands (Malvinas)",
    "Faroe Islands",
    "Fiji",
    "Finland",
    "France",
    "French Guiana",
    "French Polynesia",
    "French Southern Territories",
    "Gabon",
    "Gambia",
    "Georgia",
    "Germany",
    "Ghana",
    "Gibraltar",
    "Greece",
    "Greenland",
    "Grenada",
    "Guadeloupe",
    "Guam",
    "Guatemala",
    "Guernsey",
    "Guinea",
    "Guinea-Bissau",
    "Guyana",
    "Haiti",
    "Heard Island and McDonald Islands",
    "Holy See",
    "Honduras",
    "Hong Kong",
    "Hungary",
    "Iceland",
    "India",
    "Indonesia",
    "Iran (Islamic Republic of)",
    "Iraq",
    "Ireland",
    "Isle of Man",
    "Israel",
    "Italy",
    "Jamaica",
    "Japan",
    "Jersey",
    "Jordan",
    "Kazakhstan",
    "Kenya",
    "Kiribati",
    "Korea (Democratic People's Republic of)",
    "Korea, Republic of",
    "Kuwait",
    "Kyrgyzstan",
    "Lao People's Democratic Republic",
    "Latvia",
    "Lebanon",
    "Lesotho",
    "Liberia",
    "Libya",
    "Liechtenstein",
    "Lithuania",
    "Luxembourg",
    "Macao",
    "Madagascar",
    "Malawi",
    "Malaysia",
    "Maldives",
    "Mali",
    "Malta",
    "Marshall Islands",
    "Martinique",
    "Mauritania",
    "Mauritius",
    "Mayotte",
    "Mexico",
    "Micronesia (Federated States of)",
    "Moldova, Republic of",
    "Monaco",
    "Mongolia",
    "Montenegro",
    "Montserrat",
    "Morocco",
    "Mozambique",
    "Myanmar",
    "Namibia",
    "Nauru",
    "Nepal",
    "Netherlands",
    "New Caledonia",
    "New Zealand",
    "Nicaragua",
    "Niger",
    "Nigeria",
    "Niue",
    "Norfolk Island",
    "North Macedonia",
    "Northern Mariana Islands",
    "Norway",
    "Oman",
    "Pakistan",
    "Palau",
    "Palestine, State of",
    "Panama",
    "Papua New Guinea",
    "Paraguay",
    "Peru",
    "Philippines",
    "Pitcairn",
    "Poland",
    "Portugal",
    "Puerto Rico",
    "Qatar",
    "Réunion",
    "Romania",
    "Russian Federation",
    "Rwanda",
    "Saint Barthélemy",
    "Saint Helena, Ascension and Tristan da Cunha",
    "Saint Kitts and Nevis",
    "Saint Lucia",
    "Saint Martin (French part)",
    "Saint Pierre and Miquelon",
    "Saint Vincent and the Grenadines",
    "Samoa",
    "San Marino",
    "Sao Tome and Principe",
    "Saudi Arabia",
    "Senegal",
    "Serbia",
    "Seychelles",
    "Sierra Leone",
    "Singapore",
    "Sint Maarten (Dutch part)",
    "Slovakia",
    "Slovenia",
    "Solomon Islands",
    "Somalia",
    "South Africa",
    "South Georgia and the South Sandwich Islands",
    "South Sudan",
    "Spain",
    "Sri Lanka",
    "Sudan",
    "Suriname",
    "Svalbard and Jan Mayen",
    "Sweden",
    "Switzerland",
    "Syrian Arab Republic",
    "Taiwan, Province of China",
    "Tajikistan",
    "Tanzania, United Republic of",
    "Thailand",
    "Timor-Leste",
    "Togo",
    "Tokelau",
    "Tonga",
    "Trinidad and Tobago",
    "Tunisia",
    "Turkey",
    "Turkmenistan",
    "Turks and Caicos Islands",
    "Tuvalu",
    "Uganda",
    "Ukraine",
    "United Arab Emirates",
    "United Kingdom of Great Britain and Northern Ireland",
    "United States of America",
    "United States Minor Outlying Islands",
    "Uruguay",
    "Uzbekistan",
    "Vanuatu",
    "Venezuela (Bolivarian Republic of)",
    "Viet Nam",
    "Virgin Islands (British)",
    "Virgin Islands (U.S.)",
    "Wallis and Futuna",
    "Western Sahara",
    "Yemen",
    "Zambia",
    "Zimbabwe",
)
# Django choices tuple: a leading placeholder, then (name, name) pairs.
COUNTRY_LIST = (("", "Choose one"),) + tuple(
    (name, name) for name in _COUNTRY_NAMES
)
class EmailConfirmationForm(forms.Form):
    """Form for confirming a user's email address with an emailed code."""
    # Verification code the user received via email.
    code = forms.CharField(
        label="Enter Your Verification Code",
        required=True,
        error_messages={
            "required": "Please enter the verification code you received via email."
        },
    )
    # Account the verification code was issued for.
    username = forms.CharField(label="Enter Your Chameleon Username", required=True)
def check_password_policy(user, password, confirm_password):
    """
    Checks the password for meeting the minimum password policy requirements:
    * Must be a minimum of 8 characters in length
    * Must contain characters from at least three of the following: uppercase
      letters, lowercase letters, numbers, symbols
    * Must NOT contain the username or the first or last name from the profile

    Args:
        user: mapping with "username", "firstName" and "lastName" keys.
        password: the candidate password.
        confirm_password: the confirmation entry; must match `password`.

    Returns:
        A boolean value indicating if the password meets the policy,
        An error message string or None
    """
    if password != confirm_password:
        return False, "The password provided does not match the confirmation."
    if len(password) < 8:
        return (
            False,
            "The password provided is too short. Please review the password criteria.",
        )
    # Count how many of the four character classes appear in the password.
    char_classes = sum(
        1
        for cc in ("[a-z]", "[A-Z]", "[0-9]", "[^a-zA-Z0-9]")
        if re.search(cc, password)
    )
    if char_classes < 3:
        return False, "The password provided does not meet the complexity requirements."
    pwd_without_case = password.lower()
    # Skip empty profile fields: "" is a substring of every string, so an
    # empty first/last name would otherwise reject every password.
    for part in (user["username"], user["firstName"], user["lastName"]):
        part = part.lower()
        if part and part in pwd_without_case:
            return (
                False,
                "The password provided must not contain parts of your name or username.",
            )
    return True, None
class RecoverUsernameForm(forms.Form):
    """Form for looking up a forgotten username by email address."""
    email = forms.CharField(label="Enter Your Email Address", required=True)
class PasswordResetRequestForm(forms.Form):
    """Form for requesting a password reset code for an account."""
    username = forms.CharField(label="Enter Your Chameleon Username", required=True)
class PasswordResetConfirmForm(forms.Form):
username = forms.CharField(label="Enter Your Chameleon Username", required=True)
code = forms.CharField(label="Reset Code", required=True)
password = forms.CharField(
widget=forms.PasswordInput, label="Password", required=True
)
confirm_password = forms.CharField(
widget=forms.PasswordInput,
label="Confirm Password",
required=True,
help_text="Passwords must meet the following criteria:<ul>"
"<li>Must not contain your username or parts of "
"your full name;</li><li>Must be a minimum of 8 characters "
"in length;</li><li>Must contain characters from at least "
"three of the | |
return resp["router"]
@atomic.action_timer("neutron.show_router")
def get_router(self, router_id, fields=_NONE):
"""Get router details
:param router_id: Router ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
return self.client.show_router(router_id, **body)["router"]
@atomic.action_timer("neutron.add_interface_router")
def add_interface_to_router(self, router_id, subnet_id=_NONE,
port_id=_NONE):
"""Add interface to router.
:param router_id: The ID of the router.
:param subnet_id: The ID of the subnet. One of subnet_id or port_id
must be specified.
:param port_id: The ID of the port. One of subnet_id or port_id must
be specified.
"""
if (subnet_id and port_id) or (not subnet_id and not port_id):
raise TypeError("One of subnet_id or port_id must be specified "
"while adding interface to router.")
body = _clean_dict(subnet_id=subnet_id, port_id=port_id)
return self.client.add_interface_router(router_id, body)
    @atomic.action_timer("neutron.remove_interface_router")
    def remove_interface_from_router(self, router_id, subnet_id=_NONE,
                                     port_id=_NONE):
        """Remove interface from router.

        Failures caused by plugin-specific ownership/HA quirks are logged and
        swallowed (see the except block below), so this call is best-effort.

        :param router_id: The ID of the router.
        :param subnet_id: The ID of the subnet. One of subnet_id or port_id
            must be specified.
        :param port_id: The ID of the port. One of subnet_id or port_id must
            be specified.
        """
        # Imported locally so the module does not hard-require neutronclient.
        from neutronclient.common import exceptions as neutron_exceptions
        if (subnet_id and port_id) or (not subnet_id and not port_id):
            raise TypeError("One of subnet_id or port_id must be specified "
                            "to remove interface from router.")
        body = _clean_dict(subnet_id=subnet_id, port_id=port_id)
        try:
            self.client.remove_interface_router(router_id, body)
        except (neutron_exceptions.BadRequest,
                neutron_exceptions.NotFound):
            # Some neutron plugins don't use router as
            # the device ID. Also, some plugin doesn't allow
            # to update the ha router interface as there is
            # an internal logic to update the interface/data model
            # instead.
            LOG.exception("Failed to remove an interface from a router.")
    @atomic.action_timer("neutron.add_gateway_router")
    def add_gateway_to_router(self, router_id, network_id, enable_snat=None,
                              external_fixed_ips=None):
        """Adds an external network gateway to the specified router.

        :param router_id: Router ID
        :param network_id: The ID of the external network used as the gateway.
        :param enable_snat: whether SNAT should occur on the external gateway
            or not
        :param external_fixed_ips: fixed IPs to assign to the gateway port
            (passed through to the API unchanged).
        """
        gw_info = {"network_id": network_id}
        if enable_snat is not None:
            # enable_snat is only sent when the 'ext-gw-mode' extension is
            # available; otherwise the flag is silently dropped.
            if self.supports_extension("ext-gw-mode", silent=True):
                gw_info["enable_snat"] = enable_snat
        if external_fixed_ips is not None:
            gw_info["external_fixed_ips"] = external_fixed_ips
        self.client.add_gateway_router(router_id, gw_info)
@atomic.action_timer("neutron.remove_gateway_router")
def remove_gateway_from_router(self, router_id):
"""Removes an external network gateway from the specified router.
:param router_id: Router ID
"""
self.client.remove_gateway_router(router_id)
    @atomic.action_timer("neutron.update_router")
    def update_router(self, router_id, name=_NONE, admin_state_up=_NONE,
                      description=_NONE, external_gateway_info=_NONE,
                      distributed=_NONE, ha=_NONE):
        """Update router.

        :param router_id: The ID of the router to update.
        :param name: Human-readable name of the resource.
        :param admin_state_up: The administrative state of the resource, which
            is up (true) or down (false). Default is true.
        :param description: A human-readable description for the resource.
        :param external_gateway_info: The external gateway information of
            the router. If the router has an external gateway, this would be
            a dict with network_id, enable_snat and external_fixed_ips.
        :param distributed: true indicates a distributed router. It is
            available when dvr extension is enabled.
        :param ha: true indicates a highly-available router. It is available
            when l3-ha extension is enabled.
        """
        # _NONE is a sentinel; _clean_dict presumably drops sentinel-valued
        # entries so only explicitly-passed fields reach the API — TODO confirm
        # against _clean_dict's definition.
        body = _clean_dict(
            name=name,
            external_gateway_info=external_gateway_info,
            description=description,
            distributed=distributed,
            ha=ha,
            admin_state_up=admin_state_up
        )
        if not body:
            raise TypeError("No updates for a router.")
        return self.client.update_router(router_id, {"router": body})["router"]
@atomic.action_timer("neutron.delete_router")
def delete_router(self, router_id):
"""Delete router
:param router_id: Router ID
"""
self.client.delete_router(router_id)
@staticmethod
def _filter_routers(routers, subnet_ids):
for router in routers:
gtw_info = router["external_gateway_info"]
if gtw_info is None:
continue
if any(fixed_ip["subnet_id"] in subnet_ids
for fixed_ip in gtw_info["external_fixed_ips"]):
yield router
@atomic.action_timer("neutron.list_routers")
def list_routers(self, subnet_ids=_NONE, **kwargs):
"""List routers.
:param subnet_ids: Filter routers by attached subnet(s). Can be a
string or and an array with strings.
:param kwargs: additional router list filters
"""
routers = self.client.list_routers(**kwargs)["routers"]
if subnet_ids != _NONE:
routers = list(self._filter_routers(routers,
subnet_ids=subnet_ids))
return routers
@atomic.action_timer("neutron.create_port")
def create_port(self, network_id, **kwargs):
"""Create neutron port.
:param network_id: neutron network dict
:param kwargs: other optional neutron port creation params
(name is restricted param)
:returns: neutron port dict
"""
kwargs["name"] = self.generate_random_name()
body = _clean_dict(
network_id=network_id,
**kwargs
)
return self.client.create_port({"port": body})["port"]
@atomic.action_timer("neutron.show_port")
def get_port(self, port_id, fields=_NONE):
"""Get port details
:param port_id: Port ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
return self.client.show_port(port_id, **body)["port"]
@atomic.action_timer("neutron.update_port")
def update_port(self, port_id, **kwargs):
"""Update neutron port.
:param port_id: The ID of the port to update.
:param kwargs: other optional neutron port creation params
(name is restricted param)
:returns: neutron port dict
"""
body = _clean_dict(**kwargs)
if not body:
raise TypeError("No updates for a port.")
return self.client.update_port(port_id, {"port": body})["port"]
ROUTER_INTERFACE_OWNERS = ("network:router_interface",
"network:router_interface_distributed",
"network:ha_router_replicated_interface")
ROUTER_GATEWAY_OWNER = "network:router_gateway"
    @atomic.action_timer("neutron.delete_port")
    def delete_port(self, port):
        """Delete port.

        Router-owned ports (interfaces and gateways) cannot be removed with a
        plain port delete; they are detached through the router API instead.
        (The old docstring claimed a bool return; this method returns None.)

        :param port: Port ID or port dict with "id"/"device_owner" keys
        """
        from neutronclient.common import exceptions as neutron_exceptions
        if not isinstance(port, dict):
            # A bare ID carries no ownership info; treat it as a regular port.
            port = {"id": port, "device_owner": False}
        if (port["device_owner"] in self.ROUTER_INTERFACE_OWNERS
                or port["device_owner"] == self.ROUTER_GATEWAY_OWNER):
            if port["device_owner"] == self.ROUTER_GATEWAY_OWNER:
                self.remove_gateway_from_router(port["device_id"])
            self.remove_interface_from_router(
                router_id=port["device_id"], port_id=port["id"])
        else:
            try:
                self.client.delete_port(port["id"])
            except neutron_exceptions.PortNotFoundClient:
                # port is auto-removed
                pass
@atomic.action_timer("neutron.list_ports")
def list_ports(self, network_id=_NONE, device_id=_NONE, device_owner=_NONE,
status=_NONE, **kwargs):
"""List ports.
:param network_id: Filter the list result by the ID of the attached
network.
:param device_id: Filter the port list result by the ID of the device
that uses this port. For example, a server instance or a logical
router.
:param device_owner: Filter the port result list by the entity type
that uses this port. For example, compute:nova (server instance),
network:dhcp (DHCP agent) or network:router_interface
(router interface).
:param status: Filter the port list result by the port status.
Values are ACTIVE, DOWN, BUILD and ERROR.
:param kwargs: additional port list filters
"""
filters = _clean_dict(
network_id=network_id,
device_id=device_id,
device_owner=device_owner,
status=status,
**kwargs
)
return self.client.list_ports(**filters)["ports"]
    @atomic.action_timer("neutron.create_floating_ip")
    def create_floatingip(self, floating_network=None, project_id=_NONE,
                          fixed_ip_address=_NONE, floating_ip_address=_NONE,
                          port_id=_NONE, subnet_id=_NONE, dns_domain=_NONE,
                          dns_name=_NONE):
        """Create floating IP with floating_network.

        :param floating_network: external network associated with floating IP.
            May be a network dict, a name/ID to look up, or None to use the
            first external network found.
        :param project_id: The ID of the project.
        :param fixed_ip_address: The fixed IP address that is associated with
            the floating IP. If an internal port has multiple associated IP
            addresses, the service chooses the first IP address unless you
            explicitly define a fixed IP address in the fixed_ip_address
            parameter.
        :param floating_ip_address: The floating IP address. Default policy
            settings enable only administrative users to set floating IP
            addresses and some non-administrative users might require a
            floating IP address. If you do not specify a floating IP address
            in the request, the operation automatically allocates one.
        :param port_id: The ID of a port associated with the floating IP.
            To associate the floating IP with a fixed IP at creation time,
            you must specify the identifier of the internal port.
        :param subnet_id: The subnet ID on which you want to create the
            floating IP.
        :param dns_domain: A valid DNS domain.
        :param dns_name: A valid DNS name.
        """
        from neutronclient.common import exceptions as neutron_exceptions
        # Resolve the external network ID from whatever form the caller gave.
        if isinstance(floating_network, dict):
            net_id = floating_network["id"]
        elif floating_network:
            network = self.find_network(floating_network)
            if not network.get("router:external", False):
                raise exceptions.NotFoundException(
                    f"Network '{network['name']} (id={network['id']})' is not "
                    f"external.")
            net_id = network["id"]
        else:
            # No network given: fall back to the first external network.
            ext_networks = self.list_networks(router_external=True)
            if not ext_networks:
                raise exceptions.NotFoundException(
                    "Failed to allocate floating IP since no external "
                    "networks found.")
            net_id = ext_networks[0]["id"]
        # Pre-Newton Neutron rejects the 'description' attribute entirely, so
        # it is only set when the config says the cloud is newer.
        description = None
        if not CONF.openstack.pre_newton_neutron:
            description = self.generate_random_name()
        body = _clean_dict(
            tenant_id=project_id,
            description=description,
            floating_network_id=net_id,
            fixed_ip_address=fixed_ip_address,
            floating_ip_address=floating_ip_address,
            port_id=port_id,
            subnet_id=subnet_id,
            dns_domain=dns_domain,
            dns_name=dns_name
        )
        try:
            resp = self.client.create_floatingip({"floatingip": body})
            return resp["floatingip"]
        except neutron_exceptions.BadRequest as e:
            error = "%s" % e
            # Detect the pre-Newton symptom and point the operator at the fix,
            # then re-raise the original error.
            if "Unrecognized attribute" in error and "'description'" in error:
                LOG.info("It looks like you have Neutron API of pre-Newton "
                         "OpenStack release. Setting "
                         "openstack.pre_newton_neutron option via Rally "
                         "configuration should fix an issue.")
            raise
@atomic.action_timer("neutron.show_floating_ip")
def get_floatingip(self, floatingip_id, fields=_NONE):
"""Get floating IP details
:param floatingip_id: Floating IP ID
:param fields: The fields that you want the server to return. If no
fields list is specified, the networking API returns all
attributes allowed by the policy settings. By using fields
parameter, the API returns only the requested set of attributes.
"""
body = _clean_dict(fields=fields)
resp = self.client.show_floatingip(floatingip_id, **body)
return resp["floatingip"]
@atomic.action_timer("neutron.update_floating_ip")
def | |
# File: frmod/analysis.py
"""
Frequency ratio model analysis.
Perform a landslide susceptibility analysis with the frequency ratio method.
@author: <NAME>
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import frmod.utils as utils
def get_freq_ratios_classic(vr,
                            mask,
                            binc=20,
                            nodata=-99999.,
                            categorical=False,
                            ls_marker=1,
                            nls_marker=0):
    """
    Get the frequency ratio of the landslide parts and the whole area.

    Parameters
    ----------
    vr : Array
        The array of the analyzed variable. Numeric values.
    mask : Array
        The array of the mask.
    binc : int, optional
        Bin count for the histogram of the non-categorical variables.
        The default is 20.
    nodata : int / float, optional
        The nodata value of the vr grid.
        The default is -99999..
    categorical : bool, optional
        Set true if the analysed variable raster is categorical.
        Categories must be marked with unique integers in the rasters.
        The default is False.
    ls_marker : int, optional
        The value marking the landslide parts.
        The default is 1.
    nls_marker : int, optional
        The value marking the non-landslide parts.
        The default is 0.

    Returns
    -------
    frequency_ratios : Array
        The frequency ratio values. Length: number of bins.
    hst_bins : Array
        Array containing the edges of the bins. Length: number of bins + 1.
    fr_stat_df : pandas.DataFrame
        DataFrame with the statistics.
        Columns:
        'min', 'max', 'LS_density', 'NLS_density', 'frequency_ratio'.
        NOTE: in this "classic" variant the 'NLS_density' column actually
        holds the whole-area density (column name kept for compatibility).
    """
    vr = np.ma.masked_where((vr == nodata), vr)
    # Cells belonging to either the landslide or the non-landslide class.
    in_all = np.logical_or(mask == ls_marker, mask == nls_marker)
    ls_area = np.count_nonzero(mask == ls_marker)
    all_area = np.count_nonzero(mask == nls_marker) + ls_area
    if categorical:
        # One bin per distinct category; append an extra edge so the last
        # category gets its own bin.
        bin_edges = np.unique(vr[in_all])
        bin_edges = np.append(bin_edges, bin_edges[-1] + 1)
        ls_hst = np.histogram(vr[mask == ls_marker],
                              bins=bin_edges,
                              density=False)
        all_hst = np.histogram(vr[in_all],
                               bins=bin_edges,
                               density=False)
    else:
        # Use shared limits so both histograms have identical bins.
        glob_lim = (vr[in_all].min(),
                    vr[in_all].max())
        ls_hst = np.histogram(vr[mask == ls_marker], bins=binc,
                              range=glob_lim, density=False)
        all_hst = np.histogram(vr[in_all], bins=binc,
                               range=glob_lim, density=False)
    # Histogram density for the landslide part
    ls_hst_d = ls_hst[0] / ls_area
    # Histogram density for the whole study area
    all_hst_d = all_hst[0] / all_area
    frequency_ratios = ls_hst_d / all_hst_d
    hst_bins = all_hst[1]
    mn = hst_bins[:-1]
    mx = hst_bins[1:]
    # Create a pd.DataFrame for the bins, densities, and the frequency ratio
    data = {'min': mn,
            'max': mx,
            'LS_density': ls_hst_d,
            'NLS_density': all_hst_d,
            'frequency_ratio': frequency_ratios}
    fr_stat_df = pd.DataFrame(data=data)
    return frequency_ratios, hst_bins, fr_stat_df
def get_freq_ratios(vr,
                    mask,
                    binc=100,
                    nodata=-9999.,
                    categorical=False,
                    normalize=False,
                    ls_marker=1,
                    nls_marker=0):
    """
    Get the frequency ratio of the landslide and non-landslide parts.

    Parameters
    ----------
    vr : Array
        The array of the analyzed variable. Numeric values.
    mask : Array
        The array of the mask.
    binc : int, optional
        Bin count for the histogram of the non-categorical variables.
        The default is 100.
    nodata : int / float, optional
        The nodata value of the vr grid. The default is -9999..
    categorical : bool, optional
        Set true if the analysed variable raster is categorical.
        Categories must be marked with unique integers in the rasters.
        The default is False.
    normalize : bool, optional
        Set True for normalized weights (0, 1).
        Only the returned `frequency_ratios` array is normalized; the
        DataFrame always stores the raw ratios.
        The default is False.
    ls_marker : int, optional
        The value marking the landslide parts.
        The default is 1.
    nls_marker : int, optional
        The value marking the non-landslide parts.
        The default is 0.

    Returns
    -------
    frequency_ratios : Array
        The frequency ratio values. Length: number of bins.
    hst_bins : Array
        Array containing the edges of the bins. Length: number of bins + 1.
    fr_stat_df : pandas.DataFrame
        DataFrame with the statistics.
    """
    ls_area = np.count_nonzero(mask == ls_marker)
    nls_area = np.count_nonzero(mask == nls_marker)
    if categorical:
        # One bin per distinct category; append an extra edge so the last
        # category gets its own bin.
        bin_edges = np.unique(vr[(vr != nodata)])
        bin_edges = np.append(bin_edges, bin_edges[-1] + 1)
        ls_hst = np.histogram(vr[mask == ls_marker],
                              bins=bin_edges,
                              density=False)
        nls_hst = np.histogram(vr[mask == nls_marker],
                               bins=bin_edges,
                               density=False)
    else:
        # Shared limits so both histograms use identical bins.
        glob_lim = (vr[vr != nodata].min(),
                    vr[vr != nodata].max())
        ls_hst = np.histogram(vr[mask == ls_marker], bins=binc,
                              range=glob_lim, density=False)
        nls_hst = np.histogram(vr[mask == nls_marker], bins=binc,
                               range=glob_lim, density=False)
    # Histogram density for the landslide part
    ls_hst_d = ls_hst[0] / ls_area
    # Histogram density for the non-landslide part
    nls_hst_d = nls_hst[0] / nls_area
    # Histogram bins
    hst_bins = ls_hst[1]
    mn = hst_bins[:-1]
    mx = hst_bins[1:]
    # Frequency ratios
    fr = ls_hst_d / nls_hst_d
    if normalize:
        frequency_ratios = (fr - fr.min()) / (fr.max() - fr.min())
    else:
        frequency_ratios = fr
    # Create a pd.DataFrame for the bins, densities, and the frequency ratio
    data = {'min': mn,
            'max': mx,
            'LS_density': ls_hst_d,
            'NLS_density': nls_hst_d,
            'frequency_ratio': fr}
    fr_stat_df = pd.DataFrame(data=data)
    return frequency_ratios, hst_bins, fr_stat_df
# TODO: a name change is due to reclass_array
# TODO Use more generic variable names. This can reclass any array.
def reclass_raster(vr, f_ratios, bin_edges, verbose=False):
    """
    Create an array with the frequency ratios.

    Cells are matched to half-open bins [mn, mx), except the LAST bin which
    also includes its right edge — matching np.histogram semantics.
    Previously, cells exactly equal to the global maximum fell outside every
    bin and came back as the -99999 fill value.

    Parameters
    ----------
    vr : Array
        Array of the analysed variable to be reclassified.
    f_ratios : Array
        The frequency ratio values.
        Length: number of bins.
    bin_edges : Array
        Array containing the edges of the bins.
        Length: number of bins + 1.
    verbose : bool
        Set True to print the bin ranges and reclass values.

    Returns
    -------
    reclassed : Array
        Reclassified array with the appropriate frequency ratio values.
        Cells outside all bins keep the -99999 fill value.
    """
    reclassed = np.full(vr.shape, -99999.0)
    last = len(f_ratios) - 1
    for i in range(0, len(f_ratios)):
        # Assign the frequency ratio of each bin to the matching cells.
        mn = bin_edges[:-1][i]
        mx = bin_edges[1:][i]
        vrange = mx - mn
        if i == last:
            # Last bin is closed on the right so the maximum is included.
            to_reclass = (vr >= mn) & (vr <= mx)
        else:
            to_reclass = (vr >= mn) & (vr < mx)
        reclassed[to_reclass] = f_ratios[i]
        if verbose:
            print("Min: {} Max: {} Range: {} Ratio: {}".format(
                mn, mx, vrange, f_ratios[i])
            )
    return reclassed
def show_grid(grid, nodata, name='Grid', **kwargs):
    """
    Plot a grid, nodata values are masked.

    Plot a grid (2D array). Values equal to nodata will be masked.
    **kwargs are keywords passed to matplotlib.pyplot.imshow that
    is used for plotting the masked grid. Eg. color maps (cmap).

    Parameters
    ----------
    grid : Array
        Grid to plot.
    nodata : int / float
        Nodata value of the grid. Nodata values will be masked.
    name : str, optional
        The title of the plot. The default is 'Grid'.

    Returns
    -------
    None.
    """
    # Mask nodata cells so imshow leaves them blank instead of colouring them.
    masked_grid = np.ma.masked_where((grid == nodata), grid)
    plt.figure()
    plt.title(name)
    plt.imshow(masked_grid, **kwargs)
    plt.colorbar()
    plt.show()
class VRaster():
    """Variable raster, input for frequency ratio analysis."""

    def __init__(self, name, path, bins=10, categorical=False):
        """
        Create the VRaster object.

        Parameters
        ----------
        name : str
            Name of the VRaster.
        path : str
            Path to the GDAL-compatible raster file.
        bins : int, optional
            Number of histogram bins. The default is 10.
        categorical : bool, optional
            True if it is a categorical variable, eg: geology.
            The default is False.

        Returns
        -------
        None.
        """
        self.name = name
        self.path = path
        self.bins = bins
        self.categorical = categorical
        self.nodata = utils.get_nodata_value(path)
        # Convert input grid to array
        self.grid = utils.raster2array(path)
        # Calculate basic statistics over the valid (non-nodata) cells.
        # ndarray.min()/.max() run in C instead of iterating element-by-
        # element like the Python builtins min()/max() did.
        valid_values = self.grid[self.grid != self.nodata]
        self.min = valid_values.min()
        self.max = valid_values.max()
        self.limits = (self.min, self.max)

    def show(self, **kwargs):
        """
        Plot the VRaster.grid.

        Parameters
        ----------
        **kwargs :
            Keywords passed to show_grid.

        Returns
        -------
        None.
        """
        show_grid(self.grid, self.nodata, name=self.name, **kwargs)

    def show_info(self):
        """
        Print basic information about the VRaster.grid.

        For non-categorical grids, prints the limits, mean and standard
        deviation of the valid cells.

        Returns
        -------
        None.
        """
        valid_values = self.grid[self.grid != self.nodata]
        if self.categorical:
            print("Categorical!")
        else:
            average = np.mean(valid_values)
            sdev = np.std(valid_values)
            print("Name: {} Limits: {}".format(self.name, self.limits))
            print("Mean: {} Standard deviation: {}".format(average, sdev))
class LandslideMask():
"""LandslideMask."""
def __init__(self, name, path, ls_marker=1, nls_marker=0, fold_count=5):
    """
    Create a LandslideMask object.

    Parameters
    ----------
    name : str
        Name of the landslide mask.
    path : str
        Path to the file used as the mask in the analysis.
    ls_marker : int, optional
        Value marking the landslide pixels. The default is 1.
    nls_marker : int, optional
        Value marking the non-landslide pixels. Must be different
        from the nodata value. The default is 0.
    fold_count : int, optional
        The number of cross validation folds. The default is 5.

    Returns
    -------
    None.
    """
    # Plain configuration, stored as given.
    self.name = name
    self.path = path
    # Cell markers: landslide vs. non-landslide (the latter must differ
    # from the raster's nodata value).
    self.ls_marker = ls_marker
    self.nls_marker = nls_marker
    self.fold_count = fold_count
    # Raster metadata and the 2D array read from the input mask file.
    self.nodata = utils.get_nodata_value(path)
    self.grid = utils.raster2array(path)
# | |
import pytest
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
import re
# Single-node test cluster; `instance` is the only ClickHouse server
# used by the tests below.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance')
@pytest.fixture(scope="module", autouse=True)
def start_cluster():
    """Module-scoped fixture: boot the cluster and seed test.table."""
    try:
        cluster.start()
        # Fixture data shared by every test in this module.
        instance.query("CREATE DATABASE test")
        instance.query("CREATE TABLE test.table(x UInt32, y UInt32) ENGINE = MergeTree ORDER BY tuple()")
        instance.query("INSERT INTO test.table VALUES (1,5), (2,10)")
        yield cluster
    finally:
        # Always stop the containers, even if startup or seeding failed.
        cluster.shutdown()
@pytest.fixture(autouse=True)
def cleanup_after_test():
    """Per-test fixture: drop the users/views a test may have created."""
    try:
        yield
    finally:
        # Tests create users A/B and test.view_1; remove them so every
        # test starts from a clean state.
        instance.query("DROP USER IF EXISTS A, B")
        instance.query("DROP TABLE IF EXISTS test.view_1")
def test_smoke():
    """SELECT succeeds exactly while the privilege is granted."""
    instance.query("CREATE USER A")
    select_query = "SELECT * FROM test.table"
    # Fresh user: no privileges at all.
    assert "Not enough privileges" in instance.query_and_get_error(select_query, user='A')
    instance.query('GRANT SELECT ON test.table TO A')
    assert instance.query(select_query, user='A') == "1\t5\n2\t10\n"
    # Revoking restores the original failure.
    instance.query('REVOKE SELECT ON test.table FROM A')
    assert "Not enough privileges" in instance.query_and_get_error(select_query, user='A')
def test_grant_option():
    """A user can pass a privilege on only when granted WITH GRANT OPTION."""
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")

    select_query = "SELECT * FROM test.table"
    instance.query('GRANT SELECT ON test.table TO A')
    assert instance.query(select_query, user='A') == "1\t5\n2\t10\n"
    # A holds SELECT but not the grant option, so it cannot re-grant.
    assert "Not enough privileges" in instance.query_and_get_error("GRANT SELECT ON test.table TO B", user='A')

    instance.query('GRANT SELECT ON test.table TO A WITH GRANT OPTION')
    instance.query("GRANT SELECT ON test.table TO B", user='A')
    assert instance.query(select_query, user='B') == "1\t5\n2\t10\n"

    instance.query('REVOKE SELECT ON test.table FROM A, B')
def test_revoke_requires_grant_option():
    """Revoking another user's privilege needs a covering WITH GRANT OPTION."""
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")

    grant_b = "GRANT SELECT ON test.table TO B"
    granted_line = "GRANT SELECT ON test.table TO B\n"

    def b_grants():
        return instance.query("SHOW GRANTS FOR B")

    instance.query(grant_b)
    assert b_grants() == granted_line

    # A has no SELECT at all: revoking B's grant is plainly forbidden.
    expected_error = "Not enough privileges"
    assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A')
    assert b_grants() == granted_line

    # A has SELECT, but without the grant option: still forbidden.
    instance.query("GRANT SELECT ON test.table TO A")
    expected_error = "privileges have been granted, but without grant option"
    assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A')
    assert b_grants() == granted_line

    # With the grant option on the exact table, A can revoke.
    instance.query("GRANT SELECT ON test.table TO A WITH GRANT OPTION")
    assert b_grants() == granted_line
    instance.query("REVOKE SELECT ON test.table FROM B", user='A')
    assert b_grants() == ""

    # The table-level grant option also covers broader REVOKE statements.
    instance.query(grant_b)
    assert b_grants() == granted_line
    instance.query("REVOKE SELECT ON test.* FROM B", user='A')
    assert b_grants() == ""

    instance.query(grant_b)
    assert b_grants() == granted_line
    instance.query("REVOKE ALL ON test.* FROM B", user='A')
    assert b_grants() == ""

    instance.query(grant_b)
    assert b_grants() == granted_line
    instance.query("REVOKE ALL ON *.* FROM B", user='A')
    assert b_grants() == ""

    # Stripping the grant option from A makes revoking forbidden again.
    instance.query("REVOKE GRANT OPTION FOR ALL ON *.* FROM A")
    instance.query(grant_b)
    assert b_grants() == granted_line
    expected_error = "privileges have been granted, but without grant option"
    assert expected_error in instance.query_and_get_error("REVOKE SELECT ON test.table FROM B", user='A')
    assert b_grants() == granted_line

    # A database-wide grant option restores the ability for a table revoke.
    instance.query("GRANT SELECT ON test.* TO A WITH GRANT OPTION")
    instance.query(grant_b)
    assert b_grants() == granted_line
    instance.query("REVOKE SELECT ON test.table FROM B", user='A')
    assert b_grants() == ""
def test_implicit_show_grants():
    """Any SELECT grant implicitly exposes matching system.* metadata rows."""
    instance.query("CREATE USER A")

    def metadata_counts():
        # (databases, tables, columns) rows visible to A for test.table.
        return (
            instance.query("select count() FROM system.databases WHERE name='test'", user="A"),
            instance.query("select count() FROM system.tables WHERE database='test' AND name='table'", user="A"),
            instance.query("select count() FROM system.columns WHERE database='test' AND table='table'", user="A"),
        )

    # No grants: nothing is visible.
    assert metadata_counts() == ("0\n", "0\n", "0\n")

    # Column-level grant: the database, table and single column appear.
    instance.query("GRANT SELECT(x) ON test.table TO A")
    assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT(x) ON test.table TO A\n"
    assert metadata_counts() == ("1\n", "1\n", "1\n")

    # Table-level grant: both columns appear.
    instance.query("GRANT SELECT ON test.table TO A")
    assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.table TO A\n"
    assert metadata_counts() == ("1\n", "1\n", "2\n")

    # Broader grants do not change what is visible for this table.
    instance.query("GRANT SELECT ON test.* TO A")
    assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON test.* TO A\n"
    assert metadata_counts() == ("1\n", "1\n", "2\n")

    instance.query("GRANT SELECT ON *.* TO A")
    assert instance.query("SHOW GRANTS FOR A") == "GRANT SELECT ON *.* TO A\n"
    assert metadata_counts() == ("1\n", "1\n", "2\n")

    # Revoking everything hides the metadata again.
    instance.query("REVOKE ALL ON *.* FROM A")
    assert metadata_counts() == ("0\n", "0\n", "0\n")
def test_implicit_create_view_grant():
    """CREATE TABLE on a database implicitly allows CREATE VIEW there."""
    instance.query("CREATE USER A")
    expected_error = "Not enough privileges"
    create_view = "CREATE VIEW test.view_1 AS SELECT 1"
    assert expected_error in instance.query_and_get_error(create_view, user="A")

    instance.query("GRANT CREATE TABLE ON test.* TO A")
    instance.query(create_view, user="A")
    assert instance.query("SELECT * FROM test.view_1") == "1\n"

    # Revoking the table privilege removes the implicit view privilege too.
    instance.query("REVOKE CREATE TABLE ON test.* FROM A")
    instance.query("DROP TABLE test.view_1")
    assert expected_error in instance.query_and_get_error(create_view, user="A")
def test_implicit_create_temporary_table_grant():
    """CREATE TABLE on any database implies CREATE TEMPORARY TABLE."""
    instance.query("CREATE USER A")
    expected_error = "Not enough privileges"
    create_tmp = "CREATE TEMPORARY TABLE tmp(name String)"
    assert expected_error in instance.query_and_get_error(create_tmp, user="A")

    instance.query("GRANT CREATE TABLE ON test.* TO A")
    instance.query(create_tmp, user="A")

    instance.query("REVOKE CREATE TABLE ON *.* FROM A")
    assert expected_error in instance.query_and_get_error(create_tmp, user="A")
def test_introspection():
    """SHOW USERS / SHOW GRANTS / SHOW ACCESS and system tables reflect grants."""
    instance.query("CREATE USER A")
    instance.query("CREATE USER B")
    instance.query('GRANT SELECT ON test.table TO A')
    instance.query('GRANT CREATE ON *.* TO B WITH GRANT OPTION')

    assert instance.query("SHOW USERS") == TSV([ "A", "B", "default" ])
    assert instance.query("SHOW CREATE USERS A") == TSV([ "CREATE USER A" ])
    assert instance.query("SHOW CREATE USERS B") == TSV([ "CREATE USER B" ])
    assert instance.query("SHOW CREATE USERS A,B") == TSV([ "CREATE USER A", "CREATE USER B" ])
    assert instance.query("SHOW CREATE USERS") == TSV([ "CREATE USER A", "CREATE USER B", "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default" ])

    assert instance.query("SHOW GRANTS FOR A") == TSV([ "GRANT SELECT ON test.table TO A" ])
    assert instance.query("SHOW GRANTS FOR B") == TSV([ "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    assert instance.query("SHOW GRANTS FOR A,B") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    assert instance.query("SHOW GRANTS FOR B,A") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])
    assert instance.query("SHOW GRANTS FOR ALL") == TSV([ "GRANT SELECT ON test.table TO A", "GRANT CREATE ON *.* TO B WITH GRANT OPTION", "GRANT ALL ON *.* TO default WITH GRANT OPTION" ])

    # Without FOR, SHOW GRANTS applies to the current user.
    assert instance.query("SHOW GRANTS", user='A') == TSV([ "GRANT SELECT ON test.table TO A" ])
    assert instance.query("SHOW GRANTS", user='B') == TSV([ "GRANT CREATE ON *.* TO B WITH GRANT OPTION" ])

    expected_access1 = "CREATE USER A\n"\
                       "CREATE USER B\n"\
                       "CREATE USER default IDENTIFIED WITH plaintext_password SETTINGS PROFILE default"
    expected_access2 = "GRANT SELECT ON test.table TO A\n"\
                       "GRANT CREATE ON *.* TO B WITH GRANT OPTION\n"\
                       "GRANT ALL ON *.* TO default WITH GRANT OPTION\n"
    assert expected_access1 in instance.query("SHOW ACCESS")
    assert expected_access2 in instance.query("SHOW ACCESS")

    assert instance.query("SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except from system.users WHERE name IN ('A', 'B') ORDER BY name") ==\
        TSV([[ "A", "local directory", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ],
             [ "B", "local directory", "no_password", "{}", "['::/0']", "[]", "[]", "[]", 1, "[]", "[]" ]])
    # NULL columns come back as the literal backslash-N marker. Written as
    # "\\N" because a bare "\N" is an invalid escape in Python 3 (it starts
    # a named \N{...} escape); "\\N" is byte-identical under Python 2.
    assert instance.query("SELECT * from system.grants WHERE user_name IN ('A', 'B') ORDER BY user_name, access_type, grant_option") ==\
        TSV([[ "A", "\\N", "SELECT", "test", "table", "\\N", 0, 0 ],
             [ "B", "\\N", "CREATE", "\\N", "\\N", "\\N", 0, 1 ]])
def test_current_database():
instance.query("CREATE USER A")
instance.query("GRANT SELECT ON table TO A", database="test")
assert instance.query("SHOW GRANTS FOR A") == TSV([ "GRANT SELECT ON test.table TO A" ])
assert | |
effective_at: str
:param as_at: The asAt datetime at which to retrieve the person's relations. Defaults to return the latest LUSID AsAt time if not specified.
:type as_at: datetime
:param filter: Expression to filter the relations. Users should provide null or empty string for this field until further notice.
:type filter: str
:param identifier_types: Identifiers types (as property keys) used for referencing Persons or Legal Entities. These take the format {domain}/{scope}/{code} e.g. \"Person/CompanyDetails/Role\". They must be from the \"Person\" or \"LegalEntity\" domain. Only identifier types stated will be used to look up relevant entities in relations. If not applicable, provide an empty array.
:type identifier_types: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfRelation, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id_type_scope',
'id_type_code',
'code',
'effective_at',
'as_at',
'filter',
'identifier_types'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_person_relations" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) > 64): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_relations`, length must be less than or equal to `64`") # noqa: E501
if self.api_client.client_side_validation and ('code' in local_var_params and # noqa: E501
len(local_var_params['code']) < 1): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_relations`, length must be greater than or equal to `1`") # noqa: E501
if self.api_client.client_side_validation and 'code' in local_var_params and not re.search(r'^[a-zA-Z0-9\-_]+$', local_var_params['code']): # noqa: E501
raise ApiValueError("Invalid value for parameter `code` when calling `get_person_relations`, must conform to the pattern `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id_type_scope' in local_var_params:
path_params['idTypeScope'] = local_var_params['id_type_scope'] # noqa: E501
if 'id_type_code' in local_var_params:
path_params['idTypeCode'] = local_var_params['id_type_code'] # noqa: E501
if 'code' in local_var_params:
path_params['code'] = local_var_params['code'] # noqa: E501
query_params = []
if 'effective_at' in local_var_params and local_var_params['effective_at'] is not None: # noqa: E501
query_params.append(('effectiveAt', local_var_params['effective_at'])) # noqa: E501
if 'as_at' in local_var_params and local_var_params['as_at'] is not None: # noqa: E501
query_params.append(('asAt', local_var_params['as_at'])) # noqa: E501
if 'filter' in local_var_params and local_var_params['filter'] is not None: # noqa: E501
query_params.append(('filter', local_var_params['filter'])) # noqa: E501
if 'identifier_types' in local_var_params and local_var_params['identifier_types'] is not None: # noqa: E501
query_params.append(('identifierTypes', local_var_params['identifier_types'])) # noqa: E501
collection_formats['identifierTypes'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
header_params['Accept-Encoding'] = "gzip, deflate, br"
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
response_types_map = {
200: "ResourceListOfRelation",
400: "LusidValidationProblemDetails",
}
return self.api_client.call_api(
'/api/persons/{idTypeScope}/{idTypeCode}/{code}/relations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def get_person_relationships(self, id_type_scope, id_type_code, code, **kwargs):  # noqa: E501
    """[EXPERIMENTAL] GetPersonRelationships: Get Relationships for Person  # noqa: E501

    Get relationships for the specified person.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_person_relationships(id_type_scope, id_type_code, code, async_req=True)
    >>> result = thread.get()

    :param id_type_scope: Scope of the person's identifier type. (required)
    :type id_type_scope: str
    :param id_type_code: Code of the person's identifier type. (required)
    :type id_type_code: str
    :param code: Code of the person under specified identifier type's scope and code. This together with stated identifier type uniquely identifies the person. (required)
    :type code: str
    :param effective_at: The effective datetime or cut label at which to get relationships. Defaults to the current LUSID system datetime if not specified.
    :type effective_at: str
    :param as_at: The asAt datetime at which to retrieve relationships. Defaults to return the latest LUSID AsAt time if not specified.
    :type as_at: datetime
    :param filter: Expression to filter relationships. Users should provide null or empty string for this field until further notice.
    :type filter: str
    :param identifier_types: Identifiers types (as property keys) used for referencing Persons or Legal Entities. These take the format {domain}/{scope}/{code} e.g. \"Person/CompanyDetails/Role\". They must be from the \"Person\" or \"LegalEntity\" domain. Only identifier types stated will be used to look up relevant entities in relationships. If not applicable, provide an empty array.
    :type identifier_types: list[str]
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: ResourceListOfRelationship
    """
    # Delegate to the *_with_http_info variant, asking it to unwrap the
    # (data, status_code, headers) tuple down to just the response data.
    kwargs['_return_http_data_only'] = True
    return self.get_person_relationships_with_http_info(
        id_type_scope, id_type_code, code, **kwargs)  # noqa: E501
def get_person_relationships_with_http_info(self, id_type_scope, id_type_code, code, **kwargs): # noqa: E501
"""[EXPERIMENTAL] GetPersonRelationships: Get Relationships for Person # noqa: E501
Get relationships for the specified person. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_person_relationships_with_http_info(id_type_scope, id_type_code, code, async_req=True)
>>> result = thread.get()
:param id_type_scope: Scope of the person's identifier type. (required)
:type id_type_scope: str
:param id_type_code: Code of the person's identifier type. (required)
:type id_type_code: str
:param code: Code of the person under specified identifier type's scope and code. This together with stated identifier type uniquely identifies the person. (required)
:type code: str
:param effective_at: The effective datetime or cut label at which to get relationships. Defaults to the current LUSID system datetime if not specified.
:type effective_at: str
:param as_at: The asAt datetime at which to retrieve relationships. Defaults to return the latest LUSID AsAt time if not specified.
:type as_at: datetime
:param filter: Expression to filter relationships. Users should provide null or empty string for this field until further notice.
:type filter: str
:param identifier_types: Identifiers types (as property keys) used for referencing Persons or Legal Entities. These take the format {domain}/{scope}/{code} e.g. \"Person/CompanyDetails/Role\". They must be from the \"Person\" or \"LegalEntity\" domain. Only identifier types stated will be used to look up relevant entities in relationships. If not applicable, provide an empty array.
:type identifier_types: list[str]
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: response data without head status code
and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for an a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(ResourceListOfRelationship, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'id_type_scope',
'id_type_code',
'code',
'effective_at',
'as_at',
'filter',
'identifier_types'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in | |
<gh_stars>0
#!/usr/bin/env python
# coding=utf-8
import base64
import os
import re
import time
import datetime
import hashlib
import string
import random
import pickle
import zlib
import math
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from v2ex.babel import Member
from v2ex.babel import Counter
from v2ex.babel import Section
from v2ex.babel import Node
from v2ex.babel import Topic
from v2ex.babel import Reply
from v2ex.babel import Notification
from v2ex.babel import SYSTEM_VERSION
from v2ex.babel.security import *
from v2ex.babel.ua import *
from v2ex.babel.da import *
from v2ex.babel.l10n import *
from v2ex.babel.ext.cookies import Cookies
from v2ex.babel.ext.sessions import Session
from django.utils import simplejson as json
# Register the project's custom template filters so every template
# rendered below can use them.
template.register_template_library('v2ex.templatetags.filters')
import config
# Page size for topic listings — presumably topics per page; usage not
# visible in this chunk, confirm against the list handlers.
TOPIC_PAGE_SIZE = 100
class NewTopicHandler(webapp.RequestHandler):
def get(self, node_name):
    """Render the 'create new topic' form for the node *node_name*.

    Signed-out visitors are redirected to /signin. Members whose level
    does not permit topic creation get the access_denied template.
    Unknown nodes render node_not_found.
    """
    site = GetSite()
    browser = detect(self.request)
    template_values = {}
    template_values['site'] = site
    template_values['system_version'] = SYSTEM_VERSION
    member = CheckAuth(self)
    l10n = GetMessages(self, member, site)
    template_values['l10n'] = l10n
    template_values['page_title'] = site.title + u' › ' + l10n.create_new_topic.decode('utf-8')
    # NOTE(review): lower member.level appears to mean higher privilege
    # (<= comparison below); a topic_create_level above 999 seems to let
    # any signed-in member create topics — confirm against site config.
    can_create = False
    if site.topic_create_level > 999:
        if member:
            can_create = True
    else:
        if member:
            if member.level <= site.topic_create_level:
                can_create = True
    if (member):
        template_values['member'] = member
        node = GetKindByName('Node', node_name)
        if node is False:
            # Unknown node: render the not-found page and stop early.
            path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'node_not_found.html')
            output = template.render(path, template_values)
            return self.response.out.write(output)
        template_values['node'] = node
        section = GetKindByNum('Section', node.section_num)
        template_values['section'] = section
        if site.use_topic_types:
            # Build the <select> of topic types. site.topic_types holds one
            # "label:color" entry per line; options are 1-indexed, with
            # value 0 meaning "no type".
            types = site.topic_types.split("\n")
            options = '<option value="0"> </option>'
            i = 0
            for a_type in types:
                i = i + 1
                detail = a_type.split(':')
                options = options + '<option value="' + str(i) + '">' + detail[0] + '</option>'
            tt = '<div class="sep5"></div><table cellpadding="5" cellspacing="0" border="0" width="100%"><tr><td width="60" align="right">Topic Type</td><td width="auto" align="left"><select name="type">' + options + '</select></td></tr></table>'
            template_values['tt'] = tt
        else:
            template_values['tt'] = ''
        if can_create:
            # Pick a template variant by client type (iOS gets mobile).
            if browser['ios']:
                if node:
                    path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'new_topic.html')
                else:
                    path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'node_not_found.html')
            else:
                if node:
                    path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'new_topic.html')
                else:
                    path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'node_not_found.html')
        else:
            path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'access_denied.html')
        output = template.render(path, template_values)
        self.response.out.write(output)
    else:
        self.redirect('/signin')
def post(self, node_name):
site = GetSite()
### BEGIN: CAN CONTINUE
can_continue = True
if ('Host' in self.request.headers):
if (self.request.headers['Host'] not in ['www.v2ex.com', 'v2ex.appspot.com', 'fast.v2ex.com', 'beta.v2ex.com', 'us.v2ex.com', 'jp.v2ex.com', 'eu.v2ex.com', 'localhost:10000']):
can_continue = True
else:
can_continue = False
if ('User-Agent' not in self.request.headers):
can_continue = False
if ('Cookie' not in self.request.headers):
can_continue = False
if ('Referer' in self.request.headers):
has_v2ex = False
if ('http://localhost:10000' in self.request.headers['Referer']):
has_v2ex = True
if ('http://www.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://v2ex.appspot.com' in self.request.headers['Referer']):
has_v2ex = True
if ('https://www.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://jp.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://eu.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://us.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('https://v2ex.appspot.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://fast.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://beta.v2ex.com' in self.request.headers['Referer']):
has_v2ex = True
if ('http://' + str(site.domain) in self.request.headers['Referer']):
has_v2ex = True
if has_v2ex is False:
can_continue = False
else:
can_continue = True
if ('Content-Type' in self.request.headers):
if self.request.headers['Content-Type'].startswith( 'application/x-www-form-urlencoded') is False:
can_continue = False
else:
can_continue = False
if can_continue is False:
return self.redirect('http://' + site.domain + '/')
### END: CAN CONTINUE
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['page_title'] = site.title + u' › ' + l10n.create_new_topic.decode('utf-8')
can_create = False
if site.topic_create_level > 999:
if member:
can_create = True
else:
if member:
if member.level <= site.topic_create_level:
can_create = True
if (member):
template_values['member'] = member
if can_create:
node = False
node = GetKindByName('Node', node_name)
template_values['node'] = node
section = False
if node:
section = GetKindByNum('Section', node.section_num)
template_values['section'] = section
errors = 0
# Verification: title
topic_title_error = 0
topic_title_error_messages = ['',
u'请输入主题标题',
u'主题标题长度不能超过 120 个字符'
]
topic_title = self.request.get('title').strip().replace("\n", " ")
if (len(topic_title) == 0):
errors = errors + 1
topic_title_error = 1
else:
if (len(topic_title) > 120):
errors = errors + 1
topic_title_error = 2
template_values['topic_title'] = topic_title
template_values['topic_title_error'] = topic_title_error
template_values['topic_title_error_message'] = topic_title_error_messages[topic_title_error]
# Verification: content
topic_content_error = 0
topic_content_error_messages = ['',
u'主题内容长度不能超过 200000 个字符'
]
topic_content = self.request.get('content').strip()
topic_content_length = len(topic_content)
if (topic_content_length > 0):
if (topic_content_length > 200000):
errors = errors + 1
topic_content_error = 1
template_values['topic_content'] = topic_content
template_values['topic_content_error'] = topic_content_error
template_values['topic_content_error_message'] = topic_content_error_messages[topic_content_error]
# Verification: type
if site.use_topic_types:
types = site.topic_types.split("\n")
if len(types) > 0:
topic_type = self.request.get('type').strip()
try:
topic_type = int(topic_type)
if topic_type < 0:
topic_type = 0
if topic_type > len(types):
topic_type = 0
if topic_type > 0:
detail = types[topic_type - 1].split(':')
topic_type_label = detail[0]
topic_type_color = detail[1]
except:
topic_type = 0
else:
topic_type = 0
options = '<option value="0"> </option>'
i = 0
for a_type in types:
i = i + 1
detail = a_type.split(':')
if topic_type == i:
options = options + '<option value="' + str(i) + '" selected="selected">' + detail[0] + '</option>'
else:
options = options + '<option value="' + str(i) + '">' + detail[0] + '</option>'
tt = '<div class="sep5"></div><table cellpadding="5" cellspacing="0" border="0" width="100%"><tr><td width="60" align="right">Topic Type</td><td width="auto" align="left"><select name="type">' + options + '</select></td></tr></table>'
template_values['tt'] = tt
else:
template_values['tt'] = ''
template_values['errors'] = errors
if (errors == 0):
topic = Topic(parent=node)
q = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'topic.max')
if (q.count() == 1):
counter = q[0]
counter.value = counter.value + 1
else:
counter = Counter()
counter.name = 'topic.max'
counter.value = 1
q2 = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'topic.total')
if (q2.count() == 1):
counter2 = q2[0]
counter2.value = counter2.value + 1
else:
counter2 = Counter()
counter2.name = 'topic.total'
counter2.value = 1
topic.num = counter.value
topic.title = topic_title
topic.content = topic_content
if len(topic_content) > 0:
topic.has_content = True
topic.content_length = topic_content_length
else:
topic.has_content = False
path = os.path.join(os.path.dirname(__file__), 'tpl', 'portion', 'topic_content.html')
output = template.render(path, {'topic' : topic})
topic.content_rendered = output.decode('utf-8')
topic.node = node
topic.node_num = node.num
topic.node_name = node.name
topic.node_title = node.title
topic.created_by = member.username
topic.member = member
topic.member_num = member.num
topic.last_touched = datetime.datetime.now()
ua = self.request.headers['User-Agent']
if (re.findall('Mozilla\/5.0 \(iPhone;', ua)):
topic.source = 'iPhone'
if (re.findall('Mozilla\/5.0 \(iPod;', ua)):
topic.source = 'iPod'
if (re.findall('Mozilla\/5.0 \(iPad;', ua)):
topic.source = 'iPad'
if (re.findall('Android', ua)):
topic.source = 'Android'
if (re.findall('Mozilla\/5.0 \(PLAYSTATION 3;', ua)):
topic.source = 'PS3'
if site.use_topic_types:
if topic_type > 0:
topic.type = topic_type_label
topic.type_color = topic_type_color
node.topics = node.topics + 1
node.put()
topic.put()
counter.put()
counter2.put()
memcache.delete('feed_index')
memcache.delete('Node_' + str(topic.node_num))
memcache.delete('Node::' + str(node.name))
memcache.delete('q_latest_16')
memcache.delete('home_rendered')
memcache.delete('home_rendered_mobile')
try:
taskqueue.add(url='/index/topic/' + str(topic.num))
except:
pass
# Change newbie status?
if member.newbie == 1:
now = datetime.datetime.now()
created = member.created
diff = now - created
if diff.seconds > (86400 * 60):
member.newbie = 0
member.put()
# Notifications: mention_topic
taskqueue.add(url='/notifications/topic/' + str(topic.key()))
self.redirect('/t/' + str(topic.num) + '#reply0')
else:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'new_topic.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'new_topic.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'access_denied.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.redirect('/signin')
class TopicHandler(webapp.RequestHandler):
def get(self, topic_num):
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
reply_reversed = self.request.get('r')
if reply_reversed == '1':
reply_reversed = True
else:
reply_reversed = False
filter_mode = self.request.get('f')
if filter_mode == '1':
filter_mode = True
else:
filter_mode = False
template_values['reply_reversed'] = reply_reversed
template_values['filter_mode'] = filter_mode
template_values['system_version'] = SYSTEM_VERSION
errors = 0
template_values['errors'] = errors
member = CheckAuth(self)
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if member is not False:
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
if (len(blocked) > 0):
template_values['blocked'] = ','.join(map(str, blocked))
if member.level == 0:
template_values['is_admin'] = 1
else:
template_values['is_admin'] = 0
topic_num_str = str(topic_num)
if len(topic_num_str) > 8:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'topic_not_found.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'topic_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
return
topic = False
topic = memcache.get('Topic_' + str(topic_num))
if topic is None:
q = db.GqlQuery("SELECT * FROM Topic WHERE num = :1", int(topic_num))
if (q.count() == 1):
topic = q[0]
memcache.set('Topic_' + str(topic_num), topic, 86400)
can_edit = False
can_move = False
if topic:
if topic.content:
template_values['page_description'] = topic.content[:60] + ' - ' + topic.member.username
else:
template_values['page_description'] = topic.title[:60] + ' - | |
len(seqs) == 1, seqs
assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG')
def test_screed_streaming_ufq():
    """Digital normalization streamed from an uncompressed FASTQ file."""
    outpath = execute_streaming_diginorm(
        utils.get_test_data('test-fastq-reads.fq'))

    sequences = [read.sequence for read in screed.open(outpath)]
    assert sequences[0].startswith('CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT')
def test_screed_streaming_bzipfq():
    """Digital normalization streamed from a bzip2-compressed FASTQ file."""
    outpath = execute_streaming_diginorm(utils.get_test_data('100-reads.fq.bz2'))

    sequences = [read.sequence for read in screed.open(outpath)]
    assert len(sequences) == 100, sequences
    assert sequences[0].startswith('CAGGCGCCCACCACCGTGCCCTCCAACCTGATGGT'), \
        sequences
def test_screed_streaming_bzipfa():
    """Digital normalization streamed from a bzip2-compressed FASTA file."""
    outpath = execute_streaming_diginorm(
        utils.get_test_data('test-abund-read-2.fa.bz2'))

    sequences = [read.sequence for read in screed.open(outpath)]
    assert len(sequences) == 1, sequences
    assert sequences[0].startswith('GGTTGACGGGGCTCAGGGGG')
@pytest.mark.known_failing
def test_screed_streaming_gzipfq():
    """Digital normalization streamed from a gzip-compressed FASTQ file."""
    outpath = execute_streaming_diginorm(utils.get_test_data('100-reads.fq.gz'))
    assert os.path.exists(outpath)

    sequences = [read.sequence for read in screed.open(outpath)]
    assert sequences[0].startswith('CAGGCGCCCACCACCGTGCCCTCCAACCTG')
@pytest.mark.known_failing
def test_screed_streaming_gzipfa():
    """Digital normalization streamed from a gzip-compressed FASTA file."""
    outpath = execute_streaming_diginorm(
        utils.get_test_data('test-abund-read-2.fa.gz'))
    assert os.path.exists(outpath)

    sequences = [read.sequence for read in screed.open(outpath)]
    assert sequences[0].startswith('GGTTGACGGGGCTCAGGG')
def test_read_parser_streaming_ufa():
    """Stream an uncompressed FASTA file through the load-graph pipeline."""
    # uncompressed FASTA
    _execute_load_graph_streaming(utils.get_test_data('random-20-a.fa'))
def test_read_parser_streaming_ufq():
    """Stream an uncompressed FASTQ file through the load-graph pipeline."""
    # uncompressed FASTQ
    _execute_load_graph_streaming(utils.get_test_data('random-20-a.fq'))
@pytest.mark.known_failing
def test_read_parser_streaming_bzfq():
    """Stream a bzip2-compressed FASTQ file through the load-graph pipeline."""
    # bzip compressed FASTQ
    _execute_load_graph_streaming(utils.get_test_data('random-20-a.fq.bz2'))
def test_read_parser_streaming_gzfq():
    """Stream a gzip-compressed FASTQ file through the load-graph pipeline."""
    # gzip compressed FASTQ
    _execute_load_graph_streaming(utils.get_test_data('random-20-a.fq.gz'))
@pytest.mark.known_failing
def test_read_parser_streaming_bzfa():
    """Stream a bzip2-compressed FASTA file through the load-graph pipeline."""
    # bzip compressed FASTA
    _execute_load_graph_streaming(utils.get_test_data('random-20-a.fa.bz2'))
def test_read_parser_streaming_gzfa():
    """Stream a gzip-compressed FASTA file through the load-graph pipeline."""
    # gzip compressed FASTA
    _execute_load_graph_streaming(utils.get_test_data('random-20-a.fa.gz'))
def test_readstats():
    """readstats.py prints a bp/seq/average summary for each input file."""
    expected = ("358 bp / 5 seqs; 71.6 average length",
                "916 bp / 11 seqs; 83.3 average length")

    args = [utils.get_test_data("test-sweep-reads.fq"),
            utils.get_test_data("paired-mixed.fq")]
    status, out, err = utils.runscript('readstats.py', args)
    assert status == 0

    for line in expected:
        assert line in out, (line, out)
def test_readstats_csv():
    """readstats.py --csv emits one comma-separated row per input file."""
    sweep = utils.get_test_data("test-sweep-reads.fq")
    mixed = utils.get_test_data("paired-mixed.fq")
    expected = ("358,5,71.6," + sweep,
                "916,11,83.3," + mixed)

    status, out, err = utils.runscript('readstats.py',
                                       [sweep, mixed, '--csv'])
    assert status == 0

    for line in expected:
        assert line in out, (line, out)
def test_readstats_output():
    """readstats.py -o writes the per-file stats into the named output file.

    Fix: the original read the output file with a bare ``open(...).read()``,
    leaking the file handle; it is now closed deterministically via ``with``.
    """
    readstats_output = ("358 bp / 5 seqs; 71.6 average length",
                        "916 bp / 11 seqs; 83.3 average length")

    outfile = utils.get_temp_filename('output.txt')
    args = ["-o", outfile,
            utils.get_test_data("test-sweep-reads.fq"),
            utils.get_test_data("paired-mixed.fq")]
    status, _, _ = utils.runscript('readstats.py', args)
    assert status == 0

    # read and promptly close the output file
    with open(outfile) as fp:
        out = fp.read()
    for k in readstats_output:
        assert k in out, (k, out)
def test_readstats_empty():
    """readstats.py reports cleanly when every input file is empty."""
    args = [utils.get_test_data("test-empty.fa"),
            utils.get_test_data("test-empty.fa.bz2")]
    status, out, err = utils.runscript('readstats.py', args)

    assert status == 0
    assert "No sequences found in 2 files" in out
def test_trim_low_abund_1():
    """Basic trimming collapses the test reads to a single unique sequence."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", infile], in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    sequences = {read.sequence for read in screed.open(outfile)}
    assert len(sequences) == 1, sequences
    assert 'GGTTGACGGGGCTCAGGG' in sequences
def test_trim_low_abund_1_duplicate_filename_err():
    """Passing the same input filename twice must be rejected with an error."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    args = ["-k", "17", "-x", "1e7", "-N", "2", '-C', '1', infile, infile]

    status, out, err = utils.runscript('trim-low-abund.py', args, in_dir,
                                       fail_ok=True)
    assert status == 1
    assert "Error: Cannot input the same filename multiple times." in str(err)
def test_trim_low_abund_1_stdin_err():
    """Reading from stdin without an explicit output filename is an error."""
    status, out, err = utils.runscript('trim-low-abund.py', ["-"],
                                       fail_ok=True)

    assert status == 1
    assert "Accepting input from stdin; output filename must be provided" \
        in str(err)
def test_trim_low_abund_2():
    """Two copies of the same data with -C 1 yield two unique sequences."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    infile2 = utils.copy_test_data('test-abund-read-2.fa', 'copyDataTwo')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", '-C', '1',
                     infile, infile2],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    sequences = {read.sequence for read in screed.open(outfile)}
    assert len(sequences) == 2, sequences
    assert 'GGTTGACGGGGCTCAGGG' in sequences
def test_trim_low_abund_2_o_gzip():
    """-o plus --gzip writes compressed output that screed can re-read."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    infile2 = utils.copy_test_data('test-abund-read-2.fa', 'copyDataTwo')
    outfile = utils.get_temp_filename('out.gz')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", '-C', '1',
                     "-o", outfile, "--gzip", infile, infile2],
                    in_dir)
    assert os.path.exists(outfile), outfile

    records = list(screed.open(outfile))
    assert len(records)
# make sure that FASTQ records are retained.
def test_trim_low_abund_3_fq_retained():
    """FASTQ input keeps its quality strings in the trimmed output."""
    infile = utils.copy_test_data('test-abund-read-2.fq')
    infile2 = utils.copy_test_data('test-abund-read-2.fq', 'copyDataTwo')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", '-C', '1',
                     infile, infile2],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    sequences = {read.sequence for read in screed.open(outfile)}
    assert len(sequences) == 2, sequences
    assert 'GGTTGACGGGGCTCAGGG' in sequences

    # the 'quality' strings must survive the round trip
    qualities = {read.quality for read in screed.open(outfile)}
    assert len(qualities) == 2, qualities
    assert '##################' in qualities
# test that the -V option does not trim sequences that are low abundance
def test_trim_low_abund_4_retain_low_abund():
    """With -V (variable coverage), low-abundance reads are left untrimmed."""
    infile = utils.copy_test_data('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", '-V', infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    sequences = {read.sequence for read in screed.open(outfile)}
    assert len(sequences) == 2, sequences
    assert 'GGTTGACGGGGCTCAGGG' in sequences
# test that the -V option *does* trim sequences that are low abundance
def test_trim_low_abund_5_trim_high_abund():
    """With -V, high-coverage reads still get error-trimmed."""
    infile = utils.copy_test_data('test-abund-read-3.fa')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", '-V', infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    sequences = {read.sequence for read in screed.open(outfile)}
    assert len(sequences) == 2, sequences

    # trimmed sequence @ error
    assert 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGC' in sequences
# test that -V/-Z setting - should not trip if -Z is set high enough.
def test_trim_low_abund_6_trim_high_abund_Z():
    """A high -Z coverage threshold suppresses trimming under -V."""
    infile = utils.copy_test_data('test-abund-read-3.fa')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", '-V', '-Z', '25',
                     infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    sequences = {read.sequence for read in screed.open(outfile)}
    assert len(sequences) == 2, sequences

    # untrimmed seq.
    untrimmed = 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCgtgCCGCAGCTGTCGTCAGGG' \
                'GATTTCCGGGCGG'
    assert untrimmed in sequences  # should be there, untrimmed
def test_trim_low_abund_keep_paired():
    """Old-style /1 /2 read pairs stay adjacent in the trimmed output."""
    infile = utils.copy_test_data('test-abund-read-2.paired.fq')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", "-V", infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    names = [read.name for read in screed.open(outfile)]
    assert names[-2:] == ['pair/1', 'pair/2'], names
def test_trim_low_abund_keep_paired_casava18():
    """Casava 1.8-style read pairs stay adjacent in the trimmed output."""
    infile = utils.copy_test_data('test-abund-read-2.paired2.fq')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", "-V", infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    names = [read.name for read in screed.open(outfile)]
    assert names[-2:] == ['pair:foo 1::N', 'pair:foo 2::N'], names
def test_trim_low_abund_highfpr():
    """An undersized counting table must abort with a graph-too-small error."""
    infile = utils.copy_test_data('test-abund-read-2.paired.fq')
    in_dir = os.path.dirname(infile)

    status, out, err = utils.runscript('trim-low-abund.py',
                                       ["-k", "17", "-x", "1", "-N", "1",
                                        "-V", infile],
                                       in_dir, fail_ok=True)

    assert status == 1
    assert '** ERROR: the graph structure is too small' in err, err
def test_trim_low_abund_trimtest():
    """Verify the exact trimmed sequences for known reads (-Z 2 -C 1 -V)."""
    infile = utils.copy_test_data('test-abund-read-2.paired.fq')
    in_dir = os.path.dirname(infile)

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", "-Z", "2", "-C", "1",
                     "-V", infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    # expected post-trim sequence for each read of interest
    expected = {
        'seqtrim/1': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCC',
        'seqtrim/2': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGC',
        'seqtrim2/1': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCA',
    }
    for record in screed.open(outfile):
        if record.name in expected:
            print(record.name, record.sequence)
            assert record.sequence == expected[record.name]
def test_trim_low_abund_trimtest_after_load():
    """Like trimtest, but the counting graph is prebuilt and --loadgraph'ed."""
    infile = utils.copy_test_data('test-abund-read-2.paired.fq')
    in_dir = os.path.dirname(infile)
    saved_table = utils.get_temp_filename('save.ct')

    # build the counting graph separately...
    utils.runscript('load-into-counting.py',
                    ["-k", "17", "-x", "1e7", "-N", "2", saved_table, infile],
                    in_dir)
    # ...then trim using the pre-built graph
    utils.runscript('trim-low-abund.py',
                    ["-Z", "2", "-C", "2", "-V", '--loadgraph', saved_table,
                     infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile

    expected = {
        'seqtrim/1': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCC',
        'seqtrim/2': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGC',
        'seqtrim2/1': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCA',
    }
    for record in screed.open(outfile):
        if record.name in expected:
            print(record.name, record.sequence)
            assert record.sequence == expected[record.name]
def test_trim_low_abund_trimtest_savegraph():
    """Like trimtest, additionally persisting the graph via --savegraph."""
    infile = utils.copy_test_data('test-abund-read-2.paired.fq')
    in_dir = os.path.dirname(infile)
    saved_table = utils.get_temp_filename('save.ct')

    utils.runscript('trim-low-abund.py',
                    ["-k", "17", "-x", "1e7", "-N", "2",
                     "-Z", "2", "-C", "2", "-V", '--savegraph', saved_table,
                     infile],
                    in_dir)

    outfile = infile + '.abundtrim'
    assert os.path.exists(outfile), outfile
    assert os.path.exists(saved_table)

    expected = {
        'seqtrim/1': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCC',
        'seqtrim/2': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCAGCCGC',
        'seqtrim2/1': 'GGTTGACGGGGCTCAGGGGGCGGCTGACTCCGAGAGACAGCA',
    }
    for record in screed.open(outfile):
        if record.name in expected:
            print(record.name, record.sequence)
            assert record.sequence == expected[record.name]
def test_trim_low_abund_no_summary_info_by_default():
    """Without --summary-info, no .info.json summary file is written."""
    infile = utils.copy_test_data("test-abund-read-2.fa")
    in_dir = os.path.dirname(infile)

    _, out, err = utils.runscript('trim-low-abund.py',
                                  ["-k", "17", "-x", "1e7", "-N", "2",
                                   "-o", "summary", infile],
                                  in_dir)

    summary_fname = os.path.join(in_dir, "summary.info.json")
    print(os.path.exists(summary_fname))
    assert not os.path.exists(summary_fname), summary_fname
def test_trim_low_abund_summary_info_json():
    """--summary-info json writes a parseable .info.json summary file."""
    # test JSON file with summary info is created
    infile = utils.copy_test_data("test-abund-read-2.fa")
    in_dir = os.path.dirname(infile)

    _, out, err = utils.runscript('trim-low-abund.py',
                                  ["-k", "17", "-x", "1e7", "-N", "2",
                                   "--summary-info", "json",
                                   "-o", "summary", infile],
                                  in_dir)

    summary_fname = os.path.join(in_dir, "summary.info.json")
    assert os.path.exists(summary_fname), summary_fname
    with open(summary_fname) as f:
        assert json.load(f), 'summary file does not contain valid JSON'
def test_trim_low_abund_summary_info_tsv():
    """--summary-info tsv writes a one-row tab-separated summary file."""
    # test TSV file with summary info is created
    infile = utils.copy_test_data("test-abund-read-2.fa")
    in_dir = os.path.dirname(infile)

    _, out, err = utils.runscript('trim-low-abund.py',
                                  ["-k", "17", "-x", "1e7", "-N", "2",
                                   "--summary-info", "tsv",
                                   "-o", "summary", infile],
                                  in_dir)

    summary_fname = os.path.join(in_dir, "summary.info.tsv")
    assert os.path.exists(summary_fname), summary_fname
    with open(summary_fname) as f:
        rows = list(csv.DictReader(f, dialect='excel-tab'))
    assert len(rows) == 1
# test that -o/--out option outputs to STDOUT
def test_trim_low_abund_stdout():
infile = utils.copy_test_data('test-abund-read-2.fa')
in_dir = | |
True
dirLen = len(dir)
# this skips all of /var/lib/conarydb/
for (dirName, dirNameList, pathNameList) in os.walk(dir):
for path in pathNameList:
if path[0] == ".": continue
fullPath = dirName[dirLen:] + "/" + path
if fullPath == "/var/log/conary": continue
if fullPath.startswith("/var/lib/conarydb/"): continue
if paths.has_key(fullPath):
del paths[fullPath]
else:
self.fail("unexpected file %s" % fullPath)
if paths:
self.fail("files missing %s" % " ".join(paths.keys()))
    def cookItem(self, *args, **kw):
        # Thin wrapper around cook.cookItem that suppresses its console output.
        return self.discardOutput(cook.cookItem, *args, **kw)
# Kludge to make debugging tests that only fail in Hudson easier
_printOnError = False
    def cookObject(self, loader, prep=False, macros={}, sourceVersion = None,
                   serverIdx = 0, ignoreDeps = False, logBuild = False,
                   targetLabel = None, repos = None,
                   groupOptions = None, resume = None):
        # Cook a recipe from an already-constructed loader and return the
        # list of troves that were built.
        #
        # If no sourceVersion is supplied, one is guessed from the repository;
        # failing that a synthetic '/<label>/<version>-1' version is made up.
        # All output from cook.cookObject is captured and discarded.
        theClass = loader.getRecipe()
        if repos is None:
            repos = self.openRepository(serverIdx)
        if sourceVersion is None:
            sourceVersion = cook.guessSourceVersion(repos, theClass.name,
                                                    theClass.version,
                                                    self.cfg.buildLabel,
                                                    searchBuiltTroves=True)[0]
            if not sourceVersion:
                # just make up a sourceCount -- there's no version in
                # the repository to compare against
                sourceVersion = versions.VersionFromString('/%s/%s-1' % (
                    self.cfg.buildLabel.asString(),
                    theClass.version))
        use.resetUsed()
        try:
            builtList, _ = self.captureOutput(cook.cookObject,
                                              repos,
                                              self.cfg,
                                              [loader],
                                              sourceVersion,
                                              prep=prep, macros=macros,
                                              allowMissingSource=True,
                                              ignoreDeps=ignoreDeps,
                                              logBuild=logBuild,
                                              groupOptions=groupOptions,
                                              resume=resume,
                                              _printOnError=self._printOnError,
                                              )
        finally:
            # always release the repository connection, even on a failed cook
            repos.close()
        return builtList
    def cookPackageObject(self, theClass, prep=False, macros={},
                          sourceVersion = None, serverIdx = 0,
                          ignoreDeps = False):
        """ cook a package object, return the buildpackage components
            and package obj

            stdout/stderr are redirected to /dev/null for the duration of
            the cook and restored afterwards.
        """
        repos = self.openRepository(serverIdx)
        if sourceVersion is None:
            sourceVersion, _ = cook.guessSourceVersion(repos, theClass.name,
                                                       theClass.version,
                                                       self.cfg.buildLabel,
                                                       searchBuiltTroves=True)
            if not sourceVersion:
                # just make up a sourceCount -- there's no version in
                # the repository to compare against
                sourceVersion = versions.VersionFromString('/%s/%s-1' % (
                    self.cfg.buildLabel.asString(),
                    theClass.version))
        use.resetUsed()
        # silence the cook: save the real stdout/stderr fds, point both at
        # /dev/null, and restore them in the finally block below
        stdout = os.dup(sys.stdout.fileno())
        stderr = os.dup(sys.stderr.fileno())
        null = os.open('/dev/null', os.O_WRONLY)
        os.dup2(null, sys.stdout.fileno())
        os.dup2(null, sys.stderr.fileno())
        try:
            res = cook._cookPackageObject(repos, self.cfg, theClass,
                                          sourceVersion,
                                          prep=prep, macros=macros,
                                          ignoreDeps=ignoreDeps)
        finally:
            os.dup2(stdout, sys.stdout.fileno())
            os.dup2(stderr, sys.stderr.fileno())
            os.close(null)
            os.close(stdout)
            os.close(stderr)
            repos.close()
        if not res:
            return None
        #return bldList, recipeObj
        return res[0:2]
    def getRecipeObjFromRepos(self, name, repos):
        # Load the source component `name` from the repository and cook it in
        # prep mode, returning the resulting recipe object.  stdout/stderr
        # are silenced for the duration (same fd-juggling pattern as
        # cookPackageObject) and restored in the finally block.
        stdout = os.dup(sys.stdout.fileno())
        stderr = os.dup(sys.stderr.fileno())
        null = os.open('/dev/null', os.O_WRONLY)
        os.dup2(null, sys.stdout.fileno())
        os.dup2(null, sys.stderr.fileno())
        try:
            loader, sourceVersion = loadrecipe.recipeLoaderFromSourceComponent(
                name, self.cfg, repos)[0:2]
            recipeObj = cook._cookPackageObject(repos, self.cfg, loader,
                                                sourceVersion, prep=True,
                                                requireCleanSources=True)
        finally:
            os.dup2(stdout, sys.stdout.fileno())
            os.dup2(stderr, sys.stderr.fileno())
            os.close(null)
            os.close(stdout)
            os.close(stderr)
        return recipeObj
def repairTroves(self, pkgList = [], root = None):
if root is None:
root = self.rootDir
repos = self.openRepository()
db = self.openDatabase(root = root)
troveList = []
for item in pkgList:
name, ver, flv = updatecmd.parseTroveSpec(item)
troves = db.findTrove(None, (name, ver, flv))
troveList += troves
db.repairTroves(repos, troveList)
    def updatePkg(self, root, pkg=[], version = None, tagScript = None,
                  noScripts = False, keepExisting = False, replaceFiles = None,
                  resolve = False, depCheck = True, justDatabase = False,
                  flavor = None, recurse = True, sync = False,
                  info = False, fromFiles = [], checkPathConflicts = True,
                  test = False, migrate = False, keepRequired = None,
                  raiseError = False, callback = None, restartInfo = None,
                  applyCriticalOnly = False, syncChildren = False,
                  keepJournal = False, noRestart=False,
                  exactFlavors = False, replaceManagedFiles = False,
                  replaceModifiedFiles = False, replaceUnmanagedFiles = False,
                  replaceModifiedConfigFiles = False, skipCapsuleOps = False,
                  criticalUpdateInfo = None, modelFile = None):
        # Install or update troves under `root`.
        #
        # pkg may be a trove spec string, a list of specs, or a changeset
        # object.  For backwards compatibility `root` may be omitted, in which
        # case the first positional argument is treated as pkg and
        # self.rootDir is used.  Dependency and Conary errors are printed or
        # logged unless raiseError is set, in which case they propagate.
        # NOTE(review): the mutable defaults (pkg=[], fromFiles=[]) are never
        # mutated here, but are still a sharing hazard -- worth confirming.
        if not isinstance(root, str) or not root[0] == '/':
            # hack to allow passing of rootdir as first argument
            # as we used to
            if isinstance(root, list):
                pkg = root
            else:
                pkg = [root]
            root = self.rootDir
        newcfg = self.cfg
        newcfg.root = root
        if callback is None:
            callback = callbacks.UpdateCallback()
        if replaceFiles is not None:
            # legacy switch fans out to the four fine-grained replace flags
            replaceManagedFiles = replaceFiles
            replaceUnmanagedFiles = replaceFiles
            replaceModifiedFiles = replaceFiles
            replaceModifiedConfigFiles = replaceFiles
        repos = self.openRepository()
        if isinstance(pkg, (str, list)):
            if isinstance(pkg, str):
                # build a single "name=version[flavor]" trove spec
                if version is not None:
                    if type(version) is not str:
                        version = version.asString()
                    item = "%s=%s" % (pkg, version)
                else:
                    item = pkg
                if flavor is not None:
                    item += '[%s]' % flavor
                pkgl = [ item ]
            else:
                assert(version is None)
                assert(flavor is None)
                pkgl = list(itertools.chain(*(util.braceExpand(x) for x in pkg)))
            # For consistency's sake, if in migrate mode, fake the command
            # line to say migrate
            if migrate:
                newSysArgv = [ 'conary', 'migrate' ]
            else:
                newSysArgv = [ 'conary', 'update' ]
            oldSysArgv = sys.argv
            # Add the packages to handle
            newSysArgv.extend(pkgl)
            newcfg.autoResolve = resolve
            try:
                if keepJournal:
                    k = { 'keepJournal' : True }
                else:
                    k = {}
                try:
                    # sys.argv is swapped so doUpdate sees a plausible
                    # command line; restored in the finally below
                    sys.argv = newSysArgv
                    updatecmd.doUpdate(newcfg, pkgl,
                                       tagScript=tagScript,
                                       keepExisting=keepExisting,
                                       replaceManagedFiles=\
                                            replaceManagedFiles,
                                       replaceUnmanagedFiles=\
                                            replaceUnmanagedFiles,
                                       replaceModifiedFiles=\
                                            replaceModifiedFiles,
                                       replaceModifiedConfigFiles=\
                                            replaceModifiedConfigFiles,
                                       depCheck=depCheck,
                                       justDatabase=justDatabase,
                                       recurse=recurse, split=True,
                                       sync=sync, info=info,
                                       fromFiles=fromFiles,
                                       checkPathConflicts=checkPathConflicts,
                                       test=test, migrate=migrate,
                                       keepRequired=keepRequired,
                                       callback=callback,
                                       restartInfo=restartInfo,
                                       applyCriticalOnly=applyCriticalOnly,
                                       syncChildren=syncChildren,
                                       forceMigrate=migrate,
                                       noRestart=noRestart,
                                       exactFlavors=exactFlavors,
                                       criticalUpdateInfo=criticalUpdateInfo,
                                       skipCapsuleOps=skipCapsuleOps,
                                       noScripts=noScripts,
                                       systemModelFile=modelFile,
                                       **k)
                finally:
                    sys.argv = oldSysArgv
            except conaryclient.DependencyFailure, msg:
                if raiseError:
                    raise
                print msg
            except errors.InternalConaryError, err:
                raise
            except errors.ConaryError, msg:
                if raiseError:
                    raise
                log.error(msg)
        else:
            # we have a changeset object; mimic what updatecmd does
            assert(not info)
            assert(not fromFiles)
            assert(not test)
            assert(checkPathConflicts)
            cl = conaryclient.ConaryClient(self.cfg)
            cl.setUpdateCallback(callback)
            job = [ (x[0], (None, None), (x[1], x[2]),
                     not keepExisting) for x in
                    pkg.getPrimaryTroveList() ]
            try:
                try:
                    updJob, suggMap = cl.updateChangeSet(job,
                                        keepExisting = keepExisting,
                                        keepRequired = keepRequired,
                                        recurse = recurse, split = True,
                                        sync = sync,
                                        fromChangesets = [ pkg ])
                    if depCheck:
                        assert(not suggMap)
                    if replaceFiles is None:
                        replaceFiles = False
                    # old applyUpdate API doesn't support separate args
                    assert(not replaceManagedFiles)
                    assert(not replaceUnmanagedFiles)
                    assert(not replaceModifiedFiles)
                    assert(not replaceModifiedConfigFiles)
                    cl.applyUpdate(updJob, replaceFiles = replaceFiles,
                                   tagScript = tagScript,
                                   justDatabase = justDatabase,
                                   keepJournal = keepJournal)
                finally:
                    updJob.close()
                    cl.close()
            except conaryclient.DependencyFailure, msg:
                if raiseError:
                    raise
                print msg
            except errors.InternalConaryError, err:
                raise
            except errors.ConaryError, err:
                if raiseError:
                    raise
                log.error(err)
def verifyDatabase(self):
db = self.openDatabase()
for info in list(db.iterAllTroves()):
assert db.getTrove(*info).verifyDigests(), "Update failed"
    def updateAll(self, **kw):
        # Thin wrapper: run "conary updateall" with the test's configuration.
        updatecmd.updateAll(self.cfg, **kw)
def localChangeset(self, root, pkg, fileName):
db = database.Database(root, self.cfg.dbPath)
newcfg = copy.deepcopy(self.cfg)
newcfg.root = root
db = database.Database(root, self.cfg.dbPath)
newcfg = copy.deepcopy(self.cfg)
newcfg.root = root
verify.LocalChangeSetCommand(db, newcfg, pkg, fileName)
db.close()
    def changeset(self, repos, troveSpecs, fileName, recurse=True):
        # Write a changeset for troveSpecs to fileName.
        # NOTE(review): the repos argument is unused here -- ChangeSetCommand
        # works from self.cfg; the parameter is kept for caller compatibility.
        cscmd.ChangeSetCommand(self.cfg, troveSpecs, fileName,
                               recurse=recurse)
def erasePkg(self, root, pkg, version = None, tagScript = None,
depCheck = True, justDatabase = False, flavor = None,
test = False, recurse=True, callback = None,
skipCapsuleOps = False):
db = database.Database(root, self.cfg.dbPath)
try:
if type(pkg) == list:
sys.argv = [ 'conary', 'erase' ] + pkg
updatecmd.doUpdate(self.cfg, pkg,
tagScript = tagScript, depCheck = depCheck,
justDatabase = justDatabase,
updateByDefault = False, test = test,
recurse=recurse, callback=callback,
skipCapsuleOps = skipCapsuleOps)
elif version and flavor:
item = "%s=%s[%s]" % (pkg, version, flavor)
sys.argv = [ 'conary', 'erase', item ]
updatecmd.doUpdate(self.cfg, [ item ],
tagScript = tagScript, depCheck = depCheck,
justDatabase = justDatabase,
updateByDefault = False, test = test,
recurse=recurse, callback=callback,
skipCapsuleOps = skipCapsuleOps)
elif version:
item = "%s=%s" % (pkg, version)
sys.argv = [ 'conary', 'erase', item ]
updatecmd.doUpdate(self.cfg, [ item ],
tagScript = tagScript, depCheck = depCheck,
justDatabase = justDatabase,
updateByDefault = False, test = test,
recurse=recurse, callback=callback,
skipCapsuleOps = skipCapsuleOps)
elif flavor:
item = "%s[%s]" % (pkg, flavor)
sys.argv = [ 'conary', 'erase', item ]
updatecmd.doUpdate(self.cfg, [ item ],
tagScript = tagScript, depCheck = depCheck,
justDatabase = justDatabase,
updateByDefault = False, test = test,
recurse=recurse, callback=callback,
skipCapsuleOps = skipCapsuleOps)
else:
sys.argv = [ 'conary', 'erase', pkg ]
updatecmd.doUpdate(self.cfg, [ pkg ],
tagScript = tagScript, depCheck = depCheck,
justDatabase = justDatabase,
updateByDefault = False, test = test,
recurse=recurse, callback=callback,
skipCapsuleOps = skipCapsuleOps)
except conaryclient.DependencyFailure, msg:
print msg
except errors.ClientError, msg:
log.error(msg)
db.close()
def restoreTrove(self, root, *troveList):
rmv = conarycmd.RestoreCommand()
cfg = copy.copy(self.cfg)
cfg.root = root
return rmv.runCommand(cfg, {}, ( 'conary', 'restore' ) + troveList)
def removeFile(self, root, *pathList):
rmv = conarycmd.RemoveCommand()
cfg = copy.copy(self.cfg)
cfg.root = root
return rmv.runCommand(cfg, {}, ( 'conary', 'remove' ) + pathList)
def build(self, str, name, vars = None, buildDict = None,
sourceVersion = None, serverIdx = 0, logLevel = log.WARNING,
returnTrove = None, macros=None, prep = False):
(built, d) = self.buildRecipe(str, name, d = buildDict,
vars = vars,
sourceVersion = sourceVersion,
logLevel = logLevel, macros = macros,
prep = prep)
if prep:
return
(name, version, flavor) = built[0]
if returnTrove is None:
returnTroveList = [ name ]
else:
name = name.split(':')[0]
if not isinstance(returnTrove, (list, tuple)):
l = ( returnTrove, )
else:
l = returnTrove
returnTroveList = []
for compName in l:
if | |
# Source repository: emailweixu/XWorld
"""
Copyright (c) 2017 Baidu Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import itertools
import numbers
import os
import random
from maze2d import spanning_tree_maze_generator
"""
Entity:
id - unique str for this entity
grid_types - "agent", "goal", "block", "boundary"
location - (x, y, z)
yaw - in radian
scale - (0, 1.0]
offset - [0, 1-scale]
name - name of the entity
asset_path - the model path
color - color of the entity
"""
class Entity:
    """A single object on the XWorld3D map.

    Attributes (see the module-level notes above):
        type       - grid type: "agent", "goal", "block" or "boundary"
        id         - unique string id for this entity (None until assigned)
        loc        - (x, y, z) grid location, or None if not yet placed
        yaw        - orientation in radians
        scale      - model scale in (0, 1.0]
        offset     - offset within the grid cell, in [0, 1 - scale]
        name       - name of the entity
        asset_path - path to the 3D model
        color      - color of the entity
    """
    def __init__(self, type, id=None, loc=None, yaw=0.0,
                 scale=1.0, offset=0.0, name=None, asset_path=None, color=None):
        # a location, when provided, must be a full (x, y, z) triple
        # (idiom fix: was "if not loc is None")
        if loc is not None:
            assert isinstance(loc, tuple) and len(loc) == 3
        self.type = type
        self.id = id
        self.loc = loc
        self.yaw = yaw
        self.scale = scale
        self.offset = offset
        self.name = name
        self.asset_path = asset_path
        self.color = color
class XWorld3DEnv(object):
PI_2 = 1.5707963
PI = 3.1415926
curriculum_check_period = 100
    def __init__(self, asset_path, max_height=10, max_width=10):
        # Build an empty environment and index all assets under asset_path.
        # per-task curriculum usage records, filled in by the tasks
        self.current_usage = {}
        self.action_successful = False
        self.grid_types = ["goal", "block", "agent", "boundary"]
        ## init dimensions
        self.max_height = max_height
        self.max_width = max_width
        self.__clean_env()
        ## event messages
        # NOTE(review): action_successful is assigned twice in this
        # constructor; one of the two assignments is redundant
        self.action_successful = False
        self.agent_sent = ""
        self.game_event = ""
        self.curriculum_check_counter = 0
        ## load all items from asset_path
        self.asset_path = asset_path
        self.all_object_paths = []
        # collect every .urdf model file found anywhere under asset_path
        for dirpath, _, files in os.walk(asset_path):
            for f in files:
                if f.endswith(".urdf"):
                    self.all_object_paths.append(os.path.join(dirpath, f))
        self.set_goal_subtrees([])
        ## read item properties
        # properties.txt maps a model path (relative to asset_path) to its
        # color; "//" lines and blank lines are ignored
        color_file = os.path.join(asset_path, "properties.txt")
        assert os.path.exists(color_file)
        with open(color_file, "r") as f:
            lines = f.read().splitlines()
        self.color_table = {os.path.join(asset_path, l.split()[0]) : l.split()[1]\
                            for l in lines if not l.startswith("//") and not l == ""}
############################ interface with Python tasks ############################
    def reset(self):
        """
        Reset the map: wipe the current environment state, let the task
        re-configure the layout (_configure is overridden by the user),
        then instantiate concrete models for every configured entity.
        """
        self.__clean_env()
        self._configure()
        self.__instantiate_entities()
def get_current_usage(self):
self.curriculum_check_counter += 1
if self.curriculum_check_counter < XWorld3DEnv.curriculum_check_period \
or not self.current_usage:
return 0
## we take the average usage across all the tasks
usage = sum([sum(l) / float(len(l)) for l in self.current_usage.values()]) \
/ len(self.current_usage)
self.curriculum_check_counter = 0
return usage
def set_dims(self, h, w):
"""
Set the dimensions of the map. If h or w is less than self.max_height or
self.max_width, then walls will be automatically padded. The python user should
use coordinates in [0, h) and [0, w).
"""
assert h > 1 and w > 1
assert h <= self.max_height and w <= self.max_width
self.height = h
self.width = w
self.boundaries = self.__add_boundaries()
self.available_grids = list(set(itertools.product(range(w), range(h), (0,))))
random.shuffle(self.available_grids)
self.changed = True
    def set_entity(self, type, loc=None, name=None):
        """
        Add an entity of type to loc which must be currently empty.
        Convenience wrapper around set_entity_inst; type is one of
        self.grid_types, loc an (x, y, z) tuple or None (a location is
        then assigned later during instantiation), name an optional
        object class name.
        """
        self.set_entity_inst(Entity(type=type, loc=loc, name=name))
def set_entity_inst(self, e):
if not e.loc is None:
assert e.loc in self.available_grids
self.available_grids.remove(e.loc)
self.entity_nums[e.type] += 1
self.entities.append(e)
self.changed = True
def delete_entity(self, x):
"""
Delete an entity on the current map
"""
self.entities.remove(x)
self.entity_nums[x.type] -= 1
self.available_grids.append(x.loc)
self.changed = True
    def move_entity(self, e, loc):
        """
        Move entity e from its current location to loc.
        Implemented as delete + re-insert so that the grid bookkeeping
        stays consistent; loc must be an available grid.
        """
        self.delete_entity(e)
        e.loc = loc
        self.set_entity_inst(e)
def set_goal_subtrees(self, subtrees):
"""
Set goal directory substrees so that only goals in the selected subtrees
will be sampled when generating the map. The user can use this function to
control the number of goal classes.
The change of goal subtrees will only be reflected for the next game, after
reset() is called. The current game still uses old goal subtrees.
"""
goal_path = os.path.join(self.asset_path, "goal")
self.object_paths = copy.deepcopy(self.all_object_paths)
if len(subtrees) > 0:
self.object_paths \
= [p for p in self.object_paths \
if not p.startswith(goal_path) or p.split("/")[-3] in subtrees]
## get a hierarchy of all possible objects
key = lambda p: '_'.join(p.split('_')[:-1])
objects = itertools.groupby(sorted(self.object_paths, key=key), key=key)
self.items = {t : {} for t in self.grid_types}
for k, g in objects:
type = [t for t in k.split("/") if t in self.grid_types][0]
assert type in self.items
self.items[type][os.path.basename(k)] = list(g)
    def get_max_dims(self):
        """
        Get the (max_height, max_width) of the map as a tuple.
        """
        return (self.max_height, self.max_width)
    def get_dims(self):
        """
        Get the current (height, width) of the map as a tuple.
        """
        return (self.height, self.width)
    def get_n(self, type):
        """
        Get the current number of entities on the map for type.
        type must be one of self.grid_types ("goal", "block", "agent",
        "boundary").
        """
        assert type in self.entity_nums
        return self.entity_nums[type]
def get_all_possible_names(self, type):
"""
Return all possible names for type
'goal' - all unique object names
'block' - all block names
'agent' - all agent names
"""
return self.items[type].keys()
def get_all_colors(self):
"""
Return all possible colors in xworld
"""
return list(set(self.color_table.values()))
def get_agent(self):
"""
Get the agent information: (entity, agent sentence, action success)
"""
agent = [e for e in self.entities if e.type == "agent"][0]
return (agent, self.agent_sent, self.action_successful)
def get_goals(self):
"""
Return all the goals on the current map
"""
return [e for e in self.entities if e.type == "goal"]
def get_blocks(self):
"""
Return all the blocks on the current map
"""
return [e for e in self.entities if e.type == "block"]
    def get_available_grids(self):
        """
        Return all the available (unoccupied) grids on the current map.
        Note: this returns the internal list itself, not a copy.
        """
        return self.available_grids
    def get_entities(self):
        """
        Return all the entities on the current map.
        Note: this returns the internal list itself, not a copy.
        """
        return self.entities
    def record_environment_usage(self, task_name, x):
        """
        Update the current environment usage for task_name.
        The higher the usage is, the better the agent handles the environment (so
        it might be a good time now to move to more difficult scenarios)
        This quantity can be used to generate a curriculum of the world.
        x - a sequence of usage numbers (get_current_usage averages it
            with sum(x)/len(x)); presumably values in [0, 1] -- confirm
            with callers.
        """
        self.current_usage[task_name] = x
######################## interface with C++ #############################
    def dump_curriculum_progress(self):
        """
        Return the current curriculum level for the C++ side.
        NOTE(review): self.current_level is not assigned anywhere in this
        part of the file; presumably set by a subclass or elsewhere --
        confirm before relying on it.
        """
        return self.current_level
def env_changed(self):
"""
Whether the environment has been changed by the teacher during the current
stage of the task. If yes, then teaching_task.cpp will notify the simulator to update
the game environment.
"""
ret = self.changed
self.changed = False
return ret
def cpp_get_entities(self):
"""
C++ code gets entities information. Used by the underlying simulator.
"""
actual_entities = [e.__dict__ for e in self.entities]
boundary_entities = [e.__dict__ for e in self.boundaries]
return actual_entities + boundary_entities
def update_entities_from_cpp(self, entities):
"""
Update the environment from C++. The changes might be due to
the environment dynamics or the agent's actions.
Entities is a list of python dicts.
"""
self.entity_nums = {t : 0 for t in self.grid_types}
self.entities = [Entity(**i) for i in entities if not self.__is_boundary(i["id"])]
for e in self.entities:
self.entity_nums[e.type] += 1
# update available grids
self.available_grids = set(itertools.product(range(self.width), range(self.height), (0,)))
occupied = set([e.loc for e in self.entities])
self.available_grids -= occupied
self.available_grids = list(self.available_grids)
random.shuffle(self.available_grids)
    def update_agent_sentence_from_cpp(self, sent):
        """
        Update the agent sentence from the CPP simulator.
        sent - the sentence uttered by the agent, later returned by
               get_agent().
        """
        self.agent_sent = sent
    def update_agent_action_success_from_cpp(self, successful):
        """
        Update the agent action success flag from the CPP simulator.
        successful - whether the agent's last action succeeded; later
                     returned by get_agent().
        """
        self.action_successful = successful
    def update_game_event_from_cpp(self, event):
        """
        Update the game event from the CPP simulator.
        """
        self.game_event = event
######################## private or protected #########################
    def _configure(self):
        """
        The user has to override this function to define how the map
        will be generated after each session resetting.
        Subclasses are expected to set up the entities here (see
        __instantiate_entities, which assumes they have been set).
        Raises NotImplementedError if not overridden.
        """
        raise NotImplementedError()
def __instantiate_entities(self):
"""
For each entity, select an instance from the object class it belongs to,
after which its properties are set.
The entities should have been set in _configure()
"""
Y, X = self.get_dims()
maze = spanning_tree_maze_generator(X, Y)
blocks = [(j, i, 0) for i,m in enumerate(maze) for j,b in enumerate(m) if b == '#']
## maybe not all blocks of the maze will be used later
random.shuffle(blocks)
## first remove all maze blocks from the available set
for b in blocks:
if b in self.available_grids:
self.available_grids.remove(b)
## select a random object path for each non-block entity
for i, e in enumerate(self.entities):
if e.name is None:
e.name = random.choice(self.get_all_possible_names(e.type))
e.id = "%s_%d" % (e.name, i)
if e.asset_path is None:
icons = self.items[e.type][e.name]
e.asset_path = random.choice(icons)
e.color = self.color_table[e.asset_path]
if e.loc is None and e.type != "block":
assert len(self.available_grids) > 0
e.loc = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.